"""Integrations with external services."""
|
class Elf:
def nall_nin(self):
print('Elf says: Calling the Overlord ...')
class Dwarf:
def estver_narho(self):
print('Dwarf says: Calling the Overlord ...')
class Human:
def ring_mig(self):
print('Human says: Calling the Overlord ...')
if __name__ == '__main__':
minions = [Elf(), Dwarf(), Human()]
for minion in minions:
if isinstance(minion, Elf):
minion.nall_nin()
elif isinstance(minion, Dwarf):
minion.estver_narho()
else:
minion.ring_mig()
|
"""
# ============================================================================= #
| Project: [MuJoCo Practices]
| Title: Python + mujoco-py
| Author: Moses C. Nah
| Email: [Moses ] mosesnah@mit.edu
| Creation Date: Monday, September 7th, 2020
# ============================================================================= #
# ============================================================================= #
| (0A) [DESCRIPTION]
|
| - Python script for running multiple models with their corresponding controllers.
| - This will be useful for educational purposes.
|
# ============================================================================= #
# ============================================================================= #
| (0B) [KEYWORDS DEFINITION]
| : use the following "keywords" in comments as follows:
| - [BACKUP] [NAME]: Backup code in case it is needed in the near future
| - [TIP]: The reason why the following code was written
| - [TODO]: A part that needs modification in the near future
# ============================================================================= #
# ============================================================================= #
| (0C) [PYTHON NAMING CONVENTION]
| Our project will follow the python naming convention, [REF]: https://stackoverflow.com/a/8423697/13437196
| ---------------------------------------------------------------------------------------------------------
| module_name, package_name, ClassName, method_name, ExceptionName, function_name,
| GLOBAL_CONSTANT_NAME, global_var_name, instance_var_name, function_parameter_name, local_var_name.
# ============================================================================= #
# ============================================================================= #
| (0D) [DOCOPT PARSE]
| From now on, the written comments are specifically for "docopt" function.
| [REF] http://docopt.org/
# ============================================================================= #
Usage:
run.py [options]
run.py -h | --help
Arguments:
Options:
-h --help Show the usage and options
--version Show version
-s --saveData Save the necessary data from the MuJoCo simulation as a txt file in the current directory
[default: False]
-r --recordVideo Record simulation video as a .mp4 file in the current directory
[default: False]
--runTime=TIME The total time of the simulation
[default: 5.0]
--startTime=TIME The start time of the movement, or controller
[default: 1.0]
--modelName=NAME Setting the xml model file name which will be used for the simulation.
The leading number of the xml model file name indicates the type of simulation.
[default: 1_mass_PD.xml]
--camPos=STRING Setting the Camera Position of the simulation.
default is None
--quiet Print less text
[default: False]
--verbose Print more text
[default: False]
Examples, try:
python3 run.py --help
python3 run.py --version
python3 run.py --modelName="mass_PD.xml" --findCamPos
"""
# ============================================================================= #
# (0A) [IMPORT MODULES]
# Importing necessary modules + declaring basic configurations for running the whole mujoco simulator.
# [Built-in modules]
import sys
import os
import re
import argparse
import datetime
import shutil
# [3rd party modules]
import numpy as np
import cv2
# [3rd party modules] - mujoco-py
try:
import mujoco_py as mjPy
except ImportError as e:
raise ImportError( "{}. (HINT: you need to install mujoco_py, \
and also perform the setup instructions here: \
https://github.com/openai/mujoco-py/.)".format( e ) )
from docopt import docopt
# [3rd party modules] - pyPlot for graphs
import matplotlib.pyplot as plt
# import nevergrad as ng # [BACKUP] Needed for Optimization
# [Local modules]
from modules.constants import Constants
from modules.controllers import ( PID_Controller )
from modules.utils import ( args_cleanup, my_print, my_mkdir, my_rmdir )
from modules.simulation import Simulation
# from modules.output_funcs import (dist_from_tip2target, tip_velocity )
# from modules.input_ctrls import ( ImpedanceController, Excitator, ForwardKinematics, PositionController )
# from modules.utils import ( add_whip_model, my_print, my_mkdir, args_cleanup,
# my_rmdir, str2float, camel2snake, snake2camel )
# ============================================================================= #
# ============================================================================= #
# (0B) [SYSTEM SETTINGS]
# [Printing Format]
np.set_printoptions( linewidth = Constants.PRINT_LW ,
suppress = True ,
precision = Constants.PREC ) # Setting the numpy print options, useful for printing out data with consistent pattern.
args = docopt( __doc__, version = Constants.VERSION ) # Parsing the Argument
args = args_cleanup( args, '--' ) # Cleaning up the dictionary, discard prefix string '--' for the variables
if sys.version_info[ : 3 ] < ( 3, 0, 0 ): # Simple version check of the python version. python3+ is recommended for this file.
my_print( NOTIFICATION = " PYTHON3+ is recommended for this script " )
# If video needs to be recorded or data should be saved, then append 'saveDir' element to args dictionary
args[ 'saveDir' ] = my_mkdir( ) if args[ 'recordVideo' ] or args[ 'saveData' ] else None
assert not ( args[ 'quiet' ] and args[ 'verbose' ] ) # If quiet and verbose are true at the same time, assert!
my_print( saveDir = args[ 'saveDir' ] )
# ============================================================================= #
# ============================================================================= #
def main( ):
# ============================================================================= #
model_name = args[ 'modelName' ] # Calling Model
my_print( modelName = model_name )
mySim = Simulation( model_name = model_name,
arg_parse = args )
sim_type = model_name[ 0 ] # The first character of the model name indicates the simulation type.
if "1" == sim_type: # 1: Simple Mass Simulation
controller_object = PID_Controller( mySim.mjModel, mySim.mjData,
Kp = 0, Kd = 0, Ki = 0, ref_type = 0)
mySim.attach_controller( controller_object )
mySim.run( )
if args[ 'saveDir' ] is not None:
mySim.save_simulation_data( args[ 'saveDir' ] )
shutil.copyfile( Constants.MODEL_DIR + model_name,
args[ 'saveDir' ] + model_name )
mySim.reset( )
# ============================================================================= #
if __name__ == "__main__":
try:
main( )
except KeyboardInterrupt:
print( "Ctrl-C was inputted. Halting the program. ", end = ' ' )
if args[ 'saveDir' ] is not None:
my_rmdir( args[ 'saveDir' ] )
except ( FileNotFoundError, IndexError, ValueError ) as e:
print( e, end = ' ' )
if args[ 'saveDir' ] is not None:
my_rmdir( args[ 'saveDir' ] )
|
class Solution:
def judgeCircle(self, moves: str) -> bool:
x = y = 0
for m in moves:
if m == 'R':
x += 1
elif m == 'L':
x -= 1
elif m == 'U':
y -= 1
else:
y += 1
return x == 0 and y == 0
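# Example (illustrative, not part of the original solution): every 'R' is cancelled
# by an 'L' and every 'U' by a 'D', so the walk returns to the origin exactly when
# the counts balance out.
assert Solution().judgeCircle("UD")
assert not Solution().judgeCircle("LL")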
|
str1 = "P@#yn26at^&i5ve"
charCount = 0
digitCount = 0
symbolCount = 0
for i in str1:
if i.isalpha():
charCount += 1
elif i.isnumeric():
digitCount += 1
else:
symbolCount += 1
print("Chars = ", charCount)
print("Digits = ", digitCount)
print("Symbol = ", symbolCount)
|
# -*- coding: utf-8 -*-
#
from collections import namedtuple
import numpy as np
XY2D = namedtuple('XY2D', ['x', 'y'])
XY2D.__doc__ = """
Namedtuple for 2D point coordinate
"""
def point_rot2D(target=XY2D(1, 1), origin=XY2D(0, 0), radians=0):
"""
Rotate a 2D point around an origin point by an angle given in radians
Parameters
----------
target : XY2D
A 2D point coordinate
origin : XY2D
Rotation origin location
radians : float
Rotation angle in radians
Returns
-------
result : XY2D
Result after rotation
"""
cos_rad = np.cos(radians)
sin_rad = np.sin(radians)
adjusted = XY2D(x=target.x - origin.x,
y=target.y - origin.y)
return XY2D(x=origin.x + cos_rad * adjusted.x - sin_rad * adjusted.y,
y=origin.y + sin_rad * adjusted.x + cos_rad * adjusted.y)
def point_rot2D_y_inv(target=XY2D(1, 1), origin=XY2D(0, 0), radians=0):
"""
Rotate a 2D point around an origin point by an angle given in radians,
then flip the y coordinate at the end
Parameters
----------
target : XY2D
A 2D point coordinate
origin : XY2D
Rotation origin location
radians : float
Rotation angle in radians
Returns
-------
result : XY2D
Result after rotation and flipping in y
"""
result = point_rot2D(target=XY2D(x=target.x, y=-target.y),
origin=XY2D(x=origin.x, y=-origin.y),
radians=radians)
return XY2D(x=result.x, y=-result.y)
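# Example (illustrative, not part of the original module): rotating the point (1, 0)
# by 90 degrees about the origin gives approximately (0, 1).
_example = point_rot2D(target=XY2D(1, 0), origin=XY2D(0, 0), radians=np.pi / 2)
assert abs(_example.x) < 1e-9 and abs(_example.y - 1.0) < 1e-9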
|
from time import sleep
import platform
import os
red = '\033[1;31m'
yel = '\033[;33m'
yelf = '\033[1;33m'
fim = '\033[m'
blue = '\033[1;34m'
green = '\033[1;32m'
print('Social critique: A game with no name')
nomej = str(input('What is your name? : '))
def clear():
if platform.system() == 'Windows':
os.system("cls")
elif platform.system() == "Linux":
os.system("clear")
else:
os.system("clear")
sleep(2)
clear()
print('{}Welcome, {}, this game was made by Macacci (even though you may not know who that is...{})'.format(yel, nomej, fim))
sleep(2)
clear()
tutorial = str(input('{}When the text is yellow, it is story, when it is {}{}bright yellow{}{}, it is a choice, when it is {}{}blue{}{}, it is your own speech,\nwhen it is {}{}red{}{}, it is an NPC speaking, and when it is {}{}green{}{}, it indicates something...\nGot it?\n1) Yes\n2) No\n>>> '.format(yel, fim, yelf, fim, yel, fim, blue, fim, yel, fim, red, fim, yel, fim, green, fim, yel)))
if tutorial != '1' and tutorial != '2':
print('Invalid option, try again')
while tutorial != '1':
sleep(1)
clear()
sleep(1)
tutorial = str(input('{}=Tutorial=\nWhen the text is yellow, it is story, when it is {}{}bright yellow{}{}, it is a choice, when it is {}{}blue{}{}, it is your own speech,\nwhen it is {}{}red{}{}, it is an NPC speaking, and when it is {}{}green{}{}, it indicates something...\nGot it?\n1) Yes\n2) No\n>>> '.format(yel, fim, yelf, fim, yel, fim, blue, fim, yel, fim, red, fim, yel, fim, green, fim, yel)))
while tutorial != '1':
sleep(1)
clear()
sleep(1)
tutorial = str(input('{}=Tutorial=\nWhen the text is yellow, it is story, when it is {}{}bright yellow{}{}, it is a choice, when it is {}{}blue{}{}, it is your own speech,\nwhen it is {}{}red{}{}, it is an NPC speaking, and when it is {}{}green{}{}, it indicates something...\nGot it?\n1) Yes\n2) No\n>>> '.format(yel, fim, yelf, fim, yel, fim, blue, fim, yel, fim, red, fim, yel, fim, green, fim, yel)))
sleep(2)
clear()
print('{}Well, you were basically born in 1123 in a village called koskavinitsky{}'.format(yel, fim))
sexo = str(input('{}Will you be a boy or a girl?\n1) Boy\n2) Girl\n>>> {}'.format(yelf, fim)))
while sexo != '1' and sexo != '2':
print('Invalid option, try again')
sleep(2)
clear()
sexo = str(input('{}Will you be a boy or a girl?\n1) Boy\n2) Girl\n>>> {}'.format(yelf, fim)))
if sexo == '1':
sleep(2)
clear()
print('{}And so a new boy is born, named {}, and he is normal (apparently){}'.format(yel, nomej, fim))
elif sexo == '2':
print('{}And so a new girl is born, named {}, and she is normal (apparently){}'.format(yel, nomej, fim))
|
import json
import logging
import requests
import gzip
from os.path import abspath, dirname, join
from os import remove
from pydantic import BaseModel
from typing import List
LOCAL_PATH_TO_DATA = join(
abspath(dirname(__file__)),
'../../data'
)
FILTERED_FILENAME = 'filtered_list_of_works.json'
# Wellcome_DATASET_URL contains the entire list of works
# S3_DATASET contains a subset, already filtered by description and
# pre-annotated.
WELLCOME_DATASET_URL = "https://data.wellcomecollection.org/" \
"catalogue/v2/works.json.gz"
S3_DATASET_URL = "https://wellcome-collection-data.s3.eu-west-2" \
".amazonaws.com/annotated-data/filtered_list_of_works.json"
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class SimilarWork(BaseModel):
id: str
title: str
full_image_uri: str
collection_url: str
class ArtWork(BaseModel):
id: str
title: str
description: str
full_image_uri: str
collection_url: str
contributors: List
similar_works: List[SimilarWork]
def convert_iiif_width(uri, width="full"):
""" Utility to convert IIIF to image URI with given width or "full" """
uri_end = uri.split("//")[1].split("/")
uri_end[4] = ("full" if width == "full" else str(width) + ",")
return "https://" + "/".join(uri_end)
def get_data(exclude_used=False,
exclude_sensitive=False,
only_interesting=True):
"""
Gets data from the cached file, if it fails, downloads from the s3 bucket,
and if the s3 bucket fails, downloads from the Wellcome Collection API.
Args:
exclude_used: whether to exclude works with the key "used"
exclude_sensitive: whether to exclude works with the key "sensitivity"
Returns: dict of works
"""
logger.info("Recovering dataset.")
try:
with open(join(LOCAL_PATH_TO_DATA, FILTERED_FILENAME), 'r') as f:
works = json.load(f)
logger.info("Hit cache! Loading from local file.")
except (FileNotFoundError, json.decoder.JSONDecodeError):
logger.info("Cache not loaded, downloading dataset from S3.")
try:
works = download_data_from_annotated_s3_bucket()
except Exception:
logger.info("Could not connect to S3 bucket. "
"Downloading dataset from Wellcome API URI")
works = download_data_from_source()
works = {
idx: work for idx, work in works.items()
if (not work.get('used') or not exclude_used) and
(not work.get('sensitivity') or not exclude_sensitive) and
(work.get('interesting') or not only_interesting)
}
if not exclude_used:
for id in works.keys():
works[id]["used"] = False
logger.info("Finished loading {} filtered art works.".format(len(works)))
return works
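# Example (illustrative, assuming the cached file or one of the remote sources is reachable):
# works = get_data(exclude_used=True, exclude_sensitive=True)
# some_work = next(iter(works.values()))
# some_work["title"], some_work["full_image_uri"]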
def update_data(work_id, **kwargs):
""" Update json file with boolean kwargs from false to true. """
logger.info("Updating data")
try:
f = open(join(LOCAL_PATH_TO_DATA, FILTERED_FILENAME), 'r')
works = json.load(f)
f.close()
except (FileNotFoundError, json.decoder.JSONDecodeError):
logger.info("File not loaded, downloading data first.")
works = get_data()
finally:
logging.info("Work {} updated with {}".format(work_id, kwargs))
for key, value in kwargs.items():
if not works[work_id].get(key):
works[work_id][key] = value
save_data_locally(works)
def download_data_from_annotated_s3_bucket():
""" Gets annotated data, pre-filtered and with annotated sensitivity """
works = requests.get(S3_DATASET_URL).json()
save_data_locally(works)
return works
def save_data_locally(works):
""" Saves a json with works locally"""
with open(join(LOCAL_PATH_TO_DATA, FILTERED_FILENAME), 'w') as f:
json.dump(works, f)
def download_data_from_source():
""" Downloads data from Wellcome API """
counter = 0
filtered_list_of_works = []
# Filters artwork that has description with length in [min_len, max_len]
min_len = 200
max_len = 2000
logger.info("Downloading data.")
dataset_content = requests.get(WELLCOME_DATASET_URL).content
logger.info("Saving tmp files.")
tmp_file = join(LOCAL_PATH_TO_DATA, 'works.json.gz')
with open(tmp_file, 'wb') as f:
f.write(dataset_content)
logger.info("Unzipping file and reading raw content.")
for line in gzip.open(tmp_file, 'rb'):
if counter % 100000 == 0:
logger.info("Processing work number {}".format(counter))
counter += 1
json_line = json.loads(line.decode('utf-8'))
if json_line.get("thumbnail") and json_line["thumbnail"].get("url") \
and json_line.get("description") \
and json_line.get("contributors") \
and min_len < len(json_line.get("description")) < max_len:
filtered_list_of_works += [json_line]
filtered_list_of_works = sorted(filtered_list_of_works,
key=lambda x: len(x['description']),
reverse=True)
works = [
{
"id": work["id"],
"title": work["title"],
"description": work["description"],
"full_image_uri": convert_iiif_width(work["thumbnail"]["url"]),
"collection_url": "https://wellcomecollection.org/works/" +
work["id"],
"contributors": work["contributors"]
}
for work in filtered_list_of_works
]
logger.info("Removing tmp file.")
remove(tmp_file)
# Converts to dictionary indexed by id for O(1) access
works = {work['id']: work for work in works}
with open(join(LOCAL_PATH_TO_DATA, FILTERED_FILENAME), 'w') as f:
json.dump(works, f)
return works
if __name__ == "__main__":
get_data()
#download_data_from_source()
|
#! /usr/bin/env python3
# coding: utf-8
from __future__ import annotations
import datetime as dt
import html
import random
import re
import typing as t
import aiosql
import bbcode
from htmlslacker import HTMLSlacker
from psycopg2.extensions import connection
from gargbot_3000 import config
forum_queries = aiosql.from_path("sql/post.sql", "psycopg2")
msn_queries = aiosql.from_path("sql/message.sql", "psycopg2")
def _sanitize_post(inp, bbcode_uid: str):
smls = re.compile(
r'<!-- s.*? --><img src=\\?"\{SMILIES_PATH\}/.*?\\?" '
'alt=\\?"(.*?)\\?" title=\\?".*?" /><!-- s.*? -->'
)
inp = re.sub(smls, r"\1", inp)
inp = html.unescape(inp)
inp = inp.replace(":" + bbcode_uid, "")
img_tags = re.compile(r"\[/?img\]")
inp = re.sub(img_tags, "", inp)
youtube_embeds = re.compile(
r'\[html\]<iframe width="\d+" height="\d+" '
'src="//www.youtube.com/embed/([^"]+)" frameborder='
r'"0" allowfullscreen></iframe>\[/html\]'
)
inp = re.sub(youtube_embeds, r"https://www.youtube.com/watch?v=\1", inp)
inp = bbcode.render_html(
inp, drop_unrecognized=True, escape_html=False, replace_links=False
)
inp = inp.replace('rel="nofollow"', "")
inp = HTMLSlacker(inp).get_output()
return inp
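# Example (illustrative; phpBB stores bbcode tags suffixed with the post's bbcode_uid):
# _sanitize_post('[b:1a2b3c]hello[/b:1a2b3c]', '1a2b3c')
# strips the uid, renders the bbcode to HTML and converts it to Slack markup (roughly '*hello*').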
def forum(
conn: connection, args: t.Optional[list[str]]
) -> tuple[str, str, str, dt.datetime, str, str]:
user = args[0] if args else None
desc = " "
post = None
if user:
post = forum_queries.post_for_user(conn, slack_nick=user)
if not post:
desc = (
f"Gargling not found: {user}. Husk å bruke slack nick. "
"Her er et tilfeldig quote i stedet."
)
if not post:
post = forum_queries.random_post(conn)
text = _sanitize_post(post["content"], post["bbcode_uid"])
avatarurl = f"{config.forum_url}/download/file.php?avatar={post['avatar']}".strip()
url = f"{config.forum_url}/viewtopic.php?p={post['id']}#p{post['id']}"
return (text, post["slack_nick"], avatarurl, post["posted_at"], url, desc)
def msn(
conn: connection, args: t.Optional[list[str]]
) -> tuple[dt.datetime, list, t.Optional[str]]:
user = args[0] if args else None
desc = None
messages = None
if user:
messages = msn_queries.message_session_for_user_id(conn, slack_nick=user)
if messages:
first = next(i for i, message in enumerate(messages) if message["is_user"])
else:
desc = (
f"Gargling not found: {user}. Husk å bruke slack nick. "
"Her er en tilfeldig samtale i stedet."
)
if not messages:
messages = msn_queries.random_message_session(conn)
if len(messages) <= 10:
first = 0
else: # no test coverage
first = random.randint(0, len(messages) - 10)
conversation = messages[first : first + 10]
date = conversation[0]["sent_at"].strftime("%d.%m.%y %H:%M")
squashed: list[list[str]] = []
for message in conversation:
if squashed:
prev_from_user, prev_content, prev_color = squashed[-1]
if message["from_user"] == prev_from_user: # no test coverage
squashed[-1][1] = "\n".join([prev_content, message["content"]])
continue
squashed.append([message["from_user"], message["content"], message["color"]])
return date, squashed, desc
|
import datetime
import pandas as pd
from secrets import CLIENT_ID, CLIENT_SECRET
import requests
import json
import spotipy
from spotipy.oauth2 import SpotifyOAuth
def get_spotify_data():
# Request headers
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=CLIENT_ID, client_secret=CLIENT_SECRET,
redirect_uri="http://localhost/callback", scope="user-read-recently-played"))
# Variables to be used for request
today = datetime.datetime.now()
yesterday = today - datetime.timedelta(days=1)
yesterday_unix_timestamp = int(yesterday.timestamp()) * 1000
# Stores response as json object
data = sp.current_user_recently_played(after=yesterday_unix_timestamp)
print(data)
# Lists to store extracted data
song_names = []
artist_names = []
played_at = []
timestamps = []
# Extracting only relevant data from json response
for song in data["items"]:
song_names.append(song["track"]["name"])
artist_names.append(song["track"]["album"]["artists"][0]["name"])
played_at.append(song["played_at"])
timestamps.append(song["played_at"][0:10])
# Create dictionary from lists to turn into pandas dataframe
song_dict = {
"song_name": song_names,
"artist_name": artist_names,
"played_at": played_at,
"timestamp": timestamps
}
# Pandas dataframe creation
song_df = pd.DataFrame(song_dict, columns = ["song_name", "artist_name", "played_at", "timestamp"])
return song_df
|
import numpy as np
def waysToArrange(N, K, k):
C = np.zeros((N + 1, N + 1))
for i in range(N + 1):
for j in range(i + 1):
if (j == 0 or j == i):
C[i][j] = 1
else:
C[i][j] = (C[i - 1][j - 1] +
C[i - 1][j])
dp = np.zeros((K + 1))
count = 0
dp[0] = 1
for i in range(K):
dp[i + 1] = (dp[i] * C[count + k[i] - 1][k[i] - 1])
count += k[i]
return dp[K]
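# As implemented above: dp[K] is the product over groups i of
# C(count_i + k[i] - 1, k[i] - 1), where count_i = k[0] + ... + k[i-1]
# and C is the Pascal's-triangle table of binomial coefficients.
# e.g. N = 4, k = [2, 2] gives C(1, 1) * C(3, 1) = 3.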
if __name__ == "__main__":
N = 4
k = [2, 2]
K = len(k)
print(int(waysToArrange(N, K, k)))
|
from django.db import models
class Parent(models.Model):
name = models.CharField(max_length=200)
age = models.PositiveIntegerField()
def __str__(self):
return f'Name: {self.name}, Age: {self.age}'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import socket
import traceback
from functools import wraps
import eventlet
eventlet.monkey_patch(thread=True, time=True)
from builtins import bytes
import click
import msgpack
import flask
from flask import (Flask, render_template, make_response, copy_current_request_context,
jsonify, request, Response)
from flask_socketio import SocketIO
from bowtie._component import COMPONENT_REGISTRY
# python 2 compatibility
try:
FileNotFoundError
except NameError:
FileNotFoundError = OSError
class GetterNotDefined(AttributeError):
pass
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
return username == 'username' and password == 'password'
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
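# Example (illustrative, not a route defined in this file): protecting an endpoint with basic auth
# @app.route('/admin')
# @requires_auth
# def admin():
#     return 'only for authenticated users'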
# import the user created module
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import yelp
app = Flask(__name__)
app.debug = True
socketio = SocketIO(app, binary=True, path='' + 'socket.io')
# not sure if this is secure or how much it matters
app.secret_key = os.urandom(256)
def context(func):
def foo():
with app.app_context():
func()
return foo
class Scheduler(object):
def __init__(self, seconds, func):
self.seconds = seconds
self.func = func
self.thread = None
def start(self):
self.thread = eventlet.spawn(self.run)
def run(self):
ret = eventlet.spawn(context(self.func))
eventlet.sleep(self.seconds)
try:
ret.wait()
except:
traceback.print_exc()
self.thread = eventlet.spawn(self.run)
def stop(self):
if self.thread:
self.thread.cancel()
@app.route('/')
def index():
return render_template('index.html')
@app.route('/static/bundle.js')
def getbundle():
basedir = os.path.dirname(os.path.realpath(__file__))
bundle_path = basedir + '/static/bundle.js'
bundle_path_gz = bundle_path + '.gz'
try:
if os.path.getmtime(bundle_path) > os.path.getmtime(bundle_path_gz):
return open(bundle_path, 'r').read()
else:
bundle = open(bundle_path_gz, 'rb').read()
response = flask.make_response(bundle)
response.headers['Content-Encoding'] = 'gzip'
response.headers['Vary'] = 'Accept-Encoding'
response.headers['Content-Length'] = len(response.data)
return response
except FileNotFoundError:
if os.path.isfile(bundle_path_gz):
bundle = open(bundle_path_gz, 'rb').read()
response = flask.make_response(bundle)
response.headers['Content-Encoding'] = 'gzip'
response.headers['Vary'] = 'Accept-Encoding'
response.headers['Content-Length'] = len(response.data)
return response
else:
return open(bundle_path, 'r').read()
@socketio.on('1#change')
def _(*args):
def wrapuser():
uniq_events = set()
uniq_events.update([('1#change', 1, 'get')])
uniq_events.remove(('1#change', 1, 'get'))
event_data = {}
for ev in uniq_events:
comp = COMPONENT_REGISTRY[ev[1]]
if ev[2] is None:
ename = ev[0]
raise GetterNotDefined('{ctype} has no getter associated with event "on_{ename}"'
.format(ctype=type(comp), ename=ename[ename.find('#') + 1:]))
getter = getattr(comp, ev[2])
event_data[ev[0]] = getter()
# if there is no getter, then there is no data to unpack
# if there is a getter, then we need to unpack the data sent
event_data['1#change'] = COMPONENT_REGISTRY[1]._get(
msgpack.unpackb(bytes(args[0]['data']), encoding='utf8')
)
# gather the remaining data from the other events through their getter methods
user_args = []
user_args.append(event_data['1#change'])
# finally call the user method
yelp.viz(*user_args)
foo = copy_current_request_context(wrapuser)
eventlet.spawn(foo)
@socketio.on('3#click')
def _(*args):
def wrapuser():
uniq_events = set()
uniq_events.update([('3#click', 3, 'get')])
uniq_events.remove(('3#click', 3, 'get'))
event_data = {}
for ev in uniq_events:
comp = COMPONENT_REGISTRY[ev[1]]
if ev[2] is None:
ename = ev[0]
raise GetterNotDefined('{ctype} has no getter associated with event "on_{ename}"'
.format(ctype=type(comp), ename=ename[ename.find('#') + 1:]))
getter = getattr(comp, ev[2])
event_data[ev[0]] = getter()
# if there is no getter, then there is no data to unpack
# if there is a getter, then we need to unpack the data sent
event_data['3#click'] = COMPONENT_REGISTRY[3]._get(
msgpack.unpackb(bytes(args[0]['data']), encoding='utf8')
)
# gather the remaining data from the other events through their getter methods
user_args = []
user_args.append(event_data['3#click'])
# finally call the user method
yelp.vizplace(*user_args)
foo = copy_current_request_context(wrapuser)
eventlet.spawn(foo)
@click.command()
@click.option('--host', '-h', default='0.0.0.0', help='Host IP')
@click.option('--port', '-p', default=9991, help='port number')
def main(host, port):
scheduled = not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true'
if scheduled:
scheds = []
for sched in scheds:
sched.start()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((host, port))
if result == 0:
raise Exception('Port {} is unavailable on host {}, aborting.'.format(port, host))
socketio.run(app, host=host, port=port)
if scheduled:
for sched in scheds:
sched.stop()
if __name__ == '__main__':
main()
|
from data_sources import DataSource
from config import config
import random as rand
class JSONonsense:
CONFIG = config["JSONONSENSE"]
MIN_KEY_PER_OBJECT = int(CONFIG['MIN_KEY_PER_OBJECT'])
MAX_KEY_PER_OBJECT = int(CONFIG['MAX_KEY_PER_OBJECT'])
MAX_DEPTH = int(CONFIG['MAX_DEPTH'])
OBJ_CHANCE = float(CONFIG['OBJ_CHANCE'])
def __init__(self, data_source: DataSource = DataSource(), config=None):
if config:
for key, value in config:
if hasattr(JSONonsense, key):
setattr(JSONonsense, key, value)
if JSONonsense.MAX_DEPTH > 20:
JSONonsense.MAX_DEPTH = 20
self.data_source = data_source
self.keys = data_source.get_keys()
self.vals = data_source.get_vals()
self.key_amount = 0
self.val_amount = 0
def _get_key(self):
try:
key = next(self.keys)
if not key:
raise StopIteration
except StopIteration:
self.data_source.refresh_keys()
key = next(self.keys)
self.key_amount += 1
return key
def _get_val(self):
try:
val = next(self.vals)
if not val:
raise StopIteration
except StopIteration:
self.data_source.refresh_vals()
val = next(self.vals)
self.val_amount += 1
return val
def populate(self, depth=0):
depth += 1
if depth == JSONonsense.MAX_DEPTH + 1:
return
obj = {}
for _ in range(rand.randint(JSONonsense.MIN_KEY_PER_OBJECT, JSONonsense.MAX_KEY_PER_OBJECT)):
key = self._get_key()
# Roll the dice, could be a value, could be another object!
if rand.random() < JSONonsense.OBJ_CHANCE:
obj[key] = self.populate(depth)
else:
val = self._get_val()
obj[key] = val
return obj
def create(self):
return self.populate()
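# Example output shape (illustrative; real keys and values come from the DataSource):
# {"alpha": {"beta": "x", "gamma": "y"}, "delta": "z"}
# Nesting stops once MAX_DEPTH is reached, at which point populate() returns None.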
if __name__ == "__main__":
from pprint import pprint
pprint(JSONonsense().create())
|
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.cog.SuitFollowBossBehavior
from lib.coginvasion.cog.SuitPathBehavior import SuitPathBehavior
from lib.coginvasion.cog.SuitHabitualBehavior import SuitHabitualBehavior
from lib.coginvasion.cog import SuitAttacks
from lib.coginvasion.globals import CIGlobals
from SuitFlyToRandomSpotBehavior import SuitFlyToRandomSpotBehavior
from SuitAttackBehavior import SuitAttackBehavior
import SuitPathDataAI, SuitUtils
from direct.fsm import ClassicFSM, State
from direct.task.Task import Task
from direct.interval.IntervalGlobal import Sequence, Wait, Func
from direct.distributed.ClockDelta import globalClockDelta
import random
class SuitFollowBossBehavior(SuitPathBehavior, SuitHabitualBehavior):
LEEWAY_DISTANCE = 4
MAX_BOSS_HELPERS = 5
HEAL_SPEED = 50.0
def __init__(self, suit, boss):
SuitPathBehavior.__init__(self, suit, exitOnWalkFinish=False)
self.fsm = ClassicFSM.ClassicFSM('SuitFollowBossBehavior', [State.State('off', self.enterOff, self.exitOff),
State.State('follow', self.enterFollow, self.exitFollow),
State.State('protect', self.enterProtect, self.exitProtect)], 'off', 'off')
self.fsm.enterInitialState()
self.boss = boss
self.bossSpotKey = None
self.healInProgress = False
self.suitHealTrack = None
self.followBossTaskName = self.suit.uniqueName('followBoss')
self.pathFinder = SuitPathDataAI.getPathFinder(self.suit.hood)
return
def isHealing(self):
return self.healInProgress
def enterOff(self):
pass
def exitOff(self):
pass
def enter(self):
SuitPathBehavior.enter(self)
self.fsm.request('follow')
def exit(self):
SuitPathBehavior.exit(self)
self.fsm.requestFinalState()
taskMgr.remove(self.followBossTaskName)
def unload(self):
SuitPathBehavior.unload(self)
del self.boss
del self.followBossTaskName
del self.bossSpotKey
del self.fsm
del self.suitHealTrack
del self.healInProgress
def enterProtect(self):
base.taskMgr.add(self.__protectTick, self.suit.uniqueName('protectBossTick'))
def __protectTick(self, task):
if self.boss.isDead():
self.suit.b_setAnimState('neutral')
self.exit()
return task.done
if self.bossSpotKey != self.boss.getCurrentPath():
self.fsm.request('follow')
return task.done
if self.isHealing():
return task.cont
if self.boss.brain.currentBehavior.__class__ == SuitAttackBehavior:
choice = random.randint(0, 1)
if choice == 0:
return task.cont
if choice == 1:
self.doHeal()
return task.cont
def __attemptToHealBoss(self, hp, currBossPos):
if self.isBossAvailable():
if (self.boss.getPos(render) - currBossPos).length() <= 1:
self.boss.b_setHealth(self.boss.getHealth() + hp)
self.boss.d_announceHealth(1, hp)
self.suit.d_handleWeaponTouch()
def isBossAvailable(self):
if not self.boss.isEmpty() and not hasattr(self.boss, 'DELETED') and not self.boss.isDead():
return True
return False
def __disableBoss(self):
if self.isBossAvailable():
if self.boss.getBrain():
self.boss.getBrain().stopThinking()
self.boss.b_setAnimState('neutral')
def __enableBoss(self):
if self.isBossAvailable():
self.boss.getBrain().startThinking()
def __toggleHeal(self):
if self.healInProgress:
self.healInProgress = False
else:
self.healInProgress = True
def doHeal(self):
self.__toggleHeal()
attack = random.randint(0, 6)
attackName = SuitAttacks.SuitAttackLengths.keys()[attack]
timestamp = globalClockDelta.getFrameNetworkTime()
self.suit.sendUpdate('doAttack', [attack, self.boss.doId, timestamp])
distance = self.suit.getDistance(self.boss)
timeUntilHeal = distance / self.HEAL_SPEED
timeUntilRelease = 1.0
self.suit.d_setChat(CIGlobals.SuitHealTaunt)
hp = int(self.suit.maxHealth / SuitAttacks.SuitAttackDamageFactors[attackName])
if hp == 0:
hp = 1
if self.boss.getHealth() + hp > self.boss.getMaxHealth():
hp = self.boss.getMaxHealth() - self.boss.getHealth()
if attackName != 'glowerpower':
timeUntilRelease = 2.2
self.suitHealTrack = Sequence(Wait(timeUntilRelease + timeUntilHeal), Func(self.__attemptToHealBoss, hp, self.boss.getPos(render)), Func(self.faceOtherDirection), Wait(3.0), Func(self.__toggleHeal))
self.suitHealTrack.start()
def faceOtherDirection(self):
self.suit.b_setAnimState('neutral')
self.suit.setH(self.suit.getH() - 180)
self.suit.d_setH(self.suit.getH())
def exitProtect(self):
base.taskMgr.remove(self.suit.uniqueName('protectBossTick'))
if self.suitHealTrack:
self.suitHealTrack.pause()
self.suitHealTrack = None
return
def enterFollow(self):
self.__updatePath()
taskMgr.add(self.__followBoss, self.followBossTaskName)
def exitFollow(self):
taskMgr.remove(self.followBossTaskName)
def __updatePath(self):
if self.boss.isDead():
self.suit.b_setAnimState('neutral')
self.exit()
return
self.clearWalkTrack()
if hasattr(self.boss, 'currentPath'):
bossSpot = self.boss.getCurrentPath()
self.bossSpotKey = bossSpot
pos = self.boss.getPosFromCurrentPath()
self.createPath(pos=(pos[0], pos[1]))
else:
self.exit()
def __followBoss(self, task):
if not hasattr(self, 'suit') or self.boss.isEmpty() or not self.boss.isEmpty() and self.boss.isDead():
return Task.done
if self.boss.getCurrentPath() != self.bossSpotKey:
self.__updatePath()
if self.suit.getDistance(self.boss) <= self.LEEWAY_DISTANCE and self.boss.brain.currentBehavior.__class__ != SuitFlyToRandomSpotBehavior:
self.clearWalkTrack(andTurnAround=1)
self.suit.b_setAnimState('neutral')
self.suit.setH(self.suit.getH() - 180)
self.suit.d_setH(self.suit.getH())
self.fsm.request('protect')
return Task.done
return Task.cont
def shouldAbandonFollow(self):
suitsByBoss = self.getSuitsByBoss()
backupCalledIn = self.getBackupCalledIn()
if backupCalledIn == 0:
backupCalledIn = 1
return float(len(suitsByBoss)) / float(backupCalledIn) >= 0.4
def getSuitsByBoss(self):
suits = []
for obj in base.air.doId2do.values():
className = obj.__class__.__name__
if className == 'DistributedSuitAI':
if obj.zoneId == self.suit.zoneId:
if not obj.isDead() and not obj == self.boss and not obj == self.suit:
if obj.getDistance(self.boss) <= self.LEEWAY_DISTANCE * 3:
suits.append(obj)
return suits
def getBackupCalledIn(self):
from lib.coginvasion.cog.SuitCallInBackupBehavior import SuitCallInBackupBehavior
behaviorClass = SuitCallInBackupBehavior
if hasattr(self.boss, 'DELETED') or not self.boss.getBrain():
return 0
behavior = self.boss.getBrain().getBehavior(behaviorClass)
return behavior.getCalledInBackup()
def isBossInManager(self):
return self.boss in self.suit.getManager().suits.values()
def shouldStart(self):
if self.boss and not self.boss.isDead() and self.isBossInManager() and self.suit.getDistance(self.boss) > self.LEEWAY_DISTANCE:
_helper_suits = 0
for suit in self.suit.getManager().suits.values():
if suit.doId != self.suit.doId:
if suit.brain:
if suit.brain.currentBehavior.__class__ == SuitFollowBossBehavior:
_helper_suits += 1
if _helper_suits < self.MAX_BOSS_HELPERS:
return True
return False
|
"""Shared functionality for interacting with Galaxy.
"""
|
import sys
import logging
from rsync_workflow.RsyncContext import RsyncManager, RsyncContext, set_debug
if len(sys.argv) != 3:
logging.error('missing arguments')
logging.error('rsync_clone.py [remote_path] [local_path]')
sys.exit(-1)
rsync_manager = RsyncManager()
rsync_manager.clone(*sys.argv[1:])
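# Example usage (illustrative): python rsync_clone.py user@host:/remote/path /local/path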
|
#!/usr/bin/env python
import sys
import time
from nsnitro import *
import argparse
import datetime
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Check Netscaler HA status')
parser.add_argument('--host', metavar='HOSTNAME', required=True, help='Netscaler hostname')
parser.add_argument('--user', metavar='USERNAME', default='nagios', help='Netscaler username')
parser.add_argument('--password', metavar='PASSWORD', default='api_user', help='Netscaler password')
parser.add_argument('--ssl', action="store_true", help='turn ssl on')
parser.add_argument('-w', '--warning', metavar='WARNING', help='seconds since last change')
parser.add_argument('--dargs', action='store_true', help='show args')
args = parser.parse_args()
if args.dargs:
print(args)
sys.exit(3)
nitro = NSNitro(args.host, args.user, args.password, args.ssl)
try:
nitro.login()
node = NSHANode()
node.set_id("0")
node = NSHANode.get(nitro, node)
status = node.get_hastatus()
state = node.get_state().lower()
if status != "UP":
print "CRITICAL: " + node.get_name() + " " + status
nitro.logout()
sys.exit(2)
elif ( status == "UP") & ( state == "primary"):
if args.warning:
if int(node.get_masterstatetime()) <= int(args.warning):
print "WARNING: " + node.get_name() + " " + status + " " + node.get_state() + " since " + str(datetime.timedelta(seconds=node.get_masterstatetime()))
nitro.logout()
sys.exit(1)
print "OK: " + node.get_name() + " " + status + " " + node.get_state() + " since " + str(datetime.timedelta(seconds=node.get_masterstatetime()))
nitro.logout()
sys.exit(0)
elif status == "UP":
print "OK: " + node.get_name() + " " + status + " " + node.get_state()
nitro.logout()
sys.exit(0)
nitro.logout()
sys.exit(3)
except NSNitroError, e:
print "Error: %s" % e.message
sys.exit(3)
|
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FireFoxOptions
from selenium.common.exceptions import NoSuchElementException
from .captcha_handler import CaptchaHandler
chrome_options = ChromeOptions()
chrome_options.binary_location = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'
firefox_options = FireFoxOptions()
firefox_options.add_argument('--headless')
firefox_options.add_argument("--disable-gpu")
firefox_options.binary_location = 'C:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe'
class ThreatDefenceBypass():
"""
Custom RedirectMiddleware
Using Selenium and chromedriver with a --headless flag
Checks if redirected to a CAPTCHA page or a browser identification page and acts accordingly
"""
def __init__(self):
self.threat_defence = 'threat_defence.php'
self.driver = webdriver.Chrome(r"chromedriver.exe",chrome_options=chrome_options)
#self.driver = webdriver.Firefox('C:\\utils\\geckodriver\\geckodriver.exe',firefox_options=firefox_options)
self.tries = 0
self.captcha_handler = CaptchaHandler()
self.cookies = None
def bypass_threat_defense(self, url):
time.sleep(3)
# LOGGER.info('Number of tries: #{0}'.format(self.tries))
self.driver.get(url)
# While loop to decide whether we are on a browser detection (redirect) page or a captcha page
while self.tries <= 5: # Limit of 5 tries, giving pytesseract a reasonable chance of success
print('Waiting for browser detection')
time.sleep(3)
try:
self.cookies = self.find_solve_submit_captcha()
break
except NoSuchElementException:
print()
print('No CAPTCHA found in page')
try:
self.redirect_retry()
break
except NoSuchElementException:
print('No Link in page either. EXITING')
break
# If the solution was wrong and we are prompted to try again, call the method again
if self.threat_defence in self.driver.current_url:
self.tries += 1
# LOGGER.info('CAPTCHA solution was wrong. Trying again')
self.bypass_threat_defense(self.driver.current_url)
if self.cookies:
self.driver.close()
return self.cookies
exit('Something went wrong')
# Press retry link if reached a redirect page without captcha
def redirect_retry(self):
# LOGGER.info('Looking for `retry` link in page')
link = self.driver.find_element_by_partial_link_text('Click')
# LOGGER.info('Retrying to get CAPTCHA page')
self.tries += 1
self.bypass_threat_defense(link.get_attribute('href'))
def find_solve_submit_captcha(self):
# LOGGER.info('Looking for CAPTCHA image in page')
# Find
captcha = self.driver.find_element_by_xpath("//img[contains(@src, 'captcha')]")
# LOGGER.info('Found CAPTCHA image: {0}'.format(captcha.get_attribute('src')))
# Solve
solved_captcha = self.captcha_handler.get_captcha(src=captcha.get_attribute('src'))
# LOGGER.info('CAPTCHA solved: {0}'.format(solved_captcha))
input_field = self.driver.find_element_by_id('solve_string')
input_field.send_keys(solved_captcha)
# LOGGER.info('Submitting solution')
# Submit
self.driver.find_element_by_id('button_submit').click()
return self.driver.get_cookies()
|
from django.db import models
from django.dispatch import receiver
from django.db.models.signals import post_save
from authors.apps.authentication.models import User
from django.contrib.postgres.fields import ArrayField
class UserProfile(models.Model):
photo = models.URLField(blank=True)
user = models.OneToOneField(User,on_delete= models.CASCADE)
bio = models.TextField(blank=True)
fun_fact = models.TextField(blank=True)
time_when_updated = models.DateTimeField(auto_now=True)
favorite_article = ArrayField(models.CharField(max_length=100), default=list)
def __str__(self):
return self.user.username
class Follow(models.Model):
follower = models.ForeignKey(User, on_delete=models.CASCADE)
followed = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
user_profile=UserProfile(user=instance)
return user_profile.save()
|
from .base import *
DEBUG = True
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# BASE_URL required for notification emails
BASE_URL = 'http://localhost:8000'
try:
from .local import *
except ImportError:
pass
|
"""Responses."""
from io import StringIO
from csv import DictWriter
from typing import List
from flask import Response, jsonify, make_response
from pma_api.__version__ import __version__
class ApiResult:
"""A representation of a generic JSON API result."""
def __init__(self, data, metadata=None, **kwargs):
"""Store input arguments.
Args:
data (dict): A dictionary built up for the API to return
metadata (dict): A dictionary of keys and values to add to the
metadata field of the return object.
"""
self.data = data
self.extra_metadata = metadata
self.kwargs = kwargs
def to_response(self) -> Response:
"""Make a response from the data."""
metadata = self.metadata(self.extra_metadata)
obj = {
**self.data,
**self.kwargs,
'metadata': metadata
}
return jsonify(obj)
@staticmethod
def metadata(extra_metadata=None):
"""Return metadata."""
from pma_api.models import ApiMetadata
obj = {
'version': __version__,
'datasetMetadata': [item.to_json() for item in
ApiMetadata.query.all()]
}
if extra_metadata:
obj.update(extra_metadata)
return obj
class QuerySetApiResult(ApiResult):
"""A representation of a list of records (Python dictionaries)."""
def __init__(self, record_list, return_format, metadata=None, **kwargs):
"""Store the list of records and the format."""
super().__init__(record_list, metadata, **kwargs)
self.record_list = record_list
self.return_format = return_format
def to_response(self):
"""Convert the list of records into a response."""
if self.return_format == 'csv' and self.record_list:
return self.csv_response(self.record_list)
elif self.return_format == 'csv': # and not self.record_list
return make_response('', 204)
# Default is JSON
return self.json_response(self.record_list, self.extra_metadata,
**self.kwargs)
@staticmethod
def csv_response(record_list):
"""CSV Response."""
string_io = StringIO()
header = record_list[0].keys()
writer = DictWriter(f=string_io, fieldnames=header)
writer.writeheader()
writer.writerows((item for item in record_list))
result = string_io.getvalue()
response = Response(result, mimetype='text/csv')
response.headers['Content-Disposition'] = \
'attachment; filename=data.csv'
return response
@staticmethod
def _remove_bytes_values(record_list: List[dict]) -> List[dict]:
"""From a list of dicts, remove any dict value that is bytes.
Args:
record_list(list(dict)): Records queried from db
Returns:
list(dict): Records with any non-JSON-serializable bytes values
replaced with the string 'bytes'
"""
try:
formatted_list: List[dict] = [
{
k: 'bytes' if isinstance(v, bytes) else
v
for k, v in x.items()
}
for x in record_list
]
return formatted_list
except AttributeError:
# record_list is not a List[dict]
return record_list
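# Example (illustrative):
# QuerySetApiResult._remove_bytes_values([{'id': 1, 'blob': b'\x00\x01'}])
# -> [{'id': 1, 'blob': 'bytes'}]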
@staticmethod
def json_response(record_list, extra_metadata, **kwargs):
"""Convert a list of records into a JSON response."""
# TODO: instead of remove bytes values, convert bytes value to URL
# to download file
formatted_list = QuerySetApiResult._remove_bytes_values(record_list)
obj = {
**kwargs,
'results': formatted_list,
'resultSize': len(record_list),
'metadata': ApiResult.metadata(extra_metadata)
}
return jsonify(obj)
# TODO: (jef/jkp 2017-08-29) Add methods for:
# * return warnings, errors
# * return version number
# * documentation
# Needs: Decision on how these should be returned.
|
#!/usr/bin/env python3.3
import gzip, sys
if __name__=='__main__':
feature = sys.argv[1] # has to be either 'cds' or 'exon' or 'gene'
gene_list = None
feature_in_gene_list = set([])
if len(sys.argv) == 3:
gene_list = set([])
with open(sys.argv[2]) as gl:
for line in gl:
toks = line.rstrip().split('\t')
gene_list.add( toks[0] )
for ln, line in enumerate(gzip.open('refGene.txt.gz', 'rb')):
toks = line.decode().rstrip().split('\t')
_, tid, chrom, strand, tx_start, tx_end, cds_start, cds_end, exon_count, exon_starts, exon_ends, _, symbol, _, _, _ = toks
chrom = chrom.replace('chr','')
if chrom.find('_') >= 0:
continue
tx_start, tx_end = int(tx_start), int(tx_end)
cds_start, cds_end = int(cds_start), int(cds_end)
exon_count = int(exon_count)
exon_starts = [ int(v) for v in exon_starts.split(',') if v ]
exon_ends = [ int(v) for v in exon_ends.split(',') if v ]
if gene_list is not None:
if symbol not in gene_list:
continue
else:
feature_in_gene_list.add( symbol )
if feature == 'exon':
for s, e in zip(exon_starts, exon_ends):
print( '{}\t{}\t{}\t{}|{}'.format(chrom, s, e, symbol, tid) )
elif feature == 'cds':
# non-coding
if cds_start == cds_end:
continue
for s, e in zip(exon_starts, exon_ends):
if s <= cds_start <= e:
s = cds_start
if s <= cds_end <= e:
e = cds_end
print('{}\t{}\t{}\t{}|{}'.format(chrom, s, e, symbol, tid))
elif feature == 'gene':
print('{}\t{}\t{}\t{}'.format(chrom, min(exon_starts), max(exon_ends), symbol) )
if gene_list is not None:
print('Checking for genes NOT INCLUDED in refGene', file=sys.stderr)
for g in sorted(list(gene_list)) :
if g not in feature_in_gene_list:
print('Gene in list that is NOT in refGene: {}'.format( g ), file=sys.stderr )
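# Example usage (illustrative; assumes refGene.txt.gz is in the working directory and
# this script is saved as e.g. refgene_features.py):
# python3 refgene_features.py exon my_genes.txt > exons.bed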
|
'''
Copyright (C) 2019-2021, Mo Zhou <cdluminate@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import torch as th
import numpy as np
from .. import configs
import torch
import torch.nn as nn
from torch.autograd import Variable
from .miner import miner
import functools as ft
import itertools as it
import pytest
def fn__pglift(repres: th.Tensor, labels: th.Tensor, *, metric: str):
'''
Generalized lifted-structure loss function
'''
# Determine the margin for the specific metric
if metric in ('C', 'N'):
margin = configs.glift.margin_cosine
repres = th.nn.functional.normalize(repres, p=2, dim=-1)
elif metric in ('E',):
margin = configs.glift.margin_euclidean
# Sampling
anc, pos, neg = miner(repres, labels, method='spc2-lifted', metric=metric)
# Calculate Loss
losses = []
for (i, idx) in enumerate(anc):
repA = repres[idx, :].view(-1)
repP = repres[pos[i], :]
repN = repres[neg[i], :]
#
if metric in ('E', 'N'):
__pdist = ft.partial(th.nn.functional.pairwise_distance, p=2)
else:
def __pdist(p, n): return 1 - \
th.nn.functional.cosine_similarity(p, n, dim=-1)
pos_term = th.logsumexp(__pdist(repA, repP), dim=-1)
neg_term = th.logsumexp(margin - __pdist(repA, repN), dim=-1)
losses.append((pos_term + neg_term).relu())
loss = th.mean(th.stack(losses)) + configs.glift.l2_weight * \
th.mean(repres.norm(p=2, dim=-1))
return loss
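# Per the implementation above, each anchor a contributes
# relu( logsumexp_p d(a, p) + logsumexp_n (margin - d(a, n)) ),
# and the final loss is the mean over anchors plus
# configs.glift.l2_weight * mean(||r||_2) as an embedding-norm regularizer.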
class pglift(th.nn.Module):
_datasetspec = 'SPC-2'
def __call__(self, *args, **kwargs):
return ft.partial(fn__pglift, metric=self._metric)(*args, **kwargs)
def determine_metric(self):
return self._metric
def datasetspec(self):
return self._datasetspec
class pgliftC(pglift):
_metric = 'C'
class pgliftE(pglift):
_metric = 'E'
class pgliftN(pglift):
_metric = 'N'
@pytest.mark.parametrize('metric', ('C', 'E', 'N'))
def test_fn_glift(metric):
output, labels = th.rand(10, 32, requires_grad=True), th.randint(3, (10,))
loss = fn__pglift(output, labels, metric=metric)
loss.backward()
@pytest.mark.parametrize('func', (pgliftC, pgliftE, pgliftN))
def test_glift(func):
output, labels = th.rand(10, 32, requires_grad=True), th.randint(3, (10,))
loss = func()(output, labels)
loss.backward()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# API module of cve-search. Returns queries in JSON format
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2013-2016 Alexandre Dulaunoy - a@foo.be
# Copyright (c) 2014-2017 Pieter-Jan Moreels - pieterjan.moreels@gmail.com
# imports
import json
import logging
import os
import random
import re
import signal
import sys
import time
import urllib
_runPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(_runPath, ".."))
from bson import json_util
from bson.json_util import DEFAULT_JSON_OPTIONS
DEFAULT_JSON_OPTIONS.datetime_representation = 2
from dateutil.parser import parse as parse_datetime
from flask import Flask, request, Response, render_template
from functools import wraps
from logging.handlers import RotatingFileHandler
from redis import exceptions as redisExceptions
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.wsgi import WSGIContainer
import datetime
from datetime import timedelta
import lib.CVEs as cves
import lib.DatabaseLayer as db
import lib.Query as query
import lib.Toolkit as tk
from lib.Config import Configuration
def convertDatetime(dct=None):
if isinstance(dct,(list, tuple, set)):
for item in dct:
convertDatetime(item)
elif type(dct) is dict:
for key, val in dct.items():
if isinstance(val, datetime.datetime):
dct[key] = val.isoformat()
if isinstance(val, (dict, list)):
convertDatetime(val)
return dct
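# Example (illustrative):
# convertDatetime({'Modified': datetime.datetime(2017, 1, 1), 'refs': [{'Published': datetime.datetime(2016, 5, 4)}]})
# -> {'Modified': '2017-01-01T00:00:00', 'refs': [{'Published': '2016-05-04T00:00:00'}]}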
class APIError(Exception):
def __init__(self, message, status=500):
self.message = message
self.status = status
class API():
app = Flask(__name__, static_folder='static', static_url_path='/static')
app.config['MONGO_DBNAME'] = Configuration.getMongoDB()
app.config['SECRET_KEY'] = str(random.getrandbits(256))
def __init__(self):
routes = [{'r': '/api/', 'm': ['GET'], 'f': self.api_documentation},
{'r': '/api/cpe2.3/<path:cpe>', 'm': ['GET'], 'f': self.api_cpe23},
{'r': '/api/cpe2.2/<path:cpe>', 'm': ['GET'], 'f': self.api_cpe22},
{'r': '/api/cvefor/<path:cpe>', 'm': ['GET'], 'f': self.api_cvesFor},
{'r': '/api/cve/<cveid>', 'm': ['GET'], 'f': self.api_cve},
{'r': '/api/cwe', 'm': ['GET'], 'f': self.api_cwe},
{'r': '/api/cwe/<int:cwe_id>', 'm': ['GET'], 'f': self.api_cwe},
{'r': '/api/capec/<cweid>', 'm': ['GET'], 'f': self.api_capec},
{'r': '/api/last', 'm': ['GET'], 'f': self.api_last},
{'r': '/api/last/', 'm': ['GET'], 'f': self.api_last},
{'r': '/api/last/<int:limit>', 'm': ['GET'], 'f': self.api_last},
{'r': '/api/query', 'm': ['GET'], 'f': self.api_query},
{'r': '/api/browse', 'm': ['GET'], 'f': self.api_browse},
{'r': '/api/browse/', 'm': ['GET'], 'f': self.api_browse},
{'r': '/api/browse/<path:vendor>', 'm': ['GET'], 'f': self.api_browse},
{'r': '/api/search/<vendor>/<path:product>', 'm': ['GET'], 'f': self.api_search},
{'r': '/api/search/<path:search>', 'm': ['GET'], 'f': self.api_text_search},
{'r': '/api/search/days/<days>/<path:search>', 'm': ['GET'], 'f': self.api_text_limitday_search},
{'r': '/api/link/<key>/<value>', 'm': ['GET'], 'f': self.api_link},
{'r': '/api/dbInfo', 'm': ['GET'], 'f': self.api_dbInfo}]
for route in routes: self.addRoute(route)
def addRoute(self, route):
self.app.add_url_rule(route['r'], view_func=route['f'], methods=route['m'])
#############
# Decorator #
#############
def api(funct):
@wraps(funct)
def api_wrapper(*args, **kwargs):
data = error = None
# Get data (and possibly errors)
try:
data = funct(*args, **kwargs)
except APIError as e:
error = ({'status': 'error', 'reason': e.message}, e.status)
except Exception as e:
print(e)
error = ({'status': 'error', 'reason': 'Internal server error'}, 500)
# Check if data should be returned as html or data
try:
returnType = 'application/json'
if (request.url_rule.rule.lower().startswith("/api/") or
request.url_rule.rule.lower().endswith(".json") ):
# Support JSONP
if request.args.get('callback', False):
data="%s(%s)"%(request.args.get('callback'), data)
# Check API version for backwards compatibility. We'll call the old API v1.0
elif request.headers.get('Version') in ['1.1']:
# Get the requested return type
returnType = request.headers.get('Accept', '*/*')
# Default to JSON
if any(t in returnType for t in ['json', 'application/*', 'text/*', '*/*']):
data = error if error else {'status': 'success', 'data': data}
elif 'plain' in returnType:
pass # No need to do anything, but needs to be accepted
else:
data = ({'status': 'error', 'reason': 'Unknown Content-type requested'}, 415)
returnType = 'application/json'
if type(data) is not str:
if type(data) is tuple:
data = list(data)
data[0] = json.dumps(convertDatetime(dct=data[0]), indent=4, sort_keys=True, default=json_util.default)
else:
data = (json.dumps(convertDatetime(dct=data), indent=4, sort_keys=True, default=json_util.default), 200)
return Response(data[0], mimetype=returnType), data[1]
except Exception as e:
print(e)
pass
if error and error[1] == 500: raise(APIError(error[0]['reason']))
return data
return api_wrapper
#############
# FUNCTIONS #
#############
def generate_minimal_query(self, f):
query = []
# retrieving lists
if f['rejectedSelect'] == "hide":
exp = "^(?!\*\* REJECT \*\*\s+DO NOT USE THIS CANDIDATE NUMBER.*)"
query.append({'summary': re.compile(exp)})
# cvss logic
if f['cvssSelect'] == "above": query.append({'cvss': {'$gt': float(f['cvss'])}})
elif f['cvssSelect'] == "equals": query.append({'cvss': float(f['cvss'])})
elif f['cvssSelect'] == "below": query.append({'cvss': {'$lt': float(f['cvss'])}})
# date logic
if f['timeSelect'] != "all":
if f['startDate']:
startDate = parse_datetime(f['startDate'], ignoretz=True, dayfirst=True)
if f['endDate']:
endDate = parse_datetime(f['endDate'], ignoretz=True, dayfirst=True)
if f['timeSelect'] == "from":
query.append({f['timeTypeSelect']: {'$gt': startDate}})
elif f['timeSelect'] == "until":
query.append({f['timeTypeSelect']: {'$lt': endDate}})
elif f['timeSelect'] == "between":
query.append({f['timeTypeSelect']: {'$gt': startDate, '$lt': endDate}})
elif f['timeSelect'] == "outside":
query.append({'$or': [{f['timeTypeSelect']: {'$lt': startDate}}, {f['timeTypeSelect']: {'$gt': endDate}}]})
return query
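# Example (illustrative): with filters
# {'rejectedSelect': 'show', 'cvssSelect': 'above', 'cvss': '7', 'timeSelect': 'all'}
# the function returns [{'cvss': {'$gt': 7.0}}].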
def filter_logic(self, filters, skip, limit=None):
query = self.generate_minimal_query(filters)
limit = limit if limit else self.args['pageLength']
return db.getCVEs(limit=limit, skip=skip, query=query)
##########
# ROUTES #
##########
# /api
def api_documentation(self):
return render_template('api.html')
# /api/cpe2.3/<cpe>
@api
def api_cpe23(self, cpe):
cpe = tk.toStringFormattedCPE(cpe)
return cpe if cpe else "None"
# /api/cpe2.2/<cpe>
@api
def api_cpe22(self, cpe):
cpe = tk.toOldCPE(cpe)
return cpe if cpe else "None"
# /api/cvefor/<cpe>
@api
def api_cvesFor(self, cpe):
cpe = urllib.parse.unquote_plus(cpe)
return query.cvesForCPE(cpe)
# /api/cve/<cveid>
@api
def api_cve(self, cveid):
cvesp = cves.last(rankinglookup=True, namelookup=True, via4lookup=True, capeclookup=True)
cve = cvesp.getcve(cveid=cveid.upper())
if not cve: raise(APIError('cve not found', 404))
return cve
# /api/cwe
# /api/cwe/<cwe_id>
@api
def api_cwe(self, cwe_id=None):
return db.getCAPECFor(str(cwe_id)) if cwe_id else db.getCWEs()
# /api/capec/<cweid>
@api
def api_capec(self, cweid):
return db.getCAPEC(cweid)
# /api/last
# /api/last/
# /api/last/<limit>
@api
def api_last(self, limit=None):
limit = limit if limit else 30
cvesp = cves.last(rankinglookup=True, namelookup=True, via4lookup=True, capeclookup=True)
cve = cvesp.get(limit=limit)
return cve
# /query
@api
def api_query(self):
f={'rejectedSelect': request.headers.get('rejected'),
'cvss': request.headers.get('cvss_score'),
'cvssSelect': request.headers.get('cvss_modifier'),
'startDate': request.headers.get('time_start'),
'endDate': request.headers.get('time_end'),
'timeSelect': request.headers.get('time_modifier'),
'timeTypeSelect': request.headers.get('time_type'),
'skip': request.headers.get('skip'),
'limit': request.headers.get('limit')}
try:
skip = int(f['skip']) if f['skip'] else 0
        except ValueError:
raise(APIError('skip must be an int', 400))
try:
limit = int(f['limit']) if f['limit'] else 0
        except ValueError:
raise(APIError('limit must be an int', 400))
return self.filter_logic(f, skip, limit)
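    # Hypothetical client-side sketch (not part of the original code): this route reads
    # its filters from request headers, so a caller might drive it roughly like this
    # (header names taken from the code above; host/port and URL prefix are assumptions):
    #   import requests
    #   headers = {'rejected': 'hide', 'cvss_score': '7', 'cvss_modifier': 'above',
    #              'time_modifier': 'all', 'skip': '0', 'limit': '20'}
    #   resp = requests.get('http://localhost:5000/api/query', headers=headers)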
# /api/browse
# /api/browse/
# /api/browse/<vendor>
@api
def api_browse(self, vendor=None):
if vendor:
vendor = urllib.parse.quote_plus(vendor).lower()
try:
browseList = query.getBrowseList(vendor)
except redisExceptions.ConnectionError:
raise(APIError("Server could not connect to the browsing repository", 503))
if isinstance(browseList, dict):
return browseList
else:
return {}
# /api/search/<vendor>/<path:product>
@api
def api_search(self, vendor=None, product=None):
if not (vendor and product): return {}
search = vendor + ":" + product
# Not using query.cvesForCPE, because that one gives too much info
#return json.dumps(db.cvesForCPE(search), default=json_util.default)
return db.cvesForCPE(search)
# /api/search/<path:search>
@api
def api_text_search(self, search=None):
return db.getSearchResults(search)
# /api/search/days/<days>/<path:search>
@api
def api_text_limitday_search(self, days=None, search=None):
if not (search or days): return {}
cve_d = []
date_n_days_ago = datetime.datetime.now() - timedelta(int(days))
result = db.getSearchResults(search)
cve=result['data']
for item in cve:
if item['Modified'] > date_n_days_ago:
cve_d.append(item)
return cve_d
# /api/link/<key>/<value>
@api
def api_link(self, key=None,value=None):
key=self.htmlDecode(key)
value=self.htmlDecode(value)
regex = re.compile(re.escape(value), re.I)
data = {'cves': db.via4Linked(key, regex)}
cvssList=[float(x['cvss']) for x in data['cves'] if x.get('cvss')]
if cvssList:
data['stats']={'maxCVSS': max(cvssList), 'minCVSS': min(cvssList),'count':len(data['cves'])}
else:
data['stats']={'maxCVSS': 0, 'minCVSS': 0, 'count':len(data['cves'])}
return data
# /api/dbInfo
@api
def api_dbInfo(self):
return db.getDBStats()
########################
# Web Server Functions #
########################
# signal handlers
def sig_handler(self, sig, frame):
print('Caught signal: %s' % sig)
IOLoop.instance().add_callback(self.shutdown)
def shutdown(self):
MAX_WAIT_SECONDS_BEFORE_SHUTDOWN = 3
print('Stopping http server')
self.http_server.stop()
print('Will shutdown in %s seconds ...' % MAX_WAIT_SECONDS_BEFORE_SHUTDOWN)
io_loop = IOLoop.instance()
deadline = time.time() + MAX_WAIT_SECONDS_BEFORE_SHUTDOWN
def stop_loop():
now = time.time()
if now < deadline and (io_loop._callbacks or io_loop._timeouts):
io_loop.add_timeout(now + 1, stop_loop)
else:
io_loop.stop()
print('Shutdown')
stop_loop()
def start(self):
# get properties
flaskHost = Configuration.getFlaskHost()
flaskPort = Configuration.getFlaskPort()
flaskDebug = Configuration.getFlaskDebug()
# logging
if Configuration.getLogging():
logfile = Configuration.getLogfile()
pathToLog = logfile.rsplit('/', 1)[0]
if not os.path.exists(pathToLog):
os.makedirs(pathToLog)
maxLogSize = Configuration.getMaxLogSize()
backlog = Configuration.getBacklog()
file_handler = RotatingFileHandler(logfile, maxBytes=maxLogSize, backupCount=backlog)
file_handler.setLevel(logging.ERROR)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
file_handler.setFormatter(formatter)
self.app.logger.addHandler(file_handler)
if flaskDebug:
# start debug flask server
self.app.run(host=flaskHost, port=flaskPort, debug=flaskDebug)
else:
# start asynchronous server using tornado wrapper for flask
# ssl connection
print("Server starting...")
if Configuration.useSSL():
ssl_options = {"certfile": os.path.join(_runPath, "../", Configuration.getSSLCert()),
"keyfile": os.path.join(_runPath, "../", Configuration.getSSLKey())}
else:
ssl_options = None
signal.signal(signal.SIGTERM, self.sig_handler)
signal.signal(signal.SIGINT, self.sig_handler)
self.http_server = HTTPServer(WSGIContainer(self.app), ssl_options=ssl_options)
self.http_server.bind(flaskPort, address=flaskHost)
self.http_server.start(0) # Forks multiple sub-processes
IOLoop.instance().start()
if __name__ == '__main__':
server = API()
server.start()
|
#!/usr/bin/env python3
from subprocess import call
if __name__ == "__main__":
call(["vklabs_pypi", "publish-package", "."])
|
"""
Alex Staley -- Student ID: 919519311
Assignment 2 -- February 2020
### HERE BEGINS THE Main.py FILE ###
This code creates a neural network of perceptron objects, runs the experiments
described in assignment 2, and displays the results in the prescribed format.
Parameters are set via global constants declared in the Perceptron.py file.
This includes the primary variables of each experiment in the assignment:
# NUM_HIDDEN_UNITS # Experiment 1 variable--default 100
# PARTIAL # Experiment 2 variable--default 1
# MOMENTUM # Experiment 3 variable--default 0
File paths are defined in this file, lines 22/23. Change as needed.
"""
from neuralNetwork.Experiment import *
# DEFINE FILE PATHS FOR TRAINING AND VALIDATION DATA HERE:
trainingFilepath = "./venv/mnist_train.csv"
validationFilepath = "./venv/mnist_test.csv"
print("\nImporting training file...")
trainingSet, trainingVector, trainingValues = parseData(trainingFilepath, PARTIAL)
print("Importing validation file...")
validationSet, validationVector, validationValues = parseData(validationFilepath, partial=1)
print("\nInitializing neural network...\n")
network = NeuralNetwork()
print("######## RUNNING EXPERIMENT: ########", end="\n\t\t")
print(NUM_HIDDEN_UNITS, "hidden units", end="\n\t\t")
print(PCT_DATA, "of training data", sep="% ")
print("\t\tMomentum =", MOMENTUM, '\n')
trainingAccuracy, validationAccuracy, confusionMatrix = runExperiment(
network, trainingSet, trainingVector, validationSet, validationVector, validationValues)
print("\nExperiment complete! Displaying results")
produceGraph(trainingAccuracy, validationAccuracy)
produceMatrix(confusionMatrix)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
def IMF(m):
'''
Calculates the probability of 18<m1<m
'''
return -60.307/1.3*(m**-1.3 - 18**-1.3)
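# Hypothetical sanity check (not in the original script): by construction IMF(18) == 0
# and the value grows monotonically with m, e.g.
#   IMF(18)  -> 0.0
#   IMF(130) -> the cumulative probability of drawing 18 < m1 < 130 under this normalisation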
# Define directory and load data
dataDir = 'COMPAS_Output_solar_metallicity'
sysParams = pd.read_csv(os.path.join(dataDir, 'BSE_System_Parameters.csv'), skiprows=2)
# Remove those binaries that are equilibrated at birth
EAB = sysParams.loc[sysParams['Equilibrated_At_Birth'] == 1]
sysParams.drop(sysParams.loc[sysParams[" SEED "].isin(EAB[" SEED "])].index, inplace=True)
# Find binaries where both stars are BH progenitors
largeBinary = sysParams.loc[(sysParams[' Mass@ZAMS(1) ']>18)&(sysParams[' Mass@ZAMS(2) ']>18)]
# Calculate the q values
q = sysParams[' Mass@ZAMS(2) ']/sysParams[' Mass@ZAMS(1) ']
qBig = largeBinary[" Mass@ZAMS(2) "]/largeBinary[" Mass@ZAMS(1) "]
# Make function of the q = 18/M1
mass = np.linspace(18, 130, 500)
qFunction = 18/mass
# Find cumulative counts from Kroupa IMF selection of primary.
probMass = IMF(mass)
# Plot the q vs m1
plt.plot(sysParams[' Mass@ZAMS(1) '], q, 'x')
plt.plot(largeBinary[' Mass@ZAMS(1) '], qBig, '.')
plt.plot(mass, qFunction, '-g')
plt.plot(mass, probMass, '--k')
print('Fraction of systems with small secondary mass: {0:.1%}'.format(1-len(largeBinary)/len(sysParams)))
plt.show()
|
A=["1","2","3"]
A.append("4");
def B(C):
for figure in C:
print("数字是:",C)
B(A)
|
from django.urls import path
from chat.views import ChatDetailView, HomePageView
app_name = 'chat'
urlpatterns = [
path('', HomePageView.as_view(), name='home'),
path('<int:user_1>/<int:user_2>/chat/', ChatDetailView.as_view(), name='chatroom_detail'),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 27 16:04:18 2019
"""
##https://github.com/Bjarten/early-stopping-pytorch/blob/master/pytorchtools.py
import os
import sys
import torch
import numpy as np
# Useful PyTorch classes
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
# Modified by Rakshit Kothari
def __init__(self,
patience=7,
verbose=False,
delta=0,
mode='min',
fName = 'checkpoint.pt',
path2save = '/home/rakshit/Documents/Python_Scripts/RIT-Eyes/RK/checkpoints'):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
fName (str): Name of the checkpoint file.
path2save (str): Location of the checkpoint file.
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf if mode == 'min' else -np.Inf
self.delta = delta
self.path2save = path2save
self.fName = fName
self.mode = mode
def __call__(self, val_loss, model):
score = -val_loss if self.mode =='min' else val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model)
elif score < (self.best_score + self.delta):
self.counter += 1
print('EarlyStopping counter: {} out of {}'.format(self.counter, self.patience))
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(val_loss, model)
self.counter = 0
def save_checkpoint(self, val_loss, model_dict):
'''Saves model when validation loss decreases.'''
if self.verbose and self.mode == 'min':
print('Validation metric decreased ({:.6f} --> {:.6f}). Saving model ...'.format(self.val_loss_min, val_loss))
elif self.verbose and self.mode == 'max':
print('Validation metric increased ({:.6f} --> {:.6f}). Saving model ...'.format(self.val_loss_min, val_loss))
torch.save(model_dict, os.path.join(self.path2save, self.fName))
self.val_loss_min = val_loss
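# Hypothetical usage sketch (not part of the original file): a typical training loop would
# drive EarlyStopping like this, assuming `model`, a `validate()` routine and a checkpoint
# directory exist:
#   stopper = EarlyStopping(patience=10, verbose=True, mode='min',
#                           fName='best.pt', path2save='./checkpoints')
#   for epoch in range(max_epochs):
#       val_loss = validate(model)              # user-supplied validation step
#       stopper(val_loss, model.state_dict())   # saves a checkpoint on improvement
#       if stopper.early_stop:
#           break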
# Useful PyTorch functions
def weights_init(ObjVar):
# Function to initialize weights
for name, val in ObjVar.named_parameters():
if 'weight' in name and len(val.shape) >= 2:
torch.nn.init.xavier_normal_(val, gain=1)
elif 'bias' in name:
torch.nn.init.zeros_(val)
elif ('nalu' in name) or ('nac' in name):
torch.nn.init.zeros_(val)
elif '_' in name:
print('{}. Ignoring.'.format(name))
else:
print('{}. No init.'.format(name))
return ObjVar
def partial_weight_loading(net, net_odict):
# Load all weights which have a matching string.
# WARNING: This code can break in multiple ways.
    # Use with caution. If the loaded weights do
    # not look right, retrain from scratch.
available_keys = [key for key in net_odict.keys()]
for name, param in net.named_parameters():
matchedkey = [key for key in available_keys if name in key]
if len(matchedkey) == 1:
if net_odict[matchedkey[0]].data.shape == param.data.shape:
param.data = net_odict[matchedkey[0]].cpu().data
else:
print('Shapes did not match. Ignoring weight: {}.'.format(name))
else:
print('Could not match: {}. Ignoring this parameter.'.format(name))
print('Values loaded!')
return net
def move_to_multi(model_dict):
'''
Convert dictionary of weights and keys
to a multiGPU format. It simply appends
a 'module.' in front of keys.
'''
multiGPU_dict = {}
for key, value in model_dict.items():
multiGPU_dict['module.'+key] = value
return multiGPU_dict
def move_to_single(model_dict):
'''
Convert dictionary of weights and keys
to a singleGPU format. It removes the
'module.' in front of keys.
'''
singleGPU_dict = {}
for key, value in model_dict.items():
singleGPU_dict[key.replace('module.', '')] = value
return singleGPU_dict
def my_collate(batch):
'''
batch: list of information acquired from __getitem__
'''
I = torch.stack([item[0] for item in batch], dim=0)
M = torch.stack([item[1] for item in batch], dim=0)
M_nS = torch.stack([item[2] for item in batch], dim=0)
spatW = torch.stack([item[3] for item in batch], dim=0)
distM = torch.stack([item[4] for item in batch], dim=0)
subjectID = [item[5] for item in batch]
fName = [item[6] for item in batch]
pupilPhi = torch.stack([item[7][0] for item in batch], dim=0)
irisPhi = torch.stack([item[7][1] for item in batch], dim=0)
return I, M, M_nS, spatW, distM, subjectID, fName, (pupilPhi, irisPhi)
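# Hypothetical usage sketch (not part of the original file): my_collate is meant to be
# plugged into a DataLoader whose dataset __getitem__ returns the 8-tuple unpacked above:
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8,
#                                        shuffle=True, collate_fn=my_collate)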
def load_from_file(paths_file):
# Loads model weights from paths_file, a tuple of filepaths
# Sequentially moves from first file, attempts to load and if unsuccessful
# loads the next file and so on ...
for path in paths_file:
if path:
try:
netDict = torch.load(path)
print('File loaded from: {}'.format(path))
break
except:
print('WARNING. Path found but failed to load: {}'.format(path))
else:
netDict = {}
return netDict
|
#!/usr/bin/env python
#
# Copyright 2012 Steven Le (stevenle08@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flings a URI to a Google TV server."""
__author__ = 'stevenle08@gmail.com (Steven Le)'
import optparse
import os
import sys
import googletv
def get_parser():
"""Creates an optparse.OptionParser object used by this script."""
usage = 'Usage: %prog [--host=] [--port=9551] [--cert=cert.pem] <uri>'
parser = optparse.OptionParser(usage=usage)
parser.add_option(
'--cert',
default='cert.pem',
help='Path to cert file.')
parser.add_option(
'--host',
default='NSZGT1-6131194.local',
help='Host of the Google TV server.')
parser.add_option(
'--port',
default=9551,
type='int',
help='Port number.')
return parser
def main():
parser = get_parser()
options, args = parser.parse_args()
if not args:
sys.exit(parser.get_usage())
host = options.host
port = options.port
cert = options.cert
if not os.path.isfile(cert):
sys.exit('No cert file. Use --cert.')
uri = args[0]
with googletv.AnymoteProtocol(host, cert, port=port) as gtv:
gtv.fling(uri)
if __name__ == '__main__':
main()
|
# Under MIT license, see LICENSE.txt
from .ai_command import AICommand
from .engine_command import EngineCommand
from .pose import Pose
from .position import Position
from .path import Path
import Util.geometry as geometry
from .singleton import Singleton
|
"""
Finger exercise: Write a program that examines three variables— x , y , and z —
and prints the largest odd number among them. If none of them are odd, it
should print a message to that effect
"""
x = int(input("Enter the value for x: "))
y = int(input("Enter the value for y: "))
z = int(input("Enter the value for z: "))
biggest = 0 # The variable that will hold the biggest odd number
### There are a couple of ways to solve this, I'll show the one I think is easiest to understand
############# First we check if all 3 numbers are odd and search for the biggest #############
if x % 2 != 0 and z % 2 != 0 and y % 2 != 0:
if x >= y and x >= z:
biggest = x
elif y >= z and y > x: # Over here we don't have to check if y is equal to x, because we already tested for that
biggest = y
else:
biggest = z
############# Now we check if x and either y or z is odd #############
elif x % 2 != 0:
if y % 2 != 0:
if x >= y:
biggest = x
else:
biggest = y
elif z % 2 != 0:
if x >= z:
biggest = x
else:
biggest = z
else:
biggest = x
############# Now we check if y is odd or z #############
elif y % 2 != 0:
# We already know that x is not odd so we only check for z
if z % 2 != 0:
if y >= z:
biggest = y
else:
biggest = z
else:
biggest = y
############# Now we check if z is odd #############
elif z % 2 != 0:
biggest = z
else:
print("No odd numbers were entered")
if biggest > 0:
print(f"Biggest odd number is {biggest}")
|
from setuptools import setup, find_packages
with open("README.rst", "r", encoding="utf-8") as f:
readme = f.read()
setup(
name="discord-interactions.py",
version="0.0.7",
description="A library around the Discord Interactions API",
long_description=readme,
long_description_content_type="text/x-rst",
url="https://github.com/LiBa001/discord-interactions.py",
author="Linus Bartsch",
author_email="pypi@libasoft.de",
license="MIT",
classifiers=[
"Development Status :: 1 - Planning",
"Environment :: Web Environment",
"Framework :: Flask",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Internet",
"Topic :: Software Development :: Libraries",
],
python_requires=">=3.8",
install_requires=["flask", "pynacl", "requests"],
keywords="discord discord-py discord-bot wrapper",
packages=find_packages(exclude=["examples", "tests"]),
data_files=None,
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
class UserJSONPresenter(object):
"""
Present a user in the JSON format returned by API requests.
Note that this presenter as of now returns some information
that should not be publicly available, like the users email
address. This is fine for now because it is only used in
places where the caller has access to this. We would need
to refactor this as soon as we use this presenter for a
public API.
"""
def __init__(self, user):
self.user = user
def asdict(self):
return {
"authority": self.user.authority,
"email": self.user.email,
"userid": self.user.userid,
"username": self.user.username,
"display_name": self.user.display_name,
}
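# Hypothetical usage sketch (not part of the original module), assuming a user model
# object exposing the attributes read above:
#   presenter = UserJSONPresenter(user)
#   presenter.asdict()
#   # -> {"authority": ..., "email": ..., "userid": ..., "username": ..., "display_name": ...}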
|
import pandas as pd
import numpy as np
# tfidf
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import Ridge
from sklearn import linear_model
class ContentBase:
@staticmethod
def getTfidf(x_train):
transformer = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = transformer.fit_transform(x_train.tolist()).toarray()
return tfidf
@staticmethod
def get_items_rated_by_user(rate_matrix, user_id):
y = rate_matrix[:, 0]
ids = np.where(y == user_id)
        item_ids = rate_matrix[ids, 1]
scores = rate_matrix[ids, 2]
return item_ids, scores
@staticmethod
def getIndexInArr(index_arr, list_item):
return list(map((lambda x: np.where(index_arr == x)[0][0]), list_item))
    # Find the model (W, b) for each user via ridge regression
    def GetRidgeRegression(self, n_users, rate_train, tfidf, W, b, index_arr):
for n in n_users:
i = np.where(n_users == n[0])[0][0]
ids, scores = self.get_items_rated_by_user(rate_train, n[0])
clf = Ridge(alpha=0.01, fit_intercept=True)
tests = self.getIndexInArr(index_arr, ids[0])
Xhat = tfidf[tests, :]
if Xhat.size != 0:
clf.fit(Xhat, scores[0])
W[:, i] = clf.coef_
b[0, i] = clf.intercept_
else:
W[:, i] = 0
b[0, i] = 0
return W, b
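# Hypothetical usage sketch (not part of the original module): rate_matrix is assumed to be
# an ndarray of [user_id, item_id, rating] rows, so for example
#   tfidf = ContentBase.getTfidf(x_train)
#   item_ids, scores = ContentBase.get_items_rated_by_user(rate_train, user_id=3)
# returns the items user 3 rated and their scores, which GetRidgeRegression then uses to
# fit one Ridge model (a column of W plus an entry of b) per user.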
|
import scrapy
class StarWarsSpider(scrapy.Spider):
name = 'starwars'
allowed_domains = ["starwars.com"]
start_urls = ['https://www.starwars.com/databank']
def parse(self, response):
# TODO - to investigate why this line of css selector code does not work on starwars site!!?
# urls = response.css('.blocks-container.ref-1-3 .building-block-config .image-wrapper a::attr(href)').extract()
urls = response.css('.building-block-config .image-wrapper a::attr(href)').extract()
for url in urls:
url = response.urljoin(url)
yield scrapy.Request(url=url, callback=self.parse_details)
def parse_details(self, response):
# TODO - why did the character text returns null??
yield {
'Image': response.css('.ratio-16x9 .building-block .image-wrapper .aspect img::attr(src)').extract_first(),
'Character': response.css('.ratio-16x9 .building-block .content-wrapper .content-info .title span::text').extract_first(),
}
|
from dataclasses import dataclass
from typing import List, Optional
from urllib.parse import urljoin
from pyvultr.utils import BaseDataclass, VultrPagination, get_only_value
from .base import BaseVultrV2, command
from .enums import ACL
@dataclass
class UserInfo(BaseDataclass):
id: str # The User's id.
api_enabled: bool # Permit API access for this User.
email: str # The User's email address.
acls: List[str] # An array of permission granted.
name: str = None # The User's name.
password: str = None # The User's password.
class UserAPI(BaseVultrV2):
"""Vultr User API.
Reference: https://www.vultr.com/api/#tag/users
Vultr supports multiple users in each account, and each user has individual access permissions.
Users have unique API keys, which respect the permission for that user.
Attributes:
api_key: Vultr API key, we get it from env variable `$VULTR_API_KEY` if not provided.
"""
def __init__(self, api_key: Optional[str] = None):
super().__init__(api_key)
@property
def base_url(self):
"""Get base url for all API in this section."""
return urljoin(super().base_url, "users")
@command
def list(self, per_page: int = None, cursor: str = None, capacity: int = None) -> VultrPagination[UserInfo]:
"""Get a list of all Users in your account.
Args:
per_page: Number of items requested per page. Default is 100 and Max is 500.
cursor: Cursor for paging.
capacity: The capacity of the VultrPagination[UserInfo], see `VultrPagination` for details.
Returns:
VultrPagination[UserInfo]: A list-like object of `UserInfo` object.
"""
return VultrPagination[UserInfo](
fetcher=self._get,
cursor=cursor,
page_size=per_page,
return_type=UserInfo,
capacity=capacity,
)
@command
def create(
self,
name: str,
email: str,
password: str,
api_enabled: bool = None,
acl_group: List[ACL] = None,
) -> UserInfo:
"""Create a new User. The `email`, `name`, and `password` attributes are required.
Args:
name: The User's name.
email: The User's email address.
password: The User's password.
api_enabled: API access is permitted for this User.
acl_group: An array of permission granted.
Returns:
UserInfo: User info.
"""
_json = {
"name": name,
"email": email,
"password": password,
"api_enabled": api_enabled,
"acls": acl_group and [i.value for i in acl_group],
}
resp = self._post(json=_json)
return UserInfo.from_dict(get_only_value(resp))
@command
def get(self, user_id: str) -> UserInfo:
"""Get information about a User.
Args:
user_id: The User id.
Returns:
UserInfo: User info.
"""
resp = self._get(f"/{user_id}")
return UserInfo.from_dict(get_only_value(resp))
@command
def update(
self,
user_id: str,
name: str = None,
email: str = None,
password: str = None,
api_enabled: bool = None,
acl_group: List[ACL] = None,
):
"""Update information for a User.
All attributes are optional. If not set, the attributes will retain their original values.
Args:
user_id: The User id.
name: The User's name.
email: The User's email address.
password: The User's password.
api_enabled: API access is permitted for this User.
acl_group: An array of permission granted.
Returns:
STATUS CODE: 204
/NO CONTENT/
"""
_json = {
"name": name,
"email": email,
"password": password,
"api_enabled": api_enabled,
"acls": acl_group and [i.value for i in acl_group],
}
return self._patch(f"/{user_id}", json=_json)
@command
def delete(self, user_id: str):
"""Delete a User.
Args:
user_id: The User id.
Returns:
STATUS CODE: 204
/NO CONTENT/
"""
return self._delete(f"/{user_id}")
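# Hypothetical usage sketch (not part of the original module), assuming a valid API key in
# $VULTR_API_KEY or passed explicitly:
#   user_api = UserAPI()
#   for user in user_api.list(per_page=100, capacity=100):
#       print(user.id, user.email)
#   created = user_api.create(name="example", email="user@example.com", password="...")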
|
import socket, sys, os, base64, json
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.fernet import Fernet
from PyTerminalColor.TerminalColor import TerminalColor
class Client:
def __init__(self, ip:str='127.0.0.1', port:int=4444, save_path:str='') -> None:
self.SEPARATOR = "<D|M>"
self.BUFFER_SIZE = 4096
self.SAVE_PATH = save_path
self.LINE_SIZE = 60
self.ip = ip
self.port = port
self.passwd_hash = None
self.colorize = TerminalColor(fgcolor='YELLOW', bgcolor='BLACK', style='BOLD')
def __gen_key_from_pass(self, passwd:str)->bytes:
'''
Generates key from password.
'''
salt = b'SecretSalt'
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=100000,
backend=default_backend()
)
key = base64.urlsafe_b64encode(kdf.derive(passwd.encode('utf-8')))
return key
def encrypt_data(self, user:str, data:str)->bytes:
'''
encrypts data which is passed with the help of key as
another parameter and returns encrypted data in form
of bytes
'''
KEY = self.__gen_key_from_pass(user)
data = data.encode('utf-8')
encrypter = Fernet(KEY)
enc_data = encrypter.encrypt(data)
return enc_data
def decrypt_data(self, data:str)->bytes:
'''
decrypts data which is passed with the help of key as
another parameter and returns decrypted data in form
of bytes
'''
KEY = self.passwd_hash
if type(data) == str:
data = data.encode('utf-8')
decrypter = Fernet(KEY)
dec_data = decrypter.decrypt(data)
return dec_data
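    # Hypothetical round-trip sketch (not part of the original class): both helpers derive
    # the same Fernet key from one password (fixed salt), so once self.passwd_hash has been
    # set from that password, e.g. after authentication:
    #   token = self.encrypt_data('s3cret', 'hello')    # key derived from 's3cret'
    #   self.passwd_hash = self.__gen_key_from_pass('s3cret')
    #   self.decrypt_data(token)                        # -> b'hello'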
def send(self, data:str):
'''
sends data serially
'''
if type(data) == bytes:
data = str(data, encoding='utf-8')
json_data = json.dumps(data)
bytes_json_data = bytes(json_data, encoding='utf-8')
self.connection.send(bytes_json_data)
def receive(self):
'''
receives data serially
'''
bytes_json_data = b''
while True:
try:
bytes_json_data += self.connection.recv(self.BUFFER_SIZE)
data = json.loads(bytes_json_data)
return data
except json.JSONDecodeError:
continue
def save_file(self, file_name:str, data:bytes):
'''
receive and save file over the connection
'''
# packet = transfer_send (sep) filename (sep) data
# create file save path
file_name = os.path.join(self.SAVE_PATH, file_name)
# Start receiving file packets
self.colorize.cprint(f'[*] Receiving File {file_name}:')
with open(file_name, "wb") as f:
decrypted_file_data = self.decrypt_data(data)
# decode base64 data
data = base64.b64decode(decrypted_file_data)
f.write(data)
# inform server that the transfer has been completed
self.colorize.cprint('[*] Transfer Complete', use_default=False, fgcolor='GREEN', style='BOLD')
self.send('transfer_completed')
def start(self):
'''
start client
'''
print()
print('-'*self.LINE_SIZE)
self.colorize.cprint(f'[*] Trying to connect to {self.ip}:{self.port}', use_default=False, fgcolor='YELLOW', bgcolor='RED', style='BOLD')
print('-'*self.LINE_SIZE)
# create socket for connection and connect to server
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# try to connect to the server
connected = False
while not connected:
try:
self.connection.connect((self.ip, self.port))
connected = True
except ConnectionRefusedError:
self.colorize.cprint('\r[*] Peer seems to be offline.', end='', use_default=False, fgcolor='YELLOW', bgcolor='RED', style='BOLD')
print()
self.colorize.cprint('[*] Connection Established', use_default=False, fgcolor='GREEN', style='ITALIC')
print('-'*self.LINE_SIZE)
try:
while True:
message = self.receive()
# split string to get data
message_list = message.split(self.SEPARATOR)
# authenticate user
if message == 'exit':
self.colorize.cprint('[!] Connection closed by remote host.', use_default=False, fgcolor='YELLOW', bgcolor='RED', style='BOLD')
print()
self.connection.close()
sys.exit()
elif 'auth_user' == message:
username = input('[+] Enter your username: ')
self.send(username)
passwd = input('[+] Enter your password: ')
self.send(passwd)
print('-'*self.LINE_SIZE)
auth_result = self.receive()
if 'exit' == auth_result:
self.colorize.cprint('[!] Invalid Details. Exiting.', use_default=False, fgcolor='YELLOW', bgcolor='RED', style='BOLD')
break
else:
self.colorize.cprint('[*] Authenticated', use_default=False, fgcolor='GREEN', style='BOLD')
self.passwd_hash = self.__gen_key_from_pass(passwd)
print('-'*self.LINE_SIZE)
# receive file from server peer
elif 'transfer_send' == message_list[0] :
self.colorize.cprint('[*] Encrypted File Incoming')
self.save_file(file_name=message_list[1], data=message_list[2].encode('utf-8'))
break
except KeyboardInterrupt:
            self.colorize.cprint('\r\n[!] ctrl+c detected! Exiting Program')
sys.exit()
finally:
self.connection.close()
print('-'*self.LINE_SIZE)
if __name__ == '__main__':
SAVE_PATH = r'C:\Users\there\Desktop'
IP = '127.0.0.1'
PORT = 4444
client = Client(IP, PORT, SAVE_PATH)
client.start()
|
#-------------------------------------------------------------------------------
# Copyright (C) 2017 Carlos Guzman (cguZZman) carlosguzmang@protonmail.com
#
# This file is part of Cloud Drive Common Module for Kodi
#
# Cloud Drive Common Module for Kodi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cloud Drive Common Module for Kodi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import datetime
import sqlite3
import time
from clouddrive.common.ui.logger import Logger
from clouddrive.common.ui.utils import KodiUtils
class Cache(object):
_addonid = None
_name = None
_expiration = None
_monitor = None
_abort = False
def __init__(self, addonid, name, expiration):
self._addonid = addonid
self._name = name
self._expiration = expiration
self._monitor = KodiUtils.get_system_monitor()
def __del__(self):
self._abort = True
del self._monitor
def _get_connection(self):
profile_path = KodiUtils.get_addon_info("profile", self._addonid)
if not KodiUtils.file_exists(profile_path):
KodiUtils.mkdirs(profile_path)
db = KodiUtils.translate_path("%s/cache_%s.db" % (profile_path, self._name,))
con = sqlite3.connect(db, timeout=30, isolation_level=None)
rs = con.execute("select name from sqlite_master where type='table' AND name='cache'")
if not rs.fetchone():
try:
con.execute("create table cache(key text unique, value text, expiration integer)")
except Exception as ex:
Logger.debug(ex)
return con
def _get_datetime(self, ts):
return int(time.mktime(ts.timetuple()))
def get(self, key):
row = self._read(key)
if row and row[1] > self._get_datetime(datetime.datetime.now()):
return eval(row[0])
return
def set(self, key, value):
expiration = self._get_datetime(datetime.datetime.now() + self._expiration)
self._insert(key, value, expiration)
def setmany(self, key_value_list):
expiration = self._get_datetime(datetime.datetime.now() + self._expiration)
for kv in key_value_list:
kv[1] = repr(kv[1])
kv.append(expiration)
self._execute_sql("insert or replace into cache(key, value, expiration) values(?,?,?)", key_value_list)
def remove(self, key):
self._execute_sql("delete from cache where key = ?", (key,))
def clear(self):
self._execute_sql("delete from cache")
Logger.debug("Cache '%s' cleared" % self._name)
def _read(self, key):
return self._execute_sql("select value, expiration from cache where key = ?", (key,))
def _insert(self, key, value, expiration):
self._execute_sql("insert or replace into cache(key, value, expiration) values(?,?,?)", (key, repr(value), expiration,))
def _execute_sql(self, query, data=None):
result = None
con = self._get_connection()
with con:
retries = 0
error = None
while retries < 15 and not self._abort:
try:
con.execute("delete from cache where expiration < ?", (self._get_datetime(datetime.datetime.now()),))
if isinstance(data, list):
result = con.executemany(query, data).fetchone()
elif data:
result = con.execute(query, data).fetchone()
else:
result = con.execute(query).fetchone()
break
                except sqlite3.OperationalError as ex:
                    error = ex
                    if "database is locked" in str(ex):
                        retries += 1
                        Logger.debug("Cache query retrying #%d [%s]: %s" % (retries, query, str(data),))
                        self._monitor.waitForAbort(0.3)
                    else:
                        break
                except Exception as ex:
                    error = ex
                    break
if error:
Logger.debug("Error executing cache query [%s]: %s" % (query, str(error),))
con.close()
return result
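# Hypothetical usage sketch (not part of the original module): inside a Kodi add-on,
# `expiration` is the timedelta added to "now" whenever a value is stored, e.g.
#   cache = Cache('plugin.example.addon', 'children', datetime.timedelta(minutes=5))
#   cache.set('folder-id', [{'name': 'file.txt'}])
#   cache.get('folder-id')   # -> the list above, or None once the 5 minutes have passed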
|
# Creates new option in main menu bar
# EDITED: 2020-08-25
# ============================================
import pymel.core as pm
import toolbox
def create_my_menu(standalone=False):
'''Creates a CSA menu with my scripts'''
reload(toolbox)
# find main maya window
main_maya_window = pm.language.melGlobals['gMainWindow']
    # check if menu exists
if pm.menu('CSA', exists=True): pm.deleteUI('CSA')
# create a new menu and parent to main maya window
custom_menu = pm.menu('CSA', parent=main_maya_window)
# Build a menu item and parent under the 'customMenu'
pm.menuItem(label='Open Toolbox', parent=custom_menu, command=lambda *args: toolbox.create_my_toolbox())
toolbox.create_my_toolbox()
|
#!/usr/bin/env python3
import yaml, argparse
import logging as log
from pathlib import Path
from cliva_fl.experiment import Experiment
# ARGUMENT PARSING
parser = argparse.ArgumentParser(description='Argument parser for log processing and plot creation')
parser.add_argument('-r', '--repeat', type=int, default=1, required=False, help='Number of times to repeat the experiment.')
parser.add_argument('-c', '--conf', type=Path, default=Path('experiment.yml'), required=False, help='Path to the experiment config file.')
args = parser.parse_args()
# LOGGING
log.basicConfig(
format='%(asctime)s [%(levelname)s] %(message)s',
datefmt='%m/%d/%Y %H:%M',
level=log.DEBUG)
with open(args.conf, 'r') as f:
config = yaml.load(f, Loader=yaml.Loader)
dataset_cnf = config['dataset']
model_cnf = config['model']
optimizer_cnf = config['optimizer']
loss_fn_cnf = config['loss_fn']
training_cnf = config['training']
validation_cnf = config['validation']
log.debug(f'Dataset Config: {dataset_cnf}')
log.debug(f'Model Config: {model_cnf}')
log.debug(f'Optimizer Config: {optimizer_cnf}')
log.debug(f'Loss Function Config: {loss_fn_cnf}')
log.debug(f'Training Config: {training_cnf}')
log.debug(f'Validation Config: {validation_cnf}')
if __name__ == "__main__":
exp = Experiment.from_config(args.conf)
for i in range(args.repeat):
log.info(f'Starting experiment {i+1} of {args.repeat}')
exp.run(training_cnf['n_epochs'], max_buffer_len=training_cnf['max_buffer_len'], shuffle_batches=training_cnf['shuffle_batches'], log_dir=training_cnf['log_dir'], use_gpu=training_cnf['use_gpu'])
# exp.run(**training_cnf)
exp.stats()
exp.reset()
|
import io
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
# Avoids IDE errors, but actual version is read from version.py
__version__ = None
exec(open('api/version.py').read())
# Get the long description from the README file
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
install_requires = [
'numpy >= 1.12.1'
]
extras_requires = {
'tests': [
'coverage >= 4.3.4',
'codecov >= 2.0.15',
'pytest >= 3.0.3',
'pytest-cov >= 2.4.0',
'flake8 >= 3.0.0',
'flake8_docstrings >= 1.0.2'],
}
setup(
name="TextParser",
version=__version__,
author="Zhaohui Li, Yangzhou, Yuanzheng Wang, Yingyan Li, Yixing Fa et al.",
author_email="lizhaohui@software.ict.ac.cn",
description=("This project is implemented for the connection between the \
App layer and the base TextParser layer."),
license="Apache 2.0",
keywords="natural language understanding api",
url="https://github.com/faneshion/TextParser",
packages=find_packages(),
long_description=long_description,
classifiers=[
"Development Status :: 3 - Alpha",
'Environment :: Console',
'Operating System :: POSIX :: Linux',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
"License :: OSI Approved :: Apache Software License",
'Programming Language :: Python :: 3.6'
],
install_requires=install_requires,
extras_require=extras_requires
)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 17 23:27:28 2019
@author: Winham
Network testing
"""
import os
import numpy as np
from keras.models import load_model
from keras.utils import to_categorical
from data_preprocess import *
import mit_utils as utils
import time
import matplotlib.pyplot as plt
import tensorflow_addons as tfa
target_class = ['W', 'N1', 'N2', 'N3', 'REM']
target_sig_length = 3072
tic = time.time()
trainX, trainY, TestX, TestY = dataload('channel0.npz')
toc = time.time()
markov_matrix = [[66927., 3996., 179., 6., 86.],
[2252., 17891., 4269., 9., 753.],
[1271., 2262., 80861., 3546., 1043.],
[179., 113., 3247., 15892., 23.],
[565., 912., 427., 1., 32279.]]
markov_matrix = np.array(markov_matrix)
# markov_matrix_copy = markov_matrix.copy()
# for i in range(5):
# markov_matrix_copy[i] /= markov_matrix_copy[i].sum()
# print(markov_matrix_copy)
markov_matrix = np.log2(markov_matrix) ** 3
for i in range(5):
    row_max = np.max(markov_matrix[i])
    markov_matrix[i] /= row_max
# print(markov_matrix)
# assert False
print('Time for data processing--- '+str(toc-tic)+' seconds---')
model_name = 'myNet.h5'
model = load_model(model_name)
# model.summary()
pred_vt = model.predict(TestX, batch_size=256, verbose=1)
pred_v = np.argmax(pred_vt, axis=1)
true_v = np.argmax(TestY, axis=1)
def weight_decay(order):
weights = []
for i in range(order):
weights.append(4 ** (-i))
return weights
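# Worked example (added for clarity, not in the original script): with order=6 the decay
# weights are 4**0 .. 4**-5, i.e.
#   weight_decay(6) -> [1, 0.25, 0.0625, 0.015625, 0.00390625, 0.0009765625]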
order = 6
weight = weight_decay(order)
for i in range(1,len(pred_vt)-order):
factor = 1
if pred_v[i-1] != pred_v[i]:
for j in range(1,order+1):
if pred_v[i+j] == pred_v[i-1]:
factor += weight[j-1]*2.1
elif pred_v[i+j] == pred_v[i]:
factor -= 0.55 * weight[j-1]
if factor < 0.1:
factor = 0.1
vector = markov_matrix[pred_v[i - 1]].copy()
vector[pred_v[i-1]] *= factor
re_pred = pred_vt[i] * vector
# print(re_pred)
pred_v[i] = np.argmax(re_pred)
# f1 = 3.1
# f2 = 0.45
# for i in range(1,len(pred_vt)-1):
# if pred_v[i-1] != pred_v[i]:
# if pred_v[i-1] == pred_v[i+1]:
# factor = f1
# elif pred_v[i] == pred_v[i+1]:
# factor = f2
# else:
# factor = 1
# # print(pred_vt[i])
# vector = markov_matrix[pred_v[i - 1]].copy()
# vector[pred_v[i-1]] *= factor
# re_pred = pred_vt[i] * vector
# # print(re_pred)
# pred_v[i] = np.argmax(re_pred)
utils.plot_confusion_matrix(true_v, pred_v, np.array(target_class))
utils.print_results(true_v, pred_v, target_class)
plt.savefig('cm.png')
# pred_v = pred_v[:10000]
# pred_v.resize((100,100))
# plt.subplot(121)
# plt.matshow(pred_v, cmap = plt.cm.Blues)
# plt.savefig('cm_pred.png')
#
# true_v = true_v[:10000]
# true_v.resize((100,100))
# plt.subplot(122)
# plt.matshow(true_v, cmap = plt.cm.Blues)
# plt.savefig('cm_true.png')
|
class AddInId(object, IDisposable):
"""
Identifies an AddIn registered with Revit
AddInId(val: Guid)
"""
def Dispose(self):
""" Dispose(self: AddInId) """
pass
def GetAddInName(self):
"""
GetAddInName(self: AddInId) -> str
name of addin associated with this AddInId
Attempts to obtain the name from
loaded Third Party AddIns
Returns: name of addin
"""
pass
def GetAddInNameFromDocument(self, aDoc):
"""
GetAddInNameFromDocument(self: AddInId,aDoc: Document) -> str
name of application associated with this ApplicationId
First attempts to
obtain the name from AddInIds stored in the document.
If unsuccessful,
attempts to obtain the name from loaded Third Party AddIns.
aDoc: target document
Returns: name of application
"""
pass
def GetGUID(self):
"""
GetGUID(self: AddInId) -> Guid
value of the AddInId as a GUID
Returns: GUID value of the AddInId
"""
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: AddInId,disposing: bool) """
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, val):
""" __new__(cls: type,val: Guid) """
pass
def __repr__(self, *args):
""" __repr__(self: object) -> str """
pass
IsValidObject = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: AddInId) -> bool
"""
|
import numpy as np
import sidpy
from tqdm import trange, tqdm
from sklearn.feature_extraction import image
from sklearn.utils.extmath import randomized_svd
def clean_svd(im, pixel_size=1, source_size=5):
"""De-noising of image by using first component of single value decomposition"""
if not isinstance(im, sidpy.Dataset):
raise TypeError('We need a sidpy.Dataset')
if im.data_type.name != 'IMAGE':
raise TypeError('We need sidpy.Dataset of sidpy.Datatype: IMAGE')
patch_size = int(source_size/pixel_size)
if patch_size < 3:
patch_size = 3
patches = image.extract_patches_2d(im, (patch_size, patch_size))
patches = patches.reshape(patches.shape[0], patches.shape[1]*patches.shape[2])
num_components = 32
u, s, v = randomized_svd(patches, num_components)
u_im_size = int(np.sqrt(u.shape[0]))
reduced_image = u[:, 0].reshape(u_im_size, u_im_size)
reduced_image = reduced_image/reduced_image.sum()*im.sum()
out_dataset = im.like_data(reduced_image)
out_dataset.title = 'Major SVD component'
out_dataset.data_type = 'image'
return out_dataset
# Deconvolution
def make_gauss(size_x, size_y, width=1.0, x0=0.0, y0=0.0, intensity=1.0):
"""Make a Gaussian shaped probe """
size_x = size_x/2
size_y = size_y/2
x, y = np.mgrid[-size_x:size_x, -size_y:size_y]
g = np.exp(-((x-x0)**2 + (y-y0)**2) / 2.0 / width**2)
probe = g / g.sum() * intensity
return probe
def decon_lr(o_image, resolution=0.1, verbose=False):
"""
# This task generates a restored image from an input image and point spread function (PSF) using
# the algorithm developed independently by Lucy (1974, Astron. J. 79, 745) and Richardson
# (1972, J. Opt. Soc. Am. 62, 55) and adapted for HST imagery by Snyder
# (1990, in Restoration of HST Images and Spectra, ST ScI Workshop Proceedings; see also
# Snyder, Hammoud, & White, JOSA, v. 10, no. 5, May 1993, in press).
# Additional options developed by Rick White (STScI) are also included.
#
# The Lucy-Richardson method can be derived from the maximum likelihood expression for data
# with a Poisson noise distribution. Thus, it naturally applies to optical imaging data such as HST.
# The method forces the restored image to be positive, in accord with photon-counting statistics.
#
# The Lucy-Richardson algorithm generates a restored image through an iterative method. The essence
# of the iteration is as follows: the (n+1)th estimate of the restored image is given by the nth estimate
# of the restored image multiplied by a correction image. That is,
#
    #    image_{n+1} = image_n x [ original_data / (image_n * PSF) ] * reflect(PSF)
# where the *'s represent convolution operators and reflect(PSF) is the reflection of the PSF, i.e.
# reflect((PSF)(x,y)) = PSF(-x,-y). When the convolutions are carried out using fast Fourier transforms
# (FFTs), one can use the fact that FFT(reflect(PSF)) = conj(FFT(PSF)), where conj is the complex conjugate
# operator.
Parameters
----------
o_image: sidpy_Dataset with DataType='image'
        the image to be deconvolved
resolution:
width of resolution function
Returns
-------
out_dataset: sidpy.Dataset
the deconvoluted dataset
"""
if len(o_image) < 1:
return o_image
scale_x = sidpy.base.num_utils.get_slope(o_image.dim_0)
gauss_diameter = resolution/scale_x
probe = make_gauss(o_image.shape[0], o_image.shape[1], gauss_diameter)
probe_c = np.ones(probe.shape, dtype=np.complex64)
probe_c.real = probe
error = np.ones(o_image.shape, dtype=np.complex64)
est = np.ones(o_image.shape, dtype=np.complex64)
source = np.ones(o_image.shape, dtype=np.complex64)
source.real = o_image
response_ft = np.fft.fft2(probe_c)
dx = o_image.x[1]-o_image.x[0]
dk = 1.0 / float(o_image.x[-1]) # last value of x axis is field of view
screen_width = 1 / dx
aperture = np.ones(o_image.shape, dtype=np.complex64)
# Mask for the aperture before the Fourier transform
n = o_image.shape[0]
size_x = o_image.shape[0]
size_y = o_image.shape[1]
theta_x = np.array(-size_x / 2. + np.arange(size_x))
theta_y = np.array(-size_y / 2. + np.arange(size_y))
t_xv, t_yv = np.meshgrid(theta_x, theta_y)
tp1 = t_xv ** 2 + t_yv ** 2 >= o_image.shape[0]*4/5 ** 2
aperture[tp1.T] = 0.
# print(app_ratio, screen_width, dk)
progress = tqdm(total=500)
# de = 100
dest = 100
i = 0
while abs(dest) > 0.0001: # or abs(de) > .025:
i += 1
error_old = np.sum(error.real)
est_old = est.copy()
error = source / np.real(np.fft.fftshift(np.fft.ifft2(np.fft.fft2(est) * response_ft)))
est = est * np.real(np.fft.fftshift(np.fft.ifft2(np.fft.fft2(error) * np.conjugate(response_ft))))
error_new = np.real(np.sum(np.power(error, 2))) - error_old
dest = np.sum(np.power((est - est_old).real, 2)) / np.sum(est) * 100
if error_old != 0:
de = error_new / error_old * 1.0
else:
de = error_new
if verbose:
print(
' LR Deconvolution - Iteration: {0:d} Error: {1:.2f} = change: {2:.5f}%, {3:.5f}%'.format(i, error_new,
de,
abs(dest)))
if i > 500:
dest = 0.0
print('terminate')
progress.update(1)
progress.write(f"converged in {i} iterations")
#progress.close()
print('\n Lucy-Richardson deconvolution converged in ' + str(i) + ' iterations')
est2 = np.real(np.fft.ifft2(np.fft.fft2(est) * np.fft.fftshift(aperture)))
out_dataset = o_image.like_data(np.real(est))
out_dataset.title = 'Lucy Richardson deconvolution'
out_dataset.data_type = 'image'
return out_dataset
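# Hypothetical usage sketch (not part of the original module), assuming `dataset` is a
# sidpy.Dataset of data_type IMAGE with calibrated x/y dimensions:
#   denoised = clean_svd(dataset, pixel_size=0.05, source_size=5)
#   restored = decon_lr(dataset, resolution=0.1, verbose=False)
#   # both return new sidpy.Dataset images ('Major SVD component' /
#   # 'Lucy Richardson deconvolution')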
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
import time
import plugins
import json
class Plugin(plugins.BasePlugin):
__name__ = 'powerdns'
def run(self, config):
'''
Experimental plugin for PowerDNS authoritative server. Might also work with PowerDNS recursor,
but it may need extra delta_keys / absolute_keys.
Add to /etc/nixstats.ini:
[powerdns]
enabled=yes
statistics_url=http://localhost:8081/api/v1/servers/localhost/statistics
api_key=changeme
;ca_file=
;ca_path=
;timeout=10
'''
# Create request to configured URL
        request = Request(config.get(__name__, 'statistics_url'), headers={'X-API-Key': '%s' % config.get(__name__, 'api_key')})
# defaults
timeout = 10
results = dict()
raw_response = None
# next / previous cached metrics (for calculating deltas)
next_cache = dict()
prev_cache = self.get_agent_cache()
# use timeout from config if specified
if config.has_option(__name__, 'timeout'):
timeout = int(config.get(__name__, 'timeout'))
# create response based on configuration
if config.has_option(__name__, 'ca_file'):
            raw_response = urlopen(request, timeout=timeout, cafile=config.get(__name__, 'ca_file'))
elif config.has_option(__name__, 'ca_path'):
            raw_response = urlopen(request, timeout=timeout, capath=config.get(__name__, 'ca_path'))
else:
            raw_response = urlopen(request, timeout=timeout)
# set next_cache timestamp
next_cache['ts'] = time.time()
# parse raw response as JSON
try:
stats = json.loads(raw_response.read())
except Exception:
return False
# keys for which we should calculate the delta
delta_keys = (
'corrupt-packets',
'deferred-cache-inserts',
'deferred-cache-lookup',
'deferred-packetcache-inserts',
'deferred-packetcache-lookup',
'dnsupdate-answers',
'dnsupdate-changes',
'dnsupdate-queries',
'dnsupdate-refused',
'incoming-notifications',
'overload-drops',
'packetcache-hit',
'packetcache-miss',
'query-cache-hit',
'query-cache-miss',
'rd-queries',
'recursing-answers',
'recursing-questions',
'recursion-unanswered',
'servfail-packets',
'signatures',
'sys-msec',
'tcp-answers',
'tcp-answers-bytes',
'tcp-queries',
'tcp4-answers',
'tcp4-answers-bytes',
'tcp4-queries',
'tcp6-answers',
'tcp6-answers-bytes',
'tcp6-queries',
'timedout-packets',
'udp-answers',
'udp-answers-bytes',
'udp-do-queries',
'udp-in-errors',
'udp-noport-errors',
'udp-queries',
'udp-recvbuf-errors',
'udp-sndbuf-errors',
'udp4-answers',
'udp4-answers-bytes',
'udp4-queries',
'udp6-answers',
'udp6-answers-bytes',
'udp6-queries',
'user-msec'
)
# keys for which we should store the absolute value:
absolute_keys = (
'key-cache-size',
'latency',
'fd-usage',
'meta-cache-size',
'open-tcp-connections',
'packetcache-size',
'qsize-q',
'query-cache-size',
'real-memory-usage',
'security-status',
'signature-cache-size',
'uptime'
)
data = dict()
for stat in stats:
if 'name' in stat and 'value' in stat and 'type' in stat:
if stat['type'] == 'StatisticItem':
if stat['name'] in delta_keys:
results[stat['name']] = self.absolute_to_per_second(stat['name'], float(stat['value']), prev_cache)
data[stat['name']] = float(stat['value'])
elif stat['name'] in absolute_keys:
results[stat['name']] = float(stat['value'])
data['ts'] = time.time()
self.set_agent_cache(data)
return results
if __name__ == '__main__':
Plugin().execute()
|
import subprocess
import typing
def run_process(args: typing.List[str], env=None) -> bytes:
run = subprocess.run(
args,
stdout=subprocess.PIPE,
env=env,
)
if run.returncode:
raise Exception(f"Exited with code {run.returncode}")
return run.stdout
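# Minimal usage example (added for illustration; assumes a Unix-like system with
# the `echo` and `false` commands available):
#   stdout = run_process(["echo", "hello"])   # -> b'hello\n'
#   run_process(["false"])                    # raises Exception("Exited with code 1")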
|
class GeometryBase(CommonObject,IDisposable,ISerializable):
""" Provides a common base for most geometric classes. This class is abstract. """
def ComponentIndex(self):
"""
ComponentIndex(self: GeometryBase) -> ComponentIndex
If this piece of geometry is a component in something larger,like a BrepEdge
in a
Brep,then this function returns the component index.
Returns: This object's component index. If this object is not a sub-piece of a larger
geometric entity,then the returned index has
m_type=ComponentIndex.InvalidType
and m_index=-1.
"""
pass
def ConstructConstObject(self,*args):
"""
ConstructConstObject(self: CommonObject,parentObject: object,subobject_index: int)
Assigns a parent object and a subobject index to this.
parentObject: The parent object.
subobject_index: The subobject index.
"""
pass
def Dispose(self):
"""
Dispose(self: CommonObject,disposing: bool)
For derived class implementers.
This method is called with argument true when class
user calls Dispose(),while with argument false when
the Garbage Collector invokes
the finalizer,or Finalize() method.You must reclaim all used unmanaged resources in both cases,
and can use this chance to call Dispose on disposable fields if the argument is true.Also,you
must call the base virtual method within your overriding method.
disposing: true if the call comes from the Dispose() method; false if it comes from the Garbage Collector
finalizer.
"""
pass
def Duplicate(self):
"""
Duplicate(self: GeometryBase) -> GeometryBase
Constructs a deep (full) copy of this object.
Returns: An object of the same type as this,with the same properties and behavior.
"""
pass
def DuplicateShallow(self):
"""
DuplicateShallow(self: GeometryBase) -> GeometryBase
Constructs a light copy of this object. By "light",it is meant that the same
underlying data is used until something is done to attempt to change it. For example,
you could have a shallow copy of a very heavy mesh object and the same underlying
data will be used when doing things like inspecting the number of faces on the mesh.
If you modify the location of one of the mesh vertices,the shallow copy will create
a full duplicate of the underlying mesh data and the shallow copy will become a
deep copy.
Returns: An object of the same type as this object.
This behavior is overridden by
implementing classes.
"""
pass
def GetBoundingBox(self,*__args):
"""
GetBoundingBox(self: GeometryBase,plane: Plane) -> BoundingBox
Aligned Boundingbox solver. Gets the plane aligned boundingbox.
plane: Orientation plane for BoundingBox.
Returns: A BoundingBox in plane coordinates.
GetBoundingBox(self: GeometryBase,plane: Plane) -> (BoundingBox,Box)
Aligned Boundingbox solver. Gets the plane aligned boundingbox.
plane: Orientation plane for BoundingBox.
Returns: A BoundingBox in plane coordinates.
GetBoundingBox(self: GeometryBase,accurate: bool) -> BoundingBox
Boundingbox solver. Gets the world axis aligned boundingbox for the geometry.
accurate: If true,a physically accurate boundingbox will be computed.
If not,a boundingbox
estimate will be computed. For some geometry types there is no
difference between
the estimate and the accurate boundingbox. Estimated boundingboxes
can be computed
much (much) faster than accurate (or "tight") bounding boxes.
Estimated bounding
boxes are always similar to or larger than accurate bounding boxes.
Returns: The boundingbox of the geometry in world coordinates or BoundingBox.Empty
if not
bounding box could be found.
GetBoundingBox(self: GeometryBase,xform: Transform) -> BoundingBox
Aligned Boundingbox solver. Gets the world axis aligned boundingbox for the transformed geometry.
xform: Transformation to apply to object prior to the BoundingBox computation.
The
geometry itself is not modified.
Returns: The accurate boundingbox of the transformed geometry in world coordinates
or
BoundingBox.Empty if not bounding box could be found.
"""
pass
def GetUserString(self,key):
"""
GetUserString(self: GeometryBase,key: str) -> str
Gets user string from this geometry.
key: id used to retrieve the string.
Returns: string associated with the key if successful. null if no key was found.
"""
pass
def GetUserStrings(self):
"""
GetUserStrings(self: GeometryBase) -> NameValueCollection
Gets a copy of all (user key string,user value string) pairs attached to this geometry.
Returns: A new collection.
"""
pass
def MakeDeformable(self):
"""
MakeDeformable(self: GeometryBase) -> bool
If possible,converts the object into a form that can be accurately modified
with
"squishy" transformations like projections,shears,an non-uniform scaling.
Returns: false if object cannot be converted to a deformable object. true if object was
already deformable or was converted into a deformable object.
"""
pass
def MemoryEstimate(self):
"""
MemoryEstimate(self: GeometryBase) -> UInt32
Computes an estimate of the number of bytes that this object is using in memory.
Returns: An estimated memory footprint.
"""
pass
def NonConstOperation(self,*args):
"""
NonConstOperation(self: CommonObject)
For derived classes implementers.
Defines the necessary implementation to free the
instance from being const.
"""
pass
def OnSwitchToNonConst(self,*args):
"""
OnSwitchToNonConst(self: GeometryBase)
Is called when a non-const operation occurs.
"""
pass
def Rotate(self,angleRadians,rotationAxis,rotationCenter):
"""
Rotate(self: GeometryBase,angleRadians: float,rotationAxis: Vector3d,rotationCenter: Point3d) -> bool
Rotates the object about the specified axis. A positive rotation
angle results in
a counter-clockwise rotation about the axis (right hand rule).
angleRadians: Angle of rotation in radians.
rotationAxis: Direction of the axis of rotation.
rotationCenter: Point on the axis of rotation.
Returns: true if geometry successfully rotated.
"""
pass
def Scale(self,scaleFactor):
"""
Scale(self: GeometryBase,scaleFactor: float) -> bool
Scales the object by the specified factor. The scale is centered at the origin.
scaleFactor: The uniform scaling factor.
Returns: true if geometry successfully scaled.
"""
pass
def SetUserString(self,key,value):
"""
SetUserString(self: GeometryBase,key: str,value: str) -> bool
Attach a user string (key,value combination) to this geometry.
key: id used to retrieve this string.
value: string associated with key.
Returns: true on success.
"""
pass
def Transform(self,xform):
"""
Transform(self: GeometryBase,xform: Transform) -> bool
Transforms the geometry. If the input Transform has a SimilarityType of
OrientationReversing,you may want to consider flipping the transformed
geometry
after calling this function when it makes sense. For example,
you may want to call
Flip() on a Brep after transforming it.
xform: Transformation to apply to geometry.
Returns: true if geometry successfully transformed.
"""
pass
def Translate(self,*__args):
"""
Translate(self: GeometryBase,x: float,y: float,z: float) -> bool
Translates the object along the specified vector.
x: The X component.
y: The Y component.
z: The Z component.
Returns: true if geometry successfully translated.
Translate(self: GeometryBase,translationVector: Vector3d) -> bool
Translates the object along the specified vector.
translationVector: A moving vector.
Returns: true if geometry successfully translated.
"""
pass
def __enter__(self,*args):
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self,*args):
"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
""" __new__(cls: type,info: SerializationInfo,context: StreamingContext) """
pass
def __reduce_ex__(self,*args):
pass
HasBrepForm=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns true if the Brep.TryConvertBrep function will be successful for this object
Get: HasBrepForm(self: GeometryBase) -> bool
"""
IsDeformable=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""true if object can be accurately modified with "squishy" transformations like
projections,shears,and non-uniform scaling.
Get: IsDeformable(self: GeometryBase) -> bool
"""
IsDocumentControlled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""If true this object may not be modified. Any properties or functions that attempt
to modify this object when it is set to "IsReadOnly" will throw a NotSupportedException.
Get: IsDocumentControlled(self: GeometryBase) -> bool
"""
ObjectType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Useful for switch statements that need to differentiate between
basic object types like points,curves,surfaces,and so on.
Get: ObjectType(self: GeometryBase) -> ObjectType
"""
UserStringCount=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the amount of user strings.
Get: UserStringCount(self: GeometryBase) -> int
"""
|
import torch
import torchvision
import net
import numpy as np
from PIL import Image
import glob
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
test_list = glob.glob("test_images/*")
dehaze_net = net.dehaze_net().to(device)
if torch.cuda.is_available():
dehaze_net.load_state_dict(torch.load('snapshots/dehazer.pth'))
else:
dehaze_net.load_state_dict(torch.load('snapshots/dehazer.pth', map_location=lambda storage, loc: storage))
for image in test_list:
data_hazy = Image.open(image)
data_hazy = (np.asarray(data_hazy)/255.0)
data_hazy = torch.from_numpy(data_hazy).float()
data_hazy = data_hazy.permute(2,0,1)
data_hazy = data_hazy.to(device).unsqueeze(0)
clean_image = dehaze_net(data_hazy)
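        # NOTE: the backslash-based path handling below assumes a Windows-style
        # layout with an existing "results" folder; os.path.join/os.path.basename
        # would be the portable alternative.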
torchvision.utils.save_image(torch.cat((data_hazy, clean_image),0), "results\\" + image.split("\\")[-1])
print(image, "done!")
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 31 08:13:13 2019
@author: BigBoss
"""
import serial, time, threading
def medir(x):
try:
while True:
y = x.readline().decode('utf-8')
#print(y.decode("ascii",end=""))
print(y)
except:
pass
x = serial.Serial("COM8",9600)
time.sleep(2)
thread = threading.Thread(target = medir, args=(x,))
thread.start()
while True:
r = input("r.- Rojo:\na.- Amarillo:\nv.- Verde:\n Elige una opcion: ")
r = r.encode("utf-8")
print(r)
if r == b"x":
break
x.write(r)
x.close()
|
import numpy as np
import logging
logger = logging.getLogger(__name__)
def translate_array(arr,shifts):
"""
    'translate_array' performs integer translation on a 2D numpy array.
Params:
arr - a 2D nparray
shifts - (xshift, yshift)
Returns
new array of same shape and datatype as arr
Comments:
Positive values shift up and left
Areas outside of the original array are filled with np.NAN
Correction - they would be if we could store NAN values, otherwise they are coerced to 0
"""
#check array is only 2D
if not len(arr.shape)==2:
logger.debug('Only 2D arrays are supported')
raise RuntimeError
    if not len(shifts)==2:
        logger.debug('shifts must contain exactly two values (xshift, yshift)')
        raise RuntimeError
    # parenthesise the whole condition so that *both* components must be integral
    if not (shifts[0]==int(shifts[0]) and shifts[1]==int(shifts[1])):
        logger.debug('Only integer translation is supported')
        raise RuntimeError
new_data = np.zeros(arr.shape, arr.dtype)
new_data[:]=np.NAN
s=arr.shape
new_data[max(0,0-shifts[0]):min(s[0],s[0]-shifts[0]),
max(0,0-shifts[1]):min(s[1],s[1]-shifts[1])] \
= arr[max(0,shifts[0]):min(s[0],s[0]+shifts[0]),
max(0,shifts[1]):min(s[1],s[1]+shifts[1])]
# Seems we cant store NAN values in an integer array
if np.any(new_data<0):
        logger.warning('Coercing array values to 0')
new_data[new_data<0] = 0
return new_data
if __name__=="__main__":
orig = np.zeros((5,5),np.uint8)
orig[2,2] = 1
new = translate_array(orig, (2,0))
print(new[0,2] == 1)
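    # Worked example of the sign convention (a sketch mirroring the test above):
    # with shifts=(2, 0) the value at orig[2, 2] moves two rows *up* to new[0, 2];
    # with shifts=(-1, 0) it would move one row *down*, ending up at new[3, 2].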
|
#!/usr/bin/python
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Name: Haohang Xu
# Date: 10/1/2015
#
# Some experimentation with multithreaded programming
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###########################################
### IMPORTS ###
###########################################
import random
import threading
###########################################
### GLOBALS ###
###########################################
###########################################
### CLASSES ###
###########################################
#==============================================================================
# Define thread class
class Print_thread (threading.Thread):
def __init__(self, to_print):
threading.Thread.__init__(self)
self.to_print = to_print
def run(self):
while True:
            print(self.to_print)
###########################################
### FUNCTION DEFS ###
###########################################
#==============================================================================
# Define function to start three threads that print 1, 2, and 3
def start_threads ():
thread1 = Print_thread(1)
thread2 = Print_thread(2)
thread3 = Print_thread(3)
thread1.start()
thread2.start()
thread3.start()
###########################################
### MAIN FUNCTION ###
###########################################
start_threads()
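# NOTE (illustrative, not in the original): the three threads above loop
# forever, so the script never exits on its own. One hypothetical way to allow
# clean termination would be to mark each thread as a daemon before starting it:
#
#   thread1.daemon = True   # daemon threads die when the main thread exits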
|
"""iKala Dataset Loader
.. admonition:: Dataset Info
:class: dropdown
The iKala dataset is comprised of 252 30-second excerpts sampled from 206 iKala
songs (plus 100 hidden excerpts reserved for MIREX).
The music accompaniment and the singing voice are recorded at the left and right
channels respectively and can be found under the Wavfile directory.
In addition, the human-labeled pitch contours and timestamped lyrics can be
found under PitchLabel and Lyrics respectively.
For more details, please visit: http://mac.citi.sinica.edu.tw/ikala/
"""
import csv
import os
import librosa
import logging
import numpy as np
from typing import BinaryIO, Optional, TextIO, Tuple
from mirdata import download_utils
from mirdata import jams_utils
from mirdata import core
from mirdata import annotations
from mirdata import io
BIBTEX = """@inproceedings{chan2015vocal,
title={Vocal activity informed singing voice separation with the iKala dataset},
author={Chan, Tak-Shing and Yeh, Tzu-Chun and Fan, Zhe-Cheng and Chen, Hung-Wei and Su, Li and Yang, Yi-Hsuan and
Jang, Roger},
booktitle={2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages={718--722},
year={2015},
organization={IEEE}
}"""
TIME_STEP = 0.032 # seconds
REMOTES = {
"metadata": download_utils.RemoteFileMetadata(
filename="id_mapping.txt",
url="http://mac.citi.sinica.edu.tw/ikala/id_mapping.txt",
checksum="81097b587804ce93e56c7a331ba06abc",
)
}
DOWNLOAD_INFO = """
Unfortunately the iKala dataset is not available for download.
If you have the iKala dataset, place the contents into a folder called
iKala with the following structure:
> iKala/
> Lyrics/
> PitchLabel/
> Wavfile/
and copy the iKala folder to {}
"""
LICENSE_INFO = """
When it was distributed, Ikala used to have a custom license.
Visit http://mac.citi.sinica.edu.tw/ikala/ for more details.
"""
class Track(core.Track):
"""ikala Track class
Args:
track_id (str): track id of the track
Attributes:
audio_path (str): path to the track's audio file
f0_path (str): path to the track's f0 annotation file
lyrics_path (str): path to the track's lyric annotation file
section (str): section. Either 'verse' or 'chorus'
singer_id (str): singer id
song_id (str): song id of the track
track_id (str): track id
Cached Properties:
f0 (F0Data): human-annotated singing voice pitch
        lyrics (LyricData): human-annotated lyrics
"""
def __init__(
self,
track_id,
data_home,
dataset_name,
index,
metadata,
):
super().__init__(
track_id,
data_home,
dataset_name,
index,
metadata,
)
self.f0_path = os.path.join(self._data_home, self._track_paths["pitch"][0])
self.lyrics_path = os.path.join(self._data_home, self._track_paths["lyrics"][0])
self.audio_path = os.path.join(self._data_home, self._track_paths["audio"][0])
self.song_id = track_id.split("_")[0]
self.section = track_id.split("_")[1]
@property
def singer_id(self):
return self._track_metadata.get(self.song_id)
@core.cached_property
def f0(self) -> Optional[annotations.F0Data]:
return load_f0(self.f0_path)
@core.cached_property
def lyrics(self) -> Optional[annotations.LyricData]:
return load_lyrics(self.lyrics_path)
@property
def vocal_audio(self) -> Optional[Tuple[np.ndarray, float]]:
"""solo vocal audio (mono)
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_vocal_audio(self.audio_path)
@property
def instrumental_audio(self) -> Optional[Tuple[np.ndarray, float]]:
"""instrumental audio (mono)
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_instrumental_audio(self.audio_path)
@property
def mix_audio(self) -> Optional[Tuple[np.ndarray, float]]:
"""mixture audio (mono)
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_mix_audio(self.audio_path)
def to_jams(self):
"""Get the track's data in jams format
Returns:
jams.JAMS: the track's data in jams format
"""
return jams_utils.jams_converter(
audio_path=self.audio_path,
f0_data=[(self.f0, None)],
lyrics_data=[(self.lyrics, None)],
metadata={
"section": self.section,
"singer_id": self.singer_id,
"track_id": self.track_id,
"song_id": self.song_id,
},
)
@io.coerce_to_bytes_io
def load_vocal_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:
"""Load ikala vocal audio
Args:
fhandle (str or file-like): File-like object or path to audio file
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
audio, sr = librosa.load(fhandle, sr=None, mono=False)
vocal_channel = audio[1, :]
return vocal_channel, sr
@io.coerce_to_bytes_io
def load_instrumental_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:
"""Load ikala instrumental audio
Args:
fhandle (str or file-like): File-like object or path to audio file
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
audio, sr = librosa.load(fhandle, sr=None, mono=False)
instrumental_channel = audio[0, :]
return instrumental_channel, sr
@io.coerce_to_bytes_io
def load_mix_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:
"""Load an ikala mix.
Args:
fhandle (str or file-like): File-like object or path to audio file
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
mixed_audio, sr = librosa.load(fhandle, sr=None, mono=True)
# multipy by 2 because librosa averages the left and right channel.
return 2.0 * mixed_audio, sr
@io.coerce_to_string_io
def load_f0(fhandle: TextIO) -> annotations.F0Data:
"""Load an ikala f0 annotation
Args:
fhandle (str or file-like): File-like object or path to f0 annotation file
Raises:
IOError: If f0_path does not exist
Returns:
F0Data: the f0 annotation data
"""
lines = fhandle.readlines()
f0_midi = np.array([float(line) for line in lines])
f0_hz = librosa.midi_to_hz(f0_midi) * (f0_midi > 0)
confidence = (f0_hz > 0).astype(float)
times = (np.arange(len(f0_midi)) * TIME_STEP) + (TIME_STEP / 2.0)
f0_data = annotations.F0Data(times, f0_hz, confidence)
return f0_data
@io.coerce_to_string_io
def load_lyrics(fhandle: TextIO) -> annotations.LyricData:
"""Load an ikala lyrics annotation
Args:
fhandle (str or file-like): File-like object or path to lyric annotation file
Raises:
IOError: if lyrics_path does not exist
Returns:
LyricData: lyric annotation data
"""
# input: start time (ms), end time (ms), lyric, [pronunciation]
reader = csv.reader(fhandle, delimiter=" ")
start_times = []
end_times = []
lyrics = []
pronunciations = []
for line in reader:
start_times.append(float(line[0]) / 1000.0)
end_times.append(float(line[1]) / 1000.0)
lyrics.append(line[2])
if len(line) > 2:
pronunciation = " ".join(line[3:])
pronunciations.append(pronunciation)
else:
pronunciations.append("")
lyrics_data = annotations.LyricData(
np.array([start_times, end_times]).T,
lyrics,
pronunciations,
)
return lyrics_data
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
"""
The ikala dataset
"""
def __init__(self, data_home=None):
super().__init__(
data_home,
name="ikala",
track_class=Track,
bibtex=BIBTEX,
remotes=REMOTES,
download_info=DOWNLOAD_INFO,
license_info=LICENSE_INFO,
)
@core.cached_property
def _metadata(self):
id_map_path = os.path.join(self.data_home, "id_mapping.txt")
if not os.path.exists(id_map_path):
raise FileNotFoundError("Metadata not found. Did you run .download()?")
with open(id_map_path, "r") as fhandle:
reader = csv.reader(fhandle, delimiter="\t")
singer_map = {}
for line in reader:
if line[0] == "singer":
continue
singer_map[line[1]] = line[0]
return singer_map
@core.copy_docs(load_vocal_audio)
def load_vocal_audio(self, *args, **kwargs):
return load_vocal_audio(*args, **kwargs)
@core.copy_docs(load_instrumental_audio)
def load_instrumental_audio(self, *args, **kwargs):
return load_instrumental_audio(*args, **kwargs)
@core.copy_docs(load_mix_audio)
def load_mix_audio(self, *args, **kwargs):
return load_mix_audio(*args, **kwargs)
@core.copy_docs(load_f0)
def load_f0(self, *args, **kwargs):
return load_f0(*args, **kwargs)
@core.copy_docs(load_lyrics)
def load_lyrics(self, *args, **kwargs):
return load_lyrics(*args, **kwargs)
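# --- Usage sketch (illustrative, not part of the loader) ---------------------
# Assuming the iKala data has been placed under `data_home` as described in
# DOWNLOAD_INFO, and that core.Dataset exposes `track_ids` and `load_tracks()`
# as in recent mirdata releases:
#
#   dataset = Dataset(data_home="/path/to/iKala")
#   track = dataset.load_tracks()[dataset.track_ids[0]]
#   audio, sr = track.mix_audio    # mono mixture
#   f0 = track.f0                  # human-annotated singing voice pitch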
|
import autograd.numpy as anp
import numpy as np
from autograd.scipy.stats import norm
from viabel.approximations import MFGaussian, MFStudentT
from viabel.objectives import AlphaDivergence, DISInclusiveKL, ExclusiveKL
from viabel.optimization import RMSProp
def _test_objective(objective_cls, num_mc_samples, **kwargs):
np.random.seed(851)
# mean = np.random.randn(1,dimension)
# stdev = np.exp(np.random.randn(1,dimension))
mean = np.array([1., -1.])[np.newaxis, :]
stdev = np.array([2., 5.])[np.newaxis, :]
def log_p(x):
return anp.sum(norm.logpdf(x, loc=mean, scale=stdev), axis=1)
approx = MFStudentT(2, 100)
objective = objective_cls(approx, log_p, num_mc_samples, **kwargs)
# large number of MC samples and smaller epsilon and learning rate to ensure accuracy
init_param = np.array([0, 0, 1, 1], dtype=np.float32)
opt = RMSProp(0.1)
opt_results = opt.optimize(1000, objective, init_param)
# iterate averaging introduces some bias, so use last iterate
est_mean, est_cov = approx.mean_and_cov(opt_results['opt_param'])
est_stdev = np.sqrt(np.diag(est_cov))
print(est_stdev, stdev)
np.testing.assert_almost_equal(mean.squeeze(), est_mean, decimal=1)
np.testing.assert_almost_equal(stdev.squeeze(), est_stdev, decimal=1)
def test_ExclusiveKL():
_test_objective(ExclusiveKL, 100)
def test_ExclusiveKL_path_deriv():
_test_objective(ExclusiveKL, 100, use_path_deriv=True)
def test_DISInclusiveKL():
dim = 2
_test_objective(DISInclusiveKL, 100,
temper_prior=MFGaussian(dim),
temper_prior_params=np.concatenate([[0] * dim, [1] * dim]),
ess_target=50)
def test_AlphaDivergence():
_test_objective(AlphaDivergence, 100, alpha=2)
|
from privyfilter.privyfilter import Privyfilter as pf
faces, imger = pf.faceScrub("../TestPics/10.jpg")
print(faces)
img = pf.RemoveMetaData("../TestPics/10.jpg")
print(img)
#Synthetic face replacement ( Simple Deepfake)
#get random sythetic face
fakeface = pf.getFakeFace()
fakeface.save_image("fakeface.jpg")
faces, imger = pf.faceScrub("fakeface.jpg")
print(faces)
#Swap faces
faceswap = pf.swapFaces("fakeface.jpg", "../TestPics/9.jpg")
print(faceswap)
quit()
|
import sys
class Physical_person():
def __init__(self, by, dr, dt, ad):
self.birth_year = by
self.disability_rate = dr
self.disability_type = dt
self.address = ad
def __repr__(self):
return "birth_year: " + str(self.birth_year) + "\n" +\
"disability_rate: " + str(self.disability_rate) + "\n" +\
"disability_type: " + str(self.disability_type) + "\n" +\
"address: " + str(self.address) + "\n"
class Tax_payer(Physical_person):
def __init__(self, by, dr, dt, ad, ir):
Physical_person.__init__(self, by, dr, dt, ad)
self.is_resident = ir
self.children = []
self.income_pension = []
self.income_employment = []
self.income_other = []
def __repr__(self):
return Physical_person.__repr__(self) +\
"is_resident: " + str(self.is_resident) + "\n" +\
"nb_child: " + str(len(self.children)) + "\n" +\
"income_pension: " + str(self.income_pension) + "\n" +\
"income_employment: " + str(self.income_employment) + "\n" +\
"income_other: " + str(self.income_other) + "\n"
class Child(Physical_person):
def __init__(self, by, dr, dt, ad):
Physical_person.__init__(self, by, dr, dt, ad)
def __repr__(self):
return Physical_person.__repr__(self)
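# The analyse_* helpers below encode each measured quantity as a semicolon-
# separated triple of booleans marking whether it falls in the low ([0, 0.33)),
# medium ([0.33, 0.67]) or high ((0.67, 1]) band: return_interval handles a
# single value and return_interval_array aggregates a list of values.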
def return_interval(param, name):
#print(name + ": " + str(param))
if 0 <= param < 0.33:
return "True;False;False"
elif 0.33 <= param <= 0.67:
return "False;True;False"
elif 0.67 < param <= 1:
return "False;False;True"
else:
print("ERROR: invalid " + name)
exit()
def return_interval_array(param, name):
#print(name + ": " + str(param))
L = False
M = False
H = False
for p in param:
if 0 <= p < 0.33:
L = True
elif 0.33 <= p <= 0.67:
M = True
elif 0.67 < p <= 1:
H = True
else:
print("ERROR: invalid " + name)
exit()
return str(L) + ";" + str(M) + ";" + str(H)
def analyse_nb_tax_payer():
return return_interval((nb_tax_payer-1)/100, "nb_tax_payer")
def analyse_birth_year():
tmp = []
for t in tax_payer_array:
tmp.append((t.birth_year-1920)/100)
return return_interval_array(tmp, "birth_year")
def analyse_disability_rate():
tmp = []
for t in tax_payer_array:
tmp.append(t.disability_rate)
return return_interval_array(tmp, "disability_rate")
def analyse_disability_type():
type_none = False
type_vision = False
type_a = False
for t in tax_payer_array:
if t.disability_type == "None":
type_none = True
elif t.disability_type == "Vision":
type_vision = True
else:
type_a = True
return str(type_none) + ";" + str(type_vision) + ";" + str(type_a)
def analyse_is_resident():
tr = False
fa = False
for t in tax_payer_array:
if t.is_resident == "True":
tr = True
else:
fa = True
return str(tr) + ";" + str(fa)
def analyse_tax_payer_nb_address():
tmp = []
for t in tax_payer_array:
tmp.append((len(t.address)-1)/2)
return return_interval_array(tmp, "tax_payer_nb_address")
def analyse_tax_payer_address():
lu = False
fr = False
be = False
de = False
ot = False
for t in tax_payer_array:
for a in t.address:
if a == "LU":
lu = True
elif a == "FR":
fr = True
elif a == "BE":
be = True
elif a == "DE":
de = True
else:
ot = True
return str(lu) + ";" + str(fr) + ";" + str(be) + ";" + str(de) + ";" + str(ot)
def analyse_nb_child():
zer = False
one = False
two = False
thr = False
for t in tax_payer_array:
nb_child = len(t.children)
if nb_child == 0:
zer = True
elif nb_child == 1:
one = True
elif nb_child == 2:
two = True
else:
thr = True
return str(zer) + ";" + str(one) + ";" + str(two) + ";" + str(thr)
def analyse_child_birth_year():
tmp = []
for t in tax_payer_array:
for c in t.children:
tmp.append((c.birth_year-1920)/100)
return return_interval_array(tmp, "child_birth_year")
def analyse_child_disability_rate():
tmp = []
for t in tax_payer_array:
for c in t.children:
            tmp.append(c.disability_rate)  # the child's rate, not the parent's
return return_interval_array(tmp, "child_disability_rate")
def analyse_child_disability_type():
type_none = False
type_vision = False
type_a = False
for t in tax_payer_array:
for c in t.children:
if c.disability_type == "None":
type_none = True
elif c.disability_type == "Vision":
type_vision = True
else:
type_a = True
return str(type_none) + ";" + str(type_vision) + ";" + str(type_a)
def analyse_child_nb_address():
tmp = []
for t in tax_payer_array:
for c in t.children:
            tmp.append((len(c.address)-0.5)/3)  # the child's addresses, not the parent's
return return_interval_array(tmp, "child_nb_address")
def analyse_child_address():
lu = False
fr = False
be = False
de = False
ot = False
for t in tax_payer_array:
for c in t.children:
            for a in c.address:  # iterate over the child's addresses
if a == "LU":
lu = True
elif a == "FR":
fr = True
elif a == "BE":
be = True
elif a == "DE":
de = True
else:
ot = True
return str(lu) + ";" + str(fr) + ";" + str(be) + ";" + str(de) + ";" + str(ot)
def analyse_nb_pension():
zer = False
one = False
two = False
thr = False
for t in tax_payer_array:
nb_pension = len(t.income_pension)
if nb_pension == 0:
zer = True
elif nb_pension == 1:
one = True
elif nb_pension == 2:
two = True
else:
thr = True
return str(zer) + ";" + str(one) + ";" + str(two) + ";" + str(thr)
def analyse_nb_employment():
zer = False
one = False
two = False
thr = False
for t in tax_payer_array:
nb_employment = len(t.income_employment)
if nb_employment == 0:
zer = True
elif nb_employment == 1:
one = True
elif nb_employment == 2:
two = True
else:
thr = True
return str(zer) + ";" + str(one) + ";" + str(two) + ";" + str(thr)
def analyse_nb_other():
zer = False
one = False
two = False
thr = False
for t in tax_payer_array:
nb_other = len(t.income_other)
if nb_other == 0:
zer = True
elif nb_other == 1:
one = True
elif nb_other == 2:
two = True
else:
thr = True
return str(zer) + ";" + str(one) + ";" + str(two) + ";" + str(thr)
def analyse_pension_is_local():
tr = False
fa = False
for t in tax_payer_array:
for i in t.income_pension:
if i == "True":
tr = True
else:
fa = True
return str(tr) + ";" + str(fa)
def analyse_employment_is_local():
tr = False
fa = False
for t in tax_payer_array:
for i in t.income_employment:
if i == "True":
tr = True
else:
fa = True
return str(tr) + ";" + str(fa)
def analyse_other_is_local():
tr = False
fa = False
for t in tax_payer_array:
for i in t.income_other:
if i == "True":
tr = True
else:
fa = True
return str(tr) + ";" + str(fa)
def analyse_nb_income():
tmp = []
for t in tax_payer_array:
tmp.append((len(t.income_pension) + len(t.income_employment) + len(t.income_other) - 1)/2)
return return_interval_array(tmp, "nb_income")
def analyse_lu_address():
no_lu = False
lu = False
all_lu = False
for t in tax_payer_array:
counter = 0
for a in t.address:
if a == "LU":
counter += 1
if counter == len(t.address):
all_lu = True
elif counter == 0:
no_lu = True
else:
lu = True
return str(no_lu) + ";" + str(lu) + ";" + str(all_lu)
def analyse_c4():
pension_lu = False
employment_lu = False
other_lu = False
for t in tax_payer_array:
tmp = False
for a in t.address:
if a == "LU":
tmp = True
if tmp:
for i in t.income_pension:
if i == "True":
pension_lu = True
for i in t.income_employment:
if i == "True":
employment_lu = True
for i in t.income_other:
if i == "True":
other_lu = True
return str(pension_lu) + ";" + str(employment_lu) + ";" + str(other_lu)
tax_payer_array = []
with open(sys.argv[1] + "tax.csv", "r") as f:
nb_tax_payer = int(f.readline()[:-1])
for i in range(nb_tax_payer):
tmp = f.readline()[:-1].split(";")
address = f.readline()[:-1].split(";")
t = Tax_payer(int(tmp[0]), float(tmp[1]), tmp[2], address, tmp[3])
nb_child = int(f.readline()[:-1])
for j in range(nb_child):
tmp = f.readline()[:-1].split(";")
address = f.readline()[:-1].split(";")
c = Child(int(tmp[0]), float(tmp[1]), tmp[2], address)
t.children.append(c)
nb_income_pension = int(f.readline()[:-1])
if nb_income_pension > 0:
t.income_pension = f.readline()[:-1].split(";")
nb_income_employment = int(f.readline()[:-1])
if nb_income_employment > 0:
t.income_employment = f.readline()[:-1].split(";")
nb_income_other = int(f.readline()[:-1])
if nb_income_other > 0:
t.income_other = f.readline()[:-1].split(";")
tax_payer_array.append(t)
with open(sys.argv[2] + "analysis.csv", "a") as f:
f.write(analyse_nb_tax_payer() + ";" + analyse_birth_year() + ";" + analyse_disability_rate() + ";" + analyse_disability_type() + ";" + analyse_is_resident() + ";" + analyse_tax_payer_nb_address() + ";" + analyse_tax_payer_address() + ";" + analyse_nb_child() + ";" + analyse_child_birth_year() + ";" + analyse_child_disability_rate() + ";" + analyse_child_disability_type() + ";" + analyse_child_nb_address() + ";" + analyse_child_address() + ";" + analyse_nb_pension() + ";" + analyse_nb_employment() + ";" + analyse_nb_other() + ";" + analyse_pension_is_local() + ";" + analyse_employment_is_local() + ";" + analyse_other_is_local() + ";" + analyse_nb_income() + ";" + analyse_lu_address() + ";" + analyse_c4() + "\n")
|
import sys
import os
import json
import string
# Natural Language Toolkit Packages
from nltk import CFG, ChartParser, Tree
from nltk.tokenize import word_tokenize
from nltk.parse.generate import generate
def parse_blazon(blazon):
blazon = blazon.lower()
to_discard = set(string.punctuation)
to_discard.remove("&")
blazon = ''.join(c for c in blazon if c not in to_discard)
# Convert raw data to tokens to be parsed
tokens = word_tokenize(blazon)
# Replace instances of '1st', '2nd', etc with their non abbreviated forms
for (index, item) in enumerate(tokens):
if (item in abbr_to_full):
tokens[index] = abbr_to_full[item]
elif (item == "&"):
tokens[index] = "and"
# Sanitise tokens
tokens = disambiguate_colours(tokens)
tokens = reorder(tokens)
# Construct grammar and parser
with open('app/parser_cfg.txt') as f:
raw_cfg = f.read()
parser_grammar = CFG.fromstring(raw_cfg)
parser = ChartParser(parser_grammar)
# Parse data into tree
output_data = None
for tree in parser.parse(tokens):
output_data = tree
if (output_data is None):
print("Error: Parse failed, please check input is of correct format.")
else:
# Convert Tree to dict to prepare it for JSON serialisation
output_data = tree_to_dict(output_data)
# If a tincture is in the top level of the dictionary, change its name to "field"
if ("tincture" in output_data.keys()):
output_data["field"] = output_data["tincture"]
output_data.pop("tincture")
        # The caller is responsible for converting the returned dict to JSON
    return output_data
def generate_blazons(grammarfile, n, depth = None):
bs = []
with open(grammarfile) as g:
raw_cfg = g.read()
parser_grammar = CFG.fromstring(raw_cfg)
for blazon in generate(parser_grammar, n = n, depth = depth):
bwords = blazon
field = bwords[0]
z = ((isColour(field) and not any(map(isColour, bwords[1:]))) or
(isMetal(field) and not any(map(isMetal, bwords[1:])))) and (field not in bwords[1:])
if z:
bs.append(' '.join(blazon))
return bs
def main():
# Check arguments
if (len(sys.argv) == 1):
print("Too few arguments\nUsage: $ python generate.py <INPUT_FILE> [OUTPUT_FILE]")
sys.exit(0)
elif (len(sys.argv) > 3):
print("Too many arguments\nUsage: $ python generate.py <INPUT_FILE> [OUTPUT_FILE]")
sys.exit(0)
# Initialise paths
WORKING_DIR = sys.path[0]
INPUT_FILE = os.path.join(WORKING_DIR, sys.argv[1])
if (len(sys.argv) == 3):
OUTPUT_FILE = os.path.join(WORKING_DIR, sys.argv[2])
else:
# Extract base filename of input file
OUTPUT_NAME = os.path.basename(INPUT_FILE)
# Strip off file extension and add own (.esc for escutcheon)
OUTPUT_NAME = "trees/" + os.path.splitext(OUTPUT_NAME)[0] + ".esc"
OUTPUT_FILE = os.path.join(WORKING_DIR, OUTPUT_NAME)
# Read in input data
with open(INPUT_FILE) as f:
raw_data = f.read().lower()
to_discard = set(string.punctuation)
to_discard.remove("&")
raw_data = ''.join(c for c in raw_data if c not in to_discard)
# Convert raw data to tokens to be parsed
tokens = word_tokenize(raw_data)
# Replace instances of '1st', '2nd', etc with their non abbreviated forms
for (index, item) in enumerate(tokens):
if (item in abbr_to_full):
tokens[index] = abbr_to_full[item]
elif (item == "&"):
tokens[index] = "and"
# Sanitise tokens
tokens = disambiguate_colours(tokens)
tokens = reorder(tokens)
# Construct grammar and parser
with open('parser_cfg.txt') as f:
raw_cfg = f.read()
parser_grammar = CFG.fromstring(raw_cfg)
parser = ChartParser(parser_grammar)
# Parse data into tree
output_data = None
for tree in parser.parse(tokens):
output_data = tree
if (output_data is None):
print("Error: Parse failed, please check input is of correct format.")
else:
# Convert Tree to dict to prepare it for JSON serialisation
output_data = tree_to_dict(output_data)
# If a tincture is in the top level of the dictionary, change its name to "field"
if ("tincture" in output_data.keys()):
output_data["field"] = output_data["tincture"]
output_data.pop("tincture")
# Convert dict to JSON
with open(OUTPUT_FILE, 'w+') as f:
json.dump(output_data, f, indent=2)
def disambiguate_colours(tokens):
i = 0 # The index for navigating the array
palette = []
expecting_colour = False
while (i < len(tokens)):
t = tokens[i]
if (t in colours):
# Any colours we see will potentially be referenced later and must be saved in
# the order they appear
palette.append(t)
elif (t == "of"):
# "of the" is the phrase to expect before a colour replacement word, so if we see "of"
# we can begin to anticipate a colour reference word
expecting_colour = True
elif (t == "the") and expecting_colour:
# If "of" is followed by "the", then there's more reason to expect a colour reference
expecting_colour = True
elif (t in position_to_num and expecting_colour):
# Convert "first", "second", etc to its corresponding colour in the palette
colour = position_to_num[t]
tokens[i] = palette[colour - 1]
# Remove "of the" phrase from the token array
tokens.pop(i - 1)
tokens.pop(i - 2)
else:
# If none of the conditions are met, there's no reason to expect a colour
expecting_colour = False
i = i + 1 # Iterate the index
return tokens
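# Worked example for disambiguate_colours (a sketch, not from the original
# source): given ["azure", "a", "lion", "of", "the", "first"], "azure" is
# remembered in the palette, "of the first" is recognised as a reference to the
# first colour, and the result is ["azure", "a", "lion", "azure"] with the
# "of the" phrase removed.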
def reorder(tokens):
sections = [[]]
current_sections = [0]
duplicate_section = False # For cases where we have 'First & Fourth' etc
for t in tokens:
if (t == "quarterly"):
            sections.extend([[], [], [], []]) # Set aside four expected sections
for s in current_sections:
sections[s].append(t)
elif (t in position_to_num):
sections[position_to_num[t]].append(t)
if (duplicate_section):
current_sections.append(position_to_num[t])
duplicate_section = False
else:
current_sections = [position_to_num[t]]
elif (t == "and"):
duplicate_section = True
else:
for s in current_sections:
sections[s].append(t)
tokens = [token for section in sections for token in section]
return tokens
def tree_to_dict(tree):
new_dict = {
"charges": [],
}
if (tree.label() in ["QFirst", "QSecond", "QThird", "QFourth"]):
if (len(tree) == 3):
return tree_to_dict(tree[2])
else:
return tree_to_dict(tree[1])
for t in tree:
if (isinstance(t, Tree)):
if (t.label() in alias_table):
# For Charge Phrases
if (t.label() == "Charges"):
if (len(t) == 1):
new_dict['charges'].extend(tree_to_dict(t)['charges'] )
else:
# print(tree_to_dict(t))
new_dict['charges'].extend(tree_to_dict(t)['charges'])
# print(tree_to_dict(t[1]))
elif (t.label() == "ChP"):
# Get number of copies of charges on field
charge_no = tree_to_dict(t[0])
# Convert string to numeral
charge_no = word_to_num[charge_no]
# Get the rest of the details of the field
charge_details = Tree('Field', t[1:])
charge_details = tree_to_dict(charge_details)
# Populate array with correct number of copies of the details
charge_array = []
for i in range(0, charge_no):
charge_array.append(charge_details)
new_dict.update({alias_table[t.label()]: charge_array})
# For Quarterly Phrases
elif (t.label() == "QuartP"):
# Add field label to indicate quarterly type
new_dict.update({"field": "quarterly"})
new_dict.update({"quarters": list(map(tinc_to_field, [tree_to_dict(t[1]), tree_to_dict(t[2]),
tree_to_dict(t[3]), tree_to_dict(t[4])]))})
elif (t.label() == "Orient"):
if (t[0] == "sinister"):
new_dict.update({"sinister": True})
else:
new_dict.update({"sinister": False})
else:
new_dict.update({alias_table[t.label()]: tree_to_dict(t)})
else:
return tree_to_dict(t)
else:
return t
return new_dict
def tinc_to_field(output_data):
if ("tincture" in output_data.keys()) and ("charges" in output_data.keys()):
output_data["field"] = output_data["tincture"]
output_data.pop("tincture")
return output_data
colours = [
"gules",
"vert",
"azure",
"sable",
"or",
"argent",
"jaune",
"purpure"
]
alias_table = {
"Tinc": "tincture",
"Charge": "charge",
"Charges" : "charges",
"ChP": "charges",
"QuartP": "quarters",
"Orient": "sinister"
}
word_to_num = {
"a": 1,
"an": 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
"six": 6,
"seven": 7,
"eight": 8,
"nine": 9,
"ten": 10,
"eleven": 11,
"twelve": 12
}
position_to_num = {
"first": 1,
"second": 2,
"third": 3,
"fourth": 4,
"fifth": 5,
"sixth": 6,
"seventh": 7,
"eighth": 8,
"ninth": 9,
"tenth": 10
}
abbr_to_full = {
"1st": "first",
"2nd": "second",
"3rd": "third",
"4th": "fourth",
"5th": "fifth",
"6th": "sixth",
"7th": "seventh",
"8th": "eighth"
}
isMetal = lambda field : field in ['or', 'argent']
isColour = lambda field : field in ['vert', 'gules', 'sable', 'azure','purpure']
if __name__ == "__main__":
main()
|
from setuptools import setup
from setuptools import find_packages
setup(
name='Formula1_data_scraper',
version='0.0.6',
description='Package that allows you to collect and store into an AWS postgreSQL RDS, the qualifying and practice data from any formula 1 race weekend from 2006 onwards',
url='https://github.com/ishtyaq123/Webscraper_Formula_1_Practice_And_Qualifying_Data.git',
author='Ishtyaq Nabi',
license='MIT',
packages=['Scraper','AWS_rds_uploader'],
install_requires=['SQLAlchemy', 'psycopg2-binary', 'selenium', 'pandas'],
)
|
# Copyright 2017 NYU-FOXTROT. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Models for Inventory in inventory
All of the models are stored in this module
Models
------
Inventory - Inventory used in inventory management and operation
Attributes:
-----------
id (int) - uuid used to identify one inventory record
name (string) - the name of the product
quantity(int) - amount of Product Inventories
status (string) - the status of the inventory : new, openBox and used
"""
"""
Inventory Model that uses Redis
You must initialize this class before use by calling init_db().
This class looks for an environment variable called VCAP_SERVICES
to get its database credentials from. If it cannot find one, it
tries to connect to Redis on the localhost. If that fails it looks
for a server name 'redis' to connect to.
"""
import threading
# import enum
import os
import json
import logging
import pickle
from cerberus import Validator
from redis import Redis
from redis.exceptions import ConnectionError
class DataValidationError(Exception):
""" Used for an data validation errors when deserializing """
pass
class DatabaseConnectionError(ConnectionError):
pass
######################################################################
# Inventory Model for database
# This class must be initialized with use_db(redis) before using
# where redis is a value connection to a Redis database
######################################################################
class Inventory(object):
""" Inventory interface to database """
logger = logging.getLogger(__name__)
redis = None
schema = {
'id': {'type': 'integer'},
'name': {'type': 'string', 'required': True},
'quantity': {'type': 'integer', 'required': True},
'status': {'type': 'string', 'required': True}
}
__validator = Validator(schema)
lock = threading.Lock()
data = []
index = 0
def __init__(self, inventoryid=0, name='', quantity=0, status=''):
""" Initialize a Inventory """
self.id = inventoryid
self.name = name
self.quantity = quantity
self.status = status
def save(self):
""" Saves an Inventory in the database """
if self.name is None: # name is the only required field
raise DataValidationError('name attribute is not set')
if self.id == 0:
self.id = Inventory.__next_index()
Inventory.redis.set(self.id, pickle.dumps(self.serialize()))
# """
# Saves inventory (in - memory data), replace the old inventory record if it already exists
# """
# if self.id == 0:
# self.id = self.__next_index()
# Inventory.data.append(self)
# else:
# for i in range(len(Inventory.data)):
# if Inventory.data[i].id == self.id:
# Inventory.data[i] = self
# break
def delete(self):
""" Deletes an Inventory from the database """
Inventory.redis.delete(self.id)
# """ Delete inventory record with specified id, name, quantiy and status"""
# Inventory.data.remove(self)
def serialize(self):
""" Serializes an Inventory into a dictionary """
return {
"id": self.id,
"name": self.name,
"quantity": self.quantity,
"status": self.status
}
# """ Serializes Inventory into a dictionary """
# return {"id": self.id, "name": self.name, "quantity": self.quantity, "status": self.status}
def deserialize(self, data):
""" Deserializes an Inventory, marshalling the data """
if isinstance(data, dict) and Inventory.__validator.validate(data):
self.name = data['name']
self.quantity = data['quantity']
self.status = data['status']
else:
raise DataValidationError('Invalid Inventory data: ' + str(Inventory.__validator.errors))
return self
# """
# Deserializes a Inventory from a dictionary
#
# Args:
# data (dict): A dictionary containing the Inventory data
# """
# if not isinstance(data, dict):
# raise DataValidationError('Invalid Inventory: body of request contained bad or no data')
# if data.has_key('id'):
# self.id = data['id']
# try:
# self.name = data['name']
# self.quantity = data['quantity']
# self.status = data['status']
# except KeyError as err:
# raise DataValidationError('Invalid Inventory: missing ' + err.args[0])
# return
######################################################################
    # S T A T I C   D A T A B A S E   M E T H O D S
######################################################################
@staticmethod
def __next_index():
""" Increments the index and returns it """
return Inventory.redis.incr('index')
# """ Generates the next index in a continual sequence """
# with Inventory.lock:
# Inventory.index += 1
# return Inventory.index
@staticmethod
def use_db(redis):
        Inventory.redis = redis  # assign the connection used by the rest of the class
@staticmethod
def all():
""" Query that returns all Inventories """
results = []
for key in Inventory.redis.keys():
            if key != 'index':  # filter out our id index
data = pickle.loads(Inventory.redis.get(key))
inventory = Inventory(data['id']).deserialize(data)
results.append(inventory)
return results
# """ Returns all of the Inventories in the database """
# return [inventory for inventory in Inventory.data]
@staticmethod
def remove_all():
""" Removes all Inventories from the database """
Inventory.redis.flushall()
# """ Removes all of the Inventories from the database """
# del Inventory.data[:]
# Inventory.index = 0
# return Inventory.data
######################################################################
# F I N D E R M E T H O D S
######################################################################
@staticmethod
def find(inventory_id):
""" Query that finds Inventories by their id """
if Inventory.redis.exists(inventory_id):
data = pickle.loads(Inventory.redis.get(inventory_id))
inventory = Inventory(data['id']).deserialize(data)
return inventory
return None
# """ Finds a Inventory by it's ID """
# if not Inventory.data:
# return None
# Inventories = [inventory for inventory in Inventory.data if
# inventory.id == ProductInventory_id]
# if Inventories:
# return Inventories[0]
# return None
@staticmethod
def __find_by(attribute, value):
""" Generic Query that finds a key with a specific value """
# return [inventory for inventory in Inventory.__data if inventory.category == category]
Inventory.logger.info('Processing %s query for %s', attribute, value)
if isinstance(value, str):
search_criteria = value.lower() # make case insensitive
else:
search_criteria = value
results = []
for key in Inventory.redis.keys():
            if key != 'index':  # filter out our id index
data = pickle.loads(Inventory.redis.get(key))
# perform case insensitive search on strings
if isinstance(data[attribute], str):
test_value = data[attribute].lower()
else:
test_value = data[attribute]
if test_value == search_criteria:
results.append(Inventory(data['id']).deserialize(data))
return results
@staticmethod
def find_by_status(status):
""" Query that finds Inventories by their status """
return Inventory.__find_by('status', status)
# """ Returns all of the Inventories in a status
#
# Args:
# status (string): the status of the Inventories you want to match
# """
# return [inventory for inventory in Inventory.data if inventory.status == status]
@staticmethod
def find_by_quantity(quantity):
""" Query that finds Inventories by their quantity """
return Inventory.__find_by('quantity', quantity)
# """ Returns all of the Inventories in a status
#
# Args:
# quantity (int): the quantity of the Inventories you want to match
# """
# return [inventory for inventory in Inventory.data if inventory.quantity == quantity]
@staticmethod
def find_by_name(name):
""" Query that finds Inventories by their name """
return Inventory.__find_by('name', name)
# """ Returns all Inventories with the given name
#
# Args:
# name (string): the name of the Inventories you want to match
# """
# return [inventory for inventory in Inventory.data if inventory.name == name]
######################################################################
# R E D I S D A T A B A S E C O N N E C T I O N M E T H O D S
######################################################################
@staticmethod
def connect_to_redis(hostname, port, password):
""" Connects to Redis and tests the connection """
Inventory.logger.info("Testing Connection to: %s:%s", hostname, port)
Inventory.redis = Redis(host=hostname, port=port, password=password)
try:
Inventory.redis.ping()
Inventory.logger.info("Connection established")
except ConnectionError:
Inventory.logger.info("Connection Error from: %s:%s", hostname, port)
Inventory.redis = None
return Inventory.redis
@staticmethod
def init_db(redis=None):
"""
Initialized Redis database connection
This method will work in the following conditions:
1) In Bluemix with Redis bound through VCAP_SERVICES
2) With Redis running on the local server as with Travis CI
3) With Redis --link in a Docker container called 'redis'
4) Passing in your own Redis connection object
Exception:
----------
redis.ConnectionError - if ping() test fails
"""
if redis:
Inventory.logger.info("Using client connection...")
Inventory.redis = redis
try:
Inventory.redis.ping()
Inventory.logger.info("Connection established")
except ConnectionError:
Inventory.logger.error("Client Connection Error!")
Inventory.redis = None
raise ConnectionError('Could not connect to the Redis Service')
return
# Get the credentials from the Bluemix environment
if 'VCAP_SERVICES' in os.environ:
Inventory.logger.info("Using VCAP_SERVICES...")
vcap_services = os.environ['VCAP_SERVICES']
services = json.loads(vcap_services)
creds = services['rediscloud'][0]['credentials']
Inventory.logger.info("Conecting to Redis on host %s port %s",
creds['hostname'], creds['port'])
Inventory.connect_to_redis(creds['hostname'], creds['port'], creds['password'])
else:
Inventory.logger.info("VCAP_SERVICES not found, checking localhost for Redis")
Inventory.connect_to_redis('127.0.0.1', 6379, None)
if not Inventory.redis:
Inventory.logger.info("No Redis on localhost, looking for redis host")
Inventory.connect_to_redis('redis', 6379, None)
if not Inventory.redis:
# if you end up here, redis instance is down.
Inventory.logger.fatal('*** FATAL ERROR: Could not connect to the Redis Service')
raise ConnectionError('Could not connect to the Redis Service')
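######################################################################
# U S A G E   S K E T C H  (illustrative only, not part of the service)
######################################################################
# A minimal sketch, assuming a Redis instance is reachable on localhost:6379;
# shown as comments so that importing this module stays side-effect free.
#
#   Inventory.init_db()                                  # connect, raises on failure
#   item = Inventory(name="widget", quantity=3, status="new")
#   item.save()                                          # id assigned via the 'index' key
#   found = Inventory.find_by_name("widget")
#   Inventory.remove_all()                               # flush the database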
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Lesson 6: pets revisited
# %%
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from fastai import *
from fastai.vision import *
# from my_python_tricks import *
# %%
gpu_device = 0
defaults.device = torch.device(f'cuda:{gpu_device}')
torch.cuda.set_device(gpu_device)
# %%
bs = 64
NW = 8
# %%
path = untar_data(URLs.PETS)/'images'
# %% [markdown]
# ## Data augmentation
# %%
tfms = get_transforms(max_rotate=20, max_zoom=1.3, max_lighting=0.4, max_warp=0.4,
p_affine=1., p_lighting=1.)
# %%
doc(get_transforms)
# %%
src = ImageItemList.from_folder(path).random_split_by_pct(0.2, seed=2)
# %%
def get_data(size, bs, padding_mode='reflection'):
return (src.label_from_re(r'([^/]+)_\d+.jpg$')
.transform(tfms, size=size, padding_mode=padding_mode)
.databunch(bs=bs,num_workers=NW).normalize(imagenet_stats)) # Number of Workers = NW
# %%
data = get_data(224, bs, 'zeros')
# %% [markdown]
# plot_multi requires an up-to-date fastai
# %%
def _plot(i,j,ax):
x,y = data.train_ds[3]
x.show(ax, y=y)
plot_multi(_plot, 3, 3, figsize=(8,8))
# %%
data = get_data(224,bs)
# %%
plot_multi(_plot, 3, 3, figsize=(8,8))
# %% [markdown]
# ## Train a model
# %%
gc.collect()
learn = create_cnn(data, models.resnet34, metrics=error_rate, bn_final=True)
# %%
learn.fit_one_cycle(3, slice(1e-2), pct_start=0.8)
# %%
doc(learn.fit_one_cycle)
# %%
learn.unfreeze()
learn.fit_one_cycle(2, max_lr=slice(1e-6,1e-3), pct_start=0.8)
# pct_start : Percentage of total number of iterations when learning rate rises during one cycle.
# which means the lr both rises and falls within the same fit call: with 2 epochs of 100 iterations each (200 total) and pct_start=0.8, the lr rises for the first 0.8 * 200 = 160 iterations (1.6 epochs), then slowly decreases over the remaining 40.
# %%
data = get_data(352,bs)
learn.data = data
# %%
learn.fit_one_cycle(2, max_lr=slice(1e-6,1e-4))
# %%
learn.save('352')
# %%
from my_python_tricks import *
notify_me()
# %% [markdown]
# ## Convolution kernel
# %%
data = get_data(352,16)
# %%
learn = create_cnn(data, models.resnet34, metrics=error_rate, bn_final=True).load('352')
# %%
idx=0
x,y = data.valid_ds[idx]
x.show()
data.valid_ds.y[idx]
# %%
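# The hand-crafted 3x3 kernel below (negative weights toward the top-left,
# positive along the right column and bottom row, expanded to all 3 input
# channels and scaled by 1/6) sums to zero, so it behaves as a simple edge
# detector when convolved with the image a few cells further down.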
k = tensor([
[0. ,-5/3,1],
[-5/3,-5/3,1],
[1. ,1 ,1],
]).expand(1,3,3,3)/6
# %%
from fastai.callbacks.hooks import *
# %%
k
# %%
k.shape
# %%
t = data.valid_ds[0][0].data; t.shape
# %%
t[None].shape
# %%
edge = F.conv2d(t[None], k)
# %%
show_image(edge[0], figsize=(5,5));
# %%
data.c
# %%
learn.model
# %%
learn.summary()
# %% [markdown]
# ## Heatmap
# %%
m = learn.model.eval();
# %%
xb,_ = data.one_item(x)
xb_im = Image(data.denorm(xb)[0])
xb = xb.cuda()
# %%
from fastai.callbacks.hooks import *
# %%
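# hooked_backward registers a forward-activation hook and a gradient hook on the
# CNN body m[0], runs a forward pass on the batch xb, and backpropagates the
# score of class `cat`; the stored activations and gradients are what the
# Grad-CAM cells below combine into a heatmap.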
def hooked_backward(cat=y):
with hook_output(m[0]) as hook_a:
with hook_output(m[0], grad=True) as hook_g:
preds = m(xb)
preds[0,int(cat)].backward()
return hook_a,hook_g
# %%
hook_a,hook_g = hooked_backward()
# %%
acts = hook_a.stored[0].cpu()
acts.shape
# %%
avg_acts = acts.mean(0)
avg_acts.shape
# %%
def show_heatmap(hm):
_,ax = plt.subplots()
xb_im.show(ax)
ax.imshow(hm, alpha=0.6, extent=(0,352,352,0),
interpolation='bilinear', cmap='magma');
# %%
show_heatmap(avg_acts)
# %% [markdown]
# ## Grad-CAM
# %% [markdown]
# Paper: [Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization](https://arxiv.org/abs/1610.02391)
# %%
grad = hook_g.stored[0][0].cpu()
grad_chan = grad.mean(1).mean(1)
grad.shape,grad_chan.shape
# %%
mult = (acts*grad_chan[...,None,None]).mean(0)
# %%
show_heatmap(mult)
# %%
fn = path/'../other/bulldog_maine.jpg'
# %%
x = open_image(fn); x
# %%
xb,_ = data.one_item(x)
xb_im = Image(data.denorm(xb)[0])
xb = xb.cuda()
# %%
hook_a,hook_g = hooked_backward()
# %%
acts = hook_a.stored[0].cpu()
grad = hook_g.stored[0][0].cpu()
grad_chan = grad.mean(1).mean(1)
mult = (acts*grad_chan[...,None,None]).mean(0)
# %%
show_heatmap(mult)
# %%
data.classes[0]
# %%
hook_a,hook_g = hooked_backward(0)
# %%
acts = hook_a.stored[0].cpu()
grad = hook_g.stored[0][0].cpu()
grad_chan = grad.mean(1).mean(1)
mult = (acts*grad_chan[...,None,None]).mean(0)
# %%
show_heatmap(mult)
# %% [markdown]
# ## fin
# %%
|
# Copyright 2018 Max Shinn <maxwell.shinn@yale.edu>
# 2018 Norman Lam <norman.lam@yale.edu>
#
# This file is part of PyDDM, and is available under the MIT license.
# Please see LICENSE.txt in the root directory for more information.
__all__ = ["Overlay", "OverlayNone", "OverlayChain", "OverlayUniformMixture", "OverlayPoissonMixture", "OverlayExponentialMixture", "OverlayNonDecision", "OverlayNonDecisionGamma", "OverlayNonDecisionUniform", "OverlaySimplePause", "OverlayBlurredPause"]
import numpy as np
from scipy.special import gamma as sp_gamma # https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.gamma.html
import scipy.stats
from paranoid import accepts, returns, requires, ensures, Self, paranoidclass, paranoidconfig, Range, Positive, Number, List, Positive0, NDArray, Unchecked
from .paranoid_types import Conditions
from .base import Dependence
from ..solution import Solution
# TODO in unit test, ensure all overlays with reasonable parameters
# applied to a Solution add up to 1
# TODO unit tests for apply_trajectory
class Overlay(Dependence):
"""Subclasses can modify distributions after they have been generated.
This abstract class provides the methods which define how a
distribution should be modified after solving the model, for
example for a mixture model. To subclass it, implement apply.
Also, since it inherits from Dependence, subclasses must also
assign a `name` and `required_parameters` (see documentation for
Dependence.)
"""
depname = "Overlay"
def apply(self, solution):
"""Apply the overlay to a Solution object.
This function must be redefined in subclasses.
This function takes a Solution object as its argument and
returns a Solution object which was modified in some way.
Often times, this will be by modifying `solution.corr` and
`solution.err`. See the documentation for Solution for more
information about this object.
Note that while this does not take `conditions` as an
argument, conditions may still be accessed via
`solution.conditions`.
Conceptually, this function performs some transformation on
the simulated response time (first passage time)
distributions. It is especially useful for non-decision times
and mixture models, potentially in a parameter-dependent or
condition-dependent manner.
"""
raise NotImplementedError("Overlay model %s invalid: must define the apply(self, solution) function" % self.__class__.__name__)
def apply_trajectory(self, trajectory, model, rk4, seed, conditions={}):
"""Apply the overlay to a simulated decision variable trajectory.
This function is optional and may be redefined in subclasses.
It is expected to implement the same mechanism as the method
"apply", but to do so on simulated trajectories (i.e. from
Model.simulate_trial) instead of on a Solution object.
This function takes the t domain, the trajectory itself, and
task conditions. It returns the modified trajectory.
"""
raise NotImplementedError("Overlay model %s not compatible with trajectory simulations" % self.__class__.__name__)
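# A minimal sketch (not part of PyDDM) of what a concrete subclass needs: a
# `name`, a `required_parameters` list, and an `apply` method that returns a new
# Solution. The class and parameter names here are hypothetical; the Solution
# constructor arguments follow the pattern used by the overlays defined below.
#
#   class OverlayScaleCorrect(Overlay):
#       name = "Example: rescale the correct-response distribution"
#       required_parameters = ["scale"]
#       def apply(self, solution):
#           return Solution(solution.corr * self.scale, solution.err,
#                           solution.model, solution.conditions,
#                           solution.undec, solution.evolution)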
@paranoidclass
class OverlayNone(Overlay):
"""No overlay. An identity function for Solutions.
Example usage:
| overlay = OverlayNone()
"""
name = "No overlay"
required_parameters = []
@staticmethod
def _test(v):
pass
@staticmethod
def _generate():
yield OverlayNone()
@accepts(Self, Solution)
@returns(Solution)
def apply(self, solution):
return solution
@accepts(Self, NDArray(d=1, t=Number))
@returns(NDArray(d=1, t=Number))
def apply_trajectory(self, trajectory, **kwargs):
return trajectory
# NOTE: This class is likely to break if any changes are made to the
# Dependence constructor. In theory, no changes should be made to the
# Dependence constructor, but just in case...
@paranoidclass
class OverlayChain(Overlay):
"""Join together multiple overlays.
Unlike other model components, Overlays are not mutually
exclusive. It is possible to transform the output solution many
times. Thus, this allows joining together multiple Overlay
objects into a single object.
It accepts one parameter: `overlays`. This should be a list of
Overlay objects, in the order which they should be applied to the
Solution object.
One key technical caveat is that the overlays which are chained
together may not have the same parameter names. Parameter names
must be given different names in order to be a part of the same
overlay. This allows those parameters to be accessed by their
name inside of an OverlayChain object.
Example usage:
| overlay = OverlayChain(overlays=[OverlayNone(), OverlayNone(), OverlayNone()]) # Still equivalent to OverlayNone
| overlay = OverlayChain(overlays=[OverlayPoissonMixture(pmixturecoef=.01, rate=1),
| OverlayUniformMixture(umixturecoef=.01)]) # Apply a Poission mixture and then a Uniform mixture
"""
name = "Chain overlay"
required_parameters = ["overlays"]
@staticmethod
def _test(v):
assert v.overlays in List(Overlay), "overlays must be a list of Overlay objects"
@staticmethod
def _generate():
yield OverlayChain(overlays=[OverlayNone()])
yield OverlayChain(overlays=[OverlayUniformMixture(umixturecoef=.3), OverlayPoissonMixture(pmixturecoef=.2, rate=.7)])
yield OverlayChain(overlays=[OverlayNonDecision(nondectime=.1), OverlayPoissonMixture(pmixturecoef=.1, rate=1), OverlayUniformMixture(umixturecoef=.1)])
def __init__(self, **kwargs):
Overlay.__init__(self, **kwargs)
object.__setattr__(self, "required_parameters", [])
object.__setattr__(self, "required_conditions", [])
for o in self.overlays:
self.required_parameters.extend(o.required_parameters)
self.required_conditions.extend(o.required_conditions)
assert len(self.required_parameters) == len(set(self.required_parameters)), "Two overlays in chain cannot have the same parameter names"
object.__setattr__(self, "required_conditions", list(set(self.required_conditions))) # Avoid duplicates
def __setattr__(self, name, value):
if "required_parameters" in self.__dict__:
if name in self.required_parameters:
for o in self.overlays:
if name in o.required_parameters:
return setattr(o, name, value)
return Overlay.__setattr__(self, name, value)
def __getattr__(self, name):
if name in self.required_parameters:
for o in self.overlays:
if name in o.required_parameters:
return getattr(o, name)
else:
return Overlay.__getattribute__(self, name)
def __repr__(self):
overlayreprs = list(map(repr, self.overlays))
return "OverlayChain(overlays=[" + ", ".join(overlayreprs) + "])"
@accepts(Self, Solution)
@returns(Solution)
def apply(self, solution):
assert isinstance(solution, Solution)
newsol = solution
for o in self.overlays:
newsol = o.apply(newsol)
return newsol
@accepts(Self, NDArray(d=1, t=Number))
@returns(NDArray(d=1, t=Number))
@paranoidconfig(unit_test=False)
def apply_trajectory(self, trajectory, **kwargs):
for o in self.overlays:
trajectory = o.apply_trajectory(trajectory=trajectory, **kwargs)
return trajectory
@paranoidclass
class OverlayUniformMixture(Overlay):
"""A uniform mixture distribution.
The output distribution should be umixturecoef*100 percent uniform
distribution and (1-umixturecoef)*100 percent of the distribution
to which this overlay is applied.
A mixture with the uniform distribution can be used to confer
robustness when fitting using likelihood.
Example usage:
| overlay = OverlayUniformMixture(umixturecoef=.01)
"""
name = "Uniform distribution mixture model"
required_parameters = ["umixturecoef"]
@staticmethod
def _test(v):
assert v.umixturecoef in Range(0, 1), "Invalid mixture coef"
@staticmethod
def _generate():
yield OverlayUniformMixture(umixturecoef=0)
yield OverlayUniformMixture(umixturecoef=1)
yield OverlayUniformMixture(umixturecoef=.02)
yield OverlayUniformMixture(umixturecoef=.5)
@accepts(Self, Solution)
@returns(Solution)
def apply(self, solution):
assert self.umixturecoef >= 0 and self.umixturecoef <= 1
corr = solution.corr
err = solution.err
m = solution.model
cond = solution.conditions
undec = solution.undec
evolution = solution.evolution
# To make this work with undecided probability, we need to
# normalize by the sum of the decided density. That way, this
# function will never touch the undecided pieces.
norm = np.sum(corr)+np.sum(err)
corr = corr*(1-self.umixturecoef) + .5*self.umixturecoef/len(m.t_domain())*norm
err = err*(1-self.umixturecoef) + .5*self.umixturecoef/len(m.t_domain())*norm
return Solution(corr, err, m, cond, undec, evolution)
@paranoidclass
class OverlayExponentialMixture(Overlay):
"""An exponential mixture distribution.
    The output distribution should be pmixturecoef*100 percent exponential
    distribution and (1-pmixturecoef)*100 percent of the distribution
    to which this overlay is applied.
A mixture with the exponential distribution can be used to confer
robustness when fitting using likelihood.
    Note that this class was historically called OverlayPoissonMixture
    (an alias is kept below for backward compatibility) because the
    exponential distribution arises from a Poisson process, i.e. it models
    a uniform lapse rate.
Example usage:
| overlay = OverlayPoissonMixture(pmixturecoef=.02, rate=1)
"""
name = "Poisson distribution mixture model (lapse rate)"
required_parameters = ["pmixturecoef", "rate"]
@staticmethod
def _test(v):
assert v.pmixturecoef in Range(0, 1), "Invalid mixture coef"
assert v.rate in Positive(), "Invalid rate"
@staticmethod
def _generate():
yield OverlayPoissonMixture(pmixturecoef=0, rate=1)
yield OverlayPoissonMixture(pmixturecoef=.5, rate=.1)
yield OverlayPoissonMixture(pmixturecoef=.02, rate=10)
yield OverlayPoissonMixture(pmixturecoef=1, rate=1)
@accepts(Self, Solution)
@returns(Solution)
def apply(self, solution):
assert self.pmixturecoef >= 0 and self.pmixturecoef <= 1
assert isinstance(solution, Solution)
corr = solution.corr
err = solution.err
m = solution.model
cond = solution.conditions
undec = solution.undec
evolution = solution.evolution
# To make this work with undecided probability, we need to
# normalize by the sum of the decided density. That way, this
# function will never touch the undecided pieces.
norm = np.sum(corr)+np.sum(err)
lapses = lambda t : 2*self.rate*np.exp(-1*self.rate*t)
X = m.dt * np.arange(0, len(corr))
Y = lapses(X)
Y /= np.sum(Y)
corr = corr*(1-self.pmixturecoef) + .5*self.pmixturecoef*Y*norm # Assume numpy ndarrays, not lists
err = err*(1-self.pmixturecoef) + .5*self.pmixturecoef*Y*norm
#print(corr)
#print(err)
return Solution(corr, err, m, cond, undec, evolution)
# Backward compatibility
class OverlayPoissonMixture(OverlayExponentialMixture):
pass
@paranoidclass
class OverlayNonDecision(Overlay):
"""Add a non-decision time
This shifts the reaction time distribution by `nondectime` seconds
in order to create a non-decision time.
Example usage:
| overlay = OverlayNonDecision(nondectime=.2)
"""
name = "Add a non-decision by shifting the histogram"
required_parameters = ["nondectime"]
@staticmethod
def _test(v):
assert v.nondectime in Number(), "Invalid non-decision time"
@staticmethod
def _generate():
yield OverlayNonDecision(nondectime=0)
yield OverlayNonDecision(nondectime=.5)
yield OverlayNonDecision(nondectime=-.5)
@accepts(Self, Solution)
@returns(Solution)
@ensures("set(return.corr.tolist()) - set(solution.corr.tolist()).union({0.0}) == set()")
@ensures("set(return.err.tolist()) - set(solution.err.tolist()).union({0.0}) == set()")
@ensures("solution.prob_undecided() <= return.prob_undecided()")
def apply(self, solution):
corr = solution.corr
err = solution.err
m = solution.model
cond = solution.conditions
undec = solution.undec
evolution = solution.evolution
shifts = int(self.nondectime/m.dt) # truncate
newcorr = np.zeros(corr.shape, dtype=corr.dtype)
newerr = np.zeros(err.shape, dtype=err.dtype)
if shifts > 0:
newcorr[shifts:] = corr[:-shifts]
newerr[shifts:] = err[:-shifts]
elif shifts < 0:
newcorr[:shifts] = corr[-shifts:]
newerr[:shifts] = err[-shifts:]
else:
newcorr = corr
newerr = err
return Solution(newcorr, newerr, m, cond, undec, evolution)
@accepts(Self, NDArray(d=1, t=Number), Unchecked)
@returns(NDArray(d=1, t=Number))
def apply_trajectory(self, trajectory, model, **kwargs):
shift = int(self.nondectime/model.dt)
if shift > 0:
trajectory = np.append([trajectory[0]]*shift, trajectory)
elif shift < 0:
if len(trajectory) > abs(shift):
trajectory = trajectory[abs(shift):]
else:
trajectory = np.asarray([trajectory[-1]])
return trajectory
@paranoidclass
class OverlayNonDecisionUniform(Overlay):
"""Add a uniformly-distributed non-decision time.
The center of the distribution of non-decision times is at
`nondectime`, and it extends `halfwidth` on each side.
Example usage:
| overlay = OverlayNonDecisionUniform(nondectime=.2, halfwidth=.02)
"""
name = "Uniformly-distributed non-decision time"
required_parameters = ["nondectime", "halfwidth"]
@staticmethod
def _test(v):
assert v.nondectime in Number(), "Invalid non-decision time"
assert v.halfwidth in Positive0(), "Invalid halfwidth parameter"
@staticmethod
def _generate():
yield OverlayNonDecisionUniform(nondectime=.3, halfwidth=.01)
yield OverlayNonDecisionUniform(nondectime=0, halfwidth=.1)
@accepts(Self, Solution)
@returns(Solution)
@ensures("np.sum(return.corr) <= np.sum(solution.corr)")
@ensures("np.sum(return.err) <= np.sum(solution.err)")
def apply(self, solution):
# Make sure params are within range
        assert self.halfwidth >= 0, "Invalid halfwidth parameter"
# Extract components of the solution object for convenience
corr = solution.corr
err = solution.err
m = solution.model
cond = solution.conditions
undec = solution.undec
evolution = solution.evolution
# Describe the width and shift of the uniform distribution in
# terms of list indices
shift = int(self.nondectime/m.dt) # Discretized non-decision time
width = int(self.halfwidth/m.dt) # Discretized uniform distribution half-width
offsets = list(range(shift-width, shift+width+1))
# Create new correct and error distributions and iteratively
# add shifts of each distribution to them. Use this over the
# np.convolution because it handles negative non-decision
# times.
newcorr = np.zeros(corr.shape, dtype=corr.dtype)
newerr = np.zeros(err.shape, dtype=err.dtype)
for offset in offsets:
if offset > 0:
newcorr[offset:] += corr[:-offset]/len(offsets)
newerr[offset:] += err[:-offset]/len(offsets)
elif offset < 0:
newcorr[:offset] += corr[-offset:]/len(offsets)
newerr[:offset] += err[-offset:]/len(offsets)
else:
newcorr += corr/len(offsets)
newerr += err/len(offsets)
return Solution(newcorr, newerr, m, cond, undec, evolution)
@accepts(Self, NDArray(d=1, t=Number), Unchecked)
@returns(NDArray(d=1, t=Number))
def apply_trajectory(self, trajectory, model, **kwargs):
ndtime = np.random.rand()*2*self.halfwidth + (self.nondectime-self.halfwidth)
shift = int(ndtime/model.dt)
if shift > 0:
            trajectory = np.append([trajectory[0]]*shift, trajectory)
elif shift < 0:
if len(trajectory) > abs(shift):
trajectory = trajectory[abs(shift):]
else:
trajectory = np.asarray([trajectory[-1]])
return trajectory
@paranoidclass
class OverlayNonDecisionGamma(Overlay):
"""Add a gamma-distributed non-decision time
This shifts the reaction time distribution by an amount of time
specified by the gamma distribution with shape parameter `shape`
(sometimes called "k") and scale parameter `scale` (sometimes
called "theta"). The distribution is then further shifted by
`nondectime` seconds.
Example usage:
| overlay = OverlayNonDecisionGamma(nondectime=.2, shape=1.5, scale=.05)
"""
name = "Add a gamma-distributed non-decision time"
required_parameters = ["nondectime", "shape", "scale"]
@staticmethod
def _test(v):
assert v.nondectime in Number(), "Invalid non-decision time"
assert v.shape in Positive0(), "Invalid shape parameter"
assert v.shape >= 1, "Shape parameter must be >= 1"
assert v.scale in Positive(), "Invalid scale parameter"
@staticmethod
def _generate():
yield OverlayNonDecisionGamma(nondectime=.3, shape=2, scale=.01)
yield OverlayNonDecisionGamma(nondectime=0, shape=1.1, scale=.1)
@accepts(Self, Solution)
@returns(Solution)
@ensures("np.sum(return.corr) <= np.sum(solution.corr)")
@ensures("np.sum(return.err) <= np.sum(solution.err)")
@ensures("np.all(return.corr[0:int(self.nondectime//return.model.dt)] == 0)")
def apply(self, solution):
# Make sure params are within range
assert self.shape >= 1, "Invalid shape parameter"
assert self.scale > 0, "Invalid scale parameter"
# Extract components of the solution object for convenience
corr = solution.corr
err = solution.err
dt = solution.model.dt
# Create the weights for different timepoints
times = np.asarray(list(range(-len(corr), len(corr))))*dt # https://numpy.org/doc/stable/reference/generated/numpy.asarray.html
weights = scipy.stats.gamma(a=self.shape, scale=self.scale, loc=self.nondectime).pdf(times) # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gamma.html
if np.sum(weights) > 0:
weights /= np.sum(weights) # Ensure it integrates to 1
# Divide by 1+1e-14 to avoid numerical errors after the convolution, which are on the order of 10^-16
newcorr = np.convolve(corr, weights, mode="full")[len(corr):(2*len(corr))]/(1+1e-14) # https://numpy.org/doc/stable/reference/generated/numpy.convolve.html
newerr = np.convolve(err, weights, mode="full")[len(corr):(2*len(corr))]/(1+1e-14)
return Solution(newcorr, newerr, solution.model,
solution.conditions, solution.undec, solution.evolution)
@accepts(Self, NDArray(d=1, t=Number), Unchecked)
@returns(NDArray(d=1, t=Number))
def apply_trajectory(self, trajectory, model, **kwargs):
ndtime = scipy.stats.gamma(a=self.shape, scale=self.scale, loc=self.nondectime).rvs()
shift = int(ndtime/model.dt)
if shift > 0:
            trajectory = np.append([trajectory[0]]*shift, trajectory)
elif shift < 0:
if len(trajectory) > abs(shift):
trajectory = trajectory[abs(shift):]
else:
trajectory = np.asarray([trajectory[-1]])
return trajectory
@paranoidclass
class OverlaySimplePause(Overlay):
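    """Pause evidence integration for a fixed amount of time.
    All probability mass at times later than `pausestart` is shifted later
    by (`pausestop` - `pausestart`) seconds; mass before the pause is left
    untouched.
    Example usage:
      | overlay = OverlaySimplePause(pausestart=.2, pausestop=.3)
    """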
name = "Brief pause in integration by shifting part of the histogram"
required_parameters = ['pausestart', 'pausestop']
@staticmethod
def _test(v):
assert v.pausestart in Positive0(), "Invalid start time"
        assert v.pausestop in Positive0(), "Invalid stop time"
assert v.pausestart <= v.pausestop, "Pause start time must be before stop time"
@staticmethod
def _generate():
yield OverlaySimplePause(pausestart=0, pausestop=0)
yield OverlaySimplePause(pausestart=.1, pausestop=.2)
@accepts(Self, Solution)
@returns(Solution)
@ensures("set(return.corr.tolist()) - set(solution.corr.tolist()).union({0.0}) == set()")
@ensures("set(return.err.tolist()) - set(solution.err.tolist()).union({0.0}) == set()")
@ensures("solution.prob_undecided() <= return.prob_undecided()")
@ensures('self.pausestart == self.pausestop --> solution == return')
def apply(self, solution):
corr = solution.corr
err = solution.err
m = solution.model
cond = solution.conditions
undec = solution.undec
evolution = solution.evolution
start = int(self.pausestart/m.dt) # truncate
stop = int((self.pausestop)/m.dt) # truncate
if stop <= start:
return solution
newcorr = np.zeros(corr.shape, dtype=corr.dtype)
newerr = np.zeros(err.shape, dtype=err.dtype)
newcorr[0:start] = corr[0:start]
newerr[0:start] = err[0:start]
newcorr[stop:] = corr[start:-(stop-start)]
newerr[stop:] = err[start:-(stop-start)]
return Solution(newcorr, newerr, m, cond, undec, evolution)
@paranoidclass
class OverlayBlurredPause(Overlay):
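    """Pause evidence integration, with the pause length gamma-distributed.
    Probability mass after `pausestart` is shifted later in time, with the
    pause duration weighted by a gamma distribution with mean
    (`pausestop` - `pausestart`) and standard deviation `pauseblurwidth`.
    Example usage:
      | overlay = OverlayBlurredPause(pausestart=.2, pausestop=.3, pauseblurwidth=.01)
    """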
name = "Brief pause in integration, pause length by gamma distribution"
required_parameters = ['pausestart', 'pausestop', 'pauseblurwidth']
@staticmethod
def _test(v):
assert v.pausestart in Positive0(), "Invalid start time"
assert v.pausestop in Positive0(), "Invalid stop time"
assert v.pauseblurwidth in Positive(), "Invalid width"
assert v.pausestart <= v.pausestop, "Pause start time must be before stop time"
assert v.pausestart + v.pauseblurwidth/2 <= v.pausestop, "Blur must be shorter than pause"
@staticmethod
def _generate():
yield OverlayBlurredPause(pausestart=0, pausestop=.1, pauseblurwidth=.1)
yield OverlayBlurredPause(pausestart=.1, pausestop=.2, pauseblurwidth=.01)
@accepts(Self, Solution)
@returns(Solution)
@ensures("solution.prob_undecided() <= return.prob_undecided()")
@ensures('self.pausestart == self.pausestop --> solution == return')
def apply(self, solution):
corr = solution.corr
err = solution.err
m = solution.model
cond = solution.conditions
undec = solution.undec
evolution = solution.evolution
# Make gamma distribution
gamma_mean = self.pausestop - self.pausestart
gamma_var = pow(self.pauseblurwidth, 2)
shape = gamma_mean**2/gamma_var
scale = gamma_var/gamma_mean
gamma_pdf = lambda t : 1/(sp_gamma(shape)*(scale**shape)) * t**(shape-1) * np.exp(-t/scale)
gamma_vals = np.asarray([gamma_pdf(t) for t in m.t_domain() - self.pausestart if t >= 0])
sumgamma = np.sum(gamma_vals)
gamma_start = next(i for i,t in enumerate(m.t_domain() - self.pausestart) if t >= 0)
# Generate first part of pdf (before the pause)
newcorr = np.zeros(m.t_domain().shape, dtype=corr.dtype)
newerr = np.zeros(m.t_domain().shape, dtype=err.dtype)
# Generate pdf after the pause
for i,t in enumerate(m.t_domain()):
#print(np.sum(newcorr)+np.sum(newerr))
if 0 <= t < self.pausestart:
newcorr[i] = corr[i]
newerr[i] = err[i]
elif self.pausestart <= t:
newcorr[i:] += corr[gamma_start:len(corr)-(i-gamma_start)]*gamma_vals[int(i-gamma_start)]/sumgamma
newerr[i:] += err[gamma_start:len(corr)-(i-gamma_start)]*gamma_vals[int(i-gamma_start)]/sumgamma
else:
raise ValueError("Invalid domain")
return Solution(newcorr, newerr, m, cond, undec, evolution)
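# A minimal, self-contained sketch (numpy only) of the arithmetic performed by
# the mixture and non-decision overlays above; the toy histograms and parameter
# values are illustrative assumptions, not part of the library API.
if __name__ == "__main__":
    dt = .01
    t = np.arange(0, 1, dt)
    corr = np.exp(-(t - .4)**2 / .01)
    corr = corr / corr.sum() * .6     # toy "correct" RT histogram holding 60% of the mass
    err = np.exp(-(t - .5)**2 / .02)
    err = err / err.sum() * .3        # toy "error" RT histogram holding 30% of the mass
    # Uniform mixture (cf. OverlayUniformMixture.apply): keep (1-coef) of the mass
    # and spread coef of it uniformly, scaled by the decided mass so that the
    # undecided probability is untouched.
    coef = .05
    norm = np.sum(corr) + np.sum(err)
    corr_mix = corr*(1 - coef) + .5*coef/len(t)*norm
    err_mix = err*(1 - coef) + .5*coef/len(t)*norm
    assert abs((np.sum(corr_mix) + np.sum(err_mix)) - norm) < 1e-9
    # Non-decision time (cf. OverlayNonDecision.apply): shift the histogram right
    # by nondectime/dt bins, padding with zeros at the front.
    shift = int(.1/dt)
    corr_shifted = np.zeros_like(corr)
    corr_shifted[shift:] = corr[:-shift]
    print("decided mass before/after shift:", np.sum(corr), np.sum(corr_shifted))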
|
import torch
import torch_stonne
a=torch.randn(6,5)
b=torch.randn(5,5)
r=torch_stonne.simulated_matmul("", a, b, "../../simulation_files/maeri_128mses_128_bw.cfg", "tile", 0)
print(r)
print(torch.matmul(a,b))
|
import FWCore.ParameterSet.Config as cms
import RecoJets.JetProducers.CaloTowerSchemeB_cfi
towerMakerWithHO = RecoJets.JetProducers.CaloTowerSchemeB_cfi.towerMaker.clone(
UseHO = True
)
|
# ==============================================================
# Author: Rodolfo Ferro
# Twitter: @FerroRodolfo
#
# ABOUT COPYING OR USING PARTIAL INFORMATION:
# This script has been originally created by Rodolfo Ferro.
# Any explicit usage of this script or its contents is granted
# according to the license provided and its conditions.
# ==============================================================
# -*- coding: utf-8 -*-
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import skimage.transform as transf
import skimage.io as io
import numpy as np
import os
sky = [128, 128, 128]
building = [128, 0, 0]
pole = [192, 192, 128]
road = [128, 64, 128]
pavement = [ 60, 40, 222]
tree = [128, 128, 0]
sign_symbol = [192, 128, 128]
fence = [ 64, 64, 128]
car = [ 64, 0, 128]
pedestrian = [ 64, 64, 0]
bicyclist = [ 0, 128, 192]
unlabelled = [ 0, 0, 0]
COLOR_DICT = np.array([
sky,
building,
pole,
road,
pavement,
tree,
sign_symbol,
fence,
car,
pedestrian,
bicyclist,
unlabelled
])
def adjust_data(img, mask, flag_multi_class, num_class):
if flag_multi_class:
img = img / 255.
mask = mask[:, :, :, 0] if len(mask.shape) == 4 else mask[:, :, 0]
new_mask = np.zeros(mask.shape + (num_class,))
for i in range(num_class):
new_mask[mask == i, i] = 1
new_mask = np.reshape(
new_mask,
(
new_mask.shape[0],
new_mask.shape[1] * new_mask.shape[2],
new_mask.shape[3]
)
) if flag_multi_class else np.reshape(
new_mask,
(new_mask.shape[0] * new_mask.shape[1], new_mask.shape[2])
)
mask = new_mask
elif np.max(img) > 1:
img = img / 255.
        mask = mask / 255.
mask[mask > 0.5] = 1
mask[mask <= 0.5] = 0
return img, mask
def train_generator(batch_size, train_path, image_folder, mask_folder,
aug_dict, image_color_mode='grayscale', mask_color_mode='grayscale',
image_save_prefix='image', mask_save_prefix='mask',
flag_multi_class=False, num_class=2, save_to_dir=None,
target_size=(256, 256), seed=1):
image_datagen = ImageDataGenerator(**aug_dict)
mask_datagen = ImageDataGenerator(**aug_dict)
image_generator = image_datagen.flow_from_directory(
train_path,
classes=[image_folder],
class_mode=None,
color_mode=image_color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=image_save_prefix,
seed=seed
)
mask_generator = mask_datagen.flow_from_directory(
train_path,
classes=[mask_folder],
class_mode=None,
color_mode=mask_color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=mask_save_prefix,
seed=seed
)
train_generator = zip(image_generator, mask_generator)
for img, mask in train_generator:
img, mask = adjust_data(img, mask, flag_multi_class, num_class)
yield img, mask
def test_generator(test_path, num_image=30, target_size=(256, 256),
flag_multi_class=False, as_gray=True):
for i in range(num_image):
img_path = os.path.join(test_path, f'{i}.png')
img = io.imread(img_path, as_gray=as_gray)
img = img / 255.
img = transf.resize(img, target_size)
img = np.reshape(img, img.shape + (1,)) \
if not flag_multi_class else img
img = np.reshape(img, (1,) + img.shape)
yield img
def visualize_label(num_class, color_dict, img):
img = img[:, :, 0] if len(img.shape) == 3 else img
img_out = np.zeros(img.shape + (3,)).astype(np.uint8)
for i in range(num_class):
img_out[img == i, :] = color_dict[i]
return img_out / 255.
def save_results(save_path, npyfile, flag_multi_class=False, num_class=2):
if not os.path.exists(save_path):
os.mkdir(save_path)
for i, img in enumerate(npyfile):
# img = visualize_label(num_class, COLOR_DICT, item) \
# if flag_multi_class else item[:, :, 0]
img *= 255.
img = img.astype(np.uint8)
img_path = os.path.join(save_path, f'{i}.png')
io.imsave(img_path, img)
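# A minimal, self-contained sketch of `adjust_data` on synthetic arrays; the
# shapes and pixel values below are illustrative assumptions only.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    demo_img = rng.integers(0, 256, size=(1, 256, 256, 1)).astype(np.float64)
    demo_mask = (rng.random((1, 256, 256, 1)) > 0.5).astype(np.float64) * 255.
    demo_img, demo_mask = adjust_data(demo_img, demo_mask,
                                      flag_multi_class=False, num_class=2)
    print('image range:', demo_img.min(), demo_img.max())  # expected within [0, 1]
    print('mask values:', np.unique(demo_mask))            # expected {0., 1.}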
|
import paramiko
import sys
import datetime
import threading
import logging
"""
Edit this line and add your command
"""
#cmd2run = "for f in $(ioscli lsdev -type adapter | grep fcs | grep 8Gb | awk {'print $1'}); do wwpn=$(ioscli lsdev -dev $f -vpd | grep Network | sed s'/\.//g;s/Network Address//g;s/ //g');echo $f,$wwpn; done"
cmd2run = "echo \"lslpp -l | grep -i bes\" | oem_setup_env"
sys.tracebacklimit = 0
if len(sys.argv) < 2:
logging.error("Not enough arguments")
sys.exit(1)
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy)
current_date = datetime.date.today()
results = []
def run_dsh(ip):
    # Use a per-thread SSH client so run_dsh can safely run in parallel threads
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
if "vsa" in ip:
ssh.connect(hostname=ip, port=22, username='padmin', timeout=5)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd2run)
output = ssh_stdout.readlines()
for line in output:
if len(line) > 0:
results.append([ip, line])
elif "hmc" in ip:
ssh.connect(hostname=ip, port=22, username='hscroot', password="start1234", timeout=5)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd2run)
output = ssh_stdout.readlines()
for line in output:
if len(line) > 0:
results.append([ip, line])
else:
ssh.connect(hostname=ip, port=22, username='ibmadmin', timeout=5)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd2run)
output = ssh_stdout.readlines()
for line in output:
if len(line) > 0:
results.append([ip, line])
    except Exception as e:
        print("[+] Unable to get info from " + str(ip) + ": " + str(e))
finally:
pass
threads = []
for x in sys.argv[1:]:
if x:
t = threading.Thread(target=run_dsh, args=(x,))
threads.append(t)
# Start all threads first, then wait for them, so the hosts are queried in parallel
for i in threads:
    i.start()
for i in threads:
    i.join()
print("\n------------------------------------------------------\n")
for line in results:
if line:
print(str(line[0]).rstrip('\n') + ": " + str(line[1]).rstrip('\n'))
print("\n------------------------------------------------------\n")
|
from nltk.corpus import wordnet
from nltk.tag import pos_tag
from pattern.en import pluralize
with open('nouns.txt', 'w') as ofile:
words = set(lemma.name().replace('_', ' ')
for synset in wordnet.all_synsets(wordnet.NOUN)
for lemma in synset.lemmas())
for word, tag in pos_tag(sorted(words)):
if tag == 'NNP' and word.istitle():
ofile.write(word)
else:
plural = pluralize(word)
if plural.endswith('ss'):
ofile.write(word)
else:
ofile.write(plural)
ofile.write('\n')
|
aliases['zc'] = 'rm ~/.z'
|
"""
Signal processing utilities
Author: Nicolas EKICIER
Release: V1.52 03/2021
"""
import numpy as np
def smooth_compute(input, dim, njob=-1, cst=100, ord=3):
"""
Compute whittaker smoothing in jobs
:param input: array of vi signal
:param dim: dimension of vector signal (0 = row / 1 = column)
:param njob: number of jobs (refer to Joblib doc, default = -1)
:param cst: penalization (Whittaker parameter)
:param ord: derivation order (Whittaker parameter)
:return:
"""
from joblib import Parallel, delayed
from tqdm import tqdm
def lambdf(x):
return whittf(fillnan_and_resample(x), beta=cst, order=ord)
if dim == 0:
tw = Parallel(n_jobs=njob)(delayed(lambdf)(input[i, :]) for i in tqdm(range(0, input.shape[0]), desc='Whittaker Smoothing'))
elif dim == 1:
tw = Parallel(n_jobs=njob)(delayed(lambdf)(input[:, i]) for i in tqdm(range(0, input.shape[1]), desc='Whittaker Smoothing'))
return np.array(tw)
def whittf(y, weight=None, beta=100, order=3):
"""
Weighted Whittaker smoothing
:param y: vector data (signal)
:param weight: weights of each sample (one by default)
:param beta: penalization parameter (> 0)
:param order: derivation order
:return: smooth signal
"""
m = len(y)
speye = np.eye(m)
p = np.diff(speye, n=order)
if weight is None:
diag = speye
else:
diag = np.diag(weight)
pp = np.transpose(p)
yf = np.linalg.solve(diag + beta*np.dot(p, pp), np.dot(diag, y))
return yf
def fillnan_and_resample(y, x=None, method='linear'):
"""
Interpolation of vector data with nan values
Extrema are extrapolated
:param y: vector data
:param x: vector of x positions to resampling (Default = None)
:param method: interpolation method
- 'linear' default
- 'nearest'
- 'zero', 'slinear', 'quadratic', 'cubic' = spline interpolation of zeroth, first, second or third order
:return: interpolated signal
"""
from scipy.interpolate import interp1d
y = np.ravel(y)
if x is None:
x = np.arange(0, len(y))
igood = np.where(np.isfinite(y))
func = interp1d(np.ravel(igood),
np.ravel(y[igood]), # fndvi.iloc[np.ravel(igood), fndvi.columns.get_loc(f)]
fill_value='extrapolate',
kind=method)
return func(x)
def regress(x, y, deg=1):
"""
Compute regression (linear or polynomial) from 2 datasets
:param x: x values (vector)
:param y: y values (vector)
:param deg: degree of regression (default = 1 = linear)
:return: sorted values from x [x, y, predict] (out)
regression coefficients (coeffs)
polynomial class (clpoly)
metrics (r2, mae, rmse)
"""
# Regression
coeffs = np.polyfit(x, y, deg)
predict = np.polyval(coeffs, x)
clpoly = np.poly1d(coeffs)
# Metrics
diff = y - predict
mae = np.mean(abs(diff))
rmse = np.sqrt(np.mean(diff**2))
r2 = 1-(sum(diff**2)/sum((y-np.mean(y))**2))
# Sort
ind = np.argsort(x)
out = np.vstack((x[ind], y[ind], predict[ind])).transpose()
return out, coeffs, clpoly, (r2, mae, rmse)
def outliers(input):
"""
Extract index and values of outliers in input
Works with NaN values
:param input: vector data (signal)
:return: index, (whisker inf, sup)
"""
    Quart = np.nanpercentile(input, [25, 75])   # 1st and 3rd quartiles
    IQuart = Quart[1] - Quart[0]                # interquartile range
    wsup = Quart[1] + 1.5 * IQuart              # upper whisker
    winf = Quart[0] - 1.5 * IQuart              # lower whisker
idx = np.flatnonzero((input < winf) | (input > wsup))
return idx, (winf, wsup)
def phen_met(y, plot=False):
"""
Extract phenological metrics from one crop season
From "TIMESAT — a program for analyzing time-series of satellite sensor data", Jönsson al., 2004
:param y: ndvi profil
:param plot: plot result (default = False)
:return: pandas Dataframe with metrics
"""
import pandas as pd
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
# Thresholds
pct_sg = 0.2
pct_eg = 0.8
pct_sd = 0.8
pct_ed = 0.2
# Maximum
tmp = find_peaks(y)
iMaxVal = np.argmax(y[tmp[0]])
iMaxVal = tmp[0][iMaxVal]
MaxVal = y[iMaxVal]
# Min before max
iLeftMinVal = np.argmin(y[0:iMaxVal])
LeftMinVal = y[iLeftMinVal]
# Min after max
iRightMinVal = np.argmin(y[iMaxVal:]) + iMaxVal
RightMinVal = y[iRightMinVal]
# Base
BaseVal = np.mean((LeftMinVal, RightMinVal))
# Amplitude
SeasonAmp = MaxVal - BaseVal
# Start of growth
sgb = LeftMinVal + pct_sg * SeasonAmp
iStartGrowth = np.flatnonzero(y[iLeftMinVal:iMaxVal] >= sgb)
iStartGrowth = iStartGrowth[0] + iLeftMinVal
StartGrowth = y[iStartGrowth]
# End of growth
egb = LeftMinVal + pct_eg * (MaxVal - LeftMinVal)
iEndGrowth = np.flatnonzero(y[iStartGrowth:iMaxVal] >= egb)
iEndGrowth = iEndGrowth[0] + iStartGrowth
EndGrowth = y[iEndGrowth]
# Start of decrease
sdb = RightMinVal + pct_sd * (MaxVal - RightMinVal)
iStartDecrease = np.flatnonzero(y[iMaxVal:iRightMinVal] >= sdb)
iStartDecrease = iStartDecrease[-1] + iMaxVal
StartDecrease = y[iStartDecrease]
# End of decrease
edb = RightMinVal + pct_ed * SeasonAmp
if edb > StartDecrease:
iEndDecrease = np.nan
EndDecrease = np.nan
else:
iEndDecrease = np.flatnonzero(y[iStartDecrease:iRightMinVal] >= edb)
iEndDecrease = iEndDecrease[-1] + iStartDecrease
EndDecrease = y[iEndDecrease]
# Length of season
SeasonL = iEndDecrease - iStartGrowth
# Output
out = pd.DataFrame({'istartgrowth': iStartGrowth,
'startgrowth': StartGrowth,
'iendgrowth': iEndGrowth,
'endgrowth': EndGrowth,
'imax': iMaxVal,
'max': MaxVal,
'istartdecrease': iStartDecrease,
'startdecrease': StartDecrease,
'ienddecrease': iEndDecrease,
'enddecrease': EndDecrease,
'slength': SeasonL},
index=[0])
if plot == True:
plt.figure()
plt.plot(y, 'k-', label='ndvi')
plt.scatter(out['istartgrowth'], out['startgrowth'], label='StartOfGrowth')
plt.scatter(out['iendgrowth'], out['endgrowth'], label='EndOfGrowth')
plt.scatter(out['imax'], out['max'], label='Maximum')
plt.scatter(out['istartdecrease'], out['startdecrease'], label='StartOfDecrease')
plt.scatter(out['ienddecrease'], out['enddecrease'], label='EndOfDecrease')
plt.title('Phenological Metrics')
plt.ylabel('Vegetation Index')
plt.legend()
plt.grid()
return out
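# A minimal, self-contained sketch exercising fillnan_and_resample, whittf,
# outliers and regress on synthetic data; the signal and parameter values below
# are illustrative assumptions, not recommended defaults.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    t = np.arange(0, 100)
    signal = np.sin(2 * np.pi * t / 50) + 0.1 * rng.standard_normal(t.size)
    signal[::10] = np.nan                          # simulate missing samples
    filled = fillnan_and_resample(signal)          # interpolate over the NaNs
    smooth = whittf(filled, beta=100, order=3)     # Whittaker smoothing
    idx, (winf, wsup) = outliers(filled)           # whisker-based outlier detection
    out, coeffs, clpoly, (r2, mae, rmse) = regress(t.astype(float), smooth, deg=1)
    print('outliers:', idx.size, '| linear fit r2:', round(r2, 3))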
|
from __init__ import QMainWindow
from .view import View
from .model import Model
class Controller:
def __init__(self, parent_window: QMainWindow, round: int):
super().__init__()
self.parent_window = parent_window
self.round = round
self.view = View(self.parent_window, self.round)
self.module = Model()
|
# -*- coding:utf-8 -*-
import csv
# First, group the rows by year
year_dict = {}
with open('./entry.csv', 'r') as entry:
reader = csv.reader(entry)
for row in reader:
if reader.line_num == 1 or reader.line_num == 2:
continue
if reader.line_num == 3:
print(row)
# if reader.line_num < 100:
year = row[1]
if year not in year_dict:
year_dict[year] = [row]
else:
year_dict.get(year).append(row)
result_list = []
# Then, within each year, group by city
print(len(year_dict.keys()))
for year in sorted(year_dict.keys()):
this_year_list = year_dict.get(year)
this_year_city_key_dict = {}
for item in this_year_list:
city = item[0]
label = int(item[-1])
if city not in this_year_city_key_dict:
default_this_year_city = {'name': city, 'year': year, 'A': 0, 'A1': 0, 'A2': 0, 'A3': 0,
'A4': 0,'B': 0,'B1': 0, 'B2': 0,'B3': 0, 'B4': 0, 'C': 0, 'C1': 0, 'C2': 0, 'C3': 0, 'C4': 0}
if label <= 5:
A1 = float(item[2])
A2 = float(item[3])
A3 = float(item[4])
A4 = float(item[5])
default_this_year_city['A']+= 1
default_this_year_city['A1'] += A1
default_this_year_city['A2'] += A2
default_this_year_city['A3'] += A3
default_this_year_city['A4'] += A4
elif label >= 6 and label <= 50:
B1 = float(item[2])
B2 = float(item[3])
B3 = float(item[4])
B4 = float(item[5])
default_this_year_city['B']+= 1
default_this_year_city['B1'] += B1
default_this_year_city['B2'] += B2
default_this_year_city['B3'] += B3
default_this_year_city['B4'] += B4
else:
C1 = float(item[2])
C2 = float(item[3])
C3 = float(item[4])
C4 = float(item[5])
default_this_year_city['C']+= 1
default_this_year_city['C1'] += C1
default_this_year_city['C2'] += C2
default_this_year_city['C3'] += C3
default_this_year_city['C4'] += C4
this_year_city_key_dict[city] = default_this_year_city
else:
if label <= 5:
A1 = float(item[2])
A2 = float(item[3])
A3 = float(item[4])
A4 = float(item[5])
this_year_city_key_dict[city]['A']+= 1
this_year_city_key_dict[city]['A1'] += A1
this_year_city_key_dict[city]['A2'] += A2
this_year_city_key_dict[city]['A3'] += A3
this_year_city_key_dict[city]['A4'] += A4
elif label >= 6 and label <= 50:
B1 = float(item[2])
B2 = float(item[3])
B3 = float(item[4])
B4 = float(item[5])
this_year_city_key_dict[city]['B']+= 1
this_year_city_key_dict[city]['B1'] += B1
this_year_city_key_dict[city]['B2'] += B2
this_year_city_key_dict[city]['B3'] += B3
this_year_city_key_dict[city]['B4'] += B4
else:
C1 = float(item[2])
C2 = float(item[3])
C3 = float(item[4])
C4 = float(item[5])
this_year_city_key_dict[city]['C'] += 1
this_year_city_key_dict[city]['C1'] += C1
this_year_city_key_dict[city]['C2'] += C2
this_year_city_key_dict[city]['C3'] += C3
this_year_city_key_dict[city]['C4'] += C4
for city in this_year_city_key_dict.items():
result_list.append(city[1])
with open('./result.csv', 'w') as result:
fieldnames = ['name', 'year', 'A', 'A1', 'A2', 'A3', 'A4', 'B', 'B1', 'B2', 'B3', 'B4', 'C', 'C1', 'C2', 'C3', 'C4']
writer = csv.DictWriter(result, fieldnames=fieldnames)
writer.writeheader()
for city in result_list:
writer.writerow(city)
|
import os
import json
import fhirclient.models.diagnosticreport as dr
import fhirclient.models.codeableconcept as concept
import fhirclient.models.meta as meta
import fhirclient.models.resource as resource
import fhirclient.models.observation as observation
import fhirclient.models.fhirreference as reference
import fhirclient.models.fhirdate as date
import fhirclient.models.range as valRange
import fhirclient.models.medicationstatement as medication
import numpy as np
from collections import OrderedDict
from uuid import uuid4
from .common import _Utilities
class _Fhir_Helper:
def __init__(self, patientID):
self.report = dr.DiagnosticReport()
self.phased_rec_map = {}
self.result_ids = []
self.fhir_report = {}
self.obs_contained = []
self.patientID = patientID
def _get_region_studied_component(self, reportable_query_regions, nocall_regions):
observation_rs_components = []
for _, row in reportable_query_regions.df.iterrows():
obv_comp = observation.ObservationComponent()
obv_comp.code = concept.CodeableConcept({"coding": [{ "system": "http://loinc.org","code": "51959-5","display": "Ranges-examined component"}]})
obv_comp.valueRange = valRange.Range({"low": {"value": np.float(row['Start']) + 1},"high": {"value": np.float(row['End']) + 1}})
observation_rs_components.append(obv_comp)
for _, row in nocall_regions.df.iterrows():
obv_comp = observation.ObservationComponent()
obv_comp.code = concept.CodeableConcept({"coding": [{ "system": "http://loinc.org","code": "TBD-UncallableRegions","display": "Uncallable region"}]})
obv_comp.valueRange = valRange.Range({"low": {"value": np.float(row['Start']) + 1},"high": {"value": np.float(row['End']) + 1}})
observation_rs_components.append(obv_comp)
return observation_rs_components
def _addPhaseRecords(self, record):
if(record.samples[0].phased == False):
return
sample_data = record.samples[0].data
if(sample_data.GT != None and len(sample_data.GT.split('|')) >= 2 and sample_data.PS != None):
self.phased_rec_map.setdefault(sample_data.PS, []).append(record)
def initalizeReport(self):
patient_reference = reference.FHIRReference({"reference":"Patient/"+self.patientID})
self.report.id = "dr-"+uuid4().hex[:13]
self.report.meta = meta.Meta({"profile":["http://hl7.org/fhir/uv/genomics-reporting/StructureDefinition/genomics-report"]})
self.report.status = "final"
self.report.code = concept.CodeableConcept({"coding":[{"system":"http://loinc.org","code":"81247-9","display":"Master HL7 genetic variant reporting panel"}]})
self.report.subject = patient_reference
self.report.issued = date.FHIRDate(_Utilities.getFhirDate())
self.report.contained = []
def add_regionstudied_obv(self, ref_seq, reportable_query_regions, nocall_regions):
if reportable_query_regions.empty and nocall_regions.empty:
return
patient_reference = reference.FHIRReference({"reference":"Patient/"+self.patientID})
contained_uid = "rs-"+ uuid4().hex[:13]
self.result_ids.append(contained_uid)
        # Region Studied Observation
observation_rs = observation.Observation()
contained_rs = observation_rs
contained_rs.id = contained_uid
observation_rs.resource_type = "Observation"
contained_rs.meta = meta.Meta({"profile":["http://hl7.org/fhir/uv/genomics-reporting/StructureDefinition/region-studied"]})
observation_rs.code = concept.CodeableConcept({"coding":[{"system":"http://loinc.org","code":"53041-0","display":"DNA region of interest panel"}]})
observation_rs.status = "final"
observation_rs.category = [concept.CodeableConcept({"coding":[{"system": "http://terminology.hl7.org/CodeSystem/observation-category","code": "laboratory"}]})]
observation_rs.subject = patient_reference
observation_rs_component2 = observation.ObservationComponent()
observation_rs_component2.code = concept.CodeableConcept({"coding": [{"system": "http://loinc.org","code": "92822-6","display": "Genomic coord system"}]})
observation_rs_component2.valueCodeableConcept = concept.CodeableConcept({"coding":[{"system":"http://loinc.org","code":"LA30102-0","display": "1-based character counting"}]})
observation_rs_component3 = observation.ObservationComponent()
observation_rs_component3.code = concept.CodeableConcept({"coding": [{"system": "http://loinc.org","code": "48013-7","display": "Genomic reference sequence ID"}]})
observation_rs_component3.valueCodeableConcept = concept.CodeableConcept({"coding":[{"system":"http://www.ncbi.nlm.nih.gov/nuccore","code":ref_seq}]})
observation_rs_components = self._get_region_studied_component(reportable_query_regions, nocall_regions)
observation_rs.component = [observation_rs_component2,observation_rs_component3] + observation_rs_components
# Observation structure : described-variants
self.report.contained.append(contained_rs)
def add_variant_obv(self, record, ref_seq):
# collect all the record with similar position values,
# to utilized later in phased sequence relationship
self._addPhaseRecords(record)
patient_reference = reference.FHIRReference({"reference":"Patient/"+self.patientID})
alleles = _Utilities.getAllelicState(record)
dvuid = "dv-"+ uuid4().hex[:13]
self.fhir_report.update({ str(record.POS) : dvuid})
self.result_ids.append(dvuid)
observation_dv = observation.Observation()
observation_dv.resource_type = "Observation"
observation_dv.id = dvuid
observation_dv.meta = meta.Meta({"profile":["http://hl7.org/fhir/uv/genomics-reporting/StructureDefinition/variant"]})
observation_dv.status = "final"
observation_dv.category = [concept.CodeableConcept({"coding":[{"system": "http://terminology.hl7.org/CodeSystem/observation-category","code": "laboratory"}]})]
observation_dv.code = concept.CodeableConcept({"coding":[{"system":"http://loinc.org","code":"69548-6","display":"Genetic variant assessment"}]})
observation_dv.subject = patient_reference
observation_dv.valueCodeableConcept = concept.CodeableConcept({"coding":[{"system":"http://loinc.org","code":"LA9633-4","display":"present"}]})
observation_dv.component = []
observation_dv_component1 = observation.ObservationComponent()
observation_dv_component1.code = concept.CodeableConcept({"coding": [{"system": "http://loinc.org","code": "62374-4","display": "Human reference sequence assembly version"}]})
observation_dv_component1.valueCodeableConcept = concept.CodeableConcept({"coding": [{"system": "http://loinc.org","code": "LA14029-5","display": "GRCh37"}]})
observation_dv.component.append(observation_dv_component1)
observation_dv_component2 = observation.ObservationComponent()
observation_dv_component2.code = concept.CodeableConcept({"coding": [{"system": "http://loinc.org","code": "48013-7","display": "Genomic reference sequence ID"}]})
observation_dv_component2.valueCodeableConcept = concept.CodeableConcept({"coding": [{"system": "http://www.ncbi.nlm.nih.gov/nuccore","code": ref_seq}]})
observation_dv.component.append(observation_dv_component2)
if alleles['CODE'] != "" or alleles['ALLELE'] != "":
observation_dv_component3 = observation.ObservationComponent()
observation_dv_component3.code = concept.CodeableConcept({"coding": [{"system": "http://loinc.org","code": "53034-5","display": "Allelic state"}]})
observation_dv_component3.valueCodeableConcept = concept.CodeableConcept({"coding": [{"system": "http://loinc.org","code": alleles['CODE'],"display": alleles['ALLELE']}]})
observation_dv.component.append(observation_dv_component3)
observation_dv_component4 = observation.ObservationComponent()
observation_dv_component4.code = concept.CodeableConcept({"coding": [{"system": "http://loinc.org","code": "69547-8","display": "Genomic Ref allele [ID]"}]})
observation_dv_component4.valueString = record.REF
observation_dv.component.append(observation_dv_component4)
observation_dv_component5 = observation.ObservationComponent()
observation_dv_component5.code = concept.CodeableConcept({"coding": [{"system": "http://loinc.org","code": "69551-0","display": "Genomic Alt allele [ID]"}]})
observation_dv_component5.valueString = record.ALT[0].sequence
observation_dv.component.append(observation_dv_component5)
observation_dv_component6 = observation.ObservationComponent()
observation_dv_component6.code = concept.CodeableConcept({"coding": [{"system": "http://loinc.org","code": "92822-6","display": "Genomic coord system"}]})
observation_dv_component6.valueCodeableConcept = concept.CodeableConcept({"coding":[{"system":"http://loinc.org","code":"LA30102-0","display":"1-based character counting"}]})
observation_dv.component.append(observation_dv_component6)
observation_dv_component7 = observation.ObservationComponent()
observation_dv_component7.code = concept.CodeableConcept({"coding": [{"system": "http://hl7.org/fhir/uv/genomics-reporting/CodeSystem/tbd-codes","code": "exact-start-end","display": "Variant exact start and end"}]})
observation_dv_component7.valueRange = valRange.Range({"low": {"value": int(record.POS)}})
observation_dv.component.append(observation_dv_component7)
self.report.contained.append(observation_dv)
def add_phased_relationship_obv(self):
patient_reference = reference.FHIRReference({"reference":"Patient/"+self.patientID})
self.sequenceRels = _Utilities.getSequenceRelation(self.phased_rec_map)
for index in self.sequenceRels.index:
siduid = "sid-" + uuid4().hex[:13]
self.result_ids.append(siduid)
observation_sid = observation.Observation()
observation_sid.resource_type = "Observation"
observation_sid.id = siduid
observation_sid.meta = meta.Meta({"profile":["http://hl7.org/fhir/uv/genomics-reporting/StructureDefinition/sequence-phase-relationship"]})
observation_sid.status = "final"
observation_sid.category = [concept.CodeableConcept({"coding":[{"system": "http://terminology.hl7.org/CodeSystem/observation-category","code": "laboratory"}]})]
observation_sid.code = concept.CodeableConcept({"coding":[{"system":"http://loinc.org","code":"82120-7","display":"Allelic phase"}]})
observation_sid.subject = patient_reference
observation_sid.valueCodeableConcept = concept.CodeableConcept({"coding":[{"system":"http://hl7.org/fhir/uv/genomics-reporting/CodeSystem/seq-phase-relationship","code":self.sequenceRels.at[index,'Relation'],"display":self.sequenceRels.at[index,'Relation']}]})
self.report.contained.append(observation_sid)
def add_report_result(self):
reportResult = []
for uid in self.result_ids:
reportResult.append(reference.FHIRReference({"reference": f"#{uid}"}))
self.report.result = reportResult
def generate_final_json(self):
response = self.report.as_json()
od = OrderedDict()
od["resourceType"] = response['resourceType']
od["id"] = response['id']
od["meta"] = response['meta']
if 'contained' in response:
od["contained"] = response['contained']
else:
od["contained"] = []
od["status"] = response['status']
od["code"] = response['code']
od["subject"] = response['subject']
od["issued"] = response['issued']
if 'result' in response:
od["result"] = response['result']
else:
od["result"] = []
odCodeCoding = OrderedDict()
odCodeCoding["system"] = od["code"]["coding"][0]["system"]
odCodeCoding["code"] = od["code"]["coding"][0]["code"]
odCodeCoding["display"] = od["code"]["coding"][0]["display"]
od["code"]["coding"][0] = odCodeCoding
sidIndex = 0
for index,fhirReport in enumerate(od['contained']):
if (fhirReport['id'].startswith('sid-')):
sidIndex = index
break
for index,(_, fhirReport) in enumerate(zip(self.sequenceRels.index, od['contained'][sidIndex:])):
dvRef1 = self.fhir_report.get(str(self.sequenceRels.at[index,'POS1']))
dvRef2 = self.fhir_report.get(str(self.sequenceRels.at[index,'POS2']))
if (fhirReport['id'].startswith('sid-')):
derivedFromDV1 = {}
derivedFromDV2 = {}
derivedFromDV1['reference'] = f"#{dvRef1}"
derivedFromDV2['reference'] = f"#{dvRef2}"
derivedFrom = [derivedFromDV1,derivedFromDV2]
fhirReport['derivedFrom']= derivedFrom
for k,i in enumerate(od['contained']):
if (i['category'][0]['coding'][0]):
odCategoryCoding = OrderedDict()
odCategoryCoding["system"] = i['category'][0]['coding'][0]["system"]
odCategoryCoding["code"] = i['category'][0]['coding'][0]["code"]
od['contained'][k]['category'][0]['coding'][0] = odCategoryCoding
if (i['code']['coding'][0]):
odCodeCoding = OrderedDict()
odCodeCoding["system"] = i['code']['coding'][0]["system"]
odCodeCoding["code"] = i['code']['coding'][0]["code"]
odCodeCoding["display"] = i['code']['coding'][0]["display"]
od['contained'][k]['code']['coding'][0] = odCodeCoding
if 'valueCodeableConcept' in i.keys():
odValueCodeableConceptCoding = OrderedDict()
odValueCodeableConceptCoding["system"] = i['valueCodeableConcept']['coding'][0]["system"]
odValueCodeableConceptCoding["code"] = i['valueCodeableConcept']['coding'][0]["code"]
odValueCodeableConceptCoding["display"] = i['valueCodeableConcept']['coding'][0]["display"]
od['contained'][k]['valueCodeableConcept']['coding'][0] = odValueCodeableConceptCoding
if ((i['id'].startswith('dv-')) or (i['id'].startswith('rs-'))):
for l,j in enumerate(i['component']):
odComponentCodeCoding = OrderedDict()
if j['code']['coding'][0]["system"]:
odComponentCodeCoding["system"] = j['code']['coding'][0]["system"]
if j['code']['coding'][0]["code"]:
odComponentCodeCoding["code"] = j['code']['coding'][0]["code"]
if j['code']['coding'][0]["display"]:
odComponentCodeCoding["display"] = j['code']['coding'][0]["display"]
if od['contained'][k]['component'][l]['code']['coding'][0]:
od['contained'][k]['component'][l]['code']['coding'][0] = odComponentCodeCoding
odComponentvalueCodeableConcept = OrderedDict()
if 'valueCodeableConcept' in j.keys():
odComponentvalueCodeableConcept["system"] = j['valueCodeableConcept']['coding'][0]["system"]
if 'code' in j['valueCodeableConcept']['coding'][0].keys():
odComponentvalueCodeableConcept["code"] = j['valueCodeableConcept']['coding'][0]["code"]
if 'display' in j['valueCodeableConcept']['coding'][0].keys():
odComponentvalueCodeableConcept["display"] = j['valueCodeableConcept']['coding'][0]["display"]
od['contained'][k]['component'][l]['valueCodeableConcept']['coding'][0] = odComponentvalueCodeableConcept
if (i['id'].startswith('rs-')):
odRS = OrderedDict()
odRS["resourceType"] = i['resourceType']
odRS["id"] = i['id']
odRS["meta"] = i['meta']
odRS["status"] = i['status']
odRS["category"] = i['category']
odRS["code"] = i['code']
odRS["subject"] = i['subject']
odRS["component"] = i['component']
od['contained'][k] = odRS
if (i['id'].startswith('dv-')):
odDV = OrderedDict()
odDV["resourceType"] = i['resourceType']
odDV["id"] = i['id']
odDV["meta"] = i['meta']
odDV["status"] = i['status']
odDV["category"] = i['category']
odDV["code"] = i['code']
odDV["subject"] = i['subject']
odDV["valueCodeableConcept"] = i['valueCodeableConcept']
odDV["component"] = i['component']
od['contained'][k] = odDV
if (i['id'].startswith('sid-')):
odSID = OrderedDict()
odSID["resourceType"] = i['resourceType']
odSID["id"] = i['id']
odSID["meta"] = i['meta']
odSID["status"] = i['status']
odSID["category"] = i['category']
odSID["code"] = i['code']
odSID["subject"] = i['subject']
odSID["valueCodeableConcept"] = i['valueCodeableConcept']
odSID["derivedFrom"] = i['derivedFrom']
od['contained'][k] = odSID
self.fhir_json = od
def export_fhir_json(self, output_filename):
with open(output_filename, 'w') as fp:
json.dump(self.fhir_json, fp,indent=4)
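# Assumed call order for _Fhir_Helper, inferred from the methods above (a hedged
# sketch, not an officially documented workflow):
#   helper = _Fhir_Helper(patientID)
#   helper.initalizeReport()
#   helper.add_regionstudied_obv(ref_seq, reportable_query_regions, nocall_regions)
#   helper.add_variant_obv(record, ref_seq)        # once per VCF record
#   helper.add_phased_relationship_obv()           # needed before generate_final_json
#   helper.add_report_result()
#   helper.generate_final_json()
#   helper.export_fhir_json(output_filename)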
|
from bs4 import UnicodeDammit
import unicodedata
import time
import re
def ensure_unicode(value):
return UnicodeDammit(value).unicode_markup
def normalize_unicode(value):
return unicodedata.normalize("NFKD", ensure_unicode(value))\
.encode('ascii', 'ignore')
def ensure_alphanum(value):
return re.sub(r'\W', '', value)
# returns a cleaned, normalized, and shortened version of a name
def clean_author(name):
short_name = ''.join(name.split()).lower()
return normalize_unicode(short_name)
def clean_authors(names):
return ''.join([clean_author(name['family']) for name in names if 'family' in name])
# returns a cleaned, normalized, version of a title
def clean_title(start_title):
return ensure_alphanum(normalize_unicode(start_title).lower())
# merges two dicts; unioned items from dict2 overwrite dict1
def merge_dicts(dict1, dict2):
return dict(dict1.items() + dict2.items())
# adds or updates an article from raw DB to production DB
def add_or_update(article, raw, production, merges):
# if article is already in production db
# get computed fields
indices = {}
# this should never happen--why does it?
if 'citation' not in article:
return None
if 'title' in article['citation'] and 'author' in article['citation']:
cleaned_title = clean_title(article['citation']['title'])
cleaned_author = ''.join([
clean_author(name['family'])
for name in article['citation']['author']
if 'family' in name
])
indices['cleaned_author_title'] = '_'.join([cleaned_author, cleaned_title])
if 'PMID' in article['citation']:
indices['PMID'] = article['citation']['PMID']
if 'DOI' in article['citation']:
indices['DOI'] = article['citation']['DOI']
article['indices'] = indices
production_article = None
#for key, value in indices.items():
for field in ['DOI', 'PMID', 'cleaned_author_title']:
if field in indices:
value = indices[field]
production_article = production.find_one({
'indices.{}'.format(field) : value
})
if production_article:
break
# ensure citation has not been imported
try:
if article["meta-data"]["flags"]["imported"] and production_article:
return production_article["_id"]
except KeyError:
pass
if production_article:
# track merges
merges.insert({
"production_article_id": production_article["_id"],
"raw_article_id": article.get("_id", None),
"citation_dicts": {
"production": production_article["citation"],
"raw": article["citation"]
}
})
# merge article with one in production db
production.update(
{"_id": production_article["_id"]},
{"$set":
{
'citation': merge_dicts(
article["citation"],
production_article["citation"]
),
'indices': merge_dicts(
indices,
production_article.get('indices', {})
)
}
}
)
# iterate through article.references
id_list = []
for reference in article.get("references", []):
id_list.append(add_or_update({'citation': reference}, raw, production, merges))
# append pointer for reference to article already in production DB
production.update(
{"_id": production_article["_id"]},
{"$addToSet":
{"references": {"$each": id_list}}
}
)
_id = production_article["_id"]
# else - article doesn't already exist in production DB
else:
# create a new article to represent the article
#print "creating a new article"
production_article = {}
# copy citation and meta-data to the article
production_article["citation"] = article["citation"]
production_article['indices'] = indices
production_article['references'] = []
production_article['meta-data'] = article.get('meta-data', {})
# iterate through article.references
id_list = []
for reference in article.get("references", []):
reference_id = add_or_update({'citation': reference}, raw, production, merges)
if reference_id:
id_list.append(reference_id)
# append pointer for reference to article already in production DB
production_article["references"] = id_list
_id = production.insert(production_article)
# update imported tag in raw DB
if '_id' in article:
raw.update(
{"_id": article["_id"]},
{"$set": {
"meta-data.flags.imported": True}
}
)
return _id
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from math import ceil, floor, log2
from os import urandom
import pytest
import winternitz.signatures
__author__ = "Harald Heckmann"
__copyright__ = "Harald Heckmann"
__license__ = "mit"
wots = None
wots2 = None
wots_strange_w = None
wots_strange_w2 = None
wotsp = None
wotsp2 = None
wotsp_strange_w = None
wotsp_strange_w2 = None
wots_def_key_count = 0
keysize = 0
# self is of no use since pytest creates new instances for each test function
@pytest.mark.incremental
class TestWOTS(object):
def test_init(self):
# Init for __function__ and getter tests
global wots, wots2, wots_strange_w, wots_strange_w2, wotsp, wotsp2,\
wotsp_strange_w, wotsp_strange_w2, wots_def_key_count, keysize
wots_strange_w = winternitz.signatures.WOTS(w=13)
wots_strange_w2 = winternitz.signatures.WOTS(w=((1 << 13) + 1917))
wots = winternitz.signatures.WOTS(w=4)
wots2 = winternitz.signatures.WOTS(w=16)
wotsp = winternitz.signatures.WOTSPLUS(w=4)
wotsp2 = winternitz.signatures.WOTSPLUS(w=16)
wotsp_strange_w = winternitz.signatures.WOTSPLUS(w=13)
wotsp_strange_w2 = winternitz.signatures.WOTSPLUS(w=((1 << 13) + 1917))
kswots = winternitz.signatures.WOTS()
msgkeys = int(ceil(kswots.digestsize / log2(kswots.w)))
cskeys = int(floor(log2(msgkeys *
(kswots.w - 1)) / log2(kswots.w)) + 1)
wots_def_key_count = msgkeys + cskeys
keysize = int(ceil(kswots.digestsize / 8))
# Invalid w parameter
with pytest.raises(ValueError):
_ = winternitz.signatures.WOTS(w=1) # noqa
with pytest.raises(ValueError):
_ = winternitz.signatures.WOTS(w=(1 << 513)) # noqa
# Invalid private key size
with pytest.raises(ValueError):
_ = winternitz.signatures.WOTS(privkey=[b"Hi"]) # noqa
# Invalid public key size
with pytest.raises(ValueError):
_ = winternitz.signatures.WOTS(pubkey=[b"Hi"]) # noqa
# Invalid size of one element of public key
with pytest.raises(ValueError):
_ = winternitz.signatures.WOTS(pubkey=[urandom(1) for _ in # noqa
range(wots_def_key_count)])
def test_underscore_functions_and_getter(self):
global wots, wots2, wotsp, wotsp2, wots_def_key_count, keysize
# Object representation
_ = str(wots2)
_ = str(wotsp2) # noqa: F841
# Test string representation if public key is set
_ = str(winternitz.signatures.WOTS(pubkey=[urandom(keysize) for
_ in range(wots_def_key_count)]))
# __repr__(self) returns a string which contains the code to be
# executed to create an equal object. eval(...) does execute this code.
wots_copy = eval(repr(wots))
wotsp_copy = eval(repr(wotsp))
# Equality checks
assert wots == wots_copy
assert wotsp == wotsp_copy
assert not (wots == wots2)
assert not (wotsp == wotsp2)
# Not equality checks
assert wots != wots2
assert wotsp != wotsp2
assert not (wots != wots_copy)
assert not (wotsp != wotsp_copy)
# Number to base returns [0]
_ = wots._numberToBase(0, 16) # noqa
# Number conversion to another base does return more numbers than
# private keys
with pytest.raises(IndexError):
wots._getSignatureBaseMessage(urandom(keysize + 1))
def test_sign_and_verify_wots(self):
global wots, wots_strange_w, wots_strange_w2
WOTS = winternitz.signatures.WOTS
message = "Hello World!".encode("utf-8")
# Sign and verify with the same object
sig = wots.sign(message) # noqa: F841
assert(wots.verify(message, sig["signature"]))
# Test getPubkeyFromSignature
pubkey = wots.getPubkeyFromSignature(message, sig["signature"])
assert(wots.pubkey == pubkey)
# Sign with one object, derive the public key from checksum
sig = wots_strange_w.sign(message)
# Copy the object, the public key is derived from the private key
wots_strange_w_copy = eval(repr(wots_strange_w))
assert(wots_strange_w_copy.verify(message, sig["signature"]))
# Create an object and specify only the public key. Verify the sig
wots_strange_w_pub = WOTS(w=wots_strange_w.w,
pubkey=wots_strange_w.pubkey)
assert(wots_strange_w_pub.verify(message, sig["signature"]))
# It should not be possible to sign having only a private key
with pytest.raises(ValueError):
_ = wots_strange_w_pub.sign(message) # noqa
# Verification should fail with an invalid public key
assert(not wots2.verify(message, sig["signature"]))
wots_same_w = WOTS(w=4)
assert(not wots_same_w.verify(message, sig["signature"]))
# Sign and verify with the same object using a big and strange w value
sig = wots_strange_w2.sign(message) # noqa: F841
assert(wots_strange_w2.verify(message, sig["signature"]))
def test_sign_and_verify_wots_plus(self):
global wotsp, wotsp2, wotsp_strange_w, wotsp_strange_w2
WOTSP = winternitz.signatures.WOTSPLUS
message = "Hello World!".encode("utf-8")
# Sign and verify with the same object
sig = wotsp.sign(message) # noqa: F841
assert(wotsp.verify(message, sig["signature"]))
# Test getPubkeyFromSignature
pubkey = wotsp.getPubkeyFromSignature(message, sig["signature"])
assert(wotsp.pubkey == pubkey)
# Sign with one object, derive the public key from checksum
sig = wotsp_strange_w.sign(message)
# Copy the object, the public key is derived from the private key
wotsp_strange_w_copy = eval(repr(wotsp_strange_w))
assert(wotsp_strange_w_copy.verify(message, sig["signature"]))
# Create an object and specify only the public key. Verify the sig
wotsp_strange_w_pub = WOTSP(w=wotsp_strange_w.w, pubkey=wotsp_strange_w
.pubkey)
# Should fail because we need the seed
assert(not wotsp_strange_w_pub.verify(message, sig["signature"]))
wotsp_strange_w_pub = WOTSP(w=wotsp_strange_w.w,
seed=wotsp_strange_w.seed,
pubkey=wotsp_strange_w.pubkey)
assert(wotsp_strange_w_pub.verify(message, sig["signature"]))
# It should not be possible to sign having only a private key
with pytest.raises(ValueError):
_ = wotsp_strange_w_pub.sign(message) # noqa
# Verification should fail with an invalid public key
assert(not wotsp2.verify(message, sig["signature"]))
wotsp_same_w = WOTSP(w=4, seed=wotsp_strange_w.seed)
assert(not wotsp_same_w.verify(message, sig["signature"]))
|
from bs4 import BeautifulSoup
import requests
MAX_PAGE = 250
# defining base_url to create a list of all page urls
BASE_URL = "https://www.truecar.com/used-cars-for-sale/listings/"
# defining host_name for creating urls for each car ad
HOST_NAME = "https://www.truecar.com"
pages = list()
urls = list()
failed_pages = list()
def url_scraper():
with open("pages.txt", "w") as f:
for i in range(1,MAX_PAGE+1):
page = BASE_URL + "?page=" + str(i)
f.write(page+"\n")
pages.append(page)
for page in pages:
try:
response = requests.get(page)
response.raise_for_status()
        except requests.exceptions.RequestException:
failed_pages.append(page)
continue
src = response.text
soup = BeautifulSoup(src, "html.parser")
ads = soup.find_all("a", attrs={
"data-test" : "vehicleCardLink"
})
url_list = [HOST_NAME+link["href"] for link in ads]
with open("urls.txt", "a+") as f:
for url in url_list:
urls.append(url)
f.write(url+"\n")
# By Sina Kazemi
# Github : https://github.com/sina96n/
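# Assuming this script is meant to be run directly (url_scraper is never called
# above otherwise), a minimal entry point:
if __name__ == "__main__":
    url_scraper()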
|
from sklearn import preprocessing
import pandas as pd
from collections import defaultdict
class DfOneHotEncoder():
def __init__(self):
self.le = defaultdict()
self.ohe = defaultdict()
self.dropped_columns = {}
def fit(self, df, columns=None, **kwargs):
self.le = df[columns].apply(lambda x: preprocessing.LabelEncoder().fit(x)).to_dict()
for column in columns:
self.ohe[column] = preprocessing.\
OneHotEncoder(sparse=False).\
fit(self.le[column].transform(df[column]).reshape(len(df[column]), 1))
def transform(self, df, columns=None, drop_first=False, **kwargs):
for column in columns:
label_encode = self.le[column].transform(df[column]).reshape(len(df[column]), 1)
one_hot_encode = self.ohe[column].transform(label_encode)
df = df.drop(column, axis=1)
new_col_name = [str(column) + "_" + str(cat_name) for cat_name in list(self.le[column].classes_)]
if drop_first:
start_index = 1
self.dropped_columns[column] = new_col_name[0]
else:
start_index = 0
df = pd.concat([df, pd.DataFrame(one_hot_encode[:,start_index:], columns=new_col_name[start_index:])], axis=1)
return df
def fit_transform(self, df, columns=None, **kwargs):
self.fit(df, columns, **kwargs)
return self.transform(df, columns, **kwargs)
def inverse_transform(self, df, columns):
for column in columns:
names_of_encoded_columns = [i for i in list(df) if i.startswith(column + "_")]
df[column] = df[names_of_encoded_columns].\
apply(lambda x: x[x != 0].keys()[0][len(column)+1:] if ~(x == 0).all()
else self.dropped_columns[column][len(column)+1:], axis=1)
df = df.drop(names_of_encoded_columns,axis=1)
return df
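# A minimal, self-contained sketch of DfOneHotEncoder on a toy DataFrame. Note
# the class above uses OneHotEncoder(sparse=False), so this assumes a
# scikit-learn version that still accepts the `sparse` keyword.
if __name__ == '__main__':
    demo = pd.DataFrame({'color': ['red', 'blue', 'red', 'green'],
                         'value': [1, 2, 3, 4]})
    enc = DfOneHotEncoder()
    encoded = enc.fit_transform(demo, columns=['color'])
    print(encoded)
    print(enc.inverse_transform(encoded.copy(), columns=['color']))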
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 4 11:42:49 2021
@author: Easin
"""
in1 = input()
in1 = int(in1)
if in1 %2 != 0:
x = in1 -9
y = 9
else:
x = in1 -4
y = 4
print(x,y)
|
import random
from tests import utils as tests_utils
from .test_event_base import SalesManagoEventsTestsBase
class SalesManagoEventDataFeatureTest(SalesManagoEventsTestsBase):
def test_requestDict_for_minimal(self):
AGAINST = {
'email': self.CLIENT_MAIL,
'owner': self.OWNER_MAIL,
'contactId': self.CONTACT_ID,
'contactEvent': {
'date': self.EVENT_DATE,
'contactExtEventType': self.mcd['contactExtEventType']
}
}
TEST_DICT = self.eventClass.requestDict()
self.assertEqual(AGAINST, TEST_DICT)
for ext_field in self.EXT_EVENT_FIELDS:
self.assertNotIn(ext_field, TEST_DICT)
self.assertNotIn(ext_field, TEST_DICT['contactEvent'])
def test_requestDict_for_full(self):
AGAINST = {
'email': self.CLIENT_MAIL,
'owner': self.OWNER_MAIL,
'contactId': self.CONTACT_ID,
'forceOptIn': self.fcd['forceOptIn'],
'contactEvent': {
'date': self.EVENT_DATE,
'contactExtEventType': self.fcd['contactExtEventType'],
'products': self.fcd['products'],
'location': self.fcd['location'],
'value': self.fcd['value'],
'detail1': self.fcd['detail1'],
'detail2': self.fcd['detail2'],
'description': self.fcd['description'],
'externalId': self.fcd['externalId'],
'shopDomain': self.fcd['shopDomain']
}
}
TEST_DICT = self.eventClassFull.requestDict()
self.assertEqual(AGAINST, TEST_DICT)
|
#!/usr/bin/python3.8
import os
import json
import aiohttp
import asyncio
import logging
import pprint
from asyncio import wait_for
from aiohttp import web
from aiohttp import client
from hpfeeds.asyncio import ClientSession
HPFSERVER = os.environ.get("HPFSERVER", "127.0.0.1")
HPFPORT = int(os.environ.get("HPFPORT", 20000))
HPFIDENT = os.environ.get("HPFIDENT", "testing")
HPFSECRET = os.environ.get("HPFSECRET", "secretkey")
HIVEID = os.environ.get("HIVEID", "UnknownHive")
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)
# Credit for the base of this goes to a Stack Overflow answer (original link not recorded).
baseUrl = 'http://0.0.0.0:8080'
async def hpfeeds_publish(event_message):
async with ClientSession(HPFSERVER, HPFPORT, HPFIDENT, HPFSECRET) as client:
client.publish('wordpress.sessions', json.dumps(event_message).encode('utf-8'))
return True
async def handler(request):
    # Build the log entry before we sort out the response, as that's more important
# Get HTTP as version string
http_version = "HTTP/{0}.{1}".format(request.version.major, request.version.minor)
    # convert Cookies to a standard dict; we will lose duplicates
http_cookies = {}
for k, v in request.cookies.items():
http_cookies[k] = v
    # convert Headers to a standard dict; we will lose duplicates
http_headers = {}
for k, v in request.headers.items():
http_headers[k] = v
    # convert POST to a standard dict; we will lose duplicates
http_post = {}
if request.method == 'POST':
data = await request.post()
for key, value in data.items():
http_post[key] = value
event_message = {
"hive_id": HIVEID,
"source_ip": request.remote,
"http_remote": request.remote,
"http_host": request.host,
"http_version": http_version,
"http_method": request.method,
"http_scheme": request.scheme,
"http_query": request.path_qs,
"http_post": http_post,
"http_headers": http_headers,
"http_path": request.path
}
# Send the Broker message
# Set timeout to 3 seconds in a try: except so we dont kill the http response
try:
await wait_for(hpfeeds_publish(event_message), timeout=3)
except asyncio.TimeoutError:
print("Unable to connect to hpfeeds broker.")
pass
proxyPath = request.path_qs
reqH = request.headers.copy()
    if reqH.get('connection', '') == 'Upgrade' and reqH.get('upgrade', '') == 'websocket' and request.method == 'GET':
ws_server = web.WebSocketResponse()
await ws_server.prepare(request)
logger.info('##### WS_SERVER %s' % pprint.pformat(ws_server))
client_session = aiohttp.ClientSession(cookies=request.cookies)
async with client_session.ws_connect(baseUrl+request.path_qs) as ws_client:
logger.info('##### WS_CLIENT %s' % pprint.pformat(ws_client))
async def wsforward(ws_from,ws_to):
async for msg in ws_from:
logger.info('>>> msg: %s',pprint.pformat(msg))
mt = msg.type
md = msg.data
if mt == aiohttp.WSMsgType.TEXT:
await ws_to.send_str(md)
elif mt == aiohttp.WSMsgType.BINARY:
await ws_to.send_bytes(md)
elif mt == aiohttp.WSMsgType.PING:
await ws_to.ping()
elif mt == aiohttp.WSMsgType.PONG:
await ws_to.pong()
elif ws_to.closed:
await ws_to.close(code=ws_to.close_code,message=msg.extra)
else:
                        raise ValueError('unexpected message type: %s' % pprint.pformat(msg))
finished,unfinished = await asyncio.wait([wsforward(ws_server,ws_client),wsforward(ws_client,ws_server)],return_when=asyncio.FIRST_COMPLETED)
return ws_server
else:
# We can put something in here so we can set any user / password combination to work.
# Essentially edit the POST form and replace the values with whatever we default to admin:admin
async with client.request(
request.method,baseUrl+proxyPath,
headers = reqH,
allow_redirects=False,
data = await request.read()
) as res:
headers = res.headers.copy()
            # Get the body from the upstream
            body = await res.read()
            # If there is a compression header or chunked transfer we need to remove it.
            # The response body we forward is already decompressed and reassembled.
            if 'Content-Encoding' in headers:
                del headers['Content-Encoding']
            if 'Transfer-Encoding' in headers:
                if headers['Transfer-Encoding'] == "chunked":
                    del headers['Transfer-Encoding']
            # We need to fix the content length if the response was compressed.
            if 'Content-Length' in headers and int(headers['Content-Length']) != len(body):
                headers['Content-Length'] = str(len(body))
# Now return the response
return web.Response(
headers = headers,
status = res.status,
body = body
)
return ws_server
app = web.Application(
client_max_size=10000000
)
app.router.add_route('*','/{proxyPath:.*}', handler)
web.run_app(app,port=80)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 13:45:39 2020
the version of loompy should be the consistent with velocyto; otherwise error returned
@author: jingkui.wang
"""
import loompy
import glob
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S117008_R9533/LOOMS/*.loom")
loompy.combine(files,
"/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S117008_R9533/LOOMS/S117008_R9533_merged.loom")
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S117007_R9533/LOOMS/*.loom")
loompy.combine(files,
"/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S117007_R9533/LOOMS/S117007_R9533_merged.loom")
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S117009_R9533/LOOMS/*.loom")
loompy.combine(files,
"/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S117009_R9533/LOOMS/S117009_R9533_merged.loom")
# folder S124890_R9968
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S124890_R9968/LOOMS/*.loom")
loompy.combine(files,
"/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S124890_R9968/LOOMS/S124890_R9968_merged.loom")
# folder S124889_R9968
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S124889_R9968/LOOMS/*.loom")
loompy.combine(files,
"/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S124889_R9968/LOOMS/S124889_R9968_merged.loom")
# folder S124891_R9968
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S124891_R9968/LOOMS/*.loom")
loompy.combine(files,
"/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S124891_R9968/LOOMS/S124891_R9968_merged.loom")
# here combine merged loom files from all folders
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/*/LOOMS/*merged.loom")
loompy.combine(files, '/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/all_merged.loom')
loompy.combine(files, '/Users/jiwang/workspace/imp/scRNAseq_MS_lineage_dev/data/velocyto_all_merged.loom')
ds = loompy.connect('/Users/jiwang/workspace/imp/scRNAseq_MS_lineage_dev/data/velocyto_all_merged.loom')
ds.shape
|
import random

# These classes are used as plain namespaces: the functions below are called
# directly on the class (no instances), so they take no 'self' parameter.
class room:
    def room(n, e, s, w, items):
        stuff = "Really cool stuff", items
        doors = [["north", n], ["east", e], ["south", s], ["west", w]]
        return [doors, stuff]
class levels:
def rooms(x,y,
room1 = "room1",
room2 = "room2",
room3 = "room3",
room4 = "room4",
room5 = "room5",
room6 = "room6",
room7 = "room7",
room8 = "room8",
room9 = "room9",
room10 = "room10",
room11 = "room11",
room12 = "room12",
room13 = "room13",
room14 = "room14",
room15 = "room15",
room16 = "room16",
room17 = "room17",
room18 = "room18",
room19 = "room19",
room20 = "room20",
room21 = "room21",
room22 = "room22",
room23 = "room23",
room24 = "room24",
room25 = "room25"):
rooms = [[room1,room2,room3,room4,room5],
[room6,room7,room8,room9,room10],
[room11,room12,room13,room14,room15],
[room16,room17,room18,room19,room20],
[room21,room22,room23,room24,room25]]
return rooms[x][y]
print(levels.rooms(4, 4))
room1 = room.room(0, 1, 1, 1, "Better Sword")
print(room1[1])
print(levels.rooms(0, 0))
|
# -*- coding: utf-8 -*-
"""
CTableModel class definition file
"""
from PyQt5.QtGui import QStandardItemModel
from PyQt5.QtCore import Qt, QVariant
from enum import IntEnum, Enum
class CTableModel(QStandardItemModel):
    # Enumeration of the attribute items
class EAttrIndex(IntEnum):
        EAttr_Id = 0            # id
        EAttr_Descrition = 1    # description
        EAttr_Checked = 2       # whether it has been verified
        EAttr_LastOneFlag = 3   # whether it is the last one
        Eattr_AnimateSpeed = 4  # animation speed
Eattr_Max = 5
    # Animation speed enumeration values
class EAnimateSpeed(Enum):
        EAnimateSpeed_Slow = 0    # slow
        EAnimateSpeed_Normal = 1  # normal
        EAnimateSpeed_Fast = 2    # fast
EAnimateSpeed_Max = 3
def __init__(self, rows, columns, parent=None):
super(CTableModel, self).__init__(rows, columns, parent)
def flags(self, index):
        # Only column 1 is allowed to be edited
itemFlags = Qt.ItemFlags(0)
if 1 != index.column():
            itemFlags &= (
                ~Qt.ItemIsEditable)  # Qt.ItemIsEditable means editable; ~Qt.ItemIsEditable is its negation, i.e. not editable.
return itemFlags
else:
return QStandardItemModel.flags(self, index)
def data(self, index, role):
if Qt.EditRole == role:
return QStandardItemModel.data(self, index, role)
elif Qt.DisplayRole != role:
return QStandardItemModel.data(self, index, role)
var = self.data(index, Qt.EditRole)
var = QVariant(var)
if 0 == index.column():
return var
if CTableModel.EAttrIndex.EAttr_Checked == CTableModel.EAttrIndex(index.row()):
var = (var.value() and 'no' or 'yes') # 0:yes, 1:no
elif CTableModel.EAttrIndex.EAttr_LastOneFlag == CTableModel.EAttrIndex(index.row()):
var = (var.value() and True or False) # 0:False, other:True
elif CTableModel.EAttrIndex.Eattr_AnimateSpeed == CTableModel.EAttrIndex(index.row()):
eSpeed = CTableModel.EAnimateSpeed(var.value())
if eSpeed == CTableModel.EAnimateSpeed.EAnimateSpeed_Slow:
                var = '慢速'  # "slow"
elif eSpeed == CTableModel.EAnimateSpeed.EAnimateSpeed_Normal:
                var = '中速'  # "normal"
elif eSpeed == CTableModel.EAnimateSpeed.EAnimateSpeed_Fast:
                var = '快速'  # "fast"
else:
var = ''
return var
def setData(self, index, value, role):
if Qt.EditRole == role:
return QStandardItemModel.setData(self, index, value, role)
else:
return False
|
import json
import datetime
from lib.utils.utils import DATE_STRING, DATETIME_STRING
from const.query_execution import QueryExecutionErrorType
should_escape_list = (",", '"', "\n", "\r")
def spread_dict(x, y):
z = x.copy()
z.update(y)
return z
def merge_str(str1: str, str2: str, separator: str = "\n") -> str:
"""Join two strings together if by the separator. If either is empty
then separator will not be used
Arguments:
str1 {str} -- Joined on left
str2 {str} -- Joined on right
Keyword Arguments:
separator {str} -- Middle string if both input are non-empty
(default: {'\n'})
Returns:
str -- The joined str
"""
if len(str1) and len(str2):
return str1 + separator + str2
return str1 or str2
def serialize_cell(cell) -> str:
try:
cell_type = type(cell)
if cell_type == str:
return cell
elif cell_type == datetime.datetime:
return DATETIME_STRING(cell)
elif cell_type == datetime.date:
return DATE_STRING(cell)
else:
return json.dumps(cell, ensure_ascii=False)
except (UnicodeDecodeError, TypeError):
# obj is byte string
try:
return str(cell)
except Exception:
return "[Unserializable]"
def row_to_csv(row):
output = []
for cell in row:
str_col = serialize_cell(cell)
if any(c in str_col for c in should_escape_list):
str_col = '"%s"' % str_col.replace('"', '""')
output.append(str_col)
return ",".join(output) + "\n"
def parse_exception(e):
error_type = QueryExecutionErrorType.INTERNAL.value
error_str = str(e)
error_extracted = None
return error_type, error_str, error_extracted
def get_parsed_syntax_error(
message: str, line_num: int = None, char_num: int = None,
):
error_type = QueryExecutionErrorType.SYNTAX.value
error_str = json.dumps({"line": line_num, "char": char_num, "message": message,})
return error_type, error_str, None
def format_error_message(code: int, message: str):
return f"Error #{code}: {message}"
|
from django.urls import resolve, reverse
import pytest
from build_migration.users.views import UserViewSet
class TestUserURLs:
"""Test URL patterns for users app."""
@pytest.mark.parametrize("url,view_class", [("/users/", UserViewSet)])
def test_users_views_resolve(self, url, view_class):
assert resolve(url).func.cls == view_class
@pytest.mark.parametrize(
"url_name,url_kwargs,url",
[
("users:user-list", {}, "/users/"),
("users:user-detail", {"pk": 1}, "/users/1/"),
],
)
def test_users_views_reverse(self, url_name, url_kwargs, url):
assert reverse(url_name, kwargs=url_kwargs) == url
|
import numpy as np
a = np.arange(16).reshape(4, 4)
print('First array')
print(a)
print('\n')
print('Horizontal splitting')
# hsplit splits the array into 2 equal sub-arrays along the columns
b = np.hsplit(a, 2)
print(b)
print('\n')
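# A further illustrative split (added sketch): hsplit also accepts a list of
# column indices, which yields unequal sub-arrays.
print('Split at column indices 1 and 3')
c = np.hsplit(a, [1, 3])   # columns [0:1], [1:3] and [3:4]
print(c)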
|
'''
# nflfantasy/fantasymath.py
# scraper/parser for fantasymath.com fantasy resources
'''
import logging
import time
from sportscraper.scraper import RequestScraper
class Scraper(RequestScraper):
'''
'''
def __init__(self, **kwargs):
'''
Scrape fantasymath API
Args:
'''
RequestScraper.__init__(self, **kwargs)
self.headers.update({'origin': 'https://fantasymath.com',
'authority': 'api.fantasymath.com',
'referer': 'https://fantasymath.com/'})
def distribution(self, player_codes):
'''
Gets projection distribution for specified players
Args:
player_codes(list): of str
Returns:
dict
'''
# api uses multiple parameters with same key (wdis)
# get_json method sorts params for caching consistency
# need to use tuple of lists so not overwrite wdis param
player_codes = list(player_codes)
url = 'https://api.fantasymath.com/v2/players-wdis/'
wdis = tuple(['wdis', player_code] for player_code in player_codes)
params = (['dst', 'mfl'], ['qb', 'pass4'], ['scoring', 'ppr'])
resp = self.session.get(url, params=wdis + params)
self.urls.append(resp.url)
resp.raise_for_status()
if self.delay:
time.sleep(self.delay)
return resp.json()
def players(self):
'''
        Gets the list of all available players
        Args:
            None
        Returns:
            list: of player dict
'''
url = 'https://api.fantasymath.com/players'
return self.get_json(url)
class Parser():
'''
'''
def __init__(self):
'''
'''
logging.getLogger(__name__).addHandler(logging.NullHandler())
def _fix_val(self, v):
'''
Fixes various values
Args:
v:
Returns:
'''
        try:
            return round(v, 3)
        except (TypeError, ValueError):
            return v
def distribution(self, content):
'''
Parses player distribution JSON
Args:
content (dict): parsed JSON
Returns:
list: of player dict
'''
wanted = ['fp_id', 'name', 'p25', 'p5', 'p50', 'p75', 'p95', 'pos', 'prob',
'proj', 'scoring', 'std']
return [{k: self._fix_val(v) for k,v in p.items() if k in wanted} for
p in content['players']]
def players(self, content):
'''
Parses players JSON
Args:
content (dict): parsed JSON
Returns:
dict
'''
fm_players = {}
for p in content:
vals = p['label'].split()
d = {'id': p['value'], 'pos': vals[0], 'name': ' '.join(vals[1:])}
fm_players[d['id']] = d
return fm_players
class Agent():
'''
'''
def __init__(self, cache_name='fantasymath-agent'):
logging.getLogger(__name__).addHandler(logging.NullHandler())
self._s = Scraper(cache_name=cache_name, delay=1.5)
self._p = Parser()
def weekly_projections(self):
'''
Gets weekly projections
Args:
None
Returns:
dict
'''
        dists = {}
        content = self._s.players()
        # Parser.players() returns a dict keyed by player id; index it as a list
        players = list(self._p.players(content).values())
        for i in range(0, len(players), 3):
            try:
                ids = [players[i]['id'], players[i+1]['id'], players[i+2]['id']]
            except IndexError:
                try:
                    ids = [players[i]['id'], players[i+1]['id']]
                except IndexError:
                    ids = [players[i]['id']]
            idstr = ', '.join(ids)
            logging.info('getting %s', idstr)
            if idstr in dists:
                logging.info('skipping %s', idstr)
                continue
            try:
                content = self._s.distribution(ids)
                dists[idstr] = self._p.distribution(content)
            except Exception:
                logging.exception('could not get %s', idstr)
        return dists
if __name__ == '__main__':
pass
|
#! python
# -*- coding: utf-8 -*-
# This is a utility module for integrating SMS providers into VizAlerts.
import os
import re
import phonenumbers
import twilio
# import local modules
import config
import log
import vizalert
# store the SMS client we get back from Twilio for use across all modules
smsclient = None
# regular expression used to split recipient number strings into separate phone numbers
SMS_RECIP_SPLIT_REGEX = u'[;,]+'
# appended to the bottom of all SMS messages, unless overridden
# expecting smsfooter.format(subscriber_email)
smsfooter = u'\r\rThis VizAlert SMS sent on behalf of {}'
class SMS:
"""Represents an SMS to be sent"""
def __init__(self, sms_from, sms_to, msgbody=None):
self.sms_from = sms_from
self.sms_to = sms_to
self.msgbody = msgbody # REVISIT--why is it okay for this to be blank?
def get_sms_client():
"""Generic function get an SMS client object. This only works with Twilio at this time."""
# check to see if there's a provider set
if config.configs['smsaction.provider'] == None or len(config.configs['smsaction.provider']) == 0:
errormessage = u'SMS Actions are enabled but smsaction.provider value is not set, exiting'
log.logger.error(errormessage)
raise ValueError(errormessage)
# load code for Twilio
elif config.configs['smsaction.provider'].lower() == 'twilio':
# these need to be in the global name space to send SMS messages
global twilio
import twilio
# Monkey patch to allow Twilio to find the cacert.pem file even when compiled into an exe
# See: https://stackoverflow.com/questions/17158529/fixing-ssl-certificate-error-in-exe-compiled-with-py2exe-or-pyinstaller
# and https://github.com/twilio/twilio-python/issues/167
ca_cert_path = os.path.join('twilio', 'conf', 'cacert.pem')
from twilio.http import get_cert_file
get_cert_file = lambda: ca_cert_path
twilio.http.get_cert_file = get_cert_file
global twiliorest
import twilio.rest as twiliorest
global smsclient
smsclient = twiliorest.Client(
config.configs['smsaction.account_id'],
config.configs['smsaction.auth_token'])
return smsclient
# unknown SMS provider error
else:
errormessage = u'SMS Actions are enabled but found unknown smsaction.provider {}, exiting'.format(
config.configs['smsaction.provider'])
log.logger.error(errormessage)
raise ValueError(errormessage)
def send_sms(sms_instance):
"""REVISIT: This and the other SMS methods should probably be members of the SMS class
function to send an sms using Twilio's REST API, see https://www.twilio.com/docs/python/install for details.
Presumes that numbers have gone through a first level of checks for validity
Returns nothing on success, error string back on failure"""
# shouldn't happen but setting content to '' if it's None
if not sms_instance.msgbody:
sms_instance.msgbody = ''
log.logger.info(u'Sending SMS: {},{},{}'.format(sms_instance.sms_from, sms_instance.sms_to, sms_instance.msgbody))
# now to send the message
try:
if sms_instance.sms_from.startswith('+'):
            # kinda kludgy, but if we think it's an E.164 number, assume it is...
message = smsclient.messages.create(body=sms_instance.msgbody, to=sms_instance.sms_to,
from_=sms_instance.sms_from)
else:
# if not, assume it must be a message service SID
message = smsclient.messages.create(body=sms_instance.msgbody, to=sms_instance.sms_to,
messaging_service_sid=sms_instance.sms_from)
# this may never happen since the Twilio REST API throws exceptions, it's a failsafe check
if message.status == 'failed':
raise ValueError(u'Failed to deliver SMS message to {} with body {},'
u' no additional information is available'.format(
sms_instance.sms_to,
sms_instance.msgbody))
# check for Twilio REST API exceptions
except twilio.base.exceptions.TwilioRestException as e:
errormessage = u'Could not send SMS message to {} with body {}.\nHTTP status {} returned for request: ' \
u'{} {}\nWith error {}: {} '.format(
sms_instance.sms_to,
sms_instance.msgbody,
e.status,
e.method,
e.uri,
e.code,
e.msg)
log.logger.error(errormessage)
return errormessage
# check for ValueError from try
except ValueError as e:
log.logger.error(e)
return e
except Exception as e:
errormessage = u'Could not send SMS message to {} with body {}, error {}, type {}'.format(
sms_instance.sms_to,
sms_instance.msgbody, e, e.__class__.__name__)
log.logger.error(errormessage)
return e
return None
def sms_append_body(body, vizcompleterefs, row, alert):
"""Generic function for filling SMS body text with the body & footers from the csv
plus inserting content references"""
body.append(row[alert.action_field_dict[vizalert.SMS_MESSAGE_FIELDKEY].field_name])
# add the footer if needed
if alert.action_field_dict[vizalert.SMS_FOOTER_FIELDKEY].field_name:
body.append(row[alert.action_field_dict[vizalert.SMS_FOOTER_FIELDKEY].field_name].replace(
vizalert.DEFAULT_FOOTER,
smsfooter.format(alert.subscriber_email)))
else:
# no footer specified, add the default footer
body.append(smsfooter.format(alert.subscriber_email))
# find all distinct content references in the email body list
# so we can replace each with an inline image or hyperlink text
foundcontent = re.findall(u'VIZ_LINK\(.*?\)', ' '.join(body))
foundcontentset = set(foundcontent)
vizrefs = list(foundcontentset)
if len(vizrefs) > 0:
for vizref in vizrefs:
# we're replacing #VIZ_LINK text
if vizcompleterefs[vizref]['formatstring'] == 'LINK':
# always use raw link, ignore presence or absence of RAWLINK argument
replacestring = alert.get_view_url(vizcompleterefs[vizref]['view_url_suffix'])
replaceresult = vizalert.replace_in_list(body, vizref, replacestring)
if replaceresult['foundstring']:
body = replaceresult['outlist']
return body
def validate_smsnumbers(vizdata, sms_to_fieldname, allowed_recipient_numbers, iso2countrycode):
"""Loops through the viz data for an Advanced Alert and returns a list of dicts
containing any errors found in recipients"""
errorlist = []
rownum = 2 # account for field header in CSV
try:
for row in vizdata:
result = smsnumbers_are_invalid(row[sms_to_fieldname],
False, # empty string not acceptable as a To number
iso2countrycode,
allowed_recipient_numbers)
if result:
errorlist.append(
{'Row': rownum, 'Field': sms_to_fieldname, 'Value': result['number'], 'Error': result['errormessage']})
rownum += 1
except Exception as e:
errormessage = u'Encountered error validating SMS numbers. Error: {}'.format(e.message)
log.logger.error(errormessage)
errorlist.append(errormessage)
return errorlist
return errorlist
def smsnumbers_are_invalid(sms_numbers, emptystringok, iso2countrycode, regex_eval=None):
"""Validates all SMS numbers found in a given string, optionally that conform to the regex_eval"""
log.logger.debug(u'Validating SMS field value: {}'.format(sms_numbers))
sms_number_list = re.split(SMS_RECIP_SPLIT_REGEX, sms_numbers.strip())
for sms_number in sms_number_list:
log.logger.debug(u'Validating presumed sms number: {}'.format(sms_number))
try:
# skip if we're okay with empty, and it is
if sms_number == '':
if not emptystringok:
errormessage = u'SMS number is empty'
else:
continue
else:
errormessage = smsnumber_is_invalid(sms_number, iso2countrycode, regex_eval)
if errormessage:
log.logger.debug(u'SMS number is invalid: {}, Error: {}'.format(sms_number, errormessage))
if len(sms_number) > 64:
                    sms_number = sms_number[:64] + '...'  # truncate a too-long address for error formatting purposes
return {'number': sms_number, 'errormessage': errormessage}
except Exception as e:
errormessage = u'Encountered error validating an SMS number. Error: {}'.format(e.message)
log.logger.error(errormessage)
return {'number': sms_number, 'errormessage': errormessage}
return None
def smsnumber_is_invalid(smsnumber, iso2countrycode, regex_eval=None):
"""Checks for a syntactically invalid phone number, returns None for success or an error message"""
try:
e164_number = smsnumber_to_e164(smsnumber, iso2countrycode)
# looks valid, but it must be permitted by regex pattern if so specified
if regex_eval:
log.logger.debug("testing smsnumber {} against regex {}".format(e164_number, regex_eval))
if not re.match(regex_eval, e164_number):
errormessage = u'SMS number must match regex pattern set by the administrator: {}'.format(regex_eval)
log.logger.error(errormessage)
return errormessage
except Exception as e:
return e.message
# looks like it was fine!
return None
def get_e164numbers(sms_numbers, iso2countrycode):
"""Converts a delimited string or list of SMS numbers to E.164 format
Returns a UNIQUE list of E.164 numbers
NOTE: This method ASSUMES that they have all been validated already """
sms_number_list = []
e164_numbers = []
if isinstance(sms_numbers, str) or isinstance(sms_numbers, unicode):
sms_number_list.extend(re.split(SMS_RECIP_SPLIT_REGEX, sms_numbers.strip()))
elif isinstance(sms_numbers, list):
sms_number_list.extend(sms_numbers)
else:
# that's not what we expected
errormessage = u'Input is neither a string nor a list: {}'.format(sms_numbers)
log.logger.error(errormessage)
raise UserWarning(errormessage)
# convert and add each number to our return list
for sms_number in sms_number_list:
log.logger.debug(u'Converting {} to E.164 format'.format(sms_number))
try:
e164_number = smsnumber_to_e164(sms_number, iso2countrycode)
if e164_number not in e164_numbers:
e164_numbers.append(e164_number)
except Exception as e:
raise UserWarning(e.message)
return e164_numbers
def smsnumber_to_e164(smsnumber, iso2countrycode):
"""Tries to convert a string into an E.164 formatted phone number
Raises exception if it can't, returns the E.164 number as a string, if it can """
try:
log.logger.debug(u'Converting {} to E.164 format, country code {}'.format(smsnumber, iso2countrycode))
try:
if smsnumber.startswith('+'):
smsnumber_obj = phonenumbers.parse(smsnumber)
else:
# country code not specified in number, so pass it in
smsnumber_obj = phonenumbers.parse(smsnumber, iso2countrycode)
except phonenumbers.NumberParseException as e:
errormessage = u'SMS Unable to parse number {}. Error: {}'.format(smsnumber, e.message)
log.logger.error(errormessage)
raise UserWarning(errormessage)
try:
if not phonenumbers.is_possible_number(smsnumber_obj):
errormessage = u'SMS Number is not possibly valid: {}.'.format(smsnumber)
log.logger.error(errormessage)
raise UserWarning(errormessage)
except phonenumbers.NumberParseException as e:
errormessage = u'SMS Unable to parse number {}. Error: {}'.format(smsnumber, e.message)
log.logger.error(errormessage)
raise UserWarning(errormessage)
if not phonenumbers.is_valid_number(smsnumber_obj):
errormessage = u'SMS Number is not valid: {}.'.format(smsnumber)
log.logger.error(errormessage)
raise UserWarning(errormessage)
e164_number = phonenumbers.format_number(smsnumber_obj, phonenumbers.PhoneNumberFormat.E164)
if not e164_number:
errormessage = u'SMS number {} could not be converted to E.164 for an unknown reason.'.format(smsnumber)
log.logger.error(errormessage)
raise UserWarning(errormessage)
# all good, return it!
return e164_number
    except Exception as e:
        log.logger.error(e.message)
        raise
|
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
import factory
from edfi_performance.api.client.school import SchoolClient
from .. import APIFactory
from ..descriptors.utils import build_descriptor
from ..utils import RandomSuffixAttribute, formatted_date
class SectionFactory(APIFactory):
educationalEnvironmentDescriptor = build_descriptor("EducationalEnvironment", "Classroom")
sectionIdentifier = RandomSuffixAttribute("ELA012017RM555")
availableCredits = 1
sequenceOfCourse = 1
classPeriods = factory.List([
factory.Dict(
dict(
classPeriodReference=factory.Dict(dict(
classPeriodName=None # Must be entered by client
)),
),
),
])
courseOfferingReference = factory.Dict(
dict(
localCourseCode="ELA-01", # Will need to override this with reference value
schoolId=SchoolClient.shared_elementary_school_id(),
schoolYear=2014,
sessionName="2016-2017 Fall Semester", # Will need to override this with reference value
)
)
locationReference = factory.Dict(
dict(
schoolId=SchoolClient.shared_elementary_school_id(),
classroomIdentificationCode="501", # Will need to override this with reference value
)
)
class SectionAttendanceTakenEventFactory(APIFactory):
calendarDateReference = factory.Dict(
dict(
calendarCode="107SS111111",
schoolId=SchoolClient.shared_elementary_school_id(),
schoolYear=2014,
date=formatted_date(9, 16, 2014),
)
)
sectionReference = factory.Dict(
dict(
sectionIdentifier=None, # Must be created
localCourseCode="ELA-01",
schoolId=SchoolClient.shared_elementary_school_id(),
schoolYear=2014,
sessionName="2016-2017 Fall Semester",
)
)
eventDate = formatted_date(9, 9)
|
"""
Design your implementation of the linked list. You can choose to use the
singly linked list or the doubly linked list. A node in a singly linked
list should have two attributes: val and next. val is the value of the
current node, and next is a pointer/reference to the next node. If you want
to use the doubly linked list, you will need one more attribute prev to
indicate the previous node in the linked list. Assume all nodes in the
linked list are 0-indexed.
Implement these functions in your linked list class:
- get(index) : Get the value of the index-th node in the linked list.
If the index is invalid, return -1.
- addAtHead(val) : Add a node of value val before the first element of
the linked list. After the insertion, the new node will be the first
node of the linked list.
- addAtTail(val) : Append a node of value val to the last element of
the linked list.
- addAtIndex(index, val) : Add a node of value val before the index-th
node in the linked list. If index equals to the length of linked list,
the node will be appended to the end of linked list. If index is
greater than the length, the node will not be inserted.
- deleteAtIndex(index) : Delete the index-th node in the linked list,
if the index is valid.
Example:
Input:
["MyLinkedList","addAtHead","addAtTail","addAtIndex","get","deleteAtIndex","get"]
[[],[1],[3],[1,2],[1],[1],[1]]
Output:
[null,null,null,null,2,null,3]
Explanation:
MyLinkedList linkedList = new MyLinkedList(); // Initialize empty LinkedList
linkedList.addAtHead(1);
linkedList.addAtTail(3);
linkedList.addAtIndex(1, 2); // linked list becomes 1->2->3
linkedList.get(1); // returns 2
linkedList.deleteAtIndex(1); // now the linked list is 1->3
linkedList.get(1); // returns 3
Constraints:
- 0 <= index,val <= 1000
- Please do not use the built-in LinkedList library.
- At most 2000 calls will be made to get, addAtHead, addAtTail,
addAtIndex and deleteAtIndex.
"""
#Difficulty: Medium
#57 / 57 test cases passed.
#Runtime: 300 ms
#Memory Usage: 14.3 MB
#Runtime: 300 ms, faster than 49.54% of Python3 online submissions for Design Linked List.
#Memory Usage: 14.3 MB, less than 63.36% of Python3 online submissions for Design Linked List.
class Node:
def __init__(self, val):
"""
Initialize of the Node.
"""
self.val = val
self.next = None
class MyLinkedList:
def __init__(self):
"""
Initialize your data structure here.
"""
self.head = Node(None)
    def get(self, index: int) -> int:
        """
        Get the value of the index-th node in the linked list. If the index is
        invalid, return -1.
        """
        node = self.head
        for i in range(index):
            node = node.next
            if node is None:
                return -1
        return node.val if node and node.val is not None else -1
def addAtHead(self, val: int) -> None:
"""
Add a node of value val before the first element of the linked list.
After the insertion, the new node will be the first node of the linked
list.
"""
if self.head.val is not None:
old_head = self.head
self.head = Node(val)
self.head.next = old_head
else:
self.head.val = val
def addAtTail(self, val: int) -> None:
"""
Append a node of value val to the last element of the linked list.
"""
node = self.head
while node.next:
node = node.next
node.next = Node(val)
def addAtIndex(self, index: int, val: int) -> None:
"""
Add a node of value val before the index-th node in the linked list.
If index equals to the length of linked list, the node will be appended
to the end of linked list. If index is greater than the length, the
node will not be inserted.
"""
if index == 0:
return self.addAtHead(val)
prev = None
for i in range(index):
if prev:
prev = prev.next
next_node = next_node.next
else:
prev = self.head
next_node = self.head.next
node = Node(val)
prev.next = node
node.next = next_node
def deleteAtIndex(self, index: int) -> None:
"""
Delete the index-th node in the linked list, if the index is valid.
"""
if index == 0:
self.head = self.head.next
return
prev = None
for i in range(index):
if prev and next_node:
if not next_node.next:
return
prev = prev.next
next_node = next_node.next
else:
prev = self.head
next_node = self.head.next
if next_node:
prev.next = next_node.next
# Your MyLinkedList object will be instantiated and called as such:
# obj = MyLinkedList()
# param_1 = obj.get(index)
# obj.addAtHead(val)
# obj.addAtTail(val)
# obj.addAtIndex(index,val)
# obj.deleteAtIndex(index)
|
from keras.engine.saving import model_from_json
from keras.preprocessing import image   # load_img/target_size come from Keras, not matplotlib
import numpy as np
from app import SKIN_CLASSES

# Path of the image to classify
img_path = r'E:\STUDY\PYTHON\New folder (2)\akiec2.jpg'
print("name " + img_path)

# Load the model architecture and weights
j_file = open('modelnew.json', 'r')
loaded_json_model = j_file.read()
j_file.close()
model = model_from_json(loaded_json_model)
model.load_weights('modelnew.h5')

# Preprocess: resize to the network input size, scale to [0, 1] and add a batch axis
img1 = image.load_img(img_path, target_size=(224, 224))
img1 = np.array(img1)
img1 = img1.reshape((1, 224, 224, 3))
img1 = img1 / 255

prediction = model.predict(img1)
pred = np.argmax(prediction)
disease = SKIN_CLASSES[pred]
accuracy = prediction[0][pred]
|
from abc import ABC, abstractmethod
from typing import Dict, List, Optional
from omegaconf import DictConfig
from pydantic.dataclasses import dataclass
from pytorch_lightning.callbacks import Callback
from seg_lapa.config_parse.callbacks_available import (
CheckpointConf,
EarlyStopConf,
LearningRateMonitorConf,
LogMediaConf,
)
from seg_lapa.config_parse.conf_utils import validate_config_group_generic
# The Callbacks config cannot be directly initialized because it contains sub-entries for each callback, each
# of which describes a separate class.
# For each of the callbacks, we define a dataclass and use them to init the list of callbacks
@dataclass(frozen=True)
class CallbacksConf(ABC):
name: str
@abstractmethod
def get_callbacks_list(self, *args) -> List:
return []
@dataclass(frozen=True)
class DisabledCallbacksConf(CallbacksConf):
def get_callbacks_list(self) -> List:
return []
@dataclass(frozen=True)
class StandardCallbacksConf(CallbacksConf):
"""Get a dictionary of all the callbacks."""
early_stopping: Optional[Dict] = None
checkpoints: Optional[Dict] = None
log_media: Optional[Dict] = None
lr_monitor: Optional[Dict] = None
def get_callbacks_list(self, exp_dir: str, cfg: DictConfig) -> List[Callback]:
"""Get all available callbacks and the Callback Objects in list
If a callback's entry is not present in the config file, it'll not be output in the list
"""
callbacks_list = []
if self.early_stopping is not None:
early_stop = EarlyStopConf(**self.early_stopping).get_callback()
callbacks_list.append(early_stop)
if self.checkpoints is not None:
checkpoint = CheckpointConf(**self.checkpoints).get_callback(exp_dir)
callbacks_list.append(checkpoint)
if self.log_media is not None:
log_media = LogMediaConf(**self.log_media).get_callback(exp_dir, cfg)
callbacks_list.append(log_media)
if self.lr_monitor is not None:
lr_monitor = LearningRateMonitorConf(**self.lr_monitor).get_callback()
callbacks_list.append(lr_monitor)
return callbacks_list
valid_names = {
"disabled": DisabledCallbacksConf,
"standard": StandardCallbacksConf,
}
def validate_config_group(cfg_subgroup: DictConfig) -> CallbacksConf:
validated_dataclass = validate_config_group_generic(
cfg_subgroup, dataclass_dict=valid_names, config_category="callback"
)
return validated_dataclass
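
# Illustrative config-group sketch (added; the exact fields inside each sub-entry
# are assumptions -- they are defined by the corresponding *Conf dataclasses in
# callbacks_available.py):
#
#   callbacks:
#     name: standard
#     early_stopping: {...}
#     checkpoints: {...}
#     log_media: {...}
#     lr_monitor: {...}
#
# A group with name: disabled (and no sub-entries) selects DisabledCallbacksConf instead.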
|
import pandas as pd
import multiprocessing
import argparse
from functools import partial
import logging
import os, sys, glob, datetime, time, gzip
import collections
from collections import defaultdict
from math import log
from scTE.miniglbase import genelist, glload, location
from scTE.annotation import annoGtf
import subprocess
import numpy as np
import scipy
import anndata as ad
def read_opts(parser):
args = parser.parse_args()
if args.format == "BAM" :
args.parser = "BAM"
elif args.format == "SAM" :
args.parser = "SAM"
else :
logging.error("The input file must be SAM/BAM format: %s !\n" % (args.format))
sys.exit(1)
args.error = logging.critical
args.warn = logging.warning
args.debug = logging.debug
args.info = logging.info
args.argtxt ="\n".join(("Parameter list:", \
"Sample = %s" % (args.out), \
# "Genome = %s" % (args.genome), \
"Reference annotation index = %s" %(args.annoglb[0]), \
"Minimum number of genes required = %s" % (args.genenumber), \
"Minimum number of counts required = %s"% (args.countnumber),\
"Number of threads = %s " % (args.thread),\
))
return args
# def getanno(filename, genefile, tefile, genome, mode):
# form ={'force_tsv': True, 'loc': 'location(chr=column[0], left=column[1], right=column[2])', 'annot': 3}
#
# if genefile == 'default' and tefile == 'default':
# if genome == 'mm10':
# chr_list = ['chr'+ str(i) for i in range(1,20) ] + [ 'chrX','chrY', 'chrM' ]
# if mode == 'exclusive':
# if not os.path.exists('mm10.exclusive.glb'):
# logging.error("Did not find the annotation index mm10.exclusive.glb, you can download it from scTE github (www....) or either give the annotation with -te and -gene option \n" )
# sys.exit(1)
# all_annot = 'mm10.exclusive.glb'
# allelement = set(glload(all_annot)['annot'])
#
# elif mode == 'inclusive':
# if not os.path.exists('mm10.inclusive.glb'):
# logging.error("Did not find the annotation index mm10.inclusive.glb, you can download it from scTE github (www....) or either give the annotation with -te and -gene option \n" )
# sys.exit(1)
# all_annot = 'mm10.inclusive.glb'
# allelement = set(glload(all_annot)['annot'])
#
# elif genome == 'hg38':
# chr_list = ['chr'+ str(i) for i in range(1,23) ] + [ 'chrX','chrY', 'chrM' ]
# if mode == 'exclusive':
# if not os.path.exists('hg38.exclusive.glb'):
# logging.error("Did not find the annotation index hg38.exclusive.glb, you can download it from scTE github (www....) or either give the annotation with -te and -gene option \n" )
# sys.exit(1)
# all_annot = 'hg38.exclusive.glb'
# allelement = set(glload(all_annot)['annot'])
#
# elif mode == 'inclusive':
# if not os.path.exists('hg38.inclusive.glb'):
# logging.error("Did not find the annotation index hg38.inclusive.glb, you can download it from scTE github (www....) or either give the annotation with -te and -gene option \n")
# sys.exit(1)
# all_annot = 'hg38.inclusive.glb'
# allelement = set(glload(all_annot)['annot'])
# else:
# if genome in ['hg38']:
# chr_list = ['chr'+ str(i) for i in range(1,23) ] + [ 'chrX','chrY', 'chrM' ]
#
# elif genome in ['mm10']:
# chr_list = ['chr'+ str(i) for i in range(1,20) ] + [ 'chrX','chrY', 'chrM' ]
#
# if not os.path.isfile(tefile) :
# logging.error("No such file: %s !\n" %(tefile))
# sys.exit(1)
#
# if not os.path.isfile(genefile) :
# logging.error("No such file: %s !\n" % (genefile))
# sys.exit(1)
#
# all_annot = annoGtf(filename, genefile=genefile, tefile=tefile, mode=mode)
# allelement = set(glload(all_annot)['annot'])
#
# return(allelement,chr_list,all_annot)
def Readanno(filename, annoglb): #genome
glannot = glload(annoglb)
allelement = set(glannot['annot'])
# if genome in ['mm10']:
# chr_list = ['chr'+ str(i) for i in range(1,20) ] + [ 'chrX','chrY', 'chrM' ]
# elif genome in ['hg38']:
# chr_list = ['chr'+ str(i) for i in range(1,23) ] + [ 'chrX','chrY', 'chrM' ]
    chr_list = list(set([ k['chr'] for k in glannot['loc']])) # this is useful for custom chromosomes
return(allelement, chr_list, annoglb, glannot)
def checkCBUMI(filename,out,CB,UMI):
if CB == 'CR':
subprocess.run('samtools view %s | head -100| grep "CR:Z:" | wc -l > %s_scTEtmp/o1/testCR.txt'%(filename,out),shell=True)
        time.sleep(2)  # the subprocess needs a moment to finish
        o=open('%s_scTEtmp/o1/testCR.txt'%(out),'r')
        for l in o:
            l=l.strip()
            if int(l) < 100:
                logging.error("The input file %s has no cell barcode information, please make sure the aligner has added the cell barcode key, or set CB to False"%filename)
sys.exit(1)
elif CB == 'CB':
subprocess.run('samtools view %s | head -100| grep "CB:Z:" | wc -l > %s_scTEtmp/o1/testCR.txt'%(filename,out),shell=True)
        time.sleep(2)  # the subprocess needs a moment to finish
        o=open('%s_scTEtmp/o1/testCR.txt'%(out),'r')
        for l in o:
            l=l.strip()
            if int(l) < 100:
                logging.error("The input file %s has no cell barcode information, please make sure the aligner has added the cell barcode key, or set CB to False"%filename)
sys.exit(1)
if UMI == 'UR':
subprocess.run('samtools view %s | head -100| grep "UR:Z:" | wc -l > %s_scTEtmp/o1/testUMI.txt'%(filename,out),shell=True)
time.sleep(2)
        o=open('%s_scTEtmp/o1/testUMI.txt'%(out),'r')
        for l in o:
            l=l.strip()
            if int(l) < 100:
                logging.error("The input file %s has no UR:Z information, please make sure the aligner has added the UMI key, or set UMI to False" % filename)
sys.exit(1)
elif UMI == 'UB':
subprocess.run('samtools view %s | head -100| grep "UB:Z:" | wc -l > %s_scTEtmp/o1/testUMI.txt'%(filename,out),shell=True)
time.sleep(2)
        o=open('%s_scTEtmp/o1/testUMI.txt'%(out),'r')
        for l in o:
            l=l.strip()
            if int(l) < 100:
                logging.error("The input file %s has no UB:Z information, please make sure the aligner has added the UMI key, or set UMI to False" % filename)
sys.exit(1)
def Bam2bed(filename, CB, UMI, out, num_threads):
if not os.path.exists('%s_scTEtmp/o1'%out):
os.system('mkdir -p %s_scTEtmp/o1'%out)
sample=filename.split('/')[-1].replace('.bam','')
if sys.platform == 'darwin': # Mac OSX has BSD sed
switch = '-E'
else:
switch = '-r'
if UMI == 'False':
if CB == 'False':
# Put the sample name in the barcode slot
os.system('samtools view -@ %s %s | awk \'{OFS="\t"}{print $3,$4,$4+100,"%s"}\' | sed %s \'s/^chr//g\'| gzip -c > %s_scTEtmp/o1/%s.bed.gz' % (num_threads, filename, out, switch, out, out))
elif CB == 'CR':
os.system('samtools view -@ %s %s | awk \'{OFS="\t"}{for(i=12;i<=NF;i++)if($i~/CR:Z:/)n=i}{print $3,$4,$4+100,$n}\' | sed %s \'s/CR:Z://g\' | sed %s \'s/^chr//g\' | gzip -c > %s_scTEtmp/o1/%s.bed.gz' % (num_threads, filename, switch, switch, out, out))
elif CB == 'CB':
os.system('samtools view -@ %s %s | awk \'{OFS="\t"}{for(i=12;i<=NF;i++)if($i~/CB:Z:/)n=i}{print $3,$4,$4+100,$n}\' | sed %s \'s/CB:Z://g\' | sed %s \'s/^chr//g\' | gzip -c > %s_scTEtmp/o1/%s.bed.gz' % (num_threads, filename, switch, switch, out, out))
elif UMI == 'UR':
if CB == 'CR':
os.system('samtools view -@ %s %s | awk \'{OFS="\t"}{for(i=12;i<=NF;i++)if($i~/CR:Z:/)n=i}{for(i=12;i<=NF;i++)if($i~/UR:Z:/)m=i}{print $3,$4,$4+100,$n,$m}\' | sed %s \'s/CR:Z://g\' | sed %s \'s/UR:Z://g\'| sed %s \'s/^chr//g\' | awk \'!x[$4$5]++\' | gzip -c > %s_scTEtmp/o1/%s.bed.gz' % (num_threads, filename, switch, switch, switch, out,out))
elif CB == 'CB':
os.system('samtools view -@ %s %s | awk \'{OFS="\t"}{for(i=12;i<=NF;i++)if($i~/CB:Z:/)n=i}{for(i=12;i<=NF;i++)if($i~/UR:Z:/)m=i}{print $3,$4,$4+100,$n,$m}\' | sed %s \'s/CB:Z://g\' | sed %s \'s/UR:Z://g\'| sed %s \'s/^chr//g\' | awk \'!x[$4$5]++\' | gzip -c > %s_scTEtmp/o1/%s.bed.gz' % (num_threads, filename, switch, switch, switch, out,out))
elif UMI == 'UB':
if CB == 'CR':
os.system('samtools view -@ %s %s | awk \'{OFS="\t"}{for(i=12;i<=NF;i++)if($i~/CR:Z:/)n=i}{for(i=12;i<=NF;i++)if($i~/UB:Z:/)m=i}{print $3,$4,$4+100,$n,$m}\' | sed %s \'s/CR:Z://g\' | sed %s \'s/UB:Z://g\'| sed %s \'s/^chr//g\' | awk \'!x[$4$5]++\' | gzip -c > %s_scTEtmp/o1/%s.bed.gz' % (num_threads, filename, switch, switch, switch, out,out))
elif CB == 'CB':
os.system('samtools view -@ %s %s | awk \'{OFS="\t"}{for(i=12;i<=NF;i++)if($i~/CB:Z:/)n=i}{for(i=12;i<=NF;i++)if($i~/UB:Z:/)m=i}{print $3,$4,$4+100,$n,$m}\' | sed %s \'s/CB:Z://g\' | sed %s \'s/UB:Z://g\'| sed %s \'s/^chr//g\' | awk \'!x[$4$5]++\' | gzip -c > %s_scTEtmp/o1/%s.bed.gz' % (num_threads, filename, switch, switch, switch, out,out))
def Para_bam2bed(filename, CB, UMI, out):
if not os.path.exists('%s_scTEtmp/o0'%out):
os.system('mkdir -p %s_scTEtmp/o0'%out)
sample=filename.split('/')[-1].replace('.bam','')
if sys.platform == 'darwin': # Mac OSX has BSD sed
switch = '-E'
else:
switch = '-r'
print(UMI,CB)
if UMI == 'False':
if CB == 'False':
os.system('samtools view %s | awk \'{OFS="\t"}{print $3,$4,$4+100,"%s"}\' | sed %s \'s/^chr//g\' | gzip > %s_scTEtmp/o0/%s.bed.gz'%(filename, sample, switch, out, sample))
elif CB == 'CR':
os.system('samtools view %s | awk \'{OFS="\t"}{for(i=12;i<=NF;i++)if($i~/CR:Z:/)n=i}{print $3,$4,$4+100,$n,$m}\' | sed %s \'s/CR:Z://g\' | sed %s \'s/^chr//g\' | gzip > %s_scTEtmp/o0/%s.bed.gz'%(filename, switch, switch, out,sample))
elif CB == 'CB':
os.system('samtools view %s | awk \'{OFS="\t"}{for(i=12;i<=NF;i++)if($i~/CB:Z:/)n=i}{print $3,$4,$4+100,$n,$m}\' | sed %s \'s/CB:Z://g\' | sed %s \'s/^chr//g\' | gzip > %s_scTEtmp/o0/%s.bed.gz'%(filename, switch, switch, out,sample))
elif UMI == 'UR':
if CB == 'CR':
os.system('samtools view %s | awk \'{OFS="\t"}{for(i=12;i<=NF;i++)if($i~/CR:Z:/)n=i}{for(i=12;i<=NF;i++)if($i~/UR:Z:/)m=i}{print $3,$4,$4+100,$n,$m}\' | sed %s \'s/CR:Z://g\' | sed %s \'s/UR:Z://g\' | sed %s \'s/^chr//g\' | awk \'!x[$4$5]++\' | gzip > %s_scTEtmp/o0/%s.bed.gz'%(filename, switch, switch, switch, out,sample))
elif CB == 'CB':
os.system('samtools view %s | awk \'{OFS="\t"}{for(i=12;i<=NF;i++)if($i~/CB:Z:/)n=i}{for(i=12;i<=NF;i++)if($i~/UR:Z:/)m=i}{print $3,$4,$4+100,$n,$m}\' | sed %s \'s/CB:Z://g\' | sed %s \'s/UR:Z://g\' | sed %s \'s/^chr//g\' | awk \'!x[$4$5]++\' | gzip > %s_scTEtmp/o0/%s.bed.gz'%(filename, switch, switch, switch, out,sample))
elif UMI == 'UB':
if CB == 'CR':
os.system('samtools view %s | awk \'{OFS="\t"}{for(i=12;i<=NF;i++)if($i~/CR:Z:/)n=i}{for(i=12;i<=NF;i++)if($i~/UB:Z:/)m=i}{print $3,$4,$4+100,$n,$m}\' | sed %s \'s/CR:Z://g\' | sed %s \'s/UB:Z://g\' | sed %s \'s/^chr//g\' | awk \'!x[$4$5]++\' | gzip > %s_scTEtmp/o0/%s.bed.gz'%(filename, switch, switch, switch, out,sample))
elif CB == 'CB':
os.system('samtools view %s | awk \'{OFS="\t"}{for(i=12;i<=NF;i++)if($i~/CB:Z:/)n=i}{for(i=12;i<=NF;i++)if($i~/UB:Z:/)m=i}{print $3,$4,$4+100,$n,$m}\' | sed %s \'s/CB:Z://g\' | sed %s \'s/UB:Z://g\' | sed %s \'s/^chr//g\' | awk \'!x[$4$5]++\' | gzip > %s_scTEtmp/o0/%s.bed.gz'%(filename, switch, switch, switch, out,sample))
def splitAllChrs(chromosome_list, filename, genenumber, countnumber, UMI=True):
'''
**Purpose**
Split the data into separate beds, and count up all the times each barcode appears
This variant uses more memory, but does it all at the same time and gets the filtered whitelist for free
**Arguments**
chromosome_list
List of chromosome names
filename (Required)
filename stub to use for tmp files
genenumber (Required)
Minimum number of genes expressed required for a cell to pass filtering
countnumber (Required)
Minimum number of counts required for a cell to pass filtering.
UMI (optional, default=True)
use the UMI
**Returns**
The barcode whitelist
'''
if not os.path.exists('%s_scTEtmp/o2' % filename):
os.system('mkdir -p %s_scTEtmp/o2'%filename)
chromosome_list = set([c.replace('chr', '') for c in chromosome_list])
file_handle_in = gzip.open('%s_scTEtmp/o1/%s.bed.gz' % (filename,filename), 'rt')
file_handles_out = {chr: gzip.open('%s_scTEtmp/o2/%s.chr%s.bed.gz' % (filename,filename,chr), 'wt') for chr in chromosome_list}
CRs = defaultdict(int)
if UMI:
uniques = {chrom: set([]) for chrom in chromosome_list}
# Make a BED for each chromosome
for line in file_handle_in:
t = line.strip().split('\t')
chrom = t[0].replace('chr', '') # strip chr
if chrom not in chromosome_list: # remove the unusual chromosomes
# Force chrMT -> chrM
if chrom == 'MT':
chrom = 'M'
else:
continue
if UMI:
if line in uniques[chrom]:
continue
uniques[chrom].add(line)
CRs[t[3]] += 1
else:
CRs[t[3]] += 1
file_handles_out[chrom].write(line)
[file_handles_out[k].close() for k in file_handles_out]
file_handle_in.close()
# Because this does it all in one go, you can just filter the whitelist here now, and don't need the .count. file;
sortcb = sorted(CRs.items(), key=lambda item:item[1], reverse=True) # Sorts by the count;
if not countnumber:
mincounts = 2 * genenumber
else:
mincounts = countnumber
whitelist = []
for n, k in enumerate(sortcb):
if k[1] < mincounts:
break
whitelist.append(k[0])
return set(whitelist)
def filterCRs(filename, genenumber, countnumber):
CRs = defaultdict(int)
for f in glob.glob('%s_scTEtmp/o2/%s*.count.gz'%(filename,filename)):
o = gzip.open(f,'rt')
for l in o:
t = l.strip().split('\t')
CRs[t[0]] += int(t[1])
o.close()
sortcb=sorted(CRs.items(),key=lambda item:item[1],reverse=True)
if not countnumber:
mincounts = 2* genenumber
else:
mincounts = countnumber
whitelist=[]
for n,k in enumerate(sortcb):
if k[1] < mincounts:
break
whitelist.append(k[0])
#print(len(whitelist))
return set(whitelist)
def splitChr(chr, filename, CB, UMI):
if not os.path.exists('%s_scTEtmp/o2'%filename):
os.system('mkdir -p %s_scTEtmp/o2'%filename)
chr=chr.replace('chr','')
if CB == 'CR' or CB == 'CB': CB = True
else: CB = False
if UMI == 'UR' or UMI == 'UB': UMI = True
else: UMI= False
if not CB: # C1-style data is a cell per BAM, so no barcode;
if not UMI:
if chr == '1':
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep -v ^1\'[0-9]\' | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
elif chr == '2':
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep -v ^2\'[0-9]\' | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
elif chr == '3':
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep -v ^3\'[0-9]\' | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
else:
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
else:
if chr == '1':
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep -v ^1\'[0-9]\' | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
elif chr == '2':
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep -v ^2\'[0-9]\' | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
elif chr == '3':
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep -v ^3\'[0-9]\' | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
else:
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
else:
if not UMI: # did not remove the potential PCR duplicates for scRNA-seq
if chr == '1':
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep -v ^1\'[0-9]\' | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
elif chr == '2':
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep -v ^2\'[0-9]\' | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
elif chr == '3':
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep -v ^3\'[0-9]\' | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
else:
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
else:
if chr == '1':
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep -v ^1\'[0-9]\' | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
elif chr == '2':
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep -v ^2\'[0-9]\' | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
elif chr == '3':
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep -v ^3\'[0-9]\' | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
else:
os.system('gunzip -c -f %s_scTEtmp/o1/%s.bed.gz | grep ^%s | gzip -c > %s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr,filename,filename,chr))
CRs = defaultdict(int)
o = gzip.open('%s_scTEtmp/o2/%s.chr%s.bed.gz'%(filename,filename,chr),'rt')
for l in o:
t = l.strip().split('\t')
CRs[t[3]] += 1
o.close()
o = gzip.open('%s_scTEtmp/o2/%s.chr%s.count.gz'%(filename,filename,chr),'wt')
for k in CRs:
o.write('%s\t%s\n'%(k,CRs[k]))
o.close()
def align(chr, filename, all_annot, glannot, whitelist): #CB
'''
**Purpose**
For each read, align it to the index and assign a TE, gene.
This is the speed critical part.
'''
s1 = time.time()
chr = 'chr' + chr
if not os.path.exists('%s_scTEtmp/o3'%filename):
os.system('mkdir -p %s_scTEtmp/o3'%filename)
if not glannot: # Load separately for the multicore pipeline, share the index for the single core pipeline
glannot = glload(all_annot)
# Only keep the glbase parts we need.
buckets = glannot.buckets[chr.replace('chr', '')]
all_annot = glannot.linearData
oh = gzip.open('%s_scTEtmp/o2/%s.%s.bed.gz' % (filename, filename, chr), 'rt')
res = {}
for line in oh:
t = line.strip().split('\t')
barcode = t[3]
if barcode not in whitelist:
continue
if barcode not in res:
res[barcode] = defaultdict(int)
#chrom = t[0].replace('chr', '') # Don't need as each align is already split for each chrom;
left = int(t[1])
rite = int(t[2])
#loc = location(chr=chrom, left=left, right=rite)
left_buck = ((left-1)//10000) * 10000
right_buck = ((rite)//10000) * 10000
buckets_reqd = range(left_buck, right_buck+10000, 10000)
if buckets_reqd:
loc_ids = set()
loc_ids_update = loc_ids.update
# get the ids reqd.
[loc_ids_update(buckets[buck]) for buck in buckets_reqd if buck in buckets]
result = [all_annot[index]['annot'] for index in loc_ids if (rite >= all_annot[index]['loc'].loc['left'] and left <= all_annot[index]['loc'].loc["right"])]
if result:
for gene in result:
res[barcode][gene] += 1
oh.close()
oh = gzip.open('%s_scTEtmp/o3/%s.%s.bed.gz' % (filename,filename,chr), 'wt')
for bc in sorted(res):
for gene in sorted(res[bc]):
oh.write('%s\t%s\t%s\n' % (bc, gene, res[bc][gene]))
oh.close()
def Countexpression(filename, allelement, genenumber, cellnumber, hdf5):
gene_seen = allelement
whitelist={}
o = gzip.open('%s_scTEtmp/o4/%s.bed.gz'%(filename, filename), 'rt')
for n,l in enumerate(o):
t = l.strip().split('\t')
if t[0] not in whitelist:
whitelist[t[0]] = 0
whitelist[t[0]] += 1
o.close()
CRlist = []
sortcb = sorted(whitelist.items(), key=lambda item:item[1], reverse=True)
for n,k in enumerate(sortcb):
if k[1] < genenumber:
break
if n >= cellnumber:
break
CRlist.append(k[0])
CRlist = set(CRlist)
res = {}
genes_oh = gzip.open('%s_scTEtmp/o4/%s.bed.gz' % (filename,filename), 'rt')
for n, l in enumerate(genes_oh):
t = l.strip().split('\t')
if t[0] not in CRlist:
continue
if t[0] not in res:
res[t[0]] = {}
if t[1] not in res[t[0]]:
res[t[0]][t[1]] = 0
res[t[0]][t[1]] += int(t[2])
genes_oh.close()
s=time.time()
# Save out the final file
gene_seen = list(gene_seen) # Do the sort once;
gene_seen.sort()
#==== save results =====
if not hdf5: # save as csv
res_oh = open('%s.csv'%filename, 'w')
res_oh.write('barcodes,')
res_oh.write('%s\n' % (','.join([str(i) for i in gene_seen])))
for k in sorted(res):
l = ["0"] * len(gene_seen) # Avoid all the appends
for idx, gene in enumerate(gene_seen):
if gene in res[k]:
l[idx] = str(res[k][gene])
res_oh.write('%s,%s\n' % (k, ','.join(l)))
res_oh.close()
else: # save as hdf5
data = []
CBs = []
for k in sorted(res):
l = ["0"] * len(gene_seen) # Avoid all the appends
for idx, gene in enumerate(gene_seen):
if gene in res[k]:
l[idx] = str(res[k][gene])
data.append(l)
CBs.append(k)
obs = pd.DataFrame(index = CBs)
var = pd.DataFrame(index = gene_seen)
adata = ad.AnnData(np.asarray(data),var = var,obs = obs)
adata.X = scipy.sparse.csr_matrix(adata.X)
adata.write('%s.h5ad'%filename)
#========================
return len(res), genenumber, filename
def timediff(timestart, timestop):
t = (timestop-timestart)
time_day = t.days
s_time = t.seconds
ms_time = t.microseconds / 1000000
usedtime = int(s_time + ms_time)
time_hour = int(usedtime / 60 / 60 )
time_minute = int((usedtime - time_hour * 3600 ) / 60 )
time_second = int(usedtime - time_hour * 3600 - time_minute * 60 )
retstr = "%dd %dh %dm %ds" %(time_day, time_hour, time_minute, time_second,)
return retstr
|
from typing import Any
from boa3.builtin import interop, public, type
@public
def call_flags_all() -> interop.contract.CallFlags:
return interop.contract.CallFlags.ALL
@public
def main(scripthash: type.UInt160, method: str, args: list) -> Any:
return interop.contract.call_contract(scripthash, method, args)
|
#!/usr/bin/env python3
# encoding: utf-8
"""
booleanTest.py
Created by Jakub Konka on 2010-11-21.
Copyright (c) 2010 Uni of Strathclyde. All rights reserved.
"""
import sys
import os
def boolean_test(anything):
'''Tests whether the statement is True or False
Keyword arguments:
anything -- the statement to be evaluated
Returns: Boolean
'''
if anything:
print("The statement is True.\n")
else:
print("The statement is False.\n")
if __name__ == '__main__':
boolean_test([1])
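    # A few more illustrative calls: non-empty containers and non-zero numbers
    # are truthy, while empty containers, zero and None are falsy.
    boolean_test(0)         # prints: The statement is False.
    boolean_test("")        # prints: The statement is False.
    boolean_test({"a": 1})  # prints: The statement is True.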
|
from datadog_checks.base import AgentCheck
from datadog_checks.base.errors import CheckException
from datadog_checks.utils.subprocess_output import get_subprocess_output
import json
EXPECTED_RESPONSE_CODE = "NO_ERROR"
class LighthouseCheck(AgentCheck):
def check(self, instance):
lighthouse_url = instance.get('url')
lighthouse_name = instance.get('name')
if not lighthouse_url or not lighthouse_name:
self.log.error("missing instance url or name")
raise CheckException("missing lighthouse instance url or name, please fix yaml")
cmd = ["lighthouse",
lighthouse_url,
"--output",
"json",
"--quiet",
"--chrome-flags='--headless'"]
json_string, error_message, exit_code = LighthouseCheck._get_lighthouse_report(cmd, self.log, False)
# check for error since we have raise_on_empty_output set to False
if exit_code > 0:
self.log.error("lighthouse subprocess error {0} exit code {1} for url: {2}"
.format(error_message, exit_code, lighthouse_url)
)
raise CheckException(json_string, error_message, exit_code)
try:
data = json.loads(json_string)
except Exception as e:
self.log.warn("lighthouse response JSON different than expected for url: {0}".format(lighthouse_url))
raise CheckException(error_message, exit_code, e)
if data["runtimeError"]["code"] == EXPECTED_RESPONSE_CODE:
score_accessibility = round(data["categories"]["accessibility"]["score"] * 100)
score_best_practices = round(data["categories"]["best-practices"]["score"] * 100)
score_performance = round(data["categories"]["performance"]["score"] * 100)
score_pwa = round(data["categories"]["pwa"]["score"] * 100)
score_seo = round(data["categories"]["seo"]["score"] * 100)
else:
err_code = data["runtimeError"]["code"]
err_msg = data["runtimeError"]["message"]
self.log.warn("not collecting lighthouse metrics for url {0} runtimeError code {1} message {2}"
.format(lighthouse_url, err_code, err_msg)
)
return
# add tags
tags = instance.get('tags', [])
        if not isinstance(tags, list):
            self.log.warning('The tags list in the lighthouse check is not configured properly')
tags = []
tags.append("url:{0}".format(lighthouse_url))
tags.append("name:{0}".format(lighthouse_name))
self.gauge("lighthouse.accessibility", score_accessibility, tags=tags)
self.gauge("lighthouse.best_practices", score_best_practices, tags=tags)
self.gauge("lighthouse.performance", score_performance, tags=tags)
self.gauge("lighthouse.pwa", score_pwa, tags=tags)
self.gauge("lighthouse.seo", score_seo, tags=tags)
@staticmethod
def _get_lighthouse_report(command, logger, raise_on_empty=False):
json, err_msg, exit_code = get_subprocess_output(command, logger, raise_on_empty_output=raise_on_empty)
return json, err_msg, exit_code
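# A minimal sketch of the instance configuration this check reads in check()
# above (url, name, tags); the values below are illustrative only.  In
# production the Agent builds the check from its YAML config, but the same
# shape can be passed directly when experimenting in a test harness.
if __name__ == "__main__":
    example_instance = {
        "url": "https://www.example.org",
        "name": "example-site",
        "tags": ["env:test"],
    }
    check = LighthouseCheck("lighthouse", {}, [example_instance])
    # Actually running the check requires the lighthouse CLI on PATH and is
    # normally driven by the Agent or the datadog_checks test harness:
    # check.check(example_instance)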
|
# Interview Question 08.01. Three Steps Problem
# Three Steps Problem: a child is running up a staircase with n steps and can
# hop either 1, 2, or 3 steps at a time. Implement a method to count how many
# possible ways the child can run up the stairs. The result may be very large,
# so return it modulo 1000000007.
# Example 1:
# Input: n = 3
# Output: 4
# Explanation: there are four ways to climb the stairs.
# Example 2:
# Input: n = 5
# Output: 13
# Constraints:
# n is in the range [1, 1000000]
class Solution:
    # Memoised counts shared across calls; data[i] is the number of ways to climb i steps.
    data = [1, 1, 2, 4]
    def waysToStep(self, n: int) -> int:
        mod_num = 1000000007
        if n == 0:
            return 0
        ld = len(self.data)
        if n < ld:
            return self.data[n]
        i = ld
        while i <= n:
            self.data.append(
                (self.data[i-1] + self.data[i-2] + self.data[i-3]) % mod_num)
            i += 1
        return self.data[n] % mod_num
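# Quick sanity checks matching the examples in the problem statement above.
if __name__ == '__main__':
    solver = Solution()
    assert solver.waysToStep(3) == 4
    assert solver.waysToStep(5) == 13
    print(solver.waysToStep(10))  # 274 ways for a 10-step staircase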
|
import FWCore.ParameterSet.Config as cms
from DQMOffline.PFTau.candidateBenchmark_cfi import *
from DQMOffline.PFTau.pfCandidateBenchmark_cfi import *
from DQMOffline.PFTau.metBenchmark_cfi import *
DQMOfflineParticleFlowSequence = cms.Sequence (
candidateBenchmark +
pfCandidateBenchmark +
metBenchmark +
matchMetBenchmark
)
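# A hedged sketch of how this sequence is typically scheduled in a CMSSW
# configuration; the "process" object and the path name are illustrative only:
#
#     process.pfDQMOfflinePath = cms.Path(DQMOfflineParticleFlowSequence)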
|
import logging
from sqlalchemy import Column, INT, TEXT, BOOLEAN, REAL
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy_utc import UtcDateTime
from pajbot import utils
from pajbot.managers.db import Base
from pajbot.managers.songrequest_queue_manager import SongRequestQueueManager
from pajbot.streamhelper import StreamHelper
log = logging.getLogger("pajbot")
class SongrequestQueue(Base):
__tablename__ = "songrequest_queue"
id = Column(INT, primary_key=True)
video_id = Column(TEXT, ForeignKey("songrequest_song_info.video_id", ondelete="CASCADE"), nullable=False)
date_added = Column(UtcDateTime(), nullable=False)
skip_after = Column(INT, nullable=True) # skipped after
requested_by_id = Column(INT, ForeignKey("user.id"), nullable=True)
date_resumed = Column(UtcDateTime(), nullable=True)
played_for = Column(REAL, default=0, nullable=False)
song_info = relationship("SongRequestSongInfo", foreign_keys=[video_id], lazy="joined")
requested_by = relationship("User", foreign_keys=[requested_by_id], lazy="joined")
def __init__(self, **options):
super().__init__(**options)
if self.skip_after and self.skip_after < 0:
# Make sure skip_after cannot be a negative number
self.skip_after = None
def jsonify(self):
return {
"id": self.id,
"video_id": self.video_id,
"date_added": self.date_added,
"skip_after": self.skip_after,
"playing": self.playing,
"current_song_time": self.current_song_time,
"requested_by": self.requested_by.username_raw if self.requested_by_id else None,
}
def webjsonify(self):
return {
"song_info": self.song_info.jsonify(),
"requested_by": self.requested_by.username_raw if self.requested_by_id else "Backup Playlist",
"current_song_time": self.current_song_time,
"database_id": self.id,
"skip_after": self.skip_after,
"formatted_duration": self.formatted_duration,
}
def purge(self, db_session):
SongRequestQueueManager.remove_song_id(self.id)
db_session.delete(self)
@property
def formatted_duration(self):
m, s = divmod(self.duration, 60)
m = int(m)
s = int(s)
return f"{m:02d}:{s:02d}"
def queue_and_playing_in(self, db_session):
all_song_ids_before_current = SongRequestQueueManager.songs_before(self.id, "song-queue")
if SongRequestQueueManager.song_playing_id:
all_song_ids_before_current.append(SongRequestQueueManager.song_playing_id)
queued_unordered_songs = SongrequestQueue.from_list_id(db_session, all_song_ids_before_current)
time = 0
for song in queued_unordered_songs:
time += song.time_left if song.playing else song.duration
if song.playing:
log.info(f"Song has {song.time_left}")
return len(queued_unordered_songs), time
@hybrid_property
def playing(self):
return str(self.id) == str(SongRequestQueueManager.song_playing_id)
@hybrid_property
def time_left(self):
time_left = self.duration - self.current_song_time
return time_left if time_left > 0 else 0
@hybrid_property
def current_song_time(self):
return (
self.played_for + ((utils.now() - self.date_resumed).total_seconds() if self.date_resumed else 0)
if bool(self.playing)
else 0
)
@hybrid_property
def duration(self):
return self.skip_after if self.skip_after else self.song_info.duration
def move_song(self, to_id):
if not self.requested_by:
return
SongRequestQueueManager.move_song(self.id, to_id)
def to_histroy(self, db_session, skipped_by_id=None):
stream_id = StreamHelper.get_current_stream_id()
history = SongrequestHistory.create(
db_session,
stream_id or None,
self.video_id,
self.requested_by.id if self.requested_by else None,
skipped_by_id,
self.skip_after,
)
self.purge(db_session)
return history
@hybrid_property
def link(self):
return f"youtu.be/{self.video_id}"
@staticmethod
def from_list_id(db_session, _ids):
return db_session.query(SongrequestQueue).populate_existing().filter(SongrequestQueue.id.in_(_ids)).all()
@staticmethod
def from_id(db_session, _id):
return db_session.query(SongrequestQueue).populate_existing().filter_by(id=_id).one_or_none()
@staticmethod
def pop_next_song(db_session, use_backup=True):
song = None
while song is None:
next_id = SongRequestQueueManager.get_next_song(use_backup)
SongRequestQueueManager.remove_song_id(next_id)
if not next_id:
return None
song = db_session.query(SongrequestQueue).populate_existing().filter_by(id=next_id).one_or_none()
return song
@staticmethod
def create(db_session, video_id, skip_after, requested_by_id, queue=None, backup=False):
songrequestqueue = SongrequestQueue(
video_id=video_id, date_added=utils.now(), skip_after=skip_after, requested_by_id=requested_by_id
)
db_session.add(songrequestqueue)
db_session.commit()
SongRequestQueueManager.inset_song(songrequestqueue.id, "backup-song-queue" if backup else "song-queue", queue)
return songrequestqueue
@staticmethod
def get_current_song(db_session):
return (
db_session.query(SongrequestQueue)
.populate_existing()
.filter_by(id=SongRequestQueueManager.song_playing_id)
.one_or_none()
if SongRequestQueueManager.song_playing_id
else None
)
@staticmethod
def get_next_song(db_session):
song = None
while song is None:
next_id = SongRequestQueueManager.get_next_song()
if not next_id:
return None
song = db_session.query(SongrequestQueue).populate_existing().filter_by(id=next_id).one_or_none()
if not song:
SongRequestQueueManager.remove_song_id(next_id)
return song
@staticmethod
def all_by_video_id(db_session, _video_id):
return db_session.query(SongrequestQueue).populate_existing().filter_by(video_id=_video_id).all()
@staticmethod
def pruge_videos(db_session, _video_id):
all_songs = SongrequestQueue.all_by_video_id(db_session, _video_id)
for song in all_songs:
song.purge(db_session)
@staticmethod
def clear_backup_songs(db_session):
SongRequestQueueManager.delete_backup_songs()
return db_session.query(SongrequestQueue).filter_by(requested_by=None).delete(synchronize_session="evaluate")
@staticmethod
def load_backup_songs(db_session, songs, youtube):
for song in songs:
song_info = SongRequestSongInfo.create_or_get(db_session, song, youtube)
if song_info:
SongrequestQueue.create(db_session, song, None, None, backup=True)
@staticmethod
def get_playlist(db_session, limit=None, as_json=True):
while True:
queued_song_ids = SongRequestQueueManager.get_next_songs(limit=limit, queue="song-queue")
if not queued_song_ids:
return []
queued_unordered_songs = SongrequestQueue.from_list_id(db_session, queued_song_ids)
if len(queued_song_ids) == len(queued_unordered_songs):
break
song_ids = [song.id for song in queued_unordered_songs]
for song_id in queued_song_ids:
if song_id not in song_ids:
SongRequestQueueManager.remove_song_id(song_id)
queued_songs = SongrequestQueue.sort(queued_song_ids, queued_unordered_songs)
if not as_json:
return queued_songs
songs = []
for song in queued_songs:
songs.append(song.webjsonify())
return songs
@staticmethod
def get_backup_playlist(db_session, limit=None, as_json=True):
while True:
queued_song_ids = SongRequestQueueManager.get_next_songs(limit=limit, queue="backup-song-queue")
if not queued_song_ids:
return []
queued_unordered_songs = SongrequestQueue.from_list_id(db_session, queued_song_ids)
if len(queued_song_ids) == len(queued_unordered_songs):
break
song_ids = [song.id for song in queued_unordered_songs]
for song_id in queued_song_ids:
if song_id not in song_ids:
SongRequestQueueManager.remove_song_id(song_id)
queued_songs = SongrequestQueue.sort(queued_song_ids, queued_unordered_songs)
if not as_json:
return queued_songs
songs = []
for song in queued_songs:
songs.append(song.webjsonify())
return songs
@staticmethod
def sort(order, unordered):
queued_songs = []
for song in unordered:
queued_songs.insert(order.index(song.id), song)
return queued_songs
class SongrequestHistory(Base):
__tablename__ = "songrequest_history"
id = Column(INT, primary_key=True)
stream_id = Column(INT, nullable=True)
video_id = Column(TEXT, ForeignKey("songrequest_song_info.video_id"), nullable=False)
date_finished = Column(UtcDateTime(), nullable=False)
requested_by_id = Column(INT, ForeignKey("user.id"), nullable=True)
skipped_by_id = Column(INT, ForeignKey("user.id"), nullable=True)
skip_after = Column(INT, nullable=True)
song_info = relationship("SongRequestSongInfo", foreign_keys=[video_id], lazy="joined")
requested_by = relationship("User", foreign_keys=[requested_by_id], lazy="joined")
skipped_by = relationship("User", foreign_keys=[skipped_by_id], lazy="joined")
def jsonify(self):
return {
"id": self.id,
"stream_id": self.stream_id,
"video_id": self.video_id,
"date_finished": str(self.date_finished),
"requested_by": self.requested_by.username_raw if self.requested_by_id else None,
"skipped_by": self.skipped_by.username_raw if self.skipped_by_id else None,
"skip_after": self.skip_after,
}
def webjsonify(self):
return {
"song_info": self.song_info.jsonify(),
"requested_by": self.requested_by.username_raw if self.requested_by_id else "Backup Playlist",
"skipped_by": self.skipped_by.username_raw if self.skipped_by_id else None,
"database_id": self.id,
"date_finished": str(self.date_finished),
"skip_after": self.skip_after,
"formatted_duration": self.formatted_duration,
}
@property
def formatted_duration(self):
m, s = divmod(self.duration, 60)
m = int(m)
s = int(s)
return f"{m:02d}:{s:02d}"
@hybrid_property
def link(self):
return f"youtu.be/{self.video_id}"
@hybrid_property
def duration(self):
return self.skip_after if self.skip_after else self.song_info.duration
def requeue(self, db_session, requested_by):
return SongrequestQueue.create(db_session, self.video_id, self.skip_after, requested_by)
@staticmethod
def create(db_session, stream_id, video_id, requested_by_id, skipped_by_id, skip_after):
songrequesthistory = SongrequestHistory(
stream_id=stream_id,
video_id=video_id,
date_finished=utils.now(),
requested_by_id=requested_by_id,
skipped_by_id=skipped_by_id,
skip_after=skip_after,
)
db_session.add(songrequesthistory)
return songrequesthistory
@staticmethod
def get_previous(db_session, position):
songs = (
db_session.query(SongrequestHistory)
.populate_existing()
.order_by(SongrequestHistory.id.desc())
.limit(position + 1)
.all()
)
if len(songs) == position + 1:
return songs[position]
@staticmethod
def insert_previous(db_session, requested_by_id, position=0):
previous = SongrequestHistory.get_previous(db_session, position)
if not previous:
return False
return SongrequestQueue.create(db_session, previous.video_id, previous.skip_after, requested_by_id, 0)
@staticmethod
def get_list(db_session, size):
return (
db_session.query(SongrequestHistory)
.populate_existing()
.order_by(SongrequestHistory.id.desc())
.limit(size)
.all()
)
@staticmethod
def from_id(db_session, _id):
return db_session.query(SongrequestHistory).populate_existing().filter_by(id=_id).one_or_none()
@staticmethod
def get_history(db_session, limit):
played_songs = (
db_session.query(SongrequestHistory)
.populate_existing()
.filter(SongrequestHistory.song_info.has(banned=False))
.order_by(SongrequestHistory.id.desc())
.limit(limit)
.all()
)
songs = []
for song in played_songs:
songs.append(song.webjsonify())
return songs
class SongRequestSongInfo(Base):
__tablename__ = "songrequest_song_info"
video_id = Column(TEXT, primary_key=True, autoincrement=False)
title = Column(TEXT, nullable=False)
duration = Column(INT, nullable=False)
default_thumbnail = Column(TEXT, nullable=False)
banned = Column(BOOLEAN, default=False)
favourite = Column(BOOLEAN, default=False)
def jsonify(self):
return {
"video_id": self.video_id,
"title": self.title,
"duration": self.duration,
"default_thumbnail": self.default_thumbnail,
"banned": self.banned,
"favourite": self.favourite,
"formatted_duration": self.formatted_duration,
}
@property
def formatted_duration(self):
m, s = divmod(self.duration, 60)
m = int(m)
s = int(s)
return f"{m:02d}:{s:02d}"
@staticmethod
def create(db_session, video_id, title, duration, default_thumbnail):
songinfo = SongRequestSongInfo(
video_id=video_id, title=title, duration=duration, default_thumbnail=default_thumbnail
)
db_session.add(songinfo)
return songinfo
@staticmethod
def create_or_get(db_session, video_id, youtube):
song_info = db_session.query(SongRequestSongInfo).populate_existing().filter_by(video_id=video_id).one_or_none()
if song_info:
return song_info
import isodate
if youtube is None:
log.warning("youtube was not initialized")
return False
try:
video_response = youtube.videos().list(id=str(video_id), part="snippet,contentDetails").execute()
        except Exception:
return False
if not video_response.get("items", []):
log.warning(f"Got no valid responses for {video_id}")
return False
video = video_response["items"][0]
title = video["snippet"]["title"]
duration = int(isodate.parse_duration(video["contentDetails"]["duration"]).total_seconds())
default_thumbnail = video["snippet"]["thumbnails"]["default"]["url"]
return SongRequestSongInfo.create(db_session, video_id, title, duration, default_thumbnail)
@staticmethod
def get(db_session, video_id):
return db_session.query(SongRequestSongInfo).populate_existing().filter_by(video_id=video_id).one_or_none()
@staticmethod
def get_banned(db_session):
return (
db_session.query(SongRequestSongInfo)
.populate_existing()
.filter_by(banned=True)
.order_by(SongRequestSongInfo.video_id)
.all()
)
@staticmethod
def get_favourite(db_session):
return (
db_session.query(SongRequestSongInfo)
.populate_existing()
.filter_by(favourite=True)
.order_by(SongRequestSongInfo.video_id)
.all()
)
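# SongRequestSongInfo.create_or_get() above relies on isodate to convert the
# ISO-8601 duration strings returned by the YouTube Data API (e.g. "PT3M20S")
# into whole seconds.  A minimal standalone illustration (the example durations
# are illustrative only):
if __name__ == "__main__":
    import isodate
    assert int(isodate.parse_duration("PT3M20S").total_seconds()) == 200
    assert int(isodate.parse_duration("PT1H2M3S").total_seconds()) == 3723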
|
from dataclasses import dataclass
from typing import Union
from postmanparser.description import Description
@dataclass
class KeyVal:
key: str
value: str
disabled: bool = False
description: Union[Description, None, str] = None
@classmethod
def parse(cls, data: dict):
description = data.get("description")
if isinstance(description, dict):
description = Description.parse(description)
return cls(
data.get("key", ""),
data.get("value", ""),
disabled=data.get("disabled", False),
description=description,
)
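# A short usage sketch for KeyVal.parse(); the payload below mirrors the shape
# of a Postman key/value entry and its values are illustrative only.
if __name__ == "__main__":
    kv = KeyVal.parse({"key": "Content-Type", "value": "application/json", "disabled": False})
    print(kv.key, kv.value, kv.disabled)  # Content-Type application/json False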
|