| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
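The header above describes the dataset columns and the range of values each takes. As a minimal sketch (assuming the rows are available as a Parquet file with exactly these columns; the file name is hypothetical), the dump can be loaded and filtered by quality score like this:

```python
import pandas as pd

# Hypothetical file name; the actual location of the dump is not given in this document.
df = pd.read_parquet("code_dataset.parquet")

# Keep only higher-quality samples; int_score ranges 0-5 per the header above.
high_quality = df[df["int_score"] >= 3]
print(high_quality[["max_stars_repo_path", "max_stars_repo_name", "score"]].head())
```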
setup.py
|
Pooya448/leap
| 55
|
12780951
|
<gh_stars>10-100
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from torch.utils.cpp_extension import BuildExtension
import numpy
# Get the numpy include directory.
numpy_include_dir = numpy.get_include()
# efficient mesh extraction (Occupancy networks: Learning 3d reconstruction in function space, CVPR 2019)
mise_module = Extension(
'leap.tools.libmise.mise',
sources=[
'leap/tools/libmise/mise.pyx'
],
)
# occupancy checks needed for training
libmesh_module = Extension(
'leap.tools.libmesh.triangle_hash',
sources=[
'leap/tools/libmesh/triangle_hash.pyx'
],
libraries=['m'], # Unix-like specific
include_dirs=[numpy_include_dir]
)
ext_modules = [
libmesh_module,
mise_module,
]
setup(
name='leap',
version='0.0.1',
ext_modules=cythonize(ext_modules),
cmdclass={
'build_ext': BuildExtension
},
url='https://neuralbodies.github.io/LEAP',
license='',
author='<NAME>',
author_email='<EMAIL>',
description=''
)
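# Hedged usage note (an assumption, not stated in this file): with a setup script like this,
# the Cython extensions are typically compiled in place with
#   python setup.py build_ext --inplace
# which builds the `mise` and `triangle_hash` modules next to their .pyx sources.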
| 1.445313
| 1
|
srcT/DataStruct/PrepDeepFix.py
|
elishatofunmi/macer
| 7
|
12780952
|
<filename>srcT/DataStruct/PrepDeepFix.py
import pandas as pd
from srcT.Common import ConfigFile as CF
def addDummyCols(cols):
df = pd.read_csv(CF.fnameDeepFix_Test, encoding="ISO-8859-1")
if 'id' not in df.columns:
df['id'] = df['code_id']
del df['code_id']
if 'sourceText' not in df.columns:
df['sourceText'] = df['code']
del df['code']
df['targetText'] = ""
df['sourceLineText'] = ""
df['targetLineText'] = ""
df['sourceLineAbs'] = ""
df['targetLineAbs'] = ""
df['lineNums_Text'] = ""
df.to_csv(CF.fnameDeepFix_Test, index=False)
# sourceAbs targetAbs sourceLineText targetLineText sourceLineAbs targetLineAbs lineNums_Text lineNums_Abs diffText_ins diffText_del diffText_repl diffAbs_ins diffAbs_del errSet_diffs sourceErrorPrutor sourceErrorClangParse ErrSet class newErrSet
if __name__=='__main__':
cols = []
addDummyCols(cols)
| 2.96875
| 3
|
PycharmProjects/pythonexercicios/aula015/ex068.py
|
zmixtv1/cev-Python
| 0
|
12780953
|
from random import randint
cont = 0
while True:
valor = int(input("Digite um valor: "))
computador = randint(0, 10)
total = computador + valor
tipo = " "
while tipo not in "PI":
tipo = str(input("Par ou Impar [P/I]")).strip().upper()[0]
print(f"você jogou {valor} e o computador {computador}")
print("Deu Par " if total % 2 == 0 else "Deu impar")
if tipo == "P":
if total % 2 == 0:
print("Você venceu!")
cont += 1
else:
print("Você Perdeu!")
break
elif tipo == "I":
if total % 2 == 1:
print("Você Venceu!")
cont += 1
else:
print("Você Perdeu!")
break
print("Vamos jogar novamente...")
print(f"Você venceu {cont} vezes!")
| 3.71875
| 4
|
2018/spacy/def.py
|
sematext/activate
| 11
|
12780954
|
<reponame>sematext/activate<gh_stars>10-100
from __future__ import unicode_literals
import spacy
nlp = spacy.load('en_core_web_sm')
print("")
print("Doc 10, title: '#bbuzz: Radu Gheorghe JSON Logging with Elasticsearch'")
print("-----")
doc = nlp(u"#bbuzz: Radu Gheorghe JSON Logging with Elasticsearch")
for entity in doc.ents:
print(entity.label_, ' | ', entity.text)
print("")
print("Doc 20:, title: 'How to Run Solr on Docker. And Why. - <NAME> & <NAME>, Sematext'")
print("-----")
doc = nlp(u"How to Run Solr on Docker. And Why. - <NAME> & <NAME>, Sematext")
for entity in doc.ents:
print(entity.label_, ' | ', entity.text)
print("")
print("Doc 37:, title: '#bbuzz 2016: <NAME> - Running High Performance And Fault Tolerant Elasticsearch'")
print("-----")
doc = nlp(u"#bbuzz 2016: <NAME> - Running High Performance And Fault Tolerant Elasticsearch")
for entity in doc.ents:
print(entity.label_, ' | ', entity.text)
| 2.265625
| 2
|
ship.py
|
camargo-advanced/blasteroidspi
| 0
|
12780955
|
import pygame  # needed for pygame.time.set_timer() further down
from pygame.locals import *
from blast import Blast
from sound import Sound
from wentity import WEntity
from pygame.math import Vector2
from utils import *
WIDTH = 3 # line thickness
SCALE_FACTOR = 5.0
ACCELERATION = 250.0 # pixels per second
DAMPING = 0.57 # some damping
ANGULAR_SPEED = 180.0 # degrees per second
SHIP_WIREFRAME = [
Vector2(0.0, -5.0), Vector2(3.0, 4.0), Vector2(1.5, 2.0),
Vector2(-1.5, 2.0), Vector2(-3.0, 4.0)
]
THRUST_WIREFRAME = [
Vector2(1.0, 2.0), Vector2(0.0, 5.0), Vector2(-1.0, 2.0)
]
class Ship(WEntity):
def __init__(self, galaxy):
super().__init__(galaxy, "ship", GREEN, SHIP_WIREFRAME, WIDTH)
# ship initial position
self.position = Vector2(self.galaxy.rect.width/2,
self.galaxy.rect.height/2)
self.acceleration = ACCELERATION
self.damping = DAMPING
self.angular_speed = ANGULAR_SPEED
self.size = SCALE_FACTOR
self.shielded = True
self.firing = False
self.dying = False
def update(self, time_passed, event_list):
super().update(time_passed, event_list)
if self.galaxy.get_entity_by_name('score').game_status != GAME_RUNNING:
return
self.process_events(event_list)
if self.firing:
# build a new blast, set its position to the ship's,
# set its velocity vector to ship's orientation
# and then add it to the galaxy
blast = Blast(self.galaxy, Vector2(self.position), self.angle)
self.galaxy.add_entity(blast)
for entity in self.galaxy.get_entities_by_name('asteroid'):
if not self.shielded and self.collide(entity):
# if a rock hit me, I lose a life but I'm shielded for 5 sec!
# I also need to be positioned at the center of screen stationary,
# and in the same angle I was born. The lives must be reduced by 1
self.dying = True
self.shield()
pygame.time.set_timer(UNSHIELD_EVENT, 2500, 1)
self.position = Vector2(self.galaxy.rect.width/2,
self.galaxy.rect.height/2)
self.velocity = Vector2(0.0, 0.0)
self.angle = 0.0
self.galaxy.get_entity_by_name('score').update_lives(-1)
def render(self, surface):
super().render(surface)
if self.accelerating == FORWARD:
Sound().play('thrust')
self.wireframe = THRUST_WIREFRAME
super().render(surface)
self.wireframe = SHIP_WIREFRAME
if self.firing:
Sound().play('fire')
self.firing = False
if self.dying:
Sound().play('bang')
self.dying = False
def process_events(self, event_list):
for event in event_list:
if event.type == KEYDOWN:
if event.key == K_LEFT or event.key == K_a:
self.start_rotating(CCLOCKWISE)
if event.key == K_RIGHT or event.key == K_d:
self.start_rotating(CLOCKWISE)
if event.key == K_UP or event.key == K_w:
self.start_accelerating(FORWARD)
if event.key == K_SPACE:
self.fire()
if event.type == KEYUP:
if event.key == K_LEFT or event.key == K_a or \
event.key == K_RIGHT or event.key == K_d:
self.stop_rotating()
if event.key == K_UP or event.key == K_w:
self.stop_accelerating()
if event.type == UNSHIELD_EVENT:
self.unshield()
def fire(self):
self.firing = True
def unshield(self):
self.shielded = False
self.galaxy.get_entity_by_name('score').update_ship_shielded(False)
def shield(self):
self.shielded = True
self.galaxy.get_entity_by_name('score').update_ship_shielded(True)
| 2.984375
| 3
|
messenger/admin.py
|
lucida-no/hdo-quiz-service
| 0
|
12780956
|
from django.contrib import admin
from messenger.models import ChatSession
class ChatSessionAdmin(admin.ModelAdmin):
readonly_fields = ['uuid', 'user_id']
list_display = ['uuid', 'state', 'user_id']
list_filter = ['state']
admin.site.register(ChatSession, ChatSessionAdmin)
| 1.703125
| 2
|
src/trw/datasets/cityscapes.py
|
civodlu/trw
| 3
|
12780957
|
from typing import Optional, List
import torch
import torchvision
import numpy as np
from ..basic_typing import Datasets
from ..train import SequenceArray
from ..train import SamplerRandom, SamplerSequential
import functools
import collections
import os
from ..transforms import Transform
from typing_extensions import Literal
def image_to_torch(i):
return torch.from_numpy(np.array(i).transpose((2, 0, 1))).unsqueeze(0)
def segmentation_to_torch(i):
return torch.from_numpy(np.array(i)).type(torch.int64).unsqueeze(0).unsqueeze(0)
def load_case(batch, dataset, transform):
case_ids = batch['case_id']
images = []
segmentations = []
for case_id in case_ids:
image, segmentation = dataset[case_id]
images.append(image_to_torch(image))
segmentations.append(segmentation_to_torch(segmentation))
data_batch = {
'case_id': case_ids,
'image': torch.cat(images),
'segmentation': torch.cat(segmentations)
}
if transform is not None:
data_batch = transform(data_batch)
return data_batch
def create_cityscapes_dataset(
batch_size: int = 32,
root: Optional[str] = None,
transform_train: Optional[List[Transform]] = None,
transform_valid: Optional[List[Transform]] = None,
nb_workers: int = 4,
target_type: Literal['semantic'] = 'semantic') -> Datasets:
"""
Load the cityscapes dataset. This requires to register on their website https://www.cityscapes-dataset.com/
and manually download the dataset.
The dataset is composed of 3 parts: gtCoarse, gtFine, leftImg8bit. Download each package and unzip in a
folder (e.g., `cityscapes`)
Args:
batch_size:
root: the folder containing the 3 unzipped cityscapes data `gtCoarse`, `gtFine`, `leftImg8bit`
transform_train: the transform to apply on the training batches
transform_valid: the transform to apply on the validation batches
nb_workers: the number of workers for each split allocated to the data loading and processing
target_type: the segmentation task
Returns:
a dict of splits. Each split is a :class:`trw.train.Sequence`
"""
if root is None:
# first, check if we have some environment variables configured
root = os.environ.get('TRW_DATA_ROOT')
if root is None:
# else default a standard folder
root = './data'
cityscapes_path = os.path.join(root, 'cityscapes')
train_dataset = torchvision.datasets.cityscapes.Cityscapes(cityscapes_path, mode='fine', split='train', target_type=target_type)
valid_dataset = torchvision.datasets.cityscapes.Cityscapes(cityscapes_path, mode='fine', split='val', target_type=target_type)
train_sampler = SamplerRandom(batch_size=batch_size)
train_sequence = SequenceArray({'case_id': np.arange(len(train_dataset))}, sampler=train_sampler)
train_sequence = train_sequence.map(
functools.partial(load_case, dataset=train_dataset, transform=transform_train), nb_workers=nb_workers)
valid_sampler = SamplerSequential(batch_size=batch_size)
valid_sequence = SequenceArray({'case_id': np.arange(len(valid_dataset))}, sampler=valid_sampler)
valid_sequence = valid_sequence.map(
functools.partial(load_case, dataset=valid_dataset, transform=transform_valid), nb_workers=nb_workers)
dataset = collections.OrderedDict([
('train', train_sequence),
('valid', valid_sequence)
])
return collections.OrderedDict([
('cityscapes', dataset)
])
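# Hypothetical usage sketch (the root path, batch size, and iteration pattern below are
# assumptions, not taken from this file):
# datasets = create_cityscapes_dataset(batch_size=8, root='/path/to/data', nb_workers=2)
# train_split = datasets['cityscapes']['train']  # a trw.train.Sequence, per the docstring above
# for batch in train_split:
#     images, segmentations = batch['image'], batch['segmentation']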
| 2.65625
| 3
|
examples/rtsp_pyqt/client.py
|
Adancurusul/MaixPy3
| 93
|
12780958
|
import socket
from threading import Thread
from typing import Union, Optional, List, Tuple
from time import sleep
from PIL import Image
from io import BytesIO
import re
from typing import Optional
class InvalidRTSPRequest(Exception):
pass
class RTSPPacket:
RTSP_VERSION = 'RTSP/1.0'
INVALID = -1
SETUP = 'SETUP'
PLAY = 'PLAY'
PAUSE = 'PAUSE'
TEARDOWN = 'TEARDOWN'
RESPONSE = 'RESPONSE'
def __init__(
self,
request_type,
video_file_path: Optional[str] = None,
sequence_number: Optional[int] = None,
dst_port: Optional[int] = None,
session_id: Optional[str] = None):
self.request_type = request_type
self.video_file_path = video_file_path
self.sequence_number = sequence_number
self.session_id = session_id
# if request_type SETUP
self.rtp_dst_port = dst_port
def __str__(self):
return (f"RTSPPacket({self.request_type}, "
f"{self.video_file_path}, "
f"{self.sequence_number}, "
f"{self.rtp_dst_port}, "
f"{self.session_id})")
@classmethod
def from_response(cls, response: bytes):
# only response format implemented, taken from server class:
# """
# <RTSP_VERSION> 200 OK\r\n
# CSeq: <SEQUENCE_NUMBER>\r\n
# Session: <SESSION_ID>\r\n
# """
match = re.match(
r"(?P<rtsp_version>RTSP/\d+.\d+) 200 OK\r?\n"
r"CSeq: (?P<sequence_number>\d+)\r?\n"
r"Session: (?P<session_id>\d+)\r?\n",
response.decode()
)
if match is None:
raise Exception(f"failed to parse RTSP response: {response}")
g = match.groupdict()
# not used, defaults to 1.0
# rtsp_version = g.get('rtsp_version')
sequence_number = g.get('sequence_number')
session_id = g.get('session_id')
try:
sequence_number = int(sequence_number)
except (ValueError, TypeError):
raise Exception(f"failed to parse sequence number: {response}")
if session_id is None:
raise Exception(f"failed to parse session id: {response}")
return cls(
request_type=RTSPPacket.RESPONSE,
sequence_number=sequence_number,
session_id=session_id
)
@classmethod
def build_response(cls, sequence_number: int, session_id: str):
response = '\r\n'.join((
f"{cls.RTSP_VERSION} 200 OK",
f"CSeq: {sequence_number}",
f"Session: {session_id}",
)) + '\r\n'
return response
@classmethod
def from_request(cls, request: bytes):
# loosely follows actual rtsp protocol, considering only SETUP, PLAY, PAUSE, and TEARDOWN
# https://en.wikipedia.org/wiki/Real_Time_Streaming_Protocol
match = re.match(
r"(?P<request_type>\w+) rtsp://(?P<video_file_path>\S+) (?P<rtsp_version>RTSP/\d+.\d+)\r?\n"
r"CSeq: (?P<sequence_number>\d+)\r?\n"
r"(Range: (?P<play_range>\w+=\d+-\d+\r?\n))?"
r"(Transport: .*client_port=(?P<dst_port>\d+).*\r?\n)?" # in case of SETUP request
r"(Session: (?P<session_id>\d+)\r?\n)?",
request.decode()
)
if match is None:
raise InvalidRTSPRequest(f"failed to parse request: {request}")
g = match.groupdict()
request_type = g.get('request_type')
if request_type not in (RTSPPacket.SETUP,
RTSPPacket.PLAY,
RTSPPacket.PAUSE,
RTSPPacket.TEARDOWN):
raise InvalidRTSPRequest(f"invalid request type: {request}")
video_file_path = g.get('video_file_path')
# not used, defaults to `RTSPPacket.RTSP_VERSION`
# rtsp_version = g.get('rtsp_version')
sequence_number = g.get('sequence_number')
dst_port = g.get('dst_port')
session_id = g.get('session_id')
if request_type == RTSPPacket.SETUP:
try:
dst_port = int(dst_port)
except (ValueError, TypeError):
raise InvalidRTSPRequest(f"failed to parse RTP port")
try:
sequence_number = int(sequence_number)
except (ValueError, TypeError):
raise InvalidRTSPRequest(f"failed to parse sequence number: {request}")
return cls(
request_type,
video_file_path,
sequence_number,
dst_port,
session_id
)
def to_request(self) -> bytes:
# loosely follows actual rtsp protocol, considering only SETUP, PLAY, PAUSE, and TEARDOWN
# https://en.wikipedia.org/wiki/Real_Time_Streaming_Protocol
if any((attr is None for attr in (self.request_type,
self.sequence_number,
self.session_id))):
raise InvalidRTSPRequest('missing one attribute of: `request_type`, `sequence_number`, `session_id`')
if self.request_type in (self.INVALID, self.RESPONSE):
raise InvalidRTSPRequest(f"invalid request type: {self}")
request_lines = [
f"{self.request_type} rtsp://{self.video_file_path} {self.RTSP_VERSION}",
f"CSeq: {self.sequence_number}",
]
if self.request_type == self.SETUP:
if self.rtp_dst_port is None:
raise InvalidRTSPRequest(f"missing RTP destination port: {self}")
request_lines.append(
f"Transport: RTP/UDP;client_port={self.rtp_dst_port}"
)
else:
request_lines.append(
f"Session: {self.session_id}"
)
request = '\r\n'.join(request_lines) + '\r\n'
return request.encode()
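# Illustrative round trip for the request/response helpers above (all values are
# assumptions, not taken from this file):
# setup = RTSPPacket(RTSPPacket.SETUP, 'movie.mjpeg', sequence_number=0, dst_port=5004, session_id='0')
# setup.to_request()
# -> b'SETUP rtsp://movie.mjpeg RTSP/1.0\r\nCSeq: 0\r\nTransport: RTP/UDP;client_port=5004\r\n'
# RTSPPacket.from_response(b'RTSP/1.0 200 OK\r\nCSeq: 0\r\nSession: 123456\r\n').session_id
# -> '123456'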
class InvalidPacketException(Exception):
pass
class RTPPacket:
# default header info
HEADER_SIZE = 12 # bytes
VERSION = 0b10 # 2 bits -> current version 2
PADDING = 0b0 # 1 bit
EXTENSION = 0b0 # 1 bit
CC = 0x0 # 4 bits
MARKER = 0b0 # 1 bit
SSRC = 0x00000000 # 32 bits
class TYPE:
MJPEG = 26
def __init__(
self,
payload_type: int = None,
sequence_number: int = None,
timestamp: int = None,
payload: bytes = None):
self.payload = payload
self.payload_type = payload_type
self.sequence_number = sequence_number
self.timestamp = timestamp
# b0 -> v0 v1 p x c0 c1 c2 c3
zeroth_byte = (self.VERSION << 6) | (self.PADDING << 5) | (self.EXTENSION << 4) | self.CC
# b1 -> m pt0 pt1 pt2 pt3 pt4 pt5 pt6
first_byte = (self.MARKER << 7) | self.payload_type
# b2 -> s0 s1 s2 s3 s4 s5 s6 s7
second_byte = self.sequence_number >> 8
# b3 -> s8 s9 s10 s11 s12 s13 s14 s15
third_byte = self.sequence_number & 0xFF
# b4~b7 -> timestamp
fourth_to_seventh_bytes = [
(self.timestamp >> shift) & 0xFF for shift in (24, 16, 8, 0)
]
# b8~b11 -> ssrc
eigth_to_eleventh_bytes = [
(self.SSRC >> shift) & 0xFF for shift in (24, 16, 8, 0)
]
self.header = bytes((
zeroth_byte,
first_byte,
second_byte,
third_byte,
*fourth_to_seventh_bytes,
*eigth_to_eleventh_bytes,
))
@classmethod
def from_packet(cls, packet: bytes):
if len(packet) < cls.HEADER_SIZE:
raise InvalidPacketException(f"The packet {repr(packet)} is invalid")
header = packet[:cls.HEADER_SIZE]
payload = packet[cls.HEADER_SIZE:]
# b1 -> m pt0 ... pt6
# i.e. payload type is whole byte except first bit
payload_type = header[1] & 0x7F
# b2 -> s0 ~ s7
# b3 -> s8 ~ s15
# i.e. sequence number is b2<<8 | b3
sequence_number = header[2] << 8 | header[3]
# b4 ~ b7 -> t0 ~ t31
timestamp = 0
for i, b in enumerate(header[4:8]):
timestamp = timestamp | b << (3 - i) * 8
return cls(
payload_type,
sequence_number,
timestamp,
payload
)
def get_packet(self) -> bytes:
return bytes((*self.header, *self.payload))
def print_header(self):
# print header without SSRC
for i, by in enumerate(self.header[:8]):
s = ' '.join(f"{by:08b}")
# break line after the third and seventh bytes
print(s, end=' ' if i not in (3, 7) else '\n')
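# Round-trip sketch for the RTP framing above (the payload bytes are an illustrative assumption):
# pkt = RTPPacket(payload_type=RTPPacket.TYPE.MJPEG, sequence_number=7, timestamp=0, payload=b'\xff\xd8\xff\xd9')
# assert RTPPacket.from_packet(pkt.get_packet()).sequence_number == 7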
class Client:
DEFAULT_CHUNK_SIZE = 128 * 1024
DEFAULT_RECV_DELAY = 20 # in milliseconds
DEFAULT_LOCAL_HOST = '0.0.0.0'
RTP_SOFT_TIMEOUT = 5 # in milliseconds
# for allowing simulated non-blocking operations
# (useful for keyboard break)
RTSP_SOFT_TIMEOUT = 1  # in milliseconds
# if it's present at the end of chunk, client assumes
# it's the last chunk for current frame (end of frame)
PACKET_HEADER_LENGTH = 5
def __init__(
self,
file_path: str,
remote_host_address: str,
remote_host_port: int,
rtp_port: int):
self._rtsp_connection: Union[None, socket.socket] = None
self._rtp_socket: Union[None, socket.socket] = None
self._rtp_receive_thread: Union[None, Thread] = None
self._frame_buffer: List[Image.Image] = []
self._current_sequence_number = 0
self.session_id = ''
self.current_frame_number = -1
self.is_rtsp_connected = False
self.is_receiving_rtp = False
self.file_path = file_path
self.remote_host_address = remote_host_address
self.remote_host_port = remote_host_port
self.rtp_port = rtp_port
def get_next_frame(self) -> Optional[Tuple[Image.Image, int]]:
if self._frame_buffer:
self.current_frame_number += 1
# skip 5 bytes which contain frame length in bytes
return self._frame_buffer.pop(0), self.current_frame_number
return None
@staticmethod
def _get_frame_from_packet(packet: RTPPacket) -> Image.Image:
# the payload is already the jpeg
raw = packet.payload
frame = Image.open(BytesIO(raw))
return frame
def _recv_rtp_packet(self, size=DEFAULT_CHUNK_SIZE) -> RTPPacket:
recv = bytes()
print('Waiting RTP packet...')
while True:
try:
recv += self._rtp_socket.recv(size)
print('packet', len(recv))
if recv.endswith(b'\xff\xd9'): # VideoStream.JPEG_EOF = b'\xff\xd9'
break
except socket.timeout:
continue
except Exception as e:
print(e)
# print(f"Received from server: {repr(recv)}")
return RTPPacket.from_packet(recv)
def _start_rtp_receive_thread(self):
self._rtp_receive_thread = Thread(target=self._handle_video_receive)
self._rtp_receive_thread.setDaemon(True)
self._rtp_receive_thread.start()
def _handle_video_receive(self):
self._rtp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._rtp_socket.bind((self.DEFAULT_LOCAL_HOST, self.rtp_port))
self._rtp_socket.settimeout(self.RTP_SOFT_TIMEOUT / 1000.)
while True:
if not self.is_receiving_rtp:
sleep(self.RTP_SOFT_TIMEOUT/1000.) # diminish cpu hogging
continue
packet = self._recv_rtp_packet()
frame = self._get_frame_from_packet(packet)
self._frame_buffer.append(frame)
def establish_rtsp_connection(self):
if self.is_rtsp_connected:
print('RTSP is already connected.')
return
print(f"Connecting to {self.remote_host_address}:{self.remote_host_port}...")
self._rtsp_connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._rtsp_connection.connect((self.remote_host_address, self.remote_host_port))
self._rtsp_connection.settimeout(self.RTSP_SOFT_TIMEOUT / 1000.)
self.is_rtsp_connected = True
def close_rtsp_connection(self):
if not self.is_rtsp_connected:
print('RTSP is not connected.')
return
self._rtsp_connection.close()
self.is_rtsp_connected = False
if self._rtp_socket:
self._rtp_socket.close()
def _send_request(self, request_type=RTSPPacket.INVALID) -> RTSPPacket:
if not self.is_rtsp_connected:
raise Exception('rtsp connection not established. run `setup_rtsp_connection()`')
request = RTSPPacket(
request_type,
self.file_path,
self._current_sequence_number,
self.rtp_port,
self.session_id
).to_request()
print(f"Sending request: {repr(request)}")
self._rtsp_connection.send(request)
self._current_sequence_number += 1
return self._get_response()
def send_setup_request(self) -> RTSPPacket:
response = self._send_request(RTSPPacket.SETUP)
self._start_rtp_receive_thread()
self.session_id = response.session_id
return response
def send_play_request(self) -> RTSPPacket:
response = self._send_request(RTSPPacket.PLAY)
self.is_receiving_rtp = True
return response
def send_pause_request(self) -> RTSPPacket:
response = self._send_request(RTSPPacket.PAUSE)
self.is_receiving_rtp = False
return response
def send_teardown_request(self) -> RTSPPacket:
response = self._send_request(RTSPPacket.TEARDOWN)
self.is_receiving_rtp = False
self.is_rtsp_connected = False
return response
def _get_response(self, size=DEFAULT_CHUNK_SIZE) -> RTSPPacket:
rcv = None
while True:
try:
rcv = self._rtsp_connection.recv(size)
break
except socket.timeout:
continue
# print(f"Received from server: {repr(rcv)}")
response = RTSPPacket.from_response(rcv)
return response
| 2.6875
| 3
|
igtagger/atomic_sentences.py
|
institutional-grammar-pl/ig-tagger
| 0
|
12780959
|
import re
import spacy
spacy_model_name = 'en_core_web_lg'
if not spacy.util.is_package(spacy_model_name):
spacy.cli.download(spacy_model_name)
nlp = spacy.load(spacy_model_name)
def filter_sentence(sentence):
def sentence_length(s, min_len=8):
if len(s) < min_len:
return False
else:
return True
filters = [sentence_length]
return all([filter_(sentence) for filter_ in filters])
def gen_atomic_statements(sentence):
"""
Handles the case of enumerations like (1) ..., (2) ...
:param sentence:
:return:
"""
rex = r"\([abcdefghi123456789]\)([A-z \n,–:;-]+(\(?(?=[A-z]{2,})[A-z]+\)?[A-z \n,-–;]+)+)"
splits = re.split(rex, sentence)
main_sentence = splits[0] if splits is not None else None
subsentences = re.findall(rex, sentence)
atomic_statements = []
if main_sentence and subsentences:
clean_main_sentence = re.sub(r'\([abcdefgh123456789]\)|\n', ' ', main_sentence).strip()
for subsentence in subsentences:
clean_subsentence = re.sub(r'\([abcdefgh123456789]\)|\n', ' ', subsentence[0]).strip()
atomic_statements.append(clean_main_sentence + ' ' + clean_subsentence + '.')
return atomic_statements
else:
return sentence
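# Illustrative behaviour (the input sentence is an assumption, not from this file):
# gen_atomic_statements("The agency shall (a) collect data, (b) publish reports.")
# combines the main clause with each lettered sub-clause, yielding one atomic statement
# per item, e.g. "The agency shall publish reports."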
| 2.71875
| 3
|
lib/python/treadmill/tests/syscall/__init__.py
|
vrautela/treadmill
| 133
|
12780960
|
<reponame>vrautela/treadmill
"""Tests for treadmill's linux direct system call interface."""
| 0.886719
| 1
|
halotools/utils/__init__.py
|
aphearin/halotools
| 0
|
12780961
|
<reponame>aphearin/halotools
r""" This module contains helper functions used throughout the Halotools package.
"""
from __future__ import division, print_function, absolute_import, unicode_literals
from .spherical_geometry import *
from .array_utils import *
from .io_utils import *
from .table_utils import *
from .value_added_halo_table_functions import *
from .group_member_generator import group_member_generator
from .crossmatch import *
from .array_indexing_manipulations import *
from .inverse_transformation_sampling import *
from .distribution_matching import *
from .probabilistic_binning import fuzzy_digitize
from .conditional_percentile import sliding_conditional_percentile
from .vector_utilities import *
| 1.109375
| 1
|
section2/src/run_ml_pipeline.py
|
felixglush/HippocampalVolumeSegmentation
| 0
|
12780962
|
"""
This file contains code that will kick off training and testing processes
"""
import os, sys
import argparse
import json
import numpy as np
from experiments.UNetExperiment import UNetExperiment
from data_prep.HippocampusDatasetLoader import LoadHippocampusData
from torch.utils.data import random_split
class Config:
"""
Holds configuration parameters
"""
def __init__(self):
self.name = "Basic_unet"
self.root_dir = r"data/"
self.n_epochs = 10
self.learning_rate = 0.0002
self.batch_size = 8
self.patch_size = 64
self.test_results_dir = "out/results"
self.model_name = "" # the command line provided model name to save network weights in
self.weights_name = "" # the command line provided weights file name to load network weights from
self.test = False
def set_model_name(self, m):
self.model_name = m
def set_weights_name(self, w):
self.weights_name = w
def set_test(self, t):
self.test = t
if __name__ == "__main__":
# Get configuration
# TASK: Fill in parameters of the Config class and specify directory where the data is stored and
# directory where results will go
c = Config()
parser = argparse.ArgumentParser()
parser.add_argument("--weights", "-w", help="file name for saved model weights", action="store")
parser.add_argument("--modelname", "-m", help="model weights filename used for saving this model", action="store")
parser.add_argument("--testonly", "-t", help="test only, no training", action="store_true")
args = parser.parse_args()
if args.weights:
print("Will load model weights from", args.weights)
c.set_weights_name(args.weights)
else:
print("No pretrained model weights given. Will train a new model.")
if args.modelname:
print("Will store model weights in", args.modelname)
c.set_model_name(args.modelname)
if args.testonly:
# need to also provide a weights filename if we're only testing
print("Testing mode.")
c.set_test(True)
if not args.weights:
print("Please also provide a weights filename through -w")
sys.exit()
# Load data
print("Loading data...")
# TASK: LoadHippocampusData is not complete. Go to the implementation and complete it.
data = LoadHippocampusData(c.root_dir + "TrainingSet/", y_shape = c.patch_size, z_shape = c.patch_size)
# Create test-train-val split
# In a real world scenario you would probably do multiple splits for
# multi-fold training to improve your model quality
data_len = len(data)
keys = range(data_len)
# Here, random permutation of keys array would be useful in case if we do something like
# a k-fold training and combining the results.
# TASK: create three keys in the dictionary: "train", "val" and "test". In each key, store
# the array with indices of training volumes to be used for training, validation
# and testing respectively.
train_proportion = 0.7
val_proportion = 0.2
test_proportion = 0.1
splits = [int(np.floor(train_proportion * data_len)),
int(np.floor(val_proportion * data_len)),
int(np.floor(test_proportion * data_len))]
train, val, test = random_split(keys, splits)
split = {"train": train,
"val": val,
"test": test}
# Set up and run experiment
# TASK: Class UNetExperiment has missing pieces. Go to the file and fill them in
exp = UNetExperiment(c, split, data)
# You could free up memory by deleting the dataset
# as it has been copied into loaders
del data
if not args.testonly:
# run training and validation
exp.run()
# prep and run testing
# TASK: Test method is not complete. Go to the method and complete it
results_json = exp.run_test()
results_json["config"] = vars(c)
with open(os.path.join(exp.out_dir, "results.json"), 'w') as out_file:
json.dump(results_json, out_file, indent=2, separators=(',', ': '))
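# Hedged invocation examples (the model/weights names are assumptions):
#   python run_ml_pipeline.py -m hippocampus_unet       # train a new model, save weights under this name
#   python run_ml_pipeline.py -t -w hippocampus_unet    # test-only run loading previously saved weights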
| 2.640625
| 3
|
chrome/test/install_test/theme_updater.py
|
nagineni/chromium-crosswalk
| 2
|
12780963
|
<filename>chrome/test/install_test/theme_updater.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Update tests for themes."""
import os
import sys
_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_DIRECTORY, os.path.pardir, os.path.pardir,
os.path.pardir, 'build', 'util', 'lib'))
from common import util
import chrome_options
import install_test
class ThemeUpdater(install_test.InstallTest):
"""Theme update tests."""
_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
_EXTENSIONS_DIR = os.path.join(_DIRECTORY, os.path.pardir, 'data',
'extensions')
camo_theme = os.path.join(_EXTENSIONS_DIR, 'theme.crx')
camo_img = ('chrome://theme/IDR_THEME_NTP_BACKGROUND?'
'iamefpfkojoapidjnbafmgkgncegbkad')
def setUp(self):
super(ThemeUpdater, self).setUp()
self._user_data_dir = util.MakeTempDir()
def _CheckThemeApplied(self):
"""Loads the New Tab Page and asserts that the theme is applied."""
self._driver.get('chrome://newtab')
html = self._driver.find_element_by_xpath('html')
html_background = html.value_of_css_property('background-image')
self.assertTrue(self.camo_img in html_background,
msg='Did not find expected theme background-image')
def _StartChromeProfile(self, incognito=False):
"""Start Chrome with a temp profile.
Args:
incognito: Boolean flag for starting Chrome in incognito.
"""
options = chrome_options.ChromeOptions()
options.SetUserDataDir(self._user_data_dir)
if incognito:
options.AddSwitch('incognito')
self.StartChrome(options.GetCapabilities())
def _StartChromeProfileExtension(self, extension):
"""Start Chrome with a temp profile and with specified extension.
Args:
extension: Paths to extension to be installed.
"""
options = chrome_options.ChromeOptions()
options.AddExtension(extension)
options.SetUserDataDir(self._user_data_dir)
self.StartChrome(options.GetCapabilities())
def testInstallTheme(self):
"""Install a theme and check it is still applied after update."""
self.Install(self.GetUpdateBuilds()[0])
self._StartChromeProfileExtension(self.camo_theme)
self._CheckThemeApplied()
# Update and relaunch without extension.
self.Install(self.GetUpdateBuilds()[1])
self._StartChromeProfile()
self._CheckThemeApplied()
def testInstallThemeIncognito(self):
"""Install a theme and check it still applies to incognito after update."""
self.Install(self.GetUpdateBuilds()[0])
self._StartChromeProfileExtension(self.camo_theme)
self._CheckThemeApplied()
# Relaunch without extension in incognito.
self._driver.quit()
self._StartChromeProfile(incognito=True)
self._CheckThemeApplied()
# Update and relaunch without extension in incognito.
self.Install(self.GetUpdateBuilds()[1])
self._StartChromeProfile(incognito=True)
self._CheckThemeApplied()
| 2.171875
| 2
|
manager.py
|
ostapkharysh/Stock-return-predictability
| 2
|
12780964
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Session
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from db_management.DB import Base, Company, News, db_link
def add_company(company):
engine = create_engine(db_link) # pool_size=20, max_overflow=0
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
data = session.query(Company).all()
print(data)
if company in [el.name for el in data]:
return "There is already a Table with such name: {}".format(company)
# Insert a company in the Company table
DBcompany = Company(name=company)
session.add(DBcompany)
try:
session.commit()
except:
session.rollback()
finally:
session.close()
return "The new table {} is created.".format(company)
def add_news(info_dict):
# Insert a news entry into the News table
engine = create_engine(db_link)
engine.pool_timeout = 60
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
company = info_dict['comp_index']
cur_company = session.query(Company).filter_by(name=company).first()
if not cur_company:
print("Not found. Creating company: {}".format(company))
cur_company = Company(name=company)
session.add(cur_company)
session.flush()  # assign an id so cur_company.id is usable as the foreign key below
try:
key = list(info_dict.keys())  # keys() is not subscriptable in Python 3, so materialize a list
# WITH SOURCECOLLECTIONIDENTIFIER AND TITLE
#new_news = News(DATE=str(info_dict[key[0]]), SOURCECOLLECTIONIDENTIFIER= int(info_dict[key[1]]), SOURCECOMMONNAME=info_dict[key[2]], DOCUMENTIDENTIFIER=info_dict[key[3]], LOCATIONS=info_dict[key[4]],
# ORGANIZATIONS=info_dict[key[5]], TONE=info_dict[key[6]], GCAM=info_dict[key[7]], ALLNAMES=info_dict[key[8]], TITLE=info_dict[key[9]], company_id=info_dict[key[10]])
#WITHOUT SOURCECOLLECTIONIDENTIFIER AND TITLE
new_news = News(DATE=str(info_dict[key[0]]),
SOURCECOMMONNAME=info_dict[key[2]], DOCUMENTIDENTIFIER=info_dict[key[3]],
#LOCATIONS=info_dict[key[4]],
#TITLE=info_dict[key[9]],
ORGANIZATIONS=info_dict[key[5]], TONE=info_dict[key[6]], GCAM=info_dict[key[7]],
ALLNAMES=info_dict[key[8]], company_id=cur_company.id)
session.add(new_news)
session.commit()
except IntegrityError:
session.rollback()
return 'The link provided seems to exist in DB: {}'.format(info_dict[key[3]])
except InvalidRequestError:
session.rollback()
return 'You are requesting access to the non-existing source'
try:
#print("COMMITING...")
session.commit()
except:
session.rollback()
finally:
session.close()
#print("The news has been successfully added")
| 2.921875
| 3
|
tests/test_deck.py
|
House-Rulez/black_jack
| 0
|
12780965
|
<filename>tests/test_deck.py<gh_stars>0
# https://stackoverflow.com/questions/10253826/path-issue-with-pytest-importerror-no-module-named-yadayadayada
import sys, os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../scripts/deck/')
sys.path.insert(1, myPath + '/../scripts/dealer/')
sys.path.insert(2, myPath + '/../scripts/player/')
import pytest
###########################################
## Import the classes from the card and player file ##
###########################################
from deck import Deck
from player import User, Dealer
from card import Card
#################################################
## Test the Imports to see if they are working ##
#################################################
@pytest.fixture()
def deck():
return Deck()
@pytest.fixture()
def user():
return User()
def test_import():
assert deck
def test_deck_size(deck):
assert deck.deck_size() == 104
def test_cards_remaining(deck, user):
for _ in range(0, 52):
deck.deal()
expected = 52
actual = deck.cards_remaining()
assert expected == actual
def test_cards_remaining_two(deck, user):
for _ in range(0, 103):
deck.deal()
expected = 1
actual = deck.cards_remaining()
assert expected == actual
| 2.84375
| 3
|
userbot/modules/degi_nehi.py
|
PratikGoswamiPM/OUB-Remix
| 1
|
12780966
|
#"""Fun pligon...for HardcoreUserbot
#\nCode by @Hack12R
#type `.degi` and `.nehi` to see the fun.
#"""
import random, re
#from uniborg.util import admin_cmd
import asyncio
from telethon import events
from userbot.events import register
from asyncio import sleep
import time
from userbot import CMD_HELP
@register(outgoing=True, pattern="^.degi$")
async def _(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
await event.edit("wO")
await asyncio.sleep(0.7)
await event.edit("dEgI")
await asyncio.sleep(1)
await event.edit("tUm")
await asyncio.sleep(0.8)
await event.edit("EkBaR")
await asyncio.sleep(0.9)
await event.edit("mAnG")
await asyncio.sleep(1)
await event.edit("kAr")
await asyncio.sleep(0.8)
await event.edit("ToH")
await asyncio.sleep(0.7)
await event.edit("dEkHo")
await asyncio.sleep(1)
await event.edit("`wO dEgI tUm EkBaR mAnG kAr ToH dEkHo`")
@register(outgoing=True, pattern="^.nehi$")
async def _(event):
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
await event.edit("wO")
await asyncio.sleep(0.7)
await event.edit("pAkKa")
await asyncio.sleep(1)
await event.edit("DeGi")
await asyncio.sleep(0.8)
await event.edit("Tu")
await asyncio.sleep(0.9)
await event.edit("MaNg")
await asyncio.sleep(1)
await event.edit("KaR")
await asyncio.sleep(0.8)
await event.edit("tOh")
await asyncio.sleep(0.7)
await event.edit("Dekh")
await asyncio.sleep(1)
await event.edit("`wO pAkKa DeGi Tu MaNg KaR tOh DeKh`")
CMD_HELP.update({
"degi":
".degi or .nehi\
\nUsage: Sabka Katega."
})
| 2.234375
| 2
|
app/telegram/commands/debug.py
|
unimarijo/vkmessages
| 0
|
12780967
|
# -*- coding: utf-8 -*-
from app import logging
from app import config as config
import logging
def debug(client, message):
try:
client.send_message(
message.from_user.id,
"Ниже находится информация, которая может оказаться полезной."
"\n\n**Информация о приложении:** \n`Version: {0}`\n`Commit: {1}`\n`Developer Mode: {2}`"
"\n\n**Информация о пользователе:** \n`User ID: {3}`\n`Message ID: {4}`\n`Language Code: {5}`".format(
config.VERSION, config.COMMIT, config.DEVELOPER_MODE, message.from_user.id, message.message_id,
message.from_user.language_code))
except Exception as e:
try:
client.send_message(
message.from_user.id,
"❗ Произошла непредвиденная ошибка при выполнении метода. Сообщите об этом администратору для более "
"быстрого ее исправления.")
except:
pass
logging.error("Произошла ошибка при попытке выполнения метода.", exc_info=True)
return e
| 2.171875
| 2
|
src/model_params.py
|
madhu121/Image-Driven-Machine-Learning-Approach-for-Microstructure-Classification-and-Segmentation-Ti-6Al-4V
| 0
|
12780968
|
<reponame>madhu121/Image-Driven-Machine-Learning-Approach-for-Microstructure-Classification-and-Segmentation-Ti-6Al-4V
width=200
height=200
total_size = 1000
train_size = 800
validation_size = 100
test_size = total_size - train_size - validation_size
| 1.648438
| 2
|
tests/test_k8sobject.py
|
projectsyn/commodore
| 39
|
12780969
|
import pytest
from commodore import k8sobject
_test_objs = [
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"name": "test",
"namespace": "test",
},
},
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"name": "test-sa-2",
"namespace": "test",
},
},
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "test",
"namespace": "test",
"labels": {
"name": "test",
},
},
"spec": {
"image": "image",
"command": "pause",
},
},
{
"apiVersion": "rbac.authorization.k8s.io/v1",
"kind": "Role",
"metadata": {
"name": "test-role",
"namespace": "test",
},
},
{
"apiVersion": "rbac.authorization.k8s.io/v1",
"kind": "Role",
"metadata": {
"name": "test-role",
"namespace": "test-2",
},
},
{
"apiVersion": "rbac.authorization.k8s.io/v1",
"kind": "ClusterRole",
"metadata": {
"name": "test-cr",
},
},
{
"apiVersion": "rbac.authorization.k8s.io/v1",
"kind": "ClusterRole",
"metadata": {
"name": "test-cr-2",
},
},
{
"test": "testing",
},
]
@pytest.mark.parametrize(
"k8sdict,expected",
zip(
[None] + _test_objs,
[
{
"kind": "",
"name": "",
"namespace": "",
},
{
"kind": "ServiceAccount",
"name": "test",
"namespace": "test",
},
{
"kind": "ServiceAccount",
"name": "test-sa-2",
"namespace": "test",
},
{
"kind": "Pod",
"name": "test",
"namespace": "test",
},
{
"kind": "Role",
"name": "test-role",
"namespace": "test",
"spec": {
"test": "testing",
},
},
{
"kind": "Role",
"name": "test-role",
"namespace": "test-2",
"spec": {
"test": "testing2",
},
},
{
"kind": "ClusterRole",
"namespace": "",
"name": "test-cr",
},
{
"kind": "ClusterRole",
"namespace": "",
"name": "test-cr-2",
},
{
"name": "",
"namespace": "",
"kind": "",
},
],
),
)
def test_k8sobject_constructor(k8sdict, expected):
o = k8sobject.K8sObject(k8sdict)
assert expected["kind"] == o._kind
assert expected["name"] == o._name
assert expected["namespace"] == o._namespace
_cluster_scoped_obj = k8sobject.K8sObject(
{
"apiVersion": "v1",
"kind": "Namespace",
"metadata": {
"name": "test",
"labels": {
"name": "test",
},
},
}
)
_ns_scoped_obj = k8sobject.K8sObject(
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"name": "test",
"labels": {
"name": "test",
},
},
}
)
@pytest.mark.parametrize(
"k8sdict,to_cluster_scoped,to_ns_scoped",
zip(
_test_objs,
[False, False, False, False, False, True, True, True],
[False, False, True, True, True, True, True, True],
),
)
def test_k8sobject_less_than(k8sdict, to_cluster_scoped, to_ns_scoped):
o = k8sobject.K8sObject(k8sdict)
assert (o < _cluster_scoped_obj) == to_cluster_scoped
assert (o < _ns_scoped_obj) == to_ns_scoped
assert (o > _cluster_scoped_obj) == (not to_cluster_scoped)
assert (o > _ns_scoped_obj) == (not to_ns_scoped)
@pytest.mark.parametrize("k8sdict_a", _test_objs)
@pytest.mark.parametrize("k8sdict_b", _test_objs)
def test_k8sobject_equal(k8sdict_a, k8sdict_b):
a = k8sobject.K8sObject(k8sdict_a)
b = k8sobject.K8sObject(k8sdict_b)
expect = False
if (
k8sdict_a.get("kind", "") == k8sdict_b.get("kind", "")
and k8sdict_a.get("metadata", {}).get("namespace", "")
== k8sdict_b.get("metadata", {}).get("namespace", "")
and k8sdict_a.get("metadata", {}).get("name", "")
== k8sdict_b.get("metadata", {}).get("name", "")
):
expect = True
assert (a == b) == expect
| 1.953125
| 2
|
python/helpers/profiler/prof_util.py
|
teddywest32/intellij-community
| 0
|
12780970
|
__author__ = 'traff'
import threading
import os
import sys
import tempfile
from _prof_imports import Stats, FuncStat, Function
try:
execfile=execfile #Not in Py3k
except NameError:
#We must redefine it in Py3k if it's not already there
def execfile(file, glob=None, loc=None):
if glob is None:
import sys
glob = sys._getframe().f_back.f_globals
if loc is None:
loc = glob
# It seems that the best way is using tokenize.open(): http://code.activestate.com/lists/python-dev/131251/
import tokenize
stream = tokenize.open(file) # @UndefinedVariable
try:
contents = stream.read()
finally:
stream.close()
#execute the script (note: it's important to compile first to have the filename set in debug mode)
exec(compile(contents+"\n", file, 'exec'), glob, loc)
def save_main_module(file, module_name):
sys.modules[module_name] = sys.modules['__main__']
sys.modules[module_name].__name__ = module_name
from imp import new_module
m = new_module('__main__')
sys.modules['__main__'] = m
if hasattr(sys.modules[module_name], '__loader__'):
setattr(m, '__loader__', getattr(sys.modules[module_name], '__loader__'))
m.__file__ = file
return m
class ProfDaemonThread(threading.Thread):
def __init__(self):
super(ProfDaemonThread, self).__init__()
self.setDaemon(True)
self.killReceived = False
def run(self):
self.OnRun()
def OnRun(self):
pass
def generate_snapshot_filepath(basepath, local_temp_dir=False, extension='.pstat'):
basepath = get_snapshot_basepath(basepath, local_temp_dir)
n = 0
path = basepath + extension
while os.path.exists(path):
n+=1
path = basepath + (str(n) if n>0 else '') + extension
return path
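# Naming scheme note: with basepath 'snapshot' and the default extension, successive calls
# while earlier files still exist yield 'snapshot.pstat', 'snapshot1.pstat', 'snapshot2.pstat', ...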
def get_snapshot_basepath(basepath, local_temp_dir):
if basepath is None:
basepath = 'snapshot'
if local_temp_dir:
basepath = os.path.join(tempfile.gettempdir(), os.path.basename(basepath.replace('\\', '/')))
return basepath
def stats_to_response(stats, m):
if stats is None:
return
ystats = Stats()
ystats.func_stats = []
m.ystats = ystats
for func, stat in stats.items():
path, line, func_name = func
cc, nc, tt, ct, callers = stat
func = Function()
func_stat = FuncStat()
func.func_stat = func_stat
ystats.func_stats.append(func)
func_stat.file = path
func_stat.line = line
func_stat.func_name = func_name
func_stat.calls_count = nc
func_stat.total_time = ct
func_stat.own_time = tt
func.callers = []
for f, s in callers.items():
caller_stat = FuncStat()
func.callers.append(caller_stat)
path, line, func_name = f
cc, nc, tt, ct = s
caller_stat.file = path
caller_stat.line = line
caller_stat.func_name = func_name
caller_stat.calls_count = cc
caller_stat.total_time = ct
caller_stat.own_time = tt
# m.validate()
| 2.015625
| 2
|
migrations/20211128_01_Mn7Ng-create-holdem-game-record-table.py
|
zw-g/Funny-Nation
| 126
|
12780971
|
"""
Create holdem game record table
"""
from yoyo import step
__depends__ = {'20211109_01_xKblp-change-comments-on-black-jack-record'}
steps = [
step("CREATE TABLE `holdemGameRecord` ( `userID` BIGINT NOT NULL , `moneyInvested` BIGINT NOT NULL , `status` INT NOT NULL COMMENT '0 represent in progress; 1 represent lose or fold; 2 represent win;' , `tableID` BIGINT NOT NULL , `time` TIMESTAMP NOT NULL , `tableUUID` VARCHAR(64) NOT NULL ) ENGINE = InnoDB;")
]
| 1.828125
| 2
|
django/contrib/gis/geos/io.py
|
huicheese/Django-test3
| 2
|
12780972
|
"""
Module that holds classes for performing I/O operations on GEOS geometry
objects. Specifically, this has Python implementations of WKB/WKT
reader and writer classes.
"""
from ctypes import byref, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes import io as capi
class IOBase(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self.constructor()
def __del__(self):
# Cleaning up with the appropriate destructor.
if self._ptr: self.destructor(self._ptr)
### WKT Reading and Writing objects ###
# Non-public class for internal use because its `read` method returns
# _pointers_ instead of a GEOSGeometry object.
class _WKTReader(IOBase):
constructor = capi.wkt_reader_create
destructor = capi.wkt_reader_destroy
ptr_type = capi.WKT_READ_PTR
def read(self, wkt):
if not isinstance(wkt, basestring): raise TypeError
return capi.wkt_reader_read(self.ptr, wkt)
class WKTReader(_WKTReader):
def read(self, wkt):
"Returns a GEOSGeometry for the given WKT string."
return GEOSGeometry(super(WKTReader, self).read(wkt))
class WKTWriter(IOBase):
constructor = capi.wkt_writer_create
destructor = capi.wkt_writer_destroy
ptr_type = capi.WKT_WRITE_PTR
def write(self, geom):
"Returns the WKT representation of the given geometry."
return capi.wkt_writer_write(self.ptr, geom.ptr)
### WKB Reading and Writing objects ###
# Non-public class for the same reason as _WKTReader above.
class _WKBReader(IOBase):
constructor = capi.wkb_reader_create
destructor = capi.wkb_reader_destroy
ptr_type = capi.WKB_READ_PTR
def read(self, wkb):
"Returns a _pointer_ to C GEOS Geometry object from the given WKB."
if isinstance(wkb, buffer):
wkb_s = str(wkb)
return capi.wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, basestring):
return capi.wkb_reader_read_hex(self.ptr, wkb, len(wkb))
else:
raise TypeError
class WKBReader(_WKBReader):
def read(self, wkb):
"Returns a GEOSGeometry for the given WKB buffer."
return GEOSGeometry(super(WKBReader, self).read(wkb))
class WKBWriter(IOBase):
constructor = capi.wkb_writer_create
destructor = capi.wkb_writer_destroy
ptr_type = capi.WKB_WRITE_PTR
def write(self, geom):
"Returns the WKB representation of the given geometry."
return buffer(capi.wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t())))
def write_hex(self, geom):
"Returns the HEXEWKB representation of the given geometry."
return capi.wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return capi.wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if not order in (0, 1): raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
capi.wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
def _get_outdim(self):
return capi.wkb_writer_get_outdim(self.ptr)
def _set_outdim(self, new_dim):
if not new_dim in (2, 3): raise ValueError('WKB output dimension must be 2 or 3')
capi.wkb_writer_set_outdim(self.ptr, new_dim)
outdim = property(_get_outdim, _set_outdim)
# Property for getting/setting the include srid flag.
def _get_include_srid(self):
return bool(ord(capi.wkb_writer_get_include_srid(self.ptr)))
def _set_include_srid(self, include):
if bool(include): flag = chr(1)
else: flag = chr(0)
capi.wkb_writer_set_include_srid(self.ptr, flag)
srid = property(_get_include_srid, _set_include_srid)
# Instances of the WKT and WKB reader/writer objects.
wkt_r = _WKTReader()
wkt_w = WKTWriter()
wkb_r = _WKBReader()
wkb_w = WKBWriter()
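# Illustrative use of the writer options defined above (the values are examples, not defaults):
# wkb_w.byteorder = 1   # 1 = little endian, 0 = big endian, per _set_byteorder
# wkb_w.outdim = 3      # emit 3D (Z-aware) WKB, per _set_outdim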
| 2.234375
| 2
|
admin/urls.py
|
asifhaider/BookKeep
| 1
|
12780973
|
from django.urls import path
from admin import views
urlpatterns = [
path('manage/', views.AdminPanel.as_view(), name = 'admin-panel'),
path('manage/customer-list/', views.AdminCustomerListView.as_view(), name = 'admin-customer-list-view'),
path('manage/book-list/', views.AdminBookListView.as_view(), name = 'admin-book-list-view'),
path('manage/author-list/', views.AdminAuthorListView.as_view(), name = 'admin-author-list-view'),
path('manage/publisher-list/', views.AdminPublisherListView.as_view(), name = 'admin-publisher-list-view'),
path('manage/order-log/', views.AdminOrderLogView.as_view(), name = 'admin-order-log-view'),
path('manage/offer-list/', views.AdminOfferListView.as_view(), name = 'admin-offer-list-view'),
path('manage/borrows/', views.AdminBorrowsView.as_view(), name = 'admin-borrows-view'),
path('manage/plan-list/', views.AdminPlanListView.as_view(), name = 'admin-plan-list-view'),
# path('test/', views.Test.as_view(), name = 'test'),
]
| 1.84375
| 2
|
cart_pole/cart_pole.py
|
jeongyoonlee/gym_example
| 0
|
12780974
|
<reponame>jeongyoonlee/gym_example<gh_stars>0
import gym
from time import sleep
from const import ENV_NAME
with gym.make(ENV_NAME) as env:
action_space = env.action_space
def run(pi, n_episode=1):
with gym.make(ENV_NAME) as env:
for i_episode in range(n_episode):
state = env.reset()
done = False
t = 0
while not done:
env.render()
action = pi.next(state)
print(state, action)
state, reward, done, info = env.step(action)
if done:
print('Episode finished after {} steps'.format(t + 1))
break
t += 1
sleep(1)
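# Minimal policy sketch compatible with run() above (the class itself is an assumption,
# not part of this file):
# class RandomPolicy:
#     def next(self, state):
#         return action_space.sample()
# run(RandomPolicy(), n_episode=2)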
| 3.171875
| 3
|
src/test.py
|
tclements/CS249FINAL
| 0
|
12780975
|
<reponame>tclements/CS249FINAL<gh_stars>0
import glob, os
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
import matplotlib.pyplot as plt
import sklearn.metrics
import seaborn as sns  # used below for the confusion-matrix heatmap
from mpl_toolkits.axes_grid1.inset_locator import inset_axes  # used below for the inset colorbar axis
def test2input(A,input_dir):
'Convert test data to cc/header file'
# files to write
cc_file = os.path.join(input_dir,"input_data.cc")
h_file = os.path.join(input_dir,"input_data.h")
# create header file first
h_out = "#ifndef EDE_INPUT_DATA_H_\n" \
"#define EDE_INPUT_DATA_H_\n\n" \
"extern const float input_data[];\n" \
"#endif\n"
open(h_file, "w").write(h_out)
# write data to cc file
A = A.flatten(order="F")
cc_out = '#include "input_data.h"\n' \
"static const int input_data_len = 240;\n" \
"static const float input_data[240] = {\n"
arrstring = ""
for ii in range(A.size-1):
arrstring += str(A[ii])
arrstring += ", "
arrstring += str(A[-1])
arrstring += "};\n"
cc_out += arrstring
open(cc_file, "w").write(cc_out)
return None
def decision_threshold(Y,threshold):
Ycop = Y.copy()
Ycop[np.where(Ycop[:,0:2] < threshold)] = 0
return np.argmax(Ycop,axis=-1)
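# Illustrative example: with threshold 0.9 a prediction row [0.6, 0.3, 0.1] has both P- and
# S-wave scores below the threshold, so they are zeroed and argmax falls through to the
# Noise class (index 2).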
if __name__ == "__main__":
# load test data
testdata = np.load("/home/timclements/CS249FINAL/data/test.npz")
Xtest = testdata["Xtest"]
Ytest = testdata["Ytest"]
truth = np.argmax(Ytest,axis=-1)
# check that models work on test data
modelname = "/home/timclements/CS249FINAL/CNN/models/model_5_4_16.tflite"
model = tf.lite.Interpreter(model_path=modelname)
model.allocate_tensors()
model_input_index = model.get_input_details()[0]["index"]
model_output_index = model.get_output_details()[0]["index"]
x_value_tensor = tf.convert_to_tensor(Xtest[0:1,:,:,:], dtype=np.float32)
model.set_tensor(model_input_index, x_value_tensor)
model.invoke()
model.get_tensor(model_output_index)[0]
# convert some test data to data that can be read on the device
input_dir = "/home/timclements/CS249FINAL/src/"
test2input(Xtest[0:1],input_dir)
# test precision, recall and F1 score on range on thresholds
CLASSES = ["P-wave","S-wave","Noise"]
# tf.lite.Interpreter has no predict(); run the interpreter sample by sample instead
preds = np.zeros(Ytest.shape, dtype=np.float32)
for ii in range(Xtest.shape[0]):
x_value_tensor = tf.convert_to_tensor(Xtest[ii:ii+1, :, :, :], dtype=np.float32)
model.set_tensor(model_input_index, x_value_tensor)
model.invoke()
preds[ii] = model.get_tensor(model_output_index)[0]
thresholds = np.linspace(0.4,0.99,60)
reports = {}
for ii in range(len(thresholds)):
threshpred = decision_threshold(preds,thresholds[ii])
reports[ii] = sklearn.metrics.classification_report(truth,threshpred,target_names=CLASSES,output_dict=True)
# extract accuracies
accuracy = np.zeros(len(thresholds))
Precall = np.zeros(len(thresholds))
Pprecision = np.zeros(len(thresholds))
Srecall = np.zeros(len(thresholds))
Sprecision = np.zeros(len(thresholds))
Nrecall = np.zeros(len(thresholds))
Nprecision = np.zeros(len(thresholds))
for ii in range(len(thresholds)):
accuracy[ii] = reports[ii]["accuracy"]
Precall[ii] = reports[ii]["P-wave"]["precision"]
Pprecision[ii] = reports[ii]["P-wave"]["recall"]
Srecall[ii] = reports[ii]["S-wave"]["precision"]
Sprecision[ii] = reports[ii]["S-wave"]["recall"]
Nrecall[ii] = reports[ii]["Noise"]["precision"]
Nprecision[ii] = reports[ii]["Noise"]["recall"]
# plot precision vs recall
fig,ax = plt.subplots(figsize=(6,6))
im = ax.scatter(Precall ,Pprecision,100,c=thresholds,alpha=0.85,label="P-wave",edgecolor="k",cmap=plt.cm.inferno)
ax.scatter(Srecall ,Sprecision,100,c=thresholds,alpha=0.85,label="S-wave",edgecolor="k",marker="^",cmap=plt.cm.inferno)
ax.axhline([0.25],linestyle="--",c="grey",alpha=0.85,label="No skill")
ax.legend(loc="center left",fontsize=14,borderpad = 1.)
ax.set_xlabel("Recall",fontsize=18)
ax.set_ylabel("Precision",fontsize=18)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylim([0.18, 0.97])
ax.spines['left'].set_bounds(0.2, 0.97)
ax.tick_params(direction='in')
ax.spines['bottom'].set_bounds(0.86,1.)
ax.tick_params(labelsize=12)
c_ax = inset_axes(ax,
width="50%", # width = 50% of parent_bbox width
height="5%", # height : 5%
loc='lower left',
borderpad=7.5)
cb = fig.colorbar(im, cax=c_ax, orientation="horizontal")
cb.ax.xaxis.set_ticks_position("top")
cb.ax.set_xlabel('Detection Threshold',fontsize=14)
cb.ax.tick_params(labelsize=12)
plt.tight_layout()
plt.savefig("/home/timclements/CS249FINAL/FIGURES/precision-vs-recall.pdf")
plt.close()
# plot confusion matrix
ind = np.argmax(accuracy)
maxpred = decision_threshold(preds,thresholds[ind])
confusion = tf.math.confusion_matrix(truth,maxpred)
plt.figure(figsize=(6, 6))
ax = sns.heatmap(np.round(confusion / np.sum(confusion,axis=0),decimals=2), xticklabels=CLASSES, yticklabels=CLASSES,
annot=True,cmap="Blues",linewidths=.5,cbar=False)
ax.set_xticklabels(ax.get_xmajorticklabels(), fontsize = 16)
ax.set_yticklabels(ax.get_ymajorticklabels(), fontsize = 16)
plt.xlabel('Prediction',fontsize=18)
plt.ylabel('Label',fontsize=18)
plt.tight_layout()
plt.savefig("/home/timclements/CS249FINAL/FIGURES/confusion-matrix.pdf")
plt.close()
| 2.34375
| 2
|
Python/236.py
|
JWang169/LintCodeJava
| 1
|
12780976
|
<reponame>JWang169/LintCodeJava
# April 14
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
self.lca = None
self.dfs(root, p, q)
return self.lca
def dfs(self, root, p, q):
if not root:
return False, False
hasP, hasQ = False, False
leftP, leftQ = self.dfs(root.left, p, q)
rightP, rightQ = self.dfs(root.right, p, q)
if leftP or rightP or root.val == p.val:
hasP = True
if leftQ or rightQ or root.val == q.val:
hasQ = True
if hasP and hasQ and self.lca == None:
self.lca = root
return hasP, hasQ
# Mar 25
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
if p.val == q.val:
return p
self.lca = None
self.search(root, p, q)
return self.lca
def search(self, node, p, q):
if not node:
return False
left = self.search(node.left, p, q)
right = self.search(node.right, p, q)
if node.val == p.val or node.val == q.val:
cur = True
else:
cur = False
if not self.lca:
if left and right:
self.lca = node
if (left or right) and cur:
self.lca = node
return left or right or cur
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
if not root:
return None
self.result = None
self.search(root, p, q)
return self.result
def search(self, root, p, q):
if not root:
return None, None
leftp, leftq = self.search(root.left, p, q)
rightp, rightq = self.search(root.right, p, q)
rootp = leftp or rightp or root.val == p.val
rootq = leftq or rightq or root.val == q.val
if not self.result and rootp and rootq:
self.result = root
return rootp, rootq
| 3.375
| 3
|
src/data/yt_data_scrapping.py
|
love-bits/site
| 2
|
12780977
|
<gh_stars>1-10
import re
import json
import pytz
import pandas as pd
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
from datetime import datetime, timezone, timedelta
def getBSHtml(url):
uClient = uReq(url)
page_html = uClient.read()
uClient.close()
page_soup = soup(page_html, 'html.parser')
return page_soup
def getDate(link):
#get the date and done status
page_soup = getBSHtml(link)
date = re.findall(r'meta content="(.+?)".+?startDate"', page_soup.prettify())[0]
date = re.split('[T+]', date)
date = date[0] + ' ' + date[1]
date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
localtimezone = pytz.timezone('UTC')
date = localtimezone.localize(date, is_dst=None)
diff = timedelta(0,0,0,0,0,-3)
fuse = timezone(diff)
date = date.astimezone(fuse)
now = localtimezone.localize(datetime.now(), is_dst=None)
done = now > date
return date, done
def getDict(url):
    #return a dictionary with the links, image links, dates, titles and done status of the videos
data_frame = pd.DataFrame(columns=['img','date','title','link', 'done'])
page_soup = getBSHtml(url)
#find video id
after_id = ',"playlistId":"PLa_Eeh4hhoGc_yIrmXWaWhnv5yLc9Ttab","index":'
video_ids = re.findall(r'"videoId":"(.{,20})"'+after_id, page_soup.prettify())
#find video title
titles = re.findall(r'"title":.+?":"(.+?) Love Bits', page_soup.prettify())
#find video image link
imgs = re.findall(r'138},{"url":"(.+?)\?sqp.+?', page_soup.prettify())
    #find link and date, and create dictionary
for count, video_id in enumerate(video_ids):
link = 'https://www.youtube.com/watch?v='+video_id
date, done = getDate(link)
data_frame.loc[count] = [imgs[count], date, titles[count], link, done]
#order data frame by date
data_frame.sort_values(by='date', ascending=False, inplace=True)
    #convert date to the Portuguese format DD/MM/YYYY às hh:mm horas
data_frame['date'] = data_frame['date'].apply(lambda date : re.split('[ :-]',str(date)))
data_frame['date'] = data_frame['date'].apply(lambda date : date[2]+'/'+date[1]+'/'+date[0]+' às '+date[3]+':'+date[4]+' horas')
data_list = data_frame.to_dict('records')
return {'meetings':data_list}
url = 'https://www.youtube.com/playlist?list=PLa_Eeh4hhoGc_yIrmXWaWhnv5yLc9Ttab'
data_dict = getDict(url)
with open('./src/data/meetings.json', 'w') as fp:
json.dump(data_dict, fp)
| 2.78125
| 3
|
courses/migrations/0032_add_course_edx_key.py
|
Wassaf-Shahzad/micromasters
| 32
|
12780978
|
<reponame>Wassaf-Shahzad/micromasters
# Generated by Django 2.1.10 on 2019-08-15 18:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0031_electives_set_releated_name'),
]
operations = [
migrations.AddField(
model_name='course',
name='edx_key',
field=models.CharField(max_length=50, null=True),
),
]
| 1.609375
| 2
|
tekis/board/migrations/0002_auto_20160221_1643.py
|
TKOaly/tkoaly-new-service
| 0
|
12780979
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-21 14:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('board', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='board',
options={'get_latest_by': 'year', 'ordering': ('year',), 'verbose_name': 'Board', 'verbose_name_plural': 'Boards'},
),
migrations.AlterModelOptions(
name='boardmember',
options={'ordering': ('role',), 'verbose_name': 'Board Member', 'verbose_name_plural': 'Board Members'},
),
migrations.AlterField(
model_name='boardmember',
name='contact',
field=models.CharField(default='firstname.lastname(at)cs.helsinki.fi', max_length=100, verbose_name='Contact'),
),
migrations.AlterField(
model_name='boardmember',
name='face',
field=models.ImageField(upload_to='board_faces/%Y/', verbose_name='Mugshot'),
),
migrations.AlterField(
model_name='boardmember',
name='name',
field=models.CharField(max_length=100, verbose_name='Name'),
),
migrations.AlterField(
model_name='boardmember',
name='role',
field=models.IntegerField(choices=[(0, 'Chairman'), (1, 'Treasurer'), (2, 'Secretary'), (3, 'Vice chairman'), (4, 'Board Member'), (5, 'Study affairs'), (6, 'Communications'), (7, 'Corporate affairs'), (8, 'RV affairs'), (9, 'Freshman affairs'), (20, 'First deputy board member'), (21, 'Second deputy board member'), (22, 'Third deputy board member'), (23, 'Fourth deputy board member')], verbose_name='Role'),
),
]
| 1.632813
| 2
|
pymerge.py
|
l0o0/bio-analysis-kit
| 3
|
12780980
|
<gh_stars>1-10
#!/usr/bin/python
# create by linxzh at 2014-8-19
import argparse
import re
import sys
parser = argparse.ArgumentParser('pymerge')
parser.add_argument('-A', type=argparse.FileType('r'))
parser.add_argument('-B', type=argparse.FileType('r'))
parser.add_argument('-Aby', type=int, default=1,
help='key col of -A file, default is 1')
parser.add_argument('-Bby', type=int, default=1,
help='key col of -B file, default is 1')
parser.add_argument('-Acol', type=str, help ='to be merged cols')
parser.add_argument('-Bcol', type=str, help ='to be merged cols')
parser.add_argument('-s', type=str, choices=['all','a','b','ab'],
default='all', help ="all = union, a = by -A items,\
b = by -B items, ab = a intersect with b, default is \
all(union). None exits field is filled with '--'")
parser.add_argument('-Out', type=argparse.FileType('w'))
parser.add_argument('-Sep', type=str, default='\t',
help='field delimiter, default is TAB')
parser.add_argument('-H', type=int, default=1,
help='line num of head line, default is 1')
parser.add_argument('-F', type=argparse.FileType('r'),
help='file contains a filelist to be merged')
args=parser.parse_args()
def get_col(flist, cols):
colslist = re.split(',|-', cols)
if '' in colslist:
out = flist[int(colslist[0])-1:]
elif '-' in cols:
out = flist[int(colslist[0])-1:int(colslist[1])]
else:
out = [flist[int(x)-1] for x in colslist]
return out
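# Illustrative example (added; the values are made up): with flist = ['a', 'b', 'c', 'd'],
# get_col(flist, '2-4') returns ['b', 'c', 'd'], get_col(flist, '1,3') returns ['a', 'c'],
# and the open-ended form get_col(flist, '2-') returns ['b', 'c', 'd'].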
# read data to dict
def read_data(infile, bycol, cols, sep, h):
'''bycol is the arg specified by -Aby / -Bby
cols specified by -Acol / -Bcol
return a dict:
{bycol : [target cols] ... }'''
if isinstance(infile, str):
infile = open(infile)
D = {}
c = 1
for f in infile:
flist = f.strip().split(sep)
k = flist[bycol-1]
v = get_col(flist, cols)
if c == h:
hk = 'header'
D[hk] = v
else:
D[k] = v
c += 1
return D
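# Illustrative example (added; the file contents are made up): for a tab-separated file
#   id<TAB>value
#   g1<TAB>3.5
# read_data(fh, bycol=1, cols='2', sep='\t', h=1) returns
# {'header': ['value'], 'g1': ['3.5']}.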
#
def merge_dict(D1, D2, s):
'''D1 as primary'''
D = {}
D1len = len(D1[D1.keys()[0]])
D2len = len(D2[D2.keys()[0]])
if s == 'all':
kset = set(D1.keys()) | set(D2.keys())
elif s == 'a':
kset = D1.keys()
elif s == 'b':
kset = D2.keys()
elif s == 'ab':
kset = set(D1.keys()) & set(D2.keys())
for k in kset:
if k in D1 and k in D2 and k != 'header':
v = D1[k] + D2[k]
elif k in D1 and k not in D2:
empty = ['--']*D2len
v = D1[k] + empty
elif k not in D1 and k in D2:
empty = ['--'] * D1len
v = empty + D2[k]
D[k] = v
D['header'] = D1['header'] + D2['header']
return D
#
def reduce_merge_dict(F, bycol, cols, sep, h, s):
filelist = F.readlines()
filelist = [x.strip() for x in filelist]
tmp = map(lambda x : read_data(x, bycol, cols, sep, h), filelist)
D = reduce(lambda x, y : merge_dict(x, y, s), tmp)
return D
if __name__ == '__main__':
if args.F:
D = reduce_merge_dict(args.F, args.Aby, args.Acol, args.Sep, args.H, args.s)
else:
D1 = read_data(args.A, args.Aby, args.Acol, args.Sep, args.H)
D2 = read_data(args.B, args.Bby, args.Bcol, args.Sep, args.H)
D = merge_dict(D1,D2, args.s)
outline = []
header = 'Name\t%s\n' % ('\t'.join(D['header']))
outline.append(header)
del D['header']
for k in D:
line = '%s\t%s\n' % (k, '\t'.join(D[k]))
outline.append(line)
args.Out.writelines(outline)
| 2.828125
| 3
|
hcap_geo/models/__init__.py
|
fabiommendes/capacidade_hospitalar
| 0
|
12780981
|
from .region import Region
| 1.101563
| 1
|
basic/module/custom/createModule/importModuleDemo.py
|
gwaysoft/python
| 0
|
12780982
|
<gh_stars>0
import sys
import createModuleDemo as mo, os
from createModuleDemo01 import add as a
from createModuleDemo02 import *
# import default path
print(sys.path)
print(mo.add(3, 4.555))
print(a(30, 4))
print(add(34, 4))
# show method
print(dir())
| 2.015625
| 2
|
server/kvls/lang.py
|
Antyos/kvlang-vscode
| 10
|
12780983
|
<reponame>Antyos/kvlang-vscode
"""Module import kivy Parser is available. Otherwise fake class is created."""
from __future__ import absolute_import
import os
# Disable stdout printout from kivy
os.environ["KIVY_NO_FILELOG"] = "1"
os.environ["KIVY_NO_CONSOLELOG"] = "1"
# Import Kivy parser
# Disable pylint warning, because environment variables must be set
# before import of module kivy. Create fake classes when import error appear "Duck typing"
KIVY_IMPORTED = True
KIVY_IMPORT_MSG = """KvLint was not able import kivy module.
Please check if module is installed under currently used Kvlang: Python Path.
"""
try:
from kivy.lang import Parser, ParserException # pylint: disable=unused-import
except ImportError:
KIVY_IMPORTED = False
class Parser(object):
"""Fake class when import can't be done of kivy module."""
def __init__(self, content):
"""Fake initialization."""
pass
class ParserException(BaseException):
"""Fake class when import can't be done of kivy module."""
pass
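# Note (added for clarity): callers of this module can check KIVY_IMPORTED before using
# Parser, and report KIVY_IMPORT_MSG to the user when only the fake fallback classes
# above are available.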
| 2.40625
| 2
|
calliope_app/api/migrations/0010_auto_20190730_1726.py
|
jmorrisnrel/engage
| 3
|
12780984
|
# Generated by Django 2.1.4 on 2019-07-30 17:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0009_auto_20190710_2235'),
]
operations = [
migrations.RemoveField(
model_name='carrier',
name='model',
),
migrations.DeleteModel(
name='Carrier',
),
]
| 1.398438
| 1
|
code/python/src/utility/logger.py
|
manurare/360monodepth
| 0
|
12780985
|
import logging
import traceback
import colorama
from colorama import Fore, Back, Style
colorama.init()
class CustomFormatter(logging.Formatter):
"""Logging Formatter to add colors and count warning / errors
reference: https://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output/
"""
import platform
if platform.system() == 'Windows':
grey = "\x1b[38;21m"
yellow = "\x1b[33;21m"
magenta = "\x1b[35;21m"
red = "\x1b[31;21m"
reset = "\x1b[0m"
else:
grey = Style.DIM
yellow = Fore.YELLOW
magenta = Fore.MAGENTA
red = Fore.RED
reset = Style.RESET_ALL
# format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
FORMATS = {
logging.DEBUG: grey + format + reset,
logging.INFO: grey + format + reset,
logging.WARNING: yellow + format + reset,
logging.ERROR: red + format + reset,
logging.CRITICAL: red + format + reset
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
class Logger:
def __init__(self, name=None):
# create logger
self.logger = logging.getLogger(name)
self.logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
# add formatter
handler.setFormatter(CustomFormatter())
# add handler to logger
self.logger.addHandler(handler)
def debug(self, message):
self.logger.debug(message)
def info(self, message):
self.logger.info(message)
def warn(self, message):
self.logger.warning(message)
def print_stack(self):
print("---traceback---")
for line in traceback.format_stack():
print(line.strip())
def error(self, message):
self.logger.error(message)
self.print_stack()
exit()
def critical(self, message):
self.logger.critical(message)
self.print_stack()
| 3.171875
| 3
|
tests/apimodels/UserResource_test.py
|
j-000/ezresbackend
| 0
|
12780986
|
import requests
from . import UserModel, db, DEVELOPMENT
base_url = 'http://localhost:5000/api/user'
if not DEVELOPMENT:
raise SystemError('Not in development mode!')
exit()
def prepare_db():
db.drop_all()
db.create_all()
test_email = UserModel.fetch(email='<EMAIL>')
if test_email:
UserModel.delete(test_email)
def test_prepare_db():
prepare_db()
assert UserModel.fetch(email='<EMAIL>') is None
def test_get_request():
req = requests.get(base_url)
json_response = req.json()
# status code 200
assert req.status_code == 200
# usersRegistered as a property of response object
assert 'usersRegistered' in json_response.keys()
# usersRegistered value is a number
assert type(json_response['usersRegistered']) is int
def test_create_user():
test_user = {'name': 'test', 'email': '<EMAIL>', 'password':'<PASSWORD>'}
req = requests.post(base_url, json=test_user)
# status code 200
assert req.status_code == 200
# assert properties contain:
# 'message', 'success', 'user'
assert 'message' in req.json().keys()
assert 'success' in req.json().keys()
assert 'user' in req.json().keys()
# assert 'user' response object only contains 'email','id','is_admin' and 'name'
    expected_response = {
"message": "User created.",
"success": True,
"user": {
"email": "<EMAIL>",
"id": 1,
"is_admin": False,
"name": "test"
}
}
# assert response matches expected response
    assert expected_response.items() == req.json().items()
def test_user_in_db():
test_user = UserModel.fetch(email='<EMAIL>')
# assert user is created
assert test_user is not None
# assert user object properties in db match
assert test_user.name == 'test'
assert test_user.id == 1
assert test_user.email == '<EMAIL>'
assert test_user.check_password('<PASSWORD>') == True
assert test_user.is_admin == False
def test_missing_params():
test_user = {'name': 'test'}
req = requests.post(base_url, json=test_user)
# assert status code
assert req.status_code == 200
    # assert response object matches the expected one
expected_response = {'error':'Missing email field.'}
assert expected_response.items() == req.json().items()
def test_missing_payload():
req = requests.post(base_url, json=None)
# assert status code
assert req.status_code == 200
    # assert response object matches the expected one
expected_response = {'error':'No data was sent with the request.'}
assert expected_response.items() == req.json().items()
def test_already_exists():
test_user = {'name': 'test', 'email': '<EMAIL>', 'password':'<PASSWORD>'}
req = requests.post(base_url, json=test_user)
# status code 200
assert req.status_code == 200
    # assert response object matches the expected one
expected_response = {'error':'Email is already registered.'}
assert expected_response.items() == req.json().items()
def test_methods_not_allowed():
put_req = requests.put(base_url)
delete_req = requests.delete(base_url)
# same response is expected for both
expected_response = {'error':'Method not allowed.'}
# status code 200
assert put_req.status_code == 200
assert delete_req.status_code == 200
# assert expected response matched
assert put_req.json().items() == expected_response.items()
assert delete_req.json().items() == expected_response.items()
def test_clear_db():
prepare_db()
assert UserModel.fetch(email='<EMAIL>') is None
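# Note (added): these tests talk to a live development server at base_url
# (http://localhost:5000/api/user) and are intended to be run with pytest while the API
# is running locally with DEVELOPMENT enabled.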
| 2.59375
| 3
|
The improved SMDS.py
|
0AnonymousSite0/Social-media-data-to-Interrelated-informtion-to-Parameters-of-virtual-road-model
| 1
|
12780987
|
import tweepy
import re
import os, logging, datetime, argparse
from logging.handlers import RotatingFileHandler
import json
from prepare_data_for_labeling_infer import prepare_data_for_subject_object_labeling_infer
from produce_submit_json_file import Sorted_relation_and_entity_list_Management
from DataInteroperability import SPO2KG_Function
import sys
import subprocess
from py2neo import Graph, Node, Relationship,NodeMatcher
CONSUMER_KEY = "1LBqUbcbBOAD6LKsq2f49yHVM"
CONSUMER_SECRET = "<KEY>"
OAUTH_TOKEN = "<KEY>"
OAUTH_TOKEN_SECRET = "<KEY>"
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
api = tweepy.API(auth)
graph = Graph('http://localhost:7474', username='neo4j', password='<PASSWORD>')
def classify_tweets_related_or_not(Id):
data_dir=r"--data_dir=file directory of classification model\bin/predicate_classifiction/classification_data/"+Id
output_dir=r" --output_dir=file directory of classification model/output/predicate_classification_model/epochs1700/"+Id+r"/"
os.makedirs("file directory of classification model/output/predicate_classification_model/epochs1700/"+Id+r"/")
classification_command=r"C:\Users\CivilIM\Anaconda3\envs\TF115P37\python.exe file directory of classification model\run_predicate_classification.py "+data_dir+output_dir
print (data_dir)
os.system(classification_command)
def check_classification_result(Id):
f = open(r"file directory of classification model/output/predicate_classification_model/epochs1700/"+Id+r"/predicate_predict.txt")
classification_result=f.read()
classification_result=classification_result.replace("\n", "")
print(classification_result)
if classification_result=="traffic":
return True
else:
return False
def infer_relations(Id):
data_dir = r"--data_dir=file directory of classification model\bin/predicate_classifiction/classification_data/" + Id
output_dir = r" --output_dir=file directory of models for inferring interrelations and recognizing subject/objects/output/predicate_classification_model/epochs700/" + Id + r"/"
os.makedirs("file directory of models for inferring interrelations and recognizing subject/objects/output/predicate_classification_model/epochs700/"+Id+r"/")
infer_relations_command = r"C:\Users\CivilIM\Anaconda3\envs\TF115P37\python.exe file directory of models for inferring interrelations and recognizing subject/objects\run_predicate_classification.py " + data_dir + output_dir
os.system(infer_relations_command)
print("finish infer_relations")
def check_inferred_relations(Id):
f = open(r"file directory of models for inferring interrelations and recognizing subject/objects/output/predicate_classification_model/epochs700/"+Id+r"/predicate_predict.txt")
relations_result=f.read()
relations_result=relations_result.replace("\n", "")
str_list = relations_result.split(" ")
print("check_inferred_relations",str_list)
if ("Road_status" in str_list) and ( "Road_position" in str_list) and ("Lane_of_Road" not in str_list) :
return True
elif ("Road_status" in str_list) and ( "Road_position" in str_list) and ("Lane_of_Road" in str_list) and ("Lane_status" in str_list) and (( "Lane_position" in str_list) or ("Lane_direction" in str_list)):
return True
else:
return False
def prepare_data_for_extracting_SO(Id):
data_dir = r"file directory of classification model\bin/predicate_classifiction/classification_data/" + Id+"/test"
predicate_classifiction_infer_file_dir=r"file directory of models for inferring interrelations and recognizing subject/objects\output\predicate_classification_model\epochs700/"+Id+"/"
output_dir = r"file directory of models for inferring interrelations and recognizing subject/objects\bin\subject_object_labeling\sequence_labeling_data/" + Id + r"/test"
os.makedirs(r"file directory of models for inferring interrelations and recognizing subject/objects\bin\subject_object_labeling\sequence_labeling_data/" + Id + r"/test/")
prepare_data_for_subject_object_labeling_infer(data_dir,predicate_classifiction_infer_file_dir,output_dir)
def extract_SO(Id):
data_dir = r"--data_dir=file directory of models for inferring interrelations and recognizing subject/objects\bin\subject_object_labeling\sequence_labeling_data/" + Id
output_dir = r" --output_dir=file directory of models for inferring interrelations and recognizing subject/objects\output/sequnce_infer_out/epochs700/ckpt12415/" + Id
os.makedirs(r"file directory of models for inferring interrelations and recognizing subject/objects\output/sequnce_infer_out/epochs700/ckpt12415/"+Id+r"/")
extract_SO_command = r"C:\Users\CivilIM\Anaconda3\envs\TF115P37\python.exe file directory of models for inferring interrelations and recognizing subject/objects\run_sequnce_labeling.py " + data_dir + output_dir
os.system(extract_SO_command)
def generate_json_result(Id):
spo_list_manager = Sorted_relation_and_entity_list_Management(
r"file directory of models for inferring interrelations and recognizing subject/objects\bin\subject_object_labeling\sequence_labeling_data/"+Id+"/test",
r"file directory of models for inferring interrelations and recognizing subject/objects\output\sequnce_infer_out\epochs700\ckpt12415/"+Id+"/",
Competition_Mode=True)
spo_list_manager.produce_output_file(
OUT_RESULTS_DIR=r"file directory of models for inferring interrelations and recognizing subject/objects/output/final_text_spo_list_result/"+Id,
keep_empty_spo_list=True)
#override tweepy.StreamListener to add logic to on_status
class MyStreamListener(tweepy.StreamListener):
def on_data(self, data):
#print data
with open('fetched_tweets.json','a') as tf:
tf.write(data)
data = json.loads(data)
print (data)
tweet=data['text']
tweet=re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()]+", " ", tweet)
tweet=tweet.replace(" "," ")
tweet = tweet.replace(" ", " ")
tweet = tweet.replace(" ", " ")
tweet = tweet.replace(" ", " ")
tweet = tweet.replace(" ", " ")
print(tweet)
print(data['id'])
tweet_storage=r"file directory of classification model\bin/predicate_classifiction/classification_data/"+str(data['id'])+r"/test/"
os.makedirs(tweet_storage)
with open(tweet_storage+"text.txt",'w') as tf:
tf.write(tweet)
with open(tweet_storage+"token_in.txt",'w') as tf:
tf.write(tweet)
with open(tweet_storage+"token_in_not_UNK.txt",'w') as tf:
tf.write(tweet)
if str(data['user']['id'])=="1348585566040772609": # only for test
classify_tweets_related_or_not(str(data['id']))
print("check_classification_result(str(data['id']))",check_classification_result(str(data['id'])))
if check_classification_result(str(data['id']))==True:
infer_relations(str(data['id']))
print("check_inferred_relations(str(data['id']))", check_inferred_relations(str(data['id'])))
if check_inferred_relations(str(data['id']))==True:
prepare_data_for_extracting_SO(str(data['id']))
print("prepare_data_for_extracting_SO finish")
extract_SO(str(data['id']))
print("extract_SO finish")
generate_json_result(str(data['id']))
print("generate_json_result finish")
SPO2KG_Function(r"file directory of models for inferring interrelations and recognizing subject/objects/output/final_text_spo_list_result/"+str(data['id'])+r"\keep_empty_spo_list_subject_predicate_object_predict_output.json",graph)
print("Tweet2KnowledgeGraph finish")
subprocess.Popen([r"C:\Program Files\ArcGIS\Pro/bin\Python\envs/arcgispro-py3\python.exe", r"D:/ZHOUSHENGHUA/PythonNeo4j/Convert_Information_triplets to_VRM_parameters.py"])
return True
def on_error(self, status):
print (status)
def on_status(self, status):
print(status.text)
myStreamListener = MyStreamListener()
myStream = tweepy.Stream(auth=api.auth, listener=myStreamListener)
#api.verify_credentials()
myStream.filter(track=["keywords for searching tweets"], filter_level="low",locations=["depends on the city"], is_async=True)
print (myStream)
#print (myStreamListener.on_status())
#myStream.disconnect()
print ("OK")
| 2.265625
| 2
|
packages/weevely/modules/sql/dump.py
|
ZooAtmosphereGroup/HelloPackages
| 0
|
12780988
|
<reponame>ZooAtmosphereGroup/HelloPackages
from core.vectors import PhpFile, ShellCmd
from core.module import Module
from core.loggers import log
from core import messages
import tempfile
import os
class Dump(Module):
"""Multi dbms mysqldump replacement."""
def init(self):
self.register_info(
{
'author': [
'<NAME>'
],
'license': 'GPLv3'
}
)
self.register_vectors(
[
ShellCmd(
payload="mysqldump -h ${host} -u${user} -p${passwd} ${db} ${table} --single-transaction",
name='mysqldump_sh'
),
PhpFile(
payload_path=os.path.join(self.folder, 'mysqldump.tpl'),
name='mysqldump_php',
)
]
)
self.register_arguments([
{'name': 'db', 'help': 'Db to dump'},
{'name': 'user', 'help': 'SQL username'},
# Using passwd instead of pass to avoid rendering the `pass` keyword
{'name': 'passwd', 'help': 'SQL password'},
{'name': '-dbms', 'help': 'Db type. Vector \'mysqldump_sh\' supports only \'mysql\'.',
'choices': ('mysql', 'pgsql', 'sqlite', 'dblib'), 'default': 'mysql'},
{'name': '-host', 'help': 'Db host or host:port', 'nargs': '?', 'default': '127.0.0.1'},
{'name': '-lpath', 'help': 'Dump to local path (default: temporary file)'},
{'name': '-vector', 'choices': self.vectors.get_names(), 'default': 'mysqldump_php'}
])
def run(self):
self.args['table'] = self.args.get('table', '')
if self.args['vector'] == 'mysqldump_sh' and self.args['dbms'] != 'mysql':
log.warn(messages.module.vector_s_not_support_arg_s_s % (
self.args['vector'],
'dbms',
self.args['dbms'])
)
return
vector_name, result = self.vectors.find_first_result(
names=[self.args.get('vector')],
format_args=self.args,
condition=lambda r: r and '-- Dumping data for table' in r
)
if not vector_name:
log.warn(messages.module_sql_dump.sql_dump_failed_check_credentials)
return
# Get a temporary file name if not specified
lpath = self.args.get('lpath')
if not lpath:
temp_file = tempfile.NamedTemporaryFile(
suffix='_%s_%s_%s_%s.sqldump' % (
self.args['user'], self.args['passwd'], self.args['host'], self.args['db']
),
delete=False
)
lpath = temp_file.name
if not os.path.isabs(lpath):
if lpath.startswith('~'):
lpath = os.path.expanduser('~') + os.path.join('/', lpath.replace('~/', ''))
else:
lpath = os.path.join(os.getcwd(), lpath)
path, filename = os.path.split(lpath)
if not os.path.exists(path):
os.makedirs(path)
try:
open(lpath, 'w').write(result)
except Exception as e:
log.warn(
messages.generic.error_creating_file_s_s % (lpath, e)
)
return
log.info(messages.module_sql_dump.sql_dump_saved_s % lpath)
| 2.265625
| 2
|
MindLink-Eumpy/test/xmlAnalyzer.py
|
Breeze1in1drizzle/MindLink-Exploring
| 7
|
12780989
|
<reponame>Breeze1in1drizzle/MindLink-Exploring
'''
XML file parser.
Originally used to extract the valence and arousal values.
'''
import xml.dom.minidom
def read_xml(filepath):
dom = xml.dom.minidom.parse(filepath)
session = dom.documentElement
arousal = float(session.getAttribute("feltArsl"))
print("arousal: ", arousal, " (type: ", type(arousal), ")")
valence = session.getAttribute("feltVlnc")
print("valence: ", valence, " (type: ", type(valence), ")")
def test(filepath):
dom = xml.dom.minidom.parse(filepath)
print('read xml successfully.')
root = dom.documentElement
itemList = root.getElementsByTagName('login')
item = itemList[0]
un = item.getAttribute("username")
print(un)
pd = item.getAttribute("passwd")
print(pd)
ii = root.getElementsByTagName('item')
i1 = ii[0]
i = i1.getAttribute("id")
print(i)
i2 = ii[1]
i = i2.getAttribute("id")
print(i)
if __name__ == '__main__':
read_xml('session.xml')
# test(filepath='abc.xml')
| 3.078125
| 3
|
datasets/coco_merger.py
|
scheckmedia/centernet-uda
| 19
|
12780990
|
<reponame>scheckmedia/centernet-uda<filename>datasets/coco_merger.py
import logging
import numpy as np
import hydra
from torch.utils import data
log = logging.getLogger(__name__)
class Dataset(data.Dataset):
def __init__(self, datasets, max_samples=None, **defaults):
        self.max_samples = max_samples
self.datasets = {}
self.num_samples = 0
for ds in datasets:
coco = hydra.utils.get_class(
f'datasets.{ds.name}.Dataset')
params = {**defaults, **ds.params}
coco = coco(**params)
self.num_samples += len(coco)
self.datasets[self.num_samples] = coco
self.intervals = np.array(list(self.datasets.keys()))
log.info(
f"merged {len(self.datasets)} datasets with a total "
f"number of {self.num_samples} samples")
def __len__(self):
return self.num_samples
def __getitem__(self, index):
interval_idx = np.argmax(index < self.intervals)
interval = self.intervals[interval_idx]
offset = self.intervals[interval_idx - 1] if interval_idx > 0 else 0
return self.datasets[interval].__getitem__(index - offset)
| 2.15625
| 2
|
specify/result.py
|
Einenlum/spyec
| 0
|
12780991
|
<gh_stars>0
from typing import List
class ResultLine:
'''
A result is made of several result lines
'''
def __init__(self, spec_class, test_name, exception=None):
'''
Take the spec_class (object_behavior), the test name (it_…),
and the exception if any
'''
self.spec_class = spec_class
self.test_name = test_name
self.exception = exception
class Result:
'''
The final Result object that will contain the whole results
'''
def __init__(self, result_lines: List[ResultLine]):
self.result_lines = result_lines
def _prettify_test_name(self, test_name):
'''
"it_works_correctly" becomes "it works correctly"
'''
return test_name.replace('_', ' ')
def _print_line(self, num: int, line: ResultLine):
'''
Take an index and a result line, and transform it
to a valid TAP line ("ok 1 - Test name…")
'''
if line.exception is None:
prefix = 'ok'
else:
prefix = 'not ok'
desc = f"{line.spec_class.__qualname__}: {self._prettify_test_name(line.test_name)}"
text = f"{prefix} {num} - {desc}"
if line.exception is not None:
text = text + "\n ---"
text = text + "\n " + line.exception
text = text + "\n ..."
return text
def export(self):
'''
Export the final output for a valid TAP result
'''
text = "TAP version 13\n"
last_elem = len(self.result_lines)
text = text + f"1..{last_elem}\n"
for index, line in enumerate(self.result_lines):
text = text + "\n" + self._print_line(index+1, line)
return text
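# --- Hedged usage sketch (added for illustration; "CalculatorSpec" and the test names
# below are made-up placeholders, not part of the library) ---
if __name__ == '__main__':
    class CalculatorSpec:
        pass

    demo_lines = [
        ResultLine(CalculatorSpec, 'it_adds_two_numbers'),
        ResultLine(CalculatorSpec, 'it_divides_by_zero',
                   exception='ZeroDivisionError: division by zero'),
    ]
    # Prints a TAP report: "ok 1 - ..." for the passing line and "not ok 2 - ..."
    # followed by the exception text for the failing one.
    print(Result(demo_lines).export())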
| 3.328125
| 3
|
progSnips/boundingBox/openCam.py
|
PhilTKamp/SDMinesLabDisplay
| 0
|
12780992
|
from cb import drawBox
import cb
import cv2
import numpy as np
def dynamicColorMask():
# Begins reading from the default webcam
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
cv2.namedWindow('Feed')
cv2.setMouseCallback('Feed', drawBox)
while(1):
# Reads the next frame from the camera
ret, frame = cap.read()
if ret == False:
break
cv2.rectangle(frame, (cb.ix, cb.iy), (cb.fx, cb.fy), (255, 204, 51), 1)
cv2.imshow('Feed', frame)
# Use 'Q' to exit program
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
cap.release()
dynamicColorMask()
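# Note (added): cb.drawBox is registered as the mouse callback above and is expected to
# update cb.ix, cb.iy, cb.fx and cb.fy while the user drags, which the loop uses to draw
# the selection rectangle on every frame.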
| 2.8125
| 3
|
show_parametrization_map.py
|
luca-morreale/neural_surface_maps
| 36
|
12780993
|
import torch
from models import SurfaceMapModel
from models import InterMapModel
from utils import show_mesh
from utils import show_mesh_2D
SURFACE_PATH = '/SET/HERE/YOUR/PATH'
CHECKPOINT_PATH = '/SET/HERE/YOUR/PATH'
def main() -> None:
torch.set_grad_enabled(False)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
meta = SurfaceMapModel()
net = InterMapModel()
data = torch.load(SURFACE_PATH)
source = data['grid'].to(device).float()
faces = data['faces'].long()
weights = data['weights']
net.load_state_dict(torch.load(CHECKPOINT_PATH))
net = net.to(device)
for k in weights.keys():
weights[k] = weights[k].to(device).detach()
# generate mesh at GT vertices
surface = meta(source, weights)
param = net(source)
show_mesh_2D('param.png', param, faces)
show_mesh('param_small.ply', param, surface, faces)
# generate mesh at sample vertices
source = data['visual_grid'].to(device).float()
faces = data['visual_faces'].long()
surface = meta(source, weights)
param = net(source)
show_mesh_2D('param_big.png', param, faces)
show_mesh('neural_surface_big.ply', param, surface, faces)
if __name__ == '__main__':
main()
| 2.296875
| 2
|
Scraper/templatetags/tags.py
|
FadedCoder/AnimeScraperGUI
| 3
|
12780994
|
<filename>Scraper/templatetags/tags.py
from django import template
APP_NAME = "Anime Scraper"
register = template.Library()
@register.simple_tag
def app_name(request):
return APP_NAME
@register.assignment_tag(takes_context=False)
def user_logged_in(request):
if request.user.is_authenticated:
return True
return False
@register.simple_tag
def user_name(request):
if request.user.is_authenticated:
if request.user.first_name:
return request.user.first_name
elif request.user.last_name:
return request.user.last_name
else:
return request.user.username
return "Hacker alert"
| 2.484375
| 2
|
model.py
|
rahulrajeev21/PointNet
| 0
|
12780995
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
class TNet(nn.Module):
def __init__(self, k=64):
super(TNet, self).__init__()
self.k = k
# Each layer has batchnorm and relu on it
# conv 3 64
self.conv1 = nn.Sequential(nn.Conv1d(k, 64, 1), nn.BatchNorm1d(64),
nn.ReLU(inplace=True))
# conv 64 128
self.conv2 = nn.Sequential(nn.Conv1d(64, 128, 1), nn.BatchNorm1d(128),
nn.ReLU(inplace=True))
# conv 128 1024
self.conv3 = nn.Sequential(nn.Conv1d(128, 1024, 1), nn.BatchNorm1d(1024),
nn.ReLU(inplace=True))
# max pool
self.mpool = nn.Sequential(nn.AdaptiveMaxPool1d(1))
# fc 1024 512
self.fc1 = nn.Sequential(nn.Linear(1024, 512), nn.BatchNorm1d(512),
nn.ReLU(inplace=True))
# fc 512 256
self.fc2 = nn.Sequential(nn.Linear(512, 256), nn.BatchNorm1d(256),
nn.ReLU(inplace=True))
# fc 256 k*k (no batchnorm, no relu)
self.fc3 = nn.Linear(256, k * k)
# add bias
self.fc3.bias = torch.nn.Parameter(torch.eye(k).view(-1))
# reshape
def forward(self, x):
batch_size = x.size()[0]
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.mpool(x)
x = x.view(x.shape[:-1])
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
x = x.view(batch_size, self.k, self.k)
return x
class PointNetfeat(nn.Module):
def __init__(self, global_feat=True, feature_transform=False):
super(PointNetfeat, self).__init__()
self.feature_transform = feature_transform
# Use TNet to apply transformation on input and multiply the input points with the transformation
self.tnet1 = TNet(k=3)
# conv 3 64
self.conv1 = nn.Sequential(nn.Conv1d(3, 64, 1), nn.BatchNorm1d(64),
nn.ReLU(inplace=True))
# Use TNet to apply transformation on features and multiply the input features with the transformation
# (if feature_transform is true)
# conv 64 128
self.conv2 = nn.Sequential(nn.Conv1d(64, 128, 1), nn.BatchNorm1d(128),
nn.ReLU(inplace=True))
# conv 128 1024 (no relu)
self.conv3 = nn.Sequential(nn.Conv1d(128, 1024, 1), nn.BatchNorm1d(1024))
# max pool
self.mpool = nn.Sequential(nn.AdaptiveMaxPool1d(1))
self.global_feat = global_feat
self.feature_transform = feature_transform
if self.feature_transform:
self.tnet2 = TNet(k=64)
def forward(self, x):
n_pts = x.size()[2]
# You will need these extra outputs:
# trans = output of applying TNet function to input
# trans_feat = output of applying TNet function to features (if feature_transform is true)
trans = self.tnet1(x)
x = x.transpose(2, 1)
x = torch.bmm(x, trans)
x = x.transpose(2, 1)
x = self.conv1(x)
if self.feature_transform:
trans_feat = self.tnet2(x)
x = x.transpose(2, 1)
x = torch.bmm(x, trans_feat)
x = x.transpose(2, 1)
else:
trans_feat = None
pointfeat = x
x = self.conv2(x)
x = self.conv3(x)
x = self.mpool(x)
x = x.view(x.shape[:-1])
if self.global_feat: # This shows if we're doing classification or segmentation
return x, trans, trans_feat
else:
x = x.view(-1, 1024, 1).repeat(1, 1, n_pts)
return torch.cat([x, pointfeat], 1), trans, trans_feat
class PointNetCls(nn.Module):
def __init__(self, k=2, feature_transform=False):
super(PointNetCls, self).__init__()
self.feature_transform = feature_transform
self.feat = PointNetfeat(global_feat=True, feature_transform=feature_transform)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k)
self.dropout = nn.Dropout(p=0.3)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.relu = nn.ReLU()
def forward(self, x):
x, trans, trans_feat = self.feat(x)
x = F.relu(self.bn1(self.fc1(x)))
x = F.relu(self.bn2(self.dropout(self.fc2(x))))
x = self.fc3(x)
return F.log_softmax(x, dim=1), trans, trans_feat
class PointNetDenseCls(nn.Module):
def __init__(self, k=2, feature_transform=False):
super(PointNetDenseCls, self).__init__()
self.k = k
self.feature_transform = feature_transform
# get global features + point features from PointNetfeat
self.pointNetFeat = PointNetfeat(global_feat=False, feature_transform=feature_transform)
# conv 1088 512
self.conv1 = nn.Sequential(nn.Conv1d(1088, 512, 1), nn.BatchNorm1d(512),
nn.ReLU(inplace=True))
# conv 512 256
self.conv2 = nn.Sequential(nn.Conv1d(512, 256, 1), nn.BatchNorm1d(256),
nn.ReLU(inplace=True))
# conv 256 128
self.conv3 = nn.Sequential(nn.Conv1d(256, 128, 1), nn.BatchNorm1d(128),
nn.ReLU(inplace=True))
# conv 128 k
self.conv4 = torch.nn.Conv1d(128, self.k, 1)
# softmax
def forward(self, x):
# You will need these extra outputs:
# trans = output of applying TNet function to input
# trans_feat = output of applying TNet function to features (if feature_transform is true)
# (you can directly get them from PointNetfeat)
batch_size = x.size()[0]
n_pts = x.size()[2]
x, trans, trans_feat = self.pointNetFeat(x)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = x.transpose(2, 1)
x = x.reshape(-1, self.k)
x = F.log_softmax(x, dim=-1)
x = x.view(batch_size, n_pts, self.k)
return x, trans, trans_feat
def feature_transform_regularizer(trans):
# compute |((trans * trans.transpose) - I)|^2
I_matrix = torch.eye(trans.size()[1])[None, :, :]
AAT = torch.bmm(trans, trans.transpose(2, 1))
diffMat = AAT - I_matrix.cuda()
loss = torch.norm(diffMat, dim=(1, 2))
loss = torch.mean(loss)
return loss
if __name__ == '__main__':
sim_data = Variable(torch.rand(32, 3, 2500))
print('Input data dimensions:', sim_data.size())
trans = TNet(k=3)
out = trans(sim_data)
print('TNet', out.size())
print('loss', feature_transform_regularizer(out.cuda()))
feat_trans_reg_testmat = Variable(torch.rand(16, 64, 3))
print('Input Matrix for FT Regularizer:', feat_trans_reg_testmat.size())
print('Feature Transform Regularizer Output: ', feature_transform_regularizer(feat_trans_reg_testmat.cuda()))
sim_data_64d = Variable(torch.rand(32, 64, 2500))
trans = TNet(k=64)
out = trans(sim_data_64d)
print('TNet 64d', out.size())
print('loss', feature_transform_regularizer(out.cuda()))
pointfeat = PointNetfeat(global_feat=True)
print('Input data dimensions:', sim_data.size())
out, _, _ = pointfeat(sim_data)
print('global feat', out.size())
pointfeat = PointNetfeat(global_feat=False)
print('Input data dimensions:', sim_data.size())
out, _, _ = pointfeat(sim_data)
print('point feat', out.size())
cls = PointNetCls(k=5)
print('Input data dimensions:', sim_data.size())
out, _, _ = cls(sim_data)
print('class', out.size())
seg = PointNetDenseCls(k=3)
print('Input data dimensions:', sim_data.size())
out, _, _ = seg(sim_data)
print('seg', out.size())
| 2.734375
| 3
|
cgat/tools/bam2depth.py
|
cgat-developers/cgat-apps
| 19
|
12780996
|
'''output depth statistics for a BAM file.
'''
import collections
import subprocess
import re
import os
import shlex
import sys
import cgatcore.experiment as E
import cgatcore.iotools as iotools
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.ArgumentParser(description=__doc__)
parser.add_argument(
"--input-filename-fasta", dest="input_filename_fasta", type=str,
help="filename with reference sequence in fasta format ")
parser.add_argument(
"--counting-mode", dest="counting_mode", type=str,
choices=("all", "pileup_defaults"),
help="counting mode. all=all reads/bases. pileup-defaults= "
"use default pileup thresholds. Options will be added to "
"--mpileup-options. .")
parser.add_argument(
"--mpileup-options", dest="mpileup_options", type=str,
help="pileup options to use ")
parser.set_defaults(
mpileup_options="",
counting_mode="all",
input_filename_fasta=None,
report_step=1000000,
)
# add common options (-h/--help, ...) and parse command line
(args) = E.start(parser, argv=argv, add_output_options=True)
bamfile = args[0]
mpileup_options = args.mpileup_options
if args.counting_mode == "all":
mpileup_options += " -Q 0 -B -A"
read_depth_histogram = collections.defaultdict(int)
base_depth_histogram = collections.defaultdict(int)
# deletions are marked by something like -2AA at the first
# position and a '*' for subsequent positions
rx_deletions = re.compile("([-][0-9]+|[*])")
report_step = args.report_step
npositions = 0
samtools = iotools.which("samtools")
statement = (
"{samtools} mpileup "
"-f {reference_fasta} "
"{mpileup_options} "
"{bamfile} ".format(
samtools=samtools,
reference_fasta=args.input_filename_fasta,
mpileup_options=mpileup_options,
bamfile=os.path.abspath(bamfile)))
E.info("running the following statement: {}".format(statement))
cmd_args = shlex.split(statement)
proc = subprocess.Popen(
cmd_args,
shell=False,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=os.path.abspath(os.curdir))
for line in proc.stdout:
line = line.decode("utf-8")
contig, pos, base, read_depth, info, qualities = line[:-1].split("\t")
read_depth = int(read_depth)
pos = int(pos)
if pos % report_step == 0:
E.info("working on {}: {}".format(contig, pos))
ndeletions = len(rx_deletions.findall(info))
base_depth = read_depth - ndeletions
read_depth_histogram[read_depth] += 1
base_depth_histogram[base_depth] += 1
for line in proc.stderr:
E.warn(line)
keys = sorted(set(read_depth_histogram.keys()).union(
base_depth_histogram.keys()))
args.stdout.write("depth\tread_depth_positions\tbase_depth_positions\n")
for key in keys:
args.stdout.write("{}\t{}\t{}\n".format(
key,
read_depth_histogram[key],
base_depth_histogram[key]))
E.info("positions tested: {}".format(sum(read_depth_histogram.values())))
E.stop()
| 2.375
| 2
|
presqt/api_v1/utilities/validation/get_process_info_data.py
|
craig-willis/presqt
| 0
|
12780997
|
<gh_stars>0
from rest_framework import status
from presqt.utilities import read_file
from presqt.utilities import PresQTValidationError
def get_process_info_data(action, ticket_number):
"""
Get the JSON from process_info.json in the requested ticket number directory.
Parameters
----------
action : str
The action directory we should look in for the ticket_number directory
ticket_number : str
Requested ticket_number directory the JSON should live in
Returns
-------
JSON dictionary representing the process_info.json data.
"""
try:
return read_file('mediafiles/{}/{}/process_info.json'.format(action, ticket_number), True)
except FileNotFoundError:
raise PresQTValidationError("PresQT Error: Invalid ticket number, '{}'.".format(ticket_number),
status.HTTP_404_NOT_FOUND)
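# Hedged usage sketch (added; the action and ticket number are made-up examples):
#   data = get_process_info_data('downloads', '8a7f2c31')
# reads 'mediafiles/downloads/8a7f2c31/process_info.json' and returns the parsed JSON,
# raising PresQTValidationError with a 404 status if the file does not exist.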
| 2.5
| 2
|
simple_monitor_alert/tests/base.py
|
Nekmo/simple-monitor-alert
| 33
|
12780998
|
import os
import sys
from configparser import NoSectionError
from simple_monitor_alert.alerts import Alerts
from simple_monitor_alert.lines import Observable, ItemLine
from simple_monitor_alert.monitor import Monitors
from simple_monitor_alert.sma import Results, Config, MonitorsInfo
from simple_monitor_alert.utils.files import JSONFile
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
MONITORS_DIR = os.path.join(BASE_DIR, 'assets', 'monitors')
class FakeJSONFile(JSONFile):
def __init__(self, data):
super(FakeJSONFile, self).__init__('/Fake-JSON-File', create=False)
self.update(data)
def read(self):
pass
def write(self):
pass
class FakeObservableResults(FakeJSONFile, Results):
monitor = None
def __init__(self, data=None, sma=None):
data = data or {'monitors': {}}
super(FakeObservableResults, self).__init__(data)
self.sma = sma
def get_observable_result(self, observable):
monitor = self['monitors'].get(getattr(observable, 'monitor', self.monitor), {})
result = monitor.get(observable.name, self.get_default_observable_result())
monitor[observable.name] = result
self['monitors'][getattr(observable, 'monitor', self.monitor)] = monitor
return result
class FakeMonitorsInfo(FakeJSONFile, MonitorsInfo):
pass
class FakeMonitors(Monitors):
pass
class FakeSMA(object):
def __init__(self, config=None, monitors_info=None, monitors_dir=MONITORS_DIR):
self.results = FakeObservableResults()
self.monitors_info = monitors_info or FakeMonitorsInfo({})
self.config = config
self.monitors = FakeMonitors(monitors_dir, sma=self)
class FakeAlert(object):
executions = 0
def __init__(self, section):
self.section = section
def send(self, *args, **kwargs):
self.executions += 1
return True
class FakeConfig(Config):
def __init__(self, data):
if sys.version_info >= (3, 0):
super().__init__('/Fake-Config-File')
else:
# Old Style Class
Config.__init__(self, '/Fake-Config-File')
self._data = data
def items(self, section=None, **kwargs):
try:
return self._data[section]
except KeyError:
raise NoSectionError(section)
class TestBase(object):
def get_observable(self):
observable = Observable('test')
observable.add_line(ItemLine('test.expected', '20'))
observable.add_line(ItemLine('test.value', '19'))
return observable
def get_alerts(self, section, sma):
alerts_modules = [FakeAlert(section)]
alerts = Alerts(sma, '/Fake-Alerts-Dir', alerts_modules, [section])
return alerts
def get_results(self, data=None, monitors_info=None):
return FakeObservableResults(data, FakeSMA(monitors_info=monitors_info).monitors_info)
def get_sma(self, section=None, monitors_info=None):
config = FakeConfig({section: ()})
sma = FakeSMA(config, monitors_info=monitors_info)
return sma
| 2.34375
| 2
|
command_safe/safe.py
|
omar-ozgur/Command-Safe
| 0
|
12780999
|
import click
import pickle
from subprocess import call
class Safe:
update = False
def __init__(self, safe_file):
self.safe_file = safe_file
def load(self):
try:
with open(self.safe_file, 'rb') as input:
try:
self.safe = pickle.load(input)
except EOFError:
self.safe = {}
except IOError:
self.safe = {}
def save(self):
with open(self.safe_file, 'wb') as output:
pickle.dump(self.safe, output, 2)
def clear(self):
self.safe = {}
self.update = True
def delete(self, alias):
if alias in self.safe:
del self.safe[alias]
self.update = True
def get_command(self, alias):
if alias in self.safe:
return self.safe[alias]
else:
return None
def set_command(self, alias, command):
self.safe[alias] = command
self.update = True
def execute(self, alias):
call(self.safe[alias], shell=True)
def show(self):
table = [('alias:', 'command:')]
for key, value in self.safe.items():
table.append((key, value))
column_size = [max(map(len, column)) for column in zip(*table)]
format_string = ' | '.join(["{{:<{}}}".format(i) for i in column_size])
table.insert(1, ['-' * i for i in column_size])
for row in table:
click.echo('{}'.format(format_string.format(*row)))
| 3.015625
| 3
|
scripts/setup_demo.py
|
ki-tools/kitools-py
| 4
|
12781000
|
#!/usr/bin/env python3
import argparse
import sys
import os
import uuid
import tempfile
import random
import synapseclient as syn
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir, '..', 'src'))
try:
from kitools import KiProject, DataUri, SysPath, DataTypeTemplate
except Exception as ex:
print('WARNING: Failed to load kitools: {0}'.format(ex))
def gen_id():
return str(uuid.uuid4())[:8]
def mk_dirs(*args):
path = os.path.join(*args)
if not os.path.exists(path):
os.makedirs(path)
return path
def write_random_data_to_file(file_path):
with open(file_path, mode='w') as f:
for _ in range(1, random.randrange(2, 10)):
f.write(str(uuid.uuid4()))
def mk_local_files_and_folders(start_path,
prefix='',
depth=3,
file_count=3,
folder_count=3,
current_depth=0,
syn_client=None,
syn_parent=None):
current_depth += 1
local_results = []
remote_results = []
for _ in range(0, file_count):
filename = '{0}test_file_{1}_{2}.dat'.format(prefix, current_depth, gen_id())
file_path = os.path.join(start_path, filename)
# Fill the file with random data.
write_random_data_to_file(file_path)
local_results.append(file_path)
# Store the file in Synapse
if syn_parent:
syn_file = syn_client.store(syn.File(path=file_path, parent=syn_parent))
remote_results.append(syn_file)
if current_depth < depth:
# Create the folders.
for _ in range(0, folder_count):
foldername = '{0}test_folder_{1}_{2}'.format(prefix, current_depth, gen_id())
folder_path = mk_dirs(start_path, foldername)
local_results.append(folder_path)
# Create the folder in Synapse
if syn_parent:
syn_folder = syn_client.store(syn.Folder(name=foldername, parent=syn_parent))
remote_results.append(syn_folder)
more_locals, more_remotes = mk_local_files_and_folders(folder_path,
prefix=prefix,
depth=depth,
current_depth=current_depth,
syn_client=syn_client,
syn_parent=syn_folder)
local_results += more_locals
remote_results += more_remotes
return local_results, remote_results
def create_demo_curator():
"""
1. Creates a new Synapse Project.
2. Creates the data/[core, artifacts, discovered] folders in Synapse.
3. Creates and uploads files and folders into Synapse.
4. Creates a new KiProject.
5. data_adds each of the root data folders (core, artifacts, discovered) that were created in Synapse.
6. data_pulls all the remote files and folders from Synapse.
7. Creates one new file in each of the local data/[core, artifacts, discovered] folders.
8. data_pushes each of the newly added local files.
9. Changes 3 of the files that were pulled from the Synapse Project and data_pushes them to Synapse.
10. Prints out the commands to load the project, data_add/data_pull the new and changed files.
"""
print('Creating Demo for curator...')
demo_id = gen_id()
demo_commands = []
demo_commands.append('')
demo_commands.append('# Import the KiProject class:')
demo_commands.append('from kitools import KiProject')
kiproject_path = mk_dirs(tempfile.gettempdir(), 'demo_curator_{0}'.format(demo_id))
syn_client = syn.login(silent=True)
# Create the Synapse project
syn_project = syn_client.store(syn.Project(name='Ki Tools Curator Demo - {0}'.format(demo_id)))
kiproject = KiProject(kiproject_path,
no_prompt=True,
title='Demo KiProject {0}'.format(demo_id),
project_uri=DataUri('syn', syn_project.id).uri,
data_type_template=DataTypeTemplate.default().name)
demo_commands.append('')
demo_commands.append('# Open the KiProject:')
demo_commands.append(
'kiproject = KiProject({0}"{1}")'.format(('r' if os.sep == '\\' else ''), kiproject.local_path))
# Add the synapse project files/folders.
syn_temp_dir = mk_dirs(kiproject_path, '.demo-data')
# Create files and folders in each DataType directory.
for data_type in kiproject.data_types:
parent = syn_project
dt_folder_path = mk_dirs(os.path.join(syn_temp_dir, data_type.rel_path))
for name in SysPath(data_type.rel_path).rel_parts:
parent = syn_client.store(syn.Folder(name=name, parent=parent))
kiproject.data_add(DataUri('syn', parent.id).uri, name=parent.name)
mk_local_files_and_folders(dt_folder_path,
depth=3,
prefix='{0}_'.format(data_type.name),
syn_client=syn_client,
syn_parent=parent)
kiproject.data_pull()
# Create some new files for data_add/data_push
demo_commands.append('')
demo_commands.append('# Add some new files and push them:')
for data_type in kiproject.data_types:
dt_folder_path = mk_dirs(data_type.abs_path)
local_results, _ = mk_local_files_and_folders(dt_folder_path, prefix='new_study_file_', depth=0, file_count=1)
for new_filename in local_results:
demo_commands.append('kiproject.data_add({0}"{1}")'.format(
('r' if os.sep == '\\' else ''), SysPath(new_filename, rel_start=kiproject.local_path).rel_path))
demo_commands.append('kiproject.data_push()')
# Create a change in some files for data_push
demo_commands.append('')
demo_commands.append('# Push some changed files:')
change_count = 0
for resource in kiproject.resources:
if change_count >= 3:
break
if resource.abs_path and resource.abs_path.endswith('.dat'):
change_count += 1
file_path = resource.abs_path
write_random_data_to_file(file_path)
demo_commands.append('kiproject.data_push("{0}")'.format(os.path.basename(resource.name)))
print('')
print('Demo project created in: {0}'.format(kiproject_path))
print('Synapse Project: {0} ({1})'.format(syn_project.name, syn_project.id))
print('')
print('Python Script:')
for command in demo_commands:
print(command)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('demo', nargs='?', help='Which demo to create.', choices=['curator'], default='curator')
args = parser.parse_args()
if args.demo == 'curator':
create_demo_curator()
if __name__ == "__main__":
main()
| 1.976563
| 2
|
rtamt/spec/io_stl_ct/offline.py
|
BentleyJOakes/rtamt
| 0
|
12781001
|
<gh_stars>0
import operator
from rtamt.spec.stl_ct.offline import STLCTOffline
from rtamt.spec.io_stl.io_interpretation import IOInterpretation
class STLIOCTOffline(STLCTOffline):
def __init__(self, spec):
super(STLIOCTOffline, self).__init__(spec)
def visitPredicate(self, element, args):
out_sample = super(STLIOCTOffline, self).visitPredicate(element, args)
out = []
if (self.spec.iosem == 'output-robustness' and not element.out_vars):
for tuple in out_sample:
ntuple = [tuple[0], tuple[1]*float('inf')]
out.append(ntuple)
elif(self.spec.iosem == 'input-vacuity' and not element.in_vars):
for tuple in out_sample:
ntuple = [tuple[0], 0]
out.append(ntuple)
elif(self.spec.iosem == 'input-robustness' and not element.in_vars):
for tuple in out_sample:
ntuple = [tuple[0], tuple[1]*float('inf')]
out.append(ntuple)
elif(self.spec.iosem == 'output-vacuity' and not element.in_vars):
for tuple in out_sample:
ntuple = [tuple[0], 0]
out.append(ntuple)
else:
out = out_sample
return out
| 2.078125
| 2
|
TASSELpy/net/maizegenetics/dna/snp/CoreGenotypeTable.py
|
er432/TASSELpy
| 1
|
12781002
|
from TASSELpy.net.maizegenetics.dna.snp.GenotypeTable import GenotypeTable
from TASSELpy.utils.helper import make_sig
from TASSELpy.utils.Overloading import javaConstructorOverload
import javabridge
## Dictionary to hold java imports
java_imports = {'AlleleDepth':'net/maizegenetics/dna/snp/depth/AlleleDepth',
'BitSet':'net/maizegenetics/util/BitSet',
'Chromosome':'net/maizegenetics/dna/map/Chromosome',
'CoreGenotypeTable':'net/maizegenetics/dna/snp/CoreGenotypeTable',
'GenotypeTable':'net.maizegenetics.dna.snp.GenotypeTable',
'GenotypeCallTable':'net/maizegenetics/dna/snp/genotypecall/GenotypeCallTable',
'PositionList':'net/maizegenetics/dna/map/PositionList',
'SiteScore':'net/maizegenetics/dna/snp/score/SiteScore',
'TaxaList':'net/maizegenetics/taxa/TaxaList'}
class CoreGenotypeTable(GenotypeTable):
_java_name = java_imports['CoreGenotypeTable']
@javaConstructorOverload(java_imports['CoreGenotypeTable'],
(make_sig([java_imports['GenotypeCallTable'],java_imports['PositionList'],
java_imports['TaxaList']],'void'),(object,object,object)),
(make_sig([java_imports['GenotypeCallTable'],java_imports['PositionList'],
java_imports['TaxaList'],java_imports['SiteScore'],
java_imports['AlleleDepth']],'void'),(object,object,object,object,object)))
def __init__(self,*args,**kwargs):
"""
Instantiates a CoreGenotypeTable
Signatures:
CoreGenotypeTable(GenotypeCallTable genotype, PositionList positionList,
TaxaList taxaList, SiteScore siteScore, AlleleDepth alleleDepth)
CoreGenotypeTable(GenotypeCallTable genotype, PositionList positionList, TaxaList taxaList)
"""
pass
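# Hedged usage sketch (added; `genotypes`, `positions` and `taxa` are assumed to be
# previously constructed GenotypeCallTable, PositionList and TaxaList wrappers, and a
# running JVM via javabridge is required):
#   table = CoreGenotypeTable(genotypes, positions, taxa)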
| 2.265625
| 2
|
drfxios/context_processors.py
|
milano-slesarik/django-drfxios
| 2
|
12781003
|
import json
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
def drfxios(request):
DRFXIOS_ROUTER_PATH = getattr(settings, 'DRFXIOS_ROUTER_PATH', None)
if not DRFXIOS_ROUTER_PATH:
raise ImproperlyConfigured("No DRFXIOS_ROUTER_PATH attr inside settings.py")
router = import_string(DRFXIOS_ROUTER_PATH)
r = router.registry
models_list = [x[0] for x in r]
return {
'DRFXIOS': {
'MODELS_LIST': json.dumps(models_list)
}
}
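# Hedged usage sketch (added; the dotted path is illustrative): with
#   DRFXIOS_ROUTER_PATH = 'myproject.urls.router'
# in settings.py and 'drfxios.context_processors.drfxios' listed in the template context
# processors, templates can read the registered route prefixes via
# {{ DRFXIOS.MODELS_LIST }}.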
| 2.15625
| 2
|
build-support/editor_setup.py
|
msilvey/grapl
| 0
|
12781004
|
#!/usr/bin/env python3
"""Encapsulates logic for generating and updating editor
configuration files to make it easy to work with Grapl code.
Provided as a self-documenting Click app for discoverability and ease
of maintenance.
"""
import json
from typing import Dict, List, Union
import click
import toml
from typing_extensions import TypedDict
# NOTE: This is essentially to silence the typechecker (and help us
# not shoot ourselves in the foot). It's not making any attempt to be
# a complete and faithful typing of Pyright configuration documents;
# it's just typing what we're currently using. Feel free to update
# this as this code develops and matures.
class PyrightConfig(TypedDict):
pythonVersion: str
pythonPlatform: str
venvPath: str
venv: str
verboseOutput: bool
reportMissingImports: bool
exclude: List[str]
executionEnvironments: List[Dict[str, Union[str, List[str]]]]
BASE_PYRIGHTCONFIG: PyrightConfig = {
"pythonVersion": "3.7",
"pythonPlatform": "Linux",
"venvPath": "build-support",
"venv": "grapl-venv",
"verboseOutput": True,
"reportMissingImports": True,
"exclude": [
"src/js/**",
"src/rust/**",
],
"executionEnvironments": [
{"root": "pulumi"},
{"root": "pants-plugins"},
# NOTE: We will augment this with the src/python root in the
# code below
],
}
PANTS_TOML = "pants.toml"
PYRIGHTCONFIG_JSON = "pyrightconfig.json"
def src_python_execution_environment() -> Dict[str, Union[str, List[str]]]:
"""Generate a pyright "executionEnvironments" entry for code in our
`src/python` directory.
Since this code is all interrelated, we need to provide the
appropriate "extraPaths" for Pyright to properly resolve imports,
types, etc. In general, this amounts to adding our Pants source
roots, with a few caveats:
1) not all the roots are required for Python code in that
directory
2) Our Pants configuration explicitly provides absolute paths, not
patterns that may be matched anywhere
As such, we first filter out what we don't need, and then
"relativize" the paths, since this is what Pyright need.
"""
pants = toml.load(PANTS_TOML)
source_roots = pants["source"]["root_patterns"]
if any(not r.startswith("/") for r in source_roots):
raise click.ClickException(
"Expected all Pants source roots to be absolute, but at least one was not!"
)
# We don't care about these source roots for things that are in src/python
filtered = [
root
for root in source_roots
if root
not in (
"/3rdparty",
"/build-support",
"/pants-plugins",
"/pulumi",
"/src/js/grapl-cdk",
"/src/proto",
)
]
relativized = [root.lstrip("/") for root in filtered]
return {"root": "src/python", "extraPaths": relativized}
def write_or_echo(output: str, path: str, write_file: bool) -> None:
""" Consolidate logic for whether to write `output` to the file at `path`, or to send it to standard output instead."""
if write_file:
with click.open_file(path, "w") as f:
f.write(output)
click.echo(f"Wrote content to {path} file")
else:
click.echo(output)
@click.command(name="generate")
@click.option(
"--write-file/--no-write-file",
is_flag=True,
default=True,
show_default=True,
help="Controls whether or not to write the generated output to disk, or to standard output.",
)
def generate_pyrightconfig(write_file: bool) -> None:
"""Generate a pyrightconfig.json file from pants.toml.
Do this if you have no existing pyrightconfig.json file that you
are using. If you already have one, on the other hand, please see
the `update` command instead.
"""
pyrightconfig = BASE_PYRIGHTCONFIG
pyrightconfig["executionEnvironments"].append(src_python_execution_environment())
output = json.dumps(pyrightconfig, indent=4)
write_or_echo(output, PYRIGHTCONFIG_JSON, write_file)
@click.command(name="update")
@click.option(
"--write-file/--no-write-file",
is_flag=True,
default=True,
show_default=True,
help="Controls whether or not to write the generated output to disk, or to standard output.",
)
def update_pyrightconfig(write_file: bool) -> None:
"""Update an existing pyrightconfig.json file.
In particular, the `extraPaths` entries for various
`executionEnvironments` must be kept in-sync with what we declare
in our pants.toml file.
Any other changes you may have made to your file will be
preserved.
"""
with click.open_file(PYRIGHTCONFIG_JSON, "r") as f:
pyright = json.load(f)
execution_environments = pyright["executionEnvironments"]
# Preserve other environments; we're only concerned about the
# src/python one here
new_execution_environments = [
e for e in execution_environments if e["root"] != "src/python"
]
new_execution_environments.append(src_python_execution_environment())
pyright.update({"executionEnvironments": new_execution_environments})
output = json.dumps(pyright, indent=4)
write_or_echo(output, PYRIGHTCONFIG_JSON, write_file)
@click.group(name="pyright")
def configure_pyright() -> None:
""" Set up Pyright for Python IDE integration. """
configure_pyright.add_command(generate_pyrightconfig)
configure_pyright.add_command(update_pyrightconfig)
@click.group()
def editor_setup() -> None:
"""A utility for helping to configure IDEs and editors for working
with Grapl code."""
editor_setup.add_command(configure_pyright)
if __name__ == "__main__":
editor_setup()
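# ---------------------------------------------------------------------------
# Hedged usage sketch (assumes the script is run from the repository root,
# where pants.toml and pyrightconfig.json live; commands mirror the Click
# groups defined above):
#
#   python build-support/editor_setup.py pyright generate
#   python build-support/editor_setup.py pyright update --no-write-file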
| 1.953125
| 2
|
monasca_analytics/spark/driver.py
|
daisuke-fujita/monsaca-analytics_20181107
| 1
|
12781005
|
#!/usr/bin/env python
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import pyspark
import monasca_analytics.config.config as config
import monasca_analytics.ingestor.base as bi
import monasca_analytics.ldp.base as mldp
import monasca_analytics.sink.base as msink
import monasca_analytics.sml.base as bml
import monasca_analytics.spark.aggregator as agg
import monasca_analytics.spark.streaming_context as streamingctx
import monasca_analytics.voter.base as mvoter
logger = logging.getLogger(__name__)
class DriverExecutor(object):
"""Driver part of the job submitted to spark.
This is where we control what is submitted to workers,
what is driver specific and how the pipeline is constructed.
We also execute the pipeline from here.
"""
def __init__(self, _config):
self._links = None
self._sources = None
self._orchestrator = None
self.set_links(config.instantiate_components(_config))
def restart_spark():
self._ssc = streamingctx.create_streaming_context(
self._sc,
_config)
self._restart_spark = restart_spark
self._sc = pyspark.SparkContext(
appName=_config["spark_config"]["appName"])
self._ssc = streamingctx.create_streaming_context(self._sc, _config)
def set_links(self, links):
"""Set new set of links
This function has no effect on the current pipeline.
In order to use them, you need to restart the pipeline.
"""
self._links = links
logger.debug("Collect sources...")
self._sources = config.collect_sources(self._links)
logger.debug("New list of sources: {}".format(self._sources))
self._orchestrator = agg.Aggregator(self)
logger.debug("Propagating feature list...")
self._propagate_feature_list()
def start_pipeline(self):
"""Start the pipeline"""
# Start by connecting the source
if self._phase1_required():
logger.info("Phase 1 required, ldp won't produce data until"
" smls have finished.")
# Connect sources to ingestors
self._prepare_phase(self._connect_dependents_phase1)
# Preparation step for the orchestrator:
# Accumulate everything from the sources
self._orchestrator.prepare_final_accumulate_stream_step()
# Then prepare the orchestrator
self._prepare_orchestrator()
else:
# Connect sources to ldps
logger.info("Phase 1 was not required, skipping it.")
self._prepare_phase(self._connect_dependents_phase2)
logger.info("Start the streaming context")
self._ssc.start()
def stop_pipeline(self):
logger.debug("Stop spark context.")
self._ssc.stop(False, False)
logger.debug("Terminate sources.")
self._terminate_sources()
logger.debug("Restart spark context.")
self._restart_spark()
def move_to_phase2(self):
if self._ssc is not None:
logger.debug("Phase 2: Stop SparkStreamingContext.")
self._ssc.stop(False, False)
logger.debug("Phase 2: Stop sources")
self._terminate_sources()
logger.debug("Phase 2: Restart streaming...")
self._restart_spark()
logger.debug("Phase 2: Create new connections")
self._prepare_phase(self._connect_dependents_phase2)
self._ssc.start()
# ?
self._ssc.awaitTermination()
def _terminate_sources(self):
"""Terminates the sources."""
for source in self._sources:
source.terminate_source()
def _phase1_required(self):
for src in self._sources:
if any(isinstance(el, bi.BaseIngestor) for el in self._links[src]):
return True
return False
def _prepare_orchestrator(self):
"""
This is a part of phase 1. The orchestrator collects
input from all ingestors and then orchestrate the sml
pipeline to solve it and provide to LDPs the learned
data structure.
"""
smls = filter(lambda c: isinstance(c, bml.BaseSML),
self._links.keys())
sml_with_no_dependents = filter(
lambda c: set(self._links[c]).isdisjoint(smls),
smls)
for sml in sml_with_no_dependents:
logger.debug("Append {} to orchestrator".format(sml))
self._orchestrator.append_sml(sml)
self._connect_sml_dependents(sml)
def _prepare_phase(self, connect_dependent):
"""Prepare given phase by starting sources.
:type connect_dependent: (pyspark.streaming.DStream,
monasca_analytics.source.base.BaseSource) -> None
:param connect_dependent: Callback that is going to selectively connect
the appropriate dependencies of each sources.
"""
for src in self._sources:
logger.debug("Prepare source {}".format(src))
dstream = src.create_dstream(self._ssc)
connect_dependent(dstream, src)
def _connect_sml_dependents(self, from_component):
"""Connect an sml component with all its dependencies.
During phase 1 this code is running exclusively by the driver
at the moment.
:type from_component: bml.BaseSML | mvoter.BaseVoter
:param from_component: Where we came from.
"""
for connected_node in self._links[from_component]:
# SML can, for now, only be connected to voter.
if isinstance(connected_node, mvoter.BaseVoter) and \
isinstance(from_component, bml.BaseSML):
logger.debug("Set {} to {}"
.format(connected_node, from_component))
from_component.set_voter(connected_node)
# Voter can only be connected to LDPs
if isinstance(from_component, mvoter.BaseVoter) and \
isinstance(connected_node, mldp.BaseLDP):
logger.debug("Append {} to {}"
.format(connected_node, from_component))
from_component.append_ldp(connected_node)
# We don't connect LDP to anything
continue
# Only SML can be connected to a sink
if isinstance(connected_node, msink.BaseSink):
logger.debug("Sink {} into {}"
.format(from_component, connected_node))
connected_node.sink_ml(from_component)
# Sink can't be connected to anything
continue
self._connect_sml_dependents(connected_node)
def _connect_dependents_phase2(self, dstream, from_component):
"""Connect a component to its dependencies.
During phase 2, only live data processors are considered.
All ingestors are shutdown.
:type dstream: pyspark.streaming.DStream | None
:param dstream: Dstream that will be modified by dependent.
It can be None, only if from_component is aggregator,
sml or voter.
:type from_component: monasca_analytics.component.base.BaseComponent
:param from_component: Where we came from.
"""
for connected_node in self._links[from_component]:
# Live data processors are also doing a map, they add
# the causality bit to each element in the stream.
if isinstance(connected_node, mldp.BaseLDP):
logger.debug("Connecting {} to {}".format(from_component,
connected_node))
new_dstream = connected_node.map_dstream(dstream)
self._connect_dependents_phase2(new_dstream, connected_node)
# Sink are at the end of the branch!
if isinstance(connected_node, msink.BaseSink):
logger.debug("Sink {} into {}".format(from_component,
connected_node))
connected_node.sink_dstream(dstream)
def _connect_dependents_phase1(self, dstream, from_component):
"""Connect a component to its dependencies for phase 1.
All live data processors are ignored during that phase.
:type dstream: pyspark.streaming.DStream | None
:param dstream: Dstream that will be modified by dependent.
It can be None, only if from_component is aggregator,
sml or voter.
        :type from_component: monasca_analytics.component.base.BaseComponent
:param from_component: Where we came from.
"""
for connected_node in self._links[from_component]:
# Ingestors "map" the dstream. They are mainly doing worker
# specific transformation. Like parsing and vectorizing the
# data.
if isinstance(connected_node, bi.BaseIngestor):
logger.debug("Stream from {} to {}"
.format(from_component, connected_node))
new_dstream = connected_node.map_dstream(dstream)
# We then connect directly this stream to the orchestrator
self._orchestrator.accumulate_dstream_samples(new_dstream)
# And we look for sink if any
self._connect_dependents_phase1(new_dstream, connected_node)
# Sink are at the end of the branch!
if isinstance(connected_node, msink.BaseSink):
logger.debug("Sink {} into {}"
.format(from_component, connected_node))
connected_node.sink_dstream(dstream)
def _propagate_feature_list(self):
"""Set the appropriate features list on each live data processor."""
for source in self._sources:
features = source.get_feature_list()
for connected_node in self._links[source]:
propagated = False
if isinstance(connected_node, bi.BaseIngestor):
connected_node.set_feature_list(features)
propagated = True
if isinstance(connected_node, mldp.BaseLDP):
connected_node.set_feature_list(features)
propagated = True
if propagated:
logger.info("Feature list {} propagated from {} to {}"
.format(features, source, connected_node))
| 2.09375
| 2
|
fw-rde/mnist/rde_new.py
|
morgankohler/FrankWolfe.jl
| 0
|
12781006
|
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.keras.backend as K
from models import load_model, load_adfmodel
import instances
# GENERAL PARAMETERS
MODE = 'joint_untargeted'
IMG_SHAPE = [28, 28]
# LOAD MODEL
model = load_model()
generator = instances.load_generator()
def get_data_sample(index):
return (
generator[index],
os.path.splitext(os.path.split(generator.filenames[index])[1])[0],
)
def store_single_result(mapping, name, fname, rate, d, subdir):
savedir = os.path.join('results', subdir, fname)
os.makedirs(savedir, exist_ok=True)
# print(mapping.shape)
mapping = np.reshape(mapping, IMG_SHAPE)
# for line in mapping:
# print(line)
# raise Exception
# for row in mapping:
# print(row)
# np.save(f'/home/Morgan/fw-rde/mnist/results/{name}.npy', mapping)
# print(np.max(mapping))
# print(np.min(mapping))
# mapping = mapping - np.min(mapping)
# mapping = mapping / np.max(mapping)
# for row in mapping:
# print(row)
plt.imsave(
os.path.join(
savedir,
f'{name}_rate-{rate}_d-{d}.png'
),
mapping.squeeze(),
cmap='Greys',
vmin=np.min(mapping),
vmax=np.max(mapping),
format='png',
)
def store_pert_img(x, s, p, name, fname, rate, d, subdir):
savedir = os.path.join('results', subdir, fname)
os.makedirs(savedir, exist_ok=True)
# print(mapping.shape)
x = np.reshape(x, IMG_SHAPE)
s = np.reshape(s, IMG_SHAPE)
p = np.reshape(p, IMG_SHAPE)
x = x + s*p
# for line in mapping:
# print(line)
# raise Exception
# np.save(f'/home/Morgan/fw-rde/mnist/results/{name}.npy', x)
plt.imsave(
os.path.join(
savedir,
f'{name}_rate-{rate}_d-{d}.png'
),
x.squeeze(),
cmap='Greys',
vmin=np.min(x),
vmax=np.max(x),
        format='png',
)
def get_distortion(x, model=model, mode=MODE):
x_tensor = tf.constant(x, dtype=tf.float32)
s_flat = tf.placeholder(tf.float32, (np.prod(x_tensor.shape),))
s_tensor = tf.reshape(s_flat, x.shape)
p_flat = tf.placeholder(tf.float32, (np.prod(x_tensor.shape),))
p_tensor = tf.reshape(p_flat, x.shape)
pred = model.predict(x)
node = np.argpartition(pred[0, ...], -2)[-1]
# target = pred[0, node]
unprocessed = x + s_tensor * p_tensor
# network_input = (tf.tanh((unprocessed + 37.96046)/255 * 2 - 1) + 1) / 2 * 255 - 37
network_input = tf.clip_by_value(unprocessed, clip_value_min=np.min(x), clip_value_max=np.max(x))
out = model(network_input)
if mode == 'joint_untargeted':
loss = tf.squeeze(out[..., node])
gradient = K.gradients(loss, [s_flat, p_flat])
f_out = K.function([s_flat, p_flat], [loss])
f_gradient = K.function([s_flat, p_flat], [gradient])
# a = tf.random.uniform(shape=s_flat.shape)
# b = tf.random.uniform(shape=s_flat.shape)
#
# c = f_out([a, b])
# d = f_gradient([a, b])
return lambda s, p: f_out([s, p])[0], lambda s, p: f_gradient([s, p])[0][0], lambda s, p: f_gradient([s, p])[0][1], node, pred
def print_model_prediction(x, s, p):
print('\n------------------------\n')
print(np.max(x))
print(np.min(x))
print('\n------------------------\n')
print(np.max(s))
print(np.min(s))
print('\n------------------------\n')
print(np.max(p))
print(np.min(p))
print('\n------------------------\n')
s = np.reshape(s, x.shape)
p = np.reshape(p, x.shape)
pert_input = x + s * p
print(np.max(pert_input))
print(np.min(pert_input))
print('\n------------------------\n')
# for t in [x, pert_input]:
# print('\n\n\n\n')
# for row in t:
# print(row)
# raise(Exception)
# s = tf.reshape(s, x.shape)
# p = tf.reshape(p, x.shape)
# pert_input = x+s*p
pert_input = tf.convert_to_tensor(pert_input)
# pert_input = (tf.tanh((pert_input + 37.96046) / 255 * 2 - 1) + 1) / 2 * 255 - 37
pert_input = tf.clip_by_value(pert_input, clip_value_min=np.min(x), clip_value_max=np.max(x))
sess = tf.Session()
with sess.as_default():
pert_input = pert_input.eval()
print('\n------------------------\n')
print(pert_input.shape)
print(np.max(pert_input))
print(np.min(pert_input))
print('\n------------------------\n')
# pert_input[pert_input < -37.96046] = -37.96046
# pert_input[pert_input > 255-37.96046] = 255-37.96046
pred0 = model.predict(x, steps=1)
pred1 = model.predict(pert_input, steps=1)
print(f'orig pred: {pred0}')
print(f'pert pred: {pred1}')
# x, fname = get_data_sample(0)
#
# f, gs, gp, n, p = get_distortion(x)
#
# a = tf.random.uniform(shape=[28*28])
# b = tf.random.uniform(shape=[28*28])
#
# out = f(a,b)
#
#
# _=0
| 2.15625
| 2
|
12_find the output/03_In Python/01_GeeksForGeeks/02_Set two/problem_3.py
|
Magdyedwar1996/python-level-one-codes
| 1
|
12781007
|
values = [1, 2, 3, 4,5,6]
numbers = set(values)
def checknums(num):
if num in numbers:
return True
else:
return False
for i in filter(checknums, values):
print(i)
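# Sanity check of the expected output: every element of `values` is also in
# the set `numbers`, so filter() keeps all of them and the loop prints the
# numbers 1 through 6, one per line.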
| 3.84375
| 4
|
src/minerl/herobraine/hero/handlers/actionable.py
|
imatge-upc/pixelcoordEDL
| 1
|
12781008
|
from abc import ABC, abstractmethod
import gym
import numpy as np
from minerl.herobraine.hero import AgentHandler
from minerl.herobraine.hero import KEYMAP
from minerl.herobraine.hero import spaces
from minerl.herobraine.hero.spaces import DiscreteRange
class CommandAction(AgentHandler):
"""
An action handler based on commands
    # Todo: support blacklisting commands. (note this has to work with merging somehow)
"""
def __init__(self, command: str, space: gym.Space):
"""
Initializes the space of the handler with a gym.spaces.Dict
of all of the spaces for each individual command.
"""
self._command = command
super().__init__(space)
@property
def command(self):
return self._command
def to_string(self):
return self._command
def to_hero(self, x):
"""
Returns a command string for the multi command action.
:param x:
:return:
"""
cmd = ""
verb = self.command
if isinstance(x, np.ndarray):
flat = x.flatten().tolist()
flat = [str(y) for y in flat]
adjective = " ".join(flat)
elif isinstance(x, list):
adjective = " ".join([str(y) for y in x])
else:
adjective = str(x)
cmd += "{} {}".format(
verb, adjective)
return cmd
def __or__(self, other):
if not self.command == other.command:
raise ValueError("Command must be the same between {} and {}".format(self.command, other.command))
return self
class ItemListCommandAction(CommandAction):
"""
An action handler based on a list of items
    The action space is determined by the length of the list plus one
"""
def __init__(self, command: str, items: list):
"""
Initializes the space of the handler with a gym.spaces.Dict
of all of the spaces for each individual command.
"""
# TODO must check that the first element is 'none' and last elem is 'other'
self._command = command
self._items = items
self._univ_items = ['minecraft:' + item for item in items]
assert 'none' in self._items
self._default = 'none'
super().__init__(self._command, spaces.Enum(*self._items, default=self._default))
@property
def items(self):
return self._items
@property
def universal_items(self):
return self._univ_items
@property
def default(self):
return self._default
def to_hero(self, x):
"""
Returns a command string for the multi command action.
:param x:
:return:
"""
cmd = ""
verb = self._command
if isinstance(x, np.ndarray):
raise NotImplementedError
elif isinstance(x, list):
raise NotImplementedError
elif 0 < x < len(self._items):
adjective = self._items[x]
cmd += "{} {}".format(
verb, adjective)
else:
cmd += "{} NONE".format(
verb)
return cmd
def from_universal(self, x):
raise NotImplementedError()
def __or__(self, other):
"""
Merges two ItemListCommandActions into one by unioning their items.
Assert that the commands are the same.
"""
if not isinstance(other, self.__class__):
raise TypeError("other must be an instance of ItemListCommandAction")
if self._command != other._command:
raise ValueError("Command must be the same for merging")
new_items = list(set(self._items) | set(other._items))
return self.__class__(new_items)
def __eq__(self, other):
"""
        Asserts equality between item list command actions.
"""
if not isinstance(other, ItemListCommandAction):
return False
if self._command != other._command:
return False
# Check that all items are in self._items
if not all(x in self._items for x in other._items):
return False
# Check that all items are in other._items
if not all(x in other._items for x in self._items):
return False
return True
class CraftItem(ItemListCommandAction):
"""
An action handler for crafting items
    Note when used alongside Craft Item Nearby, block lists must be disjoint or from_universal will fire multiple
times
"""
_command = "craft"
def to_string(self):
return "craft"
def __init__(self, items: list):
"""
Initializes the space of the handler to be one for each item in the list plus one for the
default no-craft action (command 0)
Items are minecraft resource ID's
"""
super().__init__(self._command, items)
def from_universal(self, obs):
if 'diff' in obs and 'crafted' in obs['diff'] and len(obs['diff']['crafted']) > 0:
try:
x = self._univ_items.index(obs['diff']['crafted'][0]['item'])
return obs['diff']['crafted'][0]['item'].split('minecraft:')[-1]
except ValueError:
return self._default
# return self._items.index('other')
else:
return self._default
class CraftItemNearby(CraftItem):
"""
An action handler for crafting items when agent is in view of a crafting table
    Note when used alongside Craft Item, item lists must be disjoint or from_universal will fire multiple times
"""
_command = "craftNearby"
def to_string(self):
return 'nearbyCraft'
class SmeltItem(CraftItem):
def from_universal(self, obs):
if 'diff' in obs and 'smelted' in obs['diff'] and len(obs['diff']['smelted']) > 0:
try:
x = self._univ_items.index(obs['diff']['smelted'][0]['item'])
return obs['diff']['smelted'][0]['item'].split('minecraft:')[-1]
except ValueError:
return self._default
# return self._items.index('other')
else:
return self._default
class SmeltItemNearby(SmeltItem):
"""
An action handler for crafting items when agent is in view of a crafting table
    Note when used alongside Craft Item, block lists must be disjoint or from_universal will fire multiple times
"""
_command = 'smeltNearby'
def to_string(self):
return 'nearbySmelt'
class PlaceBlock(ItemListCommandAction):
"""
An action handler for placing a specific block
"""
def to_string(self):
return 'place'
def __init__(self, blocks: list):
"""
Initializes the space of the handler to be one for each item in the list
        Requires 0th item to be 'none' and last item to be 'other' corresponding to
no-op and non-listed item respectively
"""
self._items = blocks
self._command = 'place'
super().__init__(self._command, self._items)
self._prev_inv = None
# print(self._items)
# print(self._univ_items)
def from_universal(self, obs):
try:
for action in obs['custom_action']['actions'].keys():
try:
if int(action) == -99 and self._prev_inv is not None:
item_name = self._prev_inv[int(-10 + obs['hotbar'])]['name'].split("minecraft:")[-1]
if item_name not in self._items:
raise ValueError()
else:
return item_name
except ValueError:
return self._default
except TypeError:
print('Saw a type error in PlaceBlock')
raise TypeError
except KeyError:
return self._default
finally:
try:
self._prev_inv = obs['slots']['gui']['slots']
except KeyError:
self._prev_inv = None
return self._default
class EquipItem(ItemListCommandAction):
"""
An action handler for observing a list of equipped items
"""
def to_string(self):
return 'equip'
def __init__(self, items: list):
"""
Initializes the space of the handler to be one for each item in the list plus one for the
default no-craft action
"""
self._items = items
self._command = 'equip'
super().__init__(self._command, self._items)
self.previous = self._default
# print(self._items)
# print(self._univ_items)
def from_universal(self, obs):
try:
if obs['slots']['gui']['type'] == 'class net.minecraft.inventory.ContainerPlayer':
hotbar_index = int(obs['hotbar'])
item = self._univ_items.index(obs['slots']['gui']['slots'][-10 + hotbar_index]['name'])
if item != self.previous:
self.previous = item
return obs['slots']['gui']['slots'][-10 + hotbar_index]['name'].split('minecraft:')[-1]
except KeyError:
return self._default
except ValueError:
return self._default
# return self._items.index('other')
return self._default
def reset(self):
self.previous = self._default
class ContinuousMovementAction(CommandAction, ABC):
"""
Handles player control actions
"""
def add_to_mission_spec(self, mission_spec):
mission_spec.allowAllContinuousMovementCommands()
pass
class Camera(ContinuousMovementAction):
"""
Uses <delta_pitch, delta_yaw> vector in degrees to rotate the camera. pitch range [-180, 180], yaw range [-180, 180]
"""
def to_string(self):
return 'camera'
def __init__(self):
self._command = 'camera'
super().__init__(self.command, spaces.Box(low=-180, high=180, shape=[2], dtype=np.float32))
def from_universal(self, x):
if 'custom_action' in x and 'cameraYaw' in x['custom_action'] and 'cameraPitch' in x['custom_action']:
delta_pitch = x['custom_action']['cameraPitch']
delta_yaw = x['custom_action']['cameraYaw']
assert not np.isnan(np.sum(x['custom_action']['cameraYaw'])), "NAN in action!"
assert not np.isnan(np.sum(x['custom_action']['cameraPitch'])), "NAN in action!"
return np.array([-delta_pitch, -delta_yaw], dtype=np.float32)
else:
return np.array([0.0, 0.0], dtype=np.float32)
class KeyboardAction(ContinuousMovementAction):
"""
Handles keyboard actions.
"""
def to_string(self):
return self.command
def __init__(self, command, *keys):
if len(keys) == 2:
# Like move or strafe. Example: -1 for left, 1 for right
super().__init__(command, DiscreteRange(-1, 2))
else:
            # It's an n-key action with discrete items.
# Eg hotbar actions
super().__init__(command, spaces.Discrete(len(keys) + 1))
self.keys = keys
def from_universal(self, x):
actions_mapped = list(x['custom_action']['actions'].keys())
# actions_mapped is just the raw key codes.
# for action in x['custom_action']['actions'].keys():
# try:
# actions_mapped += [KEYMAP[action]]
# except KeyError:
# pass
offset = self.space.begin if isinstance(self.space, DiscreteRange) else 0
default = 0
for i, key in enumerate(self.keys):
if key in actions_mapped:
if isinstance(self.space, DiscreteRange):
return i * 2 + offset
else:
return i + 1 + offset
# if "BUTTON1" in actions_mapped:
# print("BUTTON1")
        # If no key was pressed.
return default
class SingleKeyboardAction(ContinuousMovementAction):
"""
Handles keyboard actions.
"""
def to_string(self):
return self.command
def __init__(self, command, key):
super().__init__(command, spaces.Discrete(2))
self.key = key
def from_universal(self, x):
if 'custom_action' in x and 'actions' in x['custom_action']:
if self.key in x['custom_action']['actions'].keys():
return 1
else:
return 0
def __or__(self, other):
"""
Combines two keyboard actions into one by unioning their keys.
"""
if not isinstance(other, KeyboardAction):
raise TypeError("other must be an instance of KeyboardAction")
new_keys = list(set(self.keys + other.keys))
return KeyboardAction(self._command, new_keys)
def __eq__(self, other):
"""
Tests for equality between two keyboard actions.
"""
if not isinstance(other, KeyboardAction):
return False
return self._command == other._command and self.keys == other.keys
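# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative values; assumes a universal observation
# dict carrying camera deltas under 'custom_action' as handled above):
#
# cam = Camera()
# cam.from_universal({'custom_action': {'cameraPitch': 2.0, 'cameraYaw': 1.0}})
# # -> array([-2., -1.], dtype=float32): the pitch/yaw deltas, sign-flipped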
| 2.71875
| 3
|
PulseView_C2_decoder/c2/__init__.py
|
debug-silicon/C8051F34x_Glitch
| 34
|
12781009
|
'''
This is a decoder for SiLabs C2 debug protocol.
'''
from .pd import Decoder
| 0.945313
| 1
|
backend/core/urls.py
|
open-contracting/pelican-frontend
| 0
|
12781010
|
<filename>backend/core/urls.py
from django.urls import include, path
urlpatterns = [
path("", include("dqt.urls"), name="api"),
path("datasets/", include("controller.urls"), name="controller"),
path("", include("exporter.urls"), name="exporter"),
]
| 1.765625
| 2
|
test/crop_image_partial.py
|
mskim99/Pix2Vox_modify
| 0
|
12781011
|
<gh_stars>0
import cv2
import numpy as np
f_index = 6
m_index = 57
c_index = 276
s_index = 298
a_index = 1010
c_x = 136
c_y = 720
s_x = 116
s_y = 692
a_x = 132
a_y = 134
resolution = 256
'''
c_img = cv2.imread('I:/DK_Data_Process/i_1-2_Slices/f_' + str(f_index).zfill(3) + '/coronal/f_' + str(f_index).zfill(3) + '_Coronal_' + str(c_index).zfill(5) + '.bmp')
if type(c_img) is np.ndarray:
c_cropped_img = c_img[c_y: c_y + resolution, c_x: c_x + resolution]
cv2.imwrite('I:/DK_Data_Process/i_1-2_Slices_Cropped/f_' + str(f_index).zfill(3) + '_Coronal_cropped.bmp', c_cropped_img)
'''
'''
c_img = cv2.imread('I:/DK_Data_Process/i_1-2_Slices/f_' + str(f_index).zfill(3) + '/coronal/f_' + str(f_index).zfill(2) + '_Coronal_' + str(c_index).zfill(5) + '.bmp')
if type(c_img) is np.ndarray:
c_cropped_img = c_img[c_y: c_y + resolution, c_x: c_x + resolution]
cv2.imwrite('I:/DK_Data_Process/i_1-2_Slices_Cropped/f_' + str(f_index).zfill(3) + '_Coronal_cropped.bmp', c_cropped_img)
'''
'''
a_img = cv2.imread('I:/DK_Data_Process/i_1-2_Slices/f_' + str(f_index).zfill(3) + '/axial/f_' + str(f_index).zfill(3) + '_Axial_' + str(a_index).zfill(5) + '.bmp')
if type(a_img) is np.ndarray:
a_cropped_img = a_img[a_y: a_y + resolution, a_x: a_x + resolution]
cv2.imwrite('I:/DK_Data_Process/i_1-2_Slices_Cropped/f_' + str(f_index).zfill(3) + '_Axial_cropped.bmp', a_cropped_img)
'''
'''
a_img = cv2.imread('I:/DK_Data_Process/i_1-2_Slices/f_' + str(f_index).zfill(3) + '/axial/f_' + str(f_index).zfill(3) + '_Axial_' + str(a_index).zfill(5) + '.bmp')
if type(a_img) is np.ndarray:
a_cropped_img = a_img[a_y: a_y + resolution, a_x: a_x + resolution]
cv2.imwrite('I:/DK_Data_Process/i_1-2_Slices_Cropped/f_' + str(f_index).zfill(3) + '_Axial_cropped.bmp', a_cropped_img)
'''
s_img = cv2.imread('I:/DK_Data_Process/i_1-2_Slices/f_' + str(f_index).zfill(3) + '/sagittal/f_' + str(f_index).zfill(3) + '_Sagittal_' + str(s_index).zfill(5) + '.bmp')
if type(s_img) is np.ndarray:
s_cropped_img = s_img[s_y: s_y + resolution, s_x: s_x + resolution]
cv2.imwrite('I:/DK_Data_Process/i_1-2_Slices_Cropped/f_' + str(f_index).zfill(3) + '_Sagittal_cropped.bmp', s_cropped_img)
'''
s_img = cv2.imread('I:/DK_Data_Process/i_1-2_Slices/m_' + str(m_index).zfill(3) + '/sagittal/m_' + str(m_index).zfill(2) + '_Sagittal_' + str(s_index).zfill(5) + '.bmp')
if type(s_img) is np.ndarray:
s_cropped_img = s_img[s_y: s_y + resolution, s_x: s_x + resolution]
cv2.imwrite('I:/DK_Data_Process/i_1-2_Slices_Cropped/m_' + str(m_index).zfill(3) + '_Sagittal_cropped.bmp', s_cropped_img)
'''
'''
c_img = cv2.imread('I:/DK_Data_Process/i_1-2_Slices/m_' + str(m_index).zfill(3) + '/coronal/m_' + str(m_index).zfill(2) + '_Coronal_' + str(c_index).zfill(5) + '.bmp')
if type(c_img) is np.ndarray:
c_cropped_img = c_img[c_y: c_y + resolution, c_x: c_x + resolution]
cv2.imwrite('I:/DK_Data_Process/i_1-2_Slices_Cropped/m_' + str(m_index).zfill(3) + '_Coronal_cropped.bmp', c_cropped_img)
'''
'''
a_img = cv2.imread('I:/DK_Data_Process/i_1-2_Slices/m_' + str(m_index).zfill(3) + '/axial/m_' + str(m_index).zfill(2) + '_Axial_' + str(a_index).zfill(5) + '.bmp')
if type(a_img) is np.ndarray:
a_cropped_img = a_img[a_y: a_y + resolution, a_x: a_x + resolution]
cv2.imwrite('I:/DK_Data_Process/i_1-2_Slices_Cropped/m_' + str(m_index).zfill(3) + '_Axial_cropped.bmp', a_cropped_img)
'''
| 2.171875
| 2
|
tests/settings.py
|
hadizakialqattan/simple-api
| 0
|
12781012
|
from app import crud, database, schemas
from .IO import URLs
def create_users():
"""
create test users:
- username: admin, password: <PASSWORD>, isadmin: "true"
- username: user2, password: <PASSWORD>, isadmin: "false"
"""
if not crud.get_user(
db=database.SessionLocal(), username="admin"
) and not crud.get_admins(db=database.SessionLocal()):
user = schemas.UserCreate(
**{"username": "admin", "password": "<PASSWORD>", "isadmin": "true"}
)
crud.create_user(db=database.SessionLocal(), user=user)
if not crud.get_user(db=database.SessionLocal(), username="user2"):
user = schemas.UserCreate(
**{"username": "user2", "password": "<PASSWORD>", "isadmin": "false"}
)
crud.create_user(db=database.SessionLocal(), user=user)
def create_configs():
"""
create test configs
"""
# admin default config for testing
config1 = schemas.ConfigCreate(
**{
"owner": "admin",
"name": "api-1",
"metadata": {
"name": "SimpleAPI",
"url": "http://127.0.0.1:5057",
"database": {
"name": "apidb",
"type": "sql",
"ms": "postgresql",
"host": "0.0.0.0",
"port": "5432",
"enabled": "true",
"running": "true",
},
"enabled": "true",
"running": "true",
},
"note": "The api has been enabled.",
}
)
# user2 default config for testing
config2 = schemas.ConfigCreate(
**{
"owner": "user2",
"name": "api-2",
"metadata": {
"name": "SimpleAPI",
"url": "http://127.0.0.1:5057",
"database": {
"name": "apidb",
"type": "sql",
"ms": "postgresql",
"host": "0.0.0.0",
"port": "5432",
"enabled": "true",
"running": "false",
},
"enabled": "true",
"running": "false",
},
"note": "The api has been enabled without the DB!",
}
)
# create admin config
if not crud.get_config(db=database.SessionLocal(), name="api-1", owner="admin"):
crud.create_config(db=database.SessionLocal(), config=config1, owner="admin")
# create user2 config
if not crud.get_config(db=database.SessionLocal(), name="api-2", owner="user2"):
crud.create_config(db=database.SessionLocal(), config=config2, owner="user2")
| 2.703125
| 3
|
minydra/__init__.py
|
pg2455/minydra
| 0
|
12781013
|
from .parser import Parser
__version__ = "0.1.2"
def parse_args(
verbose=0,
allow_overwrites=True,
warn_overwrites=True,
parse_env=True,
warn_env=True,
):
def decorator(function):
def wrapper(*args, **kwargs):
parser = Parser(
verbose=verbose,
allow_overwrites=allow_overwrites,
warn_overwrites=warn_overwrites,
parse_env=parse_env,
warn_env=warn_env,
)
result = function(parser.args)
return result
return wrapper
return decorator
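# ---------------------------------------------------------------------------
# Hedged usage sketch (assumes Parser exposes the parsed command-line
# arguments on its `.args` attribute, which the decorator hands to the
# wrapped function; the key=value CLI syntax below is illustrative):
#
# @parse_args(verbose=1)
# def main(args):
#     print(args)
#
# main()  # e.g. invoked as `python main.py lr=0.1 epochs=10`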
| 3.078125
| 3
|
but/trades/views/goods_modify.py
|
yevgnenll/but
| 4
|
12781014
|
<reponame>yevgnenll/but
from django.views.generic.edit import UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from trades.models import Sell
from .base import GoodsSellBase
class SellUpdateView(LoginRequiredMixin, GoodsSellBase, UpdateView):
fields = [
'title',
'sub_title',
'goods_name',
'is_public',
'stock',
'price',
'sub_image',
'welcome_image',
'second_image',
'goods_description',
]
template_name = "trades/goods_modify.html"
slug_field = "hash_id"
def form_valid(self, form):
return super(SellUpdateView, self).form_valid(form)
| 1.882813
| 2
|
Algorithms/exponent.py
|
Sinchiguano/codingProblems_Python
| 2
|
12781015
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 <NAME> <<EMAIL>>
#
# Distributed under terms of the BSD license.
"""
"""
#We've started a recursive function below called
#exponent_calc(). It takes in two integer parameters, base
#and expo. It should return the mathematical answer to
#base^expo. For example, exponent_calc(5, 3) should return
#125: 5^3 = 125.
#
#The code is almost done - we have our base case written.
#We know to stop recursing when we've reached the simplest
#form. When the exponent is 0, we return 1, because anything
#to the 0 power is 1. But we are missing our recursive call!
#
#Fill in the marked line with the recursive call necessary
#to complete the function. Do not use the double-asterisk
#operator for exponentiation. Do not use any loops.
#
#Hint: Notice the similarity between exponentiation and
#factorial:
#   4! = 4 * 3!, 3! = 3 * 2!, 2! = 2 * 1
# 2^4 = 2 * 2^3, 2^3 = 2 * 2^2, 2^2 = 2 * 2^1, 2^1 = 2 * 2^0
def exponent_calc(base, expo):
if expo == 0:
return 1
else:
return base*exponent_calc(base,expo-1)
#Complete this line!
#Below are some lines of code that will test your function.
#You can change the value of the variable(s) to test your
#function with different inputs.
#
#If your function works correctly, this will originally
#print: 125
print(exponent_calc(5, 3))
def countDown(start):
if start <= 0:
print(start)
else:
countDown(start - 1)
print(start)
countDown(5)
'''
That's right! If we print after the call,
then all the recursive calls will be performed before a print statement is ever run.
So, the first print statement will be when start is 0. Then, after that, it will climb back up the tree, printing for start = 1, then 2, then 3, then 4, then 5.
'''
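#Worked expansion of the recursive call above, for exponent_calc(5, 3):
#   exponent_calc(5, 3) = 5 * exponent_calc(5, 2)
#                       = 5 * (5 * exponent_calc(5, 1))
#                       = 5 * (5 * (5 * exponent_calc(5, 0)))
#                       = 5 * 5 * 5 * 1 = 125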
| 4.625
| 5
|
scytale/crypto/rsa.py
|
MarcoYLyu/scytale
| 0
|
12781016
|
<reponame>MarcoYLyu/scytale<gh_stars>0
from .. import algorithm as algo
from .pkc_template import PKC
import numpy as np
__all__ = ['RSA']
class RSA(PKC):
def __init__(self, e, p, q):
assert(algo.gcd(e, (p - 1) * (q - 1)) == 1)
self.e = e
self.p = p
self.q = q
self.d = algo.inverse_mod(e, (p - 1) * (q - 1))
self.N = p * q
def encrypt(self, text):
return super().encrypt(text)
def decrypt(self, carr):
return super().decrypt(carr)
def _encrypt(self, m):
        return pow(m, self.e, self.N)
def _decrypt(self, c):
        return pow(c, self.d, self.N)
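# ---------------------------------------------------------------------------
# Hedged usage sketch (toy textbook parameters; assumes PKC.encrypt/decrypt,
# which are not shown here, delegate to _encrypt/_decrypt per message unit):
#
# rsa = RSA(e=7, p=61, q=53)   # N = 3233, d = 7**-1 mod 3120 = 1783
# c = rsa._encrypt(42)         # 42**7 mod 3233
# assert rsa._decrypt(c) == 42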
| 2.75
| 3
|
CodingBat/Python/List-2/sum13.py
|
Togohogo1/pg
| 0
|
12781017
|
<filename>CodingBat/Python/List-2/sum13.py<gh_stars>0
'''
Return the sum of the numbers in the array, returning 0 for an empty array.
Except the number 13 is very unlucky, so it does not count and numbers that
come immediately after a 13 also do not count.
'''
def sum13(nums):
nums.append(0)
count = 0
for i in range(len(nums)):
if nums[i] == 13 and nums[i + 1] != 13:
count -= (nums[i] + nums[i + 1])
elif nums[i] == 13:
count -= nums[i]
count += nums[i]
return count
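# Quick sanity checks (note: the append(0) sentinel above mutates the list
# passed in by the caller):
# print(sum13([1, 2, 2, 1]))          # 6
# print(sum13([1, 2, 13, 2, 1, 13]))  # 4
# print(sum13([]))                    # 0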
| 3.953125
| 4
|
main_animal.py
|
ObamaBinHiding/den
| 0
|
12781018
|
#!/usr/bin/python3
#Classes for side project with Gabe
#Authors:
# <NAME> <<EMAIL>>
# ...
#imports needed
from random import randint
#classes
class Animal:
"""
This is the main animal class of the mini-game, all other animal classes come from this one
"""
def __init__(self):
#agility attributes
self.speed = 0
#damage attributes
self.attack = 0
#health attributes
self.health = 0
self.defense = 0
self.defenseFactor = 0
def animalSound(self):
return ''
def attackDamage(self):
#Simple method that returns the total damage this animal will do
return(self.speed*self.attack)#'*' is times, 3*4=12
def defensePadding(self, damageIn):
        #Simple method that will be run upon receiving damage, helps negate some of the damage coming in
return(damageIn-(self.defense+(randint(1, self.defenseFactor))))#Gabe, randint(x, y) gives us a random number in between those two points
def takeDamage(self, damage):
#Simple method that removes health
self.health -= damage
"""
TODO - Gabe:
Create these classes in this file:
class Reptile(Animal): <- the Animal inside the parenthesis means we are inheriting from the Animal class, if you read you know what it means. If you didn't, go read it.
{...}
class Mammal(Animal):
{...}
class Bird(Animal):
{...}
START THESE BY TONIGHT, use the default one I made to help you.
add your name to the authors(replace the '...') when you work on it.
"""
| 4.0625
| 4
|
contraste/examples/example.py
|
gmilare/ser-347
| 0
|
12781019
|
#
# This file is part of Contraste.
# Copyright (C) 2021 INPE.
#
# Contraste is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
## Install the gdal package, a prerequisite of this package
from osgeo import gdal
## Install the package (shell command; commented out so this example file stays valid Python)
# pip install contraste-1.0.tar.gz
## Import the square-root contrast function from the package
# Other available functions: QUADRATICO, MINMAX, LINEAR, NEGATIVO and EQUALIZACAO
from contraste.RaizQuadrada import RAIZQUADRADA
## Call the function
RAIZQUADRADA()
# The function will list the .tif files in the root directory. Select the (Landsat 8) image by its number.
# The contrast will be applied to the selected image
#### Import the per-class contrast function
from contraste.PorClasse import PORCLASSE
## Call the function
PORCLASSE()
# The function will list the .tif files in the root directory. Select the (Landsat 8) image by its number and then the classified image (Mapbiomas).
# Choose whether the contrast will be optimized (1) or customized (2)
| 2.25
| 2
|
invenio_app_ils/records/resolver/jsonresolver/document_series.py
|
lauren-d/invenio-app-ils
| 0
|
12781020
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2019 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Resolve the referred Series for a Document."""
import jsonresolver
from invenio_pidstore.errors import PIDDoesNotExistError
from werkzeug.routing import Rule
from ...api import Document, Series
from ..resolver import get_field_value_for_record as get_field_value
# Note: there must be only one resolver per file,
# otherwise only the last one is registered
@jsonresolver.hookimpl
def jsonresolver_loader(url_map):
"""Resolve the referred Series for a Document record."""
from flask import current_app
def series_resolver(document_pid):
"""Resolve the referred Series for a Document record."""
try:
series_objs = get_field_value(Document, document_pid,
"series_objs")
series = []
for obj in series_objs:
record = Series.get_record_by_pid(obj["pid"])
keep_keys = ("series_pid", "mode_of_issuance", "issn", "title")
for key in list(record):
if key not in keep_keys:
del record[key]
record["volume"] = obj["volume"]
series.append(record)
return series
except (KeyError, PIDDoesNotExistError):
return {}
url_map.add(
Rule(
"/api/resolver/documents/<document_pid>/series",
endpoint=series_resolver,
host=current_app.config.get("JSONSCHEMAS_HOST"),
)
)
| 2.03125
| 2
|
nsd1804/python/day08/tcpclient.py
|
MrWangwf/nsd1806
| 0
|
12781021
|
import socket
host = '192.168.4.254'
port = 12345
addr = (host, port)  # specify the server to connect to
c = socket.socket()
c.connect(addr)
while True:
data = input('> ') + '\r\n'
data = data.encode()
c.send(data)
if data.strip() == b'quit':
break
    rdata = c.recv(1024).decode()  # convert bytes to str
print(rdata, end='')
c.close()
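# ---------------------------------------------------------------
# Hedged sketch of a matching echo server (an assumption, not part
# of the original lesson file; run it on the host/port used above):
#
# import socket
# s = socket.socket()
# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# s.bind(('0.0.0.0', 12345))
# s.listen(1)
# cli, cli_addr = s.accept()
# while True:
#     data = cli.recv(1024)
#     if data.strip() == b'quit':
#         break
#     cli.send(data)  # echo the line back, '\r\n' included
# cli.close()
# s.close()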
| 3.0625
| 3
|
python/lfmmexample.py
|
ejyoo921/FMM3D
| 71
|
12781022
|
#!/usr/bin/env python
import fmm3dpy as fmm
import numpy as np
#
# This is a sample code to demonstrate how to use
# the fmm libraries
#
# sample with one density, sources to sources,
# charge interactions, and potential only
#
n = 200000
nd = 1
sources = np.random.uniform(0,1,(3,n))
eps = 10**(-5)
charges = np.random.uniform(0,1,n)
out = fmm.lfmm3d(eps=eps,sources=sources,charges=charges,pg=1)
# sample with a vector of densities, sources to
# sources and targets, dipole interactions,
# potential and gradients
nd = 3
nt = 1870
targ = np.random.uniform(0,1,(3,nt))
dipvecs = np.random.uniform(0,1,(nd,3,n))
out = fmm.lfmm3d(eps=eps,sources=sources,dipvec=dipvecs,\
targets=targ,nd=nd,pg=2,pgt=2)
| 1.992188
| 2
|
Project/data/game.py
|
Nikurkel/4InARow
| 0
|
12781023
|
<filename>Project/data/game.py<gh_stars>0
import random
import time
import threading
class Game:
def __init__(self, id):
self.id = id
self.wins = [0,0]
self.ready = False
self.nextPlayer = random.randint(1,2)
self.state = [([0]*7) for i in range(5)]
self.chat = ['2,The Chat starts here', '2,type /help for commands']
self.debug = False # testing
self.waitBetweenRounds = 3
self.roundDone = False
self.userNames = ["Player 1", "Player 2"]
self.colors = { "player1_1": (50,50,150),
"player2_1": (150,50,50),
"player1_2": (150,150,150),
"player2_2": (150,150,150)}
def move(self, player, number):
if self.debug:
            print(f"player {player} tries a move at x = {number - 1}")
if not self.roundDone:
if player + 1 == self.nextPlayer:
for e in reversed(self.state):
if e[number - 1] == 0:
print(f"player {player} makes move at x = {number - 1}")
e[number - 1] = self.nextPlayer
if self.nextPlayer == 1:
self.nextPlayer = 2
else:
self.nextPlayer = 1
break
self.checkState()
def checkState(self):
if self.debug:
print("checking State")
for player in range(1,3):
if self.checkHorizontals(player):
self.winner(player)
break
elif self.checkVerticals(player):
self.winner(player)
break
elif self.checkDiagonals(player):
self.winner(player)
break
if self.checkNoMoreMoves():
self.winner(-1)
def checkHorizontals(self, player):
if self.debug:
print("checking Horizontals, Player", player)
for x in range(len(self.state[0]) - 3):
for y in range(len(self.state)):
if self.state[y][x] == player and self.state[y][x + 1] == player and self.state[y][x + 2] == player and self.state[y][x + 3] == player:
self.state[y][x] = player + 2
self.state[y][x + 1] = player + 2
self.state[y][x + 2] = player + 2
self.state[y][x + 3] = player + 2
return True
return False
def checkVerticals(self, player):
if self.debug:
print("checking Verticals, Player", player)
for x in range(len(self.state[0])):
for y in range(len(self.state) - 3):
if self.state[y][x] == player and self.state[y + 1][x] == player and self.state[y + 2][x] == player and self.state[y + 3][x] == player:
self.state[y][x] = player + 2
self.state[y + 1][x] = player + 2
self.state[y + 2][x] = player + 2
self.state[y + 3][x] = player + 2
return True
return False
def checkDiagonals(self, player):
if self.debug:
print("checking Diagonals, Player", player)
for x in range(len(self.state[0])):
for y in range(len(self.state) - 3):
if x >= 3:
if self.state[y][x] == player and self.state[y + 1][x - 1] == player and self.state[y + 2][x - 2] == player and self.state[y + 3][x - 3] == player:
self.state[y][x] = player + 2
self.state[y + 1][x - 1] = player + 2
self.state[y + 2][x - 2] = player + 2
self.state[y + 3][x - 3] = player + 2
return True
if x <= 3:
if self.state[y][x] == player and self.state[y + 1][x + 1] == player and self.state[y + 2][x + 2] == player and self.state[y + 3][x + 3] == player:
self.state[y][x] = player + 2
self.state[y + 1][x + 1] = player + 2
self.state[y + 2][x + 2] = player + 2
self.state[y + 3][x + 3] = player + 2
return True
return False
def checkNoMoreMoves(self):
if self.debug:
print("checking for possible moves")
for a in self.state[0]:
if a == 0:
return False
return True
def winner(self, player):
if self.debug:
print(self.state[0])
print(self.state[1])
print(self.state[2])
print(self.state[3])
print(self.state[4])
if player == -1:
print("no winner")
else:
print(f"winner is Player {player}")
self.wins[player - 1] += 1
self.roundDone = True
self.newMsg(f"2,game restarts in {self.waitBetweenRounds} seconds")
timer = threading.Timer(self.waitBetweenRounds, self.restart)
timer.start()
def restart(self):
nxt = random.randint(1, 2)
self.newMsg(f"{nxt - 1},Player {nxt} starts")
self.nextPlayer = nxt
self.state = [([0] * 7) for i in range(5)]
self.roundDone = False
def newMsg(self, msg):
self.chat.append(msg)
def newCmd(self, msg):
self.chat.append(f"{msg[0]},{msg[2:]}")
if msg[3:] == "reset wins":
self.wins = [0,0]
self.chat.append(f"2,Player {int(msg[0]) + 1} has reset the wins")
elif msg[3:] == "reset state":
self.newMsg(f"2,game restarts in {self.waitBetweenRounds} seconds")
timer = threading.Timer(self.waitBetweenRounds, self.restart)
timer.start()
elif msg[3:] == "reset chat":
self.chat = []
self.chat.append(f"2,Player {int(msg[0]) + 1} has reset the chat")
elif msg[3:7] == "say ":
self.chat.append(f"2,Player {int(msg[0]) + 1} {msg[7:]}")
elif msg[3:7] == "roll":
if msg[8:].isnumeric() and int(msg[8:]) > 0:
self.chat.append(f"2,Player {int(msg[0]) + 1} rolls {random.randint(1, int(msg[8:]))} (custom)")
else:
self.chat.append(f"2,Player {int(msg[0]) + 1} rolls {random.randint(1,6)} (normal)")
elif msg[3:9] == "delay " and msg[9:].isnumeric():
self.chat.append(f"2,Player {int(msg[0]) + 1} has set delay to {msg[9:]} s")
self.waitBetweenRounds = int(msg[9:])
elif msg[3:] == "help":
self.chat.append("2,/help | server commands")
self.chat.append("2,/help 2 | client commands")
self.chat.append("2,/reset wins | wins = 0 - 0")
self.chat.append("2,/reset state | reset board")
self.chat.append("2,/reset chat | reset chat")
self.chat.append("2,/say x | Sys: Player x (text)")
self.chat.append("2,/roll | random 1 - 6")
self.chat.append("2,/roll x | random 1 - x (number)")
self.chat.append("2,/delay x | x sec between rounds")
self.chat.append("2,/name x | username = x")
self.chat.append("2,/close | close game")
elif msg[3:] == "help 2":
self.chat.append("2,//color Element Hex-Value")
self.chat.append("2,Element: me1, me2, text,")
self.chat.append("2,background, buttons, chat")
self.chat.append("2,Hex-Value: 000000 - ffffff")
self.chat.append("2,//save x| save theme x (name)")
self.chat.append("2,//load x | load theme x (name)")
self.chat.append("2,//themes | list your themes")
self.chat.append("2,//scale x | width = x (pixels)")
self.chat.append("2,//ping | roundtrip time to server (ms)")
elif msg[3:14] == "color this ":
c1 = msg[14:20]
c2 = msg[21:27]
if msg[0] == "0":
self.colors["player1_1"] = tuple(int(c1[i:i + 2], 16) for i in (0, 2, 4)) # rgb hex -> tuple
self.colors["player1_2"] = tuple(int(c2[i:i + 2], 16) for i in (0, 2, 4))
elif msg[0] == "1":
self.colors["player2_1"] = tuple(int(c1[i:i + 2], 16) for i in (0, 2, 4))
self.colors["player2_2"] = tuple(int(c2[i:i + 2], 16) for i in (0, 2, 4))
elif msg[3:8] == "name ":
self.userNames[int(msg[0])] = msg[8:]
else:
self.chat.append(f"2,Command {msg} not found")
| 3.21875
| 3
|
migrations/versions/50701261db54_.py
|
PaulloClara/Toilter
| 0
|
12781024
|
<reponame>PaulloClara/Toilter
"""empty message
Revision ID: 50701261db54
Revises:
Create Date: 2019-03-01 14:34:02.960151
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '50701261db54'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id_', sa.Integer(), nullable=False),
sa.Column('username', sa.String(), nullable=True),
sa.Column('password', sa.String(), nullable=True),
sa.Column('name', sa.String(), nullable=True),
sa.Column('email', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id_'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.create_table('follow',
sa.Column('id_', sa.Integer(), nullable=False),
sa.Column('userId', sa.Integer(), nullable=True),
sa.Column('followId', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['followId'], ['users.id_'], ),
sa.ForeignKeyConstraint(['userId'], ['users.id_'], ),
sa.PrimaryKeyConstraint('id_')
)
op.create_table('posts',
sa.Column('id_', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=True),
sa.Column('userId', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['userId'], ['users.id_'], ),
sa.PrimaryKeyConstraint('id_')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('posts')
op.drop_table('follow')
op.drop_table('users')
# ### end Alembic commands ###
| 1.882813
| 2
|
xeasy_ml/xes_ml_arch/src/tests/feature_enginnering_test/test_discretize.py
|
jiayanduo456/xeasy-ml
| 10
|
12781025
|
<reponame>jiayanduo456/xeasy-ml
# -*-coding:utf-8-*-
# @version: 0.0.1
# License: MIT
import unittest
import sys
import pandas as pd
import configparser
import copy
sys.path.append("../../../..")
from xes_ml_arch.src.feature_enginnering import feature_discretizer
class TestDiscretize(unittest.TestCase):
def setUp(self):
self._data_path = "../data/test_discretize.txt"
self._config = "./conf/test_discretize.conf"
self._data = pd.read_csv(self._data_path)
self._conf = configparser.ConfigParser()
self._conf1 = configparser.ConfigParser()
self._conf.read(self._config)
self._conf1.read("./conf/test_discretize_try.conf")
self._ins = feature_discretizer.FeatureDiscretizer(data=self._data, conf=self._conf, log_path = '../log/log.conf')
self._ins_try = feature_discretizer.FeatureDiscretizer(data=None, conf=self._conf1,log_path = '../log/log.conf')
def test_excute(self):
self.assertTrue(self._ins.excute())
self.assertFalse(self._ins_try.excute())
def test_get_data(self):
self._ins.reset(data=self._data, conf=self._conf)
tmp_data = copy.copy(self._data)
self._ins.excute()
data = self._ins.get_data
# print data
# print tmp_data
res = pd.merge(data, tmp_data, on=["userid"], suffixes=["_dis", "_old"])
print("res:\n", res)
res_diff = res[res["age_dis"] != res["targ3_dis"]]
print("res diff:\n", res_diff)
if __name__ == '__main__':
unittest.main()
| 2.265625
| 2
|
srcs/sg/objects/rational_number_object.py
|
pomponchik/computor_v2
| 0
|
12781026
|
from srcs.sg.objects.abstract_object import AbstractObject
from srcs.errors import RuntimeASTError
class RationalNumberObject(AbstractObject):
type_mark = 'r'
def __init__(self, number, node):
self.number = int(number) if int(number) == number else number
self.node = node
self.is_real_number = True if isinstance(self.number, float) else False
@classmethod
def create_from_node(cls, node):
source = node.ast_node.tokens[0].source.replace(' ', '').replace('\t', '')
number = float(source) if '.' in source else int(source)
return cls(number, node)
def representation(self, context):
return f'{self.number}'
def type_representation(self):
if self.is_real_number:
return 'rational real number'
return 'rational number'
def real_operation(self, other, operation, operation_node):
if other.type_mark == self.type_mark:
if operation == '+':
number = self.number + other.number
return RationalNumberObject(number, self.node)
elif operation == '*':
number = self.number * other.number
return RationalNumberObject(number, self.node)
elif operation == '/':
if other.number == 0:
raise RuntimeASTError('division by 0', other.node)
number = self.number / other.number
return RationalNumberObject(number, self.node)
elif operation == '-':
number = self.number - other.number
return RationalNumberObject(number, self.node)
elif operation == '^':
if type(other.number) is float:
raise RuntimeASTError('not a whole degree', other.node)
if other.number < 0:
raise RuntimeASTError('the exponent is less than zero', other.node)
number = (self.number)**(other.number)
return RationalNumberObject(number, self.node)
elif operation == '%':
if other.number == 0:
raise RuntimeASTError('division by 0', other.node)
number = self.number % other.number
return RationalNumberObject(number, self.node)
elif other.type_mark == 'c':
if operation == '-':
inverted_other = other.real_operation(type(self)(-1), '*', self.node)
new_real_part = inverted_other.real_part + self.number
new_imaginary_part = inverted_other.imaginary_part
return type(other)(self.node, new_real_part, new_imaginary_part)
raise RuntimeASTError(f'the "{operation}" operation between {self.type_representation()} and {other.type_representation()} is not defined', operation_node)
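# ---------------------------------------------------------------------------
# Hedged usage sketch (assumes `node` is only carried along for error
# reporting, so None is enough for a standalone demonstration):
#
# a = RationalNumberObject(3, None)
# b = RationalNumberObject(0.5, None)
# c = a.real_operation(b, '+', None)
# print(c.number)                 # 3.5
# print(c.type_representation())  # 'rational real number'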
| 3.015625
| 3
|
pytest_cases/tests/cases/legacy/intermediate/test_shared_cases.py
|
chinghwayu/python-pytest-cases
| 0
|
12781027
|
from ..example_code import super_function_i_want_to_test, super_function_i_want_to_test2
from pytest_cases import test_target, case_tags
try: # python 3.5+
from pytest_cases import CaseData
except ImportError:
pass
@test_target(super_function_i_want_to_test)
def case_for_function1():
# type: () -> CaseData
ins = dict(a=1, b=2)
outs = 2, 3
return ins, outs, None
@test_target(super_function_i_want_to_test2)
def case_for_function2():
# type: () -> CaseData
ins = dict(a=1, b=2)
outs = 2, 3
return ins, outs, None
@case_tags(super_function_i_want_to_test, super_function_i_want_to_test2)
def case_for_function_1_and_2():
# type: () -> CaseData
ins = dict(a=1, b=2)
outs = 2, 3
return ins, outs, None
| 2.3125
| 2
|
examples/DecryptLoginExamples/crawlers/weibolottery/__init__.py
|
hedou/DecryptLogin
| 0
|
12781028
|
'''initialize'''
from .weibolottery import WeiboLottery
| 0.980469
| 1
|
jupyter_docker/kernelspec.py
|
mariusvniekerk/jupyter_docker_kernel
| 0
|
12781029
|
"""Tools for managing kernel specs"""
# Copyright (c) <NAME>
# Distributed under the terms of the Modified BSD License.
from jupyter_client.kernelspec import KernelSpec, KernelSpecManager
from traitlets import Unicode, List, Type
from traitlets.config import LoggingConfigurable
import os
class DockerKernelSpec(KernelSpec):
docker_image_name = Unicode()
docker_args = List()
class DockerKernelSpecManager(KernelSpecManager):
kernel_spec_class = Type(DockerKernelSpec, config=True,
help="""The kernel spec class. This is configurable to allow
subclassing of the KernelSpecManager for customized behavior.
"""
)
class DockerKernelManagerMixin(LoggingConfigurable):
docker_executable = Unicode("docker")
docker_default_options = List(["--rm", "--net=host"])
from jupyter_client.ioloop.manager import IOLoopKernelManager
class DockerKernelManager(IOLoopKernelManager, DockerKernelManagerMixin):
def format_kernel_cmd(self, extra_arguments=None):
cmd = super(DockerKernelManager, self).format_kernel_cmd(extra_arguments)
# Prepend all the docker stuff. This seems to be sufficient.
if self.kernel_spec.docker_image_name:
connection_path, _ = os.path.split(self.connection_file)
# Right now environment variables are dropped on the floor
docker_prefix = [self.docker_executable] \
+ self.docker_default_options \
+ self.kernel_spec.docker_args \
+ ['--user={}:{}'.format(os.getuid(), os.getgid()),
'-v', '{c}:{c}'.format(c=connection_path),
'-v', '{c}:{c}'.format(c=os.path.expanduser('~')),
self.kernel_spec.docker_image_name,
]
return docker_prefix + cmd
else:
return cmd
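# Illustrative kernel.json for this manager (the image name and extra args are
# placeholders; the remaining fields follow the standard Jupyter kernelspec format):
#     {
#         "argv": ["python", "-m", "ipykernel_launcher", "-f", "{connection_file}"],
#         "display_name": "Python (docker)",
#         "language": "python",
#         "docker_image_name": "jupyter/scipy-notebook",
#         "docker_args": ["--memory=4g"]
#     }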
| 2.265625
| 2
|
Utils/strings.py
|
furkankykc/AutomatedDocumentMailer
| 0
|
12781030
|
errors = {
'tr': {
'error': 'Hata!',
'authError': 'Email veya sifrenizi yanlış girdiniz',
'connectError': 'Smtp ayarlarını düzgün yaptığınızdan emin olunuz',
'senderRefused': 'Bu mail adresinin email gönderim limiti dolmustur',
'serverDisconnect': 'Server ile bağlantı kesildi',
'listError': 'Liste Dosyası Bulunamadı',
'keyError': 'EMAİL adlı kolon bulunamadı',
'startError': 'Başlangıç değeri epostalardan fazla olamaz',
'spamError': 'Server bu mesaj spam olabileceği için reddetti',
'limitError': '"Tüm postalarınızın limiti dolmustur"',
},
'en': {
'error': 'Error!',
        'authError': 'Wrong password or email address',
        'connectError': 'Make sure your SMTP settings are correct',
        'senderRefused': 'The sending limit for this email address has been reached',
        'serverDisconnect': 'Server disconnected',
        'listError': 'List file cannot be found',
        'keyError': 'Cannot find a column named "EMAİL" in your list',
        'startError': 'The starting value cannot be greater than the number of emails',
        'spamError': 'Server rejected this message because it may be spam',
        'limitError': 'You cannot send any more email; the limit has been exceeded',
}
}
| 2.21875
| 2
|
uproot/interp/jagged.py
|
guiguem/uproot
| 0
|
12781031
|
#!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numbers
import numpy
import uproot.source.source
import uproot.source.cursor
from uproot.interp.interp import Interpretation
from uproot.interp.numerical import asdtype
from uproot.interp.numerical import _dimsprod
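# Convert per-entry sizes into offsets; e.g. sizes = [2, 3, 1] gives offsets = [0, 2, 5, 6],
# so entry i spans content[offsets[i]:offsets[i + 1]].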
def sizes2offsets(sizes):
out = numpy.empty(len(sizes) + 1, dtype=sizes.dtype)
out[0] = 0
sizes.cumsum(out=out[1:])
return out
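# Copy each jagged element out of its position in `fromdata` into the contiguous
# `todata` buffer (JIT-compiled with numba below when it is importable).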
def _compactify(fromdata, fromstarts, fromstops, todata, tostarts, tostops):
for i in range(len(fromstarts)):
todata[tostarts[i]:tostops[i]] = fromdata[fromstarts[i]:fromstops[i]]
try:
import numba
except ImportError:
pass
else:
_compactify = numba.njit(_compactify)
class asjagged(Interpretation):
# makes __doc__ attribute mutable before Python 3.3
__metaclass__ = type.__new__(type, "type", (Interpretation.__metaclass__,), {})
def __init__(self, asdtype, skip_bytes=0):
self.asdtype = asdtype
self.skip_bytes = skip_bytes
def __repr__(self):
if self.skip_bytes == 0:
return "asjagged({0})".format(repr(self.asdtype))
else:
return "asjagged({0}, skip_bytes={1})".format(repr(self.asdtype), self.skip_bytes)
def to(self, todtype=None, todims=None, skip_bytes=None):
if skip_bytes is None:
skip_bytes = self.skip_bytes
return asjagged(self.asdtype.to(todtype, todims), skip_bytes)
@property
def identifier(self):
if self.skip_bytes == 0:
return "asjagged({0})".format(self.asdtype.identifier)
else:
return "asjagged({0}, {1})".format(self.asdtype.identifier, self.skip_bytes)
@property
def dtype(self):
subshape = self.asdtype.dtype.shape
sub = self.asdtype.dtype.subdtype
if sub is None:
tpe = self.asdtype.dtype
else:
tpe = sub[0]
return numpy.dtype((tpe, (0,) + subshape))
def empty(self):
return JaggedArray(self.asdtype.empty(), numpy.empty(0, dtype=numpy.int64), numpy.empty(0, dtype=numpy.int64))
def compatible(self, other):
return isinstance(other, asjagged) and self.asdtype.compatible(other.asdtype)
def numitems(self, numbytes, numentries):
return self.asdtype.numitems(numbytes - numentries*self.skip_bytes, numentries)
def source_numitems(self, source):
return self.asdtype.source_numitems(source.content)
def fromroot(self, data, offsets, local_entrystart, local_entrystop):
if local_entrystart == local_entrystop:
content = self.asdtype.fromroot(data, None, 0, 0)
else:
itemsize = self.asdtype.fromdtype.itemsize * _dimsprod(self.asdtype.fromdims)
if self.skip_bytes == 0:
numpy.floor_divide(offsets, itemsize, offsets)
starts = offsets[local_entrystart : local_entrystop ]
stops = offsets[local_entrystart + 1 : local_entrystop + 1]
content = self.asdtype.fromroot(data, None, starts[0], stops[-1])
else:
fromstarts = offsets[local_entrystart : local_entrystop ] + self.skip_bytes
fromstops = offsets[local_entrystart + 1 : local_entrystop + 1]
newoffsets = numpy.empty(1 + local_entrystop - local_entrystart, dtype=offsets.dtype)
newoffsets[0] = 0
numpy.cumsum(fromstops - fromstarts, out=newoffsets[1:])
newdata = numpy.empty(newoffsets[-1], dtype=data.dtype)
_compactify(data, fromstarts, fromstops, newdata, newoffsets[:-1], newoffsets[1:])
numpy.floor_divide(newoffsets, itemsize, newoffsets)
starts = newoffsets[:-1]
stops = newoffsets[1:]
content = self.asdtype.fromroot(newdata, None, 0, stops[-1])
return JaggedArray(content, starts, stops)
def destination(self, numitems, numentries):
content = self.asdtype.destination(numitems, numentries)
sizes = numpy.empty(numentries, dtype=numpy.int64)
return JaggedArray._Prep(content, sizes)
def fill(self, source, destination, itemstart, itemstop, entrystart, entrystop):
destination.sizes[entrystart:entrystop] = source.stops - source.starts
self.asdtype.fill(source.content, destination.content, itemstart, itemstop, entrystart, entrystop)
def clip(self, destination, itemstart, itemstop, entrystart, entrystop):
destination.content = self.asdtype.clip(destination.content, itemstart, itemstop, entrystart, entrystop)
destination.sizes = destination.sizes[entrystart:entrystop]
return destination
def finalize(self, destination, branch):
offsets = sizes2offsets(destination.sizes)
starts = offsets[:-1]
stops = offsets[1:]
content = self.asdtype.finalize(destination.content, branch)
leafcount = None
if len(branch.fLeaves) == 1:
leafcount = branch.fLeaves[0].fLeafCount
return JaggedArray(content, starts, stops, leafcount=leafcount)
def asstlvector(asdtype):
return asjagged(asdtype, skip_bytes=10)
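# Sketch of typical usage with the TTree API of this uproot generation (the `tree`
# handle and branch name are placeholders): an asjagged interpretation yields one
# numpy array of values per entry.
#     interp = asjagged(asdtype(numpy.dtype(">f4")))
#     jagged = tree.array("trk_pt", interp)
#     first_event_values = jagged[0]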
def _jaggedarray_getitem(jaggedarray, index):
stopslen = len(jaggedarray.stops)
if index < 0:
index += stopslen
if 0 <= index < stopslen:
start = jaggedarray.starts[index]
stop = jaggedarray.stops[index]
return jaggedarray.content[start:stop]
else:
raise IndexError("index out of range for JaggedArray")
class JaggedArray(object):
# makes __doc__ attribute mutable before Python 3.3
__metaclass__ = type.__new__(type, "type", (type,), {})
class _Prep(object):
def __init__(self, content, sizes):
self.content = content
self.sizes = sizes
@staticmethod
def fromlists(lists):
offsets = numpy.empty(len(lists) + 1, dtype=numpy.int64)
offsets[0] = 0
stop = 0
anybool = False
anyint = False
anyfloat = False
anycomplex = False
for i, x in enumerate(lists):
offsets[i + 1] = stop = stop + len(x)
if isinstance(x, numpy.ndarray):
if issubclass(x.dtype.type, (numpy.bool, numpy.bool_)):
anybool = True
elif issubclass(x.dtype.type, numpy.integer):
anyint = True
elif issubclass(x.dtype.type, numpy.floating):
anyfloat = True
elif issubclass(x.dtype.type, numpy.complexfloating):
anycomplex = True
else:
if not anybool and not anyint and not anyfloat and not anycomplex and any(isinstance(y, bool) for y in x):
anybool = True
if not anyint and not anyfloat and not anycomplex and any(isinstance(y, int) for y in x):
anyint = True
if not anyfloat and not anycomplex and any(isinstance(y, float) for y in x):
anyfloat = True
if not anycomplex and any(isinstance(y, complex) for y in x):
anycomplex = True
if anycomplex:
dtype = numpy.dtype(numpy.complex)
elif anyfloat:
dtype = numpy.dtype(numpy.float64)
elif anyint:
dtype = numpy.dtype(numpy.int64)
elif anybool:
dtype = numpy.dtype(numpy.bool)
else:
raise TypeError("no numerical types found in lists")
starts = offsets[:-1]
stops = offsets[1:]
content = numpy.empty(offsets[-1], dtype=dtype)
for i, x in enumerate(lists):
content[starts[i]:stops[i]] = x
return JaggedArray(content, starts, stops)
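    # For example, JaggedArray.fromlists([[1.0, 2.0], [3.0]]) yields content [1. 2. 3.],
    # starts [0 2] and stops [2 3] (float64, since a float was seen in the input).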
def __init__(self, content, starts, stops, leafcount=None):
assert isinstance(content, numpy.ndarray)
assert isinstance(starts, numpy.ndarray) and issubclass(starts.dtype.type, numpy.integer)
assert isinstance(stops, numpy.ndarray) and issubclass(stops.dtype.type, numpy.integer)
assert len(stops.shape) == 1
assert starts.shape == stops.shape
self.content = content
self.starts = starts
self.stops = stops
self.leafcount = leafcount
def __getstate__(self):
state = self.__dict__.copy()
state["leafcount"] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
def __eq__(self, other):
return isinstance(other, JaggedArray) and numpy.array_equal(self.content, other.content) and self.aligned(other)
def __ne__(self, other):
return not self.__eq__(other)
@property
def offsets(self):
if self.starts.base is not None and self.stops.base is not None and self.starts.base is self.stops.base and \
self.starts.ctypes.data == self.starts.base.ctypes.data and \
self.stops.ctypes.data == self.stops.base.ctypes.data + self.starts.dtype.itemsize and \
len(self.starts) == len(self.starts.base) - 1 and \
len(self.stops) == len(self.starts.base) - 1:
return self.starts.base
elif numpy.array_equal(self.starts[1:], self.stops[:-1]):
return numpy.append(self.starts, self.stops[-1])
else:
raise ValueError("starts and stops are not compatible; cannot express as offsets")
def aligned(self, other):
if self.leafcount is not None and other.leafcount is not None and self.leafcount is other.leafcount:
return True
else:
return numpy.array_equal(self.starts, other.starts) and numpy.array_equal(self.stops, other.stops)
def __len__(self):
return len(self.stops)
def __getitem__(self, index):
if isinstance(index, numbers.Integral):
return _jaggedarray_getitem(self, index)
elif isinstance(index, slice):
if index.step is not None and index.step != 1:
raise NotImplementedError("cannot yet slice a JaggedArray with step != 1 (FIXME: this is possible, should be implemented)")
else:
return JaggedArray(self.content, self.starts[index], self.stops[index])
else:
raise TypeError("JaggedArray index must be an integer or a slice")
def __iter__(self):
content = self.content
starts = self.starts
stops = self.stops
for i in range(len(stops)):
yield content[starts[i]:stops[i]]
def __repr__(self, indent="", linewidth=None):
if linewidth is None:
linewidth = numpy.get_printoptions()["linewidth"]
dtypestr = repr(self.content.dtype).replace("(", "=").rstrip(")")
linewidth = linewidth - 12 - 2 - len(dtypestr)
return "jaggedarray({0})".format(self.__str__(indent=" " * 12, linewidth=linewidth))
def __str__(self, indent="", linewidth=None):
if linewidth is None:
linewidth = numpy.get_printoptions()["linewidth"]
def single(a):
if len(a) > 6:
return numpy.array_str(a[:3], max_line_width=numpy.inf).rstrip("]") + " ... " + numpy.array_str(a[-3:], max_line_width=numpy.inf).lstrip("[")
else:
return numpy.array_str(a, max_line_width=numpy.inf)
if len(self) > 10:
content = [single(self[i]) for i in range(3)] + ["..."] + [single(self[i]) for i in range(-3, 0)]
else:
content = [single(x) for x in self]
if sum(len(x) for x in content) + 2*(len(content) - 1) + 2 <= linewidth:
return "[" + ", ".join(content) + "]"
else:
return "[" + (",\n " + indent).join(content) + "]"
def tolist(self):
return [x.tolist() for x in self]
def __array__(self, dtype=None, copy=False, order="K", subok=False, ndmin=0):
if dtype is None:
dtype = self.content.dtype
elif not isinstance(dtype, numpy.dtype):
dtype = numpy.dtype(dtype)
if dtype == self.content.dtype and not copy and not subok and ndmin == 0:
return self.content
else:
return numpy.array(self.content, dtype=dtype, copy=copy, order=order, subok=subok, ndmin=ndmin)
class asvar(asjagged):
def __init__(self, genclass, skip_bytes=0, args=()):
self.genclass = genclass
super(asvar, self).__init__(asdtype(numpy.dtype(numpy.uint8)), skip_bytes=skip_bytes)
self.args = args
def __repr__(self):
return self.identifier
@property
def identifier(self):
args = []
if self.skip_bytes != 0:
args.append(", skip_bytes={0}".format(self.skip_bytes))
return "asvar({0}{1})".format(self.genclass.__name__, "".join(args))
@property
def dtype(self):
return self.genclass._dtype(self.args)
def empty(self):
return self.genclass(*((super(asvar, self).empty(),) + self.args))
def compatible(self, other):
return isinstance(other, asvar) and self.genclass is other.genclass and super(asvar, self).compatible(other) and self.args == other.args
def source_numitems(self, source):
return super(asvar, self).source_numitems(source.jaggedarray)
def fromroot(self, data, offsets, local_entrystart, local_entrystop):
return self.genclass(*((super(asvar, self).fromroot(data, offsets, local_entrystart, local_entrystop),) + self.args))
def fill(self, source, destination, itemstart, itemstop, entrystart, entrystop):
return super(asvar, self).fill(source.jaggedarray, destination, itemstart, itemstop, entrystart, entrystop)
def finalize(self, destination, branch):
return self.genclass(*((super(asvar, self).finalize(destination, branch),) + self.args))
class VariableLength(object):
def __init__(self, *args):
self.jaggedarray = args[0]
assert self.jaggedarray.content.dtype.itemsize == 1
assert len(self.jaggedarray.content.shape) == 1
self.args = args[1:]
def __len__(self):
return len(self.jaggedarray)
def __getitem__(self, index):
if isinstance(index, numbers.Integral):
return self.interpret(self.jaggedarray[index])
elif isinstance(index, slice):
return self.__class__(*((self.jaggedarray[index],) + self.args))
else:
raise TypeError("{0} index must be an integer or a slice".format(self.__class__.__name__))
def __iter__(self):
for x in self.jaggedarray:
yield self.interpret(x)
def __str__(self):
if len(self) > 6:
return "[{0} ... {1}]".format(" ".join(repr(self[i]) for i in range(3)), " ".join(repr(self[i]) for i in range(-3, 0)))
else:
return "[{0}]".format(" ".join(repr(x) for x in self))
def tolist(self):
return list(self)
@staticmethod
def interpret(item):
raise NotImplementedError
class asobjs(asvar):
def __init__(self, cls, context=None):
super(asobjs, self).__init__(JaggedObjects, skip_bytes=0, args=(cls, context))
self.cls = cls
self.context = context
@property
def identifier(self):
return "asobjs({0})".format(self.cls.__name__)
@property
def dtype(self):
return numpy.dtype((object, (0,)))
def asjaggedobjects(cls, context=None):
return asobjs(cls, context=context)
class JaggedObjects(VariableLength):
indexdtype = numpy.dtype(">i4")
def __init__(self, jaggedarray, cls, context):
super(JaggedObjects, self).__init__(jaggedarray, cls)
self._class = cls
self._context = context
def interpret(self, item):
size, = item[6:10].view(JaggedObjects.indexdtype)
source = uproot.source.source.Source(item)
cursor = uproot.source.cursor.Cursor(10)
out = [None] * size
for i in range(size):
out[i] = self._class.read(source, cursor, self._context, None)
return out
def __str__(self):
if len(self) > 6:
return "[{0}\n ...\n{1}]".format(",\n".join(("" if i == 0 else " ") + repr(self[i]) for i in range(3)), ",\n".join(" " + repr(self[i]) for i in range(-3, 0)))
else:
return "[{0}]".format(", ".join(repr(x) for x in self))
def __repr__(self):
return "<JaggedObjects of {0} at {1:012x}>".format(self._class.__name__, id(self))
def __getitem__(self, index):
if isinstance(index, numbers.Integral):
return self.interpret(self.jaggedarray[index])
elif isinstance(index, slice):
return JaggedObjects(self.jaggedarray[index], self._class, self._context)
else:
raise TypeError("{0} index must be an integer or a slice".format(self.__class__.__name__))
def asstlvectorvector(fromdtype):
return asvar(JaggedJaggedArray, skip_bytes=6, args=(numpy.dtype(fromdtype),))
class JaggedJaggedArray(VariableLength):
def __init__(self, jaggedarray, fromdtype):
super(JaggedJaggedArray, self).__init__(jaggedarray, fromdtype)
self.fromdtype = fromdtype
@classmethod
def _dtype(cls, args):
dtype, = args
return numpy.dtype((dtype, (0, 0)))
indexdtype = numpy.dtype(">i4")
def interpret(self, item):
i = 0
size, = item[i : i + 4].view(JaggedJaggedArray.indexdtype)
i += 4
out = []
while i < len(item):
size, = item[i : i + 4].view(JaggedJaggedArray.indexdtype)
i += 4
numbytes = size * self.fromdtype.itemsize
out.append(item[i : i + numbytes].view(self.fromdtype))
i += numbytes
return out
def __str__(self):
if len(self) > 6:
return "[{0} ... {1}]".format(", ".join(repr([y.tolist() for y in self[i]]) for i in range(3)), ", ".join(repr([y.tolist() for y in self[i]]) for i in range(-3, 0)))
else:
return "[{0}]".format(", ".join(repr([y.tolist() for y in x]) for x in self))
def __repr__(self):
return "jaggedjaggedarray({0})".format(str(self))
def tolist(self):
return [[y.tolist() for y in x] for x in self]
| 1.59375
| 2
|
tests/test_medieval_futhork.py
|
stscoundrel/riimut-py
| 0
|
12781032
|
<filename>tests/test_medieval_futhork.py
from src.riimut import medieval_futhork
def test_transforms_letters_to_runes():
content = "aábcdðeéfghiíjklmnoóǫpqrstuúvwxyýzåäæœöøþ "
expected = "ᛆᛆᛒᚴᚦᚦᚽᚽᚠᚵᚼᛁᛁᛁᚴᛚᛘᚿᚮᚮᚰᛕᚴᚱᛋᛏᚢᚢᚠᚠᛋᛦᛦᛋᚮᛅᛅᚯᚯᚯᚦ:"
result = medieval_futhork.letters_to_runes(content)
assert result == expected
def test_transforms_runes_to_letters():
content = "ᚠᚢᚦᚮᚱᚴᚼᚿᛁᛆᛌᛋᛐᛏᛒᛘᛚᛦᚯᛅᚰᛕᚽ:"
expected = "fuþorkhniassttbmlyøæǫᴘe "
result = medieval_futhork.runes_to_letters(content)
assert result == expected
def test_transforms_later_old_norse_to_runes():
# From Lord's Prayer, in Old Norse.
content = "Faðer uor som ast i himlüm, halgað warðe þit nama"
expected = "ᚠᛆᚦᚽᚱ:ᚢᚮᚱ:ᛋᚮᛘ:ᛆᛋᛏ:ᛁ:ᚼᛁᛘᛚᚢᛘ,:ᚼᛆᛚᚵᛆᚦ:ᚠᛆᚱᚦᚽ:ᚦᛁᛏ:ᚿᛆᛘᛆ"
result = medieval_futhork.letters_to_runes(content)
assert result == expected
def test_transforms_runes_to_later_old_norse():
# From Lord's Prayer, in Old Norse.
content = "ᚠᛆᚦᚽᚱ:ᚢᚮᚱ:ᛋᚮᛘ:ᛆᛋᛏ:ᛁ:ᚼᛁᛘᛚᚢᛘ:ᚼᛆᛚᚵᛆᚦ:ᚠᛆᚱᚦᚽ:ᚦᛁᛏ:ᚿᛆᛘᛆ"
expected = "faþer uor som ast i himlum halgaþ farþe þit nama" # Wont tell apart eth & thorn in mid sentence.
result = medieval_futhork.runes_to_letters(content)
assert result == expected
| 2.8125
| 3
|
mlds/my_dataset/my_dataset.py
|
Conchylicultor/mlds
| 0
|
12781033
|
<filename>mlds/my_dataset/my_dataset.py
"""MyDataset implementation."""
class MyDataset:
pass
| 1.195313
| 1
|
apps/admin/views/achievement.py
|
panla/kesousou
| 1
|
12781034
|
<reponame>panla/kesousou
from django.db.models import Q
from rest_framework import generics
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from drf_yasg.utils import swagger_auto_schema
from common.page import get_results, page_params
from common.order import order_params
from common.users import IsAdminUser
from model.models import Expert, Achievement
from admin.parameters.user import token_parameters
from admin.parameters.achievement import achievement_filter_params, achievement_update_params
from admin.serializers.achievement import AchievementListSerializer, AchievementDetailSerializer
class AchievementsView(APIView):
permission_classes = [IsAdminUser]
authentication_classes = [JSONWebTokenAuthentication]
@swagger_auto_schema(
manual_parameters=achievement_filter_params + order_params + page_params + token_parameters,
operation_id='achievement_list',
responses={200: AchievementListSerializer(many=True)}, tags=['achievements']
)
def get(self, request, *args, **kwargs):
"""
查看科技成果列表,
查询参数 text,排序字段 order
模糊查询字段包括 title, keywords, organizations, creators
精准查询字段包括 sn
"""
text = request.query_params.get('text')
order = request.query_params.get('order')
queryset = Achievement.objects.all()
if text:
queryset = queryset.filter(
Q(sn=text) | Q(title__contains=text) | Q(keywords__contains=text) | Q(organizations__contains=text)
| Q(creators__contains=text)
)
if order:
queryset = queryset.order_by(order)
data = get_results(request, queryset, self, AchievementListSerializer)
return Response({'count': queryset.count(), 'achievements': data}, status.HTTP_200_OK)
class ExpertAchievementsView(APIView):
permission_classes = [IsAdminUser]
authentication_classes = [JSONWebTokenAuthentication]
@swagger_auto_schema(
manual_parameters=order_params + page_params + token_parameters, operation_id='expert_achievement_list',
responses={200: AchievementListSerializer(many=True)}, tags=['achievements']
)
def get(self, request, expert_id, *args, **kwargs):
"""
查看某一个专家的科技成果列表
路径参数 expert_id,排序参数 order
"""
order = request.query_params.get('order')
queryset = Achievement.filter_by_expert_id(expert_id)
if queryset:
if order:
queryset = queryset.order_by(order)
data = get_results(request, queryset, self, AchievementListSerializer)
return Response({'count': queryset.count(), 'achievements': data}, status.HTTP_200_OK)
else:
            return Response({'error': 'there is no such expert'}, status.HTTP_404_NOT_FOUND)
class AchievementView(generics.RetrieveUpdateAPIView):
permission_classes = [IsAdminUser]
authentication_classes = [JSONWebTokenAuthentication]
serializer_class = AchievementDetailSerializer
queryset = Achievement.objects.all()
@swagger_auto_schema(
manual_parameters=token_parameters, operation_id='achievement_read',
responses={200: AchievementDetailSerializer()}, tags=['achievements']
)
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
@swagger_auto_schema(
manual_parameters=token_parameters, request_body=achievement_update_params, operation_id='achievement_update',
responses={201: AchievementDetailSerializer()}, tags=['achievements']
)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
@swagger_auto_schema(
manual_parameters=token_parameters, request_body=achievement_update_params,
operation_id='achievement_partial_update', responses={201: AchievementDetailSerializer()}, tags=['achievements']
)
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
| 2.015625
| 2
|
signatureanalyzer/plotting/_muts.py
|
getzlab/getzlab-SignatureAnalyzer
| 5
|
12781035
|
<gh_stars>1-10
import itertools
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
import pandas as pd
from typing import Union
import numpy as np
import re
import sys
from ..utils import compl, sbs_annotation_converter
from ..context import context96, context78, context83, context1536, context_composite, signature_composite, signature_cosmic, signature_DBS, signature_ID, context_polymerase96
def stacked_bar(H: pd.DataFrame, ref_type: str, figsize: tuple = (8,8)):
"""
Plot stacked barchart & normalized stacked barchart.
--------------------------------------
Args:
* H: matrix output from NMF
* figsize: size of figure (int,int)
Returns:
* figure
Example usage:
plot_bar(H)
"""
H = H.iloc[:,:-3].copy()
# Map signature etiology
if ref_type in ['pcawg_COMPOSITE', 'pcawg_COMPOSITE96', 'pcawg_SBS', 'pcawg_SBS96_ID', 'pcawg_SBS_ID']:
H.columns = H.columns.map(lambda x: x[x.index('SBS') : x.index('_')]).map(signature_composite)
elif ref_type in ['cosmic3', 'cosmic3_exome']:
H.columns = H.columns.map(lambda x: x[x.index('SBS'):]).map(signature_cosmic)
elif ref_type == 'cosmic3_DBS':
H.columns = H.columns.map(lambda x: x[x.index('DBS'):]).map(signature_DBS)
elif ref_type == 'cosmic3_ID':
H.columns = H.columns.map(lambda x: x[x.index('ID'):]).map(signature_ID)
# Sort H matrix by mutation burden for relevant mutation type
H['sum'] = H.sum(1)
H = H.sort_values('sum', ascending=False)
fig,axes = plt.subplots(2,1,figsize=figsize, sharex=True)
H.iloc[:,:-1].plot(
kind='bar',
stacked=True,
ax=axes[0],
width=1.0,
rasterized=True
)
axes[0].set_xticklabels([])
axes[0].set_xticks([])
axes[0].set_ylabel('Counts', fontsize=20)
H_norm = H.iloc[:,:-1].div(H['sum'].values,axis=0)
H_norm.plot(
kind='bar',
stacked=True,
ax=axes[1],
width=1.0,
rasterized=True
)
axes[1].set_xticklabels([])
axes[1].set_xticks([])
axes[1].set_xlabel('Samples', fontsize=16)
axes[1].set_ylabel('Fractions', fontsize=20)
axes[1].get_legend().remove()
axes[1].set_ylim([0,1])
return fig
def _map_sbs_sigs_back(df: pd.DataFrame) -> pd.Series:
"""
Map Back Single-Base Substitution Signatures.
-----------------------
Args:
* df: pandas.core.frame.DataFrame with index to be mapped
Returns:
* pandas.core.series.Series with matching indices to context96
"""
def _check_to_flip(x, ref):
if x in ref:
return x
else:
return compl(x)
if df.index.name is None: df.index.name = 'index'
df_idx = df.index.name
if ">" in df.index[0]:
# Already in arrow format
context_s = df.reset_index()[df_idx].apply(sbs_annotation_converter)
else:
# Already in word format
context_s = df.reset_index()[df_idx]
return context_s.apply(lambda x: _check_to_flip(x, context96.keys()))
def _map_id_sigs_back(df: pd.DataFrame) -> pd.Series:
"""
Map Back Insertion-Deletion Signatures.
-----------------------
Args:
* df: pandas.core.frame.DataFrame with index to be mapped
Returns:
* pandas.core.series.Series with matching indices to context83
"""
if df.index.name is None: df.index.name = 'index'
df_idx = df.index.name
context_s = df.reset_index()[df_idx]
def _convert_from_cosmic(x):
if x in context83:
return x
i1, i2, i3, i4 = x.split('_')
pre = i2 if i3 == '1' else i3
main = i1.lower() + ('m' if i2 == 'MH' else '')
if main == 'del':
post = str(int(i4[0]) + 1) + i4[1:]
else:
post = i4
return pre + main + post
return context_s.apply(_convert_from_cosmic)
def signature_barplot(W: pd.DataFrame, contributions: Union[int, pd.Series] = 1):
"""
Plots signatures from W-matrix for Single-Base Substitutions
--------------------------------------
Args:
* W: W-matrix
* contributions: Series of total contributions, np.sum(H), from each
signature if W is normalized; else, 1
Returns:
* fig
Example usage:
signature_barplot(W, np.sum(H))
"""
W = W.copy()
W.index = _map_sbs_sigs_back(W)
# Fill in any missing contexts
for c in context96:
if c not in W.index:
W.loc[c] = 0
# Sort contexts
W.sort_index(inplace=True)
# Extract columns corresponding to signatures
sig_columns = [c for c in W if c.startswith('S')]
# Calculate total number of mutations at each context for every signature
if isinstance(contributions, pd.Series):
W = W[sig_columns] * contributions[sig_columns]
else:
W = W[sig_columns] * contributions
# Determine number of signatures
n_sigs = len(sig_columns)
# Initialize SBS C>N and T>N mutations and their contexts
# For each context, iterate through C>N and T>N mutations, and take reverse complement
# of context for A>N mutations
context_label = []
change_map = {'CA': [], 'CG': [], 'CT': [], 'TA': [], 'TC': [], 'TG': []}
for p in itertools.product('ACGT', 'ACGT'):
context = ''.join(p)
# Reverse complement of context
compl_context = compl(context, reverse=True)
context_label.append('-'.join(context))
for key in change_map:
if key.startswith('C'):
change_map[key].append(key + context)
else:
# Complement of mutation + reverse complement of context
change_map[key].append(compl(key) + compl_context)
color_map = {'CA': 'cyan', 'CG': 'red', 'CT': 'yellow', 'TA': 'purple', 'TC': 'green', 'TG': 'blue'}
# Plot contributions
x_coords = range(16)
fig, axes = plt.subplots(nrows=n_sigs, ncols=6, figsize=(20, 2.5 * n_sigs), sharex='col', sharey='row')
for row, sig in enumerate(sig_columns):
for col, chg in enumerate(['CA', 'CG', 'CT', 'TA', 'TC', 'TG']):
if n_sigs == 1:
ax = axes[col]
else:
ax = axes[row, col]
bar_heights = W[sig].loc[change_map[chg]]
ax.bar(x_coords, bar_heights, width=.95, linewidth=1.5, edgecolor='gray', color=color_map[chg], rasterized=True)
ax.set_xlim(-.55, 15.55)
if row == 0:
ax.set_title('>'.join(chg), fontsize=18)
if col == 0:
ax.text(51.2 / 16, 1.3, 'Mutational Signatures', transform=ax.transAxes,
horizontalalignment='center', fontsize=24)
if row < n_sigs - 1:
ax.tick_params(axis='x', length=0)
else:
ax.set_xticks(x_coords)
ax.set_xticklabels(context_label, fontfamily='monospace', rotation='vertical')
if col == 0:
ax.text(51.2 / 16, -.4, 'Motifs', transform=ax.transAxes, horizontalalignment='center', fontsize=20,
fontweight='bold')
if col > 0:
ax.tick_params(axis='y', length=0)
if col == 5:
ax.text(1.05, .5, sig, fontsize=14, rotation=270, transform=ax.transAxes, verticalalignment='center')
plt.subplots_adjust(wspace=.08, hspace=.15)
fig.text(.08, .5, 'Contributions', rotation='vertical', verticalalignment='center', fontsize=20, fontweight='bold')
return fig
def signature_barplot_DBS(W, contributions):
"""
Plots signatures from W-matrix for Doublet-Base Substitutions
--------------------------------------
Args:
* W: W-matrix
* contributions: Series of total contributions, np.sum(H), from each
signature if W is normalized; else, 1
Returns:
* fig
Example usage:
signature_barplot_DBS(W, np.sum(H))
"""
W = W.copy()
for c in context78:
if c not in W.index:
W.loc[c] = 0
W.sort_index(inplace=True)
sig_columns = [c for c in W if c.startswith('S')]
if isinstance(contributions, pd.Series):
W = W[sig_columns] * contributions[sig_columns]
else:
W = W[sig_columns] * contributions
n_sigs = len(sig_columns)
ref_map = {'AC': [], 'AT': [], 'CC': [], 'CG': [], 'CT': [], 'GC': [], 'TA': [], 'TC': [], 'TG': [], 'TT': []}
for x in W.index:
ref_map[x[:2]].append(x)
x_coords = {ref: range(len(sigs)) for ref, sigs in ref_map.items()}
color_map = {'AC': '#99CCFF', 'AT': '#0000FF', 'CC': '#CCFF99', 'CG': '#00FF00', 'CT': '#FF99CC',
'GC': '#FF0000', 'TA': '#FFCC99', 'TC': '#FF8000', 'TG': '#CC99FF', 'TT': '#8000FF'}
fig, axes = plt.subplots(nrows=n_sigs, ncols=10, figsize=(20, 2.5 * n_sigs), sharex='col',
sharey='row', gridspec_kw={'width_ratios': (3, 2, 3, 2, 3, 2, 2, 3, 3, 3)})
for row, sig in enumerate(sig_columns):
for col, ref in enumerate(ref_map):
if n_sigs == 1:
ax = axes[col]
else:
ax = axes[row, col]
bar_heights = W[sig].loc[ref_map[ref]]
ax.bar(x_coords[ref], bar_heights, width=.95, linewidth=1.5, edgecolor='gray', color=color_map[ref],
rasterized=True)
ax.set_xlim(-.55, x_coords[ref][-1] + .55)
if row == 0:
ax.set_title(ref)
if col == 0:
ax.text(44.5 / 6, 1.2, 'Mutational Signatures', transform=ax.transAxes,
horizontalalignment='center', fontsize=24)
if row < n_sigs - 1:
ax.tick_params(axis='x', length=0)
else:
xlabels = [x[3:] for x in ref_map[ref]]
ax.set_xticks(x_coords[ref])
ax.set_xticklabels(xlabels, fontfamily='monospace', rotation='vertical')
if col == 0:
ax.text(44.5 / 6, -.3, 'Motifs', transform=ax.transAxes, horizontalalignment='center', fontsize=20,
fontweight='bold')
if col > 0:
ax.tick_params(axis='y', length=0)
if col == 9:
ax.text(1.05, .5, sig, fontsize=14, rotation=270, transform=ax.transAxes, verticalalignment='center')
plt.subplots_adjust(wspace=.08, hspace=.15)
fig.text(.08, .5, 'Contributions', rotation='vertical', verticalalignment='center', fontsize=20, fontweight='bold')
return fig
def signature_barplot_ID(W, contributions):
"""
Plots signatures from W-matrix for Insertions-Deletions
--------------------------------------
Args:
* W: W-matrix
* contributions: Series of total contributions, np.sum(H), from each
signature if W is normalized; else, 1
Returns:
* fig
Example usage:
signature_barplot_ID(W, np.sum(H))
"""
W = W.copy()
W.index = _map_id_sigs_back(W)
for c in context83:
if c not in W.index:
W.loc[c] = 0
W = W.loc[context83]
sig_columns = [c for c in W if c.startswith('S')]
if isinstance(contributions, pd.Series):
W = W[sig_columns] * contributions[sig_columns]
else:
W = W[sig_columns] * contributions
n_sigs = len(sig_columns)
group_map = {'Cdel': [], 'Tdel': [], 'Cins': [], 'Tins': [],
'2del': [], '3del': [], '4del': [], '5+del': [],
'2ins': [], '3ins': [], '4ins': [], '5+ins': [],
'2delm': [], '3delm': [], '4delm': [], '5+delm': []}
for x in W.index:
group = re.search('.+?(?=[\d])', x).group(0)
group_map[group].append(x)
x_coords = {group: range(len(sigs)) for group, sigs in group_map.items()}
color_map = {'Cdel': '#FFCC99', 'Tdel': '#FF8000', 'Cins': '#00FF00', 'Tins': '#00BB00',
'2del': '#FF99CC', '3del': '#FF3377', '4del': '#FF0000', '5+del': '#880000',
'2ins': '#99CCFF', '3ins': '#3377FF', '4ins': '#0000FF', '5+ins': '#000088',
'2delm': '#CC99FF', '3delm': '#9966FF', '4delm': '#8000FF', '5+delm': '#6000AA'}
fig, axes = plt.subplots(nrows=n_sigs, ncols=16, figsize=(20, 2.5 * n_sigs), sharex='col',
sharey='row', gridspec_kw={'width_ratios': (6,) * 12 + (1, 2, 3, 5)})
for row, sig in enumerate(sig_columns):
for col, group in enumerate(group_map):
if n_sigs == 1:
ax = axes[col]
else:
ax = axes[row, col]
bar_heights = W[sig].loc[group_map[group]]
ax.bar(x_coords[group], bar_heights, width=.95, linewidth=1.5, edgecolor='gray', color=color_map[group],
rasterized=True)
ax.set_xlim(-.55, x_coords[group][-1] + .55)
if row == 0:
ax.set_title(re.search('[\d+CT]+', group).group(0), color=color_map[group])
if col == 0:
ax.text(44.5 / 6, 1.3, 'Mutational Signatures', transform=ax.transAxes,
horizontalalignment='center', fontsize=24)
if group == 'Tdel':
ax.text(-.02, 1.16, '1bp deletions at repeats', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map[group])
if group == 'Tins':
ax.text(-.02, 1.16, '1bp insertions at repeats', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map[group])
if group == '4del':
ax.text(-.02, 1.16, '>1bp deletions at repeats', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map[group])
if group == '4ins':
ax.text(-.02, 1.16, '>1bp insertions at repeats', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map[group])
if group == '4delm':
ax.text(.8, 1.16, '>1bp deletions with microhomology', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map[group])
if row < n_sigs - 1:
ax.tick_params(axis='x', length=0)
else:
xlabels = [re.search('[\d+]+$', x).group(0) for x in group_map[group]]
ax.set_xticks(x_coords[group])
ax.set_xticklabels(xlabels, fontfamily='monospace')
if col == 0:
ax.text(44.5 / 6, -.3, 'Motifs', transform=ax.transAxes, horizontalalignment='center', fontsize=20,
fontweight='bold')
if col > 0:
ax.tick_params(axis='y', length=0)
if col == 15:
ax.text(1.05, .5, sig, fontsize=14, rotation=270, transform=ax.transAxes, verticalalignment='center')
plt.subplots_adjust(wspace=.08, hspace=.15)
fig.text(.08, .5, 'Contributions', rotation='vertical', verticalalignment='center', fontsize=20, fontweight='bold')
return fig
def signature_barplot_composite(W: pd.DataFrame, contributions: Union[int, pd.Series] = 1):
"""
Plot signatures from W-matrix for SBS, DBS, and IDs from composite W matrix
--------------------------------------
Args:
* W: W-matrix
* contributions: Series of total contributions, np.sum(H), from each
signature if W is normalized; else, 1
Returns:
* fig
Example usage:
        signature_barplot_composite(W, np.sum(H))
"""
W = W.copy()
# Fill in missing features
composite_index = list(context96)+list(context78)+list(context83)
for c in composite_index:
if c not in list(W.index):
W.loc[c] = 0
W = W.reindex(composite_index)
# Get signature labels
sig_columns = [c for c in W if c.startswith('S')]
n_sigs = len(sig_columns)
# Evaluate contributions
if isinstance(contributions, pd.Series):
W = W[sig_columns] * contributions[sig_columns]
else:
W = W[sig_columns] * contributions
#### x coordinates for SBS contributions
context_label = []
change_map = {'CA': [], 'CG': [], 'CT': [], 'TA': [], 'TC': [], 'TG': []}
for p in itertools.product('ACGT', 'ACGT'):
context = ''.join(p)
compl_context = compl(context, reverse=True)
context_label.append('-'.join(context))
for key in change_map:
if key.startswith('C'):
change_map[key].append(key + context)
else:
change_map[key].append(compl(key) + compl_context)
color_map_sbs = {'CA': 'cyan', 'CG': 'red', 'CT': 'yellow', 'TA': 'purple', 'TC': 'green', 'TG': 'blue'}
x_coords_sbs = range(16)
##### x coordinates for DBS contributions
ref_map = {'AC': [], 'AT': [], 'CC': [], 'CG': [], 'CT': [], 'GC': [], 'TA': [], 'TC': [], 'TG': [], 'TT': []}
for x in context78:
ref_map[x[:2]].append(x)
x_coords_dbs = {ref: range(len(sigs)) for ref, sigs in ref_map.items()}
color_map_dbs = {'AC': '#99CCFF', 'AT': '#0000FF', 'CC': '#CCFF99', 'CG': '#00FF00', 'CT': '#FF99CC',
'GC': '#FF0000', 'TA': '#FFCC99', 'TC': '#FF8000', 'TG': '#CC99FF', 'TT': '#8000FF'}
##### x coordinates for ID contributions
group_map = {'Cdel': [], 'Tdel': [], 'Cins': [], 'Tins': [],
'2del': [], '3del': [], '4del': [], '5+del': [],
'2ins': [], '3ins': [], '4ins': [], '5+ins': [],
'2delm': [], '3delm': [], '4delm': [], '5+delm': []}
for x in context83:
group = re.search('.+?(?=[\d])', x).group(0)
group_map[group].append(x)
x_coords_id = {group: range(len(sigs)) for group, sigs in group_map.items()}
color_map_id = {'Cdel': '#FFCC99', 'Tdel': '#FF8000', 'Cins': '#00FF00', 'Tins': '#00BB00',
'2del': '#FF99CC', '3del': '#FF3377', '4del': '#FF0000', '5+del': '#880000',
'2ins': '#99CCFF', '3ins': '#3377FF', '4ins': '#0000FF', '5+ins': '#000088',
'2delm': '#CC99FF', '3delm': '#9966FF', '4delm': '#8000FF', '5+delm': '#6000AA'}
# Include spaces to separate feature types
all_columns = ['CA', 'CG', 'CT', 'TA', 'TC', 'TG'] + ['space'] + list(ref_map) + ['space'] + list(group_map)
fig, axes = plt.subplots(nrows=n_sigs, ncols=34, figsize=(60,2.5*n_sigs), sharex='col',
gridspec_kw={'width_ratios': (16,)*6 + (1,)+ (9,6,9,6,9,6,6,9,9,9) + (1,) + (6,)*12+(1,2,3,5)})
max_height = 0 # Maximum height for scaling y-axis per feature type per signature
# Iterate through signatures, such that each row plots mutational landscape for a signature
for row, sig in enumerate(sig_columns):
for col, ref in enumerate(all_columns):
if n_sigs == 1:
ax = axes[col]
else:
ax = axes[row,col]
if col in [6,17]: # Space between feature types...Remove ax and move to next feature (column)
ax.remove()
continue
# For SBS portion, iterate through 6 SNV types (C>A, C>T, C>G, T>A...)
if col < 6:
bar_heights = W[sig].loc[change_map[ref]]
for height in bar_heights:
if height > max_height: max_height = height
ax.bar(x_coords_sbs, bar_heights, width=.95, linewidth=1.5, edgecolor='gray', color=color_map_sbs[ref], rasterized=True)
ax.set_xlim(-.55, 15.55)
if row == 0:
ax.set_title('>'.join(ref), fontsize=18)
if col == 0:
ax.text(8.1, 1.3, 'Mutational Signatures', transform=ax.transAxes,
horizontalalignment='center', fontsize=24)
if row < n_sigs - 1:
ax.tick_params(axis='x', length=0, labelbottom=False)
else:
ax.set_xticks(x_coords_sbs)
ax.set_xticklabels(context_label, fontfamily='monospace', rotation='vertical')
if col == 0:
ax.text(8.1, -.4, 'Motifs', transform = ax.transAxes, horizontalalignment='center', fontsize=20)
if col == 5:
if n_sigs == 1:
for axis in axes[:col+1]:
axis.set_ylim(0,max_height + 0.1*max_height+1)
else:
for axis in axes[row,:col+1]:
axis.set_ylim(0,max_height + 0.1*max_height+1)
max_height = 0
# For DBS portion
elif col < 17:
bar_heights = W[sig].loc[ref_map[ref]]
for height in bar_heights:
if height > max_height: max_height = height
ax.bar(x_coords_dbs[ref], bar_heights, width=.95, linewidth=1.5, edgecolor='gray', color=color_map_dbs[ref],
rasterized=True)
ax.set_xlim(-.55, x_coords_dbs[ref][-1] + .55)
if row == 0:
ax.set_title(ref)
if row < n_sigs - 1:
ax.tick_params(axis='x', length=0)
else:
xlabels = [x[3:] for x in ref_map[ref]]
ax.set_xticks(x_coords_dbs[ref])
ax.set_xticklabels(xlabels, fontfamily='monospace', rotation='vertical')
if col == 15:
if n_sigs == 1:
for axis in axes[6:col+1]:
axis.set_ylim(0,max_height + 0.1*max_height+1)
else:
for axis in axes[row,6:col+1]:
axis.set_ylim(0,max_height + 0.1*max_height+1)
max_height = 0
# For ID portion
else:
bar_heights = W[sig].loc[group_map[ref]]
for height in bar_heights:
if height > max_height: max_height = height
ax.bar(x_coords_id[ref], bar_heights, width=.95, linewidth=1.5, edgecolor='gray', color=color_map_id[ref],
rasterized=True)
ax.set_xlim(-.55, x_coords_id[ref][-1] + .55)
if row == 0:
ax.set_title(re.search('[\d+CT]+', ref).group(0), color=color_map_id[ref])
if ref == 'Tdel':
ax.text(-.02, 1.16, '1bp deletions at repeats', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map_id[ref])
if ref == 'Tins':
ax.text(-.02, 1.16, '1bp insertions at repeats', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map_id[ref])
if ref == '4del':
ax.text(-.02, 1.16, '>1bp deletions at repeats', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map_id[ref])
if ref == '4ins':
ax.text(-.02, 1.16, '>1bp insertions at repeats', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map_id[ref])
if ref == '4delm':
ax.text(.8, 1.16, '>1bp deletions with microhomology', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map_id[ref])
if row < n_sigs - 1:
ax.tick_params(axis='x', length=0)
else:
xlabels = [re.search('[\d+]+$', x).group(0) for x in group_map[ref]]
ax.set_xticks(x_coords_id[ref])
ax.set_xticklabels(xlabels, fontfamily='monospace')
if col == 33:
if n_sigs == 1:
for axis in axes[16:col+1]:
axis.set_ylim(0,max_height + 0.1*max_height+1)
else:
for axis in axes[row,16:col+1]:
axis.set_ylim(0,max_height + 0.1*max_height+1)
max_height = 0
if col not in [0,7,18]:
ax.tick_params(axis='y', which='both',length=0, labelleft=False)
if col == 33:
ax.text(1.05, .5, sig, fontsize=14, rotation=270, transform=ax.transAxes, verticalalignment='center')
# Set titles and organize plot
plt.subplots_adjust(wspace=.12, hspace=.15)
fig.text(.105, .5, 'Contributions', rotation='vertical', verticalalignment='center', fontsize=20, fontweight='bold')
return fig
def signature_barplot_sbs_id(W: pd.DataFrame, contributions: Union[int, pd.Series] = 1):
"""
    Plot signatures from W-matrix for SBS and ID contexts from a composite W matrix
--------------------------------------
Args:
* W: W-matrix
* contributions: Series of total contributions, np.sum(H), from each
signature if W is normalized; else, 1
Returns:
* fig
Example usage:
        signature_barplot_sbs_id(W, np.sum(H))
"""
W = W.copy()
# Fill in missing features and sort
composite_index = list(context96)+list(context83)
for c in composite_index:
if c not in list(W.index):
W.loc[c] = 0
W = W.reindex(composite_index)
# Get signature labels
sig_columns = [c for c in W if c.startswith('S')]
n_sigs = len(sig_columns)
# Evaluate contributions
if isinstance(contributions, pd.Series):
W = W[sig_columns] * contributions[sig_columns]
else:
W = W[sig_columns] * contributions
#### x coordinates for SBS contributions
context_label = []
change_map = {'CA': [], 'CG': [], 'CT': [], 'TA': [], 'TC': [], 'TG': []}
for p in itertools.product('ACGT', 'ACGT'):
context = ''.join(p)
compl_context = compl(context, reverse=True)
context_label.append('-'.join(context))
for key in change_map:
if key.startswith('C'):
change_map[key].append(key + context)
else:
change_map[key].append(compl(key) + compl_context)
color_map_sbs = {'CA': 'cyan', 'CG': 'red', 'CT': 'yellow', 'TA': 'purple', 'TC': 'green', 'TG': 'blue'}
x_coords_sbs = range(16)
##### x coordinates for ID contributions
group_map = {'Cdel': [], 'Tdel': [], 'Cins': [], 'Tins': [],
'2del': [], '3del': [], '4del': [], '5+del': [],
'2ins': [], '3ins': [], '4ins': [], '5+ins': [],
'2delm': [], '3delm': [], '4delm': [], '5+delm': []}
for x in context83:
group = re.search('.+?(?=[\d])', x).group(0)
group_map[group].append(x)
x_coords_id = {group: range(len(sigs)) for group, sigs in group_map.items()}
color_map_id = {'Cdel': '#FFCC99', 'Tdel': '#FF8000', 'Cins': '#00FF00', 'Tins': '#00BB00',
'2del': '#FF99CC', '3del': '#FF3377', '4del': '#FF0000', '5+del': '#880000',
'2ins': '#99CCFF', '3ins': '#3377FF', '4ins': '#0000FF', '5+ins': '#000088',
'2delm': '#CC99FF', '3delm': '#9966FF', '4delm': '#8000FF', '5+delm': '#6000AA'}
all_columns = ['CA', 'CG', 'CT', 'TA', 'TC', 'TG'] + ['space'] + list(group_map)
fig, axes = plt.subplots(nrows=n_sigs, ncols=23, figsize=(60,2.5*n_sigs), sharex='col',
gridspec_kw={'width_ratios': (16,)*6 + (1,) + (6,)*12+(1,2,3,5)})
max_height = 0
for row, sig in enumerate(sig_columns):
for col, ref in enumerate(all_columns):
if n_sigs == 1:
ax = axes[col]
else:
ax = axes[row,col]
if col == 6:
ax.remove()
continue
# For SBS portion
if col < 6:
bar_heights = W[sig].loc[change_map[ref]]
for height in bar_heights:
if height > max_height: max_height = height
ax.bar(x_coords_sbs, bar_heights, width=.95, linewidth=1.5, edgecolor='gray', color=color_map_sbs[ref], rasterized=True)
ax.set_xlim(-.55, 15.55)
if row == 0:
ax.set_title('>'.join(ref), fontsize=18)
if col == 0:
ax.text(5.5, 1.3, 'Mutational Signatures', transform=ax.transAxes,
horizontalalignment='center', fontsize=24)
if row < n_sigs - 1:
ax.tick_params(axis='x', length=0, labelbottom=False)
else:
ax.set_xticks(x_coords_sbs)
ax.set_xticklabels(context_label, fontfamily='monospace', rotation='vertical')
if col == 0:
ax.text(5.5, -.4, 'Motifs', transform = ax.transAxes, horizontalalignment='center', fontsize=20)
if col == 5:
if n_sigs == 1:
for axis in axes[:col+1]:
axis.set_ylim(0,max_height + 0.1*max_height+1)
else:
for axis in axes[row,:col+1]:
axis.set_ylim(0,max_height + 0.1*max_height+1)
max_height = 0
# For ID portion
else:
bar_heights = W[sig].loc[group_map[ref]]
for height in bar_heights:
if height > max_height: max_height = height
ax.bar(x_coords_id[ref], bar_heights, width=.95, linewidth=1.5, edgecolor='gray', color=color_map_id[ref],
rasterized=True)
ax.set_xlim(-.55, x_coords_id[ref][-1] + .55)
if row == 0:
ax.set_title(re.search('[\d+CT]+', ref).group(0), color=color_map_id[ref])
if ref == 'Tdel':
ax.text(-.02, 1.16, '1bp deletions at repeats', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map_id[ref])
if ref == 'Tins':
ax.text(-.02, 1.16, '1bp insertions at repeats', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map_id[ref])
if ref == '4del':
ax.text(-.02, 1.16, '>1bp deletions at repeats', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map_id[ref])
if ref == '4ins':
ax.text(-.02, 1.16, '>1bp insertions at repeats', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map_id[ref])
if ref == '4delm':
ax.text(.8, 1.16, '>1bp deletions with microhomology', fontsize=10, transform=ax.transAxes,
horizontalalignment='center', color=color_map_id[ref])
if row < n_sigs - 1:
ax.tick_params(axis='x', length=0)
else:
xlabels = [re.search('[\d+]+$', x).group(0) for x in group_map[ref]]
ax.set_xticks(x_coords_id[ref])
ax.set_xticklabels(xlabels, fontfamily='monospace')
if col == 22:
if n_sigs == 1:
for axis in axes[6:col+1]:
axis.set_ylim(0,max_height + 0.1*max_height+1)
else:
for axis in axes[row,6:col+1]:
axis.set_ylim(0,max_height + 0.1*max_height+1)
max_height = 0
if col not in [0,7]:
ax.tick_params(axis='y', which='both',length=0, labelleft=False)
if col == 22:
ax.text(1.05, .5, sig, fontsize=14, rotation=270, transform=ax.transAxes, verticalalignment='center')
# Set titles and organize plot
plt.subplots_adjust(wspace=.12, hspace=.15)
fig.text(.105, .5, 'Contributions', rotation='vertical', verticalalignment='center', fontsize=20, fontweight='bold')
return fig
def signature_barplot_polymerase(W: pd.DataFrame, contributions: Union[int, pd.Series] = 1):
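    """
    Plots signatures from W-matrix for polymerase-mode contexts: the 96 SBS
    trinucleotide channels plus insertion/deletion length bins (1, 2, 3, 4+)
    --------------------------------------
    Args:
        * W: W-matrix
        * contributions: Series of total contributions, np.sum(H), from each
            signature if W is normalized; else, 1
    Returns:
        * fig
    """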
W = W.copy()
# Fill in missing features
for c in context_polymerase96:
if c not in list(W.index):
W.loc[c] = 0
W = W.reindex(context_polymerase96)
# Get signature labels
sig_columns = [c for c in W if c.startswith('S')]
n_sigs = len(sig_columns)
# Evaluate contributions
if isinstance(contributions, pd.Series):
W = W[sig_columns] * contributions[sig_columns]
else:
W = W[sig_columns] * contributions
#### X coordinates for SBS contributions
context_label = []
change_map = {'CA': [], 'CG': [], 'CT': [], 'TA': [], 'TC': [], 'TG': []}
for p in itertools.product('ACGT','ACGT'):
context = ''.join(p)
compl_context = compl(context, reverse=True)
context_label.append('-'.join(context))
for key in change_map:
if key.startswith('C'):
change_map[key].append(key + context)
else:
change_map[key].append(compl(key) + compl_context)
color_map_sbs = {'CA': 'cyan', 'CG': 'red', 'CT': 'yellow', 'TA': 'purple', 'TC': 'green', 'TG': 'blue'}
x_coords_sbs = range(16)
#### X coordinates for ID contributions
group_map = {'INS': ['INS' + str(i+1) for i in range(4)], 'DEL': ['DEL' + str(i+1) for i in range(4)]}
color_map_id = {'INS':'#FFCC99', 'DEL':'#FF8000'}
x_coords_id = {'INS':range(0,4), 'DEL':range(0,4)}
all_columns = [x for x in change_map.keys()] + ['space', 'INS', 'DEL']
fig, axes = plt.subplots(nrows=n_sigs, ncols=9, figsize=(60,2.5*n_sigs), sharex='col',
gridspec_kw={'width_ratios': (16,)*6 + (1,) + (4,)*2})
max_height = 0
# Iterate through signatures
for row, sig in enumerate(sig_columns):
# iterate through columns
for col, ref in enumerate(all_columns):
if n_sigs == 1:
ax = axes[col]
else:
ax = axes[row,col]
if col == 6:
ax.remove()
continue
# For SBS portion
if col < 6:
bar_heights = W[sig].loc[change_map[ref]]
for height in bar_heights:
if height > max_height: max_height = height
ax.bar(x_coords_sbs, bar_heights, width=.95, linewidth=1.5, edgecolor='gray', color=color_map_sbs[ref], rasterized=True)
ax.set_xlim(-.55, 15.55)
if row == 0:
ax.set_title('>'.join(ref), fontsize=18)
if col == 0:
ax.text(4, 1.3, 'Mutational Signatures', transform=ax.transAxes,
horizontalalignment='center', fontsize=24)
if row < n_sigs - 1:
ax.tick_params(axis='x', length=0, labelbottom=False)
else:
ax.set_xticks(x_coords_sbs)
ax.set_xticklabels(context_label, fontfamily='monospace', rotation='vertical')
if col == 0:
ax.text(4, -.4, 'Motifs', transform = ax.transAxes, horizontalalignment='center', fontsize=20)
if col == 5:
if n_sigs == 1:
for axis in axes[:col+1]:
axis.set_ylim(0,max_height + 0.1*max_height+1)
else:
for axis in axes[row,:col+1]:
axis.set_ylim(0,max_height + 0.1*max_height+1)
max_height = 0
# For ID portion
else:
bar_heights = W[sig].loc[group_map[ref]]
for height in bar_heights:
if height > max_height: max_height = height
ax.bar(x_coords_id[ref], bar_heights, width=0.95, linewidth=1.5, edgecolor='gray', color=color_map_id[ref],
rasterized=True)
ax.set_xlim(-.55, x_coords_id[ref][-1] + 0.55)
# Set column titles
if row == 0:
ax.set_title(ref, color=color_map_id[ref])
if row < n_sigs - 1:
ax.tick_params(axis='x', length=0)
else:
xlabels = ['1','2','3','4+']
ax.set_xticks(x_coords_id[ref])
ax.set_xticklabels(xlabels, fontfamily='monospace')
if col == 8:
ax.text(1.05, .5, sig, fontsize=14, rotation=270, transform=ax.transAxes, verticalalignment="center")
# Set titles and organize plot
plt.subplots_adjust(wspace=.12, hspace=0.15)
fig.text(0.105, 0.5, 'Contributions', rotation='vertical', verticalalignment='center', fontsize=20, fontweight='bold')
return fig
| 2.28125
| 2
|
google/cloud/securitycenter/settings/v1beta1/securitycenter-settings-v1beta1-py/google/cloud/securitycenter/settings_v1beta1/services/security_center_settings_service/transports/__init__.py
|
googleapis/googleapis-gen
| 7
|
12781036
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import SecurityCenterSettingsServiceTransport
from .grpc import SecurityCenterSettingsServiceGrpcTransport
from .grpc_asyncio import SecurityCenterSettingsServiceGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[SecurityCenterSettingsServiceTransport]]
_transport_registry['grpc'] = SecurityCenterSettingsServiceGrpcTransport
_transport_registry['grpc_asyncio'] = SecurityCenterSettingsServiceGrpcAsyncIOTransport
__all__ = (
'SecurityCenterSettingsServiceTransport',
'SecurityCenterSettingsServiceGrpcTransport',
'SecurityCenterSettingsServiceGrpcAsyncIOTransport',
)
| 1.695313
| 2
|
deep_daze/deep_daze.py
|
asigalov61/deep-daze
| 1
|
12781037
|
import os
import signal
import subprocess
import sys
import random
from datetime import datetime
from pathlib import Path
from shutil import copy
import torch
import torch.nn.functional as F
from siren_pytorch import SirenNet, SirenWrapper
from torch import nn
from torch.cuda.amp import GradScaler, autocast
from torch.optim import Adam
from torchvision.utils import save_image
from tqdm import trange, tqdm
from deep_daze.clip import load, tokenize
assert torch.cuda.is_available(), 'CUDA must be available in order to use Deep Daze'
# graceful keyboard interrupt
terminate = False
def signal_handling(signum, frame):
global terminate
terminate = True
signal.signal(signal.SIGINT, signal_handling)
perceptor, normalize_image = load()
# Helpers
def exists(val):
return val is not None
def interpolate(image, size):
return F.interpolate(image, (size, size), mode='bilinear', align_corners=False)
def rand_cutout(image, size):
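    # Sample a random square crop of side `size` from the (B, C, H, W) image tensor;
    # e.g. for a 512x512 image and size=256 this returns a random 256x256 cutout.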
width = image.shape[-1]
offsetx = torch.randint(0, width - size, ())
offsety = torch.randint(0, width - size, ())
cutout = image[:, :, offsetx:offsetx + size, offsety:offsety + size]
return cutout
def open_folder(path):
if os.path.isfile(path):
path = os.path.dirname(path)
if not os.path.isdir(path):
return
cmd_list = None
if sys.platform == 'darwin':
cmd_list = ['open', '--', path]
elif sys.platform == 'linux2' or sys.platform == 'linux':
cmd_list = ['xdg-open', path]
elif sys.platform in ['win32', 'win64']:
cmd_list = ['explorer', path.replace('/', '\\')]
if cmd_list == None:
return
try:
subprocess.check_call(cmd_list)
except subprocess.CalledProcessError:
pass
except OSError:
pass
def norm_siren_output(img):
return ((img + 1) * 0.5).clamp(0, 1)
class DeepDaze(nn.Module):
def __init__(
self,
total_batches,
batch_size,
num_layers=8,
image_width=512,
loss_coef=100,
):
super().__init__()
# load clip
self.loss_coef = loss_coef
self.image_width = image_width
self.batch_size = batch_size
self.total_batches = total_batches
self.num_batches_processed = 0
siren = SirenNet(
dim_in=2,
dim_hidden=256,
num_layers=num_layers,
dim_out=3,
use_bias=True
)
self.model = SirenWrapper(
siren,
image_width=image_width,
image_height=image_width
)
self.generate_size_schedule()
def forward(self, text, return_loss=True):
out = self.model()
out = norm_siren_output(out)
if not return_loss:
return out
pieces = []
width = out.shape[-1]
size_slice = slice(self.num_batches_processed, self.num_batches_processed + self.batch_size)
for size in self.scheduled_sizes[size_slice]:
apper = rand_cutout(out, size)
apper = interpolate(apper, 224)
pieces.append(normalize_image(apper))
image = torch.cat(pieces)
with autocast(enabled=False):
image_embed = perceptor.encode_image(image)
text_embed = perceptor.encode_text(text)
self.num_batches_processed += self.batch_size
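        # The loss is the negated (and scaled) mean CLIP cosine similarity between the
        # text embedding and the embeddings of the sampled image cutouts.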
loss = -self.loss_coef * torch.cosine_similarity(text_embed, image_embed, dim=-1).mean()
return loss
def generate_size_schedule(self):
batches = 0
counter = 0
self.scheduled_sizes = []
while batches < self.total_batches:
counter += 1
sizes = self.sample_sizes(counter)
batches += len(sizes)
self.scheduled_sizes.extend(sizes)
def sample_sizes(self, counter):
pieces_per_group = 4
# 6 piece schedule increasing in context as model saturates
if counter < 500:
partition = [4, 5, 3, 2, 1, 1]
elif counter < 1000:
partition = [2, 5, 4, 2, 2, 1]
elif counter < 1500:
partition = [1, 4, 5, 3, 2, 1]
elif counter < 2000:
partition = [1, 3, 4, 4, 2, 2]
elif counter < 2500:
partition = [1, 2, 2, 4, 4, 3]
elif counter < 3000:
partition = [1, 1, 2, 3, 4, 5]
else:
partition = [1, 1, 1, 2, 4, 7]
dbase = .38
step = .1
width = self.image_width
sizes = []
for part_index in range(len(partition)):
groups = partition[part_index]
for _ in range(groups * pieces_per_group):
sizes.append(torch.randint(
int((dbase + step * part_index + .01) * width),
int((dbase + step * (1 + part_index)) * width), ()))
sizes.sort()
return sizes
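    # Note (added for clarity; not part of the original code): every call to
    # sample_sizes returns sum(partition) * pieces_per_group = 64 cutout sizes.
    # Early on (counter < 500) the partition [4, 5, 3, 2, 1, 1] favours small
    # cutouts (~0.39-0.48 of image_width); from counter >= 3000 the partition
    # [1, 1, 1, 2, 4, 7] favours near-full-frame cutouts (~0.89-0.98), so the
    # sampled context grows as the model saturates.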
class Imagine(nn.Module):
def __init__(
self,
text,
*,
lr=1e-5,
batch_size=4,
gradient_accumulate_every=4,
save_every=100,
image_width=512,
num_layers=16,
epochs=20,
iterations=1050,
save_progress=False,
seed=None,
open_folder=True,
save_date_time=False
):
super().__init__()
if exists(seed):
tqdm.write(f'setting seed: {seed}')
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
self.epochs = epochs
self.iterations = iterations
total_batches = epochs * iterations * batch_size * gradient_accumulate_every
model = DeepDaze(
total_batches=total_batches,
batch_size=batch_size,
image_width=image_width,
num_layers=num_layers
).cuda()
self.model = model
self.scaler = GradScaler()
self.optimizer = Adam(model.parameters(), lr)
self.gradient_accumulate_every = gradient_accumulate_every
self.save_every = save_every
self.save_date_time = save_date_time
self.open_folder = open_folder
self.save_progress = save_progress
self.text = text
self.textpath = text.replace(" ", "_")
self.filename = self.image_output_path()
self.encoded_text = tokenize(text).cuda()
def image_output_path(self, current_iteration: int = None) -> Path:
"""
Returns underscore separated Path.
A current timestamp is prepended if `self.save_date_time` is set.
        A sequence number, left-padded with 6 zeroes, is appended if `current_iteration` is given.
:rtype: Path
"""
output_path = self.textpath
if current_iteration:
sequence_number = int(current_iteration / self.save_every)
sequence_number_left_padded = str(sequence_number).zfill(6)
output_path = f"{output_path}.{sequence_number_left_padded}"
if self.save_date_time:
current_time = datetime.now().strftime("%y%m%d-%H%M%S_%f")
output_path = f"{current_time}_{output_path}"
return Path(f"{output_path}.png")
def replace_current_img(self):
"""
Replace the current file at {text_path}.png with the current self.filename
"""
always_current_img = f"{self.textpath}.png"
if os.path.isfile(always_current_img) or os.path.islink(always_current_img):
os.remove(always_current_img) # remove the file
        copy(str(self.filename), always_current_img)
def generate_and_save_image(self, custom_filename: Path = None, current_iteration: int = None):
"""
:param current_iteration:
:param custom_filename: A custom filename to use when saving - e.g. "testing.png"
"""
with torch.no_grad():
img = normalize_image(self.model(self.encoded_text, return_loss=False).cpu())
img.clamp_(0., 1.)
self.filename = custom_filename if custom_filename else self.image_output_path(current_iteration=current_iteration)
save_image(img, self.filename)
self.replace_current_img()
tqdm.write(f'image updated at "./{str(self.filename)}"')
    def train_step(self, epoch, iteration) -> torch.Tensor:
total_loss = 0
for _ in range(self.gradient_accumulate_every):
with autocast():
loss = self.model(self.encoded_text)
loss = loss / self.gradient_accumulate_every
total_loss += loss
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
self.optimizer.zero_grad()
if (iteration % self.save_every == 0) and self.save_progress:
self.generate_and_save_image(current_iteration=iteration)
return total_loss
def forward(self):
tqdm.write(f'Imagining "{self.text}" from the depths of my weights...')
if self.open_folder:
open_folder('./')
self.open_folder = False
for epoch in trange(self.epochs, desc='epochs'):
pbar = trange(self.iterations, desc='iteration')
for i in pbar:
loss = self.train_step(epoch, i)
pbar.set_description(f'loss: {loss.item():.2f}')
if terminate:
print('interrupted by keyboard, gracefully exiting')
return
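# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumptions: a CUDA GPU is available (asserted above), the CLIP weights can be
# downloaded by load(), and the prompt plus hyperparameters below are illustrative.
if __name__ == '__main__':
    imagine = Imagine(
        text='a pink rose covered in morning dew',  # hypothetical prompt
        num_layers=16,
        epochs=1,
        iterations=100,
        save_every=50,
        save_progress=True,
        open_folder=False,
    )
    imagine()  # runs the training loop and writes PNGs into the working directory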
| 1.796875
| 2
|
tests/credentials/credentials.py
|
Acidburn0zzz/dfvfs
| 1
|
12781038
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the credentials interface."""
from __future__ import unicode_literals
import unittest
from dfvfs.credentials import credentials
from tests import test_lib as shared_test_lib
class Credentials(shared_test_lib.BaseTestCase):
"""Tests the credentials interface."""
def testInitialize(self):
"""Tests the __init__ function."""
with self.assertRaises(ValueError):
credentials.Credentials()
if __name__ == '__main__':
unittest.main()
| 2.359375
| 2
|
xfeeds/parser/tasks.py
|
rubeon/django-xfeeds
| 0
|
12781039
|
import logging
import os
import ssl
import urllib.request
import feedparser
from datetime import datetime
from time import mktime, localtime
from pprint import pprint, pformat
from bs4 import BeautifulSoup as soup
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from ..models import Feed
from ..models import FeedItem
from ..models import TaggedItem
from ..models import CachedImage
LOGGER = logging.getLogger(__name__)
FAKE_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
feedparser.USER_AGENT = FAKE_AGENT
def get_cached_image(url):
"""
Utility to cache images
"""
# first, see if the URL has already been cached
try:
cimage = CachedImage.objects.get(url=url)
except CachedImage.DoesNotExist:
cimage = CachedImage(url=url)
cimage.save()
cimage.cache()
return cimage
def url_to_feed(url):
"""
takes a URL, returns the feed object or None
"""
# pprint"Url to feed entered")
LOGGER.debug("%s.url_to_feed entered" % __name__)
res = parse_feed(url)
# minor kluge here
parsed_feed = res['feed']
parsed_feed['etag'] = getattr(res, 'etag', None)
parsed_feed['last_modified'] = getattr(res, 'last_modified', None)
    # pprint(parsed_feed)
# some minor validation...
for required_key in ['title',]:
if required_key not in parsed_feed:
return None
feed = add_feed(parsed_feed, url)
# ok, now add the items
feed_items = update_items(feed)
return feed
def update_items(feed, force=False):
"""
might be an instance method?
"""
if feed.needs_update or force:
LOGGER.debug("%s.update_items entered" % __name__)
items = parse_feed(feed.feed_url, etag=feed.etag, modified=feed.last_update)['items']
res = add_items(feed, items)
else:
print("Skipping (update not needed)")
res = 0
return res
def get_feed_image(parsed_feed):
"""
Figures out how this precious little snowflake defines its image
returns it as a django File object
"""
image = None
for key in ['image']:
if hasattr(parsed_feed, key):
image_struct = parsed_feed[key]
LOGGER.info("Found image key %s: %s" % (key, image_struct))
image = get_cached_image(image_struct.url)
if image:
continue
return image
def get_feed_icon(parsed_feed):
if hasattr(parsed_feed, 'icon'):
image_url = parsed_feed['icon']
LOGGER.info("Found icon: %s" % icon_url)
def add_feed(parsed_feed, feed_url):
"""
Takes a feed dictionary, and adds it to the database
if exists, returns the original
"""
LOGGER.debug("%s.add_feed entered" % __name__)
LOGGER.debug("feed_url: %s" % feed_url)
LOGGER.debug("feed: \n%s" % pformat(parsed_feed))
if 'links' in parsed_feed:
for link in parsed_feed['links']:
if 'self' in list(link.values()):
# self-declared feed_url takes precendence
# FIXME: let's see how that works out in practice...
feed_url = link['href']
# else:
    # # pprint(parsed_feed)
# raise ValidationError
# check if this is a known feed
# if 'title' not in parsed_feed:
    # # pprint(parsed_feed)
#
try:
f = Feed.objects.get(feed_url=feed_url)
f.etag = parsed_feed['etag']
f.last_modified = parsed_feed['last_modified']
f.save()
except Feed.DoesNotExist:
# needs to be added
if parsed_feed.get('updated', None):
updated = datetime.fromtimestamp(mktime(parsed_feed['updated_parsed']))
else:
updated = datetime.now()
struct = {
'feed_title': parsed_feed['title'],
'language': parsed_feed.get('language', 'en'),
'copyright': parsed_feed.get('copyright',''),
'generator': parsed_feed.get('generator', ''),
'link': parsed_feed['link'],
'last_update': datetime.now(),
'pubDate': updated,
'lastBuildDate': updated,
'skipHours': parsed_feed.get('skipHours', 1),
'feed_url' : feed_url,
'etag' : parsed_feed['etag'],
}
struct['image'] = get_feed_image(parsed_feed)
LOGGER.debug(struct)
f = Feed(**struct)
f.save()
return f
def add_items(feed, parsed_items):
# feed: Feed object
# parsed_items: list of items from the feedparser
count = 0
for item in parsed_items:
# check of this has already been indexed
        # pprint(item['id'])
# pprint(item)
        if 'id' not in item:
item['id'] = item['link']
pubDate = localtime()
try:
FeedItem.objects.get(guid=item['id'])
continue
# except KeyError as e:
# # item doesn't have a guid, for shame!
# item['id'] = item['link']
except FeedItem.DoesNotExist:
# figure out the pub_date
if 'published_parsed' in item:
pubDate = item['published_parsed']
            elif 'updated_parsed' in item:
pubDate = item['updated_parsed']
pubDate = datetime.fromtimestamp(mktime(pubDate))
# ok, it's new
# need to figure out content
            # pprint(item)
# if not item.has_key('description'):
# print "DOH!"
# LOGGER.debug('description empty, look for content')
# description = item['content'][0]['value'] # wordpress weirdness
# else:
# description = item['description']
description = item['description']
struct = {
'source': feed,
'guid': item['id'],
'pubDate': pubDate,
'title': item.get('title', 'Untitled'),
'description': description,
'link': item['link'],
'author': item.get('author', feed.author),
'comments': item.get('comments',''),
}
            # pprint(struct)
i = FeedItem(**struct)
i.save()
count = count + 1
return count
def find_feed(site):
"""
Parses a page, and returns a list of
atom / RSS feeds
"""
parsed_url = urllib.parse.urlparse(site)
if not parsed_url.scheme:
site = 'http://' + site
parsed_url = urllib.parse.urlparse(site)
req = urllib.request.Request(
site,
data=None,
headers={
'User-Agent': FAKE_AGENT
}
)
raw = urllib.request.urlopen(req).read()
result = []
possible_feeds = []
html = soup(raw, features='html.parser')
feed_urls = html.findAll("link", rel="alternate")
for f in feed_urls:
t = f.get("type",None)
if t:
if "rss" in t or "xml" in t:
href = f.get("href",None)
if href:
possible_feeds.append(href)
parsed_url = urllib.parse.urlparse(site)
if not parsed_url.scheme:
parsed_url = urllib.parse.urlparse('http://' + site)
base = parsed_url.scheme+"://"+parsed_url.hostname
atags = html.findAll("a")
for a in atags:
href = a.get("href",None)
if href:
if "xml" in href or "rss" in href or "feed" in href:
possible_feeds.append(base+href)
for url in list(set(possible_feeds)):
f = feedparser.parse(url)
if len(f.entries) > 0:
if url not in result:
result.append(url)
return(result)
def parse_feed(url, etag=None, modified=None):
# use urllib to get the text
d = feedparser.parse(url, etag=etag, modified=modified)
    # pprint(d)
return d
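# --- Usage sketch (added for illustration; not part of the original module) ---
# These tasks assume a configured Django project; from `manage.py shell` a feed
# could be ingested roughly like this (the URL below is an assumption):
#
#     from xfeeds.parser.tasks import find_feed, url_to_feed, update_items
#
#     candidates = find_feed('https://example.com/blog')  # discover feed URLs
#     feed = url_to_feed(candidates[0]) if candidates else None
#     if feed is not None:
#         added = update_items(feed, force=True)          # fetch and store items
#         print(feed.feed_title, added, 'new items')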
| 2.46875
| 2
|
0657.GCBA-supermarket_sales.py
|
alphacastio/connectors-gcba
| 1
|
12781040
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
import pandas as pd
from lxml import etree
from bs4 import BeautifulSoup
import datetime
import io
import numpy as np
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
# In[2]:
url1 = "https://www.estadisticaciudad.gob.ar/eyc/wp-content/uploads/2020/10/AC_S_AX05.xlsx"
df1 = pd.read_excel(url1)
df1.columns = df1.iloc[1]
df1 = df1.drop(index=1)
df1 = df1.dropna(subset = ['Bebidas'])
df1 = df1[~df1.iloc[:, 0].astype(str).str.isdigit()]
df1 = df1.drop(df1.columns[[0]], axis=1)
df1.index = pd.date_range(start='1/1/2013', periods=len(df1), freq = "MS")
df1.index.name = "Date"
df1.columns = "Índice a valores constantes Base 2013=100 de ventas en supermercados - " + df1.columns
df1
# In[3]:
url2 = "https://www.estadisticaciudad.gob.ar/eyc/wp-content/uploads/2019/05/AC_S_AX04.xlsx"
df2 = pd.read_excel(url2)
df2.columns = df2.iloc[0]
df2.columns = "Ventas totales en supermercados (miles de pesos) - " + df2.columns
df2 = df2.drop(index=0)
df2 = df2.drop(index=1)
df2 = df2.drop(index=2)
df2 = df2.dropna(subset = ['Ventas totales en supermercados (miles de pesos) - Bebidas'])
df2 = df2[~df2.iloc[:, 0].astype(str).str.isdigit()]
df2 = df2.drop(df2.columns[[0, 1]], axis=1)
df2.index = pd.date_range(start='1/1/2007', periods=len(df2), freq = "MS")
df2.index.name = "Date"
df2 = df2.rename(columns={np.nan: "Ventas totales en supermercados (miles de pesos) - Otros"})
# In[4]:
url3 = "https://www.estadisticaciudad.gob.ar/eyc/wp-content/uploads/2019/05/AC_S_AX02.xlsx"
df3 = pd.read_excel(url3)
df3.columns = df3.iloc[1]
#df3.columns = "Ventas totales en supermercados (miles de pesos) - " + df3.columns
df3 = df3.drop(index=0)
df3 = df3.drop(index=1)
#df3 = df3.drop(index=2)
df3 = df3.dropna(subset = ['Operaciones'])
df3 = df3[~df3.iloc[:, 0].astype(str).str.isdigit()]
df3 = df3.drop(df3.columns[[0, 1]], axis=1)
df3.index = pd.date_range(start='1/1/2007', periods=len(df3), freq = "MS")
df3.index.name = "Date"
#df3 = df3.rename(columns={np.nan: "Ventas totales en supermercados (miles de pesos) - Otros"})
df3
# In[5]:
df4 = df1.merge(df2, right_index = True, left_index=True, how = "right").merge(df3, right_index = True, left_index=True)
# In[6]:
for col in df4.columns:
df4[col] = pd.to_numeric(df4[col], errors="coerce")
# In[7]:
df4["country"] = "CABA"
alphacast.datasets.dataset(657).upload_data_from_df(df4,
deleteMissingFromDB = True, onConflictUpdateDB = True, uploadIndex=True)
| 3.015625
| 3
|
openmdao.gui/src/openmdao/gui/test/functional/files/model_vartree.py
|
swryan/OpenMDAO-Framework
| 0
|
12781041
|
""" This model is used by test_valueeditors to test expand/collapse on
the object and parameter panes.
"""
from openmdao.main.api import Component, Assembly, VariableTree
from openmdao.lib.datatypes.api import Float, Slot
class DumbVT3(VariableTree):
def __init__(self):
super(DumbVT3, self).__init__()
self.add('a', Float(1., units='ft'))
self.add('b', Float(12., units='inch'))
class DumbVT2(VariableTree):
vt3 = Slot(DumbVT3, iotype='in')
def __init__(self):
super(DumbVT2, self).__init__()
self.add('x', Float(-1.))
self.add('y', Float(-2.))
self.add('vt3', DumbVT3())
class DumbVT(VariableTree):
vt2 = Slot(DumbVT2, iotype='in')
def __init__(self):
super(DumbVT, self).__init__()
self.add('vt2', DumbVT2())
self.add('v1', Float(1., desc='vv1'))
self.add('v2', Float(2., desc='vv2'))
class SimpleComp(Component):
cont_in = Slot(DumbVT, iotype='in')
cont_out = Slot(DumbVT, iotype='out')
def __init__(self):
super(SimpleComp, self).__init__()
self.add('cont_in', DumbVT())
self.add('cont_out', DumbVT())
class Topp(Assembly):
def configure(self):
self.add('p1', SimpleComp())
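# --- Structure sketch (added for illustration; not part of the original file) ---
# The nesting that the expand/collapse GUI tests walk through looks like:
#     top.p1.cont_in.v1          -> 1.0
#     top.p1.cont_in.vt2.x       -> -1.0
#     top.p1.cont_in.vt2.vt3.a   -> 1.0 (ft)
#     top.p1.cont_in.vt2.vt3.b   -> 12.0 (inch)
# with a mirrored tree on p1.cont_out.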
| 1.992188
| 2
|
neural_network.py
|
amrkh97/Arabic-OCR-Using-Python
| 1
|
12781042
|
import numpy as np
import torch
import torch.nn.functional as F
import dataset_creator as DC
from torch import nn
from torch import optim
# import keras
def createNN(_inputSize):
input_size = _inputSize
    hidden_sizes = [15, 10]  # 15 nodes in the first hidden layer, 10 in the second
output_size = 29 # Number of possible outputs
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
nn.ReLU(),
#nn.Dropout(0.2),
#nn.Linear(hidden_sizes[0], hidden_sizes[1]),
#nn.ReLU(),
#nn.Dropout(0.3),
nn.Linear(hidden_sizes[0], output_size))
return model
def convert2tensor(x):
x = torch.FloatTensor(x)
return x
def convert2long(x):
x = torch.LongTensor(x)
return x
def switchLoader(e,it1,it2,it3,it4,it5):
switcher={
0:it1,
1:it2,
2:it3,
3:it4,
4:it5
}
return switcher.get(e,"Invalid Iterator")
def TrainNN(model,t1,t2,t3,t4,t5):
criterion = nn.CrossEntropyLoss()
#criterion = nn.CTCLoss()
#optimizer = optim.SGD(model.parameters(), lr=0.01)
optimizer = optim.Adam(model.parameters(), lr=1e-5)
epochs = 5
print_every = 1000
steps = 0
correct_train = 0
for e in range(epochs):
running_loss = 0
loaderForData = switchLoader(e,t1,t2,t3,t4,t5)
for images, labels in iter(loaderForData):
steps += 1
images = convert2tensor(images)
actual_label = labels
labels = [labels,]
labels = convert2long(labels)
labels = torch.LongTensor(labels)
optimizer.zero_grad() # Clear the gradients as gradients are accumulated
# Forward and backward passes
output = model.forward(images)
output = F.softmax(output, dim=0)
output = output.unsqueeze(dim=0)
loss = criterion(output, labels) # Calculate the loss
loss.backward() # backpropagate to get values of the new weights
optimizer.step() # Take a step to update the newly calculated weights
_, predicted = torch.max(output.data, 1)
correct_train += predicted.eq(labels.data).sum().item()
running_loss += loss.item()
if steps % print_every == 0:
print(predicted)
print(labels.data)
print("Epoch: {}/{}... ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every))
print("Ended Epoch.",str(e+1))
#Saving the model after training:
train_accuracy = 100 * correct_train / 5000
print("Train Accuracy on 1000 Elements: {}%".format(train_accuracy))
PATH = 'trained_model.pth'
torch.save(model.state_dict(), PATH)
def TestNN(model,testloader):
images = torch.FloatTensor(testloader[:17])
logits = model.forward(images)
ps = F.softmax(logits, dim=0)
ps = ps.data.numpy().squeeze()
prediction = np.argmax(ps)
print(ps)
D = DC.returnToArabicDictionary()
return D[prediction]
# def PrepareLabels():
def load_checkpoint(filepath):
    model = torch.load(filepath)
return model
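# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumption: each "loader" passed to TrainNN is an iterable of
# (feature_vector, label) pairs, one loader per epoch, and the feature length
# matches both createNN's _inputSize and the [:17] slice used in TestNN:
#
#     model = createNN(_inputSize=17)
#     TrainNN(model, loader1, loader2, loader3, loader4, loader5)
#     predicted_char = TestNN(model, feature_vector)  # Arabic letter via dataset_creator
#     model = load_checkpoint('trained_model.pth')    # reload the saved weights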
| 2.78125
| 3
|
lib/models/__init__.py
|
simonwey/DecoupleNet
| 0
|
12781043
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lib.models.pose_resnet
import lib.models.pose_hrnet
import lib.models.pose_hrnet_se
import lib.models.pose_hrnet_se_lambda
import lib.models.pose_hrnet_se_lambda_visualize
import lib.models.pose_hrnet_multi_task_lambda
import lib.models.pose_resnet_se
import lib.models.pose_resnet_se_lambda
import lib.models.pose_hrnet_SEIgn_lambda
import lib.models.pose_hrnet_ss_lambda
import lib.models.pose_hrnet_decouple_gcn
import lib.models.pose_hrnet_IgnSE12_lambda
import lib.models.pose_hrnet_decouple_cnn
import lib.models.pose_hrnet_decouple_naive
import lib.models.pose_hrnet_decouple_naive_withdp
import lib.models.pose_hrnet_decouple_stupid
import lib.models.pose_hrnet_decouple_ia
import lib.models.pose_hrnet_dcp_skt
import lib.models.pose_hrnet_decomp_align
import lib.models.pose_hrnet_four_arm
import lib.models.pose_hrnet_twohead
import lib.models.pose_cnn_two_arm
import lib.models.pose_hrnet_ori
import lib.models.pose_two_two
import lib.models.pose_two_catt
| 1.789063
| 2
|
1_stroop.py
|
kalenkovich/nengo_stroop
| 0
|
12781044
|
# coding: utf-8
# In[1]:
import nengo
import nengo_spa as spa
import numpy as np
# In[2]:
from matplotlib import pyplot as plt
# In[3]:
#create semantic pointers
words = [ 'CAT', 'BLUE', 'RED']
colors = ['RED', 'BLUE']
fingers = ['INDEX', 'MIDDLE']
D = 16  # reduced from 32 because of the limited capacity of our computers
vocab = spa.Vocabulary(D)
vocab.populate(';'.join(words))
vocab.populate('COLOR; WORD')
vocab.populate(';'.join(fingers))
stimuli = []
for i in range(10):
w = np.random.choice(colors)
c = np.random.choice(colors)
stimuli.append((w,c))
# # No recurrent connections
# In[4]:
model = spa.Network()
with model:
t_stim = 0.5
t_isi = 0.5
def word_func(t):
index = int (t / (t_stim + t_isi))
t = t % (t_stim + t_isi)
if t < t_isi:
return '0'
else:
return stimuli[index%len(stimuli)][0]
def color_func(t):
index = int (t / (t_stim + t_isi))
t = t % (t_stim + t_isi)
if t < t_isi:
return '0'
else:
return stimuli[index%len(stimuli)][1]
stim_w = spa.Transcode(word_func, output_vocab=vocab)
#create node for pre processing color to mimic delay
pre_stim_c = spa.Transcode(color_func, output_vocab=vocab)
stim_c = spa.State(vocab)
#reduced amount of neurons to increase volatility of attention
attention = spa.State(vocab, neurons_per_dimension=10)
spa.sym.WORD * 0.45 + spa.sym.COLOR * 0.55 >> attention
wm = spa.State(vocab)
nengo.Connection(pre_stim_c.output, stim_c.input, synapse=0.3)
#added gain for action selection to be triggered
(spa.sym.COLOR*stim_c+spa.sym.WORD*stim_w)*~attention*2 >> wm
finger = spa.State(vocab)
with spa.ActionSelection():
spa.ifmax( spa.dot(wm, spa.sym.BLUE),
spa.sym.INDEX >> finger)
spa.ifmax(spa.dot(wm, spa.sym.RED),
spa.sym.MIDDLE >> finger)
spa.ifmax(0.5,
spa.semantic_pointer.Zero(D) >> finger)
# In[5]:
with model:
p_input_word = nengo.Probe(stim_w.output)
p_input_color = nengo.Probe(pre_stim_c.output)
p_wm = nengo.Probe(wm.output)
p_finger = nengo.Probe(finger.output)
# In[6]:
with nengo.Simulator(model) as sim:
sim.run(5)
# In[7]:
figure, axs = plt.subplots(ncols=1, nrows=4, figsize=(10, 10))
axs[0].plot(sim.trange(), spa.similarity(sim.data[p_input_word], vocab))
axs[0].legend(vocab.keys(), loc='right')
axs[1].plot(sim.trange(), spa.similarity(sim.data[p_input_color], vocab))
axs[1].legend(vocab.keys(), loc='right')
axs[2].plot(sim.trange(), spa.similarity(sim.data[p_wm], vocab))
axs[2].legend(vocab.keys(), loc='right')
axs[3].plot(sim.trange(), spa.similarity(sim.data[p_finger], vocab))
axs[3].legend(vocab.keys(), loc='right')
# The delay in processing 'color' vs 'word' was successful. However, the model without a recurrent wm always responds incorrectly (to 'word'), because it reacts to the first input that reaches wm. Thus we decided to add recurrent feedback to the wm nodes, to achieve accumulation of evidence.
# # Yes recurrent connections
# In[4]:
model_rec = spa.Network()
with model_rec:
#we changed durations, to avoid intertrial effects (wm overlap)
t_stim = 0.3
t_isi = 0.7
def word_func(t):
index = int (t / (t_stim + t_isi))
t = t % (t_stim + t_isi)
if t < t_isi:
return '0'
else:
return stimuli[index%len(stimuli)][0]
def color_func(t):
#instead of achieving delay via additional node, for better control we present 'color' later than 'word'
t -= 0.1
index = int (t / (t_stim + t_isi))
t = t % (t_stim + t_isi)
if t < t_isi:
return '0'
else:
return stimuli[index%len(stimuli)][1]
stim_w = spa.Transcode(word_func, output_vocab=vocab)
stim_c = spa.Transcode(color_func, output_vocab=vocab)
rec_weight_input = 1
rec_weight_feedback = 0.5
wm_w = spa.State(vocab, feedback=rec_weight_feedback)
wm_c = spa.State(vocab, feedback=rec_weight_feedback)
stim_w * rec_weight_input >> wm_w
stim_c * rec_weight_input >> wm_c
attention = spa.State(vocab, neurons_per_dimension=10)
    # we reduced the attentional difference to give 'word' a higher chance
spa.sym.WORD * 0.48 + spa.sym.COLOR * 0.52 >> attention
wm = spa.State(vocab, feedback=rec_weight_feedback)
(spa.sym.COLOR * wm_c + spa.sym.WORD * wm_w) * ~attention * rec_weight_input * 2 >> wm
finger = spa.State(vocab)
with spa.ActionSelection():
spa.ifmax( spa.dot(wm, spa.sym.BLUE),
spa.sym.INDEX >> finger)
spa.ifmax(spa.dot(wm, spa.sym.RED),
spa.sym.MIDDLE >> finger)
spa.ifmax(0.5,
spa.semantic_pointer.Zero(D) >> finger)
# In[5]:
with model_rec:
p_input_word = nengo.Probe(stim_w.output)
p_input_color = nengo.Probe(stim_c.output)
p_wm_word = nengo.Probe(wm_w.output)
p_wm_color = nengo.Probe(wm_c.output)
p_wm = nengo.Probe(wm.output)
p_finger = nengo.Probe(finger.output)
# In[11]:
stimuli = []
for i in range(10):
w = np.random.choice(colors)
c = np.random.choice(colors)
stimuli.append((w,c))
# In[12]:
with nengo.Simulator(model_rec) as sim_rec:
sim_rec.run(10)
# In[13]:
figure, axs = plt.subplots(ncols=1, nrows=6, figsize=(10, 10))
axs[0].plot(sim_rec.trange(), spa.similarity(sim_rec.data[p_input_word], vocab))
axs[0].legend(vocab.keys(), loc='right')
axs[1].plot(sim_rec.trange(), spa.similarity(sim_rec.data[p_input_color], vocab))
axs[1].legend(vocab.keys(), loc='right')
axs[2].plot(sim_rec.trange(), spa.similarity(sim_rec.data[p_wm_word], vocab))
axs[2].legend(vocab.keys(), loc='right')
axs[3].plot(sim_rec.trange(), spa.similarity(sim_rec.data[p_wm_color], vocab))
axs[3].legend(vocab.keys(), loc='right')
axs[4].plot(sim_rec.trange(), spa.similarity(sim_rec.data[p_wm], vocab))
axs[4].legend(vocab.keys(), loc='right')
axs[5].plot(sim_rec.trange(), spa.similarity(sim_rec.data[p_finger], vocab))
axs[5].legend(vocab.keys(), loc='right')
# This is the closest result that shows mistakes (at least we can interpret it that way): at time points 4, 5, 7 & 8 both fingers are selected, and the wrong finger is selected first.
| 2.5625
| 3
|
modules/fucking_dinner.py
|
CHCMATT/Code
| 15
|
12781045
|
import re
from util.hook import *
from util import web
uri = 'http://www.whatthefuckshouldimakefordinner.com'
re_mark = re.compile(r'<dt><a href="(.*?)" target="_blank">(.*?)</a></dt>')
@hook(cmds=['fucking_dinner', 'fd', 'dinner'], priority='low')
def dinner(code, input):
"""fd -- WHAT DO YOU WANT FOR FUCKING DINNER?"""
err = '{red}EAT LEFT OVER PIZZA FOR ALL I CARE.'
try:
data = web.text(uri)
results = re_mark.findall(data)
if not results:
return code.say(err)
url, food = results[0][0], web.escape(results[0][1])
code.say('WHY DON\'T YOU EAT SOME FUCKING {b}%s{b}. HERE IS THE RECIPE: %s' % (
food.upper(), url))
except:
return code.say(err)
| 2.59375
| 3
|
chariot/transformer/text/lower_normalizer.py
|
Y-Kuro-u/chariot
| 134
|
12781046
|
<reponame>Y-Kuro-u/chariot<gh_stars>100-1000
from chariot.transformer.text.base import TextNormalizer
class LowerNormalizer(TextNormalizer):
def __init__(self, copy=True):
super().__init__(copy)
def apply(self, text):
return text.lower()
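# Usage sketch (added for illustration; assumes the chariot package is importable):
if __name__ == "__main__":
    normalizer = LowerNormalizer()
    print(normalizer.apply("Deep Learning with Chariot"))  # -> "deep learning with chariot"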
| 2.265625
| 2
|
edf/scripts/test.py
|
ktfm2/Kai_updates
| 1
|
12781047
|
<reponame>ktfm2/Kai_updates
import aa_py
import sys
sys.path.append('../py/')
import edf_py
import json
import numpy as np
import matplotlib.pyplot as plt
try:
import seaborn
except:
pass
with open('../config.json') as data_file:
data = json.load(data_file)
pot = aa_py.GalPot(str(data['potential']))
acts = aa_py.Actions_AxisymmetricFudge_InterpTables(pot,str(data['actions']),False,0.2,20.)
edf = edf_py.edf(pot,acts)
edf.readParams(str(data['edf_params']))
X = np.array([8.,2.,1.,0.1,0.1,0.1])
age = 1.
metal = -0.5
vr = np.linspace(-100.,100.,1000)
R = np.sqrt(X[0]**2+X[1]**2)
ct,st = X[0]/R, X[1]/R
vp=220.
fvr = np.array([edf(np.append(X[:3],np.array([v*ct-vp*st,v*st+vp*ct,X[-1]])),
age,metal) for v in vr])
fvp = np.array([edf(np.append(X[:3],np.array([-(v+vp)*st,(vp+v)*ct,X[-1]])),
age,metal) for v in vr])
fvz = np.array([edf(np.append(X[:3],np.array([-vp*st,vp*ct,v])),
age,metal) for v in vr])
plt.plot(vr,fvr,label=r'$v_R$')
plt.plot(vr,fvp,label=r'$v_\phi$')
plt.plot(vr,fvz,label=r'$v_z$')
plt.xlabel(r'$v_r,v_\phi-220,v_z/\,\mathrm{km\,s}^{-1}$')
plt.ylabel(r'$f(v_i)$')
plt.legend()
plt.show()
| 1.984375
| 2
|
project/apps/blog/tests/test_models.py
|
slaily/pypublisher
| 0
|
12781048
|
from django.test import SimpleTestCase
from project.apps.blog import models
class AuthorTest(SimpleTestCase):
def test_str_dunder(self):
author = models.Author(id=1)
author_dunder_str_format = '<Author: ID - {id}>'.format(id=author.id)
self.assertMultiLineEqual(
author.__str__(),
author_dunder_str_format
)
class ArticleTest(SimpleTestCase):
def test_str_dunder(self):
article = models.Article(id=1)
article_dunder_str_format = '<Article: ID - {id}>'.format(id=article.id)
self.assertMultiLineEqual(
article.__str__(),
article_dunder_str_format
)
| 2.8125
| 3
|
openmdao.gui/src/openmdao/gui/test/functional/pageobjects/dataflow.py
|
swryan/OpenMDAO-Framework
| 0
|
12781049
|
<reponame>swryan/OpenMDAO-Framework
import logging
import time
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.common.exceptions import StaleElementReferenceException
from basepageobject import BasePageObject, TMO
from elements import GenericElement, ButtonElement, TextElement
from component import ComponentPage, DriverPage, PropertiesPage, AssemblyPage
from connections import ConnectionsPage
class DataflowFigure(BasePageObject):
""" Represents elements within a dataflow figure. """
name = TextElement((By.CLASS_NAME, 'DataflowFigureHeader'))
top_left = GenericElement((By.CLASS_NAME, 'DataflowFigureTopLeft'))
header = GenericElement((By.CLASS_NAME, 'DataflowFigureHeader'))
top_right = ButtonElement((By.CLASS_NAME, 'DataflowFigureTopRight'))
content_area = GenericElement((By.CLASS_NAME, 'DataflowFigureContentArea'))
bottom_left = GenericElement((By.CLASS_NAME, 'DataflowFigureBottomLeft'))
bottom_right = GenericElement((By.CLASS_NAME, 'DataflowFigureBottomRight'))
footer = GenericElement((By.CLASS_NAME, 'DataflowFigureFooter'))
# Context menu.
edit_button = ButtonElement((By.XPATH, "../div/a[text()='Edit']"))
properties_button = ButtonElement((By.XPATH, "../div/a[text()='Properties']"))
run_button = ButtonElement((By.XPATH, "../div/a[text()='Run']"))
connections_button = ButtonElement((By.XPATH, "../div/a[text()='Edit Data Connections']"))
show_dataflows = ButtonElement((By.XPATH, "../div/a[text()='Show Data Connections']"))
hide_dataflows = ButtonElement((By.XPATH, "../div/a[text()='Hide Data Connections']"))
show_driverflows = ButtonElement((By.XPATH, "../div/a[text()='Show Driver Connections']"))
hide_driverflows = ButtonElement((By.XPATH, "../div/a[text()='Hide Driver Connections']"))
disconnect_button = ButtonElement((By.XPATH, "../div/a[text()='Disconnect']"))
remove_button = ButtonElement((By.XPATH, "../div/a[text()='Remove']"))
# Port context menus.
edit_connections = ButtonElement((By.XPATH, "../div/a[text()='Edit Connections']"))
edit_passthroughs = ButtonElement((By.XPATH, "../div/a[text()='Edit Passthroughs']"))
edit_driver = ButtonElement((By.XPATH, "../div/a[text()='Edit Driver']"))
def __init__(self, browser, port, root):
super(DataflowFigure, self).__init__(browser, port, root)
self._pathname = None
@property
def pathname(self):
""" Pathname of this component. """
if self._pathname is None:
# Much slower than if explicitly set.
parent = self('header').find_element_by_xpath('..')
fig_id = parent.get_attribute('id')
script = "return jQuery('#" + fig_id + "').data('pathname')"
self._pathname = self.browser.execute_script(script)
return self._pathname
@pathname.setter
def pathname(self, path):
self._pathname = path
@property
def input_port(self):
""" Input port element, `pathname` must be set previously. """
return self.root.find_element_by_id(self.pathname + '-input')
@property
def output_port(self):
""" Output port element, `pathname` must be set previously. """
return self.root.find_element_by_id(self.pathname + '-output')
@property
def border(self):
""" Figure border property. """
return self.root.value_of_css_property('border')
@property
def background_color(self):
""" Figure background-color property. """
return self.root.value_of_css_property('background-color')
@property
def coords(self):
""" Figure (left, top). """
left = self.root.value_of_css_property('left')
left = int(left[0:-2]) # Drop 'px'.
top = self.root.value_of_css_property('top')
top = int(top[0:-2]) # Drop 'px'.
return (left, top)
def editor_page(self, double_click=True, base_type='Component'):
""" Return :class:`ComponentPage` for this component. """
chain = ActionChains(self.browser)
if double_click:
chain.double_click(self.root).perform()
else:
self._context_click('edit_button')
editor_id = 'CE-%s' % self.pathname.replace('.', '-')
if base_type == 'Assembly':
return AssemblyPage(self.browser, self.port, (By.ID, editor_id))
elif base_type == 'Driver':
return DriverPage(self.browser, self.port, (By.ID, editor_id))
else:
return ComponentPage(self.browser, self.port, (By.ID, editor_id))
def properties_page(self):
""" Return :class:`PropertiesPage` for this component. """
self._context_click('properties_button')
props_id = '%s-properties' % self.pathname.replace('.', '-')
return PropertiesPage(self.browser, self.port, (By.ID, props_id))
def connections_page(self):
""" Return :class:`ConnectionsPage` for this component. """
self._context_click('connections_button')
frame_id = 'ConnectionsFrame-%s' % self.pathname.replace('.', '-')
return ConnectionsPage(self.browser, self.port, (By.ID, frame_id))
def input_edit_driver(self, driver_pathname):
""" Return :class:`DriverPage` associated with the input port. """
chain = ActionChains(self.browser)
chain.context_click(self.input_port).perform()
time.sleep(0.5)
self('edit_driver').click()
editor_id = 'CE-%s' % driver_pathname.replace('.', '-')
return DriverPage(self.browser, self.port, (By.ID, editor_id))
def output_edit_driver(self, driver_pathname):
""" Return :class:`DriverPage` associated with the output port. """
# FIXME: can't get response from context click.
chain = ActionChains(self.browser)
chain.context_click(self.output_port).perform()
time.sleep(0.5)
self('edit_driver').click()
editor_id = 'CE-%s' % driver_pathname.replace('.', '-')
return DriverPage(self.browser, self.port, (By.ID, editor_id))
def run(self):
""" Run this component. """
self._context_click('run_button')
def disconnect(self):
""" Disconnect this component. """
self._context_click('disconnect_button')
def remove(self):
""" Remove this component. """
self._context_click('remove_button')
def display_dataflows(self, show):
""" Show/hide data flows. """
if show:
self._context_click('show_dataflows')
else:
self._context_click('hide_dataflows')
def display_driverflows(self, show):
""" Show/hide driver flows. """
if show:
self._context_click('show_driverflows')
else:
self._context_click('hide_driverflows')
def _context_click(self, name):
""" Display context menu. """
chain = ActionChains(self.browser)
# Default is centered which causes problems in some contexts.
# Offset is apparently limited, (20, 20) had problems.
chain.move_to_element_with_offset(self.root, 15, 15)
chain.context_click(None)
chain.perform()
time.sleep(0.5)
self(name).click()
def find_dataflow_figures(page):
""" Return dataflow figure elements in `page`. """
root = page.root or page.browser
time.sleep(0.5) # Pause for stable display.
return root.find_elements_by_class_name('DataflowFigure')
def find_dataflow_figure(page, name, prefix=None, retries=5):
""" Return :class:`DataflowFigure` for `name` in `page`. """
root = page.root or page.browser
for retry in range(retries):
time.sleep(0.5) # Pause for stable display.
figures = root.find_elements_by_class_name('DataflowFigure')
if not figures:
continue
fig_name = None
for figure in figures:
page.browser.implicitly_wait(1)
try:
header = figure.find_elements_by_class_name('DataflowFigureHeader')
if len(header) == 0:
# the outermost figure (globals) has no header or name
if name == '' and prefix is None:
fig = DataflowFigure(page.browser, page.port, figure)
return fig
else:
continue
fig_name = figure.find_elements_by_class_name('DataflowFigureHeader')[0].text
except StaleElementReferenceException:
logging.warning('get_dataflow_figure:'
' StaleElementReferenceException')
else:
if fig_name == name:
fig = DataflowFigure(page.browser, page.port, figure)
if prefix is not None:
if prefix:
fig.pathname = '%s.%s' % (prefix, name)
else:
fig.pathname = name
return fig
finally:
page.browser.implicitly_wait(TMO)
return None
def find_dataflow_component_names(page):
""" Return names of dataflow components in `page`. """
root = page.root or page.browser
names = []
# Assume there should be at least 1, wait for number to not change.
n_found = 0
for retry in range(10):
time.sleep(0.5) # Pause for stable display.
dataflow_component_headers = \
root.find_elements_by_class_name('DataflowFigureHeader')
if dataflow_component_headers:
n_headers = len(dataflow_component_headers)
if n_found:
if n_headers == n_found:
return [h.text for h in dataflow_component_headers]
n_found = n_headers
else:
logging.error('get_dataflow_component_names: n_found %s', n_found)
return names
#for i in range(len(dataflow_component_headers)):
#for retry in range(10): # This has had issues...
#try:
#names.append(root.find_elements_by_class_name('DataflowFigureHeader')[i].text)
#except StaleElementReferenceException:
#logging.warning('get_dataflow_component_names:'
#' StaleElementReferenceException')
#except IndexError:
#logging.warning('get_dataflow_component_names:'
#' IndexError for i=%s, headers=%s',
#i, len(dataflow_component_headers))
#else:
#break
#if len(names) != len(dataflow_component_headers):
#logging.error('get_dataflow_component_names:'
#' expecting %d names, got %s',
#len(dataflow_component_headers), names)
#return names
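# --- Usage sketch (added for illustration; not part of the original module) ---
# These page objects assume a live browser session driving the OpenMDAO GUI, as
# set up by the functional-test harness; `workspace_page` below is illustrative:
#
#     figure = find_dataflow_figure(workspace_page, 'comp1', prefix='top')
#     editor = figure.editor_page(double_click=False)        # open via context menu
#     figure.run()                                            # context-menu 'Run'
#     names = find_dataflow_component_names(workspace_page)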
| 2.0625
| 2
|
CCPD/lista-1/questao1.py
|
Dowsley/CESAR2
| 2
|
12781050
|
from multiprocessing import Pool, cpu_count
import time
flatten = lambda t: [item for sublist in t for item in sublist]
def load_list(f):
pre_list = f.read().splitlines()
return [int(e) for e in pre_list]
def dump_list(f, num_list):
for i in num_list:
f.write("%d\n" % i)
def split_list(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
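# e.g. split_list([1, 2, 3, 4, 5, 6, 7], 3) -> [[1, 2], [3, 4], [5, 6, 7]]
# (added for illustration; boundary chunks can differ by one element because of
# the float-based slicing)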
def remove_duplicates(segment):
seen = set()
seen_add = seen.add
return [e for e in segment if not (e in seen or seen_add(e))]
def task(num_list):
num_cpus = cpu_count()
with Pool(num_cpus) as pool:
segments = split_list(num_list, num_cpus)
return flatten(pool.map_async(remove_duplicates, segments).get())
if __name__ == '__main__':
with open('out.txt', 'r') as input_file:
num_list = load_list(input_file)
    # Test with multiprocessing
start = time.time()
result = remove_duplicates(task(num_list))
end = time.time()
print(f"With multithreading: {(end - start) * 1000} milisseconds")
with open('result.txt', 'w') as output_file:
dump_list(output_file, result)
    # Test without multiprocessing
start = time.time()
result = remove_duplicates(num_list)
end = time.time()
print(f"Without multithreading: {(end - start) * 1000} milisseconds")
| 3.203125
| 3
|