hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0d2fbfa96bb0ce534111ac7e21349a1c716d99fa | 637 | py | Python | PyOpenWorm/quantity.py | nheffelman/pyopdata | 5cc3042b004f167dbf18acc119474ea48b47810d | [
"MIT"
] | null | null | null | PyOpenWorm/quantity.py | nheffelman/pyopdata | 5cc3042b004f167dbf18acc119474ea48b47810d | [
"MIT"
] | null | null | null | PyOpenWorm/quantity.py | nheffelman/pyopdata | 5cc3042b004f167dbf18acc119474ea48b47810d | [
"MIT"
] | null | null | null | import pint as Q
class Quantity:
    """ Enables the use of measurement units in our statements
    """

    # Shared pint registry: all Quantity objects must come from the same
    # registry for their units to be interoperable.
    ur = Q.UnitRegistry()

    @classmethod
    def parse(cls, s):
        """Build a Quantity from a string such as "12 milliliters".

        Fixed: the classmethod previously named its first parameter
        ``self`` and hard-coded ``Quantity(...)``; using ``cls`` lets
        subclasses parse to their own type.
        """
        q = cls.ur.Quantity(s)
        my_q = cls(0, "mL")  # placeholder instance, immediately overwritten
        my_q._quant = q
        return my_q

    def __init__(self, value, unit):
        """Create a quantity of *value* expressed in *unit* (a pint unit name)."""
        self._quant = self.ur.Quantity(value, unit)

    @property
    def unit(self):
        """Unit name as a string (e.g. 'milliliter')."""
        return str(self._quant.units)

    @property
    def value(self):
        """Numeric magnitude of the quantity."""
        return self._quant.magnitude

    def serialize(self):
        """Serialize as value immediately followed by the unit, e.g. '5milliliter'."""
        return (str(self.value) + self.unit)

    # alias call to string
    __str__ = serialize
| 21.233333 | 62 | 0.601256 | 618 | 0.970173 | 0 | 0 | 278 | 0.436421 | 0 | 0 | 91 | 0.142857 |
0d3113fefbcc2c8c41ba544c0d489f999a22dfe6 | 4,752 | py | Python | rhapsody_web/models.py | wbadart/rhapsody | 433a376b4a3881d4b12bebbbbdf08194c62fa8a2 | [
"MIT"
] | null | null | null | rhapsody_web/models.py | wbadart/rhapsody | 433a376b4a3881d4b12bebbbbdf08194c62fa8a2 | [
"MIT"
] | 12 | 2018-03-21T02:26:45.000Z | 2018-05-09T07:12:55.000Z | rhapsody_web/models.py | wbadart/rhapsody | 433a376b4a3881d4b12bebbbbdf08194c62fa8a2 | [
"MIT"
] | null | null | null | from itertools import chain
from django.db import models
from random import choices
class Node(object):
    """Mixin giving a model graph-traversal behavior via its neighbors()."""

    def neighbors(self):
        """Subclasses must return an iterable of adjacent nodes."""
        raise NotImplementedError

    def graph(self, depth=1):
        """Return an adjacency mapping {node: set(neighbors)} explored to *depth*.

        Note: neighbors() is intentionally re-evaluated per level, since some
        implementations sample randomly.
        """
        if not depth:
            return {self: set()}
        if depth == 1:
            return {self: set(self.neighbors())}
        adjacency = self.graph(depth=1)
        for neighbor in self.neighbors():
            adjacency.update(neighbor.graph(depth - 1))
        return adjacency

    def edges(self, depth=1):
        """Yield (vertex, neighbor) pairs; caches the mapping on self.g."""
        self.g = self.graph(depth)  # kept as an attribute for callers that inspect it
        for vertex, targets in self.g.items():
            for target in targets:
                yield (vertex, target)
class Artist(models.Model, Node):
    """Spotify artist; graph neighbors are a sample of the artist's songs."""

    spotify_id = models.CharField(max_length=22, primary_key=True)
    popularity = models.IntegerField(null=True)
    name = models.CharField(max_length=30, default="")
    # albums - ManyToManyField included in Album
    # songs - ManyToManyField included in Song
    # concerts = models.ManyToManyField(Concert)

    def __str__(self):
        return self.name + " (" + self.spotify_id + ")"

    def neighbors(self):
        """Return up to 4 of this artist's songs.

        When more than 4 exist, random.choices samples WITH replacement, so
        duplicates are possible. Fixed: reuse the queryset already evaluated
        by len() instead of issuing a second identical database query.
        """
        adj_songs = Song.objects.filter(artist=self)
        if len(adj_songs) > 4:
            return choices(adj_songs, k=4)
        return adj_songs
class Genre(models.Model):
    """Music genre, linked to the artists it applies to."""

    name = models.CharField(max_length=30, primary_key=True)
    artists = models.ManyToManyField(Artist)
    # albums - ManyToManyField included in Album
    # songs -
    #   In the spotify data, individual songs don't have genre
    #   data. We could extrapolate this from the album or artist genre
    #   data later though
class Album(models.Model, Node):
    """Spotify album; graph neighbors are its artists plus up to 4 songs."""

    ALBUM = "A"
    SINGLE = "S"
    COMPILATION = "C"
    ALBUM_TYPE_CHOICES = (
        (ALBUM, "album"),
        (SINGLE, "single"),
        (COMPILATION, "compilation")
    )
    album_type = models.CharField(
        max_length=1, choices=ALBUM_TYPE_CHOICES, default=ALBUM)
    artists = models.ManyToManyField(Artist)
    spotify_id = models.CharField(max_length=22, primary_key=True)
    genres = models.ManyToManyField(Genre)
    label = models.CharField(max_length=30, default="")
    name = models.CharField(max_length=30, default="")
    # Note this is going to come in as a string from the spotify API,
    # so some conversion will have to be done
    release_date = models.DateField(null=True)

    def __str__(self):
        return self.name + " (" + self.spotify_id + ")"

    def neighbors(self):
        """Return this album's artists chained with up to 4 of its songs.

        Fixed for consistency with Artist.neighbors: only sample (with
        replacement) when the album has MORE than 4 songs. Previously
        random.choices was called unconditionally, raising IndexError for
        albums with no songs and duplicating songs on albums with fewer
        than 4.
        """
        adj_songs = Song.objects.filter(album=self)
        if len(adj_songs) > 4:
            adj_songs = choices(adj_songs, k=4)
        return chain(self.artists.all(), adj_songs)
class Song(models.Model, Node):
    """Spotify track; graph neighbors are its artist and album."""

    spotify_id = models.CharField(max_length=22, primary_key=True)
    artist = models.ForeignKey(Artist, on_delete=models.CASCADE)
    album = models.ForeignKey(Album, null=True, on_delete=models.CASCADE)
    title = models.CharField(max_length=30, default="")
    name = models.CharField(max_length=30, default="")

    def __str__(self):
        return self.title + " (" + self.spotify_id + ")"

    def neighbors(self):
        # NOTE(review): `album` is nullable, so this list can contain None,
        # which would break Node.graph for such songs -- confirm upstream.
        return [self.artist, self.album]
class Playlist(models.Model):
    """Spotify playlist owned by a (possibly absent) user."""

    spotify_id = models.CharField(max_length=22, primary_key=True)
    owner = models.ForeignKey('User', null=True, on_delete=models.CASCADE)
    songs = models.ManyToManyField(Song)
    collaborative = models.BooleanField(default=False)
    description = models.CharField(max_length=5000, default="")
    # followers - see ManyToManyField in User
    name = models.CharField(max_length=30, default="")
    public = models.BooleanField(default=True)
class RadioStation(models.Model):
    """Placeholder model -- no fields defined yet."""
    pass
class Concert(models.Model):
    """Placeholder model -- no fields defined yet."""
    pass
class User(models.Model):
    """Spotify user together with the library items they follow."""

    # abstract = True
    username = models.CharField(max_length=30, unique=True)
    spotify_id = models.CharField(max_length=22, primary_key=True)
    artist = models.ManyToManyField(Artist)
    genre = models.ManyToManyField(Genre)
    album = models.ManyToManyField(Album)
    song = models.ManyToManyField(Song)
    playlist_followed = models.ManyToManyField(Playlist)
    radio_station = models.ManyToManyField(RadioStation)
    # NOTE(review): a single nullable FK can model at most one friend per
    # user -- confirm whether a ManyToManyField was intended.
    friends = models.ForeignKey("self", on_delete=models.SET_NULL, null=True)
class Admin(User):
    """User subtype with administrative role (no extra fields yet)."""
    pass
class Regular(User):
    """Standard, non-administrative user subtype (no extra fields yet)."""
    pass
class Song_Graph(models.Model):
    """Weighted edge between two songs, keyed by the ordered song-id pair."""

    song1_id = models.CharField(max_length=22, null=True)
    song2_id = models.CharField(max_length=22, null=True)
    edge_weight = models.IntegerField(null=True)

    class Meta:
        # At most one edge per ordered (song1, song2) pair.
        unique_together = ("song1_id", "song2_id")
| 31.058824 | 77 | 0.666877 | 4,632 | 0.974747 | 179 | 0.037668 | 0 | 0 | 0 | 0 | 736 | 0.154882 |
0d315a6eab2cc3aa2454ad8e379488130a26267e | 1,076 | py | Python | examples/kddcup2011/track1.py | zenogantner/MML-KDD | 4c66101439d83bdcd15a464bf95c7ae74f1abbed | [
"BSD-3-Clause"
] | 1 | 2021-03-07T15:29:48.000Z | 2021-03-07T15:29:48.000Z | examples/kddcup2011/track1.py | zenogantner/MML-KDD | 4c66101439d83bdcd15a464bf95c7ae74f1abbed | [
"BSD-3-Clause"
] | null | null | null | examples/kddcup2011/track1.py | zenogantner/MML-KDD | 4c66101439d83bdcd15a464bf95c7ae74f1abbed | [
"BSD-3-Clause"
] | 3 | 2015-03-17T20:22:48.000Z | 2019-11-20T06:25:55.000Z | #!/usr/bin/env ipy
# IronPython 2 script (note the print *statements*): trains a MyMediaLite
# ItemAverage recommender on KDD Cup 2011 track 1 data and writes predictions.
# Requires the MyMediaLite .NET assemblies to be on the path.
import clr
clr.AddReference("MyMediaLite.dll")
clr.AddReference("MyMediaLiteExperimental.dll")
from MyMediaLite import *

train_file = "trainIdx1.firstLines.txt"
validation_file = "validationIdx1.firstLines.txt"
test_file = "testIdx1.firstLines.txt"

# load the data
training_data = IO.KDDCup2011.Ratings.Read(train_file)
validation_data = IO.KDDCup2011.Ratings.Read(validation_file)
test_data = IO.KDDCup2011.Ratings.ReadTest(test_file)
item_relations = IO.KDDCup2011.Items.Read("trackData1.txt", "albumData1.txt", "artistData1.txt", "genreData1.txt", 1);
print item_relations

# set up the recommender; KDD Cup 2011 ratings range from 0 to 100
recommender = RatingPrediction.ItemAverage()
recommender.MinRating = 0
recommender.MaxRating = 100
recommender.Ratings = training_data
print "Training ..."
recommender.Train()
print "done."

# measure the accuracy on the validation set
print Eval.RatingEval.Evaluate(recommender, validation_data)

# predict on the test set
print "Predicting ..."
Eval.KDDCup.PredictTrack1(recommender, test_data, "track1-output.txt")
print "done."
| 29.888889 | 118 | 0.77974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 382 | 0.355019 |
0d315c941d5c258d24b3ce3f264e4496447f3b10 | 754 | py | Python | resdk/resources/kb/mapping.py | tristanbrown/resolwe-bio-py | c911defde8a5e7e902ad1adf4f9e480f17002c18 | [
"Apache-2.0"
] | null | null | null | resdk/resources/kb/mapping.py | tristanbrown/resolwe-bio-py | c911defde8a5e7e902ad1adf4f9e480f17002c18 | [
"Apache-2.0"
] | null | null | null | resdk/resources/kb/mapping.py | tristanbrown/resolwe-bio-py | c911defde8a5e7e902ad1adf4f9e480f17002c18 | [
"Apache-2.0"
] | null | null | null | """KB mapping resource."""
from __future__ import absolute_import, division, print_function, unicode_literals
from ..base import BaseResource
class Mapping(BaseResource):
    """Knowledge base Mapping resource."""

    endpoint = 'kb.mapping.admin'
    query_endpoint = 'kb.mapping.search'
    query_method = 'POST'

    WRITABLE_FIELDS = ()
    UPDATE_PROTECTED_FIELDS = ()
    READ_ONLY_FIELDS = ('id', 'relation_type', 'source_db', 'source_id', 'target_db', 'target_id')

    def __repr__(self):
        """Format mapping representation."""
        # pylint: disable=no-member
        template = "<Mapping source_db='{}' source_id='{}' target_db='{}' target_id='{}'>"
        return template.format(
            self.source_db, self.source_id, self.target_db, self.target_id)
0d330685451bd90d18d6890bfb83c6bc7d79a1c5 | 1,293 | py | Python | wavshape.py | IngoKl/wavlength | 700b1f14dabd552b2ccf63a1651c469def023468 | [
"MIT"
] | null | null | null | wavshape.py | IngoKl/wavlength | 700b1f14dabd552b2ccf63a1651c469def023468 | [
"MIT"
] | null | null | null | wavshape.py | IngoKl/wavlength | 700b1f14dabd552b2ccf63a1651c469def023468 | [
"MIT"
] | null | null | null | """Scans a given path (folder) for .wav files recursively and plots them all into files."""
from pathlib import Path, PurePath
import matplotlib.pyplot as plt
import numpy as np
from numpy import fft as fft
import click
import scipy.io.wavfile
from wavlength import get_files, plot_wav
def analyze_and_plot(wavfile, output, save_to=False, title=False):
    """Read a .wav file and plot its waveform.

    Args:
        wavfile: pathlib.Path of the .wav file.
        output: unused; kept for backward compatibility with existing callers.
        save_to: target path for the plot image, or False.
        title: plot title, or False.

    Returns:
        True when the file was read and plotted successfully, False otherwise.
        (Previously the function returned False unconditionally; the dead
        ``rate = 0`` / ``audio_data = 0`` initializers were removed.)
    """
    if not wavfile.is_file():
        return False
    try:
        rate, audio_data = scipy.io.wavfile.read(wavfile.resolve())
        plot_wav(rate, audio_data, save_to=save_to, title=title)
    except Exception as e:
        # Best effort: report the failure and let the caller continue
        # with the remaining files.
        print(e)
        return False
    return True
@click.command()
@click.option('--folder', default='.', help='The folder to scan.')
@click.option('--output', default='./plots', help='The folder to plot to..')
def plotting(folder, output):
    """Scan a given folder and plot all wav files."""
    files = get_files(Path(folder))
    print(f'{len(files)} have been found and will be plotted.')
    for wavfile in files:
        # Plot is written to <output>/<original file name>.png.
        save_to = str(Path(PurePath(output + '/'), wavfile.name).resolve()) + '.png'
        analyze_and_plot(wavfile, output, save_to=save_to, title=wavfile.name)
if __name__ == '__main__':
    # Click parses command-line options; no arguments passed explicitly.
    plotting()
0d3413bac5a6aea2f9f7fe0d6f905b5180d47e91 | 456 | py | Python | lib/ocr.py | jabbalaci/Bash-Utils | c6fb115834a221c4aaba8eaa37f650beea45ef29 | [
"MIT"
] | 73 | 2015-03-31T01:12:26.000Z | 2021-07-10T19:45:04.000Z | lib/ocr.py | doc22940/Bash-Utils | c6fb115834a221c4aaba8eaa37f650beea45ef29 | [
"MIT"
] | 2 | 2017-01-06T17:17:42.000Z | 2017-08-23T18:35:55.000Z | lib/ocr.py | doc22940/Bash-Utils | c6fb115834a221c4aaba8eaa37f650beea45ef29 | [
"MIT"
] | 27 | 2015-01-03T18:51:23.000Z | 2020-11-15T11:49:51.000Z | #!/usr/bin/env python3
"""
OCR with the Tesseract engine from Google
this is a wrapper around pytesser (http://code.google.com/p/pytesser/)
"""
import config as cfg
from lib.process import get_simple_cmd_output
def image_file_to_string(fname):
    """Convert an image file to text using OCR."""
    command = "{tesseract} {fname} stdout".format(
        tesseract=cfg.TESSERACT, fname=fname)
    # Tesseract writes the recognized text to stdout; drop trailing newlines.
    return get_simple_cmd_output(command).rstrip('\n')
| 24 | 70 | 0.701754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 220 | 0.482456 |
0d34f19c13621e0ed06e9108be40f56911d2c61d | 4,848 | py | Python | geiger-counter/remote-logging.py | SaintGimp/BeagleBoneHardware | 04990a6fbcb7d95b1bbcd4d33e9145f5bfbdd3b2 | [
"MIT"
] | 2 | 2018-07-03T12:34:25.000Z | 2019-02-22T14:44:59.000Z | geiger-counter/remote-logging.py | SaintGimp/BeagleBoneHardware | 04990a6fbcb7d95b1bbcd4d33e9145f5bfbdd3b2 | [
"MIT"
] | null | null | null | geiger-counter/remote-logging.py | SaintGimp/BeagleBoneHardware | 04990a6fbcb7d95b1bbcd4d33e9145f5bfbdd3b2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import gimpbbio.gpio as gpio
import serial
import re
import http.client
import urllib
import threading
import queue
import sys
import datetime
import time
import socket
import logging
import logging.handlers
import argparse
import Adafruit_BMP.BMP085 as BMP085
class MyLogger(object):
    """File-like adapter that forwards write() calls to a logging.Logger,
    so it can stand in for sys.stdout / sys.stderr."""

    def __init__(self, logger, level):
        """Needs a logger and a logger level."""
        self.logger = logger
        self.level = level

    def write(self, message):
        # Skip bare newlines so print() doesn't produce empty log records.
        text = message.rstrip()
        if text != "":
            self.logger.log(self.level, text)
parser = argparse.ArgumentParser(description="geiger-counter")
parser.add_argument("-l", "--log", help="file to write log to")
parser.add_argument("key", help="Phant private key")
args = parser.parse_args()

if args.log:
    # Redirect stdout/stderr through a daily-rotating log file
    # (two weeks of backups kept).
    LOG_LEVEL = logging.INFO  # Could be e.g. "DEBUG" or "WARNING"
    LOG_FILENAME = args.log

    logger = logging.getLogger(__name__)
    logger.setLevel(LOG_LEVEL)
    handler = logging.handlers.TimedRotatingFileHandler(LOG_FILENAME, when="midnight", backupCount=14)
    formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    sys.stdout = MyLogger(logger, logging.INFO)
    sys.stderr = MyLogger(logger, logging.ERROR)

print("Starting up")

# Station configuration: altitude is used for sea-level pressure correction.
altitude_in_meters = 112
phant_url = 'gimp-phant.azurewebsites.net'
phant_public_key = 'kgkWV69Nqnupn6W9Xbo6'
pressure_samples = []
pressure_sampling_lock = threading.Lock()
# NOTE: this rebinds the imported `queue` MODULE name to a Queue instance;
# all later references to `queue` mean this instance.
queue = queue.Queue()

uart = gpio.uarts.uart1
uart.open()

# We have a quarter-second timeout because if we start reading in
# the middle of a serial message or if a byte is dropped for any
# reason, we'll throw away the partial message and try again
ser = serial.Serial(port = "/dev/ttyO1", baudrate=9600, timeout=0.25)

pressure_sensor = BMP085.BMP085(mode=BMP085.BMP085_ULTRAHIGHRES)

headers = {
    "Phant-Private-Key": str(args.key),
    'Content-Type': 'application/x-www-form-urlencoded'
}
def sendData():
    """Worker loop: take URL-encoded sample bodies off the queue and POST
    them to the Phant server, retrying every 5 s until each one succeeds.

    Runs forever in a daemon thread; queue.get() blocks while idle.
    """
    while True:
        body = queue.get()
        success = False
        while not success:
            try:
                # Fresh connection per attempt; relies on the 10 s timeout.
                phantServer = http.client.HTTPConnection(phant_url, timeout=10)
                phantServer.request(method="POST", url="/input/" + phant_public_key, body=body, headers=headers)
                response = phantServer.getresponse()
                response.read()
                if response.status == 200:
                    success = True
                    print("Logged to server: " + body)
                else:
                    print("Phant server returned status " + str(response.status) + ": " + response.reason)
            except (http.client.HTTPException, socket.error) as err:
                print("HTTP error: {0}".format(err))

            if not success:
                time.sleep(5)
                print("Retrying...")
def oncePerMinute():
    """Worker loop: once a minute, snapshot the CPM reading and the median
    pressure, then enqueue a URL-encoded record for the upload thread.

    Runs forever in a daemon thread.
    """
    global next_interval_time
    while True:
        try:
            # Sleep for the remainder of the time until the next
            # interval, prevents timer drift. The calculated time
            # to sleep could be negative if our clock got updated
            # by ntptime so just sleep one minute in that case.
            next_interval_time += 60
            sleep_time = next_interval_time - time.time()
            if sleep_time < 0:
                sleep_time = 60
            time.sleep(sleep_time)

            device_time = str(datetime.datetime.now())
            current_cpm = cpm  # module-level global, updated by the serial read loop
            pressure = getPressure()
            # Barometric formula: reduce station pressure to sea level.
            sea_level_pressure = pressure / pow(1.0 - altitude_in_meters / 44330.0, 5.255)

            body = urllib.parse.urlencode({'cpm': current_cpm, 'device_time': device_time, 'pressure': '{0:0.2f}'.format(pressure), 'sea_level_pressure': '{0:0.2f}'.format(sea_level_pressure)})
            queue.put_nowait(body)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # can still terminate the thread.
            print("Unexpected onePerMinute error: {0}".format(sys.exc_info()[0]))
        else:
            print("Queued sample")
def samplePressure():
    """Worker loop: continuously read the BMP085 sensor into the shared
    sample buffer (guarded by pressure_sampling_lock).

    The buffer grows until getPressure() drains it; runs forever in a
    daemon thread.
    """
    global pressure_samples
    while True:
        with pressure_sampling_lock:
            pressure_samples.append(pressure_sensor.read_pressure())
def getPressure():
    """Return the median of pressure samples collected since the last call
    and reset the shared buffer.

    Raises IndexError (via median) if no samples have been collected yet.
    """
    global pressure_samples
    with pressure_sampling_lock:
        median_pressure = median(pressure_samples)
        pressure_samples = []
    return median_pressure
def median(number_list):
    """Return the median of *number_list*: the middle value for an odd
    count, the mean of the two middle values for an even count."""
    ordered = sorted(number_list)
    count = len(ordered)
    middle = count // 2
    if count % 2:
        return ordered[middle]
    return (ordered[middle] + ordered[middle - 1]) / 2.0
socket.setdefaulttimeout(10)

# Background workers: uploader, per-minute sampler, continuous pressure reader.
sendThread = threading.Thread(target = sendData)
sendThread.daemon = True
sendThread.start()

next_interval_time = time.time()
sampleThread = threading.Thread(target = oncePerMinute)
sampleThread.daemon = True
sampleThread.start()

pressureThread = threading.Thread(target = samplePressure)
pressureThread.daemon = True
pressureThread.start()

# Main thread: read fixed 36-byte frames from the geiger counter's serial
# port and publish the latest CPM value via the module-level `cpm` global.
while True:
    bytes = ser.read(36)
    if len(bytes) == 36:
        try:
            line1 = bytes[2:18].decode('ascii')
            line2 = bytes[20:36].decode('ascii')  # second display line; currently unused
            #print(line1 + " " + line2)
            # NOTE(review): if the regex does not match, .group() raises
            # AttributeError, which the except clause below does not catch.
            cpm = int(re.search(r'CPM:\s*(\d+)', line1).group(1))
        except (UnicodeDecodeError):
            print("Unicode decoding error!")
0d34f30b476694738f42f03846398937a9cf92fc | 852 | py | Python | migrations/versions/0e26a2d71475_.py | Toluwalemi/flask-audiofile-server | 548d90aa8f25a58cd3647f08975e9ac6a679087d | [
"Unlicense"
] | null | null | null | migrations/versions/0e26a2d71475_.py | Toluwalemi/flask-audiofile-server | 548d90aa8f25a58cd3647f08975e9ac6a679087d | [
"Unlicense"
] | null | null | null | migrations/versions/0e26a2d71475_.py | Toluwalemi/flask-audiofile-server | 548d90aa8f25a58cd3647f08975e9ac6a679087d | [
"Unlicense"
] | 1 | 2021-04-07T13:46:22.000Z | 2021-04-07T13:46:22.000Z | """empty message
Revision ID: 0e26a2d71475
Revises:
Create Date: 2021-03-19 00:13:23.019330
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0e26a2d71475'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Same operations and order as the autogenerated version.
    for table_name in ('audio_book', 'podcast', 'song'):
        op.create_unique_constraint(None, table_name, ['id'])
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Same operations and order as the autogenerated version.
    for table_name in ('song', 'podcast', 'audio_book'):
        op.drop_constraint(None, table_name, type_='unique')
    # ### end Alembic commands ###
| 25.818182 | 65 | 0.687793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.497653 |
0d350e50046900dd997c091b0cd94feb9afe2441 | 9,510 | py | Python | 2_functions.py | codernayeem/python-cheat-sheet | ec6fe9f33e9175251df65899cef89f65219b9cb4 | [
"MIT"
] | null | null | null | 2_functions.py | codernayeem/python-cheat-sheet | ec6fe9f33e9175251df65899cef89f65219b9cb4 | [
"MIT"
] | null | null | null | 2_functions.py | codernayeem/python-cheat-sheet | ec6fe9f33e9175251df65899cef89f65219b9cb4 | [
"MIT"
] | null | null | null | # Functions
print("************* Function ***********")
# Simple function without any arguments/parameters
def say_welocme():
return print('Welocme')
# Simple function with arguments/parameters
def say_helo(name, age):
print('Helo', name, age)
# this function returns None
say_helo('Nayeem', 18) # passing args as positional args
say_helo(age=19, name='Sami') # passing args as keyword args (if you mismatch the serial, use keywords)
def check_odd_number(n):
    """Return True when *n* is odd, False otherwise."""
    return bool(n % 2)

if check_odd_number(43):
    print(43, " is a odd number")
print("********* Default parameter **********")
# Simple function with a default arguments/parameters
def say_somethings(name, message="Welcome"):
    # message falls back to "Welcome" when the caller omits it
    print(message, name)
# Type hint:
print("********* Type hint **********")
def greeting(name: str) -> str:
    """Return the greeting string for *name*.

    Annotations here are informative only: the Python runtime does not
    enforce them, but IDEs, linters and static type checkers use them to
    reason about the code.
    """
    return f'Hello {name}'

greeting("Nayeem")
# scope
print("************ Scope *************")
parent_name = "Anything" # this is a global variable

def show_parent1():
    print(parent_name) # reading a global from a function works without `global`

def show_parent2():
    parent_name = "Lovely" # this will not change the global variable; it creates a new local variable
    print(parent_name) # prints the local variable

def show_parent3():
    # we can use a global variable in a function
    # but cannot rebind it directly without declaring it
    # To modify:
    # method 1:
    global parent_name
    parent_name = "Something" # this will change the global variable
    print(parent_name)
    # method 2:
    globals()['parent_name'] = "Something_Nothing" # this will change the global variable
    print(globals()['parent_name'])

def show_parent4(parent_name):
    print(parent_name) # this parent_name is a local variable (the parameter)
    # to use the global variable here:
    print(globals()['parent_name']) # this will print the global variable, not the local one
    # A variable can not be both: parameter and global,
    # so you can not do this here:
    # global parent_name
    # print(parent_name)

show_parent1()
show_parent2()
show_parent3()
show_parent4("Long Lasting")
l1 = [56, 87, 89, 45, 57]
d1 = {'Karim': 50, 'Rafiq': 90, 'Sabbir': 60}

# Lambda function
print("************ Lambda function *************")
# a lambda function is just a one-line, simple anonymous function.
# Its definition ==> lambda parameter_list: expression
# a lambda function is used when we need a function once, as an argument to another function
print(min(d1.items(), key=lambda item: item[1])) # returns the (key, value) pair with the smallest value

# Python built-in functions/methods
print("************ Some Built-in functions *************")
print(len(l1)) # returns the length of that iterable
print(sum(l1)) # returns the sum of an iterable
print(max(l1)) # returns the biggest element
print(min(l1)) # returns the smallest element
print(max(d1, key=lambda k: d1[k])) # returns the key with the biggest value
print(min(d1.items(), key=lambda item: item[1])) # returns the (key, value) pair with the smallest value
print(all([0, 1, 5])) # returns True if all the elements are True, otherwise False
print(any([0, 1, 5])) # returns True if any of the elements is True, otherwise False
print(repr('hi')) # calls __repr__() for that object. Represents the object
print(id(l1)) # returns a unique integer number which represents identity
print(type(56)) # returns the class type of that object
print(dir(567)) # returns a list of the specified object's properties and methods
print(ord('A')) # 65 : Return the Unicode code point for a one-character string
print(chr(65)) # 'A' : Return a Unicode string of one character with that ordinal
print(abs(-62)) # 62 : Return the absolute value of a number
eval('print("hi")') # Evaluates and executes an expression
print(eval('(58*9)+3**2')) # Evaluates and executes an expression

print("************ All Built-in functions *************")
# abs() Returns the absolute value of a number
# all() Returns True if all items in an iterable object are true
# any() Returns True if any item in an iterable object is true
# ascii() Returns a readable version of an object. Replaces none-ascii characters with escape character
# bin() Returns the binary version of a number
# bool() Returns the boolean value of the specified object
# bytearray() Returns an array of bytes
# bytes() Returns a bytes object
# callable() Returns True if the specified object is callable, otherwise False
# chr() Returns a character from the specified Unicode code.
# classmethod() Converts a method into a class method
# compile() Returns the specified source as an object, ready to be executed
# complex() Returns a complex number
# delattr() Deletes the specified attribute (property or method) from the specified object
# dict() Returns a dictionary (Array)
# dir() Returns a list of the specified object's properties and methods
# divmod() Returns the quotient and the remainder when argument1 is divided by argument2
# enumerate() Takes a collection (e.g. a tuple) and returns it as an enumerate object
# eval() Evaluates and executes an expression
# exec() Executes the specified code (or object)
# filter() Use a filter function to exclude items in an iterable object
# float() Returns a floating point number
# format() Formats a specified value
# frozenset() Returns a frozenset object
# getattr() Returns the value of the specified attribute (property or method)
# globals() Returns the current global symbol table as a dictionary
# hasattr() Returns True if the specified object has the specified attribute (property/method)
# hash() Returns the hash value of a specified object
# help() Executes the built-in help system
# hex() Converts a number into a hexadecimal value
# id() Returns the id of an object
# input() Allowing user input
# int() Returns an integer number
# isinstance() Returns True if a specified object is an instance of a specified object
# issubclass() Returns True if a specified class is a subclass of a specified object
# iter() Returns an iterator object
# len() Returns the length of an object
# list() Returns a list
# locals() Returns an updated dictionary of the current local symbol table
# map() Returns the specified iterator with the specified function applied to each item
# max() Returns the largest item in an iterable
# memoryview() Returns a memory view object
# min() Returns the smallest item in an iterable
# next() Returns the next item in an iterable
# object() Returns a new object
# oct() Converts a number into an octal
# open() Opens a file and returns a file object
# ord() Convert an integer representing the Unicode of the specified character
# pow() Returns the value of x to the power of y
# print() Prints to the standard output device
# property() Gets, sets, deletes a property
# range() Returns a sequence of numbers, starting from 0 and increments by 1 (by default)
# repr() Returns a readable version of an object
# reversed() Returns a reversed iterator
# round() Rounds a numbers
# set() Returns a new set object
# setattr() Sets an attribute (property/method) of an object
# slice() Returns a slice object
# sorted() Returns a sorted list
# @staticmethod() Converts a method into a static method
# str() Returns a string object
# sum() Sums the items of an iterator
# super() Returns an object that represents the parent class
# tuple() Returns a tuple
# type() Returns the type of an object
# vars() Returns the __dict__ property of an object
# zip() Returns an iterator, from two or more iterators
# Decorators
print('*********** Decorators ************')
from functools import wraps
def star(func):
    """Decorator that frames *func*'s output with lines of 30 asterisks.

    Fixed: the wrapper now propagates the wrapped function's return value
    and applies functools.wraps (imported above but previously unused) so
    the wrapped function keeps its name and docstring.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        print("*" * 30)
        result = func(*args, **kwargs)
        print("*" * 30)
        return result
    return inner
@star
def printer1(msg):
    # prints msg framed by the star decorator
    print(msg)
def percent(func):
    """Decorator that frames *func*'s output with lines of 30 percent signs.

    Fixed: the wrapper now propagates the wrapped function's return value
    and applies functools.wraps (imported above but previously unused) so
    the wrapped function keeps its name and docstring.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        print("%" * 30)
        result = func(*args, **kwargs)
        print("%" * 30)
        return result
    return inner
# decorators apply bottom-up: percent wraps printer2, then star wraps that,
# so the star frame appears OUTSIDE the percent frame
@star
@percent
def printer2(msg):
    print(msg)

printer1("Hello")
printer2("Hello")
# Function caching
print('*********** Function caching ************')
import time
from functools import lru_cache
@lru_cache(maxsize=32)
def some_work(n):
    # Simulates 3 s of work; repeated calls with the SAME n return the
    # cached result instantly.
    time.sleep(3)
    return n * 2
print('Running work')
some_work(5)
print('Calling again ..')
some_work(9)  # different argument -> cache miss, so this also takes ~3 s
print('finished')
# Coroutines
print('*********** Coroutines ************')
import time
def searcher():
    """Coroutine that reports whether text sent to it occurs in a fixed corpus.

    Prime it with next() before calling .send(); the 3 s sleep simulates
    loading the corpus.
    """
    time.sleep(3)
    book = "Tihs is ok"
    while True:
        text = (yield)  # receiving via (yield) makes this a coroutine
        if text in book:
            print(f'"{text}" found')
        else:
            print(f'"{text}" not found')
search = searcher()
next(search)  # prime the coroutine: runs up to the first (yield)
search.send('ok')
print('Going for next')
search.send('okk')
print('Going for next')
search.send('is')
print('Finished')
search.close()  # raises GeneratorExit inside the coroutine, ending it
| 35.222222 | 161 | 0.674238 | 0 | 0 | 252 | 0.026498 | 162 | 0.017035 | 0 | 0 | 7,141 | 0.750894 |
0d35cf0c049dbda0c7224db51ed6f0b36f88de1d | 3,763 | py | Python | Learning Scripts/helper.py | n3urovirtual/EyeTracking_Experiment | 00a534540a524db8606d54e33ebc43de49a959ac | [
"MIT"
] | 2 | 2021-11-18T16:59:43.000Z | 2022-02-11T18:25:03.000Z | Learning Scripts/helper.py | n3urovirtual/EyeTracking_Experiment | 00a534540a524db8606d54e33ebc43de49a959ac | [
"MIT"
] | null | null | null | Learning Scripts/helper.py | n3urovirtual/EyeTracking_Experiment | 00a534540a524db8606d54e33ebc43de49a959ac | [
"MIT"
] | null | null | null | screen_resX=1920
screen_resY=1080
img_id=['1.JPG_HIGH_',
'2.JPG_HIGH_',
'7.JPG_HIGH_',
'12.JPG_HIGH_',
'13.JPG_HIGH_',
'15.JPG_HIGH_',
'19.JPG_HIGH_',
'25.JPG_HIGH_',
'27.JPG_HIGH_',
'29.JPG_HIGH_',
'41.JPG_HIGH_',
'42.JPG_HIGH_',
'43.JPG_HIGH_',
'44.JPG_HIGH_',
'48.JPG_HIGH_',
'49.JPG_HIGH_',
'51.JPG_HIGH_',
'54.JPG_HIGH_',
'55.JPG_HIGH_',
'59.JPG_HIGH_',
'61.JPG_HIGH_',
'64.JPG_HIGH_',
'67.JPG_HIGH_',
'74.JPG_HIGH_',
'76.JPG_HIGH_',
'77.JPG_HIGH_',
'84.JPG_HIGH_',
'87.JPG_HIGH_',
'88.JPG_HIGH_',
'91.JPG_HIGH_',
'94.JPG_HIGH_',
'95.JPG_HIGH_',
'100.JPG_HIGH_',
'101.JPG_HIGH_',
'112.JPG_HIGH_',
'113.JPG_HIGH_',
'3.JPG_LOW_',
'6.JPG_LOW_',
'10.JPG_LOW_',
'17.JPG_LOW_',
'21.JPG_LOW_',
'23.JPG_LOW_',
'28.JPG_LOW_',
'33.JPG_LOW_',
'35.JPG_LOW_',
'38.JPG_LOW_',
'39.JPG_LOW_',
'40.JPG_LOW_',
'46.JPG_LOW_',
'50.JPG_LOW_',
'52.JPG_LOW_',
'58.JPG_LOW_',
'60.JPG_LOW_',
'62.JPG_LOW_',
'63.JPG_LOW_',
'70.JPG_LOW_',
'72.JPG_LOW_',
'73.JPG_LOW_',
'75.JPG_LOW_',
'78.JPG_LOW_',
'80.JPG_LOW_',
'82.JPG_LOW_',
'89.JPG_LOW_',
'90.JPG_LOW_',
'92.JPG_LOW_',
'97.JPG_LOW_',
'99.JPG_LOW_',
'102.JPG_LOW_',
'103.JPG_LOW_',
'104.JPG_LOW_',
'105.JPG_LOW_',
'108.JPG_LOW_']
sub_id=[1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
25,
26,
28,
29,
30,
31]
# File-system locations for the "Memory guided attention in cluttered
# scenes v.3" experiment (Windows paths with forward slashes).
# Parenthesised literal concatenation is used instead of backslash
# line continuations; the resulting strings are unchanged.
BEHAVIORAL_FILE = ('C:/Users/presi/Desktop/PhD/Memory guided attention in '
                   'cluttered scenes v.3/Behavioral Data/Memory/Preprocessed/Memory_all_dirty.csv')
DATA_PATH = ('C:/Users/presi/Desktop/PhD/Memory guided attention in '
             'cluttered scenes v.3/Eye Tracking Data/1. Associative Learning/Raw')
TRIALS_PATH = ('C:/Users/presi/Desktop/PhD/Memory guided attention in '
               'cluttered scenes v.3/Eye Tracking Data/'
               '1. Associative Learning/Learn_trials')
EVENTS_PATH = ('C:/Users/presi/Desktop/PhD/Memory guided attention in '
               'cluttered scenes v.3/Eye Tracking Data/'
               '1. Associative Learning/Learn_events')
COLLATION_PATH = ('C:/Users/presi/Desktop/PhD/Memory guided attention in '
                  'cluttered scenes v.3/Eye Tracking Data/'
                  '1. Associative Learning/Learn_collation')
DIR_ROUTE_PATH = ('C:/Users/presi/Desktop/PhD/Memory guided attention in '
                  'cluttered scenes v.3/Eye Tracking Data/'
                  '1. Associative Learning/Learn_direct_route')
IMG_PATH = ('C:/Users/presi/Desktop/PhD/Memory guided attention in '
            'cluttered scenes v.3/Tasks/Task2/Scenes')
# NOTE(review): 'Learn_ visualizations' below contains a space after the
# underscore, while the old commented-out IVT path used
# 'Learn_visualizations'. Confirm which directory actually exists on disk.
RAW_SCANPATH = ('C:/Users/presi/Desktop/PhD/Memory guided attention in '
                'cluttered scenes v.3/Eye Tracking Data/1. Associative Learning/'
                'Learn_ visualizations/Raw_Scanpaths')
# Previous location on the C: drive, kept for reference:
# IVT_SCANPATH = ('C:/Users/presi/Desktop/PhD/Memory guided attention in '
#                 'cluttered scenes v.3/Eye Tracking Data/1. Associative Learning/'
#                 'Learn_visualizations/IVT_Scanpaths')
IVT_SCANPATH = ('E:/UCY PhD/Memory guided attention'
                ' in cluttered scenes v.3/1. Associative Learning/'
                'Learn_visualizations/IVT_Scanpaths')
| 25.773973 | 83 | 0.545841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,338 | 0.621313 |
0d365cc37f818437b854ee723afc0abb9a2d8fa4 | 3,219 | py | Python | twython_auth/views.py | tsaylor/nepal | 452669dc8f301cbfab5cdf804292246ff91ed65f | [
"MIT"
] | null | null | null | twython_auth/views.py | tsaylor/nepal | 452669dc8f301cbfab5cdf804292246ff91ed65f | [
"MIT"
] | 2 | 2021-03-19T21:47:22.000Z | 2021-06-10T17:44:44.000Z | twython_auth/views.py | tsaylor/nepal | 452669dc8f301cbfab5cdf804292246ff91ed65f | [
"MIT"
] | null | null | null | from django.contrib.auth import authenticate, login, logout as django_logout
from django.contrib.auth import get_user_model
from django.http import HttpResponseRedirect
from django.conf import settings
from django.core.urlresolvers import reverse
from twython import Twython
from profiles.models import Profile
def logout(request, redirect_url=settings.LOGOUT_REDIRECT_URL):
    """Log the current user out and bounce them to *redirect_url*.

    Remove this view if the surrounding application already provides its
    own logout handling.
    """
    django_logout(request)
    target = request.build_absolute_uri(redirect_url)
    return HttpResponseRedirect(target)
def begin_auth(request):
    """Start the Twitter OAuth handshake.

    Requests a temporary request token from Twitter, stashes it in the
    session (it is needed again in thanks() to trade for permanent
    tokens), then redirects the user to Twitter's authorization URL.
    """
    twitter = Twython(settings.TWITTER_KEY, settings.TWITTER_SECRET)
    # Twitter will send the user back to the thanks() view below.
    callback_url = request.build_absolute_uri(reverse('twython_auth.views.thanks'))
    auth_props = twitter.get_authentication_tokens(callback_url)
    request.session['request_token'] = auth_props
    # request.session['next_url'] = request.GET.get('next', settings.LOGIN_REDIRECT_URL)
    return HttpResponseRedirect(auth_props['auth_url'])
def thanks(request, redirect_url=settings.LOGIN_REDIRECT_URL):
    """A user gets redirected here after hitting Twitter and authorizing your app to use their data.
    This is the view that stores the tokens you want
    for querying data. Pay attention to this.
    """
    User = get_user_model()
    # Now that we've got the magic tokens back from Twitter, we need to exchange
    # for permanent ones and store them...
    oauth_token = request.session['request_token']['oauth_token']
    oauth_token_secret = request.session['request_token']['oauth_token_secret']
    twitter = Twython(settings.TWITTER_KEY, settings.TWITTER_SECRET,
                      oauth_token, oauth_token_secret)
    # Retrieve the tokens we want...
    authorized_tokens = twitter.get_authorized_tokens(request.GET['oauth_verifier'])
    # If they already exist, grab them, login and redirect to a page displaying stuff.
    # The Twitter numeric user id doubles as the local username.
    try:
        user = User.objects.get(username=authorized_tokens['user_id'])
        # NOTE(review): this user is fetched without authenticate(), so it has
        # no backend attached when login() is called below -- confirm this
        # path works with the configured authentication backends.
    except User.DoesNotExist:
        # First login: create a local user + profile from Twitter data.
        twitter = Twython(
            settings.TWITTER_KEY, settings.TWITTER_SECRET,
            authorized_tokens['oauth_token'], authorized_tokens['oauth_token_secret']
        )
        user_info = twitter.verify_credentials()
        user_info.update(authorized_tokens)
        user_info['profile_id'] = user_info['id']
        profile, created = Profile.from_json(user_info)
        if created:
            # NOTE(review): the OAuth token secret is used as the account
            # password; if Twitter rotates the secret this login breaks.
            profile.user = User.objects.create_user(
                user_info['profile_id'], password=user_info['oauth_token_secret']
            )
            profile.save()
        user = authenticate(
            username=authorized_tokens['user_id'],
            password=authorized_tokens['oauth_token_secret']
        )
    login(request, user)
    # Honour a post-login destination saved earlier, else the default.
    redirect_url = request.session.get('next_url', redirect_url)
    return HttpResponseRedirect(redirect_url)
| 38.321429 | 100 | 0.720721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,180 | 0.366573 |
0d37e90dca95e3985986b6fc950f40dc1aa89c2b | 140 | py | Python | hello.py | alee8542/cs3240-labdemo | f80a8f1dae79167cb13c3e176e14e3942852d04e | [
"MIT"
] | null | null | null | hello.py | alee8542/cs3240-labdemo | f80a8f1dae79167cb13c3e176e14e3942852d04e | [
"MIT"
] | null | null | null | hello.py | alee8542/cs3240-labdemo | f80a8f1dae79167cb13c3e176e14e3942852d04e | [
"MIT"
] | null | null | null | import helper
from helper import greeting
def main():
    """Print the standard greeting via the shared helper module."""
    greeting("hello")


if __name__ == '__main__':
    main()
0d38684afecb14d8b612ceb8af67630b80b4ff71 | 989 | py | Python | Multidimensional_Lists/05_square_with_maximum_sum.py | MihailMarkovski/Python-Advanced-2020 | 8edea78cbe5588a409ba9bc3767861250f58c1a6 | [
"MIT"
] | 4 | 2020-09-19T13:53:19.000Z | 2020-11-01T18:34:53.000Z | Multidimensional_Lists/05_square_with_maximum_sum.py | MNikov/Python-Advanced-September-2020 | 1d65039de7f094d908411afffa8aee9689ab4220 | [
"MIT"
] | null | null | null | Multidimensional_Lists/05_square_with_maximum_sum.py | MNikov/Python-Advanced-September-2020 | 1d65039de7f094d908411afffa8aee9689ab4220 | [
"MIT"
def create_matrix(rows_count):
    """Read rows_count lines of ', '-separated integers from stdin.

    Returns the parsed rows as a list of lists of ints.
    """
    return [[int(token) for token in input().split(', ')]
            for _ in range(rows_count)]
def get_square_sum(row, col, matrix, size=2):
    """Return the sum of the size x size square of *matrix* whose
    top-left corner is at (row, col).

    *size* defaults to 2 for backward compatibility with the original
    hard-coded 2x2 behaviour. Indices are assumed to be in bounds.
    """
    square_sum = 0
    for r in range(row, row + size):
        for c in range(col, col + size):
            square_sum += matrix[r][c]
    return square_sum
def print_square(matrix, row, col, size=2):
    """Print the size x size square with top-left corner (row, col),
    one matrix row per output line, values separated by spaces.

    *size* defaults to 2 to preserve the original 2x2 behaviour.
    """
    for r in range(row, row + size):
        for c in range(col, col + size):
            print(matrix[r][c], end=' ')
        print()
# Read the board dimensions, load the matrix, then locate the 2x2 square
# with the largest sum (ties keep the first square found in row-major order).
rows_count, cols_count = [int(x) for x in input().split(', ')]
matrix = create_matrix(rows_count)
best_pos = 0, 0
best_sum = get_square_sum(0, 0, matrix)
for r in range(rows_count - 1):
    for c in range(cols_count - 1):
        candidate = get_square_sum(r, c, matrix)
        if candidate > best_sum:
            best_pos, best_sum = (r, c), candidate
print_square(matrix, best_pos[0], best_pos[1])
print(best_sum)
| 26.026316 | 62 | 0.604651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.011122 |
0d394051cbe789e0a2cdb86b79de18a4fdfcc0a4 | 756 | py | Python | algorithms/Python/sorting/selection_sort.py | Tanmoy07tech/DSA | f88c9cbf58bedb006e4401502507982cf4f52260 | [
"MIT"
] | 247 | 2020-12-24T05:06:41.000Z | 2022-03-30T19:32:17.000Z | algorithms/Python/sorting/selection_sort.py | k2491p/DSA | ea01662f90a6913ca6b7faa88319afefb5f8d296 | [
"MIT"
] | 518 | 2020-12-23T14:19:19.000Z | 2022-03-31T17:45:08.000Z | algorithms/Python/sorting/selection_sort.py | k2491p/DSA | ea01662f90a6913ca6b7faa88319afefb5f8d296 | [
"MIT"
] | 356 | 2020-12-24T08:12:31.000Z | 2022-03-28T11:28:09.000Z | '''
Find the largest element and place that element at the bottom
of the list. Repeat for each sub-array.
O(n^2) time complexity.
'''
from string import ascii_letters
# Sample inputs for the doctest and the __main__ demo: mixed ints,
# reverse-sorted ints, a short list, floats, and letters in reverse order.
arrays = (
    [12, 3, 7, 22, -12, 100, 1],
    [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
    [4, 1, 3, 9, 7],
    [0, -1.5, 1.5, 1.3, -1.3, -1.01, 1.01],
    list(reversed(ascii_letters)),
)
def selection_sort(arr):
    """Sort *arr* in place (ascending) and return None.

    Each pass moves the largest remaining element to the end of the
    unsorted prefix; O(n^2) comparisons overall.

    >>> data = [4, 1, 3, 9, 7]
    >>> selection_sort(data)
    >>> data
    [1, 3, 4, 7, 9]
    """
    for end in range(len(arr) - 1, 0, -1):
        # Index of the first occurrence of the maximum in arr[:end + 1].
        largest = max(range(end + 1), key=arr.__getitem__)
        arr[end], arr[largest] = arr[largest], arr[end]
if __name__ == "__main__":
for arr in arrays:
selection_sort(arr)
print("Sorted array: ")
for ele in arr: # type: ignore
print(f"\t{ele}")
| 20.432432 | 69 | 0.579365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.365079 |
0d3f74b53d1976c4b1197848cb8716e04cb65c67 | 2,535 | py | Python | djangocms_html_tags/cms_plugins.py | radity/djangocms-html-tags | d9d8d8b2609685d896e05af8fc9e2271c1dc0c26 | [
"MIT"
] | null | null | null | djangocms_html_tags/cms_plugins.py | radity/djangocms-html-tags | d9d8d8b2609685d896e05af8fc9e2271c1dc0c26 | [
"MIT"
] | 2 | 2019-02-17T22:15:40.000Z | 2019-02-20T22:40:21.000Z | djangocms_html_tags/cms_plugins.py | radity/djangocms-html-tags | d9d8d8b2609685d896e05af8fc9e2271c1dc0c26 | [
"MIT"
] | 2 | 2019-02-01T09:03:52.000Z | 2020-01-14T12:56:52.000Z | from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from djangocms_html_tags.forms import HTMLTextInputForm, HTMLFormForm, HTMLTextareaForm
from djangocms_html_tags.models import HTMLTag, HTMLText
from djangocms_html_tags.utils import FormMethod
class HTMLTextBase(CMSPluginBase):
    """Common base for the single-HTML-tag plugins defined below.

    Subclasses typically override only ``name`` and ``tag`` (and
    optionally ``form``/``render_template``); ``save_model`` stamps the
    subclass's tag onto the saved instance.
    """
    model = HTMLText
    module = _("HTML Tags")
    render_template = 'djangocms_html_tags/html_text.html'
    fields = ('value', 'attributes')
    form = HTMLTextInputForm
    tag = None  # concrete subclasses set this to an HTMLTag value
    def save_model(self, request, obj, form, change):
        # Persist which HTML tag this plugin renders before saving.
        obj.tag = self.tag
        return super(HTMLTextBase, self).save_model(request, obj, form, change)
# One thin plugin per HTML heading level (h1-h6); each only binds a
# display name and the HTMLTag value that HTMLTextBase.save_model()
# stamps onto the instance.
class Heading1Plugin(HTMLTextBase):
    name = _("Heading 1")
    tag = HTMLTag.H1
class Heading2Plugin(HTMLTextBase):
    name = _("Heading 2")
    tag = HTMLTag.H2
class Heading3Plugin(HTMLTextBase):
    name = _("Heading 3")
    tag = HTMLTag.H3
class Heading4Plugin(HTMLTextBase):
    name = _("Heading 4")
    tag = HTMLTag.H4
class Heading5Plugin(HTMLTextBase):
    name = _("Heading 5")
    tag = HTMLTag.H5
class Heading6Plugin(HTMLTextBase):
    name = _("Heading 6")
    tag = HTMLTag.H6
class ParagraphPlugin(HTMLTextBase):
    # <p> element: multi-line text via a textarea form; may contain
    # nested child plugins.
    name = _("Paragraph")
    tag = HTMLTag.P
    form = HTMLTextareaForm
    allow_children = True
class ButtonPlugin(HTMLTextBase):
    # <button> element; may contain nested child plugins.
    name = _("Button")
    tag = HTMLTag.BUTTON
    allow_children = True
class InputPlugin(HTMLTextBase):
    # <input> element; uses its own template (presumably because <input>
    # is a void element with no text content -- confirm in the template).
    name = _("Input")
    tag = HTMLTag.INPUT
    render_template = 'djangocms_html_tags/input.html'
class FormPlugin(HTMLTextBase):
    # <form> element: exposes method/action attributes and wraps child
    # plugins (e.g. inputs and buttons).
    name = _("Form")
    tag = HTMLTag.FORM
    model = HTMLText
    form = HTMLFormForm
    fields = (('method', 'action'), 'value', 'attributes')
    render_template = 'djangocms_html_tags/form.html'
    allow_children = True
    def render(self, context, instance, placeholder):
        # Tell the template whether this form POSTs (e.g. so it can emit
        # a CSRF token). NOTE(review): on a Django template Context,
        # update() pushes a new dict layer rather than mutating in place
        # -- confirm that is the intended behaviour here.
        context.update({'is_post': instance.attributes.get('method') == FormMethod.POST})
        return super(FormPlugin, self).render(context, instance, placeholder)
# Register every plugin with django CMS (same order as the original
# one-call-per-line registrations).
for _plugin_cls in (
    Heading1Plugin,
    Heading2Plugin,
    Heading3Plugin,
    Heading4Plugin,
    Heading5Plugin,
    Heading6Plugin,
    ParagraphPlugin,
    ButtonPlugin,
    InputPlugin,
    FormPlugin,
):
    plugin_pool.register_plugin(_plugin_cls)
| 26.40625 | 89 | 0.740039 | 1,735 | 0.684418 | 0 | 0 | 0 | 0 | 0 | 0 | 279 | 0.110059 |
0d4067eb03baa5149bcc67a886187a2fa8a7e860 | 4,659 | py | Python | handlers/rights_users.py | itcosplay/cryptobot | 6890cfde64a631bf0e4db55f6873a2217212d801 | [
"MIT"
] | null | null | null | handlers/rights_users.py | itcosplay/cryptobot | 6890cfde64a631bf0e4db55f6873a2217212d801 | [
"MIT"
] | null | null | null | handlers/rights_users.py | itcosplay/cryptobot | 6890cfde64a631bf0e4db55f6873a2217212d801 | [
"MIT"
] | null | null | null | # команда меню "права пользователей"
from aiogram.types import Message, CallbackQuery, ReplyKeyboardRemove
from filters import IsAdmin
from loader import dp, db, bot
from keyboards.inline.callback_data import change_button_data
from keyboards.inline.callback_data import set_status_data
from keyboards.inline.callback_data import group_users_data
# Admin-only menu command ("user rights"): replaces the triggering
# message with the inline keyboard of user groups.
@dp.message_handler(IsAdmin(), text='права пользователей')
async def rights_users(message:Message):
    # Import deferred, matching the file's pattern for keyboard builders.
    from keyboards.inline.group_users_buttons import create_kb_groups_users
    await message.delete()
    kb_groups_users = create_kb_groups_users()
    # 'USER GROUPS:' header plus group-selection keyboard.
    await message.answer('ГРУППЫ ПОЛЬЗОВАТЕЛЕЙ:', reply_markup=kb_groups_users)
@dp.callback_query_handler(IsAdmin(), group_users_data.filter(handler='statuses'))
async def get_groups(call:CallbackQuery):
    # Show the heading of the selected user group together with that
    # group's member keyboard. Example parsed callback payload:
    # {'@': 'gud', 'group': 'admin', 'handler': 'statuses'}
    from keyboards.inline.group_users_buttons import create_kb_particular_group
    await call.answer()
    await call.message.delete()
    print(call.data)  # debug trace of the raw callback payload
    user_data = group_users_data.parse(call.data)
    # Group key -> heading shown above the member list (Russian UI text).
    all_statuses = {
        'admin': 'АДМИНИСТРАТОРЫ:',
        'changer': 'ЧЕЙНДЖИ:',
        'operator': 'ОПЕРАТОРЫ:',
        'secretary': 'СЕКРЕТАРИ:',
        'executor': 'ИСПОЛНИТЕЛИ:',
        'permit': 'НА ПРОПУСК:',
        'request': 'В СТАТУСЕ "ЗАПРОС":',
        'block': 'ЗАБЛОКИРОВАННЫ:'
    }
    # Direct dict lookup replaces the original linear scan; an unknown
    # group now raises a clear KeyError instead of a NameError on 'text'.
    text = all_statuses[user_data['group']]
    kb_particular_group = create_kb_particular_group(user_data['group'])
    await call.message.answer(text, reply_markup=kb_particular_group)
# Admin pressed "change" next to a user: show the keyboard of statuses
# that can be assigned to that user.
@dp.callback_query_handler(IsAdmin(), change_button_data.filter(type_button='change_button'))
async def change_status(call:CallbackQuery):
    await call.answer()
    await call.message.delete()
    user_data = change_button_data.parse(call.data)
    # Example of result change_button_data.parse(call.data):
    # {'@': 'change_button', 'user_id': '1637852195', 'type_button': 'change_button'}
    from keyboards.inline.avalible_rights_users_kb import create_kb_change_status_handler
    keyboard = create_kb_change_status_handler(user_data)
    # Leftover debug prints -- consider removing or switching to logging.
    print('@dp.callback_query_handler()')
    print('user_name = db.get_user_name(user_data["user_id"])')
    user_name = db.get_user_name(user_data['user_id'])
    # 'NEW RIGHTS for <user>:' header plus the status keyboard.
    await call.message.answer(f'НОВЫЕ ПРАВА для {user_name}:', reply_markup=keyboard)
    # await bot.send_message (
    #     chat_id = call.message.chat.id,
    #     text='введите сумму:'
    # )
# Admin picked a new status (or deletion) for a user: apply it in the DB
# and notify both the admin and the affected user.
@dp.callback_query_handler(IsAdmin(), set_status_data.filter(type_btn='set_st_btn'))
async def set_status(call:CallbackQuery):
    from keyboards.default.admin_keyboard import create_kb_coustom_main_menu
    await call.answer()
    await call.message.delete()
    # NOTE(review): in a callback handler, call.message.from_user is the
    # bot that sent the keyboard message, not the admin who pressed the
    # button -- confirm call.from_user.id was not intended here.
    user_id = call.message.from_user.id
    user_data = set_status_data.parse(call.data)
    # Example of result set_status_data.parse(call.data):
    # {'@': 'ssb', 'id': '1637852195', 'new_st': 'admin', 'type_btn': 'set_st_btn'}
    print('callback_query_handler, db.get_user_name')
    user_name = db.get_user_name(user_data['id'])
    if user_data['new_st'] == 'delete':
        print('@dp.callback_query_handler(set_status_data.filter(type_button=\'set_status_btn\'))')
        db.delete_user(user_data['id'])
        # await call.answer(f'пользователь {user_data["user_name"]} удален', show_alert=True)
        # Confirmation to the admin: 'user <name> DELETED'.
        await call.message.reply(f'пользователь {user_name} УДАЛЕН', reply_markup=create_kb_coustom_main_menu(user_id))
    else:
        print('@dp.callback_query_handler(set_status_data.filter(type_button=\'set_status_btn\'))')
        db.update_status(user_data['new_st'], user_data['id'])
        # Status key -> human-readable Russian label used in the replies.
        list_rights = {
            'admin': 'администратор',
            'changer': 'чейндж',
            'operator': 'оператор',
            'secretary': 'секретарь',
            'executor': 'исполнитель',
            'permit': 'на пропуск',
            'request': 'в статус "запрос"',
            'block': 'заблокировать',
            'delete': 'удалить'
        }
        # await call.answer(f'статус установлен', show_alert=True)
        # Confirmation to the admin: 'user <name> is now - <STATUS>'.
        await call.message.answer(f'пользователь {user_name} теперь - {list_rights[user_data["new_st"]].upper()}', reply_markup=create_kb_coustom_main_menu(user_id))
        # Notify the affected user of their new rights and refresh their menu.
        await bot.send_message (
            chat_id = user_data['id'],
            text=f'Ваши права - {list_rights[user_data["new_st"]].upper()}. Используйте меню.',
            reply_markup=create_kb_coustom_main_menu(user_data['id'])
        )
| 37.272 | 166 | 0.691994 | 0 | 0 | 0 | 0 | 4,630 | 0.922311 | 4,291 | 0.854781 | 2,051 | 0.408566 |
0d40d5a5294c9c8290f11ed2656dfcbf8016ab4e | 12,878 | py | Python | qa327/frontend/sessions.py | rickyzhangca/CISC-327 | e419caafa6ae3fe77aa411228b6b58b237fe6a61 | [
"MIT"
] | null | null | null | qa327/frontend/sessions.py | rickyzhangca/CISC-327 | e419caafa6ae3fe77aa411228b6b58b237fe6a61 | [
"MIT"
] | 39 | 2020-10-11T02:31:14.000Z | 2020-12-15T20:18:56.000Z | qa327/frontend/sessions.py | rickyzhangca/CISC-327 | e419caafa6ae3fe77aa411228b6b58b237fe6a61 | [
"MIT"
] | 1 | 2020-10-17T02:44:43.000Z | 2020-10-17T02:44:43.000Z | import helpers
import exceptions
import datetime as dt
'''
This is the sessions module:
'''
'''
Base class with the basic structure of all frontend sessions.
'''
class Session:
    """Root of the console-session hierarchy.

    username is None while nobody is logged in.
    """
    def __init__(self, username = None):
        self.username = username
    def routing(self):
        """Return the session object that should run next (default: stay here)."""
        return self
    def operate(self):
        """Run this session's behaviour; the base session does nothing."""
        pass
'''
Base class for sessions that required login.
'''
class LoggedInSession(Session):
    """Base for screens that require an authenticated user.

    Constructing one without a username prints an error and raises
    exceptions.CannotAccessPageException so the caller can fall back to
    the landing screen. Menu shown: buy, sell, update, and logout.
    """
    def __init__(self, username):
        super().__init__(username)
        if username:
            return
        print('\nInvaild command, user must be logged in first')
        raise exceptions.CannotAccessPageException()
    def routing(self):
        return LandingSession(self.username)
    def getMenu(self):
        return 'buy, sell, update, and logout'
'''
Base class for sessions that does not required login.
'''
class UnloggedInSession(Session):
    """Base for screens that require NO user to be logged in.

    Constructing one while a username is set prints an error and raises
    exceptions.CannotAccessPageException. Menu shown: login, register,
    and exits.
    """
    def __init__(self, username):
        super().__init__()
        if not username:
            return
        print('\nInvaild command, user must be logged out first')
        raise exceptions.CannotAccessPageException()
    def routing(self):
        return LandingSession()
    def getMenu(self):
        return 'login, register, and exits'
'''
Landing page that displays usermenu and balance.
'''
class LandingSession(Session):
    """Main menu screen: shows balance/menu, reads a command, and routes
    to the matching session. Invalid or disallowed commands loop back
    here."""
    def __init__(self, username = None):
        super().__init__(username)
    # go to corresponding sessions.
    def routing(self):
        # Sessions whose access check fails raise
        # CannotAccessPageException from __init__; we then stay here.
        try:
            if self.command == 'login':
                new_session = LoginSession(self.username)
            elif self.command == 'register':
                new_session = RegisterSession(self.username)
            elif self.command == 'buy':
                new_session = BuySession(self.username)
            elif self.command == 'sell':
                new_session = SellSession(self.username)
            elif self.command == 'update':
                new_session = UpdateSession(self.username)
            elif self.command == 'logout':
                new_session = LogoutSession(self.username)
            elif self.command == 'exits':
                new_session = ExitSession(self.username)
            else:
                print('\nCommand undefind.')
                new_session = self
        except exceptions.CannotAccessPageException:
            new_session = self
        return new_session
    def operate(self):
        # Show state, then block on user input (stored in self.command
        # for routing() to use).
        print('\nLanding Screen...')
        self.showbalance()
        self.displayMenu()
        self.getUserCommand()
    # display user menu depend on whether the user logged in.
    def displayMenu(self):
        print('Menu options - ', end = '')
        if self.username:
            print(LoggedInSession.getMenu(self))
        else:
            print(UnloggedInSession.getMenu(self))
    def showbalance(self):
        # Only meaningful when logged in; silent otherwise.
        if self.username:
            print('Hi', self.username + '!')
            print('Your balance is: $' + str(helpers.ResourcesHelper.getUserInfo()[self.username]['balance']) + '.\n')
    def getUserCommand(self):
        self.command = input('Your command: ')
'''
session that guide the user's login process.
'''
class LoginSession(UnloggedInSession):
    """Interactive login: prompt for email/password and authorize
    against the stored user records. Only reachable while logged out."""
    def __init__(self, username):
        super().__init__(username)
        # Start anonymous; authorize() sets username on success, so
        # routing() hands the (possibly still anonymous) identity on.
        self.username = None
    def routing(self):
        return LandingSession(self.username)
    def operate(self):
        print('\nLog in session starts...')
        #check email
        try:
            email = helpers.UserIOHelper.acceptEmail()
            password = helpers.UserIOHelper.acceptPassword()
            self.authorize(email, password)
        except exceptions.WrongFormatException as e:
            print(str(e))
            print('\nLogin failed, ending session...')
    # authorize email and password the user inputed. Setup username.
    def authorize(self, email, password):
        # Linear scan over all users; the key of the matching record
        # becomes the session username.
        for i in helpers.ResourcesHelper.getUserInfo():
            if helpers.ResourcesHelper.getUserInfo()[i]['email'] == email and helpers.ResourcesHelper.getUserInfo()[i]['password'] == password:
                print('\nAccount logged in!')
                self.username = i
                return
        print('\nEmail or password incorrect.')
'''
user register
'''
class RegisterSession(UnloggedInSession):
    """Interactive account creation: prompts for email, user name, and a
    twice-entered password; validation failures abort with a message.
    Only reachable while logged out."""
    def __init__(self, username):
        super().__init__(username)
        # Registration never logs the new account in automatically.
        self.username = None
    def operate(self):
        try:
            user_email = helpers.UserIOHelper.acceptEmail()
            if self.checkExistence(user_email):
                raise exceptions.EmailAlreadyExistsException()
            user_name = helpers.UserIOHelper.acceptUserName()
            user_password = helpers.UserIOHelper.acceptPassword()
            user_password2 = helpers.UserIOHelper.acceptPassword2()
            if user_password != user_password2:
                raise exceptions.PasswordsNotMatchingException()
            self.addNewUser(user_name, user_email, user_password)
        except exceptions.EmailAlreadyExistsException:
            print('\nThis email already exists in the system')
            print('\nRegistration failed, ending session...')
        except exceptions.PasswordsNotMatchingException:
            print('\nThe password entered first time does not match the one enter the second time.')
            print('\nRegistration failed, ending session...')
        except exceptions.WrongFormatException as e:
            print(str(e))
            print('\nRegistration failed, ending session...')
    def checkExistence(self, user_email):
        # True when any stored user already uses this email address.
        for i in helpers.ResourcesHelper.getUserInfo():
            if user_email == helpers.ResourcesHelper.getUserInfo()[i]['email']:
                return True
        return False
    def addNewUser(self, user_name, user_email, user_password):
        # 3000 appears to be the starting account balance -- confirm.
        helpers.TransactionsHelper.newUserTransaction("register", user_name, user_email, user_password, 3000)
        print('\nRegistered successfully.')
'''
update ticket
'''
class UpdateSession(LoggedInSession):
    """Update an existing ticket's price/quantity/date.

    NOTE(review): unlike SellSession, the ownership of the ticket is not
    checked before updating -- confirm any logged-in user may edit any
    ticket."""
    # only appear after user logged in
    def __init__(self, username):
        super().__init__(username)
    def operate(self):
        try:
            ticket_name = helpers.UserIOHelper.acceptTicketName()
            ticket_quantity = helpers.UserIOHelper.acceptTicketQuantity()
            ticket_price = helpers.UserIOHelper.acceptTicketPrice()
            ticket_date = helpers.UserIOHelper.acceptDate()
            if ticket_name not in helpers.ResourcesHelper.getTicketInfo():
                raise exceptions.WrongTicketNameException
            self.updateTicket(ticket_name, ticket_price, ticket_quantity, ticket_date)
        except exceptions.WrongFormatException as e:
            print(str(e))
            print('\nUpdate failed, ending session...')
        except exceptions.WrongTicketNameException:
            print('\nThe ticket name you entered cannot be found, ending session...')
    def updateTicket(self, ticket_name, ticket_price, ticket_quantity, ticket_date):
        # Record the transaction first, then mutate the in-memory record.
        helpers.TransactionsHelper.newTicketTransaction("update", self.username, ticket_name, ticket_price, ticket_quantity, ticket_date)
        helpers.ResourcesHelper.getTicketInfo()[ticket_name]['price'] = ticket_price
        helpers.ResourcesHelper.getTicketInfo()[ticket_name]['number'] = ticket_quantity
        helpers.ResourcesHelper.getTicketInfo()[ticket_name]['date'] = ticket_date
'''
User logout.
'''
class LogoutSession(LoggedInSession):
    """'logout' command screen; constructing it requires a logged-in user."""
    def __init__(self, username):
        super().__init__(username)
    def routing(self):
        # Hand control back to an anonymous landing screen.
        return LandingSession(None)
    def operate(self):
        print('\nLogout Successfully!')
'''
Exiting the program.
'''
class ExitSession(UnloggedInSession):
    """'exits' command screen; only reachable while logged out."""
    def __init__(self, username):
        super().__init__(username)
    def routing(self):
        # Returning None signals the main loop to terminate.
        return None
    def operate(self):
        print('\nSaving transactions & exit...')
'''
Selling session.
'''
class SellSession(LoggedInSession):
    """List a new ticket for sale: prompts for name/quantity/price/date
    and stores the listing under the seller's email."""
    # only appear after user logged in
    def __init__(self, username):
        super().__init__(username)
    def operate(self):
        print('\nSelling Session starts...')
        try:
            ticket_name = helpers.UserIOHelper.acceptTicketName()
            # Ticket names are unique keys; refuse duplicates.
            if ticket_name in helpers.ResourcesHelper.getTicketInfo():
                raise exceptions.WrongTicketNameException
            ticket_quantity = helpers.UserIOHelper.acceptTicketQuantity()
            ticket_price = helpers.UserIOHelper.acceptTicketPrice()
            ticket_date = helpers.UserIOHelper.acceptDate()
            self.addNewTicket(ticket_name, ticket_price, ticket_quantity, ticket_date)
        except exceptions.WrongFormatException as e:
            print(str(e))
            print('\nAdd new ticket failed, ending session...')
        except exceptions.WrongTicketNameException:
            print('\nTicket with this name already exist, ending session...')
        # The two handlers below are not raised in this try body; they
        # presumably guard exceptions thrown by the helpers -- confirm.
        except exceptions.WrongTicketQuantityException:
            print('\nThe ticket quantity you entered is not available, ending session...')
        except exceptions.WrongTicketPriceException as e:
            print(str(e))
            print('\nThe ticket price you entered is not available, ending session...')
    def addNewTicket(self, ticket_name, ticket_price, ticket_quantity, ticket_date):
        # Record the transaction, then insert the in-memory listing keyed
        # by ticket name, tagged with the seller's email.
        helpers.TransactionsHelper.newTicketTransaction("sell", self.username, ticket_name, ticket_price, ticket_quantity, ticket_date)
        helpers.ResourcesHelper.getTicketInfo()[ticket_name] = {
            'price': ticket_price,
            'number': ticket_quantity,
            'email': helpers.ResourcesHelper.getUserInfo()[self.username]['email'],
            'date': ticket_date
        }
        print('\nTicket info added successfully.')
'''
Buying session.
'''
class BuySession(LoggedInSession):
    """Buy tickets: lists what is on sale, validates the chosen
    name/quantity against stock and the buyer's balance, then debits the
    balance and decrements stock."""
    def __init__(self, username):
        super().__init__(username)
    def operate(self):
        print('\nBuying Session starts...')
        self.printTicketList()
        try:
            ticket_name = helpers.UserIOHelper.acceptTicketName()
            if ticket_name not in helpers.ResourcesHelper.getTicketInfo():
                raise exceptions.WrongTicketNameException
            ticket_quantity = helpers.UserIOHelper.acceptTicketQuantity()
            if ticket_quantity > helpers.ResourcesHelper.getTicketInfo()[ticket_name]['number']:
                raise exceptions.WrongTicketQuantityException
            ticket_price = helpers.ResourcesHelper.getTicketInfo()[ticket_name]['price']
            if self.checkBalance(ticket_price, ticket_quantity):
                self.processOrder(ticket_name, ticket_price, ticket_quantity)
            else:
                print('\nInsufficient funds, ending session...')
        except exceptions.WrongFormatException as e:
            print(str(e))
            print('\nBuy ticket failed, ending session...')
        except exceptions.WrongTicketNameException:
            print('\nThe ticket name you entered cannot be found, ending session...')
        except exceptions.WrongTicketQuantityException:
            print('\nThe ticket quantity you entered is not available, ending session...')
    def printTicketList(self):
        # Tab-separated table of every listed ticket.
        print('\nTicket avilable:\nTicket Name\tPrice\tNumber\tDate')
        for i in helpers.ResourcesHelper.getTicketInfo():
            print(i + '\t' + str(helpers.ResourcesHelper.getTicketInfo()[i]['price']) + '\t' + str(helpers.ResourcesHelper.getTicketInfo()[i]['number']) + '\t' + str(helpers.ResourcesHelper.getTicketInfo()[i]['date']))
    def checkBalance(self, ticket_price, ticket_quantity):
        # True when the buyer can afford quantity * price.
        return helpers.ResourcesHelper.getUserInfo()[self.username]['balance'] >= ticket_price * ticket_quantity
    def processOrder(self, ticket_name, ticket_price, ticket_quantity):
        # Debit the buyer, reduce stock, then record the transaction.
        helpers.ResourcesHelper.getUserInfo()[self.username]['balance'] -= ticket_price * ticket_quantity
        helpers.ResourcesHelper.getTicketInfo()[ticket_name]['number'] -= ticket_quantity
        helpers.TransactionsHelper.newTicketTransaction("buy", self.username, ticket_name, ticket_price, ticket_quantity, helpers.ResourcesHelper.getTicketInfo()[ticket_name]['date'])
        print('\nTicket "' + ticket_name + '" sold successfully.')
0d426d8765e77fea1c98581ceb39c83d5c0fb5f4 | 215 | py | Python | parts/edag_diodes.py | baryluk/edag | 675107e2078bcecb30768a5e96c7431104352024 | [
"BSL-1.0"
] | null | null | null | parts/edag_diodes.py | baryluk/edag | 675107e2078bcecb30768a5e96c7431104352024 | [
"BSL-1.0"
] | null | null | null | parts/edag_diodes.py | baryluk/edag | 675107e2078bcecb30768a5e96c7431104352024 | [
"BSL-1.0"
] | null | null | null | #!/usr/bin/env python3
# Diodes
"1N4148"
"1N5817G"
"BAT43" # Zener
"1N457"
# The B-C junction of many transistors can also be used as a diode, i.e. 2SC1815, 2SA9012, etc., with very small leakage current (~1pA at -4V).
| 21.5 | 136 | 0.702326 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.948837 |
0d42aee94bc382588eed7920090962df76b33a83 | 1,574 | py | Python | PageObjectLibrary/loginpage_v2_using_composition.py | rama-bornfree/simple-pageobject | 6a66a256867f7b1604005818b12e7c9f8dc6c027 | [
"Apache-2.0"
] | null | null | null | PageObjectLibrary/loginpage_v2_using_composition.py | rama-bornfree/simple-pageobject | 6a66a256867f7b1604005818b12e7c9f8dc6c027 | [
"Apache-2.0"
] | null | null | null | PageObjectLibrary/loginpage_v2_using_composition.py | rama-bornfree/simple-pageobject | 6a66a256867f7b1604005818b12e7c9f8dc6c027 | [
"Apache-2.0"
] | null | null | null | from pageobject import PageObject
from homepage import HomePage
from locatormap import LocatorMap
from robot.api import logger
class LoginPage():
    """Page object for the demo login page (composition-based variant)."""

    PAGE_TITLE = "Login - PageObjectLibrary Demo"
    PAGE_URL = "/login.html"

    # These are accessible via dot notation with self.locator
    # (eg: self.locator.username, etc)
    _locators = {
        "username": "id=id_username",
        "password": "id=id_password",
        "submit_button": "id=id_submit",
    }

    def __init__(self):
        self.logger = logger
        self.po = PageObject()
        self.se2lib = self.po.se2lib
        self.locator = LocatorMap(getattr(self, "_locators", {}))

    def navigate_to(self, url):
        """Open *url*; return a HomePage object for yahoo URLs."""
        # Bug fix: the original logged "Navigating to %s".format(url) --
        # str.format() does not substitute %s, so the URL never appeared.
        logger.console("Navigating to {}".format(url))
        self.se2lib.go_to(url)
        if 'yahoo' in url:
            logger.console("Navigating to homepage")
            return HomePage()

    def create_browser(self, browser_name):
        """Start a new webdriver session for *browser_name*."""
        self.se2lib.create_webdriver(browser_name)

    def enter_username(self, username):
        """Enter the given string into the username field"""
        self.se2lib.input_text(self.locator.username, username)

    def enter_password(self, password):
        """Enter the given string into the password field"""
        self.se2lib.input_text(self.locator.password, password)

    def click_the_submit_button(self):
        """Click the submit button, and wait for the page to reload"""
        with self.po._wait_for_page_refresh():
            self.se2lib.click_button(self.locator.submit_button)
        return HomePage()
0d442d2358091616d21b759e490009907755740d | 2,569 | py | Python | reefbot-controller/bin/ButtonMapper.py | MRSD2018/reefbot-1 | a595ca718d0cda277726894a3105815cef000475 | [
"MIT"
] | null | null | null | reefbot-controller/bin/ButtonMapper.py | MRSD2018/reefbot-1 | a595ca718d0cda277726894a3105815cef000475 | [
"MIT"
] | null | null | null | reefbot-controller/bin/ButtonMapper.py | MRSD2018/reefbot-1 | a595ca718d0cda277726894a3105815cef000475 | [
"MIT"
] | null | null | null | '''Maps buttons for the Reefbot control.'''
import roslib; roslib.load_manifest('reefbot-controller')
import rospy
class JoystickButtons:
DPAD_LR = 4
DPAD_UD = 5
ALOG_LEFT_UD = 1
ALOG_LEFT_LR = 0
ALOG_RIGHT_UD = 3
ALOG_RIGHT_LR = 2
BUTTON_1 = 0
BUTTON_2 = 1
BUTTON_3 = 2
BUTTON_4 = 3
BUTTON_5 = 4
BUTTON_6 = 5
BUTTON_7 = 6
BUTTON_8 = 7
BUTTON_9 = 8
BUTTON_10 = 9
BUTTON_11 = 10
BUTTON_12 = 11
class ButtonMapper:
def __init__(self):
self.diveDownButton = rospy.get_param("~dive_down_button",
JoystickButtons.BUTTON_7)
self.diveUpButton = rospy.get_param("~dive_up_button",
JoystickButtons.BUTTON_5)
self.diveAxis = rospy.get_param("~dive_axis", None)
self.leftTurnButton = rospy.get_param("~left_turn_button", None)
self.rightTurnButton = rospy.get_param("~right_turn_button", None)
self.turnAxis = rospy.get_param("~turn_axis",
JoystickButtons.ALOG_LEFT_LR)
self.fwdButton = rospy.get_param("~fwd_button", None)
self.backButton = rospy.get_param("~back_button", None)
self.fwdBackAxis = rospy.get_param("~fwd_back_axis",
JoystickButtons.ALOG_LEFT_UD)
def GetFwdAxis(self, joyMsg):
'''Returns the value of the move fwd/backward axis. +1 is full forward.'''
return self._GetAxisValue(joyMsg, self.fwdBackAxis, self.fwdButton,
self.backButton)
def GetTurnAxis(self, joyMsg):
'''Returns the value of the turning axis. +1 is full left.'''
return self._GetAxisValue(joyMsg, self.turnAxis, self.leftTurnButton,
self.rightTurnButton)
def GetDiveAxis(self, joyMsg):
'''Returns the value of the dive axis. +1 is full up.'''
return self._GetAxisValue(joyMsg, self.diveAxis, self.diveUpButton,
self.diveDownButton)
def _GetAxisValue(self, joyMsg, axis, posButton, negButton):
if axis is not None and axis >= 0:
return joyMsg.axes[axis]
axisVal = 0.
if joyMsg.buttons[posButton] and not joyMsg.buttons[negButton]:
axisVal = 1.
if not joyMsg.buttons[posButton] and joyMsg.buttons[negButton]:
axisVal = -1.
return axisVal
def GetCeilingDisable(self, joyMsg):
'''Returns the value of the button that disables the ceiling.'''
return self._GetButtonValue(joyMsg, JoystickButtons.BUTTON_10)
def _GetButtonValue(self, joyMsg, button):
return joyMsg.buttons[button]
| 32.935897 | 78 | 0.651615 | 2,449 | 0.953289 | 0 | 0 | 0 | 0 | 0 | 0 | 460 | 0.179058 |
0d44e5e6d9f36901e93c880a5e517173d618c59a | 159 | py | Python | env.py | Code-Institute-Submissions/Lordph8-Project3 | fdefe7ffb7c53e8cb2d70b8c760e4efd27bb4517 | [
"MIT"
] | null | null | null | env.py | Code-Institute-Submissions/Lordph8-Project3 | fdefe7ffb7c53e8cb2d70b8c760e4efd27bb4517 | [
"MIT"
] | null | null | null | env.py | Code-Institute-Submissions/Lordph8-Project3 | fdefe7ffb7c53e8cb2d70b8c760e4efd27bb4517 | [
"MIT"
] | null | null | null | import os
os.environ.setdefault("MONGO_URI", "mongodb+srv://root:Thisisarandompassword@myfirstcluster-qpzww.mongodb.net/theRecipe?retryWrites=true&w=majority") | 79.5 | 149 | 0.836478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.779874 |
0d451f62800e63abede99f96599f3535638b9e60 | 1,482 | py | Python | tests/v1/test_synthetics_test_config.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | tests/v1/test_synthetics_test_config.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | tests/v1/test_synthetics_test_config.py | MichaelTROEHLER/datadog-api-client-python | 12c46626622fb1277bb1e172753b342c671348bd | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import absolute_import
import sys
import unittest
import datadog_api_client.v1
try:
from datadog_api_client.v1.model import synthetics_assertion
except ImportError:
synthetics_assertion = sys.modules[
'datadog_api_client.v1.model.synthetics_assertion']
try:
from datadog_api_client.v1.model import synthetics_browser_variable
except ImportError:
synthetics_browser_variable = sys.modules[
'datadog_api_client.v1.model.synthetics_browser_variable']
try:
from datadog_api_client.v1.model import synthetics_test_request
except ImportError:
synthetics_test_request = sys.modules[
'datadog_api_client.v1.model.synthetics_test_request']
from datadog_api_client.v1.model.synthetics_test_config import SyntheticsTestConfig
class TestSyntheticsTestConfig(unittest.TestCase):
"""SyntheticsTestConfig unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSyntheticsTestConfig(self):
"""Test SyntheticsTestConfig"""
# FIXME: construct object with mandatory attributes with example values
# model = SyntheticsTestConfig() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 30.244898 | 108 | 0.762483 | 399 | 0.269231 | 0 | 0 | 0 | 0 | 0 | 0 | 604 | 0.407557 |
0d45995076dc8988bcc58b7a91a3417a1a1f526d | 1,803 | py | Python | parlai/tasks/saferdialogues/agents.py | twstewart42/ParlAI | db8dc7b684a089427ab5338a67fe50220d2fa622 | [
"MIT"
] | 1 | 2022-03-13T21:02:22.000Z | 2022-03-13T21:02:22.000Z | parlai/tasks/saferdialogues/agents.py | twstewart42/ParlAI | db8dc7b684a089427ab5338a67fe50220d2fa622 | [
"MIT"
] | 1 | 2022-01-18T09:14:27.000Z | 2022-01-18T09:14:27.000Z | parlai/tasks/saferdialogues/agents.py | twstewart42/ParlAI | db8dc7b684a089427ab5338a67fe50220d2fa622 | [
"MIT"
] | 1 | 2022-03-30T14:05:29.000Z | 2022-03-30T14:05:29.000Z | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
import os
import copy
from parlai.core.teachers import ParlAIDialogTeacher
from .build import build
def _path(opt):
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
return os.path.join(
opt['datapath'], 'saferdialogues', 'saferdialogues_dataset', dt + '.txt'
)
class SaferDialoguesTeacher(ParlAIDialogTeacher):
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt)
agent = parser.add_argument_group('SaFeRDialogues options')
agent.add_argument(
'--recovery',
type=bool,
default=True,
help="Whether or not to include the recovery utterance",
)
return parser
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['parlaidialogteacher_datafile'] = _path(opt)
super().__init__(opt, shared)
def _setup_data(self, path):
super()._setup_data(path)
if not self.opt['recovery']:
for i, ep in enumerate(self.episodes):
# make the signaling msg the label and remove the recovery msg
texts = ep[0]['text'].split('\n')
self.episodes[i][0].force_set('text', '\n'.join(texts[:-1]))
self.episodes[i][0].force_set('labels', [texts[-1]])
class DefaultTeacher(SaferDialoguesTeacher):
pass
| 31.631579 | 80 | 0.645591 | 1,165 | 0.646145 | 0 | 0 | 456 | 0.252912 | 0 | 0 | 518 | 0.287299 |
0d475beab3b1cd6b2f3e149dfdb979b4179e340d | 614 | py | Python | FatherSon/HelloWorld2_source_code/listing_7-1.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2019-01-04T05:47:50.000Z | 2019-01-04T05:47:50.000Z | FatherSon/HelloWorld2_source_code/listing_7-1.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | null | null | null | FatherSon/HelloWorld2_source_code/listing_7-1.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | null | null | null | # Listing_7-1.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Using comparison operators
num1 = float(raw_input("Enter the first number: "))
num2 = float(raw_input("Enter the second number: "))
if num1 < num2:
print num1, "is less than", num2
if num1 > num2:
print num1, "is greater than", num2
if num1 == num2: #Remember that this is a double equal sign
print num1, "is equal to", num2
if num1 != num2:
print num1, "is not equal to", num2
| 34.111111 | 82 | 0.63355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 374 | 0.609121 |
0d48928ed6b737fd788b3269ad8bbc5084f62d61 | 6,658 | py | Python | landmark.py | Ank221199/cnn-facial-landmark | 5b6db58c59e36a4851917c86a5d252230d228b2c | [
"MIT"
] | null | null | null | landmark.py | Ank221199/cnn-facial-landmark | 5b6db58c59e36a4851917c86a5d252230d228b2c | [
"MIT"
] | null | null | null | landmark.py | Ank221199/cnn-facial-landmark | 5b6db58c59e36a4851917c86a5d252230d228b2c | [
"MIT"
] | 1 | 2019-07-20T01:37:29.000Z | 2019-07-20T01:37:29.000Z | """
Convolutional Neural Network for facial landmarks detection.
"""
import argparse
import cv2
import numpy as np
import tensorflow as tf
from model import LandmarkModel
# Add arguments parser to accept user specified arguments.
parser = argparse.ArgumentParser()
parser.add_argument('--train_record', default='train.record', type=str,
help='Training record file')
parser.add_argument('--val_record', default='validation.record', type=str,
help='validation record file')
parser.add_argument('--model_dir', default='train', type=str,
help='training model directory')
parser.add_argument('--export_dir', default=None, type=str,
help='directory to export the saved model')
parser.add_argument('--train_steps', default=1000, type=int,
help='training steps')
parser.add_argument('--num_epochs', default=None, type=int,
help='epochs of training dataset')
parser.add_argument('--batch_size', default=16, type=int,
help='training batch size')
# CAUTION: The image width, height and channels should be consist with your
# training data. Here they are set as 128 to be complied with the tutorial.
# Mismatching of the image size will cause error of mismatching tensor shapes.
IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNEL = 3
def cnn_model_fn(features, labels, mode):
"""
The model function for the network.
"""
# Construct the network.
model = LandmarkModel(output_size=68*2)
logits = model(features)
# Make prediction for PREDICATION mode.
predictions = logits
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'predict': tf.estimator.export.PredictOutput(predictions)
})
# Calculate loss using mean squared error.
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)
# Create a tensor logging purposes.
tf.identity(loss, name='loss')
tf.summary.scalar('loss', loss)
# Configure the train OP for TRAIN mode.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
else:
train_op = None
# Create a metric.
rmse_metrics = tf.metrics.root_mean_squared_error(
labels=labels,
predictions=predictions)
metrics = {'eval_mse': rmse_metrics}
# A tensor for metric logging
tf.identity(rmse_metrics[1], name='root_mean_squared_error')
tf.summary.scalar('root_mean_squared_error', rmse_metrics[1])
# Generate a summary node for the images
tf.summary.image('images', features, max_outputs=6)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics
)
def _parse_function(record):
"""
Extract data from a `tf.Example` protocol buffer.
"""
# Defaults are not specified since both keys are required.
keys_to_features = {
'image/filename': tf.FixedLenFeature([], tf.string),
'image/encoded': tf.FixedLenFeature([], tf.string),
'label/marks': tf.FixedLenFeature([136], tf.float32),
}
parsed_features = tf.parse_single_example(record, keys_to_features)
# Extract features from single example
image_decoded = tf.image.decode_image(parsed_features['image/encoded'])
image_reshaped = tf.reshape(
image_decoded, [IMG_HEIGHT, IMG_WIDTH, IMG_CHANNEL])
points = tf.cast(parsed_features['label/marks'], tf.float32)
return image_reshaped, points
def input_fn(record_file, batch_size, num_epochs=None, shuffle=True):
"""
Input function required for TensorFlow Estimator.
"""
dataset = tf.data.TFRecordDataset(record_file)
# Use `Dataset.map()` to build a pair of a feature dictionary and a label
# tensor for each example.
dataset = dataset.map(_parse_function)
if shuffle is True:
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.batch(batch_size)
dataset = dataset.repeat(num_epochs)
# Make dataset iterator.
iterator = dataset.make_one_shot_iterator()
# Return the feature and label.
image, label = iterator.get_next()
return image, label
def serving_input_receiver_fn():
"""An input function for TensorFlow Serving."""
def _preprocess_image(image_bytes):
"""Preprocess a single raw image."""
image = tf.image.decode_jpeg(image_bytes, channels=IMG_CHANNEL)
image.set_shape((None, None, IMG_CHANNEL))
image = tf.image.resize_images(image, [IMG_HEIGHT, IMG_WIDTH],
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
return image
image_bytes_list = tf.compat.v1.placeholder(
shape=[None], dtype=tf.string,
name='encoded_image_string_tensor')
image = tf.map_fn(_preprocess_image, image_bytes_list,
dtype=tf.float32, back_prop=False)
return tf.estimator.export.TensorServingInputReceiver(
features=image,
receiver_tensors={'image_bytes': image_bytes_list})
def main(unused_argv):
"""Train, eval and export the model."""
# Parse the arguments.
args = parser.parse_args(unused_argv[1:])
# Create the Estimator
estimator = tf.estimator.Estimator(
model_fn=cnn_model_fn, model_dir=args.model_dir)
# Train for N steps.
tf.logging.info('Starting to train.')
estimator.train(
input_fn=lambda: input_fn(record_file=args.train_record,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
shuffle=True),
steps=args.train_steps)
# Do evaluation after training.
tf.logging.info('Starting to evaluate.')
evaluation = estimator.evaluate(
input_fn=lambda: input_fn(record_file=args.val_record,
batch_size=1,
num_epochs=1,
shuffle=False))
print(evaluation)
# Export trained model as SavedModel.
if args.export_dir is not None:
estimator.export_savedmodel(args.export_dir, serving_input_receiver_fn)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main)
| 34.14359 | 79 | 0.658306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,876 | 0.281766 |
0d492d307402409cd402271070306ff0cee7ae12 | 2,593 | py | Python | read_testdata.py | veralily/MLKD-mission3-resentForIC | f652f80ad848fca321f912e9c1594517f1942e42 | [
"MIT"
] | null | null | null | read_testdata.py | veralily/MLKD-mission3-resentForIC | f652f80ad848fca321f912e9c1594517f1942e42 | [
"MIT"
] | null | null | null | read_testdata.py | veralily/MLKD-mission3-resentForIC | f652f80ad848fca321f912e9c1594517f1942e42 | [
"MIT"
] | null | null | null | import skimage.io # bug. need to import this before tensorflow
import skimage.transform # bug. need to import this before tensorflow
from resnet_train import train
from resnet import inference
import tensorflow as tf
import time
import os
import sys
import re
import numpy as np
from image_processing import image_preprocessing
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('filename_list', 'check.doc.list', 'file list')
'''def file_list(filename_list):
reader = open(filename_list, 'r')
filenames = reader.readlines()
filenames = [int(f) for f in filenames]
return filenames'''
def file_list(data_dir):
i = 0
filenames = []
for root, dirs, files in os.walk(data_dir):
for file in files:
if os.path.splitext(file)[1] == '.jpg':
filename = os.path.splitext(file)[0]
i = i + 1
filenames.append(int(filename))
print("number of files")
print(i)
return filenames
def load_data(data_dir):
data = []
start_time = time.time()
files = file_list(data_dir)
duration = time.time() - start_time
print "took %f sec" % duration
for img_fn in files:
img_fn = str(img_fn) + '.jpg'
fn = os.path.join(data_dir, img_fn)
data.append(fn)
return data
def distorted_inputs(data_dir):
filenames = load_data(data_dir)
files = []
images = []
i = 0
files_b = []
images_b = []
height = FLAGS.input_size
width = FLAGS.input_size
depth = 3
step = 0
for filename in filenames:
image_buffer = tf.read_file(filename)
bbox = []
train = False
image = image_preprocessing(image_buffer, bbox, train, 0)
files_b.append(filename)
images_b.append(image)
i = i + 1
#print(image)
if i == 20:
print(i)
files.append(files_b)
images_b = tf.reshape(images_b, [20, height, width, depth])
images.append(images_b)
files_b = []
images_b = []
i = 0
#files = files_b
#images = tf.reshape(images_b, [13, height, width, depth])
images = np.array(images, ndmin=1)
#images = tf.cast(images, tf.float32)
#images = tf.reshape(images, shape=[-1, height, width, depth])
print(type(files))
print(type(images))
print(images.shape)
#files = tf.reshape(files, [len(files)])
# print(files)
# print(images)
return files, images
_, images = distorted_inputs("check_ic//check")
| 22.745614 | 74 | 0.600463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 617 | 0.237948 |
0d4e4c53bfb40749ea8ec2131b648151af59d377 | 2,305 | py | Python | userbot/plugins/thordp.py | indianSammy07/Wolf | aba9ecce1860f86f81a52722062531590521ad7f | [
"MIT"
] | null | null | null | userbot/plugins/thordp.py | indianSammy07/Wolf | aba9ecce1860f86f81a52722062531590521ad7f | [
"MIT"
] | null | null | null | userbot/plugins/thordp.py | indianSammy07/Wolf | aba9ecce1860f86f81a52722062531590521ad7f | [
"MIT"
] | null | null | null |
import os
from datetime import datetime
from PIL import Image, ImageDraw, ImageFont
from pySmartDL import SmartDL
from telethon.tl import functions
from uniborg.util import admin_cmd
import asyncio
import shutil
import random, re
FONT_FILE_TO_USE = "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"
#Add telegraph media links of profile pics that are to be used
TELEGRAPH_MEDIA_LINKS = ["https://telegra.ph/file/2bc2e85fb6b256efc4088.jpg",
"https://telegra.ph/file/443fff8a7db51d390e1a7.jpg",
"https://telegra.ph/file/e49bbb9e21383f8231d85.jpg",
"https://telegra.ph/file/d6875213197a9d93ff181.jpg",
"https://telegra.ph/file/ec7da24872002e75e6af8.jpg",
"https://telegra.ph/file/468a2af386d10cd45df8f.jpg",
"https://telegra.ph/file/59c7ce59289d80f1fe830.jpg"
]
@borg.on(admin_cmd(pattern="thordp ?(.*)"))
async def autopic(event):
while True:
piclink = random.randint(0, len(TELEGRAPH_MEDIA_LINKS) - 1)
AUTOPP = TELEGRAPH_MEDIA_LINKS[piclink]
downloaded_file_name = "./ravana/original_pic.png"
downloader = SmartDL(AUTOPP, downloaded_file_name, progress_bar=True)
downloader.start(blocking=False)
photo = "photo_pfp.png"
while not downloader.isFinished():
place_holder = None
shutil.copy(downloaded_file_name, photo)
im = Image.open(photo)
current_time = datetime.now().strftime("@MrSemmy \n \nTime: %H:%M:%S \nDate: %d/%m/%y")
img = Image.open(photo)
drawn_text = ImageDraw.Draw(img)
fnt = ImageFont.truetype(FONT_FILE_TO_USE, 30)
drawn_text.text((30, 50), current_time, font=fnt, fill=(102, 209, 52))
img.save(photo)
file = await event.client.upload_file(photo) # pylint:disable=E0602
try:
await event.client(functions.photos.DeletePhotosRequest(await event.client.get_profile_photos("me", limit=1)))
await event.client(functions.photos.UploadProfilePhotoRequest( # pylint:disable=E0602
file
))
os.remove(photo)
await asyncio.sleep(600)
except:
return
| 40.438596 | 122 | 0.633839 | 0 | 0 | 0 | 0 | 1,365 | 0.592191 | 1,321 | 0.573102 | 619 | 0.268547 |
0d4e981d336496f51c5ebd89178a51218846e23a | 514 | py | Python | visualize/preprocess.py | peitaosu/SpectralClustering | 5c679ce0f9f2974fa7be2abe9caa1265dbbd4a2c | [
"MIT"
] | null | null | null | visualize/preprocess.py | peitaosu/SpectralClustering | 5c679ce0f9f2974fa7be2abe9caa1265dbbd4a2c | [
"MIT"
] | null | null | null | visualize/preprocess.py | peitaosu/SpectralClustering | 5c679ce0f9f2974fa7be2abe9caa1265dbbd4a2c | [
"MIT"
] | null | null | null | import os, sys
class Preprocesser():
def __init__(self):
self.data = {
"X": [],
"Y": []
}
def process(self, input):
if not os.path.isfile(input):
print(input + " is not exists.")
sys.exit(-1)
with open(input) as in_file:
for line in in_file.readlines():
self.data["X"].append(float(line.split("\t")[0]))
self.data["Y"].append(float(line.split("\t")[1]))
return self.data
| 25.7 | 65 | 0.478599 | 496 | 0.964981 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.071984 |
0d51a39dc3763222336d58785130ec15857fbfe1 | 34,938 | py | Python | cogs/information.py | BioKZM/Colonist | ab3872c01b1bdc235e80065530fbed9953952919 | [
"MIT"
] | 5 | 2021-11-20T12:30:55.000Z | 2022-02-02T15:34:23.000Z | cogs/information.py | BioKZM/Colonist | ab3872c01b1bdc235e80065530fbed9953952919 | [
"MIT"
] | null | null | null | cogs/information.py | BioKZM/Colonist | ab3872c01b1bdc235e80065530fbed9953952919 | [
"MIT"
] | null | null | null | # import discord
# import asyncio
# import json
# from discord.ext import commands
# from discord.utils import get
# # from cogs.personalPoint import PersonalPoint
# from main import client
# from discord_ui import UI,Button
# from functions.userClass import User,experiences,levelNames
# from cogs.rank import getSortedMembers
# ui = UI(client)
# class Information(commands.Cog):
# def __init__(self,client):
# self.client = client
# @commands.command()
# async def bilgi(self,ctx):
# embed = discord.Embed(title="Üye Bilgi Ekranı",description="Üye bilgi ekranına hoş geldin.\nAşağıdaki butonlara basarak\nbilgisini almak istediğin içeriği görebilirsin.",color = 0x8d42f5,)
# embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)
# message = await ctx.channel.send(
# embed=embed,
# components = [
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# ),
# ]
# )
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# info[ctx.author.id] = message.id
# with open("files/infoMessage.json","w") as file:
# json.dump(info,file,indent=4)
# @ui.components.listening_component('seviye')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# else:
# await component.message.edit(components=[
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# disabled=True
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# disabled=True
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True,
# disabled=True
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# disabled=True
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True,
# disabled=True
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# disabled=True
# ),
# ])
# try:
# await component.respond()
# except:
# pass
# member = component.author
# user = User(member.id)
# if not member.bot:
# embed = discord.Embed(title=f"{member.name}#{member.discriminator} adlı kullanıcının değerleri",description="",color=0x8d42f5)
# embed.add_field(name="Mevcut değerler - 🏆 ",value="Seviyesi = **{}**\n Puanı = **{}**\n Rütbesi = **{}**\n".format(user.level,user.XP,user.levelName,inline=False))
# if user.isMaxLevel():
# embed.add_field(name="Bir sonraki rütbe - 🚀 ",value=f"**Maksimum seviyeye ulaştınız!**",inline=False)
# elif not user.isMaxLevel():
# if experiences[user.level] - user.XP <= 0:
# embed.add_field(name="Bir sonraki rütbe - 🚀 ",value=f"**{levelNames[user.getLevel(user.XP)]}** rütbesine ulaştın! Seviye atlamak için ses kanalına girebilirsin.",inline=False)
# else:
# embed.add_field(name="Bir sonraki rütbe - 🚀 ",value=f"**{levelNames[user.level]}** rütbesi için kalan puan = **{(experiences[user.level-2])-user.XP}**",inline=False)
# embed.set_author(name=component.author.display_name, icon_url=component.author.avatar_url)
# await component.message.edit(embed=embed,components=[
# Button(
# label="Geri",
# custom_id="geri",
# color=ButtonStyle.Grey,
# emoji="⬅️"
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# )
# ])
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# return
# try:
# await component.respond()
# except:
# pass
# @ui.components.listening_component('liderliktablosu')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# else:
# await component.message.edit(components=[
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# disabled=True
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# disabled=True
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True,
# disabled=True
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# disabled=True
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True,
# disabled=True
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# disabled=True
# ),
# ])
# try:
# await component.respond()
# except:
# pass
# sortedMembers = getSortedMembers(component)
# embed=discord.Embed(title="Sıralama",inline=False,color=0x8d42f5)
# embed.set_author(name=component.author.display_name, icon_url=component.author.avatar_url)
# count = 1
# for key,value in sortedMembers.items():
# embed.add_field(name="{} - {}".format(count,key),value="**Puan**: {}\n**Rütbe**: {}".format(value[0],value[1]),inline=False)
# count += 1
# if count == 11:break
# await component.message.edit(embed=embed,components=[
# Button(
# label="Geri",
# custom_id="geri",
# color=ButtonStyle.Grey,
# emoji="⬅️"
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# )
# ])
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# @ui.components.listening_component('detaylıbilgi')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# else:
# await component.message.edit(components=[
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# disabled=True
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# disabled=True
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True,
# disabled=True
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# disabled=True
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True,
# disabled=True
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# disabled=True
# ),
# ])
# liste = {}
# XP = {}
# for i in range(1,11):
# liste[f'level{i}'] = 0
# XP[f'xp{i}'] = ""
# if i == 1:
# XP[f"xp{i}"] += f"{levelNames[i-1]}"
# else:
# XP[f'xp{i}'] += f"{levelNames[i-1]} - {experiences[i-2]}"
# try:
# await component.respond()
# except:
# pass
# for member in client.get_all_members():
# if not member.bot:
# user = User(member.id)
# liste[f'level{user.level}'] += 1
# message = discord.Embed(title = "Detaylı Bilgi",description="**Aşağıda, hangi seviyede kaç kullanıcının bulunduğunu öğrenebilirsin**",color = 0x8d42f5)
# for level in range(1,11):
# XPs = XP[f'xp{level}']
# levels = liste[f'level{level}']
# if levels == 0:
# if XP[f'xp{level}'] == "Guest":
# message.add_field(name=f"*Seviye {level}* / {XPs}:",value=f"Bu seviyede herhangi biri yok.",inline=False)
# else:
# message.add_field(name=f"*Seviye {level}* / {XPs} XP:",value=f"Bu seviyede herhangi biri yok.",inline=False)
# else:
# if XP[f'xp{level}'] == "Guest":
# message.add_field(name=f"*Seviye {level}* / {XPs}:",value=f"**{levels}** kişi bu seviyede.",inline=False)
# else:
# message.add_field(name=f"*Seviye {level}* / {XPs} XP:",value=f"**{levels}** kişi bu seviyede.",inline=False)
# message.set_author(name=component.author.display_name, icon_url=component.author.avatar_url)
# await component.message.edit(embed=message,components=[
# Button(
# label="Geri",
# custom_id="geri",
# color=ButtonStyle.Grey,
# emoji="⬅️"
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# )
# ])
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# @ui.components.listening_component('görevler')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# else:
# await component.message.edit(components=[
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# disabled=True
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# disabled=True
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True,
# disabled=True
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# disabled=True
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True,
# disabled=True
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# disabled=True
# ),
# ])
# try:
# await component.respond()
# except:
# pass
# embed = discord.Embed(
# title = "Görevler",
# description = "**Bir gemiye atla ve bir oyun üret**;\nPC/Platform .............................. 10.0000 XP\nMobil ............................................... 5.000 XP\nHyperCasual................................... 2.000 XP\nGameJam.......................................... 1.000XP\n*Oyun yayınlanırsa kazanılan deneyim puanı iki katına çıkar*",
# color = 0x8d42f5
# )
# embed.add_field(
# name = "\n\nSunucu Takviyesi",
# value = "Her sunucu takviyesi başına **250 XP**",
# inline=False
# )
# embed.add_field(
# name = "\n\nSes Kanallarına Aktif Ol",
# value = "Dakika başına 1 XP\n*Not: Kazanılan XP, yayın ve kamera açma durumuna göre değişiklik gösterir.*",
# inline=False
# )
# embed.set_author(name=component.author.display_name, icon_url=component.author.avatar_url)
# await component.message.edit(embed=embed,components=[
# Button(
# label="Geri",
# custom_id="geri",
# color=ButtonStyle.Grey,
# emoji="⬅️"
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# )
# ])
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# @ui.components.listening_component('seviyeler')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# else:
# await component.message.edit(components=[
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# disabled=True
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# disabled=True
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True,
# disabled=True
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# disabled=True
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True,
# disabled=True
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# disabled=True
# ),
# ])
# try:
# await component.respond()
# except:
# pass
# embed = discord.Embed(
# title = "Seviyeler",
# description = "Aşağıda, sunucuda bulunan mevcut seviyeleri görebilirsin.",
# color = 0x8d42f5
# )
# embed.add_field(
# name = "Guest:",
# value = "Misafir statüsünde üye",
# inline = False,
# )
# embed.add_field(
# name = "Colony Member / 250 XP:",
# value = "Koloni üyesi",
# inline = False,
# )
# embed.add_field(
# name = "Open Crew / 1.987 XP:",
# value = "Açık gemilerde mürettebat olma hakkına sahip üye",
# inline = False,
# )
# embed.add_field(
# name = "Crew / 6.666 XP:",
# value = "Bütün gemilerde mürettebat olma hakkına sahip üye",
# inline = False,
# )
# embed.add_field(
# name = "Captain / 9.999 XP:",
# value = "Gemilere kaptanlık yapma hakkına sahip üye",
# inline = False,
# )
# embed.add_field(
# name = "Judge / 30.000 XP:",
# value = "Oy kullanma hakkına sahip üye",
# inline = False,
# )
# embed.add_field(
# name = "Colony Manager / 90.000 XP:",
# value = "Tasarlanacak oyunlara karar veren üye",
# inline = False,
# )
# embed.add_field(
# name = "Mars Lover / 300.000 XP:",
# value = "Yayınlanan bütün oyunlarda adına teşekkür edilen üye",
# inline = False,
# )
# embed.add_field(
# name = "Chief of the Colony / 900.000 XP:",
# value = "Kolonideki kamu yönetiminde, herhangi bir rolü alabilen üye, A.K.A Chief",
# inline = False,
# )
# embed.add_field(
# name = "Partner / 10.000.001 XP:",
# value = "Koloninin fahri ortağı",
# inline = False,
# )
# embed.set_author(name=component.author.display_name, icon_url=component.author.avatar_url)
# await component.message.edit(embed=embed,components = [
# Button(
# label="Geri",
# custom_id="geri",
# color=ButtonStyle.Grey,
# emoji="⬅️"
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# )
# ])
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# @ui.components.listening_component('geri')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# else:
# embed = discord.Embed(title="Üye Bilgi Ekranı",description="Üye bilgi ekranına hoş geldin.\nAşağıdaki butonlara basarak\nbilgisini almak istediğin içeriği görebilirsin.",color = 0x8d42f5)
# embed.set_author(name=component.author.display_name, icon_url=component.author.avatar_url)
# try:
# await component.respond()
# except:
# pass
# await component.message.edit(
# embed=embed,
# components = [
# Button(
# label = "Mevcut Seviye",
# custom_id = "seviye",
# color = ButtonStyle.Green,
# emoji = "📰",
# ),
# Button(
# label = "Liderlik Tablosu",
# custom_id = "liderliktablosu",
# color = ButtonStyle.Green,
# emoji = "📋",
# ),
# Button(
# label = "Detaylı Bilgi",
# custom_id = "detaylıbilgi",
# color = ButtonStyle.Green,
# emoji = "📜",
# new_line=True,
# ),
# Button(
# label="Görevler",
# custom_id = "görevler",
# color = ButtonStyle.Green,
# emoji = "🪧",
# ),
# Button(
# label="Seviyeler",
# custom_id = "seviyeler",
# color = ButtonStyle.Green,
# emoji = "🚩",
# new_line=True,
# ),
# Button(
# label = "Mesajı Sil",
# custom_id = "sil",
# color = ButtonStyle.Red,
# ),
# ]
# )
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# @ui.components.listening_component('sil')
# async def listening_component(component):
# with open("files/infoMessage.json") as file:
# info = json.load(file)
# try:
# if component.message.id != info[f"{component.author.id}"]:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# await component.message.delete()
# else:
# try:
# await component.respond()
# except:
# pass
# await component.message.delete()
# del info[component.author.id]
# with open("files/infoMessage.py","w",encoding="utf-8") as dosya:
# dosya.write("info = ")
# dosya.write(str(info))
# except KeyError:
# embed = discord.Embed(
# title = "Uyarı",
# description = "Bu senin mesajın değil!\nKendini mesajını oluşturmak için `!bilgi`",
# color = 0xFF0000
# )
# try:
# await component.respond()
# except:
# pass
# message = await component.channel.send(embed=embed)
# await asyncio.sleep(5)
# await message.delete()
# def setup(client):
# client.add_cog(Information(client)) | 41.642431 | 373 | 0.382563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34,170 | 0.967112 |
0d51ae17ec18a17c3026a04979a7cfd4797e2e95 | 1,680 | py | Python | infoblox_netmri/api/remote/models/wireless_hot_standby_grid_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | infoblox_netmri/api/remote/models/wireless_hot_standby_grid_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | infoblox_netmri/api/remote/models/wireless_hot_standby_grid_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class WirelessHotStandbyGridRemote(RemoteModel):
"""
| ``id:`` none
| ``attribute type:`` string
| ``WlsHotSbTimestamp:`` none
| ``attribute type:`` string
| ``MonitoredIPDotted:`` none
| ``attribute type:`` string
| ``MonitoredIPNumeric:`` none
| ``attribute type:`` string
| ``MonitoredDevice:`` none
| ``attribute type:`` string
| ``MonitoredDeviceID:`` none
| ``attribute type:`` string
| ``MonitoredDeviceType:`` none
| ``attribute type:`` string
| ``WlsHotSbMAC:`` none
| ``attribute type:`` string
| ``WlsHotSbInd:`` none
| ``attribute type:`` string
| ``WlsHotSbStatus:`` none
| ``attribute type:`` string
| ``WlsHotSbState:`` none
| ``attribute type:`` string
| ``WlsHotSbPollingFeq:`` none
| ``attribute type:`` string
| ``WlsHotSbPollingTimeout:`` none
| ``attribute type:`` string
"""
properties = ("id",
"WlsHotSbTimestamp",
"MonitoredIPDotted",
"MonitoredIPNumeric",
"MonitoredDevice",
"MonitoredDeviceID",
"MonitoredDeviceType",
"WlsHotSbMAC",
"WlsHotSbInd",
"WlsHotSbStatus",
"WlsHotSbState",
"WlsHotSbPollingFeq",
"WlsHotSbPollingTimeout",
)
| 21.265823 | 62 | 0.491071 | 1,511 | 0.899405 | 0 | 0 | 0 | 0 | 0 | 0 | 1,177 | 0.700595 |
0d52e3e144e777e66888716d6fd11de6d57fc9e0 | 11,717 | py | Python | tests/test_geometric_tests.py | mxrie-eve/Pyrr | 34802ba0393a6e7752cf55fadecd0d7824042dc0 | [
"Unlicense"
] | null | null | null | tests/test_geometric_tests.py | mxrie-eve/Pyrr | 34802ba0393a6e7752cf55fadecd0d7824042dc0 | [
"Unlicense"
] | null | null | null | tests/test_geometric_tests.py | mxrie-eve/Pyrr | 34802ba0393a6e7752cf55fadecd0d7824042dc0 | [
"Unlicense"
] | null | null | null | from pyrr.geometric_tests import ray_intersect_sphere
try:
import unittest2 as unittest
except:
import unittest
import numpy as np
from pyrr import geometric_tests as gt
from pyrr import line, plane, ray, sphere
class test_geometric_tests(unittest.TestCase):
    """Unit tests for pyrr.geometric_tests (imported as ``gt``).

    Conventions used throughout:
    - points are 1-D numpy arrays ([x, y] or [x, y, z]),
    - lines / segments are 2x3 arrays of two points,
    - rays come from ray.create(origin, direction),
    - planes from plane.create(normal, distance),
    - spheres from sphere.create(center, radius).
    """
    def test_import(self):
        # Smoke test: the module is importable both ways.
        import pyrr
        pyrr.geometric_tests
        from pyrr import geometric_tests
    def test_point_intersect_line(self):
        # A point on the infinite line through (0,0,0)-(2,2,2) is returned as-is.
        p = np.array([1.,1.,1.])
        l = np.array([[0.,0.,0.],[2.,2.,2.]])
        result = gt.point_intersect_line(p, l)
        self.assertTrue(np.array_equal(result, p))
    def test_point_intersect_line_invalid(self):
        # NOTE(review): despite the "_invalid" name, [3,3,3] still lies on the
        # *infinite* line through (0,0,0)-(2,2,2), so an intersection is
        # correctly expected here — the name, not the assertion, is misleading.
        p = np.array([3.,3.,3.])
        l = np.array([[0.,0.,0.],[2.,2.,2.]])
        result = gt.point_intersect_line(p, l)
        self.assertTrue(np.array_equal(result, p))
    def test_point_intersect_line_segment(self):
        p = np.array([1.,1.,1.])
        l = np.array([[0.,0.,0.],[2.,2.,2.]])
        result = gt.point_intersect_line_segment(p, l)
        self.assertTrue(np.array_equal(result, p))
    def test_point_intersect_line_segment_invalid(self):
        # [3,3,3] lies beyond the segment's far end, so no intersection.
        p = np.array([3.,3.,3.])
        l = np.array([[0.,0.,0.],[2.,2.,2.]])
        result = gt.point_intersect_line_segment(p, l)
        self.assertEqual(result, None)
    # --- point vs axis-aligned rectangle [min, max] ---
    def test_point_intersect_rectangle_valid_intersections_1(self):
        # Corner points count as inside.
        r = np.array([
            [0.0, 0.0],
            [5.0, 5.0]
        ])
        p = [ 0.0, 0.0]
        result = gt.point_intersect_rectangle(p, r)
        self.assertTrue(np.array_equal(result, p))
    def test_point_intersect_rectangle_valid_intersections_2(self):
        r = np.array([
            [0.0, 0.0],
            [5.0, 5.0]
        ])
        p = [ 5.0, 5.0]
        result = gt.point_intersect_rectangle(p, r)
        self.assertTrue(np.array_equal(result, p))
    def test_point_intersect_rectangle_valid_intersections_3(self):
        r = np.array([
            [0.0, 0.0],
            [5.0, 5.0]
        ])
        p = [ 1.0, 1.0]
        result = gt.point_intersect_rectangle(p, r)
        self.assertTrue(np.array_equal(result, p))
    def test_point_intersect_rectangle_invalid_intersections_1(self):
        r = np.array([
            [0.0, 0.0],
            [5.0, 5.0]
        ])
        p = [-1.0, 1.0]
        result = gt.point_intersect_rectangle(p, r)
        self.assertFalse(np.array_equal(result, p))
    def test_point_intersect_rectangle_invalid_intersections_2(self):
        r = np.array([
            [0.0, 0.0],
            [5.0, 5.0]
        ])
        p = [ 1.0, 10.0]
        result = gt.point_intersect_rectangle(p, r)
        self.assertFalse(np.array_equal(result, p))
    def test_point_intersect_rectangle_invalid_intersections_3(self):
        rect = np.array([
            [0.0, 0.0],
            [5.0, 5.0]
        ])
        point = [ 1.0,-1.0]
        result = gt.point_intersect_rectangle(point, rect)
        self.assertFalse(np.array_equal(result, point))
    def test_ray_intersect_plane(self):
        # Ray from below the plane y=0, pointing up: hits at the origin.
        # NOTE(review): asserting the result is *not* [0,1,0] is trivially true
        # for any correct implementation; this was likely meant to be
        # assertTrue(np.array_equal(result, [0.,0.,0.])) — confirm intent.
        r = ray.create([0.,-1.,0.],[0.,1.,0.])
        p = plane.create([0.,1.,0.], 0.)
        result = gt.ray_intersect_plane(r, p)
        self.assertFalse(np.array_equal(result, [0.,1.,0.]))
    def test_ray_intersect_plane_front_only(self):
        # Same geometry, but the ray approaches from behind the normal,
        # so a front-face-only query reports no hit.
        r = ray.create([0.,-1.,0.],[0.,1.,0.])
        p = plane.create([0.,1.,0.], 0.)
        result = gt.ray_intersect_plane(r, p, front_only=True)
        self.assertEqual(result, None)
    def test_ray_intersect_plane_invalid(self):
        # Ray parallel to the plane never intersects it.
        r = ray.create([0.,-1.,0.],[1.,0.,0.])
        p = plane.create([0.,1.,0.], 0.)
        result = gt.ray_intersect_plane(r, p)
        self.assertEqual(result, None)
    def test_point_closest_point_on_ray(self):
        l = line.create_from_points(
            [ 0.0, 0.0, 0.0 ],
            [10.0, 0.0, 0.0 ]
        )
        p = np.array([ 0.0, 1.0, 0.0])
        result = gt.point_closest_point_on_ray(p, l)
        self.assertTrue(np.array_equal(result, [ 0.0, 0.0, 0.0]))
    def test_point_closest_point_on_line(self):
        # Perpendicular foot of [0,1,0] onto the x-axis line is the origin.
        p = np.array([0.,1.,0.])
        l = np.array([[0.,0.,0.],[2.,0.,0.]])
        result = gt.point_closest_point_on_line(p, l)
        self.assertTrue(np.array_equal(result, [0.,0.,0.]), (result,))
    def test_point_closest_point_on_line_2(self):
        # Lines are infinite: a point past the segment endpoints maps to itself.
        p = np.array([3.,0.,0.])
        l = np.array([[0.,0.,0.],[2.,0.,0.]])
        result = gt.point_closest_point_on_line(p, l)
        self.assertTrue(np.array_equal(result, [3.,0.,0.]), (result,))
    def test_point_closest_point_on_line_segment(self):
        p = np.array([0.,1.,0.])
        l = np.array([[0.,0.,0.],[2.,0.,0.]])
        result = gt.point_closest_point_on_line_segment(p, l)
        self.assertTrue(np.array_equal(result, [0.,0.,0.]), (result,))
    # --- parallelism / coincidence predicates ---
    def test_vector_parallel_vector(self):
        # Scaled copies of the same direction are parallel.
        v1 = np.array([1.,0.,0.])
        v2 = np.array([2.,0.,0.])
        self.assertTrue(gt.vector_parallel_vector(v1,v2))
    def test_vector_parallel_vector_invalid(self):
        v1 = np.array([1.,0.,0.])
        v2 = np.array([0.,1.,0.])
        self.assertTrue(False == gt.vector_parallel_vector(v1,v2))
    def test_ray_parallel_ray(self):
        r1 = ray.create([0.,0.,0.],[1.,0.,0.])
        r2 = ray.create([1.,0.,0.],[2.,0.,0.])
        self.assertTrue(gt.ray_parallel_ray(r1,r2))
    def test_ray_parallel_ray_2(self):
        r1 = ray.create([0.,0.,0.],[1.,0.,0.])
        r2 = ray.create([1.,0.,0.],[0.,1.,0.])
        self.assertTrue(False == gt.ray_parallel_ray(r1,r2))
    def test_ray_parallel_ray_3(self):
        # Parallel allows a lateral offset between the two rays.
        r1 = ray.create([0.,0.,0.],[1.,0.,0.])
        r2 = ray.create([0.,1.,0.],[1.,0.,0.])
        self.assertTrue(gt.ray_parallel_ray(r1,r2))
    def test_ray_coincident_ray(self):
        # Coincident = parallel AND sharing the same supporting line.
        r1 = ray.create([0.,0.,0.],[1.,0.,0.])
        r2 = ray.create([1.,0.,0.],[2.,0.,0.])
        self.assertTrue(gt.ray_coincident_ray(r1,r2))
    def test_ray_coincident_ray_2(self):
        r1 = ray.create([0.,0.,0.],[1.,0.,0.])
        r2 = ray.create([1.,0.,0.],[0.,1.,0.])
        self.assertTrue(False == gt.ray_coincident_ray(r1,r2))
    def test_ray_coincident_ray_3(self):
        # Parallel but laterally offset rays are not coincident.
        r1 = ray.create([0.,0.,0.],[1.,0.,0.])
        r2 = ray.create([0.,1.,0.],[1.,0.,0.])
        self.assertTrue(False == gt.ray_coincident_ray(r1,r2))
    # --- ray vs axis-aligned bounding box ---
    def test_ray_intersect_aabb_valid_1(self):
        a = np.array([[-1.0,-1.0,-1.0], [ 1.0, 1.0, 1.0]])
        r = np.array([[ 0.5, 0.5, 0.0], [ 0.0, 0.0,-1.0]])
        result = gt.ray_intersect_aabb(r, a)
        self.assertTrue(np.array_equal(result, [ 0.5, 0.5,-1.0]))
    def test_ray_intersect_aabb_valid_2(self):
        a = np.array([[-1.0,-1.0,-1.0], [ 1.0, 1.0, 1.0]])
        r = np.array([[2.0, 2.0, 2.0], [ -1.0, -1.0, -1.0]])
        result = gt.ray_intersect_aabb(r, a)
        self.assertTrue(np.array_equal(result, [1.0, 1.0, 1.0]))
    def test_ray_intersect_aabb_valid_3(self):
        # Ray starting inside the box hits the exit face.
        a = np.array([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
        r = np.array([[.5, .5, .5], [0, 0, 1.0]])
        result = gt.ray_intersect_aabb(r, a)
        self.assertTrue(np.array_equal(result, [.5, .5, 1.0]))
    def test_ray_intersect_aabb_invalid_1(self):
        # Ray pointing away from the box: no intersection.
        a = np.array([[-1.0,-1.0,-1.0], [ 1.0, 1.0, 1.0]])
        r = np.array([[2.0, 2.0, 2.0], [ 1.0, 1.0, 1.0]])
        result = gt.ray_intersect_aabb(r, a)
        self.assertEqual(result, None)
    def test_point_height_above_plane(self):
        # Signed distance along the plane normal; several sub-cases in one test.
        pl = plane.create([0., 1., 0.], 1.)
        p = np.array([0., 1., 0.])
        result = gt.point_height_above_plane(p, pl)
        self.assertEqual(result, 0.)
        # Below the plane -> negative height.
        p = np.array([0., 0., 0.])
        result = gt.point_height_above_plane(p, pl)
        self.assertEqual(result, -1.)
        # Plane z=1 from three points, normal flipped to face -z.
        v1 = np.array([ 0.0, 0.0, 1.0])
        v2 = np.array([ 1.0, 0.0, 1.0])
        v3 = np.array([ 0.0, 1.0, 1.0])
        p = np.array([0.0, 0.0, 20.0])
        pl = plane.create_from_points(v1, v2, v3)
        pl = plane.invert_normal(pl)
        result = gt.point_height_above_plane(p, pl)
        self.assertEqual(result, 19.)
        # Point exactly on an xz-plane at y=5.
        pl = plane.create_xz(distance=5.)
        p = np.array([0., 5., 0.])
        h = gt.point_height_above_plane(p, pl)
        self.assertEqual(h, 0.)
    def test_point_closest_point_on_plane(self):
        # Projection of the point straight down onto the plane y=0.
        pl = np.array([ 0.0, 1.0, 0.0, 0.0])
        p = np.array([ 5.0, 20.0, 5.0])
        result = gt.point_closest_point_on_plane(p, pl)
        self.assertTrue(np.array_equal(result, [ 5.0, 0.0, 5.0]))
    # --- sphere vs sphere ---
    def test_sphere_does_intersect_sphere_1(self):
        # Identical spheres overlap.
        s1 = sphere.create()
        s2 = sphere.create()
        self.assertTrue(gt.sphere_does_intersect_sphere(s1, s2))
    def test_sphere_does_intersect_sphere_2(self):
        s1 = sphere.create()
        s2 = sphere.create([1.,0.,0.])
        self.assertTrue(gt.sphere_does_intersect_sphere(s1, s2))
    def test_sphere_does_intersect_sphere_3(self):
        # Exactly touching (distance == sum of radii) counts as intersecting.
        s1 = sphere.create()
        s2 = sphere.create([2.,0.,0.], 1.0)
        self.assertTrue(gt.sphere_does_intersect_sphere(s1, s2))
    def test_sphere_does_intersect_sphere_4(self):
        s1 = sphere.create()
        s2 = sphere.create([2.,0.,0.], 0.5)
        self.assertTrue(False == gt.sphere_does_intersect_sphere(s1, s2))
    def test_sphere_penetration_sphere_1(self):
        # Penetration depth = r1 + r2 - center distance, clamped at 0.
        s1 = sphere.create()
        s2 = sphere.create()
        self.assertEqual(gt.sphere_penetration_sphere(s1, s2), 2.0)
    def test_sphere_penetration_sphere_2(self):
        s1 = sphere.create()
        s2 = sphere.create([1.,0.,0.], 1.0)
        self.assertEqual(gt.sphere_penetration_sphere(s1, s2), 1.0)
    def test_sphere_penetration_sphere_3(self):
        s1 = sphere.create()
        s2 = sphere.create([2.,0.,0.], 1.0)
        self.assertEqual(gt.sphere_penetration_sphere(s1, s2), 0.0)
    def test_sphere_penetration_sphere_4(self):
        s1 = sphere.create()
        s2 = sphere.create([3.,0.,0.], 1.0)
        self.assertEqual(gt.sphere_penetration_sphere(s1, s2), 0.0)
    # --- ray vs sphere: 0, 1 or 2 intersection points ---
    def test_ray_intersect_sphere_no_solution_1(self):
        r = ray.create([0, 2, 0], [1, 0, 0])
        s = sphere.create([0, 0, 0], 1)
        intersections = ray_intersect_sphere(r, s)
        self.assertEqual(len(intersections), 0)
    def test_ray_intersect_sphere_no_solution_2(self):
        r = ray.create([0, 0, 0], [1, 0, 0])
        s = sphere.create([0, 2, 0], 1)
        intersections = ray_intersect_sphere(r, s)
        self.assertEqual(len(intersections), 0)
    def test_ray_intersect_sphere_one_solution_1(self):
        # Origin inside the sphere: only the exit point is reported.
        r = ray.create([0, 0, 0], [1, 0, 0])
        s = sphere.create([0, 0, 0], 1)
        intersections = ray_intersect_sphere(r, s)
        self.assertEqual(len(intersections), 1)
        np.testing.assert_array_almost_equal(intersections[0], np.array([1, 0, 0]), decimal=2)
    def test_ray_intersect_sphere_two_solutions_1(self):
        r = ray.create([-2, 0, 0], [1, 0, 0])
        s = sphere.create([0, 0, 0], 1)
        intersections = ray_intersect_sphere(r, s)
        self.assertEqual(len(intersections), 2)
        np.testing.assert_array_almost_equal(intersections[0], np.array([1, 0, 0]), decimal=2)
        np.testing.assert_array_almost_equal(intersections[1], np.array([-1, 0, 0]), decimal=2)
    def test_ray_intersect_sphere_two_solutions_2(self):
        # Oblique ray against an off-origin sphere; expected values were
        # presumably computed externally — verified only to 2 decimals.
        r = ray.create([2.48, 1.45, 1.78], [-3.1, 0.48, -3.2])
        s = sphere.create([1, 1, 0], 1)
        intersections = ray_intersect_sphere(r, s)
        self.assertEqual(len(intersections), 2)
        np.testing.assert_array_almost_equal(intersections[0], np.array([0.44, 1.77, -0.32]), decimal=2)
        np.testing.assert_array_almost_equal(intersections[1], np.array([1.41, 1.62, 0.67]), decimal=2)
if __name__ == '__main__':
unittest.main()
| 36.962145 | 104 | 0.581804 | 11,443 | 0.976615 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.000853 |
0d530a1783161928697682724069d35b73f59b12 | 6,462 | py | Python | python/tvm/micro/base.py | uwsampl/tvm | 29a85eb8f75dbd3a338db0b28c2121ca997eb2a4 | [
"Apache-2.0"
] | 2 | 2019-12-27T04:50:01.000Z | 2021-02-04T09:54:21.000Z | python/tvm/micro/base.py | uwsampl/tvm | 29a85eb8f75dbd3a338db0b28c2121ca997eb2a4 | [
"Apache-2.0"
] | null | null | null | python/tvm/micro/base.py | uwsampl/tvm | 29a85eb8f75dbd3a338db0b28c2121ca997eb2a4 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base definitions for micro."""
from __future__ import absolute_import
import logging
import os
import tvm.module
from tvm.contrib import graph_runtime, util
from tvm import relay
from .._ffi.function import _init_api
from .._ffi.libinfo import find_include_path
from .cross_compile import create_lib
SUPPORTED_DEVICE_TYPES = ["host", "openocd"]
class Session:
    """MicroTVM Session

    Compiles "c"-target TVM modules with a cross-compiler toolchain and
    loads them onto a low-level micro device.

    Example
    --------
    .. code-block:: python

      c_mod = ... # some module generated with "c" as the target
      device_type = "host"
      with tvm.micro.Session(device_type) as sess:
          sess.create_micro_mod(c_mod)
    """

    def __init__(self, device_type, binutil_prefix, port=0):
        """Stores parameters for initializing a micro device session.

        The session is not initialized until the constructed object is used
        in a `with` block.

        Parameters
        ----------
        device_type : str
            type of low-level device

        binutil_prefix : str
            binutil prefix to be used.  For example, a prefix of
            "riscv64-unknown-elf-" means "riscv64-unknown-elf-gcc" is used as
            the compiler and "riscv64-unknown-elf-ld" is used as the linker,
            etc.

        port : integer, optional
            port number of OpenOCD server
        """
        if device_type not in SUPPORTED_DEVICE_TYPES:
            raise RuntimeError("unknown micro device type \"{}\"".format(device_type))

        self.device_type = device_type
        self.binutil_prefix = binutil_prefix
        self.port = port

    def build(self, func: "relay.Function", params=None):
        """Create a graph runtime module with a micro device context.

        Parameters
        ----------
        func : relay.Function
            Relay function to build for the micro device.

        params : dict, optional
            parameter bindings for the build (defaults to no parameters)

        Return
        ------
        (mod, params) : (graph_runtime module, dict)
            runtime module bound to the micro context, and the build params
        """
        # BUG FIX: the original used a mutable `params={}` default, which is
        # shared across calls; use None as the sentinel instead.
        if params is None:
            params = {}
        with tvm.build_config(disable_vectorize=True):
            with relay.build_config(opt_level=3):
                graph, c_mod, params = relay.build(func, target="c", params=params)

        micro_mod = self.create_micro_mod(c_mod)
        ctx = tvm.micro_dev(0)
        mod = graph_runtime.create(graph, micro_mod, ctx)
        return mod, params

    def create_micro_mod(self, c_mod):
        """Produces a micro module from a given module.

        Parameters
        ----------
        c_mod : tvm.module.Module
            module with "c" as its target backend

        Return
        ------
        micro_mod : tvm.module.Module
            micro module for the target device
        """
        temp_dir = util.tempdir()
        # Save module source to temp file.
        lib_src_path = temp_dir.relpath("dev_lib.c")
        mod_src = c_mod.get_source()
        with open(lib_src_path, "w") as f:
            f.write(mod_src)
        # Compile to object file, then load it through the micro_dev loader.
        lib_obj_path = self.create_micro_lib(lib_src_path)
        micro_mod = tvm.module.load(lib_obj_path, "micro_dev")
        return micro_mod

    def create_micro_lib(self, src_path, obj_path=None):
        """Compiles code into a binary for the target micro device.

        Parameters
        ----------
        src_path : str
            path to source file

        obj_path : str, optional
            path to generated object file (defaults to same directory as
            `src_path`)

        Return
        ------
        obj_path : bytearray
            compiled binary file path (will match input `obj_path`, if it was specified)
        """
        def replace_suffix(s, new_suffix):
            # Swap the extension of `s` for `new_suffix`, or append one.
            if "." in os.path.basename(s):
                # There already exists an extension.
                return os.path.join(
                    os.path.dirname(s),
                    ".".join(os.path.basename(s).split(".")[:-1] + [new_suffix]))
            # No existing extension; we can just append.
            return s + "." + new_suffix

        if obj_path is None:
            obj_name = replace_suffix(src_path, "obj")
            obj_path = os.path.join(os.path.dirname(src_path), obj_name)
        # uTVM object files cannot have an ".o" suffix, because it triggers the
        # code path for creating shared objects in `tvm.module.load`.  So we replace
        # ".o" suffixes with ".obj".
        if obj_path.endswith(".o"):
            logging.warning(
                "\".o\" suffix in \"%s\" has been replaced with \".obj\"", obj_path)
            obj_path = replace_suffix(obj_path, "obj")

        options = ["-I" + path for path in find_include_path()] + ["-fno-stack-protector"]
        # TODO(weberlo): Consolidate `create_lib` and `contrib.cc.cross_compiler`
        create_lib(obj_path, src_path, options, self._compile_cmd())
        return obj_path

    def _compile_cmd(self):
        """Return the cross-compiler executable name, e.g. "<prefix>gcc"."""
        return "{}gcc".format(self.binutil_prefix)

    def __enter__(self):
        # First, find and compile the on-device runtime library that ships
        # with the TVM source tree.
        micro_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
        micro_device_dir = os.path.join(micro_dir, "..", "..", "..",
                                        "src", "runtime", "micro", "device")
        runtime_src_path = os.path.join(micro_device_dir, "utvm_runtime.c")
        tmp_dir = util.tempdir()
        runtime_lib_path = tmp_dir.relpath("utvm_runtime.obj")
        runtime_lib_path = self.create_micro_lib(runtime_src_path, obj_path=runtime_lib_path)
        # Then, initialize the session (includes loading the compiled runtime lib).
        _InitSession(self.device_type, runtime_lib_path, self.port)

        # Return `self` to bind the session as a variable in the `with` block.
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Tear down the device session unconditionally on block exit.
        _EndSession()
_init_api("tvm.micro", "tvm.micro.base")
| 36.100559 | 93 | 0.630919 | 5,278 | 0.816775 | 0 | 0 | 0 | 0 | 0 | 0 | 3,437 | 0.531879 |
0d5399e828d26c68b909576d4808a3578d5cec48 | 1,807 | py | Python | tests/test_connect_nets.py | enics-labs/salamandra | e3f334d0ead5296b02c471b56cb90b1516e12769 | [
"Apache-2.0"
] | 1 | 2021-11-18T10:45:26.000Z | 2021-11-18T10:45:26.000Z | tests/test_connect_nets.py | enics-labs/salamandra | e3f334d0ead5296b02c471b56cb90b1516e12769 | [
"Apache-2.0"
] | null | null | null | tests/test_connect_nets.py | enics-labs/salamandra | e3f334d0ead5296b02c471b56cb90b1516e12769 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 EnICS Labs, Bar-Ilan University.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import sys, os
sys.path.append(os.path.abspath('..'))
from salamandra import *
def main():
    """Script entry point: run the wiring demo with Verilog printing on."""
    test(is_metatest=False)
def test(is_metatest):
    """Build a small gate hierarchy (inv, and, nand, SoC) and wire it up.

    When ``is_metatest`` is False the SoC netlist is printed as Verilog.
    Always returns True so a metatest harness can count it as a pass.
    """
    # Inverter: input pin A connected straight through to output Z's net.
    inv = Component('inv')
    inv.add_pin(Input('A'))
    inv.add_pin(Output('Z'))
    inv.connect_nets('A', 'Z')
    # AND gate: demonstrates connecting nets both by name and by Net object.
    and_ = Component('and')
    and_.add_pin(Input('A1'))
    and_.add_pin(Input('A2'))
    and_.add_pin(Output('Z'))
    and_.connect_nets('A1', 'Z')
    and_.connect_nets(and_.get_net('A2'), and_.get_net('Z'))
    # NAND composed structurally from the AND gate and the inverter,
    # exercising pin busses and net busses.
    nand = Component('nand')
    nand.add_pinbus(Bus(Input, 'A', 2))
    nand.add_netbus(Bus(Net, 'B', 2))
    nand.connect_netbusses('A', 'B')
    nand.add_pin(Output('Z'))
    nand.add_net(Net('B0B1'))
    nand.add_component(and_, 'i_and')
    nand.add_component(inv, 'i_inv')
    nand.connect('B[0]', 'i_and.A1')
    nand.connect('B[1]', 'i_and.A2')
    nand.connect('B0B1', 'i_and.Z')
    nand.connect('B0B1', 'i_inv.A')
    nand.connect('Z', 'i_inv.Z')
    # Top level: the nand drives Z1 from bus A, while a separate
    # and -> inv chain drives Z2 from B1 & B2 through the B1B2 net.
    SoC = Component('SoC')
    SoC.add_pinbus(Bus(Input, 'A', 2))
    SoC.add_pin(Input('B1'))
    SoC.add_pin(Input('B2'))
    SoC.add_net(Net('B1B2'))
    SoC.add_pin(Output('Z1'))
    SoC.add_pin(Output('Z2'))
    SoC.add_subcomponent(nand, 'i_nand')
    SoC.add_subcomponent(and_, 'i_and')
    SoC.add_subcomponent(inv, 'i_inv')
    SoC.connect_bus('A', 'i_nand.A')
    SoC.connect('Z1', 'i_nand.Z')
    SoC.connect('B1', 'i_and.A1')
    SoC.connect('B2', 'i_and.A2')
    SoC.connect('B1B2', 'i_and.Z')
    SoC.connect('B1B2', 'i_inv.A')
    SoC.connect('Z2', 'i_inv.Z')
    if not is_metatest:
        SoC.print_verilog(include_descendants=True)
    return True
if __name__ == '__main__':
main() | 26.970149 | 74 | 0.624792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 486 | 0.268954 |
0d539b5ac601361cf9fe4062ce887af4d40cacd7 | 4,109 | py | Python | rl_dobot/algos/learn.py | sandipan1/rl_dobot | 2317c171f73ae6714c13a6e85cff3d894b242687 | [
"MIT"
] | 2 | 2018-11-09T19:31:46.000Z | 2020-01-28T11:20:08.000Z | rl_dobot/algos/learn.py | sandipan1/rl_dobot | 2317c171f73ae6714c13a6e85cff3d894b242687 | [
"MIT"
] | null | null | null | rl_dobot/algos/learn.py | sandipan1/rl_dobot | 2317c171f73ae6714c13a6e85cff3d894b242687 | [
"MIT"
] | null | null | null | '''
update rule for the DQN model is defined
'''
'''
Preprocessing: give only the HSV values or a masked end-effector image.
Downsample the pixels and convert RGB to grayscale.
Use nn.functional where there is no trainable parameter, e.g. relu or maxpool.
Try pooling vs. not pooling.
In Atari they consider multiple consecutive frames to infer direction.
'''
'''
To reduce the size of the representation,
using a larger stride in a conv layer once in a while is often a preferable option.
Discarding pooling layers has also been found to be important in training good generative models,
such as variational autoencoders (VAEs) or generative adversarial networks (GANs).
It also seems likely that future architectures will feature very few, if any, pooling layers.
Hinton says pooling is a mistake, and that the fact it works so well is a disaster.
Pooling is useful when we don't care about the position of the object in the image, just its presence.
'''
import os
import torch
import torch.nn as nn
import torch.functional as F
from rl_dobot.utils import Buffer
from rl_dobot.utils import hard_update, print_heading, heading_decorator
from rl_dobot.algos.dqn_model import DQN
def to_np(x):
    """Return a NumPy copy of tensor *x*: detach via .data and move to CPU."""
    host_tensor = x.data.cpu()
    return host_tensor.numpy()
device = torch.device("cpu")
class DQN_LEARN():
    """DQN learner: an online Q-network trained with Huber-loss TD updates
    against a periodically-synced target Q-network.
    """

    def __init__(self, reward_scale, discount_factor, action_dim, target_update_interval, lr, writer):
        """
        Parameters
        ----------
        reward_scale : float
            multiplier applied to rewards when forming TD targets
        discount_factor : float
            gamma used for the bootstrapped target
        action_dim : int
            number of discrete actions
        target_update_interval : int
            number of updates between target-network syncs
        lr : float
            Adam learning rate
        writer : tensorboard SummaryWriter
            sink for training diagnostics
        """
        self.Q_net = DQN(num_action=action_dim)
        self.target_Q_net = DQN(num_action=action_dim)
        self.reward_scale = reward_scale
        self.discount_factor = discount_factor
        self.action_dim = action_dim
        self.writer = writer
        self.target_update_interval = target_update_interval
        self.lr = lr
        # Start with the target network as an exact copy of the online net.
        hard_update(self.Q_net, self.target_Q_net)
        # BUG FIX: Adam requires the parameters to optimize as its first
        # argument; the original `torch.optim.Adam(lr=self.lr)` raised a
        # TypeError at construction.
        self.optimizer = torch.optim.Adam(self.Q_net.parameters(), lr=self.lr)

    def policy_update(self, batch, update_number):
        """One TD update from a replay batch.

        loss = Huber(Q(s,a), r*scale + gamma * (1 - done) * max_a' Q'(s',a'))

        Parameters
        ----------
        batch : dict
            replay sample with 'state', 'action', 'reward', 'next_state',
            'done' lists of tensors
        update_number : int
            global step, used for target syncs and logging
        """
        # assumes reward/done stack to column vectors (N, 1) — TODO confirm
        # against the Buffer implementation.
        state_batch = torch.stack(batch['state']).detach()
        action_batch = torch.stack(batch['action']).detach()
        reward_batch = torch.stack(batch['reward']).detach()
        next_state_batch = torch.stack(batch['next_state']).detach()
        done_batch = torch.stack(batch['done']).detach()

        # BUG FIX: select Q(s, a) for the actions actually taken; the original
        # compared the full action-value matrix against a per-sample target.
        q_values = self.Q_net(state_batch).gather(1, action_batch.long().view(-1, 1))
        # Bootstrap value from the frozen target network.
        q_t1 = self.target_Q_net(next_state_batch).detach()

        # target = r * scale + gamma * (1 - done) * max_a' Q'(s', a')
        # BUG FIX: the original referenced the undefined `self.gamma` and
        # called `.view` on the (values, indices) tuple returned by torch.max.
        Q_target = reward_batch * self.reward_scale \
            + self.discount_factor * (1 - done_batch) * torch.max(q_t1, dim=1).values.view(-1, 1)

        # Huber loss is less sensitive to outlier TD errors than MSE.
        # BUG FIX: nn.SmoothL1Loss takes (input, target), not their difference.
        huber_loss = nn.SmoothL1Loss()
        error = huber_loss(q_values, Q_target)

        self.optimizer.zero_grad()
        error.backward()
        self.optimizer.step()

        # Periodically re-sync the target network with the online network.
        if update_number % self.target_update_interval == 0:
            hard_update(self.Q_net, self.target_Q_net)

        # BUG FIX: SummaryWriter's method is add_scalar, not add_scaler;
        # "huber_loss"/"error" both log the scalar loss (kept for tag
        # compatibility with the original).
        self.writer.add_scalar("q_values", q_values.mean(), global_step=update_number)
        self.writer.add_scalar("qt1_values", q_t1.mean(), global_step=update_number)
        self.writer.add_scalar("huber_loss", error.mean(), global_step=update_number)
        self.writer.add_scalar("error", error.mean(), global_step=update_number)

    def save_model(self, env_name, q_path, info=1):
        """Save the target network's weights.

        Parameters
        ----------
        env_name : str
            used to build the default directory when q_path is None
        q_path : str or None
            destination directory (must end with a path separator)
        info : any, optional
            suffix embedded in the saved file name
        """
        if q_path is not None:
            self.q_path = q_path
        else:
            self.q_path = f'model/{env_name}/'
        os.makedirs(self.q_path, exist_ok=True)
        print_heading("Saving actor,critic,value network parameters")
        # BUG FIX: use self.q_path — the local `q_path` may be None here.
        torch.save(self.target_Q_net.state_dict(), self.q_path + f"value_{info}.pt")
        heading_decorator(bottom=True, print_req=True)

    def load_model(self, q_path=None):
        """Load target-network weights from q_path, if given."""
        print_heading(f"Loading models from paths: \n q_func:{q_path}")
        if q_path is not None:
            self.target_Q_net.load_state_dict(torch.load(q_path))
        print_heading('loading done')

    def get_action(self, state):
        """Delegate action selection to the target Q-network."""
        return self.target_Q_net.get_action(state)
| 36.6875 | 120 | 0.696276 | 2,872 | 0.698954 | 0 | 0 | 0 | 0 | 0 | 0 | 1,408 | 0.342662 |
0d53c344e253ec5b2dbe3d8a0ac3fa6d4a976325 | 923 | py | Python | backend/app/utils.py | williamsyb/StockTick | 1dd10101d44fa3a0584f849b022fc8254c2e66c7 | [
"MIT"
] | 2 | 2020-11-23T13:38:49.000Z | 2021-08-17T15:37:04.000Z | backend/app/utils.py | williamsyb/StockTick | 1dd10101d44fa3a0584f849b022fc8254c2e66c7 | [
"MIT"
] | null | null | null | backend/app/utils.py | williamsyb/StockTick | 1dd10101d44fa3a0584f849b022fc8254c2e66c7 | [
"MIT"
] | null | null | null | from .protocol import Resp
from flask import make_response
import pandas as pd
class Utils:
    """Stateless helpers shared by the app's view layer."""

    @staticmethod
    def build_resp(error_no: int, msg: str, data: dict):
        """Wrap a Resp payload into a Flask response object."""
        payload = Resp(error_no, msg, data)
        return make_response(payload.to_json())

    @staticmethod
    def treat_bar(data: pd.DataFrame) -> dict:
        """Split a bar DataFrame into volume/time/OHLC lists for the frontend.

        NOTE: mutates *data* in place (NaN fill, column drop, time -> str).
        """
        data.fillna(0, inplace=True)
        traded = data.TradeVolume.tolist()
        ordered = data.OrderVolume.tolist()
        data.drop(['TradeVolume', 'OrderVolume'], axis=1, inplace=True)
        data.time = data.time.astype(str)
        timestamps = data.time.tolist()
        ohlc_frame = data[['open', 'close', 'low', 'high']]
        return {
            'trade_volume': traded,
            'order_volume': ordered,
            'time': timestamps,
            'ohlc': ohlc_frame.values.tolist(),
        }
| 31.827586 | 79 | 0.619718 | 841 | 0.911159 | 0 | 0 | 818 | 0.886241 | 0 | 0 | 121 | 0.131094 |
0d55391048b46c947199f64898343316208f90a7 | 2,626 | py | Python | app/recipe/views.py | juancarestre/recipe-app-api2 | 7f93a2a01ebe811cba84526f0c1202dca7800b7a | [
"MIT"
] | null | null | null | app/recipe/views.py | juancarestre/recipe-app-api2 | 7f93a2a01ebe811cba84526f0c1202dca7800b7a | [
"MIT"
] | null | null | null | app/recipe/views.py | juancarestre/recipe-app-api2 | 7f93a2a01ebe811cba84526f0c1202dca7800b7a | [
"MIT"
] | null | null | null | from rest_framework import viewsets, mixins
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
import redis
import os
import json
from core.models import Tag, Ingredient
from recipe import serializers
# Connect to our Redis instance
redis_instance = redis.StrictRedis(host=os.environ.get('REDIS_HOST'),
port=os.environ.get('REDIS_PORT'), db=0)
class TagViewSet(viewsets.GenericViewSet,
                 mixins.ListModelMixin,
                 mixins.CreateModelMixin):
    """Manage tags in the database."""
    # Fix: the docstring was duplicated as a stray no-op string statement,
    # and perform_create's docstring said "ingredient" (copy-paste).
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    queryset = Tag.objects.all()
    serializer_class = serializers.TagSerializer

    def perform_create(self, serializer):
        """Create a new tag owned by the requesting user."""
        serializer.save(user=self.request.user)

    def get_queryset(self):
        """Return tags for the current authenticated user only."""
        return self.queryset.filter(user=self.request.user).order_by('-name')
class IngredientViewSet(viewsets.GenericViewSet,
                        mixins.ListModelMixin,
                        mixins.CreateModelMixin):
    """Manage ingredients in the database."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    queryset = Ingredient.objects.all()
    serializer_class = serializers.IngredientSerializer

    def perform_create(self, serializer):
        """Create a new ingredient owned by the requesting user."""
        serializer.save(user=self.request.user)

    def get_queryset(self):
        """Return the current user's ingredients, cached in Redis for 60s.

        Fixes vs. previous revision: the cache key is fetched once instead
        of twice, and the cache-miss path no longer rebuilds the queryset a
        second time after the cached copy was already prepared.
        """
        cache_key = f'ingredientsx:{self.request.user.email}'
        cached = redis_instance.get(cache_key)
        if not cached:
            print('not in cache')
            ingredients = self.queryset.filter(
                user=self.request.user).order_by('-name')
            ingredients_to_cache = f'{list(ingredients.values())}'
            redis_instance.setex(cache_key, 60, json.dumps(ingredients_to_cache))
        else:
            print('in cache')
            # SECURITY NOTE: eval() of cache contents executes arbitrary code
            # if the Redis instance is ever writable by an attacker; storing
            # plain JSON and using json.loads would be safer.
            ingredients = eval(
                cached.decode("utf-8").replace('"', "").replace("'", '"'))
        return ingredients
| 35.972603 | 79 | 0.653085 | 2,172 | 0.827113 | 0 | 0 | 0 | 0 | 0 | 0 | 556 | 0.211729 |
0d55d639ae3cbd3fbf923acfe2653d7ccc5a7c72 | 2,468 | py | Python | torchbenchmark/models/mlp_larger/__init__.py | VectorInstitute/benchmark | 1cd8ff536aeddf3ebcafd4837d91684a1d9e58d2 | [
"BSD-3-Clause"
] | 1 | 2022-01-19T15:46:20.000Z | 2022-01-19T15:46:20.000Z | torchbenchmark/models/mlp_larger/__init__.py | VectorInstitute/benchmark | 1cd8ff536aeddf3ebcafd4837d91684a1d9e58d2 | [
"BSD-3-Clause"
] | null | null | null | torchbenchmark/models/mlp_larger/__init__.py | VectorInstitute/benchmark | 1cd8ff536aeddf3ebcafd4837d91684a1d9e58d2 | [
"BSD-3-Clause"
] | null | null | null | import random
import numpy as np
import torch
from torch import nn
from torchbenchmark.tasks import OTHER
from ...util.model import BenchmarkModel
torch.manual_seed(1337)
random.seed(1337)
np.random.seed(1337)
# pretend we are using MLP to predict CIFAR images
class MLP(nn.Module):
    """Fully-connected softmax classifier over flattened 3x32x32 images.

    Architecture: Flatten -> 3072-1024 -> 3x(1024-1024) -> 1024-10, ReLU
    between linear layers and a final Softmax over the class dimension.
    """

    def __init__(self):
        super().__init__()
        hidden = 1024
        layers = [nn.Flatten(), nn.Linear(3 * 32 * 32, hidden), nn.ReLU()]
        for _ in range(3):
            layers += [nn.Linear(hidden, hidden), nn.ReLU()]
        layers += [nn.Linear(hidden, 10), nn.Softmax(dim=-1)]
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        """Return per-class probabilities for a batch of images."""
        return self.main(x)
class Model(BenchmarkModel):
    """Benchmark wrapper that trains/evaluates the MLP on a fixed random batch."""
    task = OTHER.OTHER_TASKS

    def __init__(self, device='cpu', jit=False, lr=1e-4, weight_decay=1e-4):
        super().__init__()
        self.device = device
        self.jit = jit
        batch = 4096
        # Mimic a batch of normalized images clamped to [-1, 1].
        self.sample_inputs = torch.randn(batch, 3, 32, 32).clamp_(-1, 1).to(device)
        self.sample_targets = torch.randint(0, 10, (batch,)).to(device)
        self.model = MLP().to(device)
        self.optimizer = torch.optim.Adam(
            self.model.parameters(), lr=lr, weight_decay=weight_decay)
        self.criterion = nn.CrossEntropyLoss()

    def train(self, niter=1):
        """Run *niter* optimization steps on the fixed batch (JIT unsupported)."""
        if self.jit:
            raise NotImplementedError()
        self.model.train()
        for _ in range(niter):
            predictions = self.model(self.sample_inputs)
            self.optimizer.zero_grad()
            loss = self.criterion(predictions, self.sample_targets)
            loss.backward()
            self.optimizer.step()

    def eval(self, niter=1):
        """Run *niter* gradient-free forward passes (JIT unsupported)."""
        if self.jit:
            raise NotImplementedError()
        self.model.eval()
        with torch.no_grad():
            for _ in range(niter):
                self.model(self.sample_inputs)

    def get_module(self):
        """Return (model, example inputs); JIT mode is unsupported."""
        if self.jit:
            raise NotImplementedError()
        return self.model, self.sample_inputs
if __name__ == '__main__':
    # Smoke-test both devices in eager (non-JIT) mode.
    for dev in ('cpu', 'cuda'):
        print("Testing device {}, JIT {}".format(dev, False))
        model = Model(device=dev, jit=False)
        model.train()
        model.eval()
b48fbff20842d5dc1be80ae2543317f479252cd3 | 99 | py | Python | args.py | NiranjanVRam/spotify-downloader | f20580dbb6fa6ebc00e7935f233f5a87e9762d18 | [
"MIT"
] | 7 | 2020-12-27T13:38:38.000Z | 2022-03-06T16:34:18.000Z | args.py | NiranjanVRam/spotify-downloader | f20580dbb6fa6ebc00e7935f233f5a87e9762d18 | [
"MIT"
] | 2 | 2021-10-03T10:56:12.000Z | 2021-10-10T23:43:14.000Z | args.py | NiranjanVRam/spotify-downloader | f20580dbb6fa6ebc00e7935f233f5a87e9762d18 | [
"MIT"
] | 5 | 2021-03-31T11:38:59.000Z | 2021-12-30T02:36:40.000Z | clientId = 'CLIENT_ID'
clientSecret = 'CLIENT_SECRET'
geniusToken = 'GENIUS_TOKEN'
bitrate = '320' | 19.8 | 30 | 0.757576 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.454545 |
b4903e0e49fe00b8a065406698071247bb86a02d | 654 | py | Python | experiments/experiments_2/bs3_exp11_2_runner.py | petrroll/msc-neuro | cdfb5f0ad1a6974fdde6ac9760364d5545d70690 | [
"MIT"
] | 1 | 2019-10-26T19:38:42.000Z | 2019-10-26T19:38:42.000Z | experiments/experiments_2/bs3_exp11_2_runner.py | petrroll/msc-neuro | cdfb5f0ad1a6974fdde6ac9760364d5545d70690 | [
"MIT"
] | null | null | null | experiments/experiments_2/bs3_exp11_2_runner.py | petrroll/msc-neuro | cdfb5f0ad1a6974fdde6ac9760364d5545d70690 | [
"MIT"
] | 1 | 2021-03-23T14:54:04.000Z | 2021-03-23T14:54:04.000Z | import os
import sys
sys.path.append(os.getcwd())
import utils.runners as urun
#
# Dropout is `keep_prob` probability not `dropout rate` (as in TF2.x)
#
if __name__ == "__main__":
    exp_folder = "experiments_2"
    exp = "bs3_exp11"
    exp_rev = "2"
    runner = urun.get_runner(sys.argv[1])
    # Sweep the hidden-fraction x dropout grid; `run` numbers the
    # configurations sequentially, exactly as the old manual counter did.
    combos = ((h, d)
              for h in [0.2, 0.3, 0.4]
              for d in [0, 0.05, 0.1, 0.2, 0.4, 0.5])
    for run, (hidden, dropout_h) in enumerate(combos):
        runner(
            exp_folder, exp,
            f"--exp_folder={exp_folder} --exp={exp}_{exp_rev} --run={run} --hidden={hidden} --dropout_h={1-dropout_h}",
            f"{run}_{exp_rev}"
        )
| 25.153846 | 124 | 0.544343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.357798 |
b495cef87e530613f2c8277610c653f49cd1a833 | 2,003 | py | Python | library/aq6315.py | mjasperse/telepythic | fbf24a885cb195dc5cecf78e112b8ff4b993043d | [
"BSD-3-Clause"
] | 2 | 2020-10-06T15:55:26.000Z | 2021-04-01T04:09:01.000Z | library/aq6315.py | mjasperse/telepythic | fbf24a885cb195dc5cecf78e112b8ff4b993043d | [
"BSD-3-Clause"
] | null | null | null | library/aq6315.py | mjasperse/telepythic | fbf24a885cb195dc5cecf78e112b8ff4b993043d | [
"BSD-3-Clause"
] | null | null | null | """
AQ6315E DATA EXTRACTOR
Extracts all visible traces from Ando AQ-6315E Optical Spectrum Analyser
Usage: ./aq6315.py [filename]
If specified, extracted data is saved to CSV called "filename"
Relevant list of commands available at
http://support.us.yokogawa.com/downloads/TMI/COMM/AQ6317B/AQ6317B%20R0101.pdf
> GPIB commands, section 9
> Trace query format, section 9-42
"""
import sys
from telepythic import TelepythicDevice, PrologixInterface
import numpy as np
# connect to device via a Prologix GPIB-ethernet bridge (this file is Python 2:
# note the print statements below)
bridge = PrologixInterface(gpib=1,host=177,timeout=0.5)
dev = TelepythicDevice(bridge)
# confirm device identity before issuing instrument-specific commands
id = dev.id(expect=b'ANDO,AQ6315')
print 'Device ID:',id
res = dev.query(b'RESLN?') # resolution
ref = dev.query(b'REFL?') # reference level
npts = dev.query(b'SEGP?') # number of points in sweep
expectedlen = 12*npts+8 # estimate size of trace (ASCII format)
def get_trace(cmd):
    """Query the OSA for a trace and return it as a float32 numpy array.

    The instrument replies with "<count>,v1,v2,..."; the leading count is
    validated against the number of values actually received so that a
    truncated transfer is detected immediately.
    """
    fields = dev.ask(cmd).strip().split(',')
    expected = int(fields[0])
    values = fields[1:]
    assert len(values) == expected, 'Got %i elems, expected %i'%(len(values),expected)
    return np.asarray(values,'f')
import pylab
pylab.clf()
# res maps trace label (e.g. b'AV'/b'AL') -> numpy array of values
res = {}
for t in b'ABC': # device has 3 traces (Python 2: iterating a byte string yields chars)
    if dev.ask(b'DSP%s?'%t): # if the trace is visible
        print 'Reading Trace',t # download this trace
        res[t+b'V'] = get_trace(b'LDAT'+t) # download measurement values (Y)
        res[t+b'L'] = get_trace(b'WDAT'+t) # download wavelength values (X)
        pylab.plot(res[t+b'L'],res[t+b'V']) # plot results
# close connection to prologix
dev.close()
# convert results dict to a pandas dataframe
import pandas as pd
df = pd.DataFrame(res)
if len(sys.argv) > 1:
    # write to csv if filename was specified
    df.to_csv(sys.argv[1],index=False)
# show graph
pylab.show()
| 32.306452 | 81 | 0.6665 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,149 | 0.57364 |
b49746c535ff505d70174da7005a75abdaab6aa0 | 1,707 | py | Python | src/test_uint8.py | disconnect3d/PicoBlade-asm | f4c1c5966f71ac9b11118dc0df1a6dd93699b4a2 | [
"MIT"
] | 4 | 2018-07-26T21:16:06.000Z | 2021-07-02T17:07:01.000Z | src/test_uint8.py | disconnect3d/PicoBlade-asm | f4c1c5966f71ac9b11118dc0df1a6dd93699b4a2 | [
"MIT"
] | 1 | 2016-11-09T22:47:05.000Z | 2017-01-19T23:34:50.000Z | src/test_uint8.py | disconnect3d/PicoBlade-asm | f4c1c5966f71ac9b11118dc0df1a6dd93699b4a2 | [
"MIT"
] | null | null | null | from src.uint8 import uint8
# Test suite for the uint8 wrapper type. Conventions exercised below:
# arithmetic operators return a (result, carry/borrow) tuple; values wrap
# modulo 256; indexing reads individual bits (bit 0 = LSB).

def test_constructor1():
    # int() conversion round-trips the stored value.
    assert int(uint8(20)) == 20
def test_constructor2():
    # Out-of-range constructor arguments wrap modulo 256 (two's complement
    # behaviour for negatives).
    assert uint8(256) == uint8(0)
    assert uint8(260) == uint8(4)
    assert uint8(-1) == uint8(255)
    assert uint8(-5) == uint8(251)
    assert uint8(-5) != uint8(252)
def test_add_other():
    # Addition returns (sum, carry flag).
    assert (uint8(50), 0) == uint8(20) + uint8(30)
    assert (uint8(5), 1) == uint8(250) + uint8(11)
def test_add_int():
    # Plain ints are accepted as the right operand.
    assert (uint8(50), 0) == uint8(20) + 30
    assert (uint8(5), 1) == uint8(250) + 11
    assert (uint8(251), 1) == uint8(1) + -6
def test_sub_other():
    # Subtraction returns (difference, borrow flag).
    assert (uint8(246), 1) == uint8(20) - uint8(30)
    assert (uint8(239), 0) == uint8(250) - uint8(11)
def test_sub_int():
    assert (uint8(246), 1) == uint8(20) - 30
    assert (uint8(239), 0) == uint8(250) - 11
    assert (uint8(7), 0) == uint8(1) - -6
def test_and_other():
    # Bitwise AND (including in-place &=) returns a bare uint8, no flags.
    assert uint8(24) == uint8(31) & uint8(24)
    assert uint8(0) == uint8(17) & uint8(12)
    val = uint8(31)
    val &= uint8(24)
    assert val == uint8(24)
def test_and_int():
    assert uint8(24) == uint8(31) & 24
    assert uint8(0) == uint8(17) & 12
    val = uint8(31)
    val &= 24
    assert val == uint8(24)
def test_eq_int():
    # Equality against plain ints also wraps the int modulo 256.
    assert uint8(42) == 42
    assert uint8(256) == 0
    assert uint8(-1) == 255
def test_mod():
    assert uint8(32) % 2 == 0
    assert uint8(5) % uint8(2) == 1
def test_lshift():
    # Left shift returns (shifted value, bit shifted out of the MSB).
    assert (uint8(0), 1) == uint8(128) << 1
    assert (uint8(128), 1) == uint8(192) << 1
    assert (uint8(64), 0) == uint8(32) << 1
def test_getitem():
    # Indexing reads single bits: uint8(16) has only bit 4 set.
    assert uint8(16)[4] == 1
    assert uint8(16)[5] == 0
    for i in range(8):
        assert uint8(255)[i] == 1
        assert uint8(0)[i] == 0
b497aee10348953dd46616dc98824f2c3d70953e | 1,042 | py | Python | tests/lid_driven_cavity/test.py | nazmas/SNaC | e928adc142df5bbe1a7941907c35add6ea6f1ff0 | [
"MIT"
] | null | null | null | tests/lid_driven_cavity/test.py | nazmas/SNaC | e928adc142df5bbe1a7941907c35add6ea6f1ff0 | [
"MIT"
] | null | null | null | tests/lid_driven_cavity/test.py | nazmas/SNaC | e928adc142df5bbe1a7941907c35add6ea6f1ff0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
def test_ldc():
    """Regression-check a lid-driven-cavity centerline profile.

    Which velocity component/slice is compared depends on the current
    working directory name (data_x / data_y / data_z), i.e. on which
    decomposition's output directory the test is run from.
    """
    import numpy as np
    import os
    from read_single_field_binary import read_single_field_binary
    # Reference centerline data (column 1) for Re=1000.
    data_ref = np.loadtxt("data_ldc_re1000.txt")
    if "data_x" in os.getcwd():
        data,xp,yp,zp,xu,yv,zw = read_single_field_binary("vey_fld_0001500.bin",np.array([1,1,1]))
        # mid-plane index along the last axis — NOTE(review): islice is
        # derived from the last axis here but indexes the middle axis in
        # the data_y branch below; confirm this asymmetry is intended.
        islice = int(np.size(data[0,0,:])/2)
        np.testing.assert_allclose(data[0,islice,:], data_ref[:,1], rtol=1e-7, atol=0)
    if "data_y" in os.getcwd():
        data,xp,yp,zp,xu,yv,zw = read_single_field_binary("vex_fld_0001500.bin",np.array([1,1,1]))
        islice = int(np.size(data[0,0,:])/2)
        np.testing.assert_allclose(data[islice,0,:], data_ref[:,1], rtol=1e-7, atol=0)
    if "data_z" in os.getcwd():
        data,xp,yp,zp,xu,yv,zw = read_single_field_binary("vex_fld_0001500.bin",np.array([1,1,1]))
        islice = int(np.size(data[0,:,0])/2)
        np.testing.assert_allclose(data[islice,:,0], data_ref[:,1], rtol=1e-7, atol=0)
if __name__ == "__main__":
    test_ldc()
    print("Passed!")
| 47.363636 | 98 | 0.643954 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.142035 |
b4987682eaca1ee3d36ee754283c8f4e42d74219 | 638 | py | Python | zoom/_assets/standard_apps/icons/index.py | zodman/ZoomFoundry | 87a69f519a2ab6b63aeec0a564ce41259e64f88d | [
"MIT"
] | 8 | 2017-04-10T09:53:15.000Z | 2020-08-16T09:53:14.000Z | zoom/_assets/standard_apps/icons/index.py | zodman/ZoomFoundry | 87a69f519a2ab6b63aeec0a564ce41259e64f88d | [
"MIT"
] | 49 | 2017-04-13T22:51:48.000Z | 2019-08-15T22:53:25.000Z | zoom/_assets/standard_apps/icons/index.py | zodman/ZoomFoundry | 87a69f519a2ab6b63aeec0a564ce41259e64f88d | [
"MIT"
] | 12 | 2017-04-11T04:16:47.000Z | 2019-08-10T21:41:54.000Z | """
icons index
"""
import zoom
class MyView(zoom.View):
    """Views for the icons app."""

    def index(self):
        """Render the FontAwesome 4 icon gallery."""
        zoom.requires('fontawesome4')
        body = zoom.tools.load('icons.html')
        return zoom.page(
            body,
            title='Icons',
            subtitle='Icons available as part of FontAwesome 4<br><br>',
        )

    def about(self):
        """Render the standard about page for this app."""
        app = zoom.system.request.app
        return zoom.page(
            '{app.description}'.format(app=app),
            title='About {app.title}'.format(app=app),
        )
main = zoom.dispatch(MyView)
| 23.62963 | 73 | 0.589342 | 568 | 0.890282 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.30094 |
b4992dbe930e0e9eeb52ad731345b2e5829711bc | 1,096 | py | Python | python/AI/data/zero_mean.py | Vahegian/GiantTrader | 67a1acd8c52e2277ae8919834563775e59b6e6ce | [
"MIT"
] | 10 | 2020-05-12T13:41:42.000Z | 2022-03-24T17:32:09.000Z | python/AI/data/zero_mean.py | Vahegian/GiantTrader | 67a1acd8c52e2277ae8919834563775e59b6e6ce | [
"MIT"
] | 18 | 2020-09-26T00:57:28.000Z | 2021-06-02T01:31:35.000Z | python/AI/data/zero_mean.py | Vahegian/GiantTrader | 67a1acd8c52e2277ae8919834563775e59b6e6ce | [
"MIT"
] | 3 | 2021-01-03T07:44:09.000Z | 2021-11-20T18:44:24.000Z | '''
This script takes one 'npy' file that contains market data
of multiple markets and produces 3 files "train.npy", "test.npy", "val.npy".
The script calculates the mean image of the dataset and substracts it from
each image.
'''
import numpy as np
def save_file(dir, data):
    """Write *data* to *dir* with np.save and report what was written."""
    np.save(dir, data)
    message = f"saved file '{dir}' file length={len(data)}"
    print(message)
_FILE = "data/private/cnn_data.npy"
# assumes each record's element 0 is the image array — TODO confirm format
_FILE = np.load(_FILE, allow_pickle=True)
# print(_FILE)
# exit(0)
# subtract the mean image from every sample (zero-centering)
mean_img = np.mean(_FILE, axis=0)[0]
print(f"\nTotal data length is '{len(_FILE)}', and 'mean img' is \n'{mean_img}'")
print("\n substracting mean img....")
for i in range(len(_FILE)):
    _FILE[i][0] -= mean_img
'''
split data for train, test and validation sets
'''
# 90% train / 5% test / remainder (~5%) validation
train_size, test_size = int((90/100)*len(_FILE)), int((5/100)*len(_FILE)) # 90%, 5%
train, test, val = _FILE[:train_size], _FILE[train_size:train_size+test_size], _FILE[train_size+test_size:]
save_file("data/private/train.npy",train)
save_file("data/private/test.npy", test)
save_file("data/private/val.npy", val)
# print(train) | 28.102564 | 107 | 0.696168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 605 | 0.552007 |
b49a302baad4a4b82d1403cf60a8484fa3cbece9 | 8,352 | py | Python | src/JobManager/Tasks.py | ROOSTER-fleet-management/rooster_fleet_manager | 93a41ea1ec0af2eda57957dde7d996929bfc057a | [
"Apache-2.0"
] | 6 | 2020-10-30T11:24:34.000Z | 2022-01-11T18:52:06.000Z | src/JobManager/Tasks.py | ROOSTER-fleet-management/rooster_fleet_manager | 93a41ea1ec0af2eda57957dde7d996929bfc057a | [
"Apache-2.0"
] | null | null | null | src/JobManager/Tasks.py | ROOSTER-fleet-management/rooster_fleet_manager | 93a41ea1ec0af2eda57957dde7d996929bfc057a | [
"Apache-2.0"
] | 8 | 2020-10-30T11:25:03.000Z | 2021-09-07T12:54:27.000Z | #! /usr/bin/env python
import rospy
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from tf.transformations import quaternion_from_euler
from std_msgs.msg import UInt8
from enum import Enum
#region ################## TODOLIST ########################
# DONE 1. Add Task class and make other task classes inherit from it.
#endregion #################################################
class TaskType(Enum):
    """Class that acts as an enum for the different kinds of tasks."""
    ROBOTMOVEBASE = 0               # Drive a robot via its move_base action server.
    AWAITINGLOADCOMPLETION = 1      # Wait for input that loading of the MEx finished.
    AWAITINGUNLOADCOMPLETION = 2    # Wait for input that unloading of the MEx finished.
class TaskStatus(Enum):
    """ Class that acts as Enumerator for Task status. """
    # 0 = PENDING, 1 = ACTIVE, 2 = CANCELLED, 3 = SUCCEEDED, 4 = ABORTED
    PENDING = 0      # Not started yet.
    ACTIVE = 1       # Currently executing.
    CANCELLED = 2    # Stopped on request before completion.
    SUCCEEDED = 3    # Finished successfully.
    ABORTED = 4      # Terminated because of an error.
class Task(object):
    """
    Base class from which specific child task classes inherit.

    Lifecycle: created PENDING, becomes ACTIVE in start(), and is driven
    to a terminal TaskStatus by the child class.
    """
    def __init__(self, tasktype, child_start):
        self.id = None                      # ID of the MEx to perform the task on/with (set by start()).
        self.status = TaskStatus.PENDING    # Status of this task.
        self.type = tasktype                # Type of the task, defined by child.
        self.child_start = child_start      # The child's task specific start method.

    def start(self, mex_id, task_id, job_callback):
        """
        Start the task's specific action.
        All job tasks should have this method (through inheritance): 'start', with these
        arguments: 'self', 'mex_id', 'task_id', 'job_callback'.
        """
        self.id = mex_id
        self.task_id = task_id
        self.job_callback = job_callback    # Called with [task_id, status] on completion by children.
        self.status = TaskStatus.ACTIVE
        self.child_start()

    def get_status(self):
        """ Return the status of the task. """
        return self.status
class RobotMoveBase(Task):
    """
    Task class: RobotMoveBase, implements move_base action calls to robot navigation stack.
    Used by the higher level Job class to populate a list with its job tasks.
    """
    def __init__(self, location):
        super(RobotMoveBase, self).__init__(TaskType.ROBOTMOVEBASE, self.move_robot)
        self.location = location    # Goal with .x, .y, .theta for move_base.

    #region Callback definitions
    def active_cb(self):
        """Called by actionlib once the goal has been accepted for processing."""
        rospy.loginfo(self.id + ". Goal pose being processed")

    def feedback_cb(self, feedback):
        """Continuous position feedback; intentionally silent to avoid log spam."""
        pass

    def done_cb(self, status, result):
        """Map the terminal actionlib status onto TaskStatus and notify the Job.

        move_base callback status options: PENDING=0, ACTIVE=1, PREEMPTED=2,
        SUCCEEDED=3, ABORTED=4, REJECTED=5, PREEMPTING=6, RECALLING=7,
        RECALLED=8, LOST=9.
        """
        if status == 3:
            self.status = TaskStatus.SUCCEEDED
            rospy.loginfo(self.id + ". Goal reached")
        if status == 2 or status == 8:
            self.status = TaskStatus.CANCELLED
            rospy.loginfo(self.id + ". Goal cancelled")
        if status == 4:
            self.status = TaskStatus.ABORTED
            rospy.loginfo(self.id + ". Goal aborted")
        if self.job_callback:
            self.job_callback([self.task_id, self.status])
    #endregion

    def move_robot(self):
        """Send the stored goal pose to the robot's move_base action server."""
        self.navclient = actionlib.SimpleActionClient(self.id + '/move_base', MoveBaseAction)
        self.navclient.wait_for_server()

        goal = MoveBaseGoal()
        goal.target_pose.header.frame_id = "map"
        goal.target_pose.header.stamp = rospy.Time.now()
        goal.target_pose.pose.position.x = self.location.x
        goal.target_pose.pose.position.y = self.location.y
        goal.target_pose.pose.position.z = 0.0
        quaternion = quaternion_from_euler(0, 0, self.location.theta)
        goal.target_pose.pose.orientation.x = quaternion[0]
        goal.target_pose.pose.orientation.y = quaternion[1]
        goal.target_pose.pose.orientation.z = quaternion[2]
        goal.target_pose.pose.orientation.w = quaternion[3]

        self.navclient.send_goal(goal, done_cb=self.done_cb, active_cb=self.active_cb, feedback_cb=self.feedback_cb)

    def get_status(self):
        """Refresh self.status from the action client and return it.

        Fixes vs. previous revision:
        * actionlib states 4 (ABORTED) and 9 (LOST) now map to
          TaskStatus.ABORTED, consistent with done_cb (they previously
          mapped back to ACTIVE);
        * the refreshed status is returned (the base-class call's result was
          previously discarded, so callers always received None).
        """
        # getattr: move_robot may not have run yet, so navclient can be absent.
        navclient = getattr(self, "navclient", None)
        if navclient:
            # navclient state options: PENDING=0, ACTIVE=1, PREEMPTED=2, SUCCEEDED=3,
            # ABORTED=4, REJECTED=5, PREEMPTING=6, RECALLING=7, RECALLED=8, LOST=9.
            navclient_state = navclient.get_state()
            if navclient_state == 0 or navclient_state == 1:
                self.status = TaskStatus.ACTIVE
            elif navclient_state == 2 or navclient_state == 5 or navclient_state == 8:
                self.status = TaskStatus.CANCELLED
            elif navclient_state == 3:
                self.status = TaskStatus.SUCCEEDED
            elif navclient_state == 4 or navclient_state == 9:
                self.status = TaskStatus.ABORTED
        return super(RobotMoveBase, self).get_status()
class AwaitingLoadCompletion(Task):
    """
    Task class: AwaitingLoadCompletion, waits for input from user or system to mark loading of the MEx as succeeded, cancelled, aborted.
    Used by the higher level Job class to populate a list with its job tasks.
    """
    def __init__(self):
        super(AwaitingLoadCompletion, self).__init__(TaskType.AWAITINGLOADCOMPLETION, self.child_start)

    def child_start(self):
        """ Start the task's specific action, subscribing to the /LoadInput topic on the MEx's namespace. """
        # NOTE(review): attribute name is misspelled ("subcriber"); kept because
        # the subclass AwaitingUnloadCompletion reuses it via input_cb.
        self.input_subcriber = rospy.Subscriber(self.id + "/LoadInput", UInt8, self.input_cb)     # Subscribe to /'mex_id'/LoadInput topic to listen for published user/system input.
        rospy.loginfo(self.id + ". Awaiting load completion input...")

    def input_cb(self, data):
        """
        Callback method for any user or system input.
        Updates the instance status and calls the higher level job_callback.
        load status option: PENDING=0 ACTIVE=1, CANCELLED=2, SUCCEEDED=3,
        ABORTED=4.
        """
        if self.job_callback:   # Only process callback if this task was started.
            # Input received from user/system,
            if data.data == 3:
                # Loading was completed succesfully.
                self.status = TaskStatus.SUCCEEDED
            elif data.data == 2:
                # Loading was cancelled by user.
                self.status = TaskStatus.CANCELLED
            elif data.data == 4:
                # Loading encountered an error and had to abort.
                self.status = TaskStatus.ABORTED
            if data.data == 2 or data.data == 3 or data.data == 4:  # User input meaning some kind of end: cancel, succes or abort.
                self.input_subcriber.unregister()   # Unsubscribe to topic, as this task of the job is done.
                self.job_callback([self.task_id, self.status])  # Call the higher level Job callback.
class AwaitingUnloadCompletion(AwaitingLoadCompletion):
    """
    Task class: AwaitingUnloadCompletion, waits for input from user or system to mark unloading of the MEx as succeeded, cancelled, aborted.
    Used by the higher level Job class to populate a list with its job tasks.
    Inherets from AwaitingLoadCompletion.
    """
    def __init__(self):
        super(AwaitingUnloadCompletion, self).__init__()
        # Overwrite the type set by the parent constructor; input handling
        # (input_cb) is reused unchanged from AwaitingLoadCompletion.
        self.type = TaskType.AWAITINGUNLOADCOMPLETION

    def child_start(self):
        """ Start the task's specific action, subscribing to the /UnloadInput topic on the MEx's namespace. """
        self.input_subcriber = rospy.Subscriber(self.id + "/UnloadInput", UInt8, self.input_cb)     # Subscribe to /'mex_id'/UnloadInput topic to listen for published user/system input.
rospy.loginfo(self.id + ". Awaiting unload completion input...") | 43.274611 | 181 | 0.637452 | 7,927 | 0.949114 | 0 | 0 | 0 | 0 | 0 | 0 | 3,708 | 0.443966 |
b49a5576e004df9c62a3ab2a288cc819b3c00b9c | 1,426 | py | Python | user.py | sdress/dojo-users | 13f10f0efcd0e9e5ad7ed8c20d991524636cf37f | [
"MIT"
] | null | null | null | user.py | sdress/dojo-users | 13f10f0efcd0e9e5ad7ed8c20d991524636cf37f | [
"MIT"
] | null | null | null | user.py | sdress/dojo-users | 13f10f0efcd0e9e5ad7ed8c20d991524636cf37f | [
"MIT"
] | null | null | null | # import the function that will return an instance of a connection
from mysqlconnection import connectToMySQL
# model the class after the friend table from our database
class User:
    """Model for rows of the `users` table."""

    def __init__(self, data):
        # Copy the columns of one result row onto the instance.
        self.id = data['id']
        self.first_name = data['first_name']
        self.last_name = data['last_name']
        self.email = data['email']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']

    # Class methods below run queries against the users_schema database.
    @classmethod
    def get_all(cls):
        """Fetch every user row and wrap each one in a User instance."""
        query = "SELECT * FROM users;"
        # connectToMySQL returns a connection for the targeted schema.
        results = connectToMySQL('users_schema').query_db(query)
        return [cls(row) for row in results]

    @classmethod
    def save(cls, data):
        """Insert a new user; *data* supplies the fname/lname/email placeholders."""
        query = "INSERT INTO users ( first_name , last_name , email , created_at, updated_at ) VALUES ( %(fname)s , %(lname)s , %(email)s , NOW() , NOW() );"
        return connectToMySQL('users_schema').query_db( query, data )
| 44.5625 | 157 | 0.648668 | 1,256 | 0.880785 | 0 | 0 | 835 | 0.585554 | 0 | 0 | 758 | 0.531557 |
b49af5e43de48fcf3cf24608acee75fdd5d449fb | 21,488 | py | Python | tiktokpy/client/user.py | returnWOW/tiktokpy | 14e1fb2ef0c4f2f0abe2bdfa7c156a33f6cd92d2 | [
"MIT"
] | 1 | 2021-11-15T15:35:14.000Z | 2021-11-15T15:35:14.000Z | tiktokpy/client/user.py | returnWOW/tiktokpy | 14e1fb2ef0c4f2f0abe2bdfa7c156a33f6cd92d2 | [
"MIT"
] | 1 | 2022-03-12T13:12:37.000Z | 2022-03-12T13:12:37.000Z | tiktokpy/client/user.py | returnWOW/tiktokpy | 14e1fb2ef0c4f2f0abe2bdfa7c156a33f6cd92d2 | [
"MIT"
] | null | null | null | import os
import asyncio
from typing import List
import traceback
import pyppeteer
from pyppeteer.page import Page
from tqdm import tqdm
import pdb
import time
from tiktokpy.client import Client
from tiktokpy.utils.client import catch_response_and_store, catch_response_info, get_dt_str, trans_char
from tiktokpy.utils.logger import logger
import re
pattern_comment_area = re.compile(r'comment-container">(.*?)comment-post-outside-container">', re.S)
pattern_comments = re.compile(r'<div .*? comment-content .*?<a href="/@(.*?)\?.*?username">(.*?)</span></a><p .*? comment-text"><span class=".*?">(.*?)</span>', re.S)
# pattern_comments = re.compile(r'<div .*? comment-content .*?<a href="/@(.*?)\?.*?".*?username">(.*?)</span></a><p class=".*? comment-text"><span class=".*?">(.*?)</span>', re.S)
# user post rex
pattern_user_post_id = re.compile(r'data-e2e="user-post-item".*?<a href="http.*?/@.*?/video/(\d+)', re.S)
# <div .*? comment-content .*?"><a href="/@(.*?)\?.*?" .*?username">(.*?)</span></a><p class=".*? comment-text"><span class=".*?">(.*?)</span>
class User:
    def __init__(self, client: Client):
        # Shared browser client used by every user-level action below.
        self.client = client
    async def like(self, username: str, video_id: str, page=None):
        """Open the given video's page and click its like button.

        NOTE(review): time.sleep() blocks the whole asyncio event loop;
        asyncio.sleep() would be the non-blocking equivalent. The response
        verification via like_info_queue is currently disabled (commented
        out), so success is only assumed, not confirmed.
        """
        self.client.delete_cache_files()
        if not page:
            page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
        page.setDefaultNavigationTimeout(0)
        logger.debug(f"👥 Like video id {video_id} of @{username}")
        # Queue capturing the digg-commit API response (listener stays attached
        # even though the check below is disabled).
        like_info_queue: asyncio.Queue = asyncio.Queue(maxsize=1)
        page.on(
            "response",
            lambda res: asyncio.create_task(
                catch_response_info(res, like_info_queue, "/commit/item/digg"),
            ),
        )
        logger.info(f"🧭 Going to @{username}'s video {video_id} page for like")
        await self.client.goto(
            f"/@{username}/video/{video_id}",
            page=page,
            options={"waitUntil": "networkidle0"},
        )
        time.sleep(3)
        # like_selector = ".lazyload-wrapper:first-child .item-action-bar.vertical > .bar-item-wrapper:first-child" # noqa: E501
        # is_liked = await page.J(f'{like_selector} svg[fill="none"]')
        # if is_liked:
        #     logger.info(f"😏 @{username}'s video {video_id} already liked")
        #     return
        # Locate the like button via its data-e2e marker and click its parent.
        click_button = await page.xpath('//span[@data-e2e="like-icon"]/..')
        print(click_button)
        click_button = click_button[0]
        await asyncio.sleep(2)
        await click_button.click()
        # like_info = await like_info_queue.get()
        # if like_info["status_code"] == 0:
        # else:
        #     logger.warning(f"⚠️ @{username}'s video {video_id} probably not liked")
        logger.info(f"👍 @{username}'s video {video_id} liked")
        await page.close()
    async def unlike(self, username: str, video_id: str):
        """Open the given video's page and remove an existing like.

        Unlike like(), this method still verifies the outcome through the
        digg-commit API response captured in like_info_queue.
        """
        page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
        logger.debug(f"👥 Unlike video id {video_id} of @{username}")
        like_info_queue: asyncio.Queue = asyncio.Queue(maxsize=1)
        page.on(
            "response",
            lambda res: asyncio.create_task(
                catch_response_info(res, like_info_queue, "/commit/item/digg"),
            ),
        )
        logger.info(f"🧭 Going to @{username}'s video {video_id} page for unlike")
        await self.client.goto(
            f"/@{username}/video/{video_id}",
            page=page,
            options={"waitUntil": "networkidle0"},
        )
        like_selector = ".lazyload-wrapper:first-child .item-action-bar.vertical > .bar-item-wrapper:first-child" # noqa: E501
        # A "currentColor" heart SVG indicates the video is not liked yet.
        is_unliked = await page.J(f'{like_selector} svg[fill="currentColor"]')
        if is_unliked:
            logger.info(f"😏 @{username}'s video {video_id} already unliked")
            return
        await page.click(like_selector)
        like_info = await like_info_queue.get()
        if like_info["status_code"] == 0:
            logger.info(f"👎 @{username}'s video {video_id} unliked")
        else:
            logger.warning(f"⚠️ @{username}'s video {video_id} probably not unliked")
        await page.close()
    async def follow(self, username: str, page=None):
        """Open the user's profile page and click the Follow button.

        NOTE(review): time.sleep() blocks the event loop (asyncio.sleep()
        would not); the API-response verification is disabled (commented
        out); the leftover print() calls look like debugging output.
        """
        self.client.delete_cache_files()
        if not page:
            page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
        logger.debug(f"👥 Follow {username}")
        page.setDefaultNavigationTimeout(0)
        # follow_info_queue: asyncio.Queue = asyncio.Queue(maxsize=10)
        # page.on(
        #     "response",
        #     lambda res: asyncio.create_task(
        #         catch_response_info(res, follow_info_queue, "/commit/follow/user"),
        #     ),
        # )
        logger.info(f"🧭 Going to {username}'s page for following")
        await self.client.goto(
            f"/@{username.lstrip('@')}",
            page=page,
            options={"waitUntil": "networkidle0"},
        )
        time.sleep(5)
        # follow_title: str = await page.Jeval(
        #     ".follow-button",
        #     pageFunction="element => element.textContent",
        # )
        follow_button = await page.JJ('button[class*="FollowButton"]')
        print(follow_button)
        if not follow_button:
            logger.error("button not found.")
            return
        follow_button = follow_button[0]
        print(dir(follow_button))
        follow_title = await page.evaluate('item => item.textContent', follow_button)
        print(follow_title)
        logger.debug("follow title: |{}|".format(follow_title))
        # Button label "Follow" (or its Chinese variants) means not yet
        # followed; any other label is treated as already-followed.
        if follow_title.lower() not in ("follow", "关注", "關註", "關注"):
            logger.info(f"😏 {username} already followed")
            return
        # await page.click(".follow-button")
        await follow_button.click()
        # follow_info = await follow_info_queue.get()
        # if follow_info["status_code"] == 0:
        #     logger.info(f"➕ {username} followed")
        # else:
        #     logger.warning(f"⚠️ {username} probably not followed")
        await page.close()
    async def unfollow(self, username: str):
        """Unfollow *username* by clicking the follow button when it reads "Following".

        The outcome is confirmed by intercepting the "/commit/follow/user"
        API response through a response hook.
        """
        page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
        logger.debug(f"👥 Unfollow {username}")
        # The hook below pushes the parsed commit-request result onto this queue.
        unfollow_info_queue: asyncio.Queue = asyncio.Queue(maxsize=1)
        page.on(
            "response",
            lambda res: asyncio.create_task(
                catch_response_info(res, unfollow_info_queue, "/commit/follow/user"),
            ),
        )
        logger.info(f"🧭 Going to {username}'s page for unfollowing")
        await self.client.goto(
            f"/@{username.lstrip('@')}",
            page=page,
            options={"waitUntil": "networkidle0"},
        )
        follow_title: str = await page.Jeval(
            ".follow-button",
            pageFunction="element => element.textContent",
        )
        # Label "Following" means the account is currently followed.
        # NOTE(review): the early return below leaves the page open — confirm intentional.
        if follow_title.lower() != "following":
            logger.info(f"😏 {username} already unfollowed")
            return
        await page.click(".follow-button")
        # status_code == 0 appears to be the API's success marker — TODO confirm.
        unfollow_info = await unfollow_info_queue.get()
        if unfollow_info["status_code"] == 0:
            logger.info(f"➖ {username} unfollowed")
        else:
            logger.warning(f"⚠️ {username} probably not unfollowed")
        await page.close()
async def feed(self, username: str, amount: int, page=None):
self.client.delete_cache_files()
if not page:
page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
logger.debug(f"📨 Request {username} feed")
result: List[dict] = []
page.on(
"response",
lambda res: asyncio.create_task(catch_response_and_store(res, result, "/post/item_list/")),
)
_ = await self.client.goto(f"/{username}", page=page, options={"waitUntil": "networkidle0"})
logger.debug(f"📭 Got {username} feed")
time.sleep(5)
# await page.waitForSelector(".video-feed-item", options={"visible": True})
pbar = tqdm(total=amount, desc=f"📈 Getting {username} feed")
pbar.n = min(len(result), amount)
pbar.refresh()
attempts = 0
last_result = len(result)
while len(result) < amount:
logger.debug("🖱 Trying to scroll to last video item")
# await page.evaluate(
# """
# document.querySelector('.video-feed-item:last-child')
# .scrollIntoView();
# """,
# )
# await page.waitFor(1_000)
# elements = await page.JJ(".video-feed-item")
# logger.debug(f"🔎 Found {len(elements)} items for clear")
# pbar.n = min(len(result), amount)
# pbar.refresh()
# if last_result == len(result):
# attempts += 1
# else:
# attempts = 0
# if attempts > 10:
# pbar.clear()
# pbar.total = len(result)
# logger.info(
# f"⚠️ After 10 attempts found {len(result)} videos. "
# f"Probably some videos are private",
# )
# break
# last_result = len(result)
# if len(elements) < 500:
# logger.debug("🔻 Too less for clearing page")
# continue
# await page.JJeval(
# ".video-feed-item:not(:last-child)",
# pageFunction="(elements) => elements.forEach(el => el.remove())",
# )
# logger.debug(f"🎉 Cleaned {len(elements) - 1} items from page")
logger.debug("wait for video")
await page.waitFor(30_000)
await page.close()
pbar.close()
return result[:amount]
    async def feed2(self, username: str, amount: int, page=None):
        """Alternative feed scraper: extract post ids from the rendered HTML.

        Unlike :meth:`feed`, this parses the page source with the module-level
        ``pattern_user_post_id`` regex instead of hooking XHR responses.

        :param username: profile path component (e.g. "@someuser")
        :param amount: target number of items (only bounds the wait loop)
        :param page: optional existing pyppeteer Page; a new one is created when omitted
        :return: list of regex matches found in the page source
        """
        if not page:
            page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
        logger.debug(f"📨 Request {username} feed")
        result: List[dict] = []
        # page.on(
        #     "response",
        #     lambda res: asyncio.create_task(catch_response_and_store(res, result, "/post/item_list/")),
        # )
        _ = await self.client.goto(f"/{username}", page=page, options={"waitUntil": "networkidle0"})
        logger.debug(f"📭 Got {username} feed")
        # NOTE(review): blocking sleep inside a coroutine stalls the event loop —
        # consider `await asyncio.sleep(5)`.
        time.sleep(5)
        # await page.waitForSelector(".video-feed-item", options={"visible": True})
        pbar = tqdm(total=amount, desc=f"📈 Getting {username} feed")
        pbar.n = min(len(result), amount)
        pbar.refresh()
        attempts = 0
        last_result = len(result)
        idx = 0
        while len(result) < amount:
            logger.debug("🖱 Trying to scroll to last video item")
            idx += 1
            text = await page.content()
            elems = pattern_user_post_id.findall(text)
            if elems:
                for elem in elems:
                    print(elem)
                    result.append(elem)
                print("aproche")
                # NOTE(review): this early return skips page.close()/pbar.close()
                # and may return more than *amount* items — confirm intentional.
                return result
            logger.debug("wait for video")
            await page.waitFor(30_000)
        await page.close()
        pbar.close()
        return result
    async def get_comments(self, username: str, media_id: int, amount: int, page=None,
                           dbSession=None, dbobj=None):
        """Scrape up to *amount* comments from a video page by parsing its HTML.

        Comments are extracted with the module-level ``pattern_comment_area`` /
        ``pattern_comments`` regexes and de-duplicated per commenter.  When
        *dbSession* and *dbobj* are supplied, unseen comments are persisted.

        :param username: video owner's account name (without '@')
        :param media_id: numeric id of the video
        :param amount: maximum number of comments to collect
        :param page: optional existing pyppeteer Page; a new one is created when omitted
        :param dbSession: optional DB session used to persist comments
        :param dbobj: optional mapped class used for the persisted rows
        :return: list of regex match tuples, truncated to *amount* (or [] / None on failure)
        """
        try:
            self.client.delete_cache_files()
            if not page:
                page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
            logger.debug(f"📨 Request {username} feed")
            page.setDefaultNavigationTimeout(0)
            result: List[dict] = []
            # ret maps commenter -> [first-field, set of comment texts already seen]
            ret = {}
            # page.on(
            #     "response",
            #     lambda res: asyncio.create_task(catch_response_and_store(res, result, "/comment/list/")),
            # )
            _ = await self.client.goto(f"/@{username}/video/{media_id}?lang=en&is_copy_url=1&is_from_webapp=v1", page=page, options={"waitUntil": "networkidle0"})
            logger.debug(f"📭 Got {username} feed")
            # Clicking this overlay opens the comment panel.
            elem = await page.JJ('span[class*="event-delegate-mask"]')
            print(elem)
            if not elem:
                print("video comment button not found")
                return
            await elem[0].click()
            # input("test")  (debug pause)
            await asyncio.sleep(5)
            pbar = tqdm(total=amount, desc=f"📈 Getting {username} {media_id} comments")
            pbar.n = min(len(result), amount)
            pbar.refresh()
            attempts = 0
            last_result = len(result)
            while len(result) < amount:
                logger.debug("🖱 Trying to scroll to last comment item")
                try:
                    # Scroll the last comment into view to trigger lazy loading.
                    await page.evaluate(
                        """
                        document.querySelector('.comments > .comment-item:last-child')
                            .scrollIntoView();
                        """,
                    )
                    # last_child_selector = ".video-feed-container > .lazyload-wrapper:last-child"
                except pyppeteer.errors.ElementHandleError as e:
                    print(e)
                    # return result[:amount]
                    pass
                logger.debug("get page source")
                content = await page.content()
                # with open("comments_html.html", "w", encoding="utf-8") as fout:
                #     fout.write(content)
                comment_area = pattern_comment_area.search(content)
                if not comment_area:
                    logger.error("comment area not found!!!")
                    return []
                logger.debug("parser page source: {}".format(len(content)))
                for e in pattern_comments.findall(comment_area.group(1)):
                    print(e)
                    # result.append(e)
                    # Skip duplicate (commenter, text) pairs across scroll rounds.
                    if ret.get(e[0]):
                        if e[2] in ret[e[0]][1]:
                            continue
                        else:
                            result.append(e)
                    else:
                        ret[e[0]] = [e[1], set()]
                        ret[e[0]][1].add(e[2])
                        result.append(e)
                    if dbSession:
                        text = trans_char(e[2])
                        # Skip comments already stored for this publisher.
                        if dbSession.query(dbobj).filter(dbobj.PingLunZhe==e[0], dbobj.PingLunNeiRong==text, dbobj.FaBuZhe==username).first():
                            logger.debug("saved comment, skip")
                            continue
                        logger.debug("Save commenter: {} de emoji: {}".format(e, text))
                        obj = dbobj(PingLunZhe=e[0], PingLunNeiRong=text, GuanJianCi="", FaBuZhe=username,
                                    TianJiaShiJian=get_dt_str(), ShiFouGuanZhu=False)
                        dbSession.add(obj)
                        dbSession.commit()
                await page.waitFor(1_000)
                print(result)
                pbar.n = min(len(result), amount)
                pbar.refresh()
                # Count consecutive rounds that produced no new comments.
                if last_result == len(result):
                    attempts += 1
                else:
                    attempts = 0
                if attempts > 5:
                    pbar.clear()
                    pbar.total = len(result)
                    logger.info(
                        f"⚠️ After 10 attempts found {len(result)} videos. "
                        f"Probably some videos are private",
                    )
                    break
                last_result = len(result)
                await page.waitFor(30_000)
            await page.close()
            pbar.close()
            return result[:amount]
        except pyppeteer.errors.TimeoutError as e:
            print(e)
            logger.error(traceback.format_exc())
            input("enter to exit")
async def comment(self, username: str, media_id: int, content: str, page=None):
self.client.delete_cache_files()
if not page:
page: Page = await self.client.new_page(blocked_resources=["image", "media", "font"])
logger.debug(f"📨 Request {username} feed")
page.setDefaultNavigationTimeout(0)
_ = await self.client.goto(f"/@{username}/video/{media_id}?lang=en&is_copy_url=1&is_from_webapp=v1", page=page, options={"waitUntil": "networkidle0"})
logger.debug(f"📭 Got {username} feed")
# elem = await page.JJ('span[class*="event-delegate-mask"]')
elem = await page.xpath('//span[@data-e2e="comment-icon"]')
print(elem)
if not elem:
print("video comment button not found")
return False
await elem[0].click()
# input("测试")
await asyncio.sleep(5)
comment_input = await page.J('div[class*="DivCommentContainer"]')
print(comment_input)
await comment_input.click()
await page.keyboard.type(content)
await asyncio.sleep(3)
post = await page.xpath('//div[contains(text(), "Post")]')
print(post)
await post[0].click()
await asyncio.sleep(3)
await page.close()
# comment_submit = await page.J('div[class*="post-container"]')
# print(comment_submit)
# await comment_submit.click()
async def upload_video(self, video: str, title: str = "nice one", is_private: bool = False, page=None):
# self.client.delete_cache_files()
if not page:
page: Page = await self.client.new_page(blocked_resources=[])
page.setDefaultNavigationTimeout(0)
if not os.path.exists(video):
logger.error("Video file not found: {}".format(video))
return False
video = os.path.abspath(video)
logger.debug("video file absolute path: {}".format(video))
_ = await self.client.goto(f"/upload?lang=en", page=page, options={"waitUntil": "networkidle0"}, timeout=60000)
time.sleep(10)
elem = await page.JJ('input[name="upload-btn"]')
print(elem)
if not elem:
print("video comment button not found")
return False
await elem[0].uploadFile(video)
# input("测试")
await asyncio.sleep(5)
# disable
# <button type="button" class="tiktok-btn-pc tiktok-btn-pc-large tiktok-btn-pc-primary tiktok-btn-pc-disabled">發佈</button>
# enable
# <button type="button" class="tiktok-btn-pc tiktok-btn-pc-large tiktok-btn-pc-primary">發佈</button>
while True:
button = await page.J('button[class*="tiktok-btn-pc-disabled"]')
print("submit button ", button)
if not button:
logger.debug("submit button is enabled")
break
else:
logger.debug("submit button is disabled")
time.sleep(2)
title_input = await page.J('div[class*="public-DraftStyleDefault-block"]')
print(title_input)
await title_input.click()
await page.keyboard.type(title)
await asyncio.sleep(3)
button = await page.J('button[class*="tiktok-btn-pc-primary"]')
print("submit: ", button)
await button.click()
await asyncio.sleep(3)
return True
async def message(self, username, message, page=None):
if not page:
page: Page = await self.client.new_page(blocked_resources=["media"])
page.setDefaultNavigationTimeout(0)
logger.info("Comment to: {username} message: {message}")
# https://www.tiktok.com/@karenmanlangit?lang=en
_ = await self.client.goto("/@{}?lang=en".format(username), page=page, options={"waitUntil": "networkidle0"}, timeout=60000)
follow_title: str = await page.Jeval(
".follow-button",
pageFunction="element => element.textContent",
)
logger.debug("follow title: |{}|".format(follow_title))
if follow_title.lower() in ("follow", "关注", "關註", "關注"):
logger.info(f"😏 {username} not follow")
await page.click(".follow-button")
time.sleep(2)
await page.click(".message-button")
time.sleep(5)
while True:
try:
await page.click(".comment-input-inner-wrapper")
break
except pyppeteer.errors.PageError as e:
print(e)
time.sleep(2)
continue
time.sleep(1)
await page.keyboard.type(message)
time.sleep(2)
await page.click(".send-button")
return | 36.114286 | 180 | 0.526526 | 20,557 | 0.949252 | 0 | 0 | 0 | 0 | 20,396 | 0.941818 | 7,449 | 0.343969 |
b49b84a57a5a0d4b3d8673371b1ea49d228a6368 | 2,927 | py | Python | amazon-top-results.py | dlameter/amazon-top-results | f4437512f872a4f8c427042770a76cd51eba2f5c | [
"MIT"
] | null | null | null | amazon-top-results.py | dlameter/amazon-top-results | f4437512f872a4f8c427042770a76cd51eba2f5c | [
"MIT"
] | null | null | null | amazon-top-results.py | dlameter/amazon-top-results | f4437512f872a4f8c427042770a76cd51eba2f5c | [
"MIT"
] | null | null | null | import json
import time
import sys
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Loads config json file
def loadConfig(filename):
    """Read the JSON configuration file at *filename* and return its contents."""
    with open(filename, 'r') as config_file:
        data = json.load(config_file)
    return data
# Get search input element
def getSearchInput(driver):
    """Wait for the nav search bar to render, then return its <input> element."""
    wait = WebDriverWait(driver, 10)
    container = wait.until(
        EC.presence_of_element_located((By.CLASS_NAME, 'nav-search-field'))
    )
    return container.find_element_by_tag_name('input')
# Perform a search
def searchTerm(inputElem, term):
    """Type *term* into the search box and submit it with ENTER."""
    for keys in (term, Keys.RETURN):
        inputElem.send_keys(keys)
# Returns list of result candidates
def getResults(driver):
    """Return every element on the page marked as a search result item."""
    candidates = driver.find_elements_by_class_name('s-result-item')
    return candidates
# Tests the element to see if it's a result that should be checked
def validResult(result):
    """Return True when *result* looks like an organic product tile.

    A valid result must expose both the title span and the price span
    (otherwise getTitle()/getPrice() would fail later), and must not be a
    sponsored placement (class attribute containing 'AdHolder').
    """
    try:
        result.find_element_by_css_selector('div span.a-text-normal')
        result.find_element_by_css_selector('div span.a-offscreen')
    except Exception:
        # Narrowed from a bare except: a bare except also swallows
        # KeyboardInterrupt/SystemExit, hiding Ctrl-C during scraping.
        return False
    # Sponsored results are tagged with an 'AdHolder' class.
    if 'AdHolder' in result.get_attribute('class'):
        return False
    return True
# Gets the title of a result item
def getTitle(element):
    """Extract the product title text from a result element."""
    title_span = element.find_element_by_css_selector('div span.a-text-normal')
    return title_span.text
# Gets the price of a result item
def getPrice(element):
    """Extract the product price markup from a result element."""
    price_span = element.find_element_by_css_selector('div span.a-offscreen')
    return price_span.get_attribute('innerHTML')
# Main function
if __name__ == "__main__":
    if len(sys.argv) <= 1:
        print('USAGE: executable "<search phrase>"')
        sys.exit()
    searchPhrase = sys.argv[1]

    # Read config file
    config = loadConfig('config.json')
    address = config['address']
    driverPath = config['driver']

    # Build headless Chrome options
    chrome_options = Options()
    chrome_options.add_argument('--headless')

    # Defined before the try block so the finally clause can test it safely:
    # previously, a failure inside webdriver.Chrome() left `driver` unbound
    # and the finally block raised NameError.
    driver = None
    try:
        # Initialize webdriver
        driver = webdriver.Chrome(executable_path = driverPath, chrome_options=chrome_options)
        driver.get(address)

        searchInput = getSearchInput(driver)
        searchTerm(searchInput, searchPhrase)

        # Find individual search results and print the first 10 valid ones
        results = getResults(driver)
        count = 10
        position = 0
        while count > 0:
            if validResult(results[position]):
                print(getPrice(results[position]), getTitle(results[position]))
                count -= 1
            position += 1
    except Exception as exc:
        print("Program encountered an error: " + str(exc))
    finally:
        # Close webdriver only if it was actually created
        if driver is not None:
            driver.close()
| 28.980198 | 98 | 0.688077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 762 | 0.260335 |
b49ba128005050a703dec74a0221d62f301b5b8c | 1,656 | py | Python | plugins/vpn.py | alobbs/autome | faf4c836ccb896d03020aa0dbb2c1332ffc791a2 | [
"MIT"
] | null | null | null | plugins/vpn.py | alobbs/autome | faf4c836ccb896d03020aa0dbb2c1332ffc791a2 | [
"MIT"
] | null | null | null | plugins/vpn.py | alobbs/autome | faf4c836ccb896d03020aa0dbb2c1332ffc791a2 | [
"MIT"
] | null | null | null | import os
import plugin
import pluginconf
util = plugin.get("util")
FILE_COUNTER = "~/.vpn_counter"
FILE_VPN_SH = "~/.vpn_sh"
EXPECT_SCRIPT = """#!/usr/bin/expect
spawn {cmd}
expect -exact "Enter Auth Username:"
send -- "{user}\\n"
expect -exact "Enter Auth Password:"
send -- "{password}\\n"
interact
"""
class VPN:
    """Establish an OpenVPN connection that authenticates with a PIN plus an
    OATH (counter-based) one-time password, driven through an expect script."""

    def __init__(self):
        # Read configuration
        self.conf = pluginconf.get('vpn')

    def is_connected(self):
        """Return True when a running process references the configured OpenVPN profile."""
        with os.popen("ps aux") as f:
            pcs = f.read()
        return self.conf['openvpn_conf'] in pcs

    def get_password(self):
        """Build the next one-time password: configured PIN + oathtool HOTP code.

        Reads the event counter from FILE_COUNTER, generates the code with the
        external ``oathtool`` binary, then persists the incremented counter so
        the next call produces a fresh code.
        """
        file_counter = os.path.expanduser(FILE_COUNTER)

        # Read usage counter
        with open(file_counter, 'r') as f:
            raw = f.read()
            counter = int(raw.strip()) + 1

        # OAuth
        cmd = "oathtool -b %s -c %s" % (self.conf['secret'], counter)
        with os.popen(cmd, 'r') as f:
            code = f.read().strip()

        # Update counter
        with open(file_counter, 'w') as f:
            f.write(str(counter))

        password = "%s%s" % (self.conf['pin'], code)
        return password

    def connect(self):
        """Render EXPECT_SCRIPT with the credentials, write it to FILE_VPN_SH and run it."""
        # Compose connection script
        cmd = "sudo /usr/local/sbin/openvpn --config %s" % self.conf['openvpn_conf']
        user = self.conf['user']
        password = self.get_password()
        script = EXPECT_SCRIPT.format(cmd=cmd, user=user, password=password)

        # Write it to a file (0o770 because it embeds the password in clear text)
        vpn_script = os.path.expanduser(FILE_VPN_SH)
        with open(vpn_script, 'w+') as f:
            f.write(script)
        os.chmod(vpn_script, 0o770)

        # Run
        os.system(vpn_script)
| 24.352941 | 84 | 0.577899 | 1,342 | 0.810386 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.274758 |
b49bc707c55e530402f7bf7501d6b84e68f26a0e | 7,321 | py | Python | jasypt4py/generator.py | fareliner/jasypt4py | 6ea7cdbb4ee1e3249cc9dcadfa3c54e603614458 | [
"Apache-2.0"
] | 7 | 2018-04-04T02:56:48.000Z | 2021-09-23T01:34:57.000Z | jasypt4py/generator.py | fareliner/jasypt4py | 6ea7cdbb4ee1e3249cc9dcadfa3c54e603614458 | [
"Apache-2.0"
] | 3 | 2018-07-31T08:56:56.000Z | 2022-03-04T01:03:03.000Z | jasypt4py/generator.py | fareliner/jasypt4py | 6ea7cdbb4ee1e3249cc9dcadfa3c54e603614458 | [
"Apache-2.0"
] | 4 | 2018-07-31T08:04:01.000Z | 2021-07-07T01:55:34.000Z | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
from abc import ABCMeta, abstractmethod
from Crypto import Random
from jasypt4py.exceptions import ArgumentError
class PBEParameterGenerator(object):
    """Base class for PBE parameter generators with the PKCS#12 v1.0 helpers."""

    __metaclass__ = ABCMeta

    @staticmethod
    def adjust(a, a_off, b):
        """
        Adds ``b + 1`` into ``a`` at offset ``a_off`` with carry propagation,
        as required by the PKCS#12 v1.0 key-derivation spec (operates on the
        target array in place).

        :param a: byte[] - the target array
        :param a_off: int - offset to operate on
        :param b: byte[] - the array to add
        :return: nothing, ``a`` is modified by reference
        """
        # Start at the least significant byte with an extra +1, then ripple
        # the carry towards the most significant byte.
        carry = (b[len(b) - 1] & 0xff) + (a[a_off + len(b) - 1] & 0xff) + 1
        a[a_off + len(b) - 1] = carry & 0xff
        carry >>= 8

        for i in range(len(b) - 2, -1, -1):
            carry += (b[i] & 0xff) + (a[a_off + i] & 0xff)
            a[a_off + i] = carry & 0xff
            carry >>= 8

    @staticmethod
    def pkcs12_password_to_bytes(password):
        """
        Converts a password string to a PKCS12 v1.0 compliant byte array:
        big-endian 16-bit code units followed by two zero terminator bytes.

        :param password: str - the password as a simple string
        :return: bytearray holding the encoded password
        """
        pkcs12_pwd = bytearray((len(password) + 1) * 2)

        for i, ch in enumerate(password):
            digit = ord(ch)
            pkcs12_pwd[i * 2] = digit >> 8
            # Mask to the low byte (mirrors Bouncycastle's (byte) cast); the
            # previous code stored the full code point and raised ValueError
            # for any character above U+00FF.
            pkcs12_pwd[i * 2 + 1] = digit & 0xff

        return pkcs12_pwd
class PKCS12ParameterGenerator(PBEParameterGenerator):
    """
    Equivalent of the Bouncycastle PKCS12ParameterGenerator.

    Implements the PKCS#12 v1.0 key-derivation function (appendix B.2) to
    produce a cipher key and IV from a password, salt and iteration count.
    """

    __metaclass__ = ABCMeta

    # Supported key sizes in bits.
    KEY_SIZE_256 = 256
    KEY_SIZE_128 = 128
    DEFAULT_IV_SIZE = 128

    # PKCS#12 "ID" diversifier bytes selecting which material is derived.
    KEY_MATERIAL = 1
    IV_MATERIAL = 2
    MAC_MATERIAL = 3

    def __init__(self, digest_factory, key_size_bits=KEY_SIZE_256, iv_size_bits=DEFAULT_IV_SIZE):
        """
        :param digest_factory: object - the digest algorithm to use (e.g. SHA256 or MD5);
                               must expose ``digest_size``, ``block_size`` and ``new()``
        :param key_size_bits: int - key size in bits
        :param iv_size_bits: int - iv size in bits
        """
        super(PKCS12ParameterGenerator, self).__init__()
        self.digest_factory = digest_factory
        self.key_size_bits = key_size_bits
        self.iv_size_bits = iv_size_bits

    def generate_derived_parameters(self, password, salt, iterations=1000):
        """
        Generates the key and iv that can be used with the cipher.

        :param password: str - the password used for the key material
        :param salt: byte[] - random salt
        :param iterations: int - number of hash iterations for key material
        :return: (key, iv) tuple usable to set up the cipher (iv is None when
                 ``iv_size_bits`` is falsy)
        """
        key_size = (self.key_size_bits // 8)
        iv_size = (self.iv_size_bits // 8)

        # pkcs12 padded password (unicode byte array with 2 trailing 0x0 bytes)
        password_bytes = PKCS12ParameterGenerator.pkcs12_password_to_bytes(password)

        d_key = self.generate_derived_key(password_bytes, salt, iterations, self.KEY_MATERIAL, key_size)

        if iv_size and iv_size > 0:
            d_iv = self.generate_derived_key(password_bytes, salt, iterations, self.IV_MATERIAL, iv_size)
        else:
            d_iv = None
        return d_key, d_iv

    def generate_derived_key(self, password, salt, iterations, id_byte, key_size):
        """
        Generate a derived key as per PKCS12 v1.0 spec (appendix B.2).

        :param password: bytearray - pkcs12 padded password (unicode byte array with 2 trailing 0x0 bytes)
        :param salt: bytearray - random salt
        :param iterations: int - number of hash iterations for key material
        :param id_byte: int - the material diversifier (KEY/IV/MAC_MATERIAL)
        :param key_size: int - the key size in bytes (e.g. AES is 256/8 = 32, IV is 128/8 = 16)
        :return: the digested pkcs12 key as bytes
        """
        # u = digest output length, v = digest input block length (spec names).
        u = int(self.digest_factory.digest_size)
        v = int(self.digest_factory.block_size)

        d_key = bytearray(key_size)

        # Step 1: D is v copies of the diversifier byte.
        D = bytearray(v)

        for i in range(0, v):
            D[i] = id_byte

        # Step 2: S = salt repeated/truncated to a multiple of v bytes.
        if salt and len(salt) != 0:
            salt_size = len(salt)
            s_size = v * ((salt_size + v - 1) // v)
            S = bytearray(s_size)

            for i in range(s_size):
                S[i] = salt[i % salt_size]
        else:
            S = bytearray(0)

        # Step 3: P = password repeated/truncated to a multiple of v bytes.
        if password and len(password) != 0:
            password_size = len(password)
            p_size = v * ((password_size + v - 1) // v)

            P = bytearray(p_size)

            for i in range(p_size):
                P[i] = password[i % password_size]
        else:
            P = bytearray(0)

        # Step 4: I = S || P.
        I = S + P

        B = bytearray(v)

        # Step 5: number of digest blocks needed to cover key_size bytes.
        c = ((key_size + u - 1) // u)

        # Step 6
        for i in range(1, c + 1):
            # Step 6 - a: A = digest^iterations(D || I)
            digest = self.digest_factory.new()
            digest.update(bytes(D))
            digest.update(bytes(I))

            A = digest.digest()  # bouncycastle now resets the digest, we will create a new digest

            for j in range(1, iterations):
                A = self.digest_factory.new(A).digest()

            # Step 6 - b: B = A repeated to fill one input block.
            for k in range(0, v):
                B[k] = A[k % u]

            # Step 6 - c: add B + 1 into each v-byte chunk of I (in place).
            for j in range(0, (len(I) // v)):
                self.adjust(I, j * v, B)

            # Copy A into the output, truncating the final block as needed.
            if i == c:
                for j in range(0, key_size - ((i - 1) * u)):
                    d_key[(i - 1) * u + j] = A[j]
            else:
                for j in range(0, u):
                    d_key[(i - 1) * u + j] = A[j]

        # we string encode as Crypto functions need strings
        return bytes(d_key)
class SaltGenerator(object):
    """
    Base for a salt generator.

    Subclasses implement :meth:`generate_salt`; ``salt_block_size`` is the
    expected salt length in bytes.
    """
    __metaclass__ = ABCMeta

    # Default salt length in bytes.
    DEFAULT_SALT_SIZE_BYTE = 16

    def __init__(self, salt_block_size=DEFAULT_SALT_SIZE_BYTE):
        self.salt_block_size = salt_block_size

    @abstractmethod
    def generate_salt(self):
        """Return the salt as a bytearray; implemented by subclasses."""
        pass
class RandomSaltGenerator(SaltGenerator):
    """
    A basic random salt generator.
    """
    __metaclass__ = ABCMeta

    def __init__(self, salt_block_size=SaltGenerator.DEFAULT_SALT_SIZE_BYTE, **kwargs):
        """
        :param salt_block_size: the salt block size in bytes
        """
        super(RandomSaltGenerator, self).__init__(salt_block_size)

    def generate_salt(self):
        """Produce a fresh random salt of ``salt_block_size`` bytes."""
        random_bytes = Random.get_random_bytes(self.salt_block_size)
        return bytearray(random_bytes)
class FixedSaltGenerator(SaltGenerator):
    """
    A salt generator that always returns one fixed, caller-supplied salt.
    """
    __metaclass__ = ABCMeta

    def __init__(self, salt_block_size=SaltGenerator.DEFAULT_SALT_SIZE_BYTE, salt=None, **kwargs):
        """
        :param salt_block_size: the salt block size in bytes
        :param salt: the fixed salt value, as str (utf-8 encoded) or bytearray
        """
        super(FixedSaltGenerator, self).__init__(salt_block_size)

        if not salt:
            raise ArgumentError('salt not provided')

        # Normalise the supplied salt to a bytearray.
        if isinstance(salt, bytearray):
            normalized = salt
        elif isinstance(salt, str):
            normalized = bytearray(salt, 'utf-8')
        else:
            raise TypeError('salt must either be a string or bytearray but not %s' % type(salt))
        self.salt = normalized

    def generate_salt(self):
        """Return the fixed salt supplied at construction time."""
        return self.salt
| 30.377593 | 106 | 0.584483 | 7,093 | 0.968857 | 0 | 0 | 1,218 | 0.166371 | 0 | 0 | 2,298 | 0.313892 |
b49c1ea0938a5f26e07c3b56e99b431ee0d681c5 | 2,862 | py | Python | __init__.py | zznop/bn-kconfig-recover | 3e30cc6b3e5e273674a3ce6bca6c3bc60ac22f11 | [
"MIT"
] | 18 | 2022-01-02T16:48:25.000Z | 2022-03-16T12:52:39.000Z | __init__.py | zznop/bn-kconfig-recover | 3e30cc6b3e5e273674a3ce6bca6c3bc60ac22f11 | [
"MIT"
] | null | null | null | __init__.py | zznop/bn-kconfig-recover | 3e30cc6b3e5e273674a3ce6bca6c3bc60ac22f11 | [
"MIT"
] | 3 | 2022-01-02T16:48:43.000Z | 2022-01-04T17:08:11.000Z | """
Binary Ninja plugin for recovering kernel build configuration settings using BNIL
"""
import argparse
import logging
from binaryninja import (BinaryViewType, BinaryView, PluginCommand,
SaveFileNameField, get_form_input, BackgroundTaskThread)
class RecoverKConfigBackground(BackgroundTaskThread):
    """Class for running kernel configuration recovery in background
    """

    def __init__(self, view: BinaryView, outpath: str) -> None:
        # False -> the task is not user-cancellable.
        BackgroundTaskThread.__init__(self, 'Recovering Linux kernel configuration', False)
        self.outpath = outpath
        self.view = view

    def run(self):
        """Run analysis task: re-run BN analysis, recover the kconfig options
        and save them to ``self.outpath``.
        """
        self.view.reanalyze()
        self.view.update_analysis_and_wait()
        # KConfigRecover/save_kconfig are imported at the bottom of this module
        # (absolute when run as a script, relative when loaded as a plugin).
        kconfigr = KConfigRecover(self.view)
        config = kconfigr.recover()
        save_kconfig(config, self.outpath)
        # Clear the progress text, presumably to mark completion in the UI — TODO confirm.
        self.progress = ""
def run_from_ui(view: BinaryView) -> None:
    """Run as a plugin under the UI

    Prompts for an output path, then launches the recovery as a Binary Ninja
    background task so the UI stays responsive.

    Args:
        view: Binary view
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    config_field = SaveFileNameField('Configuration Output Path')
    get_form_input([config_field], 'Kernel Configuration Recovery Options')
    # Fall back to a default filename when the user leaves the field empty.
    outpath = 'generated.config'
    if config_field.result != '':
        outpath = config_field.result

    kconfig_task = RecoverKConfigBackground(view, outpath)
    kconfig_task.start()
def parse_args() -> argparse.Namespace:
    """Parses command line arguments.

    Returns:
        Parsed command line arguments.
    """
    parser = argparse.ArgumentParser()
    positionals = (
        ('bndb', 'File path to kernel ELF or Binary Ninja database'),
        ('kconfig', 'File path to save recovered kernel configuration'),
    )
    for name, description in positionals:
        parser.add_argument(name, help=description)
    parser.add_argument('-d',
                        '--debug',
                        action='store_true',
                        help='Enable debug logging')
    return parser.parse_args()
def run_headless() -> None:
    """Parse command line arguments and run app.

    Opens the kernel binary/database, waits for Binary Ninja analysis, and
    kicks off the recovery task in the background.
    """
    args = parse_args()
    logger = logging.getLogger()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    logging.info('Opening "%s" and getting view...', args.bndb)
    view = BinaryViewType.get_view_of_file(args.bndb)
    logging.info('Running BN analysis, this may take some time...')
    kconfig_task = RecoverKConfigBackground(view, args.kconfig)
    kconfig_task.start()
if __name__ == '__main__':
    # Headless invocation: the module is executed directly, so the sibling
    # kconfig module is importable absolutely.
    from kconfig import KConfigRecover, save_kconfig
    run_headless()
else:
    # Loaded as a Binary Ninja plugin package: use the relative import and
    # register the UI command.
    from .kconfig import KConfigRecover, save_kconfig
    PluginCommand.register(
        "Recover Linux kernel config",
        "Analyze Linux kernel binary and recover kernel configuration options",
        run_from_ui,
    )
| 29.8125 | 91 | 0.667016 | 631 | 0.220475 | 0 | 0 | 0 | 0 | 0 | 0 | 892 | 0.31167 |
b49d1896b18fa3dac7c56c26e4f2b64e748d5ed2 | 182 | py | Python | openapi_core/schema/operations/exceptions.py | grktsh/openapi-core | d4ada7bcbb9b13f5c5dd090988c35be7a0d141b7 | [
"BSD-3-Clause"
] | null | null | null | openapi_core/schema/operations/exceptions.py | grktsh/openapi-core | d4ada7bcbb9b13f5c5dd090988c35be7a0d141b7 | [
"BSD-3-Clause"
] | null | null | null | openapi_core/schema/operations/exceptions.py | grktsh/openapi-core | d4ada7bcbb9b13f5c5dd090988c35be7a0d141b7 | [
"BSD-3-Clause"
] | null | null | null | from openapi_core.schema.exceptions import OpenAPIMappingError
class OpenAPIOperationError(OpenAPIMappingError):
pass
class InvalidOperation(OpenAPIOperationError):
pass
| 18.2 | 62 | 0.835165 | 113 | 0.620879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b49f409da0defa952f5265984dc68c10a3ae4ba5 | 1,811 | py | Python | rlkit/envs/non_mujoco_half_cheetah_vel.py | hammer-wang/FOCAL-ICLR | 4d19149f86acc1d6b987c93cdd3a9d957535c5e3 | [
"MIT"
] | 24 | 2021-03-24T07:14:52.000Z | 2022-03-17T08:15:44.000Z | rlkit/envs/non_mujoco_half_cheetah_vel.py | hammer-wang/FOCAL-ICLR | 4d19149f86acc1d6b987c93cdd3a9d957535c5e3 | [
"MIT"
] | 4 | 2021-06-18T16:38:38.000Z | 2022-03-18T11:04:45.000Z | rlkit/envs/non_mujoco_half_cheetah_vel.py | hammer-wang/FOCAL-ICLR | 4d19149f86acc1d6b987c93cdd3a9d957535c5e3 | [
"MIT"
] | 6 | 2021-04-12T18:49:47.000Z | 2021-09-07T05:33:22.000Z | import numpy as np
from gym import spaces
from gym import Env
#from rlkit.envs.mujoco.half_cheetah import HalfCheetahEnv
from rlkit.envs import register_env
@register_env('cheetah-vel')
class HalfCheetahVelEnv(Env):
    """Non-MuJoCo stand-in for the multi-task half-cheetah velocity environment.

    Each task is a target velocity sampled uniformly from [0, 3]; ``step`` is a
    placeholder and observations are random 20-dim vectors.
    """

    # NOTE(review): mutable default `task={}` is shared across instances — confirm harmless here.
    def __init__(self, task={}, n_tasks=2, randomize_tasks=True, max_episode_steps=200):
        self._task = task
        self.tasks = self.sample_tasks(n_tasks)
        self._goal_vel = self.tasks[0].get('velocity', 0.0)
        self._goal = self._goal_vel
        # 20-dim observation, 6-dim action (matching half-cheetah's sizes).
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(20,))
        self.action_space = spaces.Box(low=-1, high=1, shape=(6,))
        self._max_episode_steps = max_episode_steps
        self._step = 0

    def step(self, action):
        # Placeholder: this stub environment has no dynamics.
        print("placeholder! no env no step")
        pass

    def sample_tasks(self, num_tasks):
        """Sample *num_tasks* target velocities (fixed seed for reproducibility)."""
        np.random.seed(1337)
        velocities = np.random.uniform(0.0, 3.0, size=(num_tasks,))
        tasks = [{'velocity': velocity} for velocity in velocities]
        return tasks

    def get_all_task_idx(self):
        """Return the range of valid task indices."""
        return range(len(self.tasks))

    def reset(self):
        return self.reset_model()

    def reset_model(self):
        # reset to a random location on the unit square
        self._state = np.random.uniform(-1., 1., size=(20,))
        self._step = 0
        return self._get_obs()

    def _get_obs(self):
        # Return a copy so callers cannot mutate internal state.
        return np.copy(self._state)

    def reset_task(self, idx):
        """Switch to task *idx* and reset the environment."""
        self._goal_idx = idx
        self._task = self.tasks[idx]
        self._goal_vel = self._task['velocity']
        self._goal = self._goal_vel
        self.reset()

    def viewer_setup(self):
        print('no viewer')
        pass

    def render(self):
        print('no render')
        pass
| 28.746032 | 89 | 0.605191 | 1,611 | 0.889564 | 0 | 0 | 1,641 | 0.906129 | 0 | 0 | 201 | 0.110988 |
b49f48a21b3b551b45af59b6469d669944ac7823 | 6,812 | py | Python | src/openprocurement/planning/api/validation.py | pontostroy/api | 5afdd3a62a8e562cf77e2d963d88f1a26613d16a | [
"Apache-2.0"
] | 3 | 2020-03-13T06:44:23.000Z | 2020-11-05T18:25:29.000Z | src/openprocurement/planning/api/validation.py | pontostroy/api | 5afdd3a62a8e562cf77e2d963d88f1a26613d16a | [
"Apache-2.0"
] | 2 | 2021-03-25T23:29:58.000Z | 2022-03-21T22:18:37.000Z | src/openprocurement/planning/api/validation.py | pontostroy/api | 5afdd3a62a8e562cf77e2d963d88f1a26613d16a | [
"Apache-2.0"
] | 3 | 2020-10-16T16:25:14.000Z | 2021-05-22T12:26:20.000Z | # -*- coding: utf-8 -*-
from openprocurement.api.validation import (
validate_json_data,
validate_data,
validate_accreditation_level,
validate_accreditation_level_mode,
)
from openprocurement.api.utils import update_logging_context, error_handler, upload_objects_documents
from openprocurement.planning.api.models import Plan, Milestone
from openprocurement.planning.api.constants import PROCEDURES
from itertools import chain
from openprocurement.api.utils import get_now
from openprocurement.api.constants import PLAN_ADDRESS_KIND_REQUIRED_FROM
from copy import deepcopy
def validate_plan_data(request):
    """Validate the request body for plan creation.

    Builds the Plan model from the JSON payload, checks the broker's
    accreditation level and mode, and verifies that the procuringEntity kind
    permits the requested tender procurement method type.

    :return: the validated data dict
    """
    update_logging_context(request, {"plan_id": "__new__"})
    data = validate_json_data(request)
    # The model instance is built first so accreditation requirements can be read from it.
    model = request.plan_from_data(data, create=False)
    validate_plan_accreditation_level(request, model)
    data = validate_data(request, model, data=data)
    validate_plan_accreditation_level_mode(request)
    validate_tender_procurement_method_type(request)
    return data
def validate_plan_accreditation_level(request, model):
    """Check the broker has an accreditation level allowed to create plans."""
    levels = model.create_accreditations
    validate_accreditation_level(request, levels, "plan", "plan", "creation")
def validate_plan_accreditation_level_mode(request):
    """Check the accreditation-level mode of the payload for plan creation."""
    mode = request.validated["data"].get("mode")
    validate_accreditation_level_mode(request, mode, "plan", "plan", "creation")
def validate_tender_procurement_method_type(request):
    """Ensure procuringEntity.kind is allowed to publish the requested
    tender procurementMethodType; adds a 403 error otherwise."""
    procedures = deepcopy(PROCEDURES)
    if get_now() >= PLAN_ADDRESS_KIND_REQUIRED_FROM:
        procedures[""] = ("centralizedProcurement", )

    all_method_types = list(chain(*procedures.values()))
    # Every kind except "defense" is barred from the defense-only procedure.
    non_defense_method_types = [
        method for method in all_method_types if method != 'aboveThresholdUA.defense'
    ]

    kind_to_allowed = {
        "defense": all_method_types,
        "other": ["belowThreshold", "reporting"],
    }
    for regular_kind in ("general", "special", "central", "authority", "social"):
        kind_to_allowed[regular_kind] = non_defense_method_types

    data = request.validated["data"]
    kind = data.get("procuringEntity", {}).get("kind", "")
    method_type = data.get("tender", {}).get("procurementMethodType", "")
    allowed = kind_to_allowed.get(kind)

    # The restriction only applies after the address-kind cut-over date.
    if allowed and get_now() >= PLAN_ADDRESS_KIND_REQUIRED_FROM:
        if method_type not in allowed:
            request.errors.add(
                "procuringEntity", "kind",
                "procuringEntity with {kind} kind cannot publish this type of procedure. "
                "Procurement method types allowed for this kind: {methods}.".format(
                    kind=kind, methods=", ".join(allowed)
                )
            )
            request.errors.status = 403
def validate_patch_plan_data(request):
    """Validate a partial (PATCH) plan payload against the Plan model."""
    return validate_data(request, Plan, True)
def validate_plan_has_not_tender(request):
    """Reject the request (422) when the plan is already linked to a tender."""
    plan = request.validated["plan"]
    if not plan.tender_id:
        return
    request.errors.add("data", "tender_id", u"This plan has already got a tender")
    request.errors.status = 422
    raise error_handler(request.errors)
def validate_plan_with_tender(request):
    """Forbid (HTTP 422) edits to procuringEntity / budget.breakdown once a tender exists."""
    plan = request.validated["plan"]
    if not plan.tender_id:
        return
    payload = request.validated["json_data"]
    locked_fields = []
    if "procuringEntity" in payload:
        locked_fields.append("procuringEntity")
    if "budget" in payload and "breakdown" in payload["budget"]:
        locked_fields.append("budget.breakdown")
    for field in locked_fields:
        request.errors.add("data", field, "Changing this field is not allowed after tender creation")
    if request.errors:
        request.errors.status = 422
        raise error_handler(request.errors)
def validate_plan_not_terminated(request):
    """Reject (HTTP 422) updates to plans in a terminal ("cancelled"/"complete") status."""
    status = request.validated["plan"].status
    if status not in ("cancelled", "complete"):
        return
    request.errors.add("data", "status", "Can't update plan in '{}' status".format(status))
    request.errors.status = 422
    raise error_handler(request.errors)
def validate_plan_status_update(request):
    """Forbid (HTTP 422) moving a plan back into "draft" from any other status."""
    requested_status = request.validated["json_data"].get("status")
    if requested_status == "draft" and request.validated["plan"].status != requested_status:
        request.errors.add("data", "status", "Plan status can not be changed back to 'draft'")
        request.errors.status = 422
        raise error_handler(request.errors)
def validate_milestone_data(request):
    """Validate a new milestone payload and attach any uploaded documents.

    The milestone model class is taken from the plan's `milestones` field, so
    the validation stays in sync with the plan schema. Returns the validated
    milestone data as produced by validate_data().
    """
    update_logging_context(request, {"milestone_id": "__new__"})
    model = type(request.plan).milestones.model_class
    milestone = validate_data(request, model)
    # Documents sent with the milestone need the milestone id in their route.
    upload_objects_documents(
        request, request.validated["milestone"],
        route_kwargs = {"milestone_id": request.validated["milestone"].id}
    )
    return milestone
def validate_patch_milestone_data(request):
    """Validate a partial (PATCH) milestone payload against the context's own model class."""
    milestone_model = type(request.context)
    return validate_data(request, milestone_model, partial=True)
def validate_milestone_author(request):
    """Ensure the milestone author matches plan.procuringEntity and is unique.

    Raises HTTP 422 if the author's identifier differs from the plan's
    procuringEntity identifier, or if that author already owns a milestone in
    an active status.
    """
    milestone = request.validated["milestone"]
    plan = request.validated["plan"]
    author = milestone.author

    def _ident_key(identifier):
        # Identifiers are compared by their (scheme, id) pair only.
        return (identifier.scheme, identifier.id)

    if _ident_key(plan.procuringEntity.identifier) != _ident_key(author.identifier):
        request.errors.add(
            "data",
            "author",
            "Should match plan.procuringEntity"
        )
        request.errors.status = 422
        raise error_handler(request.errors)

    author_key = _ident_key(author.identifier)
    has_active_duplicate = any(
        _ident_key(m.author.identifier) == author_key
        for m in plan.milestones
        if m.status in Milestone.ACTIVE_STATUSES
    )
    if has_active_duplicate:
        request.errors.add(
            "data",
            "author",
            "An active milestone already exists for this author"
        )
        request.errors.status = 422
        raise error_handler(request.errors)
def validate_milestone_status_scheduled(request):
    """Allow milestone creation only in the "scheduled" status (otherwise HTTP 422)."""
    milestone = request.validated["milestone"]
    if milestone.status == Milestone.STATUS_SCHEDULED:
        return
    message = "Cannot create milestone with status: {}".format(milestone["status"])
    request.errors.add("data", "status", message)
    request.errors.status = 422
    raise error_handler(request.errors)
| 39.375723 | 112 | 0.708456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,117 | 0.163975 |
b4a04bd98861d4add44fe78a70e2497100399370 | 1,176 | py | Python | Ago-Dic-2020/Ejemplos/clase-2.py | bryanbalderas/DAS_Sistemas | 1e31f088c0de7134471025a5730b0abfc19d936e | [
"MIT"
] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ago-Dic-2020/Ejemplos/clase-2.py | bryanbalderas/DAS_Sistemas | 1e31f088c0de7134471025a5730b0abfc19d936e | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ago-Dic-2020/Ejemplos/clase-2.py | bryanbalderas/DAS_Sistemas | 1e31f088c0de7134471025a5730b0abfc19d936e | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | # Importando librerías
from numpy import array
# Lists vs numpy arrays (note: the array coerces every element to a string)
a = array(['h', 101, 'l', 'l', 'o'])
x = ['h', 101, 'l', 'l', 'o']
print(a)
print(x)
print("Tamaño: ", len(x))
# Conditionals: normalise the int element (101 -> 'e') so the list is all strings
if isinstance(x[1], int):
    x[1] = chr(x[1])
elif isinstance(x[1], str):
    pass
else:
    raise TypeError("Tipo no soportado!. No te pases! >:c")
print(' uwu '.join(x))
# Loops
for item in x:
    print(item)
for i in range(len(x)):
    print(x[i])
for i in range(1, 10, 2):
    print(i)
# Drains the list: pops (and prints) every element from the front
while len(x):
    print(x.pop(0))
# x is already empty here, so this loop body never runs and the
# while/else branch executes immediately.
while len(x):
    print(x.pop(0))
else:
    print('F para x :C')
# List operations: rebuild the (now empty) list
x.append('H')
x.append('o')
x.append('l')
x.append('a')
x.insert(1, 'o')
# User input
print(x)
respuesta = input("Hola?")
print(respuesta)
# Arithmetic and boolean operators
print(x)
print(10.1)
print(1 + 2 - 4 * 5 / 8 % 2)
print(2 ** 5)
print(True and True)
print(False and True)
print(False or True)
print(not False)
# List comprehensions: even numbers 1-10, then primes below 101
print([i for i in range(1, 11) if i % 2 == 0])
print([j for j in range(2, 101) if all(j % i != 0 for i in range(2, j))])
print([j for j in range(2, 101) if not(j % 2 or j % 3 or j % 5)]) | 17.294118 | 73 | 0.593537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 279 | 0.236641 |
b4a0c7230303a692bcc4f7d71deebfb17b27263a | 3,751 | py | Python | constants.py | lanthony42/Snek | e463b58eeba32bd26279a57fd3a523f4fb773da7 | [
"MIT"
] | null | null | null | constants.py | lanthony42/Snek | e463b58eeba32bd26279a57fd3a523f4fb773da7 | [
"MIT"
] | null | null | null | constants.py | lanthony42/Snek | e463b58eeba32bd26279a57fd3a523f4fb773da7 | [
"MIT"
] | null | null | null | import math
# --- Window / timing ---
SCREEN_WIDTH = 1400
SCREEN_HEIGHT = 800
TEXT = (5, 5)
FPS = 60
# --- Snake geometry and movement ---
BASE_SIZE = 10
EYE_SIZE = 4
PUPIL_SIZE = EYE_SIZE - 2
BASE_SPEED = 2
MIN_DISTANCE = 1
MAX_DISTANCE = MIN_DISTANCE + 3
# Growth thresholds (presumably score needed per size/eye/pupil increase — confirm in game logic)
SIZE_INC = 15
EYE_INC = SIZE_INC * 4
PUPIL_INC = SIZE_INC * 8
GROWTH_INC = SIZE_INC * 20
# --- Boost mechanic ---
BOOST_MIN = 10
BOOST_FACTOR = 2
BOOST_DCR = 5
# --- AI / food ---
ENEMIES = 5
AI_RADIUS = 150
BOOST_RADIUS = 100
FOOD_RADIUS = 5
DEAD_FOOD_RADIUS = FOOD_RADIUS + 1
FOOD_INIT = 150
FOOD_DEATH = 4
FOOD_COLOUR = (240, 40, 40)
# --- RGB colours ---
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)
FADED = (60, 60, 160)
GREEN = (30, 180, 30)
RED = (240, 0, 0)
PURPLE = (160, 30, 160)
YELLOW = (215, 215, 70)
TAN = (215, 125, 70)
WHITE = (220, 220, 220)
ENEMY_COLOURS = [RED, GREEN, PURPLE, YELLOW, TAN]
# Misc tuning values (exact meaning not evident here — see usage sites)
BOOST_OFFSET = 40
PING_PONG = 100
class Vector:
    """A simple mutable 2-D vector with arithmetic, normalisation and lerp helpers."""

    def __init__(self, x=0.0, y=0.0):
        self.x = x
        self.y = y

    @staticmethod
    def t(vector):
        """Build a Vector from any indexable pair (tuple, list, ...)."""
        return Vector(vector[0], vector[1])

    def tuple(self):
        """Return the vector rounded to an (int, int) tuple (e.g. for pixel coords)."""
        return round(self.x), round(self.y)

    def copy(self):
        """Return an independent copy of this vector."""
        return Vector(self.x, self.y)

    def __add__(self, other):
        return Vector(self.x + other.x, self.y + other.y)

    def __iadd__(self, other):
        self.x += other.x
        self.y += other.y
        return self

    def __sub__(self, other):
        return Vector(self.x - other.x, self.y - other.y)

    def __isub__(self, other):
        self.x -= other.x
        self.y -= other.y
        return self

    def __mul__(self, other: float):
        return Vector(self.x * other, self.y * other)

    def __imul__(self, other: float):
        self.x *= other
        self.y *= other
        return self

    def __truediv__(self, other: float):
        return Vector(self.x / other, self.y / other)

    def __itruediv__(self, other: float):
        self.x /= other
        self.y /= other
        return self

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def __neg__(self):
        return Vector(-self.x, -self.y)

    def __str__(self):
        return f'({self.x}, {self.y})'

    __repr__ = __str__

    def mag_squared(self):
        """Squared magnitude; cheaper than mag() for comparisons."""
        return self.x ** 2 + self.y ** 2

    def mag(self):
        """Euclidean length of the vector."""
        return math.sqrt(self.x ** 2 + self.y ** 2)

    def normalized(self):
        """Return a new unit vector in this direction (zero vector for zero input)."""
        mag = self.mag()
        if mag > 0:
            return Vector(self.x / mag, self.y / mag)
        else:
            return Vector()

    def normalize(self):
        """Normalise in place; a zero vector is left unchanged. Returns self.

        BUG FIX: previously only the zero-magnitude branch returned self, so a
        successful normalisation returned None. Both branches now return self.
        """
        mag = self.mag()
        if mag > 0:
            self.x /= mag
            self.y /= mag
        return self

    def lerp(self, target, distance, gap=0):
        """Move this vector (in place) up to *distance* toward *target*.

        With gap > 0 the travel stops *gap* units short of the target.
        Returns (moved_distance, movement_vector); (0, Vector()) if the
        (gap-adjusted) target is already reached.
        """
        direction = target - self
        mag = direction.mag()
        if gap > 0:
            # Shorten the travel so we stop `gap` units before the target.
            mag -= gap
            direction.normalize()
            direction *= mag
        if mag <= 0:
            return 0, Vector()
        elif mag < distance:
            # Target (minus gap) is within reach: move the whole way.
            self.x += direction.x
            self.y += direction.y
            return mag, direction
        else:
            # Clamp the movement to `distance` along the direction.
            direction *= distance / mag
            self.x += direction.x
            self.y += direction.y
            return distance, direction
class Circle:
    """A coloured circle defined by a centre position and a radius."""

    def __init__(self, x=0.0, y=0.0, radius=1, position: Vector = None, colour=FOOD_COLOUR):
        # An explicit `position` vector takes precedence over the (x, y) pair.
        self.position = Vector(x, y) if position is None else position
        self.radius = radius
        self.colour = colour

    def __str__(self):
        return f'Circle(position={self.position}, radius={self.radius}, colour={self.colour})'

    __repr__ = __str__
# Initial position (presumably the player snake's spawn point — confirm in game
# setup): two base-widths in from the left edge, vertically centred.
START = Vector(BASE_SIZE * 2, SCREEN_HEIGHT // 2)
| 22.327381 | 94 | 0.564916 | 2,926 | 0.780059 | 0 | 0 | 76 | 0.020261 | 0 | 0 | 102 | 0.027193 |
b4a0edc51d50de306ea838cb4bc96583019d8526 | 831 | py | Python | app/routers/auth.py | nicolunardi/travela-server | 79537ed428c01bac90d078216c7513411b7695ad | [
"CNRI-Python"
] | null | null | null | app/routers/auth.py | nicolunardi/travela-server | 79537ed428c01bac90d078216c7513411b7695ad | [
"CNRI-Python"
] | null | null | null | app/routers/auth.py | nicolunardi/travela-server | 79537ed428c01bac90d078216c7513411b7695ad | [
"CNRI-Python"
] | null | null | null | from fastapi import APIRouter, Depends, status
from fastapi.security import OAuth2PasswordRequestForm
from sqlalchemy.orm import Session
from app.controllers.authControllers import login_user, register_user
from app.schemas.users import UserCreate
from app.schemas.tokens import Token
from app.config.database import get_db
# Shared router; the auth endpoints below are registered on it and mounted by the app.
router = APIRouter()
@router.post(
    "/register",
    status_code=status.HTTP_201_CREATED,
    response_model=Token,
    tags=["User"],
)
async def register(user: UserCreate, db: Session = Depends(get_db)):
    """Create a new user account and return an auth token (HTTP 201)."""
    return register_user(db, user)
@router.post(
    "/login",
    status_code=status.HTTP_200_OK,
    response_model=Token,
    tags=["User"],
)
async def login(
    form_data: OAuth2PasswordRequestForm = Depends(),
    db: Session = Depends(get_db),
):
    """Authenticate with OAuth2 username/password form data and return a token."""
    return login_user(form_data, db)
| 24.441176 | 69 | 0.749699 | 0 | 0 | 0 | 0 | 478 | 0.575211 | 248 | 0.298436 | 31 | 0.037304 |
b4a48258b4b4da01eee0072c9a89c865191a602a | 2,222 | py | Python | clib/links/model/recognition/vggnet.py | Swall0w/clib | 46f659783d5a0a6ec5994c3c707c1cc8a7934385 | [
"MIT"
] | 1 | 2017-08-27T00:01:27.000Z | 2017-08-27T00:01:27.000Z | clib/links/model/recognition/vggnet.py | Swall0w/clib | 46f659783d5a0a6ec5994c3c707c1cc8a7934385 | [
"MIT"
] | 49 | 2017-08-20T02:09:26.000Z | 2017-12-31T11:58:27.000Z | clib/links/model/recognition/vggnet.py | Swall0w/clib | 46f659783d5a0a6ec5994c3c707c1cc8a7934385 | [
"MIT"
] | 1 | 2017-12-08T08:31:38.000Z | 2017-12-08T08:31:38.000Z | import chainer
import chainer.functions as F
import chainer.links as L
class VGGNet(chainer.Chain):
    """
    VGGNet (VGG-16 layout: 13 conv layers + 3 fully-connected layers)
    - It takes (224, 224, 3) sized image as input
    """

    def __init__(self, n_class=1000):
        super(VGGNet, self).__init__()
        with self.init_scope():
            # Five conv blocks; each 3x3 conv keeps spatial size (stride=1, pad=1).
            self.conv1_1 = L.Convolution2D(3, 64, 3, stride=1, pad=1)
            self.conv1_2 = L.Convolution2D(64, 64, 3, stride=1, pad=1)
            self.conv2_1 = L.Convolution2D(64, 128, 3, stride=1, pad=1)
            self.conv2_2 = L.Convolution2D(128, 128, 3, stride=1, pad=1)
            self.conv3_1 = L.Convolution2D(128, 256, 3, stride=1, pad=1)
            self.conv3_2 = L.Convolution2D(256, 256, 3, stride=1, pad=1)
            self.conv3_3 = L.Convolution2D(256, 256, 3, stride=1, pad=1)
            self.conv4_1 = L.Convolution2D(256, 512, 3, stride=1, pad=1)
            self.conv4_2 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
            self.conv4_3 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
            self.conv5_1 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
            self.conv5_2 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
            self.conv5_3 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
            # Classifier head; fc6 infers its input size from the first forward pass.
            self.fc6 = L.Linear(None, 4096)
            self.fc7 = L.Linear(4096, 4096)
            self.fc8 = L.Linear(4096, n_class)

    def __call__(self, x):
        """Forward pass; returns raw class scores (logits) for input batch x."""
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        # Dropout is active in training mode only (chainer handles the switch).
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)
        return h
| 35.83871 | 72 | 0.559406 | 2,148 | 0.966697 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.032403 |
b4a6dc56ed919c7cf17e22c60123ac3000343e48 | 2,147 | py | Python | scraper.py | TheLady/say_what | 22b187e000b0ab13606c73804db0af89b945751d | [
"MIT"
] | null | null | null | scraper.py | TheLady/say_what | 22b187e000b0ab13606c73804db0af89b945751d | [
"MIT"
] | null | null | null | scraper.py | TheLady/say_what | 22b187e000b0ab13606c73804db0af89b945751d | [
"MIT"
] | null | null | null | import sqlite3
import requests
from bs4 import BeautifulSoup
from login import keys
import config
# NYT Article Search endpoint (JSON); `key` comes from login.keys.
ARTICLE_SEARCH_URL = 'http://api.nytimes.com/svc/search/v2/articlesearch.json?api-key={key}'
# Sunlight Congress API template: {method} is the resource path, {query} a
# pre-encoded query string, {key} the API key.
SUNLIGHT_CONGRESS_URL = 'http://congress.api.sunlightfoundation.com/{method}?{query}&apikey={key}'
def get_json(url):
    """GET *url* and decode the response body as JSON."""
    response = requests.get(url)
    return response.json()
def sunlight_url(base, method, query, key):
    """Fill a URL template containing {method}, {query} and {key} placeholders."""
    return base.format(
        method=method,
        query=query,
        key=key,
    )
def sunlight_query(**kwargs):
    """Encode keyword arguments (string values) as a 'k=v&k2=v2' query string."""
    pairs = [name + '=' + value for name, value in kwargs.items()]
    return '&'.join(pairs)
def save_url(url, path, name):
    """Fetch *url*, strip its HTML to plain text, and save as <path>/<name>.txt.

    Returns the path of the written file.
    """
    file_path = path + '/' + name + '.txt'
    r = requests.get(url)
    # NOTE(review): relies on BeautifulSoup's default parser choice, which can
    # vary by installed libraries — consider pinning e.g. 'html.parser'.
    text = BeautifulSoup(r.text).get_text()
    # BUG FIX: write as UTF-8 explicitly. Scraped pages routinely contain
    # non-ASCII characters, and the platform's default encoding (e.g. cp1252
    # on Windows) raises UnicodeEncodeError on them.
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(text)
    print('saved: ' + url + ', to: ' + file_path)
    return file_path
def get_bills(max_pages=50):
    """Yield bill dicts from the Sunlight Congress API, one page (50 bills) at a time.

    Iterates 113th-Congress bills, stopping once *max_pages* pages have been
    requested. Requires network access and a valid 'sunlight' API key.
    """
    loop = 0
    while loop < max_pages:
        query = sunlight_query(congress='113', per_page='50', page=str(loop))
        url = sunlight_url(SUNLIGHT_CONGRESS_URL, 'bills', query, keys['sunlight'])
        bills = get_json(url)
        for b in bills['results']:
            yield b
        # Advance to the page after the one the API says it just served.
        loop = bills['page']['page'] + 1
def main():
    """Download active 113th-Congress bills, save their text, and index them in SQLite.

    For each active bill with a published last version, the bill text is saved
    to bill_text/<number>.txt and a row is inserted into the `bills` table
    (parameterized SQL, committed per row).
    """
    conn = sqlite3.connect(config.DATABASE)
    c = conn.cursor()
    for b in get_bills(max_pages=50):
        # NOTE(review): the inner `if b.get('last_version', ...)` repeats the
        # outer condition and is therefore always true here.
        if b['history']['active'] and b.get('last_version', None):
            number = b['number']
            chamber = b['chamber']
            sponsor = b['sponsor_id']
            congress = b['congress']
            introduced_on = b['introduced_on']
            title = b['official_title']
            if b.get('last_version', None):
                link = b['last_version']['urls']['html']
                file_path = save_url(link, 'bill_text', str(number))
                c.execute("""
                    INSERT INTO bills
                    VALUES
                    (?, ?, ?, ?, ?, ?, ?);
                    """,
                    (congress, chamber, number, introduced_on, sponsor, title, file_path))
                conn.commit()
    # NOTE(review): the cursor is closed but the connection is not — consider
    # conn.close() (or a `with` block) if this runs long-lived.
    c.close()
if __name__ == '__main__':
main()
| 31.573529 | 98 | 0.566372 | 0 | 0 | 357 | 0.166279 | 0 | 0 | 0 | 0 | 548 | 0.25524 |
b4a6ed73413c10935d4c8fa52b9e4361216bd892 | 850 | py | Python | beatsaver/entity/MapTestplay.py | jundoll/bs-api-py | 1e12e1d68d6cbc4c8e25c0da961396854391be5b | [
"MIT"
] | null | null | null | beatsaver/entity/MapTestplay.py | jundoll/bs-api-py | 1e12e1d68d6cbc4c8e25c0da961396854391be5b | [
"MIT"
] | null | null | null | beatsaver/entity/MapTestplay.py | jundoll/bs-api-py | 1e12e1d68d6cbc4c8e25c0da961396854391be5b | [
"MIT"
] | null | null | null | # load modules
from dataclasses import dataclass
from typing import Union
from ...beatsaver.entity import UserDetail
# Entity definition
@dataclass(frozen=True)
class MapTestplay:
    """Immutable testplay-feedback record parsed from a Beat Saver API response."""
    createdAt: str
    feedback: str
    feedbackAt: str
    # Feedback author; None when the API response omitted the user object.
    user: Union[UserDetail.UserDetail, None]
    video: str
# Factory helpers
def gen(response):
    """Build a MapTestplay from an API response dict; None passes through as None."""
    if response is None:
        return None
    return MapTestplay(
        createdAt=response.get('createdAt'),
        feedback=response.get('feedback'),
        feedbackAt=response.get('feedbackAt'),
        user=UserDetail.gen(response.get('user')),
        video=response.get('video'),
    )
def gen_list(response):
    """Map a list of API response dicts through gen(); None passes through as None."""
    if response is None:
        return None
    return [gen(entry) for entry in response]
| 21.25 | 54 | 0.628235 | 136 | 0.16 | 0 | 0 | 160 | 0.188235 | 0 | 0 | 99 | 0.116471 |
b4a997475aae508c6542885c908cb30034b6fc05 | 1,425 | py | Python | django_settings_diff/cli.py | GreenBankObservatory/django-settings-diff | 7940ddb6dd42e1464ae58814b8acd504b3824aaf | [
"MIT"
] | null | null | null | django_settings_diff/cli.py | GreenBankObservatory/django-settings-diff | 7940ddb6dd42e1464ae58814b8acd504b3824aaf | [
"MIT"
] | null | null | null | django_settings_diff/cli.py | GreenBankObservatory/django-settings-diff | 7940ddb6dd42e1464ae58814b8acd504b3824aaf | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""docstring"""
from __future__ import print_function, unicode_literals
import argparse
import sys
from .settingsdiff import dump_settings, diff_settings
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"settings_path_1",
nargs="?",
help='The path to the .pkl file of the first (i.e. "original") settings dump',
)
parser.add_argument(
"settings_path_2",
nargs="?",
help='The path to the .pkl file of the second (i.e. "new") settings dump',
)
parser.add_argument(
"-d", "--dump", metavar="PATH", help="The path the settings will be dumped to"
)
parser.add_argument(
"-t",
"--dump-type",
choices=["txt", "pkl"],
help=(
"The file type of the dump. Only needed if type cannot be derived "
"from extension give via --dump"
),
)
args = parser.parse_args()
if args.dump:
try:
dump_settings(args.dump, args.dump_type)
except NotImplementedError as error:
print("ERROR: {}".format(error), file=sys.stderr)
sys.exit(1)
elif args.settings_path_1 and args.settings_path_2:
diff_settings(args.settings_path_1, args.settings_path_2)
else:
parser.error("Invalid argument configuration!")
if __name__ == "__main__":
main()
| 26.886792 | 86 | 0.605614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 479 | 0.33614 |
b4a9a2a19510f8c534dd1f30edf649c69077292a | 5,283 | py | Python | Project 1-1/evaluate.py | marridG/2020-EI339_Team_Project | 013cd1bf596f8079886809caee65757ecc870dfe | [
"MIT"
] | null | null | null | Project 1-1/evaluate.py | marridG/2020-EI339_Team_Project | 013cd1bf596f8079886809caee65757ecc870dfe | [
"MIT"
] | null | null | null | Project 1-1/evaluate.py | marridG/2020-EI339_Team_Project | 013cd1bf596f8079886809caee65757ecc870dfe | [
"MIT"
] | null | null | null | import os
import types
import typing
import numpy as np
import constants
import environment
import policy
import update
class EvaluateEnv:
    """Plays complete Easy21 episodes with a given action function and reports the reward."""

    def __init__(self, show_details: bool = True):
        self.env_obj = environment.Easy21Env()
        # When True, every step of the episode is printed for inspection.
        self.show_details = show_details

    def reset(self):
        """Reset the underlying environment to a fresh episode."""
        self.env_obj.reset()

    def evaluate(self, action_func: types.MethodType) -> int:
        """Play one episode using *action_func*(state) -> action; return the final reward.

        Reward is whatever the environment returns on the terminal step
        (-99 is a sentinel that only survives if the start state is already
        terminal — presumably never the case; confirm with Easy21Env).
        """
        self.reset()
        state_current = self.env_obj.observe()
        reward = -99
        if self.show_details:
            print("=============================================", end="")
        while not constants.judge_state_is_terminate(state_current):
            action = action_func(state=state_current)
            state_next, reward, card = self.env_obj.step(action=action)
            if self.show_details:
                print()
                print("{:15}\tDealer={}, Player={}".format("[CURRENT STATE]", state_current[0], state_current[1]))
                print("{:15}\t{}".format("[ACTION]", "STICK" if action else "HIT"))
                if isinstance(card["color"], str):  # single card
                    print("{:15}\t{}{}".format("[CARD]", {"RED": "-", "BLACK": "+"}[card["color"]], card["value"]))
                else:  # single/multiple card(s)
                    print("{:15}\t{}".format("[CARD]",
                                             ", ".join(["(%s%d)" % ({"RED": "-", "BLACK": "+"}[_cd[0]], _cd[1])
                                                        for _cd in zip(card["color"], card["value"])])))
                print("{:15}\tDealer={}, Player={}".format("[NEXT STATE]", state_next[0], state_next[1]))
                print("{:15}\t{}".format("[REWARD]", reward))
            # update state
            state_current = state_next
        if self.show_details:
            print("=============================================", end="\n\n")
        return reward
class QLearningEnv:
    """Wraps a trained Q-table and exposes an action function for evaluation."""

    def __init__(self,
                 policy_func: types.MethodType = None,
                 trained_path: str = "./_trained/Q_Learning/"):
        # Default policy: pick the action with the maximum Q-value.
        self.policy_func = policy_func if policy_func else policy.ActionPolicies().greedy_maximum
        self.trained_path = trained_path
        if not os.path.exists(trained_path):
            raise RuntimeError("[Error] Trained Path NOT Found")
        # Model Related Initialization (populated by load_model)
        self.q_table = None
        self.epsilon = None

    def load_model(self, epsilon: float = 0.5, filename: str = "TestOutput.npy") -> None:
        """Load a saved Q-table (numpy .npy) and remember the exploration epsilon."""
        self.q_table = np.load(os.path.join(self.trained_path, filename))  # Q-Table
        self.epsilon = epsilon

    def action_func(self, state: typing.Tuple[int, int]) -> int:
        """Return the policy's action for the given (dealer, player) state."""
        action = self.policy_func(
            q_table=self.q_table, state=state, epsilon=self.epsilon)
        return action
class PolicyIterationEnv:
    """Loads a trained policy table and maps Easy21 states to their stored actions."""

    def __init__(self, trained_path: str = "./_trained/Policy_Iteration/"):
        if not os.path.exists(trained_path):
            raise RuntimeError("[Error] Trained Path NOT Found")
        self.trained_path = trained_path
        # Policy table (numpy array), filled by load_model(); indexed [dealer-1, player-1].
        self.table_policy = None

    def load_model(self, filename: str = "TestOutput.npy") -> None:
        """Load the saved policy table (numpy .npy) from the trained path."""
        model_file = os.path.join(self.trained_path, filename)
        self.table_policy = np.load(model_file)  # Policy Table

    def action_func(self, state: typing.Tuple[int, int]) -> int:
        """Return the stored action for a 1-based (dealer, player) state."""
        dealer, player = state
        return self.table_policy[dealer - 1, player - 1]
if "__main__" == __name__:
epsilon_values = [0.6, ]
evaluate_rounds = 10000
test_eval_env_obj = EvaluateEnv(show_details=True)
# # Q-Learning
# test_policy_func = policy.ActionPolicies().greedy_epsilon
# test_ql_env_obj = QLearningEnv(**{"policy_func": test_policy_func})
# for _epsilon in epsilon_values:
# test_ql_env_obj.load_model(epsilon=_epsilon, filename="TestOutput.npy")
#
# results = {-1: 0, 0: 0, 1: 0, "err": 0}
# for rd in range(evaluate_rounds):
# terminate_reward = test_eval_env_obj.evaluate(
# **{"action_func": test_ql_env_obj.action_func})
# try:
# results[terminate_reward] += 1
# except KeyError:
# results["error"] += 1
#
# print("\n\n\n")
# print("WIN / TIE / LOSE / ERR / ALL\n%d / %d / %d / %d / %d" % (
# results[1], results[0], results[-1], results["err"], evaluate_rounds))
# print("Win Rate: %.2f %%" % (float(results[1]) / evaluate_rounds * 100.))
# Policy Iteration
test_policy_func = policy.ActionPolicies().greedy_epsilon
test_pi_action_obj = PolicyIterationEnv()
test_pi_action_obj.load_model(filename="TestOutput.npy")
results = {-1: 0, 0: 0, 1: 0, "err": 0}
for rd in range(evaluate_rounds):
terminate_reward = test_eval_env_obj.evaluate(
**{"action_func": test_pi_action_obj.action_func})
try:
results[terminate_reward] += 1
except KeyError:
results["error"] += 1
print("\n\n\n")
print("WIN / TIE / LOSE / ERR / ALL\n%d / %d / %d / %d / %d" % (
results[1], results[0], results[-1], results["err"], evaluate_rounds))
print("Win Rate: %.2f %%" % (float(results[1]) / evaluate_rounds * 100.))
| 38.562044 | 115 | 0.571266 | 3,336 | 0.631459 | 0 | 0 | 0 | 0 | 0 | 0 | 1,632 | 0.308915 |
b4abd48fdafccf895445255273d39c4cebef4513 | 502 | py | Python | axelrod/tests/unit/test_mock_player.py | dashiellfryer/Axelrod | 0d684b3273d15e3e0ecf70be8e893fffc5277c84 | [
"MIT"
] | null | null | null | axelrod/tests/unit/test_mock_player.py | dashiellfryer/Axelrod | 0d684b3273d15e3e0ecf70be8e893fffc5277c84 | [
"MIT"
] | null | null | null | axelrod/tests/unit/test_mock_player.py | dashiellfryer/Axelrod | 0d684b3273d15e3e0ecf70be8e893fffc5277c84 | [
"MIT"
] | null | null | null | import unittest
from axelrod import Action, MockPlayer, Player
C, D = Action.C, Action.D
class TestMockPlayer(unittest.TestCase):
def test_strategy(self):
for action in [C, D]:
m = MockPlayer(actions=[action])
p2 = Player()
self.assertEqual(action, m.strategy(p2))
actions = [C, C, D, D, C, C]
m = MockPlayer(actions=actions)
p2 = Player()
for action in actions:
self.assertEqual(action, m.strategy(p2))
| 23.904762 | 52 | 0.591633 | 407 | 0.810757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b4ac8299434ea398cb53c3d22c8f3e7ec3f42e87 | 10,870 | py | Python | covidtracker/writer_flatcurve.py | mfkasim91/covid19-gradient-tracker | a25d245ef35782052d5ab179d431c6079074b047 | [
"MIT"
] | 2 | 2020-05-25T16:06:31.000Z | 2020-07-22T06:42:38.000Z | covidtracker/writer_flatcurve.py | mfkasim91/covid19-gradient-tracker | a25d245ef35782052d5ab179d431c6079074b047 | [
"MIT"
] | null | null | null | covidtracker/writer_flatcurve.py | mfkasim91/covid19-gradient-tracker | a25d245ef35782052d5ab179d431c6079074b047 | [
"MIT"
] | 1 | 2020-07-22T06:47:11.000Z | 2020-07-22T06:47:11.000Z | import os
import datetime
import numpy as np
from jinja2 import Template
from scipy.stats import hypergeom
import matplotlib.pyplot as plt
import covidtracker as ct
from covidtracker.dataloader import DataLoader
from covidtracker.models import update_samples
from covidtracker.plotter import plot_interval
def plot_gradient(res):
    """Plot the sampled daily growth-rate band (percent) with a zero reference line.

    `res.samples["b"]` holds per-day log-growth samples; exp(b)-1 converts them
    to a daily percentage growth before plotting the credible interval.
    """
    dl = res.dataloader
    b = res.samples["b"].detach().numpy()
    tnp = np.arange(b.shape[1])
    plot_interval(tnp, (np.exp(b)-1)*100, color="C2")
    # Zero-growth reference line.
    plt.plot(tnp, tnp*0, "k--")
    plt.ylabel("Persentase pertumbuhan")
    # One tick per week, labelled with the data's date strings.
    plt.xticks(tnp[::7], dl.tdate[::7], rotation=90)
    plt.title(dl.ylabel)
    plt.legend(loc="upper right")
def plot_data_and_sim(res):
    """Overlay the observed daily cases (bars) with the simulated credible band."""
    yobs = res.yobs
    ysim = res.ysim
    dl = res.dataloader
    tnp = np.arange(len(yobs))
    plt.bar(tnp, yobs, color="C1", alpha=0.6)
    plot_interval(tnp, ysim, color="C1")
    # One tick per week, labelled with the data's date strings.
    plt.xticks(tnp[::7], dl.tdate[::7], rotation=90)
    plt.title(dl.ylabel)
    plt.legend(loc="upper left")
def get_weekly_sum(y, a=7):
    """Sum *y* over consecutive windows of length *a* (default: weekly sums).

    Leading elements that do not fill a whole window are dropped from the
    front, so the most recent data always ends a complete window.
    """
    # BUG FIX: the offset previously hard-coded 7, so any a != 7 produced a
    # wrong (or invalid) reshape. Use the window length parameter consistently.
    yy = y[y.shape[-1] % a:].reshape(-1, a).sum(axis=-1)
    return yy
def get_in_week(t, a=7, i=0):
    """Pick element *i* of each consecutive length-*a* window of *t* (default: one value per week)."""
    # BUG FIX: the offset previously hard-coded 7, so any a != 7 produced a
    # wrong (or invalid) reshape. Use the window length parameter consistently.
    ty = t[t.shape[-1] % a:].reshape(-1, a)[:, i]
    return ty
def get_total_cases(res, total_deaths_data):
    """Estimate the true total infections implied by the fitted case curve.

    Scales the simulated cases by an under-reporting factor derived from
    reported vs suspected deaths, then divides by IFR values sampled from a
    normal prior. Returns (median, 2.5th pct, 97.5th pct) as formatted strings
    with '.' thousands separators, rounded to 2 significant figures.
    """
    model = res.model
    samples = res.samples
    ysim = res.ysim
    unreported_ratio_death = 2200 / 785. # from reuter's report https://uk.reuters.com/article/us-health-coronavirus-indonesia-casualti/exclusive-more-than-2200-indonesians-have-died-with-coronavirus-symptoms-data-shows-idUKKCN22A04N
    # Deaths the model would predict if every infection were fatal (ifr=1).
    total_deaths_from_cases_fullifr = model.predict_total_deaths(samples, ifr=1.0) # (nsamples,)
    unreported_ratio_fullifr = total_deaths_data * unreported_ratio_death / total_deaths_from_cases_fullifr
    total_cases_fullifr = unreported_ratio_fullifr * np.sum(ysim, axis=-1)
    total_cases = None
    ntrial = 100
    # Sample IFR values and clip below the reported lower bound (0.39%).
    ifrs = np.random.randn(ntrial) * 0.0047 + 0.0086 # (0.39 - 1.33)% from https://www.medrxiv.org/content/10.1101/2020.03.09.20033357v1.full.pdf
    ifrs[ifrs < 0.0039] = 0.0039
    for i,ifr in enumerate(ifrs):
        total_cases1 = total_cases_fullifr / ifr # (nsamples,)
        if i == 0:
            total_cases = total_cases1
        else:
            total_cases = np.concatenate((total_cases, total_cases1))
    # get the statistics of the total cases
    total_cases_median = int(np.round(np.median(total_cases)))
    total_cases_025 = int(np.round(np.percentile(total_cases, 2.5)))
    total_cases_975 = int(np.round(np.percentile(total_cases, 97.5)))
    def formatstr(a):
        # Format with Indonesian-style '.' thousands separators.
        b = int(float("%.2g"%a)) # round to some significant figures
        c = f"{b:,}"
        d = c.replace(",", ".")
        return d
    return formatstr(total_cases_median),\
           formatstr(total_cases_025),\
           formatstr(total_cases_975)
def plot_weekly_tests(res):
    """Plot the national weekly test totals as a bar chart; return the weekly sums."""
    # NOTE(review): yobs is read but never used in this function.
    yobs = res.yobs
    # show the tests
    dltest = DataLoader("id_new_tests")
    ytest = dltest.ytime
    ttest = np.arange(ytest.shape[0])
    tticks = dltest.tdate
    a = 7
    # Aggregate daily tests into weekly totals; keep one x-position/label per week.
    ytest = get_weekly_sum(ytest)
    ttest = get_in_week(ttest, i=0)
    tticks = get_in_week(tticks, i=0)
    plt.bar(ttest, ytest, width=a-0.5)
    # plt.bar(ttest, ytest)
    plt.title("Pemeriksaan per minggu")
    plt.xticks(ttest, tticks, rotation=90)
    return ytest
def plot_weekly_tests_prov(res):
    """Estimate and plot a province's weekly test counts from national data.

    The number of provincial tests is unobserved; it is inferred from a
    hypergeometric likelihood of drawing the province's weekly positives out
    of the national weekly tests/positives. Returns (median estimate per week,
    half-width of the 95% interval).
    """
    yobs = res.yobs  # new positives / day (provincial)

    # get all the tests and the positive cases from all over the country
    dltest = DataLoader("id_new_tests")
    dlcase = DataLoader("id_new_cases")
    ttest = dltest.tdate
    ytest = dltest.ytime
    ycase = dlcase.ytime
    assert len(ytest) == len(ycase)

    # Align the (shorter) provincial series with the national one so both
    # start on the same whole-week boundary.
    ntest_days = len(ytest)
    nobs_days = len(yobs)
    missing_days = ntest_days - nobs_days
    offset_test = int(np.ceil(missing_days / 7.0)) * 7
    offset_obs = offset_test - missing_days

    # offset the positive tests to match the weekly test
    yobs = yobs[offset_obs:]
    ytest = ytest[offset_test:]
    ycase = ycase[offset_test:]
    ttest = ttest[offset_test:]
    assert len(yobs) == len(ytest)
    assert len(yobs) == len(ycase)

    # yobs and ytest should have the same lengths by now
    # get the weekly data
    ycase = get_weekly_sum(ycase)
    yobs = get_weekly_sum(yobs)
    ytest = get_weekly_sum(ytest)
    ttest = get_in_week(ttest, i=0)
    ndata = len(yobs)

    # calculate the posterior distribution of the number of tests
    # BUG FIX: np.int is deprecated (removed in NumPy >= 1.24); the builtin
    # int is the documented equivalent for astype.
    yall_positives = ycase.astype(int)
    yall_tests = ytest.astype(int)
    y_positives = yobs.astype(int)
    max_tests = yall_tests.max()
    posteriors = np.zeros((ndata, max_tests+1))
    for i in range(ndata):
        yall_positive = yall_positives[i]
        yall_test = yall_tests[i]
        y_positive = y_positives[i]
        y_test = np.arange(yall_test+1)
        # Likelihood of observing the provincial positives for every possible
        # number of provincial tests (0..national tests); flat prior assumed.
        lhood = hypergeom.pmf(y_positive, yall_test, yall_positive, y_test)
        if np.sum(lhood) == 0:
            print(y_positive, yall_test, yall_positive, res.dataloader.dataidentifier)
        posteriors[i,:len(lhood)] = lhood / np.sum(lhood) # (max_tests+1)
    cdf = np.cumsum(posteriors, axis=-1)

    def h(cdf, q):
        # Index of the q-th quantile in each week's posterior CDF.
        return np.sum(cdf < q, axis=-1)

    x = np.arange(ndata)
    ymed = h(cdf, 0.5)
    yl1 = h(cdf, 0.025)
    yu1 = h(cdf, 0.975)
    plt.bar(x, height=ymed, alpha=0.5, label="Median")
    plt.errorbar(x, ymed, [ymed-yl1, yu1-ymed], fmt="o", label="95% CI")
    plt.xticks(x, ttest, rotation=90)
    plt.legend()
    plt.title("Perkiraan jumlah pemeriksaan mingguan")
    return ymed, (h(cdf, 0.975)-h(cdf, 0.025))/2.0
def main(img_path, file_path, idx=None):
    """Fit growth models per region, render figures, and write the markdown report.

    For Indonesia and each listed province: sample the model, plot growth rate,
    data-vs-simulation and weekly tests, decide whether the curve is declining,
    and render everything into `file_path` via the Jinja template. When *idx*
    is given, only that region's model is (re)sampled and the function returns
    early without plotting or writing.
    """
    provinces = ["Jakarta", "Jabar", "Jatim", "Jateng", "Sulsel"]
    fields = ["id_new_cases"] + ["idprov_%s_new_cases" % p.lower() for p in provinces]
    names = ["Indonesia"] + provinces
    if idx is not None:
        provinces = provinces[idx:idx+1]
        fields = fields[idx:idx+1]
        names = names[idx:idx+1]
    ftemplate = os.path.join(os.path.split(ct.__file__)[0], "templates", "template-idcovid19.md")
    nsamples = 1000
    nwarmups = 1000
    nchains = 1
    places = []
    for i,df in enumerate(fields):
        print("Field: %s" % df)
        # get the samples or resample
        res = update_samples(df, nsamples=nsamples, nchains=nchains, nwarmups=nwarmups,
                             jit=True, restart=False)
        if idx is not None:
            return
        dl = res.dataloader
        model = res.model
        samples = res.samples
        ysim = res.ysim
        # Trim observations to the simulated span.
        res.yobs = res.yobs[:ysim.shape[1]]
        yobs = res.yobs
        ################## creating the figures ##################
        # simulating the samples
        b = samples["b"].detach().numpy()  # (nsamples, n)
        tnp = np.arange(len(yobs))
        ncols = 3
        plt.figure(figsize=(4*ncols,4))
        plt.subplot(1,ncols,1)
        plot_gradient(res)
        plt.subplot(1,ncols,2)
        plot_data_and_sim(res)
        total_cases_median = ""
        total_cases_025 = ""
        total_cases_975 = ""
        if df == "id_new_cases":
            # show the tests
            plt.subplot(1,ncols,3)
            test_weekly = plot_weekly_tests(res)
            # calculate the ratio for the last week
            # NOTE: this national test_ratio is deliberately reused by the
            # provincial iterations below (the fields list starts national).
            test_ratio = test_weekly[-1] / test_weekly[-2]
            test_ratio_2std = 1e-8  # very small std
            # calculate the estimated infection cases
            total_deaths_data = DataLoader("id_cum_deaths").ytime[-1]
            total_cases_median, total_cases_025, total_cases_975 = get_total_cases(res, total_deaths_data)
        elif df.startswith("idprov_") and df.endswith("_new_cases"):
            plt.subplot(1,ncols,3)
            test_weekly, test_weekly_2std = plot_weekly_tests_prov(res)
            # NOTE: comment the code below to use the ratio from the national figures
            # test_ratio = test_weekly[-1] / test_weekly[-2]
            # test_ratio_2std = ((test_weekly_2std[-1] / test_weekly[-1])**2 +\
            #                    (test_weekly_2std[-2] / test_weekly[-2])**2)**.5 *\
            #                    test_ratio
            # calculate the estimated infection cases
            total_deaths_data = DataLoader(df.replace("_new_cases", "_cum_deaths")).ytime[-1]
            total_cases_median, total_cases_025, total_cases_975 = get_total_cases(res, total_deaths_data)
        plt.tight_layout()
        plt.savefig(os.path.join(img_path, "%s.png"%df))
        plt.close()
        ################## deciding the results ##################
        b_last = b[:,-1]
        # calculate the exponential factor from the weekly ratio
        b_test = np.log(test_ratio) / 7.0
        b_test_std = test_ratio_2std / test_ratio
        lower_grad_portion = np.sum(b_last < b_test) * 1.0 / b.shape[0]
        # calculate the probability of the curve going down
        decline_portion = np.sum(b_last < 0) * 1.0 / b.shape[0]
        # calculate the total probability of it's really going down
        if b_test < 0:
            decline_prob = lower_grad_portion
        else:
            decline_prob = decline_portion
        # Map the decline probability to an Indonesian verdict string.
        if decline_prob > 0.95:
            flatcurve_res = "**turun**"
        elif decline_prob > 0.75:
            flatcurve_res = "**kemungkinan** turun"
        elif decline_prob > 0.5:
            flatcurve_res = "ada indikasi penurunan, tapi belum dapat dipastikan"
        elif decline_prob < 0.5 and decline_portion > 0.5:
            flatcurve_res = "kurva terlihat turun, tapi jumlah tes juga menurun"
        else:
            flatcurve_res = "belum dapat disimpulkan"
        # calculate the probability of the curve going down not because of the test
        # i.e. compare the test gradient and the curve gradient
        ## save the information for the templating
        places.append({
            "dataid": df,
            "name": names[i],
            "flatcurve_result": flatcurve_res,
            "decline_prob": int(np.round(decline_prob * 100)),
            # predicted cases
            "total_cases_median": total_cases_median,
            "total_cases_025": total_cases_025,
            "total_cases_975": total_cases_975,
        })
    # Render all regions into the markdown report.
    with open(ftemplate, "r") as f:
        template = Template(f.read())
    today = datetime.date.today()
    content = template.render(places=places, date=today.strftime("%d/%m/%Y"))
    with open(file_path, "w") as f:
        f.write(content)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--idx", type=int, default=None)
args = parser.parse_args()
fpath = "../../mfkasim91.github.io"
img_path = os.path.join(fpath, "assets", "idcovid19-daily")
file_path = os.path.join(fpath, "idcovid19.md")
main(img_path, file_path, idx=args.idx)
| 35.639344 | 233 | 0.626127 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,603 | 0.239466 |
b4acdc0ec67a2e7b7290b7d91f3ae5eb0dcf9fcf | 359 | py | Python | SNDG/BioMongo/Model/exceptions.py | ezequieljsosa/sndg-bio | 5f709b5b572564ec1dfa40d090eca9a34295743e | [
"MIT"
] | null | null | null | SNDG/BioMongo/Model/exceptions.py | ezequieljsosa/sndg-bio | 5f709b5b572564ec1dfa40d090eca9a34295743e | [
"MIT"
] | null | null | null | SNDG/BioMongo/Model/exceptions.py | ezequieljsosa/sndg-bio | 5f709b5b572564ec1dfa40d090eca9a34295743e | [
"MIT"
] | 1 | 2020-09-01T15:57:54.000Z | 2020-09-01T15:57:54.000Z | '''
Created on Jun 15, 2016
@author: eze
'''
class NotFoundException(Exception):
'''
classdocs
'''
def __init__(self, element):
'''
Constructor
'''
self.elementNotFound = element
def __str__(self, *args, **kwargs):
return "NotFoundException(%s)" % self.elementNotFound
| 16.318182 | 61 | 0.537604 | 294 | 0.818942 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.356546 |
b4ae5a006f917cb0bbbc0e033064a28351e9c257 | 782 | py | Python | Strings/tablePrinter.py | shoalcellos/AutomateTheBoringStuff | 76d4ead2d41caaa96943cc64000da88c34bef75e | [
"MIT"
] | null | null | null | Strings/tablePrinter.py | shoalcellos/AutomateTheBoringStuff | 76d4ead2d41caaa96943cc64000da88c34bef75e | [
"MIT"
] | null | null | null | Strings/tablePrinter.py | shoalcellos/AutomateTheBoringStuff | 76d4ead2d41caaa96943cc64000da88c34bef75e | [
"MIT"
] | null | null | null | # Solution to the practise problem
# https://automatetheboringstuff.com/chapter6/
# Table Printer
def printTable(tableList):
"""Prints the list of list of strings with each column right justified"""
colWidth = 0
for row in tableList:
colWidth = max(colWidth, max([len(x) for x in row]))
colWidth += 1
printedTable = ''
tableList = [[row[i] for row in tableList] for i in range(len(tableList[0]))]
for row in tableList:
for item in row:
printedTable += item.rjust(colWidth, ' ')
printedTable += '\n'
print(printedTable)
tableData = [['apples', 'oranges', 'cherries', 'banana'],
['Alice', 'Bob', 'Carol', 'David'],
['dogs', 'cats', 'moose', 'goose']]
printTable(tableData) | 30.076923 | 81 | 0.602302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.337596 |
b4af056b2e6592ec3909fe81f77a486c195eff3f | 4,727 | py | Python | src/layers/layers.py | jabalazs/gating | 713f954656bea127ea331ab85aa83f6aaad21954 | [
"MIT"
] | 10 | 2019-04-08T02:09:37.000Z | 2021-05-04T10:30:44.000Z | src/layers/layers.py | lizezhonglaile/gating | 713f954656bea127ea331ab85aa83f6aaad21954 | [
"MIT"
] | null | null | null | src/layers/layers.py | lizezhonglaile/gating | 713f954656bea127ea331ab85aa83f6aaad21954 | [
"MIT"
] | 4 | 2019-09-24T14:24:25.000Z | 2021-09-02T14:41:38.000Z | import torch
import torch.nn as nn
from ..utils.torch import pack_forward
from .pooling import GatherLastLayer
class CharEncoder(nn.Module):
FORWARD_BACKWARD_AGGREGATION_METHODS = ["cat", "linear_sum"]
def __init__(
self,
char_embedding_dim,
hidden_size,
char_fw_bw_agg_method="cat",
bidirectional=True,
train_char_embeddings=True,
use_cuda=True,
):
if char_fw_bw_agg_method not in self.FORWARD_BACKWARD_AGGREGATION_METHODS:
raise ValueError(
f"{char_fw_bw_agg_method} not recognized, try with one of "
f"{self.FORWARD_BACKWARD_AGGREGATION_METHODS}"
)
super(CharEncoder, self).__init__()
self.char_embedding_dim = char_embedding_dim
self.n_layers = 1
self.char_hidden_dim = hidden_size
self.bidirectional = bidirectional
self.num_dirs = 2 if bidirectional else 1
self.hidden_x_dirs = self.num_dirs * self.char_hidden_dim
self.use_cuda = use_cuda
self.char_lstm = nn.LSTM(
self.char_embedding_dim,
self.char_hidden_dim,
self.n_layers,
bidirectional=self.bidirectional,
dropout=0.0,
)
self.gather_last = GatherLastLayer(
self.char_hidden_dim, bidirectional=self.bidirectional
)
self.char_fw_bw_agg_method = char_fw_bw_agg_method
if self.char_fw_bw_agg_method == "cat":
self.out_dim = self.hidden_x_dirs
elif self.char_fw_bw_agg_method == "linear_sum":
self.out_dim = self.char_hidden_dim
self.linear_layer = nn.Linear(
self.hidden_x_dirs, self.char_hidden_dim
)
def forward(self, char_batch, word_lengths):
"""char_batch: (batch_size, seq_len, word_len, char_emb_dim)
word_lengths: (batch_size, seq_len)"""
(batch_size, seq_len, word_len, char_emb_dim) = char_batch.size()
# (batch_size * seq_len, word_len, char_emb_dim)
char_batch = char_batch.view(batch_size * seq_len, word_len, char_emb_dim)
# (batch_size, seq_len) -> (batch_size * seq_len)
word_lengths = word_lengths.view(batch_size * seq_len)
# (batch_size * seq_len, word_len, hidden_x_dirs)
word_lvl_repr = pack_forward(self.char_lstm, char_batch, word_lengths)
# (batch_size * seq_len, hidden_x_dirs)
word_lvl_repr = self.gather_last(word_lvl_repr, lengths=word_lengths)
# last dimension of gather_last will always correspond to concatenated
# last hidden states of lstm if bidirectional
# (batch_size, seq_len, hidden_x_dirs)
word_lvl_repr = word_lvl_repr.view(
batch_size, seq_len, self.hidden_x_dirs
)
# We store this tensor for future introspection
self.concat_fw_bw_reprs = word_lvl_repr.clone()
if self.char_fw_bw_agg_method == "linear_sum":
# Based on the paper: http://www.anthology.aclweb.org/D/D16/D16-1209.pdf
# Line below is W*word_lvl_repr + b which is equivalent to
# [W_f; W_b] * [h_f;h_b] + b which in turn is equivalent to
# W_f * h_f + W_b * h_b + b
word_lvl_repr = self.linear_layer(word_lvl_repr)
return word_lvl_repr
class LinearAggregationLayer(nn.Module):
def __init__(self, in_dim):
"""
Simply concatenate the provided tensors on their last dimension
which needs to have the same size, along with their
element-wise multiplication and difference
Taken from the paper:
"Learning Natural Language Inference using Bidirectional
LSTM model and Inner-Attention"
https://arxiv.org/abs/1605.09090
"""
super(LinearAggregationLayer, self).__init__()
self.in_dim = in_dim
self.out_dim = 4 * in_dim
def forward(self, input_1, input_2):
"""
:param : input_1
Size is (*, hidden_size)
:param input_2:
Size is (*, hidden_size)
:return:
Merged vectors, size is (*, 4*hidden size)
"""
assert input_1.size(-1) == input_2.size(-1)
mult_combined_vec = torch.mul(input_1, input_2)
diff_combined_vec = torch.abs(input_1 - input_2)
# cosine_sim = simple_columnwise_cosine_similarity(input_1, input_2)
# cosine_sim = cosine_sim.unsqueeze(1)
# euclidean_dist = distance(input_1, input_2, 2)
combined_vec = torch.cat(
(input_1, input_2, mult_combined_vec, diff_combined_vec),
input_1.dim() - 1,
)
return combined_vec
| 33.524823 | 84 | 0.635287 | 4,609 | 0.975037 | 0 | 0 | 0 | 0 | 0 | 0 | 1,628 | 0.344404 |
b4af1aab669bbf7ec4facbb4437773d39076d0e0 | 7,458 | py | Python | backend/handlers/graphql/mutation_utils/mutationmethod.py | al-indigo/vmemperor | 80eb6d47d839a4736eb6f9d2fcfad35f0a7b3bb1 | [
"Apache-2.0"
] | null | null | null | backend/handlers/graphql/mutation_utils/mutationmethod.py | al-indigo/vmemperor | 80eb6d47d839a4736eb6f9d2fcfad35f0a7b3bb1 | [
"Apache-2.0"
] | 8 | 2017-10-11T13:26:10.000Z | 2021-12-13T20:27:52.000Z | backend/handlers/graphql/mutation_utils/mutationmethod.py | ispras/vmemperor | 80eb6d47d839a4736eb6f9d2fcfad35f0a7b3bb1 | [
"Apache-2.0"
] | 4 | 2017-07-27T12:25:42.000Z | 2018-01-28T02:06:26.000Z | import json
import uuid
from dataclasses import dataclass
from typing import Callable, Sequence, Any, Optional, Tuple, Union, List, Generic, TypeVar
from serflag import SerFlag
from handlers.graphql.graphql_handler import ContextProtocol
from handlers.graphql.utils.string import camelcase
from xenadapter.task import get_userids
from xenadapter.xenobject import XenObject
from functools import partial
import constants.re as re
from sentry_sdk import capture_exception
def call_mutation_from_string(mutable_object, changes, function):
def f():
old_value = {function: getattr(mutable_object, f'get_{function}')()}
new_value = {function: getattr(changes, function)}
getattr(mutable_object, f'set_{function}')(new_value[function])
return old_value, new_value
return f
@dataclass
class MutationMethod:
'''
Represents a mutation method - a function equipped with action name that is passed to check_access.
Attributes:
func: A mutation performer name without set prefix: a function that accepts an argument that is a part of used input named after the func.
i.e. if func == "name_label", it'll invoke set_name_label(user_input.name_label)
OR
a tuple of functions: 1st is going to be called with user input,
and 2nd is a validator, taking user input and returning a tuple of validation result and reason
access_action: An access action required for performing this mutation. None means this mutation is for administrators only
deps: Tuple of dependencies: lambdas that are called with our object as first argument and returning tuple of Boolean and reason string
'''
Input = TypeVar('Input')
InputArgument = TypeVar('InputArgument')
MutationFunction = Callable[[Input, "XenObject"], Tuple[InputArgument, InputArgument]]
MutationCheckerFunction = Callable[[Input, "XenObject"], Tuple[bool, Optional[str]]]
func: Union[str, Tuple[MutationFunction, MutationCheckerFunction]]
access_action: Optional[SerFlag]
deps: Tuple[Callable[["XenObject"], Tuple[bool, str]]] = tuple()
def call_mutation_from_function(mutable_object, changes, function: MutationMethod.MutationFunction):
return partial(function, changes, mutable_object)
@dataclass
class MutationHelper:
"""
A Mutation helper. Parameters:
- mutations: Sequence of mutations to be performed
- ctx: Request's context
- mutable_object: A Xen object to perform mutation on
"""
mutations: Sequence[MutationMethod]
ctx: ContextProtocol
mutable_object: XenObject
def prepare_mutations_for_item(self, item, changes):
dep_checks : List[Callable[[], Tuple[bool, str]]] = []
# Filling dependency checks in
for dep in item.deps:
dep_checks.append(partial(dep, self.mutable_object))
if isinstance(item.func, str):
if getattr(changes, item.func) is None:
return
else:
granted, reason = item.func[1](changes, self.mutable_object, self.ctx)
if not granted:
if not reason: # if Reason is None, we're instructed to skip this mutation as user didn't supply anything
return
else:
return reason
# Checking access
if not(item.access_action is None and \
self.ctx.user_authenticator.is_admin() or \
self.mutable_object.check_access(self.ctx.user_authenticator, item.access_action)):
if item.access_action:
return f"{camelcase(item.func if isinstance(item.func, str) else item.func[0].__name__)}: Access denied: object {self.mutable_object}; action: {item.access_action}"
else:
return f"{camelcase(item.func if isinstance(item.func, str) else item.func[0].__name__)}: Access denied: not an administrator"
else:
if isinstance(item.func, str):
function_candidate = call_mutation_from_string(self.mutable_object, changes, item.func)
else:
function_candidate = call_mutation_from_function(self.mutable_object, changes, item.func[0])
# Running dependency checks
for dep_check in dep_checks:
ret = dep_check()
if not ret[0]:
return f"{camelcase(item.func if isinstance(item.func, str) else '')}: {ret[1]}"
return function_candidate
def perform_mutations(self, changes: MutationMethod.Input) -> Tuple[bool, Optional[str]]:
'''
Perform mutations in a transaction fashion: Either all or nothing.
This method also inserts tasks in task table for each mutation.
If mutation fails, "failure" status is set.
In the result field, there's a JSON document of the following structure:
{
"old_val": {
"setting_name": "old_setting_value"
}
"new_val" : {
"setting_name": "new_setting_value"
}
}
:param changes: Graphene Input type instance with proposed changes
:return: Tuple [True, None] or [False, "String reason what's not granted"] where access is not granted]
'''
tasks : List[dict] = []
for item in self.mutations:
function_or_error = self.prepare_mutations_for_item(item, changes)
if not function_or_error:
continue
new_uuid = str(uuid.uuid4())
action = item.access_action.serialize()[0]
who = "users/" + self.ctx.user_authenticator.get_id() if not self.ctx.user_authenticator.is_admin() else None
object_ref = self.mutable_object.ref
object_type = self.mutable_object.__class__
task = {
"ref": new_uuid,
"object_ref": object_ref,
"object_type": object_type.__name__,
"action": action,
"error_info" : [],
"created": re.r.now().run(),
"name_label": f"{object_type.__name__}.{action}",
"name_description": "",
"uuid": new_uuid,
"progress": 1,
"resident_on": None,
"who": who,
"access" : {user: ['remove'] for user in get_userids(object_type, object_ref, action)}
}
if isinstance(function_or_error, str):
task['status'] = 'failure'
task['error_info'].append(function_or_error)
task['finished'] = re.r.now().run()
re.db.table('tasks').insert(task).run()
return False, function_or_error
else:
task['call'] = function_or_error
tasks.append(task)
for task in tasks:
try:
new_value, old_value = task['call']()
task['status'] = 'success'
task['result'] = json.dumps({"old_val": old_value, "new_val": new_value})
task['finished'] = re.r.now().run()
except Exception as e:
capture_exception(e)
task['status'] = 'failure'
task['error_info'].append(str(e))
task['result'] = ""
task['finished'] = re.r.now().run()
finally:
del task['call']
re.db.table('tasks').insert(task).run()
return True, None
| 42.375 | 181 | 0.622017 | 6,464 | 0.86672 | 0 | 0 | 6,486 | 0.86967 | 0 | 0 | 2,635 | 0.353312 |
b4af6bcde4fcd433d18c5597fca9f5e7a83c63f7 | 3,383 | py | Python | infra/utils.py | BrandoZhang/alis | 9699eba112eda2ea27d6023221df2df9dc270b7f | [
"CC-BY-2.0"
] | 176 | 2021-04-15T05:28:59.000Z | 2022-03-30T07:06:00.000Z | infra/utils.py | BrandoZhang/alis | 9699eba112eda2ea27d6023221df2df9dc270b7f | [
"CC-BY-2.0"
] | 12 | 2021-04-17T20:20:53.000Z | 2022-03-19T07:04:58.000Z | infra/utils.py | BrandoZhang/alis | 9699eba112eda2ea27d6023221df2df9dc270b7f | [
"CC-BY-2.0"
] | 22 | 2021-04-16T02:05:41.000Z | 2022-03-03T12:23:10.000Z | import os
import shutil
import subprocess
from distutils.dir_util import copy_tree
from shutil import copyfile
from typing import List, Optional
import click
import git
from omegaconf import DictConfig
def copy_objects(target_dir: os.PathLike, objects_to_copy: List[os.PathLike]):
for src_path in objects_to_copy:
trg_path = os.path.join(target_dir, os.path.basename(src_path))
if os.path.islink(src_path):
os.symlink(os.readlink(src_path), trg_path)
elif os.path.isfile(src_path):
copyfile(src_path, trg_path)
elif os.path.isdir(src_path):
copy_tree(src_path, trg_path)
else:
raise NotImplementedError(f"Unknown object type: {src_path}")
def create_symlinks(target_dir: os.PathLike, symlinks_to_create: List[os.PathLike]):
"""
Creates symlinks to the given paths
"""
for src_path in symlinks_to_create:
trg_path = os.path.join(target_dir, os.path.basename(src_path))
if os.path.islink(src_path):
# Let's not create symlinks to symlinks
# Since dropping the current symlink will break the experiment
os.symlink(os.readlink(src_path), trg_path)
else:
print(f'Creating a symlink to {src_path}, so try not to delete it occasionally!')
os.symlink(src_path, trg_path)
def is_git_repo(path: os.PathLike):
try:
_ = git.Repo(path).git_dir
return True
except git.exc.InvalidGitRepositoryError:
return False
def create_project_dir(project_dir: os.PathLike, objects_to_copy: List[os.PathLike], symlinks_to_create: List[os.PathLike]):
if is_git_repo(os.getcwd()) and are_there_uncommitted_changes():
if click.confirm("There are uncommited changes. Continue?", default=False):
print('Ok...')
else:
raise PermissionError("Cannot created a dir when there are uncommited changes")
if os.path.exists(project_dir):
if click.confirm(f'Dir {project_dir} already exists. Remove it?', default=False):
shutil.rmtree(project_dir)
else:
print('User refused to delete an existing project dir.')
raise PermissionError("There is an existing dir and I cannot delete it.")
os.makedirs(project_dir)
copy_objects(project_dir, objects_to_copy)
create_symlinks(project_dir, symlinks_to_create)
print(f'Created a project dir: {project_dir}')
def get_git_hash() -> Optional[str]:
if not is_git_repo(os.getcwd()):
return None
try:
return subprocess \
.check_output(['git', 'rev-parse', '--short', 'HEAD']) \
.decode("utf-8") \
.strip()
except:
return None
def get_experiment_path(master_dir: os.PathLike, experiment_name: str) -> os.PathLike:
return os.path.join(master_dir, f"{experiment_name}-{get_git_hash()}")
def get_git_hash_suffix() -> str:
git_hash: Optional[str] = get_git_hash()
git_hash_suffix = "" if git_hash is None else f"-{git_hash}"
return git_hash_suffix
def are_there_uncommitted_changes() -> bool:
return len(subprocess.check_output('git status -s'.split()).decode("utf-8")) > 0
def cfg_to_args_str(cfg: DictConfig, use_dashes=True) -> str:
dashes = '--' if use_dashes else ''
return ' '.join([f'{dashes}{p}={cfg[p]}' for p in cfg])
| 32.528846 | 124 | 0.670706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 694 | 0.205143 |
b4b34142fa2eee3e44e12513d4714643f5cc5f2d | 2,011 | py | Python | App/templatetags/app_extras.py | kholdarbekov/Petrol | 7d683def3ade380b0323e42a1d9ef8190bcc68ba | [
"MIT"
] | null | null | null | App/templatetags/app_extras.py | kholdarbekov/Petrol | 7d683def3ade380b0323e42a1d9ef8190bcc68ba | [
"MIT"
] | null | null | null | App/templatetags/app_extras.py | kholdarbekov/Petrol | 7d683def3ade380b0323e42a1d9ef8190bcc68ba | [
"MIT"
] | null | null | null | from django import template
from django.utils import timezone
register = template.Library()
@register.filter
def sold(oil, delta_months='12'):
total = 0
curTime = timezone.localtime(timezone.now())
from_time = curTime - timezone.timedelta(days=int(delta_months)*30)
for t in oil.trades.filter(dateTime__range=[from_time, curTime]).order_by('-dateTime'):
total += t.litreSold
return total
@register.filter
def last_checkin(oil):
last_checkin = oil.checkins.order_by('-date').last()
if last_checkin:
return last_checkin.date.strftime("%b %d, %Y")
else:
return ''
@register.filter
def checkin_cost(checkin):
cost = checkin.oil.price * checkin.bottles * checkin.oil.bottleVolume
if cost:
return round(cost, 2)
else:
return 0
@register.filter
def remaining_percent(sold, remainingLitres):
try:
if sold or sold > 0:
return 100 - round(float(sold) / (float(remainingLitres) + float(sold)) * 100, 2)
else:
return 100
except (ValueError, ZeroDivisionError):
return None
@register.filter
def multiply(value, arg):
try:
if value:
return float(value) * float(arg)
else:
return 0
except ValueError:
return None
@register.filter
def divide(value, arg):
try:
if value:
return float(value) / float(arg)
else:
return None
except (ValueError, ZeroDivisionError):
return None
@register.filter
def get_item(dictionary, key):
if dictionary:
val = dictionary[key]
if val:
return val.data
else:
return ''
@register.filter
def get_dict_value_by_key(dictionary, key):
if dictionary:
return dictionary[key]
else:
return ''
@register.filter
def chart_height(oils):
base_height = 300
if oils:
return base_height + (int(oils.__len__()) // 15) * 50
else:
return base_height
| 21.623656 | 93 | 0.625062 | 0 | 0 | 0 | 0 | 1,891 | 0.940328 | 0 | 0 | 39 | 0.019393 |
b4b539761e3c4833580192978074b62223577348 | 393 | py | Python | tests/sdk/test_util.py | unparalleled-js/py42 | 8c6b054ddd8c2bfea92bf77b0d648af76f1efcf1 | [
"MIT"
] | 1 | 2020-08-18T22:00:22.000Z | 2020-08-18T22:00:22.000Z | tests/sdk/test_util.py | unparalleled-js/py42 | 8c6b054ddd8c2bfea92bf77b0d648af76f1efcf1 | [
"MIT"
] | null | null | null | tests/sdk/test_util.py | unparalleled-js/py42 | 8c6b054ddd8c2bfea92bf77b0d648af76f1efcf1 | [
"MIT"
] | 1 | 2021-05-10T23:33:34.000Z | 2021-05-10T23:33:34.000Z | from datetime import datetime
import py42.util as util
def test_convert_timestamp_to_str_returns_expected_str():
assert util.convert_timestamp_to_str(235123656) == "1977-06-14T08:07:36.000Z"
def test_convert_datetime_to_timestamp_str_returns_expected_str():
d = datetime(2020, 4, 19, 13, 3, 2, 3)
assert util.convert_datetime_to_timestamp_str(d) == "2020-04-19T13:03:02.000Z"
| 30.230769 | 82 | 0.78117 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.132316 |
b4bc1366086824966b1d4039b3e9d25839830694 | 849 | py | Python | heartrate.py | mtimkovich/python-fitbit | d6b11b31a716e2fcf4617cf31e0fb5b1da2b86e3 | [
"Apache-2.0"
] | null | null | null | heartrate.py | mtimkovich/python-fitbit | d6b11b31a716e2fcf4617cf31e0fb5b1da2b86e3 | [
"Apache-2.0"
] | null | null | null | heartrate.py | mtimkovich/python-fitbit | d6b11b31a716e2fcf4617cf31e0fb5b1da2b86e3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Script for fetching my min and max heart rate over the past 15 minutes."""
import time
from config import *
from gather_keys_oauth2 import OAuth2Server
if __name__ == '__main__':
server = OAuth2Server(client_id, client_secret)
server.browser_authorize()
fb = server.fitbit
detail_level = '1min'
intraday = f'https://api.fitbit.com/1/user/-/activities/heart/date/today/1d/{detail_level}.json'
while True:
resp = fb.make_request(intraday)
dataset = resp['activities-heart-intraday']['dataset']
# Get only data from the last 15 minutes.
latest = [d['value'] for d in dataset[-16:]]
with open('maxs-hr.txt', 'w') as f:
output = '{}–{}'.format(min(latest), max(latest))
print(output)
f.write(output)
time.sleep(60)
| 30.321429 | 100 | 0.63722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 308 | 0.361927 |
b4bc63d3ed1e43a273b9651b0e35d83b769d5ccc | 11,891 | py | Python | wienerschnitzelgemeinschaft/src/Dmytro/src/train_triplet.py | guitarmind/HPA-competition-solutions | 547d53aaca148fdb5f4585526ad7364dfa47967d | [
"MIT"
] | null | null | null | wienerschnitzelgemeinschaft/src/Dmytro/src/train_triplet.py | guitarmind/HPA-competition-solutions | 547d53aaca148fdb5f4585526ad7364dfa47967d | [
"MIT"
] | null | null | null | wienerschnitzelgemeinschaft/src/Dmytro/src/train_triplet.py | guitarmind/HPA-competition-solutions | 547d53aaca148fdb5f4585526ad7364dfa47967d | [
"MIT"
] | null | null | null | import argparse
import collections
import os
import cv2
import numpy as np
import pandas as pd
import pretrainedmodels
import torch
import torch.optim as optim
import torchsummary
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torchvision import datasets, models, transforms
from tqdm import tqdm
import skimage.io
from sklearn.metrics import f1_score
import torch.nn as nn
import torch.nn.functional as F
import config
import utils
import classification_dataset
from triplet_dataset import TripletDataset, TripletDatasetUpdate, TripletDatasetPredict
from logger import Logger
from experiments import MODELS
class EmbeddingsModel(nn.Module):
def __init__(self, nb_embeddings=config.NB_EMBEDDINGS):
super().__init__()
self.base_model = pretrainedmodels.resnet18()
self.fc = nn.Linear(2048, nb_embeddings)
def forward(self, x):
x = self.base_model.conv1(x)
x = self.base_model.bn1(x)
x = self.base_model.relu(x)
x = self.base_model.maxpool(x)
x = self.base_model.layer1(x)
x = self.base_model.layer2(x)
x = self.base_model.layer3(x)
x = self.base_model.layer4(x)
x = self.base_model.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class TripletLoss(nn.Module):
def __init__(self, margin):
super().__init__()
self.margin = margin
def forward(self, anchor, positive, negative):
distance_positive = (anchor - positive).pow(2).sum(1)
distance_negative = (anchor - negative).pow(2).sum(1)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.mean()
def train(model_name, run=None):
run_str = '' if run is None or run == '' else f'_{run}'
checkpoints_dir = f'../output/checkpoints_3/{model_name}{run_str}'
tensorboard_dir = f'../output/tensorboard_3/{model_name}{run_str}'
os.makedirs(checkpoints_dir, exist_ok=True)
os.makedirs(tensorboard_dir, exist_ok=True)
print('\n', model_name, '\n')
logger = Logger(tensorboard_dir)
model = EmbeddingsModel()
model = model.cuda()
dataset_train = TripletDataset(
is_train=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
crop_size=256
)
dataset_valid = TripletDataset(
is_train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
crop_size=256
)
dataset_update_train = TripletDatasetUpdate(dataset_train)
dataset_update_valid = TripletDatasetUpdate(dataset_valid)
model.training = True
print('using sgd optimiser')
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-5)
scheduler = utils.CosineAnnealingLRWithRestarts(optimizer, T_max=8, T_mult=1.2)
print('Num training images: {} valid images: {}'.format(len(dataset_train), len(dataset_valid)))
data_loader_train = DataLoader(
dataset_train,
shuffle=True,
num_workers=8,
batch_size=64)
data_loader_valid = DataLoader(
dataset_valid,
shuffle=False,
num_workers=8,
batch_size=64)
data_loader_update_train = DataLoader(
dataset_update_train,
shuffle=False,
num_workers=8,
batch_size=64)
data_loader_update_valid = DataLoader(
dataset_update_valid,
shuffle=False,
num_workers=8,
batch_size=64)
criterium = TripletLoss(margin=1.0)
for epoch_num in range(512):
model.eval()
with torch.set_grad_enabled(False):
for iter_num, data in tqdm(enumerate(data_loader_update_train), total=len(data_loader_update_train)):
img = data['img'].cuda()
samples_idx = data['idx']
vectors = model(img).detach().cpu().numpy()
dataset_train.embeddings[samples_idx] = vectors
print(np.mean(dataset_train.embeddings, axis=0), np.std(dataset_train.embeddings, axis=0))
for iter_num, data in tqdm(enumerate(data_loader_update_valid), total=len(data_loader_update_valid)):
img = data['img'].cuda()
samples_idx = data['idx']
vectors = model(img).detach().cpu().numpy()
dataset_valid.embeddings[samples_idx] = vectors
print(np.mean(dataset_train.embeddings, axis=0), np.std(dataset_valid.embeddings, axis=0))
model.train()
epoch_loss = []
with torch.set_grad_enabled(True):
data_iter = tqdm(enumerate(data_loader_train), total=len(data_loader_train))
for iter_num, data in data_iter:
img = data['img'].cuda()
img_pos = data['img_pos'].cuda()
img_neg = data['img_neg'].cuda()
optimizer.zero_grad()
output = model(img)
output_pos = model(img_pos)
output_neg = model(img_neg)
loss = criterium(output, output_pos, output_neg)
epoch_loss.append(float(loss.detach().cpu()))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optimizer.step()
data_iter.set_description(
f'{epoch_num} Loss: {np.mean(epoch_loss):1.4f}')
logger.scalar_summary(f'loss_train', np.mean(epoch_loss), epoch_num)
epoch_loss = []
with torch.set_grad_enabled(False):
data_iter = tqdm(enumerate(data_loader_valid), total=len(data_loader_valid))
for iter_num, data in data_iter:
img = data['img'].cuda()
img_pos = data['img_pos'].cuda()
img_neg = data['img_neg'].cuda()
output = model(img)
output_pos = model(img_pos)
output_neg = model(img_neg)
loss = criterium(output, output_pos, output_neg)
epoch_loss.append(float(loss))
data_iter.set_description(
f'{epoch_num} Loss: {np.mean(epoch_loss):1.4f}')
logger.scalar_summary(f'loss_valid', np.mean(epoch_loss), epoch_num)
logger.scalar_summary('lr', optimizer.param_groups[0]['lr'], epoch_num)
scheduler.step(metrics=np.mean(epoch_loss), epoch=epoch_num)
model.eval()
torch.save(
{
'epoch': epoch_num,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
},
f'{checkpoints_dir}/{epoch_num:03}.pt'
)
def predict(model_name, epoch_num, img_dir, sample_ids, run=None):
model = EmbeddingsModel()
model = model.cuda()
run_str = '' if run is None or run == '' else f'_{run}'
checkpoints_dir = f'../output/checkpoints_3/{model_name}{run_str}'
checkpoint = torch.load(f'{checkpoints_dir}/{epoch_num:03}.pt')
model.load_state_dict(checkpoint['model_state_dict'])
dataset = TripletDatasetPredict(sample_ids=sample_ids,
img_dir=img_dir,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
crop_size=256)
data_loader_update_train = DataLoader(
dataset,
shuffle=False,
num_workers=8,
batch_size=64)
results = []
results_idx = []
with torch.set_grad_enabled(False):
for data in tqdm(data_loader_update_train):
img = data['img'].cuda()
samples_idx = data['idx']
embeddings = model(img).detach().cpu().numpy()
results.append(embeddings)
results_idx.append(samples_idx)
# for image_id in tqdm(sample_ids):
# images = []
# for color in ['red', 'green', 'blue']:
# try:
# img = cv2.imread(f'{img_dir}/{image_id}_{color}.png', cv2.IMREAD_UNCHANGED)
# img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_AREA).astype("uint8")
# images.append(img)
# except:
# print(f'failed to open {img_dir}/{image_id}_{color}.png')
# raise
#
# images = np.stack(images, axis=0).astype(np.float32) / 255.0
# images = torch.from_numpy(images).cuda()
# images = normalize(images)
# images = torch.unsqueeze(images, 0)
# embeddings = model(images)
# embeddings = embeddings.detach().cpu().numpy()
# # print(image_id, embeddings.flatten())
# results.append(embeddings)
results = np.concatenate(results, axis=0)
results_idx = np.concatenate(results_idx, axis=0)
utils.print_stats('results_idx diff', np.diff(results_idx))
return results
def predict_extra(model_name, epoch_num, run=None):
data = pd.read_csv('../input/folds_4_extra.csv')
embeddings = predict(model_name=model_name, epoch_num=epoch_num,
img_dir=config.TRAIN_DIR_EXTRA,
sample_ids=data.Id.values,
run=run)
torch.save(embeddings, '../output/embeddings_extra.pt')
for i in range(embeddings.shape[1]):
data[f'emb_{i}'] = embeddings[:, i]
print(np.mean(embeddings, axis=0), np.std(embeddings, axis=0))
data.to_csv('../input/emb_extra.csv', index=False)
def predict_train(model_name, epoch_num, run=None):
data = pd.read_csv('../input/train.csv')
embeddings = predict(model_name=model_name, epoch_num=epoch_num,
img_dir=config.TRAIN_DIR,
sample_ids=data.Id.values,
run=run)
torch.save(embeddings, '../output/embeddings_train.pt')
for i in range(embeddings.shape[1]):
data[f'emb_{i}'] = embeddings[:, i]
print(np.mean(embeddings, axis=0), np.std(embeddings, axis=0))
data.to_csv('../input/emb_train.csv', index=False)
def predict_test(model_name, epoch_num, run=None):
data = pd.read_csv('../input/sample_submission.csv')
embeddings = predict(model_name=model_name, epoch_num=epoch_num,
img_dir=config.TEST_DIR,
sample_ids=data.Id.values,
run=run)
torch.save(embeddings, '../output/embeddings_test.pt')
for i in range(embeddings.shape[1]):
data[f'emb_{i}'] = embeddings[:, i]
print(np.mean(embeddings, axis=0), np.std(embeddings, axis=0))
data.to_csv('../input/emb_test.csv', index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('action', type=str, default='train')
parser.add_argument('--model', type=str, default='tr_resnet18_now_8')
parser.add_argument('--epoch', type=int, default=0)
args = parser.parse_args()
action = args.action
model = args.model
np.set_printoptions(precision=3, linewidth=200)
if action == 'train':
try:
train(model_name=model)
except KeyboardInterrupt:
pass
if action == 'predict_extra':
predict_extra(model_name=model, epoch_num=args.epoch, run=None)
if action == 'predict_train':
predict_train(model_name=model, epoch_num=args.epoch, run=None)
if action == 'predict_test':
predict_test(model_name=model, epoch_num=args.epoch, run=None)
| 34.466667 | 113 | 0.604827 | 1,065 | 0.089564 | 0 | 0 | 0 | 0 | 0 | 0 | 1,768 | 0.148684 |
b4c02e731c5a64ce720cceaeac6970bdb4974960 | 47,312 | py | Python | tools/armature.py | kayteh/cats-blender-plugin | 7fe6cc67aa754f4885f03d7801ba7745e1a6cf12 | [
"MIT"
] | 3 | 2018-10-12T05:32:16.000Z | 2020-05-16T16:24:34.000Z | tools/armature.py | kayteh/cats-blender-plugin | 7fe6cc67aa754f4885f03d7801ba7745e1a6cf12 | [
"MIT"
] | null | null | null | tools/armature.py | kayteh/cats-blender-plugin | 7fe6cc67aa754f4885f03d7801ba7745e1a6cf12 | [
"MIT"
] | null | null | null | # MIT License
# Copyright (c) 2017 GiveMeAllYourCats
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Code author: Shotariya
# Repo: https://github.com/Grim-es/shotariya
# Code author: Neitri
# Repo: https://github.com/netri/blender_neitri_tools
# Edits by: GiveMeAllYourCats, Hotox
import bpy
import copy
import tools.common
import tools.translate
import tools.armature_bones as Bones
import mmd_tools_local.operators.morph
import math
from mathutils import Matrix
mmd_tools_installed = True
class FixArmature(bpy.types.Operator):
bl_idname = 'armature.fix'
bl_label = 'Fix Model'
bl_description = 'Automatically:\n' \
'- Reparents bones\n' \
'- Removes unnecessary bones, objects, groups & constraints\n' \
'- Translates and renames bones & objects\n' \
'- Merges weight paints\n' \
'- Corrects the hips\n' \
'- Joins meshes\n' \
'- Converts morphs into shapes\n' \
'- Corrects shading'
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
@classmethod
def poll(cls, context):
    """Enable the operator only when an armature that owns objects exists."""
    armature = tools.common.get_armature()
    return armature is not None and len(tools.common.get_armature_objects()) > 0
def execute(self, context):
if len(tools.common.get_meshes_objects()) == 0:
self.report({'ERROR'}, 'No mesh inside the armature found!')
return {'CANCELLED'}
print('\nFixing Model:\n')
wm = bpy.context.window_manager
armature = tools.common.set_default_stage()
# Check if bone matrix == world matrix, important for xps models
x_cord, y_cord, z_cord, fbx = tools.common.get_bone_orientations()
# Add rename bones to reweight bones
temp_rename_bones = copy.deepcopy(Bones.bone_rename)
temp_reweight_bones = copy.deepcopy(Bones.bone_reweight)
temp_list_reweight_bones = copy.deepcopy(Bones.bone_list_weight)
temp_list_reparent_bones = copy.deepcopy(Bones.bone_list_parenting)
for key, value in Bones.bone_rename_fingers.items():
temp_rename_bones[key] = value
for key, value in temp_rename_bones.items():
if key == 'Spine':
continue
list = temp_reweight_bones.get(key)
if not list:
temp_reweight_bones[key] = value
else:
for name in value:
if name not in list:
temp_reweight_bones.get(key).append(name)
# Count objects for loading bar
steps = 0
for key, value in temp_rename_bones.items():
if '\Left' in key or '\L' in key:
steps += 2 * len(value)
else:
steps += len(value)
for key, value in temp_reweight_bones.items():
if '\Left' in key or '\L' in key:
steps += 2 * len(value)
else:
steps += len(value)
steps += len(temp_list_reweight_bones) # + len(Bones.bone_list_parenting)
# Get Double Entries
print('DOUBLE ENTRIES:')
print('RENAME:')
list = []
for key, value in temp_rename_bones.items():
for name in value:
if name.lower() not in list:
list.append(name.lower())
else:
print(key + " | " + name)
print('REWEIGHT:')
list = []
for key, value in temp_reweight_bones.items():
for name in value:
if name.lower() not in list:
list.append(name.lower())
else:
print(key + " | " + name)
print('DOUBLES END')
# Check if model is mmd model
mmd_root = None
try:
mmd_root = armature.parent.mmd_root
except AttributeError:
pass
# Perform mmd specific operations
if mmd_root:
# Set correct mmd shading
mmd_root.use_toon_texture = False
mmd_root.use_sphere_texture = False
# Convert mmd bone morphs into shape keys
if len(mmd_root.bone_morphs) > 0:
current_step = 0
wm.progress_begin(current_step, len(mmd_root.bone_morphs))
armature.data.pose_position = 'POSE'
for index, morph in enumerate(mmd_root.bone_morphs):
current_step += 1
wm.progress_update(current_step)
armature.parent.mmd_root.active_morph = index
mmd_tools_local.operators.morph.ViewBoneMorph.execute(None, context)
mesh = tools.common.get_meshes_objects()[0]
tools.common.select(mesh)
mod = mesh.modifiers.new(morph.name, 'ARMATURE')
mod.object = armature
bpy.ops.object.modifier_apply(apply_as='SHAPE', modifier=mod.name)
wm.progress_end()
# Perform source engine specific operations
# Check if model is source engine model
source_engine = False
for bone in armature.pose.bones:
if bone.name.startswith('ValveBiped'):
source_engine = True
break
# Remove unused animation data
if armature.animation_data and armature.animation_data.action and armature.animation_data.action.name == 'ragdoll':
armature.animation_data_clear()
source_engine = True
# Delete unused VTA mesh
for mesh in tools.common.get_meshes_objects(mode=1):
if mesh.name == 'VTA vertices':
tools.common.delete_hierarchy(mesh)
source_engine = True
break
if source_engine:
# Delete unused physics meshes (like rigidbodies)
for mesh in tools.common.get_meshes_objects():
if mesh.name.endswith('_physics')\
or mesh.name.endswith('_lod1')\
or mesh.name.endswith('_lod2')\
or mesh.name.endswith('_lod3')\
or mesh.name.endswith('_lod4')\
or mesh.name.endswith('_lod5')\
or mesh.name.endswith('_lod6'):
tools.common.delete_hierarchy(mesh)
# Reset to default
tools.common.set_default_stage()
# Set better bone view
armature.data.draw_type = 'OCTAHEDRAL'
armature.draw_type = 'WIRE'
armature.show_x_ray = True
armature.data.show_bone_custom_shapes = False
armature.layers[0] = True
# Disable backface culling
if context.area:
context.area.spaces[0].show_backface_culling = False
# Remove Rigidbodies and joints
for obj in bpy.data.objects:
if 'rigidbodies' in obj.name or 'joints' in obj.name:
tools.common.delete_hierarchy(obj)
# Remove objects from different layers and things that are not meshes
for child in armature.children:
for child2 in child.children:
if not child2.layers[0] or child2.type != 'MESH':
tools.common.delete(child2)
if not child.layers[0] or child.type != 'MESH':
tools.common.delete(child)
# Remove empty mmd object and unused objects
tools.common.remove_empty()
tools.common.remove_unused_objects()
# Joins meshes into one and calls it 'Body'
mesh = tools.common.join_meshes()
# tools.common.select(armature)
#
# # Correct pivot position
# try:
# # bpy.ops.view3d.snap_cursor_to_center()
# bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
# bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
# except RuntimeError:
# pass
tools.common.unselect_all()
tools.common.select(mesh)
# # Correct pivot position
# try:
# # bpy.ops.view3d.snap_cursor_to_center()
# bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
# bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
# except RuntimeError:
# pass
# Unlock all transforms
for i in range(0, 3):
armature.lock_location[i] = False
armature.lock_rotation[i] = False
armature.lock_scale[i] = False
mesh.lock_location[i] = False
mesh.lock_rotation[i] = False
mesh.lock_scale[i] = False
# Unlock all bone transforms
for bone in armature.pose.bones:
bone.lock_location[0] = False
bone.lock_location[1] = False
bone.lock_location[2] = False
bone.lock_rotation[0] = False
bone.lock_rotation[1] = False
bone.lock_rotation[2] = False
bone.lock_scale[0] = False
bone.lock_scale[1] = False
bone.lock_scale[2] = False
if source_engine and tools.common.has_shapekeys(mesh):
mesh.data.shape_keys.key_blocks[0].name = "Basis"
# Remove empty shape keys and then save the shape key order
tools.common.clean_shapekeys(mesh)
tools.common.save_shapekey_order(mesh.name)
# Combines same materials
if context.scene.combine_mats:
bpy.ops.combine.mats()
else:
# At least clean material names. Combining mats would do this otherwise
tools.common.clean_material_names(mesh)
# If all materials are transparent, make them visible. Also set transparency always to Z-Transparency
all_transparent = True
for mat_slot in mesh.material_slots:
mat_slot.material.transparency_method = 'Z_TRANSPARENCY'
if mat_slot.material.alpha > 0:
all_transparent = False
if all_transparent:
for mat_slot in mesh.material_slots:
mat_slot.material.alpha = 1
# Reorders vrc shape keys to the correct order
tools.common.sort_shape_keys(mesh.name)
# Fix all shape key names of half jp chars
if tools.common.has_shapekeys(mesh):
for shapekey in mesh.data.shape_keys.key_blocks:
shapekey.name = tools.translate.fix_jp_chars(shapekey.name)
# Fix faulty UV coordinates
fixed_uv_coords = 0
for uv in mesh.data.uv_layers:
for vert in range(len(uv.data) - 1):
if math.isnan(uv.data[vert].uv.x):
uv.data[vert].uv.x = 0
fixed_uv_coords += 1
if math.isnan(uv.data[vert].uv.y):
uv.data[vert].uv.y = 0
fixed_uv_coords += 1
# Translate bones
to_translate = []
for bone in armature.data.bones:
to_translate.append(bone.name)
tools.translate.update_dictionary(to_translate)
for bone in armature.data.bones:
bone.name, translated = tools.translate.translate(bone.name)
# Armature should be selected and in edit mode
tools.common.unselect_all()
tools.common.select(armature)
tools.common.switch('EDIT')
# Show all hidden verts and faces
if bpy.ops.mesh.reveal.poll():
bpy.ops.mesh.reveal()
# Remove Bone Groups
for group in armature.pose.bone_groups:
armature.pose.bone_groups.remove(group)
# Model should be in rest position
armature.data.pose_position = 'REST'
# Count steps for loading bar again and reset the layers
steps += len(armature.data.edit_bones)
for bone in armature.data.edit_bones:
if bone.name in Bones.bone_list or bone.name.startswith(tuple(Bones.bone_list_with)):
if bone.parent is not None:
steps += 1
else:
steps -= 1
bone.layers[0] = True
# Start loading bar
current_step = 0
wm.progress_begin(current_step, steps)
# List of chars to replace if they are at the start of a bone name
starts_with = [
('_', ''),
('ValveBiped_', ''),
('Bip1_', 'Bip_'),
('Bip01_', 'Bip_'),
('Bip001_', 'Bip_'),
('Character1_', ''),
('HLP_', ''),
('JD_', ''),
('JU_', ''),
('Armature|', ''),
('Bone_', ''),
('C_', ''),
('Cf_S_', ''),
('Cf_J_', ''),
('G_', ''),
]
# Standardize names
for bone in armature.data.edit_bones:
current_step += 1
wm.progress_update(current_step)
# Make all the underscores!
name = bone.name.replace(' ', '_')\
.replace('-', '_')\
.replace('.', '_')\
.replace('____', '_')\
.replace('___', '_')\
.replace('__', '_')\
# Always uppercase at the start and after an underscore
upper_name = ''
for i, s in enumerate(name.split('_')):
if i != 0:
upper_name += '_'
upper_name += s[:1].upper() + s[1:]
name = upper_name
# Replace if name starts with specified chars
for replacement in starts_with:
if name.startswith(replacement[0]):
name = replacement[1] + name[len(replacement[0]):]
# Remove digits from the start
name_split = name.split('_')
if len(name_split) > 1 and name_split[0].isdigit():
name = name_split[1]
# Specific condition
name_split = name.split('"')
if len(name_split) > 3:
name = name_split[1]
# Another specific condition
if ':' in name:
for i, split in enumerate(name.split(':')):
if i == 0:
name = ''
else:
name += split
# Remove S0 from the end
if name[-2:] == 'S0':
name = name[:-2]
bone.name = name
# Add conflicting bone names to new list
conflicting_bones = []
for names in Bones.bone_list_conflicting_names:
if '\Left' not in names[1] and '\L' not in names[1]:
conflicting_bones.append(names)
continue
names0 = []
name1 = ''
name2 = ''
for name0 in names[0]:
names0.append(name0.replace('\Left', 'Left').replace('\left', 'left').replace('\L', 'L').replace('\l', 'l'))
if '\Left' in names[1] or '\L' in names[1]:
name1 = names[1].replace('\Left', 'Left').replace('\left', 'left').replace('\L', 'L').replace('\l', 'l')
if '\Left' in names[2] or '\L' in names[2]:
name2 = names[2].replace('\Left', 'Left').replace('\left', 'left').replace('\L', 'L').replace('\l', 'l')
conflicting_bones.append((names0, name1, name2))
for name0 in names[0]:
names0.append(name0.replace('\Left', 'Right').replace('\left', 'right').replace('\L', 'R').replace('\l', 'r'))
if '\Left' in names[1] or '\L' in names[1]:
name1 = names[1].replace('\Left', 'Right').replace('\left', 'right').replace('\L', 'R').replace('\l', 'r')
if '\Left' in names[2] or '\L' in names[2]:
name2 = names[2].replace('\Left', 'Right').replace('\left', 'right').replace('\L', 'R').replace('\l', 'r')
conflicting_bones.append((names0, name1, name2))
# Resolve conflicting bone names
for names in conflicting_bones:
# Search for bone in armature
bone = None
for bone_tmp in armature.data.edit_bones:
if bone_tmp.name.lower() == names[1].lower():
bone = bone_tmp
break
# Cancel if bone was not found
if not bone:
continue
# Search for all the required bones
found_all = True
for name in names[0]:
found = False
for bone_tmp in armature.data.edit_bones:
if bone_tmp.name.lower() == name.lower():
found = True
break
if not found:
found_all = False
break
# Rename only if all required bones are found
if found_all:
bone.name = names[2]
# Standardize bone names again (new duplicate bones have ".001" in it)
for bone in armature.data.edit_bones:
bone.name = bone.name.replace('.', '_')
# Rename all the bones
spines = []
spine_parts = []
for bone_new, bones_old in temp_rename_bones.items():
if '\Left' in bone_new or '\L' in bone_new:
bones = [[bone_new.replace('\Left', 'Left').replace('\left', 'left').replace('\L', 'L').replace('\l', 'l'), ''],
[bone_new.replace('\Left', 'Right').replace('\left', 'right').replace('\L', 'R').replace('\l', 'r'), '']]
else:
bones = [[bone_new, '']]
for bone_old in bones_old:
if '\Left' in bone_new or '\L' in bone_new:
bones[0][1] = bone_old.replace('\Left', 'Left').replace('\left', 'left').replace('\L', 'L').replace('\l', 'l')
bones[1][1] = bone_old.replace('\Left', 'Right').replace('\left', 'right').replace('\L', 'R').replace('\l', 'r')
else:
bones[0][1] = bone_old
for bone in bones: # bone[0] = new name, bone[1] = old name
current_step += 1
wm.progress_update(current_step)
# Seach for bone in armature
bone_final = None
for bone_tmp in armature.data.edit_bones:
if bone_tmp.name.lower() == bone[1].lower():
bone_final = bone_tmp
break
# Cancel if bone was not found
if not bone_final:
continue
# If spine bone, then don't rename for now, and ignore spines with no children
if bone_new == 'Spine':
if len(bone_final.children) > 0:
spines.append(bone_final.name)
else:
spine_parts.append(bone_final.name)
continue
# Rename the bone
if bone[0] not in armature.data.edit_bones:
bone_final.name = bone[0]
# Add bones to parent reweight list
for name in Bones.bone_reweigth_to_parent:
if '\Left' in name or '\L' in name:
bones = [name.replace('\Left', 'Left').replace('\left', 'left').replace('\L', 'L').replace('\l', 'l'),
name.replace('\Left', 'Right').replace('\left', 'right').replace('\L', 'R').replace('\l', 'r')]
else:
bones = [name]
for bone_name in bones:
# Search for bone in armature
bone = None
for bone_tmp in armature.data.edit_bones:
if bone_tmp.name.lower() == bone_name.lower():
bone = bone_tmp
break
# Add bone to reweight list
if bone and bone.parent:
temp_list_reweight_bones[bone.name] = bone.parent.name
# Check if it is a mixamo model
mixamo = False
for bone in armature.data.edit_bones:
if not mixamo and 'Mixamo' in bone.name:
mixamo = True
break
# Rename bones which don't have a side and try to detect it automatically
for key, value in Bones.bone_list_rename_unknown_side.items():
for bone in armature.data.edit_bones:
parent = bone.parent
if parent is None:
continue
if parent.name == key or parent.name == key.lower():
if 'right' in bone.name.lower():
parent.name = 'Right ' + value
break
elif 'left' in bone.name.lower():
parent.name = 'Left ' + value
break
parent = parent.parent
if parent is None:
continue
if parent.name == key or parent.name == key.lower():
if 'right' in bone.name.lower():
parent.name = 'Right ' + value
break
elif 'left' in bone.name.lower():
parent.name = 'Left ' + value
break
# Remove un-needed bones, disconnect them and set roll to 0
for bone in armature.data.edit_bones:
if bone.name in Bones.bone_list or bone.name.startswith(tuple(Bones.bone_list_with)):
if bone.parent and mesh.vertex_groups.get(bone.name) and mesh.vertex_groups.get(bone.parent.name):
temp_list_reweight_bones[bone.name] = bone.parent.name
else:
armature.data.edit_bones.remove(bone)
else:
bone.use_connect = False
bone.roll = 0
# Make Hips top parent and reparent other top bones to hips
if 'Hips' in armature.data.edit_bones:
hips = armature.data.edit_bones.get('Hips')
hips.parent = None
for bone in armature.data.edit_bones:
if bone.parent is None:
bone.parent = hips
# == FIXING OF SPECIAL BONE CASES ==
# Fix all spines!
spine_count = len(spines)
# Fix spines from armatures with no upper body (like skirts)
if len(spine_parts) == 1 and not armature.data.edit_bones.get('Neck'):
if spine_count == 0:
armature.data.edit_bones.get(spine_parts[0]).name = 'Spine'
else:
spines.append(spine_parts[0])
if spine_count == 0:
pass
elif spine_count == 1: # Create missing Chest
print('BONE CREATION')
spine = armature.data.edit_bones.get(spines[0])
chest = armature.data.edit_bones.new('Chest')
neck = armature.data.edit_bones.get('Neck')
# Check for neck
if neck:
chest_top = neck.head
else:
chest_top = spine.tail
# Correct the names
spine.name = 'Spine'
chest.name = 'Chest'
# Set new Chest bone to new position
chest.tail = chest_top
chest.head = spine.head
chest.head[z_cord] = spine.head[z_cord] + (chest_top[z_cord] - spine.head[z_cord]) / 2
chest.head[y_cord] = spine.head[y_cord] + (chest_top[y_cord] - spine.head[y_cord]) / 2
# Adjust spine bone position
spine.tail = chest.head
# Reparent bones to include new chest
chest.parent = spine
for bone in armature.data.edit_bones:
if bone.parent == spine:
bone.parent = chest
elif spine_count == 2: # Everything correct, just rename them
print('NORMAL')
armature.data.edit_bones.get(spines[0]).name = 'Spine'
armature.data.edit_bones.get(spines[1]).name = 'Chest'
elif spine_count == 4 and source_engine: # SOURCE ENGINE SPECIFIC
print('SOURCE ENGINE')
spine = armature.data.edit_bones.get(spines[0])
chest = armature.data.edit_bones.get(spines[2])
chest.name = 'Chest'
spine.name = 'Spine'
spine.tail = chest.head
temp_list_reweight_bones[spines[1]] = 'Spine'
temp_list_reweight_bones[spines[3]] = 'Chest'
elif spine_count > 2: # Merge spines
print('MASS MERGING')
print(spines)
spine = armature.data.edit_bones.get(spines[0])
chest = armature.data.edit_bones.get(spines[spine_count - 1])
# Correct names
spine.name = 'Spine'
chest.name = 'Chest'
# Adjust spine bone position
spine.tail = chest.head
# Add all redundant spines to the merge list
for spine in spines[1:spine_count-1]:
print(spine)
temp_list_reweight_bones[spine] = 'Spine'
# Fix missing neck
if 'Neck' not in armature.data.edit_bones:
if 'Chest' in armature.data.edit_bones:
if 'Head' in armature.data.edit_bones:
neck = armature.data.edit_bones.new('Neck')
chest = armature.data.edit_bones.get('Chest')
head = armature.data.edit_bones.get('Head')
neck.head = chest.tail
neck.tail = head.head
if neck.head[z_cord] == neck.tail[z_cord]:
neck.tail[z_cord] += 0.1
# Straighten up the head bone
if 'Head' in armature.data.edit_bones:
head = armature.data.edit_bones.get('Head')
head.tail[x_cord] = head.head[x_cord]
head.tail[y_cord] = head.head[y_cord]
if head.tail[z_cord] < head.head[z_cord]:
head.tail[z_cord] = head.head[z_cord] + 0.1
# Correct arm bone positions for better looks
tools.common.correct_bone_positions()
# Hips bone should be fixed as per specification from the SDK code
full_body_tracking = context.scene.full_body
if not mixamo:
if 'Hips' in armature.data.edit_bones:
if 'Spine' in armature.data.edit_bones:
if 'Left leg' in armature.data.edit_bones:
if 'Right leg' in armature.data.edit_bones:
hips = armature.data.edit_bones.get('Hips')
spine = armature.data.edit_bones.get('Spine')
left_leg = armature.data.edit_bones.get('Left leg')
right_leg = armature.data.edit_bones.get('Right leg')
left_knee = armature.data.edit_bones.get('Left knee')
right_knee = armature.data.edit_bones.get('Right knee')
# Fixing the hips
if not full_body_tracking:
# Hips should have x value of 0 in both head and tail
middle_x = (right_leg.head[x_cord] + left_leg.head[x_cord]) / 2
hips.head[x_cord] = middle_x
hips.tail[x_cord] = middle_x
# Make sure the hips bone (tail and head tip) is aligned with the legs Y
hips.head[y_cord] = right_leg.head[y_cord]
hips.tail[y_cord] = right_leg.head[y_cord]
hips.head[z_cord] = right_leg.head[z_cord]
hips.tail[z_cord] = spine.head[z_cord]
if hips.tail[z_cord] < hips.head[z_cord]:
hips.tail[z_cord] = hips.tail[z_cord] + 0.1
# if hips.tail[z_cord] < hips.head[z_cord]:
# hips_height = hips.head[z_cord]
# hips.head = hips.tail
# hips.tail[z_cord] = hips_height
#
#
#
# hips_height = hips.head[z_cord]
# hips.head = hips.tail
# hips.tail[z_cord] = hips_height
# # Hips should have x value of 0 in both head and tail
# hips.head[x_cord] = 0
# hips.tail[x_cord] = 0
# # Make sure the hips bone (tail and head tip) is aligned with the legs Y
# hips.head[y_cord] = right_leg.head[y_cord]
# hips.tail[y_cord] = right_leg.head[y_cord]
# Flip the hips bone and make sure the hips bone is not below the legs bone
# hip_bone_length = abs(hips.tail[z_cord] - hips.head[z_cord])
# hips.head[z_cord] = right_leg.head[z_cord]
# hips.tail[z_cord] = hips.head[z_cord] + hip_bone_length
# hips.head[z_cord] = right_leg.head[z_cord]
# hips.tail[z_cord] = spine.head[z_cord]
# if hips.tail[z_cord] < hips.head[z_cord]:
# hips.tail[z_cord] = hips.tail[z_cord] + 0.1
# elif spine and chest and neck and head:
# bones = [hips, spine, chest, neck, head]
# for bone in bones:
# bone_length = abs(bone.tail[z_cord] - bone.head[z_cord])
# bone.tail[x_cord] = bone.head[x_cord]
# bone.tail[y_cord] = bone.head[y_cord]
# bone.tail[z_cord] = bone.head[z_cord] + bone_length
else:
if left_leg and left_knee and right_leg and right_knee:
hips.head[x_cord] = 0
hips.tail[x_cord] = 0
hips.tail[y_cord] = hips.head[y_cord]
hips.head[z_cord] = spine.head[z_cord]
hips.tail[z_cord] = right_leg.head[z_cord]
left_leg_top = armature.data.edit_bones.new('Left leg top')
right_leg_top = armature.data.edit_bones.new('Right leg top')
left_leg_top.head = left_leg.head
left_leg_top.tail = left_leg.head
left_leg_top.tail[z_cord] = left_leg.head[z_cord] + 0.1
right_leg_top.head = right_leg.head
right_leg_top.tail = right_leg.head
right_leg_top.tail[z_cord] = right_leg.head[z_cord] + 0.1
spine.head = hips.head
# hips.head[z_cord] -= 0.0025
# spine.head[z_cord] += 0.0025
left_leg.name = "Left leg 2"
right_leg.name = "Right leg 2"
left_leg_top.name = "Left leg"
right_leg_top.name = "Right leg"
left_leg_top.parent = hips
right_leg_top.parent = hips
left_leg.parent = left_leg_top
right_leg.parent = right_leg_top
left_knee.parent = left_leg_top
right_knee.parent = right_leg_top
# # Fixing legs
# right_knee = armature.data.edit_bones.get('Right knee')
# left_knee = armature.data.edit_bones.get('Left knee')
# if right_knee and left_knee:
# # Make sure the upper legs tail are the same x/y values as the lower leg tail x/y
# right_leg.tail[x_cord] = right_leg.head[x_cord]
# left_leg.tail[x_cord] = left_knee.head[x_cord]
# right_leg.head[y_cord] = right_knee.head[y_cord]
# left_leg.head[y_cord] = left_knee.head[y_cord]
#
# # Make sure the leg bones are setup straight. (head should be same X as tail)
# left_leg.head[x_cord] = left_leg.tail[x_cord]
# right_leg.head[x_cord] = right_leg.tail[x_cord]
#
# # Make sure the left legs (head tip) have the same Y values as right leg (head tip)
# left_leg.head[y_cord] = right_leg.head[y_cord]
# Function: Reweight all eye children into the eyes
def add_eye_children(eye_bone, parent_name):
for eye in eye_bone.children:
temp_list_reweight_bones[eye.name] = parent_name
add_eye_children(eye, parent_name)
# Reweight all eye children into the eyes
for eye_name in ['Eye_L', 'Eye_R']:
if eye_name in armature.data.edit_bones:
eye = armature.data.edit_bones.get(eye_name)
add_eye_children(eye, eye.name)
# Rotate if on head and not fbx (Unreal engine model)
if 'Hips' in armature.data.edit_bones:
hips = armature.pose.bones.get('Hips')
obj = hips.id_data
matrix_final = obj.matrix_world * hips.matrix
# print(matrix_final)
# print(matrix_final[2][3])
# print(fbx)
if not fbx and matrix_final[2][3] < 0:
# print(hips.head[0], hips.head[1], hips.head[2])
# Rotation of -180 around the X-axis
rot_x_neg180 = Matrix.Rotation(-math.pi, 4, 'X')
armature.matrix_world = rot_x_neg180 * armature.matrix_world
mesh.rotation_euler = (math.radians(180), 0, 0)
# Fixes bones disappearing, prevents bones from having their tail and head at the exact same position
for bone in armature.data.edit_bones:
if round(bone.head[x_cord], 5) == round(bone.tail[x_cord], 5)\
and round(bone.head[y_cord], 5) == round(bone.tail[y_cord], 5)\
and round(bone.head[z_cord], 5) == round(bone.tail[z_cord], 5):
if bone.name == 'Hips' and full_body_tracking:
bone.tail[z_cord] -= 0.1
else:
bone.tail[z_cord] += 0.1
# Mixing the weights
tools.common.unselect_all()
tools.common.switch('OBJECT')
tools.common.select(mesh)
# for bone_name in temp_rename_bones.keys():
# bone = armature.data.bones.get(bone_name)
# if bone:
# print(bone_name)
# bone.hide = False
for bone_new, bones_old in temp_reweight_bones.items():
if '\Left' in bone_new or '\L' in bone_new:
bones = [[bone_new.replace('\Left', 'Left').replace('\left', 'left').replace('\L', 'L').replace('\l', 'l'), ''],
[bone_new.replace('\Left', 'Right').replace('\left', 'right').replace('\L', 'R').replace('\l', 'r'), '']]
else:
bones = [[bone_new, '']]
for bone_old in bones_old:
if '\Left' in bone_new or '\L' in bone_new:
bones[0][1] = bone_old.replace('\Left', 'Left').replace('\left', 'left').replace('\L', 'L').replace('\l', 'l')
bones[1][1] = bone_old.replace('\Left', 'Right').replace('\left', 'right').replace('\L', 'R').replace('\l', 'r')
else:
bones[0][1] = bone_old
for bone in bones: # bone[0] = new name, bone[1] = old name
current_step += 1
wm.progress_update(current_step)
# Seach for vertex group
vg = None
for vg_tmp in mesh.vertex_groups:
if vg_tmp.name.lower() == bone[1].lower():
vg = vg_tmp
break
# Cancel if vertex group was not found
if not vg:
continue
if bone[0] == vg.name:
print('BUG: ' + bone[0] + ' tried to mix weights with itself!')
continue
# print(bone[1] + " to1 " + bone[0])
# If important vertex group is not there create it
if mesh.vertex_groups.get(bone[0]) is None:
if bone[0] in Bones.dont_delete_these_bones and bone[0] in armature.data.bones:
bpy.ops.object.vertex_group_add()
mesh.vertex_groups.active.name = bone[0]
if mesh.vertex_groups.get(bone[0]) is None:
continue
else:
continue
bone_tmp = armature.data.bones.get(vg.name)
if bone_tmp:
for child in bone_tmp.children:
if not temp_list_reparent_bones.get(child.name):
temp_list_reparent_bones[child.name] = bone[0]
# print(bone[1] + " to " + bone[0])
tools.common.mix_weights(mesh, vg.name, bone[0])
# Old mixing weights. Still important
for key, value in temp_list_reweight_bones.items():
current_step += 1
wm.progress_update(current_step)
# Search for vertex groups
vg_from = None
vg_to = None
for vg_tmp in mesh.vertex_groups:
if vg_tmp.name.lower() == key.lower():
vg_from = vg_tmp
if vg_to:
break
elif vg_tmp.name.lower() == value.lower():
vg_to = vg_tmp
if vg_from:
break
# Cancel if vertex groups was not found
if not vg_from or not vg_to:
continue
bone_tmp = armature.data.bones.get(vg_from.name)
if bone_tmp:
for child in bone_tmp.children:
if not temp_list_reparent_bones.get(child.name):
temp_list_reparent_bones[child.name] = vg_to.name
if vg_from.name == vg_to.name:
print('BUG: ' + vg_to.name + ' tried to mix weights with itself!')
continue
# Mix the weights
# print(vg_from.name, 'into', vg_to.name)
tools.common.mix_weights(mesh, vg_from.name, vg_to.name)
tools.common.unselect_all()
tools.common.select(armature)
tools.common.switch('EDIT')
# Reparent all bones to be correct for unity mapping and vrc itself
for key, value in temp_list_reparent_bones.items():
# current_step += 1
# wm.progress_update(current_step)
if key in armature.data.edit_bones and value in armature.data.edit_bones:
armature.data.edit_bones.get(key).parent = armature.data.edit_bones.get(value)
# Bone constraints should be deleted
# if context.scene.remove_constraints:
tools.common.delete_bone_constraints()
# Removes unused vertex groups
tools.common.remove_unused_vertex_groups()
# Zero weight bones should be deleted
if context.scene.remove_zero_weight:
tools.common.delete_zero_weight()
# # This is code for testing
# print('LOOKING FOR BONES!')
# if 'Head' in tools.common.get_armature().pose.bones:
# print('THEY ARE THERE!')
# else:
# print('NOT FOUND!!!!!!')
# return {'FINISHED'}
# At this point, everything should be fixed and now we validate and give errors if needed
# The bone hierarchy needs to be validated
hierarchy_check_hips = check_hierarchy(False, [
['Hips', 'Spine', 'Chest', 'Neck', 'Head'],
['Hips', 'Left leg', 'Left knee', 'Left ankle'],
['Hips', 'Right leg', 'Right knee', 'Right ankle'],
['Chest', 'Left shoulder', 'Left arm', 'Left elbow', 'Left wrist'],
['Chest', 'Right shoulder', 'Right arm', 'Right elbow', 'Right wrist']
])
# Armature should be named correctly (has to be at the end because of multiple armatures)
tools.common.fix_armature_names()
# Fix shading (check for runtime error because of ci tests)
if not source_engine:
try:
bpy.ops.mmd_tools.set_shadeless_glsl_shading()
except RuntimeError:
pass
wm.progress_end()
if not hierarchy_check_hips['result']:
self.report({'ERROR'}, hierarchy_check_hips['message'])
return {'FINISHED'}
if fixed_uv_coords:
tools.common.show_error(6.2, ['The model was successfully fixed, but there were ' + str(fixed_uv_coords) + ' faulty UV coordinates.',
'This could result in broken textures and you might have to fix them manually.',
'This issue is often caused by edits in PMX editor.'])
return {'FINISHED'}
self.report({'INFO'}, 'Model successfully fixed.')
return {'FINISHED'}
def check_hierarchy(check_parenting, correct_hierarchy_array):
    """Validate that the required bone chains exist in the armature.

    Parameters
    ----------
    check_parenting : bool
        If True, additionally verify that each chain is parented in order
        (every bone's parent must be the previous bone of its chain).
        If False, only report missing bones via a popup.
    correct_hierarchy_array : list of list of str
        Bone chains, e.g. ``[['Hips', 'Spine', 'Chest', 'Neck', 'Head'], ...]``.

    Returns
    -------
    dict
        ``{'result': bool, 'message': str}``. ``result`` is False only when a
        parenting problem is found; missing bones alone still return True
        (a warning popup is shown instead of aborting).
    """
    armature = tools.common.set_default_stage()

    missing_bones = []
    missing2 = ['The following bones were not found:', '']

    # Collect every bone referenced by a chain but absent from the armature,
    # building one report line per chain.
    for correct_hierarchy in correct_hierarchy_array:
        line = ' - '
        for bone in correct_hierarchy:
            if bone not in missing_bones and bone not in armature.data.bones:
                missing_bones.append(bone)
                if len(line) > 3:
                    line += ', '
                line += bone
        if len(line) > 3:  # at least one missing bone was appended to this line
            missing2.append(line)

    if len(missing2) > 2 and not check_parenting:
        missing2.append('')
        missing2.append('Looks like you found a model which Cats could not fix!')
        missing2.append('If this is a non modified model we would love to make it compatible.')
        missing2.append('Report it to us in the forum or in our discord, links can be found in the Credits panel.')
        tools.common.show_error(6.4, missing2)
        # Missing bones are reported to the user but do not abort the fix.
        return {'result': True, 'message': ''}

    if check_parenting:
        for correct_hierarchy in correct_hierarchy_array:
            previous = None
            for index, bone in enumerate(correct_hierarchy):
                if index > 0:
                    previous = correct_hierarchy[index - 1]

                if bone in armature.data.bones:
                    bone = armature.data.bones[bone]

                    # If a previous item was found
                    if previous is not None:
                        # And there is no parent, then we have a problem mkay
                        if bone.parent is None:
                            return {'result': False, 'message': bone.name + ' is not parented at all, this will cause problems!'}
                        # Previous needs to be the parent of the current item
                        if previous != bone.parent.name:
                            return {'result': False, 'message': bone.name + ' is not parented to ' + previous + ', this will cause problems!'}

    # Consistent schema with every other return (previously lacked 'message').
    return {'result': True, 'message': ''}
class ModelSettings(bpy.types.Operator):
    """Popup dialog exposing the Fix Model options (full body tracking,
    end-bone keeping, material combining, zero-weight bone removal).

    The options themselves are Scene properties; this operator only draws
    the dialog that toggles them.
    """
    bl_idname = "armature.settings"
    bl_label = "Fix Model Settings"

    def execute(self, context):
        # Nothing to apply here: the dialog only toggles scene properties.
        return {'FINISHED'}

    def invoke(self, context, event):
        """Open the settings popup, scaling its width with the user's DPI."""
        dpi_value = bpy.context.user_preferences.system.dpi
        return context.window_manager.invoke_props_dialog(self, width=dpi_value * 3.25, height=-550)

    def check(self, context):
        # Important for changing options
        # Returning True makes Blender redraw the dialog whenever a property
        # changes, so the conditional info text below appears immediately.
        return True

    def draw(self, context):
        """Draw the option toggles and, when full-body tracking is enabled,
        an informational note about the resulting Unity warning."""
        layout = self.layout
        col = layout.column(align=True)

        row = col.row(align=True)
        row.prop(context.scene, 'full_body')
        row = col.row(align=True)
        # Keeping end bones only matters when zero-weight bones get removed,
        # so grey this row out otherwise.
        row.active = context.scene.remove_zero_weight
        row.prop(context.scene, 'keep_end_bones')
        row = col.row(align=True)
        row.prop(context.scene, 'combine_mats')
        row = col.row(align=True)
        row.prop(context.scene, 'remove_zero_weight')

        if context.scene.full_body:
            col.separator()
            row = col.row(align=True)
            row.scale_y = 0.7
            row.label('INFO:', icon='INFO')
            row = col.row(align=True)
            row.scale_y = 0.7
            # NOTE(review): tools.supporter is not imported in this module's
            # header; presumably it is loaded by the add-on's __init__ before
            # this dialog can be opened — verify.
            row.label('You can safely ignore the', icon_value=tools.supporter.preview_collections["custom_icons"]["empty"].icon_id)
            row = col.row(align=True)
            row.scale_y = 0.7
            row.label('"Spine length zero" warning in Unity.', icon_value=tools.supporter.preview_collections["custom_icons"]["empty"].icon_id)
            col.separator()
| 41.611258 | 145 | 0.521622 | 43,581 | 0.921141 | 0 | 0 | 218 | 0.004608 | 0 | 0 | 12,506 | 0.26433 |
b4c089a9dff52456f55324f47ec3da262a9da232 | 8,392 | py | Python | lentil/fourier.py | andykee/lentil | 28b3e449336ec4a405144b302d7c0d92d308f052 | [
"BSD-3-Clause"
] | 6 | 2020-07-30T19:33:22.000Z | 2022-01-19T19:17:13.000Z | lentil/fourier.py | andykee/lentil | 28b3e449336ec4a405144b302d7c0d92d308f052 | [
"BSD-3-Clause"
] | 42 | 2020-06-19T06:02:10.000Z | 2022-03-05T00:06:46.000Z | lentil/fourier.py | andykee/lentil | 28b3e449336ec4a405144b302d7c0d92d308f052 | [
"BSD-3-Clause"
] | 2 | 2021-08-05T18:27:08.000Z | 2022-01-18T19:42:40.000Z | import functools
import numpy as np
def dft2(f, alpha, npix=None, shift=(0, 0), offset=(0, 0), unitary=True, out=None):
    """Compute the 2-dimensional discrete Fourier Transform.

    This function allows independent control over input shape, output shape,
    and output sampling by implementing the matrix triple product algorithm
    described in [1].

    Parameters
    ----------
    f : array_like
        2D array to Fourier Transform
    alpha : float or array_like
        Output plane sampling interval (frequency). If :attr:`alpha` is an
        array, ``alpha[1]`` represents row-wise sampling and ``alpha[2]``
        represents column-wise sampling. If :attr:`alpha` is a scalar,
        ``alpha[1] = alpha[2] = alpha`` gives uniform sampling across the rows
        and columns of the output plane.
    npix : int or array_like, optional
        Size of the output array :attr:`F`. If :attr:`npix` is an array,
        ``F.shape = (npix[1], npix[2])``. If :attr:`npix` is a scalar,
        ``F.shape = (npix, npix)``. Default is ``f.shape``.
    shift : array_like, optional
        Number of pixels in (r,c) to shift the DC pixel in the output plane
        with the origin centrally located in the plane. Default is ``(0,0)``.
    offset : array_like, optional
        Number of pixels in (r,c) that the input plane is shifted relative to
        the origin. Default is ``(0,0)``.
    unitary : bool, optional
        Normalization flag. If ``True``, a normalization is performed on the
        output such that the DFT operation is unitary and energy is conserved
        through the Fourier transform operation (Parseval's theorem). In this
        way, the energy in a limited-area DFT is a fraction of the total
        energy corresponding to the limited area. Default is ``True``.
    out : ndarray or None
        A location into which the result is stored. If provided, it must have
        shape = npix and a complex dtype. If not provided or None, a
        freshly-allocated array is returned.

    Returns
    -------
    F : complex ndarray

    Notes
    -----
    * Setting ``alpha = 1/f.shape`` and ``npix = f.shape`` is equivalent to
      ``F = np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(f)))``
    * ``dft2()`` is designed to place the DC pixel in the same location as a
      well formed call to any standard FFT for both even and odd sized input
      arrays. The DC pixel is located at ``np.floor(npix/2) + 1``, which is
      consistent with calls to Numpy's FFT method where the input and output
      are correctly shifted:
      ``np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(f)))``.
    * If the y-axis shift behavior is not what you are expecting, you most
      likely have your plotting axes flipped (matplotlib's default behavior is
      to place [0,0] in the upper left corner of the axes). This may be resolved
      by either flipping the sign of the y component of ``shift`` or by passing
      ``origin = 'lower'`` to ``imshow()``.

    References
    ----------
    [1] Soummer, et. al. Fast computation of Lyot-style coronagraph propagation (2007)

    """
    alpha_row, alpha_col = _sanitize_alpha(alpha)

    f = np.asarray(f)
    m, n = f.shape

    if npix is None:
        npix = [m, n]
    M, N = _sanitize_npix(npix)

    shift_row, shift_col = _sanitize_shift(shift)
    # NOTE(review): offsets are coerced to ints by _sanitize_npix, so fractional
    # offsets are silently truncated — confirm whether _sanitize_shift was intended.
    offset_row, offset_col = _sanitize_npix(offset)

    if out is not None:
        # Fail fast if the caller-supplied buffer cannot hold complex results.
        if not np.can_cast(complex, out.dtype):
            raise TypeError(f"Cannot cast complex output to dtype('{out.dtype}')")

    # Kernel matrices for the matrix triple product F = E1 @ f @ E2 (cached on args).
    E1, E2 = _dft2_matrices(m, n, M, N, alpha_row, alpha_col, shift_row, shift_col,
                            offset_row, offset_col)
    F = np.dot(E1.dot(f), E2, out=out)

    # Apply the unitary normalization in place, without reallocating memory.
    if unitary:
        np.multiply(F, np.sqrt(np.abs(alpha_row * alpha_col)), out=F)

    return F
@functools.lru_cache(maxsize=32)
def _dft2_matrices(m, n, M, N, alphar, alphac, shiftr, shiftc, offsetr, offsetc):
R, S, U, V = _dft2_coords(m, n, M, N)
E1 = np.exp(-2.0 * 1j * np.pi * alphar * np.outer(R-shiftr+offsetr, U-shiftr)).T
E2 = np.exp(-2.0 * 1j * np.pi * alphac * np.outer(S-shiftc+offsetc, V-shiftc))
return E1, E2
@functools.lru_cache(maxsize=32)
def _dft2_coords(m, n, M, N):
# R and S are (r,c) coordinates in the (m x n) input plane f
# V and U are (r,c) coordinates in the (M x N) output plane F
R = np.arange(m) - np.floor(m/2.0)
S = np.arange(n) - np.floor(n/2.0)
U = np.arange(M) - np.floor(M/2.0)
V = np.arange(N) - np.floor(N/2.0)
return R, S, U, V
def idft2(F, alpha, npix=None, shift=(0,0), unitary=True, out=None):
    """Compute the 2-dimensional inverse discrete Fourier Transform.

    Implemented in terms of the forward transform via the conjugation identity
    ``idft(F) = conj(dft(conj(F))) / N`` (see [2]), so it inherits ``dft2()``'s
    independent control over input shape, output shape, and output sampling [1].

    Parameters
    ----------
    F : array_like
        2D array to inverse Fourier Transform
    alpha : float or array_like
        Input plane sampling interval (frequency). A scalar applies the same
        sampling to rows and columns; a two-element value gives (row, column)
        sampling.
    npix : int or array_like, optional
        Shape of the output array. A scalar produces a square output. Default
        is ``F.shape``.
    shift : array_like, optional
        Number of pixels in (r,c) to shift the DC pixel in the output plane
        with the origin centrally located in the plane. Default is ``(0,0)``.
    unitary : bool, optional
        If ``True``, normalize so the transform is unitary and energy is
        conserved through the transform (Parseval's theorem). Default ``True``.
    out : ndarray or None
        Optional preallocated complex output array of shape ``npix``.

    Returns
    -------
    f : complex ndarray

    Notes
    -----
    * Setting ``alpha = 1/F.shape`` and ``npix = F.shape`` is equivalent to
      ``np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(F)))``.
    * The DC pixel is placed at ``np.floor(npix/2) + 1``, matching a correctly
      shifted call to Numpy's IFFT.
    * If the y-axis shift behavior looks flipped, check your plotting axes
      (matplotlib places [0,0] in the upper left by default); flip the sign of
      the y shift or pass ``origin='lower'`` to ``imshow()``.

    References
    ----------
    [1] Soummer, et. al. Fast computation of Lyot-style coronagraph propagation (2007)
    [2] "Expressing the inverse DFT in terms of the DFT", Wikipedia: Discrete Fourier transform.

    """
    spectrum = np.asarray(F)
    sample_count = spectrum.size
    # Forward-transform the conjugate; reuses `out` when supplied (allocates otherwise).
    result = dft2(np.conj(spectrum), alpha, npix, shift, unitary=unitary, out=out)
    # Conjugate back and rescale in place — no additional allocations.
    np.conj(result, out=result)
    return np.divide(result, sample_count, out=result)
def _sanitize_alpha(x):
"""Return consistent representation of alpha as ar, ac"""
x = np.asarray(x)
if x.size == 1:
ar, ac = float(x), float(x)
else:
ar, ac = float(x[0]), float(x[1])
return ar, ac
def _sanitize_npix(x):
"""Return consistent representation of npix as M, N"""
x = np.asarray(x)
if x.size == 1:
M, N = int(x), int(x)
else:
M, N = int(x[0]), int(x[1])
return M, N
def _sanitize_shift(x):
"""Return consistent representation of shift as sr, sc"""
if isinstance(x, np.ndarray):
sr, sc = float(x[0]), float(x[1])
else:
sr, sc = x
return sr, sc
| 38.319635 | 164 | 0.636916 | 0 | 0 | 0 | 0 | 715 | 0.0852 | 0 | 0 | 6,267 | 0.746783 |
b4c0a42efb0c3bbdc0fcf9baf9ae460765b29cd0 | 999 | py | Python | setup.py | db48x/flask-digest | 6a3138aef4baa1c1a129eb655c2644bf61387af1 | [
"MIT"
] | 8 | 2015-07-18T10:34:38.000Z | 2019-11-04T01:50:15.000Z | setup.py | db48x/flask-digest | 6a3138aef4baa1c1a129eb655c2644bf61387af1 | [
"MIT"
] | 1 | 2019-07-22T14:08:12.000Z | 2020-05-10T16:36:36.000Z | setup.py | db48x/flask-digest | 6a3138aef4baa1c1a129eb655c2644bf61387af1 | [
"MIT"
] | 3 | 2016-05-02T19:04:34.000Z | 2021-07-01T10:58:31.000Z | from setuptools import setup, find_packages
# Read the long description up front so the file handle is closed deterministically
# (previously `open('README.rst').read()` was passed inline, leaking the handle).
with open('README.rst') as readme_file:
    _long_description = readme_file.read()

# Package metadata for Flask-Digest; interface and values unchanged.
setup(
    name = 'Flask-Digest',
    version = '0.2.1',

    author = 'Victor Andrade de Almeida',
    author_email = 'vct.a.almeida@gmail.com',
    url = 'https://github.com/vctandrade/flask-digest',

    description = 'A RESTful authentication service for Flask applications',
    long_description = _long_description,

    license = 'MIT',
    platforms = ['Platform Independent'],

    install_requires = ['Flask >= 0.10.1'],
    packages = find_packages(),

    keywords = ['digest', 'authentication', 'flask'],
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Flask',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: Implementation'
    ]
)
| 31.21875 | 76 | 0.618619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 573 | 0.573574 |
b4c114322a503517ae099af3c79f2d917b2231a7 | 210 | py | Python | model/contact.py | tarsic99/Python-training- | 96da2df5f249f39370295504748b218247f2935c | [
"Apache-2.0"
] | null | null | null | model/contact.py | tarsic99/Python-training- | 96da2df5f249f39370295504748b218247f2935c | [
"Apache-2.0"
] | null | null | null | model/contact.py | tarsic99/Python-training- | 96da2df5f249f39370295504748b218247f2935c | [
"Apache-2.0"
] | null | null | null | class Contact:
def __init__(self, first_name = None, last_name = None, mobile_phone = None):
self.first_name = first_name
self.last_name = last_name
self.mobile_phone = mobile_phone | 35 | 81 | 0.685714 | 210 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b4c2782a03a0c1f00a31c56a2d167a1098a5ec9d | 42,143 | py | Python | src/mrnet/stochastic/kmc.py | kamronald/mrnet | 1d0bd6c8ac38deb913a77fce3d6ebc007529993f | [
"BSD-3-Clause-LBNL"
] | null | null | null | src/mrnet/stochastic/kmc.py | kamronald/mrnet | 1d0bd6c8ac38deb913a77fce3d6ebc007529993f | [
"BSD-3-Clause-LBNL"
] | null | null | null | src/mrnet/stochastic/kmc.py | kamronald/mrnet | 1d0bd6c8ac38deb913a77fce3d6ebc007529993f | [
"BSD-3-Clause-LBNL"
] | null | null | null | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import math
import random
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import N_A
from numba import jit
import copy
__author__ = "Ronald Kam, Evan Spotte-Smith, Xiaowei Xie"
__email__ = "kamronald@berkeley.edu"
__copyright__ = "Copyright 2020, The Materials Project"
__version__ = "0.1"
"""
Kinetic Monte Carlo (kMC) simulation for a reaction network, assuming spatial homogeneity. Simulation can be performed
with and without ReactionNetwork objects. The version without ReactionNetwork objects is computationally cheaper.
The algorithm is described by Gillespie (1976).
"""
def initialize_simulation(reaction_network, initial_cond, volume=10 ** -24):
    """
    Initial loop through reactions to create lists, mappings, and initial states needed for simulation without
    reaction network objects.

    Args:
        reaction_network: Fully generated ReactionNetwork
        initial_cond: dict mapping entry id to initial concentration [M] for species present at t=0
        volume: float of system volume [m^3]

    :return: list of
        initial_state: array of initial molecule amounts, indexed corresponding to reaction_network.entries_list
        initial_state_dict: dict mapping molecule index to initial molecule amounts
        species_rxn_mapping: 2d array; each row i contains reactions which molecule_i takes part in (-1 padded)
        reactant_array: (n_rxns x 2) array, each row containing reactant mol_index of forward reaction
        product_array: (n_rxns x 2) array, each row containing product mol_index of forward reaction
        coord_array: (2*n_rxns x 1) array, with coordination number of each for and rev rxn: [c1_f, c1_r, ...]
        rate_constants: (2*n_rxns x 1) array, with rate constant of each for and rev rxn: [k1_f, k1_r, ...]
        propensities: (2*n_rxns x 1) array of reaction propensities, defined as coord_num*rate_constant
        molid_index_mapping: mapping between species entry id and its molecule index
    """
    num_rxns = len(reaction_network.reactions)
    num_species = len(reaction_network.entries_list)
    molid_index_mapping = dict()
    initial_state = [0 for _ in range(num_species)]
    initial_state_dict = dict()

    for ind, mol in enumerate(reaction_network.entries_list):
        molid_index_mapping[mol.entry_id] = ind
        this_c = initial_cond.get(mol.entry_id, 0)
        # Concentration [mol/L] -> molecule count: c * N_A * (1000 L/m^3) * V [m^3]
        this_mol_amt = int(volume * N_A * 1000 * this_c)
        initial_state[ind] = this_mol_amt
        if mol.entry_id in initial_cond:
            initial_state_dict[ind] = this_mol_amt

    # Initially compile each species' reactions in lists, later convert to a 2d array
    species_rxn_mapping_list = [[] for _ in range(num_species)]
    reactant_array = -1 * np.ones((num_rxns, 2), dtype=int)  # -1 pads single-reactant rows
    product_array = -1 * np.ones((num_rxns, 2), dtype=int)
    coord_array = np.zeros(2 * num_rxns)
    rate_constants = np.zeros(2 * num_rxns)
    # `rxn_ind` renamed from `id` to avoid shadowing the builtin.
    for rxn_ind, reaction in enumerate(reaction_network.reactions):
        # Keep track of reactant amounts, for later calculating coordination number
        num_reactants_for = list()
        num_reactants_rev = list()
        rate_constants[2 * rxn_ind] = reaction.k_A
        rate_constants[2 * rxn_ind + 1] = reaction.k_B
        for idx, react in enumerate(reaction.reactants):
            # for each reactant, need to find the corresponding mol_id with the index
            mol_ind = molid_index_mapping[react.entry_id]
            reactant_array[rxn_ind, idx] = mol_ind
            species_rxn_mapping_list[mol_ind].append(2 * rxn_ind)
            num_reactants_for.append(initial_state[mol_ind])

        for idx, prod in enumerate(reaction.products):
            mol_ind = molid_index_mapping[prod.entry_id]
            product_array[rxn_ind, idx] = mol_ind
            species_rxn_mapping_list[mol_ind].append(2 * rxn_ind + 1)
            num_reactants_rev.append(initial_state[mol_ind])

        # Forward-reaction coordination number.
        # NOTE(review): the A + A case uses n*(n-1) here, while get_coordination()
        # applies n*(n-1)/2 for the same case during the simulation — confirm which
        # convention the rate constants assume.
        if len(reaction.reactants) == 1:
            coord_array[2 * rxn_ind] = num_reactants_for[0]
        elif (len(reaction.reactants) == 2) and (
            reaction.reactants[0] == reaction.reactants[1]
        ):
            coord_array[2 * rxn_ind] = num_reactants_for[0] * (num_reactants_for[0] - 1)
        elif (len(reaction.reactants) == 2) and (
            reaction.reactants[0] != reaction.reactants[1]
        ):
            coord_array[2 * rxn_ind] = num_reactants_for[0] * num_reactants_for[1]
        else:
            raise RuntimeError(
                "Only single and bimolecular reactions supported by this simulation"
            )
        # For reverse reaction
        if len(reaction.products) == 1:
            coord_array[2 * rxn_ind + 1] = num_reactants_rev[0]
        elif (len(reaction.products) == 2) and (
            reaction.products[0] == reaction.products[1]
        ):
            coord_array[2 * rxn_ind + 1] = num_reactants_rev[0] * (num_reactants_rev[0] - 1)
        elif (len(reaction.products) == 2) and (
            reaction.products[0] != reaction.products[1]
        ):
            coord_array[2 * rxn_ind + 1] = num_reactants_rev[0] * num_reactants_rev[1]
        else:
            raise RuntimeError(
                "Only single and bimolecular reactions supported by this simulation"
            )
    # Pad each species' reaction list with -1 so all rows share the max length.
    rxn_mapping_lengths = [len(rxn_list) for rxn_list in species_rxn_mapping_list]
    max_mapping_length = max(rxn_mapping_lengths)
    species_rxn_mapping = -1 * np.ones((num_species, max_mapping_length), dtype=int)

    for index, rxn_list in enumerate(species_rxn_mapping_list):
        this_map_length = rxn_mapping_lengths[index]
        if this_map_length == max_mapping_length:
            species_rxn_mapping[index, :] = rxn_list
        else:
            # Negative slice bound keeps the first this_map_length slots; the rest stay -1.
            species_rxn_mapping[
                index, : this_map_length - max_mapping_length
            ] = rxn_list
    propensities = np.multiply(coord_array, rate_constants)
    return [
        np.array(initial_state, dtype=int),
        initial_state_dict,
        species_rxn_mapping,
        reactant_array,
        product_array,
        coord_array,
        rate_constants,
        propensities,
        molid_index_mapping,
    ]
@jit(nopython=True, parallel=True)
def kmc_simulate(
    time_steps,
    coord_array,
    rate_constants,
    propensity_array,
    species_rxn_mapping,
    reactants,
    products,
    state,
):
    """
    KMC Simulation of reaction network and specified initial conditions, following
    Gillespie's direct method. Args are all Numpy arrays, to allow computational
    speed up with Numba.

    Args:
        time_steps: int number of time steps desired to run
        coord_array: array containing coordination numbers of for and rev rxns.
        rate_constants: array containing rate constants of for and rev rxns
        propensity_array: array containing propensities of for and rev rxns
        species_rxn_mapping: 2d array; each row i contains reactions which molecule_i takes part in (-1 padded)
        reactants: (n_rxns x 2) array, each row containing reactant mol_index of forward reaction
        products: (n_rxns x 2) array, each row containing product mol_index of forward reaction
        state: array of initial molecule amounts, indexed corresponding to reaction_network.entries_list

    :return: A (2 x time_steps) Numpy array. First row contains the indeces of reactions that occurred.
             Second row are the time steps generated at each iteration.
    """
    total_propensity = np.sum(propensity_array)
    reaction_history = [0 for step in range(time_steps)]
    times = [0.0 for step in range(time_steps)]
    relevant_ind = np.where(propensity_array > 0)[
        0
    ]  # Take advantage of sparsity - many propensities will be 0.
    # NOTE(review): parallel=True is set, but each iteration depends on the previous
    # state, so the main loop is inherently sequential — confirm the flag is beneficial.
    for step_counter in range(time_steps):
        # Two uniform draws: r1 sets the exponential waiting time, r2 picks the reaction.
        r1 = random.random()
        r2 = random.random()
        tau = -np.log(r1) / total_propensity
        random_propensity = r2 * total_propensity
        # First reaction whose cumulative propensity (over nonzero entries) exceeds r2 * total.
        abrgd_reaction_choice_ind = np.where(
            np.cumsum(propensity_array[relevant_ind]) >= random_propensity
        )[0][0]
        reaction_choice_ind = relevant_ind[abrgd_reaction_choice_ind]
        # Even indices are forward reactions; odd indices are the corresponding reverse.
        converted_rxn_ind = math.floor(reaction_choice_ind / 2)
        if reaction_choice_ind % 2:
            reverse = True
        else:
            reverse = False
        state = update_state(reactants, products, state, converted_rxn_ind, reverse)
        # Log the reactions that need to be altered after reaction is performed, for the coordination array
        reactions_to_change = list()
        for reactant_id in reactants[converted_rxn_ind, :]:
            if reactant_id == -1:
                continue
            else:
                reactions_to_change.extend(list(species_rxn_mapping[reactant_id, :]))
        for product_id in products[converted_rxn_ind, :]:
            if product_id == -1:
                continue
            else:
                reactions_to_change.extend(list(species_rxn_mapping[product_id, :]))
        rxns_change = set(reactions_to_change)
        # Recompute coordination numbers only for reactions touching changed species.
        for rxn_ind in rxns_change:
            if rxn_ind == -1:
                continue
            elif rxn_ind % 2:
                this_reverse = True
            else:
                this_reverse = False
            this_h = get_coordination(
                reactants, products, state, math.floor(rxn_ind / 2), this_reverse
            )
            coord_array[rxn_ind] = this_h
        propensity_array = np.multiply(rate_constants, coord_array)
        relevant_ind = np.where(propensity_array > 0)[0]
        total_propensity = np.sum(propensity_array[relevant_ind])
        reaction_history[step_counter] = int(reaction_choice_ind)
        times[step_counter] = tau

    return np.vstack((np.array(reaction_history), np.array(times)))
@jit(nopython=True)
def update_state(reactants, products, state, rxn_ind, reverse):
    """
    Updating the system state based on chosen reaction, during kMC simulation.

    Args:
        reactants: (n_rxns x 2) array, each row containing reactant mol_index of forward reaction
        products: (n_rxns x 2) array, each row containing product mol_index of forward reaction
        state: array of molecule amounts, indexed corresponding to reaction_network.entries_list
        rxn_ind: int of reaction index, corresponding to position in reaction_network.reactions list
        reverse: bool of whether this is the reverse reaction or not

    :return: updated state array, after performing the specified reaction
    """
    if rxn_ind == -1:
        raise RuntimeError("Incorrect reaction index when updating state")
    # A reverse event consumes the forward reaction's products and regenerates
    # its reactants; a forward event does the opposite.
    if reverse:
        consumed = products[rxn_ind, :]
        produced = reactants[rxn_ind, :]
    else:
        consumed = reactants[rxn_ind, :]
        produced = products[rxn_ind, :]
    for mol_ind in consumed:
        if mol_ind != -1:  # -1 pads single-species rows
            state[mol_ind] -= 1
            if state[mol_ind] < 0:
                raise ValueError("State invalid! Negative specie encountered")
    for mol_ind in produced:
        if mol_ind != -1:
            state[mol_ind] += 1

    return state
@jit(nopython=True)
def get_coordination(reactants, products, state, rxn_id, reverse):
    """
    Calculate the coordination number of a reaction, for reactions involving two reactants or less.
    They are defined as follows:
        A -> B; coord = n(A)
        A + A --> B; coord = n(A) * (n(A) - 1) / 2
        A + B --> C; coord = n(A) * n(B)

    NOTE(review): the A + A case here includes the 1/2 symmetry factor, but
    initialize_simulation() seeds coord_array with n(A) * (n(A) - 1) (no 1/2) for the
    same case — confirm which convention the rate constants assume.

    Args:
        reactants: (n_rxns x 2) array, each row containing reactant mol_index of forward reaction
        products: (n_rxns x 2) array, each row containing product mol_index of forward reaction
        state: array of molecule amounts, indexed corresponding to reaction_network.entries_list
        rxn_id: int of reaction index, corresponding to position in reaction_network.reactions list
        reverse: bool of whether this is the reverse reaction or not

    :return: float of reaction coordination number
    """
    # For a reverse event, the forward reaction's products act as the reactants.
    if reverse:
        reactant_array = products[rxn_id, :]
        num_reactants = len(np.where(reactant_array != -1)[0])
    else:
        reactant_array = reactants[rxn_id, :]
        num_reactants = len(np.where(reactant_array != -1)[0])

    num_mols_list = list()
    # A -1 pad entry indexes state[-1] here, but that value is only used when
    # num_reactants == 2, i.e. when no pad is present — so it is harmless.
    for reactant_id in reactant_array:
        num_mols_list.append(state[reactant_id])

    if num_reactants == 1:
        h_prop = num_mols_list[0]
    elif (num_reactants == 2) and (reactant_array[0] == reactant_array[1]):
        h_prop = num_mols_list[0] * (num_mols_list[0] - 1) / 2
    elif (num_reactants == 2) and (reactant_array[0] != reactant_array[1]):
        h_prop = num_mols_list[0] * num_mols_list[1]
    else:
        raise RuntimeError(
            "Only single and bimolecular reactions supported by this simulation"
        )

    return h_prop
class KmcDataAnalyzer:
"""
Functions to analyze (function-based) KMC outputs from many simulation runs. Ideally, the reaction history and
time history data are list of arrays.
Args:
reaction_network: fully generated ReactionNetwork, used for kMC simulation
molid_ind_mapping: dict mapping each entry's id to its index; of form {entry_id: mol_index, ... }
species_rxn_mapping: 2d array; each row i contains reactions which molecule_i takes part in
initial_state_dict: dict mapping mol_id to its initial amount {mol1_id: amt_1, mol2_id: amt2 ... }
products: (n_rxns x 2) array, each row containing product mol_index of forward reaction
reactants: (n_rxns x 2) array, each row containing reactant mol_index of forward reaction
reaction_history: list of arrays of reaction histories of each simulation.
time_history: list of arrays of time histories of each simulation.
"""
def __init__(
self,
reaction_network,
molid_ind_mapping,
species_rxn_mapping,
initial_state_dict,
products,
reactants,
reaction_history,
time_history,
):
self.reaction_network = reaction_network
self.molid_ind_mapping = molid_ind_mapping
self.species_rxn_mapping = species_rxn_mapping
self.initial_state_dict = initial_state_dict
self.products = products
self.reactants = reactants
self.reaction_history = reaction_history
self.time_history = time_history
self.num_sims = len(self.reaction_history)
if self.num_sims != len(self.time_history):
raise RuntimeError(
"Number of datasets for rxn history and time step history should be same!"
)
self.molind_id_mapping = [
mol.entry_id for mol in self.reaction_network.entries_list
]
def generate_time_dep_profiles(self):
"""
Generate plottable time-dependent profiles of species and rxns from raw KMC output, obtain final states.
:return dict containing species profiles, reaction profiles, and final states from each simulation.
{species_profiles: [ {mol_ind1: [(t0, n(t0)), (t1, n(t1)...], mol_ind2: [...] , ... }, {...}, ... ]
reaction_profiles: [ {rxn_ind1: [t0, t1, ...], rxn_ind2: ..., ...}, {...}, ...]
final_states: [ {mol_ind1: n1, mol_ind2: ..., ...}, {...}, ...] }
"""
species_profiles = list()
reaction_profiles = list()
final_states = list()
for n_sim in range(self.num_sims):
sim_time_history = self.time_history[n_sim]
sim_rxn_history = self.reaction_history[n_sim]
sim_species_profile = dict()
sim_rxn_profile = dict()
cumulative_time = list(np.cumsum(np.array(sim_time_history)))
state = copy.deepcopy(self.initial_state_dict)
for mol_ind in state:
sim_species_profile[mol_ind] = [(0.0, self.initial_state_dict[mol_ind])]
total_iterations = len(sim_rxn_history)
for iter in range(total_iterations):
rxn_ind = sim_rxn_history[iter]
t = cumulative_time[iter]
if rxn_ind not in sim_rxn_profile:
sim_rxn_profile[rxn_ind] = [t]
else:
sim_rxn_profile[rxn_ind].append(t)
converted_ind = math.floor(rxn_ind / 2)
if rxn_ind % 2:
reacts = self.products[converted_ind, :]
prods = self.reactants[converted_ind, :]
else:
reacts = self.reactants[converted_ind, :]
prods = self.products[converted_ind, :]
for r_ind in reacts:
if r_ind == -1:
continue
else:
try:
state[r_ind] -= 1
if state[r_ind] < 0:
raise ValueError(
"State invalid: negative specie: {}".format(r_ind)
)
sim_species_profile[r_ind].append((t, state[r_ind]))
except KeyError:
raise ValueError(
"Reactant specie {} given is not in state!".format(
r_ind
)
)
for p_ind in prods:
if p_ind == -1:
continue
else:
if (p_ind in state) and (p_ind in sim_species_profile):
state[p_ind] += 1
sim_species_profile[p_ind].append((t, state[p_ind]))
else:
state[p_ind] = 1
sim_species_profile[p_ind] = [(0.0, 0), (t, state[p_ind])]
# for plotting convenience, add data point at final time
for mol_ind in sim_species_profile:
sim_species_profile[mol_ind].append(
(cumulative_time[-1], state[mol_ind])
)
species_profiles.append(sim_species_profile)
reaction_profiles.append(sim_rxn_profile)
final_states.append(state)
return {
"species_profiles": species_profiles,
"reaction_profiles": reaction_profiles,
"final_states": final_states,
}
def final_state_analysis(self, final_states):
"""
Gather statistical analysis of the final states of simulation.
Args:
final_states: list of dicts of final states, as generated in generate_time_dep_profiles()
:return: list of tuples containing statistical data for each species, sorted from highest to low avg occurrence
"""
state_arrays = (
dict()
) # For each molecule, compile an array of its final amounts
for iter, final_state in enumerate(final_states):
for mol_ind, amt in final_state.items():
# Store the amount, and convert key from mol_ind to entry_id
if self.molind_id_mapping[mol_ind] not in state_arrays:
state_arrays[self.molind_id_mapping[mol_ind]] = np.zeros(
self.num_sims
)
state_arrays[self.molind_id_mapping[mol_ind]][iter] = amt
analyzed_states = dict() # will contain statistical results of final states
for mol_entry, state_array in state_arrays.items():
analyzed_states[mol_entry] = (np.mean(state_array), np.std(state_array))
# Sort from highest avg final amount to lowest
sorted_analyzed_states = sorted(
[(entry_id, data_tup) for entry_id, data_tup in analyzed_states.items()],
key=lambda x: x[1][0],
reverse=True,
)
return sorted_analyzed_states
def plot_species_profiles(
self,
species_profiles,
final_states,
num_label=12,
num_plots=None,
filename=None,
file_dir=None,
):
"""
Sorting and plotting species profiles for a specified number of simulations. The profiles might be very similar,
so may not need to plot all of the runs for good understanding of results.
Args:
species_profiles: list of dicts of species as function of time, for each simulation
final_states: list of dicts of final states of each simulation
num_label: integer number of species in the legend
filename (str)
file_dir (str)
"""
if num_plots is None:
num_plots = self.num_sims
elif num_plots > self.num_sims:
num_plots = self.num_sims
for n_sim in range(num_plots):
# Sorting and plotting:
fig, ax = plt.subplots()
sorted_state = sorted(
[(k, v) for k, v in final_states[n_sim].items()],
key=lambda x: x[1],
reverse=True,
)
sorted_inds = [mol_tuple[0] for mol_tuple in sorted_state]
sorted_ind_id_mapping = dict()
iter_counter = 0
for id, ind in self.molid_ind_mapping.items():
if ind in sorted_inds[:num_label]:
sorted_ind_id_mapping[ind] = id
iter_counter += 1
if iter_counter == num_label:
break
colors = plt.cm.get_cmap("hsv", num_label)
this_id = 0
t_end = sum(self.time_history[n_sim])
for mol_ind in species_profiles[n_sim]:
# ts = np.append(np.array([e[0] for e in species_profiles[n_sim][mol_ind]]), t_end)
ts = np.array([e[0] for e in species_profiles[n_sim][mol_ind]])
nums = np.array([e[1] for e in species_profiles[n_sim][mol_ind]])
if mol_ind in sorted_inds[:num_label]:
mol_id = sorted_ind_id_mapping[mol_ind]
for entry in self.reaction_network.entries_list:
if mol_id == entry.entry_id:
this_composition = (
entry.molecule.composition.alphabetical_formula
)
this_charge = entry.molecule.charge
this_label = this_composition + " " + str(this_charge)
this_color = colors(this_id)
this_id += 1
break
ax.plot(ts, nums, label=this_label, color=this_color)
else:
ax.plot(ts, nums)
title = "KMC simulation, total time {}".format(t_end)
ax.set(title=title, xlabel="Time (s)", ylabel="# Molecules")
ax.legend(
loc="upper right", bbox_to_anchor=(1, 1), ncol=2, fontsize="small"
)
sim_filename = filename + "_run_" + str(n_sim + 1)
if file_dir is None:
plt.show()
else:
plt.savefig(file_dir + "/" + sim_filename)
def analyze_intermediates(self, species_profiles, cutoff=0.9):
"""
Identify intermediates from species vs time profiles. Species are intermediates if consumed nearly as much
as they are created.
Args:
species_profile: Dict of list of tuples, as generated in generate_time_dep_profiles()
cutoff: (float) fraction to adjust definition of intermediate
:return: Analyzed data in a dict, of the form:
{mol1: {'freqency': (float), 'lifetime': (avg, std), 't_max': (avg, std), 'amt_produced': (avg, std)},
mol2: {...}, ... }
"""
intermediates = dict()
for n_sim in range(self.num_sims):
for mol_ind, prof in species_profiles[n_sim].items():
history = np.array([t[1] for t in prof])
diff_history = np.diff(history)
max_amt = max(history)
amt_produced = np.sum(diff_history == 1)
amt_consumed = np.sum(diff_history == -1)
# Identify the intermediate, accounting for fluctuations
if (amt_produced >= 3) and (amt_consumed > amt_produced * cutoff):
if mol_ind not in intermediates:
intermediates[mol_ind] = dict()
intermediates[mol_ind]["lifetime"] = list()
intermediates[mol_ind]["amt_produced"] = list()
intermediates[mol_ind]["t_max"] = list()
intermediates[mol_ind]["amt_consumed"] = list()
# Intermediate lifetime is approximately the time from its max amount to when nearly all consumed
max_ind = np.where(history == max_amt)[0][0]
t_max = prof[max_ind][0]
for state in prof[max_ind + 1 :]:
if state[1] < (1 - cutoff) * amt_produced + history[0]:
intermediates[mol_ind]["lifetime"].append(state[0] - t_max)
intermediates[mol_ind]["t_max"].append(t_max)
intermediates[mol_ind]["amt_produced"].append(amt_produced)
intermediates[mol_ind]["amt_consumed"].append(amt_consumed)
break
intermediates_analysis = dict()
for mol_ind in intermediates:
entry_id = self.molind_id_mapping[mol_ind]
intermediates_analysis[entry_id] = dict() # convert keys to entry id
if len(intermediates[mol_ind]["lifetime"]) != len(
intermediates[mol_ind]["t_max"]
):
raise RuntimeError("Intermediates data should be of the same length")
intermediates_analysis[entry_id]["frequency"] = (
len(intermediates[mol_ind]["lifetime"]) / self.num_sims
)
lifetime_array = np.array(intermediates[mol_ind]["lifetime"])
intermediates_analysis[entry_id]["lifetime"] = (
np.mean(lifetime_array),
np.std(lifetime_array),
)
t_max_array = np.array(intermediates[mol_ind]["t_max"])
intermediates_analysis[entry_id]["t_max"] = (
np.mean(t_max_array),
np.std(t_max_array),
)
amt_produced_array = np.array(intermediates[mol_ind]["amt_produced"])
intermediates_analysis[entry_id]["amt_produced"] = (
np.mean(amt_produced_array),
np.std(amt_produced_array),
)
amt_consumed_array = np.array(intermediates[mol_ind]["amt_consumed"])
intermediates_analysis[entry_id]["amt_consumed"] = (
np.mean(amt_consumed_array),
np.std(amt_produced_array),
)
# Sort by highest average amount produced
sorted_intermediates_analysis = sorted(
[
(entry_id, mol_data)
for entry_id, mol_data in intermediates_analysis.items()
],
key=lambda x: x[1]["amt_produced"][0],
reverse=True,
)
return sorted_intermediates_analysis
    def correlate_reactions(self, reaction_inds):
        """
        Correlate two reactions, by finding the average time and steps elapsed for rxn2 to fire after rxn1,
        and vice-versa.
        Args:
            reaction_inds: list, array, or tuple of two reaction indexes
        :return: dict containing analysis of how reactions are correlated {rxn1: {'time': (float), 'steps': (float),
        'occurrences': float}, rxn2: {...} }
        """
        # Per-reaction accumulators gathered across all simulations.
        correlation_data = dict()
        correlation_analysis = dict()
        for rxn_ind in reaction_inds:
            correlation_data[rxn_ind] = dict()
            correlation_data[rxn_ind]["time"] = list()
            correlation_data[rxn_ind]["steps"] = list()
            correlation_data[rxn_ind]["occurrences"] = list()
            correlation_analysis[rxn_ind] = dict()
        for n_sim in range(self.num_sims):
            # Cumulative time lets us turn step indices into elapsed times.
            cum_time = np.cumsum(self.time_history[n_sim])
            rxn_locations = dict()
            # Find the step numbers when reactions fire in the simulation
            for rxn_ind in reaction_inds:
                rxn_locations[rxn_ind] = list(
                    np.where(self.reaction_history[n_sim] == rxn_ind)[0]
                )
                # One-past-the-end sentinel so the window after the last
                # firing of this reaction is still scanned below.
                rxn_locations[rxn_ind].append(len(self.reaction_history[n_sim]))
            # Correlate between each reaction
            for (rxn_ind, location_list) in rxn_locations.items():
                time_elapse = list()
                step_elapse = list()
                occurrences = 0
                for (rxn_ind_j, location_list_j) in rxn_locations.items():
                    if rxn_ind == rxn_ind_j:
                        continue
                    for i in range(1, len(location_list)):
                        for loc_j in location_list_j:
                            # Find location where reaction j happens after reaction i, before reaction i fires again
                            if (loc_j > location_list[i - 1]) and (
                                loc_j < location_list[i]
                            ):
                                time_elapse.append(
                                    cum_time[loc_j] - cum_time[location_list[i - 1]]
                                )
                                step_elapse.append(loc_j - location_list[i - 1])
                                occurrences += 1
                                # Only the first j-firing in each window counts.
                                break
                if len(time_elapse) == 0:
                    # No follow-up firing anywhere in this simulation; record
                    # a zero occurrence but no time/step samples.
                    correlation_data[rxn_ind]["occurrences"].append(0)
                else:
                    correlation_data[rxn_ind]["time"].append(
                        np.mean(np.array(time_elapse))
                    )
                    correlation_data[rxn_ind]["steps"].append(
                        np.mean(np.array(step_elapse))
                    )
                    correlation_data[rxn_ind]["occurrences"].append(occurrences)
        # Reduce per-simulation samples to (mean, std) pairs.
        for rxn_ind, data_dict in correlation_data.items():
            if len(data_dict["time"]) != 0:
                correlation_analysis[rxn_ind]["time"] = (
                    np.mean(np.array(data_dict["time"])),
                    np.std(np.array(data_dict["time"])),
                )
                correlation_analysis[rxn_ind]["steps"] = (
                    np.mean(np.array(data_dict["steps"])),
                    np.std(np.array(data_dict["steps"])),
                )
                correlation_analysis[rxn_ind]["occurrences"] = (
                    np.mean(np.array(data_dict["occurrences"])),
                    np.std(np.array(data_dict["occurrences"])),
                )
            else:
                # NOTE(review): `n_sim` here is the leftover loop variable from
                # the simulation loop above, so this always prints the index of
                # the *last* simulation -- likely not what was intended.
                print(
                    "Reaction ",
                    rxn_ind,
                    "does not lead to the other reaction in simulation ",
                    n_sim,
                )
        return correlation_analysis
def quantify_specific_reaction(self, reaction_history, reaction_index):
"""
Quantify a reaction from one simulation reaction history
Args:
reaction_history: array containing sequence of reactions fired during a simulation.
reaction_index: integer of reaction index of interest
:return: integer number of times reaction is fired
"""
if reaction_index not in reaction_history:
reaction_count = 0
else:
reaction_count = len(reaction_history[reaction_index])
return reaction_count
    def quantify_rank_reactions(self, reaction_type=None, num_rxns=None):
        """
        Given reaction histories, identify the most commonly occurring reactions, on average.
        Can rank generally, or by reactions of a certain type.
        Args:
            reaction_type (string): optional; restrict the ranking to reactions
                whose type matches one of the allowed type names below.
            num_rxns (int): the amount of reactions interested in collecting data on. If None, record for all.
        Returns:
            reaction_data: list of reactions and their avg, std of times fired. Sorted by the average times fired.
            [(rxn1, (avg, std)), (rxn2, (avg, std)) ... ]
        """
        allowed_rxn_types = [
            "One electron reduction",
            "One electron oxidation",
            "Intramolecular single bond breakage",
            "Intramolecular single bond formation",
            "Coordination bond breaking AM -> A+M",
            "Coordination bond forming A+M -> AM",
            "Molecular decomposition breaking one bond A -> B+C",
            "Molecular formation from one new bond A+B -> C",
            "Concerted",
        ]
        if reaction_type is not None:
            rxns_of_type = list()
            if reaction_type not in allowed_rxn_types:
                raise RuntimeError(
                    "This reaction type does not (yet) exist in our reaction networks."
                )
            # Forward reactions ("rxn_type_A") are stored at even history
            # indices (2 * ind); reverse reactions ("rxn_type_B") at odd
            # indices (2 * ind + 1).
            for ind, rxn in enumerate(self.reaction_network.reactions):
                if rxn.reaction_type()["rxn_type_A"] == reaction_type:
                    rxns_of_type.append(2 * ind)
                elif rxn.reaction_type()["rxn_type_B"] == reaction_type:
                    rxns_of_type.append(2 * ind + 1)
        reaction_data = dict()  # keeping record of each iteration
        # Loop to count all reactions fired
        for n_sim in range(self.num_sims):
            # Only reactions that actually fired in this simulation.
            rxns_fired = set(self.reaction_history[n_sim])
            if reaction_type is not None:
                relevant_rxns = [r for r in rxns_fired if r in rxns_of_type]
            else:
                relevant_rxns = rxns_fired
            for rxn_ind in relevant_rxns:
                if rxn_ind not in reaction_data:
                    reaction_data[rxn_ind] = list()
                # Per-simulation firing count for this reaction index.
                reaction_data[rxn_ind].append(
                    np.sum(self.reaction_history[n_sim] == rxn_ind)
                )
        reaction_analysis = dict()
        for rxn_ind, counts in reaction_data.items():
            reaction_analysis[rxn_ind] = (
                np.mean(np.array(counts)),
                np.std(np.array(counts)),
            )
        # Sort reactions by the average amount fired
        sorted_reaction_analysis = sorted(
            [(i, c) for i, c in reaction_analysis.items()],
            key=lambda x: x[1][0],
            reverse=True,
        )
        if num_rxns is None:
            return sorted_reaction_analysis
        else:
            return sorted_reaction_analysis[:num_rxns]
    def frequency_analysis(self, rxn_inds, spec_inds, partitions=100):
        """
        Calculate the frequency of reaction and species formation as a function of time. Simulation data is
        discretized into time intervals, and probabilities in each set are obtained.
        Args:
            rxn_inds: list of indeces of reactions of interest
            spec_inds: list of molecule indexes of interest
            partitions: number of intervals in which to discretize time
        :return: dict of dicts containing the statistics of reaction fired, product formed at each time interval.
        {reaction_data: {rxn_ind1: [(t0, avg0, std0), (t1, avg1, std1), ...], rxn_ind2: [...], ... rxn_ind_n: [...]}
        {species_data: {spec1: [(t0, avg0, std0), (t1, avg1, std1), ...], spec2: [...], ... specn: [...]}}
        """
        # Templates of zeroed per-partition frequencies, deep-copied per sim.
        reaction_frequency_data = dict()
        reaction_frequency_array = (
            dict()
        )  # Growing arrays of reaction frequencies as fxn of time
        species_frequency_data = dict()
        species_frequency_array = dict()
        new_species_counters = dict()
        for ind in rxn_inds:
            reaction_frequency_data[ind] = [0 for j in range(partitions)]
        for ind in spec_inds:
            species_frequency_data[ind] = [0 for j in range(partitions)]
            new_species_counters[ind] = 0
        for n_sim in range(self.num_sims):
            # Each simulation gets its own interval width so that every
            # simulation spans exactly `partitions` intervals.
            delta_t = np.sum(self.time_history[n_sim]) / partitions
            ind_0 = 0
            t = 0
            n = 0  # for tracking which time interval we are in
            species_counters = copy.deepcopy(
                new_species_counters
            )  # for counting species as they appear
            rxn_freq_data = copy.deepcopy(reaction_frequency_data)
            spec_freq_data = copy.deepcopy(species_frequency_data)
            for step_num, tau in enumerate(self.time_history[n_sim]):
                t += tau
                this_rxn_ind = int(self.reaction_history[n_sim][step_num])
                # Odd indices are reverse reactions, whose products are the
                # forward reaction's reactants; even indices are forward.
                if this_rxn_ind % 2:  # reverse reaction
                    prods = self.reactants[math.floor(this_rxn_ind / 2), :]
                else:
                    prods = self.products[math.floor(this_rxn_ind / 2), :]
                for spec_ind in spec_inds:
                    if spec_ind in prods:
                        species_counters[spec_ind] += 1
                # When t reaches the next discretized time step, or end of the simulation
                if (t >= (n + 1) * delta_t) or (
                    step_num == len(self.reaction_history[n_sim]) - 1
                ):
                    n_to_fill = n
                    # If a single step jumped over several intervals, skip n
                    # forward to the interval that actually contains t.
                    if t >= (n + 2) * delta_t:
                        n += math.floor(t / delta_t - n)
                    else:
                        n += 1
                    steps = step_num - ind_0 + 1
                    for spec_ind in spec_inds:
                        spec_freq_data[spec_ind][n_to_fill] = (
                            species_counters[spec_ind] / steps
                        )
                    for rxn_ind in rxn_inds:
                        # Fraction of steps in this window that fired rxn_ind.
                        rxn_freq = (
                            np.count_nonzero(
                                self.reaction_history[n_sim][ind_0 : step_num + 1]
                                == rxn_ind
                            )
                            / steps
                        )
                        # t_mdpt = (self.time_history[n_sim][step_num] + self.time_history[n_sim][ind_0]) / 2
                        rxn_freq_data[rxn_ind][n_to_fill] = rxn_freq
                    # Reset and update counters
                    species_counters = copy.deepcopy(new_species_counters)
                    ind_0 = step_num + 1
            # Stack this simulation's row onto the per-index 2D arrays
            # (shape grows to num_sims x partitions).
            for rxn_ind in rxn_inds:
                if n_sim == 0:
                    reaction_frequency_array[rxn_ind] = np.array(rxn_freq_data[rxn_ind])
                else:
                    reaction_frequency_array[rxn_ind] = np.vstack(
                        (reaction_frequency_array[rxn_ind], rxn_freq_data[rxn_ind])
                    )
            # print('reaction freq array', reaction_frequency_array)
            for spec_ind in spec_inds:
                if n_sim == 0:
                    species_frequency_array[spec_ind] = np.array(
                        spec_freq_data[spec_ind]
                    )
                else:
                    species_frequency_array[spec_ind] = np.vstack(
                        (species_frequency_array[spec_ind], spec_freq_data[spec_ind])
                    )
        # Statistical analysis
        statistical_rxn_data = dict()
        statistical_spec_data = dict()
        # Interval midpoints on the across-simulation average timeline.
        avg_delta_t = (
            np.mean(np.array([sum(self.time_history[i]) for i in range(self.num_sims)]))
            / partitions
        )
        time_list = [i * avg_delta_t + avg_delta_t / 2 for i in range(partitions)]
        # print('time_list: ', time_list)
        for rxn_ind in rxn_inds:
            # With a single simulation the array is 1-D and std is zero.
            if self.num_sims == 1:
                avgs = reaction_frequency_array[rxn_ind]
                stds = np.zeros(partitions)
            else:
                avgs = np.mean(reaction_frequency_array[rxn_ind], 0)
                stds = np.std(reaction_frequency_array[rxn_ind], 0)
            statistical_rxn_data[rxn_ind] = [
                (time_list[n], avgs[n], stds[n]) for n in range(partitions)
            ]
        for spec_ind in spec_inds:
            if self.num_sims == 1:
                spec_avgs = species_frequency_array[spec_ind]
                spec_stds = np.zeros(partitions)
            else:
                spec_avgs = np.mean(species_frequency_array[spec_ind], 0)
                spec_stds = np.std(species_frequency_array[spec_ind], 0)
            statistical_spec_data[spec_ind] = [
                (time_list[n], spec_avgs[n], spec_stds[n]) for n in range(partitions)
            ]
        return {
            "reaction_data": statistical_rxn_data,
            "species_data": statistical_spec_data,
        }
def find_rxn_index(self, reaction, reverse):
"""
Find the reaction index of a given reaction object
Args:
reaction: Reaction object
reverse: bool to say whether reaction is reverse or forward
:return: integer reaction index
"""
for ind, rxn in enumerate(self.reaction_network.reactions):
if rxn == reaction:
if reverse is True:
rxn_ind = 2 * ind + 1
else:
rxn_ind = 2 * ind
break
return rxn_ind
| 43.312436 | 120 | 0.578008 | 28,787 | 0.683079 | 0 | 0 | 7,061 | 0.167549 | 0 | 0 | 12,836 | 0.304582 |
b4c286e145a31477357b61710d97854704934bc6 | 7,816 | py | Python | Testing/Python/TestLinearOrthotropicMaterial.py | Numerics88/vtkbone | 5a6ab2870679e9e7ea51926c34911607b9d85235 | [
"MIT"
] | 3 | 2017-04-04T04:59:22.000Z | 2022-03-13T11:22:40.000Z | Testing/Python/TestLinearOrthotropicMaterial.py | Numerics88/vtkbone | 5a6ab2870679e9e7ea51926c34911607b9d85235 | [
"MIT"
] | 5 | 2017-04-06T19:46:39.000Z | 2019-12-11T23:41:41.000Z | Testing/Python/TestLinearOrthotropicMaterial.py | Numerics88/vtkbone | 5a6ab2870679e9e7ea51926c34911607b9d85235 | [
"MIT"
] | 2 | 2017-04-29T20:54:57.000Z | 2017-04-29T22:28:10.000Z | from __future__ import division
import sys
import numpy
from numpy.core import *
import vtk
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
import vtkbone
import traceback
import unittest
class TestLinearOrthotropicMaterial (unittest.TestCase):
def test_isotropic (self):
material = vtkbone.vtkboneLinearOrthotropicMaterial()
material.SetYoungsModulusX(1234.5)
material.SetYoungsModulusY(1234.5)
material.SetYoungsModulusZ(1234.5)
material.SetPoissonsRatioYZ(0.246)
material.SetPoissonsRatioZX(0.246)
material.SetPoissonsRatioXY(0.246)
G = 1234.5/(2*(1+0.246))
material.SetShearModulusYZ(G)
material.SetShearModulusZX(G)
material.SetShearModulusXY(G)
self.assertEqual (material.GetYoungsModulusX(), 1234.5)
self.assertEqual (material.GetYoungsModulusY(), 1234.5)
self.assertEqual (material.GetYoungsModulusZ(), 1234.5)
self.assertEqual (material.GetPoissonsRatioYZ(), 0.246)
self.assertEqual (material.GetPoissonsRatioZY(), 0.246)
self.assertEqual (material.GetPoissonsRatioZX(), 0.246)
self.assertEqual (material.GetPoissonsRatioXZ(), 0.246)
self.assertEqual (material.GetPoissonsRatioXY(), 0.246)
self.assertEqual (material.GetPoissonsRatioYX(), 0.246)
self.assertEqual (material.GetShearModulusYZ(), G)
self.assertEqual (material.GetShearModulusZY(), G)
self.assertEqual (material.GetShearModulusZX(), G)
self.assertEqual (material.GetShearModulusXZ(), G)
self.assertEqual (material.GetShearModulusXY(), G)
self.assertEqual (material.GetShearModulusYX(), G)
def test_orthotropic (self):
material = vtkbone.vtkboneLinearOrthotropicMaterial()
material.SetYoungsModulusX(1000)
material.SetYoungsModulusY(1100)
material.SetYoungsModulusZ(1200)
material.SetPoissonsRatioYZ(0.25)
material.SetPoissonsRatioZX(0.3)
material.SetPoissonsRatioXY(0.2)
# These values are not necessarily consistent
GYZ = 1000/(2*(1+0.25))
GZX = 1100/(2*(1+0.3))
GXY = 1200/(2*(1+0.2))
material.SetShearModulusYZ(GYZ)
material.SetShearModulusZX(GZX)
material.SetShearModulusXY(GXY)
self.assertEqual (material.GetYoungsModulusX(), 1000)
self.assertEqual (material.GetYoungsModulusY(), 1100)
self.assertEqual (material.GetYoungsModulusZ(), 1200)
self.assertEqual (material.GetPoissonsRatioYZ(), 0.25)
self.assertEqual (material.GetPoissonsRatioZX(), 0.3)
self.assertEqual (material.GetPoissonsRatioXY(), 0.2)
self.assertAlmostEqual (material.GetPoissonsRatioYZ() / material.GetYoungsModulusY(), material.GetPoissonsRatioZY() / material.GetYoungsModulusZ(), delta=1E-8)
self.assertAlmostEqual(material.GetPoissonsRatioZX() / material.GetYoungsModulusZ(), material.GetPoissonsRatioXZ() / material.GetYoungsModulusX(), delta=1E-8 )
self.assertAlmostEqual (material.GetPoissonsRatioXY() / material.GetYoungsModulusX(), material.GetPoissonsRatioYX() / material.GetYoungsModulusY(), delta=1E-8)
self.assertEqual (material.GetShearModulusYZ(), GYZ)
self.assertEqual (material.GetShearModulusZY(), GYZ)
self.assertEqual (material.GetShearModulusZX(), GZX)
self.assertEqual (material.GetShearModulusXZ(), GZX)
self.assertEqual (material.GetShearModulusXY(), GXY)
self.assertEqual (material.GetShearModulusYX(), GXY)
def test_copy (self):
material = vtkbone.vtkboneLinearOrthotropicMaterial()
material.SetYoungsModulusX(1000)
material.SetYoungsModulusY(1100)
material.SetYoungsModulusZ(1200)
material.SetPoissonsRatioYZ(0.25)
material.SetPoissonsRatioZX(0.3)
material.SetPoissonsRatioXY(0.2)
# These values are not necessarily consistent
GYZ = 1000/(2*(1+0.25))
GZX = 1100/(2*(1+0.3))
GXY = 1200/(2*(1+0.2))
material.SetShearModulusYZ(GYZ)
material.SetShearModulusZX(GZX)
material.SetShearModulusXY(GXY)
scaled_material = material.Copy()
self.assertEqual (scaled_material.GetYoungsModulusX(), 1000)
self.assertEqual (scaled_material.GetYoungsModulusY(), 1100)
self.assertEqual (scaled_material.GetYoungsModulusZ(), 1200)
self.assertEqual (scaled_material.GetPoissonsRatioYZ(), 0.25)
self.assertEqual (scaled_material.GetPoissonsRatioZX(), 0.3)
self.assertEqual (scaled_material.GetPoissonsRatioXY(), 0.2)
self.assertAlmostEqual (scaled_material.GetPoissonsRatioYZ() / scaled_material.GetYoungsModulusY(), scaled_material.GetPoissonsRatioZY() / scaled_material.GetYoungsModulusZ(), delta=1E-8)
self.assertAlmostEqual (scaled_material.GetPoissonsRatioZX() / scaled_material.GetYoungsModulusZ(), scaled_material.GetPoissonsRatioXZ() / scaled_material.GetYoungsModulusX(), delta=1E-8)
self.assertAlmostEqual (scaled_material.GetPoissonsRatioXY() / scaled_material.GetYoungsModulusX(), scaled_material.GetPoissonsRatioYX() / scaled_material.GetYoungsModulusY(), delta=1E-8)
self.assertEqual (scaled_material.GetShearModulusYZ(), GYZ)
self.assertEqual (scaled_material.GetShearModulusZY(), GYZ)
self.assertEqual (scaled_material.GetShearModulusZX(), GZX)
self.assertEqual (scaled_material.GetShearModulusXZ(), GZX)
self.assertEqual (scaled_material.GetShearModulusXY(), GXY)
self.assertEqual (scaled_material.GetShearModulusYX(), GXY)
def test_scaled_copy (self):
material = vtkbone.vtkboneLinearOrthotropicMaterial()
material.SetYoungsModulusX(1000)
material.SetYoungsModulusY(1100)
material.SetYoungsModulusZ(1200)
material.SetPoissonsRatioYZ(0.25)
material.SetPoissonsRatioZX(0.3)
material.SetPoissonsRatioXY(0.2)
# These values are not necessarily consistent
GYZ = 1000/(2*(1+0.25))
GZX = 1100/(2*(1+0.3))
GXY = 1200/(2*(1+0.2))
material.SetShearModulusYZ(GYZ)
material.SetShearModulusZX(GZX)
material.SetShearModulusXY(GXY)
scaled_material = material.ScaledCopy(0.5)
self.assertEqual (scaled_material.GetYoungsModulusX(), 0.5*1000)
self.assertEqual (scaled_material.GetYoungsModulusY(), 0.5*1100)
self.assertEqual (scaled_material.GetYoungsModulusZ(), 0.5*1200)
self.assertEqual (scaled_material.GetPoissonsRatioYZ(), 0.25)
self.assertEqual (scaled_material.GetPoissonsRatioZX(), 0.3)
self.assertEqual (scaled_material.GetPoissonsRatioXY(), 0.2)
self.assertAlmostEqual (scaled_material.GetPoissonsRatioYZ() / scaled_material.GetYoungsModulusY(), scaled_material.GetPoissonsRatioZY() / scaled_material.GetYoungsModulusZ(), delta=1E-8)
self.assertAlmostEqual (scaled_material.GetPoissonsRatioZX() / scaled_material.GetYoungsModulusZ(), scaled_material.GetPoissonsRatioXZ() / scaled_material.GetYoungsModulusX(), delta=1E-8)
self.assertAlmostEqual (scaled_material.GetPoissonsRatioXY() / scaled_material.GetYoungsModulusX(), scaled_material.GetPoissonsRatioYX() / scaled_material.GetYoungsModulusY(), delta=1E-8)
self.assertEqual (scaled_material.GetShearModulusYZ(), 0.5*GYZ)
self.assertEqual (scaled_material.GetShearModulusZY(), 0.5*GYZ)
self.assertEqual (scaled_material.GetShearModulusZX(), 0.5*GZX)
self.assertEqual (scaled_material.GetShearModulusXZ(), 0.5*GZX)
self.assertEqual (scaled_material.GetShearModulusXY(), 0.5*GXY)
self.assertEqual (scaled_material.GetShearModulusYX(), 0.5*GXY)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 51.084967 | 195 | 0.714304 | 7,562 | 0.967503 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.018552 |
b4c29b196e85ffde60d81fb206a0e333c3a97d09 | 3,376 | py | Python | hailo_model_zoo/datasets/create_kitti_depth_tfrecord.py | markgrobman/hailo_model_zoo | 2ea72272ed2debd7f6bee7c4a65bd41de57ec9cf | [
"MIT"
] | 2 | 2021-07-20T15:09:51.000Z | 2021-11-17T11:05:02.000Z | hailo_model_zoo/datasets/create_kitti_depth_tfrecord.py | markgrobman/hailo_model_zoo | 2ea72272ed2debd7f6bee7c4a65bd41de57ec9cf | [
"MIT"
] | null | null | null | hailo_model_zoo/datasets/create_kitti_depth_tfrecord.py | markgrobman/hailo_model_zoo | 2ea72272ed2debd7f6bee7c4a65bd41de57ec9cf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import argparse
import tensorflow as tf
import numpy as np
from PIL import Image
def _int64_feature(values):
    """Wrap a scalar int or a sequence of ints in a tf.train Int64List feature."""
    wrapped = values if isinstance(values, (tuple, list)) else [values]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=wrapped))
def _bytes_feature(values):
    """Wrap a single bytes value in a tf.train BytesList feature."""
    byte_list = tf.train.BytesList(value=[values])
    return tf.train.Feature(bytes_list=byte_list)
def _create_tfrecord(labels, images, num_images):
    """Write images and their depth labels to ./kitti_val.tfrecord.

    Args:
        labels: sequence of ground-truth depth maps, aligned with `images`.
        images: sequence of image file paths.
        num_images: limit on the number of images to convert.

    Returns:
        Number of loop iterations performed (count of examples written).
    """
    tfrecords_filename = os.path.join('./', 'kitti_val.tfrecord')
    writer = tf.io.TFRecordWriter(tfrecords_filename)
    with tf.Graph().as_default():
        # Single reusable JPEG-encoding op, fed one raw image per iteration.
        image_placeholder = tf.compat.v1.placeholder(dtype=tf.uint8, name='image_placeholder')
        encoded_image = tf.image.encode_jpeg(image_placeholder)
        i = 0
        with tf.compat.v1.Session('') as sess:
            for img_path, label in zip(images, labels):
                img = np.array(Image.open(img_path), np.uint8)
                image_height = img.shape[0]
                image_width = img.shape[1]
                img_jpeg = sess.run(encoded_image, feed_dict={image_placeholder: img})
                # NOTE(review): `label` from the zip is ignored and labels[i]
                # is used instead; equivalent here since i advances with the
                # zip, but one of the two should be removed.
                depth = labels[i]
                print("converting image number {}: {}".format(i, img_path))
                example = tf.train.Example(features=tf.train.Features(feature={
                    'height': _int64_feature(image_height),
                    'width': _int64_feature(image_width),
                    'image_name': _bytes_feature(str.encode(os.path.basename(img_path))),
                    'depth': _bytes_feature(np.array(depth, np.float32).tobytes()),
                    'image_jpeg': _bytes_feature(img_jpeg)}))
                writer.write(example.SerializeToString())
                i += 1
                # NOTE(review): the strict ">" means num_images + 1 examples
                # are written before breaking -- confirm whether this
                # off-by-one is intended.
                if i > num_images:
                    break
    writer.close()
    return i
def get_label(gt_file):
    """Load the ground-truth depth maps stored under the 'data' key of an npz file."""
    archive = np.load(gt_file, fix_imports=True, encoding='latin1',
                      allow_pickle=True)
    return archive["data"]
def get_image_files(data_dir, split_file):
    """Build the list of image paths named by an eigen-style split file.

    Each line is "<drive_dir> <frame_id> <l|r>"; 'l' selects camera image_02
    and 'r' selects image_03.
    """
    paths = []
    with open(split_file, 'r') as handle:
        for record in handle:
            space_at = record.find(' ')
            drive_dir = record[:space_at]
            # Second-to-last character is the camera side flag.
            camera = '2' if record[-2] == 'l' else '3'
            rel_dir = os.path.join(drive_dir, 'image_0' + camera, 'data')
            # Strip the trailing " <side>\n" (3 chars) from the frame id.
            frame = record[space_at + 1:][:-3] + '.png'
            paths.append(os.path.join(data_dir, rel_dir, frame))
    return paths
def run(data_dir, split_file, gt_file, num_images):
    """Build the validation TFRecord from an image tree, split list and gt depths."""
    assert data_dir != '', 'no data directory'
    assert split_file != '', 'no split file'
    assert gt_file != '', 'no gt file'
    image_paths = get_image_files(data_dir, split_file)
    gt_depths = get_label(gt_file)
    converted = _create_tfrecord(gt_depths, image_paths, num_images)
    print('Done converting {} images'.format(converted))
if __name__ == '__main__':
    # Command-line entry point: build kitti_val.tfrecord from a KITTI image
    # tree, a split file listing drive dirs / frame ids, and an npz gt file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', help="data directory", type=str, default='')
    parser.add_argument('--split', help="split file", type=str, default='')
    parser.add_argument('--gt', help="gt npz file", type=str, default='')
    parser.add_argument('--num-images', help="limit the number of images", type=int, default=127)
    args = parser.parse_args()
    run(args.data, args.split, args.gt, args.num_images)
| 37.932584 | 97 | 0.616706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 470 | 0.139218 |
b4c33445657935893d39d91a0309206fd2cd458e | 391 | py | Python | tellurium/notebooks/__init__.py | kirichoi/tellurium | 77cf6e794600587741ebe209644a78051e0db1d5 | [
"Apache-2.0"
] | 73 | 2016-06-13T12:44:28.000Z | 2021-12-31T14:44:39.000Z | tellurium/notebooks/__init__.py | kirichoi/tellurium | 77cf6e794600587741ebe209644a78051e0db1d5 | [
"Apache-2.0"
] | 461 | 2015-03-26T00:05:16.000Z | 2022-03-16T17:24:35.000Z | tellurium/notebooks/__init__.py | kirichoi/tellurium | 77cf6e794600587741ebe209644a78051e0db1d5 | [
"Apache-2.0"
] | 30 | 2016-01-18T16:50:54.000Z | 2021-07-06T09:29:53.000Z | """
notebook imports
"""
from __future__ import absolute_import
import warnings
import ipywidgets
import IPython
from .notebooktools import *
from .ontologysearch import OntologySearch
from .parameterslider import ParameterSlider
from .speciessearch import SearchBySpeciesForm
# except ImportError:
# warnings.warn("Notebook tools are not imported, due to missing dependencies.")
| 17.772727 | 83 | 0.808184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.327366 |
b4c33dadc7de5b116b0004a7e5d4f5eac3a9ab0a | 2,297 | py | Python | fashion/warehouse/fashion.core/xform/generateJinja2.py | braddillman/fashion | 2588f3712a72e81f3cb7733e40b6c3751aa5ece2 | [
"Apache-2.0"
] | 1 | 2021-05-23T09:01:39.000Z | 2021-05-23T09:01:39.000Z | fashion/warehouse/fashion.core/xform/generateJinja2.py | braddillman/fashion | 2588f3712a72e81f3cb7733e40b6c3751aa5ece2 | [
"Apache-2.0"
] | null | null | null | fashion/warehouse/fashion.core/xform/generateJinja2.py | braddillman/fashion | 2588f3712a72e81f3cb7733e40b6c3751aa5ece2 | [
"Apache-2.0"
] | null | null | null | '''
Created on 2018-12-21
Copyright (c) 2018 Bradford Dillman
Generate code from a model and a jinja2 template.
'''
import logging
from pathlib import Path
from jinja2 import FileSystemLoader, Environment
from jinja2.exceptions import TemplateNotFound
from munch import munchify
from fashion.mirror import Mirror
# Module level code is executed when this file is loaded.
# cwd is where segment file was loaded.
def init(config, codeRegistry, verbose=False, tags=None):
    '''Register the Generate xform object; cwd is where segment file was loaded.'''
    xform_obj = Generate(config)
    codeRegistry.addXformObject(xform_obj)
class Generate(object):
    '''Generate output by merging a model into a template to produce a file.'''
    def __init__(self, config):
        '''Constructor.

        Args:
            config: segment config; moduleName and tags are read from it.
        '''
        # Xform metadata consumed by the fashion code registry.
        self.version = "1.0.0"
        self.templatePath = []
        self.name = config.moduleName
        self.tags = config.tags
        # Model kinds this xform consumes / produces.
        self.inputKinds = ["fashion.core.generate.jinja2.spec",
                           "fashion.core.mirror"]
        self.outputKinds = [ 'fashion.core.output.file' ]
    def execute(self, codeRegistry, verbose=False, tags=None):
        '''Render every jinja2 generation spec; cwd is project root directory.'''
        # set up mirrored directories
        mdb = codeRegistry.getService('fashion.prime.modelAccess')
        mirCfg = munchify(mdb.getSingleton("fashion.core.mirror"))
        mirror = Mirror(Path(mirCfg.projectPath), Path(mirCfg.mirrorPath), force=mirCfg.force)
        genSpecs = mdb.getByKind(self.inputKinds[0])
        for genSpec in genSpecs:
            gs = munchify(genSpec)
            # Skip targets edited since the last generation (mirror mismatch)
            # so manual changes are not clobbered.
            if mirror.isChanged(Path(gs.targetFile)):
                logging.warning("Skipping {0}, file has changed.".format(gs.targetFile))
            else:
                try:
                    # Render the spec's jinja2 template with its model and
                    # write the result to the target file.
                    env = Environment(loader=FileSystemLoader(gs.templatePath))
                    template = env.get_template(gs.template)
                    result = template.render(gs.model)
                    targetPath = Path(gs.targetFile)
                    with targetPath.open(mode="w") as tf:
                        tf.write(result)
                    # Record the fresh copy in the mirror and the model DB.
                    mirror.copyToMirror(targetPath)
                    mdb.outputFile(targetPath)
                except TemplateNotFound:
                    logging.error("TemplateNotFound: {0}".format(gs.template))
| 33.779412 | 94 | 0.62734 | 1,715 | 0.746626 | 0 | 0 | 0 | 0 | 0 | 0 | 611 | 0.265999 |
b4c387f472a346519c5c008b90b4db7667d0b481 | 622 | py | Python | algorithm/821.py | ChuangbinWang/leetcode | 79c4ea82dc2f4105ed40ec2f4d3d9ee797c81d3b | [
"MIT"
] | null | null | null | algorithm/821.py | ChuangbinWang/leetcode | 79c4ea82dc2f4105ed40ec2f4d3d9ee797c81d3b | [
"MIT"
] | null | null | null | algorithm/821.py | ChuangbinWang/leetcode | 79c4ea82dc2f4105ed40ec2f4d3d9ee797c81d3b | [
"MIT"
class Solution(object):
    def shortestToChar(self, S, C):
        """
        For every position in S, return the distance to the closest
        occurrence of the character C (LeetCode 821).

        Replaces the original O(len(S) * count(C)) scan (with a 10000000
        magic sentinel) by two linear sweeps: the forward pass records the
        distance to the nearest C on the left, the backward pass tightens it
        with the nearest C on the right.

        :type S: str
        :type C: str
        :rtype: List[int]
        """
        n = len(S)
        ret = [0] * n
        # Forward pass: distance to the previous C (sentinel far to the left).
        prev = -2 * n
        for i in range(n):
            if S[i] == C:
                prev = i
            ret[i] = i - prev
        # Backward pass: distance to the next C (sentinel far to the right).
        prev = 3 * n
        for i in range(n - 1, -1, -1):
            if S[i] == C:
                prev = i
            ret[i] = min(ret[i], prev - i)
        return ret
if __name__ == "__main__":
    # Quick demo on the LeetCode example; `ret` is printed by the next line.
    word = "loveleetcode"
    target = 'e'
    ret = Solution().shortestToChar(word, target)
print(ret) | 27.043478 | 49 | 0.414791 | 498 | 0.800643 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.18328 |
b4c44cb204c3826e5c1e00d10a308dfe64d24ddf | 386 | py | Python | _solved/_solutions/visualization_02_seaborn14.py | jorisvandenbossche/ICES-python-data | 63864947657f37cb26cb4e2dcd67ff106dffe9cd | [
"BSD-3-Clause"
] | 1 | 2022-03-02T17:41:46.000Z | 2022-03-02T17:41:46.000Z | notebooks/_solutions/visualization_02_seaborn14.py | jorisvandenbossche/ICES-python-data | 63864947657f37cb26cb4e2dcd67ff106dffe9cd | [
"BSD-3-Clause"
] | 1 | 2022-03-14T15:15:53.000Z | 2022-03-14T15:15:53.000Z | notebooks/_solutions/visualization_02_seaborn14.py | jorisvandenbossche/ICES-python-data | 63864947657f37cb26cb4e2dcd67ff106dffe9cd | [
"BSD-3-Clause"
# Optional solution with tidy data representation (providing x and y)
# Melt the wide table (one column per victim type) into long/tidy form:
# one row per (datetime, victim_type) pair with the value in "count".
monthly_victim_counts_melt = monthly_victim_counts.reset_index().melt(
    id_vars="datetime", var_name="victim_type", value_name="count"
)
# One line per victim type, colorblind-friendly palette, wide aspect ratio.
sns.relplot(
    data=monthly_victim_counts_melt,
    x="datetime",
    y="count",
    hue="victim_type",
    kind="line",
    palette="colorblind",
    height=3, aspect=4,
)
b4c4c46e92d0cc32c81af417d80cbffbc1577999 | 488 | py | Python | Grokking-Algorithms/Selection_Sort.py | AzuLiu/Algorithms-by-Python | 4c907725e3c55222642990827ca0aba302ab2a8c | [
"MIT"
] | 1 | 2018-03-17T19:51:46.000Z | 2018-03-17T19:51:46.000Z | Grokking-Algorithms/Selection_Sort.py | AzuLiu/Algorithms-by-Python | 4c907725e3c55222642990827ca0aba302ab2a8c | [
"MIT"
] | null | null | null | Grokking-Algorithms/Selection_Sort.py | AzuLiu/Algorithms-by-Python | 4c907725e3c55222642990827ca0aba302ab2a8c | [
"MIT"
def findSmallest(arr):
    """Return the index of the smallest element of arr (first occurrence wins).

    Assumes arr is non-empty; an empty list raises IndexError.
    """
    best_value = arr[0]
    best_index = 0
    for index in range(1, len(arr)):
        if arr[index] < best_value:
            best_value = arr[index]
            best_index = index
    return best_index
def selection_sort(arr):
    """Return a new list with arr's elements in ascending order.

    NOTE: empties the input list as a side effect (each pass pops the
    current minimum out of it), matching the original implementation.
    """
    result = []
    while arr:
        # arr.index(min(arr)) locates the first occurrence of the minimum,
        # exactly like the original findSmallest helper.
        result.append(arr.pop(arr.index(min(arr))))
    return result
# Quick smoke test: sort a sample list and print the result.
test_arr = [5, 3, 6, 1, 0, 0, 2, 10]
print(selection_sort(test_arr))
| 24.4 | 47 | 0.581967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b4c61c8f7d576e30bc60c8fc8b5200df83eec589 | 73 | py | Python | library/source1/mdl/structs/__init__.py | anderlli0053/SourceIO | 3c0c4839939ce698439987ac52154f89ee2f5341 | [
"MIT"
] | 199 | 2019-04-02T02:30:58.000Z | 2022-03-30T21:29:49.000Z | library/source1/mdl/structs/__init__.py | anderlli0053/SourceIO | 3c0c4839939ce698439987ac52154f89ee2f5341 | [
"MIT"
] | 113 | 2019-03-03T19:36:25.000Z | 2022-03-31T19:44:05.000Z | library/source1/mdl/structs/__init__.py | anderlli0053/SourceIO | 3c0c4839939ce698439987ac52154f89ee2f5341 | [
"MIT"
] | 38 | 2019-05-15T16:49:30.000Z | 2022-03-22T03:40:43.000Z | from ....utils.byte_io_mdl import ByteIO
from ....shared.base import Base | 36.5 | 40 | 0.767123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b4c6b2e3ba90e086aba9fcd3f0c69b5fe6016f3e | 71 | py | Python | web2py-appliances-master/VideoLibrary/languages/it.py | wantsomechocolate/WantsomeBeanstalk | 8c8a0a80490d04ea52661a3114fd3db8de65a01e | [
"BSD-3-Clause"
] | 4 | 2015-05-28T04:37:28.000Z | 2017-12-03T11:11:27.000Z | languages/it.py | sungchi/feed9 | 25aafce5a47fef6133ac7fef42401cbe62003d81 | [
"MIT"
] | null | null | null | languages/it.py | sungchi/feed9 | 25aafce5a47fef6133ac7fef42401cbe62003d81 | [
"MIT"
# web2py translation dictionary: English source strings -> Italian.
# NOTE(review): 'wek2py' in the second translation looks like a typo for
# 'web2py' -- these are runtime strings, so confirm before changing.
{
'Hello World':'Salve Mondo',
'Welcome to web2py':'Ciao da wek2py',
}
| 14.2 | 37 | 0.661972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.859155 |
b4c7bce749ccc33db70d2dbdefc0de7260a71e74 | 190 | py | Python | rtmBot/izyrtm_prop.py | izyrtm/izyrtm-server | c1bf0a6c3734decb7cff8266706060791479d46b | [
"Apache-2.0"
] | 1 | 2019-10-05T14:41:02.000Z | 2019-10-05T14:41:02.000Z | rtmBot/izyrtm_prop.py | izyrtm/izyrtm-server | c1bf0a6c3734decb7cff8266706060791479d46b | [
"Apache-2.0"
] | null | null | null | rtmBot/izyrtm_prop.py | izyrtm/izyrtm-server | c1bf0a6c3734decb7cff8266706060791479d46b | [
"Apache-2.0"
# Deployment configuration constants for the izyRTM bot/server.
# SECURITY NOTE(review): plaintext credentials are committed in source;
# consider loading these from environment variables or a secrets store.
domain='https://monbot.hopto.org'
apm_id='admin'        # APM dashboard login
apm_pw='New1234!'     # APM dashboard password (plaintext)
apm_url='https://monbot.hopto.org:3000'
db_host='monbot.hopto.org'
db_user='izyrtm'
db_pw='new1234!'      # database password (plaintext)
db_datadbase='monbot'
b4c80401758b4e5dc91c46566ed5b6f3d9d37ae4 | 721 | py | Python | get_pos_data.py | chinnadhurai/lm-context-analysis | 826a4eb96738777c3a5ce4dc4300f5b5fc29ea8a | [
"Apache-2.0"
] | 42 | 2018-05-14T15:40:45.000Z | 2022-01-21T13:31:42.000Z | get_pos_data.py | chinnadhurai/lm-context-analysis | 826a4eb96738777c3a5ce4dc4300f5b5fc29ea8a | [
"Apache-2.0"
] | null | null | null | get_pos_data.py | chinnadhurai/lm-context-analysis | 826a4eb96738777c3a5ce4dc4300f5b5fc29ea8a | [
"Apache-2.0"
] | 11 | 2018-05-25T07:03:53.000Z | 2020-03-15T15:52:01.000Z | import sys
if len(sys.argv) < 2:
    print('Need the dataset name!')
    exit(0)
# For each split, pair every word line with its POS line and write a merged
# file where each token becomes "word_POS".
for split in ['train', 'valid', 'test']:
    with open('data/'+sys.argv[1]+'/'+split+'.txt', 'r') as f1, open(
            'data/'+sys.argv[1]+'_pos_only/'+split+'.txt', 'r') as f2, open(
            'data/'+sys.argv[1]+'_pos/'+split+'.txt', 'w') as fout:
        # Removed the unused enumerate index and stopped shadowing `line`
        # with its own split result; behavior is unchanged.
        for line, pline in zip(f1, f2):
            if line.strip().split(' ')[0] == '':  # empty lines in wiki
                fout.write(line)
                continue
            words = line.strip().split(' ')
            tags = pline.strip().split(' ')
            merged = [w + '_' + p for w, p in zip(words, tags)]
            fout.write(' '.join(merged) + ' \n')
| 32.772727 | 78 | 0.481276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.217753 |
b4c97a273a81770eaaca18c88e6e3fb3e5d16e27 | 1,307 | py | Python | cupy/linalg/norm.py | umitanuki/chainer | 225c56b233e684ff4855451d2af4c2fb66915f21 | [
"MIT"
] | null | null | null | cupy/linalg/norm.py | umitanuki/chainer | 225c56b233e684ff4855451d2af4c2fb66915f21 | [
"MIT"
] | null | null | null | cupy/linalg/norm.py | umitanuki/chainer | 225c56b233e684ff4855451d2af4c2fb66915f21 | [
"MIT"
def norm(x, ord=None, axis=None):
    """Matrix or vector norm -- placeholder, not yet implemented."""
    # TODO(beam2d): Implement it
    raise NotImplementedError
def cond(x, p=None):
    """Condition number of a matrix -- placeholder, not yet implemented."""
    # TODO(beam2d): Implement it
    raise NotImplementedError
def det(a):
    """Determinant of an array -- placeholder, not yet implemented."""
    # TODO(beam2d): Implement it
    raise NotImplementedError
def matrix_rank(M, tol=None):
    """Matrix rank -- placeholder, not yet implemented."""
    # TODO(beam2d): Implement it
    raise NotImplementedError
def slogdet(a):
    """Sign and log of the determinant -- placeholder, not yet implemented."""
    # TODO(beam2d): Implement it
    raise NotImplementedError
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
    """Returns the sum along the diagonals of an array.

    It computes the sum along the diagonals at ``axis1`` and ``axis2``.

    Args:
        a (cupy.ndarray): Array to take trace.
        offset (int): Index of diagonals. Zero indicates the main diagonal, a
            positive value an upper diagonal, and a negative value a lower
            diagonal.
        axis1 (int): The first axis along which the trace is taken.
        axis2 (int): The second axis along which the trace is taken.
        dtype: Data type specifier of the output.
        out (cupy.ndarray): Output array.

    Returns:
        cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.

    .. seealso:: :func:`numpy.trace`
    """
    # Extract the requested diagonal(s), then reduce over the trailing axis.
    diagonal = a.diagonal(offset, axis1, axis2)
    return diagonal.sum(-1, dtype, out, False)
| 26.673469 | 77 | 0.646519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 859 | 0.65723 |
b4cc3ae61af4ef20bae2801cc4f46d491ccbd240 | 2,771 | py | Python | examples/optical_elements/examples_refractive_interface.py | srio/minishadow | 019bf46106f09f89297e01c273784e8059e02014 | [
"MIT"
] | 1 | 2019-10-30T10:04:43.000Z | 2019-10-30T10:04:43.000Z | examples/optical_elements/examples_refractive_interface.py | srio/minishadow | 019bf46106f09f89297e01c273784e8059e02014 | [
"MIT"
] | 9 | 2020-09-02T16:13:01.000Z | 2020-09-30T15:09:44.000Z | examples/optical_elements/examples_refractive_interface.py | srio/shadow4 | 019bf46106f09f89297e01c273784e8059e02014 | [
"MIT"
] | null | null | null |
import numpy
from shadow4.sources.source_geometrical.source_geometrical import SourceGeometrical
from shadow4.beamline.optical_elements.refractors.s4_conic_interface import S4ConicInterface, S4ConicInterfaceElement
from shadow4.tools.graphics import plotxy
from shadow4.syned.element_coordinates import ElementCoordinates
def get_sigmas_radiation(photon_energy, undulator_length):
    """Return the radiation (sigma, sigma') of an undulator source, in
    um and urad respectively, for a photon energy in eV and an undulator
    length in m (Gaussian-approximation formulas)."""
    import scipy.constants as codata
    wavelength = 1e-10 * codata.h * codata.c / codata.e * 1e10 / photon_energy  # in m
    print("wavelength in m", wavelength)
    sigma = 1e6 * 2.740 / 4 / numpy.pi * numpy.sqrt(wavelength * undulator_length)
    sigma_prime = 1e6 * 0.69 * numpy.sqrt(wavelength / undulator_length)
    return sigma, sigma_prime
def refractive_interface_with_collimated_beam(do_plot=True):
    """Trace a collimated beam through one conic refractive interface
    (vacuum -> glass, n: 1.0 -> 1.5) and print the source vs focal spot sizes.

    :param do_plot: when True, display scatter plots of the focal plane and
        of the beam footprint on the interface.
    """
    # collimated source: 1x1 mm square, single line at 5000 A, zero divergence
    src = SourceGeometrical()
    src.set_energy_distribution_singleline(value=5000, unit='A')
    src.set_spatial_type_rectangle(width=1e-3, height=1e-3)
    src.set_angular_distribution_uniform(0, 0, 0, 0)

    beam = src.get_beam()
    print(beam.info())
    SX, SZ = (1e6 * beam.get_standard_deviation(1), 1e6 * beam.get_standard_deviation(3))

    # single conic refractive interface, image plane 5 m downstream
    interface1 = S4ConicInterfaceElement(
        optical_element=S4ConicInterface(
            name="Conic Refractive Interface",
            boundary_shape=None,
            material_object="vacuum",
            material_image="glass",
            f_r_ind=0,
            r_ind_obj=1.0,
            r_ind_ima=1.5,
            conic_coefficients=[1.0, 1.0, 1.0, 0.0, -0.0, -0.0, 0.0, 0.0, 3350.0e-3, 0.0],
        ),
        coordinates=ElementCoordinates(p=0.0, q=5000.0e-3,
                                       angle_radial=0.0, angle_azimuthal=0.0,
                                       angle_radial_out=numpy.pi))
    print(interface1.info())
    print(interface1.get_optical_element().get_surface_shape().get_conic_coefficients())

    # ray trace through the interface
    beam2, mirr2 = interface1.trace_beam(beam)

    if do_plot:
        plotxy(beam2, 1, 3, nbins=100, title="FOCAL PLANE")
        plotxy(mirr2, 1, 3, nbins=100, title="LENS HEIGHT")
        # plotxy(mirr2, 4, 5, nbins=100, title="FOOT DIV")

    FX, FZ = (1e6 * beam2.get_standard_deviation(1), 1e6 * beam2.get_standard_deviation(3))
    print("Source dimensions: %f %f um"%(SX,SZ))
    print("Focal dimensions: %f %f um"%(FX,FZ))
    print("Demagnification: %g %g"%(SX/FX,SX/FZ))
# Manual demo entry point (requires a Qt backend for the plots)
if __name__ == "__main__":
    from srxraylib.plot.gol import set_qt
    set_qt()
    refractive_interface_with_collimated_beam(do_plot=True)
| 32.6 | 117 | 0.611693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 286 | 0.103212 |
b4cc52d37e832307c2b993281014ae5693bca403 | 281 | py | Python | recipes/transabyss/setup.py | ieguinoa/bioconda-recipes | da8a66ec30d36273a8cdf8783a26604f23c45534 | [
"MIT"
] | null | null | null | recipes/transabyss/setup.py | ieguinoa/bioconda-recipes | da8a66ec30d36273a8cdf8783a26604f23c45534 | [
"MIT"
] | null | null | null | recipes/transabyss/setup.py | ieguinoa/bioconda-recipes | da8a66ec30d36273a8cdf8783a26604f23c45534 | [
"MIT"
] | null | null | null | import subprocess
from setuptools import setup, find_packages, Extension
# Packaging metadata for the Trans-ABySS recipe: installs the `transabyss`
# Python package plus the two command-line entry-point scripts.
setup(
    name='transabyss',
    version='1.54',
    author='transabyss',
    license='Free Software License',
    packages=['transabyss'],
    scripts=['scripts/transabyss', 'scripts/transabyss-merge'],
)
| 23.416667 | 63 | 0.704626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.395018 |
b4ccff062a09f477b0fda915a3727e153ff816cf | 252 | py | Python | ffs/__init__.py | clbarnes/ffs | bc67692178232d3a77c05cefd1139461cf17882a | [
"MIT"
] | null | null | null | ffs/__init__.py | clbarnes/ffs | bc67692178232d3a77c05cefd1139461cf17882a | [
"MIT"
] | null | null | null | ffs/__init__.py | clbarnes/ffs | bc67692178232d3a77c05cefd1139461cf17882a | [
"MIT"
] | null | null | null | from .classes import Entry, EntryJso
from .spec_version import SPEC_VERSION
from .version import version as __version__ # noqa: F401
from .version import version_tuple as __version_info__ # noqa: F401
__all__ = ["Entry", "EntryJso", "SPEC_VERSION"]
| 36 | 68 | 0.781746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.218254 |
b4cd953c3638b37bbb67c63ed55c3b95b4493e2d | 1,816 | py | Python | src/main/gui/core/functions.py | UntactOrder/UntactOrder.PosServer | c05094fe70fadeeb1ba1a8605606a6002730a699 | [
"MIT"
] | null | null | null | src/main/gui/core/functions.py | UntactOrder/UntactOrder.PosServer | c05094fe70fadeeb1ba1a8605606a6002730a699 | [
"MIT"
] | null | null | null | src/main/gui/core/functions.py | UntactOrder/UntactOrder.PosServer | c05094fe70fadeeb1ba1a8605606a6002730a699 | [
"MIT"
] | null | null | null | # ///////////////////////////////////////////////////////////////
#
# BY: WANDERSON M.PIMENTA
# PROJECT MADE WITH: Qt Designer and PySide6
# V: 1.0.0
#
# This project can be used freely for all uses, as long as they maintain the
# respective credits only in the Python scripts, any information in the visual
# interface (GUI) can be modified without any implication.
#
# There are limitations on Qt licenses if you want to use your products
# commercially, I recommend reading them on the official website:
# https://doc.qt.io/qtforpython/licenses.html
#
# ///////////////////////////////////////////////////////////////
# IMPORT PACKAGES AND MODULES
# ///////////////////////////////////////////////////////////////
import os
# APP FUNCTIONS
# ///////////////////////////////////////////////////////////////
# ---------------------------------------------------------------
# SET SVG ICON
# ///////////////////////////////////////////////////////////////
def set_svg_icon(icon_name):
    """Return the absolute, normalized path of a bundled SVG icon,
    resolved relative to the current working directory."""
    root = os.path.abspath(os.getcwd())
    icons_dir = os.path.join(root, "res/gui/images/svg_icons/")
    return os.path.normpath(os.path.join(icons_dir, icon_name))
# SET SVG IMAGE
# ///////////////////////////////////////////////////////////////
def set_svg_image(icon_name):
    """Return the absolute, normalized path of a bundled SVG image,
    resolved relative to the current working directory."""
    root = os.path.abspath(os.getcwd())
    images_dir = os.path.join(root, "res/gui/images/svg_images/")
    return os.path.normpath(os.path.join(images_dir, icon_name))
# SET IMAGE
# ///////////////////////////////////////////////////////////////
def set_image(image_name):
    """Return the absolute, normalized path of a bundled raster image,
    resolved relative to the current working directory."""
    root = os.path.abspath(os.getcwd())
    images_dir = os.path.join(root, "res/gui/images/images/")
    return os.path.normpath(os.path.join(images_dir, image_name))
| 34.264151 | 78 | 0.507709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,159 | 0.638216 |
b4cdd7ecf5ad286a184af28a0d07e8a49f6ad38d | 9,939 | py | Python | DeepJH/agents/actor_critic_agents/explorer.py | microsoft/Dr-Jekyll-and-Mr-Hyde-The-Strange-Case-of-Off-Policy-Policy-Updates | e085b10156787838a342037e6042af00f5262d5a | [
"MIT"
] | 2 | 2021-12-22T18:18:30.000Z | 2022-02-25T17:57:33.000Z | DeepJH/agents/actor_critic_agents/explorer.py | microsoft/Dr-Jekyll-and-Mr-Hyde-The-Strange-Case-of-Off-Policy-Policy-Updates | e085b10156787838a342037e6042af00f5262d5a | [
"MIT"
] | null | null | null | DeepJH/agents/actor_critic_agents/explorer.py | microsoft/Dr-Jekyll-and-Mr-Hyde-The-Strange-Case-of-Off-Policy-Policy-Updates | e085b10156787838a342037e6042af00f5262d5a | [
"MIT"
] | null | null | null | from multiprocessing.dummy import Value
from agents.Base_Agent import Base_Agent
import copy
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import Adam
class RunningMeanStd(object):
    """Running mean/variance of a stream of torch tensors, combined with the
    parallel-variance update of Chan et al.

    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
    """

    def __init__(self, device, epsilon=1e-4, shape=()):
        # epsilon acts as a tiny pseudo-count so the first update is well-defined
        self.device = device
        self.mean = torch.zeros(shape).to(self.device)
        self.var = torch.ones(shape).to(self.device)
        self.count = epsilon

    def update(self, x):
        """Fold a whole batch (first axis = samples) into the running stats."""
        self.update_from_moments(torch.mean(x, axis=0), torch.var(x, axis=0), x.shape[0])

    def update_from_moments(self, batch_mean, batch_var, batch_count):
        """Merge pre-computed batch moments into the running moments."""
        total = self.count + batch_count
        delta = batch_mean - self.mean
        combined_mean = self.mean + delta * batch_count / total
        # combine the two sums of squared deviations (M2 terms)
        m_a = self.var * (self.count)
        m_b = batch_var * (batch_count)
        m2 = m_a + m_b + torch.square(delta) * self.count * \
            batch_count / (self.count + batch_count)
        self.mean = combined_mean
        self.var = m2 / total
        self.count = total
class Explorer(Base_Agent):
    """Random Network Distillation or count based. Not really an agent.

    Supplies intrinsic exploration rewards for a learning agent, either from
    tabular visitation counts (``count_based``) or from the prediction error
    of a trained predictor network against a frozen, randomly initialised
    target network (RND). With ``rnd_actions`` a separate predictor/target
    pair is maintained per action.

    NOTE(review): the tabular structures are hard-coded to 225 states --
    presumably a 15x15 grid environment; confirm against the config.
    """
    agent_name = "Explorer"

    def __init__(self, config, state_size=None):
        # log_info=False: this helper should not emit its own agent logs
        Base_Agent.__init__(self, config, log_info=False, state_size=state_size)
        self.hyperparameters = config.hyperparameters
        self.count_based = self.hyperparameters.get('count_based', False)
        # 'scale' multiplies only the RND reward (see compute_preds)
        self.scale = self.hyperparameters.get('scale', 1)
        self.normalize = self.hyperparameters.get('normalize_rnd', False)
        self.batch_size = self.hyperparameters.get('batch_size', 1)
        # One RND predictor/target pair per action when True
        self.rnd_actions = self.hyperparameters.get('rnd_actions', False)
        # Raw (state, action) visit counter, kept in both modes (see log_state_action)
        self.state_actions = torch.zeros((225, self.action_size)).to(self.device)
        if self.count_based:
            # Counts start at one so 1/sqrt(count) is finite on the first visit
            self.visited_states = torch.ones((225,)).to(self.device)
            self.visited_state_actions = torch.ones((225, self.action_size)).to(self.device)
        else:
            if self.rnd_actions:
                self.predictors, self.targets, self.optimizers = [], [], []
                for i in range(self.action_size):
                    predictor, target, optimizer = self._network_factory()
                    if i > 0:
                        # All predictors start from the same initial weights
                        self.copy_model_over(self.predictors[0], predictor)
                    self.predictors.append(predictor)
                    self.targets.append(target)
                    self.optimizers.append(optimizer)
            else:
                self.predictor, self.target, self.optimizer = self._network_factory()
        self.steps = 0
        # Running statistics used to normalise intrinsic rewards
        self.reward_rms = RunningMeanStd(self.device)
        self.losses = []
        self.states_so_far_np = []  # NOTE(review): appears unused within this class
        self._init_states_so_far()

    def _init_states_so_far(self, action=None):
        # Reset the buffer(s) of states pending an RND predictor update.
        # With rnd_actions, `action` resets only that action's buffer.
        # NOTE(review): `if action:` treats action index 0 as falsy, so
        # resetting action 0 clears EVERY action's buffer -- confirm intent.
        if self.rnd_actions:
            if action:
                self.states_so_far[action] = torch.zeros([0, 1]).to(self.device)
            else:
                self.states_so_far = [torch.zeros([0, 1]).to(self.device) for _ in range(self.action_size)]
        else:
            self.states_so_far = torch.zeros([0, 1]).to(self.device)

    def _network_factory(self):
        # Build one (trainable predictor, random target, optimizer) triple.
        predictor = self.create_NN(
            input_dim=self.state_size, output_dim=self.hyperparameters['features_size'],
            override_seed=self.config.seed + 1, hyperparameters=self.hyperparameters)
        # NOTE(review): target_hyperparameters is computed but never passed to
        # create_NN below, so "target_linear_hidden_units" has no effect -- verify.
        target_hyperparameters = copy.deepcopy(self.hyperparameters)
        target_hyperparameters["linear_hidden_units"] = target_hyperparameters["target_linear_hidden_units"]
        target = self.create_NN(
            input_dim=self.state_size, output_dim=self.hyperparameters['features_size'])
        optimizer = Adam(predictor.parameters(),
                         lr=self.hyperparameters["learning_rate"], eps=1e-4)
        return predictor, target, optimizer

    def log_state_action(self, state, action):
        # Bookkeeping only: increment the raw (state, action) visit counter
        self.state_actions[state[0], action] += 1

    def compute_intrinsic_reward_and_learn(self, states, learn=True, actions=None, int_learn_batch=None):
        """Return an (N, 1) tensor of intrinsic rewards for `states` and,
        when `learn` is True, update the underlying counts / RND predictor."""
        self.steps += 1
        if states.ndim == 4:
            # Only get last observation for RND
            states = torch.unsqueeze(states[:, -1, :, :], 1)
        # Get rewards
        rewards = self.compute_intrinsic_reward(states, learn=learn, actions=actions, int_learn_batch=int_learn_batch)
        if self.normalize:
            # Divide by the running std of intrinsic rewards
            mean, std, count = torch.mean(rewards), torch.std(rewards), len(rewards)
            self.reward_rms.update_from_moments(mean, std ** 2, count)
            rewards /= torch.sqrt(self.reward_rms.var)
        return rewards.reshape((-1, 1))

    def compute_intrinsic_reward(self, states, learn=True, actions=None, int_learn_batch=None):
        # Dispatch to the configured novelty estimator
        if self.count_based:
            return self.compute_counts(states, learn=learn, actions=actions, int_learn_batch=int_learn_batch)
        else:
            return self.compute_preds(states, learn=learn, actions=actions, int_learn_batch=int_learn_batch)

    def compute_counts(self, states, learn=True, actions=None, int_learn_batch=None):
        """Count-based bonus 1/sqrt(N(s, a)) (or 1/sqrt(N(s)) when no actions).

        `int_learn_batch` masks (with 1s) which rows should update the counts.
        """
        if actions is not None:
            if not self.rnd_actions:
                raise ValueError
            rewards = 1 / torch.sqrt(self.visited_state_actions[states.long().squeeze(1), actions.long().squeeze(1)]).unsqueeze(1)
            if learn:
                indices = torch.where(int_learn_batch == 1)[0]
                states_to_learn = states[indices]
                actions_to_learn = actions[indices]
                self.learn_counts(states_to_learn, actions_to_learn)
        else:
            rewards = 1 / torch.sqrt(self.visited_states[states.long()])
            if learn:
                states_to_learn = states[torch.where(int_learn_batch == 1)[0]]
                self.learn_counts(states_to_learn)
        return rewards

    def get_counts_all_actions(self, state):
        # Raw visit counts for every action in `state` (diagnostics)
        return self.state_actions[state[0].long()]

    def compute_preds(self, states, learn=True, actions=None, int_learn_batch=None):
        """RND bonus: prediction error of the predictor(s) vs the frozen target(s),
        scaled by `self.scale`."""
        if actions is not None:
            if not self.rnd_actions:
                raise ValueError
            intrinsic_reward = torch.zeros((len(states), 1)).to(self.device)
            for i in range(self.action_size):
                # Score each state with the network pair of the action taken in it
                indices = torch.where(actions == i)[0]
                states_per_action = states[indices]
                target_next_feature = self.targets[i](states_per_action)
                predict_next_feature = self.predictors[i](states_per_action)
                intrinsic_reward[indices] = self._compute_intrinsic_reward(target_next_feature, predict_next_feature).unsqueeze(1)
                if learn:
                    # Buffer these states and fit this action's predictor
                    self.states_so_far[i] = torch.cat(
                        (self.states_so_far[i], states_per_action))
                    self.learn_pred(index_to_train=i)
        else:
            target_next_feature = self.target(states)
            predict_next_feature = self.predictor(states)
            intrinsic_reward = self._compute_intrinsic_reward(target_next_feature, predict_next_feature)
            if learn and actions is None:
                self.learn_pred(predict_next_feature=predict_next_feature, target_next_feature=target_next_feature)
        return self.scale * intrinsic_reward

    @staticmethod
    def _compute_intrinsic_reward(target_feature, predict_feature):
        # Half squared error per sample, detached so the reward does not backprop
        return ((target_feature - predict_feature).pow(2).sum(1) / 2).detach()

    def learn(self, states):
        """ Minimize the mse loss between predictions and target"""
        if self.count_based:
            self.learn_counts(states)
        else:
            self.learn_pred(states)

    def learn_counts(self, states, actions=None):
        """ Update the visitation counts"""
        for i in range(len(states)):
            if actions is not None:
                self.visited_state_actions[int(states[i].item()), int(actions[i].item())] += 1
            else:
                self.learn_counts_row(states, i) if False else None  # NOTE(review): placeholder comment removed below
                self.visited_states[int(states[i].item())] += 1

    def learn_pred(self, states=None, predict_next_feature=None, target_next_feature=None, index_to_train=None):
        """One gradient step on an RND predictor.

        Three call modes: with `index_to_train` (per-action buffers), with
        `states` (single predictor, fresh forward pass), or with precomputed
        `predict_next_feature`/`target_next_feature`.
        """
        if predict_next_feature is None and states is None:
            if index_to_train is None:
                raise ValueError
            samples = self.states_so_far[index_to_train]
            target = self.targets[index_to_train]
            predictor = self.predictors[index_to_train]
            self._init_states_so_far(action=index_to_train)
            if len(samples) >= self.batch_size or index_to_train is not None:
                target_next_feature = target(samples)
                predict_next_feature = predictor(samples)
            else:
                return
        elif states is not None:
            target_next_feature = self.target(states)
            predict_next_feature = self.predictor(states)
        # Target is detached: only the predictor is trained towards it
        prediction_loss = F.mse_loss(predict_next_feature, target_next_feature.detach())
        self.losses.append(prediction_loss.item())
        self.update_rnd_parameters(prediction_loss, index_to_train=index_to_train)

    def update_rnd_parameters(self, prediction_loss, index_to_train=None):
        """Updates the parameters for the rnd"""
        if index_to_train is not None:
            self.take_optimisation_step(self.optimizers[index_to_train], self.predictors[index_to_train], prediction_loss,
                                        self.hyperparameters["gradient_clipping_norm"])
        else:
            self.take_optimisation_step(self.optimizer, self.predictor, prediction_loss,
                                        self.hyperparameters["gradient_clipping_norm"])
| 46.227907 | 130 | 0.642922 | 9,745 | 0.980481 | 0 | 0 | 160 | 0.016098 | 0 | 0 | 549 | 0.055237 |
b4ce4ceb63dbb6f1fc462df7aa6a104fb75ec08a | 6,079 | py | Python | python/nlusvc/spacy/svc/perform_pos_parse.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | python/nlusvc/spacy/svc/perform_pos_parse.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | python/nlusvc/spacy/svc/perform_pos_parse.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import pandas as pd
from pandas import DataFrame
from spacy.lang.en import English
from tabulate import tabulate
from base import BaseObject
class PerformPosParse(BaseObject):
    """Run a spaCy part-of-speech parse over a text and tabulate the result.

    Each token of the input text becomes one row of a pandas DataFrame with
    the columns Text, Lemma, POS, Tag, Dep, Shape, IsAlpha and IsStop.

    Reference:
        https://spacy.io/usage/linguistic-features
    """

    def __init__(self,
                 nlp: English,
                 input_text: str,
                 is_debug: bool = False):
        """
        Created:
            6-Feb-2020
            craig.trim@ibm.com
            *   in pursuit of
                https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1829
        """
        BaseObject.__init__(self, __name__)
        self._nlp = nlp
        self._is_debug = is_debug
        self._input_text = input_text

    def process(self,
                log_sample_size: int = 500) -> DataFrame:
        """
        Purpose:
            Perform spaCY pos-tagging on input text
        :param log_sample_size:
            number of rows included in the debug log
        :return:
            a dataframe with one row per token and the following columns:
                Text:       The original word text.
                Lemma:      The base form of the word.
                POS:        The simple part-of-speech tag.
                Tag:        The detailed part-of-speech tag.
                Dep:        Syntactic dependency (relation between tokens).
                Shape:      The word shape (capitalization, punctuation, digits).
                IsAlpha:    Is the token an alpha character?
                IsStop:     Is the token part of a stop list?
        """
        tokens = self._nlp(self._input_text)
        df = pd.DataFrame([{
            "Text": a_token.text,
            "Lemma": a_token.lemma_,
            "POS": a_token.pos_,
            "Tag": a_token.tag_,
            "Dep": a_token.dep_,
            "Shape": a_token.shape_,
            "IsAlpha": a_token.is_alpha,
            "IsStop": a_token.is_stop} for a_token in tokens])

        if self._is_debug:
            self.logger.debug('\n'.join([
                "Part-of-Speech DataFrame Generated",
                f"\tSize: {len(df)}",
                tabulate(df.head(log_sample_size),
                         tablefmt='psql',
                         headers='keys')]))

        return df
| 51.516949 | 106 | 0.406317 | 5,888 | 0.968262 | 0 | 0 | 0 | 0 | 0 | 0 | 4,870 | 0.800855 |
b4cefef49f2369f3f8e23dfe6b70e513930b24a7 | 2,229 | py | Python | causal_world/intervention_actors/visual_actor.py | michaelfeil/CausalWorld | ff866159ef0ee9c407893ae204e93eb98dd68be2 | [
"MIT"
] | 2 | 2021-09-22T08:20:12.000Z | 2021-11-16T14:20:45.000Z | causal_world/intervention_actors/visual_actor.py | michaelfeil/CausalWorld | ff866159ef0ee9c407893ae204e93eb98dd68be2 | [
"MIT"
] | null | null | null | causal_world/intervention_actors/visual_actor.py | michaelfeil/CausalWorld | ff866159ef0ee9c407893ae204e93eb98dd68be2 | [
"MIT"
] | null | null | null | from causal_world.intervention_actors.base_actor import \
BaseInterventionActorPolicy
import numpy as np
class VisualInterventionActorPolicy(BaseInterventionActorPolicy):

    def __init__(self, **kwargs):
        """
        Intervention actor that randomizes every visual component (i.e. the
        colors) exposed by the environment's intervention space.

        :param kwargs: unused; accepted for interface compatibility.
        """
        super(VisualInterventionActorPolicy, self).__init__()
        self.task_intervention_space = None

    def initialize(self, env):
        """
        Query the environment for its current variable space so that later
        interventions can be sampled inside the allowed bounds.

        :param env: (causal_world.env.CausalWorld) environment used by the
                    intervention actor to query different methods from it.

        :return:
        """
        self.task_intervention_space = env.get_variable_space_used()
        return

    def _act(self, variables_dict):
        """
        Sample a uniform random color for every variable (or sub-variable)
        in the intervention space that exposes a 'color' entry.

        :param variables_dict: unused here; interventions are derived from
                               the stored intervention space.
        :return: (dict) sampled color interventions.
        """
        interventions_dict = dict()
        space = self.task_intervention_space
        for variable in space:
            sub_space = space[variable]
            if isinstance(sub_space, dict):
                if 'color' in sub_space:
                    low, high = sub_space['color'][0], sub_space['color'][1]
                    interventions_dict[variable] = {'color': np.random.uniform(low, high)}
            elif 'color' in variable:
                interventions_dict[variable] = np.random.uniform(sub_space[0],
                                                                 sub_space[1])
        return interventions_dict

    def get_params(self):
        """
        returns parameters that could be used in recreating this intervention
        actor.

        :return: (dict) specifying paramters to create this intervention actor
                        again.
        """
        return {'visual_actor': dict()}
| 35.951613 | 87 | 0.599372 | 2,117 | 0.949753 | 0 | 0 | 0 | 0 | 0 | 0 | 918 | 0.411844 |
b4cf1ec1bfea538a87dc03baa5a20d0eebb02913 | 3,259 | py | Python | app/utils.py | AnthonyBloomer/covid-cli | 733ab3678614368dd0010559bec9cbe5ec460670 | [
"MIT"
] | 3 | 2020-03-31T14:28:21.000Z | 2022-03-15T00:28:26.000Z | app/utils.py | AnthonyBloomer/covid-cli | 733ab3678614368dd0010559bec9cbe5ec460670 | [
"MIT"
] | 2 | 2020-11-01T14:01:53.000Z | 2020-11-02T21:32:08.000Z | app/utils.py | AnthonyBloomer/covid-cli | 733ab3678614368dd0010559bec9cbe5ec460670 | [
"MIT"
] | 2 | 2020-11-01T13:59:50.000Z | 2022-03-15T00:28:29.000Z | import csv
import time
from datetime import datetime
from prettytable import PrettyTable
from .api import countries, country, totals, us_states
def to_csv(data):
    """Dump a list of record dicts to '<unix-timestamp>.csv' in the current
    directory; the header is the union of all keys seen in the records."""
    fieldnames = set()
    for record in data:
        fieldnames |= record.keys()
    filename = "%s.csv" % int(time.time())
    with open(filename, "w", newline="") as handle:
        writer = csv.DictWriter(handle, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(data)
def calculate_death_rate(deaths, recovered):
    """Return deaths/(deaths+recovered) as a whole-percent string, or "N/A"
    when no recoveries have been reported (closed-case fatality rate)."""
    if recovered == 0:
        return "N/A"
    rate = round(100 * deaths / (deaths + recovered))
    return str(rate) + "%"
def from_timestamp(updated):
    """Convert a millisecond epoch timestamp to a local 'YYYY-MM-DD HH:MM' string."""
    moment = datetime.fromtimestamp(updated / 1000)
    return moment.strftime("%Y-%m-%d %H:%M")
def create_table(args):
    """Render the requested COVID-19 statistics as a PrettyTable.

    Exactly one of ``args.country``, ``args.totals``, ``args.all`` or
    ``args.us`` selects the view. When ``args.csv`` is set, the fetched
    records are also exported via :func:`to_csv`; ``args.sort_by`` orders
    the multi-row views.

    :param args: parsed command-line namespace.
    :return: True when a table was rendered, False otherwise.
    """
    if args.country:
        x = PrettyTable()
        x.field_names = [
            "Country",
            "Deaths",
            "Critical",
            "Cases",
            "Recovered",
            "Death Rate",
            "Updated",
        ]
        data = country(args.country)
        x.add_row(
            [
                data["country"],
                data["deaths"],
                data["critical"],
                data["cases"],
                data["recovered"],
                calculate_death_rate(data["deaths"], data["recovered"]),
                from_timestamp(data["updated"]),
            ]
        )
        print(x)
        if args.csv:
            # Bug fix: to_csv iterates a list of record dicts; the old code
            # passed the bare dict, which crashed on `record.keys()`.
            to_csv([data])
        # Bug fix: this branch used to fall through and implicitly return None.
        return True
    elif args.totals:
        x = PrettyTable()
        x.field_names = [
            "Active",
            "Cases",
            "Deaths",
            "Recovered",
            "Death Rate",
            "Updated",
        ]
        data = totals()
        x.add_row(
            [
                data["active"],
                data["cases"],
                data["deaths"],
                data["recovered"],
                calculate_death_rate(data["deaths"], data["recovered"]),
                from_timestamp(data["updated"]),
            ]
        )
        print(x)
        if args.csv:
            to_csv([data])  # same list-wrapping fix as the country branch
        return True
    elif args.all:
        x = PrettyTable()
        x.field_names = [
            "Country",
            "Active",
            "Cases",
            "Deaths",
            "Recovered",
            "Death Rate",
            "Updated",
        ]
        records = countries(args.sort_by)
        for record in records:
            x.add_row(
                [
                    record["country"],
                    record["active"],
                    record["cases"],
                    record["deaths"],
                    record["recovered"],
                    calculate_death_rate(record["deaths"], record["recovered"]),
                    from_timestamp(record["updated"]),
                ]
            )
        print(x)
        if args.csv:
            to_csv(records)
        return True
    elif args.us:
        x = PrettyTable()
        x.field_names = [
            "State",
            "Active",
            "Cases",
            "Deaths",
        ]
        states = us_states(sort_by=args.sort_by)
        for state in states:
            x.add_row(
                [state["state"], state["active"], state["cases"], state["deaths"]]
            )
        print(x)
        if args.csv:
            to_csv(states)
        return True
    else:
        return False
| 25.069231 | 83 | 0.438785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 488 | 0.149739 |
b4d1f22a627dce69c863deac50b2db6d2d2229f8 | 2,879 | py | Python | yann/utils/timer.py | michalwols/yann | b3c0f35ec7515ddaeb1f04d365af7b6d136f56cf | [
"MIT"
] | 32 | 2019-04-13T11:03:38.000Z | 2022-01-24T03:00:56.000Z | yann/utils/timer.py | michalwols/yann | b3c0f35ec7515ddaeb1f04d365af7b6d136f56cf | [
"MIT"
] | 13 | 2019-09-29T00:51:24.000Z | 2021-12-12T15:06:00.000Z | yann/utils/timer.py | michalwols/yann | b3c0f35ec7515ddaeb1f04d365af7b6d136f56cf | [
"MIT"
] | 5 | 2020-01-01T10:27:26.000Z | 2021-12-20T18:33:08.000Z | from collections import OrderedDict, defaultdict
from contextlib import contextmanager
from datetime import datetime
import torch.cuda
from ..viz.plot import plot_timeline
def time(name=None, sync=False):
  """Create and return a logging :class:`Task` (prints at start and end).

  NOTE(review): this function shadows the stdlib ``time`` name inside this
  module's namespace -- keep that in mind when editing this file.
  """
  return Task(name=name, sync=sync, log=True)
class Task:
  """A named, timed span with optional CUDA synchronisation and logging."""

  __slots__ = ('name', 'start_time', 'end_time', 'meta', 'sync', 'log')

  def __init__(self, name=None, start=None, end=None, meta=None, sync=False, log=False):
    self.name = name
    self.start_time = start
    self.end_time = end
    self.meta = meta or {}
    self.sync = sync
    self.log = log

  def start(self, time=None, meta=None, sync=None):
    """Record the start time (optionally merging extra metadata first)."""
    if meta:
      self.meta.update(meta)
    do_sync = self.sync if sync is None else sync
    if do_sync and torch.cuda.is_available():
      # wait for queued kernels so the timestamp reflects real work
      torch.cuda.synchronize()
    self.start_time = time or datetime.now()
    if self.log:
      print(f'starting {self.name or id(self)}')

  def end(self, time=None, meta=None, sync=None):
    """Record the end time; metadata is merged after the clock stops."""
    do_sync = self.sync if sync is None else sync
    if do_sync and torch.cuda.is_available():
      torch.cuda.synchronize()
    self.end_time = time or datetime.now()
    if self.log:
      print(f'completed {self.name or id(self)} in {self.seconds:.9g} seconds')
    if meta:
      self.meta.update(meta)

  @classmethod
  def begin(cls, name=None, meta=None, sync=None):
    """Construct a task and immediately start it."""
    task = cls(name=name, meta=meta, sync=sync)
    task.start()
    return task

  @property
  def seconds(self):
    """Elapsed wall time in seconds, or None while the task is incomplete."""
    if self.start_time is None or self.end_time is None:
      return None
    return (self.end_time - self.start_time).total_seconds()

  def __enter__(self):
    self.start()
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    self.end()

  def __repr__(self):
    return f"Task({self.name or id(self)}, seconds={self.seconds:.9g}, sync={self.sync})"
class Timer:
  """Collects named :class:`Task` timings; usable as a context manager."""

  def __init__(self, name=None, log=False):
    self.tasks = []          # every task ever created, in creation order
    self.name = name
    self.log = log
    self.active_tasks = {}   # name -> currently running Task

  def start(self, name, sync=True, **meta):
    """Begin a named task; raises if `name` is already running.

    Bug fix: the old guard tested the Task *object* against the name-keyed
    dict (always False) and only after creating/registering the task, so
    nested starts were silently allowed and overwrote each other.
    """
    if name in self.active_tasks:
      raise ValueError(f'Nesting tasks is not allowed, "{name}" was already started and not finished')
    task = self.task(name, sync=sync, **meta)
    if self.log: print('Started', name)
    self.active_tasks[name] = task

  def end(self, name, sync=True, **meta):
    """Finish a named task; raises ValueError if it was never started.

    Bug fix: `pop(name)` used to raise KeyError before the intended
    ValueError could fire; a defaulted pop restores the documented error.
    """
    task = self.active_tasks.pop(name, None)
    if not task:
      raise ValueError(f"{name} is not an active task so can't be ended")
    task.end(sync=sync, meta=meta)
    if self.log:
      print('Ended', task.name, ', took', task.seconds, 'seconds')

  def task(self, name, sync=True, **meta):
    """Create, start and record a new Task (does not register it as active)."""
    task = Task.begin(name=name, meta=meta, sync=sync)
    self.tasks.append(task)
    return task

  def __enter__(self):
    self.start(self.name)
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    self.end(self.name)
def plot(self):
plot_timeline(self.tasks) | 27.160377 | 102 | 0.661688 | 2,622 | 0.910733 | 0 | 0 | 301 | 0.10455 | 0 | 0 | 384 | 0.13338 |
b4d23f41582bbbee77cde2520e06f0addf9b2c8b | 6,731 | py | Python | conans/client/client_cache.py | jbaruch/conan | 263722b5284828c49774ffe18d314b24ee11e178 | [
"MIT"
] | null | null | null | conans/client/client_cache.py | jbaruch/conan | 263722b5284828c49774ffe18d314b24ee11e178 | [
"MIT"
] | null | null | null | conans/client/client_cache.py | jbaruch/conan | 263722b5284828c49774ffe18d314b24ee11e178 | [
"MIT"
] | 1 | 2021-03-03T17:15:46.000Z | 2021-03-03T17:15:46.000Z | import os
from conans.errors import ConanException
from conans.util.files import save, load, normalize
from conans.model.settings import Settings
from conans.client.conf import ConanClientConfigParser, default_client_conf, default_settings_yml
from conans.model.values import Values
from conans.client.detect import detect_defaults_settings
from conans.model.ref import ConanFileReference
from conans.model.manifest import FileTreeManifest
from conans.paths import SimplePaths, CONANINFO, PUT_HEADERS
from genericpath import isdir
from conans.model.info import ConanInfo
CONAN_CONF = 'conan.conf'
CONAN_SETTINGS = "settings.yml"
LOCALDB = ".conan.db"
REGISTRY = "registry.txt"
PROFILES_FOLDER = "profiles"
class ClientCache(SimplePaths):
""" Class to represent/store/compute all the paths involved in the execution
of conans commands. Accesses to real disk and reads/write things. (OLD client ConanPaths)
"""
    def __init__(self, base_folder, store_folder, output):
        """
        :param base_folder: user-level folder that will hold the ".conan" config dir
        :param store_folder: package storage root; falls back to conan.conf's
                             storage path, then to the ".conan" folder itself
        :param output: user-output object, used when auto-detecting default settings
        """
        self.conan_folder = os.path.join(base_folder, ".conan")
        self._conan_config = None  # lazily parsed conan.conf (see conan_config)
        self._settings = None  # lazily loaded settings.yml (see settings)
        self._output = output
        # Note: accessing self.conan_config here may create a default conan.conf
        self._store_folder = store_folder or self.conan_config.storage_path or self.conan_folder
        super(ClientCache, self).__init__(self._store_folder)
    @property
    def put_headers_path(self):
        # Path of the file holding extra HTTP headers to send on uploads
        return os.path.join(self.conan_folder, PUT_HEADERS)
    def read_put_headers(self):
        """Parse the put-headers file into a {header_name: value} dict.

        Creates an empty file on first use. Each non-blank, non-"#" line must
        be "name=value"; any malformed line (or read failure) is reported as
        a single ConanException.
        """
        ret = {}
        if not os.path.exists(self.put_headers_path):
            save(self.put_headers_path, "")
            return ret
        try:
            contents = load(self.put_headers_path)
            for line in contents.splitlines():
                if line and not line.strip().startswith("#"):
                    tmp = line.split("=", 1)
                    if len(tmp) != 2:
                        raise Exception()  # converted to ConanException below
                    name = tmp[0].strip()
                    value = tmp[1].strip()
                    ret[str(name)] = str(value)
            return ret
        except Exception:
            raise ConanException("Invalid %s file!" % self.put_headers_path)
    @property
    def registry(self):
        # Path of registry.txt (configured remotes and ref->remote associations)
        return os.path.join(self.conan_folder, REGISTRY)
    @property
    def conan_config(self):
        """Lazily parsed conan.conf; generates a default config file (seeded
        with auto-detected settings) the first time it is missing."""
        def generate_default_config_file():
            # Detect default settings for this machine and persist them
            # together with the default client configuration template
            default_settings = detect_defaults_settings(self._output)
            default_setting_values = Values.from_list(default_settings)
            client_conf = default_client_conf + default_setting_values.dumps()
            save(self.conan_conf_path, normalize(client_conf))
        if not self._conan_config:
            if not os.path.exists(self.conan_conf_path):
                generate_default_config_file()
            self._conan_config = ConanClientConfigParser(self.conan_conf_path)
        return self._conan_config
@property
def localdb(self):
return os.path.join(self.conan_folder, LOCALDB)
@property
def conan_conf_path(self):
return os.path.join(self.conan_folder, CONAN_CONF)
@property
def profiles_path(self):
return os.path.join(self.conan_folder, PROFILES_FOLDER)
@property
def settings_path(self):
return os.path.join(self.conan_folder, CONAN_SETTINGS)
@property
def settings(self):
"""Returns {setting: [value, ...]} defining all the possible
settings and their values"""
if not self._settings:
# TODO: Read default environment settings
if not os.path.exists(self.settings_path):
save(self.settings_path, normalize(default_settings_yml))
settings = Settings.loads(default_settings_yml)
else:
content = load(self.settings_path)
settings = Settings.loads(content)
self.conan_config.settings_defaults(settings)
self._settings = settings
return self._settings
def conan_packages(self, conan_reference):
""" Returns a list of package_id from a local cache package folder """
assert isinstance(conan_reference, ConanFileReference)
packages_dir = self.packages(conan_reference)
try:
packages = [dirname for dirname in os.listdir(packages_dir)
if isdir(os.path.join(packages_dir, dirname))]
except: # if there isn't any package folder
packages = []
return packages
def conan_builds(self, conan_reference):
""" Returns a list of package ids from a local cache build folder """
assert isinstance(conan_reference, ConanFileReference)
builds_dir = self.builds(conan_reference)
try:
builds = [dirname for dirname in os.listdir(builds_dir)
if isdir(os.path.join(builds_dir, dirname))]
except: # if there isn't any package folder
builds = []
return builds
def load_manifest(self, conan_reference):
'''conan_id = sha(zip file)'''
filename = self.digestfile_conanfile(conan_reference)
return FileTreeManifest.loads(load(filename))
def load_package_manifest(self, package_reference):
'''conan_id = sha(zip file)'''
filename = self.digestfile_package(package_reference, short_paths=None)
return FileTreeManifest.loads(load(filename))
def read_package_recipe_hash(self, package_folder):
filename = os.path.join(package_folder, CONANINFO)
info = ConanInfo.loads(load(filename))
return info.recipe_hash
def conan_manifests(self, conan_reference):
digest_path = self.digestfile_conanfile(conan_reference)
if not os.path.exists(digest_path):
return None, None
return self._digests(digest_path)
def package_manifests(self, package_reference):
digest_path = self.digestfile_package(package_reference, short_paths=None)
if not os.path.exists(digest_path):
return None, None
return self._digests(digest_path)
def _digests(self, digest_path):
readed_digest = FileTreeManifest.loads(load(digest_path))
expected_digest = FileTreeManifest.create(os.path.dirname(digest_path))
return readed_digest, expected_digest
def delete_empty_dirs(self, deleted_refs):
for ref in deleted_refs:
ref_path = self.conan(ref)
for _ in range(4):
if os.path.exists(ref_path):
try: # Take advantage that os.rmdir does not delete non-empty dirs
os.rmdir(ref_path)
except OSError:
break # not empty
ref_path = os.path.dirname(ref_path)
| 38.462857 | 97 | 0.655326 | 6,019 | 0.894221 | 0 | 0 | 1,867 | 0.277373 | 0 | 0 | 755 | 0.112168 |