hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c34648b7e6fe0e43164dec6e0c0022e1e1efabdd | 1,485 | py | Python | fb/forms.py | pure-python/brainmate | 79c83e707a4811dd881832d22f17c29f33c4d7f2 | [
"Apache-2.0"
] | null | null | null | fb/forms.py | pure-python/brainmate | 79c83e707a4811dd881832d22f17c29f33c4d7f2 | [
"Apache-2.0"
] | 1 | 2016-04-14T14:42:52.000Z | 2016-04-14T14:42:52.000Z | fb/forms.py | pure-python/brainmate | 79c83e707a4811dd881832d22f17c29f33c4d7f2 | [
"Apache-2.0"
] | null | null | null | from django.forms import (
Form, CharField, Textarea, PasswordInput, ChoiceField, DateField,
ImageField, BooleanField, IntegerField, MultipleChoiceField
)
from django import forms
from fb.models import UserProfile
| 31.595745 | 102 | 0.682828 |
c346562511e160197f5f2be08e436cdf509a8cc0 | 28,863 | py | Python | Galaxy_Invander/user23_fTVPDKIDhRdCfUp.py | triump0870/Interactive_Programming_Python | 97e0f1f5639aecac683053ed742632db14dc6954 | [
"Apache-2.0"
] | 1 | 2015-06-09T22:40:15.000Z | 2015-06-09T22:40:15.000Z | Galaxy_Invander/user23_fTVPDKIDhRdCfUp.py | triump0870/Interactive_Programming_Python | 97e0f1f5639aecac683053ed742632db14dc6954 | [
"Apache-2.0"
] | null | null | null | Galaxy_Invander/user23_fTVPDKIDhRdCfUp.py | triump0870/Interactive_Programming_Python | 97e0f1f5639aecac683053ed742632db14dc6954 | [
"Apache-2.0"
] | null | null | null | # Simple implementation of GalaxyInvanders game
# Rohan Roy (India) - 3 Nov 2013
# www.codeskulptor.org/#user23_fTVPDKIDhRdCfUp
VER = "1.0"
# "add various aliens"
import simplegui, math, random, time
#Global const
FIELD_WIDTH = 850
FIELD_HEIGHT = 500
TOP_MARGIN = 75
LEFT_MARGIN = 25
ALIEN_WIDTH = 48
ALIEN_HEIGHT = 55
PLAYER_SPEED = 10
BULLET_SPEED = 10
BULLET_POWER = 1
BONUS_SPEED = 10
ALIEN_SPEED = [3, 5]
# Images:
pImage = simplegui.load_image('https://dl.dropbox.com/s/zhnjucatewcmfs4/player.png')
aImages = []
for i in range(7):
aImages.append([])
aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/0cck7w6r0mt8pzz/alien_1_1.png'))
aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/j0kubnhzajbdngu/alien_1_2.png'))
aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/zkeu6hqh9bakj25/alien_1_3.png'))
aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/e75mkcylat70lnd/alien_2_1.png'))
aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/pgjvaxg0z6rhco9/alien_2_2.png'))
aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/en0hycfsi3cuzuo/alien_2_3.png'))
aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/fu9weoll70acs8f/alien_3_1.png'))
aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/b2rxru2nt5q2r1u/alien_3_2.png'))
aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/x66vgj9fc2jlg53/alien_3_3.png'))
aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/7o04ljg52kniyac/alien_4_1.png'))
aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/b3v6tvami0rvl6r/alien_4_2.png'))
aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/j451arcevsag36h/alien_4_3.png'))
aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/jlhdigkm79nncnm/alien_5_1.png'))
aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/wvlvjsa8yl6gka3/alien_5_2.png'))
aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/rrg4y1tnsbrh04r/alien_5_3.png'))
aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/oufyfy590tzf7cx/alien_6_1.png'))
aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/p4ehd9f6mo2xfzc/alien_6_2.png'))
aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/815gq3xyh6wmc0t/alien_6_3.png'))
aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/bv4ycocuomsvj50/alien_7_1.png'))
aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/krs2gtvdxxve79z/alien_7_2.png'))
aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/v2wczi8lxwczq87/alien_7_3.png'))
#backgrounds
bckg = []
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ibfu2t9vrh4bhxd/back01.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/pcl8vzby25ovis8/back02.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/g8nwo1t9s4i9usg/back03.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ee8oilluf7pe98h/back04.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/7jfgjoxinzwwlx4/back05.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/wh01g2q3607snvz/back06.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/b72ltp2xii9utnr/back07.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/av73jek8egezs1w/back08.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ik54ttfklv3x3ai/back09.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/e9e6kpyg3yuoenc/back10.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/zrabwnnvlwvn7it/back11.jpg"))
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/a2infkx0rmn8b8m/back12.jpg"))
# sounds
sndPlayer = simplegui.load_sound('https://dl.dropbox.com/s/vl3as0o2m2wvlwu/player_shoot.wav')
sndAlien = simplegui.load_sound('https://dl.dropbox.com/s/m4x0tldpze29hcr/alien_shoot.wav')
sndPlayerExplosion = simplegui.load_sound('https://dl.dropbox.com/s/10fn2wh7kk7uoxh/explosion%2001.wav')
sndAlienHit = simplegui.load_sound('https://dl.dropbox.com/s/80qdvup27n8j6r1/alien_hit.wav')
sndAlienExplosion = simplegui.load_sound('https://dl.dropbox.com/s/qxm3je9vdlb469g/explosion_02.wav')
sndBonus = simplegui.load_sound('https://dl.dropbox.com/s/tzp7e20e5v19l01/bonus.wav')
sndPause = simplegui.load_sound('https://dl.dropbox.com/s/uzs9nixpd22asno/pause.wav')
sndTheme = simplegui.load_sound('https://dl.dropbox.com/s/52zo892uemfkuzm/theme_01.mp3')
sounds = [sndPlayer, sndAlien, sndPlayerExplosion, sndAlienExplosion, \
sndBonus, sndPause, sndTheme, sndAlienHit]
#Global variables
GameRunning = False
GameEnded = False
player_speed = 0
mes = ""
timer_counter = 0
lives = 0
level = 1
scores = 0
killed = 0
current_back = 0
paused = False
shoot_count = 0
level_time = []
ready, go = False, False
#player = [FIELD_WIDTH //2, FIELD_HEIGHT - 30 + TOP_MARGIN]
#game objects
user_bullet = []
weapon_level = 1
weapon_speed = BULLET_SPEED
alien_bullets = []
alien_fleet = None
player = None
frame = None
aTimer = None
dTimer = None
bonuses = []
dCounter = 0
back = False
bonus_count = [0, 0, 0, 0]
player_killed = False
player_killed_at = 0
level_map = []
for i in range(7):
level_map.append([])
level_map[0] = [ 0, 0, 0, 0]
level_map[1] = [129, 0, 0, 0]
level_map[2] = [195, 129, 0, 0]
level_map[3] = [255, 195, 60, 0]
level_map[4] = [255, 231, 195, 195]
level_map[5] = [255, 255, 231, 195]
level_map[6] = [255, 255, 255, 231]
#helper functions
def dummy(key):
return key
def pause():
global paused
paused = not paused
sndPause.play()
def draw_user_image(canvas, point):
# draw a image of user ship
#
global player
if pImage.get_width()==0:
canvas.draw_circle(point, 12, 5, "Yellow")
else:
canvas.draw_image(pImage, (25, 36), (49, 72), point, (34, 50))
player.width = pImage.get_width()
player.height = pImage.get_height()
return canvas
def draw_lives(canvas):
# draw lives counter
canvas.draw_text("Lives : ", [30, 25], 25, "Red")
if player<>None:
player.draw_lives_counter(canvas)
return canvas
def draw_weapons(canvas):
canvas.draw_text("Weapon : ", [30, 60], 25, "Red")
canvas.draw_text("Rocket lvl: "+str(int(weapon_level)), [135, 60], 25, "Yellow")
canvas.draw_text("WS:"+str(weapon_speed/10.0), [280, 48], 10, "00c5fe")
canvas.draw_text("WP:"+str(player.power), [280, 61], 10, "00c5fe")
return canvas
def draw_level(canvas):
canvas.draw_text("Level : ", [FIELD_WIDTH-200, 50], 50, "Red")
canvas.draw_text(str(level), [FIELD_WIDTH-50, 50], 50, "Yellow")
return canvas
def draw_scores(canvas):
canvas.draw_text(str(int(scores)), [400, 50], 50, "LightBlue")
return canvas
def draw_screen(canvas):
# border of board
canvas.draw_image(bckg[current_back], (425, 250), (850, 500), \
(LEFT_MARGIN+FIELD_WIDTH//2, TOP_MARGIN+FIELD_HEIGHT//2),\
(FIELD_WIDTH, FIELD_HEIGHT))
canvas.draw_polygon([[LEFT_MARGIN, TOP_MARGIN],
[LEFT_MARGIN, FIELD_HEIGHT+TOP_MARGIN],
[FIELD_WIDTH+LEFT_MARGIN, FIELD_HEIGHT+TOP_MARGIN],
[FIELD_WIDTH+LEFT_MARGIN, TOP_MARGIN]], 2, 'Orange')
return canvas
def draw_start_screen(canvas):
img_count = 1 + len(aImages)*(len(aImages[0])) + len(bckg)
loaded_img_count = 0
if pImage.get_width()<>0:
loaded_img_count += 1
for bImage in bckg:
if bImage.get_width()<>0:
loaded_img_count += 1
for aImg in aImages:
for img in aImg:
if img.get_width()<>0:
loaded_img_count += 1
loaded_sounds = 0
for snd in sounds:
if snd <> None:
loaded_sounds += 1
draw_text(canvas, "SPACE INVANDERS", [220, 150], 50, [3, 3], ["blue", "yellow"])
canvas.draw_text("ver. - "+VER, [600, 170], 20, "yellow")
canvas.draw_text("03 nov. 2013", [600, 190], 20, "yellow")
draw_text(canvas, "CONTROLS:", [110, 210], 24, [2, 2], ["green", "yellow"])
draw_text(canvas, "Arrows - to left and right, space - to fire, P to pause game", [110, 240], 24, [2, 2], ["green", "yellow"])
draw_text(canvas, "Bonuses: ", [110, 280], 24, [2, 2], ["green", "yellow"])
b = Bonus(0, [125, 310])
b.draw(canvas)
draw_text(canvas, " - increase user's bullet speed", [150, 320], 24, [2, 2], ["green", "yellow"])
b = Bonus(1, [125, 350])
b.draw(canvas)
draw_text(canvas, " - increase user's bullet number", [150, 360], 24, [2, 2], ["green", "yellow"])
b = Bonus(2, [125, 390])
b.draw(canvas)
draw_text(canvas, " - add life", [150, 400], 24, [2, 2], ["green", "yellow"])
b = Bonus(3, [125, 430])
b.draw(canvas)
draw_text(canvas, " - increase weapon power", [150, 440], 24, [2, 2], ["green", "yellow"])
if loaded_img_count<img_count:
draw_text(canvas, "Please, wait for loading...", [280, 500], 40, [3, 3], ["Blue", "Yellow"])
s = "Loaded "+str(loaded_img_count)+" images of "+str(img_count)
draw_text(canvas, s, [110, 550], 20, [2, 2], ["Blue", "yellow"])
s = "Loaded "+str(loaded_sounds)+" sounds of "+str(len(sounds))
draw_text(canvas, s, [510, 550], 20, [2, 2], ["Blue", "yellow"])
else:
draw_text(canvas, "Click to start game", [300, 500], 40, [3, 3], ["Blue", "yellow"])
frame.set_mouseclick_handler(click_handler)
return canvas
def draw_end_screen(canvas):
draw_text(canvas, "Game over!", [350, 180], 50, [2, 2], ["Blue", "Yellow"])
draw_text(canvas, "Your score is "+str(int(scores)), [330, 240], 35, [2, 2], ["blue", "Yellow"])
draw_text(canvas, "You shoot "+str(int(shoot_count))+" times", [150, 320], 24, [2, 2], ["blue", "Yellow"])
draw_text(canvas, "You kill a "+str(killed)+" aliens", [150, 360], 24, [2, 2], ["blue", "Yellow"])
if shoot_count == 0:
s = "0"
else:
s = str(int(10000*float(killed)/shoot_count)/100.0)
draw_text(canvas, "Your accuracy is "+s+"%", [150, 400], 24, [2, 2], ["blue", "Yellow"])
i = 0
for bc in bonus_count:
b = Bonus(i, [505, 310 + 40*i])
b.draw(canvas)
draw_text(canvas, " - used "+str(bonus_count[i])+" times", [530, 320+40*i], 24, [2, 2], ["blue", "yellow"])
i += 1
draw_text(canvas, "Click to start new game", [300, 500], 40, [2, 2], ["blue", "Yellow"])
canvas.draw_text("ver. - "+VER, [600, 540], 15, "yellow");
return canvas
def draw_game_objects(canvas):
player.draw(canvas)
#draw_user_image(canvas, Player)
for bullet in alien_bullets:
bullet.draw(canvas)
for bullet in user_bullet:
bullet.draw(canvas)
for bonus in bonuses:
bonus.draw(canvas)
alien_fleet.draw(canvas)
readyGo()
if paused:
draw_text(canvas, "P A U S E", [380, 350], 50, [2, 2], ["Green", "Yellow"])
if ready:
draw_text(canvas, "R E A D Y", [380, 350], 50, [2, 2], ["Green", "Yellow"])
if go:
draw_text(canvas, "G O ! ! !", [380, 350], 50, [2, 2], ["Green", "Yellow"])
sndTheme.play()
return canvas
def moving_objects():
global timer_counter
if not GameRunning:
return None
if paused or ready or go or player_killed:
return None
timer_counter += 1
player.move()
for alien in alien_fleet.aliens:
if alien.flying:
alien.move([0,0])
if isBulletHit(alien, player):
player.explode()
if alien.y>FIELD_HEIGHT + TOP_MARGIN+20:
alien.y = TOP_MARGIN
for bonus in bonuses:
bonus.move();
if bonus.y > FIELD_HEIGHT + TOP_MARGIN+20:
bonuses.remove(bonus)
if isBulletHit(bonus, player):
bonus.execute()
bonuses.remove(bonus)
for bullet in user_bullet:
bullet.move()
alien_fleet.check_death()
for bullet in user_bullet:
if bullet.y<TOP_MARGIN+25:
user_bullet.remove(bullet)
# for bullet in alien_bullets:
bullets_to_delete = []
for bullet in list(alien_bullets):
bullet.move()
if bullet.y > FIELD_HEIGHT + TOP_MARGIN -10:
bullets_to_delete.append(bullet)
if isBulletHit(bullet, player):
player.explode()
for bullet in bullets_to_delete:
if bullet in alien_bullets:
alien_bullets.remove(bullet)
alien_fleet.make_shoot()
alien_fleet.alien_fly()
if level<30:
x = 60 - level
else:
x = 1
if timer_counter % x == 0:
alien_fleet.move_side()
if timer_counter % (100 + x) == 0:
alien_fleet.move_down()
if alien_fleet.get_aliens_count() == 0:
new_level()
# Handler to draw on canvas
#Initialization and start of game
# Handler for mouse click
#### keydown_handler
#### keyup_handler to stop keydown
# Create a frame and assign callbacks to event handlers
frame = simplegui.create_frame("Galaxian", 900, 600, 0)
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
frame.set_keyup_handler(keyup)
aTimer = simplegui.create_timer(60, moving_objects)
aTimer.start()
# Start the frame animation
frame.start()
| 35.988778 | 131 | 0.562346 |
c3482f320f27c64a0db4f2f20db98025fee332ce | 1,664 | py | Python | components/py-flask-wa/app.py | ajayns/amoc-project | c22ae62789568c1a784f165fbd4547ac20c290a0 | [
"MIT"
] | 26 | 2017-04-21T06:05:44.000Z | 2020-03-09T11:41:34.000Z | components/py-flask-wa/app.py | ajayns/amoc-project | c22ae62789568c1a784f165fbd4547ac20c290a0 | [
"MIT"
] | 6 | 2017-04-16T03:53:28.000Z | 2019-02-26T07:02:48.000Z | components/py-flask-wa/app.py | ajayns/amoc-project | c22ae62789568c1a784f165fbd4547ac20c290a0 | [
"MIT"
] | 5 | 2017-06-09T06:44:59.000Z | 2019-12-13T07:34:11.000Z | from flask import Flask, jsonify, request, render_template, redirect
from flask_pymongo import PyMongo
from werkzeug import secure_filename
import base64
app = Flask(__name__)
app.config['MONGO_DBNAME'] = 'restdb'
app.config['MONGO_URI'] = 'mongodb://localhost:27017/restdb'
mongo = PyMongo(app)
if __name__ == '__main__':
app.run(debug=True)
| 24.115942 | 73 | 0.658053 |
c34951b6e45c7100c95839ca25a8df621a593d38 | 2,190 | py | Python | wc_lang/util.py | KarrLab/wc_lang | 113a8b473576fa9c13688d2deb71b4b2ab400a03 | [
"MIT"
] | 7 | 2018-05-14T09:26:14.000Z | 2021-05-20T01:11:45.000Z | wc_lang/util.py | KarrLab/wc_lang | 113a8b473576fa9c13688d2deb71b4b2ab400a03 | [
"MIT"
] | 142 | 2018-03-14T16:50:56.000Z | 2021-01-03T16:25:23.000Z | wc_lang/util.py | KarrLab/wc_lang | 113a8b473576fa9c13688d2deb71b4b2ab400a03 | [
"MIT"
] | 4 | 2019-01-06T08:32:23.000Z | 2021-05-20T01:11:49.000Z | """ Utilities
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2016-11-10
:Copyright: 2016, Karr Lab
:License: MIT
"""
from obj_tables import get_models as base_get_models
from wc_lang import core
from wc_lang import io
from wc_utils.util import git
def get_model_size(model):
""" Get numbers of model components
Args:
model (:obj:`core.Model`): model
Returns:
:obj:`dict`: dictionary with numbers of each type of model component
"""
return {
"submodels": len(model.get_submodels()),
"compartments": len(model.get_compartments()),
"species_types": len(model.get_species_types()),
"species": len(model.get_species()),
"parameters": len(model.get_parameters()),
"references": len(model.get_references()),
"reactions": len(model.get_reactions()),
}
def get_model_summary(model):
""" Get textual summary of a model
Args:
model (:obj:`core.Model`): model
Returns:
:obj:`str`: textual summary of the model
"""
return "Model with:" \
+ "\n{:d} submodels".format(len(model.get_submodels())) \
+ "\n{:d} compartments".format(len(model.get_compartments())) \
+ "\n{:d} species types".format(len(model.get_species_types())) \
+ "\n{:d} species".format(len(model.get_species())) \
+ "\n{:d} parameters".format(len(model.get_parameters())) \
+ "\n{:d} references".format(len(model.get_references())) \
+ "\n{:d} dFBA objective reactions".format(len(model.get_dfba_obj_reactions())) \
+ "\n{:d} reactions".format(len(model.get_reactions())) \
+ "\n{:d} rate laws".format(len(model.get_rate_laws()))
def get_models(inline=True):
""" Get list of models
Args:
inline (:obj:`bool`, optional): if true, return inline models
Returns:
:obj:`list` of :obj:`class`: list of models
"""
return base_get_models(module=core, inline=inline)
def gen_ids(model):
""" Generate ids for model objects
Args:
model (:obj:`core.Model`): model
"""
for obj in model.get_related():
if hasattr(obj, 'gen_id'):
obj.id = obj.gen_id()
| 28.441558 | 89 | 0.616895 |
c349777d037bf08d8ee79327a13369ab404b7431 | 5,267 | py | Python | synapse/tests/test_tools_autodoc.py | kcreyts/synapse | fe740fd1e0febfa32f8d431b32ab48f8a0cf306e | [
"Apache-2.0"
] | 1 | 2021-02-15T22:07:05.000Z | 2021-02-15T22:07:05.000Z | synapse/tests/test_tools_autodoc.py | kcreyts/synapse | fe740fd1e0febfa32f8d431b32ab48f8a0cf306e | [
"Apache-2.0"
] | null | null | null | synapse/tests/test_tools_autodoc.py | kcreyts/synapse | fe740fd1e0febfa32f8d431b32ab48f8a0cf306e | [
"Apache-2.0"
] | null | null | null | import synapse.common as s_common
import synapse.tests.utils as s_t_utils
import synapse.tools.autodoc as s_autodoc
| 41.801587 | 136 | 0.566926 |
c34a6dd4d560ec8071e9109e3ca674e32bbace38 | 4,174 | py | Python | tf_idf.py | ricosr/retrieval_chatbot | 567e860f09771cae19e32b3bf20b5ce87266cda6 | [
"MIT"
] | 16 | 2018-12-04T13:55:56.000Z | 2021-11-21T05:53:57.000Z | tf_idf.py | ricosr/retrieval_chatbot | 567e860f09771cae19e32b3bf20b5ce87266cda6 | [
"MIT"
] | 5 | 2019-05-21T12:40:18.000Z | 2019-05-31T18:23:51.000Z | tf_idf.py | ricosr/retrieval_chatbot | 567e860f09771cae19e32b3bf20b5ce87266cda6 | [
"MIT"
] | 4 | 2018-11-22T13:45:05.000Z | 2019-09-16T16:30:28.000Z | # -*- coding: utf-8 -*-
import pickle
import os
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.externals import joblib
from sklearn.metrics.pairwise import cosine_similarity
| 36.614035 | 94 | 0.626977 |
c34b267716c64dbcac0061ea5f7b0de5338ff153 | 19,015 | py | Python | d373c7/pytorch/models/classifiers.py | t0kk35/d373c7 | 7780b97545e581244fb4fb74347bb1b052b9ec3f | [
"Apache-2.0"
] | 1 | 2021-07-23T18:04:55.000Z | 2021-07-23T18:04:55.000Z | d373c7/pytorch/models/classifiers.py | t0kk35/d373c7 | 7780b97545e581244fb4fb74347bb1b052b9ec3f | [
"Apache-2.0"
] | null | null | null | d373c7/pytorch/models/classifiers.py | t0kk35/d373c7 | 7780b97545e581244fb4fb74347bb1b052b9ec3f | [
"Apache-2.0"
] | null | null | null | """
Module for classifier Models
(c) 2020 d373c7
"""
import logging
import torch
import torch.nn as nn
from .common import PyTorchModelException, ModelDefaults, _History, _ModelGenerated, _ModelStream
from .encoders import GeneratedAutoEncoder
from ..layers import LSTMBody, ConvolutionalBody1d, AttentionLastEntry, LinearEncoder, TensorDefinitionHead
from ..layers import TransformerBody, TailBinary
from ..loss import SingleLabelBCELoss
from ...features import TensorDefinition, TensorDefinitionMulti
from typing import List, Dict, Union
logger = logging.getLogger(__name__)
| 46.041162 | 120 | 0.645438 |
c34bda54e37900d299bfad9266c734ecc115936d | 5,369 | py | Python | qklnn/plots/hyperpar_scan.py | cambouvy/BSc-Thesis-Project | ca2504cb828ab068545e130eac393ceb34f2a457 | [
"MIT"
] | 1 | 2021-10-02T11:15:10.000Z | 2021-10-02T11:15:10.000Z | qklnn/plots/hyperpar_scan.py | cambouvy/BSc-Thesis-Project | ca2504cb828ab068545e130eac393ceb34f2a457 | [
"MIT"
] | null | null | null | qklnn/plots/hyperpar_scan.py | cambouvy/BSc-Thesis-Project | ca2504cb828ab068545e130eac393ceb34f2a457 | [
"MIT"
] | null | null | null | import re
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use("pdf")
import matplotlib.pyplot as plt
from matplotlib import gridspec
from peewee import AsIs, JOIN, prefetch, SQL
from IPython import embed
from bokeh.layouts import row, column
from bokeh.plotting import figure, show, output_file
from bokeh.transform import linear_cmap
from bokeh.models import (
ColumnDataSource,
Range1d,
LabelSet,
Label,
Rect,
HoverTool,
Div,
)
from qlknn.NNDB.model import (
Network,
PureNetworkParams,
PostprocessSlice,
NetworkMetadata,
TrainMetadata,
Postprocess,
db,
Hyperparameters,
)
from qlknn.plots.statistical_spread import get_base_stats
from qlknn.misc.to_precision import to_precision
# First, get some statistics
target_names = ["efeTEM_GB"]
hyperpars = ["cost_stable_positive_scale", "cost_l2_scale"]
# hyperpars = ['cost_stable_positive_scale', 'cost_stable_positive_offset']
goodness_pars = [
"rms",
"no_pop_frac",
"no_thresh_frac",
"pop_abs_mis_median",
"thresh_rel_mis_median",
"wobble_qlkunstab",
]
try:
report = get_base_stats(target_names, hyperpars, goodness_pars)
except Network.DoesNotExist:
report = pd.DataFrame(columns=goodness_pars, index=["mean", "stddev", "stderr"])
query = (
Network.select(
Network.id.alias("network_id"),
PostprocessSlice,
Postprocess.rms,
Hyperparameters,
)
.join(PostprocessSlice, JOIN.LEFT_OUTER)
.switch(Network)
.join(Postprocess, JOIN.LEFT_OUTER)
.switch(Network)
.where(Network.target_names == target_names)
.switch(Network)
.join(PureNetworkParams)
.join(Hyperparameters)
.where(Hyperparameters.cost_stable_positive_offset.cast("numeric") == -5)
.where(Hyperparameters.cost_stable_positive_function == "block")
)
if query.count() > 0:
results = list(query.dicts())
df = pd.DataFrame(results)
# df['network'] = df['network'].apply(lambda el: 'pure_' + str(el))
# df['l2_norm'] = df['l2_norm'].apply(np.nanmean)
df.drop(["id", "network"], inplace=True, axis="columns")
df.set_index("network_id", inplace=True)
stats = df
stats = stats.applymap(np.array)
stats = stats.applymap(lambda x: x[0] if isinstance(x, np.ndarray) and len(x) == 1 else x)
stats.dropna(axis="columns", how="all", inplace=True)
stats.dropna(axis="rows", how="all", inplace=True)
stats = stats.loc[:, hyperpars + goodness_pars]
stats.reset_index(inplace=True)
# stats.set_index(hyperpars, inplace=True)
# stats.sort_index(ascending=False, inplace=True)
# stats = stats.groupby(level=list(range(len(stats.index.levels)))).mean() #Average equal hyperpars
# stats.reset_index(inplace=True)
aggdict = {"network_id": lambda x: tuple(x)}
aggdict.update({name: "mean" for name in goodness_pars})
stats_mean = stats.groupby(hyperpars).agg(aggdict)
aggdict.update({name: "std" for name in goodness_pars})
stats_std = stats.groupby(hyperpars).agg(aggdict)
stats = stats_mean.merge(stats_std, left_index=True, right_index=True, suffixes=("", "_std"))
stats.reset_index(inplace=True)
for name in hyperpars:
stats[name] = stats[name].apply(str)
for name in goodness_pars:
fmt = lambda x: "" if np.isnan(x) else to_precision(x, 4)
fmt_mean = stats[name].apply(fmt)
stats[name + "_formatted"] = fmt_mean
fmt = lambda x: "" if np.isnan(x) else to_precision(x, 2)
fmt_std = stats[name + "_std"].apply(fmt)
prepend = lambda x: "+- " + x if x != "" else x
stats[name + "_std_formatted"] = fmt_std.apply(prepend)
x = np.unique(stats[hyperpars[1]].values)
x = sorted(x, key=lambda x: float(x))
y = np.unique(stats[hyperpars[0]].values)
y = sorted(y, key=lambda x: float(x))
source = ColumnDataSource(stats)
plotmode = "bokehz"
hover = HoverTool(
tooltips=[
("network_id", "@network_id"),
(hyperpars[0], "@" + hyperpars[0]),
(hyperpars[1], "@" + hyperpars[1]),
]
)
plots = []
for statname in goodness_pars:
fmt = lambda x: "" if np.isnan(x) else to_precision(x, 2)
title = "{:s} (ref={:s}{:s})".format(
statname,
fmt(report[statname]["mean"]),
fmt(report[statname]["stddev"] + report[statname]["stderr"]),
)
p = figure(title=title, tools="tap", toolbar_location=None, x_range=x, y_range=y)
p.add_tools(hover)
color = linear_cmap(statname, "Viridis256", min(stats[statname]), max(stats[statname]))
p.rect(
x=hyperpars[1],
y=hyperpars[0],
width=1,
height=1,
source=source,
fill_color=color,
line_color=None,
nonselection_fill_alpha=0.4,
nonselection_fill_color=color,
)
non_selected = Rect(fill_alpha=0.8)
label_kwargs = dict(
x=hyperpars[1],
y=hyperpars[0],
level="glyph",
source=source,
text_align="center",
text_color="red",
)
labels = LabelSet(text=statname + "_formatted", text_baseline="bottom", **label_kwargs)
labels_std = LabelSet(text=statname + "_std_formatted", text_baseline="top", **label_kwargs)
p.add_layout(labels)
p.add_layout(labels_std)
p.xaxis.axis_label = hyperpars[1]
p.yaxis.axis_label = hyperpars[0]
plots.append(p)
from bokeh.layouts import layout, widgetbox
title = Div(text=",".join(target_names))
l = layout([[title], [plots]])
show(l)
| 30.856322 | 99 | 0.67778 |
c34c38cdb59ab3adcb9297c65de4aee9cda600b1 | 1,328 | py | Python | py3canvas/tests/shared_brand_configs.py | tylerclair/py3canvas | 7485d458606b65200f0ffa5bbe597a9d0bee189f | [
"MIT"
] | null | null | null | py3canvas/tests/shared_brand_configs.py | tylerclair/py3canvas | 7485d458606b65200f0ffa5bbe597a9d0bee189f | [
"MIT"
] | null | null | null | py3canvas/tests/shared_brand_configs.py | tylerclair/py3canvas | 7485d458606b65200f0ffa5bbe597a9d0bee189f | [
"MIT"
] | null | null | null | """SharedBrandConfigs API Tests for Version 1.0.
This is a testing template for the generated SharedBrandConfigsAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.shared_brand_configs import SharedBrandConfigsAPI
from py3canvas.apis.shared_brand_configs import Sharedbrandconfig
| 37.942857 | 126 | 0.752259 |
c34deabfbf09d812a3e974c9b52d0665996b8dda | 1,095 | py | Python | apps/cars/tests/api/abstract/abstract_base_api_test.py | agorsk1/car-rating-app | 354c5933f4cbad69c9a57d1839f9086cd5cf9a1d | [
"MIT"
] | 1 | 2022-03-03T11:15:25.000Z | 2022-03-03T11:15:25.000Z | apps/cars/tests/api/abstract/abstract_base_api_test.py | agorsk1/car-rating-app | 354c5933f4cbad69c9a57d1839f9086cd5cf9a1d | [
"MIT"
] | null | null | null | apps/cars/tests/api/abstract/abstract_base_api_test.py | agorsk1/car-rating-app | 354c5933f4cbad69c9a57d1839f9086cd5cf9a1d | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from django.test import TestCase
from rest_framework.generics import GenericAPIView
from rest_framework.test import APIRequestFactory
from apps.cars.factory import UserFactory
| 29.594595 | 72 | 0.621918 |
c34e3c84ae9852ef18383b6753e4f283c886e50c | 995 | py | Python | templating-tool.py | salayatana66/vw-serving-flask | 7b91f986b0e03e9784cf481b1f8833508dc40bfb | [
"BSD-2-Clause-FreeBSD"
] | 4 | 2020-10-01T17:31:00.000Z | 2021-05-09T12:21:41.000Z | templating-tool.py | salayatana66/vw-serving-flask | 7b91f986b0e03e9784cf481b1f8833508dc40bfb | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | templating-tool.py | salayatana66/vw-serving-flask | 7b91f986b0e03e9784cf481b1f8833508dc40bfb | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2020-10-01T17:31:01.000Z | 2020-10-02T17:48:01.000Z | """
A simple templating tool for Dockerfiles
"""
import sys
import os
import click
import jinja2
import yaml
cli.add_command(from_yaml)
if __name__ == '__main__':
cli()
| 19.509804 | 61 | 0.648241 |
c3508126a16d94b29f0bc62586532976da28f49d | 11,552 | py | Python | verbforms.py | wmcooper2/Clean-Code-English-Tests | a966ed40e13608a75bb618d35bf812d9229cacc3 | [
"MIT"
] | null | null | null | verbforms.py | wmcooper2/Clean-Code-English-Tests | a966ed40e13608a75bb618d35bf812d9229cacc3 | [
"MIT"
] | 1 | 2018-09-02T12:46:41.000Z | 2018-09-02T12:55:30.000Z | verbforms.py | wmcooper2/TotalEnglishAssistant | a966ed40e13608a75bb618d35bf812d9229cacc3 | [
"MIT"
] | null | null | null | """File for holding the different verb forms for all of the verbs in the Total
English book series."""
verb_forms = {
'become' :
{
'normal' : 'become',
'present' : ['become','becomes'],
'past' : 'became',
'past participle' : 'become',
'gerund' : 'becoming',
},
'be':
{
'normal' : 'be',
'present' : ['am','is','are'],
'past' : ['was', 'were'],
'past participle' : 'been',
'gerund' : 'being',
},
'begin':
{
'normal' : 'begin',
'present' : ['begin','begins'],
'past' : 'began',
'past participle' : 'begun',
'gerund' : 'beginning',
},
'blow':
{
'normal' : 'blow',
'present' : ['blow', 'blows'],
'past' : 'blew',
'past participle' : 'blown',
'gerund' : 'blowing',
},
'bring':
{
'normal' : 'bring',
'present' : ['bring','brings'],
'past' : 'brought',
'past participle' : 'brought',
'gerund' : 'bringing',
},
'build':
{
'normal' : 'build',
'present' : ['build','builds'],
'past' : 'built',
'past participle' : 'built',
'gerund' : 'building',
},
'burn':
{
'normal' : 'burn',
'present' : ['burn','burns'],
'past' : ['burned','burnt'],
'past participle' : ['burned','burnt'],
'gerund' : 'burning',
},
'buy':
{
'normal' : 'buy',
'present' : ['buy','buys'],
'past' : 'bought',
'past participle' : 'bought',
'gerund' : 'buying',
},
'catch':
{
'normal' : 'catch',
'present' : ['catch','catches'],
'past' : 'caught',
'past participle' : 'caught',
'gerund' : 'catching',
},
'choose':
{
'normal' : 'choose',
'present' : ['choose','chooses'],
'past' : 'chose',
'past participle' : 'chosen',
'gerund' : 'choosing',
},
'come':
{
'normal' : 'come',
'present' : ['come','comes'],
'past' : 'came',
'past participle' : 'come',
'gerund' : 'coming',
},
'cut':
{
'normal' : 'cut',
'present' : ['cut','cuts'],
'past' : 'cut',
'past participle' : 'cut',
'gerund' : 'cutting',
},
'do':
{
'normal' : 'do',
'present' : ['do','does'],
'past' : 'did',
'past participle' : 'done',
'gerund' : 'doing',
},
'drink':
{
'normal' : 'drink',
'present' : ['drink','drinks'],
'past' : 'drank',
'past participle' : 'drunk',
'gerund' : 'drinking',
},
'eat':
{
'normal' : 'eat',
'present' : ['eat','eats'],
'past' : 'ate',
'past participle' : 'eaten',
'gerund' : 'eating',
},
'feel':
{
'normal' : 'feel',
'present' : ['feel','feels'],
'past' : 'felt',
'past participle' : 'felt',
'gerund' : 'feeling',
},
'fight':
{
'normal' : 'fight',
'present' : ['fight','fights'],
'past' : 'fought',
'past participle' : 'fought',
'gerund' : 'fighting',
},
'find':
{
'normal' : 'find',
'present' : ['find','finds'],
'past' : 'found',
'past participle' : 'found',
'gerund' : 'finding',
},
'fly':
{
'normal' : 'fly',
'present' : ['fly','flies'],
'past' : 'flew',
'past participle' : 'flown',
'gerund' : 'flying',
},
'forget':
{
'normal' : 'forget',
'present' : ['forget','forgets'],
'past' : 'forgot',
'past participle' : ['forgotten','forgot'],
'gerund' : 'forgetting',
},
'get':
{
'normal' : 'get',
'present' : ['get','gets'],
'past' : 'got',
'past participle' : ['gotten','got'],
'gerund' : 'getting',
},
'give':
{
'normal' : 'give',
'present' : ['give','gives'],
'past' : 'gave',
'past participle' : 'given',
'gerund' : 'giving',
},
'go':
{
'normal' : 'go',
'present' : ['go','goes'],
'past' : 'went',
'past participle' : 'gone',
'gerund' : 'going',
},
'grow':
{
'normal' : 'grow',
'present' : ['grow','grows'],
'past' : 'grew',
'past participle' : 'grown',
'gerund' : 'growing',
},
'have':
{
'normal' : 'have',
'present' : ['have','has'],
'past' : 'had',
'past participle' : 'had',
'gerund' : 'having',
},
'hear':
{
'normal' : 'hear',
'present' : ['hear','hears'],
'past' : 'heard',
'past participle' : 'heard',
'gerund' : 'hearing',
},
'hit':
{
'normal' : 'hit',
'present' : ['hit','hits'],
'past' : 'hit',
'past participle' : 'hit',
'gerund' : 'hitting',
},
'hold':
{
'normal' : 'hold',
'present' : ['hold','holds'],
'past' : 'held',
'past participle' : 'held',
'gerund' : 'holding',
},
'hurt':
{
'normal' : 'hurt',
'present' : ['hurt','hurts'],
'past' : 'hurt',
'past participle' : 'hurt',
'gerund' : 'hurting',
},
'keep':
{
'normal' : 'keep',
'present' : ['keep','keeps'],
'past' : 'kept',
'past participle' : 'kept',
'gerund' : 'keeping',
},
'know':
{
'normal' : 'know',
'present' : ['know','knows'],
'past' : 'knew',
'past participle' : 'known',
'gerund' : 'knowing',
},
'lead':
{
'normal' : 'lead',
'present' : ['lead','leads'],
'past' : 'led',
'past participle' : 'led',
'gerund' : 'leading',
},
'leave':
{
'normal' : 'leave',
'present' : ['leave','leaves'],
'past' : 'left',
'past participle' : 'left',
'gerund' : 'leaving',
},
'lend':
{
'normal' : 'lend',
'present' : ['lend','lends'],
'past' : 'lent',
'past participle' : 'lent',
'gerund' : 'lending',
},
'lie':
{
'normal' : 'lie',
'present' : ['lie','lies'],
'past' : 'lay',
'past participle' : 'lain',
'gerund' : 'lying',
},
'lose':
{
'normal' : 'lose',
'present' : ['lose','loses'],
'past' : 'lost',
'past participle' : 'lost',
'gerund' : 'losing',
},
'make':
{
'normal' : 'make',
'present' : ['make','makes'],
'past' : 'made',
'past participle' : 'made',
'gerund' : 'making',
},
'mean':
{
'normal' : 'mean',
'present' : ['mean','means'],
'past' : 'meant',
'past participle' : 'meant',
'gerund' : 'meaning',
},
'meet':
{
'normal' : 'meet',
'present' : ['meet','meets'],
'past' : 'met',
'past participle' : 'met',
'gerund' : 'meeting',
},
'put':
{
'normal' : 'put',
'present' : ['put','puts'],
'past' : 'put',
'past participle' : 'put',
'gerund' : 'putting',
},
'read':
{
'normal' : 'read',
'present' : ['read','reads'],
'past' : 'read',
'past participle' : 'read',
'gerund' : 'reading',
},
'ride':
{
'normal' : 'ride',
'present' : ['ride','rides'],
'past' : 'rode',
'past participle' : 'ridden',
'gerund' : 'riding',
},
'ring':
{
'normal' : 'ring',
'present' : ['ring','rings'],
'past' : 'rang',
'past participle' : 'rung',
'gerund' : 'ringing',
},
'run':
{
'normal' : 'run',
'present' : ['run','runs'],
'past' : 'ran',
'past participle' : 'run',
'gerund' : 'running',
},
'say':
{
'normal' : 'say',
'present' : ['say','says'],
'past' : 'said',
'past participle' : 'said',
'gerund' : 'saying',
},
'see':
{
'normal' : 'see',
'present' : ['see','sees'],
'past' : 'saw',
'past participle' : 'seen',
'gerund' : 'seeing',
},
'sell':
{
'normal' : 'sell',
'present' : ['sell','sells'],
'past' : 'sold',
'past participle' : 'sold',
'gerund' : 'selling',
},
'send':
{
'normal' : 'send',
'present' : ['send','sends'],
'past' : 'sent',
'past participle' : 'sent',
'gerund' : 'sending',
},
'shake':
{
'normal' : 'shake',
'present' : ['shake','shakes'],
'past' : 'shook',
'past participle' : 'shaken',
'gerund' : 'shaking',
},
'show':
{
'normal' : 'show',
'present' : ['show','shows'],
'past' : 'showed',
'past participle' : 'shown',
'gerund' : 'showing',
},
'shut':
{
'normal' : 'shut',
'present' : ['shut','shuts'],
'past' : 'shut',
'past participle' : 'shut',
'gerund' : 'shutting',
},
'sing':
{
'normal' : 'sing',
'present' : ['sing','sings'],
'past' : 'sang',
'past participle' : 'sung',
'gerund' : 'singing',
},
'sit':
{
'normal' : 'sit',
'present' : ['sit','sits'],
'past' : 'sat',
'past participle' : 'sat',
'gerund' : 'sitting',
},
'sleep':
{
'normal' : 'sleep',
'present' : ['sleep','sleeps'],
'past' : 'slept',
'past participle' : 'slept',
'gerund' : 'sleeping',
},
'smell':
{
'normal' : 'smell',
'present' : ['smell','smells'],
'past' : 'smelled,smelt',
'past participle' : 'smelled,smelt',
'gerund' : 'smelling',
},
'speak':
{
'normal' : 'speak',
'present' : ['speak','speaks'],
'past' : 'spoke',
'past participle' : 'spoken',
'gerund' : 'speaking',
},
'spend':
{
'normal' : 'spend',
'present' : ['spend','spends'],
'past' : 'spent',
'past participle' : 'spent',
'gerund' : 'spending',
},
'stand':
{
'normal' : 'stand',
'present' : ['stand','stands'],
'past' : 'stood',
'past participle' : 'stood',
'gerund' : 'standing',
},
'swim':
{
'normal' : 'swim',
'present' : ['swim','swims'],
'past' : 'swam',
'past participle' : 'swum',
'gerund' : 'swimming',
},
'take':
{
'normal' : 'take',
'present' : ['take','takes'],
'past' : 'took',
'past participle' : 'taken',
'gerund' : 'taking',
},
'teach':
{
'normal' : 'teach',
'present' : ['teach','teaches'],
'past' : 'taught',
'past participle' : 'taught',
'gerund' : 'teaching',
},
'tell':
{
'normal' : 'tell',
'present' : ['tell','tells'],
'past' : 'told',
'past participle' : 'told',
'gerund' : 'telling',
},
'think':
{
'normal' : 'think',
'present' : ['think','thinks'],
'past' : 'thought',
'past participle' : 'thought',
'gerund' : 'thinking',
},
'throw':
{
'normal' : 'throw',
'present' : ['throw','throws'],
'past' : 'threw',
'past participle' : 'thrown',
'gerund' : 'throwing',
},
'understand':
{
'normal' : 'understand',
'present' : ['understand','understands'],
'past' : 'understood',
'past participle' : 'understood',
'gerund' : 'unerstanding',
},
'wear':
{
'normal' : 'wear',
'present' : ['wear','wears'],
'past' : 'wore',
'past participle' : 'worn',
'gerund' : 'wearing',
},
'win':
{
'normal' : 'win',
'present' : ['win','wins'],
'past' : 'won',
'past participle' : 'won',
'gerund' : 'winning',
},
'write':
{
'normal' : 'write',
'present' : ['write','writes'],
'past' : 'wrote',
'past participle' : 'written',
'gerund' : 'writing',},}
| 18.814332 | 78 | 0.428497 |
c3518b43a0aa4df0b06d8f5ad7ea43c927361987 | 169 | py | Python | bokeh/themes/__init__.py | quasiben/bokeh | 738343bd18c851dfd1fdc82cf35fe3eb4cdfd475 | [
"BSD-3-Clause"
] | null | null | null | bokeh/themes/__init__.py | quasiben/bokeh | 738343bd18c851dfd1fdc82cf35fe3eb4cdfd475 | [
"BSD-3-Clause"
] | null | null | null | bokeh/themes/__init__.py | quasiben/bokeh | 738343bd18c851dfd1fdc82cf35fe3eb4cdfd475 | [
"BSD-3-Clause"
] | null | null | null | ''' Provides API for loading themes
'''
from __future__ import absolute_import
from os.path import join
from .theme import Theme
default = Theme(json={})
del join
| 12.071429 | 38 | 0.739645 |
c351ebb4f07cf7eccdee13a557a0b9df8efb0303 | 4,321 | py | Python | files/spam-filter/tracspamfilter/captcha/keycaptcha.py | Puppet-Finland/puppet-trac | ffdf467ba80ff995778c30b0bdc6dc3e7d4e6cd3 | [
"BSD-2-Clause"
] | null | null | null | files/spam-filter/tracspamfilter/captcha/keycaptcha.py | Puppet-Finland/puppet-trac | ffdf467ba80ff995778c30b0bdc6dc3e7d4e6cd3 | [
"BSD-2-Clause"
] | null | null | null | files/spam-filter/tracspamfilter/captcha/keycaptcha.py | Puppet-Finland/puppet-trac | ffdf467ba80ff995778c30b0bdc6dc3e7d4e6cd3 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Dirk Stcker <trac@dstoecker.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
import hashlib
import random
import urllib2
from trac.config import Option
from trac.core import Component, implements
from trac.util.html import tag
from tracspamfilter.api import user_agent
from tracspamfilter.captcha import ICaptchaMethod
| 38.238938 | 82 | 0.610507 |
c353ac7f88d4d2f15d7dbe0bb2a19e95c08d7680 | 3,222 | py | Python | app/model/causalnex.py | splunk/splunk-mltk-container-docker | 6e98e5984d99d7a3318f3e68c224d2a5163b717b | [
"Apache-2.0"
] | 20 | 2019-10-28T10:10:00.000Z | 2022-02-17T02:31:54.000Z | app/model/causalnex.py | splunk/splunk-mltk-container-docker | 6e98e5984d99d7a3318f3e68c224d2a5163b717b | [
"Apache-2.0"
] | 13 | 2019-11-22T16:00:02.000Z | 2022-01-12T10:57:08.000Z | app/model/causalnex.py | splunk/splunk-mltk-container-docker | 6e98e5984d99d7a3318f3e68c224d2a5163b717b | [
"Apache-2.0"
] | 15 | 2019-10-25T23:19:43.000Z | 2022-03-27T16:49:21.000Z | #!/usr/bin/env python
# coding: utf-8
# In[18]:
# this definition exposes all python module imports that should be available in all subsequent commands
import json
import numpy as np
import pandas as pd
from causalnex.structure import DAGRegressor
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
# ...
# global constants
MODEL_DIRECTORY = "/srv/app/model/data/"
# In[22]:
# this cell is not executed from MLTK and should only be used for staging data into the notebook environment
# In[24]:
# initialize your model
# available inputs: data and parameters
# returns the model object which will be used as a reference to call fit, apply and summary subsequently
# In[26]:
# train your model
# returns a fit info json object and may modify the model object
# In[28]:
# apply your model
# returns the calculated results
# In[ ]:
# save model to name in expected convention "<algo_name>_<model_name>"
# In[ ]:
# load model from name in expected convention "<algo_name>_<model_name>"
# In[ ]:
# return a model summary
| 16.272727 | 108 | 0.624146 |
c3545eaf7cf8c0dfbca19e2063b2250b17a5d6be | 6,500 | py | Python | Assignment1/Q4/q4.py | NavneelSinghal/COL774 | d8b473b9cd05984ef4ffe8642ce3ce5cb9a17252 | [
"MIT"
] | null | null | null | Assignment1/Q4/q4.py | NavneelSinghal/COL774 | d8b473b9cd05984ef4ffe8642ce3ce5cb9a17252 | [
"MIT"
] | null | null | null | Assignment1/Q4/q4.py | NavneelSinghal/COL774 | d8b473b9cd05984ef4ffe8642ce3ce5cb9a17252 | [
"MIT"
] | null | null | null | import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
matplotlib.use('Agg')
import math
import numpy as np
import sys
from os.path import join, isfile
import warnings
warnings.filterwarnings("ignore")
if __name__ == '__main__':
main()
| 31.400966 | 158 | 0.534923 |
c35493185a871b0c5b3f41a18ba8dd0865c75b5e | 1,521 | py | Python | var/spack/repos/builtin/packages/bcache/package.py | milljm/spack | b476f8aa63d48f4b959522ece0406caa32992d4a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/bcache/package.py | milljm/spack | b476f8aa63d48f4b959522ece0406caa32992d4a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/bcache/package.py | milljm/spack | b476f8aa63d48f4b959522ece0406caa32992d4a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
| 40.026316 | 104 | 0.738988 |
c354a245e57f7c727ba4576fb602286db50cc1a3 | 645 | py | Python | core/migrations/0010_wagtailsitepage_screenshot.py | admariner/madewithwagtail | a43b3263c0f151ece4994fccd561b0575db4979f | [
"MIT"
] | null | null | null | core/migrations/0010_wagtailsitepage_screenshot.py | admariner/madewithwagtail | a43b3263c0f151ece4994fccd561b0575db4979f | [
"MIT"
] | null | null | null | core/migrations/0010_wagtailsitepage_screenshot.py | admariner/madewithwagtail | a43b3263c0f151ece4994fccd561b0575db4979f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-21 23:50
from django.db import migrations
| 22.241379 | 48 | 0.568992 |
c3556300b12020a7a08798e9741d8eecbab08f07 | 1,555 | py | Python | circuitpython/schedule.py | Flameeyes/birch-books-smarthome | 245a8afc848b2a8cf4dbcde31b36716b44937200 | [
"MIT"
] | null | null | null | circuitpython/schedule.py | Flameeyes/birch-books-smarthome | 245a8afc848b2a8cf4dbcde31b36716b44937200 | [
"MIT"
] | null | null | null | circuitpython/schedule.py | Flameeyes/birch-books-smarthome | 245a8afc848b2a8cf4dbcde31b36716b44937200 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2020 The birch-books-smarthome Authors
# SPDX-License-Identifier: MIT
BOOKSTORE_GROUND_FLOOR = 0x0007
BOOKSTORE_FIRST_FLOOR = 0x0008
BOOKSTORE_TERRARIUM = 0x0010
BOOKSTORE_BEDROOM = 0x0020
HOUSE_BASEMENT = 0x0040
HOUSE_GROUND_FLOOR = 0x0380
HOUSE_BEDROOM_LIGHT = 0x0400
HOUSE_BEDROOM_LAMP = 0x0800
HOUSE_FIREPLACE_1 = 0x1000
HOUSE_FIREPLACE_2 = 0x2000
SCHEDULE = [
BOOKSTORE_BEDROOM | HOUSE_BEDROOM_LIGHT,
BOOKSTORE_TERRARIUM | BOOKSTORE_BEDROOM | HOUSE_BEDROOM_LIGHT,
BOOKSTORE_TERRARIUM | BOOKSTORE_FIRST_FLOOR | HOUSE_BEDROOM_LIGHT,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR | HOUSE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR | HOUSE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_GROUND_FLOOR | HOUSE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_FIRST_FLOOR | HOUSE_GROUND_FLOOR,
BOOKSTORE_TERRARIUM | BOOKSTORE_FIRST_FLOOR | HOUSE_BASEMENT | HOUSE_BEDROOM_LIGHT,
BOOKSTORE_TERRARIUM | BOOKSTORE_BEDROOM | HOUSE_BASEMENT | HOUSE_BEDROOM_LAMP,
BOOKSTORE_BEDROOM | HOUSE_BEDROOM_LAMP,
0,
0,
]
TEST_SCHEDULE = [
BOOKSTORE_GROUND_FLOOR,
BOOKSTORE_FIRST_FLOOR,
BOOKSTORE_TERRARIUM,
BOOKSTORE_BEDROOM,
HOUSE_BASEMENT,
HOUSE_GROUND_FLOOR,
HOUSE_BEDROOM_LIGHT,
HOUSE_BEDROOM_LAMP,
HOUSE_FIREPLACE_1,
HOUSE_FIREPLACE_2,
]
| 33.804348 | 87 | 0.803859 |
c3561322c8fe83a3cce278173951cb1c3bdb4ed4 | 284 | py | Python | imdb/utils.py | rinkurajole/imdb_sanic_app | 502852b911eb2cfdc5dfcdb4fba585b91e2ce7c6 | [
"BSD-3-Clause"
] | null | null | null | imdb/utils.py | rinkurajole/imdb_sanic_app | 502852b911eb2cfdc5dfcdb4fba585b91e2ce7c6 | [
"BSD-3-Clause"
] | null | null | null | imdb/utils.py | rinkurajole/imdb_sanic_app | 502852b911eb2cfdc5dfcdb4fba585b91e2ce7c6 | [
"BSD-3-Clause"
] | null | null | null | import bcrypt
salt = bcrypt.gensalt()
| 23.666667 | 70 | 0.683099 |
c3562d1b40f2737d409b58bb9f4467b0ae1bbe8c | 14,200 | py | Python | test/test_modules/test_math.py | dragonteros/unsuspected-hangeul | 52dda3768809f5ba91e4fd7bb754223737b2da3d | [
"MIT"
] | 62 | 2019-02-24T17:45:04.000Z | 2021-06-14T07:34:57.000Z | test/test_modules/test_math.py | dragonteros/unsuspected-hangeul | 52dda3768809f5ba91e4fd7bb754223737b2da3d | [
"MIT"
] | 11 | 2019-02-25T17:19:45.000Z | 2020-07-18T05:04:17.000Z | test/test_modules/test_math.py | dragonteros/unsuspected-hangeul | 52dda3768809f5ba91e4fd7bb754223737b2da3d | [
"MIT"
] | 2 | 2019-02-25T07:51:14.000Z | 2019-09-23T12:36:08.000Z | from test.test_base import TestBase
| 55.905512 | 158 | 0.520352 |
c3565453ab31565d1b32ad8f383deec201854e66 | 1,563 | py | Python | services/smtp.py | sourceperl/docker.mqttwarn | 9d87337f766843c8bdee34eba8d29776e7032009 | [
"MIT"
] | null | null | null | services/smtp.py | sourceperl/docker.mqttwarn | 9d87337f766843c8bdee34eba8d29776e7032009 | [
"MIT"
] | null | null | null | services/smtp.py | sourceperl/docker.mqttwarn | 9d87337f766843c8bdee34eba8d29776e7032009 | [
"MIT"
] | 2 | 2016-09-03T09:12:17.000Z | 2020-03-03T11:58:40.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Jan-Piet Mens <jpmens()gmail.com>'
__copyright__ = 'Copyright 2014 Jan-Piet Mens'
__license__ = """Eclipse Public License - v 1.0 (http://www.eclipse.org/legal/epl-v10.html)"""
import smtplib
from email.mime.text import MIMEText
| 33.978261 | 124 | 0.621241 |
c3566cc0d033b24fec07c1d00481ebc4541fed37 | 1,865 | py | Python | xknx/knxip/disconnect_request.py | Trance-Paradox/xknx | d5603361080f96aafd19c14d17fb1ff391064b3f | [
"MIT"
] | null | null | null | xknx/knxip/disconnect_request.py | Trance-Paradox/xknx | d5603361080f96aafd19c14d17fb1ff391064b3f | [
"MIT"
] | null | null | null | xknx/knxip/disconnect_request.py | Trance-Paradox/xknx | d5603361080f96aafd19c14d17fb1ff391064b3f | [
"MIT"
] | null | null | null | """
Module for Serialization and Deserialization of a KNX Disconnect Request information.
Disconnect requests are used to disconnect a tunnel from a KNX/IP device.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from xknx.exceptions import CouldNotParseKNXIP
from .body import KNXIPBody
from .hpai import HPAI
from .knxip_enum import KNXIPServiceType
if TYPE_CHECKING:
from xknx.xknx import XKNX
| 29.603175 | 85 | 0.657909 |
c357fdd1d20a6b3edc1499c2dfe1f260522fc967 | 391 | py | Python | src/database_setup.py | liuchanglilian/crowdsourcing-text-msg | 8270a8175bc78141d9eff00b53f4b292d0d2678c | [
"MIT"
] | null | null | null | src/database_setup.py | liuchanglilian/crowdsourcing-text-msg | 8270a8175bc78141d9eff00b53f4b292d0d2678c | [
"MIT"
] | null | null | null | src/database_setup.py | liuchanglilian/crowdsourcing-text-msg | 8270a8175bc78141d9eff00b53f4b292d0d2678c | [
"MIT"
] | null | null | null | from src.sqlite_helper import create_message_table, drop_message_table
"""
This script will create a SQLite table for you, and should be one time setup
The table name is message which will store all the Post message
"""
create_message_table()
"""
If you need to drop the message table, un-comment the following code by removing the # sign in the beginning
"""
#
# drop_message_table()
#
| 24.4375 | 108 | 0.769821 |
c3590d5e9d8eea5dee2b2753a4c5f63a26af1754 | 5,401 | py | Python | home/pedrosenarego/zorba/zorba1.0.py | rv8flyboy/pyrobotlab | 4e04fb751614a5cb6044ea15dcfcf885db8be65a | [
"Apache-2.0"
] | 63 | 2015-02-03T18:49:43.000Z | 2022-03-29T03:52:24.000Z | home/pedrosenarego/zorba/zorba1.0.py | hirwaHenryChristian/pyrobotlab | 2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9 | [
"Apache-2.0"
] | 16 | 2016-01-26T19:13:29.000Z | 2018-11-25T21:20:51.000Z | home/pedrosenarego/zorba/zorba1.0.py | hirwaHenryChristian/pyrobotlab | 2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9 | [
"Apache-2.0"
] | 151 | 2015-01-03T18:55:54.000Z | 2022-03-04T07:04:23.000Z | from java.lang import String
import threading
import random
import codecs
import io
import itertools
import time
import os
import urllib2
import textwrap
import socket
import shutil
#############################################################
# This is the ZOrba
#
#############################################################
# All bot specific configuration goes here.
leftPort = "/dev/ttyACM1"
rightPort = "/dev/ttyACM0"
headPort = leftPort
gesturesPath = "/home/pedro/Dropbox/pastaPessoal/3Dprinter/inmoov/scripts/zorba/gestures"
botVoice = "WillBadGuy"
#starting the INMOOV
i01 = Runtime.createAndStart("i01", "InMoov")
i01.setMute(True)
##############STARTING THE RIGHT HAND#########
i01.rightHand = Runtime.create("i01.rightHand", "InMoovHand")
#tweaking defaults settings of right hand
i01.rightHand.thumb.setMinMax(20,155)
i01.rightHand.index.setMinMax(30,130)
i01.rightHand.majeure.setMinMax(38,150)
i01.rightHand.ringFinger.setMinMax(30,170)
i01.rightHand.pinky.setMinMax(30,150)
i01.rightHand.thumb.map(0,180,20,155)
i01.rightHand.index.map(0,180,30,130)
i01.rightHand.majeure.map(0,180,38,150)
i01.rightHand.ringFinger.map(0,180,30,175)
i01.rightHand.pinky.map(0,180,30,150)
#################
#################STARTING RIGHT ARM###############
i01.startRightArm(rightPort)
#i01.rightArm = Runtime.create("i01.rightArm", "InMoovArm")
## tweak default RightArm
i01.detach()
i01.rightArm.bicep.setMinMax(0,60)
i01.rightArm.bicep.map(0,180,0,60)
i01.rightArm.rotate.setMinMax(46,130)
i01.rightArm.rotate.map(0,180,46,130)
i01.rightArm.shoulder.setMinMax(0,155)
i01.rightArm.shoulder.map(0,180,0,155)
i01.rightArm.omoplate.setMinMax(8,85)
i01.rightArm.omoplate.map(0,180,8,85)
########STARTING SIDE NECK CONTROL########
leftneckServo = Runtime.start("leftNeck","Servo")
rightneckServo = Runtime.start("rightNeck","Servo")
right = Runtime.start("i01.right", "Arduino")
#right.connect(rightPort)
leftneckServo.attach(right, 13)
rightneckServo.attach(right, 12)
restPos = 90
delta = 20
neckMoveTo(restPos,delta)
#############STARTING THE HEAD##############
i01.head = Runtime.create("i01.head", "InMoovHead")
#weaking defaults settings of head
i01.head.jaw.setMinMax(35,75)
i01.head.jaw.map(0,180,35,75)
i01.head.jaw.setRest(35)
#tweaking default settings of eyes
i01.head.eyeY.setMinMax(0,180)
i01.head.eyeY.map(0,180,70,110)
i01.head.eyeY.setRest(90)
i01.head.eyeX.setMinMax(0,180)
i01.head.eyeX.map(0,180,70,110)
i01.head.eyeX.setRest(90)
i01.head.neck.setMinMax(40,142)
i01.head.neck.map(0,180,40,142)
i01.head.neck.setRest(70)
i01.head.rothead.setMinMax(21,151)
i01.head.rothead.map(0,180,21,151)
i01.head.rothead.setRest(88)
#########STARTING MOUTH CONTROL###############
i01.startMouthControl(leftPort)
i01.mouthControl.setmouth(0,180)
######################################################################
# mouth service, speech synthesis
mouth = Runtime.createAndStart("i01.mouth", "AcapelaSpeech")
mouth.setVoice(botVoice)
######################################################################
# helper function help debug the recognized text from webkit/sphinx
######################################################################
######################################################################
# Create ProgramAB chat bot ( This is the inmoov "brain" )
######################################################################
zorba2 = Runtime.createAndStart("zorba", "ProgramAB")
zorba2.startSession("Pedro", "zorba")
######################################################################
# Html filter to clean the output from programab. (just in case)
htmlfilter = Runtime.createAndStart("htmlfilter", "HtmlFilter")
######################################################################
# the "ear" of the inmoov TODO: replace this with just base inmoov ear?
ear = Runtime.createAndStart("i01.ear", "WebkitSpeechRecognition")
ear.addListener("publishText", python.name, "heard");
ear.addMouth(mouth)
######################################################################
# MRL Routing webkitspeechrecognition/ear -> program ab -> htmlfilter -> mouth
######################################################################
ear.addTextListener(zorba)
zorba2.addTextListener(htmlfilter)
htmlfilter.addTextListener(mouth)
#starting the INMOOV
i01 = Runtime.createAndStart("i01", "InMoov")
i01.setMute(True)
i01.mouth = mouth
######################################################################
# Launch the web gui and create the webkit speech recognition gui
# This service works in Google Chrome only with the WebGui
#################################################################
webgui = Runtime.createAndStart("webgui","WebGui")
######################################################################
# Helper functions and various gesture definitions
######################################################################
i01.loadGestures(gesturesPath)
ear.startListening()
######################################################################
# starting services
######################################################################
i01.startRightHand(rightPort)
i01.detach()
leftneckServo.detach()
rightneckServo.detach()
i01.startHead(leftPort)
i01.detach()
| 31.95858 | 89 | 0.587854 |
c3591dd7e4fa04185bef35a749e2e0b73d499945 | 837 | py | Python | pocs/tests/test_state_machine.py | zacharyt20/POCS | 8f785eaf27178be7d72106cb82e5400a8b852ba8 | [
"MIT"
] | 1 | 2019-07-19T10:37:08.000Z | 2019-07-19T10:37:08.000Z | pocs/tests/test_state_machine.py | zacharyt20/POCS | 8f785eaf27178be7d72106cb82e5400a8b852ba8 | [
"MIT"
] | null | null | null | pocs/tests/test_state_machine.py | zacharyt20/POCS | 8f785eaf27178be7d72106cb82e5400a8b852ba8 | [
"MIT"
] | null | null | null | import os
import pytest
import yaml
from pocs.core import POCS
from pocs.observatory import Observatory
from pocs.utils import error
| 22.026316 | 60 | 0.746714 |
c3592d71715ada6f67b45406f9503a1122617882 | 7,033 | py | Python | code/BacDup/scripts/gff_parser.py | JFsanchezherrero/TFM_UOC_AMoya | 74d860d90240d96d800031ff449e21e09bad826c | [
"Unlicense"
] | 2 | 2021-03-05T10:20:10.000Z | 2021-12-21T10:50:21.000Z | code/BacDup/scripts/gff_parser.py | JFsanchezherrero/TFM_UOC_AMoya | 74d860d90240d96d800031ff449e21e09bad826c | [
"Unlicense"
] | 7 | 2021-03-03T14:27:50.000Z | 2021-07-21T09:38:27.000Z | code/BacDup/scripts/gff_parser.py | JFsanchezherrero/TFM_UOC_AMoya | 74d860d90240d96d800031ff449e21e09bad826c | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
##############################################################
## Jose F. Sanchez & Alba Moya ##
## Copyright (C) 2020-2021 ##
##############################################################
'''
Created on 28 oct. 2020
@author: alba
Modified in March 2021
@author: Jose F. Sanchez-Herrero
'''
## useful imports
import sys
import os
import pandas as pd
import numpy as np
import HCGB
from Bio import SeqIO, Seq
from Bio.SeqRecord import SeqRecord
from BCBio import GFF
from BacDup.scripts.functions import columns_annot_table
##################################################
def gff_parser_caller(gff_file, ref_file, output_path, debug):
'''This function calls the actual gff parser
It serves as the entry point either from a module or system call
'''
## set output paths
prot_file = os.path.abspath( os.path.join(output_path, 'proteins.fa'))
csv_file = os.path.abspath( os.path.join(output_path, 'annot_df.csv'))
csv_length = os.path.abspath( os.path.join(output_path, 'length_df.csv'))
list_out_files = [prot_file, csv_file, csv_length]
try:
with open (ref_file) as in_handle:
ref_recs = SeqIO.to_dict(SeqIO.parse(in_handle, "fasta"))
## debug messages
if (debug):
debug_message('GenBank record', 'yellow')
print (ref_recs)
## parse
with open(prot_file, "w") as out_handle:
SeqIO.write(protein_recs(gff_file, ref_recs,
list_out_files, debug=debug), out_handle, "fasta")
## return information
return (list_out_files)
except:
return (False)
############################################################
#################################################################
def main (gff_file, ref_file, output_folder, debug=False):
#get name
base, ext = os.path.splitext(gff_file)
gff_file = os.path.abspath(gff_file)
#create folder
output_path = HCGB.functions.file_functions.create_folder(output_path)
if (debug):
print ("## DEBUG:")
print ("base:" , base)
print ("ext:" , ext)
print ()
gff_parser_caller(gff_file, ref_file, output_path, debug)
################################################################################
if __name__ == "__main__":
if len(sys.argv) != 4:
print (__doc__)
print ("## Usage gff_parser")
print ("python %s gff_file ref_fasta_file output_folder\n" %sys.argv[0])
sys.exit()
main(*sys.argv[1:], debug=True)
#main(*sys.argv[1:])
# la variable debug no es obligatoria. tiene un "por defecto definido"
# Se utiliza el "=" para indicar el default.
| 35.882653 | 173 | 0.515854 |
c359a6fbb849b989ceb5b8e12f21bfb4e4e866fd | 1,729 | py | Python | PAL/Cross/client/sources-linux/build_library_zip.py | infosecsecurity/OSPTF | df3f63dc882db6d7e0b7bd80476e9bbc8471ac1f | [
"MIT"
] | 2 | 2017-11-23T01:07:37.000Z | 2021-06-25T05:03:49.000Z | PAL/Cross/client/sources-linux/build_library_zip.py | infosecsecurity/OSPTF | df3f63dc882db6d7e0b7bd80476e9bbc8471ac1f | [
"MIT"
] | null | null | null | PAL/Cross/client/sources-linux/build_library_zip.py | infosecsecurity/OSPTF | df3f63dc882db6d7e0b7bd80476e9bbc8471ac1f | [
"MIT"
] | 1 | 2018-05-22T02:28:43.000Z | 2018-05-22T02:28:43.000Z | import sys
from distutils.core import setup
import os
from glob import glob
import zipfile
import shutil
sys.path.insert(0, os.path.join('resources','library_patches'))
sys.path.insert(0, os.path.join('..','..','pupy'))
import pp
import additional_imports
import Crypto
all_dependencies=set([x.split('.')[0] for x,m in sys.modules.iteritems() if not '(built-in)' in str(m) and x != '__main__'])
print "ALLDEPS: ", all_dependencies
zf = zipfile.ZipFile(os.path.join('resources','library.zip'), mode='w', compression=zipfile.ZIP_DEFLATED)
try:
for dep in all_dependencies:
mdep = __import__(dep)
print "DEPENDENCY: ", dep, mdep
if hasattr(mdep, '__path__'):
print('adding package %s'%dep)
path, root = os.path.split(mdep.__path__[0])
for root, dirs, files in os.walk(mdep.__path__[0]):
for f in list(set([x.rsplit('.',1)[0] for x in files])):
found=False
for ext in ('.pyc', '.so', '.pyo', '.py'):
if ext == '.py' and found:
continue
if os.path.exists(os.path.join(root,f+ext)):
zipname = os.path.join(root[len(path)+1:], f.split('.', 1)[0] + ext)
print('adding file : {}'.format(zipname))
zf.write(os.path.join(root, f+ext), zipname)
found=True
else:
if '<memimport>' in mdep.__file__:
continue
_, ext = os.path.splitext(mdep.__file__)
print('adding %s -> %s'%(mdep.__file__, dep+ext))
zf.write(mdep.__file__, dep+ext)
finally:
zf.close()
| 36.020833 | 124 | 0.54251 |
c35a27ffefb517296b644e56550ee85f278c4beb | 4,742 | py | Python | conans/test/functional/old/short_paths_test.py | Manu343726/conan | fe322a672307d29f99d2e7bc1c02c45c835028d7 | [
"MIT"
] | null | null | null | conans/test/functional/old/short_paths_test.py | Manu343726/conan | fe322a672307d29f99d2e7bc1c02c45c835028d7 | [
"MIT"
] | 1 | 2020-04-18T10:13:37.000Z | 2020-04-18T10:16:37.000Z | conans/test/functional/old/short_paths_test.py | alacasta/conan | 643a9c84fe6dc0cb2f9fcbfc9dc5bd2e789c690e | [
"MIT"
] | 1 | 2018-09-03T05:04:23.000Z | 2018-09-03T05:04:23.000Z | import os
import platform
import unittest
from conans.model.ref import ConanFileReference
from conans.test.utils.tools import NO_SETTINGS_PACKAGE_ID, TestClient
| 41.234783 | 97 | 0.633066 |
c35a9f8a6f746b1900b91c33a9b1be7d36fdde7f | 4,086 | py | Python | data_collection/json2mongodb.py | kwond2/hedgehogs | 58dbed549a1e78e401fc90c7a7041d9979cfc2e4 | [
"MIT"
] | 9 | 2018-02-06T19:08:16.000Z | 2022-03-15T13:31:57.000Z | data_collection/json2mongodb.py | kwond2/hedgehogs | 58dbed549a1e78e401fc90c7a7041d9979cfc2e4 | [
"MIT"
] | 37 | 2018-02-09T21:22:58.000Z | 2021-12-13T19:51:24.000Z | data_collection/json2mongodb.py | kwond2/hedgehogs | 58dbed549a1e78e401fc90c7a7041d9979cfc2e4 | [
"MIT"
] | 10 | 2018-02-27T20:26:55.000Z | 2021-02-06T02:26:30.000Z | #-*- coding: utf-8 -*-
# import os
# from optparse import OptionParser
# from pymongo import MongoClient, bulk
# import json
# import collections
# import sys
from import_hedgehogs import *
HOST = '45.55.48.43'
PORT = 27017
DB = 'SEC_EDGAR'
if __name__ == "__main__":
print("[WARNING] STILL UNDER DEVELOPMENT")
main()
| 41.272727 | 121 | 0.670338 |
c35c0c54bc5945e22f05841e1485001ae7177f54 | 2,984 | py | Python | scripts/convert_to_singlehead.py | Lollipop321/mini-decoder-network | cfdaba579b45cba1d181585e5430178c1dc60049 | [
"BSD-3-Clause"
] | 1 | 2021-09-18T05:07:38.000Z | 2021-09-18T05:07:38.000Z | scripts/convert_to_singlehead.py | Lollipop321/mini-decoder-network | cfdaba579b45cba1d181585e5430178c1dc60049 | [
"BSD-3-Clause"
] | null | null | null | scripts/convert_to_singlehead.py | Lollipop321/mini-decoder-network | cfdaba579b45cba1d181585e5430178c1dc60049 | [
"BSD-3-Clause"
] | 1 | 2021-12-24T13:05:02.000Z | 2021-12-24T13:05:02.000Z | import torch
import math
import time
import struct
import argparse
import numpy as np
from collections import OrderedDict
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-model', required=True, help="trained model prefix, also include dir, e.g. ../data/model-100")
args = parser.parse_args()
model_path = args.model
checkpoint = torch.load(model_path, map_location='cpu')
assert 'args' in checkpoint
assert 'model' in checkpoint
args = checkpoint['args']
model = checkpoint['model']
checkpoint_new = {}
model_new = {}
e = [0, 0, 0, 0, 0, 0]
d = [0, 0, 0, 0, 0, 0]
for name, w in model.items():
if "decoder" in name:
if "self_attn.in_proj" in name:
layer = eval(name.split(".")[2])
wq, wk, wv = w.chunk(3, dim=0)
assert args.encoder_embed_dim == args.decoder_embed_dim
model_new[name] = torch.cat([wq[(args.encoder_embed_dim // 8 * e[layer]): (args.encoder_embed_dim // 8 * (e[layer] + 1))],
wk[(args.encoder_embed_dim // 8 * e[layer]): (args.encoder_embed_dim // 8 * (e[layer] + 1))],
wv[(args.encoder_embed_dim // 8 * e[layer]): (args.encoder_embed_dim // 8 * (e[layer] + 1))]], dim=0)
elif "encoder_attn.in_proj" in name:
layer = eval(name.split(".")[2])
wq, wk, wv = w.chunk(3, dim=0)
assert args.encoder_embed_dim == args.decoder_embed_dim
model_new[name] = torch.cat([wq[(args.encoder_embed_dim // 8 * d[layer]): (args.encoder_embed_dim // 8 * (d[layer] + 1))],
wk[(args.encoder_embed_dim // 8 * d[layer]): (args.encoder_embed_dim // 8 * (d[layer] + 1))],
wv[(args.encoder_embed_dim // 8 * d[layer]): (args.encoder_embed_dim // 8 * (d[layer] + 1))]], dim=0)
elif "self_attn.out_proj.weight" in name:
layer = eval(name.split(".")[2])
assert args.encoder_embed_dim == args.decoder_embed_dim
model_new[name] = w[:, (args.encoder_embed_dim // 8 * e[layer]): (args.encoder_embed_dim // 8 * (e[layer] + 1))]
elif "encoder_attn.out_proj.weight" in name:
layer = eval(name.split(".")[2])
assert args.encoder_embed_dim == args.decoder_embed_dim
model_new[name] = w[:, (args.encoder_embed_dim // 8 * d[layer]): (args.encoder_embed_dim // 8 * (d[layer] + 1))]
else:
model_new[name] = w
else:
model_new[name] = w
checkpoint_new['args'] = args
checkpoint_new['args'].arch = "transformer_singlehead_t2t_wmt_en_de"
checkpoint_new['model'] = model_new
# print(checkpoint_new['args'].arch)
torch.save(checkpoint_new, 'checkpoint_singlehead.pt')
print("finished!") | 45.907692 | 146 | 0.560657 |
c35c6a6a052a8839d6a0e36986573f0ad73f479f | 3,719 | py | Python | tests/integration/frameworks/test_detectron2_impl.py | francoisserra/BentoML | 213e9e9b39e055286f2649c733907df88e6d2503 | [
"Apache-2.0"
] | 1 | 2021-06-12T17:04:07.000Z | 2021-06-12T17:04:07.000Z | tests/integration/frameworks/test_detectron2_impl.py | francoisserra/BentoML | 213e9e9b39e055286f2649c733907df88e6d2503 | [
"Apache-2.0"
] | 4 | 2021-05-16T08:06:25.000Z | 2021-11-13T08:46:36.000Z | tests/integration/frameworks/test_detectron2_impl.py | francoisserra/BentoML | 213e9e9b39e055286f2649c733907df88e6d2503 | [
"Apache-2.0"
] | null | null | null | import typing as t
from typing import TYPE_CHECKING
import numpy as np
import torch
import pytest
import imageio
from detectron2 import model_zoo
from detectron2.data import transforms as T
from detectron2.config import get_cfg
from detectron2.modeling import build_model
import bentoml
if TYPE_CHECKING:
from detectron2.config import CfgNode
from bentoml._internal.types import Tag
from bentoml._internal.models import ModelStore
IMAGE_URL: str = "./tests/utils/_static/detectron2_sample.jpg"
def prepare_image(
original_image: "np.ndarray[t.Any, np.dtype[t.Any]]",
) -> "np.ndarray[t.Any, np.dtype[t.Any]]":
"""Mainly to test on COCO dataset"""
_aug = T.ResizeShortestEdge([800, 800], 1333)
image = _aug.get_transform(original_image).apply_image(original_image)
return image.transpose(2, 0, 1)
| 30.483607 | 85 | 0.705835 |
c35c97b552a6619198e65898ccb72250776063d5 | 1,867 | py | Python | molecule/default/tests/test_default.py | escalate/ansible-influxdb-docker | bbb2c259bd1de3c4c40322103a05894494af7104 | [
"MIT"
] | null | null | null | molecule/default/tests/test_default.py | escalate/ansible-influxdb-docker | bbb2c259bd1de3c4c40322103a05894494af7104 | [
"MIT"
] | null | null | null | molecule/default/tests/test_default.py | escalate/ansible-influxdb-docker | bbb2c259bd1de3c4c40322103a05894494af7104 | [
"MIT"
] | null | null | null | """Role testing files using testinfra"""
def test_config_directory(host):
"""Check config directory"""
f = host.file("/etc/influxdb")
assert f.is_directory
assert f.user == "influxdb"
assert f.group == "root"
assert f.mode == 0o775
def test_data_directory(host):
"""Check data directory"""
d = host.file("/var/lib/influxdb")
assert d.is_directory
assert d.user == "influxdb"
assert d.group == "root"
assert d.mode == 0o700
def test_backup_directory(host):
"""Check backup directory"""
b = host.file("/var/backups/influxdb")
assert b.is_directory
assert b.user == "influxdb"
assert b.group == "root"
assert b.mode == 0o775
def test_influxdb_service(host):
"""Check InfluxDB service"""
s = host.service("influxdb")
assert s.is_running
assert s.is_enabled
def test_influxdb_docker_container(host):
"""Check InfluxDB docker container"""
d = host.docker("influxdb.service").inspect()
assert d["HostConfig"]["Memory"] == 1073741824
assert d["Config"]["Image"] == "influxdb:latest"
assert d["Config"]["Labels"]["maintainer"] == "me@example.com"
assert "INFLUXD_REPORTING_DISABLED=true" in d["Config"]["Env"]
assert "internal" in d["NetworkSettings"]["Networks"]
assert \
"influxdb" in d["NetworkSettings"]["Networks"]["internal"]["Aliases"]
def test_backup(host):
"""Check if the backup runs successfully"""
cmd = host.run("/usr/local/bin/backup-influxdb.sh")
assert cmd.succeeded
def test_backup_cron_job(host):
"""Check backup cron job"""
f = host.file("/var/spool/cron/crontabs/root")
assert "/usr/local/bin/backup-influxdb.sh" in f.content_string
def test_restore(host):
"""Check if the restore runs successfully"""
cmd = host.run("/usr/local/bin/restore-influxdb.sh")
assert cmd.succeeded
| 28.287879 | 77 | 0.664167 |
c35eb72d85ca1063b3957ca321301a14a1c4baba | 3,847 | py | Python | ZIP-v0.01/Serial_to_MQTT.py | JittoThomas/IOT | 994fa25087d14e33c2d82b9c9d526f65823b6fa8 | [
"MIT"
] | null | null | null | ZIP-v0.01/Serial_to_MQTT.py | JittoThomas/IOT | 994fa25087d14e33c2d82b9c9d526f65823b6fa8 | [
"MIT"
] | null | null | null | ZIP-v0.01/Serial_to_MQTT.py | JittoThomas/IOT | 994fa25087d14e33c2d82b9c9d526f65823b6fa8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import cayenne.client, datetime, time, serial
# import random
#Delay Start
#print "Time now = ", datetime.datetime.now().strftime("%H-%M-%S")
#time.sleep(60)
#print "Starting now = ", datetime.datetime.now().strftime("%H-%M-%S")
# Cayenne authentication info. This should be obtained from the Cayenne Dashboard.
MQTT_USERNAME = "6375a470-cff9-11e7-86d0-83752e057225"
MQTT_PASSWORD = "26e1dc13f900da7b30b24cad4b320f9bc6dd0d78"
MQTT_CLIENT_ID = "157d1d10-69dd-11e8-84d1-4d9372e87a68"
# Other settings that seem to be embedded in Cayenne's libraries
# MQTT_URL = "mqtt.mydevices.com"
# MQTT_PORT = "1883"
# Default location of serial port on Pi models 1 and 2
#SERIAL_PORT = "/dev/ttyAMA0"
# Default location of serial port on Pi models 3 and Zero
SERIAL_PORT = "/dev/ttyS0"
# How often shall we write values to Cayenne? (Seconds + 1)
interval = 5
#This sets up the serial port specified above. baud rate is the bits per second timeout seconds
#port = serial.Serial(SERIAL_PORT, baudrate=2400, timeout=5)
#This sets up the serial port specified above. baud rate. This WAITS for any cr/lf (new blob of data from picaxe)
port = serial.Serial(SERIAL_PORT, baudrate=2400)
# The callback for when a message is received from Cayenne.
client = cayenne.client.CayenneMQTTClient()
client.on_message = on_message
client.begin(MQTT_USERNAME, MQTT_PASSWORD, MQTT_CLIENT_ID)
#Predefine Data Packet objects for python prior to trying to look for them :)
node = ":01"
channel = "A"
data = 123
cs = 0
while True:
try:
rcv = port.readline() #read buffer until cr/lf
#print("Serial Readline Data = " + rcv)
rcv = rcv.rstrip("\r\n")
node,channel,data,cs = rcv.split(",")
#Test Point print("rcv.split Data = : " + node + channel + data + cs)
if cs == '0':
#if cs = Check Sum is good = 0 then do the following
if channel == 'A':
data = float(data)/1
client.virtualWrite(1, data, "analog_sensor", "null")
client.loop()
if channel == 'B':
data = float(data)/1
client.virtualWrite(2, data, "analog_sensor", "null")
client.loop()
if channel == 'C':
data = float(data)/1
client.virtualWrite(3, data, "analog_sensor", "null")
client.loop()
if channel == 'D':
data = float(data)/1
client.virtualWrite(4, data, "analog_sensor", "null")
client.loop()
if channel == 'E':
data = float(data)/1
client.virtualWrite(5, data, "analog_sensor", "null")
client.loop()
if channel == 'F':
data = float(data)/1
client.virtualWrite(6, data, "analog_sensor", "null")
client.loop()
if channel == 'G':
data = float(data)/1
client.virtualWrite(7, data, "analog_sensor", "null")
client.loop()
if channel == 'H':
data = float(data)/1
client.virtualWrite(8, data, "analog_sensor", "null")
client.loop()
if channel == 'I':
data = float(data)/1
client.virtualWrite(9, data, "analog_sensor", "null")
client.loop()
if channel == 'J':
data = float(data)/1
client.virtualWrite(10, data, "analog_sensor", "null")
client.loop()
if channel == 'K':
data = float(data)/1
client.virtualWrite(11, data, "analog_sensor", "null")
client.loop()
if channel == 'L':
data = float(data)/1
client.virtualWrite(12, data, "analog_sensor", "null")
client.loop()
except ValueError:
#if Data Packet corrupt or malformed then...
print("Data Packet corrupt or malformed")
| 31.276423 | 114 | 0.641539 |
c35efbe149c76dcc538b4f5467731ccd578e9db2 | 1,841 | py | Python | test/test_slimta_queue_proxy.py | nanojob/python-slimta | 70b9c633756a56afaf1fdd53c5ead6d0001036e7 | [
"MIT"
] | 141 | 2015-01-24T23:59:18.000Z | 2022-01-30T16:36:37.000Z | test/test_slimta_queue_proxy.py | nanojob/python-slimta | 70b9c633756a56afaf1fdd53c5ead6d0001036e7 | [
"MIT"
] | 106 | 2015-01-13T22:49:07.000Z | 2021-02-17T15:14:11.000Z | test/test_slimta_queue_proxy.py | nanojob/python-slimta | 70b9c633756a56afaf1fdd53c5ead6d0001036e7 | [
"MIT"
] | 43 | 2015-07-29T14:55:09.000Z | 2021-09-24T22:30:38.000Z |
import unittest
from mox3.mox import MoxTestBase, IsA
from slimta.queue.proxy import ProxyQueue
from slimta.smtp.reply import Reply
from slimta.relay import Relay, TransientRelayError, PermanentRelayError
from slimta.envelope import Envelope
# vim:et:fdm=marker:sts=4:sw=4:ts=4
| 29.693548 | 72 | 0.634438 |
c3601f9d19e300648c3ba875a58c68aa35eadc52 | 1,912 | py | Python | tests/potential/EamPotential/Al__born_exp_fs/test____init__.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | [
"MIT"
] | 4 | 2018-01-18T19:59:56.000Z | 2020-08-25T11:56:52.000Z | tests/potential/EamPotential/Al__born_exp_fs/test____init__.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | [
"MIT"
] | 1 | 2018-04-22T23:02:13.000Z | 2018-04-22T23:02:13.000Z | tests/potential/EamPotential/Al__born_exp_fs/test____init__.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | [
"MIT"
] | 1 | 2019-09-14T07:04:42.000Z | 2019-09-14T07:04:42.000Z | import pytest
from pypospack.potential import EamPotential
symbols = ['Al']
func_pair_name = "bornmayer"
func_density_name = "eam_dens_exp"
func_embedding_name = "fs"
expected_parameter_names_pair_potential = []
expected_parameter_names_density_function = []
expected_parameter_names_embedding_function = []
expected_parameter_names = [
'p_AlAl_phi0', 'p_AlAl_gamma', 'p_AlAl_r0',
'd_Al_rho0', 'd_Al_beta', 'd_Al_r0',
'e_Al_F0', 'e_Al_p', 'e_Al_q', 'e_Al_F1', 'e_Al_rho0']
print(80*'-')
print("func_pair_name={}".format(func_pair_name))
print("func_density_name={}".format(func_density_name))
print("func_embedding_name={}".format(func_density_name))
print(80*'-')
if __name__ == "__main__":
# CONSTRUCTOR TEST
pot = EamPotential(symbols=symbols,
func_pair=func_pair_name,
func_density=func_density_name,
func_embedding=func_embedding_name)
print('pot.potential_type == {}'.format(\
pot.potential_type))
print('pot.symbols == {}'.format(\
pot.symbols))
print('pot.parameter_names == {}'.format(\
pot.parameter_names))
print('pot.is_charge == {}'.format(\
pot.is_charge))
| 30.83871 | 72 | 0.684623 |
c36035176be4720b8166b5477e11e4a52ab157d4 | 417 | py | Python | backend/bin/main/enrichers/enricher.py | anjo-ba/PCAP-Analyzer | ccb13caba9c0c05a7643e63c57575b56ab1233cb | [
"MIT"
] | 4 | 2019-03-29T08:45:36.000Z | 2021-11-11T00:49:36.000Z | backend/bin/main/enrichers/enricher.py | anjo-ba/PCAP-Analyzer | ccb13caba9c0c05a7643e63c57575b56ab1233cb | [
"MIT"
] | 9 | 2019-04-03T18:10:19.000Z | 2020-08-16T12:13:34.000Z | backend/bin/main/enrichers/enricher.py | anjo-ba/PCAP-Analyzer | ccb13caba9c0c05a7643e63c57575b56ab1233cb | [
"MIT"
] | 4 | 2019-05-09T15:33:23.000Z | 2022-02-06T08:01:23.000Z | from typing import Dict
from main.helpers.print_helper import PrintHelper
| 26.0625 | 80 | 0.70024 |
c360b0127afead19c24d728369419544803b4819 | 2,191 | py | Python | Modulo_5/proyecto/presentacion/form_ubicacion/formAUbicacion_designer.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | null | null | null | Modulo_5/proyecto/presentacion/form_ubicacion/formAUbicacion_designer.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | null | null | null | Modulo_5/proyecto/presentacion/form_ubicacion/formAUbicacion_designer.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | 1 | 2022-03-04T00:57:18.000Z | 2022-03-04T00:57:18.000Z | import tkinter as tk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from pandastable import Table
import util.generic as utl
| 41.339623 | 137 | 0.645824 |
c362e5ae43a55d318ef4b490ee0fc9d950ff6b12 | 138 | py | Python | injector/__init__.py | vnepomuceno/kafka-events-injector | 66fb490b1a3d5f06737689005c639b1785a6bb37 | [
"MIT"
] | null | null | null | injector/__init__.py | vnepomuceno/kafka-events-injector | 66fb490b1a3d5f06737689005c639b1785a6bb37 | [
"MIT"
] | null | null | null | injector/__init__.py | vnepomuceno/kafka-events-injector | 66fb490b1a3d5f06737689005c639b1785a6bb37 | [
"MIT"
] | null | null | null | import coloredlogs
coloredlogs.install()
custom_logger = logging.getLogger(name)
coloredlogs.install(level="INFO", logger=custom_logger)
| 23 | 55 | 0.826087 |
c3636918f6e548937ced74b698a4a4c3213be188 | 4,008 | py | Python | setup.py | Lcvette/qtpyvcp | 4143a4a4e1f557f7d0c8998c886b4a254f0be60b | [
"BSD-3-Clause-LBNL",
"MIT"
] | 71 | 2018-12-13T20:31:18.000Z | 2022-03-26T08:44:22.000Z | setup.py | Lcvette/qtpyvcp | 4143a4a4e1f557f7d0c8998c886b4a254f0be60b | [
"BSD-3-Clause-LBNL",
"MIT"
] | 78 | 2019-01-10T18:16:33.000Z | 2022-03-18T19:30:49.000Z | setup.py | Lcvette/qtpyvcp | 4143a4a4e1f557f7d0c8998c886b4a254f0be60b | [
"BSD-3-Clause-LBNL",
"MIT"
] | 38 | 2018-10-10T19:02:26.000Z | 2022-01-30T04:38:14.000Z | import os
import versioneer
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
if os.getenv('DEB_BUILD') == 'true' or os.getenv('USER') == 'root':
"/usr/share/doc/linuxcnc/examples/sample-configs/sim"
# list of (destination, source_file) tuples
DATA_FILES = [
('/usr/lib/x86_64-linux-gnu/qt5/plugins/designer/', [
'pyqt5designer/Qt5.7.1-64bit/libpyqt5_py2.so',
'pyqt5designer/Qt5.7.1-64bit/libpyqt5_py3.so']),
]
# list of (destination, source_dir) tuples
DATA_DIRS = [
('/usr/share/doc/linuxcnc/examples/sample-configs/sim', 'linuxcnc/configs'),
]
if os.getenv('USER') == 'root':
try:
os.rename('/usr/lib/x86_64-linux-gnu/qt5/plugins/designer/libpyqt5.so',
'/usr/lib/x86_64-linux-gnu/qt5/plugins/designer/libpyqt5.so.old')
except:
pass
else:
# list of (destination, source_file) tuples
DATA_FILES = [
('~/', ['scripts/.xsessionrc',]),
]
# list of (destination, source_dir) tuples
DATA_DIRS = [
('~/linuxcnc/configs/sim.qtpyvcp', 'linuxcnc/configs/sim.qtpyvcp'),
('~/linuxcnc/nc_files/qtpyvcp', 'linuxcnc/nc_files/qtpyvcp'),
# ('~/linuxcnc/vcps', 'examples'),
]
data_files = [(os.path.expanduser(dest), src_list) for dest, src_list in DATA_FILES]
data_files.extend(data_files_from_dirs(DATA_DIRS))
setup(
name="qtpyvcp",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author="Kurt Jacobson",
author_email="kcjengr@gmail.com",
description="Qt and Python based Virtual Control Panel framework for LinuxCNC.",
long_description=long_description,
long_description_content_type="text/markdown",
license="GNU General Public License v2 (GPLv2)",
url="https://github.com/kcjengr/qtpyvcp",
download_url="https://github.com/kcjengr/qtpyvcp/archive/master.zip",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Manufacturing',
'Intended Audience :: End Users/Desktop',
'Topic :: Software Development :: Widget Sets',
'Topic :: Software Development :: User Interfaces',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python :: 2.7',
],
packages=find_packages(),
data_files=data_files,
include_package_data=True,
install_requires=[
'docopt',
'qtpy',
'pyudev',
'psutil',
'HiYaPyCo',
'pyopengl',
'vtk',
'pyqtgraph',
'oyaml',
'simpleeval',
],
entry_points={
'console_scripts': [
'qtpyvcp=qtpyvcp.app:main',
'qcompile=qtpyvcp.tools.qcompile:main',
'editvcp=qtpyvcp.tools.editvcp:main',
# example VCPs
'mini=examples.mini:main',
'brender=examples.brender:main',
# test VCPs
'vtk_test=video_tests.vtk_test:main',
'opengl_test=video_tests.opengl_test:main',
'qtpyvcp_test=video_tests.qtpyvcp_test:main',
],
'qtpyvcp.example_vcp': [
'mini=examples.mini',
'brender=examples.brender',
'actions=examples.actions',
],
'qtpyvcp.test_vcp': [
'vtk_test=video_tests.vtk_test',
'opengl_test=video_tests.opengl_test',
'qtpyvcp_test=video_tests.qtpyvcp_test',
],
},
)
| 31.809524 | 87 | 0.611776 |
c363cccc0f9ae4f989abcc27c186813cc42c4212 | 4,366 | py | Python | hidparser/UsagePage.py | NZSmartie/PyHIDParser | a2758929c82a4316a665a779b9a391740103b318 | [
"MIT"
] | 22 | 2016-04-28T10:29:11.000Z | 2022-02-02T17:30:08.000Z | hidparser/UsagePage.py | NZSmartie/PyHIDParser | a2758929c82a4316a665a779b9a391740103b318 | [
"MIT"
] | 12 | 2016-04-24T03:29:00.000Z | 2018-11-26T22:34:37.000Z | hidparser/UsagePage.py | NZSmartie/PyHIDParser | a2758929c82a4316a665a779b9a391740103b318 | [
"MIT"
] | 5 | 2017-02-21T13:01:25.000Z | 2021-10-04T07:13:53.000Z | from enum import Enum as _Enum
| 35.209677 | 136 | 0.633074 |
c3645b451a58c6438e6127bf646d7ebd0d06fa74 | 1,505 | py | Python | sandbox/lib/jumpscale/JumpscaleLibs/tools/legal_contracts/LegalDoc.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | 2 | 2019-05-09T07:21:25.000Z | 2019-08-05T06:37:53.000Z | sandbox/lib/jumpscale/JumpscaleLibs/tools/legal_contracts/LegalDoc.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | 664 | 2018-12-19T12:43:44.000Z | 2019-08-23T04:24:42.000Z | sandbox/lib/jumpscale/JumpscaleLibs/tools/legal_contracts/LegalDoc.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | 7 | 2019-05-03T07:14:37.000Z | 2019-08-05T12:36:52.000Z | from reportlab.lib.pagesizes import A4
from reportlab.lib.units import cm
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import BaseDocTemplate, Frame, PageTemplate, Paragraph
| 35 | 108 | 0.639203 |
c3681201a4fff8ff597af63f6abe3f4d4fb7b0ce | 5,627 | py | Python | tests/outcome/test_outcome_models.py | ConnorBarnhill/kf-api-dataservice | 547df467a307788882469a25c947a14965a26336 | [
"Apache-2.0"
] | 6 | 2018-01-25T13:49:24.000Z | 2020-03-07T16:25:09.000Z | tests/outcome/test_outcome_models.py | ConnorBarnhill/kf-api-dataservice | 547df467a307788882469a25c947a14965a26336 | [
"Apache-2.0"
] | 369 | 2018-01-17T15:22:18.000Z | 2022-03-10T19:14:56.000Z | tests/outcome/test_outcome_models.py | ConnorBarnhill/kf-api-dataservice | 547df467a307788882469a25c947a14965a26336 | [
"Apache-2.0"
] | 3 | 2018-04-11T14:18:37.000Z | 2018-10-31T19:09:48.000Z | from datetime import datetime
import uuid
from sqlalchemy.exc import IntegrityError
from dataservice.api.study.models import Study
from dataservice.api.participant.models import Participant
from dataservice.api.outcome.models import Outcome
from dataservice.extensions import db
from tests.utils import FlaskTestCase
| 30.252688 | 72 | 0.594633 |
c3693f12a03bbf78b7f7bcf22ea6cd2fd4184fd8 | 1,043 | py | Python | app/forms/fields/month_year_date_field.py | ons-eq-team/eq-questionnaire-runner | 8d029097faa2b9d53d9621064243620db60c62c7 | [
"MIT"
] | null | null | null | app/forms/fields/month_year_date_field.py | ons-eq-team/eq-questionnaire-runner | 8d029097faa2b9d53d9621064243620db60c62c7 | [
"MIT"
] | null | null | null | app/forms/fields/month_year_date_field.py | ons-eq-team/eq-questionnaire-runner | 8d029097faa2b9d53d9621064243620db60c62c7 | [
"MIT"
] | null | null | null | import logging
from werkzeug.utils import cached_property
from wtforms import FormField, Form, StringField
logger = logging.getLogger(__name__)
| 26.74359 | 68 | 0.607862 |
c36a18741da6b1e9a7e803a47b014cff09f34cfc | 310 | py | Python | inf_classif_analysis/descriptive_analysis.py | Marco-Ametrano/myocardal_infarction_class | d2fb9d4d6643d0b836ffdb94a32911eb4d68c390 | [
"MIT"
] | null | null | null | inf_classif_analysis/descriptive_analysis.py | Marco-Ametrano/myocardal_infarction_class | d2fb9d4d6643d0b836ffdb94a32911eb4d68c390 | [
"MIT"
] | null | null | null | inf_classif_analysis/descriptive_analysis.py | Marco-Ametrano/myocardal_infarction_class | d2fb9d4d6643d0b836ffdb94a32911eb4d68c390 | [
"MIT"
] | null | null | null | #AFTER PREPROCESSING AND TARGETS DEFINITION
newdataset.describe()
LET_IS.value_counts()
LET_IS.value_counts().plot(kind='bar', color='c')
Y_unica.value_counts()
Y_unica.value_counts().plot(kind='bar', color='c')
ZSN.value_counts().plot(kind='bar', color='c')
Survive.value_counts().plot(kind='bar', color='c')
| 34.444444 | 50 | 0.748387 |
c36a8c07a26ce0690e0700b966816f0b550bb368 | 1,568 | py | Python | src/service/eda_service.py | LiuYuWei/service-data-eda-analysis | 7dcbf205a0a3715cf3d199356bd1814b8d47b52d | [
"Apache-2.0"
] | null | null | null | src/service/eda_service.py | LiuYuWei/service-data-eda-analysis | 7dcbf205a0a3715cf3d199356bd1814b8d47b52d | [
"Apache-2.0"
] | null | null | null | src/service/eda_service.py | LiuYuWei/service-data-eda-analysis | 7dcbf205a0a3715cf3d199356bd1814b8d47b52d | [
"Apache-2.0"
] | null | null | null | """Confusion matrix calculation service."""
# coding=utf-8
# import relation package.
from pandas_profiling import ProfileReport
import pandas as pd
import datetime
import json
# import project package.
from config.config_setting import ConfigSetting
| 32.666667 | 88 | 0.655612 |
c36b30969b08b61066b6a7a3898735992cd717ad | 1,385 | py | Python | google/cloud/bigquery_v2/types/__init__.py | KoffieLabs/python-bigquery | 33b317abdc6d69f33722cb0504bb0b78c1c80e30 | [
"Apache-2.0"
] | null | null | null | google/cloud/bigquery_v2/types/__init__.py | KoffieLabs/python-bigquery | 33b317abdc6d69f33722cb0504bb0b78c1c80e30 | [
"Apache-2.0"
] | null | null | null | google/cloud/bigquery_v2/types/__init__.py | KoffieLabs/python-bigquery | 33b317abdc6d69f33722cb0504bb0b78c1c80e30 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .encryption_config import (
EncryptionConfiguration,
)
from .model import (
DeleteModelRequest,
GetModelRequest,
ListModelsRequest,
ListModelsResponse,
Model,
PatchModelRequest,
)
from .model_reference import (
ModelReference,
)
from .standard_sql import (
StandardSqlDataType,
StandardSqlField,
StandardSqlStructType,
StandardSqlTableType,
)
from .table_reference import (
TableReference,
)
__all__ = (
"EncryptionConfiguration",
"DeleteModelRequest",
"GetModelRequest",
"ListModelsRequest",
"ListModelsResponse",
"Model",
"PatchModelRequest",
"ModelReference",
"StandardSqlDataType",
"StandardSqlField",
"StandardSqlStructType",
"StandardSqlTableType",
"TableReference",
)
| 25.181818 | 74 | 0.724188 |
c36b323dde6e6584446ed2e96c3983eea6ffe2a3 | 4,365 | py | Python | blurr/core/store.py | ddrightnow/blurr | a8745101d4a8a85ccf1efc608dba8486d3cebb49 | [
"Apache-2.0"
] | null | null | null | blurr/core/store.py | ddrightnow/blurr | a8745101d4a8a85ccf1efc608dba8486d3cebb49 | [
"Apache-2.0"
] | 7 | 2019-12-16T20:58:29.000Z | 2022-02-09T23:57:32.000Z | blurr/core/store.py | ddrightnow/blurr | a8745101d4a8a85ccf1efc608dba8486d3cebb49 | [
"Apache-2.0"
] | null | null | null | from abc import abstractmethod, ABC
from datetime import datetime, timezone
from typing import Any, List, Tuple, Dict
from blurr.core.base import BaseSchema
from blurr.core.store_key import Key, KeyType
def get_time_range(self, identity, group, start_time, end_time) -> List[Tuple[Key, Any]]:
raise NotImplementedError()
def get_count_range(self, identity, group, time, count):
raise NotImplementedError()
| 33.837209 | 100 | 0.595647 |
c36b874a06452316ba72dfbbdea4c8d952355b51 | 1,411 | py | Python | seamless/core/cache/tempref.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 15 | 2017-06-07T12:49:12.000Z | 2020-07-25T18:06:04.000Z | seamless/core/cache/tempref.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 110 | 2016-06-21T23:20:44.000Z | 2022-02-24T16:15:22.000Z | seamless/core/cache/tempref.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 6 | 2016-06-21T11:19:22.000Z | 2019-01-21T13:45:39.000Z | import time, copy
import asyncio
temprefmanager = TempRefManager()
coro = temprefmanager.loop()
import asyncio
task = asyncio.ensure_future(coro)
import atexit
atexit.register(lambda *args, **kwargs: task.cancel()) | 26.12963 | 57 | 0.546421 |
c36b9227e1e39aa4000c6b92c3dbf8f27a5ea7f5 | 8,775 | py | Python | lib/python27/Lib/site-packages/wx-2.8-msw-ansi/wx/tools/Editra/src/eclib/choicedlg.py | bo3b/iZ3D | ced8b3a4b0a152d0177f2e94008918efc76935d5 | [
"MIT"
] | 27 | 2020-11-12T19:24:54.000Z | 2022-03-27T23:10:45.000Z | lib/python27/Lib/site-packages/wx-2.8-msw-ansi/wx/tools/Editra/src/eclib/choicedlg.py | bo3b/iZ3D | ced8b3a4b0a152d0177f2e94008918efc76935d5 | [
"MIT"
] | 2 | 2020-11-02T06:30:39.000Z | 2022-02-23T18:39:55.000Z | lib/python27/Lib/site-packages/wx-2.8-msw-ansi/wx/tools/Editra/src/eclib/choicedlg.py | bo3b/iZ3D | ced8b3a4b0a152d0177f2e94008918efc76935d5 | [
"MIT"
] | 3 | 2021-08-16T00:21:08.000Z | 2022-02-23T19:19:36.000Z | ###############################################################################
# Name: choicedlg.py #
# Purpose: Generic Choice Dialog #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2008 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
Editra Control Library: Choice Dialog
A generic choice dialog that uses a wx.Choice control to display its choices.
@summary: Generic Choice Dialog
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: choicedlg.py 63820 2010-04-01 21:46:22Z CJP $"
__revision__ = "$Revision: 63820 $"
__all__ = ['ChoiceDialog',]
#--------------------------------------------------------------------------#
# Imports
import wx
#--------------------------------------------------------------------------#
# Globals
ChoiceDialogNameStr = u"ChoiceDialog"
#--------------------------------------------------------------------------#
#--------------------------------------------------------------------------#
#--------------------------------------------------------------------------#
| 30.574913 | 79 | 0.509972 |
c36ce52f1b69aad8e3b2676523c1755292c1c03c | 29,068 | py | Python | src/flower/proto/transport_pb2.py | xinchiqiu/flower | ef12441fdebaa32f34e12dd02dfa376fa2988eaf | [
"Apache-2.0"
] | null | null | null | src/flower/proto/transport_pb2.py | xinchiqiu/flower | ef12441fdebaa32f34e12dd02dfa376fa2988eaf | [
"Apache-2.0"
] | null | null | null | src/flower/proto/transport_pb2.py | xinchiqiu/flower | ef12441fdebaa32f34e12dd02dfa376fa2988eaf | [
"Apache-2.0"
] | 1 | 2020-06-01T11:06:18.000Z | 2020-06-01T11:06:18.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: flower/proto/transport.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='flower/proto/transport.proto',
package='flower.transport',
syntax='proto3',
serialized_options=None,
serialized_pb=b'\n\x1c\x66lower/proto/transport.proto\x12\x10\x66lower.transport\"2\n\nParameters\x12\x0f\n\x07tensors\x18\x01 \x03(\x0c\x12\x13\n\x0btensor_type\x18\x02 \x01(\t\"\xb8\x05\n\rServerMessage\x12>\n\treconnect\x18\x01 \x01(\x0b\x32).flower.transport.ServerMessage.ReconnectH\x00\x12G\n\x0eget_parameters\x18\x02 \x01(\x0b\x32-.flower.transport.ServerMessage.GetParametersH\x00\x12\x39\n\x07\x66it_ins\x18\x03 \x01(\x0b\x32&.flower.transport.ServerMessage.FitInsH\x00\x12\x43\n\x0c\x65valuate_ins\x18\x04 \x01(\x0b\x32+.flower.transport.ServerMessage.EvaluateInsH\x00\x1a\x1c\n\tReconnect\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x1a\x0f\n\rGetParameters\x1a\xad\x01\n\x06\x46itIns\x12\x30\n\nparameters\x18\x01 \x01(\x0b\x32\x1c.flower.transport.Parameters\x12\x42\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x32.flower.transport.ServerMessage.FitIns.ConfigEntry\x1a-\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\xb7\x01\n\x0b\x45valuateIns\x12\x30\n\nparameters\x18\x01 \x01(\x0b\x32\x1c.flower.transport.Parameters\x12G\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x37.flower.transport.ServerMessage.EvaluateIns.ConfigEntry\x1a-\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x05\n\x03msg\"\xbc\x04\n\rClientMessage\x12@\n\ndisconnect\x18\x01 \x01(\x0b\x32*.flower.transport.ClientMessage.DisconnectH\x00\x12G\n\x0eparameters_res\x18\x02 \x01(\x0b\x32-.flower.transport.ClientMessage.ParametersResH\x00\x12\x39\n\x07\x66it_res\x18\x03 \x01(\x0b\x32&.flower.transport.ClientMessage.FitResH\x00\x12\x43\n\x0c\x65valuate_res\x18\x04 \x01(\x0b\x32+.flower.transport.ClientMessage.EvaluateResH\x00\x1a\x36\n\nDisconnect\x12(\n\x06reason\x18\x01 \x01(\x0e\x32\x18.flower.transport.Reason\x1a\x41\n\rParametersRes\x12\x30\n\nparameters\x18\x01 \x01(\x0b\x32\x1c.flower.transport.Parameters\x1ak\n\x06\x46itRes\x12\x30\n\nparameters\x18\x01 
\x01(\x0b\x32\x1c.flower.transport.Parameters\x12\x14\n\x0cnum_examples\x18\x02 \x01(\x03\x12\x19\n\x11num_examples_ceil\x18\x03 \x01(\x03\x1a\x31\n\x0b\x45valuateRes\x12\x14\n\x0cnum_examples\x18\x01 \x01(\x03\x12\x0c\n\x04loss\x18\x02 \x01(\x02\x42\x05\n\x03msg*R\n\x06Reason\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tRECONNECT\x10\x01\x12\x16\n\x12POWER_DISCONNECTED\x10\x02\x12\x14\n\x10WIFI_UNAVAILABLE\x10\x03\x32_\n\rFlowerService\x12N\n\x04Join\x12\x1f.flower.transport.ClientMessage\x1a\x1f.flower.transport.ServerMessage\"\x00(\x01\x30\x01\x62\x06proto3'
)
_REASON = _descriptor.EnumDescriptor(
name='Reason',
full_name='flower.transport.Reason',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RECONNECT', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POWER_DISCONNECTED', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WIFI_UNAVAILABLE', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1376,
serialized_end=1458,
)
_sym_db.RegisterEnumDescriptor(_REASON)
Reason = enum_type_wrapper.EnumTypeWrapper(_REASON)
UNKNOWN = 0
RECONNECT = 1
POWER_DISCONNECTED = 2
WIFI_UNAVAILABLE = 3
_PARAMETERS = _descriptor.Descriptor(
name='Parameters',
full_name='flower.transport.Parameters',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tensors', full_name='flower.transport.Parameters.tensors', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensor_type', full_name='flower.transport.Parameters.tensor_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=50,
serialized_end=100,
)
_SERVERMESSAGE_RECONNECT = _descriptor.Descriptor(
name='Reconnect',
full_name='flower.transport.ServerMessage.Reconnect',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='seconds', full_name='flower.transport.ServerMessage.Reconnect.seconds', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=385,
serialized_end=413,
)
_SERVERMESSAGE_GETPARAMETERS = _descriptor.Descriptor(
name='GetParameters',
full_name='flower.transport.ServerMessage.GetParameters',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=415,
serialized_end=430,
)
_SERVERMESSAGE_FITINS_CONFIGENTRY = _descriptor.Descriptor(
name='ConfigEntry',
full_name='flower.transport.ServerMessage.FitIns.ConfigEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='flower.transport.ServerMessage.FitIns.ConfigEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='flower.transport.ServerMessage.FitIns.ConfigEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=561,
serialized_end=606,
)
_SERVERMESSAGE_FITINS = _descriptor.Descriptor(
name='FitIns',
full_name='flower.transport.ServerMessage.FitIns',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameters', full_name='flower.transport.ServerMessage.FitIns.parameters', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='config', full_name='flower.transport.ServerMessage.FitIns.config', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SERVERMESSAGE_FITINS_CONFIGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=433,
serialized_end=606,
)
_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY = _descriptor.Descriptor(
name='ConfigEntry',
full_name='flower.transport.ServerMessage.EvaluateIns.ConfigEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='flower.transport.ServerMessage.EvaluateIns.ConfigEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='flower.transport.ServerMessage.EvaluateIns.ConfigEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=561,
serialized_end=606,
)
_SERVERMESSAGE_EVALUATEINS = _descriptor.Descriptor(
name='EvaluateIns',
full_name='flower.transport.ServerMessage.EvaluateIns',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameters', full_name='flower.transport.ServerMessage.EvaluateIns.parameters', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='config', full_name='flower.transport.ServerMessage.EvaluateIns.config', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=609,
serialized_end=792,
)
_SERVERMESSAGE = _descriptor.Descriptor(
name='ServerMessage',
full_name='flower.transport.ServerMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='reconnect', full_name='flower.transport.ServerMessage.reconnect', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='get_parameters', full_name='flower.transport.ServerMessage.get_parameters', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fit_ins', full_name='flower.transport.ServerMessage.fit_ins', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='evaluate_ins', full_name='flower.transport.ServerMessage.evaluate_ins', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SERVERMESSAGE_RECONNECT, _SERVERMESSAGE_GETPARAMETERS, _SERVERMESSAGE_FITINS, _SERVERMESSAGE_EVALUATEINS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='msg', full_name='flower.transport.ServerMessage.msg',
index=0, containing_type=None, fields=[]),
],
serialized_start=103,
serialized_end=799,
)
_CLIENTMESSAGE_DISCONNECT = _descriptor.Descriptor(
name='Disconnect',
full_name='flower.transport.ClientMessage.Disconnect',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='reason', full_name='flower.transport.ClientMessage.Disconnect.reason', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1086,
serialized_end=1140,
)
_CLIENTMESSAGE_PARAMETERSRES = _descriptor.Descriptor(
name='ParametersRes',
full_name='flower.transport.ClientMessage.ParametersRes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameters', full_name='flower.transport.ClientMessage.ParametersRes.parameters', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1142,
serialized_end=1207,
)
_CLIENTMESSAGE_FITRES = _descriptor.Descriptor(
name='FitRes',
full_name='flower.transport.ClientMessage.FitRes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameters', full_name='flower.transport.ClientMessage.FitRes.parameters', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_examples', full_name='flower.transport.ClientMessage.FitRes.num_examples', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_examples_ceil', full_name='flower.transport.ClientMessage.FitRes.num_examples_ceil', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1209,
serialized_end=1316,
)
_CLIENTMESSAGE_EVALUATERES = _descriptor.Descriptor(
name='EvaluateRes',
full_name='flower.transport.ClientMessage.EvaluateRes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_examples', full_name='flower.transport.ClientMessage.EvaluateRes.num_examples', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss', full_name='flower.transport.ClientMessage.EvaluateRes.loss', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1318,
serialized_end=1367,
)
_CLIENTMESSAGE = _descriptor.Descriptor(
name='ClientMessage',
full_name='flower.transport.ClientMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='disconnect', full_name='flower.transport.ClientMessage.disconnect', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parameters_res', full_name='flower.transport.ClientMessage.parameters_res', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fit_res', full_name='flower.transport.ClientMessage.fit_res', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='evaluate_res', full_name='flower.transport.ClientMessage.evaluate_res', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CLIENTMESSAGE_DISCONNECT, _CLIENTMESSAGE_PARAMETERSRES, _CLIENTMESSAGE_FITRES, _CLIENTMESSAGE_EVALUATERES, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='msg', full_name='flower.transport.ClientMessage.msg',
index=0, containing_type=None, fields=[]),
],
serialized_start=802,
serialized_end=1374,
)
_SERVERMESSAGE_RECONNECT.containing_type = _SERVERMESSAGE
_SERVERMESSAGE_GETPARAMETERS.containing_type = _SERVERMESSAGE
_SERVERMESSAGE_FITINS_CONFIGENTRY.containing_type = _SERVERMESSAGE_FITINS
_SERVERMESSAGE_FITINS.fields_by_name['parameters'].message_type = _PARAMETERS
_SERVERMESSAGE_FITINS.fields_by_name['config'].message_type = _SERVERMESSAGE_FITINS_CONFIGENTRY
_SERVERMESSAGE_FITINS.containing_type = _SERVERMESSAGE
_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY.containing_type = _SERVERMESSAGE_EVALUATEINS
_SERVERMESSAGE_EVALUATEINS.fields_by_name['parameters'].message_type = _PARAMETERS
_SERVERMESSAGE_EVALUATEINS.fields_by_name['config'].message_type = _SERVERMESSAGE_EVALUATEINS_CONFIGENTRY
_SERVERMESSAGE_EVALUATEINS.containing_type = _SERVERMESSAGE
_SERVERMESSAGE.fields_by_name['reconnect'].message_type = _SERVERMESSAGE_RECONNECT
_SERVERMESSAGE.fields_by_name['get_parameters'].message_type = _SERVERMESSAGE_GETPARAMETERS
_SERVERMESSAGE.fields_by_name['fit_ins'].message_type = _SERVERMESSAGE_FITINS
_SERVERMESSAGE.fields_by_name['evaluate_ins'].message_type = _SERVERMESSAGE_EVALUATEINS
_SERVERMESSAGE.oneofs_by_name['msg'].fields.append(
_SERVERMESSAGE.fields_by_name['reconnect'])
_SERVERMESSAGE.fields_by_name['reconnect'].containing_oneof = _SERVERMESSAGE.oneofs_by_name['msg']
_SERVERMESSAGE.oneofs_by_name['msg'].fields.append(
_SERVERMESSAGE.fields_by_name['get_parameters'])
_SERVERMESSAGE.fields_by_name['get_parameters'].containing_oneof = _SERVERMESSAGE.oneofs_by_name['msg']
_SERVERMESSAGE.oneofs_by_name['msg'].fields.append(
_SERVERMESSAGE.fields_by_name['fit_ins'])
_SERVERMESSAGE.fields_by_name['fit_ins'].containing_oneof = _SERVERMESSAGE.oneofs_by_name['msg']
_SERVERMESSAGE.oneofs_by_name['msg'].fields.append(
_SERVERMESSAGE.fields_by_name['evaluate_ins'])
_SERVERMESSAGE.fields_by_name['evaluate_ins'].containing_oneof = _SERVERMESSAGE.oneofs_by_name['msg']
_CLIENTMESSAGE_DISCONNECT.fields_by_name['reason'].enum_type = _REASON
_CLIENTMESSAGE_DISCONNECT.containing_type = _CLIENTMESSAGE
_CLIENTMESSAGE_PARAMETERSRES.fields_by_name['parameters'].message_type = _PARAMETERS
_CLIENTMESSAGE_PARAMETERSRES.containing_type = _CLIENTMESSAGE
_CLIENTMESSAGE_FITRES.fields_by_name['parameters'].message_type = _PARAMETERS
_CLIENTMESSAGE_FITRES.containing_type = _CLIENTMESSAGE
_CLIENTMESSAGE_EVALUATERES.containing_type = _CLIENTMESSAGE
_CLIENTMESSAGE.fields_by_name['disconnect'].message_type = _CLIENTMESSAGE_DISCONNECT
_CLIENTMESSAGE.fields_by_name['parameters_res'].message_type = _CLIENTMESSAGE_PARAMETERSRES
_CLIENTMESSAGE.fields_by_name['fit_res'].message_type = _CLIENTMESSAGE_FITRES
_CLIENTMESSAGE.fields_by_name['evaluate_res'].message_type = _CLIENTMESSAGE_EVALUATERES
_CLIENTMESSAGE.oneofs_by_name['msg'].fields.append(
_CLIENTMESSAGE.fields_by_name['disconnect'])
_CLIENTMESSAGE.fields_by_name['disconnect'].containing_oneof = _CLIENTMESSAGE.oneofs_by_name['msg']
_CLIENTMESSAGE.oneofs_by_name['msg'].fields.append(
_CLIENTMESSAGE.fields_by_name['parameters_res'])
_CLIENTMESSAGE.fields_by_name['parameters_res'].containing_oneof = _CLIENTMESSAGE.oneofs_by_name['msg']
_CLIENTMESSAGE.oneofs_by_name['msg'].fields.append(
_CLIENTMESSAGE.fields_by_name['fit_res'])
_CLIENTMESSAGE.fields_by_name['fit_res'].containing_oneof = _CLIENTMESSAGE.oneofs_by_name['msg']
_CLIENTMESSAGE.oneofs_by_name['msg'].fields.append(
_CLIENTMESSAGE.fields_by_name['evaluate_res'])
_CLIENTMESSAGE.fields_by_name['evaluate_res'].containing_oneof = _CLIENTMESSAGE.oneofs_by_name['msg']
DESCRIPTOR.message_types_by_name['Parameters'] = _PARAMETERS
DESCRIPTOR.message_types_by_name['ServerMessage'] = _SERVERMESSAGE
DESCRIPTOR.message_types_by_name['ClientMessage'] = _CLIENTMESSAGE
DESCRIPTOR.enum_types_by_name['Reason'] = _REASON
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Parameters = _reflection.GeneratedProtocolMessageType('Parameters', (_message.Message,), {
'DESCRIPTOR' : _PARAMETERS,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.Parameters)
})
_sym_db.RegisterMessage(Parameters)
ServerMessage = _reflection.GeneratedProtocolMessageType('ServerMessage', (_message.Message,), {
'Reconnect' : _reflection.GeneratedProtocolMessageType('Reconnect', (_message.Message,), {
'DESCRIPTOR' : _SERVERMESSAGE_RECONNECT,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage.Reconnect)
})
,
'GetParameters' : _reflection.GeneratedProtocolMessageType('GetParameters', (_message.Message,), {
'DESCRIPTOR' : _SERVERMESSAGE_GETPARAMETERS,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage.GetParameters)
})
,
'FitIns' : _reflection.GeneratedProtocolMessageType('FitIns', (_message.Message,), {
'ConfigEntry' : _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), {
'DESCRIPTOR' : _SERVERMESSAGE_FITINS_CONFIGENTRY,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage.FitIns.ConfigEntry)
})
,
'DESCRIPTOR' : _SERVERMESSAGE_FITINS,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage.FitIns)
})
,
'EvaluateIns' : _reflection.GeneratedProtocolMessageType('EvaluateIns', (_message.Message,), {
'ConfigEntry' : _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), {
'DESCRIPTOR' : _SERVERMESSAGE_EVALUATEINS_CONFIGENTRY,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage.EvaluateIns.ConfigEntry)
})
,
'DESCRIPTOR' : _SERVERMESSAGE_EVALUATEINS,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage.EvaluateIns)
})
,
'DESCRIPTOR' : _SERVERMESSAGE,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage)
})
_sym_db.RegisterMessage(ServerMessage)
_sym_db.RegisterMessage(ServerMessage.Reconnect)
_sym_db.RegisterMessage(ServerMessage.GetParameters)
_sym_db.RegisterMessage(ServerMessage.FitIns)
_sym_db.RegisterMessage(ServerMessage.FitIns.ConfigEntry)
_sym_db.RegisterMessage(ServerMessage.EvaluateIns)
_sym_db.RegisterMessage(ServerMessage.EvaluateIns.ConfigEntry)
ClientMessage = _reflection.GeneratedProtocolMessageType('ClientMessage', (_message.Message,), {
'Disconnect' : _reflection.GeneratedProtocolMessageType('Disconnect', (_message.Message,), {
'DESCRIPTOR' : _CLIENTMESSAGE_DISCONNECT,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ClientMessage.Disconnect)
})
,
'ParametersRes' : _reflection.GeneratedProtocolMessageType('ParametersRes', (_message.Message,), {
'DESCRIPTOR' : _CLIENTMESSAGE_PARAMETERSRES,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ClientMessage.ParametersRes)
})
,
'FitRes' : _reflection.GeneratedProtocolMessageType('FitRes', (_message.Message,), {
'DESCRIPTOR' : _CLIENTMESSAGE_FITRES,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ClientMessage.FitRes)
})
,
'EvaluateRes' : _reflection.GeneratedProtocolMessageType('EvaluateRes', (_message.Message,), {
'DESCRIPTOR' : _CLIENTMESSAGE_EVALUATERES,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ClientMessage.EvaluateRes)
})
,
'DESCRIPTOR' : _CLIENTMESSAGE,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ClientMessage)
})
_sym_db.RegisterMessage(ClientMessage)
_sym_db.RegisterMessage(ClientMessage.Disconnect)
_sym_db.RegisterMessage(ClientMessage.ParametersRes)
_sym_db.RegisterMessage(ClientMessage.FitRes)
_sym_db.RegisterMessage(ClientMessage.EvaluateRes)
_SERVERMESSAGE_FITINS_CONFIGENTRY._options = None
_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY._options = None
_FLOWERSERVICE = _descriptor.ServiceDescriptor(
name='FlowerService',
full_name='flower.transport.FlowerService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=1460,
serialized_end=1555,
methods=[
_descriptor.MethodDescriptor(
name='Join',
full_name='flower.transport.FlowerService.Join',
index=0,
containing_service=None,
input_type=_CLIENTMESSAGE,
output_type=_SERVERMESSAGE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_FLOWERSERVICE)
DESCRIPTOR.services_by_name['FlowerService'] = _FLOWERSERVICE
# @@protoc_insertion_point(module_scope)
| 39.494565 | 2,507 | 0.762901 |
c36ddd7acdde8453a1b9743b8e731fb3b4051614 | 80 | py | Python | shopyo/__init__.py | rehmanis/shopyo2 | 3e26602149f09aa4c13a1b4b6fba296bd82af99f | [
"MIT"
] | 2 | 2021-04-17T09:59:48.000Z | 2021-04-17T10:12:02.000Z | shopyo/__init__.py | rehmanis/shopyo2 | 3e26602149f09aa4c13a1b4b6fba296bd82af99f | [
"MIT"
] | 15 | 2021-04-01T19:54:46.000Z | 2021-04-07T22:25:40.000Z | shopyo/__init__.py | rehmanis/shopyo2 | 3e26602149f09aa4c13a1b4b6fba296bd82af99f | [
"MIT"
] | null | null | null | version_info = (4, 0, 1)
__version__ = ".".join(map(str, version_info))  # e.g. (4, 0, 1) -> "4.0.1"
| 26.666667 | 54 | 0.65 |
c36e4faa6f3051be3ca85cd0b16d04294152aa32 | 3,748 | py | Python | check_digit_calc.py | zhoffm/Check-Digit-Calculator | 5f86304901279678c74858811a452866785bd8f4 | [
"MIT"
] | 1 | 2019-08-29T13:07:08.000Z | 2019-08-29T13:07:08.000Z | check_digit_calc.py | zhoffm/Check-Digit-Calculator | 5f86304901279678c74858811a452866785bd8f4 | [
"MIT"
] | null | null | null | check_digit_calc.py | zhoffm/Check-Digit-Calculator | 5f86304901279678c74858811a452866785bd8f4 | [
"MIT"
] | null | null | null | from random import randint
import pandas as pd
# Class to calculate the check digit for 11 digit UPC's
if __name__ == '__main__':
    # Demo entry point: generate a random 11-digit UPC and print the full
    # code.  NOTE(review): random_11_digit_upc and CheckDigitCalculations
    # are not defined in this chunk -- presumably defined earlier in the
    # original file; confirm before running.
    test_upc = random_11_digit_upc()
    obj = CheckDigitCalculations()
    # get_full_upc presumably returns the UPC with its check digit
    # appended -- verify against the class implementation.
    print(obj.get_full_upc(test_upc))
| 32.591304 | 109 | 0.627535 |
c36ea7dbd20120b593de7ef575a4b4b1a54e3de9 | 4,976 | py | Python | test/test_load.py | ramsdalesteve/forest | 12cac1b3dd93c4475a8a4f696c522576b44f16eb | [
"BSD-3-Clause"
] | null | null | null | test/test_load.py | ramsdalesteve/forest | 12cac1b3dd93c4475a8a4f696c522576b44f16eb | [
"BSD-3-Clause"
] | null | null | null | test/test_load.py | ramsdalesteve/forest | 12cac1b3dd93c4475a8a4f696c522576b44f16eb | [
"BSD-3-Clause"
] | null | null | null | import yaml
import forest
from forest import main
def test_build_loader_given_files():
    """Build a loader from bare file paths, mirroring main.py's flow."""
    paths = ["file_20190101T0000Z.nc"]
    parsed = main.parse_args.parse_args(paths)
    # Same pipeline as main.py: files -> config -> first group -> loader.
    settings = forest.config.from_files(parsed.files, parsed.file_type)
    first_group = settings.file_groups[0]
    loader = forest.Loader.group_args(first_group, parsed)
    assert isinstance(loader, forest.data.DBLoader)
    assert loader.locator.paths == paths
def test_build_loader_given_database(tmpdir):
    """Build a database-backed loader from a config file, mirroring main.py."""
    db_path = str(tmpdir / "database.db")
    cfg_path = str(tmpdir / "config.yml")
    # One file group configured to use the database locator.
    file_group = {
        "label": "UM",
        "pattern": "*.nc",
        "locator": "database"
    }
    with open(cfg_path, "w") as stream:
        yaml.dump({"files": [file_group]}, stream)
    args = main.parse_args.parse_args(
        ["--database", db_path, "--config-file", cfg_path])
    group = forest.config.load_config(args.config_file).file_groups[0]
    database = forest.db.Database.connect(db_path)
    loader = forest.Loader.group_args(group, args, database=database)
    database.close()
    # Database-backed locators expose a connection and no directory.
    assert hasattr(loader.locator, "connection")
    assert loader.locator.directory is None
c3704e5ac8ab23d0d2914d6aa73d29d45471acf6 | 4,309 | py | Python | swagger_server/models/rule.py | Capping-WAR/API | 981823732f2b4f8bc007da657d5195579eb7dad3 | [
"MIT"
] | null | null | null | swagger_server/models/rule.py | Capping-WAR/API | 981823732f2b4f8bc007da657d5195579eb7dad3 | [
"MIT"
] | 2 | 2019-09-24T23:45:34.000Z | 2019-10-11T20:06:54.000Z | swagger_server/models/rule.py | Capping-WAR/API | 981823732f2b4f8bc007da657d5195579eb7dad3 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
| 27.44586 | 120 | 0.589464 |
c371765c42e0c448d7d486fc65c3f350acc4e5ed | 864 | py | Python | Project1/mazes/gen_sparses.py | VFerrari/MC906 | b04d3df58ef56203882fc59c03874f92c0d223fe | [
"MIT"
] | null | null | null | Project1/mazes/gen_sparses.py | VFerrari/MC906 | b04d3df58ef56203882fc59c03874f92c0d223fe | [
"MIT"
] | null | null | null | Project1/mazes/gen_sparses.py | VFerrari/MC906 | b04d3df58ef56203882fc59c03874f92c0d223fe | [
"MIT"
] | null | null | null | import os
import re
import numpy as np
# WARNING: this function overrides the mazes in sparse directory; don't run it
# as the idea is that everyone test the same mazes
def gen_sparses(dir_path):
''' Randomly remove points from dense instances '''
pattern = re.compile('^([0-9]+[a-zA-Z]+)')
denses_fn = [x for x in os.listdir(dir_path + '/dense') if pattern.match(x)]
print(denses_fn)
for dense_fn in denses_fn:
sparse = np.genfromtxt(dir_path + '/dense/' + dense_fn, dtype='str', delimiter=1)
for r in range(0, len(sparse)):
for c in range(0, len(sparse[0])):
if sparse[r][c] == '.':
sparse[r][c] = ' ' if bool(np.random.choice(np.arange(0,2), p=[0.25,0.75])) else '.'
np.savetxt(dir_path + '/sparse/' + dense_fn, sparse, fmt='%s', delimiter='')
gen_sparses('.') | 34.56 | 102 | 0.605324 |
c3718e6eac42b785991cffcfe402fff63a2a5da0 | 1,592 | py | Python | cryomem/cmtools/lib/jjivarray2.py | bebaek/cryomem | 088fba2568d10451adda51a068c15c8c2a73d9ce | [
"MIT"
] | 1 | 2018-09-16T12:29:04.000Z | 2018-09-16T12:29:04.000Z | cryomem/cmtools/lib/jjivarray2.py | bebaek/cryomem | 088fba2568d10451adda51a068c15c8c2a73d9ce | [
"MIT"
] | null | null | null | cryomem/cmtools/lib/jjivarray2.py | bebaek/cryomem | 088fba2568d10451adda51a068c15c8c2a73d9ce | [
"MIT"
] | null | null | null | """
Analyze JJ IV curve array (core) v.2
BB, 2016
"""
import numpy as np
from . import jjiv2 as jjiv
import sys
def fit2rsj_arr(iarr, varr, **kwargs):
    """Fit IV array to 2 Ic RSJ model and return arrays of fit params, error.

    Keyword arguments:
        guess: array of (Ic+, Ic-, Rn, Vo)
        io: fixed Io.
        updateguess: guess update ratio 0 to 1
    """
    if 'guess' in kwargs:
        kwargs['guess'] = np.array(kwargs['guess'])     # coerce to array type
    update = kwargs.get('updateguess', 0.95)

    n_curves = len(iarr)
    n_params = 4
    popt_arr = np.zeros((n_curves, n_params))
    pcov_arr = np.zeros((n_curves, n_params, n_params))

    for idx in range(n_curves):
        try:
            trial = 0
            while True:
                # Fit one IV curve, then feed the result back as the guess
                # for the next fit (blended for all curves after the first).
                popt, pcov = jjiv.fit2rsj(iarr[idx], varr[idx], **kwargs)
                if idx == 0:
                    kwargs['guess'] = popt
                else:
                    kwargs['guess'] = (1 - update) * kwargs['guess'] + update * popt

                # Accept the fit once the parameter errors are small, or
                # give up after a handful of retries.
                trial += 1
                if np.shape(pcov) == (4, 4):
                    perr = np.sqrt(np.diag(pcov))
                else:
                    perr = (np.inf,) * 4
                if (np.amax(perr) < .05) or (trial > 5):
                    popt_arr[idx], pcov_arr[idx] = popt, pcov
                    break
                print('Fit not good. Index: {}, Trial: {}'.format(idx, trial))
        except RuntimeError:
            # Leave zeros in the output rows for curves that cannot be fit.
            print("Can't fit. Index: {}!".format(idx))
    return popt_arr, pcov_arr
| 28.945455 | 78 | 0.478643 |
c372b444a020f4105b4dff97edb032deea88f217 | 567 | py | Python | python/0122.py | garywei944/LeetCode | 77453b6e3329f3309ad61fe578cb7b608317ba1b | [
"MIT"
] | null | null | null | python/0122.py | garywei944/LeetCode | 77453b6e3329f3309ad61fe578cb7b608317ba1b | [
"MIT"
] | null | null | null | python/0122.py | garywei944/LeetCode | 77453b6e3329f3309ad61fe578cb7b608317ba1b | [
"MIT"
] | null | null | null | from leetcode_tester import Tester
from typing import Optional, List
if __name__ == '__main__':
solution = Solution()
test = Tester(solution.maxProfit)
test.addTest(
[7, 1, 5, 3, 6, 4], 7
)
test.addTest(
[1, 2, 3, 4, 5], 4
)
test.addTest(
[7, 6, 4, 3, 1], 0
)
test.doTest()
| 19.551724 | 50 | 0.511464 |
c37355b23d392a1bb9299b5a5621376e2bdb4e8e | 1,406 | py | Python | dataset.py | songrotek/wechat_jump_end_to_end_train | 119e8a172bf31b70da1004c88567c41d3183711a | [
"MIT"
] | 26 | 2018-01-10T12:23:54.000Z | 2018-02-24T06:31:34.000Z | dataset.py | floodsung/wechat_jump_end_to_end_train | 119e8a172bf31b70da1004c88567c41d3183711a | [
"MIT"
] | 3 | 2018-06-20T17:28:31.000Z | 2018-07-03T13:35:36.000Z | dataset.py | songrotek/wechat_jump_end_to_end_train | 119e8a172bf31b70da1004c88567c41d3183711a | [
"MIT"
] | 10 | 2018-01-11T12:42:42.000Z | 2018-03-12T04:51:35.000Z | import torch
import json
import os
from torch.utils.data import DataLoader,Dataset
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
# Root directory holding the screenshot images used for training.
data_folder = "./dataset/images"
# Labels loaded from JSON; presumably maps image file name -> press time
# (jump duration) -- TODO confirm against the data-collection script.
press_times = json.load(open("./dataset/dataset.json"))
# Full path of every image file found in the data folder.
image_roots = [os.path.join(data_folder,image_file) \
               for image_file in os.listdir(data_folder)]
| 30.565217 | 103 | 0.687767 |
c373e158e091fc846ebe00cd19f68260787532ea | 921 | py | Python | grafana_backup/create_snapshot.py | Keimille/grafana-backup-tool | ea824c908c0b98ff934cfe3efdf90121b6edd49d | [
"MIT"
] | 515 | 2016-06-16T20:01:30.000Z | 2022-03-29T03:03:24.000Z | grafana_backup/create_snapshot.py | Keimille/grafana-backup-tool | ea824c908c0b98ff934cfe3efdf90121b6edd49d | [
"MIT"
] | 159 | 2016-12-06T03:06:58.000Z | 2022-03-17T16:10:40.000Z | grafana_backup/create_snapshot.py | Keimille/grafana-backup-tool | ea824c908c0b98ff934cfe3efdf90121b6edd49d | [
"MIT"
] | 195 | 2016-07-19T06:00:13.000Z | 2022-03-09T05:58:32.000Z | import json
from grafana_backup.dashboardApi import create_snapshot
| 35.423077 | 125 | 0.674267 |
c37733d1ef97d9bfcb5fc78d09053dd294d1f132 | 1,928 | py | Python | examples/keras_ssd_example.py | jiayunhan/perceptron-benchmark | 39958a15e9f8bfa82938a3f81d4f216457744b22 | [
"Apache-2.0"
] | 38 | 2019-06-10T04:19:42.000Z | 2022-02-15T05:21:23.000Z | examples/keras_ssd_example.py | jiayunhan/perceptron-benchmark | 39958a15e9f8bfa82938a3f81d4f216457744b22 | [
"Apache-2.0"
] | 4 | 2019-07-30T19:00:23.000Z | 2019-09-26T01:35:05.000Z | examples/keras_ssd_example.py | jiayunhan/perceptron-benchmark | 39958a15e9f8bfa82938a3f81d4f216457744b22 | [
"Apache-2.0"
] | 10 | 2019-06-10T05:45:33.000Z | 2021-04-22T08:33:28.000Z | """ Test case for Keras """
# Example: measure the minimum brightness perturbation needed to make the
# SSD300 detector miss a target class, using the perceptron benchmark API.
from perceptron.zoo.ssd_300.keras_ssd300 import SSD300
from perceptron.models.detection.keras_ssd300 import KerasSSD300Model
from perceptron.utils.image import load_image
from perceptron.benchmarks.brightness import BrightnessMetric
from perceptron.utils.criteria.detection import TargetClassMiss
from perceptron.utils.tools import bcolors
from perceptron.utils.tools import plot_image_objectdetection

# instantiate the SSD300 detector from the perceptron model zoo
ssd300 = SSD300()

# wrap it in the KerasSSD300Model adapter; pixel bounds are (0, 255)
kmodel = KerasSSD300Model(ssd300, bounds=(0, 255))

# load the 300x300 source image with pixel values in [0, 255],
# channels_last layout
image = load_image(shape=(300, 300), bounds=(0, 255), fname='car.png')

# criterion: the attack succeeds once class id 7 is no longer detected
# (presumably the "car" class -- verify against the model's label map)
metric = BrightnessMetric(kmodel, criterion=TargetClassMiss(7))

print(bcolors.BOLD + 'Process start' + bcolors.ENDC)
adversary = metric(image, unpack=False)
print(bcolors.BOLD + 'Process finished' + bcolors.ENDC)

# adversary.image is None when no perturbation satisfying the criterion
# was found within the metric's search range
if adversary.image is None:
    print(bcolors.WARNING + 'Warning: Cannot find an adversary!' + bcolors.ENDC)
    exit(-1)

################### print summary info #####################################
keywords = ['Keras', 'SSD300', 'TargetClassMiss', 'BrightnessMetric']

print(bcolors.HEADER + bcolors.UNDERLINE + 'Summary:' + bcolors.ENDC)
print('Configuration:' + bcolors.CYAN + ' --framework %s '
      '--model %s --criterion %s '
      '--metric %s' % tuple(keywords) + bcolors.ENDC)
print('Minimum perturbation required: %s' % bcolors.BLUE
      + str(adversary.distance) + bcolors.ENDC)
print('\n')

# print the original image and the adversary
plot_image_objectdetection(adversary, kmodel, bounds=(0, 255), title=", ".join(keywords), figname='examples/images/%s.png' % '_'.join(keywords))
| 41.021277 | 145 | 0.669087 |
c377c853596a16e597f271a0e7e5269f859cd807 | 224 | py | Python | math/470.ImplementRand10UsingRand7.py | bzd111/leetcode | fd8db9ef52000cd4373f00a8f60d131caa5fe25d | [
"Apache-2.0"
] | null | null | null | math/470.ImplementRand10UsingRand7.py | bzd111/leetcode | fd8db9ef52000cd4373f00a8f60d131caa5fe25d | [
"Apache-2.0"
] | null | null | null | math/470.ImplementRand10UsingRand7.py | bzd111/leetcode | fd8db9ef52000cd4373f00a8f60d131caa5fe25d | [
"Apache-2.0"
] | null | null | null | import sys
| 16 | 51 | 0.482143 |
c37854af006991db33cfa5319fe951302a09dbf2 | 164 | py | Python | segmentation/data/transforms/__init__.py | RajasekharChowdary9/panoptic-deeplab | 7645bc1cf51e3ebc85153666f26f8630a407b52b | [
"Apache-2.0"
] | 506 | 2020-06-12T01:07:56.000Z | 2022-03-26T00:56:52.000Z | segmentation/data/transforms/__init__.py | RajasekharChowdary9/panoptic-deeplab | 7645bc1cf51e3ebc85153666f26f8630a407b52b | [
"Apache-2.0"
] | 85 | 2020-06-12T04:51:31.000Z | 2022-03-23T16:19:44.000Z | segmentation/data/transforms/__init__.py | RajasekharChowdary9/panoptic-deeplab | 7645bc1cf51e3ebc85153666f26f8630a407b52b | [
"Apache-2.0"
] | 102 | 2020-06-12T06:45:44.000Z | 2022-03-22T14:03:04.000Z | from .build import build_transforms
from .pre_augmentation_transforms import Resize
from .target_transforms import PanopticTargetGenerator, SemanticTargetGenerator
| 41 | 79 | 0.896341 |
c379116efb10da15e4d433c54d3c5da28ac9b233 | 46,937 | py | Python | plasmapy/diagnostics/proton_radiography.py | MarikinPaulina/PlasmaPy | 9a9e4200981618fdfba4bd9347180b6cbe3040d7 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | plasmapy/diagnostics/proton_radiography.py | MarikinPaulina/PlasmaPy | 9a9e4200981618fdfba4bd9347180b6cbe3040d7 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | plasmapy/diagnostics/proton_radiography.py | MarikinPaulina/PlasmaPy | 9a9e4200981618fdfba4bd9347180b6cbe3040d7 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | """
Routines for the analysis of proton radiographs. These routines can be broadly
classified as either creating synthetic radiographs from prescribed fields or
methods of 'inverting' experimentally created radiographs to reconstruct the
original fields (under some set of assumptions).
"""
__all__ = [
"SyntheticProtonRadiograph",
]
import astropy.constants as const
import astropy.units as u
import numpy as np
import sys
import warnings
from tqdm import tqdm
from plasmapy import particles
from plasmapy.formulary.mathematics import rot_a_to_b
from plasmapy.particles import Particle
from plasmapy.plasma.grids import AbstractGrid
from plasmapy.simulation.particle_integrators import boris_push
def _coerce_to_cartesian_si(pos):
    """
    Convert a 3-tuple of `astropy.units.Quantity` coordinates -- Cartesian,
    cylindrical, or spherical, auto-detected from which components carry
    angular units -- into a plain numpy array of Cartesian coordinates in
    meters.
    """
    # Infer the coordinate system: (r, theta, phi) if the third component is
    # an angle, (r, theta, z) if only the second is, otherwise (x, y, z).
    units = [component.unit for component in pos]
    out = np.zeros(3)

    if units[2].is_equivalent(u.rad):
        # Spherical coordinates
        r = pos[0].to(u.m)
        theta = pos[1].to(u.rad).value
        phi = pos[2].to(u.rad).value
        out[0] = (r * np.sin(theta) * np.cos(phi)).to(u.m).value
        out[1] = (r * np.sin(theta) * np.sin(phi)).to(u.m).value
        out[2] = (r * np.cos(theta)).to(u.m).value
    elif units[1].is_equivalent(u.rad):
        # Cylindrical coordinates
        r = pos[0].to(u.m)
        theta = pos[1].to(u.rad).value
        out[0] = (r * np.cos(theta)).to(u.m).value
        out[1] = (r * np.sin(theta)).to(u.m).value
        out[2] = pos[2].to(u.m).value
    else:
        # Cartesian coordinates
        out[0] = pos[0].to(u.m).value
        out[1] = pos[1].to(u.m).value
        out[2] = pos[2].to(u.m).value

    return out
def _coast_to_grid(self):
r"""
Coasts all particles to the timestep when the first particle should
be entering the grid. Doing in this in one step (rather than pushing
the particles through zero fields) saves computation time.
"""
# Distance from the source to the nearest gridpoint
dist = np.min(np.linalg.norm(self.grid_arr - self.source, axis=3))
# Find the particle with the highest speed towards the grid
vmax = np.max(np.dot(self.v, self.src_n))
# Time for fastest possible particle to reach the grid.
t = dist / vmax
# Coast the particles to the advanced position
self.x = self.x + self.v * t
def _coast_to_plane(self, center, hdir, vdir, x=None):
"""
Calculates the positions where the current trajectories of each
particle impact a plane, described by the plane's center and
horizontal and vertical unit vectors.
Returns an [nparticles, 3] array of the particle positions in the plane
By default this function does not alter self.x. The optional keyword
x can be used to pass in an output array that will used to hold
the positions in the plane. This can be used to directly update self.x
as follows:
self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x = self.x)
"""
normal = np.cross(hdir, vdir)
# Calculate the time required to evolve each particle into the
# plane
t = np.inner(center[np.newaxis, :] - self.x, normal) / np.inner(self.v, normal)
# Calculate particle positions in the plane
if x is None:
# If no output array is provided, preallocate
x = np.empty_like(self.x)
x[...] = self.x + self.v * t[:, np.newaxis]
# Check that all points are now in the plane
# (Eq. of a plane is nhat*x + d = 0)
plane_eq = np.dot(x - center, normal)
assert np.allclose(plane_eq, 0, atol=1e-6)
return x
def _remove_deflected_particles(self):
r"""
Removes any particles that have been deflected away from the detector
plane (eg. those that will never hit the grid)
"""
dist_remaining = np.dot(self.x, self.det_n) + np.linalg.norm(self.detector)
v_towards_det = np.dot(self.v, -self.det_n)
# If particles have not yet reached the detector plane and are moving
# away from it, they will never reach the detector.
# So, we can remove them from the arrays
# Find the indices of all particles that we should keep:
# i.e. those still moving towards the detector.
ind = np.logical_not((v_towards_det < 0) & (dist_remaining > 0)).nonzero()[0]
# Drop the other particles
self.x = self.x[ind, :]
self.v = self.v[ind, :]
self.v_init = self.v_init[ind, :]
self.nparticles_grid = self.x.shape[0]
# Store the number of particles deflected
self.fract_deflected = (self.nparticles - ind.size) / self.nparticles
# Warn the user if a large number of particles are being deflected
if self.fract_deflected > 0.05:
warnings.warn(
f"{100*self.fract_deflected:.1f}% particles have been "
"deflected away from the detector plane. The fields "
"provided may be too high to successfully radiograph "
"with this particle energy.",
RuntimeWarning,
)
def _push(self):
    r"""
    Advance the tracked particles by one step of the time-centered
    Boris algorithm, interpolating E and B from the grid onto each
    particle's current position. Only particles indexed by
    ``self.grid_ind`` are advanced; ``self.x`` / ``self.v`` are updated
    in place for those particles.
    """
    # Get a list of positions (input for interpolator); positions are in
    # meters internally, so attach the unit here.
    pos = self.x[self.grid_ind, :] * u.m

    # Update the list of particles on and off the grid
    self.on_grid = self.grid.on_grid(pos)
    # entered_grid is zero at the end if a particle has never
    # entered the grid
    self.entered_grid += self.on_grid

    # Estimate the E and B fields for each particle
    # Note that this interpolation step is BY FAR the slowest part of the push
    # loop. Any speed improvements will have to come from here.
    if self.field_weighting == "volume averaged":
        Ex, Ey, Ez, Bx, By, Bz = self.grid.volume_averaged_interpolator(
            pos, "E_x", "E_y", "E_z", "B_x", "B_y", "B_z", persistent=True,
        )
    elif self.field_weighting == "nearest neighbor":
        Ex, Ey, Ez, Bx, By, Bz = self.grid.nearest_neighbor_interpolator(
            pos, "E_x", "E_y", "E_z", "B_x", "B_y", "B_z", persistent=True,
        )

    # Create arrays of E and B as required by push algorithm: shape
    # (nparticles, 3) in SI units, with the component axis last.
    E = np.array(
        [Ex.to(u.V / u.m).value, Ey.to(u.V / u.m).value, Ez.to(u.V / u.m).value]
    )
    E = np.moveaxis(E, 0, -1)
    B = np.array([Bx.to(u.T).value, By.to(u.T).value, Bz.to(u.T).value])
    B = np.moveaxis(B, 0, -1)

    # Calculate the adaptive timestep from the fields currently experienced
    # by the particles
    # If user sets dt explicitly, that's handled in _adaptive_dt
    dt = self._adaptive_dt(Ex, Ey, Ez, Bx, By, Bz)

    # TODO: Test v/c and implement relativistic Boris push when required
    # vc = np.max(v)/_c

    # boris_push mutates x and v in place; copy the tracked slice out,
    # push it, and write it back.
    x = self.x[self.grid_ind, :]
    v = self.v[self.grid_ind, :]
    boris_push(x, v, B, E, self.q, self.m, dt)
    self.x[self.grid_ind, :] = x
    self.v[self.grid_ind, :] = v
def _stop_condition(self):
r"""
The stop condition is that most of the particles have entered the grid
and almost all have now left it.
"""
# Count the number of particles who have entered, which is the
# number of non-zero entries in entered_grid
self.num_entered = np.nonzero(self.entered_grid)[0].size
# How many of the particles have entered the grid
self.fract_entered = np.sum(self.num_entered) / self.nparticles_grid
# Of the particles that have entered the grid, how many are currently
# on the grid?
# if/else avoids dividing by zero
if np.sum(self.num_entered) > 0:
still_on = np.sum(self.on_grid) / np.sum(self.num_entered)
else:
still_on = 0.0
if self.fract_entered > 0.1 and still_on < 0.001:
# Warn user if < 10% of the particles ended up on the grid
if self.num_entered < 0.1 * self.nparticles:
warnings.warn(
f"Only {100*self.num_entered/self.nparticles:.2f}% of "
"particles entered the field grid: consider "
"decreasing the max_theta to increase this "
"number.",
RuntimeWarning,
)
return True
else:
return False
def run(
    self, dt=None, field_weighting="volume averaged",
):
    r"""
    Runs a particle-tracing simulation.

    Timesteps are adaptively calculated based on the local grid resolution
    of the particles and the electric and magnetic fields they are
    experiencing. After all particles have left the grid, they are advanced
    to the detector plane where they can be used to construct a synthetic
    diagnostic image.

    Parameters
    ----------
    dt : `~astropy.units.Quantity`, optional
        An explicitly set timestep in units convertible to seconds.
        Setting this optional keyword overrules the adaptive time step
        capability and forces the use of this timestep throughout. If a
        tuple of timesteps is provided, the adaptive timestep will be
        clamped between the first and second values.

    field_weighting : str
        String that selects the field weighting algorithm used to determine
        what fields are felt by the particles. Options are:

        * 'nearest neighbor': Particles are assigned the fields on
            the grid vertex closest to them.
        * 'volume averaged' : The fields experienced by a particle are a
            volume-average of the eight grid points surrounding them.

        The default is 'volume averaged'.

    Returns
    -------
    None.
    """
    # Load and validate inputs
    field_weightings = ["volume averaged", "nearest neighbor"]
    if field_weighting in field_weightings:
        self.field_weighting = field_weighting
    else:
        # BUGFIX: the message fragments were previously passed as three
        # separate ValueError arguments (trailing commas), producing a
        # tuple-of-strings message; concatenate into one readable string.
        raise ValueError(
            f"{field_weighting} is not a valid option for "
            "field_weighting. Valid choices are "
            f"{field_weightings}"
        )

    if dt is None:
        # Set dt as an infinite range by default (auto dt with no restrictions)
        self.dt = np.array([0.0, np.inf]) * u.s
    else:
        self.dt = dt
    self.dt = (self.dt).to(u.s).value

    # Check to make sure particles have already been generated
    if not hasattr(self, "x"):
        raise ValueError(
            "Either the create_particles or load_particles method must be "
            "called before running the particle tracing algorithm."
        )

    # If meshes have been added, apply them now
    for mesh in self.mesh_list:
        self._apply_wire_mesh(**mesh)

    # Store a copy of the initial velocity distribution in memory
    # This will be used later to calculate the maximum deflection
    self.v_init = np.copy(self.v)

    # Calculate the maximum velocity
    # Used for determining the grid crossing maximum timestep
    self.vmax = np.max(np.linalg.norm(self.v, axis=-1))

    # Determine which particles should be tracked
    # This array holds the indices of all particles that WILL hit the grid
    # Only these particles will actually be pushed through the fields
    self.grid_ind = np.where(self.theta < self.max_theta_hit_grid)[0]
    self.nparticles_grid = len(self.grid_ind)
    self.fract_tracked = self.nparticles_grid / self.nparticles

    # Create flags for tracking when particles during the simulation
    # on_grid -> zero if the particle is off grid, 1 otherwise
    self.on_grid = np.zeros([self.nparticles_grid])
    # Entered grid -> non-zero if particle EVER entered the grid
    self.entered_grid = np.zeros([self.nparticles_grid])

    # Generate a null distribution of points (the result in the absence of
    # any fields) for statistical comparison
    self.x0 = self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir)

    # Advance the particles to near the start of the grid
    self._coast_to_grid()

    # Initialize a "progress bar" (really more of a meter)
    # Setting sys.stdout lets this play nicely with regular print()
    pbar = tqdm(
        initial=0,
        total=self.nparticles_grid + 1,
        disable=not self.verbose,
        desc="Particles on grid",
        unit="particles",
        bar_format="{l_bar}{bar}{n:.1e}/{total:.1e} {unit}",
        file=sys.stdout,
    )

    # Push the particles until the stop condition is satisfied
    # (no more particles on the simulation grid)
    while not self._stop_condition():
        n_on_grid = np.sum(self.on_grid)
        pbar.n = n_on_grid
        pbar.last_print_n = n_on_grid
        pbar.update()

        self._push()
    pbar.close()

    # Remove particles that will never reach the detector
    self._remove_deflected_particles()

    # Advance the particles to the image plane
    self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x=self.x)

    # Log a summary of the run
    self._log("Run completed")

    self._log("Fraction of particles tracked: " f"{self.fract_tracked*100:.1f}%")

    self._log(
        "Fraction of tracked particles that entered the grid: "
        f"{self.fract_entered*100:.1f}%"
    )

    self._log(
        "Fraction of tracked particles deflected away from the "
        "detector plane: "
        f"{self.fract_deflected*100}%"
    )
# *************************************************************************
# Synthetic diagnostic methods (creating output)
# *************************************************************************
def synthetic_radiograph(
    self, size=None, bins=(200, 200), ignore_grid=False, optical_density=False
):
    r"""
    Calculate a "synthetic radiograph" (particle count histogram in the
    image plane).

    Parameters
    ----------
    size : `~astropy.units.Quantity`, shape (2,2)
        The size of the detector array, specified as the minimum
        and maximum values included in both the horizontal and vertical
        directions in the detector plane coordinates. Shape is
        [[hmin,hmax], [vmin, vmax]]. Units must be convertible to meters.

    bins : sequence of two integers
        The number of bins in each direction in the format (hbins, vbins).
        The default is (200, 200).
        (BUGFIX: the default was previously a mutable list literal.)

    ignore_grid: bool
        If True, returns the intensity in the image plane in the absence
        of simulated fields.

    optical_density: bool
        If True, return the optical density rather than the intensity

        .. math::
            OD = -log_{10}(Intensity/I_0)

        where I_O is the intensity on the detector plane in the absence of
        simulated fields. Default is False.

    Returns
    -------
    hax : `~astropy.units.Quantity` array shape (hbins,)
        The horizontal axis of the synthetic radiograph in meters.

    vax : `~astropy.units.Quantity` array shape (vbins, )
        The vertical axis of the synthetic radiograph in meters.

    intensity : ndarray, shape (hbins, vbins)
        The number of particles counted in each bin of the histogram.
    """
    # Note that, at the end of the simulation, all particles were moved
    # into the image plane.

    # If ignore_grid is True, use the predicted positions in the absence of
    # simulated fields
    if ignore_grid:
        x = self.x0
    else:
        x = self.x

    # Determine locations of points in the detector plane using unit
    # vectors
    xloc = np.dot(x - self.detector, self.det_hdir)
    yloc = np.dot(x - self.detector, self.det_vdir)

    if size is None:
        # If a detector size is not given, choose lengths based on the
        # dimensions of the grid
        w = self.mag * np.max(
            [
                np.max(np.abs(self.grid.pts0.to(u.m).value)),
                np.max(np.abs(self.grid.pts1.to(u.m).value)),
                np.max(np.abs(self.grid.pts2.to(u.m).value)),
            ]
        )

        # The factor of 5 here is somewhat arbitrary: we just want a
        # region a few times bigger than the image of the grid on the
        # detector, since particles could be deflected out
        size = 5 * np.array([[-w, w], [-w, w]]) * u.m

    # Generate the histogram
    intensity, h, v = np.histogram2d(
        xloc, yloc, range=size.to(u.m).value, bins=bins
    )

    # h, v are the bin edges: compute the centers to produce arrays
    # of the right length (then trim off the extra point)
    h = ((h + np.roll(h, -1)) / 2)[0:-1]
    v = ((v + np.roll(v, -1)) / 2)[0:-1]

    # Throw a warning if < 50% of the particles are included on the
    # histogram
    percentage = np.sum(intensity) / self.nparticles
    if percentage < 0.5:
        warnings.warn(
            f"Only {percentage:.2%} of the particles are shown "
            "on this synthetic radiograph. Consider increasing "
            "the size to include more.",
            RuntimeWarning,
        )

    if optical_density:
        # Generate the null radiograph
        x, y, I0 = self.synthetic_radiograph(size=size, bins=bins, ignore_grid=True)

        # Calculate I0 as the mean of the non-zero values in the null
        # histogram. Zeros are just outside of the illuminated area.
        I0 = np.mean(I0[I0 != 0])

        # Overwrite any zeros in intensity to avoid log10(0)
        intensity[intensity == 0] = 1

        # Calculate the optical_density
        intensity = -np.log10(intensity / I0)

    return h * u.m, v * u.m, intensity
| 38.919569 | 90 | 0.582994 |
c37a40407f09301be18f33044c4803950764471c | 924 | py | Python | polyengine/switch_start.py | AkanshDivker/polyengine | f81e1ef68d92470b51888db1d0c693b6d8c6b45f | [
"MIT"
] | 5 | 2020-04-11T23:56:13.000Z | 2021-05-22T09:09:36.000Z | polyengine/switch_start.py | AkanshDivker/polyengine | f81e1ef68d92470b51888db1d0c693b6d8c6b45f | [
"MIT"
] | 4 | 2019-10-29T07:17:36.000Z | 2019-11-27T05:36:01.000Z | polyengine/switch_start.py | AkanshDivker/polyengine | f81e1ef68d92470b51888db1d0c693b6d8c6b45f | [
"MIT"
] | 2 | 2020-10-29T14:03:09.000Z | 2021-01-01T07:53:16.000Z | # switch_start.py
# Adding another switch statement
# Authors : Seoyeon Hwang
import string
import random
| 30.8 | 165 | 0.548701 |
c37d3cf95d24a23185d3d7d87e99934db95b537d | 5,494 | py | Python | focal_mech/demo/test6.py | blasscoc/FocalMechClassifier | 8d54d5a19cea447c030ada596369e47e7f39d483 | [
"MIT"
] | 12 | 2016-05-31T04:18:13.000Z | 2021-10-09T06:45:43.000Z | focal_mech/demo/test6.py | blasscoc/FocalMechClassifier | 8d54d5a19cea447c030ada596369e47e7f39d483 | [
"MIT"
] | 2 | 2019-08-09T20:30:26.000Z | 2021-02-09T02:14:04.000Z | focal_mech/demo/test6.py | blasscoc/FocalMechClassifier | 8d54d5a19cea447c030ada596369e47e7f39d483 | [
"MIT"
] | 7 | 2016-08-06T03:13:24.000Z | 2021-09-26T14:39:41.000Z | from numpy import array, rad2deg, pi, mgrid, argmin
from matplotlib.pylab import contour
import matplotlib.pyplot as plt
import mplstereonet
from obspy.imaging.beachball import aux_plane
from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm
from focal_mech.io.read_hash import read_demo, read_hash_solutions
from focal_mech.util.hash_routines import hash_to_classifier
from focal_mech.lib.sph_harm import get_sph_harm
from focal_mech.lib.correlate import corr_shear
# Load HASH reference focal-mechanism solutions and the raw first-motion
# polarity observations for the demo dataset.
hash_solns = read_hash_solutions("example1.out")

# we want solutions that are symmetric
polarity_data = read_demo("north1.phase", "scsn.reverse", reverse=True)
inputs = hash_to_classifier(polarity_data, parity=1)

# ---- Event 1: classify polarities, expand the decision surface in
# spherical harmonics, contour the nodal lines, and extract the
# best-fitting double-couple solution. ------------------------------------
event = 3146815
result = classify(*inputs[event], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)

# Coefficients up to degree 2, ordered (l, m) = (0,0), (1,-1)..(1,1),
# (2,-2)..(2,2).
coeffs = array([Alm[0,0],
                Alm[1,-1], Alm[1,0], Alm[1,1],
                Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])

svm_soln, f = corr_shear(Alm)

# Evaluate the harmonic expansion on a (lat, lon) grid over the sphere.
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution

# The zero-level contour of the mechanism gives the nodal lines.
c = contour(longi, lati, mech, [0])
pth1 = c.collections[0].get_paths()[0].vertices
pth1 = rad2deg(pth1)
pth2 = c.collections[0].get_paths()[1].vertices
pth2 = rad2deg(pth2)

hash_focal = rad2deg(hash_solns[event])

# ---- Event 2: same pipeline. --------------------------------------------
event2 = 3158361
result = classify(*inputs[event2], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)

coeffs = array([Alm[0,0],
                Alm[1,-1], Alm[1,0], Alm[1,1],
                Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])

svm_soln2, f = corr_shear(Alm)

resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution

c = contour(longi, lati, mech, [0])
pth3 = c.collections[0].get_paths()[0].vertices
pth3 = rad2deg(pth3)
pth4 = c.collections[0].get_paths()[1].vertices
pth4 = rad2deg(pth4)

hash_focal2 = rad2deg(hash_solns[event2])

# ---- Event 3: same pipeline. --------------------------------------------
event3 = 3153955
result = classify(*inputs[event3], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)

coeffs = array([Alm[0,0],
                Alm[1,-1], Alm[1,0], Alm[1,1],
                Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])

svm_soln3, f = corr_shear(Alm)

resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution

c = contour(longi, lati, mech, [0])
pth5 = c.collections[0].get_paths()[0].vertices
pth5 = rad2deg(pth5)
pth6 = c.collections[0].get_paths()[1].vertices
pth6 = rad2deg(pth6)

hash_focal3 = rad2deg(hash_solns[event3])

# ---- Plot all three events on stereonets: dotted red = harmonic nodal
# lines, solid red = classifier solution (+ auxiliary plane), solid green
# = HASH solution, red/white circles = up/down first motions. -------------
fig = plt.figure(facecolor="white", figsize=(10,20))
ax = fig.add_subplot(221, projection='stereonet')

ax.rake(pth1[:,0], pth1[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth2[:,0], pth2[:,1] +90.0, 90.0, ':', color='red', linewidth=3)

strike, dip, rake = svm_soln
ax.plane(strike, dip, '-r', linewidth=2)

strike, dip, rake = aux_plane(*svm_soln)
ax.plane(strike, dip, '-r', linewidth=2)

strike, dip, rake = hash_focal
ax.plane(strike-90, dip, 'g-', linewidth=2)

strike, dip, rake = aux_plane(*hash_focal)
ax.plane(strike-90, dip,'g-', linewidth=2)

azi = rad2deg(polarity_data[event][:,0])
toa = rad2deg(polarity_data[event][:,1])
polarity = polarity_data[event][:,2]
for a, t, p in zip(azi, toa, polarity):
    if p > 0:
        ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
    else:
        ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()

ax = fig.add_subplot(222, projection='stereonet')

ax.rake(pth3[:,0], pth3[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth4[:,0], pth4[:,1] +90.0, 90.0, ':', color='red', linewidth=3)

strike, dip, rake = svm_soln2
ax.plane(strike, dip, '-r', linewidth=2)

strike, dip, rake = aux_plane(*svm_soln2)
ax.plane(strike, dip, '-r', linewidth=2)

strike, dip, rake = hash_focal2
ax.plane(strike-90, dip, 'g-', linewidth=2)

strike, dip, rake = aux_plane(*hash_focal2)
ax.plane(strike-90, dip,'g-', linewidth=2)

azi = rad2deg(polarity_data[event2][:,0])
toa = rad2deg(polarity_data[event2][:,1])
polarity = polarity_data[event2][:,2]
for a, t, p in zip(azi, toa, polarity):
    if p > 0:
        ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
    else:
        ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()

# NOTE(review): subplot 223 is skipped (events go in 221, 222, 224) --
# presumably intentional layout; confirm.
ax = fig.add_subplot(224, projection='stereonet')

ax.rake(pth5[:,0], pth5[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth6[:,0], pth6[:,1] +90.0, 90.0, ':', color='red', linewidth=3)

strike, dip, rake = svm_soln3
ax.plane(strike, dip, '-r', linewidth=2)

strike, dip, rake = aux_plane(*svm_soln3)
ax.plane(strike, dip, '-r', linewidth=2)

strike, dip, rake = hash_focal3
ax.plane(strike-90, dip, 'g-', linewidth=2)

strike, dip, rake = aux_plane(*hash_focal3)
ax.plane(strike-90, dip,'g-', linewidth=2)

azi = rad2deg(polarity_data[event3][:,0])
toa = rad2deg(polarity_data[event3][:,1])
polarity = polarity_data[event3][:,2]
for a, t, p in zip(azi, toa, polarity):
    if p > 0:
        ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
    else:
        ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()

plt.tight_layout(pad=4.0, h_pad=20.0)
plt.show()
| 26.669903 | 76 | 0.67874 |
c37d8d6e64bf2027aa73ad7627b83cab9c6c0c89 | 3,102 | py | Python | gigantumcli/changelog.py | fossabot/gigantum-cli | d8054a8741484592ef1da750dd23affadc99fb5f | [
"MIT"
] | null | null | null | gigantumcli/changelog.py | fossabot/gigantum-cli | d8054a8741484592ef1da750dd23affadc99fb5f | [
"MIT"
] | null | null | null | gigantumcli/changelog.py | fossabot/gigantum-cli | d8054a8741484592ef1da750dd23affadc99fb5f | [
"MIT"
] | null | null | null | # Copyright (c) 2017 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import requests
import json
| 33.717391 | 94 | 0.627982 |
c37ed9ece51e833849523b39409da272c30bdafb | 7,311 | py | Python | main_old/understanding_smoothing_microsoft.py | benjaminleroy/smooth_rf | de166a7e777e8a203656b194d772def9d3c8f06d | [
"MIT"
] | 3 | 2019-04-04T04:57:36.000Z | 2022-01-14T09:42:05.000Z | main_old/understanding_smoothing_microsoft.py | benjaminleroy/smooth_rf | de166a7e777e8a203656b194d772def9d3c8f06d | [
"MIT"
] | 1 | 2019-04-04T04:57:24.000Z | 2019-05-29T18:03:31.000Z | main_old/understanding_smoothing_microsoft.py | benjaminleroy/smooth_rf | de166a7e777e8a203656b194d772def9d3c8f06d | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.ensemble
import sklearn.metrics
import sklearn
import progressbar
import sklearn.model_selection
from plotnine import *
import pdb
import sys
sys.path.append("smooth_rf/")
import smooth_base
import smooth_level
# function
def average_depth(random_forest, data):
    """Mean decision-path depth of each observation, averaged over trees.

    Arguments:
    ----------
    random_forest : sklearn random forest model (fit)
    data : array (n, p)
        observations that can be pushed through every fitted tree
    Returns:
    --------
    average_depth : array (n,)
        per-observation node count along the decision path, averaged
        across all trees in the forest
    """
    estimators = random_forest.estimators_
    total_depth = np.zeros(data.shape[0])
    for tree in estimators:
        # Row sums of decision_path count the nodes each sample visits.
        total_depth += np.array(tree.decision_path(data).sum(axis = 1)).ravel()
    return total_depth / len(estimators)
# start of analysis
# Simulate a 2-D toy dataset from the smooth_rf helpers.
data, y = smooth_base.generate_data(large_n = 650)
data_vis = pd.DataFrame(data = {"x1":data[:,0],
                                "x2":data[:,1],
                                "y":y},
                        columns = ["x1","x2","y"])
# Scatter plot of the raw training data, colored by class value.
ggout = ggplot(data_vis) +\
    geom_point(aes(x = "x1",y ="x2", color = "factor(y)")) +\
    theme_minimal() +\
    labs(x= "X1", y = "X2", color = "value (minus 100)")
# Fit the base random forest, then the smoothed variant on top of it
# (tuned on out-of-bag samples; see resample_tune=False below).
rf = sklearn.ensemble.RandomForestRegressor(n_estimators = 300)
rf_fit = rf.fit(data,y)
smooth_rf_opt, smooth_rf_last ,_, _ = smooth_base.smooth(
                                    rf_fit,
                                    X_trained = data,
                                    y_trained = y.ravel(),
                                    X_tune = None,
                                    y_tune = None,
                                    resample_tune= False, # oob
                                    no_constraint = False,
                                    subgrad_max_num = 10000,
                                    subgrad_t_fix = 1,
                                    parents_all=True,
                                    verbose = True,
                                    all_trees = False,
                                    initial_lamb_seed = None)
# test data
# Draw a large held-out test set and shuffle it.
data_test, y_test = smooth_base.generate_data(large_n = 10000)
reorder = np.random.choice(data_test.shape[0],
                           size = data_test.shape[0], replace= False)
data_test = data_test[reorder,:]
y_test = y_test[reorder]
# Predictions and per-point absolute errors for both models.
yhat_base = rf_fit.predict(data_test)
yhat_smooth = smooth_rf_opt.predict(data_test)
base_mse = sklearn.metrics.mean_squared_error(y_true = y_test, y_pred = yhat_base)
smooth_mse = sklearn.metrics.mean_squared_error(y_true = y_test, y_pred = yhat_smooth)
error_base = np.abs(yhat_base - y_test)
error_smooth = np.abs(yhat_smooth - y_test)
# Shared color-scale limits so the plots below are comparable.
extreme_binary = np.max([np.max(np.abs(error_base)),
                         np.max(np.abs(error_smooth))])
col_vis = error_base - error_smooth
extreme = np.max(np.abs(col_vis))
mean_depth_test = average_depth(rf_fit,data_test)
data_vis = pd.DataFrame(data = {"X1":data_test[:,0],
                                "X2":data_test[:,1],
                                "y": y_test.ravel(),
                                "error_base":error_base.copy(),
                                "error_smooth":error_smooth.copy(),
                                "error":col_vis.copy(),
                                "mean_depth":mean_depth_test.copy()},
                        columns = ["X1","X2","y","error",
                                   "error_base","error_smooth",
                                   "mean_depth"])
# Diagnostic plots a-g, saved to PDF further below.
# a: spatial map of the error difference (base - smooth; red favors smooth).
a = ggplot(data_vis) +\
    geom_point(aes(x = "X1", y="X2", color = "error"),
               size = .5) +\
    scale_color_continuous(name = "bwr",
                           limits= [-extreme, extreme]) +\
    theme_bw() +\
    labs(color = "Difference in Error",
         title = r'Difference in Error ($Error_{base} - Error_{smooth}$)')
# b: absolute error of the base random forest.
b = ggplot(data_vis) +\
    geom_point(aes(x = "X1", y="X2", color = "error_base"),
               size = .5) +\
    scale_color_continuous(name = "binary",
                           limits= [0, extreme_binary]) +\
    theme_bw() +\
    labs(color = "Error",
         title = "Error from Base Random Forest")
# c: absolute error of the smoothed random forest (same color scale as b).
c = ggplot(data_vis) +\
    geom_point(aes(x = "X1", y="X2", color = "error_smooth"),
               size = .5) +\
    scale_color_continuous(name = "binary",
                           limits= [0, extreme_binary]) +\
    theme_bw() +\
    labs(color = "Error",
         title = "Error from Smoothed Random Forest")
# d: ground-truth labels of the test set.
d = ggplot(data_vis) +\
    geom_point(aes(x = "X1", y="X2", color = "factor(y)"),
               size = .5) +\
    theme_bw() +\
    labs(color = "True Value (discrete)",
         title = "Test Set True Values")
# e: error difference vs. mean tree depth of each point.
e = ggplot(data_vis,aes(x = "mean_depth", y = "error")) +\
    geom_point(alpha = .1) +\
    theme_bw() +\
    labs(x = "Mean depth in Forest",
         y = "Difference in Error",
         title = "Lack of relationship between diff in errors and depth")
# f: spatial map of mean tree depth.
f = ggplot(data_vis, aes(x = "X1", y = "X2", color = "mean_depth")) +\
    geom_point() +\
    scale_color_continuous(name = "Blues") +\
    theme_bw() +\
    labs(color = "Mean depth in Forest",
         title = "Mean depth in Forest (Depth averaged across trees)")
# g: per-point error of base vs. smoothed model, with the y=x reference.
g = ggplot(data_vis) +\
    geom_point(aes(x = "error_base", y = "error_smooth"),
               alpha = .05) +\
    geom_abline(intercept = 0, slope = 1) +\
    theme_bw() +\
    labs(x = "Error from Random Forest",
         y = "Error from Smooth Random Forest",
         title = "Comparing Errors Between Models",
         subtitle = r"(total error: rf: %f vs srf: %f)" %\
                      (base_mse, smooth_mse))
# Persist each plot as its own PDF, then one combined multi-page PDF.
save_as_pdf_pages([a + theme(figure_size = (8,6))],
                  filename = "images/diff_error"+"_understanding_smoothing.pdf")
save_as_pdf_pages([b + theme(figure_size = (8,6))],
                  filename = "images/error_base"+"_understanding_smoothing.pdf")
save_as_pdf_pages([c + theme(figure_size = (8,6))],
                  filename = "images/error_smooth"+"_understanding_smoothing.pdf")
save_as_pdf_pages([d + theme(figure_size = (8,6))],
                  filename = "images/truth"+"_understanding_smoothing.pdf")
save_as_pdf_pages([e + theme(figure_size = (8,6))],
                  filename = "images/mean_depth_diff_error"+"_understanding_smoothing.pdf")
save_as_pdf_pages([f + theme(figure_size = (8,6))],
                  filename = "images/mean_depth"+"_understanding_smoothing.pdf")
save_as_pdf_pages([g + theme(figure_size = (8,6))],
                  filename = "images/error_vs_error"+"_understanding_smoothing.pdf")
save_as_pdf_pages([a + theme(figure_size = (8,6)),
                   b + theme(figure_size = (8,6)),
                   c + theme(figure_size = (8,6)),
                   d + theme(figure_size = (8,6)),
                   e + theme(figure_size = (8,6)),
                   f + theme(figure_size = (8,6)),
                   g + theme(figure_size = (8,6))],
                  filename = "images/understanding_smoothing.pdf")
# some of these observations might be due to the decision on the values of the classes
# we'll see
| 34.004651 | 91 | 0.56285 |
c37ef55b28f73e2f2453409e73faf8e176864615 | 1,147 | py | Python | AER/Experiments/Metrics.py | LeBenchmark/Interspeech2021 | 2a3b424389631b317b39973291b7252bbf44a73b | [
"MIT"
] | 48 | 2021-03-25T14:00:04.000Z | 2022-03-27T17:00:00.000Z | AER/Experiments/Metrics.py | LeBenchmark/Interspeech2021 | 2a3b424389631b317b39973291b7252bbf44a73b | [
"MIT"
] | 2 | 2021-04-16T13:21:44.000Z | 2021-06-16T15:23:09.000Z | AER/Experiments/Metrics.py | LeBenchmark/Interspeech2021 | 2a3b424389631b317b39973291b7252bbf44a73b | [
"MIT"
] | 2 | 2021-07-05T13:42:23.000Z | 2021-09-01T10:24:00.000Z | import numpy as np
def CCC(y_true, y_pred):
    """Concordance correlation coefficient of two numpy arrays.

    Computed as 2*cov(x, y) / (var(x) + var(y) + (mean(x) - mean(y))**2),
    using population (biased) moments, matching numpy's default ``var``.
    """
    mean_true = y_true.mean()
    mean_pred = y_pred.mean()
    # Population covariance via E[xy] - E[x]E[y].
    covariance = (y_true * y_pred).mean() - mean_true * mean_pred
    denominator = y_true.var() + y_pred.var() + (mean_true - mean_pred) ** 2
    return 2 * covariance / denominator
def MSE(y_true, y_pred):
    """Mean squared error of two numpy arrays, averaged over axis 0
    (so 2-D inputs yield one value per column)."""
    squared_errors = np.square(y_true - y_pred)
    return squared_errors.mean(axis=0)
def RMSE(y_true, y_pred):
    """
    Calculate the Root Mean Square Error for two numpy arrays.

    Square root of :func:`MSE`, so for 2-D inputs this is computed
    per column (axis 0).
    """
    return np.sqrt(MSE(y_true, y_pred))
def perfMeasure(y_actual, y_pred):
    """
    Calculate confusion-matrix counts for labels encoded as 1 / -1.

    Returns a tuple (TP, FP, TN, FN) where:
      TP: prediction 1 and actual 1
      FP: prediction 1 and actual anything else
      TN: prediction -1 and actual -1
      FN: prediction -1 and actual anything else
    Predictions outside {1, -1} are not counted.
    """
    TP = 0
    FP = 0
    TN = 0
    FN = 0
    # zip pairs each prediction with its actual label; the original indexed
    # loop assumed y_actual was at least as long as y_pred.
    for actual, pred in zip(y_actual, y_pred):
        if pred == 1:
            if actual == 1:
                TP += 1
            else:
                FP += 1
        elif pred == -1:
            if actual == -1:
                TN += 1
            else:
                FN += 1
    return (TP, FP, TN, FN)
| 23.408163 | 59 | 0.529207 |
c37f533b46624d83873bcd5b9e4314c8ccb4405c | 11,734 | py | Python | myo/device_listener.py | ehliang/myo-unlock | 059e130a90e44df3869dd892e216c020d6d97a7e | [
"MIT"
] | 1 | 2021-06-25T02:27:31.000Z | 2021-06-25T02:27:31.000Z | myo/device_listener.py | ehliang/myo-unlock | 059e130a90e44df3869dd892e216c020d6d97a7e | [
"MIT"
] | null | null | null | myo/device_listener.py | ehliang/myo-unlock | 059e130a90e44df3869dd892e216c020d6d97a7e | [
"MIT"
] | null | null | null | # Copyright (c) 2015 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import abc
import six
import time
import threading
import warnings
from .lowlevel.enums import EventType, Pose, Arm, XDirection
from .utils.threading import TimeoutClock
from .vector import Vector
from .quaternion import Quaternion
def __init__(self):
    """Set up the feed's device registry.

    ``synchronized`` is the condition variable that guards ``_myos`` and
    that :meth:`wait_for_single_device` waits on. ``_myos`` maps device
    keys to Myo proxy objects -- presumably populated by pairing
    callbacks that are not part of this excerpt (TODO confirm).
    """
    super(Feed, self).__init__()
    self.synchronized = threading.Condition()
    self._myos = {}
def get_devices(self):
    """
    get_devices() -> list of Feed.MyoProxy

    Snapshot list of every Myo currently registered with this feed
    (see :meth:`get_connected_devices` for only the connected ones).
    """
    with self.synchronized:
        return [proxy for proxy in self._myos.values()]
def get_connected_devices(self):
    """
    get_connected_devices(self) -> list of Feed.MyoProxy

    Like :meth:`get_devices`, but keeps only the proxies whose
    ``connected`` flag is set.
    """
    with self.synchronized:
        return list(filter(lambda myo: myo.connected, self._myos.values()))
def wait_for_single_device(self, timeout=None, interval=0.5):
    """
    wait_for_single_device(timeout) -> Feed.MyoProxy or None

    Waits until a Myo was paired **and** connected with the Hub
    and returns it. If the *timeout* is exceeded, returns None.
    This function will not return a Myo that is only paired but
    not connected.

    :param timeout: The maximum time to wait for a device.
    :param interval: The interval at which the function should
        exit sleeping. We can not sleep endlessly, otherwise
        the main thread can not be exit, eg. through a
        KeyboardInterrupt.
    """
    timer = TimeoutClock(timeout)
    with self.synchronized:
        # As long as there are no Myo's connected, wait until we
        # get notified about a change.
        while not timer.exceeded:
            # Check if we found a Myo that is connected.
            for myo in six.itervalues(self._myos):
                if myo.connected:
                    return myo
            # Cap each sleep at *interval* so KeyboardInterrupt can
            # still reach the main thread.
            remaining = timer.remaining
            if interval is not None and remaining > interval:
                remaining = interval
            self.synchronized.wait(remaining)
    return None
# DeviceListener
| 32.325069 | 82 | 0.587183 |
c3806b9e128d8474be2a0c8c16ed645a6cd61414 | 333 | py | Python | utilities/poisson.py | lukepinkel/pylmm | b9e896222f077b000f9a752be77cfc9e60b49f19 | [
"MIT"
] | null | null | null | utilities/poisson.py | lukepinkel/pylmm | b9e896222f077b000f9a752be77cfc9e60b49f19 | [
"MIT"
] | null | null | null | utilities/poisson.py | lukepinkel/pylmm | b9e896222f077b000f9a752be77cfc9e60b49f19 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 13:34:49 2020
@author: lukepinkel
"""
import numpy as np
import scipy as sp
import scipy.special
| 19.588235 | 65 | 0.618619 |
c382207d4a3aa645831dc8af78380466763f0458 | 581 | py | Python | iotest.py | AaltoRSE/ImageNetTools | 1ed8b8c38bd14eb47fc6167bf194f327a2696bf1 | [
"BSD-3-Clause"
] | 1 | 2021-11-15T11:21:55.000Z | 2021-11-15T11:21:55.000Z | iotest.py | AaltoRSE/ImageNetTools | 1ed8b8c38bd14eb47fc6167bf194f327a2696bf1 | [
"BSD-3-Clause"
] | null | null | null | iotest.py | AaltoRSE/ImageNetTools | 1ed8b8c38bd14eb47fc6167bf194f327a2696bf1 | [
"BSD-3-Clause"
] | null | null | null | '''
Created on Sep 29, 2021
@author: thomas
'''
import ImageNetTools
import sys
import getopt
main(sys.argv[1:]) | 22.346154 | 59 | 0.555938 |
c3825a98b9b5079c534d11d77f64da2d82f8a541 | 1,775 | py | Python | sagas/tests/sinkers/test_results_render.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 3 | 2020-01-11T13:55:38.000Z | 2020-08-25T22:34:15.000Z | sagas/tests/sinkers/test_results_render.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | null | null | null | sagas/tests/sinkers/test_results_render.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z | """
$ pytest -s -v test_results_render.py
"""
import logging
import pytest
from sagas.nlu.results_render import ResultsRender
| 36.979167 | 90 | 0.450704 |
c382afee49a8dcf277085e3abd2845bbc944eef7 | 5,838 | py | Python | equatation.py | asteinig4018/mhacks19 | 64b5ba211f38f3b30927bf926073b777fb23a46f | [
"MIT"
] | 1 | 2019-10-12T21:30:15.000Z | 2019-10-12T21:30:15.000Z | equatation.py | asteinig4018/mhacks19 | 64b5ba211f38f3b30927bf926073b777fb23a46f | [
"MIT"
] | null | null | null | equatation.py | asteinig4018/mhacks19 | 64b5ba211f38f3b30927bf926073b777fb23a46f | [
"MIT"
] | null | null | null | import json
import math
from HistoricalTweetDataFetcher import getHistoricalData
# Fetch historical tweet records. Per the field accesses below, each record
# carries "s" (sentiment), "m" (magnitude), "p" (a one-letter category) and
# "h" (hour) -- assumed from usage; TODO confirm against the fetcher.
joelsarray = getHistoricalData(0)
arrs = []
arrm = []
arrp = []
arrsTotal = 0
arrmTotal = 0
ncount = 0
ccount = 0
lcount = 0
# NOTE(review): `time` is assigned here but never used later in this script.
time = joelsarray[0]["h"]
for dictionary in joelsarray:
    arrs.append(dictionary["s"])
    arrm.append(dictionary["m"])
    arrp.append(dictionary["p"])
# Accumulate totals and count the 'l' / 'c' / 'n' categories
# (presumably liberal / conservative / neutral -- verify).
for x in range(len(arrs)):
    arrsTotal += arrs[x]
    arrmTotal += arrm[x]
    if arrp[x]=='l':
        lcount += 1
    elif arrp[x]=='c':
        ccount += 1
    elif arrp[x]=='n':
        ncount += 1
arrsAvg = arrsTotal/len(arrs)#sentiment value
arrmAvg = arrmTotal/len(arrm)#magnitude value
#print(arrsTotal)
#print(len(arrs))
#rint(arrsAvg)
#print(arrmAvg)
#print(lcount)
#print(ccount)
###################################################################
# Load per-state weather data and derive a temperature multiplier.
filename2 = "weather_us.json"
if filename2:
    with open(filename2, 'r') as f:
        weatherstore = json.load(f)
# NOTE(review): this loop rebuilds the same key list 50 times; a single
# `statearray = list(weatherstore.keys())` would suffice.
for x in range(50):
    statearray = list(weatherstore.keys())
statesAverage = 0
# NOTE(review): each state's term is added 50 times and the total is
# divided by 50 once, so the result equals the SUM over states, not a
# mean -- confirm whether a second /50 (or no inner loop) was intended.
for state in statearray:
    for x in range(50):
        temptemp = float(weatherstore[state]["temperature"])
        temphigh = float(weatherstore[state]["average_monthly_high"])
        templow = float(weatherstore[state]["average_monthly_low"])
        statesAverage+=((temptemp-temphigh)*(templow-temptemp))/(math.pow(((temphigh+templow)/2),2))
statesAverage = statesAverage/50 #this is the average temperature multiplier
print(statesAverage)
#####################################################################################
# Load S&P 500 data; changes within +/-0.73% are treated as noise
# (multiplier 0), larger moves are scaled by 0.5 * 0.73.
filename3 = "sp500_price.json"
if filename3:
    with open(filename3, 'r') as f:
        stockdata = json.load(f)
stockpricecurrent = stockdata["current_price"]
stockpricechange = stockdata["percent_change"]#percent change of S&P500
if stockpricechange <= 0.73 and stockpricechange >=-0.73:
    stockmultiply = 0;
else:
    stockmultiply = stockpricechange*0.5*0.73
print(stockpricechange)
#########################################################################################
# Load approval-rating data and feed everything into the scoring equation.
filename4 = "trump_approval_rating.json"
if filename4:
    with open(filename4, 'r') as f:
        approvalratingdata = json.load(f)
approveAvg = approvalratingdata["approve_avg"]#approval average data
currentApproval = approvalratingdata["approve"]#current approval percentage
########################################################################################
# NOTE(review): `equation` is defined elsewhere in the original script
# (not in this excerpt); the trailing numeric arguments look like tuning
# weights -- confirm their meaning at the definition site.
my_list = equation(arrsAvg, stockmultiply, currentApproval, approveAvg, statesAverage, 0, 0, lcount, ccount, 17, 70, 60, 50, 45, 25)
| 39.714286 | 198 | 0.604488 |
5ed9ef5b5cccf956209757de81563a4bc4e12b59 | 43,492 | py | Python | oscar/apps/offer/models.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
] | null | null | null | oscar/apps/offer/models.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
] | null | null | null | oscar/apps/offer/models.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
] | 1 | 2019-07-10T06:32:14.000Z | 2019-07-10T06:32:14.000Z | from decimal import Decimal as D, ROUND_DOWN, ROUND_UP
import math
import datetime
from django.core import exceptions
from django.template.defaultfilters import slugify
from django.db import models
from django.utils.translation import ungettext, ugettext as _
from django.utils.importlib import import_module
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.conf import settings
from oscar.apps.offer.managers import ActiveOfferManager
from oscar.templatetags.currency_filters import currency
from oscar.models.fields import PositiveDecimalField, ExtendedURLField
# ==========
# Conditions
# ==========
# ========
# Benefits
# ========
# =================
# Shipping benefits
# =================
| 38.150877 | 117 | 0.616849 |
5eda690b685cc647a25421c21d7eb3efc87731a9 | 3,288 | py | Python | htk-lite/commandlist/help.py | otherbeast/hackers-tool-kit | 12991889db1f6843dde82e7da4b4cdfb50740da5 | [
"Apache-2.0"
] | 393 | 2019-01-21T05:52:54.000Z | 2022-03-29T06:07:04.000Z | htk-lite/commandlist/help.py | urantialife/hackers-tool-kit | 34dbabf3e94825684fd1a684f522d3dc3565eb2d | [
"Apache-2.0"
] | 19 | 2019-02-22T00:49:28.000Z | 2021-12-30T20:28:59.000Z | htk-lite/commandlist/help.py | urantialife/hackers-tool-kit | 34dbabf3e94825684fd1a684f522d3dc3565eb2d | [
"Apache-2.0"
] | 138 | 2019-03-15T23:22:19.000Z | 2022-03-20T17:19:09.000Z | #!/usr/local/bin/python
# coding: latin-1
#if you use this code give me credit @tuf_unkn0wn
#i do not give you permission to show / edit this script without my credit
#to ask questions or report a problem message me on instagram @tuf_unkn0wn
"""
"""
import os
import sys
import random
# ANSI terminal escape codes for colored output.
lred = '\033[91m'
lblue = '\033[94m'
lgreen = '\033[92m'
yellow = '\033[93m'
cyan = '\033[1;36m'
purple = '\033[95m'
red = '\033[31m'
green = '\033[32m'
blue = '\033[34m'
orange = '\033[33m'
# Pool of colors; one accent color is picked at random per run.
colorlist = [red, blue, green, yellow, lblue, purple, cyan, lred, lgreen, orange]
randomcolor = random.choice(colorlist)
# Color subset for the third banner variant.
banner3list = [red, blue, green, purple]
# NOTE(review): helpbanner() is defined elsewhere in the original script
# (not part of this excerpt).
helpbanner()
| 46.971429 | 102 | 0.253041 |
5edd1d618589e67fdc13ac60dffe9edc5736896c | 2,980 | py | Python | scripts/core/soldier.py | whackashoe/entwinement | 4acff2147b86e08e267fc50c327917a338c7bf36 | [
"Unlicense"
] | 1 | 2020-03-10T10:52:13.000Z | 2020-03-10T10:52:13.000Z | scripts/core/soldier.py | whackashoe/entwinement | 4acff2147b86e08e267fc50c327917a338c7bf36 | [
"Unlicense"
] | null | null | null | scripts/core/soldier.py | whackashoe/entwinement | 4acff2147b86e08e267fc50c327917a338c7bf36 | [
"Unlicense"
] | null | null | null | d_soldiers = []
| 18.742138 | 74 | 0.617785 |
5eddcc0044e85262897bbd20777730764bb0e4ac | 785 | py | Python | dzTraficoBackend/dzTrafico/BusinessLayer/Statistics/DataVisualizationController.py | DZAymen/dz-Trafico | 74ff9caf9e3845d8af977c46b04a2d3421a0661b | [
"MIT"
] | null | null | null | dzTraficoBackend/dzTrafico/BusinessLayer/Statistics/DataVisualizationController.py | DZAymen/dz-Trafico | 74ff9caf9e3845d8af977c46b04a2d3421a0661b | [
"MIT"
] | null | null | null | dzTraficoBackend/dzTrafico/BusinessLayer/Statistics/DataVisualizationController.py | DZAymen/dz-Trafico | 74ff9caf9e3845d8af977c46b04a2d3421a0661b | [
"MIT"
] | null | null | null | from dzTrafico.BusinessEntities.Simulation import Simulation
import lxml.etree as etree
| 23.787879 | 67 | 0.680255 |
5edde40f3283ddaa109a18bcb421a16c3e99b304 | 7,343 | py | Python | bunkai/algorithm/lbd/custom_tokenizers.py | megagonlabs/bunkai | 28ea1c891f6ee8f96269f41a0642cd6194dd04e9 | [
"Apache-2.0"
] | 149 | 2021-04-21T06:25:21.000Z | 2022-03-29T08:57:49.000Z | bunkai/algorithm/lbd/custom_tokenizers.py | megagonlabs/bunkai | 28ea1c891f6ee8f96269f41a0642cd6194dd04e9 | [
"Apache-2.0"
] | 41 | 2021-05-11T00:46:16.000Z | 2022-03-22T05:17:47.000Z | bunkai/algorithm/lbd/custom_tokenizers.py | megagonlabs/bunkai | 28ea1c891f6ee8f96269f41a0642cd6194dd04e9 | [
"Apache-2.0"
] | 5 | 2021-04-21T10:54:46.000Z | 2022-02-25T17:41:21.000Z | #!/usr/bin/env python3
import collections
import logging
import os
import typing
import unicodedata
from janome.tokenizer import Tokenizer
from transformers.file_utils import cached_path
from transformers.models.bert.tokenization_bert import BertTokenizer, WordpieceTokenizer, load_vocab
import bunkai.constant
"""
The original source code is from cl-tohoku/bert-japanese.
https://github.com/cl-tohoku/bert-japanese/blob/master/tokenization.py
The original source code is under Apache-2.0 License.
"""
# Module-level logger for this tokenizer module.
logger = logging.getLogger(__name__)
# Pretrained vocabulary identifiers this module recognizes: the cl-tohoku
# Japanese BERT checkpoints (word-level and character-level variants).
KNOWN_PRETRAINED_VOCABS = {
    "cl-tohoku/bert-base-japanese",
    "cl-tohoku/bert-base-japanese-whole-word-masking",
    "cl-tohoku/bert-base-japanese-char",
    "cl-tohoku/bert-base-japanese-char-whole-word-masking",
}
| 36.715 | 116 | 0.626719 |
5edecbbe347219a2740ccd3534f648ace677fd24 | 10,232 | py | Python | tests/exchanges_tests.py | tomwalton78/Crypto-Exchange-API-Aggregator | c5b1756eac46274cdbe5c4e49db62450a35b70a6 | [
"MIT"
] | null | null | null | tests/exchanges_tests.py | tomwalton78/Crypto-Exchange-API-Aggregator | c5b1756eac46274cdbe5c4e49db62450a35b70a6 | [
"MIT"
] | null | null | null | tests/exchanges_tests.py | tomwalton78/Crypto-Exchange-API-Aggregator | c5b1756eac46274cdbe5c4e49db62450a35b70a6 | [
"MIT"
] | 1 | 2019-11-16T07:31:00.000Z | 2019-11-16T07:31:00.000Z | import unittest
from datetime import datetime
import os
import sys
from api.exchanges.exchange import ExchangeAPICallFailedException
from api.exchanges.gdax_exchange import GdaxExchange
from api.exchanges.kraken_exchange import KrakenExchange
from api.exchanges.bitstamp_exchange import BitstampExchange
from api.exchanges.bitfinex_exchange import BitfinexExchange
if __name__ == '__main__':
    # exit=False runs the test suite without letting unittest call
    # sys.exit(), keeping the interpreter session alive afterwards.
    unittest.main(exit=False)
| 30.094118 | 79 | 0.583268 |
5edf354d82c1df0367e44041106c0bf97648bea1 | 1,342 | py | Python | stats/clustering.py | KNSI-Golem/assets-generation | e366b96e0f2bba16c90816e2690b3b89fd50e514 | [
"MIT"
] | null | null | null | stats/clustering.py | KNSI-Golem/assets-generation | e366b96e0f2bba16c90816e2690b3b89fd50e514 | [
"MIT"
] | 33 | 2019-12-02T18:56:18.000Z | 2022-02-10T01:18:01.000Z | stats/clustering.py | KNSI-Golem/assets-generation | e366b96e0f2bba16c90816e2690b3b89fd50e514 | [
"MIT"
] | 2 | 2020-09-11T13:11:59.000Z | 2021-02-16T17:08:33.000Z | from sklearn.cluster import KMeans
import image_processing
import numpy as np
import some_analysis
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from autoencoder import ConvAutoencoder
# Paths and sprite size used by the preprocessing pipeline.
input_path = './bin'
output_shape = (32, 48)
processing_output = './processed/results_processing'
data = image_processing.get_data_from_images(processing_output)
# Drop the last channel (presumably alpha -- TODO confirm upstream format).
data = data[:, :, :, :-1]
# Encode every image into a 512-dim latent vector with a pretrained encoder.
encoder, _, _ = ConvAutoencoder.build(32, 48, 3,
                                      filters=(32, 64),
                                      latentDim=512)
encoder.load_weights('encoder.h5')
data_encoded = encoder.predict(data)
#data_reshaped = data.reshape((data.shape[0], -1))
n_clusters = 200
# Runs the K-Means fits in parallel (n_jobs=8 worker processes).
kmeans = KMeans(n_clusters=n_clusters, n_init=15, n_jobs=8)
# Train K-Means.
y_pred_kmeans = kmeans.fit_predict(data_encoded)
# Rescale pixels to [0, 255] (assumes inputs were in [-1, 1] -- verify).
data += 1.0
data *= 127.5
# Bucket image indices by their assigned cluster id.
array = np.empty((n_clusters), dtype=object)
for i in range(n_clusters):
    array[i] = []
for cluster, idx in zip(y_pred_kmeans, range(data.shape[0])):
    array[cluster].append(idx)
# Write one preview sheet per cluster, numbered from 1.
i = 1
for l in array:
    cluster = data[l]
    some_analysis.make_preview(cluster, f'./previews/cluster_v3_{i}.png', n_cols=5)
    i += 1
'''
data_embedded = TSNE(learning_rate=200).fit_transform(data_reshaped)
plt.scatter(data_embedded[:, 0], data_embedded[:, 1])
''' | 28.553191 | 83 | 0.69076 |
5edf63e904c948abd2995cb1fd09ff2f09a7f87a | 572 | py | Python | CursoEmVideo/Aula22/ex109/ex109.py | lucashsouza/Desafios-Python | abb5b11ebdfd4c232b4f0427ef41fd96013f2802 | [
"MIT"
] | null | null | null | CursoEmVideo/Aula22/ex109/ex109.py | lucashsouza/Desafios-Python | abb5b11ebdfd4c232b4f0427ef41fd96013f2802 | [
"MIT"
] | null | null | null | CursoEmVideo/Aula22/ex109/ex109.py | lucashsouza/Desafios-Python | abb5b11ebdfd4c232b4f0427ef41fd96013f2802 | [
"MIT"
] | null | null | null | """
Modifique as funes que foram criadas no desafio 107 para
que elas aceitem um parametro a mais, informando se o valor
retornado por elas vai ser ou no formatado pela funo
moeda(), desenvolvida no desafio 108.
"""
from Aula22.ex109 import moeda
from Aula22.ex109.titulo import titulo
# Read a price and print derived values via the ex109 `moeda` helpers
# (half, double, +10%, -10%), each formatted as currency (flag True).
# NOTE(review): the accented characters in these prompts look mis-encoded
# ("Preo", "Informaes", "Acrscimo") -- likely a latin-1/utf-8 round-trip
# loss; left untouched because they are runtime strings.
preco = float(input("Preo: R$"))
titulo('Informaes Calculadas: ')
print(f"Metade: {moeda.metade(preco, True)}")
print(f"Dobro: {moeda.dobro(preco, True)}")
print(f"10% Acrscimo: {moeda.aumentar(preco, 10, True)}")
print(f"10% Desconto: {moeda.diminuir(preco, 10, True)}")
| 28.6 | 59 | 0.737762 |
5ee0230190b385a1bf8afa9cd7f0b235b7db13a2 | 4,787 | py | Python | mocu/graphical_model/mocu/scripts/visualizetoysystem.py | exalearn/oded | e706c48d60360b041b9f1cfc64fa208d01fbb65a | [
"MIT"
] | null | null | null | mocu/graphical_model/mocu/scripts/visualizetoysystem.py | exalearn/oded | e706c48d60360b041b9f1cfc64fa208d01fbb65a | [
"MIT"
] | null | null | null | mocu/graphical_model/mocu/scripts/visualizetoysystem.py | exalearn/oded | e706c48d60360b041b9f1cfc64fa208d01fbb65a | [
"MIT"
] | null | null | null | from mocu.utils.toysystems import *
import matplotlib.pyplot as plt
import matplotlib.cm as cm
if __name__ == '__main__':
    # `main` is expected to come from the star-import of
    # mocu.utils.toysystems above -- TODO confirm.
    main()
| 29.012121 | 113 | 0.524546 |
5ee24f1707c0b95700a49b8f88fefee14ccd1a6c | 9,053 | py | Python | sbm/stochastic_block_model.py | pmacg/pysbm | e2f6ceeb4fff903b53a4d3c05694411026a084c3 | [
"MIT"
] | 1 | 2021-09-17T12:37:34.000Z | 2021-09-17T12:37:34.000Z | sbm/stochastic_block_model.py | pmacg/pysbm | e2f6ceeb4fff903b53a4d3c05694411026a084c3 | [
"MIT"
] | null | null | null | sbm/stochastic_block_model.py | pmacg/pysbm | e2f6ceeb4fff903b53a4d3c05694411026a084c3 | [
"MIT"
] | null | null | null | """
Several methods for generating graphs from the stochastic block model.
"""
import itertools
import math
import random
import scipy.sparse
import numpy as np
def _get_num_pos_edges(c1_size, c2_size, same_cluster, self_loops, directed):
"""
Compute the number of possible edges between two clusters.
:param c1_size: The size of the first cluster
:param c2_size: The size of the second cluster
:param same_cluster: Whether these are the same cluster
:param self_loops: Whether we will generate self loops
:param directed: Whether we are generating a directed graph
:return: the number of possible edges between these clusters
"""
if not same_cluster:
# The number is simply the product of the number of vertices
return c1_size * c2_size
else:
# The base number is n choose 2
possible_edges_between_clusters = int((c1_size * (c1_size - 1)) / 2)
# If we are allowed self-loops, then add them on
if self_loops:
possible_edges_between_clusters += c1_size
# The number is normally the same for undirected and directed graphs, unless the clusters are the same, in which
# case the number for the directed graph is double since we need to consider both directions of each edge.
if directed:
possible_edges_between_clusters *= 2
# But if we are allowed self-loops, then we shouldn't double them since there is only one 'direction'.
if directed and self_loops:
possible_edges_between_clusters -= c1_size
return possible_edges_between_clusters
def _get_number_of_edges(c1_size, c2_size, prob, same_cluster, self_loops, directed):
    """
    Sample how many edges appear between two clusters.

    :param c1_size: The size of the first cluster
    :param c2_size: The size of the second cluster
    :param prob: The probability of an edge between the clusters
    :param same_cluster: Whether these are the same cluster
    :param self_loops: Whether we will generate self loops
    :param directed: Whether we are generating a directed graph
    :return: the number of edges to generate between these clusters
    """
    num_slots = _get_num_pos_edges(
        c1_size, c2_size, same_cluster, self_loops, directed)
    # Each slot is filled independently with probability `prob`, so the
    # edge count follows a binomial distribution.
    return np.random.binomial(num_slots, prob)
def sbm_adjmat(cluster_sizes, prob_mat_q, directed=False, self_loops=False):
    """
    Generate a graph from the stochastic block model.

    ``cluster_sizes`` gives the number of vertices in each cluster and
    ``prob_mat_q[i][j]`` the probability of an edge between a vertex in
    cluster i and one in cluster j. For undirected graphs Q is assumed
    symmetric (only the upper triangle matters); for directed graphs the
    two edge directions are sampled with Q[i][j] and Q[j][i] respectively.
    Edges themselves are drawn by ``_generate_sbm_edges``.

    :param cluster_sizes: The number of vertices in each cluster.
    :param prob_mat_q: Square matrix of inter-cluster edge probabilities.
    :param directed: Whether to generate a directed graph (default False).
    :param self_loops: Whether to generate self-loops (default False).
    :return: The sparse adjacency matrix of the graph in CSR format.
    """
    total_vertices = sum(cluster_sizes)
    # LIL is efficient for incremental construction; converted to CSR below.
    adj_mat = scipy.sparse.lil_matrix((total_vertices, total_vertices))
    for u, v in _generate_sbm_edges(cluster_sizes, prob_mat_q, directed=directed):
        # Drop self-loops unless they were explicitly requested.
        if u == v and not self_loops:
            continue
        adj_mat[u, v] = 1
        if not directed:
            # Mirror the edge for undirected graphs.
            adj_mat[v, u] = 1
    return adj_mat.tocsr()
def sbm_adjmat_equal_clusters(n, k, prob_mat_q, directed=False):
    """
    Generate a stochastic-block-model graph with k equally sized clusters.

    Each of the k clusters receives floor(n/k) vertices; edge
    probabilities between clusters i and j are given by prob_mat_q[i][j].

    :param n: The number of vertices in the graph.
    :param k: The number of clusters.
    :param prob_mat_q: q[i][j] gives the probability of an edge between clusters i and j
    :param directed: Whether to generate a directed graph.
    :return: The sparse adjacency matrix of the graph.
    """
    equal_sizes = [int(n/k)] * k
    return sbm_adjmat(equal_sizes, prob_mat_q, directed=directed)
def ssbm_adjmat(n, k, p, q, directed=False):
    """
    Generate a graph from the symmetric stochastic block model.

    Every one of the k clusters has floor(n/k) vertices. Edges inside a
    cluster appear with probability p and edges between different
    clusters with probability q.

    :param n: The number of vertices in the graph.
    :param k: The number of clusters.
    :param p: The probability of an edge inside a cluster.
    :param q: The probability of an edge between clusters.
    :param directed: Whether to generate a directed graph.
    :return: The sparse adjacency matrix of the graph.
    """
    # k x k probability matrix: p on the diagonal, q everywhere else.
    prob_mat_q = [[p if row == col else q for col in range(k)]
                  for row in range(k)]
    # Delegate to the general SBM generator with equal cluster sizes.
    return sbm_adjmat([int(n/k)] * k, prob_mat_q, directed=directed)
| 44.377451 | 120 | 0.68353 |
5ee2508b1563859bc37a102d678ee13eb3c4fb40 | 3,496 | py | Python | py_include/__init__.py | mauro-balades/py-include | d2d3b90fc840a550a49d76798bc8a97b076480d8 | [
"MIT"
] | 2 | 2022-01-30T13:23:22.000Z | 2022-01-31T10:23:46.000Z | py_include/__init__.py | mauro-balades/py-include | d2d3b90fc840a550a49d76798bc8a97b076480d8 | [
"MIT"
] | null | null | null | py_include/__init__.py | mauro-balades/py-include | d2d3b90fc840a550a49d76798bc8a97b076480d8 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
| --------------------- Py include <Mauro Balads> ---------------------
| ___ _ _ _ __ _ _ ___ ____
| | |_) \ \_/ | | | |\ | / /` | | | | | | | \ | |_
| |_| |_| |_| |_| \| \_\_, |_|__ \_\_/ |_|_/ |_|__
| ----------------------------------------------------------------------
| MIT License
|
| Copyright (c) 2022 Mauro Balads
|
| Permission is hereby granted, free of charge, to any person obtaining a copy
| of this software and associated documentation files (the "Software"), to deal
| in the Software without restriction, including without limitation the rights
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
| copies of the Software, and to permit persons to whom the Software is
| furnished to do so, subject to the following conditions:
|
| The above copyright notice and this permission notice shall be included in all
| copies or substantial portions of the Software.
|
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
| SOFTWARE.
|
"""
from pathlib import Path
import sys
def include(*args, **kwargs):
"""Here is where all the magic ocour. This function takes an
infinite amount of paths and they are being executend to
feel like user imported it.
Note:
It can also be used to store it into a variable if user
needs it. This can be done by adding the argument `ret`
to True (more detail in #Args).
Note:
Please note how (for the import statement) you will need a
`__init__.py` and paths separated by dots. With py-include,
you don't need. Py-include will make your path supported
by the current platform and it will open it's content and
execute it, so you don't need a path divided by `.` or
a `__init__.py`
Args:
files [list(str)]: A list of paths to include.
ret [bool]: If it is set to True, return the module (defaults to False).
Note:
If `ret` is set to `True`, the function will return all modules
as user will need to unpack them.
"""
# Get the value whether user whan't to execute
# the module or to return it. (defaults to False)
ret = kwargs.get("ret", False)
# Check if user inserted `ret` as True. If it not,
# we will open the file and execute it's content.
# If it is True, we will return the module they
# whanted to import.
if not ret:
_exec_modules(*args, **kwargs)
return _ret_modules(*args, **kwargs)
| 33.295238 | 80 | 0.65246 |
5ee3400a48d58dbe03ad61379d1f85e22cd4df99 | 7,201 | py | Python | src/scripts/load_data.py | murphycj/agfusionweb-react | 9305aa3caa653fa74608d1ae3dd59c03a3df6294 | [
"MIT"
] | 1 | 2019-12-11T22:22:02.000Z | 2019-12-11T22:22:02.000Z | src/scripts/load_data.py | murphycj/agfusionweb-react | 9305aa3caa653fa74608d1ae3dd59c03a3df6294 | [
"MIT"
] | 15 | 2020-03-25T02:21:18.000Z | 2022-03-27T20:05:01.000Z | src/scripts/load_data.py | murphycj/agfusionweb-react | 9305aa3caa653fa74608d1ae3dd59c03a3df6294 | [
"MIT"
] | null | null | null | import pyensembl
import sys
import sqlite3
import boto3
import pickle
dynamodb = boto3.resource('dynamodb')
table_agfusion_gene_synonyms = dynamodb.Table('agfusion_gene_synonyms')
table_agfusion_genes = dynamodb.Table('agfusion_genes')
table_agfusion_sequences = dynamodb.Table('agfusion_sequences')
# process_data('homo_sapiens', 94, '/Users/charliemurphy/Downloads/agfusion.homo_sapiens.94.db')
# process_data('homo_sapiens', 75, 'GRCh37', '/Users/charliemurphy/Downloads/agfusion.homo_sapiens.75.db')
# process_data('mus_musculus', 92, 'GRCm38', '/Users/charliemurphy/Downloads/agfusion.mus_musculus.92.db') | 34.956311 | 150 | 0.550618 |
5ee4affc10253568c59d31e6e7ecff29108ae1b0 | 10,118 | py | Python | google-cloud-sdk/lib/third_party/cloud_ml_engine_sdk/dataflow/io/multifiles_source.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/third_party/cloud_ml_engine_sdk/dataflow/io/multifiles_source.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/third_party/cloud_ml_engine_sdk/dataflow/io/multifiles_source.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Mutiple files/file patterns source.
Multiple File source, which reads the union of multiple files and/or file
patterns.
"""
from apache_beam import coders
from apache_beam.io import iobase
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.range_trackers import OffsetRangeTracker
from apache_beam.io.textio import _TextSource as TextSource
from apache_beam.io.tfrecordio import _TFRecordSource as TFRecordSource
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
# pylint: disable=g-import-not-at-top
try:
from apache_beam.options.value_provider import ValueProvider
from apache_beam.options.value_provider import StaticValueProvider
except ImportError:
from apache_beam.utils.value_provider import ValueProvider
from apache_beam.utils.value_provider import StaticValueProvider
# pylint: enable=g-import-not-at-top
FILE_LIST_SEPARATOR = ','
# TODO(user): currently compression_type is not a ValueProvider valure in
# filebased_source, thereby we have to make seperate classes for
# non-compressed and compressed version of TFRecord sources. Consider to
# make the compression_type a ValueProvider in filebased_source.
| 37.062271 | 80 | 0.720202 |
5ee51c2ffdafe95ae165b98a996207a8a39f4653 | 10,830 | py | Python | eureka/S5_lightcurve_fitting/s5_fit.py | evamariaa/Eureka | a3e739a528fbe85ec588bca996188765649b7778 | [
"MIT"
] | 15 | 2020-08-07T12:12:17.000Z | 2022-03-29T10:20:38.000Z | eureka/S5_lightcurve_fitting/s5_fit.py | evamariaa/Eureka | a3e739a528fbe85ec588bca996188765649b7778 | [
"MIT"
] | 159 | 2020-08-05T14:34:59.000Z | 2022-03-31T21:02:10.000Z | eureka/S5_lightcurve_fitting/s5_fit.py | evamariaa/Eureka | a3e739a528fbe85ec588bca996188765649b7778 | [
"MIT"
] | 17 | 2021-06-16T09:40:41.000Z | 2022-03-22T18:28:07.000Z | import numpy as np
import matplotlib.pyplot as plt
import glob, os, time
from ..lib import manageevent as me
from ..lib import readECF as rd
from ..lib import sort_nicely as sn
from ..lib import util, logedit
from . import parameters as p
from . import lightcurve as lc
from . import models as m
from .utils import get_target_data
#FINDME: Keep reload statements for easy testing
from importlib import reload
reload(p)
reload(m)
reload(lc)
def fitJWST(eventlabel, s4_meta=None):
'''Fits 1D spectra with various models and fitters.
Parameters
----------
eventlabel: str
The unique identifier for these data.
s4_meta: MetaClass
The metadata object from Eureka!'s S4 step (if running S4 and S5 sequentially).
Returns
-------
meta: MetaClass
The metadata object with attributes added by S5.
Notes
-------
History:
- November 12-December 15, 2021 Megan Mansfield
Original version
- December 17-20, 2021 Megan Mansfield
Connecting S5 to S4 outputs
- December 17-20, 2021 Taylor Bell
Increasing connectedness of S5 and S4
'''
print("\nStarting Stage 5: Light Curve Fitting\n")
# Initialize a new metadata object
meta = MetaClass()
meta.eventlabel = eventlabel
# Load Eureka! control file and store values in Event object
ecffile = 'S5_' + eventlabel + '.ecf'
ecf = rd.read_ecf(ecffile)
rd.store_ecf(meta, ecf)
# load savefile
if s4_meta == None:
# Search for the S2 output metadata in the inputdir provided in
# First just check the specific inputdir folder
rootdir = os.path.join(meta.topdir, *meta.inputdir.split(os.sep))
if rootdir[-1]!='/':
rootdir += '/'
files = glob.glob(rootdir+'S4_'+meta.eventlabel+'*_Meta_Save.dat')
if len(files)==0:
# There were no metadata files in that folder, so let's see if there are in children folders
files = glob.glob(rootdir+'**/S4_'+meta.eventlabel+'*_Meta_Save.dat', recursive=True)
files = sn.sort_nicely(files)
if len(files)==0:
# There may be no metafiles in the inputdir - raise an error and give a helpful message
raise AssertionError('Unable to find an output metadata file from Eureka!\'s S4 step '
+'in the inputdir: \n"{}"!'.format(rootdir))
elif len(files)>1:
# There may be multiple runs - use the most recent but warn the user
print('WARNING: There are multiple metadata save files in your inputdir: \n"{}"\n'.format(rootdir)
+'Using the metadata file: \n{}\n'.format(files[-1])
+'and will consider aperture ranges listed there. If this metadata file is not a part\n'
+'of the run you intended, please provide a more precise folder for the metadata file.')
fname = files[-1] # Pick the last file name (should be the most recent or only file)
fname = fname[:-4] # Strip off the .dat ending
s4_meta = me.loadevent(fname)
# Need to remove the topdir from the outputdir
s4_outputdir = s4_meta.outputdir[len(s4_meta.topdir):]
if s4_outputdir[0]=='/':
s4_outputdir = s4_outputdir[1:]
s4_allapers = s4_meta.allapers
# Overwrite the temporary meta object made above to be able to find s4_meta
meta = s4_meta
# Load Eureka! control file and store values in the S4 metadata object
ecffile = 'S5_' + eventlabel + '.ecf'
ecf = rd.read_ecf(ecffile)
rd.store_ecf(meta, ecf)
# Overwrite the inputdir with the exact output directory from S4
meta.inputdir = s4_outputdir
meta.old_datetime = s4_meta.datetime # Capture the date that the
meta.datetime = None # Reset the datetime in case we're running this on a different day
meta.inputdir_raw = meta.inputdir
meta.outputdir_raw = meta.outputdir
if (not s4_allapers) or (not meta.allapers):
# The user indicated in the ecf that they only want to consider one aperture
# in which case the code will consider only the one which made s4_meta.
# Alternatively, S4 was run without allapers, so S5's allapers will only conside that one
meta.spec_hw_range = [meta.spec_hw,]
meta.bg_hw_range = [meta.bg_hw,]
run_i = 0
for spec_hw_val in meta.spec_hw_range:
for bg_hw_val in meta.bg_hw_range:
t0 = time.time()
meta.spec_hw = spec_hw_val
meta.bg_hw = bg_hw_val
# Do some folder swapping to be able to reuse this function to find S4 outputs
tempfolder = meta.outputdir_raw
meta.outputdir_raw = meta.inputdir_raw
meta.inputdir = util.pathdirectory(meta, 'S4', meta.runs[run_i], old_datetime=meta.old_datetime, ap=spec_hw_val, bg=bg_hw_val)
meta.outputdir_raw = tempfolder
run_i += 1
if meta.testing_S5:
# Only fit a single channel while testing
chanrng = [0]
else:
chanrng = range(meta.nspecchan)
for channel in chanrng:
# Create directories for Stage 5 processing outputs
run = util.makedirectory(meta, 'S5', ap=spec_hw_val, bg=bg_hw_val, ch=channel)
meta.outputdir = util.pathdirectory(meta, 'S5', run, ap=spec_hw_val, bg=bg_hw_val, ch=channel)
# Copy existing S4 log file and resume log
meta.s5_logname = meta.outputdir + 'S5_' + meta.eventlabel + ".log"
log = logedit.Logedit(meta.s5_logname, read=meta.s4_logname)
log.writelog("\nStarting Channel {} of {}\n".format(channel+1, meta.nspecchan))
log.writelog(f"Input directory: {meta.inputdir}")
log.writelog(f"Output directory: {meta.outputdir}")
# Copy ecf (and update outputdir in case S5 is being called sequentially with S4)
log.writelog('Copying S5 control file')
# shutil.copy(ecffile, meta.outputdir)
new_ecfname = meta.outputdir + ecffile.split('/')[-1]
with open(new_ecfname, 'w') as new_file:
with open(ecffile, 'r') as file:
for line in file.readlines():
if len(line.strip())==0 or line.strip()[0]=='#':
new_file.write(line)
else:
line_segs = line.strip().split()
if line_segs[0]=='inputdir':
new_file.write(line_segs[0]+'\t\t/'+meta.inputdir+'\t'+' '.join(line_segs[2:])+'\n')
else:
new_file.write(line)
# Set the intial fitting parameters
params = p.Parameters(param_file=meta.fit_par)
# Subtract off the zeroth time value to avoid floating point precision problems when fitting for t0
t_offset = int(np.floor(meta.bjdtdb[0]))
t_mjdtdb = meta.bjdtdb - t_offset
params.t0.value -= t_offset
# Get the flux and error measurements for the current channel
flux = meta.lcdata[channel,:]
flux_err = meta.lcerr[channel,:]
# Normalize flux and uncertainties to avoid large flux values
flux_err /= flux.mean()
flux /= flux.mean()
if meta.testing_S5:
# FINDME: Use this area to add systematics into the data
# when testing new systematics models. In this case, I'm
# introducing an exponential ramp to test m.ExpRampModel().
log.writelog('****Adding exponential ramp systematic to light curve****')
fakeramp = m.ExpRampModel(parameters=params, name='ramp', fmt='r--')
fakeramp.coeffs = np.array([-1,40,-3, 0, 0, 0])
flux *= fakeramp.eval(time=t_mjdtdb)
# Load the relevant values into the LightCurve model object
lc_model = lc.LightCurve(t_mjdtdb, flux, channel, meta.nspecchan, unc=flux_err, name=eventlabel, time_units=f'MJD_TDB = BJD_TDB - {t_offset}')
# Make the astrophysical and detector models
modellist=[]
if 'transit' in meta.run_myfuncs:
t_model = m.TransitModel(parameters=params, name='transit', fmt='r--')
modellist.append(t_model)
if 'polynomial' in meta.run_myfuncs:
t_polynom = m.PolynomialModel(parameters=params, name='polynom', fmt='r--')
modellist.append(t_polynom)
if 'expramp' in meta.run_myfuncs:
t_ramp = m.ExpRampModel(parameters=params, name='ramp', fmt='r--')
modellist.append(t_ramp)
model = m.CompositeModel(modellist)
# Fit the models using one or more fitters
log.writelog("=========================")
if 'lsq' in meta.fit_method:
log.writelog("Starting lsq fit.")
model.fitter = 'lsq'
lc_model.fit(model, meta, fitter='lsq')
log.writelog("Completed lsq fit.")
log.writelog("-------------------------")
if 'emcee' in meta.fit_method:
log.writelog("Starting emcee fit.")
model.fitter = 'emcee'
lc_model.fit(model, meta, fitter='emcee')
log.writelog("Completed emcee fit.")
log.writelog("-------------------------")
if 'dynesty' in meta.fit_method:
log.writelog("Starting dynesty fit.")
model.fitter = 'dynesty'
lc_model.fit(model, meta, fitter='dynesty')
log.writelog("Completed dynesty fit.")
log.writelog("-------------------------")
if 'lmfit' in meta.fit_method:
log.writelog("Starting lmfit fit.")
model.fitter = 'lmfit'
lc_model.fit(model, meta, fitter='lmfit')
log.writelog("Completed lmfit fit.")
log.writelog("-------------------------")
log.writelog("=========================")
# Plot the results from the fit(s)
if meta.isplots_S5 >= 1:
lc_model.plot(meta)
return meta, lc_model
| 44.204082 | 158 | 0.568421 |
5ee68ea9e8a99cf09e7e6d0ca6ce334ef983ef7f | 4,724 | py | Python | model_compression_toolkit/common/graph/graph_matchers.py | eladc-git/model_optimization | 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | [
"Apache-2.0"
] | null | null | null | model_compression_toolkit/common/graph/graph_matchers.py | eladc-git/model_optimization | 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | [
"Apache-2.0"
] | null | null | null | model_compression_toolkit/common/graph/graph_matchers.py | eladc-git/model_optimization | 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Any, List
from model_compression_toolkit.common.graph.base_node import BaseNode
from model_compression_toolkit.common.matchers import node_matcher, walk_matcher, edge_matcher
| 31.704698 | 108 | 0.650296 |
5ee6b363eabe25c724e148a500f83b42a84aa031 | 3,022 | py | Python | initialize_app_db.py | daniel-julio-iglesias/microblog | 360198198336f0dda7d20aafeb337f59cb4a2329 | [
"MIT"
] | null | null | null | initialize_app_db.py | daniel-julio-iglesias/microblog | 360198198336f0dda7d20aafeb337f59cb4a2329 | [
"MIT"
] | null | null | null | initialize_app_db.py | daniel-julio-iglesias/microblog | 360198198336f0dda7d20aafeb337f59cb4a2329 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The next steps use just in case to recreate the already existing DB
Backup and Delete the folder "migrations"
Backup and Delete the file "app.db"
Execute the next console commands
Linux
(venv) $ export FLASK_APP=microblog.py
MS Windows
(venv) $ set FLASK_APP=microblog.py
(venv) $ flask db init
(venv) $ flask db migrate -m "initialization"
(venv) $ python initialize_app_db.py
### (venv) $ flask shell
(venv) $ flask run
http://localhost:5000/
http://localhost:5000/index
Use the function "initialize_data_into_db()"
for data recreation.
Use the function "remove_data_from_db()"
for data deletion. Then you can simply
use again the function "initialize_data_into_db()"
for data recreation.
"""
from datetime import datetime, timedelta
from app import create_app, db
from app.models import User, Post
from config import Config
def remove_data_from_db():
"""
In case of removing data...
"""
app = create_app(Config)
app_context = app.app_context()
app_context.push()
db.create_all()
db.session.remove()
db.drop_all()
app_context.pop()
if __name__ == '__main__':
initialize_data_into_db()
# remove_data_from_db()
| 26.982143 | 67 | 0.648246 |
5ee6e99348be1e75186fd4d95f9769f455fc8a1a | 4,328 | py | Python | gpytorch/kernels/rbf_kernel.py | techshot25/gpytorch | b4aee6f81a3428172d4914e7e0fef0e71cd1f519 | [
"MIT"
] | 1 | 2019-11-08T11:25:56.000Z | 2019-11-08T11:25:56.000Z | gpytorch/kernels/rbf_kernel.py | VonRosenchild/gpytorch | 092d523027a844939ba85d7ea8c8c7b7511843d5 | [
"MIT"
] | null | null | null | gpytorch/kernels/rbf_kernel.py | VonRosenchild/gpytorch | 092d523027a844939ba85d7ea8c8c7b7511843d5 | [
"MIT"
] | 1 | 2021-07-02T19:40:07.000Z | 2021-07-02T19:40:07.000Z | #!/usr/bin/env python3
from .kernel import Kernel
from ..functions import RBFCovariance
| 47.56044 | 115 | 0.575092 |
5ee75d983cd35cd4e28ec87b90865e27b89bfd3b | 5,132 | py | Python | src/dependenpy/finder.py | gitter-badger/dependenpy | db411b7bbd466b79064cbb419049f17cd3bff4c1 | [
"ISC"
] | 10 | 2020-01-08T10:42:32.000Z | 2021-07-08T01:58:08.000Z | src/dependenpy/finder.py | gitter-badger/dependenpy | db411b7bbd466b79064cbb419049f17cd3bff4c1 | [
"ISC"
] | 2 | 2020-10-07T09:48:54.000Z | 2020-11-03T23:37:13.000Z | src/dependenpy/finder.py | gitter-badger/dependenpy | db411b7bbd466b79064cbb419049f17cd3bff4c1 | [
"ISC"
] | 1 | 2019-12-10T18:32:05.000Z | 2019-12-10T18:32:05.000Z | # -*- coding: utf-8 -*-
"""dependenpy finder module."""
from importlib.util import find_spec
from os.path import basename, exists, isdir, isfile, join, splitext
| 28.353591 | 79 | 0.563718 |
5ee8d965db0dc6afc3a0712b8a012c62228c1b2d | 1,738 | py | Python | pearsonr/beta.py | rkhullar/pearsonr-pure-python | 955fbca6af0a234cf5132d5f83d36a2c411fec7a | [
"MIT"
] | null | null | null | pearsonr/beta.py | rkhullar/pearsonr-pure-python | 955fbca6af0a234cf5132d5f83d36a2c411fec7a | [
"MIT"
] | null | null | null | pearsonr/beta.py | rkhullar/pearsonr-pure-python | 955fbca6af0a234cf5132d5f83d36a2c411fec7a | [
"MIT"
] | null | null | null | import math
| 34.76 | 108 | 0.533947 |
5eeb434867ca1d9eaca8effbf5839d14aaa33835 | 33,018 | py | Python | hisim/components/generic_pv_system.py | FZJ-IEK3-VSA/HiSim | e9b3a69c6db331523b9ed5ac7aa6f57f9b4798b2 | [
"MIT"
] | 12 | 2021-10-05T11:38:24.000Z | 2022-03-25T09:56:08.000Z | hisim/components/generic_pv_system.py | FZJ-IEK3-VSA/HiSim | e9b3a69c6db331523b9ed5ac7aa6f57f9b4798b2 | [
"MIT"
] | 6 | 2021-10-06T13:27:55.000Z | 2022-03-10T12:55:15.000Z | hisim/components/generic_pv_system.py | FZJ-IEK3-VSA/HiSim | e9b3a69c6db331523b9ed5ac7aa6f57f9b4798b2 | [
"MIT"
] | 4 | 2022-02-21T19:00:50.000Z | 2022-03-22T11:01:38.000Z | # Generic/Built-in
import datetime
import math
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pvlib
from dataclasses_json import dataclass_json
from typing import Optional
from dataclasses import dataclass
from functools import lru_cache
from hisim.simulationparameters import SimulationParameters
# Owned
from hisim import component as cp
from hisim import loadtypes as lt
from hisim import utils
from hisim import log
from hisim.components.weather import Weather
__authors__ = "Vitor Hugo Bellotto Zago"
__copyright__ = "Copyright 2021, the House Infrastructure Project"
__credits__ = ["Noah Pflugradt"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Vitor Hugo Bellotto Zago"
__email__ = "vitor.zago@rwth-aachen.de"
__status__ = "development"
"""
The functions cited in this module are at some degree based on the tsib project:
[tsib-kotzur]:
Kotzur, Leander, Detlef Stolten, and Hermann-Josef Wagner. Future grid load of the residential building sector. No. RWTH-2018-231872. Lehrstuhl fr Brennstoffzellen (FZ Jlich), 2019.
ID: http://hdl.handle.net/2128/21115
http://nbn-resolving.org/resolver?verb=redirect&identifier=urn:nbn:de:0001-2019020614
The implementation of the tsib project can be found under the following repository:
https://github.com/FZJ-IEK3-VSA/tsib
"""
temp_model = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"]
def simPhotovoltaicSimple(
dni_extra=None,
DNI=None,
DHI=None,
GHI=None,
azimuth=None,
apparent_zenith=None,
temperature=None,
wind_speed=None,
surface_tilt=30,
surface_azimuth=180,
albedo=0.2):
"""
Simulates a defined PV array with the Sandia PV Array Performance Model.
The implementation is done in accordance with following tutorial:
https://github.com/pvlib/pvlib-python/blob/master/docs/tutorials/tmy_to_power.ipynb
Based on the tsib project @[tsib-kotzur] (Check header)
Parameters
----------
tmy_data: pandas.DataFrame(), required
Weatherfile in the format of a tmy file.
surface_tilt: int or float, optional (default:30)
Tilt angle of of the array in degree.
surface_azimuth: int or float, optional (default:180)
Azimuth angle of of the array in degree. 180 degree means south,
90 degree east and 270 west.
albedo: float, optional (default: 0.2)
Reflection coefficient of the surrounding area.
losses: float, optional (default: 0.1)
Losses due to soiling, mismatch, diode connections, dc wiring etc.
load_module_data: Boolean, optional (default: False)
If True the module data base is loaded from the Sandia Website.
Otherwise it is loaded from this relative path
'\\profiles\\PV-Modules\\sandia_modules.csv'.
module_name: str, optional (default:'Hanwha_HSL60P6_PA_4_250T__2013_')
Module name. The string must be existens in Sandia Module database.
integrateInverter: bool, optional (default: True)
If an inverter shall be added to the simulation, providing the photovoltaic output after the inverter.
inverter_name: str, optional (default: 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_')
Type of inverter.
Returns
--------
"""
# automatic pd time series in future pvlib version
# calculate airmass
airmass = pvlib.atmosphere.get_relative_airmass(apparent_zenith)
# use perez model to calculate the plane of array diffuse sky radiation
poa_sky_diffuse = pvlib.irradiance.perez(
surface_tilt,
surface_azimuth,
DHI,
np.float64(DNI),
dni_extra,
apparent_zenith,
azimuth,
airmass,
)
# calculate ground diffuse with specified albedo
poa_ground_diffuse = pvlib.irradiance.get_ground_diffuse(
surface_tilt, GHI, albedo=albedo
)
# calculate angle of incidence
aoi = pvlib.irradiance.aoi(surface_tilt, surface_azimuth, apparent_zenith, azimuth)
# calculate plane of array irradiance
poa_irrad = pvlib.irradiance.poa_components(aoi, np.float64(DNI), poa_sky_diffuse, poa_ground_diffuse)
# calculate pv cell and module temperature
temp_model = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"]
pvtemps = pvlib.temperature.sapm_cell(poa_irrad["poa_global"], temperature, wind_speed, **temp_model)
pv_dc = pvlib.pvsystem.pvwatts_dc(poa_irrad["poa_global"], temp_cell=pvtemps, pdc0=1, gamma_pdc=-0.002,
temp_ref=25.0)
if math.isnan(pv_dc):
pv_dc = 0
return pv_dc
def readTRY(location="Aachen", year=2010):
"""
Reads a test reference year file and gets the GHI, DHI and DNI from it.
Based on the tsib project @[tsib-kotzur] (Check header)
Parameters
-------
try_num: int (default: 4)
The region number of the test reference year.
year: int (default: 2010)
The year. Only data for 2010 and 2030 available
"""
# get the correct file path
filepath = os.path.join(utils.HISIMPATH["weather"][location])
# get the geoposition
with open(filepath + ".dat", encoding="utf-8") as fp:
lines = fp.readlines()
location_name = lines[0].split(maxsplit=2)[2].replace('\n', '')
lat = float(lines[1][20:37])
lon = float(lines[2][15:30])
location = {"name": location_name, "latitude": lat, "longitude": lon}
# check if time series data already exists as .csv with DNI
if os.path.isfile(filepath + ".csv"):
data = pd.read_csv(filepath + ".csv", index_col=0, parse_dates=True,sep=";",decimal=",")
data.index = pd.to_datetime(data.index, utc=True).tz_convert("Europe/Berlin")
# else read from .dat and calculate DNI etc.
else:
# get data
data = pd.read_csv(
filepath + ".dat", sep=r"\s+", skiprows=([i for i in range(0, 31)])
)
data.index = pd.date_range(
"{}-01-01 00:00:00".format(year), periods=8760, freq="H", tz="Europe/Berlin"
)
data["GHI"] = data["D"] + data["B"]
data = data.rename(columns={"D": "DHI", "t": "T", "WG": "WS"})
# calculate direct normal
data["DNI"] = calculateDNI(data["B"], lon, lat)
# data["DNI"] = data["B"]
# save as .csv
#data.to_csv(filepath + ".csv",sep=";",decimal=",")
return data, location
def calculateDNI(directHI, lon, lat, zenith_tol=87.0):
"""
Calculates the direct NORMAL irradiance from the direct horizontal irradiance with the help of the PV lib.
Based on the tsib project @[tsib-kotzur] (Check header)
Parameters
----------
directHI: pd.Series with time index
Direct horizontal irradiance
lon: float
Longitude of the location
lat: float
Latitude of the location
zenith_tol: float, optional
Avoid cosines of values above a certain zenith angle of in order to avoid division by zero.
Returns
-------
DNI: pd.Series
"""
solarPos = pvlib.solarposition.get_solarposition(directHI.index, lat, lon)
solarPos["apparent_zenith"][solarPos.apparent_zenith > zenith_tol] = zenith_tol
DNI = directHI.div(solarPos["apparent_zenith"].apply(math.radians).apply(math.cos))
DNI = DNI.fillna(0)
if DNI.isnull().values.any():
raise ValueError("Something went wrong...")
return DNI
| 45.168263 | 183 | 0.594009 |
5eeb79ff59fe8c898948a4d629f95025dddf840d | 1,843 | py | Python | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/__init__.py | t-triobox/gQuant | 6ee3ba104ce4c6f17a5755e7782298902d125563 | [
"Apache-2.0"
] | null | null | null | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/__init__.py | t-triobox/gQuant | 6ee3ba104ce4c6f17a5755e7782298902d125563 | [
"Apache-2.0"
] | null | null | null | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/__init__.py | t-triobox/gQuant | 6ee3ba104ce4c6f17a5755e7782298902d125563 | [
"Apache-2.0"
] | null | null | null | """
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from .loadCsvNode import LoadCsvNode
from .bootstrapNode import BootstrapNode
from .logReturnNode import LogReturnNode
from .distanceNode import DistanceNode
from .hierarchicalClusteringNode import HierarchicalClusteringNode
from .hrpWeight import HRPWeightNode
from .portfolioNode import PortfolioNode
from .performanceMetricNode import PerformanceMetricNode
from .nrpWeightNode import NRPWeightNode
from .maxDrawdownNode import MaxDrawdownNode
from .featureNode import FeatureNode
from .aggregateTimeFeature import AggregateTimeFeatureNode
from .mergeNode import MergeNode
from .diffNode import DiffNode
from .rSquaredNode import RSquaredNode
from .shapSummaryPlotNode import ShapSummaryPlotPlotNode
from .leverageNode import LeverageNode
from .rawDataNode import RawDataNode
from .transactionCostNode import TransactionCostNode
__all__ = ["LoadCsvNode", "BootstrapNode", "LogReturnNode",
"DistanceNode", "HierarchicalClusteringNode", "HRPWeightNode",
"PortfolioNode", "PerformanceMetricNode", "NRPWeightNode",
"MaxDrawdownNode", "FeatureNode", "AggregateTimeFeatureNode",
"MergeNode", "DiffNode", "RSquaredNode", "ShapSummaryPlotPlotNode",
"LeverageNode", "RawDataNode", "TransactionCostNode"]
| 42.860465 | 78 | 0.72382 |
5eecada079f1111eeed67c73ca6a1720da167194 | 1,541 | py | Python | pythran/tests/rosetta/greatest_subsequential_sum.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,647 | 2015-01-13T01:45:38.000Z | 2022-03-28T01:23:41.000Z | pythran/tests/rosetta/greatest_subsequential_sum.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,116 | 2015-01-01T09:52:05.000Z | 2022-03-18T21:06:40.000Z | pythran/tests/rosetta/greatest_subsequential_sum.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 180 | 2015-02-12T02:47:28.000Z | 2022-03-14T10:28:18.000Z | #from http://rosettacode.org/wiki/Greatest_subsequential_sum#Python
#pythran export maxsum(int list)
#pythran export maxsumseq(int list)
#pythran export maxsumit(int list)
#runas maxsum([0, 1, 0])
#runas maxsumseq([-1, 2, -1, 3, -1])
#runas maxsumit([-1, 1, 2, -5, -6])
def maxsum(sequence):
"""Return maximum sum."""
maxsofar, maxendinghere = 0, 0
for x in sequence:
# invariant: ``maxendinghere`` and ``maxsofar`` are accurate for ``x[0..i-1]``
maxendinghere = max(maxendinghere + x, 0)
maxsofar = max(maxsofar, maxendinghere)
return maxsofar
| 31.44898 | 96 | 0.580792 |
5eed202c73e618fc929047ee896a35003f968654 | 28,280 | py | Python | src/binwalk/__init__.py | dotysan/binwalk | d3b5d73538557f2a290996dcea84352fcfb6d1a1 | [
"MIT"
] | 1 | 2020-03-04T15:14:40.000Z | 2020-03-04T15:14:40.000Z | src/binwalk/__init__.py | dotysan/binwalk | d3b5d73538557f2a290996dcea84352fcfb6d1a1 | [
"MIT"
] | null | null | null | src/binwalk/__init__.py | dotysan/binwalk | d3b5d73538557f2a290996dcea84352fcfb6d1a1 | [
"MIT"
] | null | null | null | __all__ = ["Binwalk"]
import os
import re
import time
import magic
from binwalk.compat import *
from binwalk.config import *
from binwalk.update import *
from binwalk.filter import *
from binwalk.parser import *
from binwalk.plugins import *
from binwalk.plotter import *
from binwalk.hexdiff import *
from binwalk.entropy import *
from binwalk.extractor import *
from binwalk.prettyprint import *
from binwalk.smartstrings import *
from binwalk.smartsignature import *
from binwalk.common import file_size, unique_file_name, BlockFile
| 37.858099 | 232 | 0.701627 |
5eed38c8799a8f20aa9075adc117edac9f20f714 | 149 | py | Python | dodo.py | Ublimjo/nwt | 08b04c376a3792fd4663de2f229617ddabc3d032 | [
"MIT"
] | 1 | 2018-08-24T09:01:09.000Z | 2018-08-24T09:01:09.000Z | dodo.py | Ublimjo/nwt | 08b04c376a3792fd4663de2f229617ddabc3d032 | [
"MIT"
] | null | null | null | dodo.py | Ublimjo/nwt | 08b04c376a3792fd4663de2f229617ddabc3d032 | [
"MIT"
] | null | null | null | def task_clean_junk():
"""Remove junk file"""
return {
'actions': ['rm -rdf $(find . | grep pycache)'],
'clean': True,
}
| 21.285714 | 56 | 0.496644 |
5eeebe655d0529cd4e57b3684dd0b12853503ba1 | 442 | py | Python | greedy_algorithms/6_maximum_salary/largest_number.py | Desaiakshata/Algorithms-problems | 90f4e40ba05e4bdfc783614bb70b9156b05eec0b | [
"MIT"
] | null | null | null | greedy_algorithms/6_maximum_salary/largest_number.py | Desaiakshata/Algorithms-problems | 90f4e40ba05e4bdfc783614bb70b9156b05eec0b | [
"MIT"
] | null | null | null | greedy_algorithms/6_maximum_salary/largest_number.py | Desaiakshata/Algorithms-problems | 90f4e40ba05e4bdfc783614bb70b9156b05eec0b | [
"MIT"
] | null | null | null | #Uses python3
import sys
if __name__ == '__main__':
#input = sys.stdin.read()
data = input().split(' ')
a = data[1:]
print(largest_number(a))
| 19.217391 | 59 | 0.506787 |
5eefaff8065c5ecea5f5a36834a9168d04d5bd4e | 2,403 | py | Python | screenblankmgr.py | nsw42/pijuui | fee9b96ceaab6b4f5db7ea41cb86da9a1a3b9eac | [
"BSD-2-Clause"
] | 1 | 2021-12-20T15:02:54.000Z | 2021-12-20T15:02:54.000Z | screenblankmgr.py | nsw42/pijuui | fee9b96ceaab6b4f5db7ea41cb86da9a1a3b9eac | [
"BSD-2-Clause"
] | null | null | null | screenblankmgr.py | nsw42/pijuui | fee9b96ceaab6b4f5db7ea41cb86da9a1a3b9eac | [
"BSD-2-Clause"
] | null | null | null | import logging
import subprocess
profiles = {
'none': ScreenBlankProfileNone(),
'balanced': ScreenBlankProfileBalanced(),
'onoff': ScreenBlankProfileOnWhenPlaying()
}
| 22.669811 | 94 | 0.615481 |