| max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
bigdatacloudapi/client.py
|
robertreist/python-api-client
| 6
|
12784951
|
<gh_stars>1-10
import requests
import re
import json


class Client:
    def __init__(
        self,
        apiKey,
        nameSpace='data',
        server='api.bigdatacloud.net'
    ):
        self.apiKey = apiKey
        self.nameSpace = nameSpace
        self.server = server

    def __getattr__(self, p):
        # Turn a camelCase attribute name into an HTTP method plus endpoint,
        # e.g. getIpGeolocation -> GET /ip-geolocation
        def mm(params):
            def conversion(m):
                return "-" + m.group(1).lower()
            key = re.sub('([A-Z])', conversion, p)
            segs = key.split('-')
            method = segs.pop(0).upper()
            return self.communicate('-'.join(segs), method, params)
        return mm

    def communicate(self, endpoint, method, payload):
        url = 'https://' + self.server + '/' + self.nameSpace + '/' + endpoint
        headers = {'X-BDC-Key': self.apiKey}
        if method in ('POST', 'PUT', 'PATCH'):
            headers['content-type'] = 'application/x-www-form-urlencoded'
        r = getattr(requests, method.lower())(url, headers=headers, params=payload, data=payload)
        return [r.json(), r.status_code]
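# Illustrative usage sketch (not part of the original file): how the dynamic
# __getattr__ dispatch above is typically exercised. The attribute name, API
# key and endpoint below are made up for illustration; the first camelCase
# word becomes the HTTP verb and the rest becomes the endpoint path.
if __name__ == '__main__':
    client = Client(apiKey='YOUR_API_KEY')
    # Issues GET https://api.bigdatacloud.net/data/ip-geolocation?ip=8.8.8.8
    body, status = client.getIpGeolocation({'ip': '8.8.8.8'})
    print(status, body)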
| 2.765625
| 3
|
BoManifolds/pymanopt_addons/tools/autodiff/__init__.py
|
NoemieJaquier/GaBOtorch
| 21
|
12784952
|
from ._theano import TheanoBackend
from ._autograd import AutogradBackend
from ._tensorflow import TensorflowBackend
from ._pytorch import PytorchBackend
__all__ = ["AutogradBackend", "PytorchBackend",
"TensorflowBackend", "TheanoBackend"]
| 1.296875
| 1
|
pylps/plan.py
|
astraldawn/pylps
| 1
|
12784953
|
<gh_stars>1-10
import copy
from collections import deque
from ordered_set import OrderedSet
from pylps.config import CONFIG
from pylps.constants import *
from pylps.utils import *
class Proposed(object):
def __init__(self):
self._actions = OrderedSet()
self._fluents = OrderedSet()
def __repr__(self):
ret = "Proposed Actions: %s\n" % (str(
[a for a in self._actions if a.BaseClass is ACTION]))
ret += "Proposed Fluents: %s\n" % (str(self._fluents))
return ret
def __eq__(self, other):
return self.to_tuple() == other.to_tuple()
def __hash__(self):
return hash(self.to_tuple())
def to_tuple(self):
return (
tuple(a.to_tuple() for a in self.actions),
tuple(f.to_tuple() for f in self.fluents),
)
@property
def actions(self):
return self._actions
def add_action(self, action):
self._actions.add(action)
def add_actions(self, actions):
self._actions.update(actions)
def clear_actions(self):
self._actions = OrderedSet()
@property
def fluents(self):
return self._fluents
def add_fluent(self, fluent):
self._fluents.add(fluent)
def clear_fluents(self):
self._fluents = OrderedSet()
def reset(self):
self.clear_actions()
self.clear_fluents()
class Solution(object):
def __init__(self, proposed: Proposed, states):
self._proposed = proposed
self._states = states
self._solved = 0
self._process()
def __repr__(self):
ret = "Solution\n"
ret += "Solved: %s\n" % (self.solved)
ret += "States: %s\n" % (str(self.states))
return ret
def __eq__(self, other):
return self.proposed == other.proposed
def __hash__(self):
return hash(self.proposed)
@property
def proposed(self):
return self._proposed
@property
def states(self):
return self._states
@property
def solved(self):
return self._solved
def add_state(self, state):
self._states.append(state)
if state.result is G_SOLVED:
self._solved += 1
def _process(self):
for state in self.states:
if state.result is G_SOLVED:
self._solved += 1
class Plan(object):
def __init__(self, goals=[], subs={},
proposed=Proposed(), from_reactive=False,
result=G_NPROCESSED):
self._goals = deque(goals)
self._subs = subs
self._proposed = proposed
self._temporal_used = False
self._goal_pos = 0
self._result = result
self._counter = 0
self._reactive_id = None
self.reactive_only = True
if from_reactive:
self._reactive_id = CONFIG.reactive_id
CONFIG.reactive_id += 1
def __repr__(self):
ret = "STATE\n"
ret += "Reactive ID %s\n" % self.reactive_id
ret += "Goal pos: %s Result: %s Reactive only: %s\n" % (
str(self.goal_pos), self.result, self.reactive_only)
for item, goal in enumerate(self.goals):
t_goal = goal
if isinstance(goal, tuple):
t_goal = goal[0]
if t_goal.BaseClass is ACTION:
r_goal = reify_action(t_goal, self.subs)
else:
r_goal = reify_obj_args(t_goal, self.subs)
ret += "Goal %d\n" % (item)
ret += "N%d: %s\nR%d: %s\n" % (item, str(goal), item, str(r_goal))
ret += "Subs:\n"
for item in self.subs.items():
ret += str(item) + '\n'
ret += "Temporal used: %s\n" % self._temporal_used
ret += "Counter %s\n" % self._counter
ret += "%s\n" % (str(self._proposed))
return ret
# IDENTITY
@property
def reactive_id(self):
return self._reactive_id
# COMPARISON
def __eq__(self, other):
return self.to_tuple() == other.to_tuple()
def __hash__(self):
return hash(self.to_tuple())
def to_tuple(self):
convert = tuple(goal for goal in self.goals) + \
tuple((sub, val) for sub, val in self.subs.items())
# convert = tuple(goal for goal in self.goals)
return convert
@property
def actions(self):
return self._proposed.actions
def add_action(self, action):
self._proposed.add_action(action)
def clear_actions(self):
self._proposed.clear_actions()
@property
def counter(self):
return self._counter
@property
def fluents(self):
return self._proposed.fluents
def add_fluent(self, fluent):
self._proposed.add_fluent(fluent)
def clear_fluents(self):
self._proposed.clear_fluents()
@property
def goals(self):
return self._goals
def replace_event(self, event, outcome, reqs):
# Workaround for now
n_reqs = []
for req in reqs:
if isinstance(req, tuple):
n_reqs.append(req)
else:
n_reqs.append((req, outcome))
new_goals = deque()
for goal in self.goals:
goal_obj = goal
if isinstance(goal, tuple):
goal_obj = goal[0]
if goal_obj == event:
new_goals.extend(n_reqs) # REMOVED_DEEPCOPY
if CONFIG.experimental:
goal_obj.completed = True
new_goals.append(goal) # handle the event
continue
new_goals.append(goal)
self._goals = new_goals
# Reduce due to the replacement
self._goal_pos -= 1
def add_to_goals(self, goal):
self._goals.appendleft(goal) # REMOVED_DEEPCOPY
def compress(self, cpos=0):
# cpos = self._goal_pos
for i in range(self._goal_pos - cpos):
self._goals.popleft()
self._goal_pos = cpos
@property
def goal_pos(self):
return self._goal_pos
def get_next_goal(self, reactive=False):
try:
cur_goal = self.goals[self.goal_pos]
self._goal_pos += 1
return cur_goal
except IndexError:
return None
@property
def subs(self):
return self._subs
def update_subs(self, subs):
self._subs.update(subs)
@property
def result(self):
return self._result
def set_result(self, new_result):
self._result = new_result
@property
def temporal_used(self):
return self._temporal_used
def set_temporal_used(self, new_temporal_used):
self._temporal_used = new_temporal_used
def temporal_used_true(self):
self._temporal_used = True
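# Illustrative sketch (not from the original file): the goal bookkeeping above
# relies on collections.deque; add_to_goals pushes onto the left and compress
# pops already-processed goals off the left. A minimal standalone demo:
from collections import deque

goals = deque(['g1', 'g2', 'g3'])
goals.appendleft('g0')        # as in add_to_goals
for _ in range(2):            # as in compress with goal_pos=2, cpos=0
    goals.popleft()
print(list(goals))            # ['g2', 'g3']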
| 2.671875
| 3
|
machine_learning/Preprocessing.py
|
jbofill10/House-Prices-Kaggle-Competition
| 1
|
12784954
|
<reponame>jbofill10/House-Prices-Kaggle-Competition<gh_stars>1-10
from sklearn.preprocessing import RobustScaler
import pandas as pd


def preprocess(train_test_df, train_df, test_df):
    # One-hot encode the combined frame so train and test share the same columns
    train_test_encoded = pd.get_dummies(train_test_df)
    # Split back: the first len(train_df) rows are the training set,
    # everything after that is the test set
    train_encoded = train_test_encoded.iloc[:len(train_df)]
    test_encoded = train_test_encoded.iloc[len(train_df):]
    print(train_encoded.shape)
    print(test_encoded.shape)
    # Fit the scaler on the training rows only, then apply it to the test rows
    scaler = RobustScaler()
    train_scaled = scaler.fit_transform(train_encoded)
    test_scaled = scaler.transform(test_encoded)
    return train_scaled, test_scaled
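# Illustrative usage sketch (not from the original file): the tiny frames below
# are hypothetical. Train and test are concatenated so get_dummies produces a
# consistent column set before splitting and scaling.
import pandas as pd

train_df = pd.DataFrame({'LotArea': [8450, 9600], 'MSZoning': ['RL', 'RM']})
test_df = pd.DataFrame({'LotArea': [11250], 'MSZoning': ['RL']})
train_test_df = pd.concat([train_df, test_df], ignore_index=True)
X_train, X_test = preprocess(train_test_df, train_df, test_df)
print(X_train.shape, X_test.shape)  # (2, 3) (1, 3)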
| 3.15625
| 3
|
saleor/product/urls.py
|
YunazGilang/saleor-custom
| 2
|
12784955
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/$',
views.product_details, name='details'),
url(r'^category/(?P<path>[a-z0-9-_/]+?)-(?P<category_id>[0-9]+)/$',
views.category_index, name='category'),
url(r'^brand/(?P<path>[a-z0-9-_/]+?)-(?P<brand_id>[0-9]+)/$',
views.brand_index, name='brand'),
url(r'^tags/(?P<path>[a-z0-9-_/]+?)-(?P<tag_id>[0-9]+)/$',
views.tags_index, name='tags'),
url(r'^tags/render/$',
views.tags_render, name='tags_render'),
url(r'^sale/$',
views.get_all_discounted_product, name='sale_product'),
url(r'^sale/render/$',
views.render_discounted_product, name='sale_render'),
url(r'(?P<product_id>[0-9]+)/similar/$',
views.render_similar_product, name="similar-products"),
url(r'(?P<product_id>[0-9]+)/similar/all/$',
views.all_similar_product, name="all-similar-products"),
url(r'(?P<product_id>[0-9]+)/similar/all/render/$',
views.render_all_similar_product, name="render-all-similar-products"),
url(r'^recommendation/all/$',
views.all_recommendation, name="all-recommended-products"),
url(r'^recommendation/all/render/$',
views.get_render_all_recommendation, name="render-all-recommended-products"),
url(r'(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/add/$',
views.product_add_to_cart, name="add-to-cart"),
url(r'^collection/(?P<slug>[a-z0-9-_/]+?)-(?P<pk>[0-9]+)/$',
views.collection_index, name='collection')]
api_urlpatterns = [
url(r'^product/similar/(?P<product_id>[0-9]+)/$',
views.get_similar_product, name='similarproduct'),
url(r'^update/rating/$',
views.update_product_rating, name='update_rating'),
url(r'^recommender/arc/(?P<mode>[a-z0-9-_/]+?)/(?P<limit>[0-9]+)/$',
views.get_arc_recommendation, name='arcrecommendaion'),
url(r'^recommendation/hybrid/$',
views.get_recommendation, name='hybridrecommendation'),
url(r'^recommendation/partial/render/$',
views.render_recommendation, name='renderhomerecommendation'),
url(r'^recommendation/evaluate/$',
views.evaluate_recommendation, name="evaluate_recommendation"),
]
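# Illustrative sketch (not part of the original urls.py): how the slug/id
# pattern used above splits a product URL. The example path is made up.
import re

pattern = r'^(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/$'
m = re.match(pattern, 'wooden-chair-42/')
print(m.group('slug'), m.group('product_id'))  # wooden-chair 42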
| 1.96875
| 2
|
distributed_social_network/posts/migrations/0005_auto_20190327_0252.py
|
leevtori/CMPUT404-project
| 0
|
12784956
|
<gh_stars>0
# Generated by Django 2.1.7 on 2019-03-27 02:52
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('posts', '0004_auto_20190317_2154'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='categories',
            field=models.ManyToManyField(blank=True, to='posts.Categories'),
        ),
    ]
| 1.484375
| 1
|
python-news_aggregator/tests/libs/feed_utils_test.py
|
hanakhry/Crime_Admin
| 0
|
12784957
|
import unittest
from jarr.controllers.feed_builder import FeedBuilderController as FBC
from jarr.lib.enums import FeedType
class ConstructFeedFromTest(unittest.TestCase):
@property
def jdh_feed(self):
return {'icon_url': 'https://www.journalduhacker.net/assets/jdh-ico-31'
'1c23d65a3a9928889718838e2626c0665d83712d488713c9a'
'6c2ba2c676c0e.ico',
'link': 'https://www.journalduhacker.net/rss',
'links': ['https://www.journalduhacker.net/rss',
'https://www.journalduhacker.net/comments.rss'],
'site_link': 'https://www.journalduhacker.net/',
'title': 'Journal du hacker',
'feed_type': FeedType.classic}
def test_url(self):
jh = FBC('https://www.journalduhacker.net/').construct()
self.assertEqual(self.jdh_feed, jh)
def test_url_non_https(self):
jh = FBC('http://journalduhacker.net/').construct()
self.assertEqual(self.jdh_feed, jh)
def test_url_rss(self):
jdh_feed = self.jdh_feed
jh = FBC('http://journalduhacker.net/rss').construct()
self.assertEqual(jdh_feed, jh)
def test_joies_du_code(self):
self.maxDiff = None
joi = FBC('https://lesjoiesducode.fr/feed').construct()
joi.pop('icon_url')
self.assertEqual(
{'feed_type': FeedType.classic,
'link': 'https://lesjoiesducode.fr/feed',
'site_link': 'https://lesjoiesducode.fr',
'title': 'Les Joies du Code – Humour de développeurs '
': gifs, memes, blagues'}, joi)
def test_apod_from_site(self):
nasa = FBC('http://apod.nasa.gov/').construct()
self.assertEqual(
{'icon_url': 'https://apod.nasa.gov/favicon.ico',
'feed_type': FeedType.classic,
'site_link': 'https://apod.nasa.gov/apod/astropix.html',
'title': 'Astronomy Picture of the Day'}, nasa)
def test_apod_from_feed(self):
nasa = FBC('http://apod.nasa.gov/apod.rss').construct()
self.assertEqual(
{'description': 'Astronomy Picture of the Day',
'feed_type': FeedType.classic,
'icon_url': 'https://apod.nasa.gov/favicon.ico',
'link': 'https://apod.nasa.gov/apod.rss',
'site_link': 'https://apod.nasa.gov/',
'title': 'APOD'}, nasa)
def test_reddit_from_site(self):
reddit = FBC('https://www.reddit.com/r/france/').construct()
self.assertEqual({
'description': 'La France et les Français.',
'feed_type': FeedType.reddit,
'icon_url': 'https://www.redditstatic.com/desktop2x/'
'img/favicon/android-icon-192x192.png',
'site_link': 'https://www.reddit.com/r/france/',
'link': 'https://www.reddit.com/r/france/.rss',
'title': 'France'}, reddit)
def test_reddit_from_feed(self):
reddit = FBC('https://www.reddit.com/r/france/.rss').construct()
self.assertEqual(
{'description': 'La France et les Français.',
'feed_type': FeedType.reddit,
'icon_url': 'https://www.redditstatic.com/desktop2x/'
'img/favicon/android-icon-192x192.png',
'link': 'https://www.reddit.com/r/france/.rss',
'site_link': 'https://www.reddit.com/r/france/',
'title': 'France'}, reddit)
def test_instagram(self):
insta = FBC('http://www.instagram.com/jaesivsm/').construct()
self.assertEqual('jaesivsm', insta['link'])
self.assertEqual(FeedType.instagram, insta['feed_type'])
def test_twitter(self):
feed = FBC('http://twitter.com/jaesivsm/').construct()
self.assertEqual('jaesivsm', feed['link'])
self.assertEqual(FeedType.twitter, feed['feed_type'])
def test_soundcloud(self):
soundcloud = FBC('//soundcloud.com/popotes-podcast/').construct()
self.assertEqual({
'feed_type': FeedType.soundcloud,
'icon_url': 'https://a-v2.sndcdn.com/assets/'
'images/sc-icons/favicon-2cadd14bdb.ico',
'link': 'popotes-podcast',
'site_link': 'https://soundcloud.com/popotes-podcast/',
'title': 'SoundCloud'}, soundcloud)
def test_youtube(self):
yt_channel = 'www.youtube.com/channel/UCOWsWZTiXkbvQvtWO9RA0gA'
feed = FBC(yt_channel).construct()
self.assertEqual(FeedType.classic, feed['feed_type'])
self.assertEqual('https://www.youtube.com/feeds/videos.xml'
'?channel_id=UCOWsWZTiXkbvQvtWO9RA0gA', feed['link'])
self.assertEqual('BenzaieLive', feed['title'])
def test_json(self):
feed = FBC('https://daringfireball.net/feeds/json').construct()
self.assertEqual({'feed_type': FeedType.json,
'icon_url': 'https://daringfireball.net/'
'graphics/favicon-64.png',
'link': 'https://daringfireball.net/feeds/json',
'links': ['https://daringfireball.net/feeds/main',
'https://daringfireball.net/feeds/json'],
'site_link': 'https://daringfireball.net/',
'title': 'Daring Fireball'}, feed)
| 2.46875
| 2
|
data_structures/tree.py
|
claudiogar/learningPython
| 0
|
12784958
|
class Tree:
    def __init__(self, value):
        self.rootValue = value
        self.children = []

    def addChild(self, value):
        node = Tree(value)
        self.children.append(node)
        return node

    def removeChild(self, node):
        # Re-attach the removed node's children directly under this node
        for subChild in node.children:
            self.children.append(subChild)
        self.children.remove(node)

    def getChildren(self):
        return self.children

    def countNodes(self):
        total = 1
        for child in self.getChildren():
            total = total + child.countNodes()
        return total

    def depth(self):
        if len(self.children) == 0:
            return 1
        deepest = 0
        for child in self.children:
            child_depth = 1 + child.depth()
            if child_depth > deepest:
                deepest = child_depth
        return deepest

    def __str__(self) -> str:
        s = str(self.rootValue) + '\n'
        for child in self.children:
            s = s + child.__str__()
        return s
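# Illustrative usage sketch (not from the original file), exercising the
# Tree API defined above.
if __name__ == '__main__':
    root = Tree('A')
    b = root.addChild('B')
    root.addChild('C')
    b.addChild('D')
    print(root.countNodes())  # 4
    print(root.depth())       # 3
    root.removeChild(b)       # D is re-attached directly under A
    print(root.countNodes())  # 3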
| 3.765625
| 4
|
mcse/libmpi/check.py
|
manny405/mcse
| 5
|
12784959
|
<reponame>manny405/mcse
# -*- coding: utf-8 -*-
import os,shutil
from mcse.io import read,write
from mcse.io import check_dir,check_struct_dir
from mcse.libmpi.base import _NaiveParallel
from mpi4py import MPI
def null_check_fn(struct_path):
"""
Check function that always returns False, so every structure in the input directory gets (re)calculated.
"""
return False
def check_properties(struct_path, key="RSF"):
"""
Basic check function. Check functions take a path as the first argument
followed by any number of keyword arguments that may change the behavior
of the check function. It returns True if the structure already has the
requested property (so no further calculation is needed) and False otherwise.
"""
struct = read(struct_path)
if struct.properties.get(key):
return True
else:
return False
class Check(_NaiveParallel):
"""
SUMMARY: - USE FOR RESTARTABLE MPI PARALLELIZED ANALYSIS OF STRUCTURES.
- USE TO PERFORM PARALLEL IO, DECREASING THE TIME TO CHECK
IF CALCULATIONS HAVE COMPLETED FOR LARGE NUMBERS OF CALCULATIONS.
Results for simple test of parallel io scaling for setting up calculations
for 50 structures:
Cores Time x100 Iterations
1 11.13
2 6.367
3 4.982
4 3.987
5 3.664
6 3.704
cores = [1,2,3,4,5,6]
time = [11.13, 6.367, 4.982, 3.987, 3.664, 3.704]
optimal = [11.13, 5.565, 3.71, 2.7825, 2.226, 1.855]
Results for simple test of parallel io scaling for setting up calculations
for 5000 structures:
Cores Time x5 Iterations
1 66.714
2 43.423
3 26.495
4 22.245
5 19.183
6 18.175
cores = [1,2,3,4,5,6]
time = [66.714, 43.423, 26.495, 22.245, 19.183, 18.175]
optimal = [66.6714, 33.3357, 22.2238, 16.66785, 13.33428, 11.1119]
Large, parallel analysis may get interrupted unexpectedly. The advantage
of using this function is that it will enable the ability to check
in parallel what structures have been calculated and what still needs to
be done if the analysis is very large. The function may also be used to
check if larger calculations, such as FHI-aims calculations,
have been completed or need to be restarted.
Basic operation is as follows:
- There is a structure directory that you would like to perform a
calculation on.
- The calculation will be performed in a temporary calculation
directory.
- The results from the calculation will be written to an output
directory. This directory can be the same as the input structure
directory by overwriting the original files with the new information.
Arguments
---------
struct_dir: str
Path to the directory of structures to use.
output_dir: str
Path to the directory to place finished structures
file_format: str
File format for writing resulting structures.
calc_dir: str
Path to the directory where calculations should take place.
check_fn: callable(path, **check_fn_kw)
Callable function that takes in the path as the first argument and
then any keyword arguments the user would like to provide.
check_fn_kw: dict
Dictionary of the keyword functions to pass to check_fn
calculator: callable(Structure, **calculator_kw)
If the user would like to use the check function to also initialize
or perform a calculation, a calculator can be set.
calculator_kw: dict
If keyword arguments need to be passed to the calculator at runtime,
they can be supplied through this argument.
TODO: CHANGE THE CHECK_FN TO JUST A CLASS THAT TAKES IN THE STRUCTURE AND
RETURNS THE TRUE OR FALSE. THIS IS MUCH MORE FLEXIBLE.
"""
def __init__(self,
struct_dir,
output_dir="",
file_format="json",
calc_dir = "temp",
check_fn=check_properties,
check_fn_kw={"key": "RSF"},
calculator=None,
calculator_kw={},
comm=None,
):
if comm is None:
comm = MPI.COMM_WORLD
self.comm = comm
self.size = comm.Get_size()
self.rank = comm.Get_rank()
self.struct_dir = struct_dir
## Make sure that struct_dir is a proper structure directory
if not check_struct_dir(self.struct_dir):
raise Exception("Input {} was not recogized ".format(struct_dir)+
"as a valid structure directory.")
if len(output_dir) == 0:
self.output_dir = self.struct_dir
else:
self.output_dir = output_dir
self.file_format = file_format
self.calc_dir = calc_dir
self.check_fn = check_fn
self.check_fn_kw = check_fn_kw
self.calculator = calculator
self.calculator_kw = calculator_kw
## Setup calc and output dir if they don't already exist
if self.comm.rank == 0:
check_dir(self.calc_dir)
check_dir(self.output_dir)
self.comm.Barrier()
## Storage
self.my_files = []
def calc(self):
"""
Main function that manages checking of the struct_dir and calc_dir
"""
## Start by building list of files that this rank needs to calculate
self.my_files = self.get_files(self.struct_dir)
self.construct_filename_dict()
## Check to see what's already in the output dir
self.check_dir(self.output_dir, move=False)
## Reconstruct filename_dict with modified my_files
self.construct_filename_dict()
## Check if anything is finished from calc_dir that should be moved
## to the output dir. Otherwise, since it is already in the calc
## directory, then should be removed from self.my_files as to not
## overwrite anything in the calc_dir
self.check_dir(self.calc_dir, delete=True, move=True)
## Reconstruct filename_dict with modified my_files
self.construct_filename_dict()
## Now setup files for calculation in the calc_dir
self.setup_calc()
self.comm.Barrier()
## And calculate if calculator was attached
self.calculate()
def construct_filename_dict(self):
## Create dict for fast checking and indexing purposes
my_file_names = []
for file in self.my_files:
s = read(file)
my_file_names.append(s.struct_id)
self.my_filename_dict = {}
self.my_filename_dict.update(
zip(my_file_names,
[idx for idx in range(len(my_file_names))]))
def calculate(self):
"""
Simple calculate function using mcse style calculators. In addition,
the calculator could be as simple as creating a Slurm submission script
and submitting it.
"""
if self.calculator != None:
## Get files for calc_dir
self.my_files = self.get_files(self.calc_dir)
for calc_dir in self.my_files:
s = read(calc_dir)
if len(s) == 1:
struct = [x for x in s.values()][0]
self.calculator.calc(struct)
self.calculator.write(self.output_dir,
file_format=self.file_format,
overwrite=True)
else:
self.calculator.calc(s)
self.calculator.write(self.output_dir,
file_format=self.file_format,
overwrite=True)
shutil.rmtree(calc_dir)
def check_dir(self, path, delete=False, move=False):
"""
Arguments
---------
path: str
Path to the directory to check
delete: bool
If one of my_files is found in this directory, delete it from self.my_files.
This is necessary when checking the temp directory.
move: bool
If the structure is identified as being finished, then move to
the output directory.
"""
## Get iterator
path_file_iter = os.scandir(path)
## No need to check
if len(self.my_files) == 0:
return
del_list = []
for file_direntry in path_file_iter:
file_name = file_direntry.name
file_path = os.path.join(path, file_name)
## If it's a file_path, assume it's a structure file
if os.path.isfile(file_path):
s = read(file_path)
file_name = s.struct_id
## Otherwise it's a directory already named as a struct_id
## So set file_path equal to path of structure file inside the
## directory path
else:
file_path = os.path.join(file_path, file_name+".json")
## Check if this rank is responsible for this file
if self.my_filename_dict.get(file_name) != None:
## Get idx w.r.t. self.my_files
my_files_idx = self.my_filename_dict[file_name]
if self.check_fn(file_path, **self.check_fn_kw):
## Always delete from my_files if it passes
del_list.append(my_files_idx)
## Move to output dir if checking calc_dir
if move:
calc_dir = os.path.join(path, file_name)
## Write structure file to output directory
s = read(file_path)
temp_dict = {s.struct_id: s}
write(self.output_dir,
temp_dict,
file_format=self.file_format)
shutil.rmtree(calc_dir)
else:
if delete:
del_list.append(my_files_idx)
## Otherwise, file should stay in self.my_files because
## it needs to be calculated
pass
## Delete all indices that have been collected in reverse order so
## that index to be deleted is always correct
for idx in sorted(del_list, reverse=True):
del(self.my_files[idx])
def setup_calc(self):
"""
Sets up files for calculation in the calculation directory. In the
calculation directory, there will be a directory for each structure
that needs to be calculated. Inside will be the Structure.json file
for each structure.
"""
for file in self.my_files:
s = read(file)
temp_dict = {s.struct_id: s}
output = os.path.join(self.calc_dir, s.struct_id)
write(output, temp_dict, file_format=self.file_format)
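# Illustrative sketch (not part of the original module): a hypothetical check
# function following the documented signature check_fn(path, **kwargs) -> bool.
# It treats a structure as finished when a marker file sits next to it; the
# marker suffix is an assumption made up for this example.
import os


def check_marker_file(struct_path, marker_suffix=".done"):
    """Return True (skip recalculation) if '<struct_path><marker_suffix>' exists."""
    return os.path.exists(struct_path + marker_suffix)


# It could then be passed in place of check_properties, e.g.:
# Check(struct_dir, check_fn=check_marker_file, check_fn_kw={"marker_suffix": ".done"})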
| 2.453125
| 2
|
junospyez_ossh_server/__init__.py
|
jeremyschulman/junospyez-ossh-server
| 3
|
12784960
|
from .ossh_server import OutboundSSHServer
| 1.03125
| 1
|
ssd_liverdet/data/data_custom_v2.py
|
L0SG/Liver_segmentation
| 0
|
12784961
|
<reponame>L0SG/Liver_segmentation
# NEW impl for 1904 dataset: things are getting too big, use online data loading from disk!
# for arbitrary image dataset
# courtesy of https://github.com/amdegroot/ssd.pytorch/issues/72
# # for debugging perf hotspot
# from line_profiler import LineProfiler
# from utils import augmentations
import os
import os.path
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw, ImageFont
# import cv2
import numpy as np
# if sys.version_info[0] == 2:
# import xml.etree.cElementTree as ET
# else:
# import xml.etree.ElementTree as ET
LABELS = ['lesion']
LABELS_2_IND = {
'lesion': 0
}
# for making bounding boxes pretty
COLORS = ((255, 0, 0, 128), (0, 255, 0, 128), (0, 0, 255, 128),
(0, 255, 255, 128), (255, 0, 255, 128), (255, 255, 0, 128))
class DataSplitter():
def __init__(self, data_path, cross_validation=5, num_test_subject=10):
# self.root = root
self.data_path = data_path
self.metadata_path = os.path.join(self.data_path, "metadata.txt")
self.data = []
self.subjects = []
with open(self.metadata_path, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip().split("|")
self.data.append(line)
# append subject only
self.subjects.append(line[1])
self.subjects = sorted(list(set(self.subjects)))
self.cross_validation = cross_validation
self.num_test_subject = num_test_subject
self.subjects_train = self.subjects[:-num_test_subject]
self.subjects_test = self.subjects[-num_test_subject:]
self.subjects_cv_eval = []
self.subjects_cv_train = []
if cross_validation != 1:
divider = int(len(self.subjects_train) / cross_validation)
elif cross_validation == 1:
print("INFO: cross_validation set to 0. splitting data into single train-val set... (val defaults to 0.2)")
divider = int(len(self.subjects_train) * 0.2)
for i_cv in range(self.cross_validation):
self.subjects_cv_eval.append(self.subjects_train[divider * i_cv: min(divider * (i_cv+1), len(self.subjects_train))])
self.subjects_cv_train.append(list(filter(lambda x: x not in self.subjects_cv_eval[i_cv], self.subjects_train)))
self.data_train = list(filter(lambda x: x[1] in self.subjects_train, self.data))
self.data_test = list(filter(lambda x: x[1] in self.subjects_test, self.data))
self.data_cv_eval = []
self.data_cv_train = []
for i_cv in range(self.cross_validation):
self.data_cv_eval.append(list(filter(lambda x: x[1] in self.subjects_cv_eval[i_cv], self.data_train)))
self.data_cv_train.append(list(filter(lambda x: x[1] in self.subjects_cv_train[i_cv], self.data_train)))
# self.data_mean = self.get_data_mean()
# print("calculated data_mean: {}".format(self.data_mean))
def get_data_mean(self):
print("calculating data mean...")
# calculate the mean pixel value by iterating through the whole "training" dataset
mean_pixel = []
for index in range(len(self.data_train)):
img = np.load(os.path.join(self.data_path, self.data_train[index][0] + "_ct.npy")) * 255.
mean = img.mean()
mean_pixel.append(mean)
mean_pixel = np.array(mean_pixel).mean()
return mean_pixel
class FISHdetectionV2(data.Dataset):
"""VOC Detection Dataset Object
input is image, target is annotation
Arguments:
root (string): filepath to VOCdevkit folder.
image_set (string): imageset to use (eg. 'train', 'val', 'test')
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
dataset_name (string, optional): which dataset to load
(default: 'VOC2007')
"""
def __init__(self, data_path, data, transform=None, dataset_name='fish_detection', load_data_to_ram=False, debug=False, use_pixel_link=False):
# self.data_path will receive the root absolute path of the dataset
self.data_path = data_path
# self.data will receive partitioned list of paths from DataSplitter
self.data = [x[0] for x in data]
self.load_data_to_ram = load_data_to_ram
if self.load_data_to_ram:
print("loading data to ram...")
self.data_img = []
self.data_target = []
self.data_phase = []
for i in range(len(data)):
img = np.load(os.path.join(self.data_path, self.data[i] + "_ct.npy"))
# convert to [phase, H, W, C]
img = np.transpose(img, [0, 2, 3, 1])
# make it 0~255 uint8
img = (img * 255).astype(np.uint8)
# target = ET.parse(self._annopath % img_id).getroot()
target = np.load(os.path.join(self.data_path, self.data[i] + "_bbox.npy"))
# cast to float32 for proper scaling
target = target.astype(np.float32)
phase = np.load(os.path.join(self.data_path, self.data[i] + "_phase.npy"))
self.data_img.append(img)
self.data_target.append(target)
self.data_phase.append(phase)
if debug:
import cv2
import torchvision.utils as vutils
img_middle = img[:, :, :, 1]
# consider single image as [N, 1, H, W]
image_for_vis = torch.tensor(img_middle).unsqueeze(1)
img_out = []
target_for_vis = torch.tensor(target)
for j in range(image_for_vis.shape[0]):
img_rgb = np.repeat(image_for_vis[j, :, :], 3, axis=0).permute(1, 2, 0).numpy().copy()
for k in range(target_for_vis.shape[0]):
xmin, ymin, xmax, ymax = (target_for_vis[k][:4]).long().numpy()
cv2.rectangle(img_rgb, (xmin, ymin), (xmax, ymax), (255, 0, 0), 1)
img_out.append(torch.tensor(img_rgb))
img_out = torch.stack(img_out).permute(0, 3, 1, 2)
input_visual = vutils.make_grid(img_out.float(), normalize=True, scale_each=True)
print("test")
self.transform = transform
# self.target_transform = target_transform
self.name = dataset_name
# self._annopath = os.path.join('%s', 'Annotations', '%s.xml')
# self._imgpath = os.path.join('%s', 'JPEGImages', '%s.jpg')
# self.ids = list()
# for (year, name) in image_sets:
# rootpath = os.path.join(self.root, 'VOC' + year)
# for line in open(os.path.join(rootpath, 'ImageSets', 'Main', name + '.txt')):
# self.ids.append((rootpath, line.strip()))
self.use_pixel_link = use_pixel_link
if self.use_pixel_link:
print("INFO: using pixel_link version of dataset!")
def __getitem__(self, index):
im, gt, h, w = self.pull_item(index)
return im, gt
def __len__(self):
return len(self.data)
def pull_item(self, index):
# TODO: double-check new implementations are correct
# img_id = self.ids[index]
if self.load_data_to_ram:
img = self.data_img[index]
target = self.data_target[index]
else:
#img_path = self.image_paths[index]
img = np.load(os.path.join(self.data_path, self.data[index] + "_ct.npy"))
# convert to [phase, H, W, C]
img = np.transpose(img, [0, 2, 3, 1])
# make it 0~255 uint8
img = (img * 255).astype(np.uint8)
# target = ET.parse(self._annopath % img_id).getroot()
target = np.load(os.path.join(self.data_path, self.data[index] + "_bbox.npy"))
# cast to float32 for proper scaling
target = target.astype(np.float32)
# target = np.asarray(target).reshape(-1, 5)
try:
#img = cv2.imread(img_path)
# input img_path is already numpy
# this is unnecessary overhead
# img = img_path
# single phase image
if len(img.shape) == 3:
height, width, channels = img.shape
# multi-phase image: discard phase info
elif len(img.shape) == 4:
_, height, width, channels = img.shape
except:
raise NotImplementedError
# img = cv2.imread('random_image.jpg')
# height, width, channels = img.shape
# if self.target_transform is not None:
# target = self.target_transform(target, width, height)
if self.transform is not None:
#target = np.array(target)
"""
# multi-phase data have 4 ground truth
# randomly select one target
# TODO: fix this if the data is multi-class target since this assumes that all labels are the same
rng = np.random.randint(low=0, high=target.shape[0]+1)
target = target[rng]
"""
"""
# TODO: fix this later, generic dataset have multiple lesions
# currently data have only 1 lesion, unsqueeze first dim
target = np.expand_dims(target, 0)
"""
# scale each coord from absolute pixels to 0~1
for idx in range(target.shape[0]):
"""
x_center, y_center, h, w, cls = target[idx]
x_center /= height
y_center /= width
h /= height
w /= width
# WARNING: SSDAugmentation uses y_center as first elem, change it properly
target[idx] = np.array([y_center, x_center, w, h, cls])
"""
x_min, y_min, x_max, y_max, cls = target[idx]
x_min, x_max = x_min/width, x_max/width
y_min, y_max = y_min/height, y_max/height
target[idx] = np.array([x_min, y_min, x_max, y_max, cls])
# # debug hotspot
# lp = LineProfiler()
# lp.add_function(augmentations.ConvertFromInts.__call__)
# lp.add_function(augmentations.ToAbsoluteCoords.__call__)
# lp.add_function(augmentations.PixelJitter.__call__)
# lp.add_function(augmentations.PhotometricDistort.__call__)
# lp.add_function(augmentations.Expand.__call__)
# lp.add_function(augmentations.RandomSampleCrop.__call__)
# lp.add_function(augmentations.RandomMirror.__call__)
# lp.add_function(augmentations.ToPercentCoords.__call__)
# lp.add_function(augmentations.Resize.__call__)
# lp.add_function(augmentations.SubtractMeans.__call__)
# lp_wrapper = lp(self.transform.__call__)
# lp_wrapper(img, target[:, :4], target[:, 4])
# lp.print_stats()
# exit()
img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
""" our data is not rgb
# to rgb
img = img[:, :, (2, 1, 0)]
# img = img.transpose(2, 0, 1)
"""
if self.use_pixel_link:
target = np.hstack((boxes, np.expand_dims(labels["labels"], axis=1)))
labels["boxes"] = target
target = labels
else:
target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
# single-phase case
if len(img.shape) == 3:
return torch.from_numpy(img).permute(2, 0, 1), target, height, width
# multi-phase case
if len(img.shape) == 4:
img_torch = torch.from_numpy(img).permute(0, 3, 1, 2)
""" contiguous call for CPU seems slow, collapse after CUDA instead
img_torch = torch.from_numpy(img).permute(0, 3, 1, 2).contiguous()
# collapse phase & channel
img_torch = img_torch.view(-1, img_torch.shape[2], img_torch.shape[3])
"""
return img_torch, target, height, width
# return torch.from_numpy(img), target, height, width
def pull_image(self, index):
'''Returns the original image object at index in PIL form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
PIL img
'''
# img_id = self.ids[index]
if self.load_data_to_ram:
img = self.data_img[index]
else:
img = np.load(os.path.join(self.data_path, self.data[index] + "_ct.npy"))
# convert to [phase, H, W, C]
img = np.transpose(img, [0, 2, 3, 1])
# make it 0~255 uint8
img = (img * 255).astype(np.uint8)
# ct data is already numpy
#return cv2.imread(img_path, cv2.IMREAD_COLOR)
return img
def pull_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
Return:
list: [img_id, [(label, bbox coords),...]]
eg: ('001718', [('dog', (96, 13, 438, 332))])
'''
# img_id = self.ids[index]
# anno = ET.parse(self._annopath % img_id).getroot()
# gt = self.target_transform(anno, 1, 1)
#img_path = self.image_paths[index]
# target = ET.parse(self._annopath % img_id).getroot()
if self.load_data_to_ram:
target = self.data_target[index]
else:
target = np.load(os.path.join(self.data_path, self.data[index] + "_bbox.npy"))
# cast to float32 for proper scaling
target = target.astype(np.float32)
#return img_path, target
return target
def pull_phase(self, index):
phase = np.load(os.path.join(self.data_path, self.data[index] + "_phase.npy"))
return phase
def pull_tensor(self, index):
'''Returns the original image at an index in tensor form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
tensorized version of img, squeezed
'''
return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
def detection_collate_v2(batch, p_only=False):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on 0 dim
"""
targets = []
imgs = []
for sample in batch:
imgs.append(sample[0])
targets.append(torch.FloatTensor(sample[1]))
return torch.stack(imgs, 0), targets
def detection_collate_v2_pixel_link(batch, p_only=False):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on 0 dim
"""
targets = []
pixel_mask, neg_pixel_mask, labels, pixel_pos_weight, link_mask, boxes = [], [], [], [], [], []
imgs = []
for sample in batch:
imgs.append(sample[0])
sample_1 = sample[1]
pixel_mask.append(torch.LongTensor(sample_1["pixel_mask"]))
neg_pixel_mask.append(torch.LongTensor(sample_1["neg_pixel_mask"]))
labels.append(torch.FloatTensor(sample_1["labels"]))
pixel_pos_weight.append(torch.FloatTensor(sample_1["pixel_pos_weight"]))
link_mask.append(torch.LongTensor(sample_1["link_mask"]))
boxes.append(torch.FloatTensor(sample_1["boxes"]))
imgs = torch.stack(imgs, 0)
pixel_mask = torch.stack(pixel_mask, 0)
neg_pixel_mask = torch.stack(neg_pixel_mask, 0)
pixel_pos_weight = torch.stack(pixel_pos_weight, 0)
link_mask = torch.stack(link_mask, 0)
# no stacking for labels and boxes: variable number of lesions
targets = {'pixel_mask': pixel_mask, 'neg_pixel_mask': neg_pixel_mask,
'pixel_pos_weight': pixel_pos_weight, 'link_mask': link_mask, 'labels': labels, 'boxes': boxes}
return imgs, targets
if __name__ == "__main__":
from utils.augmentations import SSDAugmentation
data_path = "/home/tkdrlf9202/Datasets/liver_year1_dataset_extended_1904_preprocessed/ml_ready"
debug_dataset = DataSplitter(data_path, cross_validation=1, num_test_subject=10)
debug_dataset_cv0_train = debug_dataset.data_cv_train[0]
debug_dataset_cv0_eval = debug_dataset.data_cv_eval[0]
debug_fishdataset = FISHdetectionV2(data_path, debug_dataset_cv0_train, SSDAugmentation(), load_data_to_ram=True, debug=True)
test_batch = next(iter(debug_fishdataset))
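# Illustrative sketch (not from the original file): what detection_collate_v2
# expects and returns, using dummy tensors in place of real CT volumes. The
# shapes below are made up for illustration.
import numpy as np
import torch

dummy_batch = [
    (torch.zeros(4, 3, 300, 300), np.array([[0.1, 0.1, 0.4, 0.5, 0.0]], dtype=np.float32)),
    (torch.zeros(4, 3, 300, 300), np.array([[0.2, 0.2, 0.6, 0.7, 0.0],
                                             [0.3, 0.1, 0.5, 0.4, 0.0]], dtype=np.float32)),
]
imgs, targets = detection_collate_v2(dummy_batch)
print(imgs.shape)                  # torch.Size([2, 4, 3, 300, 300])
print([t.shape for t in targets])  # [torch.Size([1, 5]), torch.Size([2, 5])]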
| 2.15625
| 2
|
data_collection/ts/indonesia_timeseries/download_indonesia_prices.py
|
f4bD3v/humanitas
| 6
|
12784962
|
#!/usr/bin/env python2
import urllib2
import shutil
import re
import sys
import datetime
from lxml import etree

usage_str = """
This script downloads daily food prices from http://m.pip.kementan.org/index.php (Indonesia).
Provide date in DD/MM/YYYY format.
Example:
./download_indonesia_prices.py 15/03/2013
"""


def download_table(date):
    """Download price table for a given date"""
    main_url = 'http://m.pip.kementan.org/index.php'
    params = 'laporan=LHK-01&tanggal=%s&bulan=%s&tahun=%s&pilihlaporan=View+Laporan' % (date.day, date.month, date.year)
    req = urllib2.Request(main_url, params)
    response = urllib2.urlopen(req)
    html_code = response.read()
    regex = re.compile(r'<div id="content" align="center">.*(<table.+</table>)', re.DOTALL)
    match = regex.search(html_code)
    if not match:
        print "ERROR: table not detected"
        sys.exit(1)
    table_html = match.group(1)
    # Remove thousands separators (commas between digits)
    table_html = re.sub(r'(?<=\d),(?=\d)', '', table_html)
    table = etree.XML(table_html)
    rows = iter(table)
    actual_headers = [col.text for col in next(rows)]
    # TODO: translate the original Indonesian headers programmatically
    headers = ['Dried Grain Harvest', 'Dry unhusked', 'Rice Medium', 'Rice Premium', 'Corn', 'Local soybean', 'Local Peanuts', 'Green Beans', 'Cassava', 'Sweet potato', 'Cassava spindles']
    print "; ".join(headers), "\n"
    # Print table
    for row in rows:
        if all(v.text is None for v in row):
            continue
        print ('''"%s"''') % row[0].text,
        for col in row[1:]:
            print col.text,
        print


def parse_date(date_string):
    """Parse and validate a DD/MM/YYYY date string"""
    match = re.match(r'(\d{2})/(\d{2})/(\d{4})', date_string)
    if not match:
        sys.exit("ERROR: invalid date")
    day, month, year = int(match.group(1)), int(match.group(2)), int(match.group(3))
    return datetime.date(year, month, day)


def usage():
    print usage_str


if __name__ == "__main__":
    if len(sys.argv) == 1 or sys.argv[1] in ['-h', '--help']:
        usage()
        sys.exit(0)
    date_string = sys.argv[1]
    date = parse_date(date_string)
    download_table(date)
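# Illustrative sketch (not from the original script): what the lookaround
# substitution above does to numbers embedded in the scraped HTML. The sample
# string is made up.
import re

sample = '<td>1,234,567</td><td>89</td>'
print(re.sub(r'(?<=\d),(?=\d)', '', sample))  # <td>1234567</td><td>89</td>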
| 3.390625
| 3
|
Seq2Seq/seq2seq_advanced.py
|
STHSF/DeepNaturallanguageprocessing
| 15
|
12784963
|
# coding=utf-8
import tensorflow as tf
import numpy as np
import helpers
tf.reset_default_graph()
sess = tf.InteractiveSession()
PAD = 0
EOS = 1
vocab_size = 10
input_embedding_size = 20
encoder_hidden_units = 512
decoder_hidden_units = encoder_hidden_units * 2
# define inputs
encoder_input = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')
# encoder_input_length = tf.placeholder(shape=(None,), dtype=tf.int32, name='encoder_input_length')
decoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_target')
decoder_input = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_input')
# define embedding layer
embedding = tf.Variable(tf.random_uniform([vocab_size, input_embedding_size], -1.0, 1.0), dtype=tf.float32)
encoder_inputs_embedded = tf.nn.embedding_lookup(embedding, encoder_input, name='encoder_inputs_embedded')
decoder_inputs_embedded = tf.nn.embedding_lookup(embedding, decoder_input, name='decoder_inputs_embedded')
encoder_cell_fw = tf.contrib.rnn.LSTMCell(encoder_hidden_units)
encoder_cell_bw = tf.contrib.rnn.LSTMCell(encoder_hidden_units)
((encoder_fw_outputs,
encoder_bw_outputs),
(encoder_fw_final_state,
encoder_bw_final_state)) = (tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_cell_fw,
cell_bw=encoder_cell_bw,
inputs=encoder_inputs_embedded,
dtype=tf.float32,
time_major=False))
encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
encoder_final_state_c = tf.concat((encoder_fw_final_state.c, encoder_bw_final_state.c), 1)
encoder_final_state_h = tf.concat((encoder_fw_final_state.h, encoder_bw_final_state.h), 1)
encoder_final_state = tf.contrib.rnn.LSTMStateTuple(c=encoder_final_state_c,
h=encoder_final_state_h)
decoder_cell = tf.contrib.rnn.LSTMCell(decoder_hidden_units)
encoder_max_time, batch_size = tf.unstack(tf.shape(encoder_input))
print(encoder_max_time)
print(batch_size)
# decoder_length = encoder_input_length + 3
decoder_outputs, decoder_final_state = tf.nn.dynamic_rnn(decoder_cell, encoder_outputs,
initial_state=encoder_final_state, dtype=tf.float32)
decoder_logits = tf.contrib.layers.linear(decoder_outputs, vocab_size)
decoder_prediction = tf.argmax(decoder_logits, 2)
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=tf.one_hot(decoder_targets, depth=vocab_size, dtype=tf.float32),
logits=decoder_logits)
loss = tf.reduce_mean(stepwise_cross_entropy)
train_op = tf.train.AdamOptimizer().minimize(loss)
sess.run(tf.global_variables_initializer())
batch_ = [[6], [3, 4], [9, 8, 7]]
batch_, batch_length_ = helpers.batch(batch_)
print('batch_encoded:\n' + str(batch_))
din_, dlen_ = helpers.batch(np.ones(shape=(3, 1), dtype=np.int32), max_sequence_length=4)
print('decoder inputs:\n' + str(din_))
pred_ = sess.run(decoder_prediction,
feed_dict={
encoder_input: batch_,
decoder_input: din_,})
print('decoder predictions:\n' + str(pred_))
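# Illustrative sketch (not from the original script): why decoder_hidden_units
# is set to 2 * encoder_hidden_units above. Concatenating the forward and
# backward LSTM final states doubles the state width, shown here with plain
# numpy stand-ins for the two directional states.
import numpy as np

batch = 3
fw_state = np.zeros((batch, 512))
bw_state = np.zeros((batch, 512))
merged = np.concatenate((fw_state, bw_state), axis=1)
print(merged.shape)  # (3, 1024)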
| 2.34375
| 2
|
data/states/lobby/lobby_screen.py
|
iminurnamez/Python_Arcade_Collab
| 2
|
12784964
|
import math
import pygame as pg
from collections import OrderedDict
from data.core import tools, constants
from data.components.labels import Button, ButtonGroup
from data.components.special_buttons import GameButton, NeonButton
from data.components.animation import Animation
from data.components.state_machine import _State
class LobbyScreen(_State):
"""
This state represents the lobby where the player can choose
which game they want to play or view their high scores. This is also
the exit point for the game.
"""
per_page = 6
def __init__(self, controller):
super(LobbyScreen, self).__init__(controller)
self.animations = pg.sprite.Group()
def update_screen_buttons(self, games):
screen_rect = pg.Rect((0, 0), constants.RENDER_SIZE)
number_of_pages = int(math.ceil(len(games) / float(self.per_page)))
self.loop_length = constants.RENDER_SIZE[0] * number_of_pages
self.game_buttons = self.make_game_pages(games, screen_rect, self.per_page)
nav_buttons = self.make_navigation_buttons(screen_rect)
main_buttons = self.make_main_buttons(screen_rect)
self.buttons = ButtonGroup(nav_buttons, main_buttons)
def make_game_pages(self, games, screen_rect, per):
games_list = list(games.keys())
groups = (games_list[i:i+per] for i in range(0, len(games), per))
columns = 3
width, height = GameButton.width, GameButton.height
spacer_x, spacer_y = 50, 80
start_x = (screen_rect.w - width * columns - spacer_x * (columns-1))//2
start_y = screen_rect.top + 105
step_x, step_y = width + spacer_x, height + spacer_y
buttons = ButtonGroup()
for offset,group in enumerate(groups):
offset *= constants.RENDER_SIZE[0]
for i,game in enumerate(group):
y, x = divmod(i, columns)
pos = (start_x + step_x * x + offset, start_y + step_y * y)
GameButton(pos, game, games[game], self.change_state, buttons)
return buttons
def make_navigation_buttons(self, screen_rect):
sheet = constants.GFX["nav_buttons"]
size = (53, 50)
y = 530
from_center = 15
icons = tools.strip_from_sheet(sheet, (0, 0), size, 4)
buttons = ButtonGroup()
l_kwargs = {"idle_image" : icons[0], "hover_image" : icons[1],
"call" : self.scroll_page, "args" : 1,
"bindings" : [pg.K_LEFT, pg.K_KP4]}
r_kwargs = {"idle_image" : icons[2], "hover_image" : icons[3],
"call" : self.scroll_page, "args" : -1,
"bindings" : [pg.K_RIGHT, pg.K_KP6]}
left = Button(((0, y), size), buttons, **l_kwargs)
left.rect.right = screen_rect.centerx - from_center
right = Button(((0, y), size), buttons, **r_kwargs)
right.rect.x = screen_rect.centerx + from_center
return buttons
def make_main_buttons(self, screen_rect):
buttons = ButtonGroup()
pos = (9, screen_rect.bottom-(NeonButton.height+11))
NeonButton(pos, "Credits", 32, self.change_state, "credits", buttons)
pos = (screen_rect.right-(NeonButton.width+10),
screen_rect.bottom-(NeonButton.height+11))
NeonButton(pos, "High Scores", 28, self.change_state, "high_scores",
buttons)
pos = (screen_rect.centerx-(NeonButton.width//2),
screen_rect.bottom-(NeonButton.height+11))
NeonButton(pos, "Exit", 32, self.exit_game, None,
buttons, bindings=[pg.K_ESCAPE])
rect_style = (screen_rect.left, screen_rect.top, 150, 95)
return buttons
def scroll_page(self, mag):
if not self.animations and len(self.game_buttons) > self.per_page:
for game in self.game_buttons:
self.normalize_scroll(game, mag)
fx, fy = game.rect.x+constants.RENDER_SIZE[0]*mag, game.rect.y
ani = Animation(x=fx, y=fy, duration=350.0,
transition='in_out_quint', round_values=True)
ani.start(game.rect)
self.animations.add(ani)
constants.SFX["cardplace4"].play()
def normalize_scroll(self, game, mag):
if game.rect.x < 0 and mag == -1:
game.rect.x += self.loop_length
elif game.rect.x >= constants.RENDER_SIZE[0] and mag == 1:
game.rect.x -= self.loop_length
def startup(self, persistent):
super(LobbyScreen, self).startup(persistent)
games = self.controller.game_thumbs
self.update_screen_buttons(games)
def exit_game(self, *args):
self.done = True
self.quit = True
def change_state(self, next_state):
self.done = True
self.next = next_state
def get_event(self, event, scale=(1,1)):
if event.type == pg.QUIT:
self.exit_game()
else:
self.buttons.get_event(event)
self.game_buttons.get_event(event)
def update(self, surface, keys, current_time, dt, scale):
mouse_pos = tools.scaled_mouse_pos(scale)
self.buttons.update(mouse_pos)
self.game_buttons.update(mouse_pos)
self.animations.update(dt)
self.draw(surface)
def draw(self, surface):
rect = surface.get_rect()
surface.fill(constants.BACKGROUND_BASE)
self.buttons.draw(surface)
for button in self.game_buttons:
if button.rect.colliderect(rect):
button.draw(surface)
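# Illustrative sketch (not part of the original module): the divmod-based grid
# placement used in make_game_pages above, with made-up sizes so the row/column
# arithmetic can be checked without pygame.
columns = 3
start_x, start_y = 100, 105
step_x, step_y = 250, 220
for i in range(6):
    y, x = divmod(i, columns)
    pos = (start_x + step_x * x, start_y + step_y * y)
    print(i, pos)
# 0 (100, 105)   1 (350, 105)   2 (600, 105)
# 3 (100, 325)   4 (350, 325)   5 (600, 325)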
| 2.671875
| 3
|
DIYgod/0006/a.py
|
saurabh896/python-1
| 3,976
|
12784965
|
<gh_stars>1000+
# Read a file line by line in three equivalent ways.

# 1) Read all lines into memory at once
f = open('a.txt', 'rb')
lines = f.readlines()
for line in lines:
    pass
f.close()

# 2) Iterate over the file object directly
f = open('a.txt', 'rb')
for line in f:
    pass
f.close()

# 3) Read one line at a time until EOF
f = open('a.txt', 'rb')
while True:
    line = f.readline()
    if not line:
        break
    pass
f.close()
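# Illustrative addition (not in the original exercise): the same iteration
# using a context manager, which closes the file automatically even on error.
with open('a.txt', 'rb') as f:
    for line in f:
        pass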
| 2.921875
| 3
|
torchpruner/torchpruner/pruner/opt_pruner.py
|
germanenik/CS231N-Pruning
| 0
|
12784966
|
<gh_stars>0
import torch


class OptimizerPruner:
    @staticmethod
    def prune(optimizer, param, axis, keep_indices, device):
        if isinstance(optimizer, torch.optim.SGD):
            return OptimizerPruner._prune_sgd(optimizer, param, axis, keep_indices, device)
        elif isinstance(optimizer, torch.optim.Adam):
            return OptimizerPruner._prune_adam(optimizer, param, axis, keep_indices, device)

    @staticmethod
    def _prune_sgd(optimizer, param, axis, keep_indices, device):
        for p in optimizer.param_groups[0]["params"]:
            if id(p) == id(param):
                pass
                # if "momentum_buffer" in optimizer.state_dict()["state"][id(p)]:
                #     momentum = optimizer.state_dict()["state"][id(p)]["momentum_buffer"]
                #     momentum.data = momentum.data.index_select(
                #         axis, torch.tensor(keep_indices).to(device)
                #     )

    @staticmethod
    def _prune_adam(optimizer, param, axis, keep_indices, device):
        for p in optimizer.param_groups[0]["params"]:
            if id(p) == id(param):
                pass
                # if "momentum_buffer" in optimizer.state_dict()["state"][id(p)]:
                #     momentum = optimizer.state_dict()["state"][id(p)]["momentum_buffer"]
                #     momentum.data = momentum.data.index_select(
                #         axis, torch.tensor(keep_indices).to(device)
                #     )
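# Illustrative sketch (not part of the original pruner): the index_select
# operation the commented-out code above would perform, shown on a standalone
# tensor. Keeping rows 0 and 2 of a 4x3 buffer yields a 2x3 buffer.
import torch

buffer = torch.arange(12.0).reshape(4, 3)
keep_indices = [0, 2]
pruned = buffer.index_select(0, torch.tensor(keep_indices))
print(pruned.shape)  # torch.Size([2, 3])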
| 2.1875
| 2
|
jsk_rqt_plugins/src/jsk_rqt_plugins/label.py
|
ShunjiroOsada/jsk_visualization_package
| 5
|
12784967
|
<reponame>ShunjiroOsada/jsk_visualization_package<gh_stars>1-10
from rqt_gui_py.plugin import Plugin
import python_qt_binding.QtGui as QtGui
from python_qt_binding.QtGui import (QAction, QIcon, QMenu, QWidget,
QPainter, QColor, QFont, QBrush,
QPen, QMessageBox, QSizePolicy,
QLabel, QComboBox)
from python_qt_binding.QtCore import Qt, QTimer, qWarning, Slot, QEvent, QSize
from threading import Lock
import rospy
import roslib
import rqt_plot
import python_qt_binding.QtCore as QtCore
from std_msgs.msg import Bool, Time
import math
from resource_retriever import get_filename
import yaml
import os, sys
from std_msgs.msg import String
from .util import get_slot_type_field_names
from .hist import ROSData
from .button_general import LineEditDialog
class StringLabel(Plugin):
"""
rqt plugin to provide simple label
"""
def __init__(self, context):
super(StringLabel, self).__init__(context)
self.setObjectName("StringLabel")
self._widget = StringLabelWidget()
context.add_widget(self._widget)
def save_settings(self, plugin_settings, instance_settings):
self._widget.save_settings(plugin_settings, instance_settings)
def restore_settings(self, plugin_settings, instance_settings):
self._widget.restore_settings(plugin_settings, instance_settings)
def trigger_configuration(self):
self._widget.trigger_configuration()
class StringLabelWidget(QWidget):
def __init__(self):
super(StringLabelWidget, self).__init__()
self.lock = Lock()
vbox = QtGui.QVBoxLayout(self)
self.label = QLabel()
self.label.setAlignment(Qt.AlignLeft)
self.label.setSizePolicy(QSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored))
font = QFont("Helvetica", 14)
self.label.setFont(font)
self.label.setWordWrap(True)
vbox.addWidget(self.label)
self.string_sub = None
self._string_topics = []
self._update_topic_timer = QTimer(self)
self._update_topic_timer.timeout.connect(self.updateTopics)
self._update_topic_timer.start(1000)
self._active_topic = None
# to update label visualization
self._dialog = LineEditDialog()
self._rosdata = None
self._start_time = rospy.get_time()
self._update_label_timer = QTimer(self)
self._update_label_timer.timeout.connect(self.updateLabel)
self._update_label_timer.start(40)
def trigger_configuration(self):
self._dialog.exec_()
self.setupSubscriber(self._dialog.value)
def updateLabel(self):
if not self._rosdata:
return
try:
_, data_y = self._rosdata.next()
except rqt_plot.rosplot.RosPlotException, e:
self._rosdata = None
return
if len(data_y) == 0:
return
latest = data_y[-1] # get latest data
# supports std_msgs/String as well as string data nested in rosmsg
if type(latest) == String:
self.string = latest.data
else:
self.string = latest
try:
self.label.setText(self.string)
except TypeError, e:
rospy.logwarn(e)
def updateTopics(self):
need_to_update = False
for (topic, topic_type) in rospy.get_published_topics():
msg = roslib.message.get_message_class(topic_type)
field_names = get_slot_type_field_names(msg, slot_type='string')
for field in field_names:
string_topic = topic + field
if string_topic not in self._string_topics:
self._string_topics.append(string_topic)
need_to_update = True
if need_to_update:
self._string_topics = sorted(self._string_topics)
self._dialog.combo_box.clear()
for topic in self._string_topics:
self._dialog.combo_box.addItem(topic)
if self._active_topic:
if self._active_topic not in self._string_topics:
self._string_topics.append(self._active_topic)
self._dialog.combo_box.addItem(self._active_topic)
self._dialog.combo_box.setCurrentIndex(self._string_topics.index(self._active_topic))
def setupSubscriber(self, topic):
if not self._rosdata:
self._rosdata = ROSData(topic, self._start_time)
else:
if self._rosdata != topic:
self._rosdata.close()
self._rosdata = ROSData(topic, self._start_time)
else:
rospy.logwarn("%s is already subscribed", topic)
self._active_topic = topic
def onActivated(self, number):
self.setupSubscriber(self._string_topics[number])
def save_settings(self, plugin_settings, instance_settings):
if self._active_topic:
instance_settings.set_value("active_topic", self._active_topic)
def restore_settings(self, plugin_settings, instance_settings):
if instance_settings.value("active_topic"):
topic = instance_settings.value("active_topic")
self._dialog.combo_box.addItem(topic)
self.setupSubscriber(topic)
| 1.921875
| 2
|
PythonExercicios/ex032.py
|
gabjohann/python_3
| 0
|
12784968
|
<filename>PythonExercicios/ex032.py
# Write a program that reads any year and says whether it is a leap year
# ---------------------------- wrong ----------------------------
ano = int(input('Digite o ano que deseja saber se é bissexto: '))
bissexto = ano % 4
if bissexto == 0:
    print('O ano {} é bissexto.'.format(ano))
else:
    print('O ano não é bissexto.')
# ---------------------------- wrong ----------------------------
# (Checking only divisibility by 4 misses the century rule, hence "wrong".)
# Solution from the lesson
# ano = int(input('Que ano quer analisar? '))
# if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:
#     print('O ano {} é BISSEXTO'.format(ano))
# else:
#     print('O ano {} NÃO É BISSEXTO'.format(ano))
# Another solution
# from datetime import date
# ano = int(input('Que ano quer analisar? Coloque 0 para analisar o ano atual: '))
# if ano == 0:
#     ano = date.today().year
# if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:
#     print('O ano {} é BISSEXTO'.format(ano))
# else:
#     print('O ano {} NÃO É BISSEXTO'.format(ano))
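# Illustrative addition (not in the original exercise): the standard library
# already implements the full leap-year rule, which is handy for checking the
# hand-written condition above.
import calendar

for ano in (1900, 2000, 2020, 2023):
    print(ano, calendar.isleap(ano))  # False True True False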
| 4.125
| 4
|
mat/mat1.py
|
vhmercadoo/pytrail
| 0
|
12784969
|
<reponame>vhmercadoo/pytrail
var = 1
print (var)
| 0.964844
| 1
|
stitch.py
|
TheShrug/advanced_cartography
| 2
|
12784970
|
# Copyright (c) 2020 <NAME>
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from os import listdir, path
from time import sleep, strftime
from PIL import Image, ImageDraw
from threading import Thread
from math import floor
class pic:
def __init__(self, fpath, cropFactor, featherFactor):
self.name = path.basename(fpath)
self.fpath = fpath
self.im = Image.open(fpath)
self.size = self.im.size
self.coords = self.name.lstrip('0123456789_')
self.coords = self.coords.lstrip('(')
self.coords = self.coords.rstrip(').jpg')
self.coords = self.coords.split(',')
self.coords = [float(i) for i in self.coords]
self.mtime = path.getmtime(fpath)
self.featherFactor = featherFactor
self.cTuple = (round((self.size[0]-self.size[0]*cropFactor)/2), # LEFT
round((self.size[1]-self.size[1]*cropFactor)/2), # TOP
round((self.size[0]+self.size[0]*cropFactor)/2), # RITE
round((self.size[1]+self.size[1]*cropFactor)/2)) # BOTM
self.cSize = (self.cTuple[2]-self.cTuple[0],
self.cTuple[3]-self.cTuple[1])
self.cim = self.im.crop(self.cTuple)
def getFMask(self):
""" Returns an edge feather mask to be used in paste function.
"""
if self.featherFactor < 0.01:
mask = Image.new('L', self.cSize, color=255)
return mask
mask = Image.new('L', self.cSize, color=255)
draw = ImageDraw.Draw(mask)
x0, y0 = 0, 0
x1, y1 = self.cSize
# print(f'Crop Tuple: {(x1, y1)}')
feather = round(self.cSize[1] * self.featherFactor)
# print(f'Feather Pixels: {feather}')
for i in range(round(self.cSize[1]/2)):
x1, y1 = x1-1, y1-1
alpha = 255 if i > feather else round(255*(feather/(i+1))**(-1))
draw.rectangle([x0, y0, x1, y1], outline=alpha)
x0, y0 = x0+1, y0+1
return mask
def closePIL(self):
"""closes the im and cim PIL objects
"""
self.im.close()
self.cim.close()
def getSkull(self):
"""adds a semi-transparent 8-bit image (skull) to cropped
PIL image. For use on last pic in list when fPath is marked
with [DEAD]
"""
skull = Image.open('.\\files\\gfx\\skull.png')
skull = skull.resize(self.cSize)
self.cim.paste(skull, mask=skull)
def getYAH(self):
"""adds a semi-transparent 8-bit image ("you are here" marker)
to cropped PIL image. For use on last pic in list when fPath
is NOT marked with [DEAD]
"""
yah = Image.open('.\\files\\gfx\\yah.png')
yah = yah.resize(self.cSize)
self.cim.paste(yah, mask=yah)
def stitch2(rawPath,
destPath='',
cropFactor=0.75,
featherFactor=0.15,
marks=True):
piclist = []
batches = listdir(rawPath)
cnt = 0
tooBig = False
dstPath = destPath
if dstPath == '':
dstPath = rawPath.replace('raws', 'maps', 1)
fullPath = dstPath + '\\' + '[MAP]_' + strftime("%Y%m%d-%H%M%S") + '.png'
print(f'Getting images from {rawPath}')
for i in batches:
names = listdir(rawPath + '\\' + i)
paths = [rawPath + '\\' + i + '\\' + j for j in names]
for k in range(len(names)):
cnt += 1
print(f'Images found: {cnt}', end='\r')
piclist.append(pic(paths[k], cropFactor, featherFactor))
piclist.sort(key=lambda i: i.mtime)
if rawPath.find('DEAD') != -1 and marks:
piclist[-1].getSkull()
elif marks:
piclist[-1].getYAH()
# This next section calculates the bounds of the final map. It
# may run into trouble in the future with image overwrites, as
# currently I'm not doing anything to prevent them.
xCoordMax = max(i.coords[0] for i in piclist)
xCoordMin = min(i.coords[0] for i in piclist)
yCoordMax = max(i.coords[1] for i in piclist)
yCoordMin = min(i.coords[1] for i in piclist)
xMaxPad = round(next(
((i.size[0]/2) for i in piclist if i.coords[0] == xCoordMax)
))
xMinPad = round(next(
((i.size[0]/2) for i in piclist if i.coords[0] == xCoordMin)
))
yMaxPad = round(next(
((i.size[1]/2) for i in piclist if i.coords[1] == yCoordMax)
))
yMinPad = round(next(
((i.size[1]/2) for i in piclist if i.coords[1] == yCoordMin)
))
mapWidth = round(xCoordMax - xCoordMin) + xMaxPad + xMinPad
mapHeight = round(yCoordMax - yCoordMin) + yMaxPad + yMinPad
print(f'\nYou have explored an area {mapWidth} pixels wide ' +
f'and {mapHeight} pixels deep.')
sleep(1)
if mapWidth > 65535 or mapHeight > 65535:
tooBig = True
ratio = 65535 / max(mapWidth, mapHeight)
mapWidth = floor(mapWidth*ratio-10) # subtract 10 just to be sure
mapHeight = floor(mapHeight*ratio-10)
print(f"That's too many, downscaling by {round(ratio*100, 3)}%")
print('(Limit is 65535px on either axis)')
sleep(1)
print(f'New output dimensions: {mapWidth} x {mapHeight}')
sleep(1)
print('Building canvas')
bigMap = Image.new('RGB', (mapWidth, mapHeight), color=0)
if tooBig:
for i in piclist:
print(f'Applying image {piclist.index(i)+1} of {len(piclist)}',
end='\r')
nWidth = round(i.cSize[0]*ratio)
nHeight = round(i.cSize[1]*ratio)
resized = i.cim.resize((nWidth, nHeight))
resizedMask = i.getFMask().resize((nWidth, nHeight))
targ = (round((i.coords[0]-xCoordMin) * ratio),
round((i.coords[1]-yCoordMin) * ratio))
bigMap.paste(resized, targ, resizedMask)
i.closePIL()
else:
for i in piclist:
print(f'Applying image {piclist.index(i)+1} of {len(piclist)}',
end='\r')
targ = (round(i.coords[0] - xCoordMin),
round(i.coords[1] - yCoordMin))
bigMap.paste(i.cim, targ, i.getFMask())
i.closePIL()
# Main thread to save map
saver = Thread(
target=bigMap.save,
args=(fullPath,),
kwargs={'subsampling': 0, 'quality': 100})
def fEllipsis():
"""makes the ellipsis move while file is being saved, so user
doesn't get bored.
"""
print('\n', end='\r')
while saver.is_alive():
print('Drawing map ', end='\r')
sleep(0.3)
print('Drawing map. ', end='\r')
sleep(0.3)
print('Drawing map.. ', end='\r')
sleep(0.3)
print('Drawing map...', end='\r')
sleep(0.3)
ellipsis = Thread(target=fEllipsis)
saver.start()
ellipsis.start()
saver.join()
ellipsis.join()
print('\nMap Complete')
print(f'Path: {fullPath}')
    return fullPath
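# Example invocation -- a minimal sketch added for illustration, not part of
# the original script. The raw-capture directory below is an assumption;
# stitch2() expects a folder of batch sub-folders whose file names encode
# pixel coordinates, e.g. '0123_(1024.0,768.0).jpg'.
if __name__ == '__main__':
    stitch2('.\\files\\raws\\example_session',
            cropFactor=0.75,
            featherFactor=0.15,
            marks=True)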
| 2.375
| 2
|
harvester/server/expiring_queue.py
|
Nisthar/CaptchaHarvester
| 545
|
12784971
|
<reponame>Nisthar/CaptchaHarvester
from queue import Empty, Queue, SimpleQueue
from threading import Timer
from typing import Any
class ExpiringQueue(Queue):
def __init__(self, timeout: int, maxsize=0):
super().__init__(maxsize)
self.timeout = timeout
self.timers: 'SimpleQueue[Timer]' = SimpleQueue()
def put(self, item: Any) -> None:
thread = Timer(self.timeout, self.expire)
thread.start()
self.timers.put(thread)
super().put(item)
def get(self, block=True, timeout=None) -> Any:
thread = self.timers.get(block, timeout)
thread.cancel()
return super().get(block, timeout)
def expire(self):
self.get()
def to_list(self):
with self.mutex:
return list(self.queue)
if __name__ == '__main__':
import time
eq = ExpiringQueue(timeout=1)
print(eq)
eq.put(1)
time.sleep(.5)
eq.put(2)
print(eq.to_list())
time.sleep(.6)
print(eq.get_nowait())
print(eq.get_nowait())
| 2.84375
| 3
|
ExceptionHandling/getints.py
|
Vashesh08/Learn-Python-Programming-Masterclass
| 0
|
12784972
|
<gh_stars>0
import sys
def get_int(prompt):
while True:
try:
number = int(input(prompt))
return number
except EOFError:
sys.exit(1)
except: # Should be except ValueError:
print("Invalid Number Entered. Please Try Again:")
finally:
print("The finally clause always execute")
def divide_numbers(a, b):
# while True:
try:
print(a/b)
except ZeroDivisionError:
print("Cannot Divide by Zero.")
sys.exit(2)
# b = get_int("Please Enter Valid Second Number:")
else:
print("Division performed successfully")
first_number = get_int("Please Enter First Number:")
second_number = get_int("Please Enter Second Number:")
divide_numbers(first_number, second_number)
| 3.6875
| 4
|
wikipediabase/resolvers/term.py
|
fakedrake/WikipediaBase
| 1
|
12784973
|
<reponame>fakedrake/WikipediaBase
# -*- coding: utf-8 -*-
import re
from wikipediabase.provider import provide
from wikipediabase.resolvers.base import BaseResolver
from wikipediabase.lispify import lispify
from wikipediabase.util import get_infoboxes, get_article, totext, markup_unlink
class TermResolver(BaseResolver):
"""
A resolver that can resolve a fixed set of simple attributes.
"""
priority = 11
# TODO: add provided methods for SHORT-ARTICLE
# as described in the WikipediaBase InfoLab wiki page
def _should_resolve(self, cls):
return cls == 'wikibase-term'
@provide(name="coordinates")
def coordinates(self, article, _):
for ibox in get_infoboxes(article):
src = ibox.html_source()
if src is None:
return None
xpath = ".//span[@id='coordinates']"
lat = src.find(xpath + "//span[@class='latitude']")
lon = src.find(xpath + "//span[@class='longitude']")
if lat is None or lon is None:
return None
nlat = self._dton(totext(lat))
nlon = self._dton(totext(lon))
return lispify([nlat, nlon], typecode='coordinates')
@provide(name="image-data")
def image(self, article, attribute):
# Make sure we are not getting back a LispType.
infoboxes = get_infoboxes(article)
imgs = [ibx.get('image') for ibx in infoboxes]
if not imgs:
return None
img = imgs[0]
fnam = img.replace(" ", "_")
if "File:" in img:
fnam = fnam.split("File:")[1]
# TODO : is this a temporary fix? investigate what this annotation means
# see '<NAME>' for an example
if "{{!}}border" in img:
fnam = fnam.split("{{!}}border")[0]
caps = [ibx.get('caption') for ibx in infoboxes]
caps = filter(lambda x: x, caps) # remove None values
return lispify([0, fnam] + ([markup_unlink(caps[0])] if caps else []))
@provide(name='number')
def number(self, article, _):
"""
True if it is plural.
"""
# First paragraph refers more often to the symbol itself
# rather than things related to it.
txt = get_article(article).first_paragraph()
nay = sum(map(txt.count, [' is ', ' was ', ' has ']))
yay = sum(map(txt.count, [' are ', ' were ', ' have ']))
# inequality because there are many more nays
return lispify(yay > nay, typecode='calculated')
@provide(name='proper')
def proper(self, article, _):
"""
Get a quick boolean answer based on the symbol text and the
article text.
"""
        # Blindly copied from the ruby version
a = re.sub(r"\s*\(.*\)\s*", "", article.replace("_", " "))
txt = totext(get_article(article).html_source())
ret = (txt.count(a.lower()) - txt.count(". " + a.lower()) <
txt.count(a))
return lispify(ret, typecode='calculated')
@provide(name='short-article')
def short_article(self, symbol, _):
"""
The first paragraph of the article, or if the first paragraph is
shorter than 350 characters, then returns the first paragraphs such
that the sum of the rendered characters is at least 350.
"""
# TODO: check if the first paragraph is shorter than 350 characters
first_paragraph = get_article(symbol).first_paragraph(keep_html=True)
return lispify(first_paragraph, typecode='html')
@provide(name='url')
def url(self, article, _):
"""
Note that this url is the wikipedia.org url. NOT the place where
we got the page.
"""
        # Will also take care of redirections.
article = get_article(article)
url = article.url()
return lispify(url, typecode='url')
@provide(name="word-count")
def word_count(self, article, attribute):
self.log().info("Trying 'word-count' tag from static resolver.")
self._tag = "html"
return len(self._words(self.fetcher.html_source(article)))
@staticmethod
def _dton(s):
"""
degrees minutes, seconds to float or int.
"""
ls = re.split(ur'(?:°|′|″)', s.strip())
sig = -1 if ls.pop() in ['S', 'W'] else 1
ret = int(ls.pop(0))
if len(ls):
m = float(ls.pop(0))
ret += m / 60
if len(ls):
s = float(ls.pop(0))
ret += s / 3600
return sig * (ret if isinstance(ret, int) else round(ret, 4))
def _words(self, article):
return re.findall('\w+', article.lower())
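# Worked example for TermResolver._dton -- a sketch added for illustration;
# the coordinate string is hypothetical, not taken from the original module:
#
#   >>> TermResolver._dton(u'48°51′24″N')
#   48.8567
#
# A trailing 'S' or 'W' in the string flips the sign of the result.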
| 2.609375
| 3
|
examples/TushareDataService/test3.py
|
gongkai90000/2010test
| 0
|
12784974
|
<reponame>gongkai90000/2010test
# encoding: UTF-8
import requests
import sys
import csv
import os
'''
future_code = 'M0'
url_str = ('http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesMiniKLine15m?symbol=' + future_code)
r = requests.get(url_str)
#http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesMiniKLine15m?symbol=M0
r_json = r.json()
r_lists = list(r_json)
#print('future_code,date,open,high,low,close,vol')
headers=['id','username','password','age','country']
rows=[(1001,'qiye','qiye_pass',20,'china'),(1002,'mary','mary_pass',23,'usa')]
f=open("csvfile.csv",'a+')
wf =csv.writer(f)
#wf.writerow(headers)
wf.writerows(r_lists)
f.close()
'''
with open("csvfile.csv") as cf:
lines = csv.reader(cf)
for line in lines:
print(line)
gg=os.path.exists("csvfile.csv")
print gg
ty=[1,2,3,4,5,6,7]
print ty
del(ty[0])
print ty
#http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesMiniKLine5m?symbol=M0
| 2.921875
| 3
|
src/metrics.py
|
anastasiia-kornilova/mom-tools
| 8
|
12784975
|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import open3d as o3d
from random_geometry_points.plane import Plane
def mean_map_entropy(pc_map, map_tips=None, KNN_RAD=1):
MIN_KNN = 5
map_tree = o3d.geometry.KDTreeFlann(pc_map)
points = np.asarray(pc_map.points)
metric = []
for i in range(points.shape[0]):
point = points[i]
[k, idx, _] = map_tree.search_radius_vector_3d(point, KNN_RAD)
if len(idx) > MIN_KNN:
cov = np.cov(points[idx].T)
det = np.linalg.det(2 * np.pi * np.e * cov)
if det > 0:
metric.append(0.5 * np.log(det))
return 0 if len(metric) == 0 else np.mean(metric)
def mean_plane_variance(pc_map, map_tips=None, KNN_RAD=1):
MIN_KNN = 5
map_tree = o3d.geometry.KDTreeFlann(pc_map)
points = np.asarray(pc_map.points)
metric = []
for i in range(points.shape[0]):
point = points[i]
[k, idx, _] = map_tree.search_radius_vector_3d(point, KNN_RAD)
if len(idx) > MIN_KNN:
cov = np.cov(points[idx].T)
eigenvalues = np.linalg.eig(cov)[0]
metric.append(min(eigenvalues))
return 0 if len(metric) == 0 else np.mean(metric)
def orth_mme(pc_map, map_tips, knn_rad=0.5):
map_tree = o3d.geometry.KDTreeFlann(pc_map)
points = np.asarray(pc_map.points)
orth_axes_stats = []
orth_list = map_tips['orth_list']
for k, chosen_points in enumerate(orth_list):
metric = []
plane_error = []
for i in range(chosen_points.shape[0]):
point = chosen_points[i]
[_, idx, _] = map_tree.search_radius_vector_3d(point, knn_rad)
if len(idx) > 5:
metric.append(mme(points[idx]))
avg_metric = np.mean(metric)
orth_axes_stats.append(avg_metric)
return np.sum(orth_axes_stats)
def orth_mpv(pc_map, map_tips, knn_rad=1):
map_tree = o3d.geometry.KDTreeFlann(pc_map)
points = np.asarray(pc_map.points)
orth_axes_stats = []
orth_list = map_tips['orth_list']
for k, chosen_points in enumerate(orth_list):
metric = []
plane_error = []
for i in range(chosen_points.shape[0]):
point = chosen_points[i]
[_, idx, _] = map_tree.search_radius_vector_3d(point, knn_rad)
if len(idx) > 5:
metric.append(mpv(points[idx]))
avg_metric = np.median(metric)
orth_axes_stats.append(avg_metric)
return np.sum(orth_axes_stats)
def mme(points):
cov = np.cov(points.T)
det = np.linalg.det(2 * np.pi * np.e * cov)
return 0.5 * np.log(det) if det > 0 else -math.inf
def mpv(points):
cov = np.cov(points.T)
eigenvalues = np.linalg.eig(cov)[0]
return min(eigenvalues)
def rpe(T_gt, T_est):
seq_len = len(T_gt)
err = 0
for i in range(seq_len):
for j in range(seq_len):
d_gt = T_gt[i] @ np.linalg.inv(T_gt[j])
d_est = T_est[i] @ np.linalg.inv(T_est[j])
dt = d_est[:3, 3] - d_gt[:3, 3]
err += np.linalg.norm(dt) ** 2
return err
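# Example usage -- a minimal sketch added for illustration, not part of the
# original module. The point-cloud path is an assumption; any format that
# open3d.io.read_point_cloud understands will do.
if __name__ == '__main__':
    pc = o3d.io.read_point_cloud("map.pcd")
    print("mean map entropy:   ", mean_map_entropy(pc, KNN_RAD=1))
    print("mean plane variance:", mean_plane_variance(pc, KNN_RAD=1))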
| 1.726563
| 2
|
modules_tf/util.py
|
kibernetika-ai/first-order-model
| 0
|
12784976
|
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.python.ops.gen_image_ops import resize_nearest_neighbor
@tf.function
def kp2gaussian(kp_value, spatial_size, kp_variance):
"""
Transform a keypoint into gaussian like representation
"""
mean = kp_value # B, 10, 2
coordinate_grid = make_coordinate_grid(spatial_size, mean.dtype) # 64, 64, 2
grid_shape = tf.shape(coordinate_grid)
coordinate_grid = tf.reshape(
coordinate_grid,
[1, grid_shape[0], grid_shape[1], 1, 2]
) # 1, 64, 64, 1, 2
# repeats = # B, 1, 1, 10, 1
coordinate_grid = tf.tile(coordinate_grid, [tf.shape(mean)[0], 1, 1, mean.shape[1], 1]) # B, 64, 64, 10, 2
# Preprocess kp shape
mean = tf.reshape(mean, [tf.shape(mean)[0], 1, 1, mean.shape[1], mean.shape[2]]) # B, 1, 1, 10, 2
mean_sub = (coordinate_grid - mean) # B, 64, 64, 10, 2
out = tf.exp(-0.5 * tf.reduce_sum(mean_sub ** 2, -1) / kp_variance) # B, 64, 64, 10
return out
def make_coordinate_grid(spatial_size, type):
"""
Create a meshgrid [-1,1] x [-1,1] of given spatial_size.
"""
h, w = spatial_size[0], spatial_size[1]
x = tf.range(w, dtype=type)
y = tf.range(h, dtype=type)
x = (2. * (x / (tf.cast(w, type) - 1.)) - 1.)
y = (2. * (y / (tf.cast(h, type) - 1.)) - 1.)
yy = tf.repeat(tf.reshape(y, [-1, 1]), w, 1)
xx = tf.repeat(tf.reshape(x, [1, -1]), h, 0)
# yy = y.view(-1, 1).repeat(1, w)
# xx = x.view(1, -1).repeat(h, 1)
meshed = tf.concat([tf.expand_dims(xx, 2), tf.expand_dims(yy, 2)], 2)
return meshed
class ResBlock2d(layers.Layer):
"""
Res block, preserve spatial resolution.
"""
def __init__(self, in_features, kernel_size, padding):
super(ResBlock2d, self).__init__()
self.conv1 = layers.Conv2D(in_features, kernel_size=kernel_size, padding='same')
self.conv2 = layers.Conv2D(in_features, kernel_size=kernel_size, padding='same')
self.norm1 = layers.BatchNormalization()
self.norm2 = layers.BatchNormalization()
def call(self, x, **kwargs):
out = self.norm1(x)
out = tf.keras.activations.relu(out)
out = self.conv1(out)
out = self.norm2(out)
out = tf.keras.activations.relu(out)
out = self.conv2(out)
out += x
return out
class UpBlock2d(layers.Layer):
"""
Upsampling block for use in decoder.
"""
def __init__(self, out_features, kernel_size=(3, 3), groups=1):
super(UpBlock2d, self).__init__()
self.conv = layers.Conv2D(out_features, kernel_size=kernel_size, padding='same')
self.norm = layers.BatchNormalization()
self.up = layers.UpSampling2D()
def call(self, x, **kwargs):
out = self.up(x)
out = self.conv(out)
out = self.norm(out)
out = tf.keras.activations.relu(out)
return out
class DownBlock2d(layers.Layer):
"""
Downsampling block for use in encoder.
"""
def __init__(self, out_features, kernel_size=3):
super(DownBlock2d, self).__init__()
self.conv = layers.Conv2D(out_features, kernel_size=kernel_size, padding='same')
self.norm = layers.BatchNormalization()
self.pool = layers.AvgPool2D(pool_size=(2, 2))
def call(self, x, **kwargs):
out = self.conv(x)
out = self.norm(out)
out = tf.keras.activations.relu(out)
out = self.pool(out)
return out
class SameBlock2d(layers.Layer):
"""
Simple block, preserve spatial resolution.
"""
def __init__(self, out_features, kernel_size=3):
super(SameBlock2d, self).__init__()
self.conv = layers.Conv2D(out_features, kernel_size=kernel_size, padding='same')
self.norm = layers.BatchNormalization()
def call(self, x, **kwargs):
out = self.conv(x)
out = self.norm(out)
out = tf.keras.activations.relu(out)
return out
class Encoder(layers.Layer):
"""
Hourglass Encoder
"""
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
super(Encoder, self).__init__()
self.num_blocks = num_blocks
# down_blocks = []
for i in range(num_blocks):
block = DownBlock2d(
min(max_features, block_expansion * (2 ** (i + 1))),
kernel_size=3
)
setattr(self, f'down_block{i}', block)
# down_blocks.append(block)
# self.down_blocks = tf.keras.Sequential(down_blocks)
def call(self, x, **kwargs):
outs = [x]
for i in range(self.num_blocks):
res = getattr(self, f'down_block{i}')(outs[-1])
outs.append(res)
return outs
class Decoder(layers.Layer):
"""
Hourglass Decoder
"""
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
super(Decoder, self).__init__()
# up_blocks = []
self.num_blocks = num_blocks
for i in range(num_blocks)[::-1]:
out_filters = min(max_features, block_expansion * (2 ** i))
block = UpBlock2d(out_filters, kernel_size=3)
setattr(self, f'up_block{i}', block)
# up_blocks.append()
# self.up_blocks = nn.ModuleList(up_blocks)
self.out_filters = block_expansion + in_features
def call(self, x, **kwargs):
out = x.pop()
for i in range(self.num_blocks)[::-1]:
out = getattr(self, f'up_block{i}')(out)
skip = x.pop()
out = tf.concat([out, skip], axis=3)
return out
class Hourglass(layers.Layer):
"""
Hourglass architecture.
"""
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
super(Hourglass, self).__init__()
self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
self.out_filters = self.decoder.out_filters
def call(self, x, **kwargs):
return self.decoder(self.encoder(x))
class AntiAliasInterpolation2d(layers.Layer):
"""
Band-limited downsampling, for better preservation of the input signal.
"""
def __init__(self, channels, scale):
super(AntiAliasInterpolation2d, self).__init__()
sigma = (1 / scale - 1) / 2
kernel_size = 2 * round(sigma * 4) + 1
self.ka = kernel_size // 2
self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka
kernel_size = [kernel_size, kernel_size]
sigma = [sigma, sigma]
# The gaussian kernel is the product of the
# gaussian function of each dimension.
kernel = 1
meshgrids = tf.meshgrid(
[tf.range(kernel_size[0], dtype=tf.float32)],
[tf.range(kernel_size[1], dtype=tf.float32)],
)
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
kernel *= tf.exp(-(mgrid - mean) ** 2 / (2 * std ** 2))
# Make sure sum of values in gaussian kernel equals 1.
kernel = kernel / tf.reduce_sum(kernel)
# Reshape to depthwise convolutional weight
kernel = tf.reshape(kernel, [1, 1, *kernel.shape])
kernel = tf.repeat(kernel, channels, 0)
kernel = tf.transpose(tf.constant(kernel, name='kernel'), [2, 3, 1, 0])
self.kernel = tf.Variable(tf.tile(kernel, [1, 1, 1, 1]), trainable=False)
self.groups = channels
self.scale = scale
self.kernels = tf.split(self.kernel, self.groups, axis=3)
def call(self, input, **kwargs):
if self.scale == 1.0:
return input
padded = tf.keras.backend.spatial_2d_padding(input, ((self.ka, self.kb), (self.ka, self.kb)))
# split & concat - to work on CPU
splitted = tf.split(padded, 3, axis=3)
parts = []
for i in range(3):
parts.append(tf.nn.conv2d(splitted[i], self.kernels[i], strides=1, padding='VALID'))
out = tf.concat([*parts], axis=3)
# out = tf.nn.conv2d(padded, self.kernel, strides=1, padding='VALID')
size = (tf.cast(out.shape[1] * self.scale, tf.int32), tf.cast(out.shape[2] * self.scale, tf.int32))
# out = tf.image.resize(out, size, method=tf.image.ResizeMethod.BILINEAR)
out = resize_nearest_neighbor(out, size)
return out
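# Shape sanity check -- a minimal sketch added for illustration, not part of
# the original module. Batch size, spatial size and channel counts below are
# assumptions chosen to match the shape comments above (B, 10, 2 keypoints on
# a 64x64 grid).
if __name__ == '__main__':
    feature_map = tf.zeros([1, 64, 64, 10])
    hourglass = Hourglass(block_expansion=32, in_features=10, num_blocks=3)
    print(hourglass(feature_map).shape)   # (1, 64, 64, hourglass.out_filters)
    heatmaps = kp2gaussian(tf.zeros([1, 10, 2]), (64, 64), kp_variance=0.01)
    print(heatmaps.shape)                 # (1, 64, 64, 10)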
| 2.71875
| 3
|
ut.py
|
michaelpdu/adversary_ml
| 1
|
12784977
|
import os, sys
import unittest
import tlsh
from logging import *
sys.path.append(os.path.join(os.path.dirname(__file__),'UT'))
# from ut_trendx_predictor import TrendXPredictorTestCase
from ut_dna_manager import DNAManagerTestCase
from ut_housecallx_report import HouseCallXReportTestCase
from ut_trendx_wrapper import TrendXWrapperTestCase
from ut_pe_generator import PEGeneratorTestCase
from ut_workflow import AdversaryWorkflowTestCase
from ut_tlsh import TLSHTestCase
from ut_adversary_tlsh import TLSHAdversaryTestCase
def suite():
suite = unittest.TestSuite()
# TrendXPredictor Test Cases
# suite.addTest(TrendXPredictorTestCase("test_script_adversary"))
# suite.addTest(TrendXPredictorTestCase("test_pe_adversary"))
# # DNAManager Test Cases
# suite.addTest(DNAManagerTestCase("test_load_dna"))
# suite.addTest(DNAManagerTestCase("test_load_dna_random"))
# suite.addTest(DNAManagerTestCase("test_generate_random_indexes"))
# # HouseCallXReport Test Cases
# suite.addTest(HouseCallXReportTestCase("test_get_scores"))
# # TrendX Wrapper Test Cases
# if sys.version_info.major >= 3:
# suite.addTest(TrendXWrapperTestCase("test_scan_pe_file"))
# suite.addTest(TrendXWrapperTestCase("test_scan_pe_dir"))
# suite.addTest(TrendXWrapperTestCase("test_scan_pe_list"))
# else:
# suite.addTest(TrendXWrapperTestCase("test_scan_script_file"))
# suite.addTest(TrendXWrapperTestCase("test_scan_script_dir"))
# suite.addTest(TrendXWrapperTestCase("test_scan_script_list"))
# # PEGenerator Test Cases
# if sys.version_info.major >= 3:
# suite.addTest(PEGeneratorTestCase("test_generate"))
# # suite.addTest(PEGeneratorRandomTestCase("test_generate_imports"))
# else:
# pass
# AdversaryWorkflow Test Case
if sys.version_info.major >= 3:
# suite.addTest(AdversaryWorkflowTestCase("test_process_trendx_pe_bruteforce"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_trendx_script_bruteforce"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_trendx_pe_ga"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_trendx_script_ga"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_tlsh_pe_bruteforce"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_tlsh_script_bruteforce"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_tlsh_pe_ga"))
# suite.addTest(AdversaryWorkflowTestCase("test_process_tlsh_script_ga"))
suite.addTest(AdversaryWorkflowTestCase("test_start"))
# suite.addTest(AdversaryWorkflowTestCase("test_attack"))
else:
pass
# # TLSH Test Case
# suite.addTest(TLSHTestCase("test_get_tlsh"))
# suite.addTest(TLSHTestCase("test_gen_csv_for_file"))
# suite.addTest(TLSHTestCase("test_gen_csv_for_dir"))
# suite.addTest(TLSHTestCase("test_gen_and_scan_csv"))
# suite.addTest(TLSHTestCase("test_tlsh_wrapper_scan"))
# # TLSHGAAdversary Test Case
# suite.addTest(TLSHAdversaryTestCase("test_process_tlsh_pe_ga"))
return suite
if __name__ == "__main__":
basicConfig(filename='adversary_ml_ut_{}.log'.format(os.getpid()), format='[%(asctime)s][%(process)d.%(thread)d][%(levelname)s] - %(message)s', level=DEBUG)
unittest.main(defaultTest = 'suite')
| 1.945313
| 2
|
service/Speech.py
|
sola1993/inmoov
| 1
|
12784978
|
<filename>service/Speech.py
from time import sleep
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Runtime
# sayThings.py
# example script for MRL showing various methods
# of the Speech Service
# http://myrobotlab.org/doc/org/myrobotlab/service/Speech.html
# The preferred method for creating services is
# through the Runtime. This will allow
# the script to be rerun without creating a new
# Service each time. The initial delay from Service
# creation and caching voice files can be large, however
# after creation and caching methods should return
# immediately
# Create a running instance of the Speech Service.
# Name it "speech".
speech = Runtime.create("speech","Speech")
speech.startService()
# Speak with initial defaults - Google en
speech.speak("hello brave new world")
# Google must have network connectivity
# the back-end will cache a sound file
# once it is pulled from Google. So the
# first time it is slow, but subsequent times it is very
# quick and can be run without network connectivity.
speech.setBackendType("GOOGLE")
speech.setLanguage("en")
speech.speak("Hello World From Google.")
speech.setLanguage("pt") # Google supports some language codes
speech.speak("Hello World From Google.")
speech.setLanguage("de")
speech.speak("Hello World From Google.")
speech.setLanguage("ja")
speech.speak("Hello World From Google.")
speech.setLanguage("da")
speech.speak("Hello World From Google.")
speech.setLanguage("ro")
speech.speak("Hello World From Google.")
sleep(3)
# Switching to FreeTTS <<url>>
# http://freetts.sourceforge.net/docs/index.php
speech.setBackendType("FREETTS")
speech.speak("Hello World From Free TTS.")
| 3.46875
| 3
|
lib/awx_cli/commands/BaseCommand.py
|
bennojoy/awx-cli
| 2
|
12784979
|
# Copyright 2013, AnsibleWorks Inc.
# <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import exceptions
class BaseCommand(object):
def __init__(self, toplevel):
self.toplevel = toplevel
self.name = "BASE-COMMAND"
def run(self, args):
raise exceptions.NotImplementedError()
| 1.921875
| 2
|
hard-gists/2038329/snippet.py
|
jjhenkel/dockerizeme
| 21
|
12784980
|
<reponame>jjhenkel/dockerizeme
from bigfloat import *
N = 5
setcontext(precision(10**N))
def f(x):
return x + sin(x)
def g(n):
x = 3
for i in xrange(n):
x = f(x)
return x
import sympy
goodpi = str(sympy.pi.evalf(10**N))
def goodtill(n):
cand = g(n)
for i,(a,b) in enumerate(zip(str(cand),goodpi)):
if a != b:
return i-1
inds = range(20)
print map(goodtill,inds)
| 2.609375
| 3
|
server.py
|
MeiRose/Datamanager
| 0
|
12784981
|
from flask import Flask, send_from_directory, jsonify
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
CLIENT_DIR = os.path.join(BASE_DIR, "client", "dist")
STATIC_DIR = os.path.join(BASE_DIR, "static")
app = Flask(__name__)
app.secret_key = "Developer Key"
@app.route('/')
def index():
return send_from_directory(CLIENT_DIR, "index.html")
@app.route('/<path:filename>')
def client_app(filename):
return send_from_directory(CLIENT_DIR, filename)
@app.route('/static/<path:filename>')
def static_files(filename):
return send_from_directory(STATIC_DIR, filename)
@app.route('/api/items')
def data():
reply = {
"results": [
"Item 1",
"Item 2",
]
}
return jsonify(result=reply)
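# Local development entry point -- a minimal sketch added for illustration,
# not part of the original file. Host, port and debug flag are assumptions.
if __name__ == '__main__':
    app.run(host="127.0.0.1", port=5000, debug=True)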
| 2.515625
| 3
|
switch/controller/main_controller.py
|
fnrg-nfv/P4SFC-public
| 2
|
12784982
|
from flask import Flask, request
import argparse
import time
import threading
from p4_controller import P4Controller
from state_allocator import EntryManager
app = Flask(__name__)
p4_controller = None
entry_manager = None
@app.route('/test')
def test():
return "Hello from switch control plane controller~!"
@app.route('/config_pipeline', methods=["POST"])
def config_pipeline():
data = request.get_json()
operation_type = data.get("type")
if operation_type == "insert":
entries = []
entry_infos = data.get("entry_infos")
for entry_info in entry_infos:
entries.append(p4_controller.build_table_entry(entry_info))
p4_controller.insert_table_entries(entries)
return "OK"
@app.route("/config_route", methods=["POST"])
def config_route():
data = request.get_json()
chain_id = data.get("chain_id")
chain_length = data.get("chain_length")
output_port = data.get("output_port")
operation_type = data.get("type")
if operation_type == "insert":
p4_controller.insert_route(chain_id, chain_length, output_port)
return str(int(time.time() * 1000))
@app.route("/new_nf", methods=["POST"])
def new_nf():
data = request.get_json()
ip_addr = data.get("ip_addr")
port = data.get("port")
entry_manager.add_grpc_client("%s:%s" % (ip_addr, port))
def main(p4info_file_path, tofino_config_fpath, server_port, max_rule,
polling_time, enable_topk, debug):
global p4_controller
p4_controller = P4Controller(p4info_file_path, tofino_config_fpath, debug)
if max_rule > 0 or polling_time > 0:
global entry_manager
entry_manager = EntryManager(
p4_controller, [],
max_rule, polling_time, enable_topk)
entry_manager.start()
app.run(host="0.0.0.0", port=server_port)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='P4SFC switch controller')
parser.add_argument('--p4info',
help='p4info proto in text format from p4c',
type=str,
action="store",
required=False,
default='../../build/p4info.pb.txt')
parser.add_argument('--tofino-config',
help='Tofino config file from p4c',
type=str,
action="store",
required=False,
default='../../build/device_config')
parser.add_argument('--server-port',
help='port for RESTful API',
type=str,
action="store",
required=False,
default=8090)
parser.add_argument('--max-rule',
help='The maximum number of rules a switch can hold',
type=int,
action="store",
required=False,
default=1000)
parser.add_argument(
'--polling-time',
help='The polling time (in seconds) for poll the entries',
type=float,
action="store",
required=False,
default=30)
parser.add_argument('--enable-topk',
dest='enable_topk',
action='store_true')
parser.add_argument('--disable-topk',
dest='enable_topk',
action='store_false')
parser.add_argument('--debug', dest='debug', action='store_true')
parser.set_defaults(enable_topk=True)
parser.set_defaults(debug=False)
args = parser.parse_args()
main(args.p4info, args.tofino_config, args.server_port, args.max_rule,
args.polling_time, args.enable_topk, args.debug)
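# Example request -- a sketch added for illustration. Host and port are the
# defaults above; the JSON payload itself is hypothetical:
#
#   curl -X POST http://localhost:8090/config_route \
#        -H 'Content-Type: application/json' \
#        -d '{"type": "insert", "chain_id": 1, "chain_length": 3, "output_port": 2}'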
| 2.359375
| 2
|
itemIndex.py
|
adamb70/CSGO-Market-Float-Finder
| 73
|
12784983
|
index = {
"ID0": "Vanilla",
"ID2": "Groundwater",
"ID3": "Candy Apple",
"ID5": "Forest DDPAT",
"ID6": "Arctic Camo",
"ID8": "Desert Storm",
"ID9": "Bengal Tiger",
"ID10": "Copperhead",
"ID11": "Skulls",
"ID12": "Crimson Web",
"ID13": "Blue Streak",
"ID14": "Red Laminate",
"ID15": "Gunsmoke",
"ID16": "Jungle Tiger",
"ID17": "Urban DDPAT",
"ID20": "Virus",
"ID21": "Granite Marbleized",
"ID22": "Contrast Spray",
"ID25": "Forest Leaves",
"ID26": "Lichen Dashed",
"ID27": "Bone Mask",
"ID28": "Anodized Navy",
"ID30": "Snake Camo",
"ID32": "Silver",
"ID33": "Hot Rod",
"ID34": "Metallic DDPAT",
"ID36": "Ossified",
"ID37": "Blaze",
"ID38": "Fade",
"ID39": "Bulldozer",
"ID40": "Night",
"ID41": "Copper",
"ID42": "Blue Steel",
"ID43": "Stained",
"ID44": "Case Hardened",
"ID46": "Contractor",
"ID47": "Colony",
"ID48": "Dragon Tattoo",
"ID51": "Lightning Strike",
"ID59": "Slaughter",
"ID60": "Dark Water",
"ID61": "Hypnotic",
"ID62": "Bloomstick",
"ID67": "Cold Blooded",
"ID70": "Carbon Fiber",
"ID71": "Scorpion",
"ID72": "Safari Mesh",
"ID73": "Wings",
"ID74": "Polar Camo",
"ID75": "Blizzard Marbleized",
"ID76": "Winter Forest",
"ID77": "Boreal Forest",
"ID78": "Forest Night",
"ID83": "Orange DDPAT",
"ID84": "Pink DDPAT",
"ID90": "Mudder",
"ID92": "Cyanospatter",
"ID93": "Caramel",
"ID95": "Grassland",
"ID96": "Blue Spruce",
"ID98": "Ultraviolet",
"ID99": "Sand Dune",
"ID100": "Storm",
"ID101": "Tornado",
"ID102": "Whiteout",
"ID104": "Grassland Leaves",
"ID107": "Polar Mesh",
"ID110": "Condemned",
"ID111": "Glacier Mesh",
"ID116": "Sand Mesh",
"ID119": "Sage Spray",
"ID122": "Jungle Spray",
"ID124": "Sand Spray",
"ID135": "Urban Perforated",
"ID136": "Waves Perforated",
"ID141": "Orange Peel",
"ID143": "Urban Masked",
"ID147": "Jungle Dashed",
"ID148": "Sand Dashed",
"ID149": "Urban Dashed",
"ID151": "Jungle",
"ID153": "Demolition",
"ID154": "Afterimage",
"ID155": "Bullet Rain",
"ID156": "Death by Kitty",
"ID157": "Palm",
"ID158": "Walnut",
"ID159": "Brass",
"ID162": "Splash",
"ID164": "Modern Hunter",
"ID165": "Splash Jam",
"ID166": "Blaze Orange",
"ID167": "Radiation Hazard",
"ID168": "Nuclear Threat",
"ID169": "Fallout Warning",
"ID170": "Predator",
"ID171": "Irradiated Alert",
"ID172": "Black Laminate",
"ID174": "BOOM",
"ID175": "Scorched",
"ID176": "Faded Zebra",
"ID177": "Memento",
"ID178": "Doomkitty",
"ID179": "Nuclear Threat",
"ID180": "Fire Serpent",
"ID181": "Corticera",
"ID182": "Emerald Dragon",
"ID183": "Overgrowth",
"ID184": "Corticera",
"ID185": "Golden Koi",
"ID186": "Wave Spray",
"ID187": "Zirka",
"ID188": "Graven",
"ID189": "Bright Water",
"ID190": "Black Limba",
"ID191": "Tempest",
"ID192": "Shattered",
"ID193": "Bone Pile",
"ID194": "Spitfire",
"ID195": "Demeter",
"ID196": "Emerald",
"ID197": "Anodized Navy",
"ID198": "Hazard",
"ID199": "Dry Season",
"ID200": "Mayan Dreams",
"ID201": "Palm",
"ID202": "Jungle DDPAT",
"ID203": "Rust Coat",
"ID204": "Mosaico",
"ID205": "Jungle",
"ID206": "Tornado",
"ID207": "Facets",
"ID208": "Sand Dune",
"ID209": "Groundwater",
"ID210": "Anodized Gunmetal",
"ID211": "Ocean Foam",
"ID212": "Graphite",
"ID213": "Ocean Foam",
"ID214": "Graphite",
"ID215": "X-Ray",
"ID216": "Blue Titanium",
"ID217": "Blood Tiger",
"ID218": "Hexane",
"ID219": "Hive",
"ID220": "Hemoglobin",
"ID221": "Serum",
"ID222": "Blood in the Water",
"ID223": "Nightshade",
"ID224": "Water Sigil",
"ID225": "Ghost Camo",
"ID226": "Blue Laminate",
"ID227": "Electric Hive",
"ID228": "Blind Spot",
"ID229": "Azure Zebra",
"ID230": "Steel Disruption",
"ID231": "Cobalt Disruption",
"ID232": "Crimson Web",
"ID233": "Tropical Storm",
"ID234": "Ash Wood",
"ID235": "VariCamo",
"ID236": "Night Ops",
"ID237": "Urban Rubble",
"ID238": "VariCamo Blue",
"ID240": "CaliCamo",
"ID241": "Hunting Blind",
"ID242": "Army Mesh",
"ID243": "Gator Mesh",
"ID244": "Teardown",
"ID245": "Army Recon",
"ID246": "Amber Fade",
"ID247": "Damascus Steel",
"ID248": "Red Quartz",
"ID249": "Cobalt Quartz",
"ID250": "Full Stop",
"ID251": "Pit Viper",
"ID252": "Silver Quartz",
"ID253": "Acid Fade",
"ID254": "Nitro",
"ID255": "Asiimov",
"ID256": "The Kraken",
"ID257": "Guardian",
"ID258": "Mehndi",
"ID259": "Redline",
"ID260": "Pulse",
"ID261": "Marina",
"ID262": "Rose Iron",
"ID263": "Rising Skull",
"ID264": "Sandstorm",
"ID265": "Kami",
"ID266": "Magma",
"ID267": "Cobalt Halftone",
"ID268": "Tread Plate",
"ID269": "The Fuschia Is Now",
"ID270": "Victoria",
"ID271": "Undertow",
"ID272": "Titanium Bit",
"ID273": "Heirloom",
"ID274": "Copper Galaxy",
"ID275": "Red FragCam",
"ID276": "Panther",
"ID277": "Stainless",
"ID278": "Blue Fissure",
"ID279": "Asiimov",
"ID280": "Chameleon",
"ID281": "Corporal",
"ID282": "Redline",
"ID283": "Trigon",
"ID284": "Heat",
"ID285": "Terrain",
"ID286": "Antique",
"ID287": "Pulse",
"ID288": "Sergeant",
"ID289": "Sandstorm",
"ID290": "Guardian",
"ID291": "Heaven Guard",
"ID293": "Death Rattle",
"ID294": "Green Apple",
"ID295": "Franklin",
"ID296": "Meteorite",
"ID297": "Tuxedo",
"ID298": "<NAME>",
"ID299": "Caged Steel",
"ID300": "<NAME>",
"ID301": "Atomic Alloy",
"ID302": "Vulcan",
"ID303": "Isaac",
"ID304": "Slashed",
"ID305": "Torque",
"ID306": "Antique",
"ID307": "Retribution",
"ID308": "Kami",
"ID309": "Howl",
"ID310": "Curse",
"ID311": "Desert Warfare",
"ID312": "Cyrex",
"ID313": "Orion",
"ID314": "Heaven Guard",
"ID315": "Poison Dart",
"ID316": "Jaguar",
"ID317": "Bratatat",
"ID318": "Road Rash",
"ID319": "Detour",
"ID320": "Red Python",
"ID321": "Master Piece",
"ID322": "Nitro",
"ID323": "Rust Coat",
"ID325": "Chalice",
"ID326": "Knight",
"ID327": "Chainmail",
"ID328": "Hand Cannon",
"ID329": "Dark Age",
"ID330": "Briar",
"ID332": "Royal Blue",
"ID333": "Indigo",
"ID334": "Twist",
"ID335": "Module",
"ID336": "Desert-Strike",
"ID337": "Tatter",
"ID338": "Pulse",
"ID339": "Caiman",
"ID340": "Jet Set",
"ID341": "First Class",
"ID342": "Leather",
"ID343": "Commuter",
"ID344": "Dragon Lore",
"ID345": "First Class",
"ID346": "Coach Class",
"ID347": "Pilot",
"ID348": "Red Leather",
"ID349": "Osiris",
"ID350": "Tigris",
"ID351": "Conspiracy",
"ID352": "Fowl Play",
"ID353": "Water Elemental",
"ID354": "Urban Hazard",
"ID355": "Desert-Strike",
"ID356": "Koi",
"ID357": "Ivory",
"ID358": "Supernova",
"ID359": "Asiimov",
"ID360": "Cyrex",
"ID361": "Abyss",
"ID362": "Labyrinth",
"ID363": "Traveler",
"ID364": "Business Class",
"ID365": "Olive Plaid",
"ID366": "Green Plaid",
"ID367": "Reactor",
"ID368": "Setting Sun",
"ID369": "Nuclear Waste",
"ID370": "Bone Machine",
"ID371": "Styx",
"ID372": "Nuclear Garden",
"ID373": "Contamination",
"ID374": "Toxic",
"ID375": "Radiation Hazard",
"ID376": "Chemical Green",
"ID377": "Hot Shot",
"ID378": "Fallout Warning",
"ID379": "Cerberus",
"ID380": "Wasteland Rebel",
"ID381": "Grinder",
"ID382": "Murky",
"ID383": "Basilisk",
"ID384": "Griffin",
"ID385": "Firestarter",
"ID386": "Dart",
"ID387": "Urban Hazard",
"ID388": "Cartel",
"ID389": "Fire Elemental",
"ID390": "Highwayman",
"ID391": "Cardiac",
"ID392": "Delusion",
"ID393": "Tranquility",
"ID394": "Cartel",
"ID395": "Man-o'-war",
"ID396": "Urban Shock",
"ID397": "Naga",
"ID398": "Chatterbox",
"ID399": "Catacombs",
"ID400": "Dragon King",
"ID401": "System Lock",
"ID402": "Malachite",
"ID403": "Deadly Poison",
"ID404": "Muertos",
"ID405": "Serenity",
"ID406": "Grotto",
"ID407": "Quicksilver",
"ID409": "Tiger Tooth",
"ID410": "<NAME>",
"ID411": "D<NAME>el",
"ID413": "<NAME>",
"ID414": "<NAME>",
"ID415": "Doppler (Ruby)",
"ID416": "Doppler (Sapphire)",
"ID417": "Doppler (Blackpearl)",
"ID418": "Doppler (Phase 1)",
"ID419": "Doppler (Phase 2)",
"ID420": "Doppler (Phase 3)",
"ID421": "Doppler (Phase 4)",
"ID422": "Elite Build",
"ID423": "Armor Core",
"ID424": "Worm God",
"ID425": "Bronze Deco",
"ID426": "Valence",
"ID427": "Monkey Business",
"ID428": "Eco",
"ID429": "Djinn",
"ID430": "Hyper Beast",
"ID431": "Heat",
"ID432": "Man-o'-war",
"ID433": "Neon Rider",
"ID434": "Origami",
"ID435": "Pole Position",
"ID436": "Grand Prix",
"ID437": "Twilight Galaxy",
"ID438": "Chronos",
"ID439": "Hades",
"ID440": "Icarus Fell",
"ID441": "Minotaur's Labyrinth",
"ID442": "Asterion",
"ID443": "Pathfinder",
"ID444": "Daedalus",
"ID445": "Hot Rod",
"ID446": "Medusa",
"ID447": "Duelist",
"ID448": "Pandora's Box",
"ID449": "Poseidon",
"ID450": "Moon in Libra",
"ID451": "Sun in Leo",
"ID452": "Shipping Forecast",
"ID453": "Emerald",
"ID454": "Para Green",
"ID455": "Akihabara Accept",
"ID456": "Hydroponic",
"ID457": "Bamboo Print",
"ID458": "Bamboo Shadow",
"ID459": "Bamboo Forest",
"ID460": "Aqua Terrace",
"ID462": "Counter Terrace",
"ID463": "Terrace",
"ID464": "Neon Kimono",
"ID465": "Orange Kimono",
"ID466": "<NAME>",
"ID467": "<NAME>",
"ID468": "Midnight Storm",
"ID469": "Sunset Storm 2",
"ID470": "Sunset Storm",
"ID471": "Daybreak",
"ID472": "Impact Drill",
"ID473": "Seabird",
"ID474": "Aquamarine Revenge",
"ID475": "Hyper Beast",
"ID476": "Yellow Jacket",
"ID477": "Neural Net",
"ID478": "Rocket Pop",
"ID479": "Bunsen Burner",
"ID480": "Evil Daimyo",
"ID481": "Nemesis",
"ID482": "Ruby Poison Dart",
"ID483": "Loudmouth",
"ID484": "Ranger",
"ID485": "Handgun",
"ID486": "Elite Build",
"ID487": "Cyrex",
"ID488": "Riot",
"ID489": "Torque",
"ID490": "Frontside Misty",
"ID491": "Dualing Dragons",
"ID492": "Survivor Z",
"ID493": "Flux",
"ID494": "Stone Cold",
"ID495": "Wraiths",
"ID496": "Nebula Crusader",
"ID497": "Golden Coil",
"ID498": "Rangeen",
"ID499": "Cobalt Core",
"ID500": "Special Delivery",
"ID501": "Wingshot",
"ID502": "Green Marine",
"ID503": "Big Iron",
"ID504": "Kill Confirmed",
"ID505": "Scumbria",
"ID506": "Point Disarray",
"ID507": "Ricochet",
"ID508": "Fuel Rod",
"ID509": "Corinthian",
"ID510": "Retrobution",
"ID511": "The Executioner",
"ID512": "Royal Paladin",
"ID514": "Power Loader",
"ID515": "Imperial",
"ID516": "Shapewood",
"ID517": "Yorick",
"ID518": "Outbreak",
"ID519": "Tiger Moth",
"ID520": "Avalanche",
"ID521": "Teclu Burner",
"ID522": "Fade",
"ID523": "Amber Fade",
"ID524": "Fuel Injector",
"ID525": "Elite Build",
"ID526": "Photic Zone",
"ID527": "Kumicho Dragon",
"ID528": "Cartel",
"ID529": "Valence",
"ID530": "Triumvirate",
"ID532": "Royal Legion",
"ID533": "The Battlestar",
"ID534": "Lapis Gator",
"ID535": "Praetorian",
"ID536": "Impire",
"ID537": "Hyper Beast",
"ID538": "Necropos",
"ID539": "Jambiya",
"ID540": "Lead Conduit",
"ID541": "Fleet Flock",
"ID542": "Judgement of Anubis",
"ID543": "Red Astor",
"ID544": "Ventilators",
"ID545": "Orange Crash",
"ID546": "Firefight",
"ID547": "Spectre",
"ID548": "Chantico's Fire",
"ID549": "Bioleak",
"ID550": "Oceanic",
"ID551": "Asiimov",
"ID552": "Fubar",
"ID553": "Atlas",
"ID554": "Ghost Crusader",
"ID555": "Re-Entry",
"ID556": "Primal Saber",
"ID557": "Black Tie",
"ID558": "Lore",
"ID559": "Lore",
"ID560": "Lore",
"ID561": "Lore",
"ID562": "Lore",
"ID563": "Black Laminate",
"ID564": "Black Laminate",
"ID565": "Black Laminate",
"ID566": "Black Laminate",
"ID567": "Black Laminate",
"ID568": "Gamma Doppler (Emerald Marble)",
"ID569": "Gamma Doppler (Phase 1)",
"ID570": "Gamma Doppler (Phase 2)",
"ID571": "Gamma Doppler (Phase 3)",
"ID572": "Gamma Doppler (Phase 4)",
"ID573": "Autotronic",
"ID574": "Autotronic",
"ID575": "Autotronic",
"ID576": "Autotronic",
"ID577": "Autotronic",
"ID578": "Bright Water",
"ID579": "Bright Water",
"ID580": "Freehand",
"ID581": "Freehand",
"ID582": "Freehand",
"ID583": "Aristocrat",
"ID584": "Phobos",
"ID585": "Violent Daimyo",
"ID586": "Wasteland Rebel",
"ID587": "Mecha Industries",
"ID588": "Desolate Space",
"ID589": "Carnivore",
"ID590": "Exo",
"ID591": "Imperial Dragon",
"ID592": "Iron Clad",
"ID593": "Chopper",
"ID594": "Harvester",
"ID595": "Reboot",
"ID596": "Limelight",
"ID597": "Bloodsport",
"ID598": "Aerial",
"ID599": "Ice Cap",
"ID600": "Neon Revolution",
"ID601": "Syd Mead",
"ID602": "Imprint",
"ID603": "Directive",
"ID604": "Roll Cage",
"ID605": "Scumbria",
"ID606": "Ventilator",
"ID607": "Weasel",
"ID608": "Petroglyph",
"ID609": "Airlock",
"ID610": "Dazzle",
"ID611": "Grim",
"ID612": "Powercore",
"ID613": "Triarch",
"ID614": "Fuel Injector",
"ID615": "Briefing",
"ID616": "Slipstream",
"ID617": "Doppler",
"ID618": "Doppler",
"ID619": "Doppler",
"ID620": "Ultraviolet",
"ID621": "Ultraviolet",
"ID622": "Polymer",
"ID623": "Ironwork",
"ID624": "Dragonfire",
"ID625": "Royal Consorts",
"ID626": "Mecha Industries",
"ID627": "Cirrus",
"ID628": "Stinger",
"ID629": "Black Sand",
"ID630": "Sand Scale",
"ID631": "Flashback",
"ID632": "Buzz Kill",
"ID633": "Sonar",
"ID634": "Gila",
"ID635": "Turf",
"ID636": "Shallow Grave",
"ID637": "Cyrex",
"ID638": "Wasteland Princess",
"ID639": "Bloodsport",
"ID640": "Fever Dream",
"ID641": "Jungle Slipstream",
"ID642": "Blueprint",
"ID643": "Xiangliu",
"ID644": "Decimator",
"ID645": "Oxide Blaze",
"ID646": "Capillary",
"ID647": "Crimson Tsunami",
"ID648": "Emerald Poison Dart",
"ID649": "Akoben",
"ID650": "Ripple",
"ID651": "Last Dive",
"ID652": "Scaffold",
"ID653": "Neo-Noir",
"ID654": "Seasons",
"ID655": "Zander",
"ID656": "Orbit Mk01",
"ID657": "Blueprint",
"ID658": "Cobra Strike",
"ID659": "Macabre",
"ID660": "Hyper Beast",
"ID661": "Sugar Rush",
"ID662": "Oni Taiji",
"ID663": "Briefing",
"ID664": "Hellfire",
"ID665": "Aloha",
"ID666": "Hard Water",
"ID667": "Woodsman",
"ID668": "Red Rock",
"ID669": "Death Grip",
"ID670": "Death's Head",
"ID671": "Cut Out",
"ID672": "Metal Flowers",
"ID673": "Morris",
"ID674": "Triqua",
"ID675": "The Empress",
"ID676": "High Roller",
"ID677": "Hunter",
"ID678": "See Ya Later",
"ID679": "Goo",
"ID680": "Off World",
"ID681": "Leaded Glass",
"ID682": "Oceanic",
"ID683": "Llama Cannon",
"ID684": "Cracked Opal",
"ID685": "Jungle Slipstream",
"ID686": "Phantom",
"ID687": "Tacticat",
"ID688": "Exposure",
"ID689": "Ziggy",
"ID690": "Stymphalian",
"ID691": "Mortis",
"ID692": "Night Riot",
"ID693": "Flame Test",
"ID694": "Moonrise",
"ID695": "Neo-Noir",
"ID696": "Bloodsport",
"ID697": "Black Sand",
"ID698": "Lionfish",
"ID699": "Wild Six",
"ID700": "Urban Hazard",
"ID701": "Grip",
"ID702": "Aloha",
"ID703": "SWAG-7",
"ID704": "Arctic Wolf",
"ID705": "Cortex",
"ID706": "Oxide Blaze",
"ID10006": "Charred",
"ID10007": "Snakebite",
"ID10008": "Bronzed",
"ID10009": "Leather",
"ID10010": "Spruce DDPAT",
"ID10013": "Lunar Weave",
"ID10015": "Convoy",
"ID10016": "Crimson Weave",
"ID10018": "Superconductor",
"ID10019": "Arid",
"ID10021": "Slaughter",
"ID10024": "Eclipse",
"ID10026": "Spearmint",
"ID10027": "Boom!",
"ID10028": "Cool Mint",
"ID10030": "Forest DDPAT",
"ID10033": "<NAME>",
"ID10034": "Emerald Web",
"ID10035": "Foundation",
"ID10036": "Badlands",
"ID10037": "Pandora's Box",
"ID10038": "Hedge Maze",
"ID10039": "Guerrilla",
"ID10040": "Diamondback",
"ID10041": "King Snake",
"ID10042": "Imperial Plaid",
"ID10043": "Overtake",
"ID10044": "Racing Green",
"ID10045": "Amphibious",
"ID10046": "Bronze Morph",
"ID10047": "Omega",
"ID10048": "Vice",
"ID10049": "POW!",
"ID10050": "Turtle",
"ID10051": "Transport",
"ID10052": "Polygon",
"ID10053": "Cobalt Skulls",
"ID10054": "Overprint",
"ID10055": "Duct Tape",
"ID10056": "Arboreal",
"ID10057": "Emerald",
"ID10058": "Mangrove",
"ID10059": "Rattler",
"ID10060": "Case Hardened",
"ID10061": "Crimson Web",
"ID10062": "Buckshot",
"ID10063": "Fade",
"ID10064": "Mogul"
}
| 1.34375
| 1
|
pyimq/bin/utils/create_photo_test_set.py
|
bradday4/PyImageQualityRanking
| 8
|
12784984
|
<reponame>bradday4/PyImageQualityRanking
"""
<NAME> - 2015 - <EMAIL>
A small utility that generates Gaussian blurred images from a
series of base images. This utility was use to create an autofocus function test
dataset.
"""
import os
import sys
from scipy import ndimage, misc
import numpy
def main():
# Check input parameters
if len(sys.argv) < 2 or not os.path.isdir(sys.argv[1]):
print("Please specify a path to a directory that contains the pictures")
sys.exit(1)
path = sys.argv[1]
# Create output directory
output_dir = os.path.join(path, "test_image_series")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Process every image in the input path
for image_name in os.listdir(path):
real_path = os.path.join(path, image_name)
if not real_path.endswith((".jpg", ".tif", ".tiff", ".png")):
continue
original = misc.imread(real_path, flatten=True)
path_parts = os.path.split(real_path)
extension = path_parts[1].split(".")[1]
base_name = path_parts[1].split(".")[0]
# Save original
output_path = os.path.join(output_dir, image_name)
misc.imsave(output_path, original)
# Blur with Gaussian filter, Sigma=1
file_name = base_name + "_gaussian_1." + extension
output_path = os.path.join(output_dir, file_name)
misc.imsave(output_path, ndimage.gaussian_filter(original, 1))
# Blur with Gaussian filter, Sigma=2
file_name = base_name + "_gaussian_2." + extension
output_path = os.path.join(output_dir, file_name)
misc.imsave(output_path, ndimage.gaussian_filter(original, 2))
if __name__ == "__main__":
main()
| 2.859375
| 3
|
common/rtemsext.py
|
cillianodonnell/rtems-docs
| 1
|
12784985
|
from docutils import nodes
import sphinx.domains.std
# Borrowed from: http://stackoverflow.com/questions/13848328/sphinx-references-to-other-sections-containing-section-number-and-section-title
class CustomStandardDomain(sphinx.domains.std.StandardDomain):
def __init__(self, env):
env.settings['footnote_references'] = 'superscript'
sphinx.domains.std.StandardDomain.__init__(self, env)
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
res = super(CustomStandardDomain, self).resolve_xref(env, fromdocname, builder, typ, target, node, contnode)
if res is None:
return res
if typ == 'ref' and not node['refexplicit']:
docname, labelid, sectname = self.data['labels'].get(target, ('','',''))
res['refdocname'] = docname
return res
def doctree_resolved(app, doctree, docname):
secnums = app.builder.env.toc_secnumbers
for node in doctree.traverse(nodes.reference):
if 'refdocname' in node:
refdocname = node['refdocname']
if refdocname in secnums:
secnum = secnums[refdocname]
emphnode = node.children[0]
textnode = emphnode.children[0]
toclist = app.builder.env.tocs[refdocname]
anchorname = None
for refnode in toclist.traverse(nodes.reference):
if refnode.astext() == textnode.astext():
anchorname = refnode['anchorname']
if anchorname is None:
continue
sec_number = secnum[anchorname]
chapter = sec_number[0]
section = None
if len(sec_number) > 1:
section = ".".join(map(str, sec_number[1:]))
if section:
node.replace(emphnode, nodes.Text("Chapter %s Section %s - %s" % (chapter, section, textnode)))
else:
node.replace(emphnode, nodes.Text("Chapter %s - %s" % (chapter, textnode)))
def setup(app):
app.override_domain(CustomStandardDomain)
app.connect('doctree-resolved', doctree_resolved)
return {'version': "1.0", 'parallel_read_safe': True}
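# Typical use -- a sketch added for illustration, not part of the original
# extension. In a Sphinx conf.py that can import this module:
#
#     extensions = ['rtemsext']
#
# Sphinx then calls setup(app), which swaps in CustomStandardDomain and hooks
# doctree-resolved so that :ref: links render as "Chapter N Section M - Title".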
| 2.234375
| 2
|
ctc_fast/swbd-utils/score_frag_utts.py
|
SrikarSiddarth/stanford-ctc
| 268
|
12784986
|
<filename>ctc_fast/swbd-utils/score_frag_utts.py<gh_stars>100-1000
'''
Look at error rates, scoring only utterances that contain frags
'''
FRAG_FILE = '/deep/u/zxie/ctc_clm_transcripts/frags.txt'
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('hyp')
parser.add_argument('stm')
parser.add_argument('hyp_out')
parser.add_argument('stm_out')
args = parser.parse_args()
# Read in utt keys with frags
frag_utts = set()
with open(FRAG_FILE, 'r') as fin:
lines = fin.read().strip().splitlines()
for l in lines:
utt_key, frag_word = l.split(' ', 1)
frag_utts.add(utt_key)
# Read in hyps
with open(args.hyp, 'r') as fin:
lines = fin.read().splitlines()
# Write the filtered hyp file
frag_utt_count = 0
tot_utt_count = 0
fout = open(args.hyp_out, 'w')
for l in lines:
parts = l.split(' ', 1)
if len(parts) == 1:
assert False
utt_key, utt = parts
if utt_key not in frag_utts:
tot_utt_count += 1
continue
fout.write(l + '\n')
tot_utt_count += 1
frag_utt_count += 1
fout.close()
# Sanity check
print '%d/%d utts contain frags' % (frag_utt_count, tot_utt_count)
# Read in stm reference file
stm_frag_utts = 0
frag_utt_starts = set()
for ou in frag_utts:
ou_start = '-'.join(ou.split('-')[0:2])
frag_utt_starts.add(ou_start)
with open(args.stm, 'r') as fin:
lines = fin.read().strip().splitlines()
fout = open(args.stm_out, 'w')
for l in lines:
# Handle comments
if l.startswith(';;'):
fout.write(l+'\n')
continue
parts = l.split(' ', 6)
utt_key_part, channel, utt_key, t_start, t_end, metadata, utt = parts
stm_utt_key = '%s-%s_%06d' % (utt_key_part, channel.lower(), int(float(t_start) * 100))
print stm_utt_key
if stm_utt_key not in frag_utt_starts:
continue
fout.write(l + '\n')
stm_frag_utts += 1
fout.close()
# Sanity check
print '%d/%d stm utts contain frags' % (stm_frag_utts, tot_utt_count)
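# Example invocation -- a sketch added for illustration; the file names are
# hypothetical (this is Python 2 code, matching the print statements above):
#
#   python score_frag_utts.py eval.hyp eval.stm eval_frag.hyp eval_frag.stm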
| 2.859375
| 3
|
leadgen.py
|
jsmiles/LeadGen
| 0
|
12784987
|
<gh_stars>0
import pandas as pd
input = pd.read_csv('input.txt',sep=',')
ref = pd.read_csv('reference.txt',sep=',')
def ref_lookup(company):
if company in ref.loc[:,'Company'].tolist():
dom = ref.loc[ref['Company']== f'{company}', 'Domain'].iloc[:]
pat = ref.loc[ref['Company']== f'{company}', 'Pattern'].iloc[:]
return dom, pat
else:
x = pd.Series(1)
y = pd.Series(2)
return x, y
c_list = input.loc[:,'Company'].iloc[:]
c_list = c_list.tolist()
def emailgen():
EmailList = []
NoEmailList = []
for i in c_list:
d, p = ref_lookup(i)
d = d.item()
p = p.item()
if p == 'p1':
a = input.loc[input['Company']==i,'firstname'].item()
b = input.loc[input['Company']==i,'surname'].item()
EmailList.append(f"{a}.{b}{d}")
elif p == 'p2':
a = input.loc[input['Company']==i,'firstname'].item()[0]
b = input.loc[input['Company']==i,'surname'].item()
EmailList.append(f"{a}{b}{d}")
elif p == 'p3':
a = input.loc[input['Company']==i,'firstname'].item()[0]
b = input.loc[input['Company']==i,'surname'].item()
EmailList.append(f"{b}.{a}{d}")
else:
NoEmailList.append(i)
return EmailList, NoEmailList
a, b = emailgen()
for i in a:
print(f"{i}, ")
print("""
ABOVE: A list of emails, formatted. Don't forget
to remove the last trailing comma.
BELOW: A list of the companies with no lookup
pattern contained in \'reference.txt\'
""")
print(b)
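# Expected input layout -- a sketch inferred from the lookups above, not
# taken from the repository. Column names must match exactly; the example
# rows are hypothetical (note the Domain column includes the leading '@'):
#
#   reference.txt:  Company,Domain,Pattern      e.g.  Acme,@acme.com,p1
#   input.txt:      Company,firstname,surname   e.g.  Acme,Jane,Doe
#
# Pattern p1 yields first.last@domain, p2 yields first-initial + last,
# and p3 yields last.first-initial.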
| 3.140625
| 3
|
data_loader/data_loaders.py
|
Neronjust2017/pytorch-classification-project
| 0
|
12784988
|
import torch
from torch.utils.data import TensorDataset
from torchvision import datasets, transforms
from base import BaseDataLoader, BaseDataLoader_2
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from .utils import readmts_uci_har, transform_labels
class MnistDataLoader(BaseDataLoader):
"""
MNIST data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
trsfm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
self.data_dir = data_dir
self.dataset = datasets.MNIST(self.data_dir, train=training, download=True, transform=trsfm)
super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class HumanActivityRecognitionDataLoader2(BaseDataLoader):
"""
HumanActivityRecognition data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, test_split=0.0, num_workers=1,
training=True):
x_train, y_train, x_test, y_test = readmts_uci_har(data_dir)
x_train = x_train.swapaxes(1, 2)
x_test = x_test.swapaxes(1, 2)
y_train, y_test = transform_labels(y_train, y_test)
for i in range(len(x_train)):
for j in range(len(x_test)):
c = (x_train[i] == x_test[j])
d = c.all()
if d:
break
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train, y_test))
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y)
dataset = TensorDataset(X, Y)
super().__init__(dataset, batch_size, shuffle, validation_split, test_split, num_workers, normalization=True)
class HumanActivityRecognitionDataLoader1(BaseDataLoader):
"""
HumanActivityRecognition data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, test_split=0.0, num_workers=1,
training=True):
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATASET_PATH = data_dir
TRAIN = "/train/"
TEST = "/test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
"""
Given attribute (train or test) of feature, read all 9 features into an
np ndarray of shape [sample_sequence_idx, time_step, feature_num]
argument: X_signals_paths str attribute of feature: 'train' or 'test'
return: np ndarray, tensor of features
"""
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
                        row.replace(b'  ', b' ').strip().split(b' ') for row in file
]]
)
file.close()
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
x_train = load_X(X_train_signals_paths)
x_test = load_X(X_test_signals_paths)
x_train = x_train.swapaxes(1, 2)
x_test = x_test.swapaxes(1, 2)
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
"""
Read Y file of values to be predicted
            argument: y_path str attribute of Y: 'train' or 'test'
return: Y ndarray / tensor of the 6 one_hot labels of each sample
"""
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
                    row.replace(b'  ', b' ').strip().split(b' ') for row in file
]],
dtype=np.int32
)
file.close()
            # Subtract 1 from each output class for friendly 0-based indexing
# return one_hot(y_ - 1)
return y_ - 1
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
y_train = load_y(y_train_path)
y_test = load_y(y_test_path)
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train, y_test))
Y = Y.reshape((len(Y), ))
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y).long()
dataset = TensorDataset(X, Y)
super().__init__(dataset, batch_size, shuffle, validation_split, test_split, num_workers, normalization=True)
class HumanActivityRecognitionDataLoader3(BaseDataLoader_2):
"""
HumanActivityRecognition data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, test_split=0.0, num_workers=1,
training=True):
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATASET_PATH = data_dir
TRAIN = "/train/"
TEST = "/test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
"""
Given attribute (train or test) of feature, read all 9 features into an
np ndarray of shape [sample_sequence_idx, time_step, feature_num]
argument: X_signals_paths str attribute of feature: 'train' or 'test'
return: np ndarray, tensor of features
"""
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
                    row.replace(b'  ', b' ').strip().split(b' ') for row in file
]]
)
file.close()
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
x_train = load_X(X_train_signals_paths)
x_test = load_X(X_test_signals_paths)
x_train = x_train.swapaxes(1, 2)
x_test = x_test.swapaxes(1, 2)
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
"""
Read Y file of values to be predicted
            argument: y_path str attribute of Y: 'train' or 'test'
return: Y ndarray / tensor of the 6 one_hot labels of each sample
"""
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
                    row.replace(b'  ', b' ').strip().split(b' ') for row in file
]],
dtype=np.int32
)
file.close()
            # Subtract 1 from each output class for friendly 0-based indexing
# return one_hot(y_ - 1)
return y_ - 1
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
y_train = load_y(y_train_path)
y_test = load_y(y_test_path)
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train, y_test))
n_train = len(x_train)
n_test = len(x_test) // 2
n_val = n_test
idx_full = np.arange(len(X))
test_idx = idx_full[-n_test:]
valid_idx = idx_full[-(n_test+n_val):-n_test]
train_idx = idx_full[:n_train]
Y = Y.reshape((len(Y), ))
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y).long()
dataset = TensorDataset(X, Y)
super().__init__(dataset, batch_size, shuffle, train_idx, valid_idx, test_idx, num_workers, normalization=True)
class HumanActivityRecognitionDataLoader(BaseDataLoader_2):
"""
HumanActivityRecognition data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, test_split=0.0, num_workers=1,
training=True):
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATASET_PATH = data_dir
TRAIN = "/train/"
TEST = "/test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
"""
Given attribute (train or test) of feature, read all 9 features into an
np ndarray of shape [sample_sequence_idx, time_step, feature_num]
argument: X_signals_paths str attribute of feature: 'train' or 'test'
return: np ndarray, tensor of features
"""
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
                    row.replace(b'  ', b' ').strip().split(b' ') for row in file
]]
)
file.close()
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
x_train = load_X(X_train_signals_paths)
x_test = load_X(X_test_signals_paths)
x_train = x_train.swapaxes(1, 2)
x_test = x_test.swapaxes(1, 2)
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
"""
Read Y file of values to be predicted
            argument: y_path str attribute of Y: 'train' or 'test'
return: Y ndarray / tensor of the 6 one_hot labels of each sample
"""
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
                    row.replace(b'  ', b' ').strip().split(b' ') for row in file
]],
dtype=np.int32
)
file.close()
            # Subtract 1 from each output class for friendly 0-based indexing
# return one_hot(y_ - 1)
return y_ - 1
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
y_train = load_y(y_train_path)
y_test = load_y(y_test_path)
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train, y_test))
n_train = len(x_train)
n_test = len(x_test)
n_val = n_test
idx_full = np.arange(len(X))
test_idx = idx_full[-n_test:]
train_idx = idx_full[:n_train]
np.random.seed(123)
np.random.shuffle(train_idx)
valid_idx = train_idx[-n_test//2:]
train_idx = train_idx[:-n_test//2]
Y = Y.reshape((len(Y), ))
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y).long()
dataset = TensorDataset(X, Y)
super().__init__(dataset, batch_size, shuffle, train_idx, valid_idx, test_idx, num_workers, normalization=True)
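# A minimal usage sketch (commented out): "data/UCI_HAR" is a placeholder path that
# would have to contain the UCI HAR "train/" and "test/" folders read by the loaders
# above, and iterating the loader assumes BaseDataLoader_2 behaves like a torch DataLoader.
#
# loader = HumanActivityRecognitionDataLoader("data/UCI_HAR", batch_size=64)
# for x_batch, y_batch in loader:
#     print(x_batch.shape, y_batch.shape)
#     break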
| 2.71875
| 3
|
sandbox/lhs_reduced.py
|
MiroK/fenics_ii
| 10
|
12784989
|
<filename>sandbox/lhs_reduced.py
from block import block_mat, block_vec, block_bc
from dolfin import *
from xii import *
import mshr
N = 10
EPS = 1E-3
R = 0.25
box_domain = mshr.Box(dolfin.Point(0, 0, 0), dolfin.Point(1, 1, 1))
_mesh = mshr.generate_mesh(box_domain, N)
stokes_subdomain = dolfin.CompiledSubDomain(
"sqrt((x[0]-0.5) * (x[0]-0.5) + (x[1]-0.5) * (x[1]-0.5)) < R", R=R
)
subdomains = MeshFunction('size_t', _mesh, _mesh.topology().dim(), 0)
# Awkward marking
for cell in cells(_mesh):
x = cell.midpoint().array()
if stokes_subdomain.inside(x, False):
subdomains[cell] = 1
else:
subdomains[cell] = 0
submeshes, interface, _ = mortar_meshes(subdomains, list(range(2)), strict=True, tol=EPS)
stokes_domain = submeshes[0]
porous_domain = submeshes[1]
interface = interface
## function spaces
# biot
Vp = FunctionSpace(porous_domain, "RT", 1)
Qp = FunctionSpace(porous_domain, "DG", 0)
U = VectorFunctionSpace(porous_domain, "CG", 2)
# # stokes
Vf = VectorFunctionSpace(stokes_domain, "CG", 2)
Qf = FunctionSpace(stokes_domain, "CG", 1)
# # lagrange multiplier
X = FunctionSpace(interface, "DG", 1)
W = [Vp, Qp, U, Vf, Qf, X]
## this is where the troubles start
up, pp, dp, uf, pf, lbd = list(map(TrialFunction, W))
vp, wp, ep, vf, wf, mu = list(map(TestFunction, W))
up_prev, pp_prev, dp_prev, uf_prev, pf_prev, lbd_prev = list(map(Function, W))
# up_prev, pp_prev, dp_prev, uf_prev, pf_prev, lbd_prev = ii_Function(W)
Tup, Tdp, Tuf = [Trace(x, interface) for x in [up, dp, uf]]
Tvp, Tep, Tvf = [Trace(x, interface) for x in [vp, ep, vf]]
dxGamma = Measure("dx", domain=interface)
n_Gamma_f = OuterNormal(interface, [0.5, 0.5, 0.5])
n_Gamma_p = -n_Gamma_f
# L_mult = inner(Trace(dp, interface), n_Gamma_p) * mu * dxGamma
AL_mult = ii_assemble(inner(Trace(dp, interface), n_Gamma_p) * mu * dxGamma)
b = AL_mult*dp_prev.vector()
assert b.size() == X.dim()
import numpy as np
L_mult = inner(Trace(dp_prev, interface), n_Gamma_p) * mu * dxGamma
AL_mult = ii_assemble(inner(Trace(dp, interface), n_Gamma_p) * mu * dxGamma)
for i in range(3):
dp_prev.vector().set_local(np.random.rand(dp_prev.vector().local_size()))
dp_prev.vector().apply('insert')
x = ii_assemble(L_mult)
y = AL_mult*dp_prev.vector()
assert (ii_convert(x) - ii_convert(y)).norm('linf') < DOLFIN_EPS
| 1.921875
| 2
|
geoist/magmod/magnetic_model/model_composed.py
|
CHEN-Zhaohui/geoist
| 53
|
12784990
|
#-------------------------------------------------------------------------------
#
# Aggregated Magnetic Model
#
# Author: <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2018 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from collections import namedtuple
from numpy import inf, zeros, asarray
from .._pymm import (
GEOCENTRIC_SPHERICAL, GEODETIC_ABOVE_WGS84, GEOCENTRIC_CARTESIAN,
convert, vrot_sph2geod, vrot_sph2cart,
)
from .model import GeomagneticModel
Component = namedtuple("_Component", ["model", "scale", "parameters"])
def _validity_overlap(validity1, validity2):
start1, end1 = validity1
start2, end2 = validity2
start = max(start1, start2)
end = min(end1, end2)
if end < start:
return -inf, -inf
return start, end
class ComposedGeomagneticModel(GeomagneticModel):
""" Composed Earth magnetic field model aggregating multiple models
into one.
"""
def __init__(self, *models):
self._parameters = set()
self._components = []
self._validity = (-inf, inf)
for model in models:
self.push(model)
def push(self, model, scale=1.0, **parameters):
""" Add model. """
self._parameters.update(model.parameters)
self._validity = _validity_overlap(self.validity, model.validity)
self._components.append(Component(model, scale, parameters))
@property
def validity(self):
return self._validity
@property
def parameters(self):
""" required parameters. """
return tuple(self._parameters)
def eval(self, time, location,
input_coordinate_system=GEOCENTRIC_SPHERICAL,
output_coordinate_system=GEOCENTRIC_SPHERICAL,
**options):
# convert input coordinates to spherical coordinates
coord_sph = convert(
location, input_coordinate_system, GEOCENTRIC_SPHERICAL
)
# get output dimension
time = asarray(time)
location = asarray(location)
if time.ndim > (location.ndim - 1):
shape = time.shape
else:
shape = location.shape[:-1]
result = zeros(shape + (3,))
final_scale = options.pop("scale", None)
for model, scale, params in self._components:
args = options.copy()
args.update(params)
result += model.eval(time, coord_sph, scale=scale, **args)
# rotate result to the desired coordinate frame
if output_coordinate_system == GEODETIC_ABOVE_WGS84:
if input_coordinate_system == GEODETIC_ABOVE_WGS84:
coord_out = location
else:
coord_out = convert(
coord_sph, GEOCENTRIC_SPHERICAL, GEODETIC_ABOVE_WGS84
)
result = vrot_sph2geod(result, coord_out[..., 0] - coord_sph[..., 0])
elif output_coordinate_system == GEOCENTRIC_CARTESIAN:
result = vrot_sph2cart(result, coord_sph[..., 0], coord_sph[..., 1])
# apply the final scale
if final_scale is not None:
result *= final_scale
return result
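# A minimal usage sketch (commented out): `model_a` and `model_b` stand for any
# GeomagneticModel instances loaded elsewhere in this package, and the time/location
# values are placeholders in whatever units those models expect.
#
# composed = ComposedGeomagneticModel()
# composed.push(model_a)
# composed.push(model_b, scale=-1.0)
# b_field = composed.eval(time, locations)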
| 1.21875
| 1
|
src/classification/experiment.py
|
rubencart/es-img-captioning
| 0
|
12784991
|
<filename>src/classification/experiment.py
import torchvision
from torchvision.transforms import transforms
from algorithm.tools.experiment import Experiment
from algorithm.tools.utils import Config
class MnistExperiment(Experiment):
"""
Subclass for MNIST experiment
"""
def __init__(self, exp, config: Config, master=True):
super().__init__(exp, config, master=master)
def init_loaders(self, config=None, batch_size=None, workers=None, _=None):
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
self.trainloader, self.valloader, self.testloader = \
self._init_torchvision_loaders(torchvision.datasets.MNIST, transform, config, batch_size, workers)
self._orig_trainloader_lth = len(self.trainloader)
| 2.375
| 2
|
children/basic_logging.py
|
IgorIvkin/Children
| 0
|
12784992
|
"""
Author: Igor
Date: 2020.06.07
"""
import logging
import sys
from datetime import datetime
from logging.handlers import TimedRotatingFileHandler
LOG_FORMATTING_STRING = logging.Formatter('%(asctime)s - %(module)s - %(filename)s - '
'%(lineno)d - %(levelname)s - %(message)s')
def get_file_handler():
file_name = 'log/error.log.{0}'.format(datetime.utcnow().strftime("%Y%m%d"))
file_handler = TimedRotatingFileHandler(filename=file_name, when='midnight')
file_handler.setFormatter(LOG_FORMATTING_STRING)
file_handler.setLevel(logging.WARN)
file_handler.suffix = "%Y%m%d"
return file_handler
def get_console_handler():
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(LOG_FORMATTING_STRING)
console_handler.setLevel(logging.DEBUG)
return console_handler
def get_logger(logger_name):
"""Returns a named logger that includes console logger that prints everything
starting from 'debug' level and file logger that prints only the sensible errors starting
from 'warning' level.
How to use:
(behind the scene application has a global variable log that
points to this logger)
from children import log
log.warn('Test warn')
log.debug('Test debug')
log.error('Test error')
You can put these output errors in any place of your code. It is recommended to avoid
using it for the errors that are easily recoverable, for example not enough params in
user input.
"""
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
logger.propagate = False
logger.addHandler(get_console_handler())
logger.addHandler(get_file_handler())
return logger
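# A minimal usage sketch; it assumes a writable local 'log/' directory exists,
# because the file handler above writes to 'log/error.log.<date>'.
if __name__ == "__main__":
    log = get_logger(__name__)
    log.debug("Debug messages go to the console only")
    log.warning("Warnings and above also go to the rotating log file")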
| 3.3125
| 3
|
test_myFunction.py
|
Guidofaassen/yolo-tyrion
| 0
|
12784993
|
<filename>test_myFunction.py
import myFunction
import numpy as np
class TestClass:
def test_answer(self):
assert myFunction.add_one(3) == 4
def test_answer2(self):
assert myFunction.add_one(4) == 5
def test_answer3(self):
assert myFunction.add_one(-2) == -1
# def test_similarity_conditional2(self):
# freqUV = np.asarray([[2,3],[3,4]])
# freqU = np.asarray([2,0])
# u = 0
# v = 1
# assert myFunction.similarity_conditional(freqUV,freqU, u, v) == 0
def test_similarity_conditional(self):
freqUV = np.asarray([[2,3],[3,4]])
freqU = np.asarray([2,0])
u = 0
v = 1
assert myFunction.similarity_conditional(freqUV,freqU, u, v) == 0
def test_similarity_conditional2(self):
freqUV = np.asarray([[2,3],[3,4]])
freqU = np.asarray([2,1])
u = 0
v = 1
assert myFunction.similarity_conditional(freqUV,freqU, u, v) == 1.5
def test_myDivide(self):
assert myFunction.myDivide(1,0) == 0
| 2.96875
| 3
|
Node_Classification/2.GraphSN_GIN/cora/models.py
|
wokas36/GraphSNN
| 11
|
12784994
|
<gh_stars>10-100
import math
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.nn.parameter import Parameter
from torch.nn import Sequential, Linear, ReLU, BatchNorm1d, Dropout
class Graphsn_GIN(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(Graphsn_GIN, self).__init__()
self.nn = Linear(nfeat, nhid)
self.fc = Linear(nhid, nclass)
self.dropout = dropout
self.eps = nn.Parameter(torch.FloatTensor(1))
self.reset_parameters()
def reset_parameters(self):
stdv_eps = 0.44 / math.sqrt(self.eps.size(0))
nn.init.constant_(self.eps, stdv_eps)
def forward(self, x, adj):
v = self.eps*torch.diag(adj)
mask = torch.diag(torch.ones_like(v))
adj = mask*torch.diag(v) + (1. - mask)*adj
x = torch.mm(adj, x)
x = F.relu(self.nn(x))
x = F.dropout(x, self.dropout, training=self.training)
x = self.fc(x)
return F.log_softmax(x, dim=-1)
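# A minimal smoke-test sketch: random node features and a dense adjacency matrix with
# the shapes the forward pass above expects ([N, nfeat] and [N, N]); sizes are arbitrary.
if __name__ == "__main__":
    model = Graphsn_GIN(nfeat=16, nhid=32, nclass=7, dropout=0.5)
    x = torch.rand(10, 16)
    adj = torch.rand(10, 10)
    out = model(x, adj)
    print(out.shape)  # expected: torch.Size([10, 7])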
| 2.515625
| 3
|
apps/fund/permissions.py
|
jfterpstra/onepercentclub-site
| 7
|
12784995
|
from rest_framework import permissions
class IsUser(permissions.BasePermission):
""" Read / write permissions are only allowed if the obj.user is the logged in user. """
def has_object_permission(self, request, view, obj):
return obj.user == request.user
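# A minimal usage sketch (commented out): the permission would be attached to a view
# through DRF's `permission_classes`; `Donation` and `DonationSerializer` are
# hypothetical names, not models from this app.
#
# from rest_framework import generics
#
# class DonationDetail(generics.RetrieveUpdateAPIView):
#     queryset = Donation.objects.all()
#     serializer_class = DonationSerializer
#     permission_classes = (IsUser,)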
| 2.71875
| 3
|
cli/decorators/count_calls.py
|
oleoneto/django-clite
| 22
|
12784996
|
<gh_stars>10-100
import functools
def count_calls(f):
@functools.wraps(f)
def wrapper_count_calls(*args, **kwargs):
wrapper_count_calls.num_calls += 1
print(f"Call {wrapper_count_calls.num_calls} of {f.__name__!r}")
return f(*args, **kwargs)
wrapper_count_calls.num_calls = 0
return wrapper_count_calls
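# A minimal usage sketch; `greet` is a made-up example function used only to show
# that each call prints a running call count before delegating to the wrapped function.
if __name__ == "__main__":
    @count_calls
    def greet(name):
        return f"Hello, {name}!"

    greet("Ada")  # prints: Call 1 of 'greet'
    greet("Bob")  # prints: Call 2 of 'greet'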
| 3.09375
| 3
|
coralinede/auto_detect_df/detect_datatype.py
|
coralinetech/coralinede
| 0
|
12784997
|
import operator
import sqlalchemy
import pandas as pd
import numpy as np
from math import ceil
DEFAULT_VARCHAR_LENGTH=100
def get_detected_column_types(df):
""" Get data type of each columns ('DATETIME', 'NUMERIC' or 'STRING')
Parameters:
df (df): pandas dataframe
Returns
df (df): dataframe that all datatypes are converted (df)
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
for c in df.columns:
# Convert column to string
col_data = df[c].map(str)
col_data = col_data.replace("NaT", None)
col_data = col_data.replace("NaN", None)
# Check NULL column
if(df[c].isnull().values.all()):
continue
# Check DATETIME
try:
# Check if it's able to convert column to datetime
# if column is datetime, then skip to convert
if 'datetime' in str(col_data.dtype):
continue
df[c] = pd.to_datetime(col_data)
continue
except ValueError:
pass
# Check NUMERIC
try:
# Drop NaN rows
series = df[c].dropna()
# if column_name is int or float, then skip to convert
if 'int' in str(col_data.dtype) or 'float' in str(col_data.dtype):
continue
# Check if it can be converted to numeric
df[c] = pd.to_numeric(series)
except ValueError:
pass
return df
def get_max_length_columns(df):
""" find maximum length of value in each column and ceil it
Parameters:
df (df): dataframe
Returns
arr_max_len_columns (array): array of length for each column
arr_max_decimal (array): array of maximum decimal for float, double, and decimal datatype, otherwise its value is zero
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
measurer = np.vectorize(len)
arr_max_len_columns = []
arr_max_decimal = []
for i, x in enumerate(measurer(df.values.astype(str)).max(axis=0)):
if 'float' in str(df.iloc[:, i].dtype):
            col_data = df.iloc[:, i].map(str).str.extract(r'\.(.*)')
max_decimal = measurer(col_data.values.astype(str)).max(axis=0)[0]
arr_max_decimal.append(max_decimal)
else:
arr_max_decimal.append(0)
arr_max_len_columns.append(ceil(x / 10) * 10)
return arr_max_len_columns, arr_max_decimal
def convert_df_datatype_to_sqlalchemy_datatype(df):
""" convert dataframe's data type into SQLAlchemy's data type
Parameters:
df (df): dataframe
Returns:
dtype_dict (dict): dict of data type of each column in SQLAlchemy standard
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
arr_max_len_columns, arr_max_decimal = get_max_length_columns(df)
dtype_dict = {}
for i, col_name in enumerate(df.columns):
if(df[col_name].isnull().values.all()):
dtype_dict[col_name] = sqlalchemy.types.VARCHAR(DEFAULT_VARCHAR_LENGTH)
elif 'bool' in str(df[col_name].dtype):
# Compatible with SQL-Server and MySQL, since MySQL doesn't have BOOLEAN.
dtype_dict[col_name] = sqlalchemy.types.INTEGER()
elif 'int' in str(df[col_name].dtype):
dtype_dict[col_name] = sqlalchemy.types.INTEGER()
elif 'float' in str(df[col_name].dtype):
if df[col_name].dropna().apply(float.is_integer).all():
dtype_dict[col_name] = sqlalchemy.types.INTEGER()
else:
dtype_dict[col_name] = sqlalchemy.types.DECIMAL(precision=arr_max_len_columns[i], scale=arr_max_decimal[i])
elif 'datetime' in str(df[col_name].dtype):
dtype_dict[col_name] = sqlalchemy.types.DateTime()
elif 'object' in str(df[col_name].dtype):
            # check the VARCHAR length limit; if it exceeds 1000, use TEXT instead
if arr_max_len_columns[i] > 1000:
dtype_dict[col_name] = sqlalchemy.types.Text()
else:
dtype_dict[col_name] = sqlalchemy.types.VARCHAR(length=arr_max_len_columns[i])
else:
dtype_dict[col_name] = sqlalchemy.types.VARCHAR(length=arr_max_len_columns[i])
return dtype_dict
def get_datatype_each_col(df):
""" main function to call sub-function in order to find data type and data length for each column
Parameters:
df (df): dataframe
Returns:
dtype_dict (dict): dict of data type of each column in SQLAlchemy standard (dict)
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
df = get_detected_column_types(df)
dtype_dict = convert_df_datatype_to_sqlalchemy_datatype(df)
del df
return dtype_dict
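# A minimal usage sketch with a small in-memory DataFrame; the column names and values
# are illustrative only, and the printed dict maps each column to a SQLAlchemy type.
if __name__ == "__main__":
    sample = pd.DataFrame({
        "created_at": ["2021-01-01", "2021-02-15"],
        "amount": ["10.5", "20.25"],
        "name": ["alice", "bob"],
    })
    print(get_datatype_each_col(sample))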
| 3.296875
| 3
|
samples/python/40.timex-resolution/ambiguity.py
|
Aliacf21/BotBuilder-Samples
| 1,998
|
12784998
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from recognizers_date_time import recognize_datetime, Culture
class Ambiguity:
"""
TIMEX expressions are designed to represent ambiguous rather than definite dates. For
example: "Monday" could be any Monday ever. "May 5th" could be any one of the possible May
5th in the past or the future. TIMEX does not represent ambiguous times. So if the natural
language mentioned 4 o'clock it could be either 4AM or 4PM. For that the recognizer (and by
extension LUIS) would return two TIMEX expressions. A TIMEX expression can include a date and
time parts. So ambiguity of date can be combined with multiple results. Code that deals with
TIMEX expressions is frequently dealing with sets of TIMEX expressions.
"""
@staticmethod
def date_ambiguity():
# Run the recognizer.
results = recognize_datetime(
"Either Saturday or Sunday would work.", Culture.English
)
# We should find two results in this example.
for result in results:
# The resolution includes two example values: going backwards and forwards from NOW in the calendar.
# Each result includes a TIMEX expression that captures the inherent date but not time ambiguity.
# We are interested in the distinct set of TIMEX expressions.
# There is also either a "value" property on each value or "start" and "end".
distinct_timex_expressions = {
value["timex"]
for value in result.resolution["values"]
if "timex" in value
}
print(f"{result.text} ({','.join(distinct_timex_expressions)})")
@staticmethod
def time_ambiguity():
# Run the recognizer.
results = recognize_datetime(
"We would like to arrive at 4 o'clock or 5 o'clock.", Culture.English
)
# We should find two results in this example.
for result in results:
# The resolution includes two example values: one for AM and one for PM.
# Each result includes a TIMEX expression that captures the inherent date but not time ambiguity.
# We are interested in the distinct set of TIMEX expressions.
distinct_timex_expressions = {
value["timex"]
for value in result.resolution["values"]
if "timex" in value
}
# TIMEX expressions don't capture time ambiguity so there will be two distinct expressions for each result.
print(f"{result.text} ({','.join(distinct_timex_expressions)})")
@staticmethod
def date_time_ambiguity():
# Run the recognizer.
results = recognize_datetime(
"It will be ready Wednesday at 5 o'clock.", Culture.English
)
# We should find a single result in this example.
for result in results:
# The resolution includes four example values: backwards and forward in the calendar and then AM and PM.
# Each result includes a TIMEX expression that captures the inherent date but not time ambiguity.
# We are interested in the distinct set of TIMEX expressions.
distinct_timex_expressions = {
value["timex"]
for value in result.resolution["values"]
if "timex" in value
}
# TIMEX expressions don't capture time ambiguity so there will be two distinct expressions for each result.
print(f"{result.text} ({','.join(distinct_timex_expressions)})")
| 3.4375
| 3
|
sensors/rg.py
|
caioluders/PII-Identifier
| 14
|
12784999
|
import re
def check(data) :
if not isinstance(data, list) :
data = [data]
maybe_rgs_parsed = [ re.sub("[^0-9]","",r) for r in data ]
true_rgs = []
for r in maybe_rgs_parsed :
r1 = list(map(int,list(r)))
r4 = r1[:8]
r2 = [ r4[rr]*(2+rr) for rr in range(len(r4)) ]
d1 = 11-(sum(r2)%11)
r4.append(d1)
if r == ''.join(map(str,r4)) : true_rgs.append(r)
return true_rgs
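# A minimal usage sketch; the RG numbers below are made-up placeholders, so an empty
# result simply means none of them passed the check-digit validation.
if __name__ == "__main__":
    print(check(["12.345.678-9", "00.000.000-0"]))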
| 2.96875
| 3
|
kyu_7/remove_the_minimum/test_remove_the_minimum.py
|
pedrocodacyorg2/codewars
| 1
|
12785000
|
<filename>kyu_7/remove_the_minimum/test_remove_the_minimum.py<gh_stars>1-10
# Created by <NAME>.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
# FUNDAMENTALS, LISTS, DATA STRUCTURES, ARRAYS
import unittest
import allure
from numpy.random import randint
from kyu_7.remove_the_minimum.remove_the_minimum import remove_smallest
@allure.epic('7 kyu')
@allure.parent_suite('Beginner')
@allure.suite("Data Structures")
@allure.sub_suite("Unit Tests")
@allure.feature("Lists")
@allure.story('The museum of incredible dull things')
@allure.tag()
@allure.link(url='',
name='Source/Kata')
class RemoveSmallestTestCase(unittest.TestCase):
"""
Testing remove_smallest function
"""
@staticmethod
def random_list():
"""
Helper function
:return:
"""
with allure.step("Create a random list"):
return list(randint(400, size=randint(1, 10)))
def test_remove_smallest(self):
"""
Test lists with multiple digits
:return:
"""
allure.dynamic.title("'multiply' function verification: "
"lists with multiple digits")
allure.dynamic.severity(allure.severity_level.NORMAL)
allure.dynamic.description_html('<h3>Codewars badge:</h3>'
'<img src="https://www.codewars.com/users/myFirstCode'
'/badges/large">'
'<h3>Test Description:</h3>'
"<p></p>")
with allure.step("Remove smallest value from "
"the start of the list"):
self.assertEqual(remove_smallest([1, 2, 3, 4, 5]),
[2, 3, 4, 5],
"Wrong result for [1, 2, 3, 4, 5]")
with allure.step("Remove smallest value from "
"near the end of the list"):
self.assertEqual(remove_smallest([5, 3, 2, 1, 4]),
[5, 3, 2, 4],
"Wrong result for [5, 3, 2, 1, 4]")
with allure.step("Remove smallest value from "
"the end of the list"):
self.assertEqual(remove_smallest([1, 2, 3, 1, 1]),
[2, 3, 1, 1],
"Wrong result for [1, 2, 3, 1, 1]")
def test_remove_smallest_empty_list(self):
"""
Test with empty list
:return:
"""
allure.dynamic.title("'multiply' function verification "
"with empty list")
allure.dynamic.severity(allure.severity_level.NORMAL)
allure.dynamic.description_html('<h3>Codewars badge:</h3>'
'<img src="https://www.codewars.com/users/myFirstCode'
'/badges/large">'
'<h3>Test Description:</h3>'
"<p></p>")
with allure.step("Remove smallest value from "
"the empty list"):
self.assertEqual(remove_smallest([]),
[],
"Wrong result for []")
def test_remove_smallest_one_element_list(self):
"""
Returns [] if list has only one element
:return:
"""
allure.dynamic.title("'multiply' function verification "
"with one element list")
allure.dynamic.severity(allure.severity_level.NORMAL)
allure.dynamic.description_html('<h3>Codewars badge:</h3>'
'<img src="https://www.codewars.com/users/myFirstCode'
'/badges/large">'
'<h3>Test Description:</h3>'
"<p></p>")
with allure.step("Remove smallest value from "
"the empty list with one element only"):
i = 0
while i < 10:
x = randint(1, 400)
self.assertEqual(remove_smallest([x]),
[],
"Wrong result for [{}]".format(x))
i += 1
def test_remove_smallest_random_list(self):
"""
Returns a list that misses only one element
:return:
"""
allure.dynamic.title("'multiply' function verification "
"with random list")
allure.dynamic.severity(allure.severity_level.NORMAL)
allure.dynamic.description_html('<h3>Codewars badge:</h3>'
'<img src="https://www.codewars.com/users/myFirstCode'
'/badges/large">'
'<h3>Test Description:</h3>'
"<p></p>")
with allure.step("Remove smallest value from "
"the random list"):
i = 0
while i < 10:
arr = self.random_list()
self.assertEqual(len(remove_smallest(arr[:])),
len(arr) - 1,
"Wrong sized result for {}".format(arr))
i += 1
| 3.234375
| 3
|
day 4 .py
|
ShubhamSinghThakur2907/100daysofcode
| 0
|
12785001
|
<reponame>ShubhamSinghThakur2907/100daysofcode<filename>day 4 .py
Python 3.7.4 (v3.7.4:e09359112e, Jul 8 2019, 14:54:52)
[Clang 6.0 (clang-600.0.57)] on darwin
Type "help", "copyright", "credits" or "license()" for more information.
>>> ## there are basically 2 types of functions in python:
>>> ## pre-defined (built-in) functions
>>> ## user-defined functions
>>> ## built-in functions are the functions which can be used by just calling them
>>> ## examples of built-in functions: help() print() input() min()
>>> ## user-defined functions are the functions which need to be defined before calling them
>>> def avarage(x,y):
print("avarage of x & y is :",(x+y)/2)
avarage(3,4)
SyntaxError: unindent does not match any outer indentation level
>>>
>>> print(avarage(3,4))
Traceback (most recent call last):
File "<pyshell#10>", line 1, in <module>
print(avarage(3,4))
NameError: name 'avarage' is not defined
>>> def avarage(x,y):
z=(x+y)/2
return(z)
avarage(4,6)
SyntaxError: invalid syntax
>>> ##an argument is value sent to function during execution
>>> ## recursion means a function can also call itself.
>>> #example of recursive function
>>> def tri_recursion(k):
if(k>0):
result = k+tri_recursion(k-1)
print(result)
else:
result = 0
return result
print("\n\nRecursion Example Results")
tri_recursion(6)
SyntaxError: invalid syntax
>>> #A lambda function can take any number of arguments, but can only have one expression
>>> # example of a lambda function
>>> x = lambda a : a + 10
print(x(5))
SyntaxError: multiple statements found while compiling a single statement
>>>
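# A corrected, runnable sketch of the snippets attempted in the session above
# (consistent indentation, "average" spelled out, and a simplified recursion example):
def average(x, y):
    return (x + y) / 2

def tri_recursion(k):
    if k > 0:
        return k + tri_recursion(k - 1)
    return 0

add_ten = lambda a: a + 10

print(average(3, 4))     # 3.5
print(tri_recursion(6))  # 21
print(add_ten(5))        # 15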
| 3.4375
| 3
|
fetching_data.py
|
mosihere/selenium
| 0
|
12785002
|
<gh_stars>0
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
# Install selenium driver in case that doesn't exist
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
# Count fetched items
counter = 1
# page numbers you want to scrape
for page in range(1, 11):
    url = f'https://divar.ir/s/tehran/buy-apartment/parand?page={page}'
    driver.get(url)
    elements = driver.find_elements(By.CLASS_NAME, 'kt-post-card__body')
    urls = driver.find_elements(By.CLASS_NAME, 'kt-post-card')
    # Append this page's results to the output file; the context manager closes
    # the handle each iteration instead of leaking it until the loop ends
    with open('data.txt', 'a') as f:
        for element, url in zip(elements, urls):
            f.write(f'{counter} --> {element.text}')
            f.write('\n')
            f.write(f'Link --> {url.get_attribute("href")}')
            counter += 1
    time.sleep(10)
driver.quit()
| 3.046875
| 3
|
openprocurement/auctions/geb/tests/fixtures/active_tendering.py
|
oleksiyVeretiuk/openprocurement.auctions.geb
| 0
|
12785003
|
<reponame>oleksiyVeretiuk/openprocurement.auctions.geb
from copy import deepcopy
from openprocurement.auctions.core.utils import get_now
from openprocurement.auctions.geb.tests.fixtures.active_rectification import (
ACTIVE_RECTIFICATION_AUCTION_DEFAULT_FIXTURE
)
from openprocurement.auctions.geb.tests.fixtures.questions import (
TEST_QESTION_IN_TENDERING_PERIOD
)
from openprocurement.auctions.geb.tests.fixtures.bids import (
DRAFT_BID,
PENDING_BID_FIRST,
ACTIVE_BID_FIRST,
ACTIVE_BID_SECOND
)
from openprocurement.auctions.geb.tests.fixtures.calculator import (
Calculator
)
now = get_now()
calculator = Calculator(now, 'tenderPeriod', 'start')
# blank auction fixture
auction = deepcopy(ACTIVE_RECTIFICATION_AUCTION_DEFAULT_FIXTURE)
auction['status'] = 'active.tendering'
auction["rectificationPeriod"] = {
"startDate": calculator.rectificationPeriod.startDate.isoformat(),
"endDate": calculator.rectificationPeriod.endDate.isoformat()
}
auction["tenderPeriod"] = {
"startDate": calculator.tenderPeriod.startDate.isoformat(),
"endDate": calculator.tenderPeriod.endDate.isoformat()
}
auction["enquiryPeriod"] = {
"startDate": calculator.enquiryPeriod.startDate.isoformat(),
"endDate": calculator.enquiryPeriod.endDate.isoformat()
}
auction["auctionPeriod"] = {
"shouldStartAfter": calculator.auctionPeriod.shouldStartAfter.isoformat()
}
auction["next_check"] = calculator.tenderPeriod.endDate.isoformat()
auction["date"] = calculator.auctionDate.date.isoformat()
ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE = auction
# auction with questions fixture
auction = deepcopy(ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE)
auction['questions'] = [TEST_QESTION_IN_TENDERING_PERIOD]
ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE_WITH_QUESTION = auction
# auction with bids
auction = deepcopy(ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE)
auction['bids'] = [DRAFT_BID]
AUCTION_WITH_DRAFT_BID = auction
auction = deepcopy(ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE)
auction['bids'] = [PENDING_BID_FIRST]
AUCTION_WITH_PENDING_BID = auction
auction = deepcopy(ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE)
auction['bids'] = [ACTIVE_BID_FIRST]
AUCTION_WITH_ACTIVE_BID = auction
# end tendering period fixtures
calculator = Calculator(now, 'tenderPeriod', 'end')
auction = deepcopy(ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE)
auction["rectificationPeriod"] = {
"startDate": calculator.rectificationPeriod.startDate.isoformat(),
"endDate": calculator.rectificationPeriod.endDate.isoformat()
}
auction["tenderPeriod"] = {
"startDate": calculator.tenderPeriod.startDate.isoformat(),
"endDate": calculator.tenderPeriod.endDate.isoformat()
}
auction["enquiryPeriod"] = {
"startDate": calculator.enquiryPeriod.startDate.isoformat(),
"endDate": calculator.enquiryPeriod.endDate.isoformat()
}
auction["auctionPeriod"] = {
"shouldStartAfter": calculator.auctionPeriod.shouldStartAfter.isoformat()
}
auction["next_check"] = calculator.enquiryPeriod.endDate.isoformat()
auction["date"] = calculator.auctionDate.date.isoformat()
END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE = auction
# end tendering period with one bid fixture
auction = deepcopy(END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE)
auction['bids'] = [ACTIVE_BID_FIRST]
END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE_WITH_ONE_BID = auction
# end tendering period with two bid fixture
auction = deepcopy(END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE)
auction['bids'] = [ACTIVE_BID_FIRST, ACTIVE_BID_SECOND]
END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE_WITH_TWO_BIDS = auction
# end tendering period with two bids and one draft bid
auction = deepcopy(END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE)
auction['bids'] = [ACTIVE_BID_FIRST, ACTIVE_BID_SECOND, DRAFT_BID]
END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE_WITH_TWO_BIDS_AND_ONE_DRAFT = auction
| 2.015625
| 2
|
src/abstract_factory2.py
|
MrRezoo/design-patterns-python
| 6
|
12785004
|
"""
Creational:
abstract factory
Car => Benz, Bmw => Suv, Coupe
benz suv => gla, glc
bmw suv => x1, x2
benz coupe => cls, E-class
bmw coupe => m2, m4
"""
from abc import ABC, abstractmethod
class Car(ABC):
@abstractmethod
def call_suv(self):
pass
@abstractmethod
def call_coupe(self):
pass
# -------------------------------------------------------------
class Benz(Car):
def call_suv(self):
return Gla()
def call_coupe(self):
return Cls()
# -------------------------------------------------------------
class Bmw(Car):
def call_suv(self):
return X1()
def call_coupe(self):
return M2()
# -------------------------------------------------------------
class Suv(ABC):
@abstractmethod
def creating_suv(self):
pass
class Coupe(ABC):
@abstractmethod
def creating_coupe(self):
pass
# -------------------------------------------------------------
class Gla(Suv):
def creating_suv(self):
print("This is your suv benz gla. . . ")
class X1(Suv):
def creating_suv(self):
print("This is your suv bmw x1. . .")
# -------------------------------------------------------------
class Cls(Coupe):
def creating_coupe(self):
print("This is your coupe benz cls. . .")
class M2(Coupe):
def creating_coupe(self):
print("This is your coupe bmw m2. . .")
# -------------------------------------------------------------
def client_suv(order):
suv = order.call_suv()
suv.creating_suv()
def client_coupe(order):
coupe = order.call_coupe()
coupe.creating_coupe()
# -------------------------------------------------------------
"""Test the app"""
client_coupe(Bmw())
client_coupe(Benz())
client_suv(Benz())
client_suv(Bmw())
| 4.15625
| 4
|
coinlib/wallet.py
|
santosmarco/coinlib
| 3
|
12785005
|
import coinlib
class Wallet():
    def __init__(self, contents=None):
        # Avoid a shared mutable default argument for the wallet contents
        contents = contents if contents is not None else {}
        coins_by_symbol = coinlib.get_all_coins_by_symbol()
        coins_by_name = coinlib.get_all_coins_by_name()
        self.contents = {}
for coin in contents:
coin_lower = coin.lower().strip()
if (coin_lower not in coins_by_symbol
and coin_lower not in coins_by_name):
raise ValueError(coinlib.errors('103', [coin]))
else:
self.contents[coin_lower] = contents[coin]
def get_value(self, convert='USD'):
if len(self.contents) == 0:
return 0
convert = convert.upper().strip()
if convert not in coinlib.VALID_CONVERSION_CURRENCIES:
raise ValueError(coinlib.errors('101', [convert]))
contents_coins = coinlib.get_coins(list(self.contents.keys()), convert)
values = []
for coin in contents_coins:
values.append(contents_coins[coin]['price']*self.contents[coin])
return sum(values)
def add(self, coin, quantity):
coin = coin.lower().strip()
if coin in self.contents:
self.contents[coin] += quantity
else:
self.contents[coin] = quantity
return self.contents
def add_many(self, coins):
for coin in coins:
coin = coin.lower().strip()
if coin in self.contents:
self.contents[coin] += coins[coin]
else:
self.contents[coin] = coins[coin]
return self.contents
def subtract(self, coin, quantity):
self.contents[coin.lower().strip()] -= quantity
return self.contents
def subtract_many(self, coins):
for coin in coins:
coin = coin.lower().strip()
self.contents[coin] -= coins[coin]
return self.contents
def remove(self, coin):
del self.contents[coin.lower().strip()]
return self.contents
    def remove_many(self, coins):
        for coin in coins:
            coin = coin.lower().strip()
            del self.contents[coin]
        return self.contents
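# A minimal usage sketch; it needs network access because coinlib fetches live prices,
# and the coin symbols and quantities below are illustrative only.
if __name__ == "__main__":
    w = Wallet({"btc": 0.5, "eth": 2})
    w.add("btc", 0.1)
    print(w.contents)
    print(w.get_value("USD"))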
| 3.34375
| 3
|
instagram/views.py
|
amiinegal/instagram
| 0
|
12785006
|
from django.shortcuts import render, redirect, get_object_or_404
from .models import Image,Profile,Location,tags
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from .forms import NewImageForm
from .email import send_welcome_email
from .forms import NewsLetterForm, NewCommentForm
# Views
@login_required(login_url='/accounts/login/')
def home_images(request):
# Display all images here:
# images = Image.objects.all()
locations = Location.objects.all()
if request.GET.get('location'):
pictures = Image.filter_by_location(request.GET.get('location'))
elif request.GET.get('tags'):
pictures = Image.filter_by_tag(request.GET.get('tags'))
elif request.GET.get('search_term'):
pictures = Image.search_image(request.GET.get('search_term'))
else:
pictures = Image.objects.all()
if request.method == 'POST':
form = NewsLetterForm(request.POST)
if form.is_valid():
name = form.cleaned_data['your_name']
email = form.cleaned_data['email']
HttpResponseRedirect('home_images')
else:
form = NewsLetterForm()
return render(request, 'index.html', {'locations':locations,
'pictures':pictures, 'letterForm':form})
@login_required(login_url='/accounts/login/')
def image(request):
images = Image.objects.all()
# try:
# image = Image.objects.get(pk = id)
    # except DoesNotExist:
# raise Http404()
# current_user = request.user
return render(request,'registration/image_list.html', {"images":images})
@login_required(login_url='/accounts/login/')
def new_image(request):
current_user = request.user
if request.method == 'POST':
form = NewImageForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.user = current_user
image.save()
return redirect('homePage')
else:
form = NewImageForm()
return render(request, 'registration/new_image.html', {"form": form})
def search_users(request):
# search for a user by their username
if 'user' in request.GET and request.GET["user"]:
search_term = request.GET.get("user")
searched_users = Profile.search_users(search_term)
message = f"{search_term}"
return render(request, 'search.html', {"message": message, "profiles": searched_users})
else:
message = "You haven't searched for any person"
return render(request, 'search.html', {"message": message})
def search_image(request):
# search for an image by the description of the image
if 'image' in request.GET and request.GET["image"]:
search_term = request.GET.get("image")
searched_images = Image.search_image(search_term)
message = f"{search_term}"
return render(request, 'search.html', {"message": message, "pictures": searched_images})
else:
message = "You haven't searched for any image"
return render(request, 'search.html', {"message": message})
@login_required(login_url='/accounts/login/')
def myprofile(request, username = None):
current_user = request.user
pictures = Image.objects.filter(user=current_user).all()
return render(request, 'profile.html', locals(), {'pictures':pictures})
@login_required(login_url='/accounts/login/')
def individual_profile_page(request, username):
print(username)
if not username:
username = request.user.username
# images by user id
images = Image.objects.filter(user_id=username)
user = request.user
profile = Profile.objects.get(user=user)
userf = User.objects.get(pk=username)
if userf:
print('user found')
profile = Profile.objects.get(user=userf)
else:
        print('No such user')
return render (request, 'registration/profile.html', {'images':images,'profile':profile,'user':user, 'username': username})
def user_list(request):
user_list = User.objects.all()
context = {'user_list': user_list}
return render(request, 'image_details.html', context)
def image_detail(request, image_id):
image = Image.objects.get(id = image_id)
return render(request, 'image_details.html', {"image":image})
@login_required(login_url='/accounts/login/')
def new_comment(request, username):
current_user =request.user
username = current_user.username
if request.method =='POST':
form = NewCommentForm(request.POST, request.FILES)
if form.is_valid():
comment = form.save()
comment.user = request.user
comment.save()
return redirect('homePage')
else:
form = NewCommentForm()
return render(request, 'new_comment.html', {"form":form})
@login_required(login_url='/accounts/login/')
def single_image_like(request, image_id):
image = Image.objects.get(id=image_id)
image.likes = image.likes + 1
image.save()
return redirect('homePage')
| 2.15625
| 2
|
2021/day08/main.py
|
jonathonfletcher/adventofcode
| 0
|
12785007
|
<gh_stars>0
from collections import defaultdict
class P1(object):
def __init__(self):
self.counter = 0
self.length_mapping = {
2:1,
3:7,
4:4,
7:8
}
def add(self, inputs, outputs):
for o in outputs:
if len(o) in self.length_mapping.keys():
self.counter += 1
@property
def value(self):
return self.counter
class P2(P1):
def deduce_inputs(self, inputs):
found = {}
segments = {}
remaining = list()
for i in inputs:
x = self.length_mapping.get(len(i))
if x is not None:
found[x] = i
remaining.append(i)
remaining = sorted(remaining, key=lambda x: len(x))
if found.get(1):
for seg in found[1]:
c = 0
for e in remaining:
if not set([seg]).issubset(set(e)):
c+=1
if 1 == c:
segments['f'] = seg
for e in remaining:
if not set([seg]).issubset(set(e)):
found[2] = e
break
elif 2 == c:
segments['c'] = seg
if found.get(7):
b_seg = set(found[7])-set(found[1])
if 1 == len(b_seg):
segments['a'] = b_seg.pop()
for e in remaining:
if not 5 == len(e):
continue
if e in found.values():
continue
seg_c = segments['c']
seg_f = segments['f']
if set([seg_c]).issubset(set(e)):
found[3] = e
elif set([seg_f]).issubset(set(e)):
found[5] = e
i = set(e).intersection(set(found[2])).intersection(set(found[4]))
if 1 == len(i):
segments['d'] = i.pop()
if found.get(4):
e = set(found[4])
for k in ['c', 'd', 'f']:
v = segments.get(k)
if v is not None:
e = e - set([v])
if 1 == len(e):
segments['b'] = e.pop()
if found.get(3) and found.get(5):
e = set(found[3]).intersection(set(found[5]))
for k, v in segments.items():
e = e - set([v])
if 1 == len(e):
segments['g'] = e.pop()
for r in remaining:
if not 7 == len(r):
continue
e = set(r)
for k, v in segments.items():
e = e - set([v])
if 1 == len(e):
segments['e'] = e.pop()
for r in remaining:
if not 6 == len(r):
continue
d_seg = set([segments['d']])
e_seg = set([segments['e']])
if d_seg.union(e_seg).issubset(set(r)):
found[6] = r
elif d_seg.issubset(set(r)):
found[9] = r
elif e_seg.issubset(set(r)):
found[0] = r
if False:
rfound = {str(v):str(k) for k, v in found.items()}
rsegments = {str(v):str(k) for k, v in segments.items()}
for r in remaining:
row = list()
for e in list("ABCDEFG"):
v = " "
if set([e]).issubset(set(r)):
v = e
row.append(f'{rsegments.get(v,"")}-{v}')
row.append(str(len(r)))
row.append(rfound.get(r, ""))
print("\t".join(row))
return {str(v):int(k) for k, v in found.items()}
def add(self, inputs, outputs):
found = self.deduce_inputs(inputs)
if 10 == len(found):
# print(outputs)
# print(found)
v = 0
for o in outputs:
v *= 10
v += found[o]
print(f'{outputs} - {v}')
self.counter += v
if __name__ == '__main__':
with open("part1.txt") as ifp:
p1 = P1()
p2 = P2()
for line in list(map(lambda x: x.strip().upper(), ifp.readlines())):
inputs, outputs = list(map(lambda x: x.strip(), line.split('|')))
inputs = list(map(lambda x: ''.join(sorted(list(x))), inputs.split()))
outputs = list(map(lambda x: ''.join(sorted(list(x))), outputs.split()))
p1.add(inputs, outputs)
p2.add(inputs, outputs)
print(p1.value)
print(p2.value)
| 2.59375
| 3
|
TriangleStrategy.py
|
cibobo/FCoin
| 0
|
12785008
|
<reponame>cibobo/FCoin<filename>TriangleStrategy.py
import FCoinRestLib
import time
import datetime
import json
import fcoin3
import math
class TriangleStrategy(object):
# minimum trading volumn for the reference coin
# BTC: 0.001; ETH: 0.01; USDT: 6.23
minNotional = 0.001
# standard volumn used for triangle strategy
# buy_volumn = minNotional * 1
# minimum trading volumn unit for the symbol|ref_coin[0], symbol|ref_coin[1] and ref_coin[1]|ref_coin[0]
# for the trading group FT/USDT, FT/BTC, BTC/USDT
# minQty = [0.0001, 0.0001, 0.000001]
# minPrice = [0.000001, 0.0001, 0.000001]
# price_precise = [int(-math.log10(x)) for x in minPrice]
# volumn toloranz (0.05%) to match the request delay, that some one has already buy/sell with the detected price
volumn_toloranz = 1.1
# define trigger interval for trading
trigger_threshold = 1.003
# target win rate
target_win_rate = 0.0006
# Fee quote, which is use to calculate exactly selling volumn
fee_quote = 0.001
def __init__(self, symbol, coin):
self.fcoin = fcoin3.Fcoin()
self.fcoin.auth(FCoinRestLib.APIKey, FCoinRestLib.privateKey)
self.symbol = symbol
self.coin = coin
self.price = {}
self.getExchangeInfo()
        # Use the trading minQty to ask for the first price, so that the required buy/sell volume can be estimated
self.volumn = []
for i in range(3):
self.volumn.append({'buy':self.minQty[i],'sell':self.minQty[i]})
self.balance_coin_list = coin
self.balance_coin_list.append(symbol)
self.saveAccountInfo()
def saveAccountInfo(self):
# get the current price
self.getTrianglePrice()
file_out = open('AccountInfo.log','a')
# save date time
file_out.write(str(datetime.datetime.now())+'\n')
file_out.write(str(time.time())+'\n')
# get balance
balance = FCoinRestLib.getBalance(self.balance_coin_list)
json.dump(balance, file_out)
file_out.write("\n")
# calculate the total price in coin 0 (usdt)
symbol_free = float(balance[self.symbol])*self.price['direct_sell']
between_free = float(balance[self.coin[1]])*self.price['rate_sell']
total_free = float(balance[self.coin[0]]) + symbol_free + between_free
file_out.write("Total balance in " + str(self.coin[0]) + " is: ")
file_out.write(str(total_free))
file_out.write('\n\n')
file_out.close()
def getExchangeInfo(self):
exchangeInfo = self.fcoin.get_symbols()
# minimum trading price unit for the symbol|ref_coin[0], symbol|ref_coin[1] and ref_coin[1]|ref_coin[0]
self.minPrice = []
self.price_precise = []
self.minQty = []
# get exchange info from symbol|ref_coin[0]
# get all filters for the target trading symbol
filters = next(item for item in exchangeInfo if item['name'] == str(self.symbol+self.coin[0]))['price_decimal']
self.price_precise.append(filters)
Qty = next(item for item in exchangeInfo if item['name'] == str(self.symbol+self.coin[0]))['amount_decimal']
self.minQty.append(10**(-1*Qty))
# get exchange info from symbol|ref_coin[1]
filters = next(item for item in exchangeInfo if item['name'] == str(self.symbol+self.coin[1]))['price_decimal']
self.price_precise.append(filters)
Qty = next(item for item in exchangeInfo if item['name'] == str(self.symbol+self.coin[1]))['amount_decimal']
self.minQty.append(10**(-1*Qty))
# get exchange info from ref_coin[1]|ref_coin[0]
filters = next(item for item in exchangeInfo if item['name'] == str(self.coin[1]+self.coin[0]))['price_decimal']
self.price_precise.append(filters)
Qty = next(item for item in exchangeInfo if item['name'] == str(self.coin[1]+self.coin[0]))['amount_decimal']
self.minQty.append(10**(-1*Qty))
# calculate the precise
self.minPrice = [math.pow(10,-x) for x in self.price_precise]
print(self.minPrice)
print(self.price_precise)
print(self.minQty)
def getTrianglePrice(self):
# Create 3 threads to get the 3 prices of triangle trading parallel
thread1 = FCoinRestLib.getPriceThread(1, "Thread-1", self.symbol, self.coin[0], self.volumn[0])
thread2 = FCoinRestLib.getPriceThread(2, "Thread-2", self.symbol, self.coin[1], self.volumn[1])
thread3 = FCoinRestLib.getPriceThread(3, "Thread-3", self.coin[1], self.coin[0], self.volumn[2])
# Start new Threads
thread1.start()
thread2.start()
thread3.start()
# Wait for all threads to complete
thread1.join()
thread2.join()
thread3.join()
if 'NAN' in thread1.price.values() or 'NAN' in thread2.price.values() or 'NAN' in thread3.price.values():
print("Market depth is too shallow, wait for another round")
return 0
# Get the price from thread back and calculate the triangle price
self.price['direct_buy'] = thread1.price['asks_vol']
self.price['direct_sell'] = thread1.price['bids_vol']
# Add sell_1 price for limit trading
self.price['direct_sell_1'] = thread1.price['bids_1']
self.price['between_buy'] = thread2.price['asks_vol']
self.price['between_sell'] = thread2.price['bids_vol']
# Add buy_1 price for limit between sell
self.price['between_buy_1'] = thread2.price['asks_1']
self.price['rate_buy'] = thread3.price['asks_vol']
self.price['rate_sell'] = thread3.price['bids_vol']
# Add buy_1 price for limit rate sell
self.price['rate_buy_1'] = thread3.price['asks_1']
# Two trading directions are possible:
# 1. coin[0] --> coin[1](between) --> symbol --> coin[0]: call BBS (buy buy sell)
# 2. coin[0] --> symbol --> coin[1](between) --> coin[0]: call BSS (buy sell sell)
# Calculate BBS price and win
self.price['BBS_price'] = self.price['between_buy']*self.price['rate_buy']
self.price['BBS_win'] = self.price['direct_sell']/self.price['BBS_price']
# Calculate BSS price and win
self.price['BSS_price'] = self.price['between_sell']*self.price['rate_sell']
self.price['BSS_win'] = self.price['BSS_price']/self.price['direct_buy']
#TODO Test Code
# self.price['BBS_win'] = self.price['BSS_price']/self.price['direct_buy']
# self.price['BSS_win'] = self.price['BSS_price']/(self.price['direct_sell']+0.00001)
# calculate buy volumn based on the BTC minNotation
self.buy_volumn = self.minNotional*self.price['rate_buy']
print("Current buy Volumn is: ", self.buy_volumn)
# Prepare the volumn for the next price request
symoble_buy = self.buy_volumn/self.price['direct_buy']
symbole_sell = self.buy_volumn/self.price['direct_sell']
# Direct trading volumn
self.volumn[0]['buy'] = symoble_buy*self.volumn_toloranz
self.volumn[0]['sell'] = symbole_sell*self.volumn_toloranz
# Between trading volumn
self.volumn[1]['buy'] = symoble_buy*self.price['between_buy']*self.volumn_toloranz
self.volumn[1]['sell'] = symoble_buy*self.price['between_sell']*self.volumn_toloranz
# Rate trading volumn
self.volumn[2]['buy'] = (self.buy_volumn/self.price['rate_buy'])*self.volumn_toloranz
self.volumn[2]['sell'] = (self.buy_volumn/self.price['rate_sell'])*self.volumn_toloranz
print("Price: --------")
print(self.price)
print("Volumn: --------")
print(self.volumn)
return 1
# Use limit Direct Buy and also limit sell of between coin
def triangleTradingLimitTwice(self):
# recalculate the direct buy price with sell price (bid_1) + minPrice allowed by platform
self.price['direct_buy'] = round(float(self.price['direct_sell_1'] + self.minPrice[0]),self.price_precise[0])
# recalculate the between sell price with buy price (ask_1) - minPrice allowed by platform
self.price['between_sell'] = round(float(self.price['between_buy_1'] - self.minPrice[1]*3),self.price_precise[1])
# Calculate BSS price and win
self.price['BSS_price'] = self.price['between_sell']*self.price['rate_sell']
self.price['BSS_win'] = self.price['BSS_price']/self.price['direct_buy']
# print(self.price, " @", self.price_time)
        # in case the BSS price minus the handling fee (0.15%) is still cheaper
if self.price['BSS_win'] > self.trigger_threshold:
self.trading_begin_time = int(time.time()*1000)
            # because of the minimum trading quantity, calculate how much of the target coin (symbol) should be bought at the triangle price
self.cal_buy_volumn_symbol = self.buy_volumn/self.price['direct_buy']
# self.real_buy_volumn_symbol = (int(self.cal_buy_volumn_symbol/self.minQty[0]))*self.minQty[0]
buy_volumn_precise = int(-math.log10(self.minQty[0]))
self.real_buy_volumn_symbol = round(self.cal_buy_volumn_symbol,buy_volumn_precise)
            # because the fee is withdrawn directly from the trading volume, calculate the sell volume of the target coin
self.cal_sell_volumn_symbol = self.real_buy_volumn_symbol*(1-self.fee_quote)
self.real_sell_volumn_symbol = (int(self.cal_sell_volumn_symbol/self.minQty[1]))*self.minQty[1]
self.real_sell_volumn_symbol = round(self.real_sell_volumn_symbol, buy_volumn_precise)
            # calculate how much of the between reference coin is needed based on the real selling volume
            # because in FCoin, the market selling price is based on coin0 (usdt), use the bss price to calculate the volume
self.cal_trading_volumn_between = self.real_sell_volumn_symbol*self.price['between_sell']*(1-self.fee_quote)
# use round up integer to calculate the needed between reference coin volumn
# self.real_trading_volumn_between = (math.ceil(self.cal_trading_volumn_between/self.minQty[2]))*self.minQty[2]
# use round instead of ceil to balance the volumn of Between Coin and Main Coin
between_volumn_precise = int(-math.log10(self.minQty[2]))
self.real_trading_volumn_between = round(self.cal_trading_volumn_between,between_volumn_precise)
# buy target coin with direct reference coin in Limit Trading
# self.response_1 = BinanceRestLib.createLimitOrder(self.symbol,self.coin[0],"BUY",self.real_buy_volumn_symbol,self.price['direct_buy'],self.time_offset)
self.response_1 = self.fcoin.buy(self.symbol+self.coin[0], self.price['direct_buy'], self.real_buy_volumn_symbol)
print("Fill the triangle trading condition ----------------------------------")
print("Calculated Buy Symbol: ", self.cal_buy_volumn_symbol)
print("Real Buy Symbol: ", self.real_buy_volumn_symbol)
print("Calculated Sell Symbol: ", self.cal_sell_volumn_symbol)
print("Real Sell Symbol: ", self.real_sell_volumn_symbol)
print("Calculated Trading Between: ", self.cal_trading_volumn_between)
print("Real Trading Between: ",self.real_trading_volumn_between)
print("Response_1------------------------")
print(json.dumps(self.response_1, indent=4))
# get the order id
orderId = self.response_1['data']
            # wait a small time interval until the limit trade is taken by others
for i in range(2):
self.limit_order = self.fcoin.get_order(orderId)
if 'data' in self.limit_order:
self.limit_order = self.limit_order['data']
# if the order is filled, complete the triangle trading
if self.limit_order['state'] == "filled":
# finish the selling process
self.triangleTradingSellLimit()
return 1
else:
print("Unknown response:")
print(json.dumps(self.limit_order, indent=4))
print("End of %dth loop" %(i))
# if the limit trading is not taken by others, cancel it
self.cancel_limit_order = self.fcoin.cancel_order(orderId)
print("cancel_limit_order------------------------")
print(json.dumps(self.cancel_limit_order, indent=4))
            # sometimes the trade has already been executed; in this case an error code will be returned
# if 'msg' in self.cancel_limit_order:
            # No return value if an HTTPError exception has been caught in fcoin3 in the signed request
if self.cancel_limit_order is None:
print("Special Case: the trading is already filled. Complete the rest selling phase")
# finish the selling process
self.triangleTradingSellLimit()
return 1
            # check whether some part of the trade was already completed before the cancel
# get the current price
thread1 = FCoinRestLib.getPriceThread(1, "Thread-1", self.symbol, self.coin[0], self.volumn[0])
thread1.start()
self.cancel_order = self.fcoin.get_order(orderId)
print("cancel_order------------------------")
print(json.dumps(self.cancel_order, indent=4))
thread1.join()
# calculate the trading price
cancel_sell_price = round(float(thread1.price['asks_1'] - self.minPrice[0]),self.price_precise[0])
            # if only a part was completed, sell it directly
            # check whether the response is successful
if 'data' in self.cancel_order:
self.cancel_order = self.cancel_order['data']
                # if the state is pending, wait until the handling on the server is finished
if self.cancel_order['state'] == 'pending_cancel':
while True:
time.sleep(1)
self.cancel_order = self.fcoin.get_order(orderId)
print("Pending cancel_order------------------------")
print(json.dumps(self.cancel_order, indent=4))
if 'data' in self.cancel_order:
self.cancel_order = self.cancel_order['data']
if self.cancel_order['state'] != 'pending_cancel':
break
else:
print("Still pending cancel")
else:
print("Bed Server response during the cancel pending")
# check whether the state is marked as "partial_canceled"
if self.cancel_order['state'] == 'partial_canceled':
print("Sell the not canceled part")
                    # calculate the already filled volume
cal_cancel_volumn = float(self.cancel_order['filled_amount']) - float(self.cancel_order['fill_fees'])
real_cancel_volumn = round(cal_cancel_volumn, buy_volumn_precise)
# in case the filled amount is too small
if real_cancel_volumn >= 3:
# create a limit trade to sell all target coin back to coin[0], with the sell_1 price
self.limit_order = self.fcoin.sell(self.symbol+self.coin[0], cancel_sell_price, real_cancel_volumn)
limit_order_Id = self.limit_order['data']
# wait until the trading is completed
while True:
time.sleep(1)
# print("Waiting limit sell for target coin ...")
self.limit_order = self.fcoin.get_order(limit_order_Id)
if 'data' in self.limit_order:
self.limit_order = self.limit_order['data']
if self.limit_order['state'] == "filled":
break
                                # if the trade is cancelled manually, wait 5 min before the next round
if self.limit_order['state'] == "canceled":
print("Cancel the trading and sell the coin manually")
time.sleep(300)
break
else:
print("Unknown status:")
print(json.dumps(self.limit_order, indent=4))
print(json.dumps(self.limit_order, indent=4))
else:
print("already filled amount is too less, ignore it")
                # if the order has already been filled
if self.cancel_order['state'] == 'filled':
print("Trade is canceled at the same time as the state is returned")
# finish the selling process
self.triangleTradingSellLimit()
return 1
self.trading_end_time = int(time.time()*1000)
return 0
    # the sell phase always uses limit trading on the between coin
def triangleTradingSellLimit(self):
# get current between sell price
thread2 = FCoinRestLib.getPriceThread(2, "Thread-2", self.symbol, self.coin[1], self.volumn[1])
thread2.start()
        # first sell the between reference coin at market price
self.response_3 = self.fcoin.sellMarket(self.coin[1]+self.coin[0], self.real_trading_volumn_between)
print(json.dumps(self.response_3, indent=4))
thread2.join()
# # create a current sell price based on current sell_1 price
# current_between_sell = round(float(thread2.price['asks_1'] - self.minPrice[1]),self.price_precise[1])
# # calculate minimum price to sell the between coin without loss
# #TODO: need to get the real selling price from response3
# minimum_between_sell = round((self.price['direct_buy']*1.0015/self.price['rate_sell']),self.price_precise[1])
# print(self.price, " @", self.price_time)
# print("Saved price: ", self.price['between_sell'])
# print("Current sell price: ", current_between_sell)
# print("Minimum sell price: ", minimum_between_sell)
# # choose the bigger price betwen current sell price and minimum sell price as the executing selling price
# if minimum_between_sell > current_between_sell:
# self.price['between_sell'] = minimum_between_sell
# else:
# self.price['between_sell'] = current_between_sell
        # use a conservative trading price to guarantee continued trading
minimum_between_sell = round((self.price['direct_buy']*(self.trigger_threshold+self.target_win_rate)/self.price['rate_sell']),self.price_precise[1])
self.price['between_sell'] = minimum_between_sell
print("Saved price: ", self.price['between_sell'])
print("Minimum sell price: ", minimum_between_sell)
print("begin between sell")
# create limit trading for between coin
self.response_2 = self.fcoin.sell(self.symbol+self.coin[1], self.price['between_sell'], self.real_sell_volumn_symbol)
print(json.dumps(self.response_2, indent=4))
# get the order id
orderId = self.response_2['data']
# check the order state
self.limit_order = self.fcoin.get_order(orderId)
begin_waiting_time = time.time()
begin_time = time.time()
# wait until the limit order is filled
while True:
time.sleep(1)
# print("Waiting limit sell for bewteen coin ...")
self.limit_order = self.fcoin.get_order(orderId)
if 'data' in self.limit_order:
self.limit_order = self.limit_order['data']
if self.limit_order['state'] == "filled":
                    # update the basic buy volume with the last win
self.minNotional *= (1+self.target_win_rate)
print("New Basic Buy Volumn is: ", self.buy_volumn)
break
                # if the trade is cancelled manually, wait 5 min before the next round
if self.limit_order['state'] == "canceled":
print("Cancel the trading and sell the coin manually")
time.sleep(300)
break
else:
print("Unknown status:")
print(json.dumps(self.limit_order, indent=4))
            # resynchronize the time offset every 10 min
if time.time()-begin_time > 600:
                # if the limit sell has already been held for more than 3 hours, keep the current limit sell and start another round.
if time.time()-begin_waiting_time > 10800:
current_price = FCoinRestLib.getCurrentPrice(self.symbol, self.coin[1], self.volumn[1])
print("Special case, that a limit trade is not taken for a long time. ", current_price['asks_vol'])
# in case the current limit sell price is 5% less than the expected one
if current_price['asks_vol']/self.price['between_sell'] < 0.95:
break
print(json.dumps(self.limit_order, indent=4))
self.saveAccountInfo()
self.trading_end_time = int(time.time()*1000)
def writeLog(self):
file_out = open('TradingInfo.log','a')
file_out.write(str(datetime.datetime.now())+'\n')
file_out.write(str(self.price) + '\n')
file_out.write("Fill the triangle trading condition ----------------------------------")
file_out.write("Calculated Buy Symbol: %f \n" %(self.cal_buy_volumn_symbol))
file_out.write("Real Buy Symbol: %f \n" %(self.real_buy_volumn_symbol))
file_out.write("Calculated Trading Between: %f \n" %(self.cal_trading_volumn_between))
file_out.write("Real Trading Between: %f \n" %(self.real_trading_volumn_between))
file_out.write("Trading begin -------------------------------@ %f \n" %(self.trading_begin_time))
file_out.write("Step 1:\n")
json.dump(self.response_1, file_out)
file_out.write("Step 2:\n")
json.dump(self.response_2, file_out)
file_out.write("Step 3:\n")
json.dump(self.response_3, file_out)
file_out.write("Trading end -------------------------------@ %f \n" %(self.trading_end_time))
file_out.write("Buy volumn: %f \n" %(self.real_buy_volumn_symbol))
file_out.write("Sell volumn: %f \n" %(self.real_sell_volumn_symbol))
file_out.write("Between Sell volumn: %f \n" %(self.real_trading_volumn_between))
file_out.close()
| 2.828125
| 3
|
airflow_run/__init__.py
|
TheWeatherCompany/airflow-run
| 3
|
12785009
|
<filename>airflow_run/__init__.py<gh_stars>1-10
from airflow_run.run import AirflowRun
| 1.15625
| 1
|
PoseEstimationModel.py
|
zabir-nabil/keras-human-pose
| 10
|
12785010
|
<gh_stars>1-10
import keras
from keras.models import Sequential
from keras.models import Model
from keras.layers import Input, Dense, Activation, Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import Concatenate
import scipy
import math
import numpy as np
import util
import cv2
class PoseEstimationModel:
def __init__(self, weights_path):
self.weights_path = weights_path
def relu(self, x):
return Activation('relu')(x)
def conv(self, x, nf, ks, name):
x1 = Conv2D(nf, (ks, ks), padding='same', name=name)(x)
return x1
def pooling(self, x, ks, st, name):
x = MaxPooling2D((ks, ks), strides=(st, st), name=name)(x)
return x
def vgg_block(self, x):
# Block 1
x = self.conv(x, 64, 3, "conv1_1")
x = self.relu(x)
x = self.conv(x, 64, 3, "conv1_2")
x = self.relu(x)
x = self.pooling(x, 2, 2, "pool1_1")
# Block 2
x = self.conv(x, 128, 3, "conv2_1")
x = self.relu(x)
x = self.conv(x, 128, 3, "conv2_2")
x = self.relu(x)
x = self.pooling(x, 2, 2, "pool2_1")
# Block 3
x = self.conv(x, 256, 3, "conv3_1")
x = self.relu(x)
x = self.conv(x, 256, 3, "conv3_2")
x = self.relu(x)
x = self.conv(x, 256, 3, "conv3_3")
x = self.relu(x)
x = self.conv(x, 256, 3, "conv3_4")
x = self.relu(x)
x = self.pooling(x, 2, 2, "pool3_1")
# Block 4
x = self.conv(x, 512, 3, "conv4_1")
x = self.relu(x)
x = self.conv(x, 512, 3, "conv4_2")
x = self.relu(x)
# Additional non vgg layers
x = self.conv(x, 256, 3, "conv4_3_CPM")
x = self.relu(x)
x = self.conv(x, 128, 3, "conv4_4_CPM")
x = self.relu(x)
return x
def stage1_block(self, x, num_p, branch):
# Block 1
x = self.conv(x, 128, 3, "conv5_1_CPM_L%d" % branch)
x = self.relu(x)
x = self.conv(x, 128, 3, "conv5_2_CPM_L%d" % branch)
x = self.relu(x)
x = self.conv(x, 128, 3, "conv5_3_CPM_L%d" % branch)
x = self.relu(x)
x = self.conv(x, 512, 1, "conv5_4_CPM_L%d" % branch)
x = self.relu(x)
x = self.conv(x, num_p, 1, "conv5_5_CPM_L%d" % branch)
return x
def stageT_block(self, x, num_p, stage, branch):
# Block 1
x = self.conv(x, 128, 7, "Mconv1_stage%d_L%d" % (stage, branch))
x = self.relu(x)
x = self.conv(x, 128, 7, "Mconv2_stage%d_L%d" % (stage, branch))
x = self.relu(x)
x = self.conv(x, 128, 7, "Mconv3_stage%d_L%d" % (stage, branch))
x = self.relu(x)
x = self.conv(x, 128, 7, "Mconv4_stage%d_L%d" % (stage, branch))
x = self.relu(x)
x = self.conv(x, 128, 7, "Mconv5_stage%d_L%d" % (stage, branch))
x = self.relu(x)
x = self.conv(x, 128, 1, "Mconv6_stage%d_L%d" % (stage, branch))
x = self.relu(x)
x = self.conv(x, num_p, 1, "Mconv7_stage%d_L%d" % (stage, branch))
return x
def create_model(self):
input_shape = (None,None,3)
img_input = Input(shape=input_shape)
stages = 6
np_branch1 = 38
np_branch2 = 19
img_normalized = Lambda(lambda x: x / 256 - 0.5)(img_input) # [-0.5, 0.5]
# VGG
stage0_out = self.vgg_block(img_normalized)
# stage 1
stage1_branch1_out = self.stage1_block(stage0_out, np_branch1, 1)
stage1_branch2_out = self.stage1_block(stage0_out, np_branch2, 2)
x = Concatenate()([stage1_branch1_out, stage1_branch2_out, stage0_out])
# stage t >= 2
for sn in range(2, stages + 1):
stageT_branch1_out = self.stageT_block(x, np_branch1, sn, 1)
stageT_branch2_out = self.stageT_block(x, np_branch2, sn, 2)
if (sn < stages):
x = Concatenate()([stageT_branch1_out, stageT_branch2_out, stage0_out])
model = Model(img_input, [stageT_branch1_out, stageT_branch2_out])
model.load_weights(self.weights_path)
# save the model
self.model = model
return model
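        # Example usage (illustrative; the weights path below is an assumption, not part of this repo):
        # model = PoseEstimationModel("model/keras/model.h5").create_model()
        # paf_output, heatmap_output = model.predict(np.zeros((1, 368, 368, 3)))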
| 2.640625
| 3
|
src/colors/colors.py
|
pmk456/Py-Colors
| 1
|
12785011
|
"""
Author: <NAME>
Licence: Apache 2.0
Version: 0.1
"""
import os
import sys
_colors = ["RED", "BLUE", "CYAN", "YELLOW", "GREEN", "MAGENTA", "WHITE", "BLACK"]
backgrounds = ["BG_RED", "BG_BLUE", "BG_CYAN", "BG_YELLOW", "BG_GREEN", "BG_MAGENTA", "BG_WHITE", "BG_BLACK"]
styles = ['BOLD', 'REVERSE', 'UNDERLINE', 'RESET', 'ITALIC', 'BLINK', 'INVISIBLE', "RED_UNDERLINE",
"GREEN_UNDERLINE", 'DOUBLE_UNDERLINE', 'STRIKE', 'RED_UNDERLINE', 'GREEN_UNDERLINE', 'INVISIBLE']
if sys.platform == "win32":
path = __file__
if not os.path.exists(path.replace(
"colors.py", "completed")):
print("Registering Colors In Windows Registry......")
cmd = os.popen(r'reg add HKEY_CURRENT_USER\Console /v VirtualTerminalLevel /t REG_DWORD /d 0x00000001 '
'/f').read()
assert cmd.rstrip() == "The operation completed successfully.", "Run As Administrator"
with open(path.replace("colors.py", "completed"), "x") as file: ...
print("Successfully Registered Colors In Windows Registry")
colors = {
'RED': "\033[1;31m", 'BLUE': "\033[1;34m", 'CYAN': "\033[1;36m", 'YELLOW': "\u001b[33m", 'GREEN': "\033[0;32m",
'MAGENTA': "\u001b[35m", 'WHITE': "\u001b[37m", 'BLACK': "\u001b[30m", 'BG_BLACK': "\u001b[40m",
'BG_RED': "\u001b[41m", 'BG_GREEN': "\u001b[42m", 'BG_YELLOW': "\u001b[43m", 'BG_BLUE': "\u001b[44m",
'BG_MAGENTA': "\u001b[45m", 'BG_CYAN': "\u001b[46m", 'BG_WHITE': "\u001b[47m", 'RESET': "\033[0;0m",
'BOLD': "\033[;1m", 'REVERSE': "\033[7m", 'UNDERLINE': "\u001b[4m", 'ITALIC': "\u001b[3m", 'BLINK': "\033[5m",
'INVISIBLE': "\033[8m", 'RED_UNDERLINE': "\u001b[4m\u001b[31m", 'GREEN_UNDERLINE': "\u001b[4m\u001b[32m",
'STRIKE': "\u001b[9m", "CURLY_UNDERLINE": '\u001b[4:3m'}
def is_string(string):
"""
    :param string: string to check whether it is a string
:raise ValueError if string is not a string
"""
if not isinstance(string, str):
raise ValueError("Please Enter A String Not %s" % str(type(string))[8:].replace("'>", ""))
def color(string, fg=None, bg=None, style=None):
"""
:param string: String
:param fg: ForeGround Color
:param bg: BackGround Color
:param style: Style To use
:return: string Formatted with fg, bg and style
"""
is_string(string)
if fg is not None:
if fg.upper() not in _colors:
raise ValueError("%s Not Available" % fg)
string = colors[fg.upper()] + string
if bg is not None:
if "BG_" + bg.upper() not in backgrounds:
raise ValueError("%s Not Available" % bg)
string = colors["BG_" + bg.upper()] + string
if style is not None:
if style.upper() not in styles:
raise ValueError("%s Style Not Available" % style)
string = colors[style.upper()] + string
string += reset
return string
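# Example usage (illustrative):
# print(color("error", fg="red", style="bold"))
# print(color("ok", fg="green", bg="black"))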
# Shortcut For Foreground Colors
blue = colors['BLUE']
green = colors['GREEN']
yellow = colors["YELLOW"]
white = colors["WHITE"]
cyan = colors["CYAN"]
magenta = colors["MAGENTA"]
red = colors["RED"]
black = colors["BLACK"]
# Shortcut For Background Colors
bg_black = colors["BG_BLACK"]
bg_red = colors["BG_RED"]
bg_blue = colors["BG_BLUE"]
bg_cyan = colors["BG_CYAN"]
bg_white = colors["BG_WHITE"]
bg_green = colors["BG_GREEN"]
bg_magenta = colors["BG_MAGENTA"]
bg_yellow = colors["BG_YELLOW"]
# Shortcut For Styles
bold = colors["BOLD"]
italic = colors["ITALIC"]
reverse = colors['REVERSE']
underline = colors['UNDERLINE']
red_underline = colors['RED_UNDERLINE']
green_underline = colors['GREEN_UNDERLINE']
invisible = colors['INVISIBLE']
blink = colors['BLINK']
strike = colors['STRIKE']
curly_underline = colors["CURLY_UNDERLINE"]
reset = colors["RESET"]
def print_all_colors():
for k, v in colors.items():
print(v, k, reset)
| 2.484375
| 2
|
sara_flexbe_behaviors/src/sara_flexbe_behaviors/leftorright_sm.py
|
WalkingMachine/sara_behaviors
| 5
|
12785012
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.ClosestObject import ClosestObject
from flexbe_states.check_condition_state import CheckConditionState
from sara_flexbe_states.sara_say import SaraSay
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Fri Jul 05 2019
@author: Huynh-Anh
'''
class leftOrRightSM(Behavior):
'''
return if an object is at the left or right of the robot
'''
def __init__(self):
super(leftOrRightSM, self).__init__()
self.name = 'leftOrRight'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:818 y:151, x:162 y:460
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['object'])
_state_machine.userdata.object = "snacks"
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:124 y:77
OperatableStateMachine.add('ClosestObject',
ClosestObject(),
transitions={'found': 'CheckAngle', 'not_found': 'failed', 'non_existant': 'failed'},
autonomy={'found': Autonomy.Off, 'not_found': Autonomy.Off, 'non_existant': Autonomy.Off},
remapping={'object': 'object', 'angle': 'angle', 'closestObject': 'closestObject'})
# x:342 y:132
OperatableStateMachine.add('CheckAngle',
CheckConditionState(predicate=lambda x: x>0),
transitions={'true': 'left', 'false': 'sayright'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
remapping={'input_value': 'angle'})
# x:552 y:92
OperatableStateMachine.add('left',
SaraSay(sentence=lambda x: "The "+ str(x[1]) + " is on the left of the" + str(x[0].name), input_keys=["object", "closestObject"], emotion=0, block=True),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off},
remapping={'object': 'object', 'closestObject': 'closestObject'})
# x:441 y:296
OperatableStateMachine.add('sayright',
SaraSay(sentence=lambda x: "The " + str(x[1]) + " is on the right of the " + str(x[0].name), input_keys=["object","closestObject"], emotion=0, block=True),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off},
remapping={'object': 'object', 'closestObject': 'closestObject'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
| 1.890625
| 2
|
tests/test_ipm.py
|
klauer/pcdsdevices
| 1
|
12785013
|
<reponame>klauer/pcdsdevices
import logging
import pytest
from ophyd.sim import make_fake_device
from unittest.mock import Mock
from pcdsdevices.inout import InOutRecordPositioner
from pcdsdevices.ipm import IPM
from conftest import HotfixFakeEpicsSignal
logger = logging.getLogger(__name__)
@pytest.fixture(scope='function')
def fake_ipm():
FakeIPM = make_fake_device(IPM)
FakeIPM.state.cls = HotfixFakeEpicsSignal
FakeIPM.diode.cls.state.cls = HotfixFakeEpicsSignal
ipm = FakeIPM("Test:My:IPM", name='test_ipm')
ipm.diode.state.sim_put(0)
ipm.diode.state.sim_set_enum_strs(['Unknown'] +
InOutRecordPositioner.states_list)
ipm.state.sim_put(0)
ipm.state.sim_set_enum_strs(['Unknown'] + IPM.states_list)
return ipm
def test_ipm_states(fake_ipm):
logger.debug('test_ipm_states')
ipm = fake_ipm
ipm.state.put(5)
assert ipm.removed
assert not ipm.inserted
ipm.state.put(1)
assert not ipm.removed
assert ipm.inserted
def test_ipm_motion(fake_ipm):
logger.debug('test_ipm_motion')
ipm = fake_ipm
# Remove IPM Targets
ipm.remove(wait=True, timeout=1.0)
assert ipm.state.get() == 5
# Insert IPM Targets
ipm.set(1)
assert ipm.state.get() == 1
# Move diodes in
ipm.diode.insert()
assert ipm.diode.state.get() == 1
    # Move diodes out
    ipm.diode.remove()
    assert ipm.diode.state.get() == 2
def test_ipm_subscriptions(fake_ipm):
logger.debug('test_ipm_subscriptions')
ipm = fake_ipm
# Subscribe a pseudo callback
cb = Mock()
ipm.subscribe(cb, event_type=ipm.SUB_STATE, run=False)
# Change the target state
ipm.state.put(2)
assert cb.called
| 2.15625
| 2
|
jowilder_utils.py
|
fielddaylab/OGDUtils
| 0
|
12785014
|
# google imports
# standard library imports
import sys
import copy
import pickle
import os
from collections import Counter
from io import BytesIO
from zipfile import ZipFile
import copy
import pickle
from math import ceil
import importlib
import urllib.request
# math imports
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
sns.set()
# Jupyter Imports
from IPython.display import display
# from google.colab import files
# ML imports
# models
from sklearn.naive_bayes import CategoricalNB
from sklearn.tree import ExtraTreeClassifier, DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from xgboost import XGBClassifier
from sklearn.linear_model import RidgeCV, SGDRegressor
from sklearn.svm import LinearSVR
# preprocessing
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder, KBinsDiscretizer
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.model_selection import train_test_split
# sampling
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler, EditedNearestNeighbours, RepeatedEditedNearestNeighbours
from imblearn.combine import SMOTEENN, SMOTETomek
# metrics
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import plot_precision_recall_curve, plot_confusion_matrix, plot_roc_curve
from sklearn.metrics import f1_score, roc_auc_score, roc_curve, accuracy_score
# other
from imblearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
# custom imports
import feature_utils as feat_util
def response_boxplot(df, category, verbose=False):
print('\n'+category)
fig, axs = plt.subplots(1, 3, figsize=(20, 10))
qs = ['EFL_yes_no', 'skill_low_med_high', 'enjoy_high_med_low_none']
for i, f in enumerate(['R0_quiz_response', 'R1_quiz_response', 'R2_quiz_response', ]):
if verbose:
print(qs[i])
bp = df.boxplot(column=category, by=df[f].astype(
'category'), ax=axs[i])
bp.set_xlabel('')
for choice in range(df[f].min(), df[f].max()+1):
query = f"{f}=={choice}"
cat_df = df.query(query)[category]
num_chose = len(cat_df)
mean = cat_df.mean()
std = cat_df.std()
if verbose:
print(
f'{f} # chose {choice}: {num_chose} ({round(num_chose/len(df)*100)}%). Avg {mean}, std {std}.')
plt.suptitle(f'{category} Boxplot')
fig.show()
def group_by_func(df, func, title='', show=True):
r0_groups = {0: 'native', 1: 'nonnative'}
r1_groups = {0: 'not very good skill',
1: 'okay skill', 2: 'very good skill'}
r2_groups = {0: 'really enjoy', 1: 'enjoy', 2: 'okay', 3: 'not enjoy'}
def group_string(r0, r1, r2): return ', '.join(
[r0_groups[r0], r1_groups[r1], r2_groups[r2]])
result_dfs = [pd.DataFrame(index=r1_groups.values(), columns=r2_groups.values(
)), pd.DataFrame(index=r1_groups.values(), columns=r2_groups.values())]
if show:
print(f'{"-"*6} {title} {"-"*6}')
for r0 in [0, 1]:
subtitle = "Nonnatives" if r0 else "Natives"
if show:
print(f'\n{subtitle}:')
tdf0 = df.query(f"R0_quiz_response == {r0}")
for r1 in [0, 1, 2]:
tdf1 = tdf0.query(f"R1_quiz_response == {r1}")
for r2 in [0, 1, 2, 3]:
tdf2 = tdf1.query(f"R2_quiz_response == {r2}")
result_dfs[r0].loc[r1_groups[r1], r2_groups[r2]
] = func(df, tdf0, tdf1, tdf2)
if show:
display(result_dfs[r0])
return result_dfs
def standard_group_by_func(fulldf, per_category_stats_list=None):
    per_category_stats_list = per_category_stats_list or ['sess_count_clicks',
'sess_count_hovers',
'sess_meaningful_action_count',
'sess_EventCount',
'sess_count_notebook_uses',
'sess_avg_time_between_clicks',
'sess_first_enc_words_read',
'sess_first_enc_boxes_read',
'sess_num_enc',
'sess_first_enc_duration',
'sess_first_enc_avg_wps',
'sess_first_enc_var_wps',
'sess_first_enc_avg_tbps',
'sess_first_enc_var_tbps',
'sess_start_obj',
'sess_end_obj',
'start_level',
'max_level',
'sessDuration']
dfs_list = []
title_list = []
def df_func(df, tdf0, tdf1, tdf2): return len(tdf2)
title = 'count'
dfs = group_by_func(fulldf, df_func, title)
dfs_list.append(dfs)
title_list.append(title)
def df_func(df, tdf0, tdf1, tdf2): return round(len(tdf2)/len(df)*100, 2)
title = 'percent total pop'
dfs = group_by_func(fulldf, df_func, title)
dfs_list.append(dfs)
title_list.append(title)
def df_func(df, tdf0, tdf1, tdf2): return round(len(tdf2)/len(tdf0)*100, 2)
title = 'percent native class pop'
dfs = group_by_func(fulldf, df_func, title)
dfs_list.append(dfs)
title_list.append(title)
for category in per_category_stats_list:
df_func = get_avg_std_df_func(category)
title = f'(avg, std) {category}'
dfs = group_by_func(fulldf, df_func, title)
dfs_list.append(dfs)
title_list.append(title)
return title_list, dfs_list
def get_avg_std_df_func(category_name):
def inner(df, tdf0, tdf1, tdf2):
mean = tdf2[category_name].mean()
std = tdf2[category_name].std()
if not pd.isna(mean):
mean = round(mean, 2)
if not pd.isna(std):
std = round(std, 2)
return (mean, std)
return inner
def html_stats(df):
    html_strs = ['<div class="container">', '<h3>Stats</h3>']
qs = ['EFL_yes_no', 'skill_low_med_high', 'enjoy_high_med_low_none']
html_strs.append(f'<p> Total pop {len(df)} </p>')
for i, f in enumerate(['R0_quiz_response', 'R1_quiz_response', 'R2_quiz_response', ]):
html_strs.append(f'<p> {qs[i]}</p>')
for choice in range(df[f].min(), df[f].max()+1):
query = f"{f}=={choice}"
cat_df = df.query(query)
num_chose = len(cat_df)
html_strs.append(
f'<p>{f} # chose {choice}: {num_chose} ({round(num_chose/len(df)*100)}%).</p>')
return '\n'.join(html_strs+['</div>'])
def full_html(base_df, title_list, dfs_list, suptitle=None):
HEADER = '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<style>
.flex-container {
display: flex;
flex-wrap: wrap;
}
.container {
border: thick solid black;
padding: 10px;
margin: 5px;
}
.container table:nth-of-type(2) td {
background-color: rgb(161, 161, 230);
}
.container table:nth-of-type(2) th {
background-color: rgb(20, 20, 194);
color: white;
}
.container table:nth-of-type(2n-1) td {
background-color: rgb(235, 158, 158);
}
.container table:nth-of-type(2n-1) th {
background-color: rgb(160, 11, 11);
color: white;
}
.break {
flex-basis: 100%;
height: 0;
}
</style>
<div class="flex-container">'''
FOOTER = ''' </div>
</body>
</html>'''
def table_header(title): return f''' <div class="container">
<h3>{title}</h3>'''
table_footer = ''' </div>'''
def table_html(title, dfs): return '\n'.join([table_header(
title), "<p>Natives:</p>", dfs[0].to_html(), "<p>Nonnatives:</p>", dfs[1].to_html(), table_footer])
if suptitle is not None:
suptitle = f'<h2>{suptitle}</h2>\n<div class="break"></div> <!-- break -->'
else:
suptitle = ''
return '\n'.join([HEADER, suptitle, html_stats(base_df)] +
[table_html(t, dfs) for t, dfs in zip(title_list, dfs_list)] +
[FOOTER])
def download_full_html(base_df, title_list, dfs_list, filename, suptitle=None):
with open(filename, 'w+') as f:
f.write(full_html(base_df, title_list, dfs_list, suptitle=suptitle))
print("Wrote to", filename)
files.download(filename)
onext_int_feats = [f'obj{i}_onext_int' for i in range(80)]
onext_int_cats = [["nan", 1],
["nan", 11],
["nan", 12, 86, 111, 125],
["nan", 13, 14, 113, 116, 118],
["nan", 14, 15, 113, 114, 116, 118],
["nan", 13, 15, 113, 114, 116, 118],
["nan", 16, 86, 115, 118, 132, 161],
["nan", 17, 86, 115, 118, 128, 161],
["nan", 18, 86, 115, 118, 161],
["nan", 19, 86, 117, 118, 127, 133, 134, 161],
["nan", 20, 133, 134, 136],
["nan", 2, 80, 81, 82, 83],
["nan", 21, 86, 117, 127, 136, 137, 161],
["nan", 22, 137, 141],
["nan", 23, 24, 86, 117, 127, 136, 161],
["nan", 23, 24, 117, 127, 136, 161],
["nan", 25, 86, 117, 118, 127, 136, 140, 147, 151, 161],
["nan", 26, 142, 145],
["nan", 27, 143],
["nan", 28, 86, 117, 118, 136, 140, 150, 161],
["nan", 29, 119, 130],
["nan", 29, 30, 35, 86, 117, 118, 126, 136, 140, 149],
["nan", 3, 80, 82, 83, 86, 87, 88, 93],
["nan", 31, 38],
["nan", 32, 153],
["nan", 33, 154],
["nan", 34, 155],
["nan", 35, 156],
["nan", 36, 157],
["nan", 37, 158],
["nan", 30],
["nan", 39, 163],
["nan", 40, 160],
["nan", 3],
["nan", 41, 164, 166],
["nan", 42, 166],
["nan", 30],
["nan", 44, 85, 125],
["nan", 29, 45, 47, 84, 118, 125, 136, 140, 149, 168, 169, 184],
["nan", 45, 46, 169, 170],
["nan", 29, 45, 47, 92, 118, 136, 140, 149, 169, 184],
["nan", 29, 45, 48, 92, 118, 140, 149, 168, 184],
["nan", 46, 49, 168],
["nan", 46, 50, 168, 170],
["nan", 5, 80, 82, 83, 86, 89, 91, 95, 97, 125],
["nan", 29, 51, 92, 118, 136, 140, 149, 168, 184],
["nan", 52, 92, 118, 136, 149, 171, 184],
["nan", 53, 54, 92, 118, 136, 140, 149, 184],
["nan", 53, 54, 55, 59, 60, 90, 92, 94,
118, 136, 140, 149, 168, 184],
["nan", 53, 55, 59, 60, 90, 92, 94, 118, 136, 140, 149, 184],
["nan", 55, 56, 59, 60, 149, 174],
["nan", 57, 59, 60, 174],
["nan", 58, 59, 60, 136, 172, 174, 184],
["nan", 29, 59, 60, 61, 92, 118, 136, 149, 168, 172, 184],
["nan", 55, 56, 57, 58, 60, 61, 140, 172, 174, 184],
["nan", 6, 80, 82, 83, 86, 98, 100, 125],
["nan", 55, 56, 57, 58, 59, 61, 92, 118,
136, 140, 149, 172, 174, 184],
["nan", 59, 62, 136, 140, 149, 172, 173, 175, 184],
["nan", 63, 64, 176],
["nan", 64, 66, 149, 175, 184],
["nan", 29, 65, 66, 92, 118, 136, 140, 172, 175, 177, 184],
["nan", 66, 67, 68, 92, 118, 136, 140, 146, 175, 177, 184],
["nan", 67, 144],
["nan", 29, 64, 65, 68, 92, 118, 131, 136,
140, 148, 149, 172, 175, 177, 184],
["nan", 92, 118, 122, 123, 124, 131, 136, 140,
146, 148, 168, 172, 175, 177, 184],
["nan", 70],
["nan", 7],
["nan", 71, 178],
["nan", 72, 179],
["nan", 73, 180],
["nan", 74, 181],
["nan", 75, 182],
["nan", 69],
["nan", 77, 78, 185],
["nan", 78, 185],
["nan", 79],
[0],
["nan", 8],
["nan", 9, 103],
["nan", 104, 105, 108]]
QA_1_feats = [f'Q{i}_A1' for i in range(19)]
QA_1_cats = [['0', 'A', 'B', 'C', 'D'],
['0', 'A', 'B', 'C', 'D'],
['0', 'A', 'B', 'C', 'D'],
['0', 'A', 'B', 'C', 'D'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', '?', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'F', 'G', 'I', 'M', 'N', 'O', 'P', 'Q',
'R', 'S', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f'],
['0', 'Q', 'V', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f'],
['0', '?', 'X', 'Y', 'Z', 'b', 'c', 'd', 'e', 'f'],
['0', 'X', 'Y', 'b', 'c', 'd', 'e', 'f'],
['0', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f']]
def get_preprocessor(df, scaler=StandardScaler(), imputer=SimpleImputer(strategy='constant'), bool_dtype='int64'):
"""
By default has a number of steps:
1. drops columns from use in preprocessor if present:
- [f'Q{q}_answers' for q in range(19)]
- ["play_year", "play_month", "play_day", "play_hour", "play_minute", "play_second"]
- ["_continue", "continue", "save_code", "music", "hq", "fullscreen", "persistentSessionID"]
2. Creates a preprocessor for all non-y columns and non-boolean columns with the following steps:
a. Standard Scaler (0 mean, 1 std)
b. Simple Imputer(strategy='constant') (fill NaN with 0)
    3. Builds a ColumnTransformer over the numeric columns (scaled and imputed) and boolean columns (passed through)
4. returns the unfitted preprocessor (sklearn pipeline), and the unprocessed X dataframe
:param df: jowilder dataframe
:param scaler: sklearn compatible scaler
:param imputer: sklearn compatible imputer
:return: the unfitted preprocessor (sklearn pipeline), and the unprocessed X dataframe
"""
df = df.drop(
[f'Q{q}_answers' for q in range(19)] + ["play_year", "play_month", "play_day", "play_hour", "play_minute",
"play_second",
"_continue", "continue", "save_code", "music", "hq", "fullscreen",
"persistentSessionID", ], axis=1, errors='ignore').copy()
y_cols, bool_cols, num_cols = separate_columns(df, bool_dtype=bool_dtype)
X = df.loc[:, num_cols+bool_cols]
# too complicated to allow for pipeline order
# pipeline_strings = [pipeline_order[i:i+2] for i in range(0,len(pipeline_order),2)]
# transformers = []
# num_sa, num_sc, num_im = 0,0,0
# for s in pipeline_strings:
# if s == 'Sa':
# transformer = make_pipeline(sampler)
# cols = num_cols + bool_cols
# name = f'{s}{num_sa}'
# num_sa += 1
# elif s == 'Sc':
# transformer = scaler
# name = f'{s}{num_sc}'
# cols = num_cols
# num_sc += 1
# elif s == 'Im':
# transformer = imputer
# name = f'{s}{num_im}'
# cols = num_cols
# num_im += 1
# else:
# raise ValueError("Pipeline substrings must be Sa Sc or Im")
# transformers.append((name, transformer, cols))
def col_str_to_int(col_strs): return [
X.columns.get_loc(s) for s in col_strs]
column_transformer = ColumnTransformer(
transformers=[
('num', make_pipeline(scaler, imputer), col_str_to_int(num_cols)),
('bool', 'passthrough', col_str_to_int(bool_cols))
],
remainder='drop')
return column_transformer, X
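# Example usage (illustrative; `df` is assumed to be a loaded jowilder feature dataframe):
# preprocessor, X = get_preprocessor(df)
# X_processed = preprocessor.fit_transform(X)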
def get_ys(df):
"""
:rtype: dictionary of y columns (df series). keys: y0,y1,y2,y1_bin,y2_bin,y1_bin_x,y2_bin_x
"""
ys = {}
for key, y_col in [
('y0', 'R0_quiz_response'),
('y1', 'R1_quiz_response'),
('y2', 'R2_quiz_response'),
('y1_bin', 'R1_quiz_response_bin'),
('y1_bin_0v12', 'R1_quiz_response_0v12'),
('y1_bin_01v2', 'R1_quiz_response_01v2'),
('y1_bin_x', 'R1_quiz_response_bin_x'),
('y2_bin', 'R2_quiz_response_bin'),
('y2_bin_x', 'R2_quiz_response_bin_x'),
('y2_bin_0v123', 'R2_quiz_response_bin0v123'),
('y2_bin_01v23', 'R2_quiz_response_bin01v23'),
('y2_bin_012v3', 'R2_quiz_response_bin012v3'),
]:
if y_col in df.columns:
ys[key] = df.loc[:, y_col].astype('category').copy()
return ys
def separate_columns(df, bool_dtype='int64', expect_bool_cols = True) -> (list, list, list):
"""
:param df:
    :param bool_dtype: Defaults to 'int64'. Should be 'int64' if the data comes from a CSV import, otherwise
    'uint8' if it comes from pd.get_dummies.
:return: tuple of lists of column names for y_columns, bool_columns, and integer_columns
"""
y_cols = [col for col in df.columns if 'quiz_response' in col]
bool_cols = [col for col in df.select_dtypes(include=[bool_dtype])
if np.isin(df[col].dropna().unique(), [0, 1]).all() and
col not in y_cols]
num_cols = [
col for col in df.columns if col not in bool_cols and col not in y_cols]
if not bool_cols and expect_bool_cols:
        print('Warning! No bool columns. Consider changing bool_dtype="int64" to "uint8"')
return y_cols, bool_cols, num_cols
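# Example usage (illustrative):
# y_cols, bool_cols, num_cols = separate_columns(df)
# X = df[num_cols + bool_cols]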
end_obj_to_last_Q = {
9: 0,
10: 3,
11: 3,
12: 3,
13: 3,
14: 3,
15: 3,
16: 3,
17: 3,
18: 3,
19: 3,
20: 3,
21: 3,
22: 3,
23: 3,
24: 3,
25: 3,
26: 3,
27: 3,
28: 3,
29: 3,
30: 3,
31: 3,
32: 4,
33: 5,
34: 6,
35: 7,
36: 8,
37: 9,
38: 9,
39: 10,
40: 11,
41: 12,
42: 13,
43: 13,
44: 13,
45: 13,
46: 13,
47: 13,
48: 13,
49: 13,
50: 13,
51: 13,
52: 13,
53: 13,
54: 13,
55: 13,
56: 13,
57: 13,
58: 13,
59: 13,
60: 13,
61: 13,
62: 13,
63: 13,
64: 13,
65: 13,
66: 13,
67: 13,
68: 13,
69: 13,
70: 13,
71: 14,
72: 15,
73: 16,
74: 17,
75: 18,
76: 18,
77: 18,
78: 18,
79: 18,
}
end_obj_to_last_lvl = {
0: 0,
1: 0,
2: 0,
3: 1,
4: 2,
5: 2,
6: 3,
7: 3,
8: 4,
9: 4,
10: 4,
11: 4,
12: 5,
13: 6,
14: 6,
15: 6,
16: 6,
17: 6,
18: 6,
19: 7,
20: 7,
21: 8,
22: 8,
23: 9,
24: 9,
25: 9,
26: 10,
27: 10,
28: 11,
29: 11,
30: 12,
31: 12,
32: 12,
33: 12,
34: 12,
35: 12,
36: 12,
37: 12,
38: 12,
39: 12,
40: 12,
41: 12,
42: 12,
43: 13,
44: 13,
45: 14,
46: 15,
47: 15,
48: 16,
49: 16,
50: 17,
51: 17,
52: 18,
53: 18,
54: 18,
55: 18,
56: 18,
57: 18,
58: 18,
59: 18,
60: 18,
61: 18,
62: 19,
63: 19,
64: 19,
65: 20,
66: 20,
67: 21,
68: 21,
69: 22,
70: 22,
71: 22,
72: 22,
73: 22,
74: 22,
75: 22,
76: 22,
77: 23,
78: 23,
79: 23,
}
class GridSearcher():
def __init__(self, csv_fpath=None, df=None, preprocessor=None, fillna=0, meta=[], expect_bool_cols=True):
# either give csv_fpath or df.
assert csv_fpath or not df.empty
# load df
if df is None:
print(f'Loading from {csv_fpath}...')
self.df, self.meta = feat_util.open_csv_from_path_with_meta(
csv_fpath, index_col=0)
else:
self.df, self.meta = df, meta
# set X and ys, and preprocessor
if not preprocessor:
self.preprocessor, self.X = get_preprocessor(self.df)
self.X = self.X.fillna(fillna)
else:
_, bool_cols, num_cols = separate_columns(self.df, expect_bool_cols=expect_bool_cols)
self.X = df[bool_cols+num_cols]
self.preprocessor = preprocessor
self.ys = get_ys(self.df)
# set object vars
self.model_dict = {}
self.cur_model = None
def split_data(self):
nonnull_X, nonnull_y = feat_util.remove_nan_labels(self.X, self.y)
X_train, X_test, y_train, y_test = train_test_split(
nonnull_X, nonnull_y, test_size=0.2, random_state=1)
self.X_train, self.X_test, self.y_train, self.y_test = X_train, X_test, y_train, y_test
def set_y(self, y_key=None, other_col=None):
if y_key:
print(f'Switching to {y_key}...')
self.y = self.ys[y_key]
elif other_col:
self.y = self.X[other_col]
self.X = self.X.drop(other_col, axis=1)
else:
print("Did not change y. Invalid inputs.")
self.split_data()
def run_fit(self, classifier, sampler=None, verbose=False, preprocess_twice=True, sampler_index=None, full_pipeline=False):
        # fit self.cur_model as a pipeline of the given preprocessor, sampler, preprocessor, classifier
# if preprocess_twice is false, self.cur_model is sampler, preprocessor, classifier
# if full_pipeline and sampler index, self.cur_model is the classifier
# (must be a pipeline containing a sampler or a placeholder (None) for the sampler)
if full_pipeline:
assert sampler_index is not None
clf = classifier
elif preprocess_twice:
clf = make_pipeline(self.preprocessor, sampler,
copy.deepcopy(self.preprocessor), classifier)
sampler_index = 1
else:
clf = make_pipeline(sampler, self.preprocessor, classifier)
sampler_index = 0
self._sampling_pipeline = clf[:sampler_index+1]
self._classifying_pipeline = clf[sampler_index+1:]
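        # Illustrative note: with preprocess_twice=True the steps are
        # (preprocessor, sampler, preprocessor, classifier) and sampler_index == 1, so
        # _sampling_pipeline holds the first preprocessor plus the sampler and
        # _classifying_pipeline holds the second preprocessor plus the classifier.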
if clf[sampler_index] is not None:
self.X_train_sampled, self.y_train_sampled = self._sampling_pipeline.fit_resample(
self.X_train, self.y_train)
else:
self.X_train_sampled, self.y_train_sampled = self.X_train, self.y_train
clf = self._classifying_pipeline
# model_name = f'{sampler} {classifier}'
# if verbose:
# print(f'Running {model_name}.')
self._classifying_pipeline.fit(
self.X_train_sampled, self.y_train_sampled)
self.cur_model = clf
# if verbose:
# print("model trained to: %.3f" %
# clf.score(self.X_train, self.y_train))
# print("model score: %.3f" % clf.score(self.X_test, self.y_test))
return clf
def metrics(self, graph_dir=None, graph_prefix=None, binary_classification=True):
# return list of (metric: float, metric_name: str) tuples of metrics of given classifier (default: self.cur_model)
# can only do metrics for binary classification as of right now
assert binary_classification
metric_list = []
clf = self.cur_model
# label metrics
if graph_prefix:
for flipped_labels in [False, True]:
flipped_labels_suffix = '' if not flipped_labels else '_flipped'
fig, axes = plt.subplots(3, 3, figsize=(20, 20))
for i, (yarray, Xarray, label) in enumerate([(self.y_test, self.X_test, 'test'),
(self.y_train_sampled,
self.X_train_sampled, 'train'),
(self.y_train,
self.X_train, 'train_raw'),
]):
for j, (graph_type, func) in enumerate([
('', plot_confusion_matrix),
('_PR', plot_precision_recall_curve),
('_ROC', plot_roc_curve),
]):
ax = axes[j, i]
graph_yarray = yarray.astype(bool)
if flipped_labels:
graph_yarray = ~graph_yarray
disp = func(clf, Xarray, graph_yarray, ax=ax)
title = f'{label}{graph_type}{flipped_labels_suffix}'
ax.set_title(title)
if graph_type in ['_PR', '_ROC']:
ax.set_xlim(-0.05, 1.05)
ax.set_ylim(-0.05, 1.05)
ax.set_aspect('equal', adjustable='box')
suptitle = f'{graph_prefix}{flipped_labels_suffix}'
plt.suptitle(suptitle)
savepath = os.path.join(graph_dir, f'{suptitle}.png')
fig.savefig(savepath, dpi=100)
plt.close()
for i, (yarray, Xarray, label) in enumerate([(self.y_test, self.X_test, 'test'),
(self.y_train_sampled,
self.X_train_sampled, 'train'),
(self.y_train,
self.X_train, 'train_raw'),
]):
y_pred = clf.predict(Xarray)
y_prob = clf.predict_proba(Xarray)[:, 1]
y_true = yarray
X_shape = Xarray.shape
metric_list.extend(feat_util.binary_metric_list(
y_true=y_true, y_pred=y_pred, y_prob=y_prob, X_shape=X_shape,
label_prefix=f'{label}_'
))
return metric_list
def model_stats(self, classifier=None, graph=True):
        # counter, auc, and an optional ROC graph for the given classifier (default: self.cur_model)
classifier = classifier or self.cur_model
y_prob = classifier.predict_proba(self.X_test)[:, 1]
print(f"dimension y_prob: {y_prob.shape}")
print(f"dimension y_test: {self.y_test.shape}")
print(f'Predicts:', Counter(list(classifier.predict(self.X_test))))
print(f'True Labels:', Counter(self.y_test))
if graph:
fpr, tpr, thres = roc_curve(self.y_test, y_prob)
plt.plot(fpr, tpr, color='green')
plt.plot([0, 1], [0, 1], color='red', linestyle='--')
plt.show()
roc_auc = roc_auc_score(self.y_test, y_prob)
print(f"ROC-AUC Score: {roc_auc}")
def classification_report(self):
# classification report on current model
y_true = self.y_test
y_pred = self.cur_model.predict(self.X_test)
print(classification_report(y_true, y_pred))
class JWWindowSelector:
ycols = ['R0_quiz_response','R1_quiz_response','R2_quiz_response','R1_quiz_response_bin',
'R1_quiz_response_0v12','R1_quiz_response_01v2','R1_quiz_response_bin_x',
'R2_quiz_response_bin','R2_quiz_response_bin_x','R2_quiz_response_bin0v123',
'R2_quiz_response_bin01v23','R2_quiz_response_bin012v3']
INTERACTION = 0
LEVEL = 1
QUIZ = 2
OBJECTIVE = 3
def __init__(self, csv_fpath=None, df=None, meta=None):
assert csv_fpath is not None or df is not None
# load df
if df is None:
print(f'Loading from {csv_fpath}...')
self.df, self.meta = feat_util.open_csv_from_path_with_meta(
csv_fpath, index_col=0)
else:
self.df = df
self.meta = meta or []
self.df_cols = list(df.columns)
@staticmethod
def get_abbrev(window_type):
if window_type == JWWindowSelector.INTERACTION:
return 'int'
if window_type == JWWindowSelector.LEVEL:
return 'lvl'
if window_type == JWWindowSelector.QUIZ:
return 'q'
if window_type == JWWindowSelector.OBJECTIVE:
return 'obj'
@staticmethod
def get_prefix(n, window_type):
if window_type == JWWindowSelector.INTERACTION:
return f'int{n}_i'
if window_type == JWWindowSelector.LEVEL:
return f'lvl{n}_'
if window_type == JWWindowSelector.QUIZ:
return f'Q{n}_'
if window_type == JWWindowSelector.OBJECTIVE:
return f'obj{n}_o'
@staticmethod
def get_window_range(window_type, skip_Q23=False):
if window_type == JWWindowSelector.INTERACTION:
return range(189)
if window_type == JWWindowSelector.LEVEL:
return range(24)
if window_type == JWWindowSelector.QUIZ:
if not skip_Q23:
return range(19)
else:
return [0,1,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18]
if window_type == JWWindowSelector.OBJECTIVE:
return range(80)
def cols_startwith(self, prefix):
return [c for c in self.df_cols if c.startswith(prefix)]
def get_feats(self, n, window_type):
prefix = self.get_prefix(n, window_type)
feats = self.cols_startwith(prefix)
return feats
def get_filter_queries(self, n, window_type, max_seconds_per_word=2):
prefix = JWWindowSelector.get_prefix(n, window_type)
queries = [f"R1_quiz_response == R1_quiz_response"]
if window_type in [JWWindowSelector.INTERACTION, JWWindowSelector.LEVEL]:
queries.extend([
f"{prefix}first_enc_duration == {prefix}first_enc_duration",
f"{prefix}first_enc_duration > 0",
])
if window_type == JWWindowSelector.QUIZ:
queries.extend([
f'{prefix}A1_nan!=1'
])
elif window_type == JWWindowSelector.INTERACTION:
num_words = self.df[f"int{n}_ifirst_enc_words_read"].max()
queries.extend([
f"{prefix}first_enc_words_read == {num_words}",
f"{prefix}time_to > 0",
f"{prefix}first_enc_duration < {prefix}first_enc_words_read*{max_seconds_per_word}",
])
elif window_type == JWWindowSelector.OBJECTIVE:
if n < 79:
queries.append(f'obj{n}_onext_int_nan==0')
queries.append(f"obj{n}_otime_to_next_obj < 600")
queries.append(f"obj{n}_otime_to_next_obj > 0 ")
elif window_type == JWWindowSelector.LEVEL:
queries.append(f"{prefix}time_in_level < 1200")
queries.append(f"{prefix}time_in_level > 0")
queries.extend([f"R{i}_quiz_response == R{i}_quiz_response" for i in [0,1,2]])
return queries
def get_base_meta(self):
return self.meta
@staticmethod
def join_XY(X,Y):
return X.join(Y)
def get_X_Y_meta(self, n, window_type, max_seconds_per_word=2,nbins=0, drop_first_next_int_col = True):
meta = []
prefix = JWWindowSelector.get_prefix(n, window_type)
Xfeats = self.get_feats(n, window_type)
meta.append(f'Using feats: {Xfeats}')
if window_type==JWWindowSelector.INTERACTION:
total_words = self.df[f"int{n}_ifirst_enc_words_read"].max()
if total_words is np.nan:
return None, None, meta
elif total_words < 10:
print('Total words < 10!')
queries = self.get_filter_queries(n, window_type, max_seconds_per_word=max_seconds_per_word)
filtered_df, filtered_df_meta = feat_util.filter_df(self.df[Xfeats+JWWindowSelector.ycols], query_list=queries, verbose=True, fillna=None)
meta.extend(filtered_df_meta)
X = filtered_df[Xfeats].fillna(0).copy()
meta.append(f'Filled X with 0')
Y = filtered_df[JWWindowSelector.ycols].copy()
drop_cols = []
if window_type in [JWWindowSelector.INTERACTION, JWWindowSelector.LEVEL]:
drop_cols = [
f"{prefix}first_enc_boxes_read",
f"{prefix}first_enc_words_read",
]
if window_type==JWWindowSelector.INTERACTION:
drop_cols.extend([
f"{prefix}time_to",
f"{prefix}total_duration"
])
if window_type==JWWindowSelector.OBJECTIVE:
drop_cols.append(f"{prefix}next_int_nan")
# if window_type==JWWindowSelector.QUIZ:
# drop_cols.append(f"{prefix}answers")
X = X.drop(columns=drop_cols)
meta.append(f"Dropped drop_cols: {drop_cols}")
constant_cols = X.columns[X.nunique()==1]
X = X.drop(columns=constant_cols)
meta.append(f'Dropped constant_cols: {constant_cols}')
if not len(X):
return None, None, meta
if window_type == JWWindowSelector.OBJECTIVE and drop_first_next_int_col:
next_int_cols = [c for c in X.columns if 'next_int' in c]
if next_int_cols:
X = X.drop(columns=next_int_cols[0])
meta.append(f'Dropped onehot column {next_int_cols[0]} from {next_int_cols}')
## does not bin by default
if nbins:
est = KBinsDiscretizer(n_bins=nbins, encode='onehot-dense', strategy='quantile')
bin_feats = [f'{prefix}first_enc_avg_tbps',
f'{prefix}first_enc_avg_wps',
# f'{prefix}first_enc_duration',
f'{prefix}first_enc_var_tbps',
f'{prefix}first_enc_var_wps']
bin_feats = [c for c in bin_feats if c in X.columns]
if bin_feats:
Xt = est.fit_transform(X[bin_feats])
new_feat_names = [f'{feat}>{x:.2f}' for bins,feat in zip(est.bin_edges_,bin_feats) for x in list(bins)[:-1]]
Xt_df = pd.DataFrame(Xt, index=X.index, columns=new_feat_names)
X = X.join(Xt_df)
X = X.drop(columns=bin_feats)
meta.append(f'Quantized n_bins={nbins} feats {bin_feats} to {new_feat_names}')
return (X, Y, meta)
def get_X_Y_meta_range(self, ns, window_type, max_seconds_per_word=2,nbins=0, drop_first_next_int_col = True, verbose=True):
X, Y, meta = None, None, []
for n in ns:
tX, tY, tmeta = self.get_X_Y_meta(n, window_type, max_seconds_per_word=max_seconds_per_word, nbins=nbins, drop_first_next_int_col=drop_first_next_int_col)
X, Y, meta = JWWindowSelector.join_X_Y_meta(X, Y, meta, tX, tY, tmeta, copy=False)
print('Join Size:', X.shape)
X, Y = X.copy(), Y.copy()
return X, Y, meta
@staticmethod
def join_X_Y_meta(X1, Y1, meta1, X2, Y2, meta2, copy=True):
meta = meta1+meta2
if X1 is None:
X = X2
Y = Y2
elif X2 is None:
X = X1
Y = Y1
else:
X = X1.join(X2, how='inner')
Y = Y1.loc[X.index, :]
meta = meta1+['--Inner Join--']+meta2+[f'Resultant Join Shape: {X.shape}']
if copy and X is not None:
X, Y = X.copy(), Y.copy()
return X, Y, meta
| 1.992188
| 2
|
mods/libvis_mods/project-templates/test-interactive.py
|
danlkv/pywebviz
| 0
|
12785015
|
from subprocess import Popen, PIPE
import sys
import os
from queue import Queue, Empty
import subprocess
import threading
import time
class LocalShell(object):
def __init__(self):
pass
def run(self, cmd):
env = os.environ.copy()
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT, shell=True, env=env)
def writeall(p):
while True:
# print("read data: ")
data = p.stdout.read(1).decode("utf-8")
if not data:
break
sys.stdout.write(data)
sys.stdout.flush()
writer = threading.Thread(target=writeall, args=(p,))
writer.start()
def reader(readq):
try:
while True:
d = sys.stdin.read(1)
if not d:
break
readq.put(d)
except EOFError:
pass
readq = Queue()
r = threading.Thread(target=reader, args=(readq,))
r.daemon=True
r.start()
while True:
            if not writer.is_alive():
break
try:
d = readq.get(block=False)
self._write(p, bytes(d, 'utf-8'))
time.sleep(0.01)
except Empty:
pass
def _write(self, process, message):
process.stdin.write(message)
process.stdin.flush()
cmd = ['cookiecutter', 'source-files']
cmd = ' '.join(cmd)
def main():
shell = LocalShell()
try:
shell.run(cmd)
except KeyboardInterrupt:
return
if __name__ == '__main__':
main()
| 2.40625
| 2
|
python/testData/codeInsight/smartEnter/methodParameterStaticMethod_after.py
|
tgodzik/intellij-community
| 2
|
12785016
|
<reponame>tgodzik/intellij-community<filename>python/testData/codeInsight/smartEnter/methodParameterStaticMethod_after.py<gh_stars>1-10
class MyClass:
@staticmethod
def method():
<caret>
| 1.617188
| 2
|
fancy/trainer/transforms/normalize_both_input_and_target.py
|
susautw/fancy-trainer
| 0
|
12785017
|
from typing import Optional, Callable
import torch
import numpy as np
from PIL.Image import Image
from ..transforms import TargetHandler
class NormalizeBothInputAndTarget:
transform: Callable[[Image], Image]
target_handler: TargetHandler
def __init__(
self,
transform: Callable[[Image], Image],
target_handler: Optional[TargetHandler] = None,
):
self.target_handler = target_handler
self.transform = transform
def forward(self, x: Image) -> torch.Tensor:
image_data = np.array(x)
std = image_data.std()
mean = image_data.mean()
erased_img = self.transform(x)
erased_img_data = torch.tensor(np.array(erased_img), dtype=torch.float32)
normed_img_data = (erased_img_data - mean) / std
target = self.target_handler.get()
if target is None:
raise RuntimeError("target has not generated.")
if not isinstance(target, Image):
raise TypeError("the generated target must be an PIL.Image")
target_data = torch.tensor(np.array(target), dtype=torch.float32)
self.target_handler.set((target_data - mean) / std)
return normed_img_data
| 2.6875
| 3
|
version.py
|
TheNomet/blackbricks
| 0
|
12785018
|
<reponame>TheNomet/blackbricks<filename>version.py<gh_stars>0
import re
import os
def get_version():
"""Return the current version number from blackbricks/__init__.py"""
with open(os.path.join(os.path.dirname(__file__), "blackbricks/__init__.py")) as f:
match = re.search(r'__version__ = "([0-9\.]+)"', f.read())
assert match, "No version in blackbricks/__init__.py !"
return match.group(1)
if __name__ == "__main__":
print(get_version())
| 2.46875
| 2
|
tests/snippets/decorators.py
|
gabhijit/RustPython
| 1
|
12785019
|
<gh_stars>1-10
def logged(f):
def wrapper(a, b):
print('Calling function', f)
return f(a, b + 1)
return wrapper
@logged
def add(a, b):
return a + b
c = add(10, 3)
assert c == 14
| 2.921875
| 3
|
assets/tinyurl.py
|
Code-Cecilia/BotMan.py
| 4
|
12785020
|
import aiohttp
async def get_tinyurl(link: str):
url = f"http://tinyurl.com/api-create.php?url={link}"
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
response = (await response.content.read()).decode('utf-8')
return response
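# Example usage (illustrative):
# import asyncio
# print(asyncio.run(get_tinyurl("https://example.com")))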
| 2.953125
| 3
|
YTPP001 Competition/ISGOODNM.py
|
8Bit1Byte/Codechef-Solutions
| 2
|
12785021
|
'''
Problem Name: Good Number Or Not
Problem Code: ISGOODNM
Problem Link: https://www.codechef.com/problems/ISGOODNM
Solution Link: https://www.codechef.com/viewsolution/47005382
'''
def divisors(m):
from math import sqrt
l = set()
for i in range(1, int(sqrt(m)+1)):
if m%i == 0:
l.add(i)
l.add(m//i)
l = list(l)
l.sort()
return l[:-1]
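# Illustrative example: divisors(28) returns the proper divisors [1, 2, 4, 7, 14];
# their sum is 28, so 28 is a perfect ("good") number and solve(28) returns 'YES'.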
def solve(n):
l = sum(divisors(n))
if l == n:
return 'YES'
return 'NO'
if __name__ == '__main__':
n = int(input())
print(solve(n))
| 3.71875
| 4
|
reproduction/coreference_resolution/data_load/cr_loader.py
|
KuNyaa/fastNLP
| 1
|
12785022
|
<filename>reproduction/coreference_resolution/data_load/cr_loader.py
from fastNLP.io.dataset_loader import JsonLoader,DataSet,Instance
from fastNLP.io.file_reader import _read_json
from fastNLP.core.vocabulary import Vocabulary
from fastNLP.io.base_loader import DataBundle
from reproduction.coreference_resolution.model.config import Config
import reproduction.coreference_resolution.model.preprocess as preprocess
class CRLoader(JsonLoader):
def __init__(self, fields=None, dropna=False):
super().__init__(fields, dropna)
def _load(self, path):
"""
        Load the data from the given path.
:param path:
:return:
"""
dataset = DataSet()
for idx, d in _read_json(path, fields=self.fields_list, dropna=self.dropna):
if self.fields:
ins = {self.fields[k]: v for k, v in d.items()}
else:
ins = d
dataset.append(Instance(**ins))
return dataset
def process(self, paths, **kwargs):
data_info = DataBundle()
for name in ['train', 'test', 'dev']:
data_info.datasets[name] = self.load(paths[name])
config = Config()
vocab = Vocabulary().from_dataset(*data_info.datasets.values(), field_name='sentences')
vocab.build_vocab()
word2id = vocab.word2idx
char_dict = preprocess.get_char_dict(config.char_path)
data_info.vocabs = vocab
genres = {g: i for i, g in enumerate(["bc", "bn", "mz", "nw", "pt", "tc", "wb"])}
for name, ds in data_info.datasets.items():
ds.apply(lambda x: preprocess.doc2numpy(x['sentences'], word2id, char_dict, max(config.filter),
config.max_sentences, is_train=name=='train')[0],
new_field_name='doc_np')
ds.apply(lambda x: preprocess.doc2numpy(x['sentences'], word2id, char_dict, max(config.filter),
config.max_sentences, is_train=name=='train')[1],
new_field_name='char_index')
ds.apply(lambda x: preprocess.doc2numpy(x['sentences'], word2id, char_dict, max(config.filter),
config.max_sentences, is_train=name=='train')[2],
new_field_name='seq_len')
ds.apply(lambda x: preprocess.speaker2numpy(x["speakers"], config.max_sentences, is_train=name=='train'),
new_field_name='speaker_ids_np')
ds.apply(lambda x: genres[x["doc_key"][:2]], new_field_name='genre')
ds.set_ignore_type('clusters')
ds.set_padder('clusters', None)
ds.set_input("sentences", "doc_np", "speaker_ids_np", "genre", "char_index", "seq_len")
ds.set_target("clusters")
# train_dev, test = self.ds.split(348 / (2802 + 343 + 348), shuffle=False)
# train, dev = train_dev.split(343 / (2802 + 343), shuffle=False)
return data_info
| 2.375
| 2
|
problems/problem25.py
|
phanghos/euler-solutions
| 1
|
12785023
|
<gh_stars>1-10
def digits(n):
c = 0
while n > 0:
c += 1
n //= 10
return c
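# Illustrative check: digits(999) == 3 and digits(1000) == 4; the loop below finds the
# index of the first Fibonacci number with 1000 digits.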
fib1 = fib2 = 1
fib3 = fib1 + fib2
c = 3
while digits(fib3) != 1000:
fib1, fib2 = fib2, fib3
fib3 = fib1 + fib2
c += 1
print(c)
| 3.53125
| 4
|
reservationen_package/db_util.py
|
cyrilwelschen/reservationen_package
| 0
|
12785024
|
import sqlite3
import os
class DbUtil:
def __init__(self, db_file):
self.db_path = db_file
self.conn = self.create_connection()
self.c = self.create_cursor()
def create_connection(self):
try:
os.remove(self.db_path)
print("removing existing db file")
except OSError as e:
print(e)
print("Db file doesn't exist, creating it...")
conn = sqlite3.connect(self.db_path)
return conn
def create_cursor(self):
return self.conn.cursor()
def create_table(self, creation_string):
self.c.execute(creation_string)
self.conn.commit()
def execute_on_db(self, execution_string):
exec_st = ""
try:
# todo: check if encode/decode is really necessary?
exec_st = execution_string.encode("utf-8")
self.c.execute(exec_st.decode("utf-8"))
except sqlite3.OperationalError as e:
# todo: send mail notification
print(exec_st)
raise e
def close_db(self):
self.conn.close()
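# Example usage (illustrative; the file name and schema are assumptions):
# db = DbUtil("reservations.db")
# db.create_table("CREATE TABLE bookings (id INTEGER PRIMARY KEY, name TEXT)")
# db.close_db()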
| 3.109375
| 3
|
spectrespecs/core/models.py
|
Spacehug/loony-lovegood
| 1
|
12785025
|
<gh_stars>1-10
from django.utils import timezone
from django.db import models
class Profile(models.Model):
uid = models.BigIntegerField(
blank=False,
null=False,
help_text="Telegram user ID.",
verbose_name="User ID",
db_index=True,
)
gid = models.BigIntegerField(
blank=False,
null=False,
help_text="Telegram group ID this user has sent message into.",
verbose_name="Group ID",
db_index=True,
)
has_messages = models.BooleanField(blank=False, null=False, default=False)
created_at = models.DateTimeField(
blank=False,
auto_now_add=True,
help_text="Date and time the profile was created at.",
verbose_name="Created at",
)
updated_at = models.DateTimeField(
blank=False,
auto_now=True,
help_text="Date and time the profile was updated at.",
verbose_name="Updated at",
)
grace_at = models.DateTimeField(
blank=True,
null=True,
help_text="Date and time the profile grace period started at.",
verbose_name="Grace started at",
)
warnings = models.IntegerField(
blank=False,
null=False,
help_text="Warnings this user have.",
verbose_name="Warnings",
default=0,
)
class Meta:
db_table = "profiles"
verbose_name = "Profile"
verbose_name_plural = "Profiles"
unique_together = ("uid", "gid")
def __repr__(self):
return f"Profile({self.uid} from {self.gid})"
def __str__(self):
return f"{self.uid} from {self.gid}"
def reset_grace(self):
self.grace_at = timezone.now()
self.save()
| 2.375
| 2
|
dl/keras-efficientnet-regression/efficient_net_keras_regression.py
|
showkeyjar/beauty
| 1
|
12785026
|
from typing import Iterator, List, Union, Tuple, Any
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from tensorflow import keras
import tensorflow_addons as tfa
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers, models, Model
from tensorflow.python.keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint
from tensorflow.keras.losses import MeanAbsoluteError, MeanAbsolutePercentageError
from tensorflow.keras.models import Sequential
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import History
# the next 3 lines of code are for my machine and setup due to https://github.com/tensorflow/tensorflow/issues/43174
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)
def visualize_augmentations(data_generator: ImageDataGenerator, df: pd.DataFrame):
"""Visualizes the keras augmentations with matplotlib in 3x3 grid. This function is part of create_generators() and
can be accessed from there.
Parameters
----------
data_generator : Iterator
The keras data generator of your training data.
df : pd.DataFrame
The Pandas DataFrame containing your training data.
"""
# super hacky way of creating a small dataframe with one image
series = df.iloc[2]
df_augmentation_visualization = pd.concat([series, series], axis=1).transpose()
iterator_visualizations = data_generator.flow_from_dataframe( # type: ignore
dataframe=df_augmentation_visualization,
x_col="image_location",
y_col="price",
class_mode="raw",
target_size=(224, 224), # size of the image
batch_size=1, # use only one image for visualization
)
for i in range(9):
ax = plt.subplot(3, 3, i + 1) # create a 3x3 grid
batch = next(iterator_visualizations) # get the next image of the generator (always the same image)
img = batch[0] # type: ignore
img = img[0, :, :, :] # remove one dimension for plotting without issues
plt.imshow(img)
plt.show()
plt.close()
def get_mean_baseline(train: pd.DataFrame, val: pd.DataFrame) -> float:
"""Calculates the mean MAE and MAPE baselines by taking the mean values of the training data as prediction for the
validation target feature.
Parameters
----------
train : pd.DataFrame
Pandas DataFrame containing your training data.
val : pd.DataFrame
Pandas DataFrame containing your validation data.
Returns
-------
float
MAPE value.
"""
y_hat = train["price"].mean()
val["y_hat"] = y_hat
mae = MeanAbsoluteError()
mae = mae(val["price"], val["y_hat"]).numpy() # type: ignore
mape = MeanAbsolutePercentageError()
mape = mape(val["price"], val["y_hat"]).numpy() # type: ignore
    print("mean baseline MAE: ", mae)
print("mean baseline MAPE: ", mape)
return mape
def split_data(df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""Accepts a Pandas DataFrame and splits it into training, testing and validation data. Returns DataFrames.
Parameters
----------
df : pd.DataFrame
Your Pandas DataFrame containing all your data.
Returns
-------
    Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]
        The train, validation and test DataFrames.
"""
    train, val = train_test_split(df, test_size=0.2, random_state=1)  # split the data with a validation size of 20%
train, test = train_test_split(
train, test_size=0.125, random_state=1
) # split the data with an overall test size of 10%
print("shape train: ", train.shape) # type: ignore
print("shape val: ", val.shape) # type: ignore
print("shape test: ", test.shape) # type: ignore
print("Descriptive statistics of train:")
print(train.describe()) # type: ignore
return train, val, test # type: ignore
def create_generators(
df: pd.DataFrame, train: pd.DataFrame, val: pd.DataFrame, test: pd.DataFrame, plot_augmentations: Any
) -> Tuple[Iterator, Iterator, Iterator]:
"""Accepts four Pandas DataFrames: all your data, the training, validation and test DataFrames. Creates and returns
keras ImageDataGenerators. Within this function you can also visualize the augmentations of the ImageDataGenerators.
Parameters
----------
df : pd.DataFrame
Your Pandas DataFrame containing all your data.
train : pd.DataFrame
Your Pandas DataFrame containing your training data.
val : pd.DataFrame
Your Pandas DataFrame containing your validation data.
test : pd.DataFrame
Your Pandas DataFrame containing your testing data.
Returns
-------
Tuple[Iterator, Iterator, Iterator]
keras ImageDataGenerators used for training, validating and testing of your models.
"""
train_generator = ImageDataGenerator(
rescale=1.0 / 255,
rotation_range=5,
width_shift_range=0.1,
height_shift_range=0.1,
brightness_range=(0.75, 1),
shear_range=0.1,
zoom_range=[0.75, 1],
horizontal_flip=True,
validation_split=0.2,
) # create an ImageDataGenerator with multiple image augmentations
validation_generator = ImageDataGenerator(
rescale=1.0 / 255
) # except for rescaling, no augmentations are needed for validation and testing generators
test_generator = ImageDataGenerator(rescale=1.0 / 255)
# visualize image augmentations
    if plot_augmentations:
visualize_augmentations(train_generator, df)
train_generator = train_generator.flow_from_dataframe(
dataframe=train,
x_col="image_location", # this is where your image data is stored
y_col="price", # this is your target feature
class_mode="raw", # use "raw" for regressions
target_size=(224, 224),
batch_size=32, # increase or decrease to fit your GPU
)
validation_generator = validation_generator.flow_from_dataframe(
dataframe=val, x_col="image_location", y_col="price", class_mode="raw", target_size=(224, 224), batch_size=128,
)
test_generator = test_generator.flow_from_dataframe(
dataframe=test, x_col="image_location", y_col="price", class_mode="raw", target_size=(224, 224), batch_size=128,
)
return train_generator, validation_generator, test_generator
def get_callbacks(model_name: str) -> List[Union[TensorBoard, EarlyStopping, ModelCheckpoint]]:
"""Accepts the model name as a string and returns multiple callbacks for training the keras model.
Parameters
----------
model_name : str
The name of the model as a string.
Returns
-------
List[Union[TensorBoard, EarlyStopping, ModelCheckpoint]]
A list of multiple keras callbacks.
"""
logdir = (
"logs/scalars/" + model_name + "_" + datetime.now().strftime("%Y%m%d-%H%M%S")
) # create a folder for each model.
tensorboard_callback = TensorBoard(log_dir=logdir)
# use tensorboard --logdir logs/scalars in your command line to startup tensorboard with the correct logs
early_stopping_callback = EarlyStopping(
monitor="val_mean_absolute_percentage_error",
min_delta=1, # model should improve by at least 1%
patience=10, # amount of epochs with improvements worse than 1% until the model stops
verbose=2,
mode="min",
restore_best_weights=True, # restore the best model with the lowest validation error
)
model_checkpoint_callback = ModelCheckpoint(
"./data/models/" + model_name + ".h5",
monitor="val_mean_absolute_percentage_error",
verbose=0,
save_best_only=True, # save the best model
mode="min",
save_freq="epoch", # save every epoch
) # saving eff_net takes quite a bit of time
return [tensorboard_callback, early_stopping_callback, model_checkpoint_callback]
def small_cnn() -> Sequential:
"""A very small custom convolutional neural network with image input dimensions of 224x224x3.
Returns
-------
Sequential
The keras Sequential model.
"""
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation="relu", input_shape=(224, 224, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation="relu"))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(1))
return model
def run_model(
model_name: str,
model_function: Model,
lr: float,
train_generator: Iterator,
validation_generator: Iterator,
test_generator: Iterator,
) -> History:
"""This function runs a keras model with the Ranger optimizer and multiple callbacks. The model is evaluated within
training through the validation generator and afterwards one final time on the test generator.
Parameters
----------
model_name : str
The name of the model as a string.
model_function : Model
Keras model function like small_cnn() or adapt_efficient_net().
lr : float
Learning rate.
train_generator : Iterator
keras ImageDataGenerators for the training data.
validation_generator : Iterator
keras ImageDataGenerators for the validation data.
test_generator : Iterator
keras ImageDataGenerators for the test data.
Returns
-------
History
The history of the keras model as a History object. To access it as a Dict, use history.history. For an example
see plot_results().
"""
callbacks = get_callbacks(model_name)
model = model_function
model.summary()
plot_model(model, to_file=model_name + ".png", show_shapes=True)
radam = tfa.optimizers.RectifiedAdam(learning_rate=lr)
ranger = tfa.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5)
optimizer = ranger
model.compile(
optimizer=optimizer, loss="mean_absolute_error", metrics=[MeanAbsoluteError(), MeanAbsolutePercentageError()]
)
history = model.fit(
train_generator,
epochs=100,
validation_data=validation_generator,
callbacks=callbacks,
workers=6, # adjust this according to the number of CPU cores of your machine
)
model.evaluate(
test_generator, callbacks=callbacks,
)
return history # type: ignore
def adapt_efficient_net() -> Model:
"""This code uses adapts the most up-to-date version of EfficientNet with NoisyStudent weights to a regression
problem. Most of this code is adapted from the official keras documentation.
Returns
-------
Model
The keras model.
"""
inputs = layers.Input(
shape=(224, 224, 3)
) # input shapes of the images should always be 224x224x3 with EfficientNetB0
    # use the downloaded and converted newest EfficientNet weights
model = EfficientNetB0(include_top=False, input_tensor=inputs, weights="efficientnetb0_notop.h5")
# Freeze the pretrained weights
model.trainable = False
# Rebuild top
x = layers.GlobalAveragePooling2D(name="avg_pool")(model.output)
x = layers.BatchNormalization()(x)
top_dropout_rate = 0.4
x = layers.Dropout(top_dropout_rate, name="top_dropout")(x)
outputs = layers.Dense(1, name="pred")(x)
# Compile
model = keras.Model(inputs, outputs, name="EfficientNet")
return model
def plot_results(model_history_small_cnn: History, model_history_eff_net: History, mean_baseline: float):
"""This function uses seaborn with matplotlib to plot the trainig and validation losses of both input models in an
sns.relplot(). The mean baseline is plotted as a horizontal red dotted line.
Parameters
----------
model_history_small_cnn : History
keras History object of the model.fit() method.
model_history_eff_net : History
keras History object of the model.fit() method.
mean_baseline : float
Result of the get_mean_baseline() function.
"""
# create a dictionary for each model history and loss type
dict1 = {
"MAPE": model_history_small_cnn.history["mean_absolute_percentage_error"],
"type": "training",
"model": "small_cnn",
}
dict2 = {
"MAPE": model_history_small_cnn.history["val_mean_absolute_percentage_error"],
"type": "validation",
"model": "small_cnn",
}
dict3 = {
"MAPE": model_history_eff_net.history["mean_absolute_percentage_error"],
"type": "training",
"model": "eff_net",
}
dict4 = {
"MAPE": model_history_eff_net.history["val_mean_absolute_percentage_error"],
"type": "validation",
"model": "eff_net",
}
# convert the dicts to pd.Series and concat them to a pd.DataFrame in the long format
s1 = pd.DataFrame(dict1)
s2 = pd.DataFrame(dict2)
s3 = pd.DataFrame(dict3)
s4 = pd.DataFrame(dict4)
df = pd.concat([s1, s2, s3, s4], axis=0).reset_index()
grid = sns.relplot(data=df, x=df["index"], y="MAPE", hue="model", col="type", kind="line", legend=False)
grid.set(ylim=(20, 100)) # set the y-axis limit
for ax in grid.axes.flat:
ax.axhline(
y=mean_baseline, color="lightcoral", linestyle="dashed"
) # add a mean baseline horizontal bar to each plot
ax.set(xlabel="Epoch")
labels = ["small_cnn", "eff_net", "mean_baseline"] # custom labels for the plot
plt.legend(labels=labels)
plt.savefig("training_validation.png")
plt.show()
def run(small_sample=False):
"""Run all the code of this file.
Parameters
----------
small_sample : bool, optional
If you just want to check if the code is working, set small_sample to True, by default False
"""
df = pd.read_pickle("./data/df.pkl")
df["image_location"] = (
"./data/processed_images/" + df["zpid"] + ".png"
) # add the correct path for the image locations.
    if small_sample:
        df = df.iloc[0:1000]  # set small_sample to True if you want to check if your code works without long waiting
train, val, test = split_data(df) # split your data
mean_baseline = get_mean_baseline(train, val)
train_generator, validation_generator, test_generator = create_generators(
df=df, train=train, val=val, test=test, plot_augmentations=True
)
small_cnn_history = run_model(
model_name="small_cnn",
model_function=small_cnn(),
lr=0.001,
train_generator=train_generator,
validation_generator=validation_generator,
test_generator=test_generator,
)
eff_net_history = run_model(
model_name="eff_net",
model_function=adapt_efficient_net(),
lr=0.5,
train_generator=train_generator,
validation_generator=validation_generator,
test_generator=test_generator,
)
plot_results(small_cnn_history, eff_net_history, mean_baseline)
if __name__ == "__main__":
run(small_sample=False)
| 3
| 3
|
src/thirdparty/harfbuzz/src/check-includes.py
|
devbrain/neutrino
| 0
|
12785027
|
#!/usr/bin/env python3
import sys, os, re
os.chdir(os.getenv('srcdir', os.path.dirname(__file__)))
HBHEADERS = [os.path.basename(x) for x in os.getenv('HBHEADERS', '').split()] or \
[x for x in os.listdir('.') if x.startswith('hb') and x.endswith('.h')]
HBSOURCES = [os.path.basename(x) for x in os.getenv('HBSOURCES', '').split()] or \
[x for x in os.listdir('.') if x.startswith('hb') and x.endswith(('.cc', '.hh'))]
stat = 0
print('Checking that public header files #include "hb-common.h" or "hb.h" first (or none)')
for x in HBHEADERS:
if x == 'hb.h' or x == 'hb-common.h': continue
with open(x, 'r', encoding='utf-8') as f:
content = f.read()
first = re.findall(r'#.*include.*', content)[0]
if first not in ['#include "hb.h"', '#include "hb-common.h"']:
print('failure on %s' % x)
stat = 1
print('Checking that source files #include a private header first (or none)')
for x in HBSOURCES:
with open(x, 'r', encoding='utf-8') as f:
content = f.read()
includes = re.findall(r'#.*include.*', content)
if includes:
if not len(re.findall(r'"hb.*\.hh"', includes[0])):
print('failure on %s' % x)
stat = 1
print('Checking that there is no #include <hb-*.h>')
for x in HBHEADERS + HBSOURCES:
with open(x, 'r', encoding='utf-8') as f:
content = f.read()
if re.findall('#.*include.*<.*hb', content):
print('failure on %s' % x)
stat = 1
sys.exit(stat)
| 2.796875
| 3
|
bit_manipulation/swapVariables.py
|
itsvinayak/cracking_the_codeing_interview
| 4
|
12785028
|
<gh_stars>1-10
# given two integers, swap them without using any temporary variable
# link : https://www.youtube.com/watch?v=DtnH3V_Vjek&list=PLNmW52ef0uwvkul_e_wLD525jbTfMKLIJ&index=4
# arithmetic method: x = x + y; y = x - y; x = x - y
# the function below uses the equivalent XOR method: x = x ^ y; y = x ^ y; x = x ^ y
def swapVariables(a, b):
a = a ^ b
b = a ^ b
a = a ^ b
return [a, b]
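# Illustrative alternative (not in the original file): the addition/subtraction
# method referenced in the comments above; it can overflow in fixed-width
# languages, but is safe for Python's arbitrary-precision ints.
def swapVariablesArithmetic(a, b):
    a = a + b
    b = a - b
    a = a - b
    return [a, b]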
def main():
a, b = map(int, input().split())
print(*swapVariables(a, b))
if __name__ == "__main__":
main()
| 3.78125
| 4
|
data_aug/analyze.py
|
carboncoo/UNITER
| 0
|
12785029
|
import io
import os
import sys
import lmdb
import json
import torch
import pickle
import random
import msgpack
import numpy as np
import msgpack_numpy
from collections import defaultdict
# from transformers import AutoTokenizer
from lz4.frame import compress, decompress
from os.path import exists, abspath, dirname
from sklearn.metrics.pairwise import cosine_similarity
from PIL import Image, ImageFont, ImageDraw, ImageEnhance
import pprint
pp = pprint.PrettyPrinter()
msgpack_numpy.patch()
origin_img_dir = '/data/share/UNITER/origin_imgs/flickr30k/flickr30k-images/'
def load_txt_db(db_dir):
# db loading
env_in = lmdb.open(db_dir, readonly=True, create=False)
txn_in = env_in.begin()
db = {}
for key, value in txn_in.cursor():
db[key] = value
print('db length:', len(db)) # db length: 443757
env_in.close()
return db
def load_img_db(img_dir, conf_th=0.2, max_bb=100, min_bb=10, num_bb=36, compress=False):
if conf_th == -1:
db_name = f'feat_numbb{num_bb}'
name2nbb = defaultdict(lambda: num_bb)
else:
db_name = f'feat_th{conf_th}_max{max_bb}_min{min_bb}'
nbb = f'nbb_th{conf_th}_max{max_bb}_min{min_bb}.json'
if not os.path.exists(f'{img_dir}/{nbb}'):
# nbb is not pre-computed
name2nbb = None
else:
name2nbb = json.load(open(f'{img_dir}/{nbb}'))
# => {'coco_test2015_000000043222.npz': 57, ...}
if compress:
db_name += '_compressed'
if name2nbb is None:
if compress:
db_name = 'all_compressed'
else:
db_name = 'all'
# db loading
env = lmdb.open(f'{img_dir}/{db_name}', readonly=True, create=False)
txn = env.begin(buffers=True)
return name2nbb, txn
def load_single_img(txn, file_name, compress=False):
# load single image with its file_name
dump = txn.get(file_name.encode('utf-8'))
if compress:
with io.BytesIO(dump) as reader:
img_dump = np.load(reader, allow_pickle=True)
img_dump = {'features': img_dump['features'],
'norm_bb': img_dump['norm_bb']}
else:
img_dump = msgpack.loads(dump, raw=False)
return img_dump
def get_concat_h(im1, im2):
dst = Image.new('RGB', (im1.width + im2.width, im1.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (im1.width, 0))
return dst
def draw_bounding_box(img_name, img_bb, outline=(0, 0, 0, 255)):
source_img = Image.open(origin_img_dir + img_name).convert("RGB")
width, height = source_img.size
draw = ImageDraw.Draw(source_img, 'RGBA')
p1 = (width*img_bb[0], height*img_bb[1])
p2 = (width*img_bb[2], height*img_bb[3])
draw.rectangle((p1, p2), outline=outline, width=2)
# draw.text((img_bb[0], img_bb[1]), "something123", font=ImageFont.truetype("font_path123"))
return source_img
# source_img.save('bb_' + img_name, "JPEG")
def crop_bb(img_name, img_bbs):
source_img = Image.open(origin_img_dir + img_name).convert("RGB")
width, height = source_img.size
for i in range(img_bbs.shape[0]):
p1 = (width*img_bbs[i][0], height*img_bbs[i][1])
p2 = (width*img_bbs[i][2], height*img_bbs[i][3])
crop = source_img.crop((p1[0], p1[1], p2[0], p2[1]))
crop.save('crop_%d.jpg'%(i), 'JPEG')
def main():
NUM_LABELS = 1600
# tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
id2tok = json.load(open('id2tok.json'))
labels_ids = json.load(open('object_labels_ids.json'))
def convert_ids_to_tokens(i):
if isinstance(i, int):
return id2tok[str(i)]
else:
i = list(i)
return [id2tok[str(ii)] for ii in i]
def get_label_str(i):
if isinstance(i, int):
return convert_ids_to_tokens(labels_ids[i])
else:
i = list(i)
return [convert_ids_to_tokens(labels_ids[ii]) if ii > 0 else '[BACKGROUND]' for ii in i]
def get_hard_labels(soft_labels, top_k=3):
if len(soft_labels.shape) < 2:
soft_labels = soft_labels.reshape(1, -1)
sorted_labels = soft_labels.argsort(axis=-1)[:, ::-1][:, :top_k]
sorted_labels = sorted_labels - 1
res = []
for l in sorted_labels:
res.append(get_label_str(l))
return res
checkpoint = torch.load(
"/data/private/cc/experiment/MMP/pretrained_ckpts/pretrained/uniter-base.pt")
emb_weight = checkpoint['uniter.embeddings.word_embeddings.weight']
txt_db_old = load_txt_db('/data/share/UNITER/ve/txt_db/ve_train.db')
txt_db_new = load_txt_db(
'/data/share/UNITER/ve/da/GloVe/seed2/txt_db/ve_train.db')
name2nbb, img_db_txn = load_img_db('/data/share/UNITER/ve/img_db/flickr30k')
def display(k):
d1 = msgpack.loads(decompress(txt_db_old[k.split(b'_')[1]]), raw=False)
d2 = msgpack.loads(decompress(txt_db_new[k]), raw=False)
# input_1 = tokenizer.convet_ids_to_tokens(d1['input_ids'])
# input_2 = tokenizer.convert_ids_to_tokens(d2['input_ids'])
input_1 = convert_ids_to_tokens(d1['input_ids'])
input_2 = convert_ids_to_tokens(d2['input_ids'])
input_3 = convert_ids_to_tokens(d2['mix_input_ids'])
hard_labels = get_hard_labels(d2['mix_soft_labels'])
# img1 = load_single_img(img_db_txn, d1['img_fname'])
img = load_single_img(img_db_txn, d2['img_fname'])
origin_img_name = str(k).split('_')[1].split('#')[0]
im1 = draw_bounding_box(origin_img_name, img['norm_bb'][d2['mix_index']])
im2 = draw_bounding_box(d2['mix_img_flk_id'], d2['mix_bb'], (200, 0, 0, 255))
cat_im = get_concat_h(im1, im2)
cat_im.save('bb_' + origin_img_name + '_' + d2['mix_img_flk_id'], 'JPEG')
# crop_bb(origin_img_name, img['norm_bb'])
return input_1, input_2, input_3, hard_labels
# print(list(txt_db_new.keys())[:10])
pp.pprint(display(list(txt_db_new.keys())[3]))
# pp.pprint(display(list(txt_db_new.keys())[1]))
# pp.pprint(display(list(txt_db_new.keys())[2]))
# import ipdb
# ipdb.set_trace()
if __name__ == '__main__':
main()
| 1.742188
| 2
|
bs4ocr.py
|
JKamlah/BackgroundSubtractor4OCR
| 5
|
12785030
|
<filename>bs4ocr.py
#!/usr/bin/env python3
import argparse
import subprocess
import sys
from pathlib import Path
import cv2
import numpy as np
# Command line arguments.
arg_parser = argparse.ArgumentParser(description='Subtract background for better OCR results.')
arg_parser.add_argument("fname", type=lambda x: Path(x), help="filename of text file or path to files", nargs='*')
arg_parser.add_argument("-o", "--outputfolder", default="./cleaned", help="filename of the output")
arg_parser.add_argument("-e", "--extension", default="jpg", help="Extension of the img")i
arg_parser.add_argument("-r", "--replace", action="store_true", help="Replace the original image")
arg_parser.add_argument("--extensionaddon", default=".prep", help="Addon to the fileextension")
arg_parser.add_argument("-b", "--blursize", default=59, type=int, help="Kernelsize for medianBlur")
arg_parser.add_argument("-i", "--bluriter", default=1, type=int, help="Iteration of the medianBlur")
arg_parser.add_argument("-f", "--fixblursize", action="store_true", help="Deactivate decreasing Blurkernelsize")
arg_parser.add_argument("--blurfilter", default="Gaussian", type=str, help="Kernelsize for dilation",
choices=["Gaussian", "Median"])
arg_parser.add_argument("-d", "--dilsize", default=5, type=int, help="Kernelsize for dilation")
arg_parser.add_argument("-s", "--kernelshape", default="ellipse", type=str, help="Shape of the kernel for dilation",
choices=["cross", "ellipse", "rect"])
arg_parser.add_argument("-c", "--contrast", default=0.0, type=float, help="Higher contrast (experimental)")
arg_parser.add_argument("-n", "--normalize", action="store_true", help="Higher contrast (experimental)")
arg_parser.add_argument("--normalize-only", action="store_true", help="Normalizes the image but doesnt subtract")
arg_parser.add_argument("--normalize_auto", action="store_true", help="Auto-Normalization (experimental)")
arg_parser.add_argument("--normalize_min", default=0, type=int, help="Min value for background normalization")
arg_parser.add_argument("--normalize_max", default=255, type=int, help="Max value for background normalization")
arg_parser.add_argument("--scale_channel", default="None", type=str, help="Shape of the kernel for dilation",
choices=["None", "red", "green", "blue", "cyan", "magenta", "yellow"])
arg_parser.add_argument("--scale_channel_value", default=0.0, type=float, help="Scale value")
arg_parser.add_argument("--binarize", action="store_true", help="Use Adaptive-Otsu-Binarization")
arg_parser.add_argument("--dpi", default=300, type=int, help="Dots per inch (This value is used for binarization)")
arg_parser.add_argument("-t", "--textdilation", action="store_false", help="Deactivate extra dilation for text")
arg_parser.add_argument("-q", "--quality", default=100, help="Compress quality of the image like jpg")
arg_parser.add_argument("-v", "--verbose", help="show ignored files", action="store_true")
args = arg_parser.parse_args()
def channelscaler(channel, value):
channel = cv2.multiply(channel, value)
    channel = np.where(channel > 255, 255, channel)  # clip any overflow to 255
return channel
# -i 4 -b 150 -d 10 good settings atm for 300 dpi
def subtractor(img, dilsize: int = 15, blursize: int = 59, kernelshape: str = "ellipse",
bluriter: int = 1, fix_blursize: bool = False, blurfilter: str = "Gaussian",
textdilation: bool = True, contrast: bool = False, verbose: bool = False):
"""
    The text in the image is removed, the background smoothed and then subtracted from the original image.
    :param img: input image as a numpy array
    :param dilsize: kernel size for the dilation that removes the text
    :param blursize: kernel size for the blur that smooths the background
    :param kernelshape: shape of the dilation kernel ("rect", "ellipse" or "cross")
    :param bluriter: number of blur iterations
    :param fix_blursize: keep the blur kernel size fixed instead of decreasing it
    :param blurfilter: blur filter to use ("Gaussian" or "Median")
    :param textdilation: apply an extra dilation step for the text
    :param contrast: contrast factor added back to the normalized image
    :param verbose: write intermediate images to disk for debugging
    :return: the background-subtracted image
"""
rgb_planes = cv2.split(img)
result_planes = []
# Only odd blurkernelsize are valid
blursize = blursize + 1 if blursize % 2 == 0 else blursize
for idx, plane in enumerate(rgb_planes[:3]):
dilated_img = plane
kshape = {"rect": cv2.MORPH_RECT, "ellipse": cv2.MORPH_ELLIPSE, "cross": cv2.MORPH_CROSS}.get(kernelshape,
cv2.MORPH_ELLIPSE)
# Reduce influence of the text by dilation (round kernel produce atm the best results)
if textdilation:
dil_kernel = cv2.getStructuringElement(kshape, (int(dilsize / 2), dilsize))
dilated_img = cv2.dilate(plane, dil_kernel, iterations=3)
dil_kernel = cv2.getStructuringElement(kshape, (int(dilsize / 2) + 1, dilsize + 1))
dilated_img = cv2.erode(dilated_img, dil_kernel, iterations=1)
else:
dil_kernel = cv2.getStructuringElement(kshape, (dilsize, dilsize))
dilated_img = cv2.dilate(dilated_img, dil_kernel)
bg_img = dilated_img
for ksize in np.linspace(blursize, 1, num=bluriter):
if not fix_blursize:
if blurfilter == "Gaussian":
bg_img = cv2.GaussianBlur(bg_img,
(int(ksize) + (1 + int(ksize) % 2), int(ksize) + (1 + int(ksize) % 2)), 0)
else:
bg_img = cv2.medianBlur(bg_img, (int(ksize) + (1 + int(ksize) % 2)))
else:
if blurfilter == "Gaussian":
bg_img = cv2.GaussianBlur(bg_img, (blursize, blursize), 0)
else:
bg_img = cv2.medianBlur(bg_img, blursize)
if verbose:
cv2.imwrite(f"Filtered_{idx}.jpg", bg_img)
cv2.imwrite(f"Dilate_{idx}.jpg", dilated_img)
# Subtract bg from fg
diff_img = 255 - cv2.absdiff(plane, bg_img)
norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
# Increases the contrast
if contrast:
diff_img = cv2.add(norm_img, plane * contrast, dtype=cv2.CV_8U)
# Normalize the final image to the range 0-255
norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
result_planes.append(norm_img)
return cv2.merge(result_planes)
def normalizer(img, norm_min: int = 0, norm_max: int = 255, norm_auto: bool = False):
"""
Normalizes the histogram of the image
    :param img: input image as a numpy array
    :param norm_min: values below this minimum are clipped before normalization
    :param norm_max: values above this maximum are clipped before normalization
    :param norm_auto: derive the clipping range automatically from the image
    :return: the normalized image
"""
rgb_planes = cv2.split(img)
result_planes = []
for idx, plane in enumerate(rgb_planes[:3]):
if norm_auto:
            # derive the clipping range from the image itself: ignore near-black pixels
            # when searching for the minimum and near-white pixels for the maximum
            auto_min = np.min(np.where(plane <= 25, 255, plane))
            auto_max = np.max(np.where(plane >= 220, 0, plane))
plane = np.where(plane <= auto_min, auto_min, plane)
plane = np.where(plane >= auto_max, auto_max, plane)
else:
plane = np.where(plane <= norm_min, norm_min, plane)
plane = np.where(plane >= norm_max, norm_max, plane)
norm_img = cv2.normalize(plane, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
result_planes.append(norm_img)
return cv2.merge(result_planes)
def main():
# Set filenames or path
if len(args.fname) == 1 and not args.fname[0].is_file():
args.fname = list(Path(args.fname[0]).rglob(f"*.{args.extension}"))
for fname in args.fname:
print(fname.name + " in process!")
try:
# Try to get dpi information
from PIL import Image
dpi = Image.open(fname).info['dpi']
args.dpi = np.mean(dpi, dtype=int)
print("DPI was set to:", args.dpi)
        except Exception:
pass
# Read image
img = cv2.imread(str(fname), -1)
resimg = img
# Channel scaler
if args.scale_channel != 'None' and len(img.shape) > 2:
if args.scale_channel in ['red', 'yellow', 'magenta']:
img[:, :, 0] = channelscaler(img[:, :, 0], args.scale_channel_value)
if args.scale_channel in ['green', 'yellow', 'cyan']:
img[:, :, 1] = channelscaler(img[:, :, 1], args.scale_channel_value)
if args.scale_channel in ['blue', 'magenta', 'cyan']:
img[:, :, 2] = channelscaler(img[:, :, 2], args.scale_channel_value)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Background normalizer
if args.normalize or args.normalize_only:
img = normalizer(img, args.normalize_min, args.normalize_max, args.normalize_auto)
# Background subtractor
if not args.normalize_only:
resimg = subtractor(img, dilsize=args.dilsize, blursize=args.blursize, kernelshape=args.kernelshape,
bluriter=args.bluriter, fix_blursize=args.fixblursize,
textdilation=args.textdilation, contrast=args.contrast, verbose=args.verbose)
# Image binarizer
if args.binarize:
DPI = args.dpi + 1 if args.dpi % 2 == 0 else args.dpi
resimg = resimg if len(resimg.shape) == 2 else cv2.cvtColor(resimg, cv2.COLOR_BGR2GRAY)
resimg = cv2.adaptiveThreshold(resimg, 255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY, DPI, int(DPI / 12))
args.extensionaddon = args.extensionaddon + ".bin"
# Output
fout = Path(args.outputfolder).absolute().joinpath(
            fname.name.rsplit(".", 1)[0] + f"{args.extensionaddon}.{args.extension}")
if not fout.parent.exists():
fout.parent.mkdir()
if args.replace:
cv2.imwrite(str(fname.resolve()), resimg)
elif args.extension == "jpg":
cv2.imwrite(str(fout.absolute()), resimg, [int(cv2.IMWRITE_JPEG_QUALITY), args.quality])
else:
cv2.imwrite(str(fout.absolute()), resimg)
print(str(fout) + " created!")
if __name__ == "__main__":
main()
| 2.953125
| 3
|
accounts/urls.py
|
imuno/Diploma
| 3
|
12785031
|
<gh_stars>1-10
from django.contrib import admin
from django.urls import path, include
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('/', views.login, name='login'),
] + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 1.585938
| 2
|
lifx.py
|
Noatz/lifx-api
| 0
|
12785032
|
import requests
from typing import Dict, List
class LIFX:
'''
docs: https://api.developer.lifx.com
selectors: https://api.developer.lifx.com/docs/selectors
'''
url = 'https://api.lifx.com'
def __init__(self, token):
self.headers = {
'Authorization': f'Bearer {token}',
'Content-Type': 'application/json'
}
def list_lights(self, selector: str = 'all'):
'''
Args:
selector = what lights to list
Returns:
response object
'''
response = requests.get(
url=f'{LIFX.url}/v1/lights/{selector}',
headers=self.headers
)
return response
def set_state(self, color: str, selector: str = 'all', power: str = 'on', brightness: float = 1, duration: float = 0, fast: bool = True):
'''
Args
selector = what lights to change
power = on|off
color = color to change state to
brightness = 0.0 - 1.0
duration = how long until state is full
fast = don't make checks and just change
Returns
response object
'''
response = requests.put(
url=f'{LIFX.url}/v1/lights/{selector}/state',
headers=self.headers,
json={
'power': power,
'color': color,
'brightness': brightness,
'duration': duration,
'fast': fast
}
)
return response
def set_states(self, states: List = [], defaults: Dict = {}, fast: bool = True):
'''
Args:
states = a list of state objects
defaults = default parameters for each state object
fast = don't make checks and just change
Returns:
response object
'''
response = requests.put(
url=f'{LIFX.url}/v1/lights/states',
headers=self.headers,
json={
'states': states,
'defaults': defaults,
'fast': fast
}
)
return response
def pulse_effect(self, color: str, selector: str = 'all', from_color: str = '', period: float = 2, cycles: float = 5, power_on: bool = True):
'''
Args:
color = the color for the effect
from_color = the color to start the effect from
period = time in seconds for one cycle
cycles = number of times to repeat
power_on = turn on the light if not already on
Returns:
response object
'''
response = requests.post(
url=f'{LIFX.url}/v1/lights/{selector}/effects/pulse',
headers=self.headers,
json={
'color': color,
'from_color': from_color,
'period': period,
'cycles': cycles,
'power_on': power_on
}
)
return response
def effects_off(self, selector: str = 'all', power_off: bool = False):
'''
Args:
power_off = also turn the lights off
Returns:
response object
'''
response = requests.post(
url=f'{LIFX.url}/v1/lights/{selector}/effects/off',
headers=self.headers,
json={'power_off': power_off}
)
return response
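# Minimal usage sketch (illustrative only): the token and color values are
# placeholders; see the API docs linked in the class docstring for real values.
if __name__ == '__main__':
    lifx = LIFX(token='<your-lifx-token>')
    print(lifx.list_lights().status_code)
    lifx.set_state(color='blue', selector='all', brightness=0.5)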
| 2.84375
| 3
|
tests/test_gosubdag_relationships.py
|
flying-sheep/goatools
| 477
|
12785033
|
#!/usr/bin/env python
"""Plot both the standard 'is_a' field and the optional 'part_of' relationship."""
from __future__ import print_function
__copyright__ = "Copyright (C) 2016-2018, <NAME>, <NAME>, All rights reserved."
import os
import sys
import timeit
import datetime
from goatools.base import download_go_basic_obo
from goatools.obo_parser import GODag
from goatools.gosubdag.gosubdag import GoSubDag
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../..")
def test_gosubdag_relationships(prt=sys.stdout):
"""Plot both the standard 'is_a' field and the 'part_of' relationship."""
goids = set([
"GO:0032501",
"GO:0044707", # alt_id: GO:0032501 # BP 1011 L01 D01 B multicellular organismal process
"GO:0050874",
"GO:0007608", # sensory perception of smell
"GO:0050911"]) # detection of chemical stimulus involved in sensory perception of smell
# Load GO-DAG: Load optional 'relationship'
fin_obo = os.path.join(REPO, "go-basic.obo")
download_go_basic_obo(fin_obo, prt, loading_bar=None)
go2obj_plain = GODag(fin_obo)
go2obj_relat = GODag(fin_obo, optional_attrs=['relationship'])
print("\nCreate GoSubDag with GO DAG containing no relationships.")
tic = timeit.default_timer()
# Create Plot object; Plot both 'is_a' and optional 'part_of' relationship
gosubdag = GoSubDag(goids, go2obj_plain, relationships=False, prt=prt)
# gosubdag.prt_goids(gosubdag.go2obj)
goids_plain = set(gosubdag.go2obj)
tic = _rpt_hms(tic, len(gosubdag.go2obj))
print("\nCreate GoSubDag while IGNORING relationships")
# Create Plot object; Plot both 'is_a' and optional 'part_of' relationship
gosubdag = GoSubDag(goids, go2obj_relat, relationships=False, prt=prt)
# gosubdag.prt_goids(gosubdag.go2obj)
goids_false = set(gosubdag.go2obj)
tic = _rpt_hms(tic, len(gosubdag.go2obj))
assert goids_plain == goids_false
print("\nCreate GoSubDag while loading only the 'part_of' relationship")
gosubdag = GoSubDag(goids, go2obj_relat, relationships=['part_of'], prt=prt)
# gosubdag.prt_goids(gosubdag.go2obj)
goids_part_of = set(gosubdag.go2obj)
tic = _rpt_hms(tic, len(gosubdag.go2obj))
assert goids_plain.intersection(goids_part_of) == goids_plain
assert len(goids_part_of) > len(goids_plain)
print("\nCreate GoSubDag while loading all relationships")
gosubdag = GoSubDag(goids, go2obj_relat, relationships=True, prt=prt)
# gosubdag.prt_goids(gosubdag.go2obj)
goids_true = set(gosubdag.go2obj)
tic = _rpt_hms(tic, len(gosubdag.go2obj))
assert goids_part_of.intersection(goids_true) == goids_part_of
assert len(goids_true) >= len(goids_part_of)
def _rpt_hms(tic, num_goids):
"""Report the elapsed time for particular events."""
elapsed_time = str(datetime.timedelta(seconds=(timeit.default_timer()-tic)))
print("Elapsed HMS: {HMS} {N} GO IDs".format(HMS=elapsed_time, N=num_goids))
return timeit.default_timer()
if __name__ == '__main__':
test_gosubdag_relationships()
# Copyright (C) 2016-2018, <NAME>, <NAME>, All rights reserved.
| 2.40625
| 2
|
pystatreduce/optimize/OAS_Example1/pyopt_oas_problem.py
|
OptimalDesignLab/pyStatReduce
| 0
|
12785034
|
<filename>pystatreduce/optimize/OAS_Example1/pyopt_oas_problem.py
# pyopt_oas_problem.py
# The following file contains the optimization of the deterministic problem shown
# in the quick example within OpenAeroStruct tutorial, the difference being,
# pyOptSparse is called separately, whereas the Quick example uses a driver.
import numpy as np
import chaospy as cp
# Import the OpenMDAo shenanigans
from openmdao.api import IndepVarComp, Problem, Group, NewtonSolver, \
ScipyIterativeSolver, LinearBlockGS, NonlinearBlockGS, \
    DirectSolver, PetscKSP, SqliteRecorder
from openaerostruct.geometry.utils import generate_mesh
from openaerostruct.geometry.geometry_group import Geometry
from openaerostruct.aerodynamics.aero_groups import AeroPoint
import pyoptsparse
class OASExample1Opt(object):
"""
Trial test to see if I can ge the optimization running using my framework
"""
def __init__(self):
# Create a dictionary to store options about the mesh
mesh_dict = {'num_y' : 7,
'num_x' : 2,
'wing_type' : 'CRM',
'symmetry' : True,
'num_twist_cp' : 5}
# Generate the aerodynamic mesh based on the previous dictionary
mesh, twist_cp = generate_mesh(mesh_dict)
# Create a dictionary with info and options about the aerodynamic
# lifting surface
surface = {
# Wing definition
'name' : 'wing', # name of the surface
'type' : 'aero',
'symmetry' : True, # if true, model one half of wing
# reflected across the plane y = 0
'S_ref_type' : 'wetted', # how we compute the wing area,
# can be 'wetted' or 'projected'
'fem_model_type' : 'tube',
'twist_cp' : twist_cp,
'mesh' : mesh,
'num_x' : mesh.shape[0],
'num_y' : mesh.shape[1],
# Aerodynamic performance of the lifting surface at
# an angle of attack of 0 (alpha=0).
# These CL0 and CD0 values are added to the CL and CD
# obtained from aerodynamic analysis of the surface to get
# the total CL and CD.
# These CL0 and CD0 values do not vary wrt alpha.
'CL0' : 0.0, # CL of the surface at alpha=0
'CD0' : 0.015, # CD of the surface at alpha=0
# Airfoil properties for viscous drag calculation
'k_lam' : 0.05, # percentage of chord with laminar
# flow, used for viscous drag
't_over_c_cp' : np.array([0.15]), # thickness over chord ratio (NACA0015)
'c_max_t' : .303, # chordwise location of maximum (NACA0015)
# thickness
'with_viscous' : True, # if true, compute viscous drag
'with_wave' : False, # if true, compute wave drag
}
# Create the OpenMDAO problem
self.prob = Problem()
# Create an independent variable component that will supply the flow
# conditions to the problem.
indep_var_comp = IndepVarComp()
indep_var_comp.add_output('v', val=248.136, units='m/s')
indep_var_comp.add_output('alpha', val=5., units='deg')
indep_var_comp.add_output('Mach_number', val=0.84)
indep_var_comp.add_output('re', val=1.e6, units='1/m')
indep_var_comp.add_output('rho', val=0.38, units='kg/m**3')
indep_var_comp.add_output('cg', val=np.zeros((3)), units='m')
# Add this IndepVarComp to the problem model
self.prob.model.add_subsystem('prob_vars',
indep_var_comp,
promotes=['*'])
# Create and add a group that handles the geometry for the
# aerodynamic lifting surface
geom_group = Geometry(surface=surface)
self.prob.model.add_subsystem(surface['name'], geom_group)
# Create the aero point group, which contains the actual aerodynamic
# analyses
aero_group = AeroPoint(surfaces=[surface])
point_name = 'aero_point_0'
self.prob.model.add_subsystem(point_name, aero_group,
promotes_inputs=['v', 'alpha', 'Mach_number', 're', 'rho', 'cg'])
name = surface['name']
# Connect the mesh from the geometry component to the analysis point
self.prob.model.connect(name + '.mesh', point_name + '.' + name + '.def_mesh')
# Perform the connections with the modified names within the
# 'aero_states' group.
self.prob.model.connect(name + '.mesh', point_name + '.aero_states.' + name + '_def_mesh')
self.prob.model.connect(name + '.t_over_c', point_name + '.' + name + '_perf.' + 't_over_c')
self.prob.setup()
# print "wing.twist_cp", self.prob['wing.twist_cp']
def objfunc(xdict):
dv = xdict['xvars']
QoI.prob['wing.twist_cp'] = dv
QoI.prob.run_model()
funcs = {}
funcs['obj'] = QoI.prob['aero_point_0.wing_perf.CD']
conval = np.zeros(1)
conval[0] = QoI.prob['aero_point_0.wing_perf.CL']
funcs['con'] = conval
fail = False
return funcs, fail
def sens(xdict, funcs):
dv = xdict['xvars']
QoI.prob['wing.twist_cp'] = dv
QoI.prob.run_model()
deriv = QoI.prob.compute_totals(of=['aero_point_0.wing_perf.CD', 'aero_point_0.wing_perf.CL'], wrt=['wing.twist_cp'])
funcsSens = {}
funcsSens['obj', 'xvars'] = deriv['aero_point_0.wing_perf.CD', 'wing.twist_cp']
funcsSens['con', 'xvars'] = deriv['aero_point_0.wing_perf.CL', 'wing.twist_cp']
fail = False
return funcsSens, fail
if __name__ == "__main__":
# Lets do this
QoI = OASExample1Opt()
optProb = pyoptsparse.Optimization('OASExample1', objfunc)
optProb.addVarGroup('xvars', 5, 'c', lower=-10., upper=15.)
optProb.addConGroup('con', 1, lower=0.5, upper=0.5)
optProb.addObj('obj')
# print optProb
# opt = pyoptsparse.SLSQP(tol=1.e-9)
opt = pyoptsparse.SNOPT(optOptions = {'Major feasibility tolerance' : 1e-10})
sol = opt(optProb, sens=sens)
    print(sol)
| 2.609375
| 3
|
update.py
|
QuarkSources/quarksources.github.io
| 34
|
12785035
|
<gh_stars>10-100
from sourceUtil import AltSourceManager, AltSourceParser, GithubParser, Unc0verParser
sourcesData = [
{
"parser": AltSourceParser,
"kwargs": {"filepath": "https://altstore.oatmealdome.me"},
"ids": ["me.oatmealdome.dolphinios-njb", "me.oatmealdome.DolphiniOS-njb-patreon-beta"]
},
{
"parser": AltSourceParser,
"kwargs": {"filepath": "quarksource.json"},
"ids": ["com.libretro.dist.ios.RetroArch", "com.louisanslow.record", "org.scummvm.scummvm", "net.namedfork.minivmac", "com.dry05.filzaescaped11-12", "com.virtualapplications.play"]
},
{
"parser": AltSourceParser,
"kwargs": {"filepath": "https://provenance-emu.com/apps.json"},
"ids": ["org.provenance-emu.provenance"]
},
{
"parser": AltSourceParser,
"kwargs": {"filepath": "https://theodyssey.dev/altstore/odysseysource.json"},
"ids": ["org.coolstar.odyssey"]
},
{
"parser": GithubParser,
"kwargs": {"repo_author": "Odyssey-Team", "repo_name": "Taurine"},
#"kwargs": {"filepath": "https://taurine.app/altstore/taurinestore.json"},
"ids": ["com.odysseyteam.taurine"]
},
{
"parser": AltSourceParser,
"kwargs": {"filepath": "https://alt.getutm.app"},
"ids": ["com.utmapp.UTM", "com.utmapp.UTM-SE"]
},
{
"parser": AltSourceParser,
"kwargs": {"filepath": "https://demo.altstore.io"},
"ids": ["com.rileytestut.GBA4iOS"]
},
{
"parser": GithubParser,
"kwargs": {"repo_author": "iNDS-Team", "repo_name": "iNDS"},
"ids": ["net.nerd.iNDS"]
},
{
"parser": GithubParser,
"kwargs": {"repo_author": "yoshisuga", "repo_name": "MAME4iOS"},
"ids": ["com.example.mame4ios"]
},
{
"parser": GithubParser,
"kwargs": {"repo_author": "brandonplank", "repo_name": "flappybird"},
"ids": ["org.brandonplank.flappybird15"]
},
{
"parser": GithubParser,
"kwargs": {"repo_author": "Wh0ba", "repo_name": "XPatcher"},
"ids": ["com.wh0ba.xpatcher"]
},
{
"parser": GithubParser,
"kwargs": {"repo_author": "litchie", "repo_name": "dospad"},
"ids": ["com.litchie.idosgames"]
},
{
"parser": GithubParser,
"kwargs": {"repo_author": "mariolopjr", "repo_name": "ppsspp-builder"},
"ids": ["org.ppsspp.ppsspp"]
},
{
"parser": Unc0verParser,
"kwargs": {"url": "https://unc0ver.dev/releases.json"},
"ids": ["science.xnu.undecimus"]
}
]
alternateAppData = {
"org.ppsspp.ppsspp": {
"tintColor": "#21486b",
"subtitle": "PlayStation Portable games on iOS.",
"screenshotURLs": [
"https://i.imgur.com/CWl6GgH.png",
"https://i.imgur.com/SxmN1M0.png",
"https://i.imgur.com/sGWgR6z.png",
"https://i.imgur.com/AFKTdmZ.png"
],
"iconURL": "https://i.imgur.com/JP0Fncv.png"
},
"com.rileytestut.GBA4iOS": {
"iconURL": "https://i.imgur.com/SBrqO9g.png",
"screenshotURLs": [
"https://i.imgur.com/L4H0yM3.png",
"https://i.imgur.com/UPGYLVr.png",
"https://i.imgur.com/sWpUAii.png",
"https://i.imgur.com/UwnDXRc.png"
]
},
"org.provenance-emu.provenance": {
"localizedDescription": "Provenance is a multi-system emulator frontend for a plethora of retro gaming systems. You can keep all your games in one place, display them with cover art, and play to your heart's content.\n\nSystems Supported:\n\n• Atari\n - 2600\n - 5200\n - 7800\n - Lynx\n - Jaguar\n• Bandai\n - WonderSwan / WonderSwan Color\n• NEC\n - PC Engine / TurboGrafx-16 (PCE/TG16)\n - PC Engine Super CD-ROM² System / TurboGrafx-CD\n - PC Engine SuperGrafx\n - PC-FX\n• Nintendo\n - Nintendo Entertainment System / Famicom (NES/FC)\n - Famicom Disk System\n - Super Nintendo Entertainment System / Super Famicom (SNES/SFC)\n - Game Boy / Game Boy Color (GB/GBC)\n - Virtual Boy\n - Game Boy Advance (GBA)\n - Pokémon mini\n• Sega\n - SG-1000\n - Master System\n - Genesis / Mega Drive\n - Game Gear\n - CD / MegaCD\n - 32X\n• SNK\n - Neo Geo Pocket / Neo Geo Pocket Color\n• Sony\n - PlayStation (PSX/PS1)",
"tintColor": "#1c7cf3",
"permissions": [
{
"type": "camera",
"usageDescription": "Used for album artwork."
},
{
"type": "photos",
"usageDescription": "Provenance can set custom artworks from your photos or save screenshots to your photos library."
},
{
"type": "music",
"usageDescription": "This will let you play your imported music on Spotify."
},
{
"type": "bluetooth",
"usageDescription": "Provenance uses Bluetooth to support game controllers."
},
{
"type": "background-fetch",
"usageDescription": "Provenance can continue running while in the background."
},
{
"type": "background-audio",
"usageDescription": "Provenance can continue playing game audio while in the background."
}
]
}
}
quantumsrc = AltSourceManager("quantumsource.json", sourcesData, alternateAppData, prettify=False) # if prettify is true, output will have indents and newlines
try:
quantumsrc.update()
except Exception as err:
print("Unable to update Quantum Source.")
print(f"{type(err).__name__}: {str(err)}")
sourcesData = [
{
"parser": AltSourceParser,
"kwargs": {"filepath": "https://apps.altstore.io"},
"ids": ["com.rileytestut.AltStore", "com.rileytestut.AltStore.Beta", "com.rileytestut.Delta", "com.rileytestut.Delta.Beta", "com.rileytestut.Clip", "com.rileytestut.Clip.Beta"],
"getAllNews": True
},
{
"parser": AltSourceParser,
"kwargs": {"filepath": "https://alpha.altstore.io"},
"ids": ["com.rileytestut.AltStore.Alpha", "com.rileytestut.Delta.Alpha"],
"getAllNews": True
}
]
alternateAppData = {
"com.rileytestut.AltStore.Beta": {
"name": "AltStore (Beta)",
"beta": False
},
"com.rileytestut.Delta.Beta": {
"name": "Delta (Beta)",
"beta": False
},
"com.rileytestut.Clip.Beta": {
"name": "Clip (Beta)",
"beta": False
}
}
alt_complete = AltSourceManager("altstore-complete.json", sourcesData, alternateAppData, prettify=False)
try:
alt_complete.update()
except Exception as err:
print("Unable to update AltStore Complete.")
print(f"{type(err).__name__}: {str(err)}")
sourcesData = [
{
"parser": AltSourceParser,
"kwargs": {"filepath": "quarksource++.json"},
"ids": ["com.crunchyroll.iphone", "com.duolingo.DuolingoMobile", "com.deezer.Deezer", "com.spotify.client", "syto203.reddit.pp", "com.antique.Popcorn-iOS", "mediaboxhd.event.2", "comicreader.net.ios", "com.channelsapp.channels", "com.burbn.instagram", "com.Lema.Michael.InstagramApp", "net.whatsapp.WhatsApp", "com.hotdog.popcorntime81.ios", "tv.twitch"]
},
{
"parser": AltSourceParser,
"kwargs": {"filepath": "quarksource-cracked.json"},
"ids": ["com.grailr.CARROTweather", "com.wolframalpha.wolframalpha", "com.firecore.infuse", "com.stey", "com.luma-touch.LumaFusion"]
},
{
"parser": AltSourceParser,
"kwargs": {"filepath": "https://9ani.app/api/altstore"},
"ids": ["com.marcuszhou.NineAnimator"]
},
{
"parser": GithubParser,
"kwargs": {"repo_author": "Paperback-iOS", "repo_name": "app", "ver_parse": lambda x: x.replace("-r", ".").lstrip("v")}, #ver_parse allows you to specify how to github tag should be processed before comparing to AltStore version tags
"ids": ["com.faizandurrani.paperback.ios"]
},
{
"parser": GithubParser,
"kwargs": {"repo_author": "tatsuz0u", "repo_name": "EhPanda"},
"ids": ["app.ehpanda"]
},
{
"parser": AltSourceParser,
"kwargs": {"filepath": "https://raw.githubusercontent.com/ytjailed/ytjailed.github.io/main/apps.json"},
"ids": ["com.google.ios.youtube", "com.google.ios.youtube.cercube", "com.atebits.Tweetie2"]
}
]
alternateAppData = {
"com.google.ios.youtube.cercube": {
"tintColor": "#1E47D0",
"permissions": [
{
"type": "camera",
"usageDescription": "This lets you create videos using the app."
},
{
"type": "photos",
"usageDescription": "This lets you upload videos you've already created."
},
{
"type": "bluetooth",
"usageDescription": "YouTube needs bluetooth access to scan for nearby Cast devices."
},
{
"type": "contacts",
"usageDescription": "Your contacts will be sent to YouTube servers to help you find friends to share videos with."
},
{
"type": "network",
"usageDescription": "Access to your network allows YouTube to discover and connect to devices such as your TV."
},
{
"type": "music",
"usageDescription": "This lets you add your own audio files to your videos."
},
{
"type": "microphone",
"usageDescription": "This lets you include audio with your videos and search using your voice."
},
{
"type": "location",
"usageDescription": "Makes it easier for you to attach location information to your videos and live streams and allows for features such as improved recommendations and ads."
},
{
"type": "background-fetch",
"usageDescription": "YouTube can continue running while in the background."
},
{
"type": "background-audio",
"usageDescription": "YouTube can continue playing audio while in the background."
}
]
},
"com.google.ios.youtube": {
"permissions": [
{
"type": "camera",
"usageDescription": "This lets you create videos using the app."
},
{
"type": "photos",
"usageDescription": "This lets you upload videos you've already created."
},
{
"type": "bluetooth",
"usageDescription": "YouTube needs bluetooth access to scan for nearby Cast devices."
},
{
"type": "contacts",
"usageDescription": "Your contacts will be sent to YouTube servers to help you find friends to share videos with."
},
{
"type": "network",
"usageDescription": "Access to your network allows YouTube to discover and connect to devices such as your TV."
},
{
"type": "music",
"usageDescription": "This lets you add your own audio files to your videos."
},
{
"type": "microphone",
"usageDescription": "This lets you include audio with your videos and search using your voice."
},
{
"type": "location",
"usageDescription": "Makes it easier for you to attach location information to your videos and live streams and allows for features such as improved recommendations and ads."
},
{
"type": "background-fetch",
"usageDescription": "YouTube can continue running while in the background."
},
{
"type": "background-audio",
"usageDescription": "YouTube can continue playing audio while in the background."
}
]
}
}
quantumsrc_plus = AltSourceManager("quantumsource++.json", sourcesData, alternateAppData, prettify=False)
try:
quantumsrc_plus.update()
except Exception as err:
print("Unable to update Quantum Source Plus.")
print(f"{type(err).__name__}: {str(err)}")
| 1.617188
| 2
|
logger.py
|
tommyvsfu1/ADL2019_ReinforcementLearning
| 1
|
12785036
|
import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
# Writer will output to ./runs/ directory by default
class TensorboardLogger(object):
def __init__(self, dir):
"""Create a summary writer logging to log_dir."""
self.writer = SummaryWriter(log_dir=dir)
self.time_s = 0
def scalar_summary(self, tag, value, t=-1):
if t == -1:
self.writer.add_scalar(tag, value, global_step=self.time_s)
else :
self.writer.add_scalar(tag, value, global_step=t)
def histogram_summary(self, tag, tensor, t=-1):
if t == -1:
self.writer.add_histogram(tag, tensor, global_step=self.time_s)
else :
self.writer.add_histogram(tag, tensor, global_step=t)
def logger_close(self):
self.writer.close()
def update(self):
self.time_s += 1
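# Minimal usage sketch (illustrative only): logs a few dummy scalars to a
# placeholder run directory, then closes the writer.
if __name__ == "__main__":
    logger = TensorboardLogger("./runs/demo")
    for reward in (0.1, 0.5, 0.9):
        logger.scalar_summary("reward", reward)
        logger.update()
    logger.logger_close()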
| 2.9375
| 3
|
app/utils/generator.py
|
hpi-studyu/analysis-project-generator
| 1
|
12785037
|
import json
import os
from dotenv import load_dotenv
from utils.copier import generate_files_from_template
from utils.gitlab_service import GitlabService
from utils.study_data import fetch_new_study_data
from utils.supabase_service import SupabaseService
load_dotenv()
def generate_repo(sbs: SupabaseService, gs: GitlabService, study_id: str):
study = sbs.fetch_study(study_id)
generate_files_from_template(
study_title=study["title"], path=os.environ.get("PROJECT_PATH")
)
project = gs.create_project(study["title"])
print(f"Created project: {project.name}")
generate_initial_commit(gs, project)
print("Committed initial files")
fetch_new_study_data(sbs, gs, study_id, project, 'create')
print("Fetched newest study data")
return project.id
def generate_initial_commit(gs, project):
commit_actions = [
commit_action(file_path, os.environ.get("PROJECT_PATH"))
for file_path in walk_generated_project(os.environ.get("PROJECT_PATH"))
]
return gs.make_commit(
project=project,
message="Generated project from copier-studyu\n\nhttps://github.com/hpi-studyu/copier-studyu",
actions=commit_actions,
)
def commit_action(file_path: str, project_path: str, action: str = "create"):
return {
"action": action,
"file_path": file_path,
"content": open(os.path.join(project_path, file_path)).read(),
}
def walk_generated_project(project_path):
for root, dirs, files in os.walk(project_path):
for name in files:
yield os.path.relpath(os.path.join(root, name), project_path).replace(
os.sep, "/"
)
| 2.28125
| 2
|
nogi/endpoints.py
|
Cooomma/nogi-backup-blog
| 0
|
12785038
|
import requests
HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
}
# Bilibili Videos Backup
def get_channel_info(member_id: int):
return requests.get(
url='https://api.bilibili.com/x/space/acc/info',
params=dict(mid=member_id, jsonp='jsonp'),
headers=HEADERS,).json()
def get_videos(member_id: int, page_num: int = 1, page_size: int = 30, keyword: str = None) -> list:
return requests.get(
url='https://space.bilibili.com/ajax/member/getSubmitVideos',
params=dict(
mid=member_id, pagesize=page_size, tid=0,
page=page_num, keyword=keyword, order='pubdate'),
headers=HEADERS,).json()
# Nogizaka Web Archive Backup
def get_web_archive_archive_snapshot_url(member_sub: str) -> str:
return requests.get(
url='http://web.archive.org/web/timemap/link/http://blog.nogizaka46.com/{}/'.format(member_sub),
headers=HEADERS).text
# Nogizaka Official Blog
def get_nogi_official_archives_html(roma_name: str) -> str:
return requests.get(url='http://blog.nogizaka46.com/{}/'.format(roma_name), headers=HEADERS).text
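# Minimal usage sketch (illustrative only): the member id and blog slug below
# are placeholders, not real values from the original project.
if __name__ == '__main__':
    print(get_channel_info(member_id=123456))
    print(get_nogi_official_archives_html(roma_name='example.member')[:200])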
| 2.25
| 2
|
deep_utils/vision/face_detection/haarcascade/cv2/haarcascade_cv2_face_detection.py
|
dornasabet/deep_utils
| 36
|
12785039
|
from deep_utils.vision.face_detection.main import FaceDetector
from deep_utils.utils.lib_utils.lib_decorators import get_from_config, expand_input, get_elapsed_time, rgb2bgr
from deep_utils.utils.lib_utils.download_utils import download_decorator
from deep_utils.utils.box_utils.boxes import Box, Point
from .config import Config
class HaarcascadeCV2FaceDetector(FaceDetector):
def __init__(self, **kwargs):
super().__init__(name=self.__class__.__name__,
file_path=__file__,
download_variables=("haarcascade_frontalface",
"haarcascade_eye",
"haarcascade_nose",
"landmarks"),
**kwargs)
self.config: Config
@download_decorator
def load_model(self):
import cv2
face_detector = cv2.CascadeClassifier(self.config.haarcascade_frontalface)
nose_detector = cv2.CascadeClassifier(self.config.haarcascade_nose)
eye_detector = cv2.CascadeClassifier(self.config.haarcascade_eye)
landmark_detector = cv2.face.createFacemarkLBF()
landmark_detector.loadModel(self.config.landmarks)
self.model = dict(face=face_detector, nose=nose_detector, eye=eye_detector, landmarks=landmark_detector)
@get_elapsed_time
@expand_input(3)
@get_from_config
@rgb2bgr('gray')
def detect_faces(self,
img,
is_rgb,
scaleFactor=None,
minNeighbors=None,
minSize=None,
maxSize=None,
flags=None,
get_landmarks=True,
get_nose=True,
get_eye=True,
get_time=False,
):
boxes, landmarks_, confidences, eye_poses, nose_poses = [], [], [], [], []
for image in img:
faces = self.model['face'].detectMultiScale(image,
scaleFactor=scaleFactor,
minNeighbors=minNeighbors,
minSize=minSize,
maxSize=maxSize,
flags=flags)
boxes.append(Box.box2box(faces, in_source='CV', to_source='Numpy', in_format='XYWH', to_format='XYXY'))
if get_nose:
nose_pos = self.model['nose'].detectMultiScale(image,
scaleFactor=scaleFactor,
minNeighbors=minNeighbors,
minSize=minSize,
maxSize=maxSize,
flags=flags)
nose_pos = Point.point2point(nose_pos, in_source='CV', to_source='Numpy')
nose_poses.append(nose_pos)
if get_eye:
eye_pos = self.model['eye'].detectMultiScale(image,
scaleFactor=scaleFactor,
minNeighbors=minNeighbors,
minSize=minSize,
maxSize=maxSize,
flags=flags)
eye_pos = Point.point2point(eye_pos, in_source='CV', to_source='Numpy')
eye_poses.append(eye_pos)
if len(faces) != 0 and get_landmarks:
_, landmarks = self.model['landmarks'].fit(image, faces)
landmarks = [Point.point2point(face_landmarks[0].tolist(),
in_source='CV', to_source='Numpy') for face_landmarks in landmarks]
landmarks_.append(landmarks)
return dict(boxes=boxes,
confidences=confidences,
landmarks=landmarks_,
eye_poses=eye_poses,
nose_poses=nose_poses)
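# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how HaarcascadeCV2FaceDetector might be used on a single image.
# The image path "sample.jpg" is a placeholder, instantiating the detector
# with no arguments and relying on the default config for scaleFactor /
# minNeighbors is an assumption, and the exact return shape may differ
# depending on the decorators applied to detect_faces; consult the
# deep_utils documentation for the authoritative calling convention.
if __name__ == "__main__":
    import cv2
    detector = HaarcascadeCV2FaceDetector()
    image = cv2.imread("sample.jpg")  # hypothetical input image (BGR)
    result = detector.detect_faces(image, is_rgb=False)
    # The dict built above contains boxes, confidences, landmarks,
    # eye_poses and nose_poses.
    print(result)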
| 2.328125
| 2
|
test/test_vuelos_entrantes_aena_controller.py
|
SergioCMDev/Busines-Inteligence-applied-to-tourism
| 0
|
12785040
|
<reponame>SergioCMDev/Busines-Inteligence-applied-to-tourism
# coding: utf-8
from __future__ import absolute_import
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestVuelosEntrantesAenaController(BaseTestCase):
""" VuelosEntrantesAenaController integration test stubs """
def test_obtener_cantidad_anio_ciudad(self):
"""
Test case for obtener_cantidad_anio_ciudad
        Get the number of inbound flights for a city of the country, broken down by month
"""
query_string = [('CiudadDestino', 'Valencia'),
('Anio', 2009)]
response = self.client.open('/server/Aena/VuelosEntrantes/ObtenerCantidadAnioCiudad/{PaisDestino}'.format(PaisDestino='PaisDestino_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_obtener_cantidad_anio_por_ciudades(self):
"""
Test case for obtener_cantidad_anio_por_ciudades
        Get the number of inbound flights for the cities of a country during a given year
"""
query_string = [('Anio', 2009)]
response = self.client.open('/server/Aena/VuelosEntrantes/ObtenerCantidadAnioPorCiudades/{PaisDestino}'.format(PaisDestino='PaisDestino_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_obtener_cantidad_anualmente(self):
"""
Test case for obtener_cantidad_anualmente
        Get the yearly number of inbound flights for a destination country over a range of years
"""
query_string = [('AnioInicio', 2009),
('AnioFin', 2015)]
response = self.client.open('/server/Aena/VuelosEntrantes/ObtenerCantidadAnualmente/{PaisDestino}'.format(PaisDestino='PaisDestino_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_obtener_cantidad_ciudad(self):
"""
Test case for obtener_cantidad_ciudad
        Get the number of inbound flights for a city of a country during the selected range of years
"""
query_string = [('CiudadDestino', 'Valencia'),
('AnioInicio', 2009),
('AnioFin', 2015)]
response = self.client.open('/server/Aena/VuelosEntrantes/ObtenerCantidadCiudad/{PaisDestino}'.format(PaisDestino='PaisDestino_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_obtener_cantidad_entrantes_por_ciudades(self):
"""
Test case for obtener_cantidad_entrantes_por_ciudades
        Get the number of inbound flights for the cities of a country over a range of years
"""
query_string = [('AnioInicio', 2009),
('AnioFin', 2015)]
response = self.client.open('/server/Aena/VuelosEntrantes/ObtenerCantidadPorCiudades/{PaisDestino}'.format(PaisDestino='PaisDestino_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_obtener_cantidad_mensual(self):
"""
Test case for obtener_cantidad_mensual
        Get the number of inbound flights for each month of a given year
"""
query_string = [('Anio', 2009)]
response = self.client.open('/server/Aena/VuelosEntrantes/ObtenerCantidadMensual/{PaisDestino}'.format(PaisDestino='PaisDestino_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_obtener_cantidad_mensualmente(self):
"""
Test case for obtener_cantidad_mensualmente
        Get the monthly number of inbound flights for a destination country over a range of years
"""
query_string = [('AnioInicio', 2009),
('AnioFin', 2015)]
response = self.client.open('/server/Aena/VuelosEntrantes/ObtenerCantidadMensualmente/{PaisDestino}'.format(PaisDestino='PaisDestino_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_obtener_cantidad_mes(self):
"""
Test case for obtener_cantidad_mes
        Get the number of inbound flights during the same month across a range of years for the destination country
"""
query_string = [('Mes', 'Enero'),
('AnioInicio', 2009),
('AnioFin', 2015)]
response = self.client.open('/server/Aena/VuelosEntrantes/ObtenerCantidadMes/{PaisDestino}'.format(PaisDestino='PaisDestino_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_obtener_cantidad_mes_anio_por_ciudades(self):
"""
Test case for obtener_cantidad_mes_anio_por_ciudades
        Get the number of inbound flights for the cities of the country during the selected month and year
"""
query_string = [('Anio', 2009),
('Mes', 'Enero')]
response = self.client.open('/server/Aena/VuelosEntrantes/ObtenerCantidadMesAnioPorCiudades/{PaisDestino}'.format(PaisDestino='PaisDestino_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_obtener_cantidad_mes_ciudad(self):
"""
Test case for obtener_cantidad_mes_ciudad
        Get the number of inbound flights for a city of the country during the selected month across the selected range of years
"""
query_string = [('CiudadDestino', 'Valencia'),
('Mes', 'Enero'),
('AnioInicio', 2009),
('AnioFin', 2015)]
response = self.client.open('/server/Aena/VuelosEntrantes/ObtenerCantidadMesCiudad/{PaisDestino}'.format(PaisDestino='PaisDestino_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_obtener_cantidad_mes_por_ciudades(self):
"""
Test case for obtener_cantidad_mes_por_ciudades
        Get the number of inbound flights for the cities of a country during the same month across a range of years
"""
query_string = [('AnioInicio', 2009),
('AnioFin', 2015),
('Mes', 'Enero')]
response = self.client.open('/server/Aena/VuelosEntrantes/ObtenerCantidadMesPorCiudades/{PaisDestino}'.format(PaisDestino='PaisDestino_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
| 2.6875
| 3
|
sdk/python/tests/integration/online_store/test_universal_online.py
|
marsishandsome/feast
| 1
|
12785041
|
import random
import unittest
import pandas as pd
from tests.integration.feature_repos.test_repo_configuration import (
Environment,
parametrize_online_test,
)
@parametrize_online_test
def test_online_retrieval(environment: Environment):
fs = environment.feature_store
full_feature_names = environment.test_repo_config.full_feature_names
sample_drivers = random.sample(environment.driver_entities, 10)
drivers_df = environment.driver_df[
environment.driver_df["driver_id"].isin(sample_drivers)
]
sample_customers = random.sample(environment.customer_entities, 10)
customers_df = environment.customer_df[
environment.customer_df["customer_id"].isin(sample_customers)
]
entity_rows = [
{"driver": d, "customer_id": c}
for (d, c) in zip(sample_drivers, sample_customers)
]
feature_refs = [
"driver_stats:conv_rate",
"driver_stats:avg_daily_trips",
"customer_profile:current_balance",
"customer_profile:avg_passenger_count",
"customer_profile:lifetime_trip_count",
]
unprefixed_feature_refs = [f.rsplit(":", 1)[-1] for f in feature_refs]
online_features = fs.get_online_features(
features=feature_refs,
entity_rows=entity_rows,
full_feature_names=full_feature_names,
)
assert online_features is not None
keys = online_features.to_dict().keys()
assert (
len(keys) == len(feature_refs) + 2
) # Add two for the driver id and the customer id entity keys.
for feature in feature_refs:
if full_feature_names:
assert feature.replace(":", "__") in keys
else:
assert feature.rsplit(":", 1)[-1] in keys
assert "driver_stats" not in keys and "customer_profile" not in keys
online_features_dict = online_features.to_dict()
tc = unittest.TestCase()
for i, entity_row in enumerate(entity_rows):
df_features = get_latest_feature_values_from_dataframes(
drivers_df, customers_df, entity_row
)
assert df_features["customer_id"] == online_features_dict["customer_id"][i]
assert df_features["driver_id"] == online_features_dict["driver_id"][i]
for unprefixed_feature_ref in unprefixed_feature_refs:
tc.assertEqual(
df_features[unprefixed_feature_ref],
online_features_dict[
response_feature_name(unprefixed_feature_ref, full_feature_names)
][i],
)
# Check what happens for missing values
missing_responses_dict = fs.get_online_features(
features=feature_refs,
entity_rows=[{"driver": 0, "customer_id": 0}],
full_feature_names=full_feature_names,
).to_dict()
assert missing_responses_dict is not None
for unprefixed_feature_ref in unprefixed_feature_refs:
tc.assertIsNone(
missing_responses_dict[
response_feature_name(unprefixed_feature_ref, full_feature_names)
][0]
)
def response_feature_name(feature: str, full_feature_names: bool) -> str:
if (
feature in {"current_balance", "avg_passenger_count", "lifetime_trip_count"}
and full_feature_names
):
return f"customer_profile__{feature}"
if feature in {"conv_rate", "avg_daily_trips"} and full_feature_names:
return f"driver_stats__{feature}"
return feature
def get_latest_feature_values_from_dataframes(driver_df, customer_df, entity_row):
driver_rows = driver_df[driver_df["driver_id"] == entity_row["driver"]]
latest_driver_row: pd.DataFrame = driver_rows.loc[
driver_rows["event_timestamp"].idxmax()
].to_dict()
customer_rows = customer_df[customer_df["customer_id"] == entity_row["customer_id"]]
latest_customer_row = customer_rows.loc[
customer_rows["event_timestamp"].idxmax()
].to_dict()
latest_customer_row.update(latest_driver_row)
return latest_customer_row
| 2.453125
| 2
|
examples/04-rl-ray.py
|
nnaisense/pgpelib
| 36
|
12785042
|
# Solving reinforcement learning problems using pgpelib with parallelization
# and with observation normalization
# ==========================================================================
#
# This example demonstrates how to solve locomotion tasks.
# The following techniques are used:
#
# - dynamic population size
# - observation normalization
# - parallelization (using the ray library)
#
# Because we are using both parallelization and observation normalization,
# we will have to synchronize the observation stats between the remote
# workers and the main process.
# We demonstrate how to do this synchronization using ray,
# however the logic is applicable to other parallelization libraries.
from pgpelib import PGPE
from pgpelib.policies import Policy, LinearPolicy, MLPPolicy
from pgpelib.restore import to_torch_module
from pgpelib.runningstat import RunningStat
from typing import Tuple, Iterable
from numbers import Real
import numpy as np
import torch
import gym
import ray
import multiprocessing as mp
from time import sleep
import pickle
# Here is the gym environment to solve.
ENV_NAME = 'Walker2d-v2'
# The environment we aim to solve is a locomotion problem.
# It defines an "alive bonus" to encourage the agent to stand on its
# feet without falling.
# However, such alive bonuses might drive the evolution process into
# generating agents which focus ONLY on standing on their feet (without
# progressing), just to collect these bonuses.
# We therefore remove this alive bonus by subtracting 1.0 at every
# simulator timestep.
DECREASE_REWARDS_BY = 1.0
# Ray supports stateful parallelization via remote actors.
# An actor is a class instance which lives on different process than
# the main process, and which stores its state.
# Here, we define a remote actor class (which will be instantiated
# multiple times, so that we will be able to use the instances for
# parallelized evaluation of our solutions)
@ray.remote
class Worker:
def __init__(self, policy, decrease_rewards_by):
policy: Policy
self.policy: Policy = policy
self.decrease_rewards_by = decrease_rewards_by
def set_main_obs_stats(self, rs):
# Set the main observation stats of the remote worker.
# The goal of this function is to receive the observation
# stats from the main process.
rs: RunningStat
self.policy.set_main_obs_stats(rs)
def pop_collected_obs_stats(self):
# Pop the observation stats collected by the worker.
# At the time of synchronization, the main process will call
# this method of each remote worker, and update its main
# observation stats with those collected data.
return self.policy.pop_collected_obs_stats()
def run(self, d):
        # Run each solution in the dictionary d.
# The dictionary d is expected in this format:
# { solution_index1: solution1,
# solution_index2: solution2,
# ... }
# and the result will be:
# { solution_index1: (cumulative_reward1, number_of_interactions1)
# solution_index2: (cumulative_reward2, number_of_interactions2)
# ... }
return self.policy.set_params_and_run_all(
d,
decrease_rewards_by=self.decrease_rewards_by
)
# Set the number of workers to be instantiated as the number of CPUs.
NUM_WORKERS = mp.cpu_count()
# List of workers.
# Initialized as a list containing `None`s in the beginning.
WORKERS = [None] * NUM_WORKERS
def prepare_workers(policy, decrease_rewards_by):
# Fill the WORKERS list.
# Initialize the ray library.
ray.init()
# For each index i of WORKERS list, fill the i-th element with a new
# worker instance.
for i in range(len(WORKERS)):
WORKERS[i] = Worker.remote(policy, decrease_rewards_by)
Reals = Iterable[Real]
def evaluate_solutions(solutions: Iterable[np.ndarray]) -> Tuple[Reals, Reals]:
# This function evaluates the given solutions in parallel.
# Get the number of solutions
nslns = len(solutions)
if len(WORKERS) > nslns:
# If the number of workers is greater than the number of solutions
# then the workers that we are going to actually use here
# is the first `nslns` amount of workers, not all of them.
workers = WORKERS[:nslns]
else:
# If the number of solutions is equal to or greater than the
# number of workers, then we will use all of the instantiated
# workers.
workers = WORKERS
# Number of workers that are going to be used now.
nworkers = len(workers)
# To each worker, we aim to send a dictionary, each dictionary being
# in this form:
# { solution_index1: solution1, solution_index2: solution2, ...}
# We keep those dictionaries in the `to_worker` variable.
# to_worker[i] stores the dictionary to be sent to the i-th worker.
to_worker = [dict() for _ in range(nworkers)]
# Iterate over the solutions and assign them one by one to the
# workers.
i_worker = 0
for i, solution in enumerate(solutions):
to_worker[i_worker][i] = solution
i_worker = (i_worker + 1) % nworkers
# Each worker executes the solution dictionary assigned to itself.
# The results are then collected to the list `worker_results`.
# The workers do their tasks in parallel.
worker_results = ray.get(
[
workers[i].run.remote(to_worker[i])
for i in range(nworkers)
]
)
# Allocate a list for storing the fitnesses, and another list for
# storing the number of interactions.
fitnesses = [None] * nslns
num_interactions = [None] * nslns
# For each worker:
for worker_result in worker_results:
# For each solution and its index mentioned in the worker's
# result dictionary:
for i, result in worker_result.items():
fitness, timesteps = result
# Store the i-th solution's fitness in the fitnesses list
fitnesses[i] = fitness
# Store the i-th solution's number of interactions in the
# num_interactions list.
num_interactions[i] = timesteps
# Return the fitnesses and the number of interactions lists.
return fitnesses, num_interactions
def sync_obs_stats(main_policy: Policy):
    # This function synchronizes the observation stats of the
    # main process and of the remote workers.
# Collect observation stats from the remote workers
collected_stats = ray.get(
[
worker.pop_collected_obs_stats.remote()
for worker in WORKERS
]
)
# In the main process, update the main policy's
# observation stats with the stats collected from the remote workers.
for stats in collected_stats:
main_policy.update_main_obs_stats(stats)
# To each worker, send the main policy's up-to-date stats.
ray.get(
[
worker.set_main_obs_stats.remote(
main_policy.get_main_obs_stats()
)
for worker in WORKERS
]
)
def main():
# This is the main function.
# The main evolution procedure will be defined here.
# Make a linear policy.
policy = LinearPolicy(
env_name=ENV_NAME, # Name of the environment
observation_normalization=True
)
# Prepare the workers
prepare_workers(policy, DECREASE_REWARDS_BY)
# Initial solution
x0 = np.zeros(policy.get_parameters_count(), dtype='float32')
# The following are the Walker2d-v2 hyperparameters used in the paper:
# ClipUp: A Simple and Powerful Optimizer for Distribution-based
# Policy Evolution
N = policy.get_parameters_count()
max_speed = 0.015
center_learning_rate = max_speed / 2.0
radius = max_speed * 15
# Compute the stdev_init from the radius:
stdev_init = np.sqrt((radius ** 2) / N)
popsize = 100
popsize_max = 800
# Below we initialize our PGPE solver.
pgpe = PGPE(
solution_length=N,
popsize=popsize,
popsize_max=popsize_max,
num_interactions=int(popsize * 1000 * (3 / 4)),
center_init=x0,
center_learning_rate=center_learning_rate,
optimizer='clipup',
optimizer_config={'max_speed': max_speed},
stdev_init=stdev_init,
stdev_learning_rate=0.1,
stdev_max_change=0.2,
solution_ranking=True,
dtype='float32'
)
num_iterations = 500
# The main loop of the evolutionary computation
for i in range(1, 1 + num_iterations):
total_episodes = 0
while True:
# Get the solutions from the pgpe solver
solutions = pgpe.ask()
# Evaluate the solutions in parallel and get the fitnesses
fitnesses, num_interactions = evaluate_solutions(solutions)
sync_obs_stats(policy)
# Send the pgpe solver the received fitnesses
iteration_finished = pgpe.tell(fitnesses, num_interactions)
total_episodes += len(fitnesses)
if iteration_finished:
break
print(
"Iteration:", i,
" median score:", np.median(fitnesses),
" num.episodes:", total_episodes
)
print("Visualizing the center solution...")
# Get the center solution
center_solution = pgpe.center.copy()
# Make the gym environment for visualizing the center solution
env = gym.make(ENV_NAME)
# Load the center solution into the policy
policy.set_parameters(center_solution)
# Save the policy into a pickle file
with open(__file__ + '.pickle', 'wb') as f:
pickle.dump(policy, f)
# Convert the policy to a PyTorch module
net = to_torch_module(policy)
while True:
print("Please choose: 1> Visualize the agent 2> Quit")
response = input(">>")
if response == '1':
cumulative_reward = 0.0
# Reset the environment, and get the observation of the initial
# state into a variable.
observation = env.reset()
# Visualize the initial state
env.render()
# Main loop of the trajectory
while True:
with torch.no_grad():
action = net(
torch.as_tensor(observation, dtype=torch.float32)
).numpy()
if isinstance(env.action_space, gym.spaces.Box):
interaction = action
elif isinstance(env.action_space, gym.spaces.Discrete):
interaction = int(np.argmax(action))
else:
assert False, "Unknown action space"
observation, reward, done, info = env.step(interaction)
env.render()
cumulative_reward += reward
if done:
break
print("cumulative_reward", cumulative_reward)
elif response == '2':
break
else:
print('Unrecognized response:', repr(response))
if __name__ == "__main__":
main()
| 2.9375
| 3
|
util/__init__.py
|
Ovakefali13/buerro
| 2
|
12785043
|
from .singleton import Singleton
from .to_html import link_to_html, list_to_html, dict_to_html, table_to_html
| 1.1875
| 1
|
logscanner.py
|
thiscodedbox/TerrariaPythonEmail
| 0
|
12785044
|
# ThisCodedBox Terraria notifier python script
# last modified 26-MAY-2017
# time for pauses
# smtplib for notifications
# random to add that random string
import time
import smtplib
import random, string
#setup the random, random, random maker
def randomword(length):
return ''.join(random.choice(string.lowercase) for i in range(length))
# Reset the variables (maybe not needed)
oldAmountOfLines = 0
numNewLines = 0
emailNoFlood = 0
# Pull the initial line count and use it as the baseline
# to prevent false positives on server restarts where the
# log file wasn't deleted and is still being appended to,
# or when this script starts after the server has started.
# Basically, we don't want it sending emails for all the
# previous join events on startup.
fileHandle = open ( r"c:\tshock\logs\tlivelog.txt" )
lineList = fileHandle.readlines()
fileHandle.close()
# Record the current line count as the baseline so we never re-read old lines
oldAmountOfLines = len(lineList)
print "Ready and waiting for Joins..."
# loop it forever and ever, truth is forever.
while True:
    # don't let the email-flood counter drop below zero,
    # just in case the number gets out of control
if emailNoFlood < 0:
emailNoFlood = 0
# Pull the log file, get the lines
fileHandle = open ( r"c:\tshock\logs\tlivelog.txt" )
lineList = fileHandle.readlines()
fileHandle.close()
    # compute how many new lines have been added since the last pass
numNewLines = (len(lineList) - oldAmountOfLines)
# loop through the new lines to search for joins
# keep in mind this won't even run if there are
# no new lines so that is pretty efficient
while numNewLines > 0:
# now grab the first new log line (string) as lineItem
lineItem = lineList[len(lineList) - (numNewLines) ]
# check for "has joined." text
if "has joined." in lineItem:
            # if it's there, print the line, and begin the email
            # process, provided an email hasn't been sent in the
            # last 30m or so (1800 seconds, see counter at end)
print lineList[len(lineList) - (numNewLines) ]
if emailNoFlood < 1:
print "EMAIL(s) TO BE SENT"
fromaddr = '<EMAIL>'
toaddrs = '<EMAIL>','<EMAIL>'
# All of this email is boilerplate, save for the
# "randomword" which prevents threading on gmail
# et al by adding a random string to the subject line
msg = 'From: yourserver@yourhost.com\nSubject:'+lineItem+' Notification:['+randomword(10)+']\n\n'+lineItem
user = 'smtpuser'
password = '<PASSWORD>'
server = smtplib.SMTP_SSL('smtp.yoursmtp.com:465')
server.login(user,password)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
print "and ... SENT!"
                # OK, if an email was sent, don't send another for 30 minutes.
                # It's important not to reset this to 1800 when no email was
                # sent, hence the else branch below; this stays inside the if.
emailNoFlood = 1800
else:
print "***Email sent less than 30m ago, no email sent***"
        # we're back in the outer while loop; decrement numNewLines by one
        # and run it again (or fall out once all new lines are handled)
numNewLines -= 1
# maybe this doesn't need to be in the while loop, not sure, but it works.
oldAmountOfLines = len(lineList)
    # The sleep command allows this to cycle approximately once per
    # second, so subtract 1 from the emailNoFlood variable on every
    # pass to let it count down.
emailNoFlood -= 1
time.sleep(1)
| 2.625
| 3
|
src/rosthrottle/tests/throttledListener.py
|
UTNuclearRoboticsPublic/rosthrottle
| 3
|
12785045
|
import rospy
from std_msgs.msg import String
def callback(data):
rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.data)
def listener():
rospy.init_node('throttled_listener')
rospy.Subscriber('chatter_message_throttled', String, callback)
rospy.Subscriber('chatter_bandwidth_throttled', String, callback)
rospy.spin()
if __name__ == '__main__':
listener()
| 2.25
| 2
|
tests/serializers/test_snippet.py
|
juliofelipe/snippets
| 1
|
12785046
|
<filename>tests/serializers/test_snippet.py
import json
import uuid
from src.domain.snippet import Snippet
from src.serializers.snippet import SnippetJsonEncoder
def test_serializers_domain_snippet():
code = uuid.uuid4()
snippet = Snippet(
code=code,
language="Python",
title="Replace to join",
description="''.join(data)",
created_at="2021-11-13"
)
expected_json = f"""
{{
"code": "{code}",
"language": "Python",
"title": "Replace to join",
"description": "''.join(data)",
"created_at": "2021-11-13"
}}
"""
json_snippet = json.dumps(snippet, cls=SnippetJsonEncoder)
assert json.loads(json_snippet) == json.loads(expected_json)
| 2.515625
| 3
|
MedString.py
|
soorajbhatia/bioinformatics
| 0
|
12785047
|
<reponame>soorajbhatia/bioinformatics
from itertools import permutations as p
import itertools
with open('data.txt', 'r') as file:
k = int(file.readline())
    Dna = [line.strip() for line in file.readlines()]
#computes the Hamming distance (number of mismatches) between two equal-length strings
def HammingDistance(Text,Text2):
#defines variables to be used
n = len(Text)
k = 1
j=0
#goes through both sequences and increments j by one every time there's a mismatch
for i in range(n):
MatchPattern1 = Text[i:i + k]
MatchPattern2 = Text2[i:i + k]
if MatchPattern1 != MatchPattern2:
j = j + 1
    #returns the number of mismatches
return(j)
# Returns all strings of length n over the alphabet seq (i.e. every possible k-mer)
def perm(n, seq):
listy=[]
for p in itertools.product(seq, repeat=n):
listy.append("".join(p))
return(listy)
#Finds median string
def MedianString(Dna, k):
final=[]
kmers = perm(k, "ATCG")
for kmer in kmers:
tosum=[]
for string in Dna:
minham=[]
for i in range(len(string) - k + 1):
pattern = string[i:i + k]
Text = pattern
minham.append(HammingDistance(kmer,Text))
tosum.append(min(minham))
final.append(sum(tosum))
index = final.index(min(final))
return(kmers[index])
print(MedianString(Dna,k))
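# --- Hedged sanity check (not part of the original script) ---
# A tiny hand-checkable case: "AAA" occurs with Hamming distance 0 in every
# sequence below, so the total minimum distance is 0 and MedianString should
# return "AAA". The example data is made up purely for illustration.
assert MedianString(["AAAC", "CAAA", "AAAG"], 3) == "AAA"
print("sanity check passed: median string of the toy data is AAA")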
| 3.75
| 4
|
misc/python/btn.py
|
Kolkir/orange-pi
| 1
|
12785048
|
<reponame>Kolkir/orange-pi<gh_stars>1-10
#!/usr/bin/env python
import os
import sys
import subprocess
if not os.getegid() == 0:
sys.exit('Script must be run as root')
from time import sleep
from subprocess import *
from pyA20.gpio import gpio
from pyA20.gpio import connector
from pyA20.gpio import port
button_shutdown = port.PA2 #PIN 20
"""Init gpio module"""
gpio.init()
"""Set directions"""
gpio.setcfg(button_shutdown, gpio.INPUT)
"""Enable pullup resistor"""
gpio.pullup(button_shutdown, gpio.PULLUP)
def delay_milliseconds(milliseconds):
seconds = milliseconds / float(1000)
sleep(seconds)
def run_cmd(cmd):
p = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT)
output = p.communicate()[0]
return output
shutdown_pressed = 1
while True:
value_out = 0
shutdown_pressed = gpio.input(button_shutdown)
if (shutdown_pressed == 0):
value_out = 1
run_cmd("shutdown now")
delay_milliseconds(100)
| 2.5
| 2
|
.archived/snakecode/other/dogfood.py
|
gearbird/calgo
| 4
|
12785049
|
<reponame>gearbird/calgo
def feedDog(sp: list[str], k: int) -> int:
feedCount = 0
nextDog = 0
for i, v in enumerate(sp):
if v != 'F': continue
while nextDog < min(i+k+1, len(sp)):
if sp[nextDog] != 'D':
nextDog += 1
continue
if abs(nextDog-i) <= k: break
nextDog += 1
if nextDog == len(sp): return feedCount
if sp[nextDog] == 'D' and abs(nextDog-i) <= k:
feedCount += 1
nextDog += 1
return feedCount
def test():
sp = ['D', 'D', 'F', 'F', 'D']
sp = ['D', 'D', 'F', 'F', 'D', 'D', 'D', 'F', 'F', 'D', 'D']
# sp = ['D', 'D', 'D', 'F', 'F', 'F']
# sp = ['D', 'D', 'D', 'D', 'D', 'D']
# sp = ['F', 'F', 'F', 'D', 'F', 'F']
print(feedDog(sp, 1))
test()
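# --- Hedged extra check (not part of the original file) ---
# A tiny self-contained case that is easy to verify by hand: with reach k=1,
# the food at index 2 can feed the dog at index 1 and the food at index 3
# can feed the dog at index 4, so two dogs get fed in total.
assert feedDog(['D', 'D', 'F', 'F', 'D'], 1) == 2
print('extra check passed: 2 dogs fed')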
| 2.875
| 3
|
cme/enum/wmiquery.py
|
padfoot999/CrackMapExec
| 2
|
12785050
|
#!/usr/bin/python
# Copyright (c) 2003-2015 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description: [MS-WMI] example. It allows to issue WQL queries and
# get description of the objects.
#
# e.g.: select name from win32_account
# e.g.: describe win32_process
#
# Author:
# <NAME> (@agsolino)
#
# Reference for:
# DCOM
#
import logging
import traceback
from impacket.dcerpc.v5.dtypes import NULL
from impacket.dcerpc.v5.dcom import wmi
from impacket.dcerpc.v5.dcomrt import DCOMConnection
class WMIQUERY:
def __init__(self, connection):
self.__logger = connection.logger
self.__addr = connection.host
self.__username = connection.username
self.__password = connection.password
self.__hash = connection.hash
self.__domain = connection.domain
self.__namespace = connection.args.wmi_namespace
self.__query = connection.args.wmi
self.__iWbemServices = None
self.__doKerberos = False
self.__aesKey = None
self.__oxidResolver = True
self.__lmhash = ''
self.__nthash = ''
if self.__hash is not None:
self.__lmhash, self.__nthash = self.__hash.split(':')
if self.__password is None:
self.__password = ''
self.__dcom = DCOMConnection(self.__addr, self.__username, self.__password, self.__domain,
self.__lmhash, self.__nthash, self.__aesKey, self.__oxidResolver, self.__doKerberos)
try:
iInterface = self.__dcom.CoCreateInstanceEx(wmi.CLSID_WbemLevel1Login,wmi.IID_IWbemLevel1Login)
iWbemLevel1Login = wmi.IWbemLevel1Login(iInterface)
self.__iWbemServices= iWbemLevel1Login.NTLMLogin(self.__namespace, NULL, NULL)
iWbemLevel1Login.RemRelease()
except Exception as e:
self.__logger.error(e)
def query(self):
query = self.__query.strip('\n')
if query[-1:] == ';':
query = query[:-1]
if self.__iWbemServices:
iEnumWbemClassObject = self.__iWbemServices.ExecQuery(query.strip('\n'))
self.__logger.success('Executed specified WMI query')
self.printReply(iEnumWbemClassObject)
iEnumWbemClassObject.RemRelease()
self.__iWbemServices.RemRelease()
self.__dcom.disconnect()
def describe(self, sClass):
sClass = sClass.strip('\n')
if sClass[-1:] == ';':
sClass = sClass[:-1]
try:
            iObject, _ = self.__iWbemServices.GetObject(sClass)
iObject.printInformation()
iObject.RemRelease()
except Exception as e:
traceback.print_exc()
def printReply(self, iEnum):
printHeader = True
while True:
try:
pEnum = iEnum.Next(0xffffffff,1)[0]
record = pEnum.getProperties()
line = []
for rec in record:
line.append('{}: {}'.format(rec, record[rec]['value']))
self.__logger.highlight(' | '.join(line))
            except Exception as e:
#import traceback
#print traceback.print_exc()
if str(e).find('S_FALSE') < 0:
raise
else:
break
iEnum.RemRelease()
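# --- Hedged usage sketch (not part of the original module) ---
# WMIQUERY expects a CrackMapExec-style connection object exposing the
# attributes read in __init__ above (logger, host, username, password, hash,
# domain, and an args namespace with wmi / wmi_namespace). The stand-in below
# is purely illustrative: the logger, host, credentials and WMI namespace are
# placeholders, and a real run requires a reachable Windows target, so the
# call is wrapped in a try/except.
if __name__ == '__main__':
    import argparse
    class _FakeLogger(object):
        def success(self, msg):
            print('[+] ' + str(msg))
        def error(self, msg):
            print('[-] ' + str(msg))
        def highlight(self, msg):
            print(str(msg))
    class _FakeConnection(object):
        pass
    conn = _FakeConnection()
    conn.logger = _FakeLogger()
    conn.host = '192.168.1.10'        # placeholder target
    conn.username = 'Administrator'   # placeholder credential
    conn.password = '<PASSWORD>'      # placeholder credential
    conn.hash = None
    conn.domain = 'WORKGROUP'
    conn.args = argparse.Namespace(wmi_namespace='//./root/cimv2',
                                   wmi='select Name from Win32_Process')
    try:
        WMIQUERY(conn).query()
    except Exception as exc:
        print('WMI query failed (expected without a reachable target): %s' % exc)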
| 2.265625
| 2
|