| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
ISMLnextGen/dynamicCoro.py
|
Ravenclaw-OIer/ISML_auto_voter
| 128
|
12781851
|
import asyncio
from threading import Thread

async def production_task():
    i = 0
    while True:
        # Register another consumption coroutine onto the loop running in the worker thread;
        # thread_loop keeps accumulating endless tasks that each print their own i.
        asyncio.run_coroutine_threadsafe(consumption(i),
                                         thread_loop)  # Note: run_coroutine_threadsafe only works with an event loop running in another thread
        await asyncio.sleep(2)  # the await is required
        i += 1

async def consumption(i):
    while True:
        print("I am task {}".format(i))
        await asyncio.sleep(1)

def start_loop(loop):
    # Run the event loop; the loop is passed in as an argument
    asyncio.set_event_loop(loop)
    loop.run_forever()

# Consumer loop
thread_loop = asyncio.new_event_loop()  # create a new event loop
run_loop_thread = Thread(target=start_loop, args=(thread_loop,))  # run this event loop in a thread so it does not block the main thread
run_loop_thread.start()  # start the thread; its event loop starts running with it
# Producer loop
advocate_loop = asyncio.get_event_loop()  # the producer coroutine runs on this loop
advocate_loop.run_until_complete(production_task())  # run this loop
| 3.4375
| 3
|
__init__.py
|
JHP4911/SLAM-on-Raspberry-Pi
| 5
|
12781852
|
'''
This file is a modification of the file below to enable map save
https://github.com/simondlevy/PyRoboViz/blob/master/roboviz/__init__.py
roboviz.py - Python classes for displaying maps and robots
Requires: numpy, matplotlib
Copyright (C) 2018 <NAME>
This file is part of PyRoboViz.
PyRoboViz is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
PyRoboViz is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
# Essential imports
import matplotlib.pyplot as plt
import matplotlib.cm as colormap
import matplotlib.lines as mlines
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import datetime
# This helps with Raspberry Pi
import matplotlib
matplotlib.use('TkAgg')
class Visualizer(object):
# Robot display params
ROBOT_HEIGHT_M = 0.5
ROBOT_WIDTH_M = 0.3
def __init__(self, map_size_pixels, map_size_meters, title, show_trajectory=False, zero_angle=0):
# Put origin in center
self._init(map_size_pixels, map_size_meters, title, -map_size_pixels / 2, show_trajectory, zero_angle)
def display(self, x_m, y_m, theta_deg):
self._setPose(x_m, y_m, theta_deg)
return self._refresh()
def _init(self, map_size_pixels, map_size_meters, title, shift, show_trajectory=False, zero_angle=0):
# Store constants for update
map_size_meters = map_size_meters
self.map_size_pixels = map_size_pixels
self.map_scale_meters_per_pixel = map_size_meters / float(map_size_pixels)
# Create a byte array to display the map with a color overlay
self.bgrbytes = bytearray(map_size_pixels * map_size_pixels * 3)
# Make a nice big (10"x10") figure
fig = plt.figure(figsize=(10,10), facecolor="white")
fig.set_facecolor("white")
# Added this line to make sure the map background is white
plt.rcParams['figure.facecolor'] = 'white'
# Store Python ID of figure to detect window close
self.figid = id(fig)
fig.canvas.set_window_title('SLAM')
plt.title(title)
# Use an "artist" to speed up map drawing
self.img_artist = None
# No vehicle to show yet
self.vehicle = None
# Create axes
self.ax = fig.gca()
self.ax.set_xlabel('X (m)')
self.ax.set_ylabel('Y (m)')
# self.ax.grid(False)
        # Since the axes are in pixels, relabel the ticks to show meters
ticks = np.arange(shift,self.map_size_pixels+shift+100,100)
labels = [str(self.map_scale_meters_per_pixel * tick) for tick in ticks]
self.ax.set_xticklabels(labels)
self.ax.set_yticklabels(labels)
self.ax.set_facecolor('w')
# Store previous position for trajectory
self.prevpos = None
self.showtraj = show_trajectory
# We base the axis on pixels, to support displaying the map
self.ax.set_xlim([shift, self.map_size_pixels+shift])
self.ax.set_ylim([shift, self.map_size_pixels+shift])
# Set up default shift for centering at origin
shift = -self.map_size_pixels / 2
# print("shift = " + str(shift))
self.zero_angle = zero_angle
self.start_angle = None
self.rotate_angle = 0
def _setPose(self, x_m, y_m, theta_deg):
'''
Sets vehicle pose:
X: left/right (m)
Y: forward/back (m)
theta: rotation (degrees)
'''
# If zero-angle was indicated, grab first angle to compute rotation
if self.start_angle is None and self.zero_angle != 0:
self.start_angle = theta_deg
self.rotate_angle = self.zero_angle - self.start_angle
# Rotate by computed angle, or zero if no zero-angle indicated
d = self.rotate_angle
a = np.radians(d)
c = np.cos(a)
s = np.sin(a)
x_m,y_m = x_m*c-y_m*s, y_m*c+x_m*s
# Erase previous vehicle image after first iteration
if not self.vehicle is None:
self.vehicle.remove()
# Use a very short arrow shaft to orient the head of the arrow
theta_rad = np.radians(theta_deg+d)
c = np.cos(theta_rad)
s = np.sin(theta_rad)
l = 0.1
dx = l * c
dy = l * s
s = self.map_scale_meters_per_pixel
self.vehicle=self.ax.arrow(x_m/s, y_m/s,
dx, dy, head_width=Visualizer.ROBOT_WIDTH_M/s,
head_length=Visualizer.ROBOT_HEIGHT_M/s, fc='r', ec='r')
# Show trajectory if indicated
currpos = self._m2pix(x_m,y_m)
if self.showtraj and not self.prevpos is None:
if (self.prevpos[0] != 0 and self.prevpos[1] != 0):
self.ax.add_line(mlines.Line2D((self.prevpos[0],currpos[0]), (self.prevpos[1],currpos[1])))
self.prevpos = currpos
def _refresh(self):
# If we have a new figure, something went wrong (closing figure failed)
if self.figid != id(plt.gcf()):
return False
# Added this line to make sure the map background is white
plt.rcParams['figure.facecolor'] = 'white'
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['savefig.facecolor'] = 'white'
# Redraw current objects without blocking
plt.draw()
now = datetime.datetime.now()
# Create a directory named 'gif' inside the base directory
plt.savefig('gif/slamMap' + '- ' + str(now.hour).zfill(2) + '- ' + str(now.minute).zfill(2) + '- ' + str(now.second).zfill(2) + '.png')
# Refresh display, setting flag on window close or keyboard interrupt
try:
plt.pause(.01) # Arbitrary pause to force redraw
return True
except:
return False
return True
def _m2pix(self, x_m, y_m):
s = self.map_scale_meters_per_pixel
return x_m/s, y_m/s
class MapVisualizer(Visualizer):
def __init__(self, map_size_pixels, map_size_meters, title='MapVisualizer', show_trajectory=False):
# Put origin in lower left; disallow zero-angle setting
Visualizer._init(self, map_size_pixels, map_size_meters, title, 0, show_trajectory, 0)
def display(self, x_m, y_m, theta_deg, mapbytes):
self._setPose(x_m, y_m, theta_deg)
mapimg = np.reshape(np.frombuffer(mapbytes, dtype=np.uint8), (self.map_size_pixels, self.map_size_pixels))
# Pause to allow display to refresh
plt.pause(.001)
if self.img_artist is None:
self.img_artist = self.ax.imshow(mapimg, cmap=colormap.gray)
else:
self.img_artist.set_data(mapimg)
return self._refresh()
| 2.65625
| 3
|
code/nbs/reco-tut-asr-99-10-metrics-calculation.py
|
sparsh-ai/reco-tut-asr
| 0
|
12781853
|
#!/usr/bin/env python
# coding: utf-8
# # Import libraries and data
#
# The dataset was obtained from the capstone project description (direct link [here](https://d3c33hcgiwev3.cloudfront.net/_429455574e396743d399f3093a3cc23b_capstone.zip?Expires=1530403200&Signature=FECzbTVo6TH7aRh7dXXmrASucl~Cy5mlO94P7o0UXygd13S~Afi38FqCD7g9BOLsNExNB0go0aGkYPtodekxCGblpc3I~R8TCtWRrys~2gciwuJLGiRp4CfNtfp08sFvY9NENaRb6WE2H4jFsAo2Z2IbXV~llOJelI3k-9Waj~M_&Key-Pair-Id=<KEY>)) and split manually into separate csv files. They are stored in my personal GitHub account (folder link [here](https://github.com/caiomiyashiro/RecommenderSystemsNotebooks/tree/master/data/capstone)); download them and place them inside your working directory for this notebook to run.
# In[1]:
import pandas as pd
import numpy as np
# ## Preprocess data
#
# Float data came with ',' as the decimal separator in the csv while Python expects '.', so the numbers were read as text. To convert them to numbers, I first replaced all the commas with periods and then cast the columns to float.
# In[2]:
items = pd.read_csv('data/capstone/Capstone Data - Office Products - Items.csv', index_col=0)
actual_ratings = pd.read_csv('data/capstone/Capstone Data - Office Products - Ratings.csv', index_col=0)
content_based = pd.read_csv('data/capstone/Capstone Data - Office Products - CBF.csv', index_col=0)
user_user = pd.read_csv('data/capstone/Capstone Data - Office Products - User-User.csv', index_col=0)
item_item = pd.read_csv('data/capstone/Capstone Data - Office Products - Item-Item.csv', index_col=0)
matrix_fact = pd.read_csv('data/capstone/Capstone Data - Office Products - MF.csv', index_col=0)
pers_bias = pd.read_csv('data/capstone/Capstone Data - Office Products - PersBias.csv', index_col=0)
items[['Availability','Price']] = items[['Availability','Price']].apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
# preprocess
content_based = content_based.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
user_user = user_user.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
item_item = item_item.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
matrix_fact = matrix_fact.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
pers_bias = pers_bias.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
print('items.shape = ' + str(items.shape))
print('actual_ratings.shape = ' + str(actual_ratings.shape))
print('content_based.shape = ' + str(content_based.shape))
print('user_user.shape = ' + str(user_user.shape))
print('item_item.shape = ' + str(item_item.shape))
print('matrix_fact.shape = ' + str(matrix_fact.shape))
print('pers_bias.shape = ' + str(pers_bias.shape))
actual_ratings.head()
# # Class RecommenderEvaluator
#
# To make the metrics easier to evaluate, I created a class that receives the original ratings and the predicted ratings of every recommender system, and defined methods to extract all the metrics established in section 1 of the capstone report. Let's look at a summary of the class before the code:
# - **Constructor (init)**: receives all recommendation algorithms, plus the actual ratings and the list of items; all of this is contained in the data downloaded from Coursera. Besides storing the recommenders, the constructor also computes the 20 most frequently purchased items, which are used in the popularity metric calculation.
#
# - **get_observed_ratings**: as the ratings matrix is sparse, this method returns only the items that the user with id userId has rated, together with their ratings.
#
# - **get_top_n**: by sorting each recommendation algorithm's predicted ratings, we can extract its 'top' recommendations for a given user. Given a parameter $n$, we return the top $n$ recommendations of every recommendation algorithm.
#
# - **rmse**: by comparing the ratings a given user actually gave to items with the ratings an algorithm predicted for that user, we get an idea of how much error the algorithm makes when predicting the user's ratings. We don't work with top-n lists here, since each user usually rated only a small number of items; instead we take all the items the user rated, look those items up in each algorithm's predictions and then calculate the error.
#
# - **nDCG**: looking at lists now, we can measure how close to optimal the ranked lists are. Using the scoring scheme defined in the report, we calculate the DCG of each recommender's list and then normalise it by the ideal DCG to obtain the nDCG.
#
# - **Price and availability diversity**: diversity metrics that evaluate how much the recommended items' prices vary, *i.e.* the standard deviation of the price; the higher, the better in this case. The same goes for the availability index, where a higher standard deviation means the model recommends a mix of items that are and are not present in local stores.
#
# - **Popularity**: a popularity-oriented recommender tries to recommend items with a high chance of being purchased; in this metric's formulation, an item has a high chance of being purchased if many people have already purchased it. In the class constructor we take the observed ratings and the item list and select the top $n$ (default 20) most purchased items. For a recommendation list, we return the ratio of recommended items that fall inside this top-$n$ list.
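#
# As a compact illustration of the ranking metric (a minimal sketch only: the -1/0/1/2 relevance scores are the ones assumed from the capstone report, and the full per-user logic, including the guard for all-negative score lists, lives in the nDCG method of the class below):
# In[ ]:
import numpy as np

def ndcg_from_scores(scores):
    """nDCG of a ranked list of relevance scores, discounting the score at 1-based position i by log2(i + 1)."""
    dcg = sum(s / np.log2(rank + 2) for rank, s in enumerate(scores))
    ideal_dcg = sum(s / np.log2(rank + 2) for rank, s in enumerate(sorted(scores, reverse=True)))
    return 0.0 if ideal_dcg == 0 else dcg / ideal_dcg

# e.g. ndcg_from_scores([2, 0, 1, -1, 0]): DCG = 2/1 + 0.5 - 1/log2(5) ~= 2.07, then divided by the ideal DCG of the re-sorted list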
# In[3]:
class RecommenderEvaluator:
def __init__(self, items, actual_ratings, content_based, user_user, item_item, matrix_fact, pers_bias):
self.items = items
self.actual_ratings = actual_ratings
# static data containing the average score given by each user
self.average_rating_per_userid = actual_ratings.apply(lambda row: np.average(row[~np.isnan(row)]))
self.content_based = content_based
self.user_user = user_user
self.item_item = item_item
self.matrix_fact = matrix_fact
self.pers_bias = pers_bias
# aggregate list. Makes for loops among all recommenders' predictions easier
self.recommenders_list = [self.content_based, self.user_user, self.item_item, self.matrix_fact,self.pers_bias]
self.recommenders_list_names = ['content_based', 'user_user', 'item_item', 'matrix_fact','pers_bias']
# Used for item popularity metric.
# Calculate the 20 most popular items (item which most of the customers bought)
N_LIM = 20
perc_users_bought_item = self.actual_ratings.apply(lambda item: np.sum(~np.isnan(item)), axis=0)/actual_ratings.shape[1]
sort_pop_items = np.argsort(perc_users_bought_item)[::-1]
self.pop_items = perc_users_bought_item.iloc[sort_pop_items][:N_LIM].index.values.astype(np.int)
def get_observed_ratings(self, userId):
"""
Returns all the items a given user evaluated and their ratings. Used mainly by all the metrics calculation
:parameter: userId - user id
:return: array of rated items. Index is the item id and value is the item rating
"""
userId = str(userId)
filtered_ratings = self.actual_ratings[userId]
rated_items = filtered_ratings[~np.isnan(filtered_ratings)]
return rated_items
def get_top_n(self, userId, n):
"""
Get the top n recommendations for every recommender in the list given a user id
:parameter: userId - user id
:parameter: n - max number of recommendations to return
:return: dictionary where the key is the recommender's name and the value is an array of size n for the top n recommnendations.
"""
userId = str(userId)
predicted_ratings = dict()
for recommender, recommender_name in zip(self.recommenders_list,self.recommenders_list_names):
item_ids = recommender[userId].argsort().sort_values()[:n].index.values
predicted_ratings[recommender_name] = item_ids
return predicted_ratings
def rmse(self, userId):
"""
Root Mean Square Error of the predicted and observed values between the recommender's prediction and the actual ratings
:parameter: userId - user id
:return: dataframe of containing the rmse from all recommenders given user id
"""
userId = str(userId)
observed_ratings = self.get_observed_ratings(userId)
rmse_list = {'rmse': []}
for recommender in self.recommenders_list:
predicted_ratings = recommender.loc[observed_ratings.index, userId]
rmse_list['rmse'].append(np.sqrt(np.average((predicted_ratings - observed_ratings)**2)))
rmse_list = pd.DataFrame(rmse_list, index = self.recommenders_list_names)
return rmse_list
def nDCG(self, userId, top_n = 5, individual_recommendation = None):
"""
Normalised Discounted Cumulative Gain for all recommenders given user id
:parameter: userId - user id
:return: dataframe of containing the nDCG from all recommenders given user id
"""
ri = self.get_observed_ratings(userId)
if(individual_recommendation is None):
topn = self.get_top_n(userId,top_n)
results_pandas_index = self.recommenders_list_names
else:
topn = individual_recommendation
results_pandas_index = list(individual_recommendation.keys())
# 1st step: Given recommendations, transform list into scores (see score transcriptions in the capstone report)
scores_all = []
for name, item_list in topn.items():
scores = np.empty_like(item_list) # initialise 'random' array
scores[:] = -10 ###########################
# check which items returned by the recommender
is_already_rated = np.isin(item_list, ri.index.values) # the user already rated. Items users didn't rate
scores[~is_already_rated] = 0 # receive score = 0
for index, score in enumerate(scores):
if(score != 0): # for each recommended items the user rated
if(ri[item_list[index]] < self.average_rating_per_userid[userId] - 1): # score accordingly the report
scores[index] = -1
elif((ri[item_list[index]] >= self.average_rating_per_userid[userId] - 1) &
(ri[item_list[index]] < self.average_rating_per_userid[userId] + 0.5)):
scores[index] = 1
else:
scores[index] = 2
scores_all.append(scores) # append all the transformed scores
scores_all
# 2nd step: Given scores, calculate the model's DCG, ideal DCG and then nDCG
nDCG_all = dict()
for index_model, scores_model in enumerate(scores_all): # for each model
model_DCG = 0 # calculate model's DCG
for index, score in enumerate(scores_model): #
index_ = index + 1 #
model_DCG = model_DCG + score/np.log2(index_ + 1) #
ideal_rank_items = np.sort(scores_model)[::-1] # calculate model's ideal DCG
ideal_rank_DCG = 0 #
for index, ideal_score in enumerate(ideal_rank_items): #
index_ = index + 1 #
ideal_rank_DCG = ideal_rank_DCG + ideal_score/np.log2(index_ + 1) #
if((ideal_rank_DCG == 0) | (np.abs(ideal_rank_DCG) < np.abs(model_DCG))): # if nDCG is 0 or only negative scores came up
nDCG = 0
else: # calculate final nDCG when ideal DCG is != 0
nDCG = model_DCG/ideal_rank_DCG
nDCG_all[results_pandas_index[index_model]] = nDCG # save each model's nDCG in a dict
# convert it to dataframe
result_final = pd.DataFrame(nDCG_all, index=range(1)).transpose()
result_final.columns = ['nDCG']
return result_final
def price_diversity(self,userId,top_n = 5,individual_recommendation = None):
"""
Mean and standard deviation of the price of the top n products recommended by each algorithm.
Intuition for a high price wise diversity recommender is to have a high price standard deviation
:parameter: userId - user id
:return: dataframe of containing the price's mean and standard deviation from all recommenders given user id
"""
if(individual_recommendation is None):
topn = self.get_top_n(userId,top_n)
else:
topn = individual_recommendation
stats = pd.DataFrame()
for key, value in topn.items():
data_filtered = self.items.loc[topn[key]][['Price']].agg(['mean','std']).transpose()
data_filtered.index = [key]
stats = stats.append(data_filtered)
return stats
def availability_diversity(self,userId,top_n = 5,individual_recommendation = None):
"""
Mean and standard deviation of the availabity index of the top n products recommended by each algorithm.
Intuition for a high availabity diversity is to have a small mean value in the availabity index
:parameter: userId - user id
:return: dataframe of containing the availabity index's mean and standard deviation from all recommenders given user id
"""
if(individual_recommendation is None):
topn = self.get_top_n(userId,top_n)
else:
topn = individual_recommendation
stats = pd.DataFrame()
for key, value in topn.items():
data_filtered = self.items.loc[topn[key]][['Availability']].agg(['mean','std']).transpose()
data_filtered.index = [key]
stats = stats.append(data_filtered)
return stats
def popularity(self, userId,top_n = 5,individual_recommendation = None):
"""
Return the ratio of how many items of the top n items are among the most popular purchased items. Default is
the 20 most purchased items.
:parameter: userId - user id
:return: dataframe of containing ratio of popular items in the recommended list from all recommenders given user id
"""
if(individual_recommendation is None):
topn = self.get_top_n(userId,top_n)
results_pandas_index = self.recommenders_list_names
else:
topn = individual_recommendation
results_pandas_index = list(individual_recommendation.keys())
results = {'popularity': []}
for recommender, recommendations in topn.items():
popularity = np.sum(np.isin(recommendations,self.pop_items))
results['popularity'].append(popularity)
return pd.DataFrame(results,index = results_pandas_index)
def precision_at_n(self, userId, top_n = 5, individual_recommendation = None):
if(individual_recommendation is None):
topn = self.get_top_n(userId,top_n)
results_pandas_index = self.recommenders_list_names
else:
topn = individual_recommendation
results_pandas_index = list(individual_recommendation.keys())
observed_ratings = self.get_observed_ratings(userId).index.values
precisions = {'precision_at_'+str(top_n): []}
for recommender, recommendations in topn.items():
precisions['precision_at_'+str(top_n)].append(np.sum(np.isin(recommendations, observed_ratings))/top_n)
return pd.DataFrame(precisions,index = results_pandas_index)
# # Test methods:
#
# Just to get an idea of each method's output, let's call them all with a test user. In the next section we will calculate these metrics for all users.
# In[4]:
userId = '64'
re = RecommenderEvaluator(items, actual_ratings, content_based, user_user, item_item, matrix_fact, pers_bias)
# ## Test RMSE
# In[5]:
re.rmse(userId)
# ## Test nDCG
# In[6]:
re.nDCG(userId)
# ## Test Diversity - Price and Availability
# In[7]:
re.price_diversity(userId)
# In[8]:
re.availability_diversity(userId)
# ## Test Popularity
# In[9]:
re.popularity(userId)
# ## Test Precision@N
# In[10]:
re.precision_at_n(userId)
# # Average metrics by all users
#
# Specifically for user 907, the user-user recommendations came as all nulls in the original dataset. This impacted the RMSE calculation in particular, since a single NaN corrupts the whole average, so RMSE gets its own calculation section. All the other metrics are calculated in the next code block.
# In[11]:
re = RecommenderEvaluator(items, actual_ratings, content_based, user_user, item_item, matrix_fact, pers_bias)
i = 0
count = np.array([0,0,0,0,0])
for userId in actual_ratings.columns:
if(userId == '907'):
rmse_recommenders = re.rmse(userId).fillna(0)
else:
rmse_recommenders = re.rmse(userId)
count = count + rmse_recommenders['rmse']
# as we didn't use user 907 for user user, divide it by the number of users - 1
denominator = [len(actual_ratings.columns)] * 5
denominator[1] = len(actual_ratings.columns) - 1
print('Average RMSE for all users')
count/ denominator
# In[12]:
count_nDCG = np.array([0,0,0,0,0])
count_diversity_price = np.ndarray([5,2])
count_diversity_availability = np.ndarray([5,2])
count_popularity = np.array([0,0,0,0,0])
count_precision_at_5 = np.array([0,0,0,0,0])
for userId in actual_ratings.columns:
nDCG_recommenders = re.nDCG(userId)
count_nDCG = count_nDCG + nDCG_recommenders['nDCG']
diversity_price_recommenders = re.price_diversity(userId)
count_diversity_price = count_diversity_price + diversity_price_recommenders[['mean','std']]
diversity_availability_recommenders = re.availability_diversity(userId)
count_diversity_availability = count_diversity_availability + diversity_availability_recommenders[['mean','std']]
popularity_recommenders = re.popularity(userId)
count_popularity = count_popularity + popularity_recommenders['popularity']
precision_recommenders = re.precision_at_n(userId)
count_precision_at_5 = count_precision_at_5 + precision_recommenders['precision_at_5']
print('\n---')
print('Average nDCG')
print('---\n')
print(count_nDCG/len(actual_ratings.columns))
print('\n---')
print('Average Price - Diversity Measure')
print('---\n')
print(count_diversity_price/len(actual_ratings.columns))
print('\n---')
print('Average Availability - Diversity Measure')
print('---\n')
print(count_diversity_availability/len(actual_ratings.columns))
print('\n---')
print('Average Popularity')
print('---\n')
print(count_popularity/len(actual_ratings.columns))
print('---\n')
print('Average Precision@5')
print('---\n')
print(count_precision_at_5/len(actual_ratings.columns))
# # Final Analysis
#
# In terms of **RMSE**, user-user collaborative filtering proved the most effective, although not by a significant margin.
#
# For the nDCG rank score, user-user was again among the best, now joined by item-item collaborative filtering.
#
# In terms of price diversity, the item-item algorithm was the most diverse, recommending products that vary by about 32 dollars around the mean item price. Matrix factorisation and user-user follow right behind, with price standard deviations around 25 dollars. An interesting case here was the *pers_bias* algorithm, which basically recommended cheap products with a low standard deviation.
#
# For the availability index, all the algorithms besides user-user managed to recommend items barely present in local stores **together** with items widely present, as we can see from their high availability-index standard deviations.
#
# In terms of popularity, no algorithm obtained good scores the way we defined the metric. So, if popularity becomes a focus in the future, we can either change the popularity concept or adjust the recommenders so they predict higher scores for the most popular items in the store.
#
# After this evaluation, the item-item recommender seemed to have the best overall performance, especially in its diversity scores. Unfortunately, the items it suggests are on the whole pricey, so we can check whether it can be mixed with the pers_bias algorithm, which recommended cheap items with a low price standard deviation. Matrix factorization performed well too but didn't outperform any of the other recommenders.
# # Hybridization Techniques - Part III
#
# We try four different types of hybridization here.
#
# 1. Linear ensemble
# 2. Non-linear ensemble
# 3. Top 1 from each recommender
# 4. Recommender switching
#
# The first two options address the recommender's performance in terms of how well it predicts the users' ratings, so they are evaluated only in terms of RMSE.
#
# The third approach follows the intuition that, if we take the top 1 recommendation from each algorithm, the resulting 5-item list will do better at identifying 'good' items for users. Here we consider an item good if the recommender suggested an item the user already bought. Therefore the final measurement of this hybridization mechanism is precision@5, since we end up with a 5-item list.
#
# The final mixing algorithm rests on how collaborative filtering behaves for items that don't have enough users/ratings behind them. Since this is a well-known weakness of those recommenders, the idea was to check how many items would be affected if we established a minimum-data threshold for using collaborative filtering; if an item doesn't have enough support in the form of user ratings, we could fall back to a content-based recommendation or, as a last resort, a non-personalised one. A sketch of such a switch follows below.
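#
# A minimal sketch of what that switch could look like (illustrative only: it assumes a rating-count threshold of 10 and reuses the actual_ratings, item_item and content_based frames loaded above; the notebook itself only analyses the rating counts and does not implement the switch):
# In[ ]:
def switched_prediction(item_id, user_id, min_ratings=10):
    """Use the item-item CF prediction when the item has enough ratings, otherwise fall back to content-based."""
    n_ratings = np.sum(~np.isnan(actual_ratings.loc[item_id]))  # how many users rated this item
    source = item_item if n_ratings >= min_ratings else content_based
    return source.loc[item_id, str(user_id)]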
#
#
# ## Dataset Creation and User Sample Definition
#
# ### Dataset
#
# For the first and second approaches, we need another perspective on the data. The dataset contains all the existing ratings from all users and concatenates the predictions made by the 5 traditional recommenders. The idea is to use the observed rating as the target variable and the recommenders' predictions as explanatory variables, *i.e.* treat this as a regression problem.
# In[13]:
obs_ratings_list = []
content_based_list = []
user_user_list = []
item_item_list = []
matrix_fact_list = []
pers_bias_list = []
re = RecommenderEvaluator(items, actual_ratings, content_based, user_user, item_item, matrix_fact, pers_bias)
for userId in actual_ratings.columns:
observed_ratings = re.get_observed_ratings(userId)
obs_ratings_list.extend(observed_ratings.values)
content_based_list.extend(content_based.loc[observed_ratings.index, userId].values)
user_user_list.extend(user_user.loc[observed_ratings.index, userId].values)
item_item_list.extend(item_item.loc[observed_ratings.index, userId].values)
matrix_fact_list.extend(matrix_fact.loc[observed_ratings.index, userId].values)
pers_bias_list.extend(pers_bias.loc[observed_ratings.index, userId].values)
dataset = pd.DataFrame({'rating': obs_ratings_list, 'content_based':content_based_list, 'user_user': user_user_list,
'item_item':item_item_list, 'matrix_fact':matrix_fact_list,'pers_bias':pers_bias_list})
dataset = dataset.dropna()
dataset.head()
# ### To get an idea of the results, let's randomly choose 3 users and show the predictions of the new hybrid models
# In[14]:
np.random.seed(42)
sample_users = np.random.choice(actual_ratings.columns, 3).astype(str)
print('sample_users: ' + str(sample_users))
# ### Get recommenders' predictions for sample users in order to create input for ensemble models (hybridization I and II)
# In[15]:
from collections import OrderedDict
df_sample = pd.DataFrame()
for user in sample_users:
content_based_ = re.content_based[user]
user_user_ = re.user_user[user]
item_item_ = re.item_item[user]
matrix_fact_ = re.matrix_fact[user]
pers_bias_ = re.pers_bias[user]
df_sample = df_sample.append(pd.DataFrame(OrderedDict({'user':user,'item':actual_ratings.index.values,'content_based':content_based_, 'user_user':user_user_, 'item_item':item_item_,
'matrix_fact':matrix_fact_,'pers_bias':pers_bias_})), ignore_index=True)
df_sample.head()
#
# ## Focus on Performance (RMSE) I - Linear Model
# In[16]:
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
linear = LinearRegression()
print('RMSE for linear ensemble of recommender systems:')
np.mean(cross_val_score(linear, dataset.drop('rating', axis=1), dataset['rating'], cv=5))
# ### Predictions for sample users: Creating top 5 recommendations for sample users
# In[17]:
pred_cols = ['content_based','user_user','item_item','matrix_fact','pers_bias']
predictions = linear.fit(dataset.drop('rating', axis=1), dataset['rating']).predict(df_sample[pred_cols])
recommendations = pd.DataFrame(OrderedDict({'user':df_sample['user'], 'item':df_sample['item'], 'predictions':predictions}))
recommendations.groupby('user').apply(lambda df_user : df_user.loc[df_user['predictions'].sort_values(ascending=False)[:5].index.values])
# ## Focus on Performance (RMSE) II - Ensemble
# In[18]:
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(random_state=42)
print('RMSE for non linear ensemble of recommender systems:')
np.mean(cross_val_score(rf, dataset.drop('rating', axis=1), dataset['rating'], cv=5))
# ### Predictions for sample users:
# In[19]:
predictions = rf.fit(dataset.drop('rating', axis=1), dataset['rating']).predict(df_sample[pred_cols])
recommendations = pd.DataFrame(OrderedDict({'user':df_sample['user'], 'item':df_sample['item'], 'predictions':predictions}))
recommendations.groupby('user').apply(lambda df_user : df_user.loc[df_user['predictions'].sort_values(ascending=False)[:5].index.values])
# ## Focus on Recommendations - Top 1 from each Recommender
#
# With the all-top-1 recommender, we can evaluate performance not only with RMSE but with all the list metrics evaluated before. As a business constraint, we will also pay special attention to the *precision@5* metric, as general information on how good the recommender is at suggesting items the user will buy, or already bought in this case.
# The majority of the metrics were on the same scale as the best metrics in the all-models comparison. However, it's worth highlighting that the top-1-all recommender had the best *precision@5* among all recommenders, showing itself to be a **suitable hybridization mechanism**.
# In[20]:
count_nDCG = np.array([0])
count_diversity_price = np.ndarray([1,2])
count_diversity_availability = np.ndarray([1,2])
count_popularity = np.array([0])
count_precision = np.array([0])
for userId in actual_ratings.columns:
top_n_1 = re.get_top_n(userId,1)
user_items = {}
user_items['top_1_all'] = [a[0] for a in top_n_1.values()]
nDCG_recommenders = re.nDCG(userId, individual_recommendation = user_items)
count_nDCG = count_nDCG + nDCG_recommenders['nDCG']
diversity_price_recommenders = re.price_diversity(userId, individual_recommendation = user_items)
count_diversity_price = count_diversity_price + diversity_price_recommenders[['mean','std']]
diversity_availability_recommenders = re.availability_diversity(userId, individual_recommendation = user_items)
count_diversity_availability = count_diversity_availability + diversity_availability_recommenders[['mean','std']]
popularity_recommenders = re.popularity(userId, individual_recommendation = user_items)
count_popularity = count_popularity + popularity_recommenders['popularity']
precision_recommenders = re.precision_at_n(userId, individual_recommendation = user_items)
count_precision = count_precision + precision_recommenders['precision_at_5']
print('\n---')
print('Average nDCG')
print('---\n')
print(count_nDCG/len(actual_ratings.columns))
print('\n---')
print('Average Price - Diversity Measure')
print('---\n')
print(count_diversity_price/len(actual_ratings.columns))
print('\n---')
print('Average Availability - Diversity Measure')
print('---\n')
print(count_diversity_availability/len(actual_ratings.columns))
print('\n---')
print('Average Popularity')
print('---\n')
print(count_popularity/len(actual_ratings.columns))
print('\n---')
print('Average Precision@5')
print('---\n')
print(count_precision/len(actual_ratings.columns))
# ### Predictions for sample users:
# In[21]:
results = {}
for user_sample in sample_users:
results[user_sample] = [a[0] for a in list(re.get_top_n(user_sample, 1).values())]
results
# ## Focus on Recommendations - Switching algorithm
#
# ### Can we use a Content-Based Recommender for items with fewer ratings?
#
# The cumulative histogram shows that only around 20% of the rated items have 10 or more ratings. This signals that we could prioritize a content-based recommender, or even a non-personalised one, for the majority of items that don't have enough ratings to make the collaborative filtering algorithms stable.
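#
# A quick cross-check of that figure (a minimal sketch reusing the actual_ratings frame; the full histogram is plotted in the next cell):
# In[ ]:
ratings_per_item = actual_ratings.apply(lambda row: np.sum(~np.isnan(row)), axis=1)  # number of ratings per item
rated_items = ratings_per_item[ratings_per_item > 0]
print('Share of rated items with 10+ ratings:', round(float(np.mean(rated_items >= 10)), 3))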
# In[23]:
import matplotlib.pyplot as plt
item_nbr_ratings = actual_ratings.apply(lambda col: np.sum(~np.isnan(col)), axis=1)
item_max_nbr_ratings = item_nbr_ratings.max()
range_item_max_nbr_ratings = range(item_max_nbr_ratings+1)
plt.figure(figsize=(15,3))
plt.subplot(121)
nbr_ratings_items = []
for i in range_item_max_nbr_ratings:
nbr_ratings_items.append(len(item_nbr_ratings[item_nbr_ratings == i]))
plt.plot(nbr_ratings_items)
plt.xlabel('Number of ratings')
plt.ylabel('Amount of items')
plt.title('Histogram of amount of ratings')
plt.subplot(122)
cum_nbr_ratings_items = []
for i in range(len(nbr_ratings_items)):
cum_nbr_ratings_items.append(np.sum(nbr_ratings_items[:i]))
cum_nbr_ratings_items = np.array(cum_nbr_ratings_items)
plt.plot(cum_nbr_ratings_items/actual_ratings.shape[0])
plt.xlabel('Number of ratings')
plt.ylabel('Cumulative distribution')
plt.title('Cumulative histogram of amount of ratings');
# In[ ]:
| 3.109375
| 3
|
src/scmdata/__init__.py
|
chrisroadmap/scmdata
| 2
|
12781854
|
"""
scmdata, simple data handling for simple climate model data
"""
from ._version import get_versions # isort:skip
__version__ = get_versions()["version"]
del get_versions
from scmdata.run import ScmRun, run_append # noqa: F401, E402
| 1.335938
| 1
|
UNIQ/actquant.py
|
aqui-tna/darts-UNIQ
| 6
|
12781855
|
import torch.nn as nn
from UNIQ.quantize import act_quantize, act_noise, check_quantization
import torch.nn.functional as F
class ActQuant(nn.Module):
def __init__(self, quatize_during_training=False, noise_during_training=False, quant=False, noise=False,
bitwidth=32):
super(ActQuant, self).__init__()
self.quant = quant
self.noise = noise
self.bitwidth = bitwidth
self.quatize_during_training = quatize_during_training
self.noise_during_training = noise_during_training
def update_stage(self, quatize_during_training=False, noise_during_training=False):
self.quatize_during_training = quatize_during_training
self.noise_during_training = noise_during_training
def forward(self, input):
if self.quant and (not self.training or (self.training and self.quatize_during_training)):
assert (isinstance(self.bitwidth, int))
x = act_quantize.apply(input, self.bitwidth)
elif self.noise and self.training and self.noise_during_training:
assert (False)
x = act_noise.apply(input, bitwidth=self.bitwidth, training=self.training)
else:
x = F.relu(input)
# print('Activation is quantized to {} values'.format(check_quantization(x)))
return x
| 2.421875
| 2
|
src/crumblebundle/input/keycodes.py
|
Peilonrayz/crumblebundle
| 0
|
12781856
|
class _KeyCode:
__slots__ = ("_windows", "_unix")
raw = False
def __init__(self, windows, unix):
self._windows = windows
self._unix = unix
def __repr__(self):
return "_KeyCode(windows={self._windows!r}, unix={self._unix!r})".format(
self=self
)
class KeyCodes:
BACKSPACE = _KeyCode(windows="\x08", unix="")
TAB = _KeyCode(windows="\t", unix="")
ENTER = _KeyCode(windows="\r", unix="")
ESCAPE = _KeyCode(windows="\x1b", unix="")
SPACE = _KeyCode(windows=" ", unix="")
PAGE_UP = _KeyCode(windows="\xe0I", unix="")
PAGE_DOWN = _KeyCode(windows="\xe0Q", unix="")
END = _KeyCode(windows="\xe0O", unix="")
HOME = _KeyCode(windows="\xe0G", unix="")
LEFT_ARROW = _KeyCode(windows="\xe0K", unix="")
UP_ARROW = _KeyCode(windows="\xe0H", unix="")
RIGHT_ARROW = _KeyCode(windows="\xe0M", unix="")
DOWN_ARROW = _KeyCode(windows="\xe0P", unix="")
INSERT = _KeyCode(windows="\xe0R", unix="")
DELETE = _KeyCode(windows="\xe0S", unix="")
F1 = _KeyCode(windows="\x00;", unix="")
F2 = _KeyCode(windows="\x00<", unix="")
F3 = _KeyCode(windows="\x00=", unix="")
F4 = _KeyCode(windows="\x00>", unix="")
F5 = _KeyCode(windows="\x00?", unix="")
F6 = _KeyCode(windows="\x00@", unix="")
F7 = _KeyCode(windows="\x00A", unix="")
F8 = _KeyCode(windows="\x00B", unix="")
F9 = _KeyCode(windows="\x00C", unix="")
F10 = _KeyCode(windows="\x00D", unix="")
F11 = _KeyCode(windows="\xe0\x85", unix="")
F12 = _KeyCode(windows="\xe0\x86", unix="")
KEYPAD_0 = _KeyCode(windows="\x00R", unix="")
KEYPAD_1 = _KeyCode(windows="\x00O", unix="")
KEYPAD_2 = _KeyCode(windows="\x00P", unix="")
KEYPAD_3 = _KeyCode(windows="\x00Q", unix="")
KEYPAD_4 = _KeyCode(windows="\x00K", unix="")
# KEYPAD_5 = _KeyCode(windows='\x00', unix='')
KEYPAD_6 = _KeyCode(windows="\x00M", unix="")
KEYPAD_7 = _KeyCode(windows="\x00G", unix="")
KEYPAD_8 = _KeyCode(windows="\x00H", unix="")
KEYPAD_9 = _KeyCode(windows="\x00I", unix="")
KEYPAD_DOT = _KeyCode(windows="\x00S", unix="")
INTERRUPT = _KeyCode(windows="\x03", unix="")
class RawInput:
__slots__ = ("value",)
raw = True
def __init__(self, value):
self.value = value
| 3.203125
| 3
|
star_tides/services/databases/mongo/schemas/project_schema.py
|
STAR-TIDES/kb
| 2
|
12781857
|
<reponame>STAR-TIDES/kb
''' star_tides.services.databases.mongo.schemas.project_schema
'''
from marshmallow.utils import EXCLUDE
from marshmallow import Schema, fields
from marshmallow_enum import EnumField
from star_tides.services.databases.mongo.schemas import camelcase
from star_tides.services.databases.mongo.schemas.location_schema import LocationSchema
from star_tides.services.databases.mongo.schemas.engagement_schema import EngagementSchema
from star_tides.services.databases.mongo.models.project_status import ProjectStatus
class ProjectSchema(Schema):
'''ProjectSchema is the Marshmallow schema subclass for Projects.'''
name = fields.String(required=True)
location = fields.Nested(LocationSchema, required=True)
engagement = fields.Nested(EngagementSchema, required=True)
contacts = fields.List(fields.String(), required=False)
summary = fields.String(required=True)
grants = fields.List(fields.String(), required=False)
solution_costs = fields.String(required=False, missing=None)
# TODO(ljr): Add this field back in once the Model/MongoEngine bug is fixed.
# updates = fields.List(fields.Nested(UpdateSchema),
# required=False, missing=[])
notes = fields.String(required=False, missing=None)
status = EnumField(ProjectStatus, required=True)
class Meta:
unknown = EXCLUDE
def on_bind_field(self, field_name: str, field_obj: fields.Field) -> None:
field_obj.data_key = camelcase(field_obj.data_key or field_name)
| 2.015625
| 2
|
test/test_assertions.py
|
maxtremaine/sudoku_solver.py
| 0
|
12781858
|
<gh_stars>0
from unittest import TestCase
from src.assertions import is_sudoku_file, is_sudoku_string
class TestAssertions(TestCase):
def test_is_sudoku_file(self):
valid_file = '\n'.join([
' abc def ghi',
'1 7__|_4_|__1',
'2 __1|___|2__',
'3 _6_|2_9|_8_',
' -----------',
'4 __3|5_4|9__',
'5 1__|___|__4',
'6 __2|1_8|5__',
' -----------',
'7 _1_|9_6|_7_',
'8 __8|___|4__',
'9 6__|_2_|__8'])
invalid_file = '\n'.join([
' wbc def ghi',
'1 7__|_4_|__1',
'2 __1|___|2__',
'3 _6_|2_9|_8_',
' -----------',
'4 __3|5_4|9__',
'5 1__|___|__4',
'6 __2|1_8|5__',
' -----------',
'7 _1_|9_6|_7_',
'8 __8|___|4__',
'9 6__|_2_|__8 '])
self.assertTrue(is_sudoku_file(valid_file))
self.assertFalse(is_sudoku_file(invalid_file))
def test_is_sudoku_string(self):
valid_puzzle = '________1________2________3________4________5________6________7________8________9'
long_puzzle = '________1________2________3________4________5________6________7________8________9_'
improper_chars = '________1________2___x____3________4____a___5________6________7________8________9_'
self.assertTrue(is_sudoku_string(valid_puzzle))
self.assertFalse(is_sudoku_string(long_puzzle))
self.assertFalse(is_sudoku_string(improper_chars))
| 3.125
| 3
|
badwing/firework.py
|
kfields/badwing
| 3
|
12781859
|
import random
import arcade
from badwing.constants import *
from badwing.effect import Effect
from badwing.particle import AnimatedAlphaParticle
#TODO: Some of this will go up into ParticleEffect
class Firework(Effect):
def __init__(self, position=(0,0), r1=30, r2=40):
super().__init__(position)
self.radius = random.randint(r1, r2)
self.emitters = []
self.make_sparks(position)
def draw(self):
for e in self.emitters:
e.draw()
def update(self, delta_time):
# prevent list from being mutated (often by callbacks) while iterating over it
emitters_to_update = self.emitters.copy()
# update
for e in emitters_to_update:
e.update()
# remove emitters that can be reaped
to_del = [e for e in emitters_to_update if e.can_reap()]
for e in to_del:
self.emitters.remove(e)
def make_sparks(self, position):
spark_texture = random.choice(SPARK_TEXTURES)
sparks = arcade.Emitter(
center_xy=position,
emit_controller=arcade.EmitBurst(self.radius),
particle_factory=lambda emitter: AnimatedAlphaParticle(
filename_or_texture=spark_texture,
change_xy=arcade.rand_in_circle((0.0, 0.0), 9.0),
start_alpha=255,
duration1=random.uniform(0.6, 1.0),
mid_alpha=0,
duration2=random.uniform(0.1, 0.2),
end_alpha=255,
mutation_callback=firework_spark_mutator
)
)
self.emitters.append(sparks)
def firework_spark_mutator(particle: arcade.FadeParticle):
"""mutation_callback shared by all fireworks sparks"""
# gravity
particle.change_y += -0.03
# drag
particle.change_x *= 0.92
particle.change_y *= 0.92
| 2.734375
| 3
|
cutter_without_UI.py
|
Kameron2442/css-cutter
| 0
|
12781860
|
read_file = "YOUR_FILE.css" # The path of the file which will be cut
to_keep = ".navbar-" # The class name or prefix of class names that you want to keep
file = open(read_file, "r")
new_file = "" # String to hold the style rules that are kept
get_styles = 0 # Flag for if to_keep in found in a line
get_media = 0 # Flag for if a media rule is present
flag = 0 # Set to 1 when a "}" has already been claimed by a previous if-statement as closing a style rule, so the media-rule check below only claims a "}" that is still unclaimed
for line in file:
flag = 0
if get_styles == 1 and "}" not in line:
new_file += line
if to_keep in line:
new_file += line
get_styles = 1
if get_styles == 1 and "}" in line:
new_file += line
get_styles = 0
flag = 1
if to_keep not in line and "{" in line:
get_styles = -1
if get_styles == -1 and "}" in line:
get_styles = 0
flag = 1
if "media" in line:
new_file += line
get_media = 1
if get_media == 1 and "}" in line and get_styles == 0 and flag == 0:
new_file += line
get_media = 0
print(new_file)
file = open("output.css", "w")
file.write(new_file)
file.close()
| 3.703125
| 4
|
tf_quant_finance/math/qmc/__init__.py
|
slowy07/tf-quant-finance
| 3,138
|
12781861
|
# Lint as: python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RQMC support."""
from tf_quant_finance.math.qmc import utils
from tf_quant_finance.math.qmc.digital_net import digital_net_sample
from tf_quant_finance.math.qmc.digital_net import random_digital_shift
from tf_quant_finance.math.qmc.digital_net import random_scrambling_matrices
from tf_quant_finance.math.qmc.digital_net import scramble_generating_matrices
from tf_quant_finance.math.qmc.lattice_rule import lattice_rule_sample
from tf_quant_finance.math.qmc.lattice_rule import random_scrambling_vectors
from tf_quant_finance.math.qmc.sobol import sobol_generating_matrices
from tf_quant_finance.math.qmc.sobol import sobol_sample
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
_allowed_symbols = [
'digital_net_sample',
'lattice_rule_sample',
'random_digital_shift',
'random_scrambling_matrices',
'random_scrambling_vectors',
'scramble_generating_matrices',
'sobol_generating_matrices',
'sobol_sample',
'utils',
]
remove_undocumented(__name__, _allowed_symbols)
| 1.609375
| 2
|
apex-html-logs.py
|
ktorstensson/apex-html-logs
| 0
|
12781862
|
<filename>apex-html-logs.py
#!/usr/bin/env python
# coding: utf-8
''' apex-html-logs.py
Script to summarise APEX html observing logs.
<EMAIL>
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import argparse
import numpy as np
import pandas as pd
import re
from os.path import expanduser
from getpass import getuser
from glob import glob
def read_one_log(filename):
'''Read a html obslog to pandas DataFrame
Parameters
----------
filename : string
obslog html file
Returns
-------
df : pandas.DataFrame
The obslog data
'''
print('Reading obslog:', filename)
df = pd.read_html(filename, header=0)[0]
df['UTC'] = pd.to_datetime(df.UTC, format='%Y-%m-%dU%H:%M:%S')
cancelled = df[df['Scan duration'] == -999]
df.loc[cancelled.index, 'Scan duration'] = 0
df['Scan duration'] = pd.to_timedelta(df['Scan duration'], unit='s')
df['mm PWV'] = df[['mm PWV']].astype(str)
df.drop(df[df.Source.isin(['PARK', 'ZENITH', 'RECYCLE', 'RECYCLING']) |
(df['mm PWV'] == 'Shutter closed')].index, inplace=True)
df['mm PWV'] = df[['mm PWV']].apply(pd.to_numeric)
return df
def get_line_name(string):
'''Get first white spaced delimited part of string'''
return string.split()[0]
def read_obslogs(dir=None):
'''Read APEX html obslogs
Parameters
----------
dir : string (optional)
Directory with html log files, defaults to ~/obslogs/
Returns
-------
df : pandas.DataFrame
The obslog data
'''
if dir is None:
dir = expanduser('~/obslogs/')
logs = glob(dir + '*.html')
print('')
df = read_one_log(logs[0])
for log in logs[1:]:
df = pd.concat([df, read_one_log(log)], axis=0)
df['Line'] = df['Mol. line'].apply(lambda x: x.split()[0])
df.rename(columns=(lambda x: re.sub('[().]', '', x)), inplace=True)
df.rename(columns=(lambda x: re.sub('[ -]', '_', x)), inplace=True)
df.rename(columns=(lambda x: x.lower()), inplace=True)
df.set_index('utc', inplace=True)
df.sort_index(inplace=True)
df.reset_index(inplace=True)
return df
def parse_inputs():
'''Parse optional catalogs and obslogs dir'''
parser = argparse.ArgumentParser(description='Summarises APEX html obslogs')
parser.add_argument('-s', '--source', type=str,
help='Source name')
args = parser.parse_args()
return args.source
def main():
source = parse_inputs()
df = read_obslogs(dir=None)
if source is None:
dfs = df.groupby(['scan_status', 'source', 'line'])[['scan_duration']].sum()
else:
dfs = df[df.source == source].groupby(['scan_status', 'line'])[['scan_duration']].sum()
print(dfs)
return df, dfs
if __name__ == '__main__':
df, dfs = main()
| 2.71875
| 3
|
app/api/v1/routes.py
|
antonnifo/StackOverflow-lite
| 8
|
12781863
|
"""all routes"""
from flask import Blueprint
from flask_restful import Api
from .questions.views import Questions, Question, UpdateTitle, UpdateQuestion
VERSION_UNO = Blueprint('api', __name__, url_prefix='/api/v1')
API = Api(VERSION_UNO)
API.add_resource(Questions, '/questions')
API.add_resource(Question, '/questions/<int:question_id>')
API.add_resource(UpdateTitle, '/questions/<int:question_id>/title')
API.add_resource(UpdateQuestion, '/questions/<int:question_id>/question')
| 2.296875
| 2
|
cwe_relation_cve/cwe_wrapper.py
|
pawlaczyk/sarenka_tools
| 3
|
12781864
|
<gh_stars>1-10
class CWEWrapper:
def __init__(self, cwe_id, description):
self.__cwe_id = cwe_id
self.__description = description
self.__cve_ids_list = []
def add_cve(self, cve_id):
self.__cve_ids_list.extend(cve_id)
@property
def cve_ids(self):
return self.__cve_ids_list
@property
def cwe_id(self):
return self.__cwe_id
@property
def description(self):
return self.__description
def __str__(self):
return f"CWE_ID: {self.cwe_id}\nDescription: {self.description}"
@property
def values(self):
return {"cwe_id": self.cwe_id, "description": self.description}
| 2.484375
| 2
|
bo/pp/pp_gp_my_distmat.py
|
ZachZhu7/banana-git
| 167
|
12781865
|
<gh_stars>100-1000
"""
Classes for GP models without any PP backend, using a given distance matrix.
"""
from argparse import Namespace
import time
import copy
import numpy as np
from scipy.spatial.distance import cdist
from bo.pp.pp_core import DiscPP
from bo.pp.gp.gp_utils import kern_exp_quad, kern_matern32, \
get_cholesky_decomp, solve_upper_triangular, solve_lower_triangular, \
sample_mvn, squared_euc_distmat, kern_distmat
from bo.util.print_utils import suppress_stdout_stderr
class MyGpDistmatPP(DiscPP):
""" GPs using a kernel specified by a given distance matrix, without any PP
backend """
def __init__(self, data=None, modelp=None, printFlag=True):
""" Constructor """
self.set_model_params(modelp)
self.set_data(data)
self.set_model()
super(MyGpDistmatPP,self).__init__()
if printFlag:
self.print_str()
def set_model_params(self, modelp):
""" Set self.modelp """
if modelp is None:
pass #TODO
self.modelp = modelp
def set_data(self, data):
""" Set self.data """
if data is None:
pass #TODO
self.data_init = copy.deepcopy(data)
self.data = copy.deepcopy(self.data_init)
def set_model(self):
""" Set GP regression model """
self.model = self.get_model()
def get_model(self):
""" Returns model object """
return None
def infer_post_and_update_samples(self, print_result=False):
""" Update self.sample_list """
self.sample_list = [Namespace(ls=self.modelp.kernp.ls,
alpha=self.modelp.kernp.alpha,
sigma=self.modelp.kernp.sigma)]
if print_result: self.print_inference_result()
def get_distmat(self, xmat1, xmat2):
""" Get distance matrix """
#return squared_euc_distmat(xmat1, xmat2, .5)
from data import Data
self.distmat = Data.generate_distance_matrix
#print('distmat')
#print(self.distmat(xmat1, xmat2, self.modelp.distance))
return self.distmat(xmat1, xmat2, self.modelp.distance)
def print_inference_result(self):
""" Print results of stan inference """
print('*ls pt est = '+str(self.sample_list[0].ls)+'.')
print('*alpha pt est = '+str(self.sample_list[0].alpha)+'.')
print('*sigma pt est = '+str(self.sample_list[0].sigma)+'.')
print('-----')
def sample_pp_post_pred(self, nsamp, input_list, full_cov=False):
""" Sample from posterior predictive of PP.
Inputs:
input_list - list of np arrays size=(-1,)
Returns:
list (len input_list) of np arrays (size=(nsamp,1))."""
samp = self.sample_list[0]
postmu, postcov = self.gp_post(self.data.X, self.data.y, input_list,
samp.ls, samp.alpha, samp.sigma, full_cov)
if full_cov:
ppred_list = list(sample_mvn(postmu, postcov, nsamp))
else:
ppred_list = list(np.random.normal(postmu.reshape(-1,),
postcov.reshape(-1,),
size=(nsamp, len(input_list))))
return list(np.stack(ppred_list).T), ppred_list
def sample_pp_pred(self, nsamp, input_list, lv=None):
""" Sample from predictive of PP for parameter lv.
Returns: list (len input_list) of np arrays (size (nsamp,1))."""
if lv is None:
lv = self.sample_list[0]
postmu, postcov = self.gp_post(self.data.X, self.data.y, input_list, lv.ls,
lv.alpha, lv.sigma)
pred_list = list(sample_mvn(postmu, postcov, 1)) ###TODO: sample from this mean nsamp times
return list(np.stack(pred_list).T), pred_list
def gp_post(self, x_train_list, y_train_arr, x_pred_list, ls, alpha, sigma,
full_cov=True):
""" Compute parameters of GP posterior """
kernel = lambda a, b, c, d: kern_distmat(a, b, c, d, self.get_distmat)
k11_nonoise = kernel(x_train_list, x_train_list, ls, alpha)
lmat = get_cholesky_decomp(k11_nonoise, sigma, 'try_first')
smat = solve_upper_triangular(lmat.T, solve_lower_triangular(lmat,
y_train_arr))
k21 = kernel(x_pred_list, x_train_list, ls, alpha)
mu2 = k21.dot(smat)
k22 = kernel(x_pred_list, x_pred_list, ls, alpha)
vmat = solve_lower_triangular(lmat, k21.T)
k2 = k22 - vmat.T.dot(vmat)
if full_cov is False:
k2 = np.sqrt(np.diag(k2))
return mu2, k2
# Utilities
def print_str(self):
""" Print a description string """
print('*MyGpDistmatPP with modelp='+str(self.modelp)+'.')
print('-----')
| 2.5
| 2
|
Practice/subsetprod.py
|
ashishjayamohan/competitive-programming
| 0
|
12781866
|
<gh_stars>0
T = int(input())
while T:
n = int(input())
a = list(map(int, input().split()))
a.sort()
negs0 = a[-5] * a[-4] * a[-3] * a[-2] * a[-1]
negs2 = a[0] * a[1] * a[-1] * a[-2] * a[-3]
negs4 = a[0] * a[1] * a[2] * a[3] * a[-1]
print(max(negs0, negs2, negs4))
T -= 1
| 2.265625
| 2
|
compiled/construct/position_in_seq.py
|
smarek/ci_targets
| 4
|
12781867
|
<reponame>smarek/ci_targets
from construct import *
from construct.lib import *
position_in_seq__header_obj = Struct(
'qty_numbers' / Int32ul,
)
position_in_seq = Struct(
'numbers' / Array(this.header.qty_numbers, Int8ub),
'header' / Pointer(16, LazyBound(lambda: position_in_seq__header_obj)),
)
_schema = position_in_seq
| 1.726563
| 2
|
02/checksum.py
|
redfast00/advent_of_code_2017
| 0
|
12781868
|
with open("input_2.txt") as spreadsheet:
total = 0
for line in spreadsheet.readlines():
numbers = line.split('\t')
numbers = [int(number) for number in numbers]
difference = max(numbers) - min(numbers)
total += difference
print(total)
def find_divisor(numbers):
for number in numbers:
for second in numbers:
if number % second == 0 and number != second:
print(f'{number},{second}')
return number / second
with open("input_2.txt") as spreadsheet:
total = 0
for line in spreadsheet.readlines():
numbers = line.split('\t')
numbers = [int(number) for number in numbers]
total += find_divisor(numbers)
print(total)
| 3.53125
| 4
|
train.py
|
SkyLord2/Yolo-v1-by-keras
| 0
|
12781869
|
<gh_stars>0
# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorflow.keras as keras
import config as cfg
from YOLOv1_beta_1 import YOLOv1Net
from utils.pascal_voc import pascal_voc
# The dataset must be downloaded first
pascal = pascal_voc('train')
def get_train_data_by_batch():
while 1:
for i in range(0, len(pascal.gt_labels), 64):
images, labels = pascal.get()
yield (images, labels)
def lr_scheduler(epoch):
lr = 1e-4
if(epoch <= 75):
lr = 1e-2
elif(75 < epoch and epoch <= 105):
lr = 1e-3
elif(105 < epoch and epoch <= 135):
lr = 1e-4
return lr
if __name__ == '__main__':
yolo = YOLOv1Net()
yolo.compile_model()
lr_schedule = tf.keras.callbacks.LearningRateScheduler(lr_scheduler)
modelcheck = keras.callbacks.ModelCheckpoint("weights_{epoch:03d}-{val_loss:.4f}.h5",
monitor='val_loss',
verbose=0,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
result = yolo.train_generator(get_train_data_by_batch(), steps_per_epoch=len(pascal.gt_labels) // 64,
epochs=135, callbacks=[lr_schedule, modelcheck])
| 2.5
| 2
|
Examples/PDFTool/MergeNew.py
|
wxh0000mm/TKinterDesigner
| 1
|
12781870
|
#coding=utf-8
#import libs
import MergeNew_cmd
import MergeNew_sty
import Fun
import os
import tkinter
from tkinter import *
import tkinter.ttk
import tkinter.font
#Add your Varial Here: (Keep This Line of comments)
#Define UI Class
class MergeNew:
def __init__(self,root,isTKroot = True):
uiName = self.__class__.__name__
Fun.Register(uiName,'UIClass',self)
self.root = root
style = MergeNew_sty.SetupStyle()
if isTKroot == True:
root.title("Form1")
Fun.CenterDlg(uiName,root,563,375)
root['background'] = '#efefef'
Form_1= tkinter.Canvas(root,width = 10,height = 4)
Form_1.place(x = 0,y = 0,width = 563,height = 375)
Form_1.configure(bg = "#efefef")
Form_1.configure(highlightthickness = 0)
Fun.Register(uiName,'root',root)
Fun.Register(uiName,'Form_1',Form_1)
#Create the elements of root
Button_2= tkinter.Button(root,text="打开文件夹",width = 10,height = 4)
Fun.Register(uiName,'Button_2',Button_2)
Button_2.place(x = 16,y = 15,width = 109,height = 35)
Button_2.configure(command=lambda:MergeNew_cmd.Button_2_onCommand(uiName,"Button_2"))
Button_2_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Button_2.configure(font = Button_2_Ft)
ListBox_3= tkinter.Listbox(root)
Fun.Register(uiName,'ListBox_3',ListBox_3)
ListBox_3.place(x = 16,y = 57,width = 210,height = 215)
Button_4= tkinter.Button(root,text=">",width = 10,height = 4)
Fun.Register(uiName,'Button_4',Button_4)
Button_4.place(x = 241,y = 86,width = 80,height = 28)
Button_4.configure(command=lambda:MergeNew_cmd.Button_4_onCommand(uiName,"Button_4"))
Button_4_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Button_4.configure(font = Button_4_Ft)
Button_5= tkinter.Button(root,text=">>",width = 10,height = 4)
Fun.Register(uiName,'Button_5',Button_5)
Button_5.place(x = 241,y = 132,width = 80,height = 28)
Button_5.configure(command=lambda:MergeNew_cmd.Button_5_onCommand(uiName,"Button_5"))
Button_5_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Button_5.configure(font = Button_5_Ft)
Button_6= tkinter.Button(root,text="<",width = 10,height = 4)
Fun.Register(uiName,'Button_6',Button_6)
Button_6.place(x = 241,y = 178,width = 80,height = 28)
Button_6.configure(command=lambda:MergeNew_cmd.Button_6_onCommand(uiName,"Button_6"))
Button_6_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Button_6.configure(font = Button_6_Ft)
Button_7= tkinter.Button(root,text="<<",width = 10,height = 4)
Fun.Register(uiName,'Button_7',Button_7)
Button_7.place(x = 241,y = 222,width = 80,height = 28)
Button_7.configure(command=lambda:MergeNew_cmd.Button_7_onCommand(uiName,"Button_7"))
Button_7_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Button_7.configure(font = Button_7_Ft)
ListBox_8= tkinter.Listbox(root)
Fun.Register(uiName,'ListBox_8',ListBox_8)
ListBox_8.place(x = 337,y = 59,width = 210,height = 215)
Entry_9_Variable = Fun.AddTKVariable(uiName,'Entry_9','')
Entry_9= tkinter.Entry(root,textvariable=Entry_9_Variable)
Fun.Register(uiName,'Entry_9',Entry_9)
Entry_9.place(x = 134,y = 293,width = 199,height = 34)
Entry_9.configure(relief = "sunken")
Label_10= tkinter.Label(root,text="合并后文件名",width = 10,height = 4)
Fun.Register(uiName,'Label_10',Label_10)
Label_10.place(x = 15,y = 298,width = 111,height = 24)
Label_10.configure(relief = "flat")
Label_10_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Label_10.configure(font = Label_10_Ft)
Button_11= tkinter.Button(root,text="合并",width = 10,height = 4)
Fun.Register(uiName,'Button_11',Button_11)
Button_11.place(x = 370,y = 292,width = 115,height = 36)
Button_11.configure(command=lambda:MergeNew_cmd.Button_11_onCommand(uiName,"Button_11"))
Button_11_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Button_11.configure(font = Button_11_Ft)
Label_12= tkinter.Label(root,text="需要合并的文件列表",width = 10,height = 4)
Fun.Register(uiName,'Label_12',Label_12)
Label_12.place(x = 341,y = 22,width = 205,height = 34)
Label_12.configure(relief = "flat")
Label_12_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Label_12.configure(font = Label_12_Ft)
#Inital all element's Data
Fun.InitElementData(uiName)
#Add Some Logic Code Here: (Keep This Line of comments)
#Create the root of Kinter
if __name__ == '__main__':
root = tkinter.Tk()
MyDlg = MergeNew(root)
root.mainloop()
| 2.984375
| 3
|
setup.py
|
m-vdb/sentry-scrapy
| 7
|
12781871
|
#!/usr/bin/env python
from setuptools import setup, find_packages
VERSION = '0.2'
with open('README.md') as readme:
long_description = readme.read()
setup(
name='sentry-scrapy',
version=VERSION,
description='Scrapy integration with Sentry SDK (unofficial)',
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
install_requires=['Scrapy', 'sentry-sdk'],
license="MIT",
keywords="sentry scrapy sdk integration",
url='https://github.com/m-vdb/sentry-scrapy',
download_url='https://github.com/m-vdb/sentry-scrapy/archive/v{}.tar.gz'.format(VERSION),
project_urls={
"Source Code": "https://github.com/m-vdb/sentry-scrapy",
}
)
| 1.289063
| 1
|
common/appenginepatch/ragendja/auth/models.py
|
westurner/nhs-social-web
| 2
|
12781872
|
<reponame>westurner/nhs-social-web<gh_stars>1-10
from django.contrib.auth.models import *
from django.contrib.auth.models import DjangoCompatibleUser as User
| 1.257813
| 1
|
Crawler/GitCrawler_U.py
|
FalconLK/FaCoY
| 24
|
12781873
|
#!/usr/bin/jython
# -*- coding: utf-8 -*-
from subprocess import call # , Popen
import requests
from os import makedirs, removedirs, walk, remove, listdir, stat, rename
from os.path import isdir, join, getmtime, abspath
from pymysql import connect
from time import strftime, gmtime
def many(cur):
while True:
results = cur.fetchmany(10)
if not results:
break
for result in results:
yield result
import hashlib
def md5(s):
return hashlib.md5(s).hexdigest()
def read_file(path):
with open(path, "r") as f:
return f.read()
class GitCrawler:
    def __init__(self, home="/home/hdd/gitArchive/New/New_20161104/"):  # downloads are saved under this directory
self.base = home
#self.home_path = "%s/%s" % (home, strftime('%Y.%m.%d'))
self.home_path = "%s" % (home)
self.conn = None
self.count = 0
def start(self, num):
print "Crawling Starts.."
# if not isdir(self.home_path):
# makedirs(self.home_path)
        # TODO: just change this part from localhost to the actual DB server.
self.conn = connect(host='localhost', port=3306, user='root', passwd='<PASSWORD>', db='ghtorrent')
""" Retrieves Github API urls from MySQL Github Archive and check if repo has been forked at least once """
sql = "SELECT url FROM ghtorrent.projects WHERE language = '\"Java\"' AND forked_from IS NULL AND deleted = 0;" # 5001,5000;
cur = self.conn.cursor()
cur.execute(sql)
print "DB Query... Done..."
iterator = 0
for row in many(cur):
self.count += 1
if self.count < num:
print self.count
continue
else:
iterator = self.count #starts from count
url = row[0][1:].strip()
url = url[:-1]
# url = row[0]
self.clone_repo(url)
iterator += 1
print str(iterator) + " / 1,541,018"
def is_forked(self, url):
try:
res = requests.get(url, auth=('rsirres', '<PASSWORD>'))
data = res.text.encode("utf-8")
num_forks = data.split('"forks":')[1].split(",")[0].strip()
# print "forked : " + str(num_forks != "0")
except Exception as e:
# print "Repository is probably unavailable or you reached the limit of your GitHub requests"
return False
return num_forks != "0"
# TODO: Add time and date of download
def clone_repo(self, url):
project_name = url.split("/")[-1]
username = url.split("/")[-2]
new_url = "https://github.com/%s/%s" % (username, project_name)
project_dir = "%s/%s_%s" % (self.home_path, username, project_name)
if not isdir(project_dir):
makedirs(project_dir)
if self.is_forked(url):
print "Clone: %s" % url
call(["git", "clone", new_url], cwd=project_dir)
self.filter_java_files(project_dir)
else:
removedirs(project_dir)
print "NOt forked==============================="
else:
# print "Project: %s already exists." % project_dir
print "private or already Exist====================="
    def filter_java_files(self, directory):  # delete everything that is not a .java file
# from multiprocessing import Process
from threading import Thread
def remove_non_java_files(directory):
non_java_files = (join(dirpath, f)
for dirpath, dirnames, files in walk(directory)
for f in files if not f.endswith('.java'))
for non_java_file in non_java_files:
remove(non_java_file)
t = Thread(target=remove_non_java_files, args=(directory,)) # edited by Leo
t.start()
def stats(self):
files = (join(dirpath, f)
for dirpath, dirnames, files in walk(self.base)
for f in files)
num_files = 0
duplicates = 0
loc = 0
file_hash_set = set()
for f in files:
try:
f_content = read_file(f)
h = md5(f_content)
if h in file_hash_set:
duplicates += 1
else:
file_hash_set.add(h)
loc += f_content.count('\n')
num_files += 1
except:
pass
print "Number of files: %s Duplicates: %s, LOC: %s" % (num_files, duplicates, loc)
if __name__ == '__main__':
crawler = GitCrawler()
num = 1
crawler.start(num)
crawler.stats()
| 2.34375
| 2
|
pymongo_inmemory/_pim.py
|
ItsKarma/pymongo_inmemory
| 25
|
12781874
|
import pymongo
from .mongod import Mongod
class MongoClient(pymongo.MongoClient):
def __init__(self, host=None, port=None, **kwargs):
self._mongod = Mongod()
self._mongod.start()
super().__init__(self._mongod.connection_string, **kwargs)
def close(self):
self._mongod.stop()
super().close()
def pim_mongodump(self, *args, **kwargs):
return self._mongod.mongodump(*args, **kwargs)
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.DEBUG)
m = MongoClient("mongodb://127.0.0.1/something", 27017)
m.close()
| 2.625
| 3
|
spinta/datasets/commands/error.py
|
atviriduomenys/spinta
| 2
|
12781875
|
<filename>spinta/datasets/commands/error.py
from typing import Dict
from spinta import commands
from spinta.datasets.components import Resource, Entity, Attribute
@commands.get_error_context.register(Resource)
def get_error_context(resource: Resource, *, prefix='this') -> Dict[str, str]:
context = commands.get_error_context(resource.dataset, prefix=f'{prefix}.dataset')
context['resource'] = f'{prefix}.name'
return context
@commands.get_error_context.register(Entity)
def get_error_context(external: Entity, *, prefix='this') -> Dict[str, str]:
context = commands.get_error_context(external.model, prefix=f'{prefix}.model')
context['external'] = f'{prefix}.name'
return context
@commands.get_error_context.register(Attribute)
def get_error_context(external: Attribute, *, prefix='this') -> Dict[str, str]:
context = commands.get_error_context(external.prop, prefix=f'{prefix}.prop')
context['external'] = f'{prefix}.name'
return context
| 2.34375
| 2
|
netbox_ddns/migrations/0003_dnsstatus.py
|
FreedomNetNL/netbox-ddns
| 58
|
12781876
|
# Generated by Django 3.0.5 on 2020-04-15 10:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ipam', '0036_standardize_description'),
('netbox_ddns', '0002_add_ttl'),
]
operations = [
migrations.CreateModel(
name='DNSStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
('last_update', models.DateTimeField(auto_now=True)),
('forward_action', models.PositiveSmallIntegerField(blank=True, null=True)),
('forward_rcode', models.PositiveIntegerField(blank=True, null=True)),
('reverse_action', models.PositiveSmallIntegerField(blank=True, null=True)),
('reverse_rcode', models.PositiveIntegerField(blank=True, null=True)),
('ip_address', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='ipam.IPAddress')),
],
options={
'verbose_name': 'DNS status',
'verbose_name_plural': 'DNS status',
},
),
]
| 1.6875
| 2
|
run_tests.py
|
unistra/schedulesy
| 1
|
12781877
|
import os
import shutil
import sys
import django
from django.apps import apps
from django.conf import settings
from django.test.utils import get_runner
def manage_model(model):
model._meta.managed = True
if __name__ == '__main__':
os.environ['DJANGO_SETTINGS_MODULE'] = 'schedulesy.settings.unittest'
django.setup()
# Set all tested models to "managed = True"
for app in settings.LOCAL_APPS:
config = (
apps.get_app_config(app.split('.')[-1])
if not app.endswith('Config')
else apps.get_app_config(app.split('.')[-3])
)
print(type(config))
list(map(manage_model, config.get_models()))
test_apps = ['schedulesy'] if len(sys.argv) <= 1 else sys.argv[1:]
TestRunner = get_runner(settings)
test_runner = TestRunner(
pattern='test_*.py', verbosity=2, interactive=True, failfast=False
)
failures = test_runner.run_tests(test_apps)
# Delete temporary directory
shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)
sys.exit(failures)
| 2.03125
| 2
|
sentiments.py
|
asmaaziz/sentiment-analysis
| 0
|
12781878
|
from nltk.sentiment.vader import SentimentIntensityAnalyzer
dir= 'C:\\Users\\asmazi01\\dir_path'
commentfile= 'input.txt'
delim ='\t'
fname = dir + '\\' + commentfile
with open(fname, encoding='utf-8', errors='ignore') as f:
sentences = f.readlines()
sid = SentimentIntensityAnalyzer()
totalCompoundScore = 0.0
totalNegativeScore = 0.0
totalNeutralScore = 0.0
totalPositiveScore = 0.0
totalNumOfSentences = 0.0
outfpath = fname + '.sentiment.txt'
outf = open(outfpath,'wb')
outf.write("Sentence\tcompound score\tnegative score\tneutral score\tpositive score\n".encode('utf-8'))
for sentence in sentences:
if sentence.strip() == "":
continue
totalNumOfSentences += 1.0
print(sentence)
ss = sid.polarity_scores(sentence)
outline = "\"" + sentence.strip() + "\""
compScore = 0.0
negScore = 0.0
neuScore = 0.0
posScore = 0.0
for k in sorted(ss):
print('{0}: {1}, '.format(k, ss[k]), end='')
if k == "compound":
compScore = ss[k]
if k == "neg":
negScore = ss[k]
if k == "neu":
neuScore = ss[k]
if k == "pos":
posScore = ss[k]
outline = outline + delim \
+ str(compScore) + delim \
+ str(negScore) + delim \
+ str(neuScore) + delim \
+ str(posScore) + "\n"
totalCompoundScore += compScore
totalNegativeScore += negScore
totalNeutralScore += neuScore
totalPositiveScore += posScore
print()
outf.write(outline.encode('utf-8'))
avgCompoundScore = str(totalCompoundScore/totalNumOfSentences)
avgNegativeScore = str(totalNegativeScore/totalNumOfSentences)
avgNeutralScore = str(totalNeutralScore/totalNumOfSentences)
avgPositiveScore = str(totalPositiveScore/totalNumOfSentences)
outline = "total sentence=" + str(int(totalNumOfSentences))\
+ delim + avgCompoundScore\
+ delim + avgNegativeScore\
+ delim + avgNeutralScore\
+ delim + avgPositiveScore + "\n"
print(outline)
#outf.write(outline.encode('utf-8'))
outf.close()
| 2.984375
| 3
|
migrations/versions/00034ea37afb_meals_dinner_many2many.py
|
fennec-fox-cast-team/fennec-fox-fit
| 0
|
12781879
|
"""meals dinner many2many
Revision ID: 00034ea37afb
Revises: <PASSWORD>
Create Date: 2019-12-16 11:54:41.895663
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('dinners_meals',
sa.Column('dinner_id', sa.Integer(), nullable=True),
sa.Column('meal_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['dinner_id'], ['dinners.id'], ),
sa.ForeignKeyConstraint(['meal_id'], ['meals.id'], )
)
op.drop_table('airports')
op.add_column('meals', sa.Column('nutrition_value', sa.Float(), nullable=True))
op.add_column('meals', sa.Column('vitamins', sa.String(length=100), nullable=True))
op.add_column('users', sa.Column('role', sa.String(length=20), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'role')
op.drop_column('meals', 'vitamins')
op.drop_column('meals', 'nutrition_value')
op.create_table('airports',
sa.Column('IATA_CODE', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('AIRPORT', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('CITY', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('STATE', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('COUNTRY', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('LATITUDE', sa.NUMERIC(precision=7, scale=5), autoincrement=False, nullable=True),
sa.Column('LONGITUDE', sa.NUMERIC(precision=8, scale=5), autoincrement=False, nullable=True)
)
op.drop_table('dinners_meals')
# ### end Alembic commands ###
| 1.828125
| 2
|
tests/test_box.py
|
ikalnytskyi/picobox
| 35
|
12781880
|
<filename>tests/test_box.py
"""Test picobox class."""
import collections
import inspect
import itertools
import traceback
import picobox
import pytest
def test_box_put_key(boxclass, supported_key):
testbox = boxclass()
testbox.put(supported_key, "the-value")
assert testbox.get(supported_key) == "the-value"
def test_box_put_value(boxclass, supported_value):
testbox = boxclass()
testbox.put("the-key", supported_value)
assert testbox.get("the-key") is supported_value
def test_box_put_factory(boxclass):
testbox = boxclass()
testbox.put("the-key", factory=object)
objects = [testbox.get("the-key") for _ in range(10)]
assert len(objects) == 10
assert len(set(map(id, objects))) == 10
def test_box_put_factory_singleton_scope(boxclass):
testbox = boxclass()
testbox.put("the-key", factory=object, scope=picobox.singleton)
objects = [testbox.get("the-key") for _ in range(10)]
assert len(objects) == 10
assert len(set(map(id, objects))) == 1
def test_box_put_factory_custom_scope(boxclass):
class namespacescope(picobox.Scope):
def __init__(self):
self._store = collections.defaultdict(dict)
def set(self, key, value):
self._store[namespace][key] = value
def get(self, key):
return self._store[namespace][key]
testbox = boxclass()
testbox.put("the-key", factory=object, scope=namespacescope)
objects = []
namespace = "one"
objects.extend(
[
testbox.get("the-key"),
testbox.get("the-key"),
]
)
namespace = "two"
objects.extend(
[
testbox.get("the-key"),
testbox.get("the-key"),
]
)
assert len(objects) == 4
assert len(set(map(id, objects[:2]))) == 1
assert len(set(map(id, objects[2:]))) == 1
assert len(set(map(id, objects))) == 2
def test_box_put_factory_dependency(boxclass):
testbox = boxclass()
@testbox.pass_("a")
def fn(a):
return a + 1
testbox.put("a", 13)
testbox.put("b", factory=fn)
assert testbox.get("b") == 14
def test_box_put_value_and_factory(boxclass):
testbox = boxclass()
with pytest.raises(ValueError) as excinfo:
testbox.put("the-key", 42, factory=object)
excinfo.match("either 'value' or 'factory'/'scope' pair must be passed")
def test_box_put_value_and_scope(boxclass):
testbox = boxclass()
with pytest.raises(ValueError) as excinfo:
testbox.put("the-key", 42, scope=picobox.threadlocal)
excinfo.match("either 'value' or 'factory'/'scope' pair must be passed")
def test_box_get_keyerror(boxclass):
testbox = boxclass()
with pytest.raises(KeyError, match="the-key"):
testbox.get("the-key")
def test_box_get_default(boxclass):
testbox = boxclass()
sentinel = object()
assert testbox.get("the-key", sentinel) is sentinel
@pytest.mark.parametrize(
"args, kwargs, rv",
[
((1, 2, 3), {}, 6),
((1, 2), {"c": 3}, 6),
((1,), {"b": 2, "c": 3}, 6),
((), {"a": 1, "b": 2, "c": 3}, 6),
((), {"b": 2, "c": 3}, 15),
],
)
def test_box_pass_a(args, kwargs, rv, boxclass):
testbox = boxclass()
testbox.put("a", 10)
@testbox.pass_("a")
def fn(a, b, c):
return a + b + c
assert fn(*args, **kwargs) == rv
@pytest.mark.parametrize(
"args, kwargs, rv",
[
((1, 2, 3), {}, 6),
((1, 2), {"c": 3}, 6),
((1,), {"b": 2, "c": 3}, 6),
((1,), {"c": 3}, 14),
((), {"a": 1, "b": 2, "c": 3}, 6),
((), {"a": 1, "c": 3}, 14),
],
)
def test_box_pass_b(args, kwargs, rv, boxclass):
testbox = boxclass()
testbox.put("b", 10)
@testbox.pass_("b")
def fn(a, b, c):
return a + b + c
assert fn(*args, **kwargs) == rv
@pytest.mark.parametrize(
"args, kwargs, rv",
[
((1, 2, 3), {}, 6),
((1, 2), {"c": 3}, 6),
((1, 2), {}, 13),
((1,), {"b": 2, "c": 3}, 6),
((1,), {"b": 2}, 13),
((), {"a": 1, "b": 2, "c": 3}, 6),
((), {"a": 1, "b": 2}, 13),
],
)
def test_box_pass_c(args, kwargs, rv, boxclass):
testbox = boxclass()
testbox.put("c", 10)
@testbox.pass_("c")
def fn(a, b, c):
return a + b + c
assert fn(*args, **kwargs) == rv
@pytest.mark.parametrize(
"args, kwargs, rv",
[
((1, 2, 3), {}, 6),
((1, 2), {"c": 3}, 6),
((1, 2), {}, 13),
((1,), {"b": 2, "c": 3}, 6),
((1,), {"b": 2}, 13),
((), {"a": 1, "b": 2, "c": 3}, 6),
((), {"a": 1, "b": 2}, 13),
],
)
def test_box_pass_c_default(args, kwargs, rv, boxclass):
testbox = boxclass()
testbox.put("c", 10)
@testbox.pass_("c")
def fn(a, b, c=20):
return a + b + c
assert fn(*args, **kwargs) == rv
@pytest.mark.parametrize(
"args, kwargs, rv",
[
((1, 2, 3), {}, 6),
((1, 2), {"c": 3}, 6),
((1,), {"b": 2, "c": 3}, 6),
((1,), {"c": 3}, 104),
((), {"a": 1, "b": 2, "c": 3}, 6),
((), {"a": 1, "c": 3}, 104),
((), {"b": 2, "c": 3}, 15),
((), {"c": 3}, 113),
],
)
def test_box_pass_ab(args, kwargs, rv, boxclass):
testbox = boxclass()
testbox.put("a", 10)
testbox.put("b", 100)
@testbox.pass_("a")
@testbox.pass_("b")
def fn(a, b, c):
return a + b + c
assert fn(*args, **kwargs) == rv
@pytest.mark.parametrize(
"args, kwargs, rv",
[
((1, 2, 3), {}, 6),
((1, 2), {"c": 3}, 6),
((1, 2), {}, 103),
((1,), {"b": 2, "c": 3}, 6),
((1,), {"b": 2}, 103),
((1,), {"c": 3}, 14),
((1,), {}, 111),
((), {"a": 1, "b": 2, "c": 3}, 6),
((), {"a": 1, "b": 2}, 103),
((), {"a": 1, "c": 3}, 14),
((), {"a": 1}, 111),
],
)
def test_box_pass_bc(args, kwargs, rv, boxclass):
testbox = boxclass()
testbox.put("b", 10)
testbox.put("c", 100)
@testbox.pass_("b")
@testbox.pass_("c")
def fn(a, b, c):
return a + b + c
assert fn(*args, **kwargs) == rv
@pytest.mark.parametrize(
"args, kwargs, rv",
[
((1, 2, 3), {}, 6),
((1, 2), {"c": 3}, 6),
((1, 2), {}, 103),
((1,), {"b": 2, "c": 3}, 6),
((1,), {"b": 2}, 103),
((), {"a": 1, "b": 2, "c": 3}, 6),
((), {"a": 1, "b": 2}, 103),
((), {"b": 2, "c": 3}, 15),
((), {"b": 2}, 112),
],
)
def test_box_pass_ac(args, kwargs, rv, boxclass):
testbox = boxclass()
testbox.put("a", 10)
testbox.put("c", 100)
@testbox.pass_("a")
@testbox.pass_("c")
def fn(a, b, c):
return a + b + c
assert fn(*args, **kwargs) == rv
@pytest.mark.parametrize(
"args, kwargs, rv",
[
((1, 2, 3), {}, 6),
((1, 2), {"c": 3}, 6),
((1, 2), {}, 1003),
((1,), {"b": 2, "c": 3}, 6),
((1,), {"b": 2}, 1003),
((1,), {"c": 3}, 104),
((1,), {}, 1101),
((), {"a": 1, "b": 2, "c": 3}, 6),
((), {"a": 1, "b": 2}, 1003),
((), {"a": 1, "c": 3}, 104),
((), {"a": 1}, 1101),
((), {}, 1110),
],
)
def test_box_pass_abc(args, kwargs, rv, boxclass):
testbox = boxclass()
testbox.put("a", 10)
testbox.put("b", 100)
testbox.put("c", 1000)
@testbox.pass_("a")
@testbox.pass_("b")
@testbox.pass_("c")
def fn(a, b, c):
return a + b + c
assert fn(*args, **kwargs) == rv
@pytest.mark.parametrize(
"args, kwargs, rv",
[
((1, 2, 3), {}, 6),
((1, 2), {"c": 3}, 6),
((1,), {"b": 2, "c": 3}, 6),
((1,), {"c": 3}, 14),
((), {"a": 1, "b": 2, "c": 3}, 6),
((), {"a": 1, "c": 3}, 14),
],
)
def test_box_pass_d_as_b(args, kwargs, rv, boxclass):
testbox = boxclass()
testbox.put("d", 10)
@testbox.pass_("d", as_="b")
def fn(a, b, c):
return a + b + c
assert fn(*args, **kwargs) == rv
@pytest.mark.parametrize(
"args, kwargs, rv",
[
((1,), {}, 1),
((), {"x": 1}, 1),
((), {}, 42),
],
)
def test_box_pass_method(args, kwargs, rv, boxclass):
testbox = boxclass()
testbox.put("x", 42)
class Foo:
@testbox.pass_("x")
def __init__(self, x):
self.x = x
assert Foo(*args, **kwargs).x == rv
@pytest.mark.parametrize(
"args, kwargs, rv",
[
((0,), {}, 41),
((), {"x": 0}, 41),
((), {}, 42),
],
)
def test_box_pass_key_type(args, kwargs, rv, boxclass):
class key:
pass
testbox = boxclass()
testbox.put(key, 1)
@testbox.pass_(key, as_="x")
def fn(x):
return x + 41
assert fn(*args, **kwargs) == rv
def test_box_pass_unexpected_argument(boxclass):
testbox = boxclass()
testbox.put("d", 10)
@testbox.pass_("d")
def fn(a, b):
return a + b
with pytest.raises(TypeError) as excinfo:
fn(1, 2)
assert str(excinfo.value) == "fn() got an unexpected keyword argument 'd'"
def test_box_pass_keyerror(boxclass):
testbox = boxclass()
@testbox.pass_("b")
def fn(a, b):
return a + b
with pytest.raises(KeyError) as excinfo:
fn(1)
excinfo.match("b")
def test_box_pass_optimization(boxclass, request):
testbox = boxclass()
testbox.put("a", 1)
testbox.put("b", 1)
testbox.put("d", 1)
@testbox.pass_("a")
@testbox.pass_("b")
@testbox.pass_("d", as_="c")
def fn(a, b, c):
backtrace = list(
itertools.dropwhile(
lambda frame: frame[2] != request.function.__name__,
traceback.extract_stack(),
)
)
return backtrace[1:-1]
assert len(fn()) == 1
def test_box_pass_optimization_complex(boxclass, request):
testbox = boxclass()
testbox.put("a", 1)
testbox.put("b", 1)
testbox.put("c", 1)
testbox.put("d", 1)
def passthrough(fn):
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
return wrapper
@testbox.pass_("a")
@testbox.pass_("b")
@passthrough
@testbox.pass_("c")
@testbox.pass_("d")
def fn(a, b, c, d):
backtrace = list(
itertools.dropwhile(
lambda frame: frame[2] != request.function.__name__,
traceback.extract_stack(),
)
)
return backtrace[1:-1]
assert len(fn()) == 3
def test_chainbox_put_changes_box():
testbox = picobox.Box()
testchainbox = picobox.ChainBox(testbox)
with pytest.raises(KeyError, match="the-key"):
testchainbox.get("the-key")
testchainbox.put("the-key", 42)
assert testbox.get("the-key") == 42
def test_chainbox_get_chained():
testbox_a = picobox.Box()
testbox_a.put("the-key", 42)
testbox_b = picobox.Box()
testbox_b.put("the-key", 13)
testbox_b.put("the-pin", 12)
testchainbox = picobox.ChainBox(testbox_a, testbox_b)
assert testchainbox.get("the-key") == 42
assert testchainbox.get("the-pin") == 12
def test_chainbox_isinstance_box():
assert isinstance(picobox.ChainBox(), picobox.Box)
@pytest.mark.parametrize(
"name",
[name for name, _ in inspect.getmembers(picobox.Box) if not name.startswith("_")],
)
def test_chainbox_box_interface(name):
boxsignature = inspect.signature(getattr(picobox.Box(), name))
chainboxsignature = inspect.signature(getattr(picobox.ChainBox(), name))
assert boxsignature == chainboxsignature
| 2.4375
| 2
|
script.py
|
glaukiol1/sendit-spammer
| 1
|
12781881
|
<gh_stars>1-10
import requests
import json
import urllib3
import sys
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {
'Host': 'api.getsendit.com',
'Content-Length': '354',
'Sec-Ch-Ua': '" Not A;Brand";v="99", "Chromium";v="92"',
'App-Id': 'c2ad997f-1bf2-4f2c-b5fd-83926e8f3c65',
'App-Version': '1.0',
'Sec-Ch-Ua-Mobile': '?0',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
'Content-Type': 'application/json',
'Accept': '*/*',
'Origin': 'https://web.getsendit.com',
'Sec-Fetch-Site': 'same-site',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'Referer': 'https://web.getsendit.com/',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.9',
}
def main(json_payload,instance_number):
rqs = 0
while True:
try:
response = requests.post('https://api.getsendit.com/v1/posts',
headers=headers, data=json_payload, verify=False,timeout=0.9)
if (response.json()['status'] == 'success'):
rqs = rqs + 1
print(f'\033[92m'+f'[*] REQUEST SUCCESS | instance: {instance_number}')
else:
print(f"\033[91m[*]NETWORK ERROR| instance: {instance_number}")
except:
print(f"\033[91m[*]UNKNOWN ERROR | instance: {instance_number}")
| 2.765625
| 3
|
tensortrade/version.py
|
andrewczgithub/tensortrade
| 1
|
12781882
|
__version__ = "0.0.2-rc0"
| 1.054688
| 1
|
test/test_version.py
|
dell/dataiq-plugin-example
| 1
|
12781883
|
import unittest
from test import AppTest
class TestVersion(AppTest):
@staticmethod
def make_version_request(client, token):
return client.get(
'/version/',
headers={'Authorization': token})
def test_version(self, client, auth_provider):
r = TestVersion.make_version_request(
client, auth_provider('root', -1))
assert 200 == r.status_code
j = r.get_json()
assert j is not None
version = j['version']
assert '1.0.0.0' == version
def test_no_auth(self, client):
r = TestVersion.make_version_request(
client, '')
assert 401 == r.status_code
if __name__ == '__main__':
unittest.main()
| 2.875
| 3
|
python_dev/send_mail.py
|
Dloar/stocks_games
| 0
|
12781884
|
<filename>python_dev/send_mail.py
###
import smtplib
import datetime
from python_dev.functions import getDailyChange, getConfigFile
from email.message import EmailMessage
import warnings
warnings.filterwarnings("ignore")
config_conn = getConfigFile()
daily_looser, daily_gainer, daily_result, percentage_change, portfolio_db = getDailyChange()
# sending to multiple people
contacts = [config_conn.email_user[0]]
msg = EmailMessage()
msg['Subject'] = 'Update at ' + datetime.datetime.today().strftime('%Y-%m-%d')
msg['From'] = config_conn.email_user[0]
msg['To'] = ', '.join(contacts)
msg.add_alternative("""\
<!DOCTYPE html>
<html>
<head></head>
<body>
<p>Hi!<br>
Please see the results.
</p>
</body>
</html>
""", subtype='html')
msg.add_attachment(daily_looser, subtype='html')
msg.add_attachment("""
""")
# msg.add_attachment('daily_result, percentage_change)
msg.add_attachment(daily_gainer, subtype='html')
msg.add_attachment("""
""")
msg.add_attachment(portfolio_db, subtype='html')
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp: #Google connection SSL
smtp.login(user=config_conn.email_user[0], password=config_conn.email_psw[0])
smtp.send_message(msg=msg)
# with smtplib.SMTP('smtp.gmail.com', 587) as smtp: #Google connection
# smtp.ehlo() # Initiate process
# smtp.starttls() # Encrypt
# smtp.ehlo() # Reinitiate process
# subject = 'test1'
# body = 'This is a body of the email'
#
# msg = f'Subject: {subject}\n\n{body}'
#
# smtp.sendmail(config_conn.email_user[0], '<EMAIL>', msg=msg)
# with smtplib.SMTP('localhost', 1025) as smtp: #localhost connection
# subject = 'test1'
# body = 'This is a body of the email'
#
# msg = f'Subject: {subject}\n\n{body}'
#
# smtp.sendmail(config_conn.email_user[0], '<EMAIL>', msg=msg)
| 2.6875
| 3
|
esanpy/__init__.py
|
codelibs/esanpy
| 13
|
12781885
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import argparse
from logging import getLogger, Formatter, StreamHandler
import os
import sys
from esanpy import analyzers
from esanpy import elasticsearch
from esanpy.core import ESRUNNER_VERSION, DEFAULT_CLUSTER_NAME, DEFAULT_HTTP_PORT, DEFAULT_TRANSPORT_PORT,\
DEFAULT_PLUGINS
start_server = elasticsearch.start_server
stop_server = elasticsearch.stop_server
create_analysis = elasticsearch.create_analysis
get_analysis = elasticsearch.get_analysis
delete_analysis = elasticsearch.delete_analysis
analyzer = analyzers.analyzer
custom_analyzer = analyzers.custom_analyzer
logger = getLogger('esanpy')
def parse_args(args):
parser = argparse.ArgumentParser(description=None)
parser.add_argument('--runner-version', dest='esrunner_version', action='store',
default=ESRUNNER_VERSION, help='Elasticsearch cluster name')
parser.add_argument('--cluster-name', dest='cluster_name', action='store',
default=DEFAULT_CLUSTER_NAME, help='Elasticsearch cluster name')
parser.add_argument('--host', dest='host', action='store',
default='localhost', help='Elasticsearch host name')
parser.add_argument('--http-port', dest='http_port', action='store',
default=DEFAULT_HTTP_PORT, type=int, help='Elasticsearch HTTP port')
parser.add_argument('--transport-port', dest='transport_port', action='store',
default=DEFAULT_TRANSPORT_PORT, type=int, help='Elasticsearch Transport port')
parser.add_argument('--analyzer-name', dest='analyzer_name', action='store',
default='standard', help='Analyzer name')
parser.add_argument('--text', dest='text', action='store', help='Text to analyze')
parser.add_argument('--plugin', dest='plugins', action='append', help='Plugins to install')
parser.add_argument('--verbose', '-v', dest='verbose', action='store_true',
default=False, help='Display debug messages')
parser.add_argument('--stop', dest='stop', action='store_true',
default=False, help='Stop Elasticsearch on exit')
return parser.parse_args(args=args)
def configure_logging(options):
formatter = Formatter('[%(asctime)s] %(message)s')
handler = StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.addHandler(handler)
if options.verbose:
logger.setLevel(10)
else:
logger.setLevel(20)
def main(args=None):
options = parse_args(args)
configure_logging(options)
plugin_names = DEFAULT_PLUGINS if options.plugins is None else options.plugins
start_server(host=options.host,
http_port=options.http_port,
transport_port=options.transport_port,
cluster_name=options.cluster_name,
plugin_names=plugin_names,
esrunner_version=options.esrunner_version)
tokens = analyzer(options.text,
analyzer=options.analyzer_name)
print('\n'.join(tokens))
if options.stop:
stop_server(host=options.host,
http_port=options.http_port,
esrunner_version=options.esrunner_version)
return 0
if __name__ == '__main__':
sys.exit(main())
| 2.171875
| 2
|
scripts/experiments/create_q_and_time_for_shortqueries.py
|
dcs-chalmers/dataloc_vn
| 0
|
12781886
|
<filename>scripts/experiments/create_q_and_time_for_shortqueries.py
#!/usr/bin/env python3
# python3 scriptname.py beijing path_to_daysfolder path_to_basefolder
import sys
sys.path.append('../../')
sys.path.append('../datasets/')
import time
from query import *
import paths
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("experiment", choices=["beijing"], type=str, help="Choose which experiment (dataset) to run.")
args = parser.parse_args()
mode = args.experiment
basefolder = paths.beijing_folder
### actually execute
start_time = time.time()
print("Calculating answer behavior and workload per vehicle...".format(mode))
if mode == "beijing":
gen_beijing_timeq_files(basefolder)
else:
print("This should never have happened. I quit.")
quit()
time_needed = time.time() - start_time
print("...done in {} hrs".format(time_needed/3600))
| 2.65625
| 3
|
src/servicetools/middleware.py
|
zamj/python-service-tools
| 0
|
12781887
|
<filename>src/servicetools/middleware.py
"""Starlette middleware for services."""
import logging
from time import perf_counter
from typing import Any, Callable, Awaitable, Set, Optional
from structlog import get_logger
from starlette.middleware.base import BaseHTTPMiddleware, DispatchFunction
from starlette import status
from starlette.requests import Request
from starlette.responses import Response
from starlette.types import ASGIApp
LOGGER = get_logger(__name__)
class StructlogRequestMiddleware(BaseHTTPMiddleware):
"""
Logs information to a structlog logger about each HTTP request.
Logging will occur at the start and completion of each request.
If a request throws an exception, it will be logged and converted to a 500
response.
    Any failure responses (400s and 500s) will be logged. A set of failure
codes to ignore can be provided to not log certain error codes (for example,
ignore all 404 errors).
"""
def __init__(
self,
app: ASGIApp,
dispatch: DispatchFunction = None,
logger: Any = LOGGER,
log_level: int = logging.INFO,
ignored_status_codes: Optional[Set[int]] = None,
) -> None:
"""
Create structlog request middleware.
:param app: Web application.
:param dispatch: Dispatch function.
:param logger: Structlog logger to log to.
:param log_level: Log level to write at.
:param ignored_status_codes: Set of status codes to not report on.
"""
super().__init__(app, dispatch)
self.logger = logger
self.log_level = log_level
self.ignored_status_codes = ignored_status_codes or set()
def __log(self, msg: str, **kwargs: Any) -> None:
"""Log at the configured level."""
self.logger.log(self.log_level, msg, **kwargs)
async def dispatch(
self, request: Request, call_next: Callable[[Request], Awaitable[Response]]
) -> Response:
"""Log information about the request and call the next layer."""
method = request.method
endpoint = request.url.path
self.__log("HTTP request start", method=method, endpoint=endpoint)
start_time = perf_counter()
try:
response = await call_next(request)
except Exception as e:
self.__log("Exception Occurred", exc_info=True)
raise e
end_time = perf_counter()
duration = end_time - start_time
status_code = response.status_code
self.__log(
"HTTP request end",
method=method,
endpoint=endpoint,
status_code=status_code,
seconds=duration,
)
if (
status_code >= status.HTTP_400_BAD_REQUEST
and status_code not in self.ignored_status_codes
):
self.__log(
"HTTP request error",
method=method,
endpoint=endpoint,
status_code=status_code,
content=response.body,
)
return response
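# Example wiring (a minimal sketch assuming a standard Starlette application;
# the `app` object and the ignored 404 status code are illustrative, not taken
# from the original source):
#
#   from starlette.applications import Starlette
#
#   app = Starlette()
#   app.add_middleware(
#       StructlogRequestMiddleware,
#       log_level=logging.DEBUG,
#       ignored_status_codes={404},
#   )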
| 2.34375
| 2
|
jobs/baseline.py
|
GAIPS/ILU-RL
| 0
|
12781888
|
<gh_stars>0
"""Baseline:
* uses the train script for setting up the experiment
* requires a command-line argument `tls_type`: `actuated`, `static`
TODO: Include config for evaluation
"""
import os
from pathlib import Path
from datetime import datetime
import sys
import json
import tempfile
import multiprocessing
import multiprocessing.pool
import time
import shutil
import argparse
import configparser
from models.train import main as baseline
from ilurl.utils.decorators import processable, benchmarked
from ilurl.utils import str2bool
# Pipeline components.
from jobs.convert2csv import xml2csv
from analysis.baseline_plots import main as baseline_plots
from ilurl.utils.decorators import safe_run
_ERROR_MESSAGE_TEST = ("ERROR: Caught an exception while "
"executing analysis/baseline_plots.py script.")
test_plots = safe_run(baseline_plots, error_message=_ERROR_MESSAGE_TEST)
ILURL_PATH = Path(os.environ['ILURL_HOME'])
CONFIG_PATH = ILURL_PATH / 'config'
mp = multiprocessing.get_context('spawn')
class NoDaemonProcess(mp.Process):
@property
def daemon(self):
return False
@daemon.setter
def daemon(self, val):
pass
class NoDaemonContext(type(multiprocessing.get_context('spawn'))):
Process = NoDaemonProcess
class NonDaemonicPool(multiprocessing.pool.Pool):
def __init__(self, *args, **kwargs):
kwargs['context'] = NoDaemonContext()
super(NonDaemonicPool, self).__init__(*args, **kwargs)
def delay_baseline(args):
"""Delays execution.
Parameters:
-----------
* args: tuple
Position 0: execution delay of the process.
Position 1: store the train config file.
Returns:
-------
    * rv : str
        path of the directory holding the experiment data produced by `baseline`
        for the given config
"""
time.sleep(args[0])
return baseline(args[1])
def get_arguments():
parser = argparse.ArgumentParser(
description="""
Script to evaluate actuated, static or random timings.
"""
)
parser.add_argument('tls_type', type=str, nargs='?',
choices=('actuated', 'static', 'webster', 'random', 'max_pressure'),
help='Control type.')
flags = parser.parse_args()
sys.argv = [sys.argv[0]]
return flags
def baseline_batch():
flags = get_arguments()
# Read script arguments from run.config file.
run_config = configparser.ConfigParser()
run_path = CONFIG_PATH / 'run.config'
run_config.read(run_path)
num_processors = int(run_config.get('run_args', 'num_processors'))
num_runs = int(run_config.get('run_args', 'num_runs'))
seeds = json.loads(run_config.get("run_args", "train_seeds"))
if len(seeds) != num_runs:
raise configparser.Error('Number of seeds in run.config `seeds`'
' must match the number of runs (`num_runs`) argument.')
print('Arguments (baseline.py):')
print('-----------------------')
print('Number of runs: {0}'.format(num_runs))
print('Number of processors: {0}'.format(num_processors))
print('Train seeds: {0}\n'.format(seeds))
# Assess total number of processors.
processors_total = mp.cpu_count()
print(f'Total number of processors available: {processors_total}\n')
# Adjust number of processors.
if num_processors > processors_total:
num_processors = processors_total
print(f'Number of processors downgraded to {num_processors}\n')
# Read train.py arguments from train.config file.
baseline_config = configparser.ConfigParser()
baseline_path = CONFIG_PATH / 'train.config'
baseline_config.read(str(baseline_path))
# Setup sumo-tls-type.
baseline_config.set('train_args', 'tls_type', flags.tls_type)
baseline_config.set('train_args', 'experiment_save_agent', str(False))
# Override train configurations with test parameters.
test_config = configparser.ConfigParser()
test_path = CONFIG_PATH / 'test.config'
test_config.read(test_path.as_posix())
horizon = int(test_config.get('test_args', 'rollout-time'))
baseline_config.set('train_args', 'experiment_time', str(horizon))
# Write .xml files for test plots creation.
baseline_config.set('train_args', 'sumo_emission', str(True))
timestamp = datetime.now().strftime('%Y%m%d%H%M%S.%f')
print(f'Experiment timestamp: {timestamp}')
with tempfile.TemporaryDirectory() as tmp_dir:
# Create a config file for each train.py
# with the respective seed. These config
# files are stored in a temporary directory.
tmp_path = Path(tmp_dir)
baseline_configs = []
for seed in seeds:
cfg_path = tmp_path / f'{flags.tls_type}-{seed}.config'
baseline_configs.append(cfg_path)
# Setup train seed.
baseline_config.set("train_args", "experiment_seed", str(seed + 1))
# Write temporary train config file.
with cfg_path.open('w') as ft:
baseline_config.write(ft)
# rvs: directories' names holding experiment data
if num_processors > 1:
packed_args = [(delay, cfg)
for (delay, cfg) in zip(range(len(baseline_configs)), baseline_configs)]
pool = NonDaemonicPool(num_processors)
rvs = pool.map(delay_baseline, packed_args)
pool.close()
pool.join()
else:
rvs = []
for cfg in baseline_configs:
rvs.append(delay_baseline((0.0, cfg)))
# Create a directory and move newly created files
paths = [Path(f) for f in rvs]
commons = [p.parent for p in paths]
if len(set(commons)) > 1:
raise ValueError(f'Directories {set(commons)} must have the same root')
dirpath = commons[0]
batchpath = dirpath / timestamp
if not batchpath.exists():
batchpath.mkdir()
# Move files
for src in paths:
dst = batchpath / src.parts[-1]
src.replace(dst)
sys.stdout.write(str(batchpath))
return str(batchpath)
@processable
def baseline_job():
# Suppress textual output.
return baseline_batch()
if __name__ == '__main__':
# 1) Run baseline.
experiment_root_path = baseline_batch()
# 2) Convert .xml files to .csv files.
xml2csv(experiment_root_path=experiment_root_path)
# 3) Create plots with metrics plots for final agent.
baseline_plots(experiment_root_path)
# 4) Clean up and compress files in order
# to optimize disk space.
print('\nCleaning and compressing files...\n')
experiment_root_path = Path(experiment_root_path)
for csv_path in experiment_root_path.rglob('*-emission.csv'):
Path(csv_path).unlink()
shutil.make_archive(experiment_root_path,
'gztar',
os.path.dirname(experiment_root_path),
experiment_root_path.name)
shutil.rmtree(experiment_root_path)
print('Experiment folder: {0}'.format(experiment_root_path))
| 2.21875
| 2
|
pelicanconf.py
|
alphor/jarmac.org
| 0
|
12781889
|
<reponame>alphor/jarmac.org
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = '<NAME>'
SITENAME = 'No Odd Cycles'
SITEURL = 'https://jarmac.org'
PATH = 'content'
TIMEZONE = 'EST'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
# ('Pelican', 'http://getpelican.com/'),
# ('Jinja2', 'http://jinja.pocoo.org/'),
LINKS = (('Source', 'https://github.com/alphor/jarmac.org'),
('nixOS', '//jarmac.org/category/nixos.html'),
('linux', '//jarmac.org/category/linux.html'),
('meta', '//jarmac.org/category/meta.html'),
('math', '//jarmac.org/category/math.html'))
# Social widget
SOCIAL = (('Github', 'https://github.com/alphor', '//jarmac.org/theme/img/GitHub-Mark-Light-32px.png'),
('RSS', 'https://jarmac.org/feed', '//jarmac.org/theme/img/feed-icon-28x28.png'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
ARTICLE_URL = 'posts/{slug}.html'
ARTICLE_SAVE_AS = 'posts/{slug}.html'
# Begin theme config
PLUGIN_PATHS = ['./pelican-plugins']
PLUGINS = ['i18n_subsites']
THEME = './pelican-themes/pelican-bootstrap3'
CUSTOM_CSS = 'theme/css/sitewide.css'
JINJA_ENVIRONMENT = {'extensions': ['jinja2.ext.i18n']}
HIDE_SIDEBAR = True
# PIWIK_URL = 'piwik.jarmac.org'
# PIWIK_SITE_ID = "'1'"
DISABLE_URL_HASH = True
FEED_ALL_RSS = 'feed'
CATEGORY_FEED_RSS = '%s'
| 1.539063
| 2
|
api/dna.py
|
narendersinghyadav/dna
| 0
|
12781890
|
import psutil
import binascii
import socket
import ipaddress
"""
**Module Overview:**
This module will interact with Tor to get real time statistical and analytical information.
|-is_alive - check tor process is alive or killed
|-is_valid_ipv4_address-check for valid ip address
|-authenticate- cookie authentication of control port
|-get_version- get version of tor
|-get_pid- find pid of tor service
|-get_info- get information like version,exit policy,network status etc
|-set_conf- change the value of one or more configurable variable
|-reset_conf-set the configurable variable to default values
|-get_conf- Request the value of zero or more configuration variable
|-get_ports- retreive informations about listeners of different ports
|-get_network_statuses- Router status info (v3 directory style) for all ORs.
|-get_exit_policy-The default exit policy lines that Tor will *append* to the ExitPolicy config option.
|-prt_check-check validity of ports
|-can_exit_to- check whether one can exit through a particular port
|-get_circuit- get information about circuits present for use
|-port_usage-Usage of particular port
|-get_info_relay- retrieve information from database about a particular relay
|-status-tell status of a circuit BUILT or not
|-build_flag- build flag on circuit and relays
|-path- return path of circuit
|-created- circuit created info
|-signal-signal control port like NEWNYM,RELOAD etc
|-get_fingerprint-the contents of the fingerprint file that Tor writes as a relay, or a 551 if we're not a relay currently.
|-get_network_status-network status of a relay with given fingerprint
"""
def is_alive():
for proc in psutil.process_iter():
try:
if 'tor' in proc.name().lower():
return True
except(psutil.NoSuchProcess,psutil.AccessDenied,psutil.ZombieProcess):
pass
return False
def is_valid_ipv4_address(address):
if not isinstance(address, (bytes, str)):
return False
if address.count('.') != 3:
return False
for entry in address.split('.'):
if not entry.isdigit() or int(entry) < 0 or int(entry) > 255:
return False
elif entry[0] == '0' and len(entry) > 1:
return False
return True
def authenticate():
control_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
control_socket.connect(('127.0.0.1',9051))
signal=bytes('PROTOCOLINFO \r\n','utf-8')
control_socket.send(signal)
rcv=control_socket.recv(4096).decode('utf-8')
rcv=rcv.splitlines()
if rcv[0]!='250-PROTOCOLINFO 1':
return None
cookie_path=rcv[1].split('"')
cookie_path=cookie_path[1]
f=open(cookie_path,"rb")
q=f.read()
q=binascii.b2a_hex(q)
q=q.decode('utf-8')
signal=bytes('AUTHENTICATE ' +q+' \r\n','utf-8')
control_socket.send(signal)
rcv=control_socket.recv(4096).decode('utf-8').split()[0]
if rcv=='250':
return control_socket
return None
def get_version():
control_socket=authenticate()
control_socket.send(bytes("GETINFO version \r\n",'utf-8'))
result=control_socket.recv(4096)
result=result.decode('utf-8')
result=result.split('=')
result=result[1].split(' ')
return result[0]
def get_pid(name):
control_socket=authenticate()
control_socket.send(bytes("GETINFO process/pid \r\n",'utf-8'))
result=control_socket.recv(4096)
result=result.decode('utf-8')
result=result.splitlines()
result=result[0].split('=')[1]
pid=result
return int(pid)
def get_info(query):
control_socket=authenticate()
getinfo='GETINFO '+query+" \r\n"
control_socket.send(bytes(getinfo,'utf-8'))
result=control_socket.recv(4096)
result=result+control_socket.recv(4096)
result=result+control_socket.recv(4096)
return result
def set_conf(name,new_value):
control_socket=authenticate()
setconf='SETCONF '+name+'='+new_value+' \r\n'
control_socket.send(bytes(setconf,'utf-8'))
result=control_socket.recv(4096)
def reset_conf(name):
control_socket=authenticate()
setconf='SETCONF '+name+'= \r\n'
control_socket.send(bytes(setconf,'utf-8'))
result=control_socket.recv(4096)
def get_conf(name):
control_socket=authenticate()
control_socket.send(bytes("GETCONF "+ name+" \r\n",'utf-8'))
result=control_socket.recv(4096)
result=result.decode('utf-8')
if result is None or "=" not in result:
return result
result=result.split('=')
result=' '.join(result[1].split())
return result
def get_ports(port_name):
control_socket=authenticate()
port_name=port_name.lower()
control_socket.send(bytes("GETINFO net/listeners/"+ port_name +" \r\n",'utf-8'))
result=control_socket.recv(4096)
result=result.decode('utf-8')
result=result.splitlines()
result=result[0].split('=')[1]
if len(result.split())>1 and len(result.split()[0].split(':'))>1:
result=result.split()[0].split(':')[1][:-1]
portlist=[]
if result!='':
try:
value=int(result)
portlist.append(value)
except ValueError:
pass
return portlist
def get_network_statuses():
control_socket=authenticate()
control_socket.send(bytes("GETINFO ns/all \r\n",'utf-8'))
controlsocket=control_socket.recv(4096).decode('utf-8')
result=""
for i in range(0,250):
result+=controlsocket
controlsocket=control_socket.recv(4096).decode('utf-8')
address_list=[]
or_list=[]
for line in result.splitlines():
if(line[0]=='r'):
data=line.split()
if(len(data)==9):
address_list.append(data[6])
or_list.append(data[7])
else:
continue
return address_list,or_list
def get_exit_policy():
PRIVATE_ADDRESSES = (
'0.0.0.0/8',
'169.254.0.0/16',
'127.0.0.0/8',
'192.168.0.0/16',
'10.0.0.0/8',
'172.16.0.0/12',
)
control_socket=authenticate()
control_socket.send(bytes("GETINFO address \r\n",'utf-8'))
address=control_socket.recv(4096).decode('utf-8').split('=')
if len(address)>=2:
address=address[1].splitlines()[0]
PRIVATE_ADDRESSES+=(address,)
control_socket.send(bytes("GETCONF ExitPolicyRejectPrivate \r\n",'utf-8'))
exitpolicy=control_socket.recv(4096).decode('utf-8')
exitpolicy=exitpolicy.split('=')[1]
exitpolicy=int(exitpolicy)
if exitpolicy==1:
acceptance='reject'
else:
        acceptance='accept'
result=""
for ip_address in PRIVATE_ADDRESSES:
result+=acceptance+' '+ip_address+':*, '
control_socket.send(bytes("GETINFO exit-policy/default \r\n",'utf-8'))
result+=control_socket.recv(4096).decode('utf-8').split('=')[1].replace(',',', ')
return result.splitlines()[0]
def prt_check(prt,port):
prt=prt.split('-')
if len(prt)==2:
miniport=int(prt[0])
maxiport=int(prt[1])
else:
miniport=int(prt[0])
maxiport=int(prt[0])
if port>=miniport and port<=maxiport:
return True
else:
return False
def can_exit_to(policy,address,port):
policy=policy.split(',')
for policy_itr in policy:
accept=policy_itr.split()[0]
addr=policy_itr.split()[1].split(':')[0]
prt=policy_itr.split()[1].split(':')[1]
if (addr=='*' or ipaddress.ip_address(address) in ipaddress.ip_network(addr)) and (prt=='*' or prt_check(prt,port)):
if(accept=='reject'):
return False
else:
return True
return True
def get_circuits():
control_socket=authenticate()
control_socket.send(bytes("GETINFO circuit-status \r\n","utf-8"))
response=control_socket.recv(4096).decode('utf-8')
response=response.splitlines()
circuit_info=[]
response=response[1:-2]
for res in response:
circuit_info.append("CIRC "+res+"\n")
return circuit_info
def port_usage(port):
file=open('ports.cfg','r')
lines=file.readlines()
port_usg=''
for line in lines:
line=line.split()
if len(line)>3:
if line[0]=='port':
if line[1]==str(port):
port_usg=line[3]
if port_usg!='':
return port_usg
else:
log_trace("BUG failed to find port usages")
return None
def get_info_relay(query):
control_socket=authenticate()
control_socket.send(bytes("GETINFO "+query+" \r\n",'utf-8'))
response=control_socket.recv(4096).decode('utf-8')
if response[0]=='5':
return None
response=response.splitlines()[0]
response=response.split('=')[1]
return response
def status(circuit_info):
if len(circuit_info.split())>2 and circuit_info.split()[2]=='BUILT':
return 'BUILT'
return 'NOT BUILT'
def build_flags(circuit_info):
if len(circuit_info.split())<5:
return []
circuit_info=circuit_info.split()[4]
if len(circuit_info.split('='))<2:
return []
circuit_info=circuit_info.split('=')[1]
circuit_info=circuit_info.split(',')
return circuit_info
def path(circuit_info):
path_list=[]
if len(circuit_info.split())<4:
return []
circuit_info=circuit_info.split()[3]
circuit_info=circuit_info.split(',')
for circ in circuit_info:
path_list.append(circ.split('~'))
return path_list
def created(circuit_info):
if(len(circuit_info.split())<7):
return ''
circuit_info=circuit_info.split()[6]
circuit_info=circuit_info.split('=')[1]
circuit_info=circuit_info[:10]+" "+circuit_info[11:]
return circuit_info
def signal(signal_query,control_socket):
control_socket.send(bytes("SIGNAL "+ signal_query+" \r\n","utf-8"))
response=control_socket.recv(4096).decode('utf-8')
if response.split()[0]=='250':
return True
else:
return False
def get_fingerprint():
control_socket=authenticate()
control_socket.send(bytes("GETINFO fingerprint \r\n",'utf-8'))
result=control_socket.recv(4096)
response_code=result.decode('utf-8').split()
if response_code[0]=='551':
return ""
fingerprint=result.decode('utf-8').split('=')
return fingerprint
def get_network_status(fingerprint):
control_socket=authenticate()
control_socket.send(bytes("GETINFO ns/id/"+fingerprint+" \r\n",'utf-8'))
result=control_socket.recv(4096)
result=result.decode('utf-8')
dict_network_status={}
if len(result.split('='))<2:
return dict_network_status
result=result.split('=')[1]
result=result.splitlines()
flags=result[2]
result=result[1]
result=result.split()
if len(result)>=9:
dict_network_status["dir_port"]=result[8]
else:
dict_network_status["dir_port"]='None'
if len(result)>=7:
dict_network_status["or_port"]=result[7]
else:
dict_network_status["or_port"]="None"
dict_network_status["nickname"]=result[1]
dict_network_status["published"]=result[4]+" "+result[5]
dict_network_status["flags"]=flags.split()[1:]
return dict_network_status
| 2.625
| 3
|
src/ssh_server.py
|
ramonmeza/PythonSSHServerTutorial
| 4
|
12781891
|
import paramiko
from src.server_base import ServerBase
from src.ssh_server_interface import SshServerInterface
from src.shell import Shell
class SshServer(ServerBase):
def __init__(self, host_key_file, host_key_file_password=None):
super(SshServer, self).__init__()
self._host_key = paramiko.RSAKey.from_private_key_file(host_key_file, host_key_file_password)
def connection_function(self, client):
try:
# create the SSH transport object
session = paramiko.Transport(client)
session.add_server_key(self._host_key)
# create the server
server = SshServerInterface()
# start the SSH server
try:
session.start_server(server=server)
except paramiko.SSHException:
return
# create the channel and get the stdio
channel = session.accept()
stdio = channel.makefile('rwU')
# create the client shell and start it
# cmdloop() will block execution of this thread.
self.client_shell = Shell(stdio, stdio)
self.client_shell.cmdloop()
# After execution continues, we can close the session
# since the only way execution will continue from
# cmdloop() is if we explicitly return True from it,
# which we do with the bye command.
session.close()
except:
pass
| 2.671875
| 3
|
python/python-core/datetimes.py
|
josephobonyo/sigma_coding_youtube
| 893
|
12781892
|
# import our libraries
import time
import datetime
from datetime import date
# get today's date
today = date.today()
print(today)
# create a custom date
future_date = date(2020, 1, 31)
print(future_date)
# let's create a time stamp
time_stamp = time.time()
print(time_stamp)
# create a date from a timestamp
date_stamp = date.fromtimestamp(time_stamp)
print(date_stamp)
# get components of a date
print(date_stamp.year)
print(date_stamp.month)
print(date_stamp.day)
# ------------------------- PART TWO --------------------------
from datetime import datetime, date, time
# create a date and a time
my_date = date(2019, 3, 22)
my_time = time(12, 30)
# create a datetime
my_datetime = datetime.combine(my_date, my_time)
print(my_datetime)
# get the different components
print(my_datetime.year)
print(my_datetime.month)
print(my_datetime.day)
print(my_datetime.hour)
print(my_datetime.minute)
| 3.96875
| 4
|
Tools/TestSend.py
|
CasperTheCat/D3D11Wrapper
| 11
|
12781893
|
import socket
import time
import struct
import os
import numpy
import sys
with open(sys.argv[1], "rb") as f:
data = f.read()[8:]
datarts = numpy.array(struct.unpack("{}Q".format(len(data) // 8), data))
nEvents = 8
HOST = 'localhost' # The remote host
PORT = 6666 # The same port as used by the server
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
index = 0
    while True:
        chunk = data[index*8*nEvents:(index+1)*8*nEvents]
        if not chunk:
            break  # all events have been sent
        s.sendall(chunk)
        index+=1
        #s.sendall(b'\x55\x55\x00\x00\x00\x00\x00\x00')
        if index < len(datarts):
            time.sleep(datarts[index] / 1000000)
| 2.59375
| 3
|
main.py
|
Octoleet-Dev/Gingerbread_Checkers
| 0
|
12781894
|
print('welcome to the Gingerbread_Checkers launcher')
print('would you like to start a new game?')
wu = input('>>>')
yes = 'yes'
Yes = 'Yes'
no = 'no'
No = 'No'
def y():
    print('okay, starting new game')
def x():
    print('okay then')
if wu == yes or wu == Yes:
    y()
elif wu == no or wu == No:
    x()
else:
    print('error code 000: invalid response received')
| 3.40625
| 3
|
exstracs/exstracs_output.py
|
bnanita/trafficapi
| 0
|
12781895
|
<reponame>bnanita/trafficapi<gh_stars>0
"""
Name: ExSTraCS_OutputFileManager.py
Authors: <NAME> - Written at Dartmouth College, Hanover, NH, USA
Contact: <EMAIL>
Created: April 25, 2014
Modified: August 25,2014
Description: This module contains the methods for generating the different output files generated by ExSTraCS.
These files are generated at each learning checkpoint, and the last iteration. These include...
writePopStats: Summary of the population statistics
writePop: Outputs a snapshot of the entire rule population including classifier conditions, classes, and parameters.
attCo_Occurence: Calculates and outputs co-occurrence scores for each attribute pair in the dataset.
---------------------------------------------------------------------------------------------------------------------------------------------------------
ExSTraCS V2.0: Extended Supervised Tracking and Classifying System - An advanced LCS designed specifically for complex, noisy classification/data mining tasks,
such as biomedical/bioinformatics/epidemiological problem domains. This algorithm should be well suited to any supervised learning problem involving
classification, prediction, data mining, and knowledge discovery. This algorithm would NOT be suited to function approximation, behavioral modeling,
or other multi-step problems. This LCS algorithm is most closely based on the "UCS" algorithm, an LCS introduced by <NAME> and
<NAME> (2003) which in turn is based heavily on "XCS", an LCS introduced by Stewart Wilson (1995).
Copyright (C) 2014 <NAME>
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
---------------------------------------------------------------------------------------------------------------------------------------------------------
"""
#Import Required Modules-------------------------------
from exstracs.exstracs_constants import *
from exstracs.exstracs_at import *
import copy
import random
#------------------------------------------------------
class OutputFileManager:
def writePopStats(self, outFile, trainEval, testEval, exploreIter, pop, correct):
""" Makes output text file which includes all of the parameter settings used in the run as well as all of the evaluation stats including Time Track Output. """
if cons.outputSummary:
try:
popStatsOut = open(outFile + '_'+ str(exploreIter)+'_PopStats.txt','w') # Outputs Population run stats
except Exception as inst:
print(type(inst))
print(inst.args)
print(inst)
print('cannot open', outFile + '_'+ str(exploreIter)+'_PopStats.txt')
raise
else:
print("Writing Population Statistical Summary File...")
#Evaluation of pop
popStatsOut.write("Performance Statistics:------------------------------------------------------------------------\n")
popStatsOut.write("Training Accuracy\tTesting Accuracy\tTraining Coverage\tTesting Coverage\n")
if cons.testFile != 'None':
popStatsOut.write(str(trainEval[0])+"\t")
popStatsOut.write(str(testEval[0])+"\t")
popStatsOut.write(str(trainEval[1]) +"\t")
popStatsOut.write(str(testEval[1])+"\n\n")
elif cons.trainFile != 'None':
popStatsOut.write(str(trainEval[0])+"\t")
popStatsOut.write("NA\t")
popStatsOut.write(str(trainEval[1]) +"\t")
popStatsOut.write("NA\n\n")
else:
popStatsOut.write("NA\t")
popStatsOut.write("NA\t")
popStatsOut.write("NA\t")
popStatsOut.write("NA\n\n")
popStatsOut.write("Population Characterization:------------------------------------------------------------------------\n")
popStatsOut.write("MacroPopSize\tMicroPopSize\tGenerality\n")
popStatsOut.write(str(len(pop.popSet))+"\t"+ str(pop.microPopSize)+"\t"+str(pop.aveGenerality)+"\n\n")
popStatsOut.write("SpecificitySum:------------------------------------------------------------------------\n")
headList = cons.env.formatData.trainHeaderList #preserve order of original dataset
for i in range(len(headList)):
if i < len(headList)-1:
popStatsOut.write(str(headList[i])+"\t")
else:
popStatsOut.write(str(headList[i])+"\n")
# Prints out the Specification Sum for each attribute
for i in range(len(pop.attributeSpecList)):
if i < len(pop.attributeSpecList)-1:
popStatsOut.write(str(pop.attributeSpecList[i])+"\t")
else:
popStatsOut.write(str(pop.attributeSpecList[i])+"\n")
popStatsOut.write("\nAccuracySum:------------------------------------------------------------------------\n")
for i in range(len(headList)):
if i < len(headList)-1:
popStatsOut.write(str(headList[i])+"\t")
else:
popStatsOut.write(str(headList[i])+"\n")
# Prints out the Accuracy Weighted Specification Count for each attribute
for i in range(len(pop.attributeAccList)):
if i < len(pop.attributeAccList)-1:
popStatsOut.write(str(pop.attributeAccList[i])+"\t")
else:
popStatsOut.write(str(pop.attributeAccList[i])+"\n")
if cons.onlyRC: # When RC ONLY, there is no AttributeTrackingGlobalSums
popStatsOut.write("\nAttributeTrackingGlobalSums:----Rule Compaction ONLY, Attribute Tracking not loaded-----------------\n")
for i in range(len(headList)):
if i < len(headList)-1:
popStatsOut.write(str(headList[i])+"\t")
else:
popStatsOut.write(str(headList[i])+"\n")
for i in range(len(headList)):
if i < len(headList)-1:
popStatsOut.write(str(0.0)+"\t")
else:
popStatsOut.write(str(0.0)+"\n")
elif cons.doAttributeTracking:
popStatsOut.write("\nAttributeTrackingGlobalSums:------------------------------------------------------------------------\n")
for i in range(len(headList)):
if i < len(headList)-1:
popStatsOut.write(str(headList[i])+"\t")
else:
popStatsOut.write(str(headList[i])+"\n")
sumGlobalAttTrack = cons.AT.sumGlobalAttTrack()
for i in range(len(sumGlobalAttTrack)):
if i < len(sumGlobalAttTrack)-1:
popStatsOut.write(str(sumGlobalAttTrack[i])+"\t")
else:
popStatsOut.write(str(sumGlobalAttTrack[i])+"\n")
else:
popStatsOut.write("\nAttributeTrackingGlobalSums:----Tracking not applied!-----------------------------------------------\n")
for i in range(len(headList)):
if i < len(headList)-1:
popStatsOut.write(str(headList[i])+"\t")
else:
popStatsOut.write(str(headList[i])+"\n")
for i in range(len(headList)):
if i < len(headList)-1:
popStatsOut.write(str(0.0)+"\t")
else:
popStatsOut.write(str(0.0)+"\n")
#Time Track ---------------------------------------------------------------------------------------------------------
popStatsOut.write("\nRun Time(in minutes):------------------------------------------------------------------------\n")
popStatsOut.write(cons.timer.reportTimes())
popStatsOut.write("\nCorrectTrackerSave:------------------------------------------------------------------------\n")
for i in range(len(correct)):
popStatsOut.write(str(correct[i])+"\t")
popStatsOut.close()
else:
pass
def writePop(self, outFile, exploreIter, pop):
""" Writes a tab delimited text file specifying the evolved rule population, including conditions, phenotypes, and all rule parameters. """
if cons.outputPopulation:
try:
rulePopOut = open(outFile + '_'+ str(exploreIter)+'_RulePop.txt','w') # Outputs tab delimited text file of rule population and respective rule stats
except Exception as inst:
print(type(inst))
print(inst.args)
print(inst)
print('cannot open', outFile + '_'+ str(exploreIter)+'_RulePop.txt')
raise
else:
print("Writing Population as Data File...")
rulePopOut.write("Specified\tCondition\tPhenotype\tFitness\tAccuracy\tNumerosity\tAveMatchSetSize\tTimeStampGA\tInitTimeStamp\tSpecificity\tDeletionProb\tCorrectCount\tMatchCount\tCorrectCover\tMatchCover\tEpochComplete\n")
#Write each classifier--------------------------------------------------------------------------------------------------------------------------------------
for cl in pop.popSet:
rulePopOut.write(str(cl.printClassifier()))
rulePopOut.close()
else:
pass
def attCo_Occurence(self, outFile, exploreIter, pop):
""" Calculates pairwise attribute co-occurence througout all rules in the population."""
if cons.outputAttCoOccur:
print("Calculating Attribute Co-occurence Scores...")
dataLink = cons.env.formatData
dim = dataLink.numAttributes
maxAtts = 50 #Test 10
attList = []
#-------------------------------------------------------
            # IDENTIFY ATTRIBUTES FOR CO-OCCURRENCE EVALUATION
#-------------------------------------------------------
if dim <= maxAtts:
for i in range(0,dim):
attList.append(i)
else:
tempList = copy.deepcopy(pop.attributeSpecList)
tempList = sorted(tempList,reverse=True)
maxVal = tempList[maxAtts]
overflow = []
for i in range(0,dim):
if pop.attributeSpecList[i] >= maxVal: #get roughly the top 50 specified attributes. (may grab some extras, depending on
attList.append(i)
if pop.attributeSpecList[i] == maxVal:
overflow.append(i)
while len(attList) > maxAtts:
target = random.choice(overflow)
attList.remove(target)
overflow.remove(target)
#print(attList)
#-------------------------------------------------------
            # CO-OCCURRENCE EVALUATION
#-------------------------------------------------------
comboList = []
castList = [None,None,0,0] #att1, att2, Specificity, Accuracy Weighted Specificity
count = 0
dim = dataLink.numAttributes
#Specify all attribute pairs.
for i in range(0, len(attList)-1):
for j in range(i+1,len(attList)):
comboList.append(copy.deepcopy(castList))
comboList[count][0] = dataLink.trainHeaderList[attList[i]]
comboList[count][1] = dataLink.trainHeaderList[attList[j]]
count += 1
for cl in pop.popSet:
count = 0
for i in range(len(attList)-1):
for j in range(i+1,len(attList)):
if attList[i] in cl.specifiedAttList and attList[j] in cl.specifiedAttList:
comboList[count][2] += cl.numerosity
comboList[count][3] += cl.numerosity * cl.accuracy
count += 1
tupleList = []
for i in comboList:
tupleList.append((i[0],i[1],i[2],i[3]))
sortedComboList = sorted(tupleList,key=lambda test: test[3], reverse=True)
print("Writing Attribute Co-occurence scores as data file...")
try:
f = open(outFile + '_'+ str(exploreIter)+ '_CO.txt', 'w')
except Exception as inst:
print(type(inst))
print(inst.args)
print(inst)
print('cannot open', outFile + '_'+ str(exploreIter)+ '_CO.txt')
raise
else:
for i in range(len(sortedComboList)):
for j in range(len(sortedComboList[0])): #att1, att2, count, AWcount
if j < len(sortedComboList[0])-1:
f.write(str(sortedComboList[i][j])+'\t')
else:
f.write(str(sortedComboList[i][j])+'\n')
f.close()
else:
pass
    def save_tracking(self, exploreIter, outFile):
        """ Prints out Attribute Tracking scores to txt file. """
        if cons.doAttributeTracking:
try:
f = open(outFile + '_'+ str(exploreIter + 1)+'_AttTrack.txt','w') # Outputs tab delimited text file of rule population and respective rule stats
except Exception as inst:
print(type(inst))
print(inst.args)
print(inst)
print('cannot open', outFile + '_'+ str(exploreIter + 1)+'_AttTrack.txt')
raise
else:
print("Writing Attribute Tracking as Data File...")
trackingSums = cons.AT.attAccuracySums
#-------------------------------------------------------------------
f.write(str(cons.labelInstanceID) + '\t') #Write InstanceID label
for att in cons.env.formatData.trainHeaderList:
f.write(str(att) + '\t')
f.write(str(cons.labelPhenotype)+ '\n') #Write phenotype label
#---------------------------------------------------------------
for i in range(len(trackingSums)):
trackList = trackingSums[i]
f.write(str(cons.env.formatData.trainFormatted[i][2])+ '\t') #Write InstanceID
for att in trackList:
f.write(str(att) + '\t')
f.write(str(cons.env.formatData.trainFormatted[i][1]) +'\n') #Write phenotype
f.close()
    def writePredictions(self, exploreIter, outFile, predictionList, realList, predictionSets):
        """ Prints out the Testing Predictions to txt file."""
        if cons.outputTestPredictions:
try:
f = open(outFile + '_'+ str(exploreIter + 1)+'_Predictions.txt','w') # Outputs tab delimited text file of rule population and respective rule stats
except Exception as inst:
print(type(inst))
print(inst.args)
print(inst)
print('cannot open', outFile + '_'+ str(exploreIter + 1)+'_Predictions.txt')
raise
else:
print("Writing Predictions to File...")
f.write(str(cons.labelInstanceID) + '\t'+'Endpoint Predictions'+'\t' + 'True Endpoint')
if cons.env.formatData.discretePhenotype:
for eachClass in cons.env.formatData.phenotypeList:
f.write('\t'+ str(eachClass))
f.write('\n')
for i in range(len(predictionList)):
f.write(str(cons.env.formatData.testFormatted[i][2])+ '\t') #Write InstanceID
f.write(str(predictionList[i])+'\t'+str(realList[i]))
if cons.env.formatData.discretePhenotype:
propList = []
for eachClass in cons.env.formatData.phenotypeList:
propList.append(predictionSets[i][eachClass])
for each in propList:
f.write('\t'+ str(each))
f.write('\n')
f.close()
def editPopStats(self, testEval):
""" Takes an existing popStatsFile and edits it to report Testing Accuracy performance, and Testing coverage on a specified testing dataset. """
try:
fileObject = open(cons.popRebootPath+"_PopStats.txt", 'rU') # opens each datafile to read.
except Exception as inst:
print(type(inst))
print(inst.args)
print(inst)
print('cannot open', cons.popRebootPath+"_PopStats.txt")
raise
        #Grab the existing file information (only a couple items will change, i.e. testing accuracy and testing coverage)
fileStorage = []
for each in fileObject:
fileStorage.append(each)
fileObject.close()
try:
popStatsOut = open(cons.popRebootPath+'_PopStats_Testing.txt','w') # Outputs Population run stats
except Exception as inst:
print(type(inst))
print(inst.args)
print(inst)
print('cannot open', cons.popRebootPath+'_PopStats_Testing.txt')
raise
else:
print("Writing Population Statistical Summary File...")
for i in range(2):
popStatsOut.write(fileStorage[i])
tempList = fileStorage[2].strip().split('\t')
popStatsOut.write(str(tempList[0])+"\t")
popStatsOut.write(str(testEval[0])+"\t")
popStatsOut.write(str(tempList[2]) +"\t")
popStatsOut.write(str(testEval[1])+"\n\n")
for i in range(4,36):
popStatsOut.write(fileStorage[i])
popStatsOut.close()
| 2.078125
| 2
|
session-1/fitting/fitter_apolo.py
|
Ivan-Solovyev/data-analysis-tutorial
| 1
|
12781896
|
<filename>session-1/fitting/fitter_apolo.py<gh_stars>1-10
#-------------------------------------------------------------------------------
import ostap.fitting.models as Models
from ostap.utils.timing import timing
from ostap.histos.histos import h1_axis
from Functions import *
#-------------------------------------------------------------------------------
im = ROOT.RooRealVar ('im' , 'im' , 2.240 , 2.330 )
pk = ROOT.RooRealVar ('pk' , 'peak ' , 2.2875 , 2.287 , 2.288 )
sg = ROOT.RooRealVar ('sg' , 'sigma' , 0.0045 , 0.003 , 0.070 )
am = ROOT.RooRealVar ('am' , 'asymmetry' , 0.0000 , -0.300 , 0.300 )
bt = ROOT.RooRealVar ('bt' , 'beta' , 1.0000 , 0.500 , 9.999 )
#-------------------------------------------------------------------------------
sig_ap = Models.Apollonios2_pdf( 'sig_ap', xvar=im, mean=pk, sigma=sg, asymmetry=am, beta=bt)
bkg0 = Models.Bkg_pdf ( 'bkg0' , xvar = im , power = 0 )
#
#-------------------------------------------------------------------------------
#
if __name__=="__main__":
#-------------------------------------------------------------------------------
model = Models.Fit1D ( signal = sig_ap , background = bkg0 )
#-------------------------------------------------------------------------------
rfile = ROOT.TFile("test_file.root","READ")
ds = rfile["ds_pi"]
dh = ( ds.reduce( ROOT.RooArgSet( im ) , "im>0" ) ).binnedClone()
with timing():
r, w = model.fitTo( dh , draw=True, silent=True)
r, w = model.fitTo( dh , draw=True, silent=True)
# r, w = model.fitTo(ds, draw=True, nbins=100, ncpu=4)
#-------------------------------------------------------------------------------
print(r)
#-------------------------------------------------------------------------------
h = w.pullHist()
draw_param( r, w, h, 90, im, 0.06*ds.sumEntries(), name="Lc", XTitle ="Mass",
Prefix="Apolo2" , Type="png", var_Units = "GeV/c^{2}")
#-------------------------------------------------------------------------------
| 2.203125
| 2
|
sample_project/env/lib/python3.9/site-packages/fontTools/misc/roundTools.py
|
Istiakmorsalin/ML-Data-Science
| 38,667
|
12781897
|
"""
Various round-to-integer helpers.
"""
import math
import functools
import logging
log = logging.getLogger(__name__)
__all__ = [
"noRound",
"otRound",
"maybeRound",
"roundFunc",
]
def noRound(value):
return value
def otRound(value):
"""Round float value to nearest integer towards ``+Infinity``.
The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_)
defines the required method for converting floating point values to
fixed-point. In particular it specifies the following rounding strategy:
for fractional values of 0.5 and higher, take the next higher integer;
for other fractional values, truncate.
This function rounds the floating-point value according to this strategy
in preparation for conversion to fixed-point.
Args:
value (float): The input floating-point value.
    Returns:
        int: The rounded value.
"""
# See this thread for how we ended up with this implementation:
# https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
return int(math.floor(value + 0.5))
def maybeRound(v, tolerance, round=otRound):
rounded = round(v)
return rounded if abs(rounded - v) <= tolerance else v
def roundFunc(tolerance, round=otRound):
if tolerance < 0:
raise ValueError("Rounding tolerance must be positive")
if tolerance == 0:
return noRound
if tolerance >= .5:
return round
return functools.partial(maybeRound, tolerance=tolerance, round=round)
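# A few illustrative checks of the behaviour described above (round half towards +Infinity,
# tolerance-based snapping); they only exercise the functions defined in this module.
if __name__ == "__main__":
    assert otRound(2.5) == 3 and otRound(-2.5) == -2    # halves go up, not away from zero
    assert otRound(2.49) == 2 and otRound(-2.49) == -2
    assert maybeRound(2.0004, tolerance=0.001) == 2      # within tolerance: snap to integer
    assert maybeRound(2.4, tolerance=0.001) == 2.4       # outside tolerance: pass through
    assert roundFunc(0) is noRound and roundFunc(0.5) is otRound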
| 3.734375
| 4
|
facial_emotion_image.py
|
richacker/theme_detection
| 3
|
12781898
|
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import imutils
import cv2
import numpy as np
import sys
# parameters for loading data and images
detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml'
emotion_model_path = 'models/_mini_XCEPTION.106-0.65.hdf5'
img_path = sys.argv[1]
# hyper-parameters for bounding boxes shape
# loading models
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = ["angry","disgust","scared", "happy", "sad", "surprised","neutral"]
#reading the frame
orig_frame = cv2.imread(img_path)
frame = cv2.imread(img_path,0)
faces = face_detection.detectMultiScale(frame,scaleFactor=1.2,minNeighbors=5,minSize=(30,30),flags=cv2.CASCADE_SCALE_IMAGE)
if len(faces) > 0:
faces = sorted(faces, reverse=True,key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
(fX, fY, fW, fH) = faces
roi = frame[fY:fY + fH, fX:fX + fW]
roi = cv2.resize(roi, (48, 48))
roi = roi.astype("float") / 255.0
roi = img_to_array(roi)
roi = np.expand_dims(roi, axis=0)
preds = emotion_classifier.predict(roi)[0]
emotion_probability = np.max(preds)
label = EMOTIONS[preds.argmax()]
cv2.putText(orig_frame, label, (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
cv2.rectangle(orig_frame, (fX, fY), (fX + fW, fY + fH),(0, 0, 255), 2)
print(label)
cv2.imshow('test_face', orig_frame)
cv2.imwrite('test_output/'+img_path.split('/')[-1],orig_frame)
if (cv2.waitKey(2000) & 0xFF == ord('q')):
sys.exit("Thanks")
cv2.destroyAllWindows()
| 3.171875
| 3
|
tests/test_entities.py
|
shelsoloa/Peachy
| 2
|
12781899
|
<reponame>shelsoloa/Peachy<filename>tests/test_entities.py
import peachy
engine = None
world = None
room = None
def test_startup():
global engine
global world
global room
engine = peachy.Engine()
world = engine.add_world(peachy.World('Test'))
room = world.room
for i in range(100):
room.add(peachy.Entity())
assert len(room) == 100
def test_naming():
t = room[0]
t.name = 'test'
assert t == room.get_name('test')
t.name = ''
def test_grouping():
# ents = room.entities[0:10]
# ents.clear()
pass
| 2.546875
| 3
|
src/ros_tcp_endpoint/tcp_sender.py
|
Gin-TrungSon/ROS-TCP-Endpoint
| 67
|
12781900
|
<filename>src/ros_tcp_endpoint/tcp_sender.py
# Copyright 2020 Unity Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import socket
import time
import threading
import struct
from .client import ClientThread
from .thread_pauser import ThreadPauser
from io import BytesIO
# queue module was renamed between python 2 and 3
try:
from queue import Queue
from queue import Empty
except:
from Queue import Queue
from Queue import Empty
class UnityTcpSender:
"""
Sends messages to Unity.
"""
def __init__(self):
# if we have a valid IP at this point, it was overridden locally so always use that
self.sender_id = 1
self.time_between_halt_checks = 5
# Each sender thread has its own queue: this is always the queue for the currently active thread.
self.queue = None
self.queue_lock = threading.Lock()
# variables needed for matching up unity service requests with responses
self.next_srv_id = 1001
self.srv_lock = threading.Lock()
self.services_waiting = {}
def send_unity_info(self, text):
if self.queue is not None:
command = SysCommand_Log()
command.text = text
serialized_bytes = ClientThread.serialize_command("__log", command)
self.queue.put(serialized_bytes)
def send_unity_warning(self, text):
if self.queue is not None:
command = SysCommand_Log()
command.text = text
serialized_bytes = ClientThread.serialize_command("__warn", command)
self.queue.put(serialized_bytes)
def send_unity_error(self, text):
if self.queue is not None:
command = SysCommand_Log()
command.text = text
serialized_bytes = ClientThread.serialize_command("__error", command)
self.queue.put(serialized_bytes)
def send_ros_service_response(self, srv_id, destination, response):
if self.queue is not None:
command = SysCommand_Service()
command.srv_id = srv_id
serialized_bytes = ClientThread.serialize_command("__response", command)
self.queue.put(serialized_bytes)
self.send_unity_message(destination, response)
def send_unity_message(self, topic, message):
if self.queue is not None:
serialized_message = ClientThread.serialize_message(topic, message)
self.queue.put(serialized_message)
def send_unity_service_request(self, topic, service_class, request):
if self.queue is None:
return None
thread_pauser = ThreadPauser()
with self.srv_lock:
srv_id = self.next_srv_id
self.next_srv_id += 1
self.services_waiting[srv_id] = thread_pauser
command = SysCommand_Service()
command.srv_id = srv_id
serialized_bytes = ClientThread.serialize_command("__request", command)
self.queue.put(serialized_bytes)
self.send_unity_message(topic, request)
# rospy starts a new thread for each service request,
# so it won't break anything if we sleep now while waiting for the response
thread_pauser.sleep_until_resumed()
response = service_class._response_class().deserialize(thread_pauser.result)
return response
def send_unity_service_response(self, srv_id, data):
thread_pauser = None
with self.srv_lock:
thread_pauser = self.services_waiting[srv_id]
del self.services_waiting[srv_id]
thread_pauser.resume_with_result(data)
def send_topic_list(self):
if self.queue is not None:
topic_list = SysCommand_TopicsResponse()
topics_and_types = rospy.get_published_topics()
topic_list.topics = [item[0] for item in topics_and_types]
topic_list.types = [item[1] for item in topics_and_types]
serialized_bytes = ClientThread.serialize_command("__topic_list", topic_list)
self.queue.put(serialized_bytes)
def start_sender(self, conn, halt_event):
sender_thread = threading.Thread(
target=self.sender_loop, args=(conn, self.sender_id, halt_event)
)
self.sender_id += 1
# Exit the server thread when the main thread terminates
sender_thread.daemon = True
sender_thread.start()
def sender_loop(self, conn, tid, halt_event):
s = None
local_queue = Queue()
# send an empty message to confirm connection
# minimal message: 4 zero bytes for topic length 0, 4 zero bytes for payload length 0
local_queue.put(b"\0\0\0\0\0\0\0\0")
with self.queue_lock:
self.queue = local_queue
try:
while not halt_event.is_set():
try:
item = local_queue.get(timeout=self.time_between_halt_checks)
except Empty:
# I'd like to just wait on the queue, but we also need to check occasionally for the connection being closed
# (otherwise the thread never terminates.)
continue
# print("Sender {} sending an item".format(tid))
try:
conn.sendall(item)
except Exception as e:
rospy.logerr("Exception on Send {}".format(e))
break
finally:
halt_event.set()
with self.queue_lock:
if self.queue is local_queue:
self.queue = None
class SysCommand_Log:
text = ""
class SysCommand_Service:
srv_id = 0
class SysCommand_TopicsResponse:
topics = []
types = []
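# A hedged usage sketch: outside of a live connection self.queue is None and messages are
# silently dropped; once the endpoint's server loop hands an open socket to start_sender(),
# publishing to Unity reduces to something like the following (conn, halt_event and msg are
# placeholders supplied by the surrounding endpoint code):
#
#   sender = UnityTcpSender()
#   sender.start_sender(conn, halt_event)          # spawns the background sender thread
#   sender.send_unity_message("/unity_topic", msg) # msg is any ROS message instance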
| 2.28125
| 2
|
test/tests/del_name_scoping.py
|
aisk/pyston
| 1
|
12781901
|
<reponame>aisk/pyston
x = 1
def f():
if 0:
# the del marks 'x' as a name written to in this scope
del x
print x
try:
f()
except NameError, e:
print e
| 2.671875
| 3
|
office365/sharepoint/tenant/administration/sharing_capabilities.py
|
wreiner/Office365-REST-Python-Client
| 544
|
12781902
|
class SharingCapabilities:
def __init__(self):
pass
Disabled = 0
ExternalUserSharingOnly = 1
ExternalUserAndGuestSharing = 2
ExistingExternalUserSharingOnly = 3
| 1.734375
| 2
|
fixtures/orm.py
|
Justcoderguy/python_training
| 0
|
12781903
|
from pony.orm import *
from models.group import Group
from models.contact import Contact
from pymysql.converters import encoders, decoders, convert_mysql_timestamp
from datetime import datetime
__author__ = 'pzqa'
class ORMFixture:
db = Database()
class ORMGroup(db.Entity):
_table_ = 'group_list'
id = PrimaryKey(int, column='group_id')
name = Optional(str, column='group_name')
logo = Optional(str, column='group_header')
comment = Optional(str, column='group_footer')
contacts = Set(lambda: ORMFixture.ORMContact, table='address_in_groups', column='id',
reverse='groups', lazy=True)
class ORMContact(db.Entity):
_table_ = 'addressbook'
id = PrimaryKey(int, column='id')
f_name = Optional(str, column='firstname')
l_name = Optional(str, column='lastname')
address_one = Optional(str, column='address')
email_one = Optional(str, column='email')
email_two = Optional(str, column='email2')
email_three = Optional(str, column='email3')
home = Optional(str, column='home')
mobile = Optional(str, column='mobile')
work = Optional(str, column='work')
phone_two = Optional(str, column='phone2')
deprecated = Optional(datetime, column='deprecated')
groups = Set(lambda: ORMFixture.ORMGroup, table='address_in_groups', column='group_id',
reverse='contacts', lazy=True)
def __init__(self, host, name, admin, password):
conv = encoders
conv.update(decoders)
conv[datetime] = convert_mysql_timestamp
self.db.bind('mysql', host=host, database=name, user=admin, password=password, conv=conv)
self.db.generate_mapping()
sql_debug(True)
def convert_groups_to_model(self, groups):
def convert(group):
return Group(id=str(group.id), name=group.name, logo=group.logo, comment=group.comment)
return list(map(convert, groups))
def convert_contacts_to_model(self, contacts):
def convert(contact):
return Contact(id=str(contact.id), f_name=contact.f_name, l_name=contact.l_name,
address_one=contact.address_one, email_one=contact.email_one, email_two=contact.email_two,
email_three=contact.email_three, home=contact.home, mobile=contact.mobile,
work=contact.work, phone_two=contact.phone_two)
return list(map(convert, contacts))
@db_session
def get_group_list(self):
return self.convert_groups_to_model((select(g for g in ORMFixture.ORMGroup)))
@db_session
def get_contact_list(self):
return self.convert_contacts_to_model((select(c for c in ORMFixture.ORMContact if c.deprecated is None)))
@db_session
def get_contacts_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(orm_group.contacts)
@db_session
def get_contacts_not_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model((
select(c for c in ORMFixture.ORMContact if c.deprecated is None and orm_group not in c.groups)))
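# A hedged usage sketch from a test, assuming a reachable MySQL addressbook database;
# the connection values below are placeholders, not real credentials.
#
#   orm = ORMFixture(host="127.0.0.1", name="addressbook", admin="root", password="")
#   groups = orm.get_group_list()
#   if groups:
#       print(orm.get_contacts_in_group(groups[0]))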
| 2.28125
| 2
|
test/correctness/unit/upsert_tests.py
|
titanous/fdb-document-layer
| 0
|
12781904
|
#
# upsert_tests.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# MongoDB is a registered trademark of MongoDB, Inc.
#
from pymongo.errors import OperationFailure
import itertools
import pprint
import sys
import util
from util import MongoModelException
pprint = pprint.PrettyPrinter().pformat
# test_1 = ("Plain upsert", { 'a': 1 }, { 'b': 1 }, "")
# test_2 = (
# "Upsert with selector operators",
# { 'a' : { '$gt': 1 } },
# { 'b': 1 },
# ""
# )
# test_3 = (
# "Upsert with update operators",
# { 'a' : 1 },
# { '$set': { 'a' : 2 } },
# ""
# )
test_multiple_disparate_operators_1 = ("Multiple disparate operators 1", {
'$and': [{
'$and': [{
'a': 1
}]
}],
'b': {
'$gt': 1
}
}, {
'$set': {
'c': 35
}
}, 'mixed')
test_logical_1 = ("X", {'$and': [{'a.b.c.d': 1}, {'f': 17}]}, {'$set': {'yf': 17}}, 'mixed')
# {$and:[{$and:[{a:1}]}], b:{$gt: 1}}
def create_upsert_selector_operator_test(operator, object, depth, update, operator_type):
selector = next_selector = {}
next_char = 'a'
for i in range(0, depth):
next_selector[next_char] = {}
next_selector = next_selector[next_char]
next_char = chr(ord(next_char) + 1)
next_selector[operator] = object
return ("Upsert operator (%s) depth %s" % (operator, depth), selector, update, operator_type)
def create_upsert_dotted_selector_operator_test(operator, object, depth, update, operator_type):
selector = {}
k = ['a']
next_char = 'b'
for i in range(0, depth):
k.append(next_char)
next_char = chr(ord(next_char) + 1)
k_str = '.'.join(k)
selector[k_str] = {}
selector[k_str][operator] = object
return ("Upsert operator (%s) depth %s (dotted selector)" % (operator, depth), selector, update, operator_type)
def create_upsert_dotted_selector_operator_test_with_operator_in_initial_position(operator, object, depth, update,
operator_type):
selector = {}
k = ['a']
next_char = 'b'
for i in range(0, depth):
k.append(next_char)
next_char = chr(ord(next_char) + 1)
k_str = '.'.join(k)
selector[operator] = {}
selector[operator][k_str] = object
return ("Upsert operator (%s) depth %s (dotted selector)" % (operator, depth), selector, update, operator_type)
def create_operator_tests(operators, depth, update, operator_type):
return [
func(operator, object, depth, update, operator_type) for name, func in globals().iteritems()
if name.startswith('create_upsert_') for operator, object in operators
]
def create_operator_permutation_tests(oplist, depth, repeat, update):
op_permutations = itertools.product(oplist, repeat=repeat)
tests = []
for ops in op_permutations:
next_char = 'a'
next_selector = selector = {}
for i in range(0, depth):
for op, obj in ops:
next_selector[op] = obj
tests.append(("Permutation (%s) depth %s repeat %s" % (ops, depth, repeat), util.OrderedDict(selector),
update, 'permutation'))
next_selector[next_char] = {}
next_selector = next_selector[next_char]
next_char = chr(ord(next_char) + 1)
return tests
def test(collection1, collection2, test):
(label, selector, update, operator_type) = test
sys.stdout.write('\tTesting \"%s\"... ' % label)
update = util.deep_convert_to_ordered(update)
collection1.remove()
collection2.remove()
def up(c, s, u):
c.update(s, u, upsert=True, multi=False)
errors = []
ret1 = ret2 = []
for c in (collection1, collection2):
try:
up(c, selector, update)
except OperationFailure as e:
errors.append("PyMongo upsert failed! Error was: " + str(e))
except MongoModelException as e:
errors.append("MongoModel upsert failed! Error was: " + str(e))
if (len(errors)) == 1:
print util.alert('FAIL', 'fail')
print errors[0]
return (False, [], [], operator_type)
elif (len(errors)) == 2:
print util.alert('PASS', 'okblue')
return (True, [], [], operator_type)
ret1 = [util.deep_convert_to_unordered(i) for i in collection1.find({})]
ret1.sort()
ret2 = [util.deep_convert_to_unordered(i) for i in collection2.find({})]
ret2.sort()
def error_report(msg):
print util.indent(msg)
print util.indent("Selector was: %s" % pprint(dict(selector)))
print util.indent("Update was %s" % pprint(dict(update)))
print util.indent("Upsert from collection1: %s" % ret1)
print util.indent("Upsert from collection2: %s" % ret2)
passed = True
try:
if len(ret1) + len(ret2) == 0:
raise ValueError("NIL")
for i in range(0, max(len(ret1), len(ret2))):
try:
del ret1[i]['_id']
except:
pass
try:
del ret2[i]['_id']
except:
pass
assert ret1[i] == ret2[i]
print util.alert('PASS', 'okgreen')
except AssertionError:
print util.alert('FAIL', 'fail')
error_report("Upserts didn't match!")
passed = False
except IndexError:
print util.alert('FAIL', 'fail')
error_report("One or both upserts failed!")
passed = False
except ValueError as e:
print util.alert("PASS (%s)" % str(e), 'okblue')
return (passed, ret1, ret2, operator_type)
operator_types = {
'comparison_operators': (
('$gt', 0),
('$gte', 0),
('$in', [17, 2, 3]),
('$in', [17]),
('$lt', 17),
('$lte', 17),
('$ne', 17),
('$nin', [17, 2, 3]),
('$nin', [17]),
('$eq', 17),
),
'logical_operators': (
('$or', [{
'a': 17
}, {
'b': 17
}]),
('$or', [{
'$and': [{
'a': 17
}, {
'b': 17
}]
}]),
('$or', [{
'$and': [{
'a': 17
}, {
'$and': [{
'c': 18
}, {
'd': 19
}]
}, {
'b': 17
}]
}]),
('$or', [{
'$and': [{
'a': 17
}, {
'b': 17
}]
}, {
'c': 17
}]),
('$or', [{
'c': 17
}, {
'$and': [{
'a': 17
}, {
'b': 17
}]
}]),
('$or', [{
'c': 17
}, {
'$and': [{
'a': 17
}, {
'$and': [{
'd': 18
}, {
'e': 19
}]
}, {
'b': 17
}]
}]),
('$or', [{
'c': 17
}, {
'$or': [{
'a': 17
}, {
'b': 17
}]
}]),
('$and', [{
'a': 17
}, {
'b': 17
}]),
('$and', [{
'$and': [{
'a': 17
}, {
'b': 17
}]
}]),
('$and', [{
'c': 17
}, {
'$and': [{
'a': 17
}, {
'b': 17
}]
}]),
('$and', [{
'c.d.e.f': 17
}, {
'$or': [{
'a': 17
}, {
'b': 17
}]
}]),
('$nor', [{
'a': 17
}, {
'b': 17
}]),
),
'logical_not_operator': (
# ('$not', 17), # the documentation says `$not` requires an operator expression as its predicate
('a', {
'$not': {
'$gte': 3
}
}), ),
'element_operators': (
('$exists', True),
('$type', 2),
),
'array_operators': (
('$all', [17, 2, 3]), # these are breaking for some reason
('$all', []),
('$all', [35]),
('$size', 1),
('$size', 0),
('$size', None),
('$elemMatch', {
'$gte': 0,
'$lt': 10
}),
('$elemMatch', {
'$lt': 10
}),
)
}
updates = (
# fields
{
'$set': {
'yf': 17
}
},
{
'$inc': {
'yf': 17
}
},
{
'$mul': {
'yf': 2
}
},
{
'$rename': {
'yf': 'b'
}
},
{
'$setOnInsert': {
'yf': 17
}
},
{
'$set': {
'yf': 17
}
},
{
'$unset': {
'yf': ''
}
},
{
'$min': {
'yf': -1
}
},
{
'$max': {
'yf': 100
}
},
# arrays
{
'$addToSet': {
'yf': [17, 18, 19]
}
},
{
'$pop': {
'yf': 1
}
},
{
'$pullAll': {
'yf': [17, 18, 19]
}
},
{
'$pull': {
'yf': 17
}
},
# {'$pushAll':{'yf':[17,18,19]}},
{
'$push': {
'yf': 17
}
},
)
tests = [locals()[attr] for attr in dir() if attr.startswith('test_')]
oplist = []
for _, operators in operator_types.iteritems():
for op in operators:
oplist.append(op)
for depth in range(0, 5):
for update in updates:
for operator_type, operators in operator_types.iteritems():
tests = tests + create_operator_tests(operators, depth, update, operator_type)
for repeat in range(1, 2):
tests = tests + create_operator_permutation_tests(oplist, depth, repeat, update)
def test_all(collection1, collection2, **kwargs):
okay = True
number_passed = number_failed = 0
to_csv = kwargs.get("to_csv") or "upsert_test.csv"
if to_csv:
mf = open(to_csv, "a+")
mf.truncate()
mf.write('|'.join([
'Operator Type', 'Passed?', 'Return 1', 'Return 2', 'Selector', 'Update', 'Label', 'Collection1',
'Collection2'
]) + '\r\n')
for t in tests:
passed, r1, r2, operator_type = test(collection1, collection2, t)
number_passed = number_passed + 1 if passed else number_passed
number_failed = number_failed + 1 if not passed else number_failed
okay = passed and okay
if to_csv:
mf.write("|".join([
t[3],
str(passed),
str(r1),
str(r2),
str(dict(t[1])),
str(dict(t[2])), t[0],
str(collection1),
str(collection2), '\r\n'
]))
if to_csv:
mf.close()
print util.alert("Passed: %d, Failed: %d" % (number_passed, number_failed))
return okay
| 2.453125
| 2
|
getTheNthNodeFromtheEndOftheLinkedList1.py
|
kodreanuja/python
| 0
|
12781905
|
<filename>getTheNthNodeFromtheEndOftheLinkedList1.py
class Node:
def __init__(self, data) -> None:
self.data = data
self.next = None
class LinkedList:
def __init__(self) -> None:
self.head = None
def push(self, data):
New_node = Node(data)
New_node.next = self.head
self.head = New_node
def getNthFromLast(self, n):
l = 0
current = self.head
while current is not None:
current = current.next
l += 1
if n > l:
return " Location not found within the list."
current = self.head
for i in range(0, l-n):
current = current.next
return current.data
def printList(self):
current = self.head
while(current):
print(current.data, "->", end = " ")
current = current.next
if __name__ == "__main__":
l = LinkedList()
l.push(2)
l.push(1)
l.push(6)
l.push(9)
l.push(4)
l.printList()
print("\n")
print(l.getNthFromLast(2))
print(l.getNthFromLast(1))
print(l.getNthFromLast(3))
print(l.getNthFromLast(7))
| 4.03125
| 4
|
setup.py
|
tatp22/3d-positional-encoding
| 0
|
12781906
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="positional_encodings",
version="5.0.0",
author="<NAME>",
author_email="<EMAIL>",
description="1D, 2D, and 3D Sinusodal Positional Encodings in PyTorch",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/tatp22/multidim-positional-encoding",
packages=setuptools.find_packages(),
keywords=["transformers", "attention"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
install_requires=["torch", "tensorflow", "numpy"],
)
| 1.5
| 2
|
tests/test_assert_equal_dataframe.py
|
nmbrgts/py-dataframe-show-reader
| 1
|
12781907
|
<gh_stars>1-10
# Copyright 2019 The DataFrame Show Reader Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pyspark.sql import DataFrame, SparkSession
from pytest import raises
from dataframe_show_reader.assert_equal_dataframe import assert_equal
from dataframe_show_reader.dataframe_show_reader import show_output_to_df
@pytest.fixture(scope="session")
def expected_df(spark_session: SparkSession) -> DataFrame:
return show_output_to_df("""
+-----+-----+
|col_a|col_b|
+-----+-----+
|1a |1b |
|2a |2b |
+-----+-----+
""", spark_session)
def test_assert_equal_when_dfs_are_equal(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+-----+
|col_a|col_b|
+-----+-----+
|1a |1b |
|2a |2b |
+-----+-----+
""", spark_session)
# No error or assertion failure should be thrown:
assert_equal(expected_df, actual_df)
def test_assert_equal_when_actual_df_has_different_value(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+-----+
|col_a|col_b|
+-----+-----+
|1a |1b |
|2a |99999|
+-----+-----+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(expected_df, actual_df, verbose=True)
assert 'The DataFrames differ.' == str(exception_info.value)
def test_assert_equal_when_column_order_is_different(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+-----+
|col_b|col_a|
+-----+-----+
|1b |1a |
|2b |2a |
+-----+-----+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(expected_df, actual_df)
assert 'The DataFrame schemas differ.' == str(exception_info.value)
def test_assert_equal_when_dfs_are_equal_and_column_is_null(
spark_session: SparkSession
):
actual_df = show_output_to_df("""
+------+
|col_a |
[string]
+------+
|null |
+------+
""", spark_session)
expected_df = show_output_to_df("""
+------+
|col_a |
[string]
+------+
|null |
+------+
""", spark_session)
# No error or assertion failure should be thrown:
assert_equal(expected_df, actual_df)
def test_assert_equal_when_actual_df_has_too_few_rows(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+-----+
|col_a|col_b|
+-----+-----+
|1a |1b |
+-----+-----+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(expected_df, actual_df, verbose=False)
assert 'The DataFrames differ.' == str(exception_info.value)
def test_assert_equal_when_actual_df_has_too_many_rows(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+-----+
|col_a|col_b|
+-----+-----+
|1a |1b |
|2a |2b |
|3a |3b |
+-----+-----+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(expected_df, actual_df)
assert 'The DataFrames differ.' == str(exception_info.value)
def test_assert_equal_when_actual_df_has_duplicate_last_row(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+-----+
|col_a|col_b|
+-----+-----+
|1a |1b |
|2a |2b |
|2a |2b |
+-----+-----+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(expected_df, actual_df)
assert 'The DataFrames differ.' == str(exception_info.value)
def test_assert_equal_when_actual_df_has_too_few_columns(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+
|col_a|
+-----+
|1a |
|2a |
+-----+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(expected_df, actual_df)
assert 'The DataFrame schemas differ.' == str(exception_info.value)
def test_assert_equal_when_actual_df_has_too_many_columns(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+-----+-----+
|col_a|col_b|col_c|
+-----+-----+-----+
|1a |1b |1c |
|2a |2b |2c |
+-----+-----+-----+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(expected_df, actual_df)
assert 'The DataFrame schemas differ.' == str(exception_info.value)
def test_assert_equal_when_column_names_do_not_match(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+-----+
|col_a|col_x|
+-----+-----+
|1a |1b |
|2a |2b |
+-----+-----+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(expected_df, actual_df)
assert 'The DataFrame schemas differ.' == str(exception_info.value)
def test_assert_equal_when_data_types_do_not_match(
spark_session: SparkSession):
"""
Test the fairly subtle case where one DF contains an INT and the other
contains a BIGINT, which can be an issue if we try to write a DF containing
a BIGINT into a previously existing Hive table defined to contain an INT.
"""
actual_df = show_output_to_df("""
+------+
|col_a |
[bigint]
+------+
|1 |
+------+
""", spark_session)
expected_df = show_output_to_df("""
+------+
|col_a |
[int ]
+------+
|1 |
+------+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(expected_df, actual_df)
assert 'The DataFrame schemas differ.' == str(exception_info.value)
def test_assert_equal_when_actual_df_is_none(
expected_df, spark_session: SparkSession):
with raises(AssertionError) as exception_info:
assert_equal(expected_df, None)
assert 'The actual DataFrame is None, but the expected DataFrame is not.' \
== str(exception_info.value)
def test_assert_equal_when_expected_df_is_none(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+
|col_a|
+-----+
|1a |
+-----+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(None, actual_df)
assert 'The expected DataFrame is None, but the actual DataFrame is not.' \
== str(exception_info.value)
def test_assert_equal_when_both_dfs_are_none(
expected_df, spark_session: SparkSession):
# No error or assertion failure should be thrown:
assert_equal(None, None)
| 2.703125
| 3
|
QGL/tools/matrix_tools.py
|
ty-zhao/QGL
| 33
|
12781908
|
<gh_stars>10-100
"""
Tools for manipulating matrices
Original Author: <NAME>
Copyright 2020 Raytheon BBN Technologies
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from scipy.linalg import expm
#Pauli matrices
pX = np.array([[0, 1], [1, 0]], dtype=np.complex128)
pZ = np.array([[1, 0], [0, -1]], dtype=np.complex128)
pY = 1j * pX @ pZ
pI = np.eye(2, dtype=np.complex128)
#Machine precision
_eps = np.finfo(np.complex128).eps
#### FUNCTIONS COPIED FROM PYGSTI
#### See: https://github.com/pyGSTio/pyGSTi
#### PYGSTI NOTICE
# Python GST Implementation (PyGSTi) v. 0.9
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
#### END PYGSTI NOTICE
#### PYGSTI COPYRRIGHT
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0 or in the
# LICENSE file in the root pyGSTi directory.
#### END PYGSTI COPYRIGHT
def tracenorm(A, tol=np.sqrt(_eps)):
"""
Compute the trace norm of a matrix A given by:
Tr(sqrt{A^dagger * A})
From: https://github.com/pyGSTio/pyGSTi/blob/master/pygsti/tools/optools.py
"""
if np.linalg.norm(A - np.conjugate(A.T)) < tol:
#Hermitian, so just sum eigenvalue magnitudes
return np.sum(np.abs(np.linalg.eigvals(A)))
else:
#Sum of singular values (positive by construction)
return np.sum(np.linalg.svd(A, compute_uv=False))
def tracedist(A, B, tol=np.sqrt(_eps)):
"""
Compute the trace distance between matrices A and B given by:
0.5 * Tr(sqrt{(A-B)^dagger * (A-B)})
From: https://github.com/pyGSTio/pyGSTi/blob/master/pygsti/tools/optools.py
"""
return 0.5 * tracenorm(A - B)
#### END FUNCTIONS COPIED FROM PYGSTI
def is_close(A, B, tol=np.sqrt(_eps)):
"""
Check if two matrices are close in the sense of trace distance.
"""
if tracedist(A, B) < tol:
return True
else:
A[np.abs(A) < tol] = 0.0
B[np.abs(B) < tol] = 0.0
A /= np.exp(1j*np.angle(A[0,0]))
B /= np.exp(1j*np.angle(B[0,0]))
return ((tracedist(A, B) < tol) or (tracedist(A, -1.0*B) < tol))
def haar_unitary(d):
"""
Generate a Haar-random unitary matrix of dimension d.
Algorithm from:
<NAME>. "How to generate random matrices from the classical
compact groups" arXiv: math-ph/0609050
"""
assert d > 1, 'Dimension must be > 1!'
re_Z = np.random.randn(d*d).reshape((d,d))
im_Z = np.random.randn(d*d).reshape((d,d))
Z = (re_Z + 1j*im_Z)/np.sqrt(2.0)
Q, R = np.linalg.qr(Z)
L = np.diag(np.diag(R) / np.abs(np.diag(R)))
    # Mezzadri's construction: multiplying Q by the diagonal phase matrix L gives a Haar-distributed unitary
    return Q @ L
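# A quick numerical sanity check of the helpers above: a Haar-random matrix should be
# unitary, its trace distance to itself should vanish, and is_close() should tolerate
# a global phase (the 0.3 rad phase below is an arbitrary illustrative value).
if __name__ == "__main__":
    U = haar_unitary(4)
    assert np.allclose(U @ U.conj().T, np.eye(4))        # unitarity
    assert tracedist(U, U) < 1e-12                       # distance to itself is ~0
    assert is_close(np.copy(pZ), np.exp(1j * 0.3) * pZ)  # equal up to a global phase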
| 1.710938
| 2
|
Framework/task.py
|
Phinner/DiscordBot-Framework
| 1
|
12781909
|
<filename>Framework/task.py<gh_stars>1-10
from asyncio import sleep
from concurrent.futures import ThreadPoolExecutor
from discord.ext.tasks import Loop
from datetime import datetime, timedelta
class TaskManager(ThreadPoolExecutor):
Tasks = dict()
# --------------------------------------------------------------------------- #
@classmethod
def startScheduledTask(cls, name):
if name in cls.Tasks:
if not cls.Tasks[name].is_running():
cls.Tasks[name].start()
else:
raise KeyError(f"{name} doesn't exist")
@classmethod
def startAllScheduledTasks(cls):
for task in cls.Tasks.values():
if not task.is_running():
task.start()
# --------------------------------------------------------------------------- #
@classmethod
def scheduledTask(cls, seconds=0, minutes=0, hours=0, count=None, reconnect=True, loop=None,
start=None, run=True):
"""
This function is for scheduling functions to start at a certain hour or date,
check https://docs.python.org/3/library/datetime.html to know how to format the "start" dictionary.
"""
def wrapper(coro):
task = Loop(coro, seconds, hours, minutes, count, reconnect, loop)
cls.Tasks.update({coro.__name__: task})
@task.before_loop
async def before_task():
sleep_time = 0
if start is not None:
now = datetime.now()
keys = ["year", "month", "day", "hour", "minute", "second"]
future = dict(zip(keys, now.timetuple()))
future.update(start)
future = datetime(**future)
while now >= future:
future += timedelta(seconds=seconds, minutes=minutes, hours=hours)
sleep_time = (future - now).seconds
await sleep(sleep_time)
if run:
task.start()
return task
return wrapper
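    # A hedged usage sketch of the "start" option (kept as a comment so nothing is
    # registered at import time): the dict is merged over the current date/time, so a
    # job that first fires at 08:30 and then repeats daily could be declared roughly as
    #
    #   @TaskManager.scheduledTask(hours=24, start={"hour": 8, "minute": 30, "second": 0})
    #   async def daily_report():
    #       ...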
if __name__ == "__main__":
"""
Confusing demo lol
"""
import asyncio
from discord import Client
manager = TaskManager(max_workers=4)
n = 0
def blocking_task():
print("blocking task begin")
with open("text.txt", "w") as file:
for i in range(100000000):
pass
print("blocking task end")
def bot_main():
print("let us begin")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
client = Client(loop=loop)
    @manager.scheduledTask(seconds=5, loop=loop)
    async def background_bot_task():
        global n
        n += 1
        print(n * n)
@client.event
async def on_message(msg):
if msg.author.id == client.user.id:
return
print(msg)
loop.create_task(client.start(input("Bot token please: "), bot=True, reconnect=True))
loop.run_forever()
print("out")
def Oh_no():
print("zero")
g = 1/0
print("no zero ?")
def trace(future):
print(future)
print(future.result())
futures = list()
with manager:
futures.append(manager.submit(bot_main))
futures.append(manager.submit(blocking_task))
futures.append(manager.submit(Oh_no))
futures.append(manager.submit(blocking_task))
for fut in futures:
fut.add_done_callback(trace)
| 2.9375
| 3
|
templates/ws/src/python/pkg_name/common/species.py
|
roadnarrows-robotics/rnmake
| 0
|
12781910
|
"""
A Living organism.
Author: @PKG_AUTHOR@
License: @PKG_LICENSE@
"""
class Species:
""" Species class """
def __init__(self, specific, genus, common=None,
appearance=0.0, extinction=0.0, distribution='Earth'):
"""
Initializer
Parameters:
          specific      Species specific scientific name.
          genus         Species generic genus scientific name.
          common        Species common name.
          appearance    Species (estimated) first appearance, thousands of years ago.
          extinction    Species (estimated) extinction, thousands of years ago.
                        Set to zero if species still present.
          distribution  Geographic region found.
"""
self._specific = specific.lower()
self._genus = genus.capitalize()
self._common = common
self._appearance = appearance
self._extinction = extinction
self._distribution = distribution
def __repr__(self):
""" Return repr(self). """
return f"{self.__module__}.{self.__class__.__name__}" \
f"({self._specific}, {self._genus})"
def __str__(self):
""" Return str(self). """
return self.binomial_name
@property
def binomial_name(self):
""" Return species binomial name. """
return f"{self._genus} {self._specific}"
@property
def binomial_short(self):
""" Return species short binomial name. """
return f"{self._genus[0]}. {self._specific}"
@property
def species(self):
""" Return species specific scientific name. """
return self._specific
@property
def genus(self):
""" Return species genus scientific name. """
return self._genus
@property
def common_name(self):
""" Return species common name. """
return self._common
@property
def first_recorded(self):
""" Return species first (oldest) evidence in record (kya). """
return self._appearance
@property
def last_recorded(self):
""" Return species last (youngest) evidence in record (kya). """
return self._extinction
@property
def geographic_distribution(self):
""" Return species geographic range. """
return self._distribution
def presence_in_record(self):
""" Return 2-tuple of species time span. """
return (self._appearance, self._extinction)
def is_extinct(self):
""" Return True or False. """
return self._extinction > 0.0
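# A small usage sketch of the class above; the species data is illustrative only.
if __name__ == "__main__":
    neanderthal = Species('neanderthalensis', 'homo', common='Neanderthal',
                          appearance=430.0, extinction=40.0, distribution='Eurasia')
    print(neanderthal)                       # Homo neanderthalensis
    print(neanderthal.binomial_short)        # H. neanderthalensis
    print(neanderthal.is_extinct())          # True
    print(neanderthal.presence_in_record())  # (430.0, 40.0)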
| 3.53125
| 4
|
src/test/tests/unit/test_value_simple.py
|
ylee88/visit
| 1
|
12781911
|
<reponame>ylee88/visit
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: test_value_simple.py
#
# Tests: VisIt Test Suite Test Value tests
#
# Defect ID: none
#
# Programmer: <NAME>, Sun Jan 10 10:24:59 PST 2021
#
# ----------------------------------------------------------------------------
# Basic cases
TestSection("Basic")
TestValueEQ("test_value_basic_001",100,100)
TestValueNE("test_value_basic_002",200,100)
TestValueLT("test_value_basic_003",100,200)
TestValueLE("test_value_basic_004",100,200)
TestValueLE("test_value_basic_005",200,200)
TestValueGT("test_value_basic_006",200,100)
TestValueGE("test_value_basic_007",200,100)
TestValueGE("test_value_basic_008",200,200)
TestValueEQ("test_value_basic_009",100.0,100)
# Rounding cases
TestSection("Precision")
TestValueEQ("test_value_prec_001",200.0001,200,3)
TestValueEQ("test_value_prec_002",200.0000000001,200,9)
TestValueNE("test_value_prec_004",200.0001,200,9)
# IN operator
TestSection("In Operator")
TestValueIN("test_value_in_001",(100,200,300),200)
TestValueIN("test_value_in_001",[(0,0), (1,0), (1,-1), (0,1)],(1,-1))
# Custom equivalence operator for TestValueIN
def EqMod5(a,b):
return a%5 == b%5
# This tests that '35' is IN '(17,18,19,20,21)' by the EqMod5 operator
# which says two things are equal if their mod 5 values are equal. So,
# '35' is IN the list because 20%5==35%5 (0)
TestValueIN("test_value_in_002",(17,18,19,20,21),35,2,EqMod5)
# General Python objects wind up being tested as strings
TestSection("Objects")
TestValueEQ("test_value_object_001",[1,'2',3],"[1, '2', 3]")
TestValueLT("test_value_object_002","apple","carrot")
TestValueLT("test_value_object_003",(1,1,0),(1,1,1))
Exit()
| 1.976563
| 2
|
setup.py
|
exitcodezero/picloud-client-python
| 1
|
12781912
|
<reponame>exitcodezero/picloud-client-python
from distutils.core import setup
setup(
name='PiCloud-Client',
version='4.0.0',
author='<NAME>',
author_email='<EMAIL>',
packages=['picloud_client'],
url='https://github.com/exitcodezero/picloud-client-python',
license='LICENSE.txt',
description='A Python client for PiCloud',
long_description=open('README.txt').read(),
install_requires=[
"websocket-client == 0.32.0",
"requests == 2.8.1",
],
)
| 1.21875
| 1
|
tests/test_gendata.py
|
arappaport/gendata
| 0
|
12781913
|
import pytest
import json
from collections import OrderedDict
from gendata import gen_permutations, gen_random, prepare_col_opts
@pytest.fixture
def col_opts_test_data_one_level():
col_opts = OrderedDict()
col_opts["Col0"] = {
"Value0_A": 0.1,
"Value0_B": 0.2,
"Value0_C": 0.7
}
return dict(col_opts=col_opts,
total_cols_exp=1,
total_rows_exp=3)
@pytest.fixture
def col_opts_test_data_two_level(col_opts_test_data_one_level):
test_data = col_opts_test_data_one_level
col_opts = test_data['col_opts']
col_opts["Col1"] = {
"Value1_A_half": 0.5,
"Value1_B_half": 0.5
}
test_data['total_cols_exp'] += 1 #we added one column
test_data['total_rows_exp'] *= 2 # we added 2 values. so 2x expected permutations
return test_data
@pytest.fixture
def col_opts_test_data_three_level(col_opts_test_data_two_level):
test_data = col_opts_test_data_two_level
col_opts = test_data['col_opts']
col_opts["Col2"] = {
"Value2_A_10perc": 0.10,
"Value2_B_20perc": 0.20,
"Value2_C_30perc": 0.30,
"Value2_D_40perc_DEFAULT": "DEFAULT"
}
test_data['total_cols_exp'] += 1 #we added one column
    test_data['total_rows_exp'] *= 4 # we added 4 values. so 4x expected permutations
return test_data
@pytest.fixture
def col_opts_test_data_four_level(col_opts_test_data_three_level):
test_data = col_opts_test_data_three_level
col_opts = test_data['col_opts']
col_opts["Col3"] = {
"Value3_A_100perc": "DEFAULT"
}
test_data['total_cols_exp'] += 1 #we added one column
test_data['total_rows_exp'] *= 1 # we added 1 value. No additional rows
return test_data
def _assert_result_shape(test_data, rows):
"""
Make sure the row result set is correct shape (#rows, # columns
:param col_opts:
:param rows: array or rows
"""
assert test_data
assert rows
assert len(rows) == test_data['total_rows_exp']
assert len(rows[0].keys()) == test_data['total_cols_exp']
assert len(rows[-1].keys()) == test_data['total_cols_exp']
class Test_gen_permutations():
def test_one_level(self, col_opts_test_data_one_level):
test_data = col_opts_test_data_one_level
rows = gen_permutations(test_data['col_opts'])
_assert_result_shape(test_data, rows)
def test_two_level(self, col_opts_test_data_two_level):
test_data = col_opts_test_data_two_level
rows = gen_permutations(test_data['col_opts'])
_assert_result_shape(test_data, rows)
def test_three_level(self, col_opts_test_data_three_level):
test_data = col_opts_test_data_three_level
rows = gen_permutations(test_data['col_opts'])
_assert_result_shape(test_data, rows)
def test_four_level(self, col_opts_test_data_four_level):
test_data = col_opts_test_data_four_level
rows = gen_permutations(test_data['col_opts'])
_assert_result_shape(test_data, rows)
| 2.28125
| 2
|
src/graph/pie_chart.py
|
asf174/TopDownNvidia
| 0
|
12781914
|
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from plotly.subplots import make_subplots
class PieChart:
"""
Class which defines a PieChart graph.
Attributes:
__fig : fig ; reference to diagram (which contains all graphs)
__max_rows : int ; max number of rows
__max_cols : int ; max number of cols
        __num_cols : int ; current number of cols added to diagram
        __num_rows : int ; current number of rows added to diagram
__title : str ; title name of diagram
__current_title_index : str ; current value of index of title's list
__titles : list ; list of titles of each graph
"""
def __init__(self, rows : int, cols : int, title : str, titles_sub_graphs : list):
"""Set attributes as arguments."""
specs_l : list[list] = list(list())
specs_sl : list = list()
for i in range (0, rows):
specs_sl = list()
for j in range(0, cols):
specs_sl.append({'type' : 'domain'})
specs_l.append(specs_sl)
self.__fig = make_subplots(rows = rows, cols = cols, specs = specs_l, subplot_titles = titles_sub_graphs)
self.__max_rows : int = rows
self.__max_cols : int = cols
self.__num_cols : int = 0
self.__num_rows : int = 1
self.__title : str = title
self.__titles : list = titles_sub_graphs
self.__current_title_index : int = 0
pass
    def draw(self, labels : list, sizes : list, explode : list):
if len(labels) != len(sizes) or len(labels) != len(explode):
return False
plt.pie(sizes, explode = explode, labels = labels, autopct='%1.1f%%',
shadow = True, startangle = 90)
plt.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.savefig('books_read.png')
pass
def add_graph(self, labels : list, values : list, legend_group : str) -> bool:
if self.__num_cols > self.__max_cols or self.__num_rows > self.__max_rows or self.__current_title_index >= len(self.__titles) :
return False
self.__num_cols += 1
if self.__num_cols > self.__max_cols:
self.__num_cols = 1
self.__num_rows += 1
if self.__num_rows > self.__max_rows:
return False
self.__fig.add_trace(go.Pie(labels = labels, values = values, showlegend = True, legendgroup = legend_group), row = self.__num_rows, col = self.__num_cols)
        self.__fig.update_yaxes(title_text = self.__titles[self.__current_title_index], row = self.__num_rows, col = self.__num_cols)
self.__current_title_index += 1
return True
pass
def __set_features(self):
""" Set some features."""
plt.tight_layout()
self.__fig.update_layout(title = {'text' : self.__title, 'x' : 0.5, 'xanchor': 'center'}, #legend = dict(yanchor = "top",
#y = 0.9, xanchor = "right", x = 0.01),
legend_title = "Legend", font = dict(size = 12, color = "Black"), legend_traceorder="grouped")
pass
def show(self):
""" Show Graph."""
self.__set_features()
self.__fig.show()
pass
def save(self, file_str : str):
""" Save figure in file indicated as argument.
Params:
file_str : str ; path to file where save figure
"""
self.__set_features()
self.__fig.write_html(file_str)
pass
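# --- Usage sketch (added for illustration; the labels, values and output file
# --- name below are arbitrary example data) ---
if __name__ == '__main__':
    chart = PieChart(rows = 1, cols = 2, title = 'Example diagram',
                     titles_sub_graphs = ['First graph', 'Second graph'])
    chart.add_graph(labels = ['A', 'B', 'C'], values = [10, 20, 70], legend_group = 'group1')
    chart.add_graph(labels = ['X', 'Y'], values = [40, 60], legend_group = 'group2')
    chart.save('example_pie_chart.html')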
| 3.359375
| 3
|
pyrotein/utils.py
|
carbonscott/pyrotein
| 1
|
12781915
|
<filename>pyrotein/utils.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from operator import itemgetter
from itertools import groupby
from .atom import constant_atomlabel, constant_aminoacid_code
def bin_image(img_orig, binning = 4, mode = 1, nan_replace = 0):
''' Bin an image for faster display.
'''
Y, X = img_orig.shape
if mode == 0:
img_bin = []
for i in range(0, Y, binning):
for j in range(0, X, binning):
sub_img = img_orig[i : min(i + binning, Y), j : min(j + binning, X)]
if np.all(np.isnan(sub_img)):
                    img_bin.append( (i, j, nan_replace) )
else:
img_bin.append( (i, j, np.nanmean(sub_img)) )
if mode == 1:
img_bin = []
for i in range(0, Y, binning):
img_bin_y = []
for j in range(0, X, binning):
sub_img = img_orig[i : min(i + binning, Y), j : min(j + binning, X)]
if np.all(np.isnan(sub_img)):
img_bin_y.append( nan_replace )
else:
img_bin_y.append( np.nanmean(sub_img) )
img_bin.append(img_bin_y)
return np.array(img_bin)
def read_file(file, str_to_num = False, num_type = float, labelcolumn = -1):
'''Return all lines in the user supplied parameter file without comments.
'''
lines = []
with open(file,'r') as fh:
for line in fh.readlines():
# Separate entries by spaces and remove commented lines...
words = line.replace('#', ' # ').split()
# Omit any thing coming after the pound sign in a line...
if "#" in words: words = words[ : words.index("#")]
# Save non-empty line...
if str_to_num:
words[labelcolumn + 1:] = [ num_type(word) for word in words[labelcolumn + 1:] ]
if len(words) > 0: lines.append(words)
return lines
# [[[ Matrix operation ]]]
def mat2tril(mat, keepdims = False, offset = 0):
''' Convert a matrix into a lower triangular matrix.
mode:
- False: return one-dimensional array.
- True : return trigular matrix.
'''
# Convert full matrix to lower triangular matrix...
res = mat * np.tri(len(mat), len(mat), offset)
# Convert the lower triangular matrix into a one-dimensional array...
if not keepdims: res = res[np.tril_indices(len(mat), offset)]
return res
def array2tril(ary, length, offset = 0):
''' Convert a one-dimensional array into a lower triangular matrix.
'''
# Create an empty matrix with edge size of len...
res = np.zeros((length, length))
# Collect indices for members in lower triangular matrix with offset...
ver_i, hor_i = np.tril_indices(length, offset)
# Find the smaller length for valid assignment...
capacity = len(ver_i)
area = len(ary)
rightmost_index = np.min([area, capacity])
# Update empty matrix with values in the input array...
res[ver_i[:rightmost_index], hor_i[:rightmost_index]] = ary[:rightmost_index]
return res
def fill_nan_with_mean(mat, axis = 0):
''' Fill np.nan with mean value along `axis`.
Support two-dimensional matrix only.
'''
# Assert mat is 2d...
assert len(mat.shape) == 2, "fill_nan_with_mean ONLY supports 2D matrix."
# Assert axis is either 0 or 1 only...
assert axis == 0 or axis == 1, "fill_nan_with_mean ONLY allows 0 or 1 for axis."
# Obtain the axis mean...
axis_mean = np.nanmean(mat, axis = axis)
# Find the indices that has values of np.nan...
nan_i = np.where(np.isnan(mat))
# Replace np.nan with mean...
rep_axis = 1 - axis
mat[nan_i] = np.take(axis_mean, nan_i[rep_axis])
return None
def fill_nan_with_zero(mat):
''' Fill np.nan with zero along `axis`.
'''
# Assert mat is 2d...
assert len(mat.shape) == 2, "fill_nan_with_mean ONLY supports 2D matrix."
# Find the indices that has values of np.nan...
nan_i = np.where(np.isnan(mat))
# Replace np.nan with mean...
mat[nan_i] = 0.0
return None
def group_consecutive_integer(data):
''' As indicated by the function name. Refer to
https://docs.python.org/2.6/library/itertools.html#examples
for the method.
'''
data_export = []
for k, g in groupby(enumerate(data), lambda x: x[0]-x[1]):
data_export.append( list(map(itemgetter(1), g)) )
return data_export
def get_key_by_max_value(obj_dict):
''' A utility to fetch key corresponding to the max value in a dict.
'''
return max(obj_dict.items(), key = lambda x: x[1])[0]
def sparse_mask(super_seg, offset = 1, val_offset = 0.0, val_init = 1.0):
''' A mask to remove trivial values from intra-residue distances in a
sparse matrix.
'''
# Load constant -- atomlabel...
label_dict = constant_atomlabel()
aa_dict = constant_aminoacid_code()
# Calculate the total length of distance matrix...
len_list = [ len(label_dict[aa_dict[i]]) for i in super_seg ]
len_dmat = np.sum( len_list )
# Form a placeholder matrix with value one by default...
dmask = np.zeros( (len_dmat, len_dmat))
dmask[:] = val_init
# Assign zero to trivial values that only measure intra-residue distance...
len_resi = len(len_list)
pos_x, pos_y = sum(len_list[ : offset]), 0
for i, j in zip(len_list[ : len_resi - offset], len_list[ offset :]):
dmask[ pos_x : pos_x + j, pos_y : pos_y + i ] = val_offset
pos_x += j
pos_y += i
return dmask
def population_density(data, bin_cap = 100):
''' Return population density.
bin_cap stands for bin capacity (number of items per bin).
'''
# Flatten data...
data_flat = data.reshape(-1)
# Sort data...
data_sort = np.sort(data_flat)
# Obtain the length of data...
s, = data_sort.shape
# Go through the array and figure out bin_val and bin_edge...
bin_val = []
bin_edge = []
bin_step = bin_cap
for i in range(0, s, bin_cap):
if i + bin_cap > s: bin_step = s - i
data_seg = data_sort[i : i + bin_step]
b, e = data_seg[0], data_seg[-1]
den = bin_step / (e - b)
bin_val.append(den)
bin_edge.append(b)
bin_edge.append( data_sort[-1] )
return bin_val, bin_edge
def show_population_density(data, bin_cap, filename,
rng = [],
width = 3.5,
height = 2.62,
fontsize = 14,
linewidth = 1.5,
xlabel = 'Distance (\305)',
ylabel = 'Population density (1/\305)',
linecolor = 'black',
cmds = [],):
data_val, data_rng = population_density(data, bin_cap = bin_cap)
if len(rng) == 0: rng = data_rng[0], data_rng[-1]
import GnuplotPy3
gp = GnuplotPy3.GnuplotPy3()
gp(f"set terminal postscript eps size {width}, {height} \\")
gp(f" enhanced color \\")
gp(f" font 'Helvetica,{fontsize}' \\")
gp(f" linewidth {linewidth}")
gp(f"set output '{filename}.eps'")
gp(f"set encoding utf8")
gp(f"unset key")
gp(f"set xlabel '{xlabel}'")
gp(f"set ylabel '{ylabel}'")
gp(f"set xrange [{rng[0]}:{rng[1]}]")
for cmd in cmds:
gp(cmd)
gp("plot '-' using 1:2 with lines linewidth 1 linecolor rgb 'black'")
for i in range(len(data_val)):
if data_rng[i] < rng[0]: continue
if data_rng[i+1] > rng[1]: continue
gp(f"{data_rng[i]} {data_val[i]}")
gp(f"{data_rng[i+1]} {data_val[i]}")
gp("e")
gp("exit")
return None
def label_dmat(super_seg, nterm, cterm):
# Load constant -- atomlabel...
label_dict = constant_atomlabel()
aa_dict = constant_aminoacid_code()
# Go through residue and build a list of (resi, resn, atom)...
label_list = []
for seqi, resi in enumerate(range(nterm, cterm + 1)):
aa = super_seg[seqi]
resn = aa_dict[aa]
atom_list = label_dict[resn]
for atm in atom_list:
label = f"{resi}.{resn}.{atm}"
label_list.append(label)
return label_list
def tally_list1d(int_list):
int_dict = {}
for i in int_list:
        if i not in int_dict: int_dict[i] = 0
int_dict[i] += 1
return int_dict
def chunker(seq, size = 60):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
def sort_dict_by_key(_dict):
''' PDB doesn't sort resn by resi by default. This function sorts resn by
resi in an ascending order.
'''
return { k : v for k, v in sorted(_dict.items(), key = lambda x : x[0]) }
def sqeeze_seqi(lbl):
''' I know it's a terrible name.
It turns things like this
[0, 33], [63, 95] ->
[0, 33], [34, 66]
'''
    len_dict = { k : e - b for k, (b, e) in sorted(lbl.items(), key = lambda x: x[1][0]) }
i_end = 0
sqeezed_lbl = {}
for k, l in len_dict.items():
sqeezed_lbl[k] = [ i_end, i_end + l ]
i_end += l+1
return sqeezed_lbl
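# --- Usage sketch (added for illustration; synthetic data only).
# --- Because this module uses relative imports, run it as `python -m pyrotein.utils`.
if __name__ == "__main__":
    demo = np.random.rand(64, 64)
    demo[10, 10] = np.nan                  # introduce a missing value
    binned = bin_image(demo, binning = 4)  # 16 x 16 matrix of per-bin means
    vals, edges = population_density(binned, bin_cap = 20)
    print(binned.shape, len(vals), len(edges))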
| 2.609375
| 3
|
xchainpy/xchainpy_client/xchainpy_client/models/balance.py
|
SLjavad/xchainpy-lib
| 8
|
12781916
|
from xchainpy_util.asset import Asset
class Balance:
_asset = None # Asset
_amount = 0
def __init__(self, asset, amount):
"""
:param asset: asset type
:type asset: Asset
:param amount: amount
:type amount: str
"""
self._asset = asset
self._amount = amount
@property
def asset(self):
return self._asset
@asset.setter
def asset(self, asset):
self._asset = asset
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, amount):
self._amount = amount
| 2.875
| 3
|
src/database/utils/json_response_wrapper.py
|
jpeirce21/api
| 2
|
12781917
|
<gh_stars>1-10
"""
This is a wrapper for a JSON http response specific to the massenergize API.
It ensures that the data retrieved is in a json format and adds all possible
errors to the caller of a particular route
"""
from django.http import JsonResponse
# from .common import convert_to_json
from collections.abc import Iterable
from sentry_sdk import capture_message
class Json(JsonResponse):
def __init__(self, raw_data=None, errors=None, use_full_json=False, do_not_serialize=False):
cleaned_data = self.serialize(raw_data, errors, use_full_json, do_not_serialize)
super().__init__(cleaned_data, safe=True, json_dumps_params={'indent': 2})
def serialize(self, data, errors, use_full_json=False, do_not_serialize=False):
cleaned_data = {
"success": not bool(errors),
"errors": errors,
}
try:
if not data and not isinstance(data, Iterable):
cleaned_data['data'] = None
elif isinstance(data, dict) or do_not_serialize:
cleaned_data['data'] = data
elif isinstance(data, Iterable):
cleaned_data['data'] = [
(i.full_json() if use_full_json else i.simple_json()) for i in data
]
else:
cleaned_data['data'] = data.full_json()
except Exception as e:
capture_message(str(e), level="error")
            cleaned_data['errors'] = [str(e)]  # stringify so the error can be JSON serialized
return cleaned_data
#use model to dict
#use preloaded model info to check m2m, fk and directs
# if use_full_json:
# #serialize full objects including m2m
# pass
# else:
# #just don't include the m2ms
# pass
# return None
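# --- Usage sketch (added for illustration; the view and the data are hypothetical) ---
# A Django view can return this wrapper directly:
#
# def list_items(request):
#     try:
#         return Json({"items": [1, 2, 3]})
#     except Exception as e:
#         return Json(errors=[str(e)])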
| 2.59375
| 3
|
pyaz/netappfiles/volume/export_policy/__init__.py
|
py-az-cli/py-az-cli
| 0
|
12781918
|
<gh_stars>0
from .... pyaz_utils import _call_az
def add(account_name, allowed_clients, cifs, nfsv3, nfsv41, pool_name, resource_group, rule_index, unix_read_only, unix_read_write, volume_name, add=None, force_string=None, remove=None, set=None):
'''
Add a new rule to the export policy for a volume.
Required Parameters:
- account_name -- Name of the ANF account.
- allowed_clients -- None
- cifs -- Indication that CIFS protocol is allowed
- nfsv3 -- Indication that NFSv3 protocol is allowed
- nfsv41 -- Indication that NFSv4.1 protocol is allowed
- pool_name -- Name of the ANF pool.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- rule_index -- None
- unix_read_only -- Indication of read only access
- unix_read_write -- Indication of read and write access
- volume_name -- Name of the ANF volume.
Optional Parameters:
- add -- Add an object to a list of objects by specifying a path and key value pairs. Example: --add property.listProperty <key=value, string or JSON string>
- force_string -- When using 'set' or 'add', preserve string literals instead of attempting to convert to JSON.
- remove -- Remove a property or an element from a list. Example: --remove property.list <indexToRemove> OR --remove propertyToRemove
- set -- Update an object by specifying a property path and value to set. Example: --set property1.property2=<value>
'''
return _call_az("az netappfiles volume export-policy add", locals())
def list(account_name, pool_name, resource_group, volume_name):
'''
List the export policy rules for a volume.
Required Parameters:
- account_name -- Name of the ANF account.
- pool_name -- Name of the ANF pool.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- volume_name -- Name of the ANF volume.
'''
return _call_az("az netappfiles volume export-policy list", locals())
def remove(account_name, pool_name, resource_group, rule_index, volume_name, add=None, force_string=None, remove=None, set=None):
'''
Remove a rule from the export policy for a volume by rule index. The current rules can be obtained by performing the subgroup list command.
Required Parameters:
- account_name -- Name of the ANF account.
- pool_name -- Name of the ANF pool.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- rule_index -- None
- volume_name -- Name of the ANF volume.
Optional Parameters:
- add -- Add an object to a list of objects by specifying a path and key value pairs. Example: --add property.listProperty <key=value, string or JSON string>
- force_string -- When using 'set' or 'add', preserve string literals instead of attempting to convert to JSON.
- remove -- Remove a property or an element from a list. Example: --remove property.list <indexToRemove> OR --remove propertyToRemove
- set -- Update an object by specifying a property path and value to set. Example: --set property1.property2=<value>
'''
return _call_az("az netappfiles volume export-policy remove", locals())
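# --- Usage sketch (added for illustration; all resource names are placeholders) ---
# rules = list(account_name="myaccount", pool_name="mypool",
#              resource_group="myrg", volume_name="myvolume")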
| 2.265625
| 2
|
ehr_functions/models/types/elastic_net.py
|
fdabek1/EHR-Functions
| 0
|
12781919
|
from ehr_functions.models.types._sklearn import SKLearnModel
from sklearn.linear_model import ElasticNet as EN
import numpy as np
class ElasticNet(SKLearnModel):
def __init__(self, round_output=False, **kwargs):
super().__init__(EN, kwargs)
self.round_output = round_output
def predict(self, x):
output = super().predict(x)
if self.round_output:
output = np.round(output)
return output
| 2.5625
| 3
|
Microservices/TPM/app/client_api/api_response.py
|
ShvDanil/Excellent
| 0
|
12781920
|
<filename>Microservices/TPM/app/client_api/api_response.py
"""
====================================================================================
Main response from server to client side (in case of project -> other microservice).
====================================================================================
"""
# Imports.
import app.server_api.router_from_server_to_client as sender
from typing import List
from fastapi import APIRouter, HTTPException
from app.client_api.api_model import Test
test = APIRouter()
@test.get('/TPM/get-new-test/{level}/{subject}')
async def index(level: int, subject: str) -> List[Test]:
"""
    Handles the API request, or raises HTTP 500 ("internal server error") if something goes wrong.
:param level: Student's grade: 9 / 10 / 11.
:param subject: Student's subject: 'math' / 'rus'.
:return: Server response to client's request.
"""
# Get response from server side.
try:
response = await sender.return_test(level, subject)
if response is None:
raise Exception
return response
except Exception:
raise HTTPException(status_code=500, detail="Something went wrong. We are fixing the bug and developing "
"new features.")
| 2.640625
| 3
|
sparrow_cloud/middleware/exception.py
|
LaEmma/sparrow_cloud
| 15
|
12781921
|
import logging
import traceback
from django.conf import settings
from sparrow_cloud.dingtalk.sender import send_message
from sparrow_cloud.middleware.base.base_middleware import MiddlewareMixin
logger = logging.getLogger(__name__)
MESSAGE_LINE = """
##### <font color=\"info\"> Service name: {service_name}</font> #####
> Process exception message: <font color=\"warning\">{exception_info}</font>
"""
class ExceptionMiddleware(MiddlewareMixin):
def process_exception(self, request, exception):
debug = settings.DEBUG
code = getattr(settings, "CLOUD_ERROR_NOTIFICATION_ROBOT", "cloud_error_notification_robot")
service_name = getattr(settings, "SERVICE_CONF", None).get("NAME", None)
if debug is True:
pass
else:
exception_info = traceback.format_exc()[-800:-1]
try:
msg = MESSAGE_LINE.format(service_name=service_name, exception_info=exception_info)
logger.info("sparrow_cloud log, service process_exception info : {}".format(msg))
send_message(msg=msg, code_list=[code], channel="wechat", message_type="markdown")
except Exception as ex:
                logger.error("sparrow_cloud failed to send the service exception notification, reason: {}".format(ex))
| 1.921875
| 2
|
web_chat/urls.py
|
yezimai/oldboyProject
| 0
|
12781922
|
<reponame>yezimai/oldboyProject
"""oldboyProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
import views
urlpatterns = [
url(r'dashboard/', views.dashboard,name='chat'),
url(r'load_contact_list/', views.load_contact_list,name='load_contact_list'),
url(r'send_msg/', views.send_msg,name='send_msg'),
url(r'get_msg/', views.send_msg,name='get_msg'),
url(r'file_upload/', views.file_upload,name='file_upload'),
url(r'file_upload_progess/', views.file_upload_progess,name='file_upload_progess'),
url(r'DelFileCache/', views.DelFileCache,name='DelFileCache'),
]
| 2.53125
| 3
|
tute.py
|
jccabrejas/tute
| 0
|
12781923
|
from app import app, db
from app.models import User, Game, Deck, Ledger, Trick
@app.shell_context_processor
def make_shell_context():
return {'db': db,
'User': User,
'Game': Game,
'Deck': Deck,
'Ledger': Ledger,
'Trick': Trick,
}
| 1.78125
| 2
|
backend/Hrplatform/migrations/0001_initial.py
|
iamavichawla/AI-based-video-analysis
| 0
|
12781924
|
# Generated by Django 3.2.4 on 2021-07-05 08:53
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Audio_store1',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('record', models.FileField(upload_to='')),
('title', models.CharField(default='my file', max_length=120)),
('wpm', models.IntegerField(blank=True, null=True)),
('pausesandfillerwords', models.IntegerField(blank=True, null=True)),
('pitch', models.IntegerField(blank=True, null=True)),
('duration', models.FloatField(blank=True, null=True)),
('pronunciation', models.FloatField(blank=True, null=True)),
('balance', models.FloatField(blank=True, null=True)),
('spotwords', models.IntegerField(blank=True, null=True)),
('sensitivewords', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'Audio_store1',
},
),
]
| 1.835938
| 2
|
code/python/netmiko_router_config.py
|
opsec7/refs
| 0
|
12781925
|
<filename>code/python/netmiko_router_config.py
#!/usr/bin/env python
# lima 6/16/2017
# multiprocessing works better in linux... forking is not supported in windows
# ... just run this in linux and move on...
#
# from __future__ import absolute_import, division, print_function
import netmiko
from multiprocessing import Pool
# import my toolbox.py functions
import toolbox
# from netmiko import ConnectHandler
import re
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL) # IOerror on broken pipe
# handle keyboard interrupt and exit
signal.signal(signal.SIGINT, signal.SIG_DFL)
device_type = 'cisco_ios'
username, password = toolbox.get_credentials()
"""
different way to feed ips to my_function
device_ip = '''
10.1.1.1
10.1.2.2
'''.strip().splitlines()
"""
device_ip = toolbox.get_ips('router_list.cfg')
def my_function(i):
try:
connection = netmiko.ConnectHandler(
ip=i, device_type=device_type, username=username, password=password)
# add global delay to slow devices requiring longer delay: global_delay_factor=60
hostname = connection.find_prompt()
# connection.config_mode()
# connection.send_command('ssh 10.x.y.0 255.255.255.0 ' + route_lookup(x))
# connection.send_command('clock timezone CST -6')
# connection.send_command('clock summer-time CST recurring')
y = connection.send_command(
'sh run | s ip access-list standard ACL_SSH_VTY_ACCESS')
# for t in y:
# if t:
# connection.send(xyz)
# y = connection.send_command('sh run ssh')
connection.disconnect()
if y:
            print('%s: %s --> done' % (hostname, i))
            print('%s\n' % (y))
        else:
            print('%s: %s --> done' % (hostname, i))
    except Exception as e:
        print('%s: %s\n\n' % (i, e))
# define number of threads to fire up at once
pool = Pool(16)
pool.map(my_function, device_ip)
| 2.375
| 2
|
metrix/metrix.py
|
KiriLev/metrix
| 0
|
12781926
|
import time
from collections import defaultdict
from dataclasses import dataclass
from logging import getLogger
from typing import Optional
@dataclass
class Bucket:
value: int = 0
last_updated_at: Optional[int] = None
def increment(self, timestamp: int):
self.value += 1
self.last_updated_at = timestamp
def reset(self):
self.value = 0
self.last_updated_at = None
class Metrix:
def __init__(self, ttl: int = 300, debug=False):
"""
Inits Metrix.
Args:
ttl (int): Time-to-live in seconds for events.
debug (bool): Set to True to enable additional logging.
Raises:
ValueError: TTL can't be non-positive
"""
if ttl <= 0:
raise ValueError(f"TTL can't be non-positive, got ttl={ttl}")
self.ttl = ttl
self.debug = debug
self._metrics_data = defaultdict(lambda: [Bucket() for _ in range(self.ttl)])
self._start_time = None
if self.debug:
self._logger = getLogger(name="metrix")
def increment(self, metric_name: str) -> None:
"""
Increments counter for a specified `metric_name`.
Args:
metric_name (str): Name of metric to increment.
"""
event_time = int(time.time())
if self._start_time is None:
self._start_time = event_time
# we use ring buffers to store events so we need to find an index
bucket_ind = (event_time - self._start_time) % self.ttl
bucket = self._metrics_data[metric_name][bucket_ind]
# in case of already used and outdated bucket we need to reset its value before we increment
if (
bucket.last_updated_at is not None
and bucket.last_updated_at < event_time - self.ttl
):
bucket.reset()
bucket.increment(event_time)
def sum(self, metric_name: str, interval: int) -> int:
"""
Returns counter value for a specified `metric_name` and specified
time range.
Args:
metric_name (str): Name of metric to retrieve number of occurrences.
interval (int): Number of seconds representing range of a query.
Returns:
            sum (int): number of events recorded for `metric_name` within the last `interval` seconds.
"""
event_time = int(time.time())
if metric_name not in self._metrics_data:
if self.debug:
self._logger.debug(f"No events for metric_name={metric_name}")
return 0
if interval > self.ttl:
interval = self.ttl
if self.debug:
self._logger.debug(f"Clipped interval={interval} to ttl={self.ttl}")
sum_ = 0
for bucket in self._metrics_data[metric_name]:
if bucket.last_updated_at is not None:
if (
bucket.last_updated_at < event_time - self.ttl
): # reset outdated buckets
bucket.reset()
elif bucket.last_updated_at > event_time - interval:
sum_ += bucket.value
return sum_
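# --- Usage sketch (added for illustration) ---
if __name__ == "__main__":
    metrics = Metrix(ttl=60)
    for _ in range(5):
        metrics.increment("page_view")
    # All five events happened within the last 10 seconds, so this prints 5.
    print(metrics.sum("page_view", interval=10))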
| 2.6875
| 3
|
src/materia/symmetry/symmetry_operation.py
|
kijanac/Materia
| 0
|
12781927
|
from __future__ import annotations
from typing import Iterable, Optional, Union
import materia as mtr
import numpy as np
import scipy.linalg
import scipy.spatial  # needed by SymmetryOperation.error
__all__ = [
"Identity",
"Inversion",
"Reflection",
"ProperRotation",
"ImproperRotation",
"SymmetryOperation",
]
class SymmetryOperation:
def __init__(
self,
matrix: Optional[np.ndarray] = None,
determinant: Optional[Union[int, float]] = None,
trace: Optional[float] = None,
axis: Optional[np.ndarray] = None,
) -> None:
if matrix is not None:
self.matrix, _ = scipy.linalg.polar(matrix)
elif determinant is not None and trace is not None:
if axis is None:
self.matrix, _ = scipy.linalg.polar(
determinant * np.eye(3).astype("float64")
)
else:
a = mtr.normalize(axis)
cos_theta = (trace - determinant) / 2
cos_theta = max(min(cos_theta, 1), -1)
theta = np.arccos(cos_theta)
self.matrix = mtr.rotation_matrix(
axis=a, theta=theta, improper=(determinant == -1)
)
else:
raise ValueError
def __eq__(self, other: SymmetryOperation) -> bool:
return hasattr(other, "matrix") and np.allclose(
self.matrix, other.matrix, atol=1e-3
)
@property
def det(self) -> int:
return int(round(np.linalg.det(self.matrix)))
@property
def tr(self) -> float:
return np.trace(self.matrix)
@property
def cos_theta(self) -> float:
return max(min((self.tr - np.sign(self.det)) / 2, 1.0), -1.0)
@property
def axis(self) -> np.ndarray:
# algorithm from scipp.ucsc.edu/~haber/ph116A/rotation_11.pdf
if np.isclose(abs(self.tr), 3):
return None
if np.isclose(self.tr * self.det, -1):
S = (np.eye(3) + self.det * self.matrix) / 2
for i in range(3):
signs = np.sign(S[:, i])
if not np.allclose(signs, [0, 0, 0]):
return signs * np.sqrt(np.abs(np.diag(S)))
inds = np.triu_indices(3, k=1)
return mtr.normalize(
(self.matrix.T - self.matrix)[inds][::-1] * np.array([1, -1, 1])
)
@property
def inverse(self) -> SymmetryOperation:
return SymmetryOperation(matrix=self.matrix.T)
def apply(self, structure: mtr.Structure):
return self.matrix @ structure.centered_atomic_positions.value
def error(self, structure: mtr.Structure):
kdt = scipy.spatial.KDTree(structure.centered_atomic_positions.value.T)
dists, _ = np.abs(kdt.query(self.apply(structure).T))
rs = np.abs(self.axis @ structure.centered_atomic_positions.value)
return dists / rs
def is_symmetry_of(self, structure: mtr.Structure, tolerance: float) -> bool:
round_to = round(-np.log(tolerance) / np.log(10))
X = structure.centered_atomic_positions.value
return set(
tuple(row) for row in self.apply(structure).T.round(round_to)
) == set(tuple(row) for row in X.T.round(round_to))
@property
def order(self) -> int:
return mtr.periodicity(self.matrix)
def __mul__(self, other):
return SymmetryOperation(matrix=self.matrix @ other.matrix)
class Identity(SymmetryOperation):
def __init__(self) -> None:
determinant = 1
trace = 3
axis = None
super().__init__(determinant=determinant, trace=trace, axis=axis)
class Inversion(SymmetryOperation):
def __init__(self) -> None:
determinant = -1
trace = -3
axis = None
super().__init__(determinant=determinant, trace=trace, axis=axis)
class Reflection(SymmetryOperation):
def __init__(self, axis: Iterable[Union[float, int]]) -> None:
determinant = -1
trace = 1
super().__init__(determinant=determinant, trace=trace, axis=axis)
class ProperRotation(SymmetryOperation):
def __init__(self, order: int, axis: Iterable[Union[float, int]]) -> None:
determinant = 1
trace = 2 * np.cos(2 * np.pi / order) + determinant
super().__init__(determinant=determinant, trace=trace, axis=axis)
def __repr__(self) -> str:
return f"ProperRotation(order={self.order})"
class ImproperRotation(SymmetryOperation):
def __init__(self, order: int, axis: Iterable[Union[float, int]]) -> None:
determinant = -1
trace = 2 * np.cos(2 * np.pi / order) + determinant
super().__init__(determinant=determinant, trace=trace, axis=axis)
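# --- Usage sketch (added for illustration; requires the `materia` package) ---
# A two-fold proper rotation about z composed with itself gives the identity:
#
# c2 = ProperRotation(order=2, axis=np.array([0.0, 0.0, 1.0]))
# print(c2.det, c2.order)          # 1 2
# print((c2 * c2) == Identity())   # True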
| 2.328125
| 2
|
data/tinyfpga_bx.py
|
jwise/corescore
| 94
|
12781928
|
<reponame>jwise/corescore
ctx.addClock("i_clk", 16)
ctx.addClock("clk", 48)
| 1.117188
| 1
|
demo.py
|
foamliu/Zero-Shot-Learning
| 22
|
12781929
|
<filename>demo.py<gh_stars>10-100
import argparse
import json
import random
import numpy as np
import torchvision.transforms as transforms
from scipy.misc import imread, imresize, imsave
from utils import *
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transform = transforms.Compose([normalize])
def main(args):
superclass = args['superclass']
if superclass is None:
superclass = 'Animals'
checkpoint = '{}/BEST_{}_checkpoint.tar'.format(save_folder, superclass) # model checkpoint
print('checkpoint: ' + str(checkpoint))
# Load model
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
W = checkpoint['W']
model = model.to(device)
W = W.to(device)
# model = model.cuda()
model.eval()
test_folder = get_test_folder_by_superclass(superclass)
files = [os.path.join(test_folder, file) for file in os.listdir(test_folder) if
file.lower().endswith('.jpg')]
num_test_samples = 10
samples = random.sample(files, num_test_samples)
imgs = torch.zeros([num_test_samples, 3, 224, 224], dtype=torch.float, device=device)
for i, path in enumerate(samples):
# Read images
img = imread(path)
img = imresize(img, (224, 224))
imsave('images/image_{}_{}.jpg'.format(superclass, i), img)
img = img.transpose(2, 0, 1)
assert img.shape == (3, 224, 224)
assert np.max(img) <= 255
img = torch.FloatTensor(img / 255.)
img = transform(img)
imgs[i] = img
    imgs = imgs.to(device)  # move the batch to the same device as the model
print('imgs.device: ' + str(imgs.device))
result = []
with torch.no_grad():
X = model(imgs) # (batch_size, 2048)
preds = X.mm(W) # (batch_size, 123)
attributes_per_class = get_attributes_per_class_by_superclass(superclass)
attribute_names = get_attribute_names_by_superclass(superclass)
_, scores = batched_KNN(preds, 1, attributes_per_class)
for i in range(num_test_samples):
embeded = preds[i]
embeded = embeded.cpu().numpy()
attributes = attribute_names[embeded >= 0.9]
attributes = ', '.join(attributes)
# print('embeded: ' + str(embeded))
labal_id = scores[i].item()
label_name = 'Label_A_%02d' % (labal_id + 1,)
print('labal_id: ' + str(labal_id))
result.append(
{'i': i, 'labal_id': labal_id, 'label_name': label_name, 'attributes': attributes})
ensure_folder('data')
with open('data/result_{}.json'.format(superclass), 'w') as file:
json.dump(result, file, indent=4, ensure_ascii=False)
if __name__ == '__main__':
# Parse arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--superclass",
help="superclass ('Animals', 'Fruits', 'Vehicles', 'Electronics', 'Hairstyles')")
args = vars(ap.parse_args())
main(args)
| 2.265625
| 2
|
algorithm.py
|
dacousb/7old
| 5
|
12781930
|
# 7old
# search engine algorithm
# that gets data from DB
import sqlite3 as sl
def searchdb(q):
con = sl.connect("results.db")
cur = con.cursor()
rows = cur.execute("SELECT * FROM RESULT ORDER BY title")
result = []
for row in rows:
if (q in row[1] # URL
and row[1].count('/') <= 3
and (row[1].count('.') == 1
or (row[1].startswith('https://www.')
and row[1].count('.') == 2))
and '?' not in row[1]):
result.insert(0, row)
elif any(q in s for s in row):
result.append(row)
con.close()
return result
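# --- Usage sketch (added for illustration; assumes results.db exists with a RESULT table) ---
# for row in searchdb("python"):
#     print(row[1])   # print the URL of each hit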
| 3.578125
| 4
|
taskflow/monitoring/aws.py
|
awm33/taskflow
| 22
|
12781931
|
<gh_stars>10-100
import boto3
from .base import MonitorDestination
class AWSMonitor(MonitorDestination):
def __init__(self,
metric_prefix='',
metric_namespace='taskflow',
*args, **kwargs):
self.metric_namespace = metric_namespace
self.metric_prefix = metric_prefix
self.cloudwatch = boto3.client('cloudwatch')
super(AWSMonitor, self).__init__(*args, **kwargs)
def heartbeat_scheduler(self, session):
self.cloudwatch.put_metric_data(
Namespace=self.metric_namespace,
MetricData=[
{
'MetricName': self.metric_prefix + 'scheduler_heartbeat',
'Value': 1,
'Unit': 'Count'
}
])
def task_retry(self, session, task_instance):
self.cloudwatch.put_metric_data(
Namespace=self.metric_namespace,
MetricData=[
{
'MetricName': self.metric_prefix + 'task_retry',
'Value': 1,
'Unit': 'Count'
},
{
'MetricName': self.metric_prefix + 'task_retry',
'Dimensions': [
{
'Name': 'task_name',
'Value': task_instance.task_name
}
],
'Value': 1,
'Unit': 'Count'
}
])
def task_failed(self, session, task_instance):
self.cloudwatch.put_metric_data(
Namespace=self.metric_namespace,
MetricData=[
{
'MetricName': self.metric_prefix + 'task_failure',
'Value': 1,
'Unit': 'Count'
},
{
'MetricName': self.metric_prefix + 'task_failure',
'Dimensions': [
{
'Name': 'task_name',
'Value': task_instance.task_name
}
],
'Value': 1,
'Unit': 'Count'
}
])
def task_success(self, session, task_instance):
self.cloudwatch.put_metric_data(
Namespace=self.metric_namespace,
MetricData=[
{
'MetricName': self.metric_prefix + 'task_success',
'Value': 1,
'Unit': 'Count'
},
{
'MetricName': self.metric_prefix + 'task_success',
'Dimensions': [
{
'Name': 'task_name',
'Value': task_instance.task_name
}
],
'Value': 1,
'Unit': 'Count'
}
])
def workflow_failed(self, session, workflow_instance):
self.cloudwatch.put_metric_data(
Namespace=self.metric_namespace,
MetricData=[
{
'MetricName': self.metric_prefix + 'workflow_failure',
'Value': 1,
'Unit': 'Count'
},
{
'MetricName': self.metric_prefix + 'workflow_failure',
'Dimensions': [
{
'Name': 'workflow_name',
'Value': workflow_instance.workflow_name
}
],
'Value': 1,
'Unit': 'Count'
}
])
def workflow_success(self, session, workflow_instance):
self.cloudwatch.put_metric_data(
Namespace=self.metric_namespace,
MetricData=[
{
'MetricName': self.metric_prefix + 'workflow_success',
'Value': 1,
'Unit': 'Count'
},
{
'MetricName': self.metric_prefix + 'workflow_success',
'Dimensions': [
{
'Name': 'workflow_name',
'Value': workflow_instance.workflow_name
}
],
'Value': 1,
'Unit': 'Count'
}
])
| 1.9375
| 2
|
eda/lista1/13.py
|
BrunaNayara/ppca
| 0
|
12781932
|
<gh_stars>0
n = int(input())
c = int(input())
while(c != n):
if c < n:
        print("The correct number is higher.")
    elif c > n:
        print("The correct number is lower.")
    c = int(input())
print("Congratulations! You got it right.")
| 3.953125
| 4
|
DS-400/Medium/213-House Robber II/DP.py
|
ericchen12377/Leetcode-Algorithm-Python
| 2
|
12781933
|
class Solution(object):
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def robber(nums):
if not nums:
return 0
if len(nums) <= 2:
return max(nums)
dp = [0] * len(nums)
dp[0] = nums[0]
dp[1] = max(nums[0], nums[1])
for x in range(2, len(nums)):
dp[x] = max(dp[x - 1], nums[x] + dp[x - 2])
return dp[-1]
n = len(nums)
if n == 0:
return 0
if n == 1:
return nums[0]
choice1 = nums[:n-1]
choice2 = nums[1:]
return max(robber(choice1), robber(choice2))
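# Usage sketch (added for illustration): the houses form a circle, so the
# first and last house can never both be robbed.
#   Solution().rob([2, 3, 2])    -> 3
#   Solution().rob([1, 2, 3, 1]) -> 4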
| 3.234375
| 3
|
lino_book/projects/migs/__init__.py
|
lino-framework/lino_book
| 3
|
12781934
|
<gh_stars>1-10
# -*- coding: UTF-8 -*-
# Copyright 2015-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""A copy of the :ref:`noi` as a ticket management demo, for testing Django migrations.
.. autosummary::
:toctree:
settings
"""
| 0.925781
| 1
|
Basic_app/urls.py
|
Achan40/Portfolio_FullStack
| 0
|
12781935
|
from django.conf.urls import url
from Basic_app import views
from pathlib import Path
urlpatterns = [
# The about page will be the homepage
url(r'^$',views.AboutView.as_view(),name='about'),
# Creating contact page
url(r'^contact/$',views.Contact_View,name='contact_create'),
# Contact confirmation page
url(r'^contact/confirm/$',views.ContactConfirmed.as_view(),name='contact_confirm'),
# List of Contacts page
url(r'^contact/contact_list/$',views.ContactList.as_view(),name='contact_list'),
# List of projects
url(r'^projects/$',views.ProjectList.as_view(),name='project_list'),
# New project creation,updating, and deleting
url(r'^projects/new/$',views.ProjectCreate.as_view(),name='project_create'),
url(r'^projects/(?P<pk>\d+)/edit/$',views.ProjectUpdate.as_view(),name='project_edit'),
url(r'^projects/(?P<pk>\d+)/remove/$',views.ProjectDelete.as_view(),name='project_remove'),
# Url for project detail generated by primary key
url(r'^projects/(?P<pk>\d+)$',views.ProjectDetailView.as_view(),name='project_detail'),
]
| 2.203125
| 2
|
chester/openingbook.py
|
bsamseth/chester
| 0
|
12781936
|
import chess
import chess.polyglot
import random
def play_from_opening_book(
book, max_depth=10, fen=chess.STARTING_FEN, random_seed=None
):
"""Play out moves from an opening book and return the resulting board.
From the given `fen` starting position, draw weighted random moves from the opening book
    to a maximum depth of `2 * max_depth` plies. Whenever there are no more moves in the book,
    the play stops and the board is returned.
If a seed integer is given, then this will always return the same final position.
Arguments
---------
book: str
Path to a polyglot opening book file.
max_depth: int, optional
The maximum depth to play to. The number of moves (plies) made will at most be 2 times this.
Default is 10.
fen: str, optional
Starting position in FEN notation. Default is the standard opening position.
random_seed: int, optional
Seed the random number generator to produce the same results each call. Default is to not seed,
and so successive calls will in general yield different boards.
Returns
-------
A `chess.Board` with the resulting position.
"""
if random_seed is not None:
random.seed(random_seed)
board = chess.Board(fen)
with chess.polyglot.MemoryMappedReader(book) as reader:
try:
for _ in range(2 * max_depth):
move = reader.weighted_choice(board).move()
board.push(move)
except IndexError:
pass
return board
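# --- Usage sketch (added for illustration; "book.bin" is a placeholder path to any
# --- polyglot opening book file) ---
# board = play_from_opening_book("book.bin", max_depth=5, random_seed=42)
# print(board.fen())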
| 3.953125
| 4
|
crystallus/wyckoff_cfg_generator.py
|
yoshida-lab/crystallus
| 0
|
12781937
|
<filename>crystallus/wyckoff_cfg_generator.py
# Copyright 2021 TsumiNa
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from typing import Dict, Sequence, Union
from .core import WyckoffCfgGenerator as _WYG
class WyckoffCfgGenerator(object):
def __init__(
self,
composition,
*,
max_recurrent: int = 1_000,
n_jobs: int = -1,
priority: Union[Dict[int, Dict[str, float]], None] = None,
):
"""A generator for possible Wyckoff configuration generation.
Parameters
----------
max_recurrent:
            Maximum number of attempts to generate a reasonable configuration, by default 1_000
n_jobs:
Number of cpu cores when parallel calculation, by default -1
priority:
Priorities for Wyckoff letters. By default, a Wyckoff letter will be sampled
from an Uniform distribution of the all available letters.
Give this parameter will overwrite the corresponding priority list of Wyckoff letters.
For example, space group 167 has Wyckoff letters `[a, b, c, d, e, f]`
By default, all their priority values are equal to `1`. Now, we want to lift the
priority of PW `a`, `b` and `d` and never use `e`, we can give parameter `priority`
a values like this: `{167: {a: 2, b: 2, d: 2, e: 0}}`. After that, the
new priority change to `{a: 2, b: 2, c: 1, d: 2, e: 0, f: 1}`. When generating,
the priority list will be normalized as this `{a: 2/8, b: 2/8, c: 1/8, d: 2/8 e: 0/8, f: 1/8}`.
composition:
            Composition of compounds in the primitive cell; should be formatted
            as {<element symbol>: <ratio in float>}.
"""
self._wyg = _WYG(composition, max_recurrent=max_recurrent, n_jobs=n_jobs, priority=priority)
self._priority = priority
self._composition = composition
@property
def max_recurrent(self):
return self._wyg.max_recurrent
@property
def n_jobs(self):
return self._wyg.n_jobs
@property
def composition(self):
return deepcopy(self._composition)
@property
def priority(self):
return deepcopy(self._priority)
@n_jobs.setter
def n_jobs(self, n):
self._wyg.n_jobs = n
def gen_one(self, *, spacegroup_num: int):
"""Try to generate a possible Wyckoff configuration under the given space group.
Parameters
----------
spacegroup_num:
Space group number.
Returns
-------
Dict
Wyckoff configuration set, which is a dict with format like:
{"Li": ["a", "c"], "O": ["i"]}. Here, the "Li" is an available element
            symbol and ["a", "c"] is a list which contains corresponding Wyckoff
letters. For convenience, dict will be sorted by keys.
"""
return self._wyg.gen_one(spacegroup_num)
def gen_many(self, size: int, *, spacegroup_num: Union[int, Sequence[int]]):
"""Try to generate possible Wyckoff configuration sets.
Parameters
----------
size:
How many times to try for one space group.
spacegroup_num:
Spacegroup numbers to generate Wyckoff configurations.
Returns
-------
Dict[int, List[Dict]], List[Dict]
            A collection containing the spacegroup number and its corresponding Wyckoff
            configurations (wy_cfg). If only one spacegroup number was given,
            only the list of wy_cfgs is returned; otherwise a dict is returned with
            the spacegroup number as key. wy_cfgs will be formatted as
            {element 1: [Wyckoff_letter, Wyckoff_letter, ...], element 2: [...], ...}.
"""
if isinstance(spacegroup_num, int):
spacegroup_num = (spacegroup_num,)
return self._wyg.gen_many(size, *spacegroup_num)
def gen_many_iter(self, size: int, *, spacegroup_num: Union[int, Sequence[int]]):
"""Try to generate possible Wyckoff configuration sets.
Parameters
----------
size:
How many times to try for one space group.
spacegroup_num:
Spacegroup numbers to generate Wyckoff configurations.
Yields
------
Dict[int, List[Dict]], List[Dict]
            A collection containing the spacegroup number and its corresponding Wyckoff
            configurations (wy_cfg). If only one spacegroup number was given,
            only the list of wy_cfgs is yielded; otherwise a dict is yielded with
            the spacegroup number as key. wy_cfgs will be formatted as
            {element 1: [Wyckoff_letter, Wyckoff_letter, ...], element 2: [...], ...}.
"""
if isinstance(spacegroup_num, int):
spacegroup_num = (spacegroup_num,)
for sp_num in spacegroup_num:
yield sp_num, self._wyg.gen_many(size, sp_num)
def __repr__(self):
return f"WyckoffCfgGenerator(\
\n max_recurrent={self.max_recurrent},\
\n n_jobs={self.n_jobs}\
\n priority={self._priority}\
\n composition={self._composition}\
\n)"
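# --- Usage sketch (added for illustration; the composition and the space group
# --- number are arbitrary example values) ---
# wyg = WyckoffCfgGenerator({"Ca": 1, "Ti": 1, "O": 3})
# cfg = wyg.gen_one(spacegroup_num=221)        # a dict such as {"Ca": [...], "O": [...], "Ti": [...]}
# cfgs = wyg.gen_many(10, spacegroup_num=221)  # list of such configurations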
| 2.1875
| 2
|
amr_interop_bridge/scripts/fake_battery_percentage.py
|
LexxPluss/amr_interop_bridge
| 1
|
12781938
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from std_msgs.msg import Float64
if __name__ == "__main__":
rospy.init_node("fake_battery_percentage")
pub = rospy.Publisher("battery_percentage", Float64, queue_size=1)
battery_percentage = rospy.get_param("~battery_percentage", 100)
publish_rate = rospy.get_param("~publish_rate", 1)
loop_rate = rospy.Rate(publish_rate)
while not rospy.is_shutdown():
battery_percentage -= 0.1
if battery_percentage < 0:
battery_percentage = 0
battery_percentage_msg = battery_percentage
pub.publish(battery_percentage_msg)
loop_rate.sleep()
| 2.453125
| 2
|
dl/layers/layer_base.py
|
nuka137/DeepLearningFramework
| 10
|
12781939
|
<reponame>nuka137/DeepLearningFramework
class LayerBase:
name_counter = {}
def __init__(self):
cls = self.__class__
if self.id() not in cls.name_counter:
cls.name_counter[self.id()] = 0
self.name_ = "{}_{}".format(self.id(), cls.name_counter[self.id()])
cls.name_counter[self.id()] += 1
def id(self):
raise NotImplementedError()
def name(self):
return self.name_
def initialize_parameters(self, initializer=None):
pass
def parameters(self):
return {}
def gradients(self):
return {}
def forward(self, x):
raise NotImplementedError()
def backward(self, x):
raise NotImplementedError()
| 2.921875
| 3
|
rcs_back/containers_app/admin.py
|
e-kondr01/rcs_back
| 0
|
12781940
|
from django.contrib import admin
from .models import (
Building,
BuildingPart,
Container,
EmailToken,
FullContainerReport,
TankTakeoutCompany,
)
class ContainerAdmin(admin.ModelAdmin):
readonly_fields = [
"mass",
"activated_at",
"avg_fill_time",
"calc_avg_fill_time",
"avg_takeout_wait_time",
"cur_fill_time",
"cur_takeout_wait_time",
"last_full_report",
"last_emptied_report",
"ignore_reports_count",
"is_full",
"check_time_conditions",
"requested_activation"
]
class BuildingPartAdmin(admin.ModelAdmin):
readonly_fields = [
"current_mass",
"meets_mass_takeout_condition",
"meets_time_takeout_condition",
"needs_takeout",
"containers_for_takeout",
"container_count"
]
class BuildingAdmin(admin.ModelAdmin):
readonly_fields = [
"current_mass",
"meets_mass_takeout_condition",
"meets_time_takeout_condition",
"needs_takeout",
"containers_for_takeout",
"container_count",
"calculated_collected_mass",
"confirmed_collected_mass",
"avg_fill_speed"
]
class FullContainerReportAdmin(admin.ModelAdmin):
readonly_fields = [
"takeout_wait_time"
]
admin.site.register(Container, ContainerAdmin)
admin.site.register(Building, BuildingAdmin)
admin.site.register(BuildingPart, BuildingPartAdmin)
admin.site.register(FullContainerReport, FullContainerReportAdmin)
admin.site.register(EmailToken)
admin.site.register(TankTakeoutCompany)
| 1.90625
| 2
|
rescuemap/migrations/0009_auto_20191106_0006.py
|
Corruption13/SOS-APP
| 2
|
12781941
|
<filename>rescuemap/migrations/0009_auto_20191106_0006.py
# Generated by Django 2.2.6 on 2019-11-05 18:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rescuemap', '0008_auto_20191105_2328'),
]
operations = [
migrations.RemoveField(
model_name='victim',
name='time',
),
migrations.AddField(
model_name='victim',
name='time_of_creation',
field=models.DateTimeField(null=True),
),
]
| 1.5625
| 2
|
Classification/LibLinear/src/test/scripts/generate_test_data.py
|
em3ndez/tribuo
| 1,091
|
12781942
|
# Copyright (c) 2015-2020, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import os
def generate_data(mode='train', problem_type='binary'):
assert mode == 'train' or mode == 'test'
rng = np.random.RandomState(1)
if problem_type == 'binary':
labels = ['POS', 'NEG']
else:
labels = ['POS', 'NEG', 'NEU']
texts = ['aaa', 'bbb', 'ccc']
counts = {label: 0 for label in labels}
if mode == 'train':
n = 1000
else:
n = 100
lns = []
for i in range(n):
y = rng.choice(labels)
counts[y] += 1
x = rng.choice(texts)
lns.append('%s##%s\n' % (y, x))
print(counts)
with open('%s_input_%s.tribuo' % (mode, problem_type), 'w') as f:
for ln in lns:
f.write(ln)
def generate_models():
lltypes = [
'L2R_LR',
'L2R_L2LOSS_SVC_DUAL',
'L2R_L2LOSS_SVC',
'L2R_L1LOSS_SVC_DUAL',
'MCSVM_CS',
'L1R_L2LOSS_SVC',
'L1R_LR',
'L2R_LR_DUAL'
]
for lltype in lltypes:
cmd = './src/test/scripts/generate-model.sh %s %s %s %s' % (lltype, lltype, 'train_input_binary.tribuo', 'test_input_binary.tribuo')
print(cmd)
os.system(cmd)
# multiclass model
lltype = 'L2R_LR'
cmd = './src/test/scripts/generate-model.sh %s %s %s %s' % (lltype, lltype+'_multiclass', 'train_input_multiclass.tribuo', 'test_input_multiclass.tribuo')
print(cmd)
os.system(cmd)
if __name__ == '__main__':
generate_data(mode='train')
generate_data(mode='test')
generate_data(mode='train', problem_type='multiclass')
generate_data(mode='test', problem_type='multiclass')
generate_models()
| 2.125
| 2
|
market.py
|
rasimandiran/Gen-dqn
| 2
|
12781943
|
<gh_stars>1-10
# Environment for DQN
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
class Market(object):
LOOK_BACK = 24
def __init__(self, data):
self.fee = 0.001
self.data = data
self.data.index = pd.to_datetime(self.data.index, unit="ms")
self.data_5min = self.data.resample("5min").apply(dict(high="max", low="min", close="last", volume="sum"))
self.data_10min = self.data.resample("10min").apply(dict(high="max", low="min", close="last", volume="sum"))
self.data_15min = self.data.resample("15min").apply(dict(high="max", low="min", close="last", volume="sum"))
self.data_30min = self.data.resample("30min").apply(dict(high="max", low="min", close="last", volume="sum"))
self.data_1h = self.data.resample("1h").apply(dict(high="max", low="min", close="last", volume="sum"))
self.data_1h["real_high"] = self.data_1h.high
self.data_1h["real_low"] = self.data_1h.low
self.data_1h["real_close"] = self.data_1h.close
# indicators
for col in self.data_1h.columns:
if "real_" not in col:
self.data_5min[col] = self.data_5min[col].pct_change()
self.data_10min[col] = self.data_10min[col].pct_change()
self.data_15min[col] = self.data_15min[col].pct_change()
self.data_30min[col] = self.data_30min[col].pct_change()
self.data_1h[col] = self.data_1h[col].pct_change()
self.data_5min[col] = self.data_5min[col].replace([-np.Inf, np.Inf], [0, 1])
self.data_10min[col] = self.data_10min[col].replace([-np.Inf, np.Inf], [0, 1])
self.data_15min[col] = self.data_15min[col].replace([-np.Inf, np.Inf], [0, 1])
self.data_30min[col] = self.data_30min[col].replace([-np.Inf, np.Inf], [0, 1])
self.data_1h[col] = self.data_1h[col].replace([-np.Inf, np.Inf], [0, 1])
self.data_5min = self.data_5min.dropna()
self.data_10min = self.data_10min.dropna()
self.data_15min = self.data_15min.dropna()
self.data_30min = self.data_30min.dropna()
self.data_1h = self.data_1h.dropna()
self.prices = self.data_1h.loc[:, ["real_high", "real_low", "real_close"]]
self.data_1h = self.data_1h.drop(["real_high", "real_low", "real_close"], axis=1)
self.step_size = len(self.data_1h.iloc[24:-24])
self.state_space_size = (self.get_state(0)[3].shape[0], self.get_state(0)[3].shape[1], self.get_state(0)[3].shape[2]+1)
def get_state(self, t):
t=24+t
state = []
state.append(self.data_5min.loc[self.data_5min.index<self.data_1h.iloc[t].name].tail(self.LOOK_BACK).values)
state.append(self.data_10min.loc[self.data_10min.index<self.data_1h.iloc[t].name].tail(self.LOOK_BACK).values)
state.append(self.data_15min.loc[self.data_15min.index<self.data_1h.iloc[t].name].tail(self.LOOK_BACK).values)
state.append(self.data_30min.loc[self.data_30min.index<self.data_1h.iloc[t].name].tail(self.LOOK_BACK).values)
state.append(self.data_1h.loc[self.data_1h.index<=self.data_1h.iloc[t].name].tail(self.LOOK_BACK).values)
next_state = []
next_state.append(self.data_5min.loc[self.data_5min.index<self.data_1h.iloc[t+1].name].tail(self.LOOK_BACK).values)
next_state.append(self.data_10min.loc[self.data_10min.index<self.data_1h.iloc[t+1].name].tail(self.LOOK_BACK).values)
next_state.append(self.data_15min.loc[self.data_15min.index<self.data_1h.iloc[t+1].name].tail(self.LOOK_BACK).values)
next_state.append(self.data_30min.loc[self.data_30min.index<self.data_1h.iloc[t+1].name].tail(self.LOOK_BACK).values)
next_state.append(self.data_1h.loc[self.data_1h.index<=self.data_1h.iloc[t+1].name].tail(self.LOOK_BACK).values)
scaler = MinMaxScaler(feature_range=(0,1))
state = np.array([scaler.fit_transform(s) for s in state])
next_state = np.array([scaler.fit_transform(s) for s in next_state])
return self.prices.iloc[t].real_low, self.prices.iloc[t].real_high, self.prices.iloc[t].real_close, state, next_state
def buy(self, amount, price):
return (amount/price)*(1-self.fee)
def sell(self, qty, price):
return (qty*price)*(1-self.fee)
| 2.671875
| 3
|
seq_alignment/__init__.py
|
ryanlstevens/seq_alignment
| 1
|
12781944
|
<gh_stars>1-10
# Make global + local similarity class visible to next module
from .global_similarity import global_similarity
from .local_similarity import local_similarity
| 1.257813
| 1
|
scripts/find_includes.py
|
colin-zhou/fast-ta
| 13
|
12781945
|
<gh_stars>10-100
import subprocess
import sys
import os
from sysconfig import get_paths
# Check whether the running Python installation already provides the header and NumPy include paths we need.
found_include = False
found_numpy_include = False
subversion = None
if sys.version_info[0] == 3:
print(get_paths()['include'])
found_include = True
subversion = sys.version_info[1]
try:
import numpy
print(numpy.get_include())
found_numpy_include = True
except ImportError:
pass
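# Fallback 1: if the current interpreter is not Python 3, query a `python3` binary on PATH for its include paths.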
if not found_include:
try:
x = subprocess.check_output(["python3", "--version"]).strip().decode() # error is raised if it does not exist
print(subprocess.check_output("python3 -c'from sysconfig import get_paths;print(get_paths()[\"include\"])'", shell=True).strip().decode())
found_include = True
subversion = int(x.split("3.")[1].split(".")[0])  # minor version, e.g. 8 or 10
if not found_numpy_include:
try:
print(subprocess.check_output('python3 -c"import numpy;print(numpy.get_include())"', shell=True).strip().decode())
found_numpy_include = True
except Exception:
pass
except Exception:
pass
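# Fallback 2: probe versioned interpreters python3.1 through python3.9 until one provides the include paths.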
if not found_include:
for i in range(1, 10):
try:
subprocess.check_output(["python3.%i"%i, "--version"]) # ensure error is raised if it does not exist
print(subprocess.check_output("python3.%i -c'from sysconfig import get_paths;print(get_paths()[\"include\"])'"%i, shell=True).strip().decode())
subversion = i
found_include = True
if not found_numpy_include:
try:
print(subprocess.check_output('python3.%i -c"import numpy;print(numpy.get_include())"'%i, shell=True).strip().decode())
found_numpy_include = True
except Exception:
pass
break
except Exception:
pass
if not found_include:
raise ModuleNotFoundError("Python Includes were not found!")
elif not found_numpy_include:
raise ModuleNotFoundError("NumPy Includes were not found!")
if os.name == 'nt':
lpath = get_paths()['include'].replace("Include", 'libs')
if os.path.exists(lpath+'\\python3%i.lib'%subversion) and os.path.exists(lpath+'\\python3%i_d.lib'%subversion):
print(lpath+'\\python3%i.lib'%subversion)
print(lpath+'\\python3%i_d.lib'%subversion)
else:
raise ModuleNotFoundError("Debug binaries not installed for Python version 3.%i! Re-run the installer with the option selected."%subversion)
| 2.640625
| 3
|
pyspark_apps/process/movies_csv_to_parquet.py
|
edithhuang2019/emr-demo
| 21
|
12781946
|
#!/usr/bin/env python3
# Process raw CSV data and output Parquet
# Author: <NAME> (November 2020)
import argparse
from pyspark.sql import SparkSession
def main():
args = parse_args()
spark = SparkSession \
.builder \
.appName("movie-ratings-csv-to-parquet") \
.getOrCreate()
for file in ["credits", "keywords", "links", "links_small", "movies_metadata", "ratings", "ratings_small"]:
convert_to_parquet(spark, file, args)
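# Read one raw CSV from the bronze (raw) bucket and rewrite it as Parquet into the silver (processed) bucket.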
def convert_to_parquet(spark, file, args):
df_movies = spark.read \
.format("csv") \
.option("header", "true") \
.option("delimiter", ",") \
.option("inferSchema", "true") \
.load(f"s3a://{args.bronze_bucket}/movie_ratings/{file}.csv")
df_movies.write \
.format("parquet") \
.save(f"s3a://{args.silver_bucket}/movie_ratings/{file}/", mode="overwrite")
def parse_args():
"""Parse argument values from command-line"""
parser = argparse.ArgumentParser(description="Arguments required for script.")
parser.add_argument("--bronze-bucket", required=True, help="Raw data location")
parser.add_argument("--silver-bucket", required=True, help="Processed data location")
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
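# Example spark-submit invocation (bucket names below are placeholders):
# spark-submit movies_csv_to_parquet.py --bronze-bucket <raw-bucket> --silver-bucket <processed-bucket>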
| 3.234375
| 3
|
src/experiment.py
|
LightnessOfBeing/Understanding-Clouds
| 4
|
12781947
|
import collections
import os
import pandas as pd
from catalyst.dl import ConfigExperiment
from segmentation_models_pytorch.encoders import get_preprocessing_fn
from sklearn.model_selection import train_test_split
from src.augmentations import get_transforms
from src.dataset import CloudDataset
class Experiment(ConfigExperiment):
def get_datasets(self, **kwargs):
path = kwargs.get("path", None)
df_train_name = kwargs.get("df_train_name", None)
df_pl_name = kwargs.get("df_pl_name", None)
image_folder = kwargs.get("image_folder", None)
encoder_name = kwargs.get("model_name", None)
test_mode = kwargs.get("test_mode", None)
type = kwargs.get("type", None)
height = kwargs.get("height", None)
width = kwargs.get("width", None)
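# `type` selects what gets built: "train" -> train/valid datasets, "postprocess" -> infer on valid, "test" -> infer on test.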
if type == "train":
df_train = pd.read_csv(os.path.join(path, df_train_name))
if df_pl_name is not None:
df_pl = pd.read_csv(os.path.join(path, df_pl_name))
df_train = pd.concat([df_train, df_pl])
print(
f"Pseudo-labels named {df_pl_name} {len(df_pl) / 4} added to train df"
)
if test_mode:
df_train = df_train[:150]
df_train["label"] = df_train["Image_Label"].apply(lambda x: x.split("_")[1])
df_train["im_id"] = df_train["Image_Label"].apply(lambda x: x.split("_")[0])
id_mask_count = (
df_train.loc[~df_train["EncodedPixels"].isnull(), "Image_Label"]
.apply(lambda x: x.split("_")[0])
.value_counts()
.reset_index()
.rename(columns={"index": "img_id", "Image_Label": "count"})
.sort_values(["count", "img_id"])
)
assert len(id_mask_count["img_id"].values) == len(
id_mask_count["img_id"].unique()
)
train_ids, valid_ids = train_test_split(
id_mask_count["img_id"].values,
random_state=42,
stratify=id_mask_count["count"],
test_size=0.1,
)
df_test = pd.read_csv(os.path.join(path, "sample_submission.csv"))
df_test["label"] = df_test["Image_Label"].apply(lambda x: x.split("_")[1])
df_test["im_id"] = df_test["Image_Label"].apply(lambda x: x.split("_")[0])
test_ids = (
df_test["Image_Label"]
.apply(lambda x: x.split("_")[0])
.drop_duplicates()
.values
)
preprocess_fn = get_preprocessing_fn(encoder_name, pretrained="imagenet")
if type != "test":
train_dataset = CloudDataset(
df=df_train,
path=path,
img_ids=train_ids,
image_folder=image_folder,
transforms=get_transforms("train"),
preprocessing_fn=preprocess_fn,
height=height,
width=width,
)
valid_dataset = CloudDataset(
df=df_train,
path=path,
img_ids=valid_ids,
image_folder=image_folder,
transforms=get_transforms("valid"),
preprocessing_fn=preprocess_fn,
height=height,
width=width,
)
test_dataset = CloudDataset(
df=df_test,
path=path,
img_ids=test_ids,
image_folder="test_images",
transforms=get_transforms("valid"),
preprocessing_fn=preprocess_fn,
height=height,
width=width,
)
datasets = collections.OrderedDict()
if type == "train":
datasets["train"] = train_dataset
datasets["valid"] = valid_dataset
elif type == "postprocess":
datasets["infer"] = valid_dataset
elif type == "test":
datasets["infer"] = test_dataset
return datasets
| 2.4375
| 2
|
readme_generator/test_scaffold.py
|
chelseadole/401-final-project
| 6
|
12781948
|
<gh_stars>1-10
"""Test file for ensure scaffolding functionality."""
# from readme_generator.make_scaffold import dependencies, license, setup_dict, user_data
# def test_user_data_populated_in_make_scaffold():
# """Test that make_scaffold creates a populated user_data dict."""
# assert len(user_data) > 0
# assert "url" in user_data.keys()
# def test_make_scaffold_creates_license():
# """Test that make_scaffold creates a populated license."""
# assert isinstance(license, str)
# assert "license" in license
# def test_make_scaffold_creates_setup_dict_and_contains_info():
# """Test that setup_dict is created successfully in make_scaffold."""
# assert isinstance(setup_dict, dict)
# assert "version" in setup_dict.keys()
# assert isinstance(setup_dict['author'], list)
# def test_dependencies_are_gathered():
# """Test that dependencies are gathered."""
# assert isinstance(dependencies, list)
# assert len(dependencies) > 0
| 2.453125
| 2
|
keras_wide_deep_98_table_15GB/python/intermediate_output.py
|
WenqiJiang/FPGA-Accelerator-for-Recommender-Systems
| 4
|
12781949
|
############ NOTE: this script is not working yet ##############
import pandas as pd
import numpy as np
import argparse
from datetime import datetime
import tensorflow as tf
# from tensorflow import keras
from tensorflow.keras.layers import Input, Embedding, Dense, Flatten, Activation, concatenate
# from tensorflow.keras.layers.advanced_activations import ReLU
# from tensorflow.keras.layers.normalization import BatchNormalization
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.utils import plot_model
from lib.read_conf import Config
from lib.dataset import input_fn
from train import Wide_and_Deep
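# Subclass of Wide_and_Deep that exposes the activations of an intermediate layer of the trained model.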
class Wide_and_Deep_Intermediate_Output(Wide_and_Deep):
def __init__(self, mode="deep"):
super().__init__(mode)
def predict_intermediate(self, layer_name="dense_1"):
if not self.model:
self.load_model()
input_data = self.get_dataset(mode="pred", batch_size=128)
# print("Input data shape: {}".format(len(input_data)))
self.model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
intermediate_layer_model = tf.keras.Model(inputs=self.model.input,
outputs=self.model.get_layer(layer_name).output)
result = intermediate_layer_model.predict(input_data)
print("result: {}".format(result))
print("result shape:{}".format(result.shape))
if __name__ == '__main__':
# mode = "wide and deep"
mode = "deep"
# wide_deep_net = Wide_and_Deep_Intermediate_Output(mode)
wide_deep_net = Wide_and_Deep(mode)
wide_deep_net.load_model()
get_3rd_layer_output = tf.keras.backend.function([wide_deep_net.model.layers[0].input], [wide_deep_net.model.layers[3].output])
# NOTE: `x` is never defined in this script; it should hold a batch of model inputs (e.g. one batch from wide_deep_net.get_dataset(mode="pred", batch_size=128))
layer_output = get_3rd_layer_output([x])[0]
# wide_deep_net.predict_model()
# wide_deep_net.predict_intermediate()
| 2.90625
| 3
|
parsers/api_check.py
|
Bassem95/Test26
| 105
|
12781950
|
<reponame>Bassem95/Test26
# -*- coding: utf-8 -*-
#!/usr/bin/python
"""
"""
import requests
import time
from raven import Client
client = Client(
'https://aee9ceb609b549fe8a85339e69c74150:8604fd36d8b04fbd9a70a81bdada5cdf@sentry.io/1223891')
key = "<KEY>"
def check_api(word):
query_string = { 'api-key': key, 'q': '"%s"' % word}
req = requests.get('https://api.nytimes.com/svc/search/v2/articlesearch.json', params=query_string, verify=False)
if req.status_code in {429, 529, 504}:
time.sleep(50)
client.captureMessage("NYT API RATELIMIT")
return check_api(word)
if req.status_code == 500:
client.captureMessage("NYT API 500", extra={
'req':req,
'word': word,
})
return False
result = req.json()
num_results = len(result['response']['docs'])
return num_results < 2
| 2.640625
| 3
|