blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d29b7c4af830c3ce4cb21e03942d300b879e409b | 13edd8f1bc3b86fd881f85fbeafe94811392d7fc | /seventh_module/爬虫/5.scrapy/project_09_redisPro/project_09_redisPro/settings.py | 8c62d6c1d7ab98319ede7d009306beeb0efb76dc | [] | no_license | ryan-yang-2049/oldboy_python_study | f4c90c9d8aac499e1d810a797ab368217f664bb1 | 6e1ab7f217d9bf9aa7801266dee7ab4d7a602b9f | refs/heads/master | 2022-07-22T23:49:28.520668 | 2019-06-11T13:26:25 | 2019-06-11T13:26:25 | 129,877,980 | 0 | 1 | null | 2022-07-18T17:12:54 | 2018-04-17T09:12:48 | HTML | UTF-8 | Python | false | false | 3,680 | py | # -*- coding: utf-8 -*-
# Scrapy settings for project_09_redisPro project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Project identity: package containing the spiders for this crawl.
BOT_NAME = 'project_09_redisPro'
SPIDER_MODULES = ['project_09_redisPro.spiders']
NEWSPIDER_MODULE = 'project_09_redisPro.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'project_09_redisPro (+http://www.yourdomain.com)'
# Spoofed desktop Chrome user agent (replaces the default Scrapy UA above).
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36'
# Obey robots.txt rules
# Deliberately disabled: requests are sent regardless of the site's robots.txt.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'project_09_redisPro.middlewares.Project09RedisproSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'project_09_redisPro.middlewares.Project09RedisproDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'scrapy_redis.pipelines.RedisPipeline':400,
}
# Use scrapy-redis's duplicate-request filter (fingerprints stored in Redis).
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Use scrapy-redis's own scheduler (request queue lives in Redis).
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Allow pausing/resuming: keep the Redis queue instead of clearing it on close.
SCHEDULER_PERSIST = True
# If the Redis server is not on localhost, point to it here.
REDIS_HOST = '101.132.45.51'
REDIS_PORT = '26379'
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"11066986@qq.com"
] | 11066986@qq.com |
a562123f6939c3763fb4d84f3946b4f8aeda00f0 | d84876ff3d2a61cb28eff13b1af173a091aff917 | /stock_prediction.py | c65790b23d6c0966f517bdabeaf14767239de1c7 | [] | no_license | webclinic017/Stock-Prediction-with-Regression-Models-In-Python | d5f64e066edbe987d775017680d2bcdecea52722 | 61be482ffa36869f43588cb2f7c005914dedda76 | refs/heads/master | 2022-02-18T19:05:57.839353 | 2019-09-07T15:50:29 | 2019-09-07T15:50:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,444 | py | # -*- coding: utf-8 -*-
"""Stock Prediction.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1n27WLQOmxqT8_Wyd3Nm1xONyeaCTxo6w
##Importing Libraries
"""
# Commented out IPython magic to ensure Python compatibility.
import numpy as np
import pandas as pd
import datetime
import pandas_datareader.data as web
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from datetime import datetime, timedelta
from sklearn.model_selection import train_test_split
"""###Import Yahoo Finance Library"""
!pip install yfinance --upgrade --no-cache-dir
"""### Importing Stock data of Netflix"""
from pandas_datareader import data as pdr
import fix_yahoo_finance as yf
yf.pdr_override()
df_full = pdr.get_data_yahoo("NFLX", start="2014-01-01").reset_index()
df_full.to_csv('NFLX.csv',index=False)
df_full.head()
df_full['Date'] = pd.to_datetime(df_full.Date, format='%Y-%m-%d') # Converts string to datetime
df_full = df_full.set_index('Date') # Set the index of dataframe to date column
#plot
df_full.Close.plot()
df_full.info()
df_full.describe()
"""### Spliting into train and test data"""
forecast_out = 60 # horizon: number of rows (trading days) to forecast ahead
# Label = adjusted close shifted up by the horizon, so each row's target is
# the price `forecast_out` rows later; the last `forecast_out` rows become NaN.
df_full['Prediction'] = df_full['Adj Close'].shift(-forecast_out)
df_full.tail()
# Features: every column except the label; drop the NaN-labelled tail rows.
x = np.array(df_full.drop(['Prediction'], 1))
x = x[:-forecast_out]
y = np.array(df_full['Prediction'])
y = y[:-forecast_out]
# Random 80/20 train/test split.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2)
"""## Linear Regression Models (OLS,Lasso,Ridge)"""
# Import package for builing different types of linear regrssion models
from sklearn.linear_model import LinearRegression, Ridge, Lasso
linear_model = LinearRegression()
ridge_model = Ridge()
lasso_model = Lasso()
linear_model.fit(x_train, y_train)
ridge_model.fit(x_train, y_train)
lasso_model.fit(x_train, y_train)
linear_model_score = linear_model.score(x_test, y_test)
print('LinearModel score:', linear_model_score)
ridge_model_score = ridge_model.score(x_test, y_test)
print('RidgeModel score:', ridge_model_score)
lasso_model_score = lasso_model.score(x_test, y_test)
print('LassoModel score:', lasso_model_score)
# Feature rows for the final `forecast_out` days — the span to extrapolate.
x_forecast = np.array(df_full.drop(['Prediction'], 1))[-forecast_out:]
# For each fitted model: predictions over the forecast span and over the
# whole history (used below for plotting).
linear_model_forecast_prediction = linear_model.predict(x_forecast)
linear_model_real_prediction = linear_model.predict(np.array(df_full.drop(['Prediction'], 1)))
ridge_model_forecast_prediction = ridge_model.predict(x_forecast)
ridge_model_real_prediction = ridge_model.predict(np.array(df_full.drop(['Prediction'], 1)))
lasso_model_forecast_prediction = lasso_model.predict(x_forecast)
lasso_model_real_prediction = lasso_model.predict(np.array(df_full.drop(['Prediction'], 1)))
# Calendar dates for the forecast span, continuing after the last index date
# (note: naive +1 day also generates non-trading days).
predicted_dates = []
recent_date = df_full.index.max()
display_at = 1  # first row index shown when plotting
alpha = 0.5  # line transparency for the plots below
for i in range(forecast_out):
    recent_date += timedelta(days=1)
    predicted_dates.append(recent_date)
plt.figure(figsize = (16,8))
plt.xticks(rotation=60)
plt.plot(df_full.index[display_at:], linear_model_real_prediction[display_at:], label='Linear Preds', c='blue', alpha=alpha)
plt.plot(predicted_dates, linear_model_forecast_prediction, c='blue', alpha=alpha)
plt.plot(df_full.index[display_at:], ridge_model_real_prediction[display_at:], label='Ridge Preds', c='green', alpha=alpha)
plt.plot(predicted_dates, ridge_model_forecast_prediction, c='green', alpha=alpha)
plt.plot(df_full.index[display_at:], lasso_model_real_prediction[display_at:], label='Lasso Preds', c='red', alpha=alpha)
plt.plot(predicted_dates, lasso_model_forecast_prediction, c='red', alpha=alpha)
plt.plot(df_full.index[display_at:], df_full['Close'][display_at:], label='Actual', c='black', linewidth=3)
plt.legend()
"""### Polynomoal Regression"""
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error, r2_score
def create_polynomial_regression_model(degree):
    """Fit and report a polynomial regression of the given degree.

    Expands the module-level train/test feature matrices with polynomial
    terms, fits a linear model on the expanded training features, and
    prints RMSE / R^2 for both splits. Relies on the globals x_train,
    x_test, y_train and y_test defined earlier in this script.
    """
    feature_map = PolynomialFeatures(degree=degree)
    # Lift the raw features into the polynomial basis.
    train_features = feature_map.fit_transform(x_train)
    # Ordinary least squares on the expanded features.
    regressor = LinearRegression()
    regressor.fit(train_features, y_train)
    # Predictions on both splits.
    train_preds = regressor.predict(train_features)
    test_preds = regressor.predict(feature_map.fit_transform(x_test))
    # Evaluation metrics per split.
    rmse_train = np.sqrt(mean_squared_error(y_train, train_preds))
    r2_train = r2_score(y_train, train_preds)
    rmse_test = np.sqrt(mean_squared_error(y_test, test_preds))
    r2_test = r2_score(y_test, test_preds)
    print("The model performance for the training set at degree {}" .format(degree))
    print("-------------------------------------------")
    print("RMSE of training set is {}".format(rmse_train))
    print("R2 score of training set is {}".format(r2_train))
    print("\n")
    print("The model performance for the test set at degree {}" .format(degree))
    print("-------------------------------------------")
    print("RMSE of test set is {}".format(rmse_test))
    print("R2 score of test set is {}".format(r2_test))
    print("______________________________________________________________________________________")
    print("______________________________________________________________________________________")
    print("\n")
for i in range(1,5):
create_polynomial_regression_model(i)
# Final model: degree-2 polynomial regression (presumably chosen from the
# degree sweep above — TODO confirm this was the intended pick).
polynomial_features= PolynomialFeatures(degree=2)
x_poly = polynomial_features.fit_transform(x_train)
model = LinearRegression()
model.fit(x_poly, y_train)
# Held-out predictions and error metrics.
y_poly_pred = model.predict(polynomial_features.fit_transform(x_test))
rmse = np.sqrt(mean_squared_error(y_test,y_poly_pred))
r2 = r2_score(y_test,y_poly_pred)
print(rmse)
print(r2)
# Predictions over the forecast span and over the full history, for plotting.
polynomial_model_forecast_prediction = model.predict(polynomial_features.fit_transform(x_forecast))
polynomial_model_real_prediction = model.predict(polynomial_features.fit_transform(np.array(df_full.drop(['Prediction'], 1))))
predicted_dates = []
recent_date = df_full.index.max()
display_at = 1
alpha = 1
for i in range(forecast_out):
recent_date += timedelta(days=1)
predicted_dates.append(recent_date)
plt.figure(figsize = (16,8))
plt.xticks(rotation=60)
plt.plot(df_full.index[display_at:], linear_model_real_prediction[display_at:], label='Linear Preds', c='blue', alpha=alpha)
plt.plot(predicted_dates, linear_model_forecast_prediction, c='blue', alpha=alpha)
plt.plot(df_full.index[display_at:], ridge_model_real_prediction[display_at:], label='Ridge Preds', c='green', alpha=alpha)
plt.plot(predicted_dates, ridge_model_forecast_prediction, c='green', alpha=alpha)
plt.plot(df_full.index[display_at:], lasso_model_real_prediction[display_at:], label='Lasso Preds', c='red', alpha=alpha)
plt.plot(predicted_dates, lasso_model_forecast_prediction, c='red', alpha=alpha)
plt.plot(df_full.index[display_at:], polynomial_model_real_prediction[display_at:], label='polynomial Preds', c='magenta', alpha=alpha)
plt.plot(predicted_dates, polynomial_model_forecast_prediction, c='magenta', alpha=alpha)
plt.plot(df_full.index[display_at:], df_full['Close'][display_at:], label='Actual', c='black', linewidth=3)
plt.legend() | [
"noreply@github.com"
] | webclinic017.noreply@github.com |
b13966f310e0c8104c5131929d3d6b912977503f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/1638.py | 32faf53928bb6afe2590a89bf55f06931f8625b1 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,349 | py | # -*- coding: utf-8 -*-
"""
Solves the 'Cookie Clicker Alpha' Problem Google Code Jam Qualifications 2014
https://code.google.com/codejam/contest/2974486/dashboard#s=p1
Created on Fri Apr 12 1:58:51 2014
@author: Luca
"""
import numpy as np
import sys
def get_childern(node, C, F, X):
    """Return the successor states of `node` = (time, cookies, rate).

    If saving C more cookies would overshoot the goal X, the only child is
    "wait until X cookies are reached". Otherwise there are two children:
    keep the C freshly earned cookies, or spend them on a farm (which
    raises the production rate by F but leaves the cookie count as-is).

    Bug fixed: the original indexed the tuple with call syntax
    (``node(1)``), which raises TypeError; it now uses ``node[i]``.
    """
    time, cookies, rate = node
    if cookies + C > X:
        # Waiting for the goal is the only sensible move from here.
        return [(time + (X - cookies) / rate, X, rate)]
    child_no_fact = (time + C / rate, cookies + C, rate)
    child_fact = (time + C / rate, cookies, rate + F)
    return [child_no_fact, child_fact]
def solve_cookie_clicker_alpha(C,F,X):
    # NOTE(review): abandoned/broken search attempt — never called (the
    # __main__ driver uses solve_by_enumeration instead). Known defects:
    # `get_children` is undefined (the helper is `get_childern` and takes
    # (node, C, F, X)), nodes are indexed with call syntax `fringe[-1](1)`,
    # `visite` is undefined, and the fringe is never popped, so the loop
    # would never terminate. Kept byte-identical for reference only.
    root = (0,0,2) # time,cookies,rate
    current_node = root
    fringe = [root]
    visited = []
    solution = []
    while len(fringe)>0:
        current_node = fringe[0]
        ch = get_children(current_node)
        for c in ch:
            if c not in visited:
                fringe.append(c)
                if fringe[-1](1)==X:
                    solution.append(fringe[-1])
        visite.append()
def solve_by_enumeration(C,F,X):
    """Return the minimum time (seconds) needed to reach X cookies.

    Cookies accrue continuously at `rate` cookies/second (starting at 2.0).
    A farm costs C cookies and permanently adds F cookies/second; buying a
    farm as soon as it is affordable is optimal for a fixed farm count n.
    The total time as a function of n is unimodal, so n is increased until
    the time stops improving and the best value seen is returned.

    Cleanup: removed the unused `last_time` local and the unreachable
    `return` after the ``while True`` loop.
    """
    # Baseline: buy no farms at all and just wait for X cookies.
    min_time = X / 2.0
    n = 1
    while True:
        # Time to buy n farms greedily, then wait for the remaining X cookies.
        rate = 2.0
        time = 0.0
        for _ in range(0, n):
            time += C / rate
            rate += F
        time += X / rate
        if time < min_time:
            min_time = time
        else:
            # Times stopped improving: the previous n was optimal.
            return min_time
        n = n + 1
if __name__ == '__main__':
    # Python 2 driver: read the Code Jam input file named on the command
    # line, solve each test case, and write "Case #i: answer" lines.
    if len(sys.argv)<2:
        print 'Need to specify an input file'
        exit(1)
    input_file = sys.argv[1]
    output_file = 'cookie_clicker_alpha_output_3.txt'
    do_debug = True
    try:
        with open(input_file,'r') as f:
            lines = f.readlines()
            # First line: number of test cases; then one "C F X" line each.
            T = int(lines[0])
            print 'Solving Cookie Clicker Alpha Problem for T=%d test cases.'%(T)
            data = np.zeros((T,3),dtype=np.float64)
            for n in range(0,T):
                # NOTE(review): np.float is deprecated (removed in NumPy
                # 1.24) — use float or np.float64 on modern NumPy.
                data[n,:] = np.array([float(t) for t in lines[n+1].split()],dtype = np.float)
                if do_debug:
                    print 'Test case %d'%(n+1)
                    print 'C,F,X=%f,%f,%f'%(data[n,0],data[n,1],data[n,2])
    except IOError:
        print 'File %s not found'%input_file
        exit(1)
    # Solve the problem use binary tree depth first search
    # tree branching every time a factory can be bought
    # (In fact the search above was abandoned; each case is solved by
    # enumerating the number of farms.)
    solutions = []
    for n in range(0,T):
        C,F,X = data[n,:]
        print 'Solving Cookie Clicker Alpha Problem %d C,F,X=%f,%f,%f'%(n,C,F,X)
        res = solve_by_enumeration(C,F,X)
        solutions.append(res)
    try:
        with open(output_file,'w') as f:
            for n in range(0,T):
                f.write('Case #%d: %12.8e\n'%(n+1,solutions[n]))
    except IOError:
        # NOTE(review): misleading message — this branch means the output
        # file could not be written, not that it was "not found".
        print 'File %s not found'%output_file
        exit(1)
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
a238b652ff6bdc3c552b4f99c87bc8bddb5b42a7 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /1423/1423.maximum-points-you-can-obtain-from-cards.785265974.Accepted.leetcode.python3.py | 18ad403e19400e3a95826bb18aff46c4284a3bdc | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | class Solution(object):
def maxScore(self, cardPoints, k):
left = 0
right = len(cardPoints) - k
ksum = sum(cardPoints[len(cardPoints) - k:])
result = max(float('-inf'), ksum)
while right < len(cardPoints):
ksum = ksum - cardPoints[right] + cardPoints[left]
result = max(result, ksum)
left += 1
right += 1
return result
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
423f8464015490054986bbe3c694ad19db6cca2c | a63b49c45f09e0b0abd20ed6ca81b8e30e17f755 | /a2c_ppo_acktr/storage.py | 147fc1989991982ce00a21c1d57b4fdfaacb63cd | [] | no_license | xxchenxx/MixtureOptimizer | d08b2395208e3efec2dbe730b4f194d509aea106 | 1c6f8d0848aeb71d49ea129001f3f170612cd4cf | refs/heads/master | 2022-12-04T05:24:01.974432 | 2020-08-22T15:40:34 | 2020-08-22T15:40:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,569 | py | import torch
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from pdb import set_trace as bp
def _flatten_helper(T, N, _tensor):
return _tensor.view(T * N, *_tensor.size()[2:])
class RolloutStorage(object):
def __init__(self, num_steps, obs_shape, action_shape=1, hidden_size=1, num_recurrent_layers=1):
# TODO: not include num_process here since we only have one model (optimizee) each time
# observation: (seq_len, batch_size, #lstm_input * window + #scalar_input + #actions * 1(LR))
self.obs = torch.zeros(num_steps + 1, 1, *obs_shape)
self.recurrent_hidden_states = torch.zeros(num_steps + 1, num_recurrent_layers, 1, hidden_size)
self.rewards = torch.zeros(num_steps, 1, 1)
self.value_preds = torch.zeros(num_steps + 1, 1)
self.returns = torch.zeros(num_steps + 1, 1)
self.action_log_probs = torch.zeros(num_steps, 1)
self.actions = torch.zeros(num_steps, action_shape)
self.num_steps = num_steps
self.step = 0
def reset(self):
device = self.obs.device
self.obs = torch.zeros_like(self.obs)
self.recurrent_hidden_states = torch.zeros_like(self.recurrent_hidden_states)
self.rewards = torch.zeros(self.num_steps, 1, 1)
self.value_preds = torch.zeros(self.num_steps + 1, 1)
self.returns = torch.zeros(self.num_steps + 1, 1)
self.action_log_probs = torch.zeros(self.num_steps, 1)
self.actions = torch.zeros_like(self.actions)
self.step = 0
self.to(device)
def to(self, device):
self.obs = self.obs.to(device)
self.recurrent_hidden_states = self.recurrent_hidden_states.to(device)
self.rewards = self.rewards.to(device)
self.value_preds = self.value_preds.to(device)
self.returns = self.returns.to(device)
self.action_log_probs = self.action_log_probs.to(device)
self.actions = self.actions.to(device)
def insert(self, obs, recurrent_hidden_states, actions, action_log_probs, value_preds, rewards):
self.obs[self.step + 1].copy_(obs)
self.recurrent_hidden_states[self.step + 1].copy_(recurrent_hidden_states)
self.actions[self.step].copy_(actions)
self.action_log_probs[self.step].copy_(action_log_probs)
self.value_preds[self.step].copy_(value_preds)
self.rewards[self.step].copy_(rewards)
self.step = (self.step + 1) % self.num_steps
def after_update(self):
self.obs[0].copy_(self.obs[-1])
self.recurrent_hidden_states[0].copy_(self.recurrent_hidden_states[-1])
def compute_returns(self, next_value, use_gae, gamma, gae_lambda):
if use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.size(0))):
delta = self.rewards[step] + gamma * self.value_preds[step + 1] - self.value_preds[step]
gae = delta + gamma * gae_lambda * gae
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.size(0))):
self.returns[step] = self.returns[step + 1] * gamma + self.rewards[step]
def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None):
num_steps, num_processes = self.rewards.size()[0:2]
batch_size = num_processes * num_steps
if mini_batch_size is None:
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(num_processes, num_steps, num_processes * num_steps,
num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
sampler = BatchSampler(
SubsetRandomSampler(range(batch_size)),
mini_batch_size,
drop_last=True)
for indices in sampler:
obs_batch = self.obs[:-1].view(-1, *self.obs.size()[1:])[indices]
recurrent_hidden_states_batch = self.recurrent_hidden_states[:-1].view(-1, *self.recurrent_hidden_states.size()[1:])[indices]
actions_batch = self.actions.view(-1, self.actions.size(-1))[indices]
value_preds_batch = self.value_preds[:-1].view(-1, 1)[indices]
return_batch = self.returns[:-1].view(-1, 1)[indices]
old_action_log_probs_batch = self.action_log_probs.view(-1, 1)[indices]
if advantages is None:
adv_targ = None
else:
adv_targ = advantages.view(-1, 1)[indices]
yield obs_batch, recurrent_hidden_states_batch, actions_batch, value_preds_batch, return_batch, old_action_log_probs_batch, adv_targ
def recurrent_generator(self, advantages, num_mini_batch):
num_processes = self.rewards.size(1)
assert num_processes >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(num_processes, num_mini_batch))
num_envs_per_batch = num_processes // num_mini_batch
perm = torch.randperm(num_processes)
for start_ind in range(0, num_processes, num_envs_per_batch):
obs_batch = []
recurrent_hidden_states_batch = []
actions_batch = []
value_preds_batch = []
return_batch = []
old_action_log_probs_batch = []
adv_targ = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
obs_batch.append(self.obs[:-1, ind])
recurrent_hidden_states_batch.append(self.recurrent_hidden_states[0:1, ind])
actions_batch.append(self.actions[:, ind])
value_preds_batch.append(self.value_preds[:-1, ind])
return_batch.append(self.returns[:-1, ind])
old_action_log_probs_batch.append(
self.action_log_probs[:, ind])
adv_targ.append(advantages[:, ind])
T, N = self.num_steps, num_envs_per_batch
# These are all tensors of size (T, N, -1)
obs_batch = torch.stack(obs_batch, 1)
actions_batch = torch.stack(actions_batch, 1)
value_preds_batch = torch.stack(value_preds_batch, 1)
return_batch = torch.stack(return_batch, 1)
old_action_log_probs_batch = torch.stack(
old_action_log_probs_batch, 1)
adv_targ = torch.stack(adv_targ, 1)
# States is just a (N, -1) tensor
recurrent_hidden_states_batch = torch.stack(recurrent_hidden_states_batch, 1).view(N, -1)
# Flatten the (T, N, ...) tensors to (T * N, ...)
obs_batch = _flatten_helper(T, N, obs_batch)
actions_batch = _flatten_helper(T, N, actions_batch)
value_preds_batch = _flatten_helper(T, N, value_preds_batch)
return_batch = _flatten_helper(T, N, return_batch)
old_action_log_probs_batch = _flatten_helper(T, N, \
old_action_log_probs_batch)
adv_targ = _flatten_helper(T, N, adv_targ)
yield obs_batch, recurrent_hidden_states_batch, actions_batch, \
value_preds_batch, return_batch, old_action_log_probs_batch, adv_targ
| [
"hfercxx@hotmail.com"
] | hfercxx@hotmail.com |
f974791af539b1ba0f63ab6f9457a2bafd3f0b78 | 58509347cca790fce26884f027425170c5891a17 | /bin/train_convert.py | c972a100902ea60af68848b0c99c8381455c69c5 | [] | no_license | Hiroshiba/signico_real_to_anime | e22d07ca6531b75b3987ecc309e02bcd405f6f61 | 0a68b132fc77e24539d7ddc65b3078fd0c7f3858 | refs/heads/master | 2021-01-19T23:25:37.149611 | 2018-03-21T17:24:32 | 2018-03-21T17:32:45 | 88,979,946 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,607 | py | import argparse
import chainer
import glob
import os
import sys
ROOT_PATH = os.path.join(os.path.dirname(__file__), "..")
sys.path.append(ROOT_PATH)
from deep_image_converter.config import Config
from deep_image_converter import dataset
from deep_image_converter.loss import ConvertModelLossMaker, FacebookConvertModelLossMaker
from deep_image_converter.model import prepare_model, choose_discriminator, BaseConvertModel
from deep_image_converter.updater import ConvertModelUpdater
from deep_image_converter.train import TrainManager
from deep_image_converter import utility
# CLI: single positional argument — path to the experiment's config JSON.
parser = argparse.ArgumentParser()
parser.add_argument('config_json_path')
config_json_path = parser.parse_args().config_json_path
config = Config(config_json_path)
# Snapshot the config alongside the run outputs for reproducibility.
config.copy_config_json()
train_manager = TrainManager(config.train_config)
# Datasets: two unpaired training domains (a, b) plus eval splits.
datasets = dataset.choose(config.dataset_config)
nb = config.train_config.batchsize
IteratorClass = chainer.iterators.MultiprocessIterator
iterator_train_a = IteratorClass(datasets['train_a'], nb, True, True)
iterator_train_b = IteratorClass(datasets['train_b'], nb, True, True)
iterator_test = IteratorClass(datasets['test'], nb, False, False)
iterator_train_eval = IteratorClass(datasets['train_eval'], nb, False, False)
# `cond and call()` is used as a one-line conditional side effect:
# select/claim the GPU only when a non-negative device id is configured.
config.train_config.gpu >= 0 and chainer.cuda.get_device(config.train_config.gpu).use()
utility.chainer.set_default_initialW(config.model_config.initialW)
# Generator (converter) model and its optimizer.
model = prepare_model(config.model_config)
assert isinstance(model, BaseConvertModel)
config.train_config.gpu >= 0 and model.to_gpu()
optimizer = train_manager.make_optimizer(model, 'main')
optimizers = {'main': optimizer}
# Discriminator and its optimizer.
dis = choose_discriminator(config.model_config)
config.train_config.gpu >= 0 and dis.to_gpu()
optimizer = train_manager.make_optimizer(dis, 'discriminator')
optimizers['dis'] = optimizer
# Loss maker selected by config (default vs "facebook" variant).
if config.loss_config.name is None:
    loss_maker = ConvertModelLossMaker(config.loss_config, model, dis)
elif config.loss_config.name == 'facebook':
    loss_maker = FacebookConvertModelLossMaker(config.loss_config, model, dis)
else:
    raise NotImplementedError(config.loss_config.name)
updater = ConvertModelUpdater(
    optimizer=optimizers,
    iterator={'a': iterator_train_a, 'b': iterator_train_b},
    loss_maker=loss_maker,
    device=config.train_config.gpu,
)
trainer = train_manager.make_trainer(
    updater=updater,
    model={'main': model, 'dis': dis},
    eval_func=loss_maker.test,
    iterator_test=iterator_test,
    iterator_train_eval=iterator_train_eval,
    loss_names=loss_maker.get_loss_names() + loss_maker.get_loss_names_discriminator(),
)
trainer.run()
| [
"kazuyuki_hiroshiba@dwango.co.jp"
] | kazuyuki_hiroshiba@dwango.co.jp |
0c5a649f0b60d66c181ab5f9abc7269f1142b11b | e2c369fc706a6058fe0126e088e8cc4ce48d2654 | /src/song/migrations/0011_auto_20190417_2320.py | f7b5aa3bf527350e940aa8e1023e259037afbc15 | [] | no_license | kishanpython/SongHub | 9ea1381d4add0c8fa036710f79fd9964c991eba7 | f86997d2070533ff7649ce3df89eaed66cbda609 | refs/heads/master | 2020-07-14T18:04:25.036424 | 2019-08-30T13:11:35 | 2019-08-30T13:11:35 | 205,368,723 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 2.0 on 2019-04-17 23:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('song', '0010_song_file'),
]
operations = [
migrations.AlterField(
model_name='song',
name='file',
field=models.FileField(blank=True, null=True, upload_to='musics/'),
),
]
| [
"kishanyadav3223@gmail.com"
] | kishanyadav3223@gmail.com |
f83235d5cdc9fd38002e926ce485385bb59a0828 | 073c929cab2d92e9859010be654eb1ba69b397b1 | /src/participants/migrations/0004_alter_participant_polls.py | 62b1dd8d33b7130bc663cc9a08e88ff7a171fdf3 | [] | no_license | iamgaddiel/voting_system | 6e5abe9b6d9da29abc6a94e12157cb308bf2b717 | b41ec2a98ed678bedd3b9bdd9d6c8a5c679fcabf | refs/heads/main | 2023-06-22T17:29:57.371562 | 2021-07-22T06:57:47 | 2021-07-22T06:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | # Generated by Django 3.2.3 on 2021-05-31 04:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Needs the polls app's initial schema and this app's previous migration.
    dependencies = [
        ('polls', '0001_initial'),
        ('participants', '0003_participant_polls'),
    ]
    # Participant.polls becomes a required FK to polls.Polls,
    # deleting participants when their poll is deleted.
    operations = [
        migrations.AlterField(
            model_name='participant',
            name='polls',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.polls'),
        ),
    ]
| [
"gaddiel@localhost.localdomain"
] | gaddiel@localhost.localdomain |
3e6076a9b6fe7e899b8fb311140de2d1133e6bed | 9079354291951a1782ec43efaead5876895eece8 | /legacy_scripts/setup.py | 66b535476ffc0715c1d1c734eccd7a5d32a16277 | [] | no_license | luungoc2005/nlp-test | c9a2e0174546221b0e6d2501d9c4dfeca5c6efd0 | ed43a4b1bbcd23c3fc39e92d790864c73a5999f3 | refs/heads/master | 2022-12-08T14:17:07.271865 | 2019-05-26T16:23:20 | 2019-05-26T16:23:20 | 125,201,975 | 0 | 0 | null | 2022-12-07T23:37:52 | 2018-03-14T11:24:54 | Jupyter Notebook | UTF-8 | Python | false | false | 355 | py | from distutils.core import setup
from Cython.Build import cythonize
# from distutils.extension import Extension
import numpy
setup(
    name="Botbot-NLP",
    # Cythonize the .pyx sources into C extension modules.
    ext_modules=cythonize([
        # "common/_cutils/*.pyx",
        "text_classification/fast_text/_cutils/*.pyx"
    ]
    # NOTE(review): cythonize's include_path only affects .pxd/cimport
    # lookup; NumPy's C headers normally belong in each Extension's
    # include_dirs — verify the extensions actually build.
    , include_path=[
        numpy.get_include()
    ]
    ),
)
| [
"luungoc2005@gmail.com"
] | luungoc2005@gmail.com |
804c4b6e7cbf6961dbb3e5415cedb8a68caa6800 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_190/ch117_2020_03_31_05_22_20_071855.py | 23d4ed7a059bf6e7ba27702c678158f5b49358c9 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | import math
def snell_descartes(n1, n2, o1):
    """Return the refraction angle o2 in degrees, via Snell's law.

    n1 * sin(o1) = n2 * sin(o2), with the incidence angle o1 given in
    degrees. Bugs fixed: the original wrote ``(math,pi/180)`` (a tuple,
    which crashes when multiplied by a float) and applied ``sin`` a second
    time where the inverse sine is required to recover the angle.
    """
    sin_o2 = (math.sin(math.radians(o1)) * n1) / n2
    # arcsin recovers the angle (radians), then convert back to degrees.
    o2 = math.degrees(math.asin(sin_o2))
    return o2
"you@example.com"
] | you@example.com |
00829ea419f370c994ad692776728474d096aa13 | 3d6083f1821950afc06c432066dc763d3eb5db44 | /guide/basic/bert_embedder.py | 3465da95b238f7028c9271d0bc00953b1d1c82c5 | [] | no_license | flyfatty/self-allennlp | f4b3e3f3c36422c0950b0479a22546792c4852cb | 4741b2e47037dba1e20053f6877a7bbafedd8047 | refs/heads/master | 2023-08-24T03:10:13.994216 | 2021-10-04T08:13:07 | 2021-10-04T08:13:07 | 339,996,075 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,384 | py | # @Time : 2020/12/24 1:02
# @Author : LiuBin
# @File : bert_embedder.py
# @Description :
# @Software: PyCharm
import torch
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import ListField, TextField
from allennlp.data.token_indexers import (
SingleIdTokenIndexer,
TokenCharactersIndexer,
ELMoTokenCharactersIndexer,
PretrainedTransformerIndexer,
PretrainedTransformerMismatchedIndexer,
)
from allennlp.data.tokenizers import (
CharacterTokenizer,
PretrainedTransformerTokenizer,
SpacyTokenizer,
WhitespaceTokenizer,
)
from allennlp.modules.seq2vec_encoders import CnnEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import (
Embedding,
TokenCharactersEncoder,
ElmoTokenEmbedder,
PretrainedTransformerEmbedder,
PretrainedTransformerMismatchedEmbedder,
)
from allennlp.nn import util as nn_util
import warnings
warnings.filterwarnings("ignore")
# Splits text into words (instead of wordpieces or characters). For ELMo, you can
# just use any word-level tokenizer that you like, though for best results you
# should use the same tokenizer that was used with ELMo, which is an older version
# of spacy. We're using a whitespace tokenizer here for ease of demonstration
# with binder.
tokenizer = WhitespaceTokenizer()
# Represents each token with an array of characters in a way that ELMo expects.
token_indexer = ELMoTokenCharactersIndexer()
# Both ELMo and BERT do their own thing with vocabularies, so we don't need to add
# anything, but we do need to construct the vocab object so we can use it below.
# (And if you have any labels in your data that need indexing, you'll still need
# this.)
vocab = Vocabulary()
text = "This is some text ."
tokens = tokenizer.tokenize(text)
print("ELMo tokens:", tokens)
text_field = TextField(tokens, {'elmo_tokens': token_indexer})
text_field.index(vocab)
# We typically batch things together when making tensors, which requires some
# padding computation. Don't worry too much about the padding for now.
padding_lengths = text_field.get_padding_lengths()
tensor_dict = text_field.as_tensor(padding_lengths)
print("ELMo tensors:", tensor_dict)
# Any transformer model name that huggingface's transformers library supports will
# work here. Under the hood, we're grabbing pieces from huggingface for this
# part.
transformer_model = 'bert-base-cased'
# To do modeling with BERT correctly, we can't use just any tokenizer; we need to
# use BERT's tokenizer.
tokenizer = PretrainedTransformerTokenizer(model_name=transformer_model)
# Represents each wordpiece with an id from BERT's vocabulary.
token_indexer = PretrainedTransformerIndexer(model_name=transformer_model)
text = "Some text with an extraordinarily long identifier."
tokens = tokenizer.tokenize(text)
print("BERT tokens:", tokens)
text_field = TextField(tokens, {'bert_tokens': token_indexer})
text_field.index(vocab)
tensor_dict = text_field.as_tensor(text_field.get_padding_lengths())
print("BERT tensors:", tensor_dict)
# Now we'll do an example with paired text, to show the right way to handle [SEP]
# tokens in AllenNLP. We have built-in ways of handling this for two text pieces.
# If you have more than two text pieces, you'll have to manually add the special
# tokens. The way we're doing this requires that you use a
# PretrainedTransformerTokenizer, not the abstract Tokenizer class.
# Splits text into wordpieces, but without adding special tokens.
tokenizer = PretrainedTransformerTokenizer(
model_name=transformer_model,
add_special_tokens=False,
)
context_text = "This context is frandibulous."
question_text = "What is the context like?"
context_tokens = tokenizer.tokenize(context_text)
question_tokens = tokenizer.tokenize(question_text)
print("Context tokens:", context_tokens)
print("Question tokens:", question_tokens)
combined_tokens = tokenizer.add_special_tokens(context_tokens, question_tokens)
print("Combined tokens:", combined_tokens)
text_field = TextField(combined_tokens, {'bert_tokens': token_indexer})
text_field.index(vocab)
tensor_dict = text_field.as_tensor(text_field.get_padding_lengths())
print("Combined BERT tensors:", tensor_dict)
# It's easiest to get ELMo input by just running the data code. See the
# exercise above for an explanation of this code.
tokenizer = WhitespaceTokenizer()
token_indexer = ELMoTokenCharactersIndexer()
vocab = Vocabulary()
text = "This is some text."
tokens = tokenizer.tokenize(text)
print("ELMo tokens:", tokens)
text_field = TextField(tokens, {'elmo_tokens': token_indexer})
text_field.index(vocab)
token_tensor = text_field.as_tensor(text_field.get_padding_lengths())
print("ELMo tensors:", token_tensor)
# We're using a tiny, toy version of ELMo to demonstrate this.
elmo_options_file = 'https://allennlp.s3.amazonaws.com/models/elmo/test_fixture/options.json'
elmo_weight_file = 'https://allennlp.s3.amazonaws.com/models/elmo/test_fixture/lm_weights.hdf5'
elmo_embedding = ElmoTokenEmbedder(options_file=elmo_options_file,
weight_file=elmo_weight_file)
embedder = BasicTextFieldEmbedder(token_embedders={'elmo_tokens': elmo_embedding})
tensor_dict = text_field.batch_tensors([token_tensor])
embedded_tokens = embedder(tensor_dict)
print("ELMo embedded tokens:", embedded_tokens)
# Again, it's easier to just run the data code to get the right output.
# We're using the smallest transformer model we can here, so that it runs on
# binder.
transformer_model = 'google/reformer-crime-and-punishment'
tokenizer = PretrainedTransformerTokenizer(model_name=transformer_model)
token_indexer = PretrainedTransformerIndexer(model_name=transformer_model)
text = "Some text with an extraordinarily long identifier."
tokens = tokenizer.tokenize(text)
print("Transformer tokens:", tokens)
text_field = TextField(tokens, {'bert_tokens': token_indexer})
text_field.index(vocab)
token_tensor = text_field.as_tensor(text_field.get_padding_lengths())
print("Transformer tensors:", token_tensor)
embedding = PretrainedTransformerEmbedder(model_name=transformer_model)
embedder = BasicTextFieldEmbedder(token_embedders={'bert_tokens': embedding})
tensor_dict = text_field.batch_tensors([token_tensor])
embedded_tokens = embedder(tensor_dict)
print("Transformer embedded tokens:", embedded_tokens)
| [
"28278672@qq.com"
] | 28278672@qq.com |
248789733a3133a24466895b30e8c35a526f519c | 98dbb9cd9523809b4ee0e6b92334fa6a2a6af2a3 | /bingads/v13/bulk/entities/labels/__init__.py | 053dc1f5f1bb6077ac63645d3921adb4bad7e414 | [
"MIT"
] | permissive | BingAds/BingAds-Python-SDK | a2f9b0c099b574a4495d0052218f263af55cdb32 | 373a586402bf24af7137b7c49321dbc70c859fce | refs/heads/main | 2023-07-27T15:31:41.354708 | 2023-07-10T03:21:03 | 2023-07-10T03:21:03 | 31,927,550 | 105 | 182 | NOASSERTION | 2023-09-04T06:51:20 | 2015-03-09T23:09:01 | Python | UTF-8 | Python | false | false | 140 | py | __author__ = 'Bing Ads SDK Team'
__email__ = 'bing_ads_sdk@microsoft.com'
from .bulk_label import *
from .bulk_label_associations import *
| [
"qitia@microsoft.com"
] | qitia@microsoft.com |
c244275eaf4960476910ef1e16ce1ae889076b4a | 377fc6e13101a2a45826cd118110c790f396a805 | /abc024-b.py | 1a6b9cb638e6c8a270a7d98c1cb76f25a8b319d8 | [] | no_license | number09/atcoder | 4076e7223f424b9923754e73992d6442e0bb0de7 | f521ca1205b254d99744abaf6a7a5bfe69845fe0 | refs/heads/master | 2021-06-04T23:16:39.021645 | 2021-01-19T08:30:39 | 2021-01-19T08:30:39 | 132,128,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | n, t = map(int, input().split())
# Read the n arrival times, then total how long the door stays open when
# each arrival keeps it open for t more seconds.
arrivals = [int(input()) for _ in range(n)]
open_from, open_until = 0, 0
total_open = 0
for a in arrivals:
    if open_from <= a <= open_until:
        # Door already open: only the extension past the current window counts.
        total_open += a + t - open_until
        open_until = a + t
    else:
        total_open += t
        open_from, open_until = a, a + t
print(total_open)
| [
"cielo.abierto09@gmail.com"
] | cielo.abierto09@gmail.com |
c6ab140ab8f2cb9654ad0aaf732e6dacf963ac3b | 9fc6604ae98e1ae91c490e8201364fdee1b4222a | /odx_custom_support_ticket/model/ticket_category_path.py | 08a84d39d96b2b82526614eb7ffc12378baf3f80 | [] | no_license | nabiforks/baytonia | b65e6a7e1c7f52a7243e82f5fbcc62ae4cbe93c4 | 58cb304d105bb7332f0a6ab685015f070988ba56 | refs/heads/main | 2023-03-23T21:02:57.862331 | 2021-01-04T03:40:58 | 2021-01-04T03:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | from odoo import models, fields, api
class TicketCategoryPath(models.Model):
    """Configuration record tying one support-ticket category to the
    chain of user states tickets of that category should follow."""
    _name = 'ticket.category.path'
    _rec_name = 'category_id'  # records display as their category's name
    category_id = fields.Many2one('website.support.ticket.categories', string="Category",required=1)
    user_state_ids = fields.One2many('user.state','ticket_category_path_id','User State')
    # At most one path may be configured per category.
    _sql_constraints = [
        ('category_id_uniq', 'unique (category_id)', "Category should be Unique !"),
    ]
class UserState(models.Model):
    """One step of a category path: the ticket state that applies at a
    given stage and the user assigned to handle it."""
    _name = 'user.state'
    stage = fields.Integer('Stage')  # step number within the path (presumably its order -- confirm with callers)
    state = fields.Many2one('website.support.ticket.states',string="State")
    ticket_category_path_id = fields.Many2one('ticket.category.path','Ticket Category Path')
    user_id = fields.Many2one('res.users', string="Assigned User")
| [
"ash@odoxsofthub.com"
] | ash@odoxsofthub.com |
5143c0b105c551354de46512e908b7649fd053b1 | e926966c5aa8061dc4b4780b20817c6504dd488b | /telegram_bots/urls.py | 319cc375fcc2932ce965c6bd1a6cb8805aa94094 | [
"MIT"
] | permissive | vladimirmyshkovski/django-telegram-bots | da954c4b7754a368e14422153b4e67dd53bff8d1 | f58ee16d61cd1b14cdf5c39649f63a851c1419e4 | refs/heads/master | 2023-01-06T22:42:11.263909 | 2018-07-19T10:34:10 | 2018-07-19T10:34:10 | 126,815,491 | 0 | 1 | MIT | 2022-12-26T20:36:22 | 2018-03-26T10:58:51 | Python | UTF-8 | Python | false | false | 1,048 | py | from django.conf.urls import url
from . import views
app_name = 'telegram_bots'
urlpatterns = [
    # Overview of all bots belonging to the current user.
    url(
        regex=r'^$',
        view=views.BotListView.as_view(),
        name='telegram_bots_list',
    ),
    # Detail page for one bot, addressed by primary key.
    url(
        regex=r'^(?P<pk>\d+)/$',
        view=views.BotDetailView.as_view(),
        name='telegram_bots_detail',
    ),
    url(
        regex=r'^create/$',
        view=views.BotCreateView.as_view(),
        name='telegram_bots_create',
    ),
    url(
        regex=r'^(?P<pk>\d+)/delete/$',
        view=views.BotDeleteView.as_view(),
        name='telegram_bots_delete',
    ),
    # Signed (un)subscribe links handed out to followers.
    url(
        regex=r'^subscribe/(?P<signature>.+)/$',
        view=views.BotSubscribeView.as_view(),
        name='telegram_bots_subscribe'
    ),
    url(
        regex=r'^unsubscribe/(?P<signature>.+)/$',
        view=views.BotUnsubscribeView.as_view(),
        name='telegram_bots_unsubscribe'
    ),
    # Telegram webhook endpoint; must stay last because its catch-all
    # bot_token pattern (`.+`) would otherwise shadow the routes above.
    url(
        regex=r'^(?P<bot_token>.+)/$',
        view=views.ReceiveView.as_view(),
        name='telegram_bots_receiver'
    ),
]
| [
"narnikgamarnikus@gmail.com"
] | narnikgamarnikus@gmail.com |
bfbb23a786b14a777616004e0854edb298e9cb69 | 2316ce8a21d44a5d09284968ef42530633dc10d2 | /sample_code/ep260/rev01/t.py | 9d90dfa8c1cc3697f35e2f8011be9b7038e13761 | [] | no_license | AlexanderWinkelmeier/explains | 160de2c41fc5fc0156b482b41f89644dc585c4f3 | d47ec53e384e4303a2d8e71fab9073a1a8d2d6bc | refs/heads/master | 2023-07-30T04:55:31.234482 | 2021-09-15T02:59:42 | 2021-09-15T02:59:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | import pytest
def test_pass():
    # Demonstrates a PASSED outcome: the assertion holds.
    assert 1 == 1
def test_failed():
    # Demonstrates a FAILED outcome: the assertion is intentionally false.
    assert 1 == 2
@pytest.fixture
def fixture():
    # Fixture that deliberately fails during setup.
    assert False
def test_errored(fixture):
    # Demonstrates an ERROR outcome: the test body would pass, but its
    # fixture fails during setup, so pytest reports an error instead.
    assert 1 == 1
| [
"int3l@users.noreply.github.com"
] | int3l@users.noreply.github.com |
c6db3d71ad904a4bddf6dd521ffae6b04bdd25a0 | 76de4fc4f00a04c8c9acc1e9e4a5fae12cf0c08a | /trunk/pyformex/__init__.py | 002e4e687bd184efd65023a3a3b764564156c3e2 | [] | no_license | BackupTheBerlios/pyformex-svn | ec2361b1b9967918be65e892217a691a6f8b145d | f5404809095711334bbb938d9d119a69ad8fc260 | refs/heads/master | 2020-12-24T13:20:47.422165 | 2011-11-15T11:52:23 | 2011-11-15T11:52:23 | 40,749,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,022 | py | # $Id$
##
## This file is part of pyFormex 0.8.5 Sun Nov 6 17:27:05 CET 2011
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: https://savannah.nongnu.org/projects/pyformex/
## Copyright (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""pyFormex core module initialisation.
This module initializes the pyFormex global variables and
defines a few essential functions.
"""
__version__ = "0.8.5-a1"
__revision__ = '2070M'
Version = 'pyFormex %s' % __version__
Copyright = 'Copyright (C) 2004-2011 Benedict Verhegghe'
Url = 'http://pyformex.org'
Description = "pyFormex is a tool for generating, manipulating and transforming large geometrical models of 3D structures by sequences of mathematical transformations."
# The GUI parts
app_started = False
interactive = False
app = None # the Qapplication
GUI = None # the GUI QMainWindow
canvas = None # the OpenGL Drawing widget controlled by the running script
#board = None # the message board
# set start date/time
import time,datetime
StartTime = datetime.datetime.now()
# initialize some global variables used for communication between modules
options = None # the options found on the command line
print_help = None # the function to print(the pyformex help text (pyformex -h))
cfg = {} # the current session configuration
prefcfg = None # the preferenced configuration
refcfg = None # the reference configuration
preffile = None # the file where the preferenced configuration will be saved
PF = {} # explicitely exported globals
_PF_ = {} # globals that will be offered to scripts
scriptName = None
# define last rescue versions of message, warning and debug
def message(s):
    """Last-rescue message printer: just writes *s* to stdout."""
    print(s)
warning = message
def debug(s,lead="DEBUG",level=-1):
    """Print a debug message"""
    # The message is printed only when the guarded test below raises:
    # either `options` is still unset (AttributeError), or the bare
    # `raise` fires because options.debug is negative or
    # options.debug % level is non-zero.
    try: # to make sure that debug() can be used before options are set
        if options.debug < 0 or (options.debug % level > 0):
            raise
        pass
    except:
        print("%s: %s" % (lead,str(s)))
def debugt(s):
    """Print a debug message with timer"""
    # Uses the current epoch time as the message lead.
    debug(s,time.time())
### End
| [
"bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35"
] | bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35 |
1d4a64996631967cbfe32ff2234b6028d67116af | 3235cf9cbebcb6c12510b1ab5cbd6c1051ef6378 | /CnnModelTrainKaggleCatDog0614/CnnModelTrainKaggleCatDog_DateAugmentation_Pred.py | b150edd104d87d0ee33bb751c30b292b61db5d96 | [] | no_license | dorahero/pyAI | 8ba99fe2726264044e166562359868425d6e79ea | c185875ca19f0cca5ec0812eff373e25d0fbd0f1 | refs/heads/master | 2022-11-27T14:56:07.942104 | 2020-08-08T02:21:48 | 2020-08-08T02:21:48 | 285,956,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | # --coding:utf-8--
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# Directory layout of the Kaggle cats-vs-dogs dataset splits.
train_dir = 'kagglecatdog/train'
test_dir = 'kagglecatdog/test'
validation_dir = 'kagglecatdog/validation'
# The generator is only used here to recover the class-name -> index mapping.
train_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir )
print('='*30)
print('訓練的分類:',train_generator.class_indices)
print('='*30)
labels = train_generator.class_indices
# Invert the mapping to index -> class name for easy lookup after predict().
labels = dict((v,k) for k,v in labels.items())
print(labels)
# Load the trained model.
model = load_model('model_CnnModelTrainKaggleCatDog_DateAugmentation.h5')
# Convert an image file into the array format the model expects.
def read_image(img_path):
    """Load an image and return it as a (1, 150, 150, C) batch array.

    Parameters: img_path -- path of the image file to classify.
    Raises: re-raises any loading error after printing the offending path.
    """
    try:
        img = image.load_img(img_path, target_size=(150, 150))
    except Exception as e:
        print(img_path, e)
        # Bug fix: the original swallowed the error and fell through to
        # img_to_array with `img` undefined, raising a confusing NameError.
        raise
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    return img
# Pick one test image, display it, and run it through the model.
filename = "kagglecatdog/test/cat/cat.1684.jpg"
plt.figure()
im = Image.open(filename)
im_list = np.asarray(im)
plt.title("predict")
plt.axis("off")
plt.imshow(im_list)
plt.show()
img = read_image(filename)
pred = model.predict(img)[0]
# Bug fix: `labels` is keyed by the integer class indices built above, but
# pred[0] is a float score, so `labels[pred[0]]` raised KeyError unless the
# output happened to be exactly 0.0 or 1.0.  Map the prediction to an int
# index first (argmax for multi-unit output, rounding for a single score).
pred_class = int(np.argmax(pred)) if len(pred) > 1 else int(round(pred[0]))
print('辨識結果:', labels[pred_class])
| [
"dorahero2727@gmail.com"
] | dorahero2727@gmail.com |
1cd7ea419f68dfffbd358789871d2d9fd90a5a26 | 8690ff3a6a1ca748aebb381bd50fdb317babbaf8 | /utils/ops.py | b5f81000648ecedae427cf12334d3a082dd1fddf | [] | no_license | thoppe/postern_perception | 8457bd1f89fb198191c4152d3354036ad4369d20 | b2b8dda375d1a0430c2cadcd5994e1fbd7a23676 | refs/heads/master | 2023-04-07T17:48:52.816426 | 2020-01-29T16:22:04 | 2020-01-29T16:22:04 | 216,728,164 | 0 | 0 | null | 2023-03-25T18:13:20 | 2019-10-22T05:12:42 | Python | UTF-8 | Python | false | false | 6,822 | py | # This script contains all neural network layers and functions that are used
# the project.
from __future__ import division
import tensorflow as tf
import numpy as np
weight_init = tf.contrib.layers.xavier_initializer()
def instance_norm(x, scope='instance_norm'):
    """ Wrapper of instance normalization.
    Normalizes with epsilon=1e-05 and learnable center/scale parameters.
    Parameters
    ----------
    x: tensor.
    scope: name of the scope.
    Returns
    -------
    normalized tensor.
    """
    return tf.contrib.layers.instance_norm(
        x, epsilon=1e-05, center=True, scale=True, scope=scope)
def conv2d(input_, output_dim, d_h=2, d_w=2, scope='conv_0',
           conv_filters_dim=4, padding='zero', use_bias=True, pad=0):
    """ Wrapper of convolutional operation.
    Parameters
    ----------
    input_: a 4d tensor.
    output_dim: int, output channels.
    d_h: int, height of stride.
    d_w: int, width of stride.
    scope: str, name of variable scope.
    conv_filters_dim: int, size of kernel, width = height.
    padding: str, strategy of padding, one of "zero" and "reflect"
        (any other value applies no explicit padding at all).
    use_bias: bool, whether to use bias in this layer.
    pad: int, size of padding.
    Returns
    -------
    conv: output 4d tensor.
    """
    # Weights ~ N(0, 0.02), biases start at zero.
    k_initializer = tf.random_normal_initializer(stddev=0.02)
    b_initializer = tf.constant_initializer(0)
    k_h = k_w = conv_filters_dim
    with tf.compat.v1.variable_scope(scope):
        if padding == 'zero':
            # Pad only the two spatial dimensions.
            x = tf.pad(
                input_,
                [[0, 0], [pad, pad], [pad, pad], [0, 0]])
        elif padding == 'reflect':
            x = tf.pad(
                input_,
                [[0, 0], [pad, pad], [pad, pad], [0, 0]],
                mode='REFLECT')
        else:
            x = input_
        # tf.layers.conv2d keeps its default VALID padding, so all padding
        # is handled explicitly above.
        conv = tf.layers.conv2d(
            x,
            output_dim,
            kernel_size=[k_h, k_w],
            strides=(d_h, d_w),
            kernel_initializer=k_initializer,
            bias_initializer=b_initializer,
            use_bias=use_bias)
    return conv
def deconv2d(input_, output_dim, d_h=2, d_w=2, scope='deconv_0',
             conv_filters_dim=4, padding='SAME', use_bias=True):
    """Transposed convolution (fractional stride convolution) layer.
    Parameters
    ----------
    input_: tensor, input image.
    output_dim: int, number of channels.
    d_h: int, height of stride.
    d_w: int, width of stride.
    scope: str, name of scope.
    conv_filter_dim: int, kernel size.
    padding: str, "same" or "valid", case insensitive.
    use_bias: bool, use bias or not.
    Returns
    -------
    deconv: tensor, output tensor.
    """
    # Same initialization as conv2d above: weights ~ N(0, 0.02), zero bias.
    k_initializer = tf.random_normal_initializer(stddev=0.02)
    b_initializer = tf.constant_initializer(0)
    k_h = k_w = conv_filters_dim
    deconv = tf.layers.conv2d_transpose(
        inputs=input_,
        filters=output_dim,
        kernel_size=[k_h, k_w],
        strides=(d_h, d_w),
        padding=padding,
        kernel_initializer=k_initializer,
        bias_initializer=b_initializer,
        use_bias=use_bias,
        name=scope)
    return deconv
def relu(input_):
    """ Wrapper of ReLU function.
    Parameters
    ----------
    input_: tensor.
    Returns
    -------
    tensor, max(0, x) applied element-wise.
    """
    return tf.nn.relu(input_)
def lrelu(input_):
    """ Wrapper of LeakyReLU function.
    Parameters
    ----------
    input_: tensor.
    Returns
    -------
    tensor.
    """
    # NOTE: alpha=0.01 is set explicitly (tf.nn.leaky_relu defaults to 0.2).
    return tf.nn.leaky_relu(input_, alpha=0.01)
def tanh(input_):
    """ Wrapper of tanh function.
    Parameters
    ----------
    input_: tensor.
    Returns
    -------
    tensor, values squashed into (-1, 1).
    """
    return tf.tanh(input_)
def l1_loss(x, y):
    """Mean absolute difference between two tensors.

    Parameters
    ----------
    x: tensor.
    y: tensor, which should have the same shape as x.

    Returns
    -------
    loss: scalar, l1 loss.
    """
    abs_diff = tf.abs(x - y)
    return tf.reduce_mean(abs_diff)
def l2_loss(x, y):
    """Batch mean of the per-sample sum of squared differences.

    Parameters
    ----------
    x: tensor.
    y: tensor, which should have the same shape as x.

    Returns
    -------
    loss: scalar, l2 loss.
    """
    per_sample = tf.reduce_sum(tf.square(x - y), axis=[1, 2, 3])
    return tf.reduce_mean(per_sample)
def content_loss(endpoints_mixed, content_layers):
    """ Content loss.
    Ref: https://arxiv.org/abs/1603.08155.
    Parameters
    ----------
    endpoints_mixed: dict, (name, tensor); each tensor stacks the two
        compared batches along axis 0.
    content_layers: list, name of layers used.
    Returns
    -------
    loss: scalar, content loss.
    """
    loss = 0
    for layer in content_layers:
        # Split the stacked activations back into the two compared halves.
        feat_a, feat_b = tf.split(endpoints_mixed[layer], 2, 0)
        size = tf.size(feat_a)
        # tf.nn.l2_loss is sum(sq)/2, so *2/size yields the mean squared error.
        loss += tf.nn.l2_loss(feat_a - feat_b) * 2 / tf.to_float(size)
    return loss
def style_loss(endpoints_mixed, style_layers):
    """ Style loss.
    Ref: https://arxiv.org/abs/1603.08155.
    Parameters
    ----------
    endpoints_mixed: dict, (name, tensor); each tensor stacks the two
        compared batches along axis 0.
    style_layers: list, name of layers used.
    Returns
    -------
    loss: scalar, style loss.
    """
    loss = 0
    for layer in style_layers:
        # Split the stacked activations back into the two compared halves.
        feat_a, feat_b = tf.split(endpoints_mixed[layer], 2, 0)
        size = tf.size(feat_a)
        # Compare Gram matrices; tf.nn.l2_loss is sum(sq)/2, hence the *2.
        loss += tf.nn.l2_loss(
            gram(feat_a) - gram(feat_b)) * 2 / tf.to_float(size)
    return loss
def gram(layer):
    """ Compute gram matrix.
    Ref: https://arxiv.org/abs/1603.08155.
    Parameters
    ----------
    layer: tensor, shape (num_images, width, height, num_filters).
    Returns
    -------
    grams: gram matrices, shape (num_images, num_filters, num_filters).
    """
    shape = tf.shape(layer)
    num_images = shape[0]
    width = shape[1]
    height = shape[2]
    num_filters = shape[3]
    # Flatten each image's spatial grid: rows are pixels, columns filters.
    features = tf.reshape(layer, tf.stack([num_images, -1, num_filters]))
    # Normalize by the total number of elements per image.
    denominator = tf.to_float(width * height * num_filters)
    grams = tf.matmul(features, features, transpose_a=True) / denominator
    return grams
def angular2cart(angular):
    """Convert (yaw, pitch) angles in degrees to 3D cartesian coordinates.

    Parameters
    ----------
    angular: array of shape (N, 2), columns are [yaw, pitch] in degrees.

    Returns
    -------
    np.array of shape (N, 3) holding the (x, y, z) unit vectors.
    """
    yaw = angular[:, 0] / 180.0 * np.pi
    pitch = angular[:, 1] / 180.0 * np.pi
    cos_pitch = np.cos(pitch)
    xyz = [cos_pitch * np.sin(yaw), np.sin(pitch), cos_pitch * np.cos(yaw)]
    return np.stack(xyz, axis=1)
def angular_error(x, y):
    """Angular error (degrees) between paired (yaw, pitch) gaze directions.

    Parameters
    ----------
    x: array (N, 2) of [yaw, pitch] in degrees.
    y: array (N, 2) of [yaw, pitch] in degrees.

    Returns
    -------
    np.array of N angular differences in degrees.
    """
    u = angular2cart(x)
    v = angular2cart(y)
    # Cosine similarity of the corresponding 3D direction vectors.
    dot = np.sum(u * v, axis=1)
    norms = np.sqrt(np.sum(np.square(u), axis=1)) * np.sqrt(np.sum(np.square(v), axis=1))
    # Clip into arccos' valid domain to guard against rounding drift.
    cos_sim = np.clip(dot / norms, -1.0, 1.0)
    return np.arccos(cos_sim) * 180.0 / np.pi
| [
"travis.hoppe@gmail.com"
] | travis.hoppe@gmail.com |
e69edb98dd34169b7e5de559b90da43214d3dfd4 | 69f5b6defd7d2dc1664799bcaa5fad3fb1af4c7f | /script/2010_paper/prepare_data.py | c29e789e9a56f3fbc1dc50eb6eddb698ed957348 | [] | no_license | leelabcnbc/unsup-pytorch | c48ff0232b4baf5d50b406d696a4f460e8521b90 | 35e8aa2ef687c2b32a5838e57ea07babe0c1abbb | refs/heads/master | 2020-03-18T18:22:33.342249 | 2018-09-07T17:38:17 | 2018-09-07T17:38:17 | 135,088,622 | 2 | 0 | null | 2018-06-20T02:12:55 | 2018-05-27T23:22:57 | Python | UTF-8 | Python | false | false | 2,493 | py | """this file prepares the data for the 2010 NIPS paper
of conv PSD
Koray Kavukcuoglu, Pierre Sermanet, Y-Lan Boureau, Karol Gregor, Michaël Mathieu, Yann LeCun:
Learning Convolutional Feature Hierarchies for Visual Recognition. NIPS 2010: 1090-1098
I will prepare 1000000 25x25 patches, which should be sufficient.
"""
import os
import numpy as np
import h5py
from torch.utils.serialization.read_lua_file import load_lua
from unsup import dir_dictionary
def load_raw_data():
    """Load the Berkeley LCN-preprocessed image tensor shipped with the
    reference Torch code and return it as a numpy array."""
    bin_path = os.path.join(dir_dictionary['debug_reference'],
                            'tr-berkeley-N5K-M56x56-lcn.bin')
    return load_lua(bin_path).numpy()
def sample_from_raw_data(std_threshold=0.2, seed=0, ddof=1,
                         num_im=1000000):
    """Randomly crop `num_im` 25x25 patches from the 5000x56x56 raw images.

    Patches whose sample std (computed with the given `ddof`) is at or
    below `std_threshold` are rejected and re-drawn.

    Returns a dict with the raw data, the accepted patches as float32, and
    the image/row/column index of every accepted patch.
    """
    # this ddof stuff really should not matter.
    # here I just want to follow what's done in the original code as much as possible.
    # (A stray dead `pass` statement that sat here has been removed.)
    raw_data = load_raw_data()
    assert raw_data.shape == (5000, 56, 56)
    rng_state = np.random.RandomState(seed=seed)
    collected = 0
    all_imgs = []
    all_img_idx = []
    all_r_idx = []
    all_c_idx = []
    while collected < num_im:
        if collected % 10000 == 0:
            print(collected)
        # randomly select an image, then a 25x25 patch position inside it
        im_idx = rng_state.randint(5000)
        r_idx, c_idx = rng_state.randint(56 - 25 + 1, size=(2,))
        im_candidate = raw_data[im_idx, np.newaxis, r_idx:r_idx + 25, c_idx:c_idx + 25]
        if np.std(im_candidate, ddof=ddof) <= std_threshold:
            continue  # reject low-variance (nearly uniform) patches
        else:
            collected += 1
            # save as float32 to save space
            all_imgs.append(im_candidate.astype(np.float32))
            all_img_idx.append(im_idx)
            all_r_idx.append(r_idx)
            all_c_idx.append(c_idx)
    return {
        'raw_data': raw_data,
        'data': np.asarray(all_imgs),
        'idx_img': np.asarray(all_img_idx),
        'idx_r': np.asarray(all_r_idx),
        'idx_c': np.asarray(all_c_idx),
    }
if __name__ == '__main__':
    data_dict = sample_from_raw_data()
    # save as hdf5 next to this script (the old comment said "npy", which
    # was wrong)
    with h5py.File(os.path.join(os.path.split(__file__)[0], 'data.hdf5')) as f:
        if 'data' not in f:
            # gzip barely helped here: 2.4G vs 2.2G. not worth it.
            # f.create_dataset('data', data=a, compression='gzip')
            # Write every array only if the file was not populated before.
            for k, v in data_dict.items():
                print(k, v.shape)
                f.create_dataset(k, data=v)
| [
"zym1010@gmail.com"
] | zym1010@gmail.com |
0e64bc4b8ddf9d83ec635386f2315eb33db3939d | 85c426913d63773c4802a4a3c354df909030654b | /python/PF/ABCBank_CreditCard_System_List/iCard/Read_Write_Reward_Scheme.py | d35d948984cc96537c38eeaaff32b5488d9658f5 | [] | no_license | SensehacK/playgrounds | 17bf2a3133db6c0cafe185c4cc2c7b59862980aa | 3decd550cdb6034db8b497051acaaec8221073aa | refs/heads/master | 2023-05-11T20:05:31.680168 | 2023-04-30T00:01:58 | 2023-04-30T00:01:58 | 159,632,542 | 1 | 0 | null | 2023-03-05T11:34:34 | 2018-11-29T08:27:53 | Python | UTF-8 | Python | false | false | 1,561 | py | import csv
'''This function fetches the details of all reward schemes from SchemeDetails CSV file
in 3 lists and returns them as tuple of lists.
Input: Path of CSV file.
Output: A tuple of lists i.e. card type list, min transaction amount list and associated reward points list.
'''
def get_reward_scheme_details():
    """Read every reward scheme from the SchemeDetails CSV file.

    Output: a tuple of three parallel lists -- card types, minimum
    transaction amounts and associated reward points, one entry per row.
    """
    card_type_list=[]
    min_trasaction_amt_list=[]
    reward_point_list=[]
    # `with` guarantees the handle closes even if a malformed row raises.
    with open("..\\SuppliedFiles\\SchemeDetails.csv","r") as file_pointer:
        for reward_detail in csv.reader(file_pointer):
            if not reward_detail:
                continue  # csv.reader yields [] for blank lines; skip them
            card_type_list.append(reward_detail[0])
            min_trasaction_amt_list.append(reward_detail[1])
            reward_point_list.append(reward_detail[2])
    return (card_type_list,min_trasaction_amt_list,reward_point_list)
'''This function updates the details of reward schemes in SchemeDetails CSV file which
are received as parameters in form of lists.
Input: A tuple of lists i.e. card type list, min transaction amount list and associated reward points list.
Output: Updates the CSV file.
'''
def set_reward_scheme_details(card_type_list,min_trasaction_amt_list,reward_point_list):
    """Overwrite the SchemeDetails CSV file with the given scheme details.

    Input: three parallel lists -- card types, minimum transaction amounts
    and associated reward points (one entry per scheme).
    Output: rewrites the CSV file, one "type,amount,points" row per scheme.
    """
    # Open once in write mode (which truncates) instead of the previous
    # pattern of truncating first and then re-opening the file in append
    # mode for every single row; `with` also guarantees the handle closes.
    with open("..\\SuppliedFiles\\SchemeDetails.csv","w") as f:
        for card_type, min_amt, points in zip(card_type_list,min_trasaction_amt_list,reward_point_list):
            f.write(str(card_type)+","+str(min_amt)+","+str(points)+'\n')
| [
"kautilyasave@gmail.com"
] | kautilyasave@gmail.com |
807997fdb4c00db45d521df97a537eeef8ba9932 | 246e9200a834261eebcf1aaa54da5080981a24ea | /hackerrank/algorithms/warmups/time-conversion.py | f99051f21c296ca29067abeae81b12d14b231abc | [] | no_license | kalsotra2001/practice | db435514b7b57ce549b96a8baf64fad8f579da18 | bbc8a458718ad875ce5b7caa0e56afe94ae6fa68 | refs/heads/master | 2021-12-15T20:48:21.186658 | 2017-09-07T23:01:56 | 2017-09-07T23:01:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | s = raw_input()
# Convert a 12-hour hh:mm:ssAM/PM timestamp to 24-hour format (Python 2).
time = s[:-2]  # strip the trailing AM/PM marker
if s[-2:] == "AM":
    if s[:2] == "12":
        # 12:xx:xx AM is midnight -> 00:xx:xx
        time = "00" + time[2:]
else:
    if s[:2] != "12":
        # PM hours other than 12 shift by 12 (e.g. 07 PM -> 19)
        time = str(int(time[:2]) + 12) + time[2:]
print time
"jacquelineluo95@gmail.com"
] | jacquelineluo95@gmail.com |
fb23db33b5e66fcfe17c61e18a6d04be312b9c1f | 063ab6c256b5c60406c7d4ee6820dbbf8192efa9 | /ros_ws/build/learning_ros_external_pkgs_noetic/baxter_simulator/baxter_sim_examples/catkin_generated/pkg.develspace.context.pc.py | 7b7827d082d8236e15fb3543a5214218394f47d0 | [] | no_license | Iris-ye233/final-project_revised | d34aa55f6bba2f5b73b4f3a255f5041bdf7c71fc | 8ab78592b3fe79c8fa359cc877a52192784d2152 | refs/heads/master | 2023-05-13T18:11:04.802393 | 2021-06-03T16:27:01 | 2021-06-03T16:27:01 | 371,312,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy;rospack;baxter_core_msgs;baxter_gazebo;baxter_interface;baxter_tools;baxter_tools;gazebo_ros;gazebo_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "baxter_sim_examples"
PROJECT_SPACE_DIR = "/home/yedi/ros_ws/devel"
PROJECT_VERSION = "1.2.12"
| [
"1155150731@link.cuhk.edu.hk"
] | 1155150731@link.cuhk.edu.hk |
33e9f7059135f1c5a0be98fa9d79e928bea92868 | f8df1bff1dccbc1b4cf67cb7765ce75b17777aa3 | /app/glapp/shane/openglpanel.py | 0d6b28812f848688b60359540cd5ecc798c316ea | [] | no_license | juancq/py-interactive-genetic-algorithm | d74048338da283acd9545aab00f435b5c500d669 | d1b388e001232040e966fd3077722ed2560d1d9e | refs/heads/master | 2021-01-10T20:57:58.446987 | 2017-05-26T07:42:10 | 2017-05-26T07:42:10 | 4,923 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | import wx
from gui import feedbackpanel
class OpenGLPanel(feedbackpanel.FeedbackPanel):
    """Feedback panel that hosts an OpenGL canvas (IGAGLCanvas)."""
    def __init__(self, parent, id, data = None, tick = 100, size = (250, 250)):
        """Build the panel and embed the canvas.

        data/tick are passed straight to IGAGLCanvas (tick is presumably a
        refresh period -- confirm in openglcanvas); size is in pixels.
        """
        feedbackpanel.FeedbackPanel.__init__(self, parent, id, size = size)
        # Imported lazily so the OpenGL module is only loaded when a panel
        # is actually created.
        import openglcanvas
        canvas = openglcanvas.IGAGLCanvas(self, data, tick = tick, size = size)
        self.sizer.Add(canvas, 1, wx.EXPAND)
        self.Layout()
#------------------------------------------#
| [
"juan@dragonite.(none)"
] | juan@dragonite.(none) |
aaa10d917c12333b2b0b5f49def8cf9e4fdbdc10 | 81fe7f2faea91785ee13cb0297ef9228d832be93 | /AdventOfCode/21/day15.py | ea974830ec31ba50613e0a88d6ea0b7888d7b660 | [] | no_license | blegloannec/CodeProblems | 92349c36e1a35cfc1c48206943d9c2686ea526f8 | 77fd0fa1f1a519d4d55265b9a7abf12f1bd7d19e | refs/heads/master | 2022-05-16T20:20:40.578760 | 2021-12-30T11:10:25 | 2022-04-22T08:11:07 | 54,330,243 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | #!/usr/bin/env python3
import sys
from heapq import *
INF = 1<<30
def dijkstra(Map, i0, j0, i1, j1):
    """Cheapest 4-connected path cost from (i0, j0) to (i1, j1).

    Map is a square grid of per-cell entry costs; the start cell's own
    cost is not counted.  Returns the minimal total cost of cells entered.

    Improvement: the INF sentinel is now local, so the function no longer
    depends on a module-level global and can be reused in isolation.
    """
    INF = 1 << 30  # effectively infinite for these grids
    S = len(Map)
    Dist = [[INF] * S for _ in range(S)]
    Dist[i0][j0] = 0
    Q = [(0, i0, j0)]
    while Q:
        d, i, j = heappop(Q)
        if i == i1 and j == j1:
            break  # target settled; its distance is final
        if d > Dist[i][j]:
            continue  # stale heap entry
        for vi, vj in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
            if 0 <= vi < S and 0 <= vj < S and Dist[i][j] + Map[vi][vj] < Dist[vi][vj]:
                Dist[vi][vj] = Dist[i][j] + Map[vi][vj]
                heappush(Q, (Dist[vi][vj], vi, vj))
    return Dist[i1][j1]
def main():
    """AoC 2021 day 15: read the risk grid from stdin, print both answers."""
    # Part 1: cheapest path across the grid as given.
    Map = [list(map(int, L.strip())) for L in sys.stdin.readlines()]
    S = len(Map)
    print(dijkstra(Map, 0,0, S-1,S-1))
    # Part 2: the grid tiled 5x5; each tile adds its (tile-row + tile-col)
    # offset to every risk value, wrapping from 9 back to 1.
    S5 = 5*S
    Map5 = [[(Map[i%S][j%S]+i//S+j//S-1)%9+1 for j in range(S5)] for i in range(S5)]
    print(dijkstra(Map5, 0,0, S5-1,S5-1))
main()
| [
"blg@gmx.com"
] | blg@gmx.com |
7dc59258a5b3fc8644e0f31266ec92fa17934dde | 7f5a9a470f9a89108fca0280018b0563e9a0207a | /wykres_masy_calkowitej_od_czasu_box.py | 74ce9195d6999085a995c02868eb15e0ec158cff | [] | no_license | b-doroszuk/wykresy_kruszarki | dffa18b1b4856c7a29dfd4039960676cd5c40be0 | e68d22f034bd1c866393c0b0edacdebace393dd3 | refs/heads/main | 2023-06-02T04:39:07.238948 | 2021-06-19T21:00:37 | 2021-06-19T21:00:37 | 378,262,957 | 0 | 0 | null | 2021-06-18T20:55:05 | 2021-06-18T20:55:04 | null | UTF-8 | Python | false | false | 6,882 | py | from edempy import Deck
import numpy as np
from edempy import BoxBin, CylinderBin
import matplotlib.pyplot as plt
import matplotlib; matplotlib.use("TkAgg")
from time import strftime
def get_mass_time_box(time_step: int, deck, L_boxbin, R_boxbin):
    """Sum the particle mass inside both box bins at one simulation time step.

    Particle types 0-2 are real rock (lupek=shale, piaskowiec=sandstone,
    dolomit=dolomite); types 3-5 are the matching "dummy" particles.
    Returns (total_mass, time), where time is the simulation time in seconds.
    """
    # convert the time-step index into a simulation time value
    czas = deck.timestepKeys[time_step]
    # per-material mass accumulators (real and dummy particles separately)
    mass_lupek = 0
    mass_piaskowiec = 0
    mass_dolomit = 0
    mass_dummy_lupek = 0
    mass_dummy_piaskowiec = 0
    mass_dummy_dolomit = 0
    """LUPEK"""
    # shale (particle type 0): ids falling inside the left and right bins
    binned_ids_L0_lupek = L_boxbin.getBinnedObjects(deck.timestep[time_step].particle[0].getIds(),
                                                    deck.timestep[time_step].particle[0].getPositions())
    binned_ids_R0_lupek = R_boxbin.getBinnedObjects(deck.timestep[time_step].particle[0].getIds(),
                                                    deck.timestep[time_step].particle[0].getPositions())
    # dummy shale particles (particle type 3)
    binned_ids_L0_dummy_lupek = L_boxbin.getBinnedObjects(
        deck.timestep[time_step].particle[3].getIds(),
        deck.timestep[time_step].particle[3].getPositions())
    binned_ids_R0_dummy_lupek = R_boxbin.getBinnedObjects(
        deck.timestep[time_step].particle[3].getIds(),
        deck.timestep[time_step].particle[3].getPositions())
    # sum shale mass over both bins
    for i in binned_ids_L0_lupek:
        mass_lupek += deck.timestep[time_step].particle[0].getMass(id=i)
    for i in binned_ids_R0_lupek:
        mass_lupek += deck.timestep[time_step].particle[0].getMass(id=i)
    # sum dummy-shale mass over both bins
    for i in binned_ids_L0_dummy_lupek:
        mass_dummy_lupek += deck.timestep[time_step].particle[3].getMass(id=i)
    for i in binned_ids_R0_dummy_lupek:
        mass_dummy_lupek += deck.timestep[time_step].particle[3].getMass(id=i)
    """PIASEK"""
    # sandstone (particle type 1) and its dummy counterpart (type 4)
    binned_ids_L1_piaskowiec = L_boxbin.getBinnedObjects(deck.timestep[time_step].particle[1].getIds(),
                                                         deck.timestep[time_step].particle[1].getPositions())
    binned_ids_R1_piaskowiec = R_boxbin.getBinnedObjects(deck.timestep[time_step].particle[1].getIds(),
                                                         deck.timestep[time_step].particle[1].getPositions())
    binned_ids_L0_dummy_piaskowiec = L_boxbin.getBinnedObjects(
        deck.timestep[time_step].particle[4].getIds(),
        deck.timestep[time_step].particle[4].getPositions())
    binned_ids_R0_dummy_piaskowiec = R_boxbin.getBinnedObjects(
        deck.timestep[time_step].particle[4].getIds(),
        deck.timestep[time_step].particle[4].getPositions())
    # sum sandstone mass over both bins
    for i in binned_ids_L1_piaskowiec:
        mass_piaskowiec += deck.timestep[time_step].particle[1].getMass(id=i)
    for i in binned_ids_R1_piaskowiec:
        mass_piaskowiec += deck.timestep[time_step].particle[1].getMass(id=i)
    # sum dummy-sandstone mass over both bins
    for i in binned_ids_L0_dummy_piaskowiec:
        mass_dummy_piaskowiec += deck.timestep[time_step].particle[4].getMass(id=i)
    for i in binned_ids_R0_dummy_piaskowiec:
        mass_dummy_piaskowiec += deck.timestep[time_step].particle[4].getMass(id=i)
    """DOLOMIT"""
    # dolomite (particle type 2) and its dummy counterpart (type 5)
    binned_ids_L2_dolomit = L_boxbin.getBinnedObjects(deck.timestep[time_step].particle[2].getIds(),
                                                      deck.timestep[time_step].particle[2].getPositions())
    binned_ids_R2_dolomit = R_boxbin.getBinnedObjects(deck.timestep[time_step].particle[2].getIds(),
                                                      deck.timestep[time_step].particle[2].getPositions())
    binned_ids_L0_dummy_dolomit = L_boxbin.getBinnedObjects(
        deck.timestep[time_step].particle[5].getIds(),
        deck.timestep[time_step].particle[5].getPositions())
    binned_ids_R0_dummy_dolomit = R_boxbin.getBinnedObjects(
        deck.timestep[time_step].particle[5].getIds(),
        deck.timestep[time_step].particle[5].getPositions())
    # sum dolomite mass over both bins
    for i in binned_ids_L2_dolomit:
        mass_dolomit += deck.timestep[time_step].particle[2].getMass(id=i)
    for i in binned_ids_R2_dolomit:
        mass_dolomit += deck.timestep[time_step].particle[2].getMass(id=i)
    # sum dummy-dolomite mass over both bins
    for i in binned_ids_L0_dummy_dolomit:
        mass_dummy_dolomit += deck.timestep[time_step].particle[5].getMass(id=i)
    for i in binned_ids_R0_dummy_dolomit:
        mass_dummy_dolomit += deck.timestep[time_step].particle[5].getMass(id=i)
    #print()
    #print(mass_lupek, mass_piaskowiec, mass_dolomit)
    #print(mass_dummy_lupek, mass_dummy_piaskowiec, mass_dummy_dolomit)
    #print()
    rock_mass = mass_lupek + mass_piaskowiec + mass_dolomit
    dummy_mass = mass_dummy_lupek + mass_dummy_piaskowiec + mass_dummy_dolomit
    total_mass = rock_mass + dummy_mass
    #print(rock_mass, dummy_mass)
    # returns the total mass together with the time in seconds
    return total_mass, czas
def main():
    """
    Configuration (edit the variables below):
    interval_table = [first time step, last time step, step interval]
    filepath = path to the EDEM .dem simulation file
    L_boxbin / R_boxbin = bin geometry, e.g. BoxBin([0, -0.8, -0.75], 3, 0.25, 1.5)
    is_draw = whether to show the plot (True / False)
    is_save = whether to save the plot to a PNG (True / False)
    WARNING: below time step 68 the script raises an error!
    """
    interval_table = [68, 260, 10]
    filepath = "C:\\Users\\Jakub\\PycharmProjects\\test2\\testownik11_prof_Robert_Krol\\projekt_2\\POLKOWICE_etap_2\\simulation_0\\simulation_0.dem"
    L_boxbin = BoxBin([0, -0.8, -0.75], 3, 0.25, 1.5)
    R_boxbin = BoxBin([0, 0.8, -0.75], 3, 0.25, 1.5)
    is_draw = True
    is_save = False
    deck = Deck(filepath)
    mass_list = []
    time = []
    # sample the total mass at every requested time step
    for i in range(interval_table[0], interval_table[1], interval_table[2]):
        print("krok czasowy: ", i)
        total_mass, czas = get_mass_time_box(time_step=i, deck=deck, L_boxbin=L_boxbin, R_boxbin=R_boxbin)
        mass_list.append(round(total_mass, 2))
        time.append(round(float(czas), 2))
    fig = plt.figure(figsize=(7, 6))
    axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    # plot mass [kg] against time [s]
    axes.plot(time, mass_list)
    axes.set_xlabel("czas [s]")
    axes.set_ylabel("masa [kg]")
    axes.set_title("Left BinBox")
    if is_save:
        plt.savefig(f"Left_BinBox_{strftime('%m_%d_%Y-%H_%M_%S')}.png")
    if is_draw:
        plt.show()
if __name__ == '__main__':
import sys
sys.exit(main())
| [
"you@example.com"
] | you@example.com |
cdd00f1aee1b6099e9869021c75ba1cf9dc318d7 | e6913abba3f5cfd396e62c7e514674dbcb3631bb | /vidfeat/_vertical_boxed.py | 135b8fb34c9ab722a5e7dcc6f9a0b18a65ef495b | [] | no_license | bwhite/vidfeat | f98b8511ad13347037c60d7026725a6149851a81 | c9e7c6a02b41951fc93f0cefe0c78b24f5731f59 | refs/heads/master | 2016-09-06T03:00:58.791493 | 2012-06-19T21:54:01 | 2012-06-19T21:54:01 | 1,878,956 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | import vidfeat
import imfeat
import sklearn.svm
class VerticalBoxedFrameFeature(vidfeat.ClassifierFrameFeature):
    """Frame classifier for vertically letter-boxed video frames.

    Wraps a linear SVM over imfeat's BlackBars feature; the C grid in
    ``svm_parameters`` is used for the SVM parameter search.
    """
    # shared feature extractor applied to every frame
    feature = imfeat.BlackBars()

    def __init__(self, *args, **kw):
        classifier = sklearn.svm.LinearSVC(class_weight='auto')
        # log-spaced C grid: 1, 1e3, 1e6, 1e9
        self.svm_parameters = [{'C': [10 ** x for x in range(0, 12, 3)]}]
        super(VerticalBoxedFrameFeature, self).__init__(classifier=classifier,
                                                        *args, **kw)

    def _feature(self, image):
        # delegate to the class-level BlackBars extractor
        return self.feature(image)

if __name__ == '__main__':
    vidfeat._frame_feature_main('vertical_boxed', vidfeat.VerticalBoxedFrameFeature, remove_bars=True)
| [
"bwhite@dappervision.com"
] | bwhite@dappervision.com |
8ced806cfdc062b9eed27d8c280a64109ff72856 | b87f66b13293782321e20c39aebc05defd8d4b48 | /mpi/mpi_merge.py | edf623650edaf9d1fc58f8f3c85293cfea3b2539 | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,959 | py | #!/usr/bin/env python
"""
Merge several HDF5 or ASCII files.
Merge all files that have a common (given) pattern in the name.
The patterns may be numbers and/or characters. Example: 'YYYYMMDD',
where YYYY is year, MM is month and DD is day.
"""
# Fernando <fpaolo@ucsd.edu>
# November 2, 2012
import os
import sys
import re
import numpy as np
import tables as tb
import argparse as ap
from mpi4py import MPI
# parse command line arguments
parser = ap.ArgumentParser()
parser.add_argument('files', nargs='+', help='HDF5 2D file[s] to merge')
parser.add_argument('-p', dest='pattern', default="_\d\d\d\d\d\d\d\d",
help="pattern to match in the file names, default '_\d\d\d\d\d\d\d\d'")
parser.add_argument('-o', dest='prefix', default='all_',
help='prefix of output file name, default all_')
parser.add_argument('-s', dest='suffix', default='',
help='suffix of output file name, default none')
parser.add_argument('-n', dest='count', action='store_const', const=True, \
default=False, help='count number of tasks and exit, default no')
args = parser.parse_args()
def close_files():
    """Close every HDF5 file PyTables currently has open.

    NOTE(review): relies on the private tables.file._open_files registry;
    confirm it exists in the installed PyTables version.
    """
    for fid in tb.file._open_files.values():
        fid.close()
def get_files_to_merge(files, pattern):
    """Group *files* by the distinct regex matches found in their names.

    Returns a dict mapping each unique match of *pattern* (collected over
    all file names) to the list of files whose name contains that match.
    """
    # np.unique both de-duplicates and sorts the matches
    keys = np.unique(re.findall(pattern, ' '.join(files)))
    return {key: [fname for fname in files if key in fname] for key in keys}
def get_fname_out(stem, fnamein, pref='', suf=''):
    """Build the merged file's name next to *fnamein*: <pref><stem><suf>.h5."""
    directory, _ = os.path.split(fnamein)
    basename = '%s%s%s.h5' % (pref, stem, suf)
    return os.path.join(directory, basename)
def get_shape_out(files):
    """Return (total rows, columns) for the concatenated '/data' arrays.

    Rows are summed over all *files*; the column count is taken from the
    last file read (all inputs are expected to share the same width).
    """
    nrows = 0
    for fname in files:
        f = tb.openFile(fname, 'r')
        data = f.getNode('/data')
        nrow, ncols = data.shape
        nrows += nrow
        f.close()
    return (nrows, ncols)
def merge_files(fname, shape, files):
    """Concatenate the '/data' arrays of *files* into a new HDF5 file *fname*.

    *shape* is (total rows, columns); rows are appended file by file into a
    zlib-compressed extendable array. All open HDF5 handles are closed at
    the end via close_files().
    """
    print 'merging:\n', files
    print 'into:\n', fname, '...'
    fout = tb.openFile(fname, 'w')
    nrows, ncols = shape
    atom = tb.Atom.from_type('float64')
    filters = tb.Filters(complib='zlib', complevel=9)
    # extendable array: starts with 0 rows, grows with each append below
    dout = fout.createEArray('/', 'data', atom=atom,
                             shape=(0, ncols), filters=filters)
    for fnamein in files:
        fin = tb.openFile(fnamein, 'r')
        data = fin.getNode('/data')
        dout.append(data[:])
    close_files()
    print 'done.'
def merge_all(tomerge, pref='', suf=''):
    """Merge each {pattern: [files]} group of *tomerge* into one output file."""
    for patt, fnames in tomerge.items():
        # the output is written next to the group's first input file
        fnameout = get_fname_out(patt, fnames[0], pref, suf)
        shape = get_shape_out(fnames)
        merge_files(fnameout, shape, fnames)
# MPI functions
def simple_partitioning(length, num_procs):
    """Split *length* items into num_procs near-equal integer chunk sizes.

    The first (length % num_procs) chunks receive one extra item, so the
    sizes differ by at most 1 and always sum to *length*.

    Fixed: the original used true division (`length/num_procs`), which
    under Python 3 yields floats and breaks the slicing done by callers.
    """
    base, remainder = divmod(length, num_procs)
    sublengths = [base] * num_procs
    for i in range(remainder):  # distribute the remainder, one per rank
        sublengths[i] += 1
    return sublengths
def get_subproblem_input_args(input_args, my_rank, num_procs):
    """Return this MPI rank's slice of *input_args* under simple partitioning."""
    chunk_sizes = simple_partitioning(len(input_args), num_procs)
    start = sum(chunk_sizes[:my_rank])  # items consumed by lower ranks
    stop = start + chunk_sizes[my_rank]
    return input_args[start:stop]
def program_to_run(string):
    """Return the interpreter prefix needed to run *string* from a shell."""
    # Python sources need an explicit interpreter; binaries run as-is.
    return 'python ' if '.py' in string else ''
#-------------
# If needed, uses `glob` to avoid Unix limitation on number of cmd args.
# To use it, instead of _file names_ pass a _str_ with "dir + file pattern".
if len(args.files) > 1:
    files = args.files
else:
    from glob import glob
    files = glob(args.files[0])
pattern = str(args.pattern)
pref = args.prefix
suf = args.suffix
count = args.count
#path, _ = os.path.split(files[0]) # path of first file
print 'pattern to match:', pattern
print 'total files:', len(files)
# Every MPI rank runs this script; each rank merges its own share of groups.
comm = MPI.COMM_WORLD
my_rank = comm.Get_rank()
num_procs = comm.Get_size()
tomerge = get_files_to_merge(files, pattern)
# with -n, only report how many merge tasks exist and quit
if count: print 'number of tasks:', len(tomerge.items()); sys.exit()
my_tomerge = get_subproblem_input_args(tomerge.items(), my_rank, num_procs)
merge_all(dict(my_tomerge), pref=pref, suf=suf)
close_files()
| [
"fspaolo@gmail.com"
] | fspaolo@gmail.com |
1d983087ace0527d39672656d8b1e6c4526ebcfd | 7838473d3688eb89b598198440c6769ef56701a6 | /pyvsphere/vmware_vswitch_facts.py | ee53f751dd107d27cc9efcaf6e8591706b9b6a56 | [] | no_license | Maliaotw/pyvsphere | 58029c4b1fad0667d87f9a36434f67209f9180ee | 7069cf566dae8d35c2770050ccb71342ed5d3d8e | refs/heads/main | 2023-03-03T14:05:19.526387 | 2021-02-09T16:34:07 | 2021-02-09T16:34:07 | 317,414,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,402 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_vswitch_facts
short_description: Gathers facts about an ESXi host's vswitch configurations
description:
- This module can be used to gather facts about an ESXi host's vswitch configurations when ESXi hostname or Cluster name is given.
- The vSphere Client shows the value for the number of ports as elastic from vSphere 5.5 and above.
- Other tools like esxcli might show the number of ports as 1536 or 5632.
- See U(https://kb.vmware.com/s/article/2064511) for more details.
version_added: '2.6'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Facts about vswitch belonging to every ESXi host systems under this cluster will be returned.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname to gather facts from.
- If C(cluster_name) is not given, this parameter is required.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather vswitch facts about all ESXi Host in given Cluster
vmware_vswitch_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: all_hosts_vswitch_facts
- name: Gather firewall facts about ESXi Host
vmware_vswitch_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
delegate_to: localhost
register: all_vswitch_facts
'''
RETURN = r'''
hosts_vswitch_facts:
description: metadata about host's vswitch configuration
returned: on success
type: dict
sample: {
"10.76.33.218": {
"vSwitch0": {
"mtu": 1500,
"num_ports": 128,
"pnics": [
"vmnic0"
]
},
"vSwitch_0011": {
"mtu": 1500,
"num_ports": 128,
"pnics": [
"vmnic2",
"vmnic1"
]
},
},
}
'''
from .mymodule import AnsibleModule
from .vcenter import VcenterConfig
from ansible.modules.cloud.vmware.vmware_vswitch_info import VswitchInfoManager
def vmware_vswitch_facts(VcenterConfig: VcenterConfig,esxi_hostname):
    """Gather vSwitch facts for *esxi_hostname* using the vCenter config.

    Builds an AnsibleModule-style argument spec from the vCenter
    credentials, queries the host's vSwitch configuration and exits with
    the gathered facts.
    """
    argument_spec = dict(
        cluster_name=False,
        esxi_hostname=esxi_hostname,
    )
    # merge in hostname/username/password from the vCenter config object
    argument_spec.update(**VcenterConfig.as_dict())
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    vmware_vswitch_mgr = VswitchInfoManager(module)
    module.exit_json(changed=False, hosts_vswitch_facts=vmware_vswitch_mgr.gather_vswitch_info())
    # NOTE(review): if exit_json terminates like stock Ansible's does, the
    # call below never runs — confirm against the local mymodule implementation.
    module.get_info(vmware_vswitch_mgr.gather_vswitch_info())
| [
"MaliaoTW@gmail.com"
] | MaliaoTW@gmail.com |
15717a292bdc89415c16f5ff81de7542e336cd37 | 3b1a13edca51449f015086acad1e5a51ae89cba5 | /lang/py/pylib/10/threading/threading_rlock.py | c9a98dd6693b1555f9e9282f076052e573461c52 | [
"MIT"
] | permissive | ch1huizong/study | 9000042d3ad13b06426b03423ee335aee15fd061 | d3b7c43fc03484839f8bbc7d0d056df1b1eba9cd | refs/heads/master | 2023-09-04T12:45:48.125715 | 2023-08-17T02:49:40 | 2023-08-17T02:49:40 | 162,539,130 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | #!/usr/bin/env python
# encoding: UTF-8
import threading
lock=threading.RLock()
# RLock is re-entrant: the owning thread may acquire it more than once.
print'First try:',lock.acquire()
# Non-blocking acquire(0) still succeeds because this thread already owns
# the RLock (a plain Lock would return False here).
print'Second try:',lock.acquire(0)
| [
"root@k.k.lab"
] | root@k.k.lab |
b4bb3b9601d9f2d0665fe916401fbf091bea4e6e | e41fc34b9d3d5aa94e5c6b843ee35fc1280ed6b5 | /app/settings/config_control.py | f59173df4c2a68fbdf9dd2416dc8f00ea26d6ec6 | [] | no_license | cleverbotdev/my_university_group_site | b69b03185ddbb6fca763f1394851e031cb3e304e | b4d4079dc2f942634f63b96e799050f6191d5aad | refs/heads/master | 2023-07-20T06:48:33.340436 | 2021-03-04T10:14:21 | 2021-03-04T10:14:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,536 | py | # -*- coding: utf-8 -*-
if __name__ == '__main__':
from os import chdir
from app.settings.config import HOME_DIR, SETTINGS_FILE, EXAMPLE_SETTINGS_FILE
chdir(HOME_DIR)
def create_new_settings(config_path, example_settings_filename):
    """Create *config_path* interactively from the example settings file.

    Lines in the example that contain the configured ``user_input_tag``
    are completed by prompting the user (prompts are in Russian) for each
    tagged key; every other line is copied through verbatim.
    """
    from configparser import ConfigParser
    # the 'list' converter turns comma-separated values into stripped lists
    example_cfg = ConfigParser(allow_no_value=True, converters={'list': lambda x: [i.strip() for i in x.split(',')]})
    example_cfg.read(example_settings_filename)
    # marker that flags "ask the user for this value" lines in the template
    user_input_tag = example_cfg.get("settings_ini_file", "user_input_tag")
    print("Config file not found!")
    print(f"I am trying to create {config_path}...")
    print(f"I am coping {example_settings_filename} and rename this to {config_path}")
    with open(f"{example_settings_filename}", "r", encoding="utf-8") as file, open(config_path, 'w',
                                                                                   encoding='utf-8') as wtiten_file:
        # For each tagged line, re-join its pieces with the user's answers;
        # untagged lines (and the tag-definition line itself) pass through.
        print(
            '\n'.join([(''.join([i + input(f"\nВведите пожалуйста {i.replace('=', '').strip()} для своей программы:\n")
                                 for i in filter(bool, string.split(user_input_tag))])
                        if user_input_tag in string and not string.startswith("user_input_tag") else string)
                       for string in iter(file.read().split('\n'))]), file=wtiten_file)
def create_cfg(config_path='',
               example_settings_filename=''):
    """Load the ConfigParser for *config_path*, creating the file if needed.

    Exits the process when neither the config nor the example template
    exists, or when creation from the template fails.
    """
    import sys
    from configparser import ConfigParser
    from os.path import exists
    if not exists(config_path) and not exists(example_settings_filename):
        print(f"Config file ({config_path}) not found! Exiting!")
        sys.exit(0)
    if not exists(config_path):
        # build the config interactively from the example template
        create_new_settings(config_path, example_settings_filename)
    if exists(config_path):
        cfg = ConfigParser(allow_no_value=True, converters={'list': lambda x: [i.strip() for i in x.split(',')]})
        cfg.read(config_path)
    else:
        print("Config not found! Exiting!")
        print(f"I can't create {SETTINGS_FILE}...")
        print(f"You can try cloning {EXAMPLE_SETTINGS_FILE} to {SETTINGS_FILE} and edit params into this")
        sys.exit(0)
    return cfg
def save_change_in_cinfig_file(cfg=None):
    """Write *cfg* back to SETTINGS_FILE, loading the config first when None.

    NOTE(review): the name looks like a typo of 'config'; kept as-is since
    callers elsewhere may rely on it.
    """
    if not cfg:
        cfg = create_cfg(SETTINGS_FILE, EXAMPLE_SETTINGS_FILE)
    with open(SETTINGS_FILE, "w") as config_file:
        cfg.write(config_file)
    return cfg
if __name__ == '__main__':
cfg = create_cfg(SETTINGS_FILE, EXAMPLE_SETTINGS_FILE)
| [
"rkbcu@mail.ru"
] | rkbcu@mail.ru |
e90effd3bbfd10d2539c58e07eaaef4ea30eb3a1 | 0b63f38c7fb468e478e5be82c685de1b7ddb87e5 | /meiduo/meiduo_mall/scripts/generate_detail_html.py | c1e6f9a5b13f24b3617489fe30fddf118e1edd65 | [
"MIT"
] | permissive | Highsir/Simplestore | fcf5ef81a754604c0953a3c1433a7bc09290c121 | 5fc4d9930b0cd1e115f8c6ebf51cd9e28922d263 | refs/heads/master | 2020-09-01T07:55:45.362457 | 2019-11-01T04:55:48 | 2019-11-01T04:55:48 | 218,913,913 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | # 1. 添加导包路径 (把 scripts 的上一级目录添加到导包路径sys.path)
import sys
sys.path.insert(0, '../')
# 2. 设置配置文件,初始化django环境
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "meiduo_mall.settings.dev")
django.setup()
# 3. 导包
from celery_tasks.html.tasks import generate_static_sku_detail_html
from goods.models import SKU
# 4. 功能逻辑
if __name__ == '__main__':
    # Regenerate the static detail page for every SKU in the database.
    skus = SKU.objects.all()
    for sku in skus:
        print(sku.id)  # progress indicator
        generate_static_sku_detail_html(sku.id)
| [
"highsir421@163.com"
] | highsir421@163.com |
1a73b6acb4d8371dc29e5f10e62860f6bc22386f | f1fcd165cd8444310ce5d201e481e3982dc28110 | /easy/1901/190108/jang.py | 25720a11955857329f172ec792f4b8f996ea5564 | [] | no_license | JoosJuliet/algoStudy | 310a71a0fcc8f3c23281544cf3458ed999040176 | 3fc1e850f9d8b9f290f41fddd59ff403fbfffa05 | refs/heads/master | 2020-04-20T19:26:25.485875 | 2019-03-27T22:37:27 | 2019-03-27T22:37:27 | 169,049,593 | 1 | 0 | null | 2019-02-04T08:43:07 | 2019-02-04T08:43:07 | null | UTF-8 | Python | false | false | 130 | py | from collections import Counter
input()  # first line is the element count; not needed for the computation
ans = 0
# every value that appears c times contributes c//2 matched pairs
for c in Counter(map(int, input().split())).values():
    ans += c//2
print(ans)
| [
"wkdtjsgur100@naver.com"
] | wkdtjsgur100@naver.com |
3567b6b9dbb5ba835c394efc4a0acaf1d521e739 | f60434c0a27f0f5ada2aa5607c94947890de5692 | /codezilla/sherlock.py | 4b6219a06e227af9a424e2f6c7b32393e09c7c36 | [
"MIT"
] | permissive | AnuragAnalog/codechef | 16aa7711e6471f6249874066105f50aee90436c3 | 348dd1d8daac356f0390ce124a263f6157495b1c | refs/heads/master | 2022-06-27T15:11:36.811069 | 2020-05-11T03:20:19 | 2020-05-11T03:20:19 | 262,927,296 | 1 | 0 | null | 2020-05-11T03:18:50 | 2020-05-11T03:16:15 | null | UTF-8 | Python | false | false | 372 | py | n = int(input())
inst = list(input())
string = ""
for i in inst:
if i == "1":
string = string + "a"
elif i == "2":
string = string + "bb"
elif i == "3":
string = string + "ab"
elif i == "4":
string = string.replace("a", "$")
string = string.replace("b", "a")
string = string.replace("$", "b")
print(string)
| [
"anurag.peddi1998@gmail.com"
] | anurag.peddi1998@gmail.com |
1b17c93b5d60e121444377dcc3f277dd0f2fce03 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /run/runcase_ipo.py | 22f8fbc94a9b59c8509cbbaec8ddff44282914d9 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import os
import time
import unittest
sys.path.append('/home/yhl2/workspace/xtp_test/Autocase_Result')
sys.path.append('/home/yhl2/workspace/xtp_test/utils')
import CaseServiceIpo
# 深圳现价case批量执行
def runCases(cases, path, filename, sheet_name):
    '''Load the listed test cases in sequence order and run them.

    Fixed: the function was defined with three parameters but every call
    site passes four (``cases`` first), which raised a TypeError; the
    ``print cls`` statement was also Python-2-only.

    :param cases: collection of case module names (accepted for call-site
                  compatibility; the actual case list comes from the Excel file)
    :param path: directory containing the Excel parameter file
    :param filename: Excel file holding the case parameters
    :param sheet_name: worksheet to read the cases from
    :return: None
    '''
    suite_cases = []
    excel_file = os.path.join(path, filename)
    case_service = CaseServiceIpo.CaseService(excel_file, sheet_name)
    # sort by sequence key so the cases run in their declared order
    d = [(k, case_service.testcase_seq_dict[k]) for k in sorted(case_service.testcase_seq_dict.keys())]
    for (k, case) in d:
        m = __import__(case['pyname'])
        cls = getattr(m, case['pyname'])
        print(cls)
        suite_case = unittest.TestLoader().loadTestsFromTestCase(cls)
        suite_cases.append(suite_case)
    suite = unittest.TestSuite(suite_cases)
    unittest.TextTestRunner(verbosity=2).run(suite)
def getCases(casepath):
    """List case module names found in *casepath*.

    A file qualifies when its name ends in 'py' and is not __init__.py;
    the returned name is the file name truncated at the first '.py'.
    """
    names = []
    for entry in os.listdir(casepath):
        if entry == '__init__.py' or entry[-2:] != 'py':
            continue
        names.append(entry[:entry.find('.py')])
    return names
def run_case(casepath_yw,filename,sheetname):
    """Run every case under Autocase_Result/<casepath_yw> against the given Excel sheet."""
    casepath = '/home/yhl2/workspace/xtp_test/Autocase_Result/'+casepath_yw
    cases = getCases(casepath)
    path = '/home/yhl2/workspace/xtp_test/utils'
    # NOTE(review): confirm runCases' signature accepts these four arguments.
    runCases(cases, path, filename, sheetname)
if __name__ == '__main__':
    # directory holding the generated case .py files
    casepath = '/home/yhl2/workspace/xtp_test/Autocase_Result'
    cases = getCases(casepath)
    path = '/home/yhl2/workspace/xtp_test/utils'
    runCases(cases, path, u'普通业务自动化用例.xlsx',u'新股申购_深圳')
| [
"418033945@qq.com"
] | 418033945@qq.com |
bf43527a3d5127746b93a44909d325e5c4ebbe32 | 1bdb0da31d14102ca03ee2df44f0ec522b0701a4 | /Lombardia/AlfaVarese/3-FoundReportList.py | ef7ade81f4816ecdf095f5b44ed170800b914cf7 | [] | no_license | figuriamoci/Acqua | dc073d90c3c5e5899b22005685847916de1dfd95 | aef22fcd0c80c92441e0e3df2468d7a2f23a848a | refs/heads/master | 2020-12-15T04:00:26.855139 | 2020-06-08T21:17:55 | 2020-06-08T21:17:55 | 234,986,179 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | ##
from selenium import webdriver
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import logging,pandas as pd
import acqua.aqueduct as aq
gestore = "AlfaVarese"
aq.setEnv('Lombardia//'+gestore)
url = "https://www.alfasii.it/la-societa/servizi/acquedotto.html"
#
options = webdriver.ChromeOptions()
options.add_argument( '--ignore-certificate-errors' )
options.add_argument( '--incognito' )
options.add_argument( '--headless' )
locationList = pd.read_csv("Metadata/LocationList.csv")
#locationList = locationList[0:10]
foundReportList = pd.DataFrame()
##
for i,loc in locationList.iterrows():
    # A fresh browser session per location keeps each scrape independent.
    driver = webdriver.Chrome( "chromedriver", options=options )
    driver.implicitly_wait( 10 ) # seconds
    driver.get( url )
    time.sleep( 5 )  # give the store-locator widget time to populate
    try:
        alias_city = loc['alias_city']
        alias_address = loc['alias_address']
        # fixed: was "WebD riverWait", a broken identifier (SyntaxError)
        divWebElement = WebDriverWait( driver, 10 ).until( EC.visibility_of( driver.find_element_by_id( "sl_sidebar" ) ) )
        listWebElement = divWebElement.find_elements_by_tag_name("div")
        # Each entry's text is "<city>\n<address>\n..."; pick the matching one.
        cityWebElement = [c for c in listWebElement if c.text.split("\n")[0] == alias_city and c.text.split("\n")[1] == alias_address][0]
        driver.execute_script( "arguments[0].click();", cityWebElement )
        time.sleep(2)
        logging.info("Extract report for %s/%s (%s/%s)...",alias_city,alias_address,i+1,len(locationList))
        reportLinkWebElement = WebDriverWait( driver, 10 ).until( EC.visibility_of( driver.find_element_by_link_text("Scarica la tabella dei valori") ) )
        urlReport = reportLinkWebElement.get_attribute("href")
        row = {"alias_city":alias_city,"alias_address":alias_address,"urlReport":urlReport}
        foundReportList = foundReportList.append(row,ignore_index=True)
    except Exception:
        # Best-effort scrape: log the failure and move on to the next location.
        logging.critical("Skip %s/%s",alias_city,alias_address)
    driver.close()
##
foundReportList.to_csv('Metadata/ReportFoundList.csv',index=False)
| [
"an.fantini@gmail.com"
] | an.fantini@gmail.com |
74832a0ea32690e56228433ad4eb435b3f2d0185 | 8246e9fbdecdb37651e0d09497fd9428e434f33c | /FilmLocationFromGuidedWithSerial/admin.py | d179c1eeaa68276fef03e2c251f12fffe2bac988 | [] | no_license | rajeev1234/Landing-Page | 479995026ab01fc504a1e9502e7763dc04266009 | 4bfd22a6a1776907ba78b3dc9037064c820b049e | refs/heads/master | 2020-03-08T13:37:20.253252 | 2018-04-05T06:33:26 | 2018-04-05T06:33:26 | 128,162,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from django.contrib import admin
# Register your models in admin panels here.
from . import models
# declaring comments stack
class CommentInline(admin.TabularInline):
model = models.Comment
# attaching commment stack to FilmLocationFromGuidedWithSerial
class FilmLocationFromGuidedWithSerialAdmin(admin.ModelAdmin):
inlines = [CommentInline]
# calling in admin panel
admin.site.register(models.FilmLocationFromGuidedWithSerial, FilmLocationFromGuidedWithSerialAdmin)
admin.site.register(models.Comment)
| [
"ccrcian.rajeev1@gmail.com"
] | ccrcian.rajeev1@gmail.com |
be7339bac0388480a26e2b7a029ad4492e92b529 | 90b2ad813c96d630cd254475b0ad3a7a735011e5 | /codigo/MeteoSalon/MQTT_test.py | 8c23d94dc8cb58f56bd2ec98d7aef2b8a31a2d6f | [] | no_license | vtt-info/micropythonTutorial | dbcd4c13af442446c9816e4fdcd82b8eaaf6a27a | 67a58fb56bef4ef55c89cf76fc3ccde842b62ce6 | refs/heads/master | 2020-11-27T21:44:22.044221 | 2019-12-22T16:25:44 | 2019-12-22T16:25:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,470 | py | # MQTT test
# basado en https://randomnerdtutorials.com/micropython-mqtt-esp32-esp8266/
from umqttsimple import MQTTClient
import ubinascii
import machine
import Wemos # Facilita el identificar los pines
import MeteoSalon # Relacionado con los dispositivos conectados
import NeoPixelTHO # Relacioniado con el ledRGB
import time # Para las esperas
import helpFiles # para free y df
import utime
client_id = ubinascii.hexlify(machine.unique_id())
topic_sub = b'MeteoSalon'
topic_subFree = topic_sub + b'/free'
topic_subMem = topic_sub + b'/mem'
topic_subLed = topic_sub + b'/led'
topic_subTemp = topic_sub + b'/Temp'
topic_subHum = topic_sub + b'/Hum'
topic_subPress = topic_sub + b'/Press'
topic_subLedRGB = topic_sub + b'/ledRGB'
topic_pub = b'hello'
mqtt_server = '192.168.1.200'
def sub_CheckTopics(topic, msg):
    """MQTT callback: dispatch an incoming message by topic.

    Handles the LED on/off topic, the RGB-colour topic and the
    free-memory query topic; other topics are only printed.
    """
    print((topic, msg))
    if topic == topic_subLed: # Check for Led Topic
        if msg == b'On':
            print('Led:On')
            # led.off() on 'On' — presumably the LED is wired active-low;
            # TODO confirm against the board schematic.
            MeteoSalon.led.off()
        else:
            print('Led:Off')
            MeteoSalon.led.on()
    elif topic == topic_subLedRGB: ## Check for RGB Topic
        MeteoSalon.color(msg)
    elif topic == topic_subFree: ## Check for free memory
        # report the free heap back on the 'mem' topic
        freeMem = helpFiles.free()
        client.publish(topic_subMem, str(freeMem))
def connect_and_subscribe():
    """Connect to the MQTT broker and subscribe to the control topics.

    Stores the client in the module-level ``client`` global (used by the
    callback and the main loop) and also returns it.
    """
    global client, client_id, mqtt_server, topic_sub, topic_subLedRGB, topic_subLed
    client = MQTTClient(client_id, mqtt_server)
    client.set_callback(sub_CheckTopics)  # dispatcher for incoming messages
    client.connect()
    client.subscribe(topic_subFree)
    client.subscribe(topic_subLed)
    client.subscribe(topic_subLedRGB)
    print('Connected to %s MQTT broker, subscribed to %s topic' % (mqtt_server, topic_subFree))
    return client
def restart_and_reconnect():
    """Last-resort recovery: wait 10 s, then hard-reset the board."""
    print('Failed to connect to MQTT broker. Reconnecting...')
    time.sleep(10)
    machine.reset()  # reboot; the start-up code will reconnect
def mainBeta(everySeconds=60):
    """Main loop: poll MQTT and publish BME280 readings every *everySeconds*.

    Uses utime.ticks_ms()/ticks_diff() so the interval check survives the
    MicroPython tick counter wrapping around.
    """
    connect_and_subscribe() # connect and get a client reference
    last_Temp = utime.ticks_ms()
    while True :
        client.check_msg() # Check for new messages and call the callBack function
        now = utime.ticks_ms()
        if utime.ticks_diff(now, last_Temp) > (everySeconds*1000):
            last_Temp = now
            # publish the current sensor readings on their own topics
            client.publish(topic_subTemp, MeteoSalon.bme.temperature)
            client.publish(topic_subPress, MeteoSalon.bme.pressure)
            client.publish(topic_subHum, MeteoSalon.bme.humidity)
        time.sleep_ms(200)  # keep the loop light on the microcontroller
| [
"javacasm@gmail.com"
] | javacasm@gmail.com |
7c54c7b31d8d70ba4d82aa27a48606da121ed2d6 | 9c63f6d39a6085674ab42d1488476d0299f39ec9 | /Python/LC_Unique_Email_Addresses.py | 8d175c6c63fe339b19edb98152a443a9c2a31f7a | [] | no_license | vijayjag-repo/LeetCode | 2237e3117e7e902f5ac5c02bfb5fbe45af7242d4 | 0a5f47e272f6ba31e3f0ff4d78bf6e3f4063c789 | refs/heads/master | 2022-11-14T17:46:10.847858 | 2022-11-08T10:28:30 | 2022-11-08T10:28:30 | 163,639,628 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | class Solution(object):
def numUniqueEmails(self, emails):
"""
:type emails: List[str]
:rtype: int
Approach:
Split into local and domain.
Process accordingly
"""
new = set()
for email in emails:
local,domain = email.split('@')
if('+' in local):
local = local[:local.index('+')]
if('.' in local):
local = local.replace('.','')
new.add(local+'@'+domain)
return(len(new))
| [
"noreply@github.com"
] | vijayjag-repo.noreply@github.com |
275bc0dc169eb8d80100c4b3485b2de5f9c9a001 | 822d3cd484b54f0531fc205520c765a8321c0613 | /pyFile/9面向对象进阶/5.描述器/随堂笔记/12.反向操作符.py | 9d8711a8583579d14da0a7ab9391630953089a88 | [] | no_license | mghxy123/learnPython | 31d1cc18deeed5a89864ca0333fe488e0dbf08b4 | 00740e87d55a4dffd78773deaff8689485df31e8 | refs/heads/master | 2021-07-21T14:31:02.421788 | 2020-06-27T11:28:01 | 2020-06-27T11:28:01 | 187,751,182 | 0 | 0 | null | 2020-06-07T05:14:05 | 2019-05-21T02:58:35 | Python | UTF-8 | Python | false | false | 1,373 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : 12.反向操作符.py
# Author: HuXianyong
# Date : 2019/5/27 17:58
# print(type(NotImplemented)) #未实现的单值
# print(type(None))#未实现的单值
# print(type(NotImplementedError)) #异常类
class A:
    """Demo class for + operator overloading (__add__, __iadd__, __radd__)."""
    def __init__(self,x):
        self.x = x
    def __repr__(self):
        return "<A {}>".format(self.x)
    def __add__(self, other):
        # a + other: add the x attributes when the right operand has one,
        # otherwise try to coerce it to int (falling back to 0).
        print('add ~~~~~~~~~~~')
        if hasattr(other,'x'):
            return self.x +other.x
        else:
            try:
                x = int(other)
            except:
                x=0
            return self.x+x
    def __iadd__(self, other):
        # a += other: returns a new A rather than mutating in place
        print('iadd ~~~~~~~~~~~')
        return A(self.x+other.x)
    def __radd__(self, other):
        # other + a: used when the left operand cannot handle the addition
        print('radd ~~~~~~~~~~~')
        return self+other
a1 = A(4)
a2 = A(5)
print(a1+a2) #add int 9 a1.__add__(a2)
print(a2+a1)
# print(a2+1) #报错 调用的还是add
# print(2+a1) #报错,这里调的是radd 等价于1.__radd__(a1) int.a1__radd__(1,a1)
class B:
    """Demo: returning NotImplemented defers to the other operand's __radd__."""
    def __init__(self,x):
        self.x = x
    def __add__(self, other): # when the left operand overloads +, its method is tried first
        return NotImplemented # NotImplemented makes Python fall back to the right operand's __radd__
        # return 123
b1 = B(6)
print(a1+b1) #可以执行,a1.__add__(b1)
print(b1+a1) #可以执行,b1.__radd__(a1)
| [
"mghxy123@163.com"
] | mghxy123@163.com |
0893fddba045a950026684cfcf99ea17a23ccda4 | 2c3e0c3ef202375d998c9123934af09315d33fee | /LeetCode/Greedy Algorithms/jump_game.py | b08f9a1ecc1c6bde18a1346953fcd57c11047c44 | [] | no_license | Kalesh-Singh/Interviews2019 | e74f0ec22b1cb5fe178a38efc9c0ceea929e32f0 | e8fadb9636659a28f657fb43ee804761a215c37e | refs/heads/master | 2020-05-23T12:56:05.896620 | 2019-08-25T15:23:10 | 2019-08-25T15:23:10 | 186,767,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | class Solution:
def canJump(self, nums: 'List[int]') -> bool:
# Solution 1 - Dynamic Programming Bottom Up Approach
# n = len(nums)
# # Try determine whether we can reach the last index
# # starting from the right.
# results = [False] * n
# # We know we can get to the last index from itself
# # i.e. no jumps
# results[n - 1] = True
# for i in range(n - 2, -1, -1):
# maxJumpIndex = min(i + nums[i], n - 1)
# for j in range(i + 1, maxJumpIndex + 1):
# if results[j]:
# # If we can get to the end from j
# # and we can get to j from i
# # then we can get to the end from i
# results[i] = True
# break
# return results[0]
# Solution 2 - Greedy Approach
n = len(nums)
last_pos = n - 1
for i in range(n - 1, -1, -1):
if i + nums[i] >= last_pos:
last_pos = i
return last_pos == 0
| [
"kaleshsingh96@gmail.com"
] | kaleshsingh96@gmail.com |
5fc218c45331323e07ff14adde4a58c7ebcb9b5f | 15945660e0e9624693f11d7ec6460fb41d2f1ef9 | /tfx/utils/import_utils.py | 476d32d7366a4df7ac64158d12abebb7380674ae | [
"Apache-2.0"
] | permissive | HassanDayoub/tfx | f4a32cd6e25493d152a6f91b2cc26db94154d0a6 | dc9221abbb8dad991d1ae22fb91876da1290efae | refs/heads/master | 2020-05-30T18:44:31.410424 | 2019-05-31T22:06:53 | 2019-05-31T22:07:25 | 189,904,199 | 2 | 0 | Apache-2.0 | 2019-06-02T23:09:17 | 2019-06-02T23:09:17 | null | UTF-8 | Python | false | false | 1,171 | py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX type definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Text, Type
def import_class_by_path(class_path: Text) -> Type[Any]:
  """Import a class by its <module>.<name> path.

  Args:
    class_path: <module>.<name> for a class.

  Returns:
    Class object for the given class_path.
  """
  parts = class_path.split('.')
  classname = parts[-1]
  modulename = '.'.join(parts[:-1])
  # fromlist forces __import__ to return the leaf module, not the package root.
  module = __import__(modulename, fromlist=[classname])
  return getattr(module, classname)
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
c065a92fe428a5c1db1d2ed606c8bfba5a1d6d3b | 0c1d3807940f223c913aeadac31b85dc576b1dca | /app.py | b1656d84cd9a00fbce5cfecc0fdd87693551c67c | [] | no_license | wegamekinglc/QA | 4b2073e4f93d96c09c771d868914cef3367ab55f | c818442b06f5701feb6b38dcf6f20853d8ec6556 | refs/heads/master | 2020-06-11T00:41:02.003796 | 2019-06-26T04:46:36 | 2019-06-26T04:46:36 | 193,806,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,416 | py | from typing import Tuple
import requests
from flask import (
Flask, render_template, request, redirect
)
import pandas as pd
app = Flask(__name__)
# Disable column-width truncation in DataFrame.to_html so full answers render.
# The old sentinel -1 was deprecated in pandas 1.0 and removed in 2.0;
# None is the supported "no limit" value.
pd.set_option('display.max_colwidth', None)
def chunkstring(string, length):
    """Lazily yield consecutive slices of *string*, each at most *length* chars."""
    for offset in range(0, len(string), length):
        yield string[offset:offset + length]
def handle_response_hr(resp: Tuple[str, str] if False else dict) -> Tuple[str, str]:
    """Turn an HR-FAQ service reply into a (matched-flag, HTML answer) pair.

    code == 1 means the question matched a known FAQ entry: the answer is
    flattened (newlines removed) and re-wrapped with <br> every 50 chars.
    Anything else yields the canned "no answer yet" fallback text.
    """
    if resp['code'] == 1:
        flat = resp['data']['target_answer'].replace('\n', '')
        return '是', '<br>'.join(chunkstring(flat, 50))
    fallback = ("您好,这个问题您是商米第一位提到的呢,<br>"
                "暂时无法查询到对应答案哦。请您尝试调整搜索关键词或直接联系人力资源部张小桐(Tel:15651621590)来寻求帮助,<br>"
                "后续我们也会将您提出的问题完善到我的“大脑”中,谢谢您")
    return '否', fallback
def handle_response_cs(resp: dict) -> Tuple[str, str]:
    """Turn a customer-service FAQ reply into a (matched-flag, answer) pair.

    Matched answers are flattened and re-wrapped with <br> every 50 chars;
    unmatched questions get a short "transferring to a human" reply.
    """
    if resp['code'] == 1:
        flat = resp['data']['target_answer'].replace('\n', '')
        return '是', '<br>'.join(chunkstring(flat, 50))
    return '否', "您好,已经帮您转人工服务!"
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: GET renders the demo menu, POST redirects to the chosen demo."""
    if request.method == 'GET':
        return render_template('index.html', head='商米问答机器人测试')
    choice = request.form['submit_button']
    if choice == '商米HR问答Demo':
        return redirect('/hr')
    if choice == '客户服务问答Demo':
        return redirect('/cs')
@app.route('/hr', methods=['GET'])
def hr_form():
    """Render the HR demo page with an empty result table."""
    return render_template('hr_search.html',
                           hint="请输入测试问题",
                           head="商米HR问答Demo",
                           result="")
@app.route('/hr', methods=['POST'])
def hr_query():
    """Forward the submitted question to the HR FAQ backend, render the reply."""
    question = request.form['query']
    reply = requests.post('http://172.16.0.170:8126/faq', data={"question": question}).json()
    matched, answer = handle_response_hr(reply)
    # Single-row table: match flag + (possibly HTML) answer text.
    table = pd.DataFrame([[matched, answer]], columns=['是否匹配', '答案'])
    return render_template('hr_search.html',
                           hint="请输入测试问题",
                           head="商米HR问答Demo",
                           result=table.to_html(index=False, justify='center', classes='center', escape=False))
@app.route('/cs', methods=['GET'])
def cs_form():
    """Render the customer-service demo page with an empty result table."""
    return render_template('cs_search.html',
                           hint="请输入测试问题",
                           head="客户服务问答Demo",
                           result="")
@app.route('/cs', methods=['POST'])
def cs_query():
    """Forward the submitted question to the CS FAQ backend, render the reply."""
    question = request.form['query']
    reply = requests.post('http://172.16.0.170:8000/faq', data={"question": question}).json()
    matched, answer = handle_response_cs(reply)
    # Single-row table: match flag + answer text.
    table = pd.DataFrame([[matched, answer]], columns=['是否匹配', '答案'])
    return render_template('cs_search.html',
                           hint="请输入测试问题",
                           head="客户服务问答Demo",
                           result=table.to_html(index=False, justify='center', classes='center', escape=False))
if __name__ == '__main__':
    # 0.0.0.0 exposes the demo on all network interfaces, not just localhost.
    app.run(host="0.0.0.0")
| [
"scrappedprince.li@gmail.com"
] | scrappedprince.li@gmail.com |
37ef390c7e9d53f0d6bc90b5bb19dee5ee3d0338 | 28deae4b6f2ef4c83116d8a7e08061b2ac47bb71 | /Spider/ImgSpider/utils/exceptions.py | d2c585c0275117650b4ba792d89e1926e23d29ec | [
"MIT",
"Apache-2.0"
] | permissive | Danceiny/HackGirlfriend | 9cc796c733be7055799efb1c51f1e5ecb3d12d81 | d64f43c5cfb48d30ed812e34fb19bc7b90ba01f8 | refs/heads/master | 2023-01-04T16:09:55.205094 | 2017-07-22T16:48:59 | 2017-07-22T16:48:59 | 93,874,976 | 2 | 1 | Apache-2.0 | 2022-12-26T20:14:57 | 2017-06-09T15:57:34 | HTML | UTF-8 | Python | false | false | 1,267 | py | # coding=utf-8
# Exception hierarchy for the ImageSpider crawler.  Each class carries a
# human-readable `msg` CLASS attribute; note the message is never passed to
# Exception.__init__, so str(exc) is empty unless callers print `exc.msg`.
class UnknownPythonVersion(Exception):
    msg = 'Unknown Python version found, please check your Python installation.'
class UnsupportedPythonVersion(Exception):
    msg = 'So far ImageSpider only support Python 2.'
class GetBaseLinkFailed(Exception):
    msg = 'Getting base link failed.'
class ParameterNotGiven(Exception):
    msg = 'Parameter is not given.'
class InvalidImageFileName(Exception):
    msg = 'Invalid image filename found.'
class ClearCacheFailed(Exception):
    msg = 'Clearing cache failed.'
class LoadCacheFailed(Exception):
    msg = 'Loading cache failed.'
class InitializeFailed(Exception):
    msg = 'Initialization failed.'
class SaveImageFailed(Exception):
    msg = 'Saving image failed.'
class TranslateToAbsoluteURLFailed(Exception):
    msg = 'Translating relative URL to absolute URL failed.'
class LoadSettingsFileFailed(Exception):
    msg = 'load settings.conf failed, please check if it exists.'
# Settings-validation errors share the config-file prefix below.
class SettingsError(Exception):
    CONFIG = 'settings.conf: '
    interval_err = CONFIG + '[interval] must be larger than 0.'
    sites_err = CONFIG + '[sites] is necessary.'
class InvalidDomain(Exception):
    msg = 'The domain is invalid.'
# Raised when an optional third-party package is missing; carries no message.
class PackageNotInstalled(Exception):
    pass
| [
"danceiny@gmail.com"
] | danceiny@gmail.com |
628f197dcdce92355c268cf6f67500a76d8f9ba6 | 0431fb263e38422585edca273fb47ef92fd22243 | /dataloaders/data_poke.py | ed7e09e5dcdf2e7f5b203cc60babc9ab1a115781 | [] | no_license | RRoundTable/EEN-with-Keras | a6c3352eafc05fcb7ed41463d637a684de9a0b27 | ae71903afa05135f5eb6e2797854969f5a082958 | refs/heads/master | 2020-04-28T03:17:25.762629 | 2019-04-27T12:19:13 | 2019-04-27T12:19:13 | 174,930,756 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,653 | py | import os, random, glob, pdb, math
import pickle as pickle
from sklearn.externals import joblib
import numpy
from scipy import misc
# import torch
# import torchvision
import utils
from tensorflow.python.keras.layers import Input
class ImageLoader(object):
    """Episode loader: per-folder JPEG frame sequences plus an actions.npy,
    cached per folder as a joblib pickle so decoding only happens once."""
    def _load_set(self, split):
        """Load every episode folder under <datapath>/<split>/.

        Returns a list of {'frames': ..., 'actions': numpy array} dicts.
        NOTE(review): a freshly built cache stores a plain Python list of
        images, while _sample_time later indexes `video.shape` -- confirm
        the cached object is actually a numpy array in practice.
        """
        print('loading {} set'.format(split))
        datalist = []
        datapath = '{}/{}/'.format(self.arg.get("datapath"), split)
        for fdname in os.listdir(datapath):
            print("file : {}".format(fdname))
            fd_datalist = []
            abs_fdname = os.path.join(datapath, fdname)
            print("loading {}".format(abs_fdname))
            # An existing presave.pkl means this folder was already converted.
            presaved_npy = glob.glob(os.path.join(abs_fdname, "presave.pkl"))
            if len(presaved_npy) == 1:
                with open(os.path.join(abs_fdname, "presave.pkl"),'rb') as f :
                    fd_datalist=joblib.load(f)
            elif len(presaved_npy) == 0:
                # First run: decode each JPEG (the last file is dropped by
                # [:-1]), resize to (height, width), then cache with joblib.
                with open(os.path.join(abs_fdname, "presave.pkl"), 'wb') as f:
                    for abs_fname in sorted(glob.glob(os.path.join(abs_fdname, "*.jpg"))[:-1]):
                        print('reading {}'.format(abs_fname))
                        img = misc.imread(abs_fname)
                        r_img = misc.imresize(img, (self.height, self.width))
                        fd_datalist.append(r_img)
                    print(numpy.array(fd_datalist).shape)
                    # fd_datalist = numpy.transpose(numpy.array(fd_datalist), (0, 3, 1, 2))
                    joblib.dump(fd_datalist,f)
                    # numpy.save(os.path.join(abs_fdname, "presave.npy"), fd_datalist)
            else:
                # glob matched more than one presave.pkl -- unexpected state.
                raise ValueError
            actions = numpy.load(abs_fdname + '/actions.npy')
            datalist.append({'frames': fd_datalist, 'actions': actions})
        return datalist
    def __init__(self, arg):
        """*arg* is a dict-like config: datapath, height, width, nc,
        ncond/npred (condition/prediction horizons, default 1), batchsize."""
        super(ImageLoader, self).__init__()
        self.arg = arg
        self.datalist = []
        self.height = arg.get('height')
        self.width = arg.get('width')
        self.nc = arg.get('nc')
        self.ncond = arg.get('ncond', 1)
        self.npred = arg.get('npred', 1)
        self.datalist_train = self._load_set('train')
        self.datalist_test = self._load_set('test')
        # keep some training data for validation
        self.datalist_valid = self.datalist_train[-3:]
        self.datalist_train = self.datalist_train[:-3]
        # self.datalist_valid = self.datalist_train
        # self.datalist_train = self.datalist_train
        # pointers
        self.iter_video_ptr = 0
        self.iter_sample_ptr = self.ncond
        print("Dataloader constructed done")
    def reset_ptrs(self):
        """Rewind the sequential pointers used by get_iterated_batch."""
        self.iter_video_ptr = 0
        self.iter_sample_ptr = self.ncond
    def _sample_time(self, video, actions, num_cond, num_pred):
        """Pick a random adjacent (frame, next frame, action) triple.
        num_cond/num_pred are accepted but not used here."""
        start_pos = random.randint(0, video.shape[0]-2)
        cond_frames = video[start_pos]
        pred_frames = video[start_pos+1]
        actions = actions[start_pos]
        return cond_frames, pred_frames, actions
    def _iterate_time(self, video, start_pos, actions, num_cond, num_pred):
        """Deterministic counterpart of _sample_time at a fixed start_pos."""
        cond_frames = video[start_pos]
        pred_frames = video[start_pos+1]
        actions = actions[start_pos]
        return cond_frames, pred_frames, actions
    def get_batch(self, split):
        """Randomly sample `batchsize` (cond, pred, action) triples from the
        requested split; frames are returned as float32 arrays in [0, 1]."""
        if split == 'train':
            datalist = self.datalist_train
        elif split == 'valid':
            datalist = self.datalist_valid
        elif split == 'test':
            datalist = self.datalist_test
        cond_frames, pred_frames, actions = [], [], []
        # rolling
        id = 1
        while id <= self.arg.get("batchsize"):
            sample = random.choice(datalist)
            sample_video = sample.get('frames')
            sample_actions = sample.get('actions')
            selected_cond_frames, selected_pred_frames, selected_actions = self._sample_time(
                sample_video, sample_actions, self.ncond, self.npred)
            assert(len(selected_actions) > 0)
            cond_frames.append(selected_cond_frames)
            pred_frames.append(selected_pred_frames)
            actions.append(selected_actions)
            id += 1
        # processing on the numpy array level
        cond_frames = numpy.array(cond_frames, dtype='float32') / 255.0
        pred_frames = numpy.array(pred_frames, dtype='float32') / 255.0
        actions = numpy.array(actions).squeeze()
        return cond_frames,pred_frames,actions
    def get_iterated_batch(self, split):
        """Walk the chosen split sequentially, one batch per call.

        NOTE(review): this reads `self.split` and `self.datalist`, neither of
        which is ever assigned with usable data (the parameter is `split`;
        episodes live in datalist_train/test), so as written it raises
        AttributeError / indexes an empty list.  It also falls off the end
        without a return statement after the final batch is processed.
        """
        if self.split == 'train':
            datalist = self.datalist_train
        elif self.split == 'test':
            datalist = self.datalist_test
        cond_frames, pred_frames, actions = [], [], []
        # rolling
        id = 1
        while id <= self.arg.get("batchsize"):
            if self.iter_video_ptr == len(datalist):
                return None, None, None
            sample = self.datalist[self.iter_video_ptr]
            sample_video = sample.get('frames')
            sample_actions = sample.get('actions')
            if self.iter_sample_ptr + self.npred > sample_video.shape[0]:
                self.iter_video_ptr += 1
                self.iter_sample_ptr = self.ncond
            else:
                selected_cond_frames, selected_pred_frames, selected_actions = self._iterate_time(
                    sample_video, self.iter_sample_ptr, sample_actions, self.ncond, self.npred)
                assert(len(selected_actions) > 0)
                cond_frames.append(selected_cond_frames)
                pred_frames.append(selected_pred_frames)
                actions.append(selected_actions)
                id += 1
                self.iter_sample_ptr += 1
        # processing on the numpy array level
        cond_frames = numpy.array(cond_frames, dtype='float') / 255.0
        pred_frames = numpy.array(pred_frames, dtype='float') / 255.0
        actions = numpy.array(actions).squeeze()
        # # return tensor
        # cond_frames_ts = torch.from_numpy(cond_frames).cuda()
        # pred_frames_ts = torch.from_numpy(pred_frames).cuda()
        # actions_ts = torch.from_numpy(actions).cuda()
        #
        # # keras
        # return cond_frames_ts, pred_frames_ts, actions_ts
        #
        # def plot_seq(self, cond, pred):
        #     cond_pred = torch.cat((cond, pred), 1)
        #     cond_pred = cond_pred.view(-1, self.nc, self.height, self.width)
        #     grid = torchvision.utils.make_grid(cond_pred, self.ncond+self.npred, pad_value=1)
        #     return grid
| [
"ryu071511@gmail.com"
] | ryu071511@gmail.com |
f0638fd2d66ede9da7dbfba2d846f8e41920760a | 8bd1ae9c1681ee8c1214a4e9cda29a503676c36d | /v1/tnc2.py | 926883fffab2cabc992b1efab7396b853be0ba9b | [] | no_license | zleffke/balloon | c15ef7e5614018022ca1f19ed2e4c82009352165 | 2943d303692b299e6d0866299d83a94d0839bf5b | refs/heads/master | 2021-07-06T02:28:17.304024 | 2021-05-22T20:25:18 | 2021-05-22T20:25:18 | 57,078,951 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,811 | py | #!/usr/bin/env python
##################################################
# GPS Interface
# Author: Zach Leffke
# Description: Initial GPS testing
##################################################
from optparse import OptionParser
import threading
from datetime import datetime as date
import os
import serial
import math
import sys
import string
import time
def utc_ts():
    """Current UTC time rendered as a log-line prefix ending in ' UTC | '."""
    return "{} UTC | ".format(date.utcnow())
class TNC_Thread(threading.Thread):
    """Reader thread for a serial-attached TNC: timestamps/logs raw frames
    and parses APRS-style position reports into lat/lon/alt/speed/course.

    NOTE(review): field offsets below are hard-coded for one uncompressed
    APRS position format (see the sample frames commented in run()); other
    frame types will parse garbage or raise -- confirm against the feed.
    """
    def __init__ (self, port, baud, log_flag, call_filt):
        # NOTE(review): self._stop shadows threading.Thread._stop on some
        # Python versions -- confirm against the target interpreter.
        threading.Thread.__init__(self)
        self._stop = threading.Event()
        self.tnc_ser = serial.Serial(port, baud)
        # log_flag: path of the raw log file, or None to disable logging.
        self.log_flag = log_flag
        self.line = ""
        # Only frames whose callsign contains call_filt are parsed.
        self.call_filt = call_filt
        self.callsign = ""
        self.path = []
        self.raw_log = None
        self.csv_log = None
        self.lat = 0.0
        self.lon = 0.0
        self.alt = 0.0
        self.spd = 0.0
        self.cse = 0.0
        self.time_utc = 0.0
        self.log_file=None
        #if self.log_flag!=None:
        #    self.log_file = open(self.log_flag,'a')
    def run(self):
        """Main loop: read a line from the TNC, append it (timestamped) to
        the raw log when enabled, then parse it.  Polls once per second."""
        while (not self._stop.isSet()):
            data = self.tnc_ser.readline()
            #data = "KK4BSM-11>APT314,WIDE1-1,WIDE2-1:/205107h3713.89N/08025.49WO000/000/A=002125/Virginia Tech Project Ellie, Go Hokies!\n"
            #data = "KC8SIR-1>APBL10,WIDE3-1,WIDE4-1:!3733.20N/08106.48WO183/036/A=018991V300"
            if self.log_flag != None:
                # Open/close per line so the file is flushed after each frame.
                self.log_file = open(self.log_flag,'a')
                self.log_file.write(utc_ts() + data)
                self.log_file.close()
            self.line = data.strip('\n')
            self.Parse_TNC()
            #print self.line
            time.sleep(1)
        sys.exit()
    def Parse_TNC(self):
        """Parse self.line as an APRS position report, updating callsign,
        path, lat/lon (decimal degrees), course, speed and altitude fields.

        NOTE(review): the section labelled 'extract spd' assigns self.cse and
        the 'extract course' section assigns self.spd -- the labels appear
        swapped relative to the code; verify which field is which upstream.
        """
        #----------Extract Callsign----------
        #--Locate first '>', take characters from beginning, limit search to first ten characters
        idx1 = self.line.find('>', 0, 10)
        self.callsign = self.line[:idx1]
        #print len(self.callsign), self.callsign
        #--Verify Callsign matches callsign filter
        idx2 = self.callsign.find(self.call_filt)
        #print idx2
        if (idx2 != -1): #Callsign Match
            #----------extract path----------
            #locate first '>', locate ':', take characters in between
            a = self.line.find(':')
            path_str = self.line[idx1+1:a]
            self.path = path_str.split(',')
            #----------extract time----------
            #locate ':', take next 7 characters
            #hhmmsst, hh - hours, mm - minutes, ss - seconds, t - timezone
            #time_str = self.line[a+2:a+2+7]
            #if ((time_str[6] == 'h')or(time_str[6] == 'z')): #Zulu Time
            #    self.time_utc = time_str[0:2] + ":" + time_str[2:4] + ":" + time_str[4:6] + " UTC"
            #----------extract lat----------
            #locate ':', skip 7 char, take next 8 char
            lat_str = self.line[a+2:a+2+7]
            #print lat_str
            #lat_hemi = self.line[a+2+7:a+2+8]
            lat_f = float(lat_str[0:2]) + float(lat_str[2:]) / 60.0
            #if (lat_hemi == 'S'): lat_f = lat_f * -1
            self.lat = lat_f #decimal degrees
            #----------extract lon----------
            #locate ':', skip 16, take next 9 char
            lon_str = self.line[a+11:a+11+8]
            lon_hemi = self.line[a+11+8: a+11+9]
            lon_f = float(lon_str[0:3]) + float(lon_str[3:]) / 60.0
            if lon_hemi == "W": lon_f = lon_f * -1
            self.lon = lon_f # decimal degrees
            #----------extract spd----------
            #locate ':', skip 27, take next 3 char
            a = self.line.find('O')
            cse_str = self.line[a+1:a+4]
            #print cse_str
            self.cse = float(cse_str)#*1.15078 #convert from knots to mph
            #----------extract course----------
            #locate ':/', skip 30, take next 3 char
            spd_str = self.line[a+1+4:a+1+4+3]
            #print cse_str
            self.spd = float(spd_str)*1.15078 #in degrees
            #----------extract altitude----------
            #locate 'A=', take next 6
            a = self.line.find('A=')
            alt_str = self.line[a+2:a+2+6]
            self.alt = float(alt_str) #in feet
    def get_last_callsign(self):
        """Callsign from the most recently parsed frame."""
        return self.callsign
    def get_lat_lon_alt(self):
        """Last position: (lat deg, lon deg, altitude ft)."""
        return self.lat, self.lon, self.alt
    def get_spd_cse(self):
        """Last (speed, course) pair -- see the swapped-label note above."""
        return self.spd, self.cse
    def get_time(self):
        """Last parsed timestamp (currently never updated; parsing is commented out)."""
        return self.time_utc
    def stop(self):
        """Signal the run() loop to exit."""
        #self.tnc_ser.close()
        self._stop.set()
        sys.exit()
    def stopped(self):
        """True once stop() has been requested."""
        return self._stop.isSet()
| [
"zleffke@vt.edu"
] | zleffke@vt.edu |
2c1f0bb8452e88d0139f57a8d392d8d03d95841c | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /G2QnBrxvpq9FacFuo_9.py | 409f1278f18bac3fdd0fd7919b0002ac306a61bc | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py |
def possible_path(lst):
    """Check an alternating track: if the first tile is 'H', every tile at an
    even index must be 'H'; otherwise every tile at an odd index must be."""
    offset = 0 if lst[0] == 'H' else 1
    return all(tile == 'H' for tile in lst[offset::2])
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
901a12d85d7a567cb6b96e5c26267c541a2bee5a | 75af3c931671a55ea0058cea6e83e90dc1aed6d1 | /profil3r/app/core/services/_porn.py | 2ab894337d20e37f5825d30f488926628e176fca | [
"MIT"
] | permissive | derinkebapiskender/Profil3r | e711a381d84b27744d5289a87c99b4b8e77b8866 | d45fea1efab0487bfac49e422ebc46cb26b29582 | refs/heads/main | 2023-07-01T00:19:24.113439 | 2021-07-26T23:08:59 | 2021-07-26T23:08:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | from profil3r.app.modules.porn.pornhub import Pornhub
from profil3r.app.modules.porn.redtube import Redtube
from profil3r.app.modules.porn.xvideos import Xvideos
# Pornhub
def pornhub(self):
    """Search Pornhub with every username permutation; cache and show hits."""
    hits = Pornhub(self.config, self.permutations_list).search()
    self.result["pornhub"] = hits
    # display the findings
    self.print_results("pornhub")
    return self.result["pornhub"]
# Redtube
def redtube(self):
    """Search Redtube with every username permutation; cache and show hits."""
    hits = Redtube(self.config, self.permutations_list).search()
    self.result["redtube"] = hits
    # display the findings
    self.print_results("redtube")
    return self.result["redtube"]
# XVideos
def xvideos(self):
    """Search XVideos with every username permutation; cache and show hits."""
    hits = Xvideos(self.config, self.permutations_list).search()
    self.result["xvideos"] = hits
    # display the findings
    self.print_results("xvideos")
return self.result["xvideos"] | [
"r0g3r5@protonmail.com"
] | r0g3r5@protonmail.com |
9800f6c861feb1d3a793b713f5649f8631284b22 | b77cc1448ae2c68589c5ee24e1a0b1e53499e606 | /appraisal/migrations/0034_appraisal_performance_classification.py | 4206da7d9f420b1fcc58d45701b6a13bd21cbc9c | [] | no_license | PregTech-c/Hrp_system | a5514cf6b4c778bf7cc58e8a6e8120ac7048a0a7 | 11d8dd3221497c536dd7df9028b9991632055b21 | refs/heads/master | 2022-10-09T07:54:49.538270 | 2018-08-21T11:12:04 | 2018-08-21T11:12:04 | 145,424,954 | 1 | 1 | null | 2022-10-01T09:48:53 | 2018-08-20T13:58:31 | JavaScript | UTF-8 | Python | false | false | 595 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-08-13 21:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the one-character `performance_classification` choice field
    # ('0' Failure .. '3' Exceptional, default '0') to the appraisal model.
    dependencies = [
        ('appraisal', '0033_auto_20180813_1332'),
    ]
    operations = [
        migrations.AddField(
            model_name='appraisal',
            name='performance_classification',
            field=models.CharField(blank=True, choices=[('0', 'Failure'), ('1', 'Improvement Needed'), ('2', 'Excellent'), ('3', 'Exceptional')], default='0', max_length=1),
        ),
    ]
| [
"imugabi64@yahoo.com"
] | imugabi64@yahoo.com |
1b8a8570b27806bdeec6392585f3026f4913f1fb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_strongholds.py | 55c41458d6f0e15de2928d80a317f4df66fb17c1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
#calss header
class _STRONGHOLDS():
def __init__(self,):
self.name = "STRONGHOLDS"
self.definitions = stronghold
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['stronghold']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
3c838498f6089798a32bacec1df55395f584f265 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03695/s950925933.py | e48c5ff64081e4d044a733a6017beb03e5e51894 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | n = int(input())
A = list(map(int, input().split()))
# Ratings below 3200 fall into eight 400-point colour bands (a // 400);
# contestants rated 3200+ ("other") may pick any colour freely.
color = []
other = 0
for a in A:
    if a >= 3200:
        other += 1
    else:
        color.append(a // 400)
# Minimum distinct colours = bands actually present; maximum adds one
# free-choice contestant per band of their own.
c = len(set(color))
if color: print(c, c+other)
else: print(1, other) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
37b31d7c83517db42c782f28736d5e2d0a9d7128 | ed296ff86f13b1d9e41fbf6aace441090850ef57 | /chapter_03_data_modeling/account_example/account_example/example/migrations/0003_internalaccount_account_number.py | 53c0a79cd57dc61299f17ccdb16473cda975e38f | [
"MIT"
] | permissive | cnb0/Python-Architecture-Patterns | 2a6113605f95a4c99369ee0febe2812c792a7acf | 2194746ea334c9dd84b5547a6d59ebfa95ced394 | refs/heads/main | 2023-08-12T02:31:17.187913 | 2021-10-12T19:16:14 | 2021-10-12T19:16:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | # Generated by Django 3.2 on 2021-04-18 12:39
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a unique integer `account_number` to internalaccount.  default=0
    # exists only to back-fill existing rows; preserve_default=False drops
    # it from the model state afterwards.
    dependencies = [
        ('example', '0002_internalaccount_initial_amount'),
    ]
    operations = [
        migrations.AddField(
            model_name='internalaccount',
            name='account_number',
            field=models.IntegerField(default=0, unique=True),
            preserve_default=False,
        ),
    ]
| [
"jaime.buelta@gmail.com"
] | jaime.buelta@gmail.com |
f134a84e1d0e478b6e38dfe4818b42ecbd892513 | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/homework02/maoyanspiders/maoyanspiders/pipelines_20200627225504.py | 431121b82bb35a87337da0908ef3299621934ef2 | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 566 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class MaoyanspidersPipeline(object):
    """Item pipeline: append each scraped film row to a '|'-separated file.

    The stored snapshot ended mid-edit (a dangling `a` inside a read-mode
    `open`, making the file a SyntaxError); completed here: open the output
    for append and write the formatted row.
    """
    def process_item(self, item, spider):
        films_name = item['films_name']
        films_type = item['films_type']
        release_time = item['release_time']
        output = f'|{films_name}|\t|{films_type}|\t|{release_time}|\n\n'
        # 'a' keeps rows from earlier items; the truncated original opened
        # the file in the default read-only mode, which cannot be written.
        with open('./week01/homework02/top10.csv', 'a', encoding='utf-8') as article:
            article.write(output)
        # Scrapy pipelines must return the item for downstream stages.
        return item
| [
"31039587+ydbB@users.noreply.github.com"
] | 31039587+ydbB@users.noreply.github.com |
1097ae22a7073a902f0a5afb758647bb025d8aa7 | 052a89753a7917b7fa0ccdf5718d5250a1379d2c | /bin/painter.py | 007ccd5118b932b88dfafb856861b084ae5a60d4 | [] | no_license | bopopescu/aws.example.com | 25e2efda3bd9ae2a257c34904ccb53043fe20b55 | 97254868688c3c3a991843fcacc973c93b366700 | refs/heads/master | 2022-11-22T07:06:30.386034 | 2016-10-25T15:22:14 | 2016-10-25T15:22:14 | 282,553,417 | 0 | 0 | null | 2020-07-26T01:22:26 | 2020-07-26T01:22:25 | null | UTF-8 | Python | false | false | 2,169 | py | #!/Users/deanarmada/Desktop/projects/python-projects/aws.example.com/bin/python
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
try:
from tkinter import Tk, Canvas, NW
except ImportError:
from Tkinter import Tk, Canvas, NW
from PIL import Image, ImageTk
import sys
#
# painter widget
class PaintCanvas(Canvas):
    """Canvas showing a PIL image as a grid of 32x32 tiles; dragging the left
    mouse button 'paints' greyscale patches and refreshes only touched tiles."""
    def __init__(self, master, image):
        Canvas.__init__(self, master, width=image.size[0], height=image.size[1])
        # fill the canvas
        self.tile = {}
        self.tilesize = tilesize = 32
        xsize, ysize = image.size
        for x in range(0, xsize, tilesize):
            for y in range(0, ysize, tilesize):
                box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
                tile = ImageTk.PhotoImage(image.crop(box))
                self.create_image(x, y, image=tile, anchor=NW)
                # keep a reference to each PhotoImage so it stays alive
                self.tile[(x, y)] = box, tile
        self.image = image
        self.bind("<B1-Motion>", self.paint)
    def paint(self, event):
        """Mouse-drag handler: convert the 20x20 patch around the cursor to
        greyscale in the underlying image, then refresh the affected tiles."""
        xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
        im = self.image.crop(xy)
        # process the image in some fashion
        im = im.convert("L")
        self.image.paste(im, xy)
        self.repair(xy)
    def repair(self, box):
        """Re-paste every tile intersecting *box* so the screen matches the
        modified underlying image; tiles outside the image are skipped."""
        # update canvas
        dx = box[0] % self.tilesize
        dy = box[1] % self.tilesize
        for x in range(box[0]-dx, box[2]+1, self.tilesize):
            for y in range(box[1]-dy, box[3]+1, self.tilesize):
                try:
                    xy, tile = self.tile[(x, y)]
                    tile.paste(self.image.crop(xy))
                except KeyError:
                    pass # outside the image
        self.update_idletasks()
#
# main
# Usage: painter <imagefile> -- exactly one argument required.
if len(sys.argv) != 2:
    print("Usage: painter file")
    sys.exit(1)
root = Tk()
im = Image.open(sys.argv[1])
# normalise to 3-channel RGB before tiling/painting
if im.mode != "RGB":
    im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
| [
"deanarmada@gmail.com"
] | deanarmada@gmail.com |
ce8f7a72520ec6d106c411bd7698922722e9cd8f | 20cf9a80fd651b5adb7242bf17da2c323785f776 | /01_Python/00_python_fund/filter_type.py | 5d5aa673f2d382415462308c99d0696e18c93a76 | [] | no_license | ethanlow23/codingDojoCoursework | 4f2ea0490437fe61a927f665c6b6b23435f095cf | 1a8fcad44377727d43517d34bd1f425fc5d6abab | refs/heads/master | 2020-04-28T18:58:53.696656 | 2019-03-13T20:56:44 | 2019-03-13T20:56:44 | 175,496,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py |
sI = 45
mI = 100
bI = 455
eI = 0
spI = -23
sS = "Rubber baby buggy bumpers"
mS = "Experience is simply the name we give our mistakes"
bS = "Tell me and I forget. Teach me and I remember. Involve me and I learn."
eS = ""
aL = [1,7,4,21]
mL = [3,5,7,34,3,2,113,65,8,89]
lL = [4,34,22,68,9,13,3,5,7,9,2,12,45,923]
eL = []
spL = ['name','address','phone number','social security number']
if isinstance(n, int) or isinstance(n, float):
if n >= 100:
print "that's a big number"
else:
print "that's a small number"
elif isinstance(n, str):
if len(n) >= 50:
print "long sentence"
else:
print "short sentence"
elif isinstance(n, list):
if len(n) >= 10:
print "big list"
else:
print "short list" | [
"ethanlow223@yahoo.com"
] | ethanlow223@yahoo.com |
3aab6d9e3ab7cac0507d1de4eeb54cfa9f9bdc8e | d8346eaf1c910ff02c7b243692a2766b8b089f06 | /for-post/python-string/s2-methods/split.py | 33b6036796fcee2b77b0370fa84b79457b731e68 | [] | no_license | dustinpfister/examples-python | 55304c99ba3af82cd8784ee98745546632155c68 | a9910ee05d4df524f951f61b6d9778531a58ccbf | refs/heads/master | 2023-03-06T21:10:18.888654 | 2021-02-26T20:32:52 | 2021-02-26T20:32:52 | 318,595,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | str = '0,1,2,3,4,5'
l = str.split(',')
print(type(l).__name__) # list
print(l[3]) # 3
str = '012345'
# I can not give and empty string as a sep
# doing so will result in an error
try:
l = str.split('')
except ValueError:
print('ValueError')
# however there are a number of other ways to
# get that kind of list such as passing the string value
# to the list built in function
l = list(str);
print(type(l).__name__) # list
print(l[3]) # 3
| [
"dustin.pfister@gmail.com"
] | dustin.pfister@gmail.com |
f89c2bef07b20a5b88a18fc7237a8db40c9581f0 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1482494_0/Python/Decs/b.py | 0f263000841a7dcbb8b18269a13a7f97da46664b | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | tests = int(raw_input())
# One test case per iteration: greedily replay levels, counting each entry
# ("finishes"), until all 2n stars are earned or no move is affordable.
for test in xrange(1, tests + 1):
    n = int(raw_input())
    # needed[i] = [one-star requirement, two-star requirement] for level i
    needed = [map(int, raw_input().split()) for i in xrange(n)]
    finishes = 0
    # total = stars earned so far; completed[i] = stars taken from level i
    total = 0
    completed = [0] * n
    changed = True
    while changed:
        changed = False
        # First preference: fully finish (2 stars) any level we can afford.
        for level in xrange(n):
            if completed[level] < 2 and total >= needed[level][1]:
                finishes += 1
                total += 2 - completed[level]
                completed[level] = 2
                changed = True
                break
        if changed:
            continue
        # Otherwise take one star from an affordable untouched level,
        # preferring the one with the highest two-star requirement
        # (sort key is -needed[i][1]).
        one_star = [(-needed[i][1], i) for i in xrange(n) if completed[i] == 0 and total >= needed[i][0]]
        one_star.sort()
        if len(one_star) >= 1:
            finishes += 1
            total += 1
            completed[one_star[0][1]] = 1
            changed = True
    # All 2n stars collected means success; otherwise the case is impossible.
    if total != 2 * n:
        print 'Case #%d: Too Bad' % test
    else:
print 'Case #%d: %d' % (test, finishes) | [
"eewestman@gmail.com"
] | eewestman@gmail.com |
aa500d76845c40a3d72f123894af915ff7dbc08a | e59fe240f0359aa32c59b5e9f581db0bfdb315b8 | /galaxy-dist/lib/galaxy/util/memdump.py | 25558ca4d47da9d18a9d786248dcd19f8de0f6c8 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | subway/Galaxy-Distribution | dc269a0258471597d483687a0f1dd9e10bd47448 | d16d6f9b6a8b7f41a218c06539863c8ce4d5a73c | refs/heads/master | 2021-06-30T06:26:55.237251 | 2015-07-04T23:55:51 | 2015-07-04T23:55:51 | 15,899,275 | 1 | 2 | null | 2020-10-07T06:17:26 | 2014-01-14T10:47:28 | Groff | UTF-8 | Python | false | false | 1,541 | py |
# Attempt to load guppy module, and only define Memdump class
# if available
try:
    import pkg_resources
    pkg_resources.require( "guppy" )
except:
    # guppy missing: expose a None placeholder so importers can feature-test.
    import sys
    print >> sys.stderr, "No guppy module, Memdump not available"
    Memdump = None
else:
    import os, sys, signal, time, guppy
    class Memdump( object ):
        """Signal-triggered heap dumper: on SIGUSR1 (by default) appends a
        guppy/heapy snapshot of the current process to a log file."""
        def __init__( self, signum=signal.SIGUSR1, fname="memdump.log" ):
            self.fname = fname
            signal.signal( signum, self.dump )
            self.heapy = guppy.hpy()
            self.heap = None
        def dump( self, signum, stack ):
            """Signal handler: append heap statistics to self.fname."""
            file = open( self.fname, "a" )
            print >> file, "Memdump for pid %d at %s" % ( os.getpid(), time.asctime() )
            print >> file
            try:
                self.heap = self.heapy.heap()
                print >> file, "heap():"
                print >> file, self.heap
                print >> file, "\nbyrcs:"
                print >> file, self.heap.byrcs
                print >> file, "\nbyrcs[0].byid:"
                print >> file, self.heap.byrcs[0].byid
                print >> file, "\nget_rp():"
                print >> file, self.heap.get_rp()
                self.heapy.setref()
            except AssertionError:
                # NOTE(review): presumably guppy can raise AssertionError
                # internally and the dump is skipped on purpose -- confirm.
                pass
            print >> file, "\nEnd dump\n"
            file.close()
        def setref( self ):
            """Mark the current heap state as heapy's reference point."""
            self.heapy.setref()
        def get( self, update=False ):
            """Return the last captured heap; refresh it first when *update*."""
            if update:
                self.heap = self.heapy.heap()
            return self.heap
| [
"sabba_88@hotmail.com"
] | sabba_88@hotmail.com |
1a7633f93b0824f775592709fc99a1d901708513 | cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc | /Python Books/Python-3-Video-Tutorial/Exercise Files/Exercise Files/02 Quick Start/function.py | 730ac95decd51740673e575dccced4204c40da21 | [] | no_license | theGreenJedi/Path | df24fca355590efef0c6cb5c52e7216c6b5d2464 | b5ed2805dbb046480929e49e550bfd8af5bb4d6f | refs/heads/master | 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 | Jupyter Notebook | UTF-8 | Python | false | false | 344 | py | #!/usr/bin/python3
def isprime(n):
    """Trial-division primality check with chatty output.

    Prints '1 is special' for 1, the first factorisation found for a
    composite, or a confirmation line for a prime.  Returns True only
    when *n* is prime.
    """
    if n == 1:
        print("1 is special")
        return False
    if n < 1:
        # The original fell through to the loop here; range(2, n) is empty
        # for n <= 0, so 0 and negatives were wrongly reported as prime.
        return False
    for x in range(2, n):
        if n % x == 0:
            print("{} equals {} x {}".format(n, x, n // x))
            return False
    else:
        # for-else: runs only when no factor was found.
        print(n, "is a prime number")
        return True
# Demo driver: report primality for 1..19, one printed line per candidate.
for n in range(1, 20):
    isprime(n)
| [
"GreenJedi@protonmail.com"
] | GreenJedi@protonmail.com |
3089cbd516c8333275c4d4d1c89c91ddcf6f27d1 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/octal/88c13780af7b419e8a9f1b81c23ca0df.py | 815521fd47687557af3468c72421c673042ff834 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 866 | py | def Base(base_, name_):
all_digits = "0123456789" + "".join(chr(i) for i in xrange(ord('a'), ord('z')+1))
if base_ > len(all_digits):
raise ValueError("Cannot create a numbering base {}: not enough digits".format(base_))
class Base(object):
digits = all_digits[:base_]
base = base_
name = name_
def __init__(self, s):
self.num = s
acc = 0
b = self.base
for sd in self.num:
try:
d = self.digits.index(sd)
acc *= b
acc += d
except ValueError:
raise ValueError("Invalid {} digit: {}".format(self.name, sd))
self.value = acc
def to_decimal(self):
return self.value
return Base
class Octal(Base(8, 'octal')):
pass
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
88c5eb693664e13eaf85f607287f78a60cfd3cef | 915bfb55c32999a39807b5364c6fa48d0d0b0bb0 | /OMS/saltstack/scripts/copy_anything.py | 53d97b77fa03a43466f000f89d9e2b974a0d6055 | [] | no_license | rysinal/pythonnote | fd761d67fcf41fc009a5724ecd666db63cfef62a | 90245323b1d6fcfdec89c1abefbc34ef6fa0946d | refs/heads/master | 2021-12-23T11:39:29.580329 | 2017-11-13T08:31:07 | 2017-11-13T08:31:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | #!/usr/bin/python
import shutil
import errno
# import os
def do_copy(src, dst):
    """Copy ``src`` to ``dst``, whether ``src`` is a directory tree or a file.

    Directories are copied recursively with ``shutil.copytree``; when that
    fails because ``src`` is actually a plain file (ENOTDIR), fall back to a
    single-file ``shutil.copy``.  Any other OSError is re-raised unchanged.
    """
    try:
        shutil.copytree(src, dst)
    except OSError as err:
        if err.errno != errno.ENOTDIR:
            raise
        shutil.copy(src, dst)
| [
"754267513@qq.com"
] | 754267513@qq.com |
1d807c3ac02c9f70b4c9b2e471a6204a41b1ed38 | f7a20374403b55189cc5db6e8fa34d0ba290387c | /modules/everyday_report/report_mp.py | 9b3f45580f58891e6f10da07b88711f3cea0d088 | [] | no_license | dark-ice/upink_modules | 1a7b5a165cc5e05396c62cf33c261b907c23e33c | c497bf87a39796f1df3877542359b1927bec3a76 | refs/heads/master | 2021-05-01T04:40:16.436666 | 2014-04-12T15:09:31 | 2014-04-12T15:09:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,658 | py | # coding=utf-8
__author__ = 'andrey'
from openerp import tools
from openerp.osv import fields
from openerp.osv.orm import Model
class ReportMP(Model):
    # Read-only OpenERP reporting model ("Daily reports - MP") backed by the
    # SQL view created in init() below; _auto = False keeps the ORM from
    # creating a physical table for it.
    _name = 'day.report.mp'
    _description = u'Ежедневные отчеты - МП'
    _auto = False
    _order = 'date'
    _columns = {
        # date_start / date_end are virtual filter bounds; read_group()
        # below rewrites them onto the real ``date`` column.
        'date_start': fields.date('c', select=True),
        'date_end': fields.date('по', select=True),
        'date': fields.date('Дата'),
        'week_number': fields.integer('Номер недели', group_operator="avg"),
        # Per-direction plan / fact / revenue triples (PPC, web, SMM, SEO,
        # call centre, video, MP, Moscow), plus the overall brief count.
        'ppc_plan': fields.integer('PPC план'),
        'ppc_fact': fields.integer('PPC факт'),
        'ppc_cash': fields.float('PPC $'),
        'web_plan': fields.integer('web план'),
        'web_fact': fields.integer('web факт'),
        'web_cash': fields.float('web $'),
        'smm_plan': fields.integer('smm план'),
        'smm_fact': fields.integer('smm факт'),
        'smm_cash': fields.float('smm $'),
        'seo_plan': fields.integer('seo план'),
        'seo_fact': fields.integer('seo факт'),
        'seo_cash': fields.float('seo $'),
        'call_plan': fields.integer('КЦ план'),
        'call_fact': fields.integer('КЦ факт'),
        'call_cash': fields.float('КЦ $'),
        'video_plan': fields.integer('video план'),
        'video_fact': fields.integer('video факт'),
        'video_cash': fields.float('video $'),
        'mp_plan': fields.integer('МП план'),
        'mp_fact': fields.integer('МП факт'),
        'mp_cash': fields.float('МП $'),
        'moscow_plan': fields.integer('Москва план'),
        'moscow_fact': fields.integer('Москва факт'),
        'moscow_cash': fields.float('Москва $'),
        'total_fact': fields.integer('Зашедшие брифы'),
    }

    def init(self, cr):
        """(Re)create the ``day_report_mp`` SQL view backing this model.

        The view joins the per-direction daily plans
        (``day_report_brief_plan``) with actual brief counts/sums derived
        from ``brief_history`` rows in state ``media_approval``, one result
        row per date.
        """
        tools.drop_view_if_exists(cr, 'day_report_mp')
        cr.execute("""
            create or replace view day_report_mp as (
            SELECT
              row_number()
              OVER () AS id,
              to_char(r.date, 'YYYY-MM-DD') date_end,
              to_char(r.date, 'YYYY-MM-DD') date_start,
              extract(WEEK FROM r.date) week_number,
              r.date date,
              max(total_fact) total_fact,
              max(CASE WHEN r.direction = 'PPC' THEN r.plan ELSE 0 END) ppc_plan,
              max(ppc_fact) ppc_fact,
              max(ppc_cash) ppc_cash,
              max(CASE WHEN r.direction = 'SMM' THEN r.plan ELSE 0 END) smm_plan,
              max(smm_fact) smm_fact,
              max(smm_cash) smm_cash,
              max(CASE WHEN r.direction = 'SEO' THEN r.plan ELSE 0 END) seo_plan,
              max(seo_fact) seo_fact,
              max(seo_cash) seo_cash,
              max(CASE WHEN r.direction = 'CALL' THEN r.plan ELSE 0 END) call_plan,
              max(call_fact) call_fact,
              max(call_cash) call_cash,
              max(CASE WHEN r.direction = 'SITE' THEN r.plan ELSE 0 END) web_plan,
              max(web_fact) web_fact,
              max(web_cash) web_cash,
              max(CASE WHEN r.direction = 'VIDEO' THEN r.plan ELSE 0 END) video_plan,
              max(video_fact) video_fact,
              max(video_cash) video_cash,
              max(CASE WHEN r.direction = 'MP' THEN r.plan ELSE 0 END) mp_plan,
              max(mp_fact) mp_fact,
              max(mp_cash) mp_cash,
              max(CASE WHEN r.direction = 'MOSCOW' THEN r.plan ELSE 0 END) moscow_plan,
              max(moscow_fact) moscow_fact,
              max(moscow_cash) moscow_cash
            FROM
              day_report_brief_plan r
              LEFT JOIN (
                     SELECT
                       h.cr_date::DATE date,
                       sum(CASE WHEN bss.direction IN ('PPC', 'SEO', 'SMM', 'CALL', 'SITE', 'VIDEO', 'MP', 'MOSCOW') IS NOT NULL THEN 1 ELSE 0 END) total_fact,
                       sum(CASE WHEN bss.direction = 'PPC' THEN 1 ELSE 0 END) ppc_fact,
                       sum(CASE WHEN bss.direction = 'PPC' THEN b.sum_mediaplan ELSE 0 END) ppc_cash,
                       sum(CASE WHEN bss.direction = 'SMM' THEN 1 ELSE 0 END) smm_fact,
                       sum(CASE WHEN bss.direction = 'SMM' THEN b.sum_mediaplan ELSE 0 END) smm_cash,
                       sum(CASE WHEN bss.direction = 'SEO' THEN 1 ELSE 0 END) seo_fact,
                       sum(CASE WHEN bss.direction = 'SEO' THEN b.sum_mediaplan ELSE 0 END) seo_cash,
                       sum(CASE WHEN bss.direction = 'CALL' THEN 1 ELSE 0 END) call_fact,
                       sum(CASE WHEN bss.direction = 'CALL' THEN b.sum_mediaplan ELSE 0 END) call_cash,
                       sum(CASE WHEN bss.direction = 'SITE' THEN 1 ELSE 0 END) web_fact,
                       sum(CASE WHEN bss.direction = 'SITE' THEN b.sum_mediaplan ELSE 0 END) web_cash,
                       sum(CASE WHEN bss.direction = 'VIDEO' THEN 1 ELSE 0 END) video_fact,
                       sum(CASE WHEN bss.direction = 'VIDEO' THEN b.sum_mediaplan ELSE 0 END) video_cash,
                       sum(CASE WHEN bss.direction = 'MP' THEN 1 ELSE 0 END) mp_fact,
                       sum(CASE WHEN bss.direction = 'MP' THEN b.sum_mediaplan ELSE 0 END) mp_cash,
                       sum(CASE WHEN bss.direction = 'MOSCOW' THEN 1 ELSE 0 END) moscow_fact,
                       sum(CASE WHEN bss.direction = 'MOSCOW' THEN b.sum_mediaplan ELSE 0 END) moscow_cash
                     FROM brief_history h
                       LEFT JOIN brief_main b
                         ON (h.brief_id = b.id)
                       LEFT JOIN brief_services_stage bss
                         ON (bss.id = b.services_ids)
                     WHERE h.state_id = 'media_approval'
                     GROUP BY h.cr_date::DATE
                   ) b on (b.date=r.date)
            GROUP BY r.date
            )""")

    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
        """Rewrite the virtual ``date_start``/``date_end`` filters onto the
        real ``date`` column (inclusive bounds, end of day for the upper
        bound) before delegating to the standard ``read_group``."""
        for item in domain:
            if item[0] == 'date_start':
                item[0] = 'date'
                item[1] = '>='
            if item[0] == 'date_end':
                item[0] = 'date'
                item[1] = '<='
                # Extend the upper bound to the end of the selected day.
                item[2] = "{date} 23:59:59".format(date=item[2],)
        return super(ReportMP, self).read_group(cr, uid, domain, fields, groupby, offset, limit, context, orderby)

ReportMP()  # legacy OpenERP convention: instantiation registers the model
"karbanovich.andrey@gmail.com"
] | karbanovich.andrey@gmail.com |
6088e141228743b67ea1602b028a24c111010e3a | e96461c5711974aee2401aad3206131b84e7b665 | /library/piglow.py | f4539f48cab58c387be9fc2b9a33bc3b879a7e34 | [] | no_license | sbelyea/piglow | 0a06507ef4859711a47027b09e58f22b7e42c5eb | d8599be3998521a3d211e38ac61043f717d74d40 | refs/heads/master | 2020-12-11T04:00:40.815366 | 2015-05-12T09:45:32 | 2015-05-12T09:45:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,444 | py | import sn3218
import atexit
import time
# Initialise the SN3218 18-channel LED driver and enable all 18 channels.
sn3218.enable()
sn3218.enable_leds(0b111111111111111111)
# When True, every LED is switched off at interpreter exit (see _exit()).
clear_on_exit = True
# When True, every set() call immediately pushes the buffer to the chip.
auto_update = False
# SN3218 channel numbers for each of the three arms ("legs"), ordered
# red, orange, yellow, green, blue, white (see the r/o/y/g/b/w key below).
_legs = [
    # r o y g b w
    [ 6, 7, 8, 5, 4, 9 ],
    [ 17, 16, 15, 13, 11, 10 ],
    [ 0, 1, 2, 3, 14, 12 ]
]
# Current brightness (0-255) for each of the 18 raw channels.
_values = [0] * 18
# Ring index (0-5) for each colour name, matching the column order above.
colours = {
    "red" : 0,
    "orange" : 1,
    "yellow" : 2,
    "green" : 3,
    "blue" : 4,
    "white" : 5
}
# --- Convenience wrappers over ring() / leg() / set() ----------------------

# One helper per colour: set that colour's ring (all three arms) to v.
def white(v): ring(5,v)
def blue(v): ring(4,v)
def green(v): ring(3,v)
def yellow(v): ring(2,v)
def orange(v): ring(1,v)
def red(v): ring(0,v)

# 1-based arm helpers.  Bug fix: these previously passed a 0-based index
# to the 1-based arm(), so arm1 actually lit leg 2 (via the ``% 3`` wrap
# in leg()), arm2 lit leg 0 and arm3 lit leg 1.  They now address the leg
# their name says.
def arm1(v): arm(1,v)
def arm2(v): arm(2,v)
def arm3(v): arm(3,v)

# 1-based single-channel helpers: ledN drives raw SN3218 channel N-1.
def led1(v): set(0,v)
def led2(v): set(1,v)
def led3(v): set(2,v)
def led4(v): set(3,v)
def led5(v): set(4,v)
def led6(v): set(5,v)
def led7(v): set(6,v)
def led8(v): set(7,v)
def led9(v): set(8,v)
def led10(v): set(9,v)
def led11(v): set(10,v)
def led12(v): set(11,v)
def led13(v): set(12,v)
def led14(v): set(13,v)
def led15(v): set(14,v)
def led16(v): set(15,v)
def led17(v): set(16,v)
def led18(v): set(17,v)

# arm()/spoke() are 1-based aliases for the 0-based leg().
def arm(x,y): leg(x - 1,y)
def spoke(x,y): leg(x - 1,y)
def show():
    '''
    Output the contents of the values list to PiGlow.
    '''
    # Push the whole 18-channel buffer to the SN3218 driver in one call.
    sn3218.output(_values)
def get():
    # Return the live 18-channel brightness buffer (not a copy).
    return _values
def set(leds, value):
    '''
    Set one or more LEDs with one or more values

    Args:
    * leds - A single index, or list of indexes of the LEDs to set
    * values - A single value, or list of values to set
    '''
    global _values
    if isinstance(leds, list):
        if isinstance(value, list):
            # Bug fix: pair each LED index with its value *positionally*.
            # The old code indexed ``leds``/``value`` by the LED number
            # itself (``leds[led]``/``value[led]``), which raised
            # IndexError for any list that wasn't [0, 1, 2, ...].
            for i, led in enumerate(leds):
                _values[led % 18] = value[i] % 256
        else:
            # One value fanned out to every listed LED.
            for led in leds:
                _values[led % 18] = value % 256
    elif isinstance(leds, int):
        leds = leds % 18
        if isinstance(value, list):
            # Write the run of values starting at ``leds``; anything that
            # spills past channel 17 wraps around to channel 0.
            _values[leds:leds + len(value)] = map(lambda v: v % 256, value)
            if len(_values) > 18:
                wrap = _values[18:]
                _values = _values[:18]
                set(0, wrap)
        else:
            _values[leds] = (value % 256)
    else:
        raise ValueError("Invalid LED(s)")
    if auto_update:
        show()
def ring(ring, value):
    '''
    Set the brightness of a specific ring
    '''
    # Bug fix: each _legs row has only 6 entries, but the old ``% 7`` wrap
    # allowed index 6 through, raising IndexError (e.g. via colour(7)).
    ring = ring % 6
    set([_legs[0][ring], _legs[1][ring], _legs[2][ring]], value)
def leg_bar(leg, percentage):
    """Light one leg like a bar graph, filling from the last channel;
    ``percentage`` is 0.0-1.0 of the leg's total (6 * 255) brightness."""
    # 1530 = 6 * 255
    amount = int(1530.0 * percentage)
    for led in reversed(_legs[leg]):
        # Saturate each LED at 255 and carry the remainder to the next one.
        set(led,255 if amount > 255 else amount)
        amount = 0 if amount < 255 else amount - 255
def leg(leg, intensity):
    # Set all six LEDs of one leg (0-2, wraps via % 3) to the same value.
    set(_legs[leg % 3], intensity)
def led(led, intensity):
    '''Compatibility function for old PiGlow library
    Accepts LED between 1 and 18.
    Calls set(led - 1, intensity)
    Args:
    * led - LED number from 1 to 18
    * intensity - brightness from 0 to 255
    '''
    set(led - 1, intensity)
def single(leg, ring, intensity):
    '''Sets a single LED by its leg/ring
    Args:
    * leg - leg index of LED
    * ring - ring index of LED
    * intensity - brightness from 0 to 255
    '''
    # Same off-by-one fix as ring(): only 6 rings exist per leg.
    set(_legs[leg % 3][ring % 6], intensity)
def tween(duration, end, start = None):
    '''Tweens to a particular set of intensities.
    Also accepts an optional starting point, otherwise
    the current state of the LED is used.
    Args:
    * duration - duration in seconds
    * end - list of 18 values to tween to
    * start - list of 18 values to start from
    '''
    if not len(end) == 18:
        raise ValueError("Requires list of 18 values")
    # Animate at ~60 frames per second for the requested duration.
    fps = 1.0/60
    steps = int(duration / fps)
    if start is None:
        start = _values
    for x in range(steps):
        new = []
        for y in range(18):
            # Linear interpolation from start[y] to end[y] at step x+1.
            s = start[y]
            e = end[y]
            c = float(e - s)
            b = s + ((c/float(steps)) * (x+1))
            new.append(int(b))
        set(0, new)
        show()
        time.sleep(fps)
def colour(colour, intensity):
    # Accepts either a colour name (key of ``colours``) or a 1-based ring
    # number; returns True on success, raises ValueError for unknown names.
    if not isinstance(colour, int):
        if colour in colours:
            ring(colours[colour], intensity)
            return True
        else:
            raise ValueError("Invalid Colour")
        # NOTE(review): unreachable -- both branches above return or raise.
        return False
    ring(colour-1, intensity)
    return True
def all(value):
    # Set every channel to the same brightness (shadows the builtin ``all``).
    set(0, [value]*18)
def clear():
    # Zero the buffer (applied on the next show(), or at once if auto_update).
    set(0, [0]*18)
def off():
    # Zero everything and push the update to the LEDs immediately.
    all(0)
    show()
def _exit():
    # atexit hook: optionally blank the display when the program ends.
    if clear_on_exit:
        off()
atexit.register(_exit)
| [
"phil@gadgetoid.com"
] | phil@gadgetoid.com |
1ab2dafa56e225f40ea46f42f12efa3c77ff3108 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/51/usersdata/120/20281/submittedfiles/listas.py | 396d08af8cee48625cc848246acf170897a490a0 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # -*- coding: utf-8 -*-
from __future__ import division
def degrau(lista):
    """Return the largest absolute difference ("step") between consecutive
    elements of ``lista``; 0 for lists with fewer than two elements.

    Bug fixes: the original read the global ``a`` instead of its parameter,
    used ``math.fabs`` without importing ``math``, and shadowed its own
    name with the loop variable ``degrau``.
    """
    maior = 0
    for i in range(len(lista) - 1):
        # abs() needs no import and keeps integer inputs as ints.
        salto = abs(lista[i] - lista[i + 1])
        if salto > maior:
            maior = salto
    return maior
# Read n numbers from the user and print the largest consecutive step.
# Bug fixes: the original was a SyntaxError (unclosed paren on the append
# line), never called degrau(), and printed the undefined name ``maior``.
a = []
n = int(input('insira o numero de termos da lista:'))
for i in range(0, n, 1):
    a.append(float(input('digite um elemento de a:')))
print(degrau(a))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
10b048dcdfba609dde36b77f50d00d1d7bdb14c4 | ba7134468cb18014fe2e3e1513382fa52aafd4eb | /03_Python_network_programming/003_HTTP_Web服务器/004_Web静态服务器_多线程threading.Thread_多任务高并发.py | e24ff1c71c9bed28feb84b9f632b5cd39fd8d1ff | [] | no_license | FelixZFB/Python_advanced_learning | 4e44616b390e1c6e7da37229c7ad48c069cee71b | a71a6d733ed2134a79f02a6488807862b23438b8 | refs/heads/master | 2021-06-27T11:15:07.754719 | 2020-11-20T02:41:25 | 2020-11-20T02:41:25 | 183,116,714 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,039 | py | # -*- coding:utf-8 -*-
# 创建一个Web服务器,客户端请求后,返回显示所需要的页面
# 下面代码中已经加入了html文件夹的系统路径
# 打开一个网页后,自连接都可以打开了
# 程序会根据请求提取出名字,然后进入到html文件中查找匹配相关文件
# 然后再浏览器中显示出来
import socket
import re
import threading
def service_client(new_socket):
    """Serve one HTTP client: parse the request and send back the file."""
    # 1. Receive the browser's HTTP request.
    request_data = new_socket.recv(1024).decode("utf-8")
    # Split the request into individual header lines.
    request_header_lines = request_data.splitlines()
    if not request_header_lines:
        # Peer closed the connection without sending anything; the old
        # code raised IndexError on request_header_lines[0] here.
        new_socket.close()
        return
    # Pretty-print the request for debugging.
    for line in request_header_lines:
        print(line)
    # Extract the requested resource from the request line, e.g.
    # "GET /index.html HTTP/1.1" -> "/index.html".
    request_line = request_header_lines[0]
    match = re.match("[^/]+(/[^ ]*)", request_line)
    if match is None:
        # Malformed request line: fall back to the home page instead of
        # crashing on ``None.group`` as the old code did.
        get_file_name = "/index.html"
    else:
        get_file_name = match.group(1)
    # All pages live under the local ./html directory.
    get_file_name = "./html" + get_file_name  # ./html/index.html
    print("file name is ===>%s" % get_file_name)
    print('*' * 50)
    # 2. Build and send an HTTP response.
    try:
        f = open(get_file_name, 'rb')
    except OSError:
        # Requested page does not exist (or is unreadable): 404.
        response_header = "HTTP/1.1 404 not found\r\n"
        response_header += "\r\n"
        # Bug fix: the body is sent raw below, so it must be bytes --
        # sending a str raised TypeError on Python 3.
        response_body = "====sorry ,file not found====".encode("utf-8")
    else:
        # 2.1 Status line, then a blank line separating headers from body.
        response_header = "HTTP/1.1 200 OK\r\n"
        response_header += "\r\n"
        # The body is the raw (possibly binary) content of the file.
        response_body = f.read()
        f.close()
    finally:
        # 2.2 Send header and body separately; the body may be binary, so
        # the two cannot simply be concatenated as strings.
        new_socket.send(response_header.encode("utf-8"))
        new_socket.send(response_body)
    # 3. Close the per-client socket.
    new_socket.close()
def main():
    "Program entry point: set up the listening socket and dispatch clients."
    # 1. Create a TCP server socket.
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow immediate rebinding of port 7788 after the server closes first
    # (skips the TIME_WAIT delay of the server-side four-way teardown).
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # 2. Bind to all local interfaces on port 7788.
    server_socket.bind(("", 7788))
    # 3. Become a listening socket (backlog of 128 pending connections).
    server_socket.listen(128)
    # Serve forever: each accepted browser connection gets its own thread,
    # so the server handles many clients concurrently (refreshing the
    # browser issues a new request each time).
    while True:
        # 4. Wait for a new client; accept() returns a dedicated socket.
        new_socket, client_addr = server_socket.accept()
        # 5. Handle the client in a new thread. Note the trailing comma:
        #    ``args`` must be a tuple.
        new_process = threading.Thread(target=service_client, args=(new_socket, ))
        new_process.start()
        # Threads share new_socket with service_client, which closes it
        # when done -- do NOT close it here.

if __name__ == "__main__":
    main()
# 运行程序,打开浏览器,访问网址:http://127.0.0.1:7788/index.html
# 浏览器运行结果:
# 显示了一个html页面
# 如果随便访问一个网址:http://127.0.0.1:7788/index555.html,
# QQ浏览器则会无法显示此网页 错误代码 HTTP ERROR 404
# 火狐浏览器没有内容显示
# 打印出的请求头信息
# GET /index.html HTTP/1.1
# Host: 127.0.0.1:7788
# Connection: keep-alive
# Upgrade-Insecure-Requests: 1
# User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3719.400 QQBrowser/10.5.3715.400
# Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8
# Accept-Encoding: gzip, deflate, br
# Accept-Language: zh-CN,zh;q=0.9
#
# file name is ===>./html/index.html
# ************************************************** | [
"18200116656@qq.com"
] | 18200116656@qq.com |
b601ab4cd9d6945c2924065d897b8602b0755205 | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/103_Tableaux/fusion.py | 580e9aa10bfa32f0c6630f48ce9e9bebfdb0224a | [] | no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | # fusion.py
# fusion de listes triées
# Programmer efficacement chap 4
# Mon implémentation sans regarder cette du livre!
# 2022-05-25 PV
def fusion(l1: list[int], l2: list[int]) -> list[int]:
    """Merge two already-sorted lists into one sorted list.

    Stable on ties: equal elements are taken from ``l1`` first.
    """
    merged: list[int] = []
    i = j = 0
    while i < len(l1) and j < len(l2):
        if l1[i] <= l2[j]:
            merged.append(l1[i])
            i += 1
        else:
            merged.append(l2[j])
            j += 1
    # At most one of the two tails is non-empty; append whatever remains.
    merged += l1[i:]
    merged += l2[j:]
    return merged
# For verification
def is_sorted(l: list[int]) -> bool:
    """True when ``l`` is in non-decreasing order (trivially true for len < 2)."""
    for prev, cur in zip(l, l[1:]):
        if prev > cur:
            return False
    return True
# assert(is_sorted([1,2,2,3]))
# assert(not is_sorted([4,1,2]))
# assert(is_sorted([0]))
# assert(is_sorted([]))
# Build two sorted test inputs (multiples of 5 and multiples of 7).
l1 = list(i*5 for i in range(15))
l2 = list(i*7 for i in range(12))
print(l1)
print(l2)
f = fusion(l1, l2)
print(f)
# Sanity checks: the merge keeps every element and the result is sorted.
assert(len(f) == len(l1)+len(l2))
assert(all(x in f for x in l1))
assert(all(x in f for x in l2))
assert(is_sorted(f))
| [
"FrenchBear38@outlook.com"
] | FrenchBear38@outlook.com |
39c9cae21ab1ee0cf7cdede5d4282ed5af383a1c | f28bf07217731a8d97fa3d7029df11b2a0506650 | /maddpg/buffer.py | 78c8abeac3863a7af9c51688bc94145d155c2fbf | [
"MIT"
] | permissive | postBG/deep-reinforcement-learning | 7465f7698d91363c8bacd791467f1dbb44bee9a9 | 5df5662b091c4c3f00beba1aa6f9ce8a52001c93 | refs/heads/master | 2022-12-18T20:06:08.245460 | 2021-09-05T09:26:17 | 2021-09-05T09:26:17 | 169,988,821 | 2 | 0 | MIT | 2022-12-08T01:44:34 | 2019-02-10T14:48:16 | Jupyter Notebook | UTF-8 | Python | false | false | 666 | py | from collections import deque
import random
from utilities import transpose_list
class ReplayBuffer:
    """Fixed-capacity FIFO buffer of transitions for experience replay.

    Oldest entries are evicted automatically once ``size`` is reached.
    """

    def __init__(self, size):
        self.size = size
        self.deque = deque(maxlen=self.size)

    def push(self, transition):
        """push into the buffer"""
        # Transpose the incoming transition and store each row separately.
        for item in transpose_list(transition):
            self.deque.append(item)

    def sample(self, batchsize):
        """sample from the buffer"""
        drawn = random.sample(self.deque, batchsize)
        # transpose list of list
        return transpose_list(drawn)

    def __len__(self):
        return len(self.deque)
| [
"profile2697@gmail.com"
] | profile2697@gmail.com |
71f85e3a685ada74364b5df598a424a483de3dc9 | d7e160a2512b9d70b18adbffde4c6d9a61521a12 | /DFS/타겟 넘버.py | 4b22f473b59b4c2309755e984a9b52cd37ed5ce7 | [] | no_license | EoJin-Kim/CodingTest | 14b6cf7a3bb45954c065efdf9d1e05143cb321a3 | 975c753ee572f605f4d9a12a3dc54ab0d437dade | refs/heads/master | 2023-06-19T16:06:50.625143 | 2021-07-14T13:10:17 | 2021-07-14T13:10:17 | 356,877,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | from itertools import permutations,product
'''
def solution(numbers, target):
answer = 0
length = len(numbers)
expression = [(0,1) for i in range(length)]
for exp in product(*expression):
result=0
for i in range(length):
if exp[i]==0:
result+=-numbers[i]
else:
result+=numbers[i]
if result==target:
answer+=1
return answer
'''
answer=0
def solution(numbers, target):
global answer
dfs(0,numbers,0,target)
return answer
def dfs(idx,numbers,temp,target):
global answer
length = len(numbers)
if idx == length and temp==target:
answer+=1
return
if idx==length:
return
dfs(idx + 1, numbers, temp - numbers[idx],target)
dfs(idx + 1, numbers, temp + numbers[idx],target)
print(solution([1, 1, 1, 1, 1],3)) | [
"62640679+EoJin-Kim@users.noreply.github.com"
] | 62640679+EoJin-Kim@users.noreply.github.com |
5e414e62692567069edb6c5a647221bd64902bba | aea8fea216234fd48269e4a1830b345c52d85de2 | /fhir/resources/devicemetric.py | ed3ffc3f990d216dd34ff39c1317ddd8efaf505f | [
"BSD-3-Clause"
] | permissive | mmabey/fhir.resources | 67fce95c6b35bfdc3cbbc8036e02c962a6a7340c | cc73718e9762c04726cd7de240c8f2dd5313cbe1 | refs/heads/master | 2023-04-12T15:50:30.104992 | 2020-04-11T17:21:36 | 2020-04-11T17:21:36 | 269,712,884 | 0 | 0 | NOASSERTION | 2020-06-05T17:03:04 | 2020-06-05T17:03:04 | null | UTF-8 | Python | false | false | 6,904 | py | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/DeviceMetric
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import sys
from . import backboneelement, domainresource
class DeviceMetric(domainresource.DomainResource):
    """ Measurement, calculation or setting capability of a medical device.
    Describes a measurement, calculation or setting capability of a medical
    device.
    """

    # FHIR resource type name used for (de)serialization dispatch.
    resource_type = "DeviceMetric"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.calibration = None
        """ Describes the calibrations that have been performed or that are
        required to be performed.
        List of `DeviceMetricCalibration` items (represented as `dict` in JSON). """
        self.category = None
        """ measurement | setting | calculation | unspecified.
        Type `str`. """
        self.color = None
        """ black | red | green | yellow | blue | magenta | cyan | white.
        Type `str`. """
        self.identifier = None
        """ Instance identifier.
        List of `Identifier` items (represented as `dict` in JSON). """
        self.measurementPeriod = None
        """ Describes the measurement repetition time.
        Type `Timing` (represented as `dict` in JSON). """
        self.operationalStatus = None
        """ on | off | standby | entered-in-error.
        Type `str`. """
        self.parent = None
        """ Describes the link to the parent Device.
        Type `FHIRReference` referencing `['Device']` (represented as `dict` in JSON). """
        self.source = None
        """ Describes the link to the source Device.
        Type `FHIRReference` referencing `['Device']` (represented as `dict` in JSON). """
        self.type = None
        """ Identity of metric, for example Heart Rate or PEEP Setting.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.unit = None
        """ Unit of Measure for the Metric.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        super(DeviceMetric, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple describes one element:
        # (attribute name, JSON key, type, FHIR type name,
        #  is a list, "one of many" group, required).
        js = super(DeviceMetric, self).elementProperties()
        js.extend(
            [
                (
                    "calibration",
                    "calibration",
                    DeviceMetricCalibration,
                    "DeviceMetricCalibration",
                    True,
                    None,
                    False,
                ),
                ("category", "category", str, "code", False, None, True),
                ("color", "color", str, "code", False, None, False),
                (
                    "identifier",
                    "identifier",
                    identifier.Identifier,
                    "Identifier",
                    True,
                    None,
                    False,
                ),
                (
                    "measurementPeriod",
                    "measurementPeriod",
                    timing.Timing,
                    "Timing",
                    False,
                    None,
                    False,
                ),
                (
                    "operationalStatus",
                    "operationalStatus",
                    str,
                    "code",
                    False,
                    None,
                    False,
                ),
                (
                    "parent",
                    "parent",
                    fhirreference.FHIRReference,
                    "Reference",
                    False,
                    None,
                    False,
                ),
                (
                    "source",
                    "source",
                    fhirreference.FHIRReference,
                    "Reference",
                    False,
                    None,
                    False,
                ),
                (
                    "type",
                    "type",
                    codeableconcept.CodeableConcept,
                    "CodeableConcept",
                    False,
                    None,
                    True,
                ),
                (
                    "unit",
                    "unit",
                    codeableconcept.CodeableConcept,
                    "CodeableConcept",
                    False,
                    None,
                    False,
                ),
            ]
        )
        return js
class DeviceMetricCalibration(backboneelement.BackboneElement):
    """ Describes the calibrations that have been performed or that are required to
    be performed.
    """

    # FHIR backbone-element type name used for (de)serialization dispatch.
    resource_type = "DeviceMetricCalibration"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.state = None
        """ not-calibrated | calibration-required | calibrated | unspecified.
        Type `str`. """
        self.time = None
        """ Describes the time last calibration has been performed.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.type = None
        """ unspecified | offset | gain | two-point.
        Type `str`. """
        super(DeviceMetricCalibration, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Tuple layout as in DeviceMetric.elementProperties: (attribute,
        # JSON key, type, FHIR type name, is list, "of many" group, required).
        js = super(DeviceMetricCalibration, self).elementProperties()
        js.extend(
            [
                ("state", "state", str, "code", False, None, False),
                ("time", "time", fhirdate.FHIRDate, "instant", False, None, False),
                ("type", "type", str, "code", False, None, False),
            ]
        )
        return js
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + ".codeableconcept"]
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + ".fhirdate"]
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + ".fhirreference"]
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + ".identifier"]
try:
from . import timing
except ImportError:
timing = sys.modules[__package__ + ".timing"]
| [
"connect2nazrul@gmail.com"
] | connect2nazrul@gmail.com |
c94938e0933471f0917f589c92279f67677f80c3 | 4492b8daf969f839f7803d1af1d3f80858dddd98 | /docs/source/webobapp.py | 28a0fe44c2dea7a8cea687116296c4bdba372815 | [] | no_license | aodag/my-pyramid-katas | 59af3b25dfdf68bcd7434cdcb4258c08c43b1012 | 99cf99ffb646c428cfb3c9d6eec7e593d865d576 | refs/heads/master | 2020-04-06T03:31:59.912229 | 2016-06-12T13:14:07 | 2016-06-12T13:14:07 | 34,158,089 | 0 | 1 | null | 2016-06-12T13:14:09 | 2015-04-18T08:14:55 | null | UTF-8 | Python | false | false | 241 | py | from webob import Request, Response
def application(environ, start_response):
request = Request(environ)
response = Response(request=request)
response.text = "Hello, world!"
return response(environ, start_response)
| [
"aodagx@gmail.com"
] | aodagx@gmail.com |
7bad46d3469e4d662073157681c56a6dcbe87753 | a5c33ac6d89b0445ff011d24943d441681fa4af3 | /tests/test_gino_sanic.py | 7f1f80f0573cb99af36c6eabaffd70e15dc209ec | [
"BSD-3-Clause"
] | permissive | python-gino/gino-sanic | b34490c879d02b163da468d8567bedea70a3afa7 | 7bc7e98989f1936a17f38ec352a3a7dc7d217753 | refs/heads/master | 2021-08-24T13:53:16.353243 | 2020-04-18T17:43:26 | 2020-04-18T17:43:26 | 229,087,530 | 5 | 7 | NOASSERTION | 2021-06-02T06:17:14 | 2019-12-19T15:47:24 | Python | UTF-8 | Python | false | false | 4,512 | py | import asyncio
import os
import ssl
import gino
import pytest
import sanic
from gino.ext.sanic import Gino
from sanic.response import text, json
DB_ARGS = dict(
host=os.getenv("DB_HOST", "localhost"),
port=os.getenv("DB_PORT", 5432),
user=os.getenv("DB_USER", "postgres"),
password=os.getenv("DB_PASS", ""),
database=os.getenv("DB_NAME", "postgres"),
)
PG_URL = "postgresql://{user}:{password}@{host}:{port}/{database}".format(**DB_ARGS)
_MAX_INACTIVE_CONNECTION_LIFETIME = 59.0
def teardown_module():
# sanic server will close the loop during shutdown
asyncio.set_event_loop(asyncio.new_event_loop())
# noinspection PyShadowingNames
async def _app(config):
app = sanic.Sanic()
app.config.update(config)
app.config.update(
{
"DB_KWARGS": dict(
max_inactive_connection_lifetime=_MAX_INACTIVE_CONNECTION_LIFETIME,
),
}
)
db = Gino(app)
class User(db.Model):
__tablename__ = "gino_users"
id = db.Column(db.BigInteger(), primary_key=True)
nickname = db.Column(db.Unicode(), default="noname")
@app.route("/")
async def root(request):
conn = await request["connection"].get_raw_connection()
# noinspection PyProtectedMember
assert conn._holder._max_inactive_time == _MAX_INACTIVE_CONNECTION_LIFETIME
return text("Hello, world!")
@app.route("/users/<uid:int>")
async def get_user(request, uid):
method = request.args.get("method")
q = User.query.where(User.id == uid)
if method == "1":
return json((await q.gino.first_or_404()).to_dict())
elif method == "2":
return json((await request["connection"].first_or_404(q)).to_dict())
elif method == "3":
return json((await db.bind.first_or_404(q)).to_dict())
elif method == "4":
return json((await db.first_or_404(q)).to_dict())
else:
return json((await User.get_or_404(uid)).to_dict())
@app.route("/users", methods=["POST"])
async def add_user(request):
u = await User.create(nickname=request.form.get("name"))
await u.query.gino.first_or_404()
await db.first_or_404(u.query)
await db.bind.first_or_404(u.query)
await request["connection"].first_or_404(u.query)
return json(u.to_dict())
e = await gino.create_engine(PG_URL)
try:
try:
await db.gino.create_all(e)
yield app
finally:
await db.gino.drop_all(e)
finally:
await e.close()
@pytest.fixture
def ssl_ctx():
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
return ctx
@pytest.fixture
async def app():
async for a in _app(
{
"DB_HOST": DB_ARGS["host"],
"DB_PORT": DB_ARGS["port"],
"DB_USER": DB_ARGS["user"],
"DB_PASSWORD": DB_ARGS["password"],
"DB_DATABASE": DB_ARGS["database"],
}
):
yield a
@pytest.fixture
async def app_ssl(ssl_ctx):
async for a in _app(
{
"DB_HOST": DB_ARGS["host"],
"DB_PORT": DB_ARGS["port"],
"DB_USER": DB_ARGS["user"],
"DB_PASSWORD": DB_ARGS["password"],
"DB_DATABASE": DB_ARGS["database"],
"DB_SSL": ssl_ctx,
}
):
yield a
@pytest.fixture
async def app_dsn():
async for a in _app({"DB_DSN": PG_URL}):
yield a
def _test_index_returns_200(app):
request, response = app.test_client.get("/")
assert response.status == 200
assert response.text == "Hello, world!"
def test_index_returns_200(app):
_test_index_returns_200(app)
def test_index_returns_200_dsn(app_dsn):
_test_index_returns_200(app_dsn)
def _test(app):
for method in "01234":
request, response = app.test_client.get("/users/1?method=" + method)
assert response.status == 404
request, response = app.test_client.post("/users", data=dict(name="fantix"))
assert response.status == 200
assert response.json == dict(id=1, nickname="fantix")
for method in "01234":
request, response = app.test_client.get("/users/1?method=" + method)
assert response.status == 200
assert response.json == dict(id=1, nickname="fantix")
def test(app):
_test(app)
def test_ssl(app_ssl):
_test(app_ssl)
def test_dsn(app_dsn):
_test(app_dsn)
| [
"fantix.king@gmail.com"
] | fantix.king@gmail.com |
cf097de52c3b6aa3b00f61889614ee3666b50615 | 70fa6468c768d4ec9b4b14fc94fa785da557f1b5 | /lib/surface/ml_engine/versions/__init__.py | c540906a54c9770851a877bd986b742229cbf2d1 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | kylewuolle/google-cloud-sdk | d43286ef646aec053ecd7eb58566ab2075e04e76 | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | refs/heads/master | 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 | NOASSERTION | 2019-02-04T19:04:40 | 2019-02-04T18:58:36 | Python | UTF-8 | Python | false | false | 1,229 | py | # -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml-engine versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Versions(base.Group):
"""Cloud ML Engine Versions commands.
A version is an implementation of a model, represented as a serialized
TensorFlow graph with trained parameters.
When you communicate with Cloud ML Engine services, you use the combination
of the model, version, and current project to identify a specific model
implementation that is deployed in the cloud.
"""
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
2982561042ff6a5c182705c130d0c7657f1b9216 | 1b94c7cfd66804fe8d40b5def35e4b9b18d69ba2 | /stubs/google/appengine/api/taskqueue/taskqueue_stub.pyi | 7fd103268159098ac1c4bf312540530a305e7979 | [
"MIT"
] | permissive | the-blue-alliance/the-blue-alliance | 3dc210a9611ce9b240907ffd420f78040318dcdc | 6d42f3cdb2f785d192f2871419e58aaae3445029 | refs/heads/py3 | 2023-08-22T21:02:36.398100 | 2023-08-22T19:14:01 | 2023-08-22T19:14:01 | 888,427 | 344 | 263 | MIT | 2023-09-14T18:35:20 | 2010-09-04T20:34:11 | HTML | UTF-8 | Python | false | false | 5,278 | pyi | from google.appengine.api import api_base_pb2 as api_base_pb2, apiproxy_stub as apiproxy_stub, apiproxy_stub_map as apiproxy_stub_map, queueinfo as queueinfo, request_info as request_info
from google.appengine.api.taskqueue import taskqueue as taskqueue
from google.appengine.runtime import apiproxy_errors as apiproxy_errors
from google.appengine.tools import queue_xml_parser as queue_xml_parser
from typing import Any
DEFAULT_RATE: str
DEFAULT_RATE_FLOAT: float
DEFAULT_BUCKET_SIZE: int
MAX_ETA: Any
MAX_PULL_TASK_SIZE_BYTES: Any
MAX_PUSH_TASK_SIZE_BYTES: Any
MAX_TASK_SIZE = MAX_PUSH_TASK_SIZE_BYTES
MAX_REQUEST_SIZE: Any
BUILT_IN_HEADERS: Any
DEFAULT_QUEUE_NAME: str
INF: float
QUEUE_MODE: Any
AUTOMATIC_QUEUES: Any
TIME_STR_FMT: str
def QueryTasksResponseToDict(queue_name, task_response, now, task_add_request_pb: Any | None = ...): ...
def ConvertGetQueuesResponseToQueuesDicts(response): ...
def ConvertTaskDictToTaskObject(task): ...
class _Group:
gettime: Any
def __init__(self, queue_yaml_parser: Any | None = ..., app_id: Any | None = ..., _all_queues_valid: bool = ..., _update_newest_eta: Any | None = ..., _testing_validate_state: bool = ..., gettime=...): ...
def GetQueuesAsDicts(self): ...
def HasQueue(self, queue_name): ...
def GetQueue(self, queue_name): ...
def GetQueues(self): ...
def GetNextPushTask(self): ...
def BulkAdd_Rpc(self, request, response) -> None: ...
def UpdateQueue_Rpc(self, request, response) -> None: ...
def FetchQueues_Rpc(self, request, response) -> None: ...
def FetchQueueStats_Rpc(self, request, response) -> None: ...
def QueryTasks_Rpc(self, request, response) -> None: ...
def FetchTask_Rpc(self, request, response) -> None: ...
def Delete_Rpc(self, request, response) -> None: ...
def DeleteQueue_Rpc(self, request, response) -> None: ...
def PauseQueue_Rpc(self, request, response) -> None: ...
def PurgeQueue_Rpc(self, request, response) -> None: ...
def QueryAndOwnTasks_Rpc(self, request, response) -> None: ...
def ModifyTaskLease_Rpc(self, request, response) -> None: ...
class Retry:
def __init__(self, task, queue) -> None: ...
def CanRetry(self, retry_count, age_usec): ...
def CalculateBackoffUsec(self, retry_count): ...
class _Queue:
queue_name: Any
bucket_refill_per_second: Any
bucket_capacity: Any
user_specified_rate: Any
retry_parameters: Any
max_concurrent_requests: Any
paused: Any
queue_mode: Any
acl: Any
target: Any
gettime: Any
task_name_archive: Any
task_add_request_pbs: Any
def __init__(self, queue_name, bucket_refill_per_second=..., bucket_capacity=..., user_specified_rate=..., retry_parameters: Any | None = ..., max_concurrent_requests: Any | None = ..., paused: bool = ..., queue_mode=..., acl: Any | None = ..., _testing_validate_state: Any | None = ..., target: Any | None = ..., gettime=...): ...
def VerifyIndexes(self) -> None: ...
def UpdateQueue_Rpc(self, request, response) -> None: ...
def FetchQueues_Rpc(self, request, response) -> None: ...
def QueryTasks_Rpc(self, request, response) -> None: ...
def FetchTask_Rpc(self, request, response) -> None: ...
def Delete_Rpc(self, request, response) -> None: ...
def QueryAndOwnTasks_Rpc(self, request, response) -> None: ...
def ModifyTaskLease_Rpc(self, request, response) -> None: ...
def IncRetryCount(self, task_name) -> None: ...
def GetTasksAsDicts(self): ...
def GetTaskAsDict(self, task_name): ...
def PurgeQueue(self) -> None: ...
def RunTaskNow(self, task) -> None: ...
def PostponeTask(self, task, new_eta_usec) -> None: ...
def Lookup(self, maximum, name: Any | None = ..., eta: Any | None = ...): ...
def Count(self): ...
def OldestTask(self): ...
def Oldest(self): ...
def Add(self, request, now) -> None: ...
def Delete(self, name): ...
def Populate(self, num_tasks): ...
class _TaskExecutor:
def __init__(self, default_host, request_data) -> None: ...
def ExecuteTask(self, task, queue): ...
class _BackgroundTaskScheduler:
task_executor: Any
default_retry_seconds: Any
def __init__(self, group, task_executor, retry_seconds, **kwargs) -> None: ...
def UpdateNextEventTime(self, next_event_time) -> None: ...
def Shutdown(self) -> None: ...
def MainLoop(self) -> None: ...
class TaskQueueServiceStub(apiproxy_stub.APIProxyStub):
THREADSAFE: bool
gettime: Any
def __init__(self, service_name: str = ..., root_path: Any | None = ..., queue_config_path: Any | None = ..., auto_task_running: bool = ..., task_retry_seconds: int = ..., _all_queues_valid: bool = ..., default_http_server: str = ..., _testing_validate_state: bool = ..., request_data: Any | None = ..., gettime=...): ...
def EnableAutoTaskRunning(self) -> None: ...
def StartBackgroundExecution(self) -> None: ...
def Shutdown(self) -> None: ...
def GetQueues(self): ...
def GetTasks(self, queue_name): ...
def DeleteTask(self, queue_name, task_name) -> None: ...
def FlushQueue(self, queue_name) -> None: ...
def Clear(self): ...
def get_filtered_tasks(self, url: Any | None = ..., name: Any | None = ..., queue_names: Any | None = ...): ...
| [
"noreply@github.com"
] | the-blue-alliance.noreply@github.com |
916a1b892d9ee34ba3b09ce362ccd49f91b02fb8 | c10ef416832b3e99e58fb93c85f414d94bbdbc2e | /py3canvas/tests/peer_reviews.py | 3f74127f0550f90e37aece9a9ce32d0dd96a9fde | [
"MIT"
] | permissive | tylerclair/py3canvas | 83bab26d1624a11acffaeb0392c6a9a38f995f16 | 7485d458606b65200f0ffa5bbe597a9d0bee189f | refs/heads/master | 2021-10-26T03:27:48.418437 | 2021-10-23T15:07:26 | 2021-10-23T15:07:26 | 92,841,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,518 | py | """PeerReviews API Tests for Version 1.0.
This is a testing template for the generated PeerReviewsAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.peer_reviews import PeerReviewsAPI
from py3canvas.apis.peer_reviews import Peerreview
class TestPeerReviewsAPI(unittest.TestCase):
"""Tests for the PeerReviewsAPI."""
def setUp(self):
self.client = PeerReviewsAPI(secrets.instance_address, secrets.access_token)
def test_get_all_peer_reviews_courses_peer_reviews(self):
"""Integration test for the PeerReviewsAPI.get_all_peer_reviews_courses_peer_reviews method."""
course_id = None # Change me!!
assignment_id = None # Change me!!
r = self.client.get_all_peer_reviews_courses_peer_reviews(
assignment_id, course_id, include=None
)
def test_get_all_peer_reviews_sections_peer_reviews(self):
"""Integration test for the PeerReviewsAPI.get_all_peer_reviews_sections_peer_reviews method."""
section_id = None # Change me!!
assignment_id = None # Change me!!
r = self.client.get_all_peer_reviews_sections_peer_reviews(
assignment_id, section_id, include=None
)
def test_get_all_peer_reviews_courses_submissions(self):
"""Integration test for the PeerReviewsAPI.get_all_peer_reviews_courses_submissions method."""
course_id = None # Change me!!
assignment_id = None # Change me!!
submission_id = None # Change me!!
r = self.client.get_all_peer_reviews_courses_submissions(
assignment_id, course_id, submission_id, include=None
)
def test_get_all_peer_reviews_sections_submissions(self):
"""Integration test for the PeerReviewsAPI.get_all_peer_reviews_sections_submissions method."""
section_id = None # Change me!!
assignment_id = None # Change me!!
submission_id = None # Change me!!
r = self.client.get_all_peer_reviews_sections_submissions(
assignment_id, section_id, submission_id, include=None
)
def test_create_peer_review_courses(self):
"""Integration test for the PeerReviewsAPI.create_peer_review_courses method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_create_peer_review_sections(self):
"""Integration test for the PeerReviewsAPI.create_peer_review_sections method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_delete_peer_review_courses(self):
"""Integration test for the PeerReviewsAPI.delete_peer_review_courses method."""
course_id = None # Change me!!
assignment_id = None # Change me!!
submission_id = None # Change me!!
user_id = None # Change me!!
r = self.client.delete_peer_review_courses(
assignment_id, course_id, submission_id, user_id
)
def test_delete_peer_review_sections(self):
"""Integration test for the PeerReviewsAPI.delete_peer_review_sections method."""
section_id = None # Change me!!
assignment_id = None # Change me!!
submission_id = None # Change me!!
user_id = None # Change me!!
r = self.client.delete_peer_review_sections(
assignment_id, section_id, submission_id, user_id
)
| [
"tyler.clair@gmail.com"
] | tyler.clair@gmail.com |
018066b50c689152a24b32c9026365c72603e07e | 398d6a7fb50a2485ef17319028da73a94b166ef8 | /01Factory/1-8.py | d11ca0024613770a0e91a31efae0333d0a306ce1 | [] | no_license | Yuanoung/TemplateOfDesign | 3e7675de97d90f354b32de6863ad8c4b7e2b338a | ea1635ec42d12b1c869db20a31292e063e5d200e | refs/heads/master | 2021-04-30T14:28:08.961904 | 2018-02-12T09:52:36 | 2018-02-12T09:52:36 | 121,216,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | class Operation(object):
@staticmethod
def getResult(numberA, op, numberB):
if op == "+":
return numberA + numberB
elif op == "-":
return numberA - numberB
elif op == "*":
return numberA * numberB
else:
return numberA / numberB | [
"yuanoung@outlook.com"
] | yuanoung@outlook.com |
da4403af4ed8be3e36cf4bb7252b4e9888b00a01 | 93c02201c60da7f9f231f0a87ffe8b32729ce229 | /Array/kth-smallest-element.py | a00353263dc369f7777f5c84e850f8a1c7eefbd2 | [] | no_license | sudo-hemant/curated_questions_dsa | 54aebe021f58f757e519f508f9269798127c7495 | 8f1097274bfd1d2c6f25def6c16982bbf6d7461c | refs/heads/master | 2023-03-14T07:53:00.652525 | 2021-02-27T07:16:22 | 2021-02-27T07:16:22 | 339,599,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py |
# NOTE: it is min heap, every time we pop element, it pops minimum element
# and we need smallest element, so we are going to push element by changing its sign
import heapq
def kthSmallest(arr, l, r, k):
'''
arr : given array
l : starting index of the array i.e 0
r : ending index of the array i.e size-1
k : find kth smallest element and return using this function
'''
heap = []
for num in arr:
if len(heap) < k:
heapq.heappush(heap, -1 * num)
else:
curr_min = -1 * heapq.heappop(heap)
heapq.heappush(heap, -1 * min(curr_min, num))
return -1 * heapq.heappop(heap)
| [
"sudohemant@gmail.com"
] | sudohemant@gmail.com |
10d8de2da1b5848136bd6aa8f35b12ce3cfa57a3 | a39f8f5b66314b1084f3a88d8d7a1a791292ea2c | /examples/comboBox1.py | e32adc5343234eac78948d372b4ffc33fa5dc491 | [] | no_license | zwlyn/pyqt5try | 6d62e28795390c3049ddb6cbb6b761002149e47a | 21931c26f0d87b2aa3ba80eef5f0fcd46d54bcdd | refs/heads/master | 2020-08-01T02:57:17.683792 | 2019-10-09T11:10:11 | 2019-10-09T11:10:11 | 210,837,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | # -*- coding: utf_8 -*-
import sys
from PyQt5.QtWidgets import (QWidget, QLabel,
QComboBox, QApplication)
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.lbl = QLabel("Ubuntu", self)
combo = QComboBox(self)
combo.addItem("Ubuntu")
combo.addItem("Mandriva")
combo.addItem("Fedora")
combo.addItem("Arch")
combo.addItem("Gentoo")
combo.move(50, 50 )
self.lbl.move(50, 150)
combo.activated[str].connect(self.onActivated)
self.setGeometry(300, 300, 300, 200)
self.setWindowTitle('QComboBox')
self.show()
def onActivated(self, text):
self.lbl.setText(text)
self.lbl.adjustSize()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
| [
"1666013677@qq.com"
] | 1666013677@qq.com |
42ecacc92a7a98c20a2a14c5f509bf7fb1ac7325 | 060c340a1f0d24fbf7a3aae573f59ebe2f8a6bbf | /dynamics.py | 9a83bedf9d84c999329aba22c4ccc2d70c7cbfba | [
"MIT"
] | permissive | pj1138/MayaToolbox | 0052f070de12b34820c4ef107a587b602afe94de | 8eabd96eeedcb4242dba4ec15617bad3b81496d7 | refs/heads/master | 2021-01-13T11:57:57.638751 | 2017-09-24T18:11:27 | 2017-09-24T18:11:27 | 29,816,700 | 0 | 0 | null | 2015-01-25T14:29:02 | 2015-01-25T14:29:01 | null | UTF-8 | Python | false | false | 1,067 | py | # DYNAMICS
import pymel.core as py
import maya.cmds as mc
import maya.mel as mel
from math import *
from xml.dom.minidom import *
from random import uniform as rnd
import os
import re
#~~
from mayatoolbox import *
from animation import *
def quickDyn(spread=5, num=10, joints=False, bake=False):
target = []
g = py.gravity()
for i in range(0,num):
c = py.polyCube()
target.append(c)
x = rnd(-spread,spread)
y = rnd(-spread,spread) + 10
z = rnd(-spread,spread)
py.move(x,y,z)
py.rotate(x,y,z)
s(target)
py.rigidBody()
for i in range(0,len(target)):
py.connectDynamic(target[i],f=g)
if(joints==False and bake==True):
bakeAnimation(target)
if(joints==True):
target2 = []
for i in range(0,len(target)):
s(target[i])
jnt = py.joint()
target2.append(jnt)
if(bake==True):
bakeAnimation(target2)
for i in range(0,len(target2)):
unparent(target2[i])
| [
"nick@fox-gieg.com"
] | nick@fox-gieg.com |
8924478933f7a7524dcaab4f3c7882e74a847575 | 01fdd206c8c825b30870bdd3f6e75f0aa113b849 | /test/record/parser/test_response_whois_nic_tr_status_registered.py | 58c44cf2045188ef955e413f45503f6c8c2d0294 | [
"MIT"
] | permissive | huyphan/pyyawhois | 0fbc5a7d64a53ae6e3393fdc1c7ff0d0ac5f22b5 | 77fb2f73a9c67989f1d41d98f37037406a69d136 | refs/heads/master | 2021-01-23T22:42:55.989651 | 2015-09-19T16:40:06 | 2015-09-19T16:40:06 | 23,335,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,978 | py |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.tr/status_registered
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicTrStatusRegistered(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.nic.tr/status_registered.txt"
host = "whois.nic.tr"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'registered')
def test_available(self):
eq_(self.record.available, False)
def test_domain(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(len(self.record.nameservers), 4)
eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[0].name, "ns1.google.com")
eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[1].name, "ns2.google.com")
eq_(self.record.nameservers[2].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[2].name, "ns3.google.com")
eq_(self.record.nameservers[3].__class__.__name__, 'Nameserver')
eq_(self.record.nameservers[3].name, "ns4.google.com")
def test_admin_contacts(self):
eq_(self.record.admin_contacts.__class__.__name__, 'list')
eq_(len(self.record.admin_contacts), 1)
eq_(self.record.admin_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.admin_contacts[0].type, yawhois.record.Contact.TYPE_ADMINISTRATIVE)
eq_(self.record.admin_contacts[0].id, "mi154-metu")
eq_(self.record.admin_contacts[0].name, None)
eq_(self.record.admin_contacts[0].organization, "MarkMonitor, Inc")
eq_(self.record.admin_contacts[0].address, "Hidden upon user request")
eq_(self.record.admin_contacts[0].city, None)
eq_(self.record.admin_contacts[0].zip, None)
eq_(self.record.admin_contacts[0].state, None)
eq_(self.record.admin_contacts[0].country, None)
eq_(self.record.admin_contacts[0].country_code, None)
eq_(self.record.admin_contacts[0].phone, "Hidden upon user request")
eq_(self.record.admin_contacts[0].fax, "Hidden upon user request")
eq_(self.record.admin_contacts[0].email, None)
def test_registered(self):
eq_(self.record.registered, True)
def test_created_on(self):
eq_(self.record.created_on.__class__.__name__, 'datetime')
eq_(self.record.created_on, time_parse('2001-08-23 00:00:00 UTC'))
def test_registrar(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.registrar)
def test_registrant_contacts(self):
eq_(self.record.registrant_contacts.__class__.__name__, 'list')
eq_(len(self.record.registrant_contacts), 1)
eq_(self.record.registrant_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.registrant_contacts[0].type, yawhois.record.Contact.TYPE_REGISTRANT)
eq_(self.record.registrant_contacts[0].id, None)
eq_(self.record.registrant_contacts[0].name, "Google Inc.")
eq_(self.record.registrant_contacts[0].organization, None)
eq_(self.record.registrant_contacts[0].address, "1600 Amphitheatre Parkway\nMountain View CA")
eq_(self.record.registrant_contacts[0].city, None)
eq_(self.record.registrant_contacts[0].zip, None)
eq_(self.record.registrant_contacts[0].state, None)
eq_(self.record.registrant_contacts[0].country, "United States of America")
eq_(self.record.registrant_contacts[0].country_code, None)
eq_(self.record.registrant_contacts[0].phone, "+ 1-650-2530000-")
eq_(self.record.registrant_contacts[0].fax, "+ 1-650-2530001-")
eq_(self.record.registrant_contacts[0].email, "dns-admin@google.com")
def test_technical_contacts(self):
eq_(self.record.technical_contacts.__class__.__name__, 'list')
eq_(len(self.record.technical_contacts), 1)
eq_(self.record.technical_contacts[0].__class__.__name__, 'Contact')
eq_(self.record.technical_contacts[0].type, yawhois.record.Contact.TYPE_TECHNICAL)
eq_(self.record.technical_contacts[0].id, "btl1-metu")
eq_(self.record.technical_contacts[0].name, None)
eq_(self.record.technical_contacts[0].organization, "BERÝL TEKNOLOJÝ LTD. ÞTÝ.")
eq_(self.record.technical_contacts[0].address, "Ceyhun Atuf Kansu Cad. Bayraktar Ýþ Merkezi\nNo:114 G-4 Balgat\nAnkara,06520\nTürkiye")
eq_(self.record.technical_contacts[0].city, None)
eq_(self.record.technical_contacts[0].zip, None)
eq_(self.record.technical_contacts[0].state, None)
eq_(self.record.technical_contacts[0].country, None)
eq_(self.record.technical_contacts[0].country_code, None)
eq_(self.record.technical_contacts[0].phone, "+ 90-312-4733035-")
eq_(self.record.technical_contacts[0].fax, "+ 90-312-4733039-")
eq_(self.record.technical_contacts[0].email, None)
def test_updated_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.updated_on)
def test_domain_id(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain_id)
def test_expires_on(self):
eq_(self.record.expires_on.__class__.__name__, 'datetime')
eq_(self.record.expires_on, time_parse('2014-08-22 00:00:00 UTC'))
def test_disclaimer(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.disclaimer)
| [
"dachuy@gmail.com"
] | dachuy@gmail.com |
5bc5b5cfffe723fe4f784cb6707c7b054ae384ae | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-imageenhan/aliyunsdkimageenhan/request/v20190930/RecolorImageRequest.py | ff730d30e823cf7d8caf5085eb77484efb0abc51 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,317 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkimageenhan.endpoint import endpoint_data
class RecolorImageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'imageenhan', '2019-09-30', 'RecolorImage','imageenhan')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Mode(self): # String
return self.get_body_params().get('Mode')
def set_Mode(self, Mode): # String
self.add_body_params('Mode', Mode)
def get_ColorCount(self): # Integer
return self.get_body_params().get('ColorCount')
def set_ColorCount(self, ColorCount): # Integer
self.add_body_params('ColorCount', ColorCount)
def get_ColorTemplates(self): # RepeatList
return self.get_body_params().get('ColorTemplate')
def set_ColorTemplates(self, ColorTemplate): # RepeatList
for depth1 in range(len(ColorTemplate)):
if ColorTemplate[depth1].get('Color') is not None:
self.add_body_params('ColorTemplate.' + str(depth1 + 1) + '.Color', ColorTemplate[depth1].get('Color'))
def get_Url(self): # String
return self.get_body_params().get('Url')
def set_Url(self, Url): # String
self.add_body_params('Url', Url)
def get_RefUrl(self): # String
return self.get_body_params().get('RefUrl')
def set_RefUrl(self, RefUrl): # String
self.add_body_params('RefUrl', RefUrl)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
76fbb1b839f66900f0ee35447d47bdd4a00613bb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03785/s325179868.py | b238757b012a1f39080269d6ad3146db49516bb8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | import sys
def input(): return sys.stdin.readline().strip()
def resolve():
n,c,k=map(int, input().split())
l=[int(input()) for i in range(n)]
l.sort()
saisyo=l[0]
ninzu=1
ans=0
for j in range(1,n):
x=l[j]-saisyo
if x<=k and ninzu<c:
ninzu+=1
else:
ans+=1
saisyo=l[j]
ninzu=1
print(ans+1)
resolve() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a308321b6894a652f10d50f0e278a3312baaee0a | 9fe1c431568746622ae9533d4694097e576f960c | /prophyle/increment_version.py | 0c14ee5978e3dcb21724d129abcae06d142371b3 | [
"MIT"
] | permissive | prophyle/prophyle | 819cc062714838cd543d99e65ec5075b5a54400c | aa1ed8eebe74d8557090422255f42ee18aaef839 | refs/heads/master | 2023-08-07T18:47:15.649605 | 2023-07-26T12:47:36 | 2023-07-26T12:47:36 | 49,076,502 | 13 | 3 | MIT | 2023-09-01T18:12:12 | 2016-01-05T16:03:04 | Python | UTF-8 | Python | false | false | 435 | py | #! /usr/bin/env python3
import os
import sys
vfn = os.path.join(os.path.dirname(sys.argv[0]), "version.py")
exec(open(vfn).read())
numbers = VERSION.split(".")
numbers[-1] = str(int(numbers[-1]) + 1)
version = ".".join(numbers)
with open(vfn, "w") as f:
f.write('try:\n')
f.write(' from __commit import *\n')
f.write('except ImportError:\n')
f.write(' pass\n')
f.write('VERSION = "{}"'.format(version))
| [
"karel.brinda@gmail.com"
] | karel.brinda@gmail.com |
909cd365350879c5c496c6bb6d5c9e72bffcfcbd | dbd387349cdbe73200f3a3ab1023abd8a885ad93 | /oauth/models.py | 786d8b3271d8b0689e94fe0f8368ffe54198c956 | [] | no_license | AS207960/oauth | 5cb81eaea656f52c39ab2b0f9619bcbc0941661d | 3304d2deac926e6de35fe7f0af71bd78e70423e8 | refs/heads/master | 2023-07-24T01:38:43.116526 | 2023-07-15T22:11:24 | 2023-07-15T22:11:24 | 330,265,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,960 | py | from django.db import models
from django.urls import reverse
from django.conf import settings
from django.contrib.auth import get_user_model
import uuid
import django_keycloak_auth.clients
import as207960_utils.models
def sync_resource_to_keycloak(self, display_name, resource_type, scopes, urn, view_name, super_save, args, kwargs):
uma_client = django_keycloak_auth.clients.get_uma_client()
token = django_keycloak_auth.clients.get_access_token()
created = False
if not self.pk:
created = True
super_save(*args, **kwargs)
create_kwargs = {
"name": f"{resource_type}_{self.id}",
"displayName": f"{display_name}: {str(self)}",
"ownerManagedAccess": True,
"scopes": scopes,
"type": urn,
"uri": reverse(view_name, args=(self.id,)) if view_name else None,
}
if created or not self.resource_id:
if self.user:
create_kwargs['owner'] = self.user.username
d = uma_client.resource_set_create(
token,
**create_kwargs
)
self.resource_id = d['_id']
super_save(*args, **kwargs)
else:
uma_client.resource_set_update(
token,
id=self.resource_id,
**create_kwargs
)
def delete_resource(resource_id):
uma_client = django_keycloak_auth.clients.get_uma_client()
token = django_keycloak_auth.clients.get_access_token()
uma_client.resource_set_delete(token, resource_id)
def get_object_ids(access_token, resource_type, action):
scope_name = f"{action}-{resource_type}"
permissions = django_keycloak_auth.clients.get_authz_client().get_permissions(access_token)
permissions = permissions.get("permissions", [])
permissions = filter(
lambda p: scope_name in p.get('scopes', []) and p.get('rsname', "").startswith(f"{resource_type}_"),
permissions
)
object_ids = list(map(lambda p: p['rsname'][len(f"{resource_type}_"):], permissions))
return object_ids
def eval_permission(token, resource, scope, submit_request=False):
resource = str(resource)
permissions = django_keycloak_auth.clients.get_authz_client().get_permissions(
token=token,
resource_scopes_tuples=[(resource, scope)],
submit_request=submit_request
)
for permission in permissions.get('permissions', []):
for scope in permission.get('scopes', []):
if permission.get('rsid') == resource and scope == scope:
return True
return False
def get_resource_owner(resource_id):
uma_client = django_keycloak_auth.clients.get_uma_client()
token = django_keycloak_auth.clients.get_access_token()
resource = uma_client.resource_set_read(token, resource_id)
owner = resource.get("owner", {}).get("id")
user = get_user_model().objects.filter(username=owner).first()
return user
class OAuthClient(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
realm = models.CharField(max_length=255)
client_id = models.CharField(max_length=255)
resource_id = models.UUIDField(null=True)
def __init__(self, *args, user=None, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
class Meta:
verbose_name = "OAuth Client"
verbose_name_plural = "OAuth Clients"
def __str__(self):
return self.client_id
@classmethod
def get_object_list(cls, access_token: str, action='view'):
return cls.objects.filter(pk__in=get_object_ids(access_token, 'oauth-client', action))
@classmethod
def has_class_scope(cls, access_token: str, action='view'):
scope_name = f"{action}-oauth-client"
return django_keycloak_auth.clients.get_authz_client() \
.eval_permission(access_token, f"oauth-client", scope_name)
def has_scope(self, access_token: str, action='view'):
scope_name = f"{action}-oauth-client"
return eval_permission(access_token, self.resource_id, scope_name)
def save(self, *args, **kwargs):
sync_resource_to_keycloak(
self,
display_name="OAuth Client", resource_type="oauth-client", scopes=[
'view-oauth-client',
'edit-oauth-client',
'delete-oauth-client',
],
urn="urn:as207960:domains:oauth_client", super_save=super().save, view_name='view_client',
args=args, kwargs=kwargs
)
def delete(self, *args, **kwargs):
super().delete(*args, *kwargs)
delete_resource(self.resource_id)
class PersonalAccessToken(models.Model):
id = as207960_utils.models.TypedUUIDField("oauth_pat", primary_key=True)
revoked = models.BooleanField(blank=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
def __str__(self):
return self.name
| [
"q@misell.cymru"
] | q@misell.cymru |
affd3c9683fcd45c1d12c534f88df28b264321b8 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/System/__init___parts/UriFormat.py | 87f1c147e2e568c5bda247aa1f44e7aca4eb4f3f | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | class UriFormat(Enum,IComparable,IFormattable,IConvertible):
"""
Controls how URI information is escaped.
enum UriFormat,values: SafeUnescaped (3),Unescaped (2),UriEscaped (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
SafeUnescaped=None
Unescaped=None
UriEscaped=None
value__=None
| [
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
dbdaecc9c92364b03753ac08fe2fbbe65a16f506 | 8205128a5a3564a826b4ae432f082a9ed4134316 | /algo/binary-search/_0081_SearchInRotatedSortedArray2.py | 25bb149045c8c612cc771884a7e6fafa3dfd3833 | [] | no_license | ianlai/Note-Python | 507094bda9fefa0fe2f45a629076c679178a2c74 | ca95110b77152258573b6f1d43e39a316cdcb459 | refs/heads/master | 2023-03-19T03:26:35.532874 | 2021-03-10T16:10:51 | 2021-03-10T16:10:51 | 68,308,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | class Solution:
def search(self, nums: List[int], target: int) -> int:
# Edge cases
if nums is None or len(nums) == 0:
return False
# mset = set(nums)
# nums = list(mset)
start, end = 0, len(nums)-1
if nums[start] == target:
return True
if nums[end] == target:
return True
# Preprocess (remove redundants in two ends)
while start < end and nums[start] == nums[end]:
start += 1
# Binary search loop
while start + 1 < end:
mid = (start + end) // 2
#print(start, mid, end)
if target == nums[mid]:
return True
if nums[mid] >= nums[start]:
if nums[start] <= target <= nums[mid]:
end = mid
else:
start = mid
if nums[mid] <= nums[end]:
if nums[mid] <= target <= nums[end]:
start = mid
else:
end = mid
# Binary search check
if nums[start] == target:
return True
if nums[end] == target:
return True
return False | [
"ian.explore.world@gmail.com"
] | ian.explore.world@gmail.com |
3514a6464e59eb60826de67c90f064f388efa269 | c960c1bf7fd094c031a77a3545bcc797b845edcd | /backend/home/migrations/0002_load_initial_data.py | 82ffbac62de87f64c4e07131f4f9260d1101b9ae | [] | no_license | crowdbotics-apps/chatter-22176 | e6e67efb11219b9ad5b0a680e2fe75263335e961 | a9a30ab3887dc3a766fb99398c3c114ecbd8be99 | refs/heads/master | 2023-01-02T06:00:27.746009 | 2020-11-01T03:15:59 | 2020-11-01T03:15:59 | 309,014,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "Chatter"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">Chatter</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "chatter-22176.botics.co"
site_params = {
"name": "Chatter",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
92660bcaac41f821096bf113adae2d40568e80c3 | 0d2f636592dc12458254d793f342857298c26f12 | /7-15.py | 112be3b6c1452fb33aa38df5d6f4edefdae54996 | [] | no_license | chenpc1214/test | c6b545dbe13e672f11c58464405e024394fc755b | 8610320686c499be2f5fa36ba9f11935aa6d657b | refs/heads/master | 2022-12-13T22:44:41.256315 | 2020-09-08T16:25:49 | 2020-09-08T16:25:49 | 255,796,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | buyers = [["Jamase",1030],["Curry",893],
["Durant",2050],["Jordan",990],
["David",2110],["Kevin",15000],
["Mary",10050],["Tom",8800],]
infinite = list()
VIP = list()
Gold = list()
while buyers:
fall_out_buyer = buyers.pop()
if fall_out_buyer[1] >= 10000:
infinite.append(fall_out_buyer)
elif 1000 <= fall_out_buyer[1] <= 10000:
VIP.append(fall_out_buyer)
else:
Gold.append(fall_out_buyer)
print("infinite_buyers的資料:",infinite)
print("VIP_buyers的資料:",VIP)
print("Gold_buyers的資料:",Gold)
| [
"kkbuger1523@gmail.com"
] | kkbuger1523@gmail.com |
4eed67d321cba99d1ee8fd552901b941afb1d2ef | 417f6a92e6179e9da3bc65ae5f56bb274cb47152 | /vindauga/types/vindauga_object.py | b88066e8a0be97c059dd2c3e90d72d7c53085592 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | gabbpuy/vindauga | 6504540f79afa0abed7677103ae50c848a1d18b8 | 04c4def7c1bea135b1b97fdc18f8f45ccd63c40a | refs/heads/master | 2023-06-07T23:01:14.918105 | 2023-06-05T02:07:48 | 2023-06-05T02:07:48 | 194,688,774 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | # -*- coding: utf-8 -*-
import gettext
import logging
logger = logging.getLogger(__name__)
gettext.install('vindauga')
class VindaugaObject:
_registry = {}
def __init_subclass__(cls, **kwargs):
super().__init_subclass__()
try:
VindaugaObject._registry[cls.name] = cls
except AttributeError:
logger.info('A class has no name: %s', cls)
def destroy(self, o):
if o:
o.shutdown()
del o
def shutdown(self):
pass
| [
"akm@unyx.net"
] | akm@unyx.net |
66df3c0242eaf51dfc0f6b3c9c7880f5fb7e500e | bf6e7fba9eca62f40ba1a9532994c14e751fdfeb | /test/unit/test_internal_casing.py | b673b60e26e3cd3d9eca90225fd686fc2745b06f | [
"Apache-2.0"
] | permissive | CyberGRX/py2neo | 11f1a765d2b629c7b6c3e86cb24e842638b3eec9 | e6a50a80f769f21d8024733c4bf83e899443d672 | refs/heads/v4.2-grx | 2023-04-20T06:46:39.158143 | 2023-04-05T22:04:27 | 2023-04-05T22:04:27 | 171,706,053 | 0 | 0 | Apache-2.0 | 2019-02-20T17:42:36 | 2019-02-20T16:13:12 | Python | UTF-8 | Python | false | false | 1,796 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2019, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo.internal.text import Words
def test_breakdown_of_string_with_spaces():
x = Words("hello world")
assert x.words == ("hello", "world")
def test_breakdown_of_string_with_underscores():
x = Words("hello_world")
assert x.words == ("hello", "world")
def test_breakdown_of_string_with_hyphens():
x = Words("hello-world")
assert x.words == ("hello", "world")
def test_breakdown_of_single_word_upper_case_string():
x = Words("HELLO")
assert x.words == ("HELLO",)
def test_breakdown_tuple():
x = Words(("hello", "world"))
assert x.words == ("hello", "world")
def test_upper():
x = Words("Hello world")
assert x.upper() == "HELLO WORLD"
def test_lower():
x = Words("Hello world")
assert x.lower() == "hello world"
def test_title():
x = Words("Hello WORLD")
assert x.title() == "Hello WORLD"
def test_snake():
x = Words("Hello world")
assert x.snake() == "hello_world"
def test_camel():
x = Words("Hello world")
assert x.camel() == "helloWorld"
def test_camel_with_upper_first():
x = Words("Hello world")
assert x.camel(upper_first=True) == "HelloWorld"
| [
"nigel@neo4j.com"
] | nigel@neo4j.com |
1a01b7e712420553340256f4f54e33c95365a01b | 474285a15bf21ac3638249397fe8045b150b1aa5 | /usage/bdrc/download_ocr_output.py | 7338e331e57b439d6491dd68e15773e2a7aec735 | [] | no_license | noncapture1/img2opf | 641f40d2db22823deae99ea83789d346940cb82d | af1f2bbcdfed39032fc44292d8105009120fd066 | refs/heads/master | 2023-04-17T14:12:00.180754 | 2021-04-29T04:25:27 | 2021-04-29T04:25:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | import argparse
import logging
import sys
from pathlib import Path
from typing import Mapping
from bdrc_ocr import (
BATCH_PREFIX,
IMAGES,
OUTPUT,
SERVICE,
get_s3_bits,
get_s3_image_list,
get_s3_prefix_path,
get_volume_infos,
get_work_local_id,
ocr_output_bucket,
save_file,
)
logging.basicConfig(
filename=f"{__file__}.log",
format="%(asctime)s, %(levelname)s: %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
level=logging.INFO,
)
def get_json_fn(fn):
return f"{fn.split('.')[0]}.json.gz"
def get_s3_key(s3prefix, fn):
return s3prefix + "/" + fn
def save_file(bits, fn, imagegroup_output_dir):
imagegroup_output_dir.mkdir(exist_ok=True, parents=True)
output_fn = imagegroup_output_dir / fn
output_fn.write_bytes(bits.getvalue())
def download_ocr_result_for_vol(
volume_prefix_url, work_local_id, imagegroup, output_base_dir, s3_ocr_paths
):
imagegroup_s3prefix = s3_ocr_paths[OUTPUT]
for imageinfo in get_s3_image_list(volume_prefix_url):
imagegroup_output_dir = output_base_dir / work_local_id / imagegroup
ocr_result_fn = get_json_fn(imageinfo["filename"])
if (imagegroup_output_dir / ocr_result_fn).is_file():
continue
s3_key = get_s3_key(imagegroup_s3prefix, ocr_result_fn)
filebits = get_s3_bits(s3_key, ocr_output_bucket)
if filebits:
save_file(filebits, ocr_result_fn, imagegroup_output_dir)
def process(args):
work_local_id, work = get_work_local_id(args.work)
for vol_info in get_volume_infos(work):
imagegroup = vol_info["imagegroup"]
if imagegroup > args.end:
break
if imagegroup < args.start:
continue
if imagegroup in args.skip:
continue
print(f"[INFO] Processing {vol_info['imagegroup']} ....")
s3_ocr_paths = get_s3_prefix_path(
work_local_id=work_local_id,
imagegroup=vol_info["imagegroup"],
service=SERVICE,
batch_prefix=BATCH_PREFIX,
data_types=[IMAGES, OUTPUT],
)
download_ocr_result_for_vol(
volume_prefix_url=vol_info["volume_prefix_url"],
work_local_id=work_local_id,
imagegroup=vol_info["imagegroup"],
output_base_dir=Path(args.output_dir),
s3_ocr_paths=s3_ocr_paths,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("work")
parser.add_argument(
"--output_dir", "-o", default="./archive/output", help="start imagegroup"
)
parser.add_argument("--start", "-s", default=chr(0), help="start imagegroup")
parser.add_argument(
"--end", "-e", default=chr(sys.maxunicode), help="end imagegroup"
)
parser.add_argument(
"--skip", "-sk", default="", help="imagegroups to be skiped (in comma seperated"
)
args = parser.parse_args()
process(args)
| [
"ten13zin@gmail.com"
] | ten13zin@gmail.com |
613c451c771753b53f7b622d95595e42af3924d2 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/agfoodplatform/v20200512preview/get_farm_beats_model.py | 69a86e25db7b166badd191892f7224c7ea92a20c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,205 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetFarmBeatsModelResult',
'AwaitableGetFarmBeatsModelResult',
'get_farm_beats_model',
'get_farm_beats_model_output',
]
@pulumi.output_type
class GetFarmBeatsModelResult:
"""
FarmBeats ARM Resource.
"""
def __init__(__self__, id=None, instance_uri=None, location=None, name=None, provisioning_state=None, system_data=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if instance_uri and not isinstance(instance_uri, str):
raise TypeError("Expected argument 'instance_uri' to be a str")
pulumi.set(__self__, "instance_uri", instance_uri)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="instanceUri")
def instance_uri(self) -> str:
"""
Uri of the FarmBeats instance.
"""
return pulumi.get(self, "instance_uri")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
FarmBeats instance provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetFarmBeatsModelResult(GetFarmBeatsModelResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFarmBeatsModelResult(
id=self.id,
instance_uri=self.instance_uri,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_farm_beats_model(farm_beats_resource_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFarmBeatsModelResult:
"""
FarmBeats ARM Resource.
:param str farm_beats_resource_name: FarmBeats resource name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['farmBeatsResourceName'] = farm_beats_resource_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:agfoodplatform/v20200512preview:getFarmBeatsModel', __args__, opts=opts, typ=GetFarmBeatsModelResult).value
return AwaitableGetFarmBeatsModelResult(
id=__ret__.id,
instance_uri=__ret__.instance_uri,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
system_data=__ret__.system_data,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_farm_beats_model)
def get_farm_beats_model_output(farm_beats_resource_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFarmBeatsModelResult]:
"""
FarmBeats ARM Resource.
:param str farm_beats_resource_name: FarmBeats resource name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
e9f8d7fa5afb1585c089bd9b3b7619c57d787d28 | 291f0aa9a40eeca26fb08106c952b9347db7dba7 | /apps/views/main.py | 1d6a6abd023956c00c22858049b0c5c03ba4415f | [
"Apache-2.0"
] | permissive | gaohj/nzflask_bbs | fad10b93f8f495a94d5d6db6f5c60d85c1c85518 | 36a94c380b78241ed5d1e07edab9618c3e8d477b | refs/heads/master | 2022-12-12T21:43:17.417294 | 2020-03-20T10:28:22 | 2020-03-20T10:28:22 | 239,702,874 | 0 | 2 | Apache-2.0 | 2022-12-08T03:50:07 | 2020-02-11T07:34:01 | JavaScript | UTF-8 | Python | false | false | 1,235 | py | from flask import Blueprint,render_template,flash,redirect,url_for,request
from apps.forms import PostsForm
from flask_login import current_user
from apps.models import Posts
from apps.extensions import db
#实例化蓝本对象
main = Blueprint('main',__name__)
@main.route('/',methods=['GET','POST'])
def index():
form = PostsForm()
if form.validate_on_submit():
#判断用户是否登录
if current_user.is_authenticated:
#获取当前登录的用户
u = current_user._get_current_object()
p = Posts(content=form.content.data,user=u)
db.session.add(p)
db.session.commit()
return redirect(url_for('main.index'))
else:
flash('请先登录')
return redirect(url_for('users.login'))
#取出所有的博客 类视图 get方法
# posts = Posts.query.filter_by(rid=0).all()
page = request.args.get('page',1,type=int) #接收前端用户提交的页码
pagination =Posts.query.filter_by(rid=0).order_by(Posts.timestamp.desc()).paginate(page,per_page=6,error_out=False)
posts = pagination.items
return render_template('main/index.html',form=form,posts=posts,pagination=pagination)
| [
"gaohj@163.com"
] | gaohj@163.com |
61e2092b0af89c531bc53eaf2804e05cce81e5ac | be9046ba8823cc5fbb6064db33b195481985cd0e | /hindsight1/management/commands/import_info.py | 3015ebe26bc6a091b3104d89038397aff8642223 | [
"MIT"
] | permissive | jcaguirre89/mysite | 9f692c6f2cd8dc427aba6d9dd3e4e9dc2a349fb2 | 3b118747d7129d7a249ea2ad7b6644e46e9816f1 | refs/heads/master | 2022-04-02T22:53:39.107234 | 2018-12-10T03:16:50 | 2018-12-10T03:16:50 | 115,764,839 | 0 | 0 | MIT | 2020-02-11T23:49:34 | 2017-12-30T01:04:00 | Python | UTF-8 | Python | false | false | 638 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 18 15:44:31 2017
@author: crist
"""
from hindsight1.models import Sp100
from django.core.management.base import BaseCommand
import os
#flip directory in production
directory = 'C:\\Users\\crist\\mysite\\hindsight1\\static\\hindsight1'
#directory = '/home/cristobal/mysite/hindsight1/static/hindsight1'
filename = 'sp100_info.csv'
fileDir=os.path.join(directory,filename)
class Command(BaseCommand):
def handle(self, *args, **kwargs):
# Since the CSV headers match the model fields,
# you only need to provide the file's path
Sp100.objects.from_csv(fileDir) | [
"cristobal.aguirre89@gmail.com"
] | cristobal.aguirre89@gmail.com |
41d8ef2dda318ee3978e8bc8f7e4e6dfeef99995 | a52dbc76680dbe4938c4ae81290f2f36f2eae68d | /workflowrepository/urls.py | a41d5bf8d73d619cd0770f7c38858d147acc1f32 | [] | no_license | RodriGuerra98/psi | 4a9969e71a98b1aaf9b0a74d1cbd1d8ced19b425 | b90293c001e65465e6880fe4aaccf0d2d03262b0 | refs/heads/master | 2022-12-01T01:25:50.234245 | 2019-05-30T09:54:51 | 2019-05-30T09:54:51 | 154,207,653 | 0 | 0 | null | 2022-11-22T02:37:05 | 2018-10-22T19:55:28 | JavaScript | UTF-8 | Python | false | false | 1,289 | py | """workflowrepository URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url ,include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from data import views
import find
import upload
from django.conf.urls import handler404
from workflowrepository.views import mi_error_404
handler404 = mi_error_404
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('find.urls')),
url(r'^' ,include('upload.urls')),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"you@example.com"
] | you@example.com |
ff3d80ceee2e30e015c033c3b4658ab7e99e95cc | a9386fd8a14e66c27b5059f562dc239f2c4b0ff7 | /shared/aspace_agent_mapping/agent_parsers/Famname.py | a14f1904186ea9393cba3b3a0afbb7705a741163 | [] | no_license | bentley-historical-library/vandura | 20f93e2f9cf2370e40537f863da9f2f19db329a0 | 0fefc0bf92c2487987a9c23e70187718c3b949f0 | refs/heads/master | 2021-01-17T00:54:08.023435 | 2016-11-04T20:00:04 | 2016-11-04T20:00:04 | 37,206,505 | 0 | 18 | null | 2016-11-04T20:00:05 | 2015-06-10T15:45:33 | Python | UTF-8 | Python | false | false | 358 | py | import json
from vandura.shared.aspace_agent_mapping.agent_parsers.create_famname_json import parse_famname
class Famname:
def __init__(self, string, auth_id="", auth_source=""):
self.data_dict = parse_famname(string, auth_id, auth_source)
def get_aspace_json(self):
return json.dumps({"publish": True, "names": [self.data_dict]})
| [
"djpillen@umich.edu"
] | djpillen@umich.edu |
94d8ad77d72ccf80b5d64fa32c4ea8bcb9553559 | 5ea1216c24b62c6beab3c6d9d2e2e06a9c58c796 | /总题库/105.ConstructBinaryTreefromPreorderandInorderTraversal.py | 1bdc7e3cc68a120486922f2d5225c61b60ba3b78 | [] | no_license | xiami2019/LeetCode | 596de2f093d52b58cf80421f67de03757578cd5f | 8d09a56672553ecee4af731796980b2c61c52df2 | refs/heads/master | 2021-01-02T15:15:13.984178 | 2020-07-08T01:20:15 | 2020-07-08T01:20:15 | 239,675,873 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def buildTreeHelper(self, preorder: List[int], inorder: List[int], inStart: int, inEnd: int) -> TreeNode:
if inEnd - inStart < 0:
return None
node = TreeNode(preorder[0])
index = 0
while index <= inEnd and inorder[index] != preorder[0]:
index += 1
preorder.pop(0)
node.left = self.buildTreeHelper(preorder, inorder, inStart, index - 1)
node.right = self.buildTreeHelper(preorder, inorder, index + 1, inEnd)
return node
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
return self.buildTreeHelper(preorder, inorder, 0, len(inorder) - 1) | [
"435350193@qq.com"
] | 435350193@qq.com |
c6879728d54c08d070de5533473c59cb546bae77 | 4266e9b1c59ddef83eede23e0fcbd6e09e0fa5cb | /vs/gyp/test/mac/gyptest-type-envvars.py | b75e094636a0f57ec51dcaf30ca221e66a904e25 | [
"BSD-3-Clause"
] | permissive | barrystudy/study | b3ba6ed652d1a0bcf8c2e88a2a693fa5f6bf2115 | 96f6bb98966d3633b47aaf8e533cd36af253989f | refs/heads/master | 2020-12-24T14:53:06.219236 | 2017-10-23T02:22:28 | 2017-10-23T02:22:28 | 41,944,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test that MACH_O_TYPE etc are set correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='type_envvars')
test.build('test.gyp', test.ALL, chdir='type_envvars')
# The actual test is done by postbuild scripts during |test.build()|.
test.pass_test()
| [
"2935973620@qq.com"
] | 2935973620@qq.com |
01eecb6722bbf993c6ea5ee09043fc12c6f5c5aa | 5afcc3b02b7f4fe14e90f33b0a42bfc51b278e19 | /matlab_ext/measurement/mc-assistant/projects/py_hw_models/test_sensor_channals.py | f1d57724af0694d74b3bcfb6ffb9bfafd0ed9a71 | [
"MIT",
"Apache-2.0"
] | permissive | zaqwes8811/micro-apps | c9e51fa7931c9d5625e1517bad7b1593104a50c0 | bb1643562751dda70ae4f8bd632a171f1de05df5 | refs/heads/master | 2023-04-27T21:04:09.457192 | 2023-04-25T08:41:23 | 2023-04-25T08:41:23 | 11,820,285 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | #-*- coding: utf-8 -*-
import random
import unittest
# App
import models.sensors.one_sensor as sensor_channal
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.seq = range(10)
def test_shuffle(self):
# make sure the shuffled sequence does not lose any elements
random.shuffle(self.seq)
self.seq.sort()
self.assertEqual(self.seq, range(10))
# should raise an exception for an immutable sequence
self.assertRaises(TypeError, random.shuffle, (1,2,3))
def test_choice(self):
element = random.choice(self.seq)
self.assertTrue(element in self.seq)
def test_sample(self):
with self.assertRaises(ValueError):
random.sample(self.seq, 20)
for element in random.sample(self.seq, 5):
self.assertTrue(element in self.seq)
if __name__ == '__main__':
#unittest.main()
pass
name = 'I' # Current
cfg = sensor_channal.get_sensor_cfg_new(
name, sensor_channal.kSensorCfgMap)
print cfg
| [
"ilugansky@arrival.com"
] | ilugansky@arrival.com |
d817e562bc6c67af216ebf7ecd5e1a2ab53daf12 | f9b3b867abfbfb01ab57cb249a2e0fcb372e435b | /examples/hcaptcha_request_proxy.py | f271ac3797a79202e129afa48a77d6d1ebc8801c | [
"MIT"
] | permissive | ad-m/python-anticaptcha | af615cfd7549d48829adb441b837eed1373fe782 | 076922ee646483328c580c6623f7cb49a2ea4493 | refs/heads/master | 2022-05-17T17:18:51.638547 | 2022-03-28T03:44:41 | 2022-03-28T03:44:41 | 95,789,669 | 241 | 66 | MIT | 2022-03-28T03:38:57 | 2017-06-29T15:05:36 | Python | UTF-8 | Python | false | false | 1,564 | py | from six.moves.urllib import parse
import re
import requests
from os import environ
from python_anticaptcha import AnticaptchaClient, HCaptchaTask
api_key = environ["KEY"]
proxy_url = environ["PROXY_URL"] # eg. socks5://user:password/123.123.123.123:8888/
site_key_pattern = 'data-sitekey="(.+?)"'
url = "http://hcaptcha.jawne.info.pl/"
client = AnticaptchaClient(api_key)
session = requests.Session()
EXPECTED_RESULT = "Your request have submitted successfully."
UA = (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
)
def parse_url(url):
parsed = parse.urlparse(url)
return dict(
proxy_type=parsed.scheme,
proxy_address=parsed.hostname,
proxy_port=parsed.port,
proxy_login=parsed.username,
proxy_password=parsed.password,
)
def get_form_html():
return session.get(url).text
def get_token(form_html):
site_key = re.search(site_key_pattern, form_html).group(1)
proxy = parse_url(proxy_url)
task = HCaptchaTask(
website_url=url,
website_key=site_key,
user_agent=UA,
cookies="test=test",
**proxy
)
job = client.createTask(task)
job.join()
return job.get_solution_response()
def form_submit(token):
return requests.post(url, data={"g-recaptcha-response": token}).text
def process():
html = get_form_html()
token = get_token(html)
return form_submit(token)
if __name__ == "__main__":
assert EXPECTED_RESULT in process()
| [
"naczelnik@jawnosc.tk"
] | naczelnik@jawnosc.tk |
c3ab1e1ec0477a77f93cd10d4c0a7d9ae3f4be26 | 43acaf9718b0a62594ed8e42b6c01099acd2d075 | /apps/lista/migrations/0030_auto_20200402_1450.py | 8d2059b1ff3c88db1f6e081b9f5468bc816c8f87 | [] | no_license | JmSubelza/Demo | 2f357889975c183b4a0f627330a80e535823faea | affceeadb87f1f14fb4e481851a1ac107e512f48 | refs/heads/master | 2023-05-14T18:16:38.153963 | 2020-04-28T16:15:27 | 2020-04-28T16:15:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-04-02 19:50
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('lista', '0029_auto_20200331_1704'),
]
operations = [
migrations.AlterModelOptions(
name='listado',
options={'ordering': ['-periodo'], 'verbose_name': 'listado', 'verbose_name_plural': 'listados'},
),
migrations.AlterField(
model_name='listado',
name='fecha',
field=models.DateField(default=datetime.datetime(2020, 4, 2, 19, 50, 36, 334173, tzinfo=utc)),
),
migrations.AlterModelTable(
name='listado',
table='listado',
),
]
| [
"Chrisstianandres@gmail.com"
] | Chrisstianandres@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.