blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8b354179b8a8992d767d00a4edc6a9d8f61d5c3b | e1b8ae703c84f6a06dd3a3072cfa9afb7f9ebce7 | /accounts/views.py | 0173f6be5057efb35f374b1cdba19b08f80fd4c6 | [] | no_license | Code-Institute-Submissions/renanclothestore | 95a2a161f0f0046e328cb639a88ddaf6afaceae5 | ea295d1643b06a1f5cdbdbafcdbe767d2c286648 | refs/heads/master | 2020-03-26T12:34:21.946183 | 2018-08-13T21:40:09 | 2018-08-13T21:40:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,209 | py | from django.contrib import messages, auth
from django.contrib.auth.decorators import login_required
from accounts.forms import UserRegistrationForm, UserLoginForm
from django.core.urlresolvers import reverse
from django.shortcuts import render, redirect
from django.template.context_processors import csrf
from django.conf import settings
import datetime
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from models import User
import stripe
import arrow
import json
stripe.api_key = settings.STRIPE_SECRET
def register(request):
if request.method == 'POST':
form = UserRegistrationForm(request.POST)
if form.is_valid():
try:
customer = stripe.Customer.create(
email=form.cleaned_data['email'],
card=form.cleaned_data['stripe_id'],
plan='REG_MONTHLY',
)
if customer:
user = form.save()
user.stripe_id = customer.id
user.subscription_end = arrow.now().replace(weeks=+4).datetime
user.save()
user = auth.authenticate(email=request.POST.get('email'), password=request.POST.get('password1'))
if user:
auth.login(request, user)
messages.success(request, "You have successfully registered")
return redirect(reverse('profile'))
else:
messages.error(request, "We were unable to log you in at this time")
else:
messages.error(request, "We were unable to take payment from the card provided")
except stripe.error.CardError, e:
messages.error(request, "Your card was declined!")
else:
today = datetime.date.today()
form = UserRegistrationForm(initial={'expiry_month': today.month, 'expiry_year': today.year})
args = {'form': form, 'publishable': settings.STRIPE_PUBLISHABLE}
args.update(csrf(request))
return render(request, 'register.html', args)
@login_required(login_url='/accounts/login/')
def cancel_subscription(request):
try:
customer = stripe.Customer.retrieve(request.user.stripe_id)
customer.cancel_subscription(at_period_end=True)
except Exception, e:
messages.error(request, e)
return redirect('profile')
@csrf_exempt
def subscriptions_webhook(request):
event_json = json.loads(request.body)
# Verify the event by fetching it from Stripe
try:
# firstly verify this is a real event generated by Stripe.com
# commented out for testing - uncomment when live
# event = stripe.Event.retrieve(event_json['object']['id'])
cust = event_json['object']['customer']
paid = event_json['object']['paid']
user = User.objects.get(stripe_id=cust)
if user and paid:
user.subscription_end = arrow.now().replace(weeks=+4).datetime # add 4 weeks from now
user.save()
except stripe.InvalidRequestError, e:
return HttpResponse(status=404)
return HttpResponse(status=200)
@login_required(login_url='/login/')
def profile(request):
return render(request, 'profile.html')
def login(request):
if request.method == 'POST':
form = UserLoginForm(request.POST)
if form.is_valid():
user = auth.authenticate(email=request.POST.get('email'),
password=request.POST.get('password'))
if user is not None:
auth.login(request, user)
messages.error(request, "You have successfully logged in")
return redirect(reverse('profile'))
else:
form.add_error(None, "Your email or password was not recognised")
else:
form = UserLoginForm()
args = {'form': form}
args.update(csrf(request))
return render(request, 'login.html', args)
def logout(request):
auth.logout(request)
messages.success(request, 'You have successfully logged out')
return render(request, 'index.html') | [
"renanzabeu@yahoo.it"
] | renanzabeu@yahoo.it |
8f404dd9f87feae9a0b2c22d54b7c1e5641b0c48 | 281fa4de7baa79587c7d1dedb63019627e429de0 | /lesson1/Hello_Input3.py | 531102bc4e305b9c90d292fbc657c41b9fc521ce | [] | no_license | vincenttuan/PythonCourse | a7b302324148633e84b9e6db3cc3b00eea8f08d4 | 648f342eb3c82b4bbd4e6575ef8e6e690322ce70 | refs/heads/master | 2020-04-18T17:32:55.734217 | 2019-03-30T08:31:13 | 2019-03-30T08:31:13 | 167,657,410 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | # -*- coding:UTF-8 -*-
import math
h = 170.0
w = 60.0
bmi = w / ((h/100)**2)
print('身體評量指數 bmi = %.2f' % bmi)
bmi = w / math.pow(h/100, 2)
print('bmi = %.2f' % bmi)
print('h = %.2f, w = %.2f, bmi = %.2f' % (h, w, bmi))
print("h = {0}, w = {1}, bmi = {2}".format(h, w, bmi))
if (bmi >= 18.0 and bmi < 23) :
print('正常')
else :
print('不正常')
if 18 <= bmi < 23:
print('正常')
else:
print('不正常')
| [
"vincentjava@yahoo.com.tw"
] | vincentjava@yahoo.com.tw |
77ee955b11d10f5174a6ce0263c5c809f8f785ef | 7e395a7ac6abec3fe24f4ca02d5370f1c8fb3c17 | /DemoPrj_tent/employee_shared/views.py | 46ff1396326f2b0f49311dd202b3bb110073e9c8 | [] | no_license | udaykumaraodh/DjangoTentPrj | fbfe6929954846d3c9bc4815a06108eecf3ea54c | 53d8c518247666f7325bb55672819dce66bf89a9 | refs/heads/main | 2023-07-27T21:06:11.704280 | 2021-08-31T16:28:49 | 2021-08-31T16:28:49 | 401,743,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.db import connection
def empDetails(request):
with connection.cursor() as cursor:
cursor.execute('''Insert into employee_shared_empdetails(empno,ename,salary) values(103,'harish',30000.0) ''')
connection.commit()
cursor.execute('Select * from employee_shared_empdetails')
ds=cursor.fetchall()
return HttpResponse(ds)
def empUpd(request):
with connection.cursor() as cursor:
cursor.execute('''update employee_shared_empdetails set salary=salary+10000 where empno=103 ''')
connection.commit()
cursor.execute('Select * from employee_shared_empdetails')
ds=cursor.fetchall()
return HttpResponse(ds)
def empDel(request):
with connection.cursor() as cursor:
cursor.execute('''delete from employee_shared_empdetails where id=3 ''')
connection.commit()
cursor.execute('Select * from employee_shared_empdetails')
ds=cursor.fetchall()
return HttpResponse(ds)
# Create your views here.
| [
"udaykumarandolu@gmail.com"
] | udaykumarandolu@gmail.com |
743939c27c7e0e8d00a2487a97e1bdf562484341 | d3519a4d17c3a1097b6b16404d4657f3ab1035f7 | /env/gym_Rubiks_Cube/envs/rubiks_cube_env.py | 493f1074976e99b902f269699cccea4c176a2b66 | [] | no_license | AveyBD/rubiks-cube-ai | 1e7dc0d343e811d5fbe7dda989d61856266b9899 | a0f276ca022a579c6d1d75f817993b1dae44ff89 | refs/heads/master | 2020-09-28T07:08:57.938186 | 2019-12-08T19:31:19 | 2019-12-08T19:31:19 | 226,719,959 | 1 | 0 | null | 2019-12-08T19:30:10 | 2019-12-08T19:30:09 | null | UTF-8 | Python | false | false | 2,676 | py | import gym
from gym import spaces
import numpy as np
import random
from gym_Rubiks_Cube.envs import cube
actionList = [
'f', 'r', 'l', 'u', 'd', 'b',
'.f', '.r', '.l', '.u', '.d', '.b']
tileDict = {
'R': 0,
'O': 1,
'Y': 2,
'G': 3,
'B': 4,
'W': 5,
}
class RubiksCubeEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, orderNum=3):
# the action is 6 move x 2 direction = 12
self.action_space = spaces.Discrete(12)
# input is 9x6 = 54 array
self.orderNum = orderNum
low = np.array([0 for i in range(self.orderNum * self.orderNum * 6)])
high = np.array([5 for i in range(self.orderNum * self.orderNum * 6)])
self.observation_space = spaces.Box(low, high, dtype=np.uint8) # flattened
self.step_count = 0
self.scramble_low = 1
self.scramble_high = 10
self.doScamble = True
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
self.action_log.append(action)
self.ncube.minimalInterpreter(actionList[action])
self.state = self.getstate()
self.step_count = self.step_count + 1
reward = 0.0
done = False
others = {}
if self.ncube.isSolved():
reward = 1.0
done = True
if self.step_count > 40:
done = True
return self.state, reward, done, others
def reset(self):
self.state = {}
self.ncube = cube.Cube(order=self.orderNum)
if self.doScamble:
self.scramble()
self.state = self.getstate()
self.step_count = 0
self.action_log = []
return self.state
def getstate(self):
return np.array([tileDict[i] for i in self.ncube.constructVectorState()])
def render(self, mode='human', close=False):
if close:
return
self.ncube.displayCube(isColor=True)
def setScramble(self, low, high, doScamble=True):
self.scramble_low = low
self.scramble_high = high
self.doScamble = doScamble
def scramble(self):
# set the scramber number
scramble_num = random.randint(self.scramble_low, self.scramble_high)
# check if scramble
while self.ncube.isSolved():
self.scramble_log = []
for i in range(scramble_num):
action = random.randint(0, 11)
self.scramble_log.append(action)
self.ncube.minimalInterpreter(actionList[action])
def getlog(self):
return self.scramble_log, self.action_log
| [
"vivnps.verma@gmail.com"
] | vivnps.verma@gmail.com |
a46da7b7026d254511b4f87b98c7230d86a6ee3b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_299/ch153_2020_04_13_20_31_46_202522.py | 3cb9afb3697ff5ec456e21b36cec4d500a24ee1f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | def agrupa_por_idade(dicio):
criança = []
adolescente = []
adulto = []
idoso = []
dicionovo = {'crianças':criança, 'adolescentes':adolescente, 'adultos':adulto, 'idosos':idoso}
for nome , idade in dicio.items():
if idade <= 11:
criança.append(nome)
elif idade <= 17:
adolescente.append(nome)
elif idade <= 59:
adulto.append(nome)
else:
idoso.append(nome)
dicionovo['crianças'] = criança
dicionovo['adolescentes'] = adolescente
dicionovo['adultos'] = adulto
dicionovo['idosos'] = idoso
return dicionovo | [
"you@example.com"
] | you@example.com |
b662116be5fba1510a240c78910795c6ec93caec | c839961aeab22795200d9edef9ba043fe42eeb9c | /data/script335.py | 6eca8fa73f168a3bb922efb5448f2204b04c98c3 | [] | no_license | StevenLOL/kaggleScape | ad2bb1e2ed31794f1ae3c4310713ead1482ffd52 | 18bede8420ab8d2e4e7c1eaf6f63280e20cccb97 | refs/heads/master | 2020-03-17T05:12:13.459603 | 2018-05-02T19:35:55 | 2018-05-02T19:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,287 | py |
# coding: utf-8
# **Cluster Visualization - Assets based on Structural NAN values**
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
import matplotlib.pyplot as plt
# Any results you write to the current directory are saved as output.
# In[ ]:
#load the data. It comes in train.h5 so we need to us HDFStore
df = pd.HDFStore("../input/train.h5", "r").get("train")
# In[ ]:
# I make vectors that hold the ratio of NAN values for a given id in a given column
unique_ids = pd.unique(df.id)
len(unique_ids)
NaN_vectors = np.zeros(shape=(len(unique_ids), df.shape[1]))
for i, i_id in enumerate(unique_ids):
data_sub = df[df.id ==i_id]
NaN_vectors[i,:] = np.sum(data_sub.isnull(),axis=0) /float(data_sub.shape[0])
NaN_vectors
# In[ ]:
# get all the NaN vectors in which every collumn for that ID is NaN. What we are looking for
#is collumns in which the features fundamentally do not exist.
bin_NaN = 1*(NaN_vectors==1)
print("Still has the shape of {} by {}".format(bin_NaN.shape[0],bin_NaN.shape[1]))
# In[ ]:
# we now have a vector of things that are either 1 where nothing exists in teh column or zero something
#exists Now we take a covariance over these bins to see which ones move togehter. we are looking only based on columns
bin_cov=np.corrcoef(bin_NaN.T)
bin_cov.shape[1]
# In[ ]:
# plot bin_cov
plt.matshow(bin_cov)
#if you think abou what this shows it is show the probability that when an entire column is missing what
# is the probability that another column will be completely missing.
# In[ ]:
# In this graph i make the matrix sparse by considering only things that have perfect correlation. This
# gives us insight into the relationship of the pairs.
plt.matshow(bin_cov == 1)
# In[ ]:
# What we are doing here is looking at all the column pairs that when they are missing they are always
# missing together. ie they have a corralation of 1. We also get the count. it stands to reason that
# if this happens in only one or two id's out of 1400 then perhaps it is a statistical anomoly or could be
# reflective of a non structural issue. This is actually very enlightening and we see there are 60
# some odd pairs that satisfy this criteria. More importantly is that it happens for lots of tickers.
# Maybe we have soemthing here.
bin_NaN
edges = []
count =np.dot(bin_NaN.T,bin_NaN)
for i in range(bin_cov.shape[0]):
for j in range(bin_cov.shape[0]-i):
if i!=i+j and bin_cov[i,i+j]==1:
edges.append([i,i+j,count[i,i+j]])
print(edges)
# In[ ]:
#lets see how many unique counts there are. it looks like a few of these counts happen multiple times.
# this is interesting and could imply some structural issue.
ucount = [i[2] for i in edges]
print(np.unique(ucount))
# In[ ]:
print('rows: {}'.format(bin_NaN.shape[0]))
print('cols: {}'.format(len(edges)))
# the idea here is that we create a feature vector. We look at all the ids which have all their data
# missing in a certain collumn. above we found that if all the data is missing in a certain collumn it
# would be missing in another collumn as well. so we look at all these pairs (shown as edges) and we
# then create a matrix of id x edges. We then put a 0 or a 1 in the collumn to indicate that the pair of
# data is missing or not. This serves as a feature. I will then go on to cluster over these features.
nan_features = np.zeros((bin_NaN.shape[0],len(edges)))
for i in range(bin_NaN.shape[0]):
for j, edge in enumerate(edges):
nan_features[i,j] = 1*(bin_NaN[i,edge[0]] & bin_NaN[i,edge[1]])
print('this is just a check that indexing is correct: {}'.format(np.sum(nan_features,axis=1).shape[0]))
# In[ ]:
# we take a look at the silouette score as we increase the number of clusters to understand the optimal
#number of clusters. We see here that it continues to increase which we would expect. I chose to cut it
# off around 12
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
#Range for k
kmin = 2
kmax = 25
sil_scores = []
#Compute silouhette scoeres
for k in range(kmin,kmax):
km = KMeans(n_clusters=k, n_init=20).fit(nan_features)
sil_scores.append(silhouette_score(nan_features, km.labels_))
#Plot
plt.plot(range(kmin,kmax), sil_scores)
plt.title('KMeans Results')
plt.xlabel('Number of Clusters')
plt.ylabel('Silhouette Score')
plt.show()
# In[ ]:
# DBSCAN The only thing that is important from here is the labels.
# the way that DB scan works is that you give it eps and min_samples and it
# finds core groups. eps is the distance cut off and min is how many elements
# at minimum you need to define a cluster
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
X = nan_features
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
# In[ ]:
#COLOR MAPPING - we run a kmeans cluster but we need to decide how many
#clusters we want to use. This is another way for us to cluster. Here we use
k=12
km = KMeans(n_clusters=k, n_init=20).fit(nan_features)
colors=km.labels_
# In[ ]:
#now we try to visualize the data of these features.
# WOW LOOK AT THESE RESULTS. THAT IS BEAUTIFUL!!
from sklearn.manifold import TSNE
from time import time
n_iter = 5000
for i in [2, 5, 30, 50, 100]:
t0 = time()
model = TSNE(n_components=2, n_iter = n_iter,random_state=0, perplexity =i)
np.set_printoptions(suppress=True)
Y = model.fit_transform(nan_features)
t1 =time()
print( "t-SNE: %.2g sec" % (t1 -t0))
plt.scatter(Y[:, 0], Y[:, 1], c= colors)
plt.title('t-SNE with perplexity = {}'.format(i))
plt.show()
# In[ ]:
# Now i do the same in 3d to try to better understand these clusters
from mpl_toolkits.mplot3d import Axes3D
n_iter = 5000
for i in [2, 5, 30, 50, 100]:
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
t0 = time()
model = TSNE(n_components=3, random_state=0, perplexity=i, n_iter=n_iter)
np.set_printoptions(suppress=True)
Y = model.fit_transform(nan_features)
t1 =time()
print( "t-SNE: %.2g sec" % (t1 -t0))
ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2],c=db.labels_,
cmap=plt.cm.Paired)
ax.set_title("3D T-SNE - Perplexity = {}".format(i))
ax.set_xlabel("1st dim")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd dim")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd dim")
ax.w_zaxis.set_ticklabels([])
plt.show()
# In[ ]:
# I will use PCA now to plot
from sklearn import decomposition
# I chose this number pretty much at random, you can change it. using 22 features to describe
# something with 110 x variables still seems high.
n_eigens = 22
# Creating PCA object
pca = decomposition.PCA(n_components=n_eigens, svd_solver ='randomized', whiten=True)
X_pca =pca.fit_transform(nan_features)
X_pca
# **2D -PCA Plot**
# In[ ]:
# This is a 2D pca plot... mehhhh
plt.scatter(X_pca[:,0],X_pca[:,1],c=colors)
# In[ ]:
# To getter a better understanding of interaction of the dimensions
# plot the first three PCA dimensions
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = decomposition.PCA(n_components=3).fit_transform(nan_features)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2],c=colors,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
# In[ ]:
# here we plot a graph that looks at how many PCA components explain the variation
n_eigens=10
X_reduced = decomposition.PCA(n_components=n_eigens).fit(nan_features)
with plt.style.context('fivethirtyeight'):
plt.figure(figsize=(8, 5));
plt.title('Explained Variance Ratio over Component');
plt.plot(X_reduced.explained_variance_ratio_);
# In[ ]:
with plt.style.context('fivethirtyeight'):
plt.figure(figsize=(8, 5));
plt.title('Cumulative Explained Variance over EigenFace');
plt.plot(X_reduced.explained_variance_ratio_.cumsum());
# In[ ]:
print('PCA captures {:.2f} percent of the variance in the dataset'.format(X_reduced.explained_variance_ratio_.sum() * 100))
print('PCA components have dimensions {} by {}'.format(*X_reduced.components_.shape))
| [
"adithyagirish@berkeley.edu"
] | adithyagirish@berkeley.edu |
94ccbc6af940e19a5187ad8c2c0913db8af83b8d | 04a77043cebd9415069aad4a6b8e7af077de1168 | /2-python_opp/day04/Python_OO4/with.py | f38bdaf9633ae0eaac7bd82d4b73c4c65fce139f | [] | no_license | yangxiangtao/biji | a935fbc4af42c81205900cb95a11e98c16d739de | 5c5f46e6c145fc02ea10b7befdc05c489fc3b945 | refs/heads/master | 2022-11-12T02:25:51.532838 | 2019-04-02T01:22:12 | 2019-04-02T01:22:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | # with.py
# with语句示例
# try:
# #f = open("aaa.txt", "rt")
# # 使用with语句,不管以下的操作是否
# # 发生异常,都能保证文件被正确关闭
# with open("a.txt", "rt") as f:
# for line in f:
# print(line,end="")
# # with语句结束
# except:
# print("文件操作失败")
class A: #自定义资源管理器
def __init__(self, name):
self.name = name
def __enter__(self):
print("__enter__()方法被执行")
return self
def __exit__(self, exc_type, exc_val,exc_tb):
print("__exit__()方法被执行")
if exc_type is None: #没有出现异常
print("没有出现异常")
else: # 出现异常
print("错误类型:", exc_type)
print("错误对象:", exc_val)
print("TraceBack:", exc_tb)
if __name__ == "__main__":
with A("test_name") as a:
print("with语句执行了")
# 制造或不制造异常
a = int(input("请输入一个数字:"))
print("程序退出") | [
"123@bb.com"
] | 123@bb.com |
d41d78e8572fe2ff96675b55eb8b75fc96cc4b9a | a5e6ce10ff98539a94a5f29abbc053de9b957cc6 | /competition/20191124/d.py | fb87af6d47c3ddbc8bf896989ae9b64796fbfd70 | [] | no_license | shimaw28/atcoder_practice | 5097a8ec636a9c2e9d6c417dda5c6a515f1abd9c | 808cdc0f2c1519036908118c418c8a6da7ae513e | refs/heads/master | 2020-07-26T10:59:51.927217 | 2020-06-13T11:53:19 | 2020-06-13T11:53:19 | 208,622,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | N = int(input())
g = []
for _ in range(N-1):
a, b = map(int, input().split())
g.append((a,b))
d = {}
for i in range(1, N+1):
d[i] = []
d_lines = {}
col = [1]
n_cols = 1
d[g[0][0]].append(1)
d[g[0][1]].append(1)
d_lines[1] = [g[0][0], g[0][1]]
for gi in g[1:]:
a, b = gi[0], gi[1]
n = 1
while True:
if n not in d[a]:
break
else:
n += 1
col.append(n)
n_cols = max(n_cols, n)
d[a].append(n)
d[b].append(n)
print(n_cols)
for c in col:
print(c)
| [
"shima.w28@gmail.com"
] | shima.w28@gmail.com |
f1c9093fbdba3a7d1f45d8cd049d334f45a4d65c | d7ffbb47995f35b42b3e3d06d5fb14978440e766 | /mysite/urls.py | cb583ad6c599aed51704ddf4cd3250a1c2c3bcc1 | [] | no_license | githubjyotiranjan/django-restframework-api | 032c39aa93fea125a63a546638f8750ba5b3860f | 46affccbb1f72a488e382ad7dc8dacadc8e4e1c5 | refs/heads/master | 2020-03-20T15:56:30.233524 | 2018-06-24T04:28:16 | 2018-06-24T04:28:16 | 137,525,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.contrib import admin
from django.conf.urls import url
from django.conf.urls import include
from accounts import views
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
urlpatterns = [
url('admin/', admin.site.urls),
url(r'^api/token/$', TokenObtainPairView.as_view(), name='token_obtain_pair'),
url(r'^api/token/refresh/$', TokenRefreshView.as_view(), name='token_refresh'),
url(r'api/',include('accounts.urls', namespace='accounts')),
]
| [
"jsatapathy007@gmail.com"
] | jsatapathy007@gmail.com |
b529a2465a28b369f9bc16606c3b1b5d712fb008 | 7855dfd58df154a6f0a0d0939630fbc3ca24b0c1 | /memegen/__init__.py | 159ae96c2d670c3110c7aaedebcf52706c2a278c | [
"MIT"
] | permissive | neufeldtech/memegen | f17c7b4c9bcb849fc0e021a444eb5678ab7af06e | 1375c7ca88b44fc435bb55992b5ef6b7ad882475 | refs/heads/master | 2020-12-25T15:51:08.260488 | 2016-06-06T01:38:11 | 2016-06-06T11:29:14 | 52,167,031 | 1 | 0 | null | 2016-02-20T17:56:40 | 2016-02-20T17:56:39 | null | UTF-8 | Python | false | false | 296 | py | """Package for MemeGen."""
import sys
__project__ = 'MemeGen'
__version__ = '2.1'
VERSION = "{} v{}".format(__project__, __version__)
PYTHON_VERSION = 3, 5
if sys.version_info < PYTHON_VERSION: # pragma: no cover (manual test)
exit("Python {}.{}+ is required.".format(*PYTHON_VERSION))
| [
"jacebrowning@gmail.com"
] | jacebrowning@gmail.com |
b8412b4889c039c927fe3d297dfdb00dbe224b5a | 8130c34d546c323d6d5d2ca6b4a67330af08828f | /.history/menu_app/views_20210104161836.py | f2217c09ba6b4454c6940d53ef9b130fb78808e2 | [] | no_license | lienusrob/final | ba2dad086fc97b21b537ef12df834dfadd222943 | f2726e31f1d51450e4aed8c74021c33679957b28 | refs/heads/master | 2023-02-15T01:36:54.463034 | 2021-01-07T12:47:05 | 2021-01-07T12:47:05 | 327,279,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,858 | py |
from .models import Cart, CartItem, MenuItem, ItemsCategory, Order, Orders, generate_order_id
from account_app.models import Profile
from .forms import AddToCartForm
from django.views.generic import ListView
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.utils import timezone
class MenuListView(ListView):
model = MenuItem
template_name = 'items/menu_list.html'
def menu_list_view(request):
item_list = MenuItem.objects.all()
context = {'item_list': item_list,
'item_categories':reversed(ItemsCategory.objects.all()),
'item_categories_side_nav':reversed(ItemsCategory.objects.all())}
return render(request, 'menu_app/menu_list.html', context)
def home(request):
category_menu = ItemsCategory.objects.all()
context = {'category_menu': category_menu}
return render (request, 'homepage.html', context)
def menu_item_detail(request, **kwargs):
item = MenuItem.objects.filter(id=kwargs.get('pk')).first()
context = {'item':item}
return render(request, 'menu_app/item_details.html', context)
def new_order_info(request):
user_profile = get_object_or_404(Profile, user=request.user)
order, created = Order.objects.get_or_create(customer=user_profile.user, is_ordered=False)
if created:
order.ref_code = generate_order_id()
order.save()
context = {'order':order}
return render(request, 'items/order_info.html', context)
def cart (request):
cart = Cart.objects.get(user = request.user, current = True)
cart_items = CartItem.objects.filter(cart = cart)
context = {'cart_items':cart_items}
return render (request, 'menu_app/cart.html', context )
def menu_details(request, name):
category = ItemsCategory.objects.get(name=name)
menu_details = MenuItem.objects.filter(category=category)
context = {'menu_details':menu_details, 'category':name, 'user':request.user}
if request.method=="POST":
form = AddToCartForm(request.POST or None)
form.cart = Cart.objects.get_or_create(user=request.user, current=True)
form.save()
#messages.success(request, "Item" "added to cart successfully!, please go to cart and check for items.")
return render(request, ('menu_app/menu_list.html'), context)
def cart(request):
cart = Cart.objects.get(user=request.user, current=True)
cart_items = CartItem.objects.filter(cart=cart)
#extras = Extras.objects.all()
context = {'cart_items':cart_items}
return render(request, 'menu_app/cart.html', context)
def view_cart(request):
"""A View that renders the cart contents page"""
return render(request, "cart.html")
def add_to_cart(request, id):
"""Add a quantity of the specified product to the cart"""
quantity = int(request.POST.get('quantity'))
cart = request.session.get('cart', {})
if id in cart:
cart[id] = int(cart[id]) + quantity
else:
cart[id] = cart.get(id, quantity)
request.session['cart'] = cart
return redirect('homepage')
def adjust_cart(request, id):
quantity = int(request.POST.get('quantity'))
cart = request.session.get('cart', {})
if quantity > 0:
cart[id] = quantity
else:
cart.pop(id)
request.session['cart'] = cart
return redirect('view_cart')
def orders (request):
cart = Cart.objects.get(user=request.user, current = True)
cart_items = CartItem.objects.filter(cart__pk__ = cart.pk)
if request.method == "POST":
for key, value in request.POST.items():
if key == "csrfmiddleweartoken":
continue
cart.current == False
cart.date_ordered= timezone.now()
cart.save()
orders= Orders (cart = cart)
orders.save()
cart = Cart(user=)
| [
"lienus.rob@hotmail.de"
] | lienus.rob@hotmail.de |
101d748a0568052170a6d7e693048fe769ade8ae | 7cce9875553a31b2ef2253792d46c488a8e5acb7 | /keras/keras12_split2_pratice.py | eba9f6c7ecb0b719832496daab847b8985cdac43 | [] | no_license | qzson/Study | 8498f0b1612fb2f7947d5067b7275c338e0216c6 | 4b214e9efb0ad7a5e11bca58fd311ee66200ad5e | refs/heads/master | 2022-12-01T15:16:00.966115 | 2020-08-14T08:16:42 | 2020-08-14T08:16:42 | 263,221,146 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | # split 개인 연습 파일
# 1. train:val:test = 6:2:2 데이터 분리
import numpy as np
x = np.array(range(1,101))
y = np.array(range(101,201))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, shuffle = False,
train_size = 0.6)
x_val, x_test, y_val, y_test = train_test_split(
x_test, y_test, shuffle = False,
test_size = 0.5) # 40% 중 절반 = 20%
print(x_train)
print(x_val)
print(x_test)
# 2. 8:1:1
import numpy as np
x = np.array(range(1,101))
y = np.array(range(101,201))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, shuffle = False,
train_size = 0.8)
x_val, x_test, y_val, y_test = train_test_split(
x_test, y_test, shuffle = False,
test_size = 0.5)
print(x_train)
print(x_val)
print(x_test)
# 3. 7:1:2
import numpy as np
x = np.array(range(1,101))
y = np.array(range(101,201))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, shuffle = False,
train_size = 0.7)
x_val, x_test, y_val, y_test = train_test_split(
x_test, y_test, shuffle = False,
test_size = 1/3)
# 질문 : 그냥 1:2로 나누는 과정에서 나머지는 자동으로 분류되나요?
# 답변 : test_size 에서 test가 1/3으로 할당을 했으니, 나머지는 2/3으로 자동으로 연산
print(x_train)
print(x_val)
print(x_test)
# 4. 둘 중 하나만 써도 된다.
import numpy as np
x = np.array(range(1,101))
y = np.array(range(101,201))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, shuffle = False,
train_size = 0.6, test_size = 0.4)
# <구자님 부연설명 참조>
# train_size와 test_size를 둘 다 사용해도 되고, 둘 중 하나만 사용해도 됨
# 단, train_size + test_size = sum > 1 이면 에러 뜸
# sum < 1 이면 빠진 값 만큼 날아감
# ex) train_size = 0.6, test_size = 0.3 이면 sum = 0.9로 0.1만큼의 값이 사라진다.
# train_size = 0.6, test_size = 0.4 [가능]
# train_size = 0.6, test_size = 0.3 [나머지 10%는 어디루?]
# train_size = 0.6, test_size = 0.5 [Error 발생]
print(x_train)
print(x_test) | [
"qzson@naver.com"
] | qzson@naver.com |
f60d9436d6d9c1ff4d9caaf58ceab35dfe0ad001 | 9ebaf3720bed22f4b122ea530e6fa3921c2a71b4 | /101_110_파이썬 분기문/104.boolean.py | b1e3d4c2024597f2b349bb9c4eced7625153f921 | [] | no_license | dofany/python300 | 703b7ecb17aab718cb45ca7f4fc3cc325efed0f1 | c0a30f8136ec66a4bdc66ce47cf8547fb508cefc | refs/heads/master | 2023-04-20T10:45:06.290735 | 2021-05-08T08:15:21 | 2021-05-08T08:15:21 | 355,829,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | # 아래 코드의 결과를 예상하라.
x = 4
print(1<x<5) | [
"kimdohwan17@gmail.com"
] | kimdohwan17@gmail.com |
d16de8d3bf1afedbf28f3c59adba5016a078d48b | bc368e94d950af97b71e0b0c2a3d2b86c6a9d173 | /learn-theano/learn/nn2.py | 1d24fcba78306ab8cb4597b321cc554f35b414ff | [] | no_license | ChenLiangbo/Learning-python | 6100c6ff74330fb1710622cdb22bde5a1d52e40b | 868cc4d71d1b9bd362b9fac8a39f295037d20b4c | refs/heads/master | 2020-06-11T07:42:04.434182 | 2018-05-08T05:06:14 | 2018-05-08T05:06:14 | 75,731,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,852 | py | #!usr/bin/env/python
# -*- coding: utf-8 -*-
import theano
import theano.tensor as T
from theano import function
from theano.ifelse import ifelse
import numpy as np
from random import random
print "strat build model ..."
# http://www.tuicool.com/articles/M7FRziR
# 定义变量:
x = T.matrix('x')
w1 = theano.shared(np.array([random(),random()]))
w2 = theano.shared(np.array([random(),random()]))
w3 = theano.shared(np.array([random(),random()]))
b1 = theano.shared(1.)
b2 = theano.shared(1.)
learning_rate = 0.01
a1 = 1/(1+T.exp(-T.dot(x,w1)-b1))
print "a2 = ",type(a1)
print dir(a1)
print "-"*80
print "ndim = ",a1.ndim
# print "get_scalar_constant_value = ",a1.get_scalar_constant_value()
print "dtype = ",a1.dtype
a2 = 1/(1+T.exp(-T.dot(x,w2)-b1))
x2 = T.stack([a1,a2],axis=1)
a3 = 1/(1+T.exp(-T.dot(x2,w3)-b2))
a_hat = T.vector('a_hat') #Actual output
cost = -(a_hat*T.log(a3) + (1-a_hat)*T.log(1-a3)).sum()
dw1,dw2,dw3,db1,db2 = T.grad(cost,[w1,w2,w3,b1,b2])
train = function(
inputs = [x,a_hat],
outputs = [a3,cost],
updates = [
[w1, w1-learning_rate*dw1],
[w2, w2-learning_rate*dw2],
[w3, w3-learning_rate*dw3],
[b1, b1-learning_rate*db1],
[b2, b2-learning_rate*db2]
]
)
inputs = [
[0, 0],
[0, 1],
[1, 0],
[1, 1]
]
outputs = [1,0,0,1]
print "start training ..."
# 遍历输入并计算输出:
cost = []
for iteration in range(30000):
print "iteration = ",iteration
pred, cost_iter = train(inputs, outputs)
cost.append(cost_iter)
break
# 打印输出
print 'The outputs of the NN are:'
for i in range(len(inputs)):
print 'The output for x1=%d | x2=%d is %.2f' % (inputs[i][0],inputs[i][1],pred[i])
# 绘制损失图:
print '\nThe flow of cost during model run is as following:'
import matplotlib.pyplot as plt
# plt.plot(cost)
# plt.show() | [
"chenlb@polarwin.cn"
] | chenlb@polarwin.cn |
2f7b3ebc8907d6b393f102241244a36dbd7c459c | cbda89443b351bb2047180dad4e300c13dc3df7f | /Crystals/Morpurgo_all_atoms_Reorgs_qsplit_fittedscreens/Jobs/PDIF-CN2/PDIF-CN2_neut_neut_inner2_outer0/PDIF-CN2_neut_neut_inner2_outer0.py | 15ddd8ba9e54cd3004aa0e5e5a64fa6fc1119384 | [] | no_license | sheridanfew/pythonpolarisation | 080f52979f98d26360a46412a10c8e3f51ee4549 | 178e2684e9a239a8e60af5f7b1eb414ac5f31e92 | refs/heads/master | 2021-07-10T01:07:40.978790 | 2021-03-11T16:56:37 | 2021-03-11T16:56:37 | 96,101,351 | 0 | 0 | null | 2017-07-03T13:37:06 | 2017-07-03T10:54:52 | null | UTF-8 | Python | false | false | 5,854 | py | import sys
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
print strftime("%a, %d %b %Y %X +0000", gmtime())
qdict={"anion": -1.0, "neut": 0.0, "cation": 1.0}
name='PDIF-CN2_neut_neut_inner2_outer0'
#For crystals here, all cubic and centred at centre
insize=2
#number of TVs in each dir central mol is from edge of inner region
outsize=0
state='neut'
mols_cen=['PDIF_CN2_mola_neut_aniso_cifstruct_mul.xyz']
mols_sur=['PDIF_CN2_mola_neut_aniso_cifstruct_mul.xyz']
mols_outer=['sp_PDIFCN2_neut.xyz']
screenradius=2.5533199878
#From cif:
'''
PDIF-CN2
_cell_length_a 5.2320(14)
_cell_length_b 7.638(2)
_cell_length_c 18.819(5)
_cell_angle_alpha 92.512(5)
_cell_angle_beta 95.247(5)
_cell_angle_gamma 104.730(4)
_cell_volume 722.5(3)
'''
#Get translation vectors:
a=5.232014/0.5291772109217
b=7.6382/0.5291772109217
c=18.8195/0.5291772109217
alpha=92.5125*(pi/180)
beta=95.2475*(pi/180)
gamma=104.7304*(pi/180)
cif_unit_cell_volume=722.53/(a*b*c*(0.5291772109217**3))
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
[0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
[0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T
cut=8.0
totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for diamond outer, don't specify for cube and will fill to cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV
# Place Molecules
crystal=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
crystal().ModifyPolarizabilityCry(jmtype='TholeExp',fittype='empirical')
#crystal._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
crystal().print_posns()
#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
#screenradius=1.6623/(Natoms**2)
# Thole paper screenradius value for fit to components of pol. tensor divided by no. atoms in mol. We choose this screenradius value for smearing of charge as, with near planar mols, in some dirs we have molecule-like polarisabilities with near atom-like separations.
#This form of screenradius will result in charge being smeared along the separation axis of molecules by NAtoms*(Thole's value for a single atom)
jm = JMatrix(jmtype='TholeExp',screenradius=screenradius)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'
# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()
# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
dstr=str(dd)
f.write(dstr)
f.write('\n')
f.flush()
f.close()
# print properties for charge in centrepos
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()
print 'Job Completed Successfully.'
| [
"sheridan.few@gmail.com"
] | sheridan.few@gmail.com |
13bb30d8a590842169beb86a035435accff49d55 | add0bb7a309ea346614d7f560a24e653d3d0ff67 | /pythonbase/数据解析/2.正则解析_分页爬取.py | fdab3e17bf3c37d2d29e351313833ffb684b8d18 | [] | no_license | 1572903465/PythonProjects | 935aff08d5b3d3f146393764a856369061513d36 | 73576080174f72ea1df9b36d201cf3949419041b | refs/heads/master | 2023-06-10T15:50:49.178112 | 2021-07-05T15:42:53 | 2021-07-05T15:42:53 | 301,328,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | # 需求爬取糗事百科中糗图模块下的所有图片图片
import requests
import re
import os
if __name__ == '__main__':
#创建一个文件加, 保存所有的图片
if not os.path.exists('./qiutuLibs'):
os.mkdir('./qiutuLibs')
# 设置一个通用的url模板
url = 'https://www.qiushibaike.com/imgrank/page/%d/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36'
}
# pageNum = 1
for pageNum in range(1,5):
#对应页码的url
new_url = format(url%pageNum)
# 使用通用怕从对url对应的一整张页面进行爬取
page_text = requests.get(url=new_url,headers=headers).text
# 使用聚焦爬虫将页面的所有糗图进行解析/爬取
ex = '<div class="thumb">.*?<img src="(.*?)" alt.*?</div>'
img_src_list = re.findall(ex,page_text,re.S)
print(img_src_list)
for src in img_src_list:
#拼接出一个完整的图片url
src = 'https:'+src
# 请求到了图片的二进制数据
img_data = requests.get(url=src,headers=headers).content
#生成图片名称
img_name=src.split('/')[-1]
imgPath = './qiutuLibs/'+img_name
with open(imgPath,'wb') as fp:
fp.write(img_data)
print(img_name+'爬取成功')
| [
"1572903465@qq.com"
] | 1572903465@qq.com |
4232950bb6747d83d1fbf6623c4a0579313b9c14 | eef659a707d87e979741cc11ad59344c911790f5 | /cc3/rest/serializers.py | 3ab26544ea65b8b75307be53466128327e97be3c | [] | no_license | qoin-open-source/samen-doen-cc3 | 1e5e40a9b677886aa78f980670df130cbbb95629 | 8b7806177e1e245af33b5112c551438b8c0af5d2 | refs/heads/master | 2020-05-04T02:26:07.039872 | 2019-04-02T21:19:54 | 2019-04-02T21:19:54 | 178,926,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from rest_framework import serializers
from cc3.cards.models import Card
class CardSerializer(serializers.ModelSerializer):
class Meta:
model = Card
fields = (
'card_type',
'number',
'card_security_code',
'creation_date',
'activation_date',
'expiration_date',
'card_security_code_blocked_until',
'owner',
'status',
)
| [
"stephen.wolff@qoin.com"
] | stephen.wolff@qoin.com |
537c884c6295315906c8b48d238b4689ecbbad55 | e57d7785276053332c633b57f6925c90ad660580 | /sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_text_async.py | a34195f7e320e51f4760c054540dd55b0ae6b5c0 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 2,419 | py | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_query_text_async.py
DESCRIPTION:
This sample demonstrates how to ask a question from supplied text data.
USAGE:
python sample_query_text_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_QUESTIONANSWERING_ENDPOINT - the endpoint to your QuestionAnswering resource.
2) AZURE_QUESTIONANSWERING_KEY - your QuestionAnswering API key.
"""
import asyncio
async def sample_query_text():
# [START query_text_async]
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.questionanswering.aio import QuestionAnsweringClient
from azure.ai.language.questionanswering import models as qna
endpoint = os.environ["AZURE_QUESTIONANSWERING_ENDPOINT"]
key = os.environ["AZURE_QUESTIONANSWERING_KEY"]
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
async with client:
input = qna.TextQueryOptions(
question="How long it takes to charge surface?",
records=[
qna.TextRecord(
text="Power and charging. It takes two to four hours to charge the Surface Pro 4 battery fully from an empty state. " +
"It can take longer if you’re using your Surface for power-intensive activities like gaming or video streaming while you’re charging it.",
id="doc1"
),
qna.TextRecord(
text="You can use the USB port on your Surface Pro 4 power supply to charge other devices, like a phone, while your Surface charges. " +
"The USB port on the power supply is only for charging, not for data transfer. If you want to use a USB device, plug it into the USB port on your Surface.",
id="doc2"
)
]
)
output = await client.query_text(input)
best_answer = [a for a in output.answers if a.confidence_score > 0.9][0]
print("Q: {}".format(input.question))
print("A: {}".format(best_answer.answer))
# [END query_text_async]
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(sample_query_text())
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
54f9c679ed4b9d269ac0c203e74bb78f7f17dd0b | b2758a5755d3712fef54ff004403cbf1d25c7956 | /sourcefinder/image.py | b68587776e41dcb08574281553c5a0f25d14be8a | [
"BSD-2-Clause"
] | permissive | DavidRuhe/pyse | 6fa34dbe247f44cee89e8bd84bc5e5efe245c047 | a8310c2967bf597d265376e7e5bd8ca598bb34be | refs/heads/master | 2022-12-07T14:51:16.857972 | 2018-03-28T15:15:05 | 2018-03-28T15:15:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,548 | py | """
Some generic utility routines for number handling and
calculating (specific) variances
"""
import itertools
import logging
import numpy
from sourcefinder import extract
from sourcefinder import stats
from sourcefinder import utils
from sourcefinder.utility import containers
from sourcefinder.utility.memoize import Memoize
try:
import ndimage
except ImportError:
from scipy import ndimage
logger = logging.getLogger(__name__)
#
# Hard-coded configuration parameters; not user settable.
#
INTERPOLATE_ORDER = 1 # Spline order for grid interpolation
MEDIAN_FILTER = 0 # If non-zero, apply a median filter of size
# MEDIAN_FILTER to the background and RMS grids prior
# to interpolating.
MF_THRESHOLD = 0 # If MEDIAN_FILTER is non-zero, only use the filtered
# grid when the (absolute) difference between the raw
# and filtered grids is larger than MF_THRESHOLD.
DEBLEND_MINCONT = 0.005 # Min. fraction of island flux in deblended subisland
STRUCTURING_ELEMENT = [[0, 1, 0], [1, 1, 1], [0, 1, 0]] # Island connectiivty
class ImageData(object):
"""Encapsulates an image in terms of a numpy array + meta/headerdata.
This is your primary contact point for interaction with images: it icludes
facilities for source extraction and measurement, etc.
"""
def __init__(self, data, beam, wcs, margin=0, radius=0, back_size_x=32,
back_size_y=32, residuals=True
):
"""Sets up an ImageData object.
*Args:*
- data (2D numpy.ndarray): actual image data
- wcs (utility.coordinates.wcs): world coordinate system
specification
- beam (3-tuple): beam shape specification as
(semimajor, semiminor, theta)
"""
# Do data, wcs and beam need deepcopy?
# Probably not (memory overhead, in particular for data),
# but then the user shouldn't change them outside ImageData in the
# mean time
self.rawdata = data # a 2D numpy array
self.wcs = wcs # a utility.coordinates.wcs instance
self.beam = beam # tuple of (semimaj, semimin, theta)
self.clip = {}
self.labels = {}
self.freq_low = 1
self.freq_high = 1
self.back_size_x = back_size_x
self.back_size_y = back_size_y
self.margin = margin
self.radius = radius
self.residuals = residuals
###########################################################################
# #
# Properties and attributes. #
# #
# Properties are attributes managed by methods; rather than calling the #
# method directly, the attribute automatically invokes it. We can use #
# this to do cunning transparent caching ("memoizing") etc; see the #
# Memoize class. #
# #
# clearcache() clears all the memoized data, which can get quite large. #
# It may be wise to call this, for example, in an exception handler #
# dealing with MemoryErrors. #
# #
###########################################################################
@Memoize
def _grids(self):
"""Gridded RMS and background data for interpolating"""
return self.__grids()
grids = property(fget=_grids, fdel=_grids.delete)
@Memoize
def _backmap(self):
"""Background map"""
if not hasattr(self, "_user_backmap"):
return self._interpolate(self.grids['bg'])
else:
return self._user_backmap
def _set_backmap(self, bgmap):
self._user_backmap = bgmap
del (self.backmap)
del (self.data_bgsubbed)
backmap = property(fget=_backmap, fdel=_backmap.delete, fset=_set_backmap)
@Memoize
def _get_rm(self):
"""RMS map"""
if not hasattr(self, "_user_noisemap"):
return self._interpolate(self.grids['rms'], roundup=True)
else:
return self._user_noisemap
def _set_rm(self, noisemap):
self._user_noisemap = noisemap
del (self.rmsmap)
rmsmap = property(fget=_get_rm, fdel=_get_rm.delete, fset=_set_rm)
@Memoize
def _get_data(self):
"""Masked image data"""
# We will ignore all the data which is masked for the rest of the
# sourcefinding process. We build up the mask by stacking ("or-ing
# together") a number of different effects:
#
# * A margin from the edge of the image;
# * Any data outside a given radius from the centre of the image;
# * Data which is "obviously" bad (equal to 0 or NaN).
mask = numpy.zeros((self.xdim, self.ydim))
if self.margin:
margin_mask = numpy.ones((self.xdim, self.ydim))
margin_mask[self.margin:-self.margin, self.margin:-self.margin] = 0
mask = numpy.logical_or(mask, margin_mask)
if self.radius:
radius_mask = utils.circular_mask(self.xdim, self.ydim, self.radius)
mask = numpy.logical_or(mask, radius_mask)
mask = numpy.logical_or(mask, numpy.isnan(self.rawdata))
return numpy.ma.array(self.rawdata, mask=mask)
data = property(fget=_get_data, fdel=_get_data.delete)
@Memoize
def _get_data_bgsubbed(self):
"""Background subtracted masked image data"""
return self.data - self.backmap
data_bgsubbed = property(fget=_get_data_bgsubbed,
fdel=_get_data_bgsubbed.delete)
@property
def xdim(self):
"""X pixel dimension of (unmasked) data"""
return self.rawdata.shape[0]
@property
def ydim(self):
"""Y pixel dimension of (unmasked) data"""
return self.rawdata.shape[1]
@property
def pixmax(self):
"""Maximum pixel value (pre-background subtraction)"""
return self.data.max()
@property
def pixmin(self):
"""Minimum pixel value (pre-background subtraction)"""
return self.data.min()
def clearcache(self):
"""Zap any calculated data stored in this object.
Clear the background and rms maps, labels, clip, and any locally held
data. All of these can be reconstructed from the data accessor.
Note that this *must* be run to pick up any new settings.
"""
self.labels.clear()
self.clip.clear()
del (self.backmap)
del (self.rmsmap)
del (self.data)
del (self.data_bgsubbed)
del (self.grids)
if hasattr(self, 'residuals_from_gauss_fitting'):
del (self.residuals_from_gauss_fitting)
if hasattr(self, 'residuals_from_deblending'):
del (self.residuals_from_deblending)
###########################################################################
# #
# General purpose image handling. #
# #
# Routines for saving and trimming data, and calculating background/RMS #
# maps (in conjuntion with the properties above). #
# #
###########################################################################
# Private "support" methods
def __grids(self):
"""Calculate background and RMS grids of this image.
These grids can be interpolated up to make maps of the original image
dimensions: see _interpolate().
This is called automatically when ImageData.backmap,
ImageData.rmsmap or ImageData.fdrmap is first accessed.
"""
# We set up a dedicated logging subchannel, as the sigmaclip loop
# logging is very chatty:
sigmaclip_logger = logging.getLogger(__name__ + '.sigmaclip')
# there's no point in working with the whole of the data array
# if it's masked.
useful_chunk = ndimage.find_objects(numpy.where(self.data.mask, 0, 1))
assert (len(useful_chunk) == 1)
useful_data = self.data[useful_chunk[0]]
my_xdim, my_ydim = useful_data.shape
rmsgrid, bggrid = [], []
for startx in range(0, my_xdim, self.back_size_x):
rmsrow, bgrow = [], []
for starty in range(0, my_ydim, self.back_size_y):
chunk = useful_data[
startx:startx + self.back_size_x,
starty:starty + self.back_size_y
].ravel()
if not chunk.any():
rmsrow.append(False)
bgrow.append(False)
continue
chunk, sigma, median, num_clip_its = stats.sigma_clip(
chunk, self.beam)
if len(chunk) == 0 or not chunk.any():
rmsrow.append(False)
bgrow.append(False)
else:
mean = numpy.mean(chunk)
rmsrow.append(sigma)
# In the case of a crowded field, the distribution will be
# skewed and we take the median as the background level.
# Otherwise, we take 2.5 * median - 1.5 * mean. This is the
# same as SExtractor: see discussion at
# <http://terapix.iap.fr/forum/showthread.php?tid=267>.
# (mean - median) / sigma is a quick n' dirty skewness
# estimator devised by Karl Pearson.
if numpy.fabs(mean - median) / sigma >= 0.3:
sigmaclip_logger.debug(
'bg skewed, %f clipping iterations', num_clip_its)
bgrow.append(median)
else:
sigmaclip_logger.debug(
'bg not skewed, %f clipping iterations',
num_clip_its)
bgrow.append(2.5 * median - 1.5 * mean)
rmsgrid.append(rmsrow)
bggrid.append(bgrow)
rmsgrid = numpy.ma.array(
rmsgrid, mask=numpy.where(numpy.array(rmsgrid) == False, 1, 0))
bggrid = numpy.ma.array(
bggrid, mask=numpy.where(numpy.array(bggrid) == False, 1, 0))
return {'rms': rmsgrid, 'bg': bggrid}
def _interpolate(self, grid, roundup=False):
"""
Interpolate a grid to produce a map of the dimensions of the image.
Args:
grid (numpy.ma.MaskedArray)
Kwargs:
roundup (bool)
Returns:
(numpy.ma.MaskedArray)
Used to transform the RMS, background or FDR grids produced by
L{_grids()} to a map we can compare with the image data.
If roundup is true, values of the resultant map which are lower than
the input grid are trimmed.
"""
# there's no point in working with the whole of the data array if it's
# masked.
useful_chunk = ndimage.find_objects(numpy.where(self.data.mask, 0, 1))
assert (len(useful_chunk) == 1)
my_xdim, my_ydim = self.data[useful_chunk[0]].shape
if MEDIAN_FILTER:
f_grid = ndimage.median_filter(grid, MEDIAN_FILTER)
if MF_THRESHOLD:
grid = numpy.where(
numpy.fabs(f_grid - grid) > MF_THRESHOLD, f_grid, grid
)
else:
grid = f_grid
# Bicubic spline interpolation
xratio = float(my_xdim) / self.back_size_x
yratio = float(my_ydim) / self.back_size_y
# First arg: starting point. Second arg: ending point. Third arg:
# 1j * number of points. (Why is this complex? Sometimes, NumPy has an
# utterly baffling API...)
slicex = slice(-0.5, -0.5 + xratio, 1j * my_xdim)
slicey = slice(-0.5, -0.5 + yratio, 1j * my_ydim)
my_map = numpy.ma.MaskedArray(numpy.zeros(self.data.shape),
mask=self.data.mask)
# Remove the MaskedArrayFutureWarning warning and keep old numpy < 1.11
# behavior
my_map.unshare_mask()
my_map[useful_chunk[0]] = ndimage.map_coordinates(
grid, numpy.mgrid[slicex, slicey],
mode='nearest', order=INTERPOLATE_ORDER)
# If the input grid was entirely masked, then the output map must
# also be masked: there's no useful data here. We don't search for
# sources on a masked background/RMS, so this data will be cleanly
# skipped by the rest of the sourcefinder
if numpy.ma.getmask(grid).all():
my_map.mask = True
elif roundup:
# In some cases, the spline interpolation may produce values
# lower than the minimum value in the map. If required, these
# can be trimmed off. No point doing this if the map is already
# fully masked, though.
my_map = numpy.ma.MaskedArray(
data=numpy.where(
my_map >= numpy.min(grid), my_map, numpy.min(grid)),
mask=my_map.mask
)
return my_map
###########################################################################
# #
# Source extraction. #
# #
# Provides for both traditional (islands-above-RMS) and FDR source #
# extraction systems. #
# #
###########################################################################
def extract(self, det, anl, noisemap=None, bgmap=None, labelled_data=None,
labels=None, deblend_nthresh=0, force_beam=False):
"""
Kick off conventional (ie, RMS island finding) source extraction.
Kwargs:
det (float): detection threshold, as a multiple of the RMS
noise. At least one pixel in a source must exceed this
for it to be regarded as significant.
anl (float): analysis threshold, as a multiple of the RMS
noise. All the pixels within the island that exceed
this will be used when fitting the source.
noisemap (numpy.ndarray):
bgmap (numpy.ndarray):
deblend_nthresh (int): number of subthresholds to use for
deblending. Set to 0 to disable.
force_beam (bool): force all extractions to have major/minor axes
equal to the restoring beam
Returns:
:class:`sourcefinder.utility.containers.ExtractionResults`
"""
if anl > det:
logger.warn(
"Analysis threshold is higher than detection threshold"
)
# If the image data is flat we may as well crash out here with a
# sensible error message, otherwise the RMS estimation code will
# crash out with a confusing error later.
if numpy.ma.max(self.data) == numpy.ma.min(self.data):
raise RuntimeError("Bad data: Image data is flat")
if (type(bgmap).__name__ == 'ndarray' or
type(bgmap).__name__ == 'MaskedArray'):
if bgmap.shape != self.backmap.shape:
raise IndexError("Background map has wrong shape")
else:
self.backmap = bgmap
if (type(noisemap).__name__ == 'ndarray' or
type(noisemap).__name__ == 'MaskedArray'):
if noisemap.shape != self.rmsmap.shape:
raise IndexError("Noisemap has wrong shape")
if noisemap.min() < 0:
raise ValueError("RMS noise cannot be negative")
else:
self.rmsmap = noisemap
if labelled_data is not None and labelled_data.shape != self.data.shape:
raise ValueError("Labelled map is wrong shape")
return self._pyse(
det * self.rmsmap, anl * self.rmsmap, deblend_nthresh, force_beam,
labelled_data=labelled_data, labels=labels
)
def reverse_se(self, det):
"""Run source extraction on the negative of this image.
Obviously, there should be no sources in the negative image, so this
tells you about the false positive rate.
We need to clear cached data -- backgroung map, cached clips, etc --
before & after doing this, as they'll interfere with the normal
extraction process. If this is regularly used, we'll want to
implement a separate cache.
"""
self.labels.clear()
self.clip.clear()
self.data_bgsubbed *= -1
results = self.extract(det=det)
self.data_bgsubbed *= -1
self.labels.clear()
self.clip.clear()
return results
def fd_extract(self, alpha, anl=None, noisemap=None,
bgmap=None, deblend_nthresh=0, force_beam=False
):
"""False Detection Rate based source extraction.
The FDR procedure guarantees that <FDR> < alpha.
See `Hopkins et al., AJ, 123, 1086 (2002)
<http://adsabs.harvard.edu/abs/2002AJ....123.1086H>`_.
"""
# The correlation length in config.py is used not only for the
# calculation of error bars with the Condon formulae, but also for
# calculating the number of independent pixels.
corlengthlong, corlengthshort = utils.calculate_correlation_lengths(
self.beam[0], self.beam[1])
C_n = (1.0 / numpy.arange(
round(0.25 * numpy.pi * corlengthlong *
corlengthshort + 1))[1:]).sum()
# Calculate the FDR threshold
# Things will go terribly wrong in the line below if the interpolated
# noise values get very close or below zero. Use INTERPOLATE_ORDER=1
# or the roundup option.
if (type(bgmap).__name__ == 'ndarray' or
type(bgmap).__name__ == 'MaskedArray'):
if bgmap.shape != self.backmap.shape:
raise IndexError("Background map has wrong shape")
else:
self.backmap = bgmap
if (type(noisemap).__name__ == 'ndarray' or
type(noisemap).__name__ == 'MaskedArray'):
if noisemap.shape != self.rmsmap.shape:
raise IndexError("Noisemap has wrong shape")
if noisemap.min() < 0:
raise ValueError("RMS noise cannot be negative")
else:
self.rmsmap = noisemap
normalized_data = self.data_bgsubbed / self.rmsmap
n1 = numpy.sqrt(2 * numpy.pi)
prob = numpy.sort(
numpy.ravel(numpy.exp(-0.5 * normalized_data ** 2) / n1))
lengthprob = float(len(prob))
compare = (alpha / C_n) * numpy.arange(lengthprob + 1)[1:] / lengthprob
# Find the last undercrossing, see, e.g., fig. 9 in Miller et al., AJ
# 122, 3492 (2001). Searchsorted is not used because the array is not
# sorted.
try:
index = (numpy.where(prob - compare < 0.)[0]).max()
except ValueError:
# Everything below threshold
return containers.ExtractionResults()
fdr_threshold = numpy.sqrt(-2.0 * numpy.log(n1 * prob[index]))
# Default we require that all source pixels are above the threshold,
# not only the peak pixel. This gives a better guarantee that indeed
# the fraction of false positives is less than fdr_alpha in config.py.
# See, e.g., Hopkins et al., AJ 123, 1086 (2002).
if not anl:
anl = fdr_threshold
return self._pyse(fdr_threshold * self.rmsmap, anl * self.rmsmap,
deblend_nthresh, force_beam)
def flux_at_pixel(self, x, y, numpix=1):
"""Return the background-subtracted flux at a certain position
in the map"""
# numpix is the number of pixels to look around the target.
# e.g. numpix = 1 means a total of 9 pixels, 1 in each direction.
return self.data_bgsubbed[y - numpix:y + numpix + 1,
x - numpix:x + numpix + 1].max()
@staticmethod
def box_slice_about_pixel(x, y, box_radius):
"""
Returns a slice centred about (x,y), of width = 2*int(box_radius) + 1
"""
ibr = int(box_radius)
x = int(x)
y = int(y)
return (slice(x - ibr, x + ibr + 1),
slice(y - ibr, y + ibr + 1))
def fit_to_point(self, x, y, boxsize, threshold, fixed):
"""Fit an elliptical Gaussian to a specified point on the image.
The fit is carried on a square section of the image, of length
*boxsize* & centred at pixel coordinates *x*, *y*. Any data
below *threshold* * rmsmap is not used for fitting. If *fixed*
is set to ``position``, then the pixel coordinates are fixed
in the fit.
Returns an instance of :class:`sourcefinder.extract.Detection`.
"""
logger.debug("Force-fitting pixel location ({},{})".format(x, y))
# First, check that x and y are actually valid semi-positive integers.
# Otherwise,
# If they are too high (positive), then indexing will fail
# BUT, if they are negative, then we get wrap-around indexing
# and the fit continues at the wrong position!
if (x < 0 or x > self.xdim
or y < 0 or y > self.ydim):
logger.warning("Dropping forced fit at ({},{}), "
"pixel position outside image".format(x, y)
)
return None
# Next, check if any of the central pixels (in a 3x3 box about the
# fitted pixel position) have been Masked
# (e.g. if NaNs, or close to image edge) - reject if so.
central_pixels_slice = ImageData.box_slice_about_pixel(x, y, 1)
if self.data.mask[central_pixels_slice].any():
logger.warning(
"Dropping forced fit at ({},{}), "
"Masked pixel in central fitting region".format(x, y))
return None
if ((
# Recent NumPy
hasattr(numpy.ma.core, "MaskedConstant") and
isinstance(self.rmsmap, numpy.ma.core.MaskedConstant)
) or (
# Old NumPy
numpy.ma.is_masked(self.rmsmap[int(x), int(y)])
)):
logger.error("Background is masked: cannot fit")
return None
chunk = ImageData.box_slice_about_pixel(x, y, boxsize / 2.0)
if threshold is not None:
# We'll mask out anything below threshold*self.rmsmap from the fit.
labels, num = self.labels.setdefault(
# Dictionary mapping threshold -> islands map
threshold,
ndimage.label(
self.clip.setdefault(
# Dictionary mapping threshold -> mask
threshold,
numpy.where(
self.data_bgsubbed > threshold * self.rmsmap, 1, 0
)
)
)
)
mylabel = labels[int(x), int(y)]
if mylabel == 0: # 'Background'
raise ValueError(
"Fit region is below specified threshold, fit aborted.")
mask = numpy.where(labels[chunk] == mylabel, 0, 1)
fitme = numpy.ma.array(self.data_bgsubbed[chunk], mask=mask)
if len(fitme.compressed()) < 1:
raise IndexError("Fit region too close to edge or too small")
else:
fitme = self.data_bgsubbed[chunk]
if fitme.size < 1:
raise IndexError("Fit region too close to edge or too small")
if not len(fitme.compressed()):
logger.error("All data is masked: cannot fit")
return None
# set argument for fixed parameters based on input string
if fixed == 'position':
fixed = {'xbar': boxsize / 2.0, 'ybar': boxsize / 2.0}
elif fixed == 'position+shape':
fixed = {'xbar': boxsize / 2.0, 'ybar': boxsize / 2.0,
'semimajor': self.beam[0],
'semiminor': self.beam[1],
'theta': self.beam[2]}
elif fixed == None:
fixed = {}
else:
raise TypeError("Unkown fixed parameter")
if threshold is not None:
threshold_at_pixel = threshold * self.rmsmap[int(x), int(y)]
else:
threshold_at_pixel = None
try:
measurement, residuals = extract.source_profile_and_errors(
fitme,
threshold_at_pixel,
self.rmsmap[int(x), int(y)],
self.beam,
fixed=fixed
)
except ValueError:
# Fit failed to converge
# Moments are not applicable when holding parameters fixed
logger.error("Gaussian fit failed at %f, %f", x, y)
return None
try:
assert (abs(measurement['xbar']) < boxsize)
assert (abs(measurement['ybar']) < boxsize)
except AssertionError:
logger.warn('Fit falls outside of box.')
measurement['xbar'] += x - boxsize / 2.0
measurement['ybar'] += y - boxsize / 2.0
measurement.sig = (fitme / self.rmsmap[chunk]).max()
return extract.Detection(measurement, self)
def fit_fixed_positions(self, positions, boxsize, threshold=None,
fixed='position+shape',
ids=None):
"""
Convenience function to fit a list of sources at the given positions
This function wraps around fit_to_point().
Args:
positions (tuple): list of (RA, Dec) tuples. Positions to be fit,
in decimal degrees.
boxsize: See :py:func:`fit_to_point`
threshold: as above.
fixed: as above.
ids (tuple): A list of identifiers. If not None, then must match
the length and order of the ``requested_fits``. Any
successfully fit positions will be returned in a tuple
along with the matching id. As these are simply passed back to
calling code they can be a string, tuple or whatever.
In particular, boxsize is in pixel coordinates as in
fit_to_point, not in sky coordinates.
Returns:
tuple: A list of successful fits.
If ``ids`` is None, returns a single list of
:class:`sourcefinder.extract.Detection` s.
Otherwise, returns a tuple of two matched lists:
([detections], [matching_ids]).
"""
if ids is not None:
assert len(ids) == len(positions)
successful_fits = []
successful_ids = []
for idx, posn in enumerate(positions):
try:
x, y, = self.wcs.s2p((posn[0], posn[1]))
except RuntimeError as e:
if (str(e).startswith("wcsp2s error: 8:") or
str(e).startswith("wcsp2s error: 9:")):
logger.warning("Input coordinates (%.2f, %.2f) invalid: ",
posn[0], posn[1])
else:
raise
else:
try:
fit_results = self.fit_to_point(x, y,
boxsize=boxsize,
threshold=threshold,
fixed=fixed)
if not fit_results:
# We were unable to get a good fit
continue
if (fit_results.ra.error == float('inf') or
fit_results.dec.error == float('inf')):
logging.warning("position errors extend outside image")
else:
successful_fits.append(fit_results)
if ids:
successful_ids.append(ids[idx])
except IndexError as e:
logger.warning("Input pixel coordinates (%.2f, %.2f) "
"could not be fit because: " + str(e),
posn[0], posn[1])
if ids:
return successful_fits, successful_ids
return successful_fits
def label_islands(self, detectionthresholdmap, analysisthresholdmap):
"""
Return a lablled array of pixels for fitting.
Args:
detectionthresholdmap (numpy.ndarray):
analysisthresholdmap (numpy.ndarray):
Returns:
list of valid islands (list of int)
labelled islands (numpy.ndarray)
"""
# If there is no usable data, we return an empty set of islands.
if not len(self.rmsmap.compressed()):
logging.warning("RMS map masked; sourcefinding skipped")
return [], numpy.zeros(self.data_bgsubbed.shape, dtype=numpy.int)
# At this point, we select all the data which is eligible for
# sourcefitting. We are actually using three separate filters, which
# exclude:
#
# 1. Anything which has been masked before we reach this point;
# 2. Any pixels which fall below the analysis threshold at that pixel
# position;
# 3. Any pixels corresponding to a position where the RMS noise is
# less than RMS_FILTER (default 0.001) times the median RMS across
# the whole image.
#
# The third filter attempts to exclude those regions of the image
# which contain no usable data; for example, the parts of the image
# falling outside the circular region produced by awimager.
RMS_FILTER = 0.001
clipped_data = numpy.ma.where(
(self.data_bgsubbed > analysisthresholdmap) &
(self.rmsmap >= (RMS_FILTER * numpy.ma.median(self.rmsmap))),
1, 0
).filled(fill_value=0)
labelled_data, num_labels = ndimage.label(clipped_data,
STRUCTURING_ELEMENT)
labels_below_det_thr, labels_above_det_thr = [], []
if num_labels > 0:
# Select the labels of the islands above the analysis threshold
# that have maximum values values above the detection threshold.
# Like above we make sure not to select anything where either
# the data or the noise map are masked.
# We fill these pixels in above_det_thr with -1 to make sure
# its labels will not be in labels_above_det_thr.
# NB data_bgsubbed, and hence above_det_thr, is a masked array;
# filled() sets all mased values equal to -1.
above_det_thr = (
self.data_bgsubbed - detectionthresholdmap
).filled(fill_value=-1)
# Note that we avoid label 0 (the background).
maximum_values = ndimage.maximum(
above_det_thr, labelled_data, numpy.arange(1, num_labels + 1)
)
# If there's only one island, ndimage.maximum will return a float,
# rather than a list. The rest of this function assumes that it's
# always a list, so we need to convert it.
if isinstance(maximum_values, float):
maximum_values = [maximum_values]
# We'll filter out the insignificant islands
for i, x in enumerate(maximum_values, 1):
if x < 0:
labels_below_det_thr.append(i)
else:
labels_above_det_thr.append(i)
# Set to zero all labelled islands that are below det_thr:
labelled_data = numpy.where(
numpy.in1d(labelled_data.ravel(), labels_above_det_thr).reshape(
labelled_data.shape),
labelled_data, 0
)
return labels_above_det_thr, labelled_data
def _pyse(
self, detectionthresholdmap, analysisthresholdmap,
deblend_nthresh, force_beam, labelled_data=None, labels=[]
):
"""
Run Python-based source extraction on this image.
Args:
detectionthresholdmap (numpy.ndarray):
analysisthresholdmap (numpy.ndarray):
deblend_nthresh (int): number of subthresholds for deblending. 0
disables.
force_beam (bool): force all extractions to have major/minor axes
equal to the restoring beam
labelled_data (numpy.ndarray): labelled island map (output of
numpy.ndimage.label()). Will be calculated automatically if not
provided.
labels (tuple): list of labels in the island map to use for
fitting.
Returns:
(..utility.containers.ExtractionResults):
This is described in detail in the "Source Extraction System" document
by John Swinbank, available from TKP svn.
"""
# Map our chunks onto a list of islands.
island_list = []
if labelled_data is None:
labels, labelled_data = self.label_islands(
detectionthresholdmap, analysisthresholdmap
)
# Get a bounding box for each island:
# NB Slices ordered by label value (1...N,)
# 'None' returned for missing label indices.
slices = ndimage.find_objects(labelled_data)
for label in labels:
chunk = slices[label - 1]
analysis_threshold = (analysisthresholdmap[chunk] /
self.rmsmap[chunk]).max()
# In selected_data only the pixels with the "correct"
# (see above) labels are retained. Other pixel values are
# set to -(bignum).
# In this way, disconnected pixels within (rectangular)
# slices around islands (particularly the large ones) do
# not affect the source measurements.
selected_data = numpy.ma.where(
labelled_data[chunk] == label,
self.data_bgsubbed[chunk].data, -extract.BIGNUM
).filled(fill_value=-extract.BIGNUM)
island_list.append(
extract.Island(
selected_data,
self.rmsmap[chunk],
chunk,
analysis_threshold,
detectionthresholdmap[chunk],
self.beam,
deblend_nthresh,
DEBLEND_MINCONT,
STRUCTURING_ELEMENT
)
)
# If required, we can save the 'left overs' from the deblending and
# fitting processes for later analysis. This needs setting up here:
if self.residuals:
self.residuals_from_gauss_fitting = numpy.zeros(self.data.shape)
self.residuals_from_deblending = numpy.zeros(self.data.shape)
for island in island_list:
self.residuals_from_deblending[island.chunk] += (
island.data.filled(fill_value=0.))
# Deblend each of the islands to its consituent parts, if necessary
if deblend_nthresh:
deblended_list = [x.deblend() for x in island_list]
# deblended_list = [x.deblend() for x in island_list]
island_list = list(utils.flatten(deblended_list))
# Set up the fixed fit parameters if 'force beam' is on:
if force_beam:
fixed = {'semimajor': self.beam[0],
'semiminor': self.beam[1],
'theta': self.beam[2]}
else:
fixed = None
# Iterate over the list of islands and measure the source in each,
# appending it to the results list.
results = containers.ExtractionResults()
for island in island_list:
fit_results = island.fit(fixed=fixed)
if fit_results:
measurement, residual = fit_results
else:
# Failed to fit; drop this island and go to the next.
continue
try:
det = extract.Detection(measurement, self, chunk=island.chunk)
if (det.ra.error == float('inf') or
det.dec.error == float('inf')):
logger.warn('Bad fit from blind extraction at pixel coords:'
'%f %f - measurement discarded'
'(increase fitting margin?)', det.x, det.y)
else:
results.append(det)
except RuntimeError as e:
logger.error("Island not processed; unphysical?")
if self.residuals:
self.residuals_from_deblending[island.chunk] -= (
island.data.filled(fill_value=0.))
self.residuals_from_gauss_fitting[island.chunk] += residual
def is_usable(det):
# Check that both ends of each axis are usable; that is, that they
# fall within an unmasked part of the image.
# The axis will not likely fall exactly on a pixel number, so
# check all the surroundings.
def check_point(x, y):
x = (int(x), int(numpy.ceil(x)))
y = (int(y), int(numpy.ceil(y)))
for position in itertools.product(x, y):
try:
if self.data.mask[position[0], position[1]]:
# Point falls in mask
return False
except IndexError:
# Point falls completely outside image
return False
# Point is ok
return True
for point in (
(det.start_smaj_x, det.start_smaj_y),
(det.start_smin_x, det.start_smin_y),
(det.end_smaj_x, det.end_smaj_y),
(det.end_smin_x, det.end_smin_y)
):
if not check_point(*point):
logger.debug("Unphysical source at pixel %f, %f" % (
det.x.value, det.y.value))
return False
return True
# Filter will return a list; ensure we return an ExtractionResults.
return containers.ExtractionResults(list(filter(is_usable, results)))
| [
"gijs@pythonic.nl"
] | gijs@pythonic.nl |
9c004b8f8bc3f269da8eea1ebaae63fe378d510a | 31cc708f7e38017073cb148f33393aed879e27bb | /blog/migrations/0001_initial.py | 0c440763cc8ed74b8afb6998860ff8c7b0306564 | [] | no_license | rkdwldnjs1/My-first-blog | 9a38820a1f9211052ff491945d7cd366a07b6783 | f43b28a2a3eb00d70b326c103e3ae311299b4210 | refs/heads/master | 2020-03-22T01:35:19.576412 | 2018-07-01T09:14:50 | 2018-07-01T09:14:50 | 139,315,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-21 19:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration for the blog app: creates the Post model."""
    # This is the app's very first migration.
    initial = True
    dependencies = [
        # Post.author targets the (swappable) project user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # Defaults to the creation time.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                # Stays unset until the post is published.
                ('published_date', models.DateTimeField(blank=True, null=True)),
                # Deleting the user cascades to their posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"you@example.com"
] | you@example.com |
83f869d6971a120fc931416ae7e1f6cc3824e0ea | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/website_crm/models/__init__.py | 266fafb332a6569b8e611cb61511f4b2ae4a4946 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | # -*- coding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from . import crm_lead
from . import res_config_settings
from . import website_visitor
| [
"yasir@harpiya.com"
] | yasir@harpiya.com |
c99b8bdaa5e8f2344cdc98648368b6410b06dfad | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /eve/client/script/ui/shared/messagebox.py | d97d98c5ea0fb0485ea02234e8cd716ccb54e7bb | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,300 | py | #Embedded file name: eve/client/script/ui/shared\messagebox.py
import uiprimitives
import uicontrols
import uiutil
import uthread
import uicls
import carbonui.const as uiconst
import localization
class MessageBox(uicontrols.Window):
    """
    Generic modal message dialog: caption, optional read-only text body or
    custom content, configurable buttons, and an optional "suppress this
    message" checkbox.
    """
    __guid__ = 'form.MessageBox'
    # 'suppress' is runtime-only state; it must not be persisted by the UI.
    __nonpersistvars__ = ['suppress']
    default_width = 340
    default_height = 210
    default_alwaysLoadDefaults = True

    def ApplyAttributes(self, attributes):
        """Initialise window state (called by the UI framework on creation)."""
        uicontrols.Window.ApplyAttributes(self, attributes)
        self.suppress = 0
        self.name = 'modal'
        self.scope = 'all'
        self.edit = None
        self.sr.main.clipChildren = True

    def Execute(self, text, title, buttons, icon, suppText, customicon = None, height = None, default = None, modal = True, okLabel = None, cancelLabel = None):
        """Build the dialog chrome and display *text* in a read-only edit."""
        self._Execute(title, buttons, icon, suppText, customicon, height, default, modal, okLabel, cancelLabel)
        if text:
            # strip raw line breaks; the edit control does its own layout
            text = text.replace('\r', '').replace('\n', '')
            edit = uicls.EditPlainText(parent=self.sr.main, padding=const.defaultPadding, readonly=1)
            self.edit = edit
            # set the text asynchronously so the window appears immediately
            uthread.new(self.SetText, text)

    def ExecuteCustomContent(self, customContentCls, title, buttons, icon, suppText, customicon = None, height = None, default = None, modal = True, okLabel = None, cancelLabel = None, messageData = None):
        """Build the dialog chrome and embed an instance of *customContentCls*."""
        self._Execute(title, buttons, icon, suppText, customicon, height, default, modal, okLabel, cancelLabel)
        customContent = customContentCls(parent=self.sr.main, padding=const.defaultPadding, messageData=messageData, align=uiconst.TOTOP)
        # size the window to fit whatever the custom content needs
        self.height = customContent.GetContentHeight() + 110

    def _Execute(self, title, buttons, icon, suppText, customicon, height, default, modal, okLabel, cancelLabel):
        """Shared setup: window sizing, icons, caption, buttons, suppress box."""
        if height is None:
            height = 210
        self.MakeUnMinimizable()
        self.HideHeader()
        self.SetMinSize([340, height])
        self.DefineIcons(icon, customicon)
        if title is None:
            title = localization.GetByLabel('UI/Common/Information')
        self.sr.main = uiutil.FindChild(self, 'main')
        caption = uicontrols.EveCaptionLarge(text=title, align=uiconst.CENTERLEFT, parent=self.sr.topParent, left=64, width=270)
        self.SetTopparentHeight(max(56, caption.textheight + 16))
        self.DefineButtons(buttons, default=default, okLabel=okLabel, cancelLabel=cancelLabel)
        if suppText:
            self.ShowSupp(suppText)
        if modal:
            uicore.registry.SetFocus(self)

    def ShowSupp(self, text):
        """Add the 'suppress this message' checkbox row at the dialog bottom."""
        bottom = uiprimitives.Container(name='suppressContainer', parent=self.sr.main, align=uiconst.TOBOTTOM, height=20, idx=0)
        self.sr.suppCheckbox = uicontrols.Checkbox(text=text, parent=bottom, configName='suppress', retval=0, checked=0, groupname=None, callback=self.ChangeSupp, align=uiconst.TOPLEFT, pos=(6, 0, 320, 0))
        bottom.height = max(20, self.sr.suppCheckbox.height)

    def ChangeSupp(self, sender):
        """Checkbox callback: remember the user's suppress choice."""
        self.suppress = sender.checked

    def SetText(self, txt):
        """Replace the edit contents and scroll back to the top."""
        self.edit.SetValue(txt, scrolltotop=1)

    def CloseByUser(self, *etc):
        """Close handler: resolve modal dialogs instead of destroying them."""
        if self.isModal:
            self.SetModalResult(uiconst.ID_CLOSE)
        else:
            uicontrols.Window.CloseByUser(self)
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
ac325ec992292cfb4f5972a30cef9de1144e0730 | 47128c6ff1277eedf851670d33f7a288fdfe2246 | /redis database/redis_hashs.py | 972824bc49c27750abbba4a168846bf52f541d75 | [] | no_license | chati757/python-learning-space | 5de7f11a931cf95bc076473da543331b773c07fb | bc33749254d12a47523007fa9a32668b8dc12a24 | refs/heads/master | 2023-08-13T19:19:52.271788 | 2023-07-26T14:09:58 | 2023-07-26T14:09:58 | 83,208,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | import redis
redis=redis.StrictRedis(
host='localhost',
port=6379,
db=0,
password=8182757
)
'''
hash value
-hset <myhash> <field> <hashvalue>
-hget <myhash> <field>
-hmset <myhash> <field1> <hashvalue> <field2> <hashvalue2> ..
-hmget <myhash> <field1> <field2> ..
-hgetall <myhash>
'''
#hset
print("hset")
redis.hset("myhash","myfield","hashvalue")
#hget
print("hget")
test=redis.hget("myhash","myfield")
print(test)
#hmset
print("hmset")
redis.hmset("myhash2",{"test1":"test1","test2":"test2"})
#hmget
print("hmget")
test2=redis.hmget("myhash2",{"test1","test2"})
print(test2)
| [
"chati757@users.noreply.github.com"
] | chati757@users.noreply.github.com |
790f67a27f41c7f4456c418cf0e03464a2005369 | e4ee9f2ca60b60ea9fa1b05c982594a2c1b10484 | /day61 课上笔记以及代码 django orm跨表查询/代码/manytable/app01/models.py | 17295b515cdc4aa9e96bd4ea8605780ea96f0da5 | [] | no_license | tianshang486/Pythonlaonanhai | 100df2cc437aad1ee1baf45bdfc4500b1302092b | 2a5b46986f5ca684b2ae350596e293db54e1e2f4 | refs/heads/master | 2022-09-19T02:16:56.972160 | 2020-06-04T09:24:30 | 2020-06-04T09:24:30 | 269,314,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | from django.db import models
# Create your models here.
from django.db import models
# Create your models here.
#作者表
class Author(models.Model):  # frequently-used author info lives in this table
    """Author master record; heavyweight details live in AuthorDetail."""
    name=models.CharField( max_length=32)
    age=models.IntegerField()
    # authorDetail=models.OneToOneField(to="AuthorDetail",to_field="nid",on_delete=models.CASCADE)
    authorDetail=models.OneToOneField(to='AuthorDetail') # one-to-one link to AuthorDetail; the DB column is named 'authorDetail_id'
    # an FK column is stored as '<field name>_id'
    # one-to-one = foreign key + unique constraint
    def __str__(self):
        """Display authors by name in admin/shell output."""
        return self.name
#作者详细信息表
class AuthorDetail(models.Model):
    """Rarely-queried author details, split out of Author (one-to-one)."""
    birthday=models.DateField()
    # telephone=models.BigIntegerField()
    telephone=models.CharField(max_length=32)
    addr=models.CharField( max_length=64)
    def __str__(self):
        """Display detail rows by address."""
        return self.addr
#出版社表 和 书籍表 是 一对多的关系
class Publish(models.Model):
    """Publisher; one publisher has many books (one-to-many with Book)."""
    name=models.CharField( max_length=32)
    city=models.CharField( max_length=32)
    email=models.EmailField() # EmailField is a CharField with email validation
    def __str__(self):
        """Display publishers by name."""
        return self.name
#书籍表
class Book(models.Model):
    """Book: FK to Publish (many-to-one) and M2M to Author."""
    nid = models.AutoField(primary_key=True)
    title = models.CharField( max_length=32)
    publishDate=models.DateField()
    price=models.DecimalField(max_digits=5,decimal_places=2) # maps to decimal(5,2) in the DB
    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0 --
    # confirm the project's Django version before upgrading.
    publishs=models.ForeignKey(to="Publish",related_name='xx')
    authors=models.ManyToManyField(to='Author')  # Django creates the join table
    def __str__(self):
        """Display books by title."""
        return self.title
# class BookToAuthor(models.Model):
# book_id = models.ForeignKey(to='Book')
# author_id = models.ForeignKey(to='Author')
# # xx = models.CharField(max_length=12)
| [
"tianshang486@.com"
] | tianshang486@.com |
d9883c55f4417a71358c9968e50ee92c86351fe8 | 1131148b4a368c959893c0b3115ac35735dde195 | /setup.py | 3659bddd44d06334873087da61d2d03bbf94b10d | [
"MIT"
] | permissive | radical-cybertools/radical.repex | d6db7251b2edfa4783113a217204f38c122dc91a | 46b5afa6b656f7477de64e29d4e10e015095f91b | refs/heads/master | 2023-04-30T04:39:27.796788 | 2022-08-15T11:00:36 | 2022-08-15T11:00:36 | 204,073,881 | 0 | 0 | MIT | 2021-11-23T18:01:42 | 2019-08-23T21:44:53 | Python | UTF-8 | Python | false | false | 10,450 | py | #!/usr/bin/env python3
__author__ = 'RADICAL-Cybertools Team'
__email__ = 'info@radical-cybertools.org'
__copyright__ = 'Copyright 2013-22, The RADICAL-Cybertools Team'
__license__ = 'MIT'
''' Setup script, only usable via pip. '''
import re
import os
import sys
import glob
import shutil
import subprocess as sp
from setuptools import setup, Command, find_namespace_packages
# ------------------------------------------------------------------------------
name = 'radical.repex'
mod_root = 'src/radical/repex/'
# ------------------------------------------------------------------------------
#
# pip warning:
# "In-tree builds are now default. pip 22.1 will enforce this behaviour change.
# A possible replacement is to remove the --use-feature=in-tree-build flag."
#
# With this change we need to make sure to clean out all temporary files from
# the src tree. Specifically create (and thus need to clean)
# - VERSION
# - SDIST
# - the sdist file itself (a tarball)
#
# `pip install` (or any other direct or indirect invocation of `setup.py`) will
# in fact run `setup.py` multiple times: one on the top level, and internally
# again with other arguments to build sdist and bwheel packages. We must *not*
# clean out temporary files in those internal runs as that would invalidate the
# install.
#
# We thus introduce an env variable `SDIST_LEVEL` which allows us to separate
# internal calls from the top level invocation - we only clean on the latter
# (see end of this file).
# Nesting depth of setup.py invocations (see comment block above): 0 means
# this is the top-level call, the only one allowed to clean temporary files.
sdist_level = int(os.environ.get('SDIST_LEVEL', 0))
os.environ['SDIST_LEVEL'] = str(sdist_level + 1)
# ------------------------------------------------------------------------------
#
def sh_callout(cmd):
    '''
    Run `cmd` through a shell and return the triple
    (stdout, stderr, returncode), with both streams as bytes.
    '''
    proc = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
    return proc.stdout, proc.stderr, proc.returncode
# ------------------------------------------------------------------------------
#
# versioning mechanism:
#
# - version: 1.2.3 - is used for installation
# - version_detail: v1.2.3-9-g0684b06 - is used for debugging
# - version is read from VERSION file in src_root, which then is copied to
# module dir, and is getting installed from there.
# - version_detail is derived from the git tag, and only available when
# installed from git. That is stored in mod_root/VERSION in the install
# tree.
# - The VERSION file is used to provide the runtime version information.
#
def get_version(_mod_root):
    '''
    A VERSION file containing the version strings is created in mod_root
    during installation. That file is used at runtime to get the version
    information.

    Returns (version_base, version_detail, sdist_name, path); raises
    RuntimeError if the version cannot be determined.
    '''

    try:

        _version_base = None
        _version_detail = None
        _sdist_name = None

        # get version from './VERSION'
        src_root = os.path.dirname(__file__)
        if not src_root:
            src_root = '.'

        with open(src_root + '/VERSION', 'r', encoding='utf-8') as f:
            _version_base = f.readline().strip()

        # attempt to get version detail information from git
        # We only do that though if we are in a repo root dir,
        # ie. if 'git rev-parse --show-prefix' returns an empty string --
        # otherwise we get confused if the ve lives beneath another repository,
        # and the pip version used uses an install tmp dir in the ve space
        # instead of /tmp (which seems to happen with some pip/setuptools
        # versions).
        out, _, ret = sh_callout(
            'cd %s ; '
            'test -z `git rev-parse --show-prefix` || exit -1; '
            'tag=`git describe --tags --always` 2>/dev/null ; '
            'branch=`git branch | grep -e "^*" | cut -f 2- -d " "` 2>/dev/null ; '
            'echo $tag@$branch' % src_root)
        _version_detail = out.strip()
        _version_detail = _version_detail.decode()
        _version_detail = _version_detail.replace('detached from ', 'detached-')

        # remove all non-alphanumeric (and then some) chars
        _version_detail = re.sub('[/ ]+', '-', _version_detail)
        _version_detail = re.sub('[^a-zA-Z0-9_+@.-]+', '', _version_detail)

        # fall back to the plain base version when git gave no usable detail
        if ret != 0 or \
           _version_detail == '@' or \
           'git-error' in _version_detail or \
           'not-a-git-repo' in _version_detail or \
           'not-found' in _version_detail or \
           'fatal' in _version_detail :
            _version = _version_base
        elif '@' not in _version_base:
            _version = '%s-%s' % (_version_base, _version_detail)
        else:
            _version = _version_base

        # make sure the version files exist for the runtime version inspection
        _path = '%s/%s' % (src_root, _mod_root)
        with open(_path + '/VERSION', 'w', encoding='utf-8') as f:
            f.write(_version_base + '\n')
            f.write(_version + '\n')

        _sdist_name = '%s-%s.tar.gz' % (name, _version_base)
      # _sdist_name = _sdist_name.replace('/', '-')
      # _sdist_name = _sdist_name.replace('@', '-')
      # _sdist_name = _sdist_name.replace('#', '-')
      # _sdist_name = _sdist_name.replace('_', '-')

        # NOTE(review): relies on module-level 'name' and on being invoked by
        # pip/easy_install with these argv markers -- see comments above.
        if '--record' in sys.argv or \
           'bdist_egg' in sys.argv or \
           'bdist_wheel' in sys.argv :
            # pip install stage 2 or easy_install stage 1
            #
            # pip install will untar the sdist in a tmp tree. In that tmp
            # tree, we won't be able to derive git version tags -- so we pack
            # the formerly derived version as ./VERSION
            shutil.move('VERSION', 'VERSION.bak') # backup
            shutil.copy('%s/VERSION' % _path, 'VERSION') # version to use
            os.system ('python3 setup.py sdist') # build sdist
            shutil.copy('dist/%s' % _sdist_name,
                        '%s/%s' % (_mod_root, _sdist_name)) # copy into tree
            shutil.move('VERSION.bak', 'VERSION') # restore version

        with open(_path + '/SDIST', 'w', encoding='utf-8') as f:
            f.write(_sdist_name + '\n')

        return _version_base, _version_detail, _sdist_name, _path

    except Exception as e:
        raise RuntimeError('Could not extract/set version: %s' % e) from e
# ------------------------------------------------------------------------------
# get version info -- this will create VERSION and srcroot/VERSION
version, version_detail, sdist_name, path = get_version(mod_root)

# ------------------------------------------------------------------------------
# check python version, should be >= 3.6
# (0x03060000 encodes major=3, minor=6 in sys.hexversion layout)
if sys.hexversion < 0x03060000:
    raise RuntimeError('ERROR: %s requires Python 3.6 or newer' % name)
# ------------------------------------------------------------------------------
#
class RunTwine(Command):
    """setuptools command: build an sdist and upload it to PyPI."""
    # no extra command-line options
    user_options = []
    def initialize_options(self): pass  # required Command hook; nothing to do
    def finalize_options(self): pass  # required Command hook; nothing to do
    def run(self):
        """Delegate to a child setup.py run; exit with its return code."""
        _, _, ret = sh_callout('python3 setup.py sdist upload -r pypi')
        raise SystemExit(ret)
# ------------------------------------------------------------------------------
#
# This copies the contents like examples/ dir under sys.prefix/share/$name
# It needs the MANIFEST.in entries to work.
base = 'share/%s' % name
# install the example scripts/configs under sys.prefix/share/radical.repex
# (requires the matching MANIFEST.in entries to work)
df = [('%s/' % base, glob.glob('examples/*.{py,cfg}'))]


# ------------------------------------------------------------------------------
#
setup_args = {
    'name'                 : name,
    'namespace_packages'   : ['radical'],
    'version'              : version,
    'description'          : 'RADICAL Replica Exchange Framework.',
    'author'               : 'RADICAL Group at Rutgers University',
    'author_email'         : 'radical@rutgers.edu',
    'maintainer'           : 'The RADICAL Group',
    'maintainer_email'     : 'radical@rutgers.edu',
    'url'                  : 'https://www.github.com/radical-cybertools/radical.repex/',
    'license'              : 'MIT',
    'keywords'             : 'radical distributed computing',
    'python_requires'      : '>=3.6',
    'classifiers'          : [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Environment :: Console',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Utilities',
        'Topic :: System :: Distributed Computing',
        'Topic :: Scientific/Engineering',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Operating System :: Unix'
    ],
    'packages'             : find_namespace_packages('src', include=['radical.*']),
    'package_dir'          : {'': 'src'},
    'scripts'              : ['bin/radical-repex-version',
                              'bin/radical-repex'],
    # VERSION/SDIST (and the sdist tarball itself) are shipped inside the
    # package so the runtime can report version information
    'package_data'         : {'': ['*.txt', '*.sh', '*.json', '*.gz', '*.c',
                                   '*.md', 'VERSION', 'SDIST', sdist_name]},
  # 'setup_requires'       : ['pytest-runner'],
    'install_requires'     : ['radical.utils>=1.12',
                              'radical.entk>=1.12'],
    'tests_require'        : ['pytest',
                              'pylint',
                              'flake8',
                              'coverage',
                              'mock==2.0.0.',
                              ],
    'test_suite'           : '%s.tests' % name,
    'zip_safe'             : False,
  # 'build_sphinx'         : {
  #     'source-dir'       : 'docs/',
  #     'build-dir'        : 'docs/build',
  #     'all_files'        : 1,
  # },
  # 'upload_sphinx'        : {
  #     'upload-dir'       : 'docs/build/html',
  # },
    # This copies the contents of the examples/ dir under
    # sys.prefix/share/$name
    # It needs the MANIFEST.in entries to work.
    'data_files'           : df,
    'cmdclass'             : {'upload': RunTwine},
}
# ------------------------------------------------------------------------------
#
setup(**setup_args)

# ------------------------------------------------------------------------------
# clean temporary files from source tree
# Only the top-level invocation (SDIST_LEVEL == 0) cleans up; nested
# invocations (internal sdist/bdist builds) must leave these files in place.
if sdist_level == 0:
    os.system('rm -vrf src/%s.egg-info' % name)
    os.system('rm -vf %s/%s' % (path, sdist_name))
    os.system('rm -vf %s/VERSION' % path)
    os.system('rm -vf %s/SDIST' % path)
| [
"andre@merzky.net"
] | andre@merzky.net |
fd4fd9e65dc6ee6914dcc793301d1b035fea57b1 | 3da6b8a0c049a403374e787149d9523012a1f0fc | /Coder_Old/早期代码/test6.py | 73386cf5c9237f095c66d194c9e0dbf3b2a33fb8 | [] | no_license | AndersonHJB/PyCharm_Coder | d65250d943e84b523f022f65ef74b13e7c5bc348 | 32f2866f68cc3a391795247d6aba69a7156e6196 | refs/heads/master | 2022-07-25T11:43:58.057376 | 2021-08-03T02:50:01 | 2021-08-03T02:50:01 | 348,922,058 | 3 | 3 | null | 2021-09-05T02:20:10 | 2021-03-18T02:57:16 | Python | UTF-8 | Python | false | false | 363 | py | user_answer_correct = False #False
while not user_answer_correct: #True
user_gender = input("请输入你的性别(F/M):")
if user_gender == 'F':
print("你是萌妹子!")
user_answer_correct = True
elif user_gender == 'M':
print("你是糙汉子!")
user_answer_correct = True
else:
print("输入不正确,请输入'F'或'M'")
| [
"1432803776@qq.com"
] | 1432803776@qq.com |
79f773c3ae16399c5f62a3cf1fb0ce58800a4bfe | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_7/gdljam001/question1.py | 82f4d81b71c8d1fa705a32a941e2345642497547 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | """Enter strings delete duplicates print
James Godlonton
29 April 2014"""
def main():
    """Read strings until the DONE sentinel; print the unique ones in order."""
    print("Enter strings (end with DONE):\n")
    seen = []  # a list keeps first-seen input order
    word = input("")
    while word != "DONE":
        if word not in seen:
            seen.append(word)
        word = input("")
    print("Unique list:")
    for entry in seen:
        print(entry)
print(wrd)
if __name__=="__main__":#Running main
main() | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
89407569d51f23f7788a8c25ad587b13bfa7d953 | 68a52ad1df836c9f6d922515b2f896b6928ce6a0 | /SafetyProductionSystem/systemsettings/migrations/0003_auto_20190225_1623.py | 899014ddee8e156d68343a14982871a4e35b7333 | [] | no_license | Chuazhen0/SafetyProductionSystem | 1141f845e04b032ff2a230c8def26066f061600c | 442d5df3818d43aebb9830f2456c73018aae2acf | refs/heads/master | 2020-05-20T12:47:46.365020 | 2019-05-08T09:56:01 | 2019-05-08T09:56:01 | 185,579,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | # Generated by Django 2.0.5 on 2019-02-25 16:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('systemsettings', '0002_supervisiontype_form_use'),
]
operations = [
migrations.AddField(
model_name='kks',
name='KKs_organid',
field=models.CharField(blank=True, default='', max_length=30, null=True, verbose_name='KKS电厂接口id'),
),
migrations.AlterField(
model_name='kks',
name='KKS_code',
field=models.CharField(blank=True, default='', max_length=30, null=True, verbose_name='KKS编码'),
),
migrations.AlterField(
model_name='kks',
name='KKS_codename',
field=models.TextField(blank=True, default='', null=True, verbose_name='KKS编码名称'),
),
]
| [
"Caohuazhenrn@163.com"
] | Caohuazhenrn@163.com |
8e718c6efb993cb859b4413f341342e7098c2c60 | 01062df369907a6ff4367ad006d9be75f3b0886f | /zvt/recorders/eastmoney/dividend_financing/__init__.py | 49e6bf4757df6edae05a9e784d6be28b8a1da5cc | [
"MIT"
] | permissive | scanfyu/zvt | 917d8dd6df63fd3d55183896710573700f615a0e | 2ff38155bd85fb0945a7b45cad8dbdf2f175a3d5 | refs/heads/master | 2021-12-01T13:40:00.241766 | 2021-11-26T16:04:04 | 2021-11-26T16:04:04 | 186,553,012 | 0 | 0 | MIT | 2020-03-01T02:33:08 | 2019-05-14T05:46:40 | Python | UTF-8 | Python | false | false | 1,256 | py | # -*- coding: utf-8 -*-
# the __all__ is generated
__all__ = []
# __init__.py structure:
# common code of the package
# export interface in __all__ which contains __all__ of its sub modules
# import all from submodule eastmoney_dividend_detail_recorder
from .eastmoney_dividend_detail_recorder import *
from .eastmoney_dividend_detail_recorder import __all__ as _eastmoney_dividend_detail_recorder_all
__all__ += _eastmoney_dividend_detail_recorder_all
# import all from submodule eastmoney_dividend_financing_recorder
from .eastmoney_dividend_financing_recorder import *
from .eastmoney_dividend_financing_recorder import __all__ as _eastmoney_dividend_financing_recorder_all
__all__ += _eastmoney_dividend_financing_recorder_all
# import all from submodule eastmoney_rights_issue_detail_recorder
from .eastmoney_rights_issue_detail_recorder import *
from .eastmoney_rights_issue_detail_recorder import __all__ as _eastmoney_rights_issue_detail_recorder_all
__all__ += _eastmoney_rights_issue_detail_recorder_all
# import all from submodule eastmoney_spo_detail_recorder
from .eastmoney_spo_detail_recorder import *
from .eastmoney_spo_detail_recorder import __all__ as _eastmoney_spo_detail_recorder_all
__all__ += _eastmoney_spo_detail_recorder_all | [
"5533061@qq.com"
] | 5533061@qq.com |
c8495695dfc3158a39b3fd5002042159feee657f | 74b12c96a73d464e3ca3241ae83a0b6fe984b913 | /python/tvm/ir/memory_pools.py | 6fa6bb41280ee5b88cdca8431f31b4201bd621c5 | [
"Apache-2.0",
"BSD-3-Clause",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | masahi/tvm | cf765bb892655f02135e1ce3afde88698f026483 | c400f7e871214451b75f20f4879992becfe5e3a4 | refs/heads/master | 2023-08-22T20:46:25.795382 | 2022-04-13T08:47:10 | 2022-04-13T08:47:10 | 138,661,036 | 4 | 2 | Apache-2.0 | 2021-09-03T20:35:19 | 2018-06-25T23:39:51 | Python | UTF-8 | Python | false | false | 4,811 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Objects for Memory Pools to be used within the compilation"""
from typing import Optional, List
from tvm._ffi import register_object
from tvm.runtime import Object
from . import _ffi_api
@register_object("ir.PoolInfo")
class PoolInfo(Object):
"""PoolInfo object holds information related to memory pools
where the statically sized allocate nodes will pooled into.
Parameters
----------
pool_name : str
The name of the memory pool
target_access : Dict[Target, str]
A dictionary where keys describe which targets could
access the pool where value could take the values :
a) "rw" : read-write access
b) "ro" : write-only acesss
size_hint_bytes : Optional[int]
The expected size hint to be used by the allocator.
The default value would be -1 which means the pool
is not size restricted.
clock_frequency_hz : Optional[int]
The clock frequency that the memory pool runs at in Hz.
If not specified/known, this will default to -1 indicating
it hasn't been defined.
read_bandwidth_bytes_per_cycle : Optional[int]
The read bandwidth of the memory pool in bytes/cycle.
If not specified/known, this will default to -1 indicating
it hasn't been defined.
write_bandwidth_bytes_per_cycle : Optional[int]
The write bandwidth of the memory pool in bytes/cycle.
If not specified/known, this will default to -1 indicating
it hasn't been defined.
read_latency_cycles : Optional[int]
The read latency of the memory pool in cycles.
If not specified/known, this will default to 0.
write_latency_cycles : Optional[int]
The write latency of the memory pool in cycles.
If not specified/known, this will default to 0.
target_burst_bytes : Optional[Union[Dict[Target, int], None]]
The burst length of the memory pool in bytes per target.
If not specified/known for a given target, a burst length
of 1 byte will be assumed.
"""
# The string parameter to indicate read and write access to a pool
# This needs to be kept in sync with kTargetPoolReadWriteAccess in
# include/tvm/ir/memory_pools.h
READ_WRITE_ACCESS = "rw"
# The string parameter to indicate read only access to a pool
# This needs to be kept in sync with kTargetPoolReadOnlyAccess in
# include/tvm/ir/memory_pools.h
READ_ONLY_ACCESS = "ro"
def __init__(
self,
pool_name: str,
target_access, # Dict[Target, str]
size_hint_bytes: Optional[int] = -1,
clock_frequency_hz: Optional[int] = -1,
read_bandwidth_bytes_per_cycle: Optional[int] = -1,
write_bandwidth_bytes_per_cycle: Optional[int] = -1,
read_latency_cycles: Optional[int] = 0,
write_latency_cycles: Optional[int] = 0,
target_burst_bytes=None, # Optional[Union[Dict[target.Target, int], None]]
):
if not target_burst_bytes:
target_burst_bytes = dict()
self.__init_handle_by_constructor__(
_ffi_api.PoolInfo, # type: ignore # pylint: disable=no-member
pool_name,
target_access,
size_hint_bytes,
clock_frequency_hz,
read_bandwidth_bytes_per_cycle,
write_bandwidth_bytes_per_cycle,
read_latency_cycles,
write_latency_cycles,
target_burst_bytes,
)
@register_object("ir.WorkspaceMemoryPools")
class WorkspaceMemoryPools(Object):
"""This object contains a list of PoolInfo objects to be used as
workspace memory in the compilation
Parameters
----------
pools : List[PoolInfo]
The list of PoolInfo objects to be used with the compilation
"""
def __init__(
self,
pools: List[PoolInfo],
):
self.__init_handle_by_constructor__(
_ffi_api.WorkspaceMemoryPools, pools # type: ignore # pylint: disable=no-member
)
| [
"noreply@github.com"
] | masahi.noreply@github.com |
272841adc8391f599019c7c4e827f9a9d32e39c6 | bb5d587afdf7fb455972889b1453b48371b55c25 | /my_projects/social_project/social_profile/urls.py | 5ae5e9d4a2bf24fbbd6c72a76988e482e20487cd | [] | no_license | nilldiggonto/projects_dj3_vue3_js | e8a98019c1e5ec65724c09733054afbacfb22ead | 6ce52c29c3560a25ed36ba074fc6c2a60191ebe4 | refs/heads/main | 2023-05-30T06:00:06.558789 | 2021-05-29T10:06:02 | 2021-05-29T10:06:02 | 342,195,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | from django.urls import path
from .views import socialprofileView,follow_userView,unfollow_userView,followerView,followView,edit_avatarView
urlpatterns = [
path('avatar/',edit_avatarView,name='social-edit-avatar'),
path('<str:username>',socialprofileView,name='social-profile'),
path('<str:username>/follow/',follow_userView,name='social-user-profile-follow'),
path('<str:username>/unfollow/',unfollow_userView,name='social-user-profile-unfollow'),
path('<str:username>/follower/list/',followerView,name='social-follower-list'),
path('<str:username>/follows/list/',followView,name='social-follow-list'),
] | [
"nilldiggonto@gmail.com"
] | nilldiggonto@gmail.com |
668ee56d3db6869a5a34ef2f597168e88eb72fe4 | 337522cddf5931972932a870e24905567889a49f | /example7.py | e772a6cd235c28c6f95c89d68e7929327932a8dd | [] | no_license | taichi-dev/voxel-challenge | 3d7552d7029328e9c28e2665c06e1b23a15bcc30 | 94c28c8c37d6ff1547daf47becf9731be41dc2e5 | refs/heads/main | 2023-07-10T06:59:02.694455 | 2022-08-04T03:06:53 | 2022-08-04T03:06:53 | 481,891,435 | 186 | 45 | null | 2022-08-05T16:09:43 | 2022-04-15T08:29:35 | Python | UTF-8 | Python | false | false | 5,837 | py | from scene import Scene; import taichi as ti; from taichi.math import *
day = True; manual_seed = 77
scene = Scene(voxel_edges=0, exposure=2 - day)
scene.set_floor(-0.05, (1.0, 1.0, 1.0))
scene.set_background_color((0.9, 0.98, 1) if day else (0.01, 0.01, 0.02))
scene.set_directional_light((1, 1, 1), 0.1, (0.9, 0.98, 1) if day else (0.01, 0.01, 0.02))
lgrid, ngrid = 15, 8
@ti.func
def rand(i, j): return fract(ti.sin(dot(vec2(i, j), vec2(12.9898, 78.233))) * 43758.5453)
@ti.func
def is_road(i, j):
return 0 <= i < ngrid and 0 <= j <= ngrid and scene.get_voxel(vec3(i, -8, j))[0] == 1
@ti.kernel
def initialize():
for i, j in ti.ndrange(8, 8): scene.set_voxel(ivec3(i, -8, j), 0, vec3(0))
start, end = 1+int(vec2(ti.random(),ti.random())*(ngrid-2)), 1+int(vec2(ti.random(),ti.random())*(ngrid-2))
turn = start + 1
while any((abs(turn-start)==1)|(abs(turn-end)==1)): turn = 1+int(vec2(ti.random(),ti.random())*(ngrid-2))
for k in ti.static([0, 1]):
d = vec2(k, 1-k); p = start[k]*vec2(1-k, k)-d
while p[1-k] < ngrid - 1:
p += d; scene.set_voxel(ivec3(p.x, -8, p.y), 1, vec3(0.5))
if p[1-k] == turn[1-k]: d = (1 if start[k] < end[k] else -1) * vec2(1-k, k)
if p[k] == end[k]: d = vec2(k, 1-k)
@ti.func
def build_road(X, uv, d):
if d.sum() <= 2:
if ((d.x | d.z) ^ (d.y | d.w)) & 1: uv = vec2(uv.y, uv.x) if (d.y | d.w) & 1 else uv
else: # curve
while d.z == 0 or d.w == 0: d = d.yzwx; uv=vec2(14-uv.y, uv.x)
uv = vec2(uv.norm(), ti.atan2(uv.x, uv.y)*2/pi*lgrid)
elif d.sum() >= 3: # junction
while d.sum() == 3 and d.y != 0: d = d.yzwx; uv=vec2(14-uv.y, uv.x) # rotate T-junction
if d.sum() > 3 or uv.x <= 7:
uv = vec2(mix(14-uv.x, uv.x, uv.x <= 7), mix(14-uv.y, uv.y, uv.y <= 7))
uv = vec2(uv.norm(), ti.atan2(uv.x, uv.y)*2/pi*lgrid)
scene.set_voxel(vec3(X.x, 0, X.y), 1, vec3(1 if uv.x==7 and 4<uv.y<12 else 0.5)) # pavement
if uv.x <= 1 or uv.x >= 13: scene.set_voxel(ivec3(X.x, 1, X.y), 1, vec3(0.7, 0.65, 0.6)) # sidewalk
if uv.y == 7 and (uv.x == 1 or uv.x == 13): # lights
for i in range(2, 9): scene.set_voxel(vec3(X.x, i, X.y), 1, vec3(0.6, 0.6, 0.6))
if uv.y == 7 and (1<=uv.x<=2 or 12<=uv.x<=13): scene.set_voxel(vec3(X.x, 8, X.y), 1, vec3(0.6, 0.6, 0.6))
if uv.y == 7 and (uv.x == 2 or uv.x == 12): scene.set_voxel(vec3(X.x, 7, X.y), 2, vec3(1, 1, 0.6))
@ti.func
def build_building(X, uv, d, r):
while d.sum() > 0 and d.z == 0: d = d.yzwx; uv=vec2(14-uv.y, uv.x) # rotate
fl = int(3 + 10 * r); style = rand(r, 5)
wall = vec3(rand(r, 1),rand(r, 2),rand(r, 2)) * 0.2+0.4
wall2 = mix(vec3(rand(r, 9)*0.2+0.2), wall, style > 0.5 and rand(r, 4) < 0.4)
maxdist = max(abs(uv.x - 7), abs(uv.y - 7))
for i in range(2, fl * 4):
light = mix(vec3(0.25,0.35,0.38), vec3(0.7,0.7,0.6), rand(rand(X.x, X.y), i//2)>0.6)
if maxdist < 6:
scene.set_voxel(vec3(X.x, i, X.y), mix(1, 0, i%4<2), mix(wall2, light, i%4<2))
if (uv.x == 2 or uv.x == 12) and (uv.y == 2 or uv.y == 12) or style>0.5 and (uv.x%3==1 or uv.y%3==1):
scene.set_voxel(vec3(X.x, i, X.y), 1, wall)
if maxdist < 5: scene.set_voxel(vec3(X.x, i, X.y), mix(1, 2, i%4<2), mix(wall, light, i%4<2))
if maxdist == 5:
for i in range(fl*4, fl*4+2): scene.set_voxel(vec3(X.x, i, X.y), 1, wall) # roof
if maxdist < 5: scene.set_voxel(vec3(X.x, fl*4, X.y), 1, vec3(rand(r, 7)*0.2+0.4))
for i in range(2): scene.set_voxel(vec3(X.x, i, X.y), 1, vec3(0.7, 0.65, 0.6)) # sidewalk
if fl > 10 and uv.x == 6 and uv.y == 6: # antenna
for i in range(fl+1):
scene.set_voxel(vec3(X.x, fl*5-i, X.y), mix(1, 2, i==0), mix(vec3(0.6), vec3(0.8,0,0), i==0))
if d.sum() > 0 and uv.y == 2 and 4 < uv.x < 10: # billboard
for i in range(5, 7):
scene.set_voxel(vec3(X.x,i,X.y), 2, vec3(int(r*3)==0,int(r*3)==1,int(r*3)==2)*(0.2+ti.random()*0.3))
for i in range(2, 5): scene.set_voxel(vec3(X.x,i,X.y), 0, vec3(0))
if d.sum() > 0 and uv.y == 3 and 4 < uv.x < 10:
for i in range(2, 5): scene.set_voxel(vec3(X.x,i,X.y), 1, vec3(0.7,0.7,0.6))
if max(abs(uv.x - rand(r, 8)*7-4), abs(uv.y - rand(r, 10)*7-4)) < 1.5: # HVAC
for i in range(fl*4+1, fl*4+3): scene.set_voxel(vec3(X.x, i, X.y), 1, vec3(0.6))
@ti.func
def build_park(X, uv, d, r):
center, height = int(vec2(rand(r, 1) * 7 + 4, rand(r, 2) * 7 + 4)), 9 + int(rand(r, 3)) * 5
for i in range(height + 3): # tree
if (uv - center).norm() < 1:
scene.set_voxel(vec3(X.x, i, X.y), 1, vec3(0.36, 0.18, 0.06))
if i > min(height-4, (height+5)//2) and (uv - center).norm() < (height+3-i) * (rand(r, 4)*0.6 + 0.4):
scene.set_voxel(vec3(X.x, i, X.y), ti.random()<0.8, vec3(0.1, 0.3 + ti.random()*0.2, 0.1))
h = 2 * ti.sin((uv.x**2+uv.y**2+rand(r, 0)**2*256)/1024 * 2*pi) + 2 + (ti.random() > 0.95)
for i in range(int(h)): # grass
scene.set_voxel(vec3(X.x, i, X.y), 1, vec3(0.2, 0.5 + ti.random() * 0.2, 0.05))
if max(abs(uv.x - rand(r, 4)*7-4), abs(uv.y - rand(r, 5)*7-4)) < 0.5: # light
for i in range(3):
scene.set_voxel(vec3(X.x, h+i, X.y), 1+(i==1), mix(vec3(0.2),vec3(0.9,0.8,0.6),vec3(i==1)))
@ti.kernel
def draw():
for X in ti.grouped(ti.ndrange((-60, 60), (-60, 60))):
I, uv = (X+60) // lgrid, float((X + 60) % lgrid)
d = int(vec4(is_road(I.x,I.y+1),is_road(I.x+1,I.y),is_road(I.x,I.y-1),is_road(I.x-1,I.y)))
r = mix(rand(I.x, I.y), any(d>0), 0.4)
if is_road(I.x, I.y): build_road(X, uv, d)
elif r > 0.5: build_building(X, uv, d, 2*r-1)
else: build_park(X, uv, d, 2*r)
[initialize() for _ in range(manual_seed + 1)]; draw(); scene.finish()
| [
"noreply@github.com"
] | taichi-dev.noreply@github.com |
e15565680e10de69563972677f3682cd3d1140ce | aff608f59d1c9ecee2ebca0ac63e0e1775502858 | /sitegeist/cache.py | 0244a7db751c7415809f9af64171871519dea17c | [] | permissive | parthkhatri1998/sitegeist | 45a5010dad09fabd98be59a9fd0dc18289749ba7 | 231b18dfb3a5a0fce32c1c5e01227dcf9bb18010 | refs/heads/master | 2022-12-23T01:37:37.721753 | 2020-09-30T16:26:15 | 2020-09-30T16:26:15 | 299,981,485 | 0 | 0 | BSD-3-Clause | 2020-09-30T16:25:45 | 2020-09-30T16:25:45 | null | UTF-8 | Python | false | false | 1,509 | py | import datetime
import logging
from django.conf import settings
from sitegeist.mongo import db
logger = logging.getLogger(__name__)
class CoordinateCache(object):
def __init__(self, namespace, snap_radius=None):
self.namespace = namespace
self.snap_radius = snap_radius or settings.SITEGEIST['GEO_SNAP_RADIUS']
def get(self, latlon):
if not settings.SITEGEIST.get('COORDCACHE', False):
return
latlon = [float(p) for p in latlon]
spec = {
"namespace": self.namespace,
"geo": {
"$maxDistance": self.snap_radius,
"$near": latlon
}
}
doc = db.coordcache.find_one(spec)
if doc:
now = datetime.datetime.utcnow()
expsecs = settings.SITEGEIST['COORDCACHE_EXPIRATION'] * 60
exptime = doc['timestamp'] + datetime.timedelta(seconds=expsecs)
logger.debug("Comparing cache expiration %s to now %s" % (exptime, now))
if exptime > now:
logger.debug("Cache is valid")
return doc['data']
else:
logger.debug("Cache is invalid, deleting document")
db.coordcache.remove(doc["_id"])
def set(self, latlon, data):
doc = {
'geo': latlon,
'namespace': self.namespace,
'data': data,
'timestamp': datetime.datetime.utcnow(),
}
db.coordcache.insert(doc)
| [
"jcarbaugh@gmail.com"
] | jcarbaugh@gmail.com |
c679014ccedb4e34ff298ce78464a417862421b3 | fe0017ae33385d7a2857d0aa39fa8861b40c8a88 | /env/lib/python3.8/site-packages/pandas/io/formats/format.py | e1e5a003b341a42dcec5a1b1b3a9e2d821e9838d | [] | no_license | enriquemoncerrat/frasesback | eec60cc7f078f9d24d155713ca8aa86f401c61bf | e2c77f839c77f54e08a2f0930880cf423e66165b | refs/heads/main | 2023-01-03T23:21:05.968846 | 2020-10-18T21:20:27 | 2020-10-18T21:20:27 | 305,198,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64,892 | py | """
Internal module for formatting output data in csv, html,
and latex files. This module also applies to display formatting.
"""
import decimal
import re
from contextlib import contextmanager
from csv import QUOTE_NONE, QUOTE_NONNUMERIC
from datetime import tzinfo
from functools import partial
from io import StringIO
from shutil import get_terminal_size
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from unicodedata import east_asian_width
import math
import numpy as np
import pandas.core.common as com
from pandas._config.config import get_option, set_option
from pandas._libs import lib
from pandas._libs.missing import NA
from pandas._libs.tslib import format_array_from_datetime
from pandas._libs.tslibs import NaT, Timedelta, Timestamp, iNaT
from pandas._libs.tslibs.nattype import NaTType
from pandas._typing import FilePathOrBuffer, Label
from pandas.core.arrays.datetimes import DatetimeArray
from pandas.core.arrays.timedeltas import TimedeltaArray
from pandas.core.base import PandasObject
from pandas.core.construction import extract_array
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.indexes.api import Index, MultiIndex, PeriodIndex, ensure_index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.errors import AbstractMethodError
from pandas.io.common import stringify_path
from pandas.io.formats.printing import adjoin, justify, pprint_thing
if TYPE_CHECKING:
from pandas import Categorical, DataFrame, Series
FormattersType = Union[
List[Callable], Tuple[Callable, ...], Mapping[Union[str, int], Callable]
]
FloatFormatType = Union[str, Callable, "EngFormatter"]
ColspaceType = Mapping[Label, Union[str, int]]
ColspaceArgType = Union[
str, int, Sequence[Union[str, int]], Mapping[Label, Union[str, int]],
]
common_docstring = """
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : %(col_space_type)s, optional
%(col_space)s.
header : %(header_type)s, optional
%(header)s.
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list, tuple or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List/tuple must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
min_rows : int, optional
The number of rows to display in the console in a truncated repr
(when number of rows is above `max_rows`).
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
"""
_VALID_JUSTIFY_PARAMETERS = (
"left",
"right",
"center",
"justify",
"justify-all",
"start",
"end",
"inherit",
"match-parent",
"initial",
"unset",
)
return_docstring = """
Returns
-------
str or None
If buf is None, returns the result as a string. Otherwise returns
None.
"""
class CategoricalFormatter:
def __init__(
self,
categorical: "Categorical",
buf: Optional[IO[str]] = None,
length: bool = True,
na_rep: str = "NaN",
footer: bool = True,
):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO("")
self.na_rep = na_rep
self.length = length
self.footer = footer
self.quoting = QUOTE_NONNUMERIC
def _get_footer(self) -> str:
footer = ""
if self.length:
if footer:
footer += ", "
footer += f"Length: {len(self.categorical)}"
level_info = self.categorical._repr_categories_info()
# Levels are added in a newline
if footer:
footer += "\n"
footer += level_info
return str(footer)
def _get_formatted_values(self) -> List[str]:
return format_array(
self.categorical._internal_get_values(),
None,
float_format=None,
na_rep=self.na_rep,
quoting=self.quoting,
)
def to_string(self) -> str:
categorical = self.categorical
if len(categorical) == 0:
if self.footer:
return self._get_footer()
else:
return ""
fmt_values = self._get_formatted_values()
fmt_values = [i.strip() for i in fmt_values]
values = ", ".join(fmt_values)
result = ["[" + values + "]"]
if self.footer:
footer = self._get_footer()
if footer:
result.append(footer)
return str("\n".join(result))
class SeriesFormatter:
def __init__(
self,
series: "Series",
buf: Optional[IO[str]] = None,
length: Union[bool, str] = True,
header: bool = True,
index: bool = True,
na_rep: str = "NaN",
name: bool = False,
float_format: Optional[str] = None,
dtype: bool = True,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
):
self.series = series
self.buf = buf if buf is not None else StringIO()
self.name = name
self.na_rep = na_rep
self.header = header
self.length = length
self.index = index
self.max_rows = max_rows
self.min_rows = min_rows
if float_format is None:
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
self.adj = _get_adjustment()
self._chk_truncate()
def _chk_truncate(self) -> None:
from pandas.core.reshape.concat import concat
self.tr_row_num: Optional[int]
min_rows = self.min_rows
max_rows = self.max_rows
# truncation determined by max_rows, actual truncated number of rows
# used below by min_rows
truncate_v = max_rows and (len(self.series) > max_rows)
series = self.series
if truncate_v:
max_rows = cast(int, max_rows)
if min_rows:
# if min_rows is set (not None or 0), set max_rows to minimum
# of both
max_rows = min(min_rows, max_rows)
if max_rows == 1:
row_num = max_rows
series = series.iloc[:max_rows]
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
self.tr_row_num = row_num
else:
self.tr_row_num = None
self.tr_series = series
self.truncate_v = truncate_v
def _get_footer(self) -> str:
name = self.series.name
footer = ""
if getattr(self.series.index, "freq", None) is not None:
assert isinstance(
self.series.index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)
)
footer += f"Freq: {self.series.index.freqstr}"
if self.name is not False and name is not None:
if footer:
footer += ", "
series_name = pprint_thing(name, escape_chars=("\t", "\r", "\n"))
footer += f"Name: {series_name}"
if self.length is True or (self.length == "truncate" and self.truncate_v):
if footer:
footer += ", "
footer += f"Length: {len(self.series)}"
if self.dtype is not False and self.dtype is not None:
dtype_name = getattr(self.tr_series.dtype, "name", None)
if dtype_name:
if footer:
footer += ", "
footer += f"dtype: {pprint_thing(dtype_name)}"
# level infos are added to the end and in a new line, like it is done
# for Categoricals
if is_categorical_dtype(self.tr_series.dtype):
level_info = self.tr_series._values._repr_categories_info()
if footer:
footer += "\n"
footer += level_info
return str(footer)
def _get_formatted_index(self) -> Tuple[List[str], bool]:
index = self.tr_series.index
if isinstance(index, MultiIndex):
have_header = any(name for name in index.names)
fmt_index = index.format(names=True)
else:
have_header = index.name is not None
fmt_index = index.format(name=True)
return fmt_index, have_header
def _get_formatted_values(self) -> List[str]:
return format_array(
self.tr_series._values,
None,
float_format=self.float_format,
na_rep=self.na_rep,
)
def to_string(self) -> str:
series = self.tr_series
footer = self._get_footer()
if len(series) == 0:
return f"{type(self.series).__name__}([], {footer})"
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
if self.truncate_v:
n_header_rows = 0
row_num = self.tr_row_num
row_num = cast(int, row_num)
width = self.adj.len(fmt_values[row_num - 1])
if width > 3:
dot_str = "..."
else:
dot_str = ".."
# Series uses mode=center because it has single value columns
# DataFrame uses mode=left
dot_str = self.adj.justify([dot_str], width, mode="center")[0]
fmt_values.insert(row_num + n_header_rows, dot_str)
fmt_index.insert(row_num + 1, "")
if self.index:
result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
else:
result = self.adj.adjoin(3, fmt_values)
if self.header and have_header:
result = fmt_index[0] + "\n" + result
if footer:
result += "\n" + footer
return str("".join(result))
class TextAdjustment:
def __init__(self):
self.encoding = get_option("display.encoding")
def len(self, text: str) -> int:
return len(text)
def justify(self, texts: Any, max_len: int, mode: str = "right") -> List[str]:
return justify(texts, max_len, mode=mode)
def adjoin(self, space: int, *lists, **kwargs) -> str:
return adjoin(space, *lists, strlen=self.len, justfunc=self.justify, **kwargs)
class EastAsianTextAdjustment(TextAdjustment):
def __init__(self):
super().__init__()
if get_option("display.unicode.ambiguous_as_wide"):
self.ambiguous_width = 2
else:
self.ambiguous_width = 1
# Definition of East Asian Width
# https://unicode.org/reports/tr11/
# Ambiguous width can be changed by option
self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1}
def len(self, text: str) -> int:
"""
Calculate display width considering unicode East Asian Width
"""
if not isinstance(text, str):
return len(text)
return sum(
self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text
)
def justify(
self, texts: Iterable[str], max_len: int, mode: str = "right"
) -> List[str]:
# re-calculate padding space per str considering East Asian Width
def _get_pad(t):
return max_len - self.len(t) + len(t)
if mode == "left":
return [x.ljust(_get_pad(x)) for x in texts]
elif mode == "center":
return [x.center(_get_pad(x)) for x in texts]
else:
return [x.rjust(_get_pad(x)) for x in texts]
def _get_adjustment() -> TextAdjustment:
use_east_asian_width = get_option("display.unicode.east_asian_width")
if use_east_asian_width:
return EastAsianTextAdjustment()
else:
return TextAdjustment()
class TableFormatter:
show_dimensions: Union[bool, str]
is_truncated: bool
formatters: FormattersType
columns: Index
@property
def should_show_dimensions(self) -> bool:
return self.show_dimensions is True or (
self.show_dimensions == "truncate" and self.is_truncated
)
def _get_formatter(self, i: Union[str, int]) -> Optional[Callable]:
if isinstance(self.formatters, (list, tuple)):
if is_integer(i):
i = cast(int, i)
return self.formatters[i]
else:
return None
else:
if is_integer(i) and i not in self.columns:
i = self.columns[i]
return self.formatters.get(i, None)
@contextmanager
def get_buffer(
self, buf: Optional[FilePathOrBuffer[str]], encoding: Optional[str] = None
):
"""
Context manager to open, yield and close buffer for filenames or Path-like
objects, otherwise yield buf unchanged.
"""
if buf is not None:
buf = stringify_path(buf)
else:
buf = StringIO()
if encoding is None:
encoding = "utf-8"
elif not isinstance(buf, str):
raise ValueError("buf is not a file name and encoding is specified.")
if hasattr(buf, "write"):
yield buf
elif isinstance(buf, str):
with open(buf, "w", encoding=encoding, newline="") as f:
# GH#30034 open instead of codecs.open prevents a file leak
# if we have an invalid encoding argument.
# newline="" is needed to roundtrip correctly on
# windows test_to_latex_filename
yield f
else:
raise TypeError("buf is not a file name and it has no write method")
def write_result(self, buf: IO[str]) -> None:
"""
Write the result of serialization to buf.
"""
raise AbstractMethodError(self)
def get_result(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
encoding: Optional[str] = None,
) -> Optional[str]:
"""
Perform serialization. Write to buf or return as string if buf is None.
"""
with self.get_buffer(buf, encoding=encoding) as f:
self.write_result(buf=f)
if buf is None:
return f.getvalue()
return None
class DataFrameFormatter(TableFormatter):
"""
Render a DataFrame
self.to_string() : console-friendly tabular output
self.to_html() : html table
self.to_latex() : LaTeX tabular environment table
"""
__doc__ = __doc__ if __doc__ else ""
__doc__ += common_docstring + return_docstring
col_space: ColspaceType
def __init__(
self,
frame: "DataFrame",
columns: Optional[Sequence[str]] = None,
col_space: Optional[ColspaceArgType] = None,
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[FormattersType] = None,
justify: Optional[str] = None,
float_format: Optional[FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
line_width: Optional[int] = None,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: Union[bool, str] = False,
decimal: str = ".",
table_id: Optional[str] = None,
render_links: bool = False,
bold_rows: bool = False,
escape: bool = True,
):
self.frame = frame
self.show_index_names = index_names
if sparsify is None:
sparsify = get_option("display.multi_sparse")
self.sparsify = sparsify
self.float_format = float_format
if formatters is None:
self.formatters = {}
elif len(frame.columns) == len(formatters) or isinstance(formatters, dict):
self.formatters = formatters
else:
raise ValueError(
f"Formatters length({len(formatters)}) should match "
f"DataFrame number of columns({len(frame.columns)})"
)
self.na_rep = na_rep
self.decimal = decimal
if col_space is None:
self.col_space = {}
elif isinstance(col_space, (int, str)):
self.col_space = {"": col_space}
self.col_space.update({column: col_space for column in self.frame.columns})
elif isinstance(col_space, Mapping):
for column in col_space.keys():
if column not in self.frame.columns and column != "":
raise ValueError(
f"Col_space is defined for an unknown column: {column}"
)
self.col_space = col_space
else:
if len(frame.columns) != len(col_space):
raise ValueError(
f"Col_space length({len(col_space)}) should match "
f"DataFrame number of columns({len(frame.columns)})"
)
self.col_space = dict(zip(self.frame.columns, col_space))
self.header = header
self.index = index
self.line_width = line_width
self.max_rows = max_rows
self.min_rows = min_rows
self.max_cols = max_cols
self.max_rows_displayed = min(max_rows or len(self.frame), len(self.frame))
self.show_dimensions = show_dimensions
self.table_id = table_id
self.render_links = render_links
if justify is None:
self.justify = get_option("display.colheader_justify")
else:
self.justify = justify
self.bold_rows = bold_rows
self.escape = escape
if columns is not None:
self.columns = ensure_index(columns)
self.frame = self.frame[self.columns]
else:
self.columns = frame.columns
self._chk_truncate()
self.adj = _get_adjustment()
def _chk_truncate(self) -> None:
"""
Checks whether the frame should be truncated. If so, slices
the frame up.
"""
from pandas.core.reshape.concat import concat
# Cut the data to the information actually printed
max_cols = self.max_cols
max_rows = self.max_rows
self.max_rows_adj: Optional[int]
max_rows_adj: Optional[int]
if max_cols == 0 or max_rows == 0: # assume we are in the terminal
(w, h) = get_terminal_size()
self.w = w
self.h = h
if self.max_rows == 0:
dot_row = 1
prompt_row = 1
if self.show_dimensions:
show_dimension_rows = 3
# assume we only get here if self.header is boolean.
# i.e. not to_latex() where self.header may be List[str]
self.header = cast(bool, self.header)
n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row
# rows available to fill with actual data
max_rows_adj = self.h - n_add_rows
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the
# screen
if max_cols == 0 and len(self.frame.columns) > w:
max_cols = w
if max_rows == 0 and len(self.frame) > h:
max_rows = h
if not hasattr(self, "max_rows_adj"):
if max_rows:
if (len(self.frame) > max_rows) and self.min_rows:
# if truncated, set max_rows showed to min_rows
max_rows = min(self.min_rows, max_rows)
self.max_rows_adj = max_rows
if not hasattr(self, "max_cols_adj"):
self.max_cols_adj = max_cols
max_cols_adj = self.max_cols_adj
max_rows_adj = self.max_rows_adj
truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
frame = self.frame
if truncate_h:
# cast here since if truncate_h is True, max_cols_adj is not None
max_cols_adj = cast(int, max_cols_adj)
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
max_cols = cast(int, max_cols)
frame = frame.iloc[:, :max_cols]
col_num = max_cols
else:
col_num = max_cols_adj // 2
frame = concat(
(frame.iloc[:, :col_num], frame.iloc[:, -col_num:]), axis=1
)
# truncate formatter
if isinstance(self.formatters, (list, tuple)):
truncate_fmt = self.formatters
self.formatters = [
*truncate_fmt[:col_num],
*truncate_fmt[-col_num:],
]
self.tr_col_num = col_num
if truncate_v:
# cast here since if truncate_v is True, max_rows_adj is not None
max_rows_adj = cast(int, max_rows_adj)
if max_rows_adj == 1:
row_num = max_rows
frame = frame.iloc[:max_rows, :]
else:
row_num = max_rows_adj // 2
frame = concat((frame.iloc[:row_num, :], frame.iloc[-row_num:, :]))
self.tr_row_num = row_num
else:
self.tr_row_num = None
self.tr_frame = frame
self.truncate_h = truncate_h
self.truncate_v = truncate_v
self.is_truncated = bool(self.truncate_h or self.truncate_v)
    def _to_str_columns(self) -> List[List[str]]:
        """
        Render a DataFrame to a list of columns (as lists of strings).

        Each inner list holds the formatted header cells (when a header is
        shown) followed by the column's formatted values.  The formatted row
        index is prepended as the first column when ``self.index`` is True,
        and '...' placeholder cells are spliced in for truncated output.
        """
        # this method is not used by to_html where self.col_space
        # could be a string so safe to cast
        col_space = {k: cast(int, v) for k, v in self.col_space.items()}
        frame = self.tr_frame
        # may include levels names also
        str_index = self._get_formatted_index(frame)
        # header suppressed (and not an explicit list of aliases):
        # only format and pad the value cells
        if not is_list_like(self.header) and not self.header:
            stringified = []
            for i, c in enumerate(frame):
                fmt_values = self._format_col(i)
                fmt_values = _make_fixed_width(
                    fmt_values, self.justify, minimum=col_space.get(c, 0), adj=self.adj,
                )
                stringified.append(fmt_values)
        else:
            if is_list_like(self.header):
                # cast here since can't be bool if is_list_like
                self.header = cast(List[str], self.header)
                # user-supplied header aliases must cover every column
                if len(self.header) != len(self.columns):
                    raise ValueError(
                        f"Writing {len(self.columns)} cols "
                        f"but got {len(self.header)} aliases"
                    )
                str_columns = [[label] for label in self.header]
            else:
                str_columns = self._get_formatted_column_labels(frame)
            if self.show_row_idx_names:
                # blank header cell aligning with the row-index-names line
                for x in str_columns:
                    x.append("")
            stringified = []
            for i, c in enumerate(frame):
                cheader = str_columns[i]
                # a column must be at least as wide as its widest header cell
                header_colwidth = max(
                    col_space.get(c, 0), *(self.adj.len(x) for x in cheader)
                )
                fmt_values = self._format_col(i)
                fmt_values = _make_fixed_width(
                    fmt_values, self.justify, minimum=header_colwidth, adj=self.adj
                )
                max_len = max(max(self.adj.len(x) for x in fmt_values), header_colwidth)
                cheader = self.adj.justify(cheader, max_len, mode=self.justify)
                stringified.append(cheader + fmt_values)
        strcols = stringified
        if self.index:
            strcols.insert(0, str_index)
        # Add ... to signal truncated
        truncate_h = self.truncate_h
        truncate_v = self.truncate_v
        if truncate_h:
            # insert a whole '...' column after the left half of the columns
            col_num = self.tr_col_num
            strcols.insert(self.tr_col_num + 1, [" ..."] * (len(str_index)))
        if truncate_v:
            # insert a '..' / '...' row between the top and bottom halves
            n_header_rows = len(str_index) - len(frame)
            row_num = self.tr_row_num
            # cast here since if truncate_v is True, self.tr_row_num is not None
            row_num = cast(int, row_num)
            for ix, col in enumerate(strcols):
                # infer from above row
                cwidth = self.adj.len(strcols[ix][row_num])
                is_dot_col = False
                if truncate_h:
                    is_dot_col = ix == col_num + 1
                if cwidth > 3 or is_dot_col:
                    my_str = "..."
                else:
                    my_str = ".."
                if ix == 0:
                    # index column: dots go on the left edge
                    dot_mode = "left"
                elif is_dot_col:
                    cwidth = 4
                    dot_mode = "right"
                else:
                    dot_mode = "right"
                dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
                strcols[ix].insert(row_num + n_header_rows, dot_str)
        return strcols
    def write_result(self, buf: IO[str]) -> None:
        """
        Render a DataFrame to a console-friendly tabular output.

        Writes the formatted text into *buf*.  Output is wrapped or
        re-truncated according to ``line_width`` / ``max_cols``, and a
        "[n rows x m columns]" footer is appended when dimensions are shown.
        """
        from pandas import Series
        frame = self.frame
        # degenerate frame: just describe its (empty) axes
        if len(frame.columns) == 0 or len(frame.index) == 0:
            info_line = (
                f"Empty {type(self.frame).__name__}\n"
                f"Columns: {pprint_thing(frame.columns)}\n"
                f"Index: {pprint_thing(frame.index)}"
            )
            text = info_line
        else:
            strcols = self._to_str_columns()
            if self.line_width is None:  # no need to wrap around just print
                # the whole frame
                text = self.adj.adjoin(1, *strcols)
            elif (
                not isinstance(self.max_cols, int) or self.max_cols > 0
            ):  # need to wrap around
                text = self._join_multiline(*strcols)
            else:  # max_cols == 0. Try to fit frame to terminal
                lines = self.adj.adjoin(1, *strcols).split("\n")
                max_len = Series(lines).str.len().max()
                # plus truncate dot col
                dif = max_len - self.w
                # '+ 1' to avoid too wide repr (GH PR #17023)
                adj_dif = dif + 1
                col_lens = Series([Series(ele).apply(len).max() for ele in strcols])
                n_cols = len(col_lens)
                counter = 0
                # repeatedly drop the middle column until the rest fits
                while adj_dif > 0 and n_cols > 1:
                    counter += 1
                    mid = int(round(n_cols / 2.0))
                    mid_ix = col_lens.index[mid]
                    col_len = col_lens[mid_ix]
                    # adjoin adds one
                    adj_dif -= col_len + 1
                    col_lens = col_lens.drop(mid_ix)
                    n_cols = len(col_lens)
                # subtract index column
                max_cols_adj = n_cols - self.index
                # GH-21180. Ensure that we print at least two.
                max_cols_adj = max(max_cols_adj, 2)
                self.max_cols_adj = max_cols_adj
                # Call again _chk_truncate to cut frame appropriately
                # and then generate string representation
                self._chk_truncate()
                strcols = self._to_str_columns()
                text = self.adj.adjoin(1, *strcols)
        buf.writelines(text)
        if self.should_show_dimensions:
            buf.write(f"\n\n[{len(frame)} rows x {len(frame.columns)} columns]")
    def _join_multiline(self, *args) -> str:
        """
        Adjoin formatted columns into stacked "pages" no wider than
        ``self.line_width``; pages are separated by blank lines and every
        page but the last ends with a trailing backslash marker.
        """
        lwidth = self.line_width
        adjoin_width = 1
        strcols = list(args)
        if self.index:
            # the index column repeats on every page; reserve its width
            idx = strcols.pop(0)
            lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width
        col_widths = [
            np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0
            for col in strcols
        ]
        assert lwidth is not None
        # group columns into bins that each fit the remaining width
        col_bins = _binify(col_widths, lwidth)
        nbins = len(col_bins)
        if self.truncate_v:
            # cast here since if truncate_v is True, max_rows_adj is not None
            self.max_rows_adj = cast(int, self.max_rows_adj)
            nrows = self.max_rows_adj + 1
        else:
            nrows = len(self.frame)
        str_lst = []
        st = 0
        for i, ed in enumerate(col_bins):
            row = strcols[st:ed]
            if self.index:
                row.insert(0, idx)
            if nbins > 1:
                # continuation marker column on every page except the last
                if ed <= len(strcols) and i < nbins - 1:
                    row.append([" \\"] + [" "] * (nrows - 1))
                else:
                    row.append([" "] * nrows)
            str_lst.append(self.adj.adjoin(adjoin_width, *row))
            st = ed
        return "\n\n".join(str_lst)
def to_string(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
encoding: Optional[str] = None,
) -> Optional[str]:
return self.get_result(buf=buf, encoding=encoding)
def to_latex(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
column_format: Optional[str] = None,
longtable: bool = False,
encoding: Optional[str] = None,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
caption: Optional[str] = None,
label: Optional[str] = None,
) -> Optional[str]:
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
from pandas.io.formats.latex import LatexFormatter
return LatexFormatter(
self,
column_format=column_format,
longtable=longtable,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
caption=caption,
label=label,
).get_result(buf=buf, encoding=encoding)
def _format_col(self, i: int) -> List[str]:
frame = self.tr_frame
formatter = self._get_formatter(i)
return format_array(
frame.iloc[:, i]._values,
formatter,
float_format=self.float_format,
na_rep=self.na_rep,
space=self.col_space.get(frame.columns[i]),
decimal=self.decimal,
)
def to_html(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
encoding: Optional[str] = None,
classes: Optional[Union[str, List, Tuple]] = None,
notebook: bool = False,
border: Optional[int] = None,
) -> Optional[str]:
"""
Render a DataFrame to a html table.
Parameters
----------
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.display.html.border``.
"""
from pandas.io.formats.html import HTMLFormatter, NotebookFormatter
Klass = NotebookFormatter if notebook else HTMLFormatter
return Klass(self, classes=classes, border=border).get_result(
buf=buf, encoding=encoding
)
    def _get_formatted_column_labels(self, frame: "DataFrame") -> List[List[str]]:
        """
        Return one list of header-cell strings per column.

        MultiIndex headers are optionally sparsified; numeric columns get a
        leading space so values line up under their (signed) digits.
        """
        from pandas.core.indexes.multi import _sparsify
        columns = frame.columns
        if isinstance(columns, MultiIndex):
            fmt_columns = columns.format(sparsify=False, adjoin=False)
            fmt_columns = list(zip(*fmt_columns))
            dtypes = self.frame.dtypes._values
            # if we have a Float level, they don't use leading space at all
            restrict_formatting = any(l.is_floating for l in columns.levels)
            need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
            def space_format(x, y):
                # pad only when no explicit per-column formatter is given
                if (
                    y not in self.formatters
                    and need_leadsp[x]
                    and not restrict_formatting
                ):
                    return " " + y
                return y
            str_columns = list(
                zip(*[[space_format(x, y) for y in x] for x in fmt_columns])
            )
            if self.sparsify and len(str_columns):
                str_columns = _sparsify(str_columns)
            str_columns = [list(x) for x in zip(*str_columns)]
        else:
            fmt_columns = columns.format()
            dtypes = self.frame.dtypes
            need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
            # a leading space reserves room for the values' sign column
            str_columns = [
                [" " + x if not self._get_formatter(i) and need_leadsp[x] else x]
                for i, (col, x) in enumerate(zip(columns, fmt_columns))
            ]
        # self.str_columns = str_columns
        return str_columns
    @property
    def has_index_names(self) -> bool:
        """True when any level of the row index carries a name."""
        return _has_names(self.frame.index)
    @property
    def has_column_names(self) -> bool:
        """True when any level of the column index carries a name."""
        return _has_names(self.frame.columns)
@property
def show_row_idx_names(self) -> bool:
return all((self.has_index_names, self.index, self.show_index_names))
@property
def show_col_idx_names(self) -> bool:
return all((self.has_column_names, self.show_index_names, self.header))
    def _get_formatted_index(self, frame: "DataFrame") -> List[str]:
        """
        Format the row index as a list of strings, one per output line,
        prefixed with (possibly blank) header cells so it aligns with the
        column header rows.
        """
        # Note: this is only used by to_string() and to_latex(), not by
        # to_html(). so safe to cast col_space here.
        col_space = {k: cast(int, v) for k, v in self.col_space.items()}
        index = frame.index
        columns = frame.columns
        fmt = self._get_formatter("__index__")
        if isinstance(index, MultiIndex):
            fmt_index = index.format(
                sparsify=self.sparsify,
                adjoin=False,
                names=self.show_row_idx_names,
                formatter=fmt,
            )
        else:
            fmt_index = [index.format(name=self.show_row_idx_names, formatter=fmt)]
        # left-justify and pad each index level to a fixed width
        fmt_index = [
            tuple(
                _make_fixed_width(
                    list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj,
                )
            )
            for x in fmt_index
        ]
        adjoined = self.adj.adjoin(1, *fmt_index).split("\n")
        # empty space for columns
        if self.show_col_idx_names:
            col_header = [str(x) for x in self._get_column_name_list()]
        else:
            col_header = [""] * columns.nlevels
        if self.header:
            return col_header + adjoined
        else:
            return adjoined
def _get_column_name_list(self) -> List[str]:
names: List[str] = []
columns = self.frame.columns
if isinstance(columns, MultiIndex):
names.extend("" if name is None else name for name in columns.names)
else:
names.append("" if columns.name is None else columns.name)
return names
# ----------------------------------------------------------------------
# Array formatters
def format_array(
    values: Any,
    formatter: Optional[Callable],
    float_format: Optional[FloatFormatType] = None,
    na_rep: str = "NaN",
    digits: Optional[int] = None,
    space: Optional[Union[str, int]] = None,
    justify: str = "right",
    decimal: str = ".",
    leading_space: Optional[bool] = None,
    quoting: Optional[int] = None,
) -> List[str]:
    """
    Format an array for printing.

    Parameters
    ----------
    values
    formatter
    float_format
    na_rep
    digits
    space
    justify
    decimal
    leading_space : bool, optional
        Whether the array should be formatted with a leading space.
        When an array as a column of a Series or DataFrame, we do want
        the leading space to pad between columns.

        When formatting an Index subclass
        (e.g. IntervalIndex._format_native_types), we don't want the
        leading space since it should be left-aligned.

    Returns
    -------
    List[str]
    """
    # Pick the formatter class by dtype.  The check order matters and is
    # preserved from the original elif chain: e.g. tz-aware datetimes are
    # backed by an extension dtype, so they must be matched before the
    # generic extension-array branch.
    dispatch = [
        (is_datetime64_dtype, Datetime64Formatter),
        (is_datetime64tz_dtype, Datetime64TZFormatter),
        (is_timedelta64_dtype, Timedelta64Formatter),
        (is_extension_array_dtype, ExtensionArrayFormatter),
        (lambda d: is_float_dtype(d) or is_complex_dtype(d), FloatArrayFormatter),
        (is_integer_dtype, IntArrayFormatter),
    ]
    fmt_klass: Type[GenericArrayFormatter] = GenericArrayFormatter
    for predicate, klass in dispatch:
        if predicate(values.dtype):
            fmt_klass = klass
            break

    # fall back to the display options for anything left unspecified
    if space is None:
        space = get_option("display.column_space")
    if float_format is None:
        float_format = get_option("display.float_format")
    if digits is None:
        digits = get_option("display.precision")

    fmt_obj = fmt_klass(
        values,
        digits=digits,
        na_rep=na_rep,
        float_format=float_format,
        formatter=formatter,
        space=space,
        justify=justify,
        decimal=decimal,
        leading_space=leading_space,
        quoting=quoting,
    )
    return fmt_obj.get_result()
class GenericArrayFormatter:
    """
    Base formatter: renders a 1-D array of arbitrary objects to strings.

    Subclasses override ``_format_strings`` for dtype-specific output;
    parameters mirror those of ``format_array``.
    """
    def __init__(
        self,
        values: Any,
        digits: int = 7,
        formatter: Optional[Callable] = None,
        na_rep: str = "NaN",
        space: Union[str, int] = 12,
        float_format: Optional[FloatFormatType] = None,
        justify: str = "right",
        decimal: str = ".",
        quoting: Optional[int] = None,
        fixed_width: bool = True,
        leading_space: Optional[bool] = None,
    ):
        self.values = values
        self.digits = digits
        self.na_rep = na_rep
        self.space = space
        self.formatter = formatter
        self.float_format = float_format
        self.justify = justify
        self.decimal = decimal
        self.quoting = quoting
        self.fixed_width = fixed_width
        self.leading_space = leading_space
    def get_result(self) -> List[str]:
        """Format the values and pad them to a common width."""
        fmt_values = self._format_strings()
        return _make_fixed_width(fmt_values, self.justify)
    def _format_strings(self) -> List[str]:
        """Render ``self.values`` into a list of strings (no padding)."""
        if self.float_format is None:
            float_format = get_option("display.float_format")
            if float_format is None:
                precision = get_option("display.precision")
                float_format = lambda x: f"{x: .{precision:d}g}"
        else:
            float_format = self.float_format
        if self.formatter is not None:
            formatter = self.formatter
        else:
            quote_strings = self.quoting is not None and self.quoting != QUOTE_NONE
            formatter = partial(
                pprint_thing,
                escape_chars=("\t", "\r", "\n"),
                quote_strings=quote_strings,
            )
        def _format(x):
            # render one non-float scalar, mapping NA-like values to na_rep
            if self.na_rep is not None and is_scalar(x) and isna(x):
                try:
                    # try block for np.isnat specifically
                    # determine na_rep if x is None or NaT-like
                    if x is None:
                        return "None"
                    elif x is NA:
                        return str(NA)
                    elif x is NaT or np.isnat(x):
                        return "NaT"
                except (TypeError, ValueError):
                    # np.isnat only handles datetime or timedelta objects
                    pass
                return self.na_rep
            elif isinstance(x, PandasObject):
                return str(x)
            else:
                # object dtype
                return str(formatter(x))
        vals = extract_array(self.values, extract_numpy=True)
        is_float_type = (
            lib.map_infer(vals, is_float)
            # vals may have 2 or more dimensions
            & np.all(notna(vals), axis=tuple(range(1, len(vals.shape))))
        )
        leading_space = self.leading_space
        if leading_space is None:
            # by default pad with a leading space only when floats are present
            leading_space = is_float_type.any()
        fmt_values = []
        for i, v in enumerate(vals):
            if not is_float_type[i] and leading_space:
                fmt_values.append(f" {_format(v)}")
            elif is_float_type[i]:
                fmt_values.append(float_format(v))
            else:
                if leading_space is False:
                    # False specifically, so that the default is
                    # to include a space if we get here.
                    tpl = "{v}"
                else:
                    tpl = " {v}"
                fmt_values.append(tpl.format(v=_format(v)))
        return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
    """
    Formatter for float (and complex) arrays.

    Handles fixed-width vs. free-form output, decimal-symbol substitution,
    the chop threshold, and automatic fallback to scientific notation when
    the fixed format would be too wide or would render small values as 0.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # float_format is expected to be a string
        # formatter should be used to pass a function
        if self.float_format is not None and self.formatter is None:
            # GH21625, GH22270
            self.fixed_width = False
            if callable(self.float_format):
                self.formatter = self.float_format
                self.float_format = None
    def _value_formatter(
        self,
        float_format: Optional[FloatFormatType] = None,
        threshold: Optional[Union[float, int]] = None,
    ) -> Callable:
        """Returns a function to be applied on each value to format it"""
        # the float_format parameter supersedes self.float_format
        if float_format is None:
            float_format = self.float_format
        # we are going to compose different functions, to first convert to
        # a string, then replace the decimal symbol, and finally chop according
        # to the threshold
        # when there is no float_format, we use str instead of '%g'
        # because str(0.0) = '0.0' while '%g' % 0.0 = '0'
        if float_format:
            def base_formatter(v):
                return float_format(value=v) if notna(v) else self.na_rep
        else:
            def base_formatter(v):
                return str(v) if notna(v) else self.na_rep
        if self.decimal != ".":
            # substitute the configured decimal symbol (first occurrence only)
            def decimal_formatter(v):
                return base_formatter(v).replace(".", self.decimal, 1)
        else:
            decimal_formatter = base_formatter
        if threshold is None:
            return decimal_formatter
        # values at or below the chop threshold render as 0.0
        def formatter(value):
            if notna(value):
                if abs(value) > threshold:
                    return decimal_formatter(value)
                else:
                    return decimal_formatter(0.0)
            else:
                return self.na_rep
        return formatter
    def get_result_as_array(self) -> np.ndarray:
        """
        Returns the float values converted into strings using
        the parameters given at initialisation, as a numpy array
        """
        if self.formatter is not None:
            return np.array([self.formatter(x) for x in self.values])
        if self.fixed_width:
            threshold = get_option("display.chop_threshold")
        else:
            threshold = None
        # if we have a fixed_width, we'll need to try different float_format
        def format_values_with(float_format):
            formatter = self._value_formatter(float_format, threshold)
            # default formatter leaves a space to the left when formatting
            # floats, must be consistent for left-justifying NaNs (GH #25061)
            if self.justify == "left":
                na_rep = " " + self.na_rep
            else:
                na_rep = self.na_rep
            # separate the wheat from the chaff
            values = self.values
            is_complex = is_complex_dtype(values)
            mask = isna(values)
            values = np.array(values, dtype="object")
            values[mask] = na_rep
            imask = (~mask).ravel()
            values.flat[imask] = np.array(
                [formatter(val) for val in values.ravel()[imask]]
            )
            if self.fixed_width:
                if is_complex:
                    result = _trim_zeros_complex(values, self.decimal, na_rep)
                else:
                    result = _trim_zeros_float(values, self.decimal, na_rep)
                return np.asarray(result, dtype="object")
            return values
        # There is a special default string when we are fixed-width
        # The default is otherwise to use str instead of a formatting string
        float_format: Optional[FloatFormatType]
        if self.float_format is None:
            if self.fixed_width:
                float_format = partial(
                    "{value: .{digits:d}f}".format, digits=self.digits
                )
            else:
                float_format = self.float_format
        else:
            float_format = lambda value: self.float_format % value
        formatted_values = format_values_with(float_format)
        if not self.fixed_width:
            return formatted_values
        # we need do convert to engineering format if some values are too small
        # and would appear as 0, or if some values are too big and take too
        # much space
        if len(formatted_values) > 0:
            maxlen = max(len(x) for x in formatted_values)
            too_long = maxlen > self.digits + 6
        else:
            too_long = False
        with np.errstate(invalid="ignore"):
            abs_vals = np.abs(self.values)
            # this is pretty arbitrary for now
            # large values: more that 8 characters including decimal symbol
            # and first digit, hence > 1e6
            has_large_values = (abs_vals > 1e6).any()
            has_small_values = (
                (abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)
            ).any()
        if has_small_values or (too_long and has_large_values):
            # re-render everything in scientific notation
            float_format = partial("{value: .{digits:d}e}".format, digits=self.digits)
            formatted_values = format_values_with(float_format)
        return formatted_values
    def _format_strings(self) -> List[str]:
        # shortcut
        if self.formatter is not None:
            return [self.formatter(x) for x in self.values]
        return list(self.get_result_as_array())
class IntArrayFormatter(GenericArrayFormatter):
    """Formatter for integer arrays; a leading space reserves sign room."""
    def _format_strings(self) -> List[str]:
        if self.formatter is not None:
            fmt = self.formatter
        else:
            fmt = lambda x: f"{x: d}"
        return [fmt(v) for v in self.values]
class Datetime64Formatter(GenericArrayFormatter):
    """Formatter for timezone-naive datetime64 arrays."""
    def __init__(
        self,
        values: Union[np.ndarray, "Series", DatetimeIndex, DatetimeArray],
        nat_rep: str = "NaT",
        date_format: None = None,
        **kwargs,
    ):
        super().__init__(values, **kwargs)
        # string used for NaT entries
        self.nat_rep = nat_rep
        # optional strftime format; None means infer date-only vs. full
        self.date_format = date_format
    def _format_strings(self) -> List[str]:
        """ we by definition DO NOT have a TZ """
        values = self.values
        if not isinstance(values, DatetimeIndex):
            values = DatetimeIndex(values)
        if self.formatter is not None and callable(self.formatter):
            return [self.formatter(x) for x in values]
        fmt_values = format_array_from_datetime(
            values.asi8.ravel(),
            format=_get_format_datetime64_from_values(values, self.date_format),
            na_rep=self.nat_rep,
        ).reshape(values.shape)
        return fmt_values.tolist()
class ExtensionArrayFormatter(GenericArrayFormatter):
    """
    Formatter for ExtensionArray-backed values; materializes the array and
    delegates element formatting back to ``format_array``.
    """
    def _format_strings(self) -> List[str]:
        values = extract_array(self.values, extract_numpy=True)
        formatter = values._formatter(boxed=True)
        if is_categorical_dtype(values.dtype):
            # Categorical is special for now, so that we can preserve tzinfo
            array = values._internal_get_values()
        else:
            array = np.asarray(values)
        fmt_values = format_array(
            array,
            formatter,
            float_format=self.float_format,
            na_rep=self.na_rep,
            digits=self.digits,
            space=self.space,
            justify=self.justify,
            leading_space=self.leading_space,
        )
        return fmt_values
def format_percentiles(
    percentiles: Union[
        np.ndarray, List[Union[int, float]], List[float], List[Union[str, float]]
    ]
) -> List[str]:
    """
    Outputs rounded and formatted percentiles.

    Parameters
    ----------
    percentiles : list-like, containing floats from interval [0,1]

    Returns
    -------
    formatted : list of strings

    Notes
    -----
    Rounding precision is chosen so that: (1) if any two elements of
    ``percentiles`` differ, they remain different after rounding
    (2) no entry is *rounded* to 0% or 100%.
    Any non-integer is always rounded to at least 1 decimal place.

    Examples
    --------
    Keeps all entries different after rounding:

    >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
    ['1.999%', '2.001%', '50%', '66.667%', '99.99%']

    No element is rounded to 0% or 100% (unless already equal to it).
    Duplicates are allowed:

    >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
    ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
    """
    pcts = np.asarray(percentiles)

    # NaN compares False on both bounds, so this also rejects NaN entries.
    with np.errstate(invalid="ignore"):
        out_of_range = not np.all(pcts >= 0) or not np.all(pcts <= 1)
        if not is_numeric_dtype(pcts) or out_of_range:
            raise ValueError("percentiles should all be in the interval [0,1]")

    pcts = 100 * pcts
    exact_int = np.isclose(pcts.astype(int), pcts)

    if np.all(exact_int):
        ints = pcts.astype(int).astype(str)
        return [i + "%" for i in ints]

    unique_pcts = np.unique(pcts)
    # gaps against 0 and 100 stop rounding from collapsing onto the endpoints
    to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None
    to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None

    # least precision that keeps the percentiles unique after rounding
    diffs = np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end)
    prec = -np.floor(np.log10(np.min(diffs))).astype(int)
    prec = max(1, prec)

    out = np.empty_like(pcts, dtype=object)
    out[exact_int] = pcts[exact_int].astype(int).astype(str)
    out[~exact_int] = pcts[~exact_int].round(prec).astype(str)
    return [s + "%" for s in out]
def _is_dates_only(
values: Union[np.ndarray, DatetimeArray, Index, DatetimeIndex]
) -> bool:
# return a boolean if we are only dates (and don't have a timezone)
values = values.ravel()
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = 86400 * 1e9
even_days = (
np.logical_and(consider_values, values_int % int(one_day_nanos) != 0).sum() == 0
)
if even_days:
return True
return False
def _format_datetime64(
x: Union[NaTType, Timestamp], tz: Optional[tzinfo] = None, nat_rep: str = "NaT"
) -> str:
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if tz is not None or not isinstance(x, Timestamp):
if getattr(x, "tzinfo", None) is not None:
x = Timestamp(x).tz_convert(tz)
else:
x = Timestamp(x).tz_localize(tz)
return str(x)
def _format_datetime64_dateonly(
x: Union[NaTType, Timestamp], nat_rep: str = "NaT", date_format: None = None
) -> str:
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timestamp):
x = Timestamp(x)
if date_format:
return x.strftime(date_format)
else:
return x._date_repr
def _get_format_datetime64(
    is_dates_only: bool, nat_rep: str = "NaT", date_format: None = None
) -> Callable:
    """Return a per-scalar datetime formatter: date-only when
    *is_dates_only*, full timestamp (with optional tz) otherwise."""
    if is_dates_only:
        def formatter(x, tz=None):
            return _format_datetime64_dateonly(
                x, nat_rep=nat_rep, date_format=date_format
            )
    else:
        def formatter(x, tz=None):
            return _format_datetime64(x, tz=tz, nat_rep=nat_rep)
    return formatter
def _get_format_datetime64_from_values(
    values: Union[np.ndarray, DatetimeArray, DatetimeIndex], date_format: Optional[str]
) -> Optional[str]:
    """ given values and a date_format, return a string format """
    if isinstance(values, np.ndarray) and values.ndim > 1:
        # We don't actually care about the order of values, and DatetimeIndex
        # only accepts 1D values
        values = values.ravel()
    # when everything is a plain midnight, default to a date-only format
    if _is_dates_only(values):
        return date_format or "%Y-%m-%d"
    return date_format
class Datetime64TZFormatter(Datetime64Formatter):
    """Formatter for timezone-aware datetime values."""
    def _format_strings(self) -> List[str]:
        """ we by definition have a TZ """
        boxed = self.values.astype(object)
        dates_only = _is_dates_only(boxed)
        fmt = self.formatter or _get_format_datetime64(
            dates_only, date_format=self.date_format
        )
        return [fmt(v) for v in boxed]
class Timedelta64Formatter(GenericArrayFormatter):
    """Formatter for timedelta64 arrays."""
    def __init__(
        self,
        values: Union[np.ndarray, TimedeltaIndex],
        nat_rep: str = "NaT",
        box: bool = False,
        **kwargs,
    ):
        super().__init__(values, **kwargs)
        # string used for NaT entries; box=True wraps results in quotes
        self.nat_rep = nat_rep
        self.box = box
    def _format_strings(self) -> List[str]:
        fmt = self.formatter or _get_format_timedelta64(
            self.values, nat_rep=self.nat_rep, box=self.box
        )
        return [fmt(v) for v in self.values]
def _get_format_timedelta64(
values: Union[np.ndarray, TimedeltaIndex, TimedeltaArray],
nat_rep: str = "NaT",
box: bool = False,
) -> Callable:
"""
Return a formatter function for a range of timedeltas.
These will all have the same format argument
If box, then show the return in quotes
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = 86400 * 1e9
even_days = (
np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
)
if even_days:
format = None
else:
format = "long"
def _formatter(x):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = f"'{result}'"
return result
return _formatter
def _make_fixed_width(
    strings: List[str],
    justify: str = "right",
    minimum: Optional[int] = None,
    adj: Optional[TextAdjustment] = None,
) -> List[str]:
    """Pad *strings* to one common width (at least *minimum*), truncating
    with '...' when they exceed the display.max_colwidth option."""
    if len(strings) == 0 or justify == "all":
        return strings

    if adj is None:
        adj = _get_adjustment()

    target = max(adj.len(x) for x in strings)
    if minimum is not None:
        target = max(minimum, target)

    conf_max = get_option("display.max_colwidth")
    if conf_max is not None and target > conf_max:
        target = conf_max

    def shorten(s):
        # over-wide cells get an ellipsis, unless the cap itself is tiny
        if conf_max is not None and conf_max > 3 and adj.len(s) > target:
            s = s[: target - 3] + "..."
        return s

    shortened = [shorten(s) for s in strings]
    return adj.justify(shortened, target, mode=justify)
def _trim_zeros_complex(
    str_complexes: np.ndarray, decimal: str = ".", na_rep: str = "NaN"
) -> List[str]:
    """
    Separates the real and imaginary parts from the complex number, and
    executes the _trim_zeros_float method on each of those.
    """
    trimmed = []
    for s in str_complexes:
        # keep the 'j', '+', '-' separators so the parts can be rejoined
        parts = re.split(r"([j+-])", s)
        trimmed.append("".join(_trim_zeros_float(parts, decimal, na_rep)))
    return trimmed
def _trim_zeros_float(
str_floats: Union[np.ndarray, List[str]], decimal: str = ".", na_rep: str = "NaN"
) -> List[str]:
"""
Trims zeros, leaving just one before the decimal points if need be.
"""
trimmed = str_floats
def _is_number(x):
return x != na_rep and not x.endswith("inf")
def _cond(values):
finite = [x for x in values if _is_number(x)]
has_decimal = [decimal in x for x in finite]
return (
len(finite) > 0
and all(has_decimal)
and all(x.endswith("0") for x in finite)
and not (any(("e" in x) or ("E" in x) for x in finite))
)
while _cond(trimmed):
trimmed = [x[:-1] if _is_number(x) else x for x in trimmed]
# leave one 0 after the decimal points if need be.
return [x + "0" if x.endswith(decimal) and _is_number(x) else x for x in trimmed]
def _has_names(index: Index) -> bool:
if isinstance(index, MultiIndex):
return com.any_not_none(*index.names)
else:
return index.name is not None
class EngFormatter:
    """
    Formats float values according to engineering format.

    Based on matplotlib.ticker.EngFormatter
    """

    # The SI engineering prefixes
    ENG_PREFIXES = {
        -24: "y",
        -21: "z",
        -18: "a",
        -15: "f",
        -12: "p",
        -9: "n",
        -6: "u",
        -3: "m",
        0: "",
        3: "k",
        6: "M",
        9: "G",
        12: "T",
        15: "P",
        18: "E",
        21: "Z",
        24: "Y",
    }

    def __init__(self, accuracy: Optional[int] = None, use_eng_prefix: bool = False):
        self.accuracy = accuracy
        self.use_eng_prefix = use_eng_prefix

    def __call__(self, num: Union[int, float]) -> str:
        """
        Formats a number in engineering notation, appending a letter
        representing the power of 1000 of the original number. Some examples:

        >>> format_eng(0)       # for self.accuracy = 0
        ' 0'

        >>> format_eng(1000000) # for self.accuracy = 1,
                                #     self.use_eng_prefix = True
        ' 1.0M'

        >>> format_eng("-1e-6") # for self.accuracy = 2
                                #     self.use_eng_prefix = False
        '-1.00E-06'

        @param num: the value to represent
        @type num: either a numeric value or a string that can be converted to
                   a numeric value (as per decimal.Decimal constructor)

        @return: engineering formatted string
        """
        magnitude = decimal.Decimal(str(num))

        if decimal.Decimal.is_nan(magnitude):
            return "NaN"
        if decimal.Decimal.is_infinite(magnitude):
            return "inf"

        if magnitude < 0:  # pragma: no cover
            sign = -1
            magnitude = -magnitude
        else:
            sign = 1

        if magnitude != 0:
            exponent = decimal.Decimal(int(math.floor(magnitude.log10() / 3) * 3))
        else:
            exponent = decimal.Decimal(0)

        # clamp the exponent to the range covered by the prefix table
        exponent = exponent.min(max(self.ENG_PREFIXES.keys()))
        exponent = exponent.max(min(self.ENG_PREFIXES.keys()))

        int_exp = int(exponent)
        if self.use_eng_prefix:
            suffix = self.ENG_PREFIXES[int_exp]
        elif int_exp < 0:
            suffix = f"E-{-int_exp:02d}"
        else:
            suffix = f"E+{int_exp:02d}"

        mant = sign * magnitude / (10 ** exponent)

        if self.accuracy is None:  # pragma: no cover
            format_str = "{mant: g}{prefix}"
        else:
            format_str = f"{{mant: .{self.accuracy:d}f}}{{prefix}}"

        return format_str.format(mant=mant, prefix=suffix)
def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None:
    """
    Alter default behavior on how float is formatted in DataFrame.
    Format float in engineering format. By accuracy, we mean the number of
    decimal digits after the floating point.

    See also EngFormatter.
    """
    eng_formatter = EngFormatter(accuracy, use_eng_prefix)
    set_option("display.float_format", eng_formatter)
    # widen the column space enough for the formatted output
    set_option("display.column_space", max(12, accuracy + 9))
def _binify(cols: List[int], line_width: int) -> List[int]:
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
def get_level_lengths(
    levels: Any, sentinel: Union[bool, object, str] = ""
) -> List[Dict[int, int]]:
    """
    For each index in each level the function returns lengths of indexes.

    Parameters
    ----------
    levels : list of lists
        List of values on for level.
    sentinel : string, optional
        Value which states that no new index starts on there.

    Returns
    -------
    Returns list of maps. For each level returns map of indexes (key is index
    in row and value is length of index).
    """
    if len(levels) == 0:
        return []

    # a position stays "pristine" only while every level above it held
    # the sentinel there, i.e. the run from the upper level continues
    pristine = [True] * len(levels[0])
    result = []
    for level in levels:
        run_start = 0
        lengths = {}
        for i, key in enumerate(level):
            if pristine[i] and key == sentinel:
                continue
            # a new run begins here: close off the previous one
            pristine[i] = False
            lengths[run_start] = i - run_start
            run_start = i
        lengths[run_start] = len(level) - run_start
        result.append(lengths)
    return result
def buffer_put_lines(buf: IO[str], lines: List[str]) -> None:
    """
    Appends lines to a buffer.

    Parameters
    ----------
    buf
        The buffer to write to
    lines
        The lines to append.
    """
    # coerce every entry to str when the list contains any strings
    if any(isinstance(line, str) for line in lines):
        lines = [str(line) for line in lines]
    buf.write("\n".join(lines))
| [
"enriquemoncerrat@gmail.com"
] | enriquemoncerrat@gmail.com |
aadb7b2e4e1363533ecbe16d31573db2a76fb0f0 | 77900cdd9a815caf1cd04705321ca93f5072179f | /Project/.history/product_20211116161051.py | 37f9a82e833a984cd72187f16de880e3336ee1fb | [] | no_license | Bom19990111/helloword_python | 717799d994223d65de5adaeabecf396ff2bc1fb7 | 2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7 | refs/heads/master | 2023-09-06T04:17:02.057628 | 2021-11-21T20:00:46 | 2021-11-21T20:00:46 | 407,063,273 | 0 | 1 | null | 2021-11-21T20:00:47 | 2021-09-16T07:18:35 | Python | UTF-8 | Python | false | false | 10,482 | py | import data as list_product
import random
import pandas as pd
# def __init__(self, Id, Product_code, Product_name, Brand, Year, Size):
# self.Id = Id
# self.Product_code = Product_code
# self.Product_name = Product_name
# self.Brand = Brand
# self.Year = Year
# self.Size = Size
# Thêm sản phẩm
def AddProduct():
    """Interactively prompt for every field of a new product, append it to
    the shared product list, and optionally recurse to add another one."""
    print("THÊM SẢN PHẨM")
    # Skeleton record; each field is filled in from user input below.
    product = {
        "Id": "",
        "Product_code": "",
        "Product_name": "",
        "Brand": "",
        "Price": "",
        "Year": "",
        "Quantity": "",
        "Size": "",
        "Status": ""
    }
    print("Nhập ID sản phẩm:")
    try:
        Id = int(input())
    except:
        print("ID phải là số")
        # NOTE(review): retry is done via recursion; if that call returns,
        # `Id` is still unbound here and the loop below raises NameError.
        try:
            AddProduct()
        except RuntimeError:
            print("Dừng chương trình!")
    # Keep asking until the chosen ID is not already taken.
    while True:
        student = FindProductDuplicate(Id)
        if student != False:
            print("ID đã tồn tại, vui lòng nhập lại ID:")
            Id = int(input())
        else:
            break
    product['Id'] = Id
    # Random product code of the form "HKSP<two digits>" (zero-padded).
    code_product = random.randint(1, 99)
    str_id = "HKSP"
    if code_product <= 9:
        str_id += "0" + str(code_product)
    else:
        str_id += str(code_product)
    product["Product_code"] = str_id
    print("Nhập tên sản phẩm: ")
    product['Product_name'] = input()
    print("Nhập thương hiệu sản phẩm: ")
    product['Brand'] = input()
    print("Nhập giá sản phẩm: ")
    try:
        product['Price'] = float(input())
    except ValueError:
        print("Giá phải là kiểu số")
        # One retry for the price; a second bad value aborts the field.
        try:
            print("Nhập giá sản phẩm: ")
            product['Price'] = float(input())
        except:
            print('Dừng chương trình')
    print("Nhập năm sản xuất: ")
    try:
        product['Year'] = int(input())
    except ValueError:
        print("Năm phải là kiểu số")
        # Same single-retry pattern for the year.
        try:
            print("Nhập năm sản xuất: ")
            product['Year'] = int(input())
        except:
            print('Dừng chương trình!')
    print("Nhập số lượng: ")
    product['Quantity'] = int(input())
    print("Nhập size giày: ")
    product['Size'] = input()
    print("Nhập tình trạng sản phẩm: ")
    product['Status'] = input()
    list_product.list_product.append(product)
    answer = input("Bạn có muốn nhập tiếp không? Y/N ")
    if answer == "y" or answer == "Y":
        AddProduct()
# Tìm kiếm ID trùng lặp
def FindProductDuplicate(Id):
    """Locate a product by ID in the shared product list.

    Returns ``[index, product_dict]`` when a product with the given ID
    exists, otherwise ``False``.
    """
    for idx, item in enumerate(list_product.list_product):
        if item['Id'] == Id:
            return [idx, item]
    return False
# Hiển thị tất cả sản phẩm
def ShowAllProduct():
    """Print every product in the shared product list, one block per item."""
    print("*** HIỂN THỊ TẤT CẢ SẢN PHẨM ***")
    if not list_product.list_product:
        print("Chưa có sản phẩm nào để hiển thị! ".upper())
    for item in list_product.list_product:
        print("ID : \t", item['Id'])
        print("Mã sản phẩm : \t", item['Product_code'])
        print("Tên sản phẩm : \t", item['Product_name'])
        print("Thương hiệu : \t", item['Brand'])
        print("Giá : \t", item['Price'])
        print("Năm xuất bản : \t", item['Year'])
        print("Số lượng : \t", item['Quantity'])
        print("Size giày : \t", item['Size'])
        print("Tình trạng : \t", item['Status'])
        print("________________________________")
# Sửa thông tin sản phẩm
def UpdateProduct():
    """Interactively update one field of a product chosen by its ID."""
    print("*** CẬP NHẬT THÔNG TIN SẢN PHẨM ***")
    print("Nhập ID sản phẩm cần sửa")
    try:
        Id = int(input())
        product = FindProductDuplicate(Id)
    except:
        print("Vui lòng nhập đúng ID")
        # NOTE(review): retry is done via recursion; if it fails, `Id` and
        # `product` stay unbound and the code below raises NameError.
        try:
            UpdateProduct()
        except:
            print("Dừng chương trình!")
    if product == False:
        print("Không tìm thấy sản phẩm ID = ".upper(), Id)
        print("********************************")
    else:
        print("""Bạn muốn cập nhật mục nào ? :
        0. Thoát.
        1. Tên sản phẩm.
        2. Thương hiệu sản phẩm.
        3. Giá sản phẩm
        4. Size giày.
        5. Số lượng.
        6. Năm xuất bản.
        7. Tình trạng """)
        action = 0
        # The helpers below are (re)defined on every loop pass and close
        # over `product`; the dispatch at the top only works from the
        # second pass onward, which is fine because `action` starts at 0.
        while action >= 0:
            if action == 1:
                UpdateProductName()
            elif action == 2:
                UpdateProductBrand()
            elif action == 3:
                UpdateProductPrice()
            elif action == 4:
                UpdateProductSize()
            elif action == 5:
                UpdateProductQuatity()
            elif action == 6:
                UpdateProductYear()
            elif action == 7:
                UpdateStatus()

            def UpdateProductName():
                print("Nhập tên cập nhật của sản phẩm: ")
                name_product = input()
                # `product[1]` is the dict stored in the shared list, so
                # mutating it updates the list entry in place.
                product[1]['Product_name'] = name_product

            def UpdateProductBrand():
                print("Nhập thương hiệu muốn cập nhật: ")
                name_product = input()
                product[1]['Brand'] = name_product

            def UpdateProductPrice():
                print("Nhập giá muốn cập nhật: ")
                name_product = float(input())
                product[1]['Price'] = name_product

            def UpdateProductSize():
                print("Nhập size muốn cập nhật: ")
                name_product = input()
                product[1]['Size'] = name_product

            def UpdateProductYear():
                print("Nhập năm sản xuất muốn cập nhật: ")
                name_product = int(input())
                product[1]['Year'] = name_product
                # Redundant write-back: product[1] already aliases the
                # list element, but kept for fidelity with the original.
                list_product.list_product[product[0]] = product[1]

            def UpdateProductQuatity():
                print("Nhập số lượng muốn cập nhật: ")
                name_product = int(input())
                product[1]['Quantity'] = name_product
                list_product.list_product[product[0]] = product[1]

            def UpdateStatus():
                print("Nhập tình trạng muốn cập nhật: ")
                name_product = input()
                product[1]['Status'] = name_product
                list_product.list_product[product[0]] = product[1]

            action = int(input("Bạn chọn mục cập nhật nào? "))
            if action == 0:
                print("Không cập nhật mục nào".upper())
                print("********************************")
                break
# Xóa sản phẩm
def DeleteProduct():
    """Prompt for a product ID and, after confirmation, remove it from the
    shared product list."""
    print("*** XÓA SẢN PHẨM ***")
    print("Nhập ID sản phẩm cần xóa:")
    target_id = int(input())
    match = FindProductDuplicate(target_id)
    if match == False:
        print("Không tìm thấy sản phẩm ID = ".upper(), target_id)
        print("********************************")
        return
    answer = input("Bạn có muốn xóa sản phẩm này không? Y/N ".upper())
    if answer in ("y", "Y"):
        list_product.list_product.remove(match[1])
        print("Xóa sản phẩm thành công!".upper())
        print("********************************")
    else:
        print("Đã từ chối xóa sản phẩm này!".upper())
        print("********************************")
# Tìm kiếm sản phẩm
def FindProductByName():
    """Search the product list by name or brand (case-insensitive substring
    match) and print every matching product.

    Bug fix: the containment test was reversed -- it checked whether the
    product's name/brand was a substring of the query instead of whether
    the query is a substring of the name/brand, so partial searches could
    never match.
    """
    print("*** TÌM KIẾM SẢN PHẨM ***")
    if len(list_product.list_product) <= 0:
        print("Chưa có sản phẩm nào trong giỏ!".upper())
        print("********************************")
        return
    # Normalise the query once; all comparisons below are case-insensitive.
    NameProduct = str(
        input("Nhập tên sản phẩm hoặc tên thương hiệu bạn muốn tìm kiếm: ")).upper()
    is_found = False
    for item in list_product.list_product:
        # The query must appear inside the product's name or brand.
        if NameProduct in str(item['Product_name']).upper() \
                or NameProduct in str(item['Brand']).upper():
            is_found = True
            print("ID : \t", item['Id'])
            print("Mã sản phẩm : \t", item['Product_code'])
            print("Tên sản phẩm : \t", item['Product_name'])
            print("Thương hiệu : \t", item['Brand'])
            print("Giá : \t", item['Price'])
            print("Năm xuất bản : \t", item['Year'])
            print("Số lượng : \t", item['Quantity'])
            print("Size giày : \t", item['Size'])
            print("Tình trạng : \t", item['Status'])
            print("________________________________")
    if not is_found:
        print("Không tìm thấy sản phẩm này @@".upper())
        print("********************************")
def SortProductNameA_Z():
    """Sort the shared product list by product name, ascending (in place)."""
    list_product.list_product.sort(key=lambda entry: entry.get("Product_name"))
def SortProductNameZ_A():
    """Sort the shared product list by product name, descending (in place)."""
    list_product.list_product.sort(
        key=lambda entry: entry.get("Product_name"), reverse=True)
def SortPriceAsc():
    """Sort the shared product list by price, ascending (in place)."""
    list_product.list_product.sort(key=lambda entry: entry.get("Price"))
def SortPriceDesc():
    """Sort the shared product list by price, descending (in place)."""
    list_product.list_product.sort(
        key=lambda entry: entry.get("Price"), reverse=True)
def ExportExecel():
    """Open the product workbook and print its first rows.

    NOTE(review): despite the name, nothing is exported -- the body is
    byte-for-byte identical to ImportExecel(); presumably a placeholder.
    Confirm the intended behavior.
    """
    xl = pd.ExcelFile('danhsachsanpham.xlsx')
    df = pd.read_excel(xl, header=None)
    print(df.head())
def ImportExecel():
    """Open the product workbook and print its first rows.

    NOTE(review): this only prints the head of the sheet; it does not load
    the rows into the in-memory product list. Confirm the intended behavior.
    """
    xl = pd.ExcelFile('danhsachsanpham.xlsx')
    df = pd.read_excel(xl, header=None)
    print(df.head())
| [
"phanthituyngoc1995@gmail.com"
] | phanthituyngoc1995@gmail.com |
dc14a031d4b4835a5f528eaa2f8bc1d6d6d739ac | dd027c4bbcace97e3dbf566c123b178ceb1a8282 | /sett/jenkins.py | e001b1bf4d4e59dbd5a8ad9a75c21044fbe740be | [] | no_license | cecedille1/Sett | 479bf00ca8df807f431e235c68b892bb90fab9b0 | bf8b9f204caa532a5fb8f110ab4e4a1cea03cb96 | refs/heads/master | 2021-01-10T13:28:11.614022 | 2016-03-31T16:51:14 | 2016-03-31T16:51:14 | 43,488,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from paver.easy import task, call_task
@task
def jenkins():
    """Runs the Jenkins tasks: quality report first, then coverage.

    The quality task may abort with SystemExit; coverage is still run, and
    the overall exit status reflects the quality outcome.
    """
    # Generate flake8.log; remember whether the quality gate passed.
    quality_ok = True
    try:
        call_task('quality', options={
            'output': 'flake8.log',
            'strictness': 2,
        })
    except SystemExit:
        quality_ok = False

    # Generate nosetests.xml and coverage.xml regardless of quality result.
    call_task('coverage', options={
        'xunit': 'nosetests.xml',
        'xcoverage': 'coverage.xml',
    })

    if not quality_ok:
        raise SystemExit(1)
| [
"gr@enix.org"
] | gr@enix.org |
f6a4e829c58c1e15e58c94bdee01590ada2c674c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_228/ch167_2020_06_22_18_23_48_389416.py | 3422198732d84d06c9a9ad4b22dcf97ec1c7fdeb | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | def bairro_mais_custoso(empresa):
semestral={}
for bairro in empresa:
semestral[bairro]=0
for gasto in empresa[bairro][6:]:
semestral[bairro]+=gasto
maiorcusto=0
nome=""
for bairro,custo in semestral.items():
if custo>maiorcusto:
maiorcusto=custo
nome=bairro
return nome
| [
"you@example.com"
] | you@example.com |
8f2d224306e0f28df1fcd4c35f5e3b04d6029624 | 8ce3df908d86c22ed9f1de810ce92493528f82c7 | /examples/speedup/tmp/main_init_mean.py | 46311a958c8584dd5ac9050bc8aeff17b33f2f34 | [] | no_license | kun0906/kjl_reload | cbe596b80500253e1a636b75dfc87c2aac78cbbd | a7690d9a548809d6d56f46af715f53fe28f945b7 | refs/heads/master | 2023-03-31T00:07:10.758612 | 2021-03-25T13:58:23 | 2021-03-25T13:58:23 | 349,272,691 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 26,465 | py | """ Main function
1. Instructions for executing the main file
1) Change the current directory to "examples/"
cd examples/
    2) Check the python3 version
python3 -V
3) Execute the main file
PYTHONPATH=../:./ python3.7 speedup/main_kjl.py > speedup/out/main_kjl.txt 2>&1 &
Note:
Check the memory leak issue with memory_profiler (mprof)
E.g.:
python3 -m memory_profiler example.py
# Visualization
mprof run --multiprocess <script>
mprof plot
# Instructions for using "mprof run"
1) Add "mprof" path into system path
PATH=$PATH:~/.local/bin
2) Execute the main file
PYTHONPATH=../:./ python3.7 mprof run -multiprocess speedup/main_kjl.py > speedup/out/main_kjl.txt 2>&1 &
# " PYTHONPATH=../:./ python3.7 ~/.local/lib/python3.7/site-packages/mprof.py run —multiprocess
# speedup/main_kjl.py > speedup/out/main_kjl.txt 2>&1 & "
3) Visualize the memory data
mprof plot mprofile_20210201000108.dat
"""
# Authors: kun.bj@outlook.com
# License: XXX
import itertools
import os
import os.path as pth
import time
import traceback
from joblib import delayed, Parallel
from memory_profiler import profile
from kjl.log import get_log
from kjl.utils.tool import execute_time, dump_data, load_data
from speedup._merge import _dat2csv, merge_res
from speedup._speedup_kjl import single_main
from speedup.generate_data import generate_data_speed_up
# create a customized log instance that can print the information.
lg = get_log(level='info')
# Datasets to evaluate (commented-out entries appear to be alternatives
# that were explored but not used -- kept for reference).
DATASETS = [
    ### UNB datasets
    # 'UNB3',
    # 'UNB_5_8_Mon',  # auc: 0.5
    # 'UNB12_comb',  # combine UNB1 and UNB2 normal and attacks
    # 'UNB13_comb',  # combine UNB1 and UNB2 normal and attacks
    # 'UNB14_comb',
    # 'UNB23_comb',  # combine UNB1 and UNB2 normal and attacks
    # 'UNB24_comb',
    # 'UNB34_comb',
    # 'UNB35_comb',
    # 'UNB12_1',  # combine UNB1 and UNB2 attacks, only use UNB1 normal
    # 'UNB13_1',
    # 'UNB14_1',
    # 'UNB23_2',  # combine UNB1 and UNB2 attacks, only use UNB2 normal
    # 'UNB24_2',
    # 'UNB34_3',
    # 'UNB35_3',
    # 'UNB34_3',
    # 'UNB45_4',
    # 'UNB123_1',  # combine UNB1, UNB2, UNB3 attacks, only use UNB1 normal
    # 'UNB134_1',
    # 'UNB145_1',
    # 'UNB245_2',
    # 'UNB234_2',  # combine UNB2, UNB3, UNB4 attacks, only use UNB2 normal
    # 'UNB35_3',  # combine UNB3, UNB5 attacks, only use UNB3 normal
    # 'UNB24',
    ### CTU datasets
    # 'CTU21',  # normal + abnormal (botnet) # normal 10.0.0.15 (too few normal flows)
    # 'CTU22',  # normal + abnormal (coinminer)
    # 'CTU31',  # normal + abnormal (botnet) # 192.168.1.191
    # 'CTU32',  # normal + abnormal (coinminer)
    ### MAWI datasets
    # 'MAWI32_2020',  # 'MAWI/WIDE_2020/pc_203.78.4.32',
    # 'MAWI32-2_2020',  # 'MAWI/WIDE_2020/pc_203.78.4.32-2',
    # 'MAWI165-2_2020',  # 'MAWI/WIDE_2020/pc_203.78.7.165-2', # ~25000 (flows src_dst)
    # 'ISTS1',
    ### IOT datasets
    # 'DWSHR_2020',  # 79 flows
    # 'WSHR_2020',  # 4 flows
    ### SMTV datasets. All smtv dataset are on NOEN server: /opt/smart-tv/roku-data-20190927-182117
    # SMTV_2019  # cp -rp roku-data-20190927-182117 ~/Datasets/UCHI/IOT_2019/
    # 'SMTV1_2019',
    # 'SMTV2_2019',
    ### Final datasets for the paper
    'UNB345_3',  # Combine UNB3, UNB3 and UNB5 attack data as attack data and only use UNB3's normal as normal data
    'CTU1',  # Two different abnormal data
    'MAWI1_2020',  # Two different normal data
    'MACCDC1',  # Two different normal data
    'SFRIG1_2020',  # Two different normal data
    'AECHO1_2020',  # Two different normal data
    'DWSHR_WSHR_2020',  # only use Dwshr normal as normal data, and combine Dwshr and wshr novelties as novelty
]

# Candidate model/algorithm names evaluated by the experiments below.
MODELS = [
    ### Algorithm name
    "OCSVM(rbf)",
    "KJL-OCSVM(linear)",
    "Nystrom-OCSVM(linear)",
    # "GMM(full)", "GMM(diag)",
    "KJL-GMM(full)", "KJL-GMM(diag)",
    "Nystrom-GMM(full)", "Nystrom-GMM(diag)",
    ### quickshift(QS)/meanshift(MS) are used before KJL/Nystrom projection
    # "QS-KJL-GMM(full)", "QS-KJL-GMM(diag)",
    # "MS-KJL-GMM(full)", "MS-KJL-GMM(diag)",
    # "QS-Nystrom-GMM(full)", "QS-Nystrom-GMM(diag)",
    # "MS-Nystrom-GMM(full)", "MS-Nystrom-GMM(diag)",
    ### quickshift(QS)/meanshift(MS) are used after KJL/Nystrom projection
    "KJL-QS-GMM(full)", "KJL-QS-GMM(diag)",
    # "KJL-MS-GMM(full)", "KJL-MS-GMM(diag)"
    "Nystrom-QS-GMM(full)", "Nystrom-QS-GMM(diag)",
    # "Nystrom-MS-GMM(full)", "Nystrom-MS-GMM(diag)"
]

# if it uses grid search or not. True for best params and False for default params
GSES = [('is_gs', True), ('is_gs', False)]
# which data do we want to extract features? src_dst or src
DIRECTIONS = [('direction', 'src_dst'), ]
# Features.
FEATS = [('feat', 'iat_size'), ('feat', 'stats')]
# if it uses header in feature or not.
HEADERS = [('is_header', True), ('is_header', False)]
# if quickshift(QS)/meanshift(MS) are used before KJL/Nystrom projection or after projection
BEFORE_PROJS = [('before_proj', False), ]
def _get_model_cfg(model_cfg, n_repeats=5, q=0.3, n_kjl=100, d_kjl=5, n_comp=1,
                   random_state=42, verbose=10, overwrite=False, n_jobs=10):
    """ Get all params needed for an experiment except for dataset params

    Parameters
    ----------
    model_cfg : dict with at least 'model_name', 'is_gs', 'before_proj', 'train_size'
    n_repeats : number of repetitions per experiment
    q : default kernel quantile when grid search is disabled
    n_kjl : number of rows sampled for the KJL/Nystrom projection
    d_kjl : projection dimension
    n_comp : default number of GMM components when grid search is disabled
    random_state
    verbose
    overwrite
    n_jobs

    Returns
    -------
       updated model_cfg: dict (flattened -- see create_case below)
    """
    # Quantile grid used when grid search is enabled.
    qs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95]

    before_proj = model_cfg['before_proj']
    is_gs = model_cfg['is_gs']
    model_name = model_cfg['model_name']
    # if 'OCSVM' in model_name:
    #     if 'rbf' in model_name:
    #         kernel = 'rbf'
    #     elif 'linear' in model_name:
    #         kernel = 'linear'
    #     else:
    #         msg = model_name
    #         raise NotImplementedError(msg)

    # Pick the GMM covariance type out of the model name (e.g. "KJL-GMM(full)").
    if 'GMM' in model_name:
        if 'full' in model_name:
            covariance_type = 'full'
        elif 'diag' in model_name:
            covariance_type = 'diag'
        else:
            msg = model_name
            raise NotImplementedError(msg)

    # if 'KJL' in model_name:
    #     is_kjl = True
    # else:
    #     is_kjl = False

    # Base configuration; per-model branches below override pieces of it.
    TEMPLATE = {"model_name": model_name,  # the case name of current experiment
                "train_size": model_cfg['train_size'],
                'detector': {'detector_name': 'GMM', 'GMM_covariance_type': None},
                'is_gs': False,
                'before_proj': before_proj,
                'after_proj': not before_proj,
                'std': {'is_std': False, 'is_std_mean': False},  # default use std
                'kjl': {'is_kjl': False, 'd_kjl': d_kjl},
                'nystrom': {'is_nystrom': False},
                'quickshift': {'is_quickshift': False},
                'meanshift': {'is_meanshift': False},
                'random_state': random_state,
                'n_repeats': n_repeats,
                # 'q_abnormal_thres': 0.9,
                'verbose': verbose,
                'overwrite': overwrite,
                'n_jobs': n_jobs,
                }

    def create_case(template=TEMPLATE, **kwargs):
        # Flatten the (possibly nested) template and the overrides into a
        # single one-level dict: nested dict values are merged key-by-key.
        # Binding TEMPLATE as a default is safe here because TEMPLATE is
        # rebuilt on every call of _get_model_cfg.
        case = {}
        for k, v in template.items():
            if type(v) is dict:
                for _k, _v in v.items():
                    case[_k] = _v
            else:
                case[k] = v
        for k, v in kwargs.items():
            if type(v) is dict:
                for _k, _v in v.items():
                    case[_k] = _v
            else:
                case[k] = v
        return case

    if model_name == "OCSVM(rbf)":
        # case 1: OCSVM(rbf)
        model_cfg = create_case(template=TEMPLATE,
                                detector={'detector_name': 'OCSVM',
                                          'OCSVM_kernel': 'rbf',
                                          'OCSVM_qs': qs if is_gs else [q],
                                          'OCSVM_nus': [0.5]
                                          },
                                is_gs=is_gs,  # default kjl=False
                                )
    elif model_name == "KJL-OCSVM(linear)":
        # case 12: KJL-OCSVM(linear)
        model_cfg = create_case(template=TEMPLATE,
                                detector={'detector_name': 'OCSVM',
                                          'OCSVM_kernel': 'linear',
                                          # 'OCSVM_qs': qs if is_gs else [q],
                                          'OCSVM_nus': [0.5]
                                          },
                                is_gs=is_gs,
                                kjl={'is_kjl': True,
                                     'kjl_qs': qs if is_gs else [q],  # kjl and OCSVM use the same q
                                     'kjl_ns': [n_kjl],
                                     'kjl_ds': [d_kjl]
                                     }
                                )
    elif model_name == "Nystrom-OCSVM(linear)":
        # case 13: Nystrom-OCSVM(linear)
        model_cfg = create_case(template=TEMPLATE,
                                detector={'detector_name': 'OCSVM',
                                          'OCSVM_kernel': 'linear',
                                          # 'OCSVM_qs': qs if is_gs else [q],
                                          'OCSVM_nus': [0.5]
                                          },
                                is_gs=is_gs,
                                nystrom={'is_nystrom': True,
                                         'nystrom_qs': qs if is_gs else [q],
                                         'nystrom_ns': [n_kjl],
                                         'nystrom_ds': [d_kjl]
                                         }
                                )
    # elif model_name == 'GMM(full)' or model_name == "GMM(diag)":
    #     # case 2: GMM
    #     model_cfg = create_case(template=TEMPLATE,
    #                             detector={'detector_name': 'GMM',
    #                                       'GMM_covariance_type': covariance_type,
    #                                       'GMM_n_components': [1, 5, 10, 15, 20, 25, 30, 35, 40,
    #                                                            45] if is_gs else [n_comp]
    #                                       },
    #                             is_gs=is_gs
    #                             )
    elif model_name == "KJL-GMM(full)" or model_name == "KJL-GMM(diag)":
        # case 3: KJL-GMM
        model_cfg = create_case(template=TEMPLATE,
                                detector={'detector_name': 'GMM',
                                          'GMM_covariance_type': covariance_type,
                                          'GMM_n_components': [1, 4, 6, 8, 10, 12, 14, 16, 18, 20]
                                          if is_gs else [n_comp]
                                          },
                                is_gs=is_gs,
                                kjl={'is_kjl': True,
                                     'kjl_qs': qs if is_gs else [q],
                                     'kjl_ns': [n_kjl],
                                     'kjl_ds': [d_kjl]
                                     }
                                )
    elif model_name == "Nystrom-GMM(full)" or model_name == "Nystrom-GMM(diag)":
        # case 4: Nystrom-GMM  # nystrom will take more time than kjl
        model_cfg = create_case(template=TEMPLATE,
                                detector={'detector_name': 'GMM',
                                          'GMM_covariance_type': covariance_type,
                                          'GMM_n_components': [1, 4, 6, 8, 10, 12, 14, 16, 18, 20] if is_gs
                                          else [n_comp]
                                          },
                                is_gs=is_gs,
                                nystrom={'is_nystrom': True,
                                         'nystrom_qs': qs if is_gs else [q],
                                         'nystrom_ns': [n_kjl],
                                         'nystrom_ds': [d_kjl]
                                         }
                                )
    # quickshift(QS)/meanshift(MS) are used before KJL/Nystrom projection
    elif model_name == "QS-KJL-GMM(full)" or model_name == "QS-KJL-GMM(diag)" or \
            model_name == "KJL-QS-GMM(full)" or model_name == "KJL-QS-GMM(diag)":
        # # case 5: QS-KJL-GMM
        # NOTE(review): n_components is left empty here; presumably the
        # downstream code derives it from the quickshift clusters -- confirm.
        model_cfg = create_case(template=TEMPLATE,
                                detector={'detector_name': 'GMM',
                                          'GMM_covariance_type': covariance_type,
                                          'GMM_n_components': []
                                          },
                                is_gs=is_gs,
                                kjl={'is_kjl': True,
                                     'kjl_qs': qs if is_gs else [q],
                                     'kjl_ns': [n_kjl],
                                     'kjl_ds': [d_kjl]
                                     },
                                quickshift={'is_quickshift': True,
                                            'quickshift_ks': [500],
                                            # [50, 100, 150, 200, 250, 300, 350, 400, 450, 500]
                                            'quickshift_betas': [0.9]  # fix QS
                                            }
                                )
    elif model_name == "MS-KJL-GMM(full)" or model_name == "MS-KJL-GMM(diag)" or \
            model_name == "KJL-MS-GMM(full)" or model_name == "KJL-MS-GMM(diag)":
        # case 6: MS-KJL-GMM
        model_cfg = create_case(template=TEMPLATE,
                                detector={'detector_name': 'GMM',
                                          'GMM_covariance_type': covariance_type,
                                          'GMM_n_components': []
                                          },
                                is_gs=is_gs,
                                kjl={'is_kjl': True,
                                     'kjl_qs': qs if is_gs else [q],
                                     'kjl_ns': [n_kjl],
                                     'kjl_ds': [d_kjl]
                                     },
                                meanshift={'is_meanshift': True,
                                           'meanshift_qs': []  # meanshift and kjl use the same q
                                           }
                                )
    # quickshift(QS)/meanshift(MS) are used after KJL/Nystrom projection
    elif model_name == "Nystrom-QS-GMM(full)" or model_name == "Nystrom-QS-GMM(diag)" or \
            model_name == "QS-Nystrom-GMM(full)" or model_name == "QS-Nystrom-GMM(diag)":
        # case 7: Nystrom-QS-GMM
        model_cfg = create_case(template=TEMPLATE,
                                detector={'detector_name': 'GMM',
                                          'GMM_covariance_type': covariance_type,
                                          'GMM_n_components': []
                                          },
                                is_gs=is_gs,
                                nystrom={'is_nystrom': True,
                                         'nystrom_qs': qs if is_gs else [q],
                                         'nystrom_ns': [n_kjl],
                                         'nystrom_ds': [d_kjl]
                                         },
                                quickshift={'is_quickshift': True,
                                            'quickshift_ks': [500],
                                            # [50, 100, 150, 200, 250, 300, 350, 400, 450, 500]
                                            'quickshift_betas': [0.9]
                                            }
                                )
    elif model_name == "Nystrom-MS-GMM(full)" or model_name == "Nystrom-MS-GMM(diag)" or \
            model_name == "MS-Nystrom-GMM(full)" or model_name == "MS-Nystrom-GMM(diag)":
        # case 8: Nystrom-MS-GMM
        model_cfg = create_case(template=TEMPLATE,
                                detector={'detector_name': 'GMM',
                                          'GMM_covariance_type': covariance_type,
                                          'GMM_n_components': []
                                          },
                                is_gs=is_gs,
                                nystrom={'is_nystrom': True,
                                         'nystrom_qs': qs if is_gs else [q],
                                         'nystrom_ns': [n_kjl],
                                         'nystrom_ds': [d_kjl]
                                         },
                                meanshift={'is_meanshift': True,
                                           'meanshift_qs': []
                                           }
                                )
    else:
        msg = model_name
        raise NotImplementedError(msg)

    return model_cfg
def _get_data(data_pth=None, data_name=None, direction='src_dst', feat='iat_size', header=False,
              overwrite=False):
    """Load data from data_pth if data_path exists, otherwise, generate data from pcap files

    Parameters
    ----------
    data_pth: path of the cached "Xy" feature file
    data_name
    direction
    feat
    header
    overwrite: when True, any cached file is deleted and regenerated

    Returns
    -------
    X: features
    y: labels
    """
    if overwrite:
        if pth.exists(data_pth): os.remove(data_pth)
    if not pth.exists(data_pth):
        # (Re)build the feature file from the raw pcaps; returns the path written.
        data_pth = generate_data_speed_up(data_name, feat_type=feat, header=header, direction=direction,
                                          out_file=data_pth,
                                          overwrite=overwrite)
    return load_data(data_pth)
class SingleEXP:
    """Runs one algorithm on one dataset and persists the result."""

    def __init__(self, in_dir=None, n_repeats=5, q=0.3, n_kjl=100, d_kjl=5, n_comp=1,
                 random_state=42, verbose=10, overwrite=False, n_jobs=10):
        """Generate the result by one algorithm on one dataset.

        Parameters
        ----------
        in_dir : root directory holding the per-dataset feature files
        n_repeats
        q : default kernel quantile (used when grid search is off)
        n_kjl
        d_kjl
        n_comp
        random_state
        verbose
        overwrite
        n_jobs
        """
        self.in_dir = in_dir
        self.n_repeats = n_repeats
        self.n_jobs = n_jobs
        self.q = q
        self.n_kjl = n_kjl
        self.d_kjl = d_kjl
        self.n_comp = n_comp
        self.verbose = verbose
        self.random_state = random_state
        self.overwrite = overwrite

    def get_data(self, data_cfg):
        """Get one data from data_cfg

        Parameters
        ----------
        data_cfg : dict with 'direction', 'feat', 'is_header', 'data_name'

        Returns
        -------
        data_cfg enriched with 'data' (X, y) and 'data_file'
        """
        data_file = pth.join(self.in_dir, data_cfg['direction'],
                             data_cfg['feat'] + '-header_' + str(data_cfg['is_header']),
                             data_cfg['data_name'], 'Xy-normal-abnormal.dat')
        X, y = _get_data(data_pth=data_file,
                         data_name=data_cfg['data_name'], direction=data_cfg['direction'],
                         feat=data_cfg['feat'], header=data_cfg['is_header'],
                         overwrite=self.overwrite)
        self.data_cfg = data_cfg
        self.data_cfg['data'] = (X, y)
        self.data_cfg['data_file'] = data_file
        return self.data_cfg

    def get_model_cfg(self, model_cfg):
        """get all model cfg based on model_cfg

        Returns
        -------
        the flattened model configuration dict
        """
        model_cfg = _get_model_cfg(model_cfg, n_repeats=self.n_repeats,
                                   q=self.q, n_kjl=self.n_kjl, d_kjl=self.d_kjl,
                                   n_comp=self.n_comp, n_jobs=self.n_jobs,
                                   random_state=self.random_state, verbose=self.verbose,
                                   overwrite=self.overwrite)
        self.model_cfg = model_cfg
        return self.model_cfg

    def execute(self, model_cfg=None, data_cfg=None):
        """build and evaluate a model on a data

        Parameters
        ----------
        model_cfg
        data_cfg

        Returns
        -------
        the result produced by single_main
        """
        res = single_main(model_cfg, data_cfg)
        return res

    def save_model(self):
        # Placeholder: model persistence is not implemented yet.
        pass

    def save_data(self, res, out_dir=''):
        """ save res to disk

        Parameters
        ----------
        res
        out_dir

        Returns
        -------
        path of the dumped result file
        """
        if not pth.exists(out_dir):
            os.makedirs(out_dir)
        out_file = pth.join(out_dir, 'res.dat')
        dump_data(res, out_file=out_file)
        # (data_file, model_name), (_best_res, _middle_res) = res
        # Best-effort CSV export; failures are reported but not fatal.
        try:
            _dat2csv(res, out_file=out_file + '.csv', feat_set=self.data_cfg['feat'])
        except Exception as e:
            print(f"Error({e})")
        return out_file

    def show_model(self, model, out_dir=None):
        # Placeholder: visualization is not implemented yet.
        pass

    def show_data(self, res, out_dir=None):
        # Placeholder: visualization is not implemented yet.
        pass
def _main(data_cfg, model_cfg, out_dir=''):
    """ Get the result by one model on one dataset

    Parameters
    ----------
    data_cfg
    model_cfg
    out_dir

    Returns
    -------
    (res, time_token): the result (or '' if an exception occurred) and the
    elapsed wall time in seconds
    """
    start = time.time()
    lg.info(f"data_cfg: {data_cfg}, model_cfg: {model_cfg}")
    res = ''
    try:
        # NOTE(review): q/n_kjl/n_comp/n_jobs are hard-coded here and
        # override the SingleEXP defaults -- confirm this is intentional.
        exp = SingleEXP(in_dir=f'speedup/data', n_repeats=5,
                        q=0.25, n_kjl=100, d_kjl=model_cfg['d_kjl'], n_comp=1,
                        random_state=42, verbose=10, overwrite=False,
                        n_jobs=5)
        data_cfg = exp.get_data(data_cfg)
        model_cfg = exp.get_model_cfg(model_cfg)
        model_cfg['out_dir'] = out_dir
        res = exp.execute(model_cfg, data_cfg)
        # update out_dir: encode the full experiment configuration in the path
        out_dir = pth.join(out_dir,
                           exp.data_cfg['direction'],
                           exp.data_cfg['feat'] + "-header_" + str(exp.data_cfg['is_header']),
                           exp.data_cfg['data_name'],
                           "before_proj_" + str(exp.model_cfg['before_proj']) + \
                           "-gs_" + str(exp.model_cfg['is_gs']),
                           exp.model_cfg['model_name'] + "-std_" + str(exp.model_cfg['is_std'])
                           + "_center_" + str(exp.model_cfg['is_std_mean']) + "-d_" + str(exp.model_cfg['d_kjl']) \
                           + "-" + str(exp.model_cfg['GMM_covariance_type']))
        out_file = exp.save_data(res, out_dir=out_dir)
        # exp.save_model(, out_dir=out_dir)
        exp.show_data(res, out_dir=out_dir)
    except Exception as e:
        # Failures are logged but swallowed so a batch run keeps going.
        traceback.print_exc()
        lg.error(f"Error: {data_cfg}, {model_cfg}")
    end = time.time()
    time_token = end - start
    return res, time_token
@execute_time
@profile
def main1(directions=[('direction', 'src_dst'), ],
          feats=[('feat', 'iat_size'), ('feat', 'stats')],
          headers=[('is_header', True), ('is_header', False)],
          gses=[('is_gs', True), ('is_gs', False)],
          before_projs=[('before_proj', False), ],
          ds=[('d_kjl', 5), ], out_dir='speedup/out',
          train_sizes=[('train_size', 5000)],
          is_parallel=True):
    """Run every (dataset, model) combination built from the given option
    lists, in parallel or sequentially, and dump the collected results.

    NOTE(review): the defaults are mutable lists; they are only read here,
    but callers should not mutate them.
    """
    # Store all the results
    res = []

    # Get all datasets: cross product of dataset names and data options.
    datasets = [('data_name', v) for v in DATASETS]
    datasets_cfg = list(itertools.product(datasets, directions, feats, headers))

    # Get all models: cross product of model names and model options.
    models = [('model_name', v) for v in MODELS]
    models_cfg = list(itertools.product(models, gses, before_projs, ds, train_sizes))

    # The total number of the experiments
    n_tot = len(list(itertools.product(datasets_cfg, models_cfg)))
    lg.info(f'n_tot: {n_tot}')
    # Log the full experiment plan before running anything.
    for i, (data_cfg, model_cfg) in enumerate(list(itertools.product(datasets_cfg, models_cfg))):
        lg.info(f'{i}/{n_tot}, {dict(data_cfg)}, {dict(model_cfg)}')

    n_cpus = os.cpu_count()
    lg.info(f'n_cpus: {n_cpus}')

    # If we execute all experiments in parallel
    if is_parallel:
        parallel = Parallel(n_jobs=5, verbose=30)
        with parallel:
            res = parallel(delayed(_main)(dict(data_cfg), dict(model_cfg), out_dir) for data_cfg, model_cfg, in
                           list(itertools.product(datasets_cfg, models_cfg)))
    else:
        # Run each combination in sequence.
        for i, (data_cfg, model_cfg) in enumerate(list(itertools.product(datasets_cfg, models_cfg))):
            res_, time_token = _main(dict(data_cfg), dict(model_cfg), out_dir)
            res.append(res_)
            lg.info(f'{i + 1}/{n_tot}, it takes {time_token:.5f}s')

    # Dump all results to disk
    dump_data(res, out_file=f'{out_dir}/res.dat')

    lg.info('\n\n***finish!')
def main():
    """Top-level driver: run the IAT_SIZE and STATS experiment batches,
    then merge all results; experiment failures do not stop the merge."""
    out_dir = 'speedup/out/kjl'
    try:
        ###########################################################################################################
        # Get results with IAT_SIZE
        main1(feats=[('feat', 'iat_size')],
              headers=[('is_header', False)],
              gses=[('is_gs', False)],
              before_projs=[('before_proj', False), ],
              ds=[('d_kjl', 5), ],
              train_sizes=[('train_size', 5000)],
              out_dir=out_dir,
              )

        ###########################################################################################################
        # Get results with STATS
        main1(feats=[('feat', 'stats')],
              headers=[('is_header', True)],
              # gses=[('is_gs', False)],
              before_projs=[('before_proj', False), ],
              ds=[('d_kjl', 5), ],
              out_dir = out_dir,
              )
    except Exception as e:
        traceback.print_exc()
        lg.error(e)

    ###########################################################################################################
    # Merge all results
    merge_res(in_dir=out_dir, datasets=DATASETS,
              directions=[('direction', 'src_dst'), ],
              feats=[('feat', 'iat_size'), ('feat', 'stats'), ],
              # headers=[('is_header', False)],
              models=MODELS,
              # gses=[('is_gs', True), ('is_gs', False)],
              before_projs=[('before_proj', False), ],
              ds=[('d_kjl', 5), ], )


if __name__ == '__main__':
    main()
| [
"kun.bj@foxmail.com"
] | kun.bj@foxmail.com |
3ade7e133fc24950c2902b71a88de65edfd42d9e | 1ba8d8ae275524a2ac61226dca4a21972cf6c355 | /Deep_Learning_with_TensorFlow/1.4.0/Chapter10/8. Estimator-DNNClassifier.py | 0355ab5fda86fa6dc0e3bc53a476c1c09fb64d08 | [
"MIT"
] | permissive | hyphenliu/TensorFlow_Google_Practice | 4e2da546f6056ddbbda26a2d9855cc96c2e3a708 | 0ea7d52a4056e5e53391a452a9bbd468175af7f5 | refs/heads/master | 2020-09-28T17:13:05.032238 | 2018-07-18T08:07:34 | 2018-07-18T08:07:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py |
# coding: utf-8
# ### 1. 模型定义。
# In[1]:
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Configure logging and load MNIST (labels as class indices, not one-hot).
tf.logging.set_verbosity(tf.logging.INFO)
mnist = input_data.read_data_sets("../../datasets/MNIST_data", one_hot=False)

# ### 1. Model definition: a single-hidden-layer DNN classifier over the
# 784-pixel flattened images, 10 output classes.
feature_columns = [tf.feature_column.numeric_column("image", shape=[784])]
estimator = tf.estimator.DNNClassifier(feature_columns=feature_columns,
                                       hidden_units=[500],
                                       n_classes=10,
                                       optimizer=tf.train.AdamOptimizer(),
                                       model_dir="log")

# ### 2. Train the model.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"image": mnist.train.images},
    y=mnist.train.labels.astype(np.int32),
    num_epochs=None,
    batch_size=128,
    shuffle=True)
estimator.train(input_fn=train_input_fn, steps=10000)

# ### 3. Evaluate on the test split.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"image": mnist.test.images},
    y=mnist.test.labels.astype(np.int32),
    num_epochs=1,
    batch_size=128,
    shuffle=False)
test_results = estimator.evaluate(input_fn=test_input_fn)
accuracy_score = test_results["accuracy"]
print("\nTest accuracy: %g %%" % (accuracy_score * 100))
# Bug fix: `print test_results` is Python 2 statement syntax and a
# SyntaxError under Python 3; use the print() function like the rest
# of this file.
print(test_results)
| [
"1786546913@qq.com"
] | 1786546913@qq.com |
c1d24c92172885a47858a5721728634a2ce52cf1 | b24ce5acced59ef367a20706949953f3ea81d57a | /tensorflow/contrib/learn/python/learn/datasets/mnist.py | 01262ff5f81053a3407809aa680589af27cec783 | [
"Apache-2.0"
] | permissive | BoldizsarZopcsak/Image-Classifier | b57dd3b72cf368cc1d66a5e318003a2a2d8338a4 | c0d471a55a70b3118178488db3c005a9277baade | refs/heads/master | 2022-11-19T12:28:49.625532 | 2018-01-20T15:48:48 | 2018-01-20T15:48:48 | 118,253,026 | 1 | 1 | Apache-2.0 | 2022-11-01T09:24:24 | 2018-01-20T15:04:57 | Python | UTF-8 | Python | false | false | 9,114 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import dtypes
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(f):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth].

  Args:
    f: A file object that can be passed into a gzip reader.

  Returns:
    data: A 4D uint8 numpy array [index, y, x, depth].

  Raises:
    ValueError: If the bytestream does not start with 2051.
  """
  print('Extracting', f.name)

  def read_u32(stream):
    # Big-endian unsigned 32-bit integer, as stored by the MNIST format.
    return numpy.frombuffer(
        stream.read(4), dtype=numpy.dtype(numpy.uint32).newbyteorder('>'))[0]

  with gzip.GzipFile(fileobj=f) as bytestream:
    magic = read_u32(bytestream)
    if magic != 2051:
      raise ValueError('Invalid magic number %d in MNIST image file: %s' %
                       (magic, f.name))
    num_images = read_u32(bytestream)
    rows = read_u32(bytestream)
    cols = read_u32(bytestream)
    pixels = bytestream.read(rows * cols * num_images)
    images = numpy.frombuffer(pixels, dtype=numpy.uint8)
    return images.reshape(num_images, rows, cols, 1)
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
def extract_labels(f, one_hot=False, num_classes=10):
"""Extract the labels into a 1D uint8 numpy array [index].
Args:
f: A file object that can be passed into a gzip reader.
one_hot: Does one hot encoding for the result.
num_classes: Number of classes for the one hot encoding.
Returns:
labels: a 1D uint8 numpy array.
Raises:
ValueError: If the bystream doesn't start with 2049.
"""
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST label file: %s' %
(magic, f.name))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels, num_classes)
return labels
class DataSet(object):
def __init__(self,
images,
labels,
fake_data=False,
one_hot=False,
dtype=dtypes.float32,
reshape=True):
"""Construct a DataSet.
one_hot arg is used only if fake_data is true. `dtype` can be either
`uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
`[0, 1]`.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
dtype)
if fake_data:
self._num_examples = 10000
self.one_hot = one_hot
else:
assert images.shape[0] == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False, shuffle=True):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1] * 784
if self.one_hot:
fake_label = [1] + [0] * 9
else:
fake_label = 0
return [fake_image for _ in xrange(batch_size)], [
fake_label for _ in xrange(batch_size)
]
start = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
perm0 = numpy.arange(self._num_examples)
numpy.random.shuffle(perm0)
self._images = self.images[perm0]
self._labels = self.labels[perm0]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
rest_num_examples = self._num_examples - start
images_rest_part = self._images[start:self._num_examples]
labels_rest_part = self._labels[start:self._num_examples]
# Shuffle the data
if shuffle:
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self.images[perm]
self._labels = self.labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size - rest_num_examples
end = self._index_in_epoch
images_new_part = self._images[start:end]
labels_new_part = self._labels[start:end]
return numpy.concatenate((images_rest_part, images_new_part), axis=0) , numpy.concatenate((labels_rest_part, labels_new_part), axis=0)
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir,
fake_data=False,
one_hot=False,
dtype=dtypes.float32,
reshape=True,
validation_size=5000):
if fake_data:
def fake():
return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
train = fake()
validation = fake()
test = fake()
return base.Datasets(train=train, validation=validation, test=test)
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
SOURCE_URL + TRAIN_IMAGES)
with open(local_file, 'rb') as f:
train_images = extract_images(f)
local_file = base.maybe_download(TRAIN_LABELS, train_dir,
SOURCE_URL + TRAIN_LABELS)
with open(local_file, 'rb') as f:
train_labels = extract_labels(f, one_hot=one_hot)
local_file = base.maybe_download(TEST_IMAGES, train_dir,
SOURCE_URL + TEST_IMAGES)
with open(local_file, 'rb') as f:
test_images = extract_images(f)
local_file = base.maybe_download(TEST_LABELS, train_dir,
SOURCE_URL + TEST_LABELS)
with open(local_file, 'rb') as f:
test_labels = extract_labels(f, one_hot=one_hot)
if not 0 <= validation_size <= len(train_images):
raise ValueError(
'Validation size should be between 0 and {}. Received: {}.'
.format(len(train_images), validation_size))
validation_images = train_images[:validation_size]
validation_labels = train_labels[:validation_size]
train_images = train_images[validation_size:]
train_labels = train_labels[validation_size:]
train = DataSet(train_images, train_labels, dtype=dtype, reshape=reshape)
validation = DataSet(validation_images,
validation_labels,
dtype=dtype,
reshape=reshape)
test = DataSet(test_images, test_labels, dtype=dtype, reshape=reshape)
return base.Datasets(train=train, validation=validation, test=test)
def load_mnist(train_dir='MNIST-data'):
return read_data_sets(train_dir)
| [
"zboldi@gmail.com"
] | zboldi@gmail.com |
2252aa44817b2d07ab6ed5d0ee6a3517d72f807c | aa7de5b75b65404715676121d61a9b06348d5f62 | /telemetry/telemetry/internal/platform/device_finder.py | 4547ecd20747c41e9da726a41ddad68480306fba | [
"BSD-3-Clause"
] | permissive | benschmaus/catapult | 3ca2ede51e4a23082e634fa07a03c11158bd6d9a | f388b1f6b90c670b6524fd68a295bae26ba8db70 | refs/heads/master | 2021-01-20T07:53:45.431708 | 2017-07-17T18:03:09 | 2017-07-17T18:03:09 | 90,060,605 | 0 | 1 | null | 2017-05-02T17:38:42 | 2017-05-02T17:38:41 | null | UTF-8 | Python | false | false | 1,304 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds devices that can be controlled by telemetry."""
from telemetry.internal.platform import android_device
from telemetry.internal.platform import cros_device
from telemetry.internal.platform import desktop_device
DEVICES = [
android_device,
cros_device,
desktop_device,
]
def _GetAllAvailableDevices(options):
"""Returns a list of all available devices."""
devices = []
for device in DEVICES:
devices.extend(device.FindAllAvailableDevices(options))
return devices
def GetDevicesMatchingOptions(options):
"""Returns a list of devices matching the options."""
devices = []
remote_platform_options = options.remote_platform_options
if (not remote_platform_options.device or
remote_platform_options.device == 'list'):
devices = _GetAllAvailableDevices(options)
elif remote_platform_options.device == 'android':
devices = android_device.FindAllAvailableDevices(options)
else:
devices = _GetAllAvailableDevices(options)
devices = [d for d in devices if d.guid ==
options.remote_platform_options.device]
devices.sort(key=lambda device: device.name)
return devices
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
d9d3518697740e62f5fcd78a2d44d8952a390926 | e976eb4db57ddee4947cbab8746446dd53f6cf6f | /101-150/三角形最小路径和.py | ec3af7dda958d297f2e463e0f3c2c521d4e43907 | [] | no_license | Aiyane/aiyane-LeetCode | 5328529079bcfbc84f4e4d67e3d8736b9745dc0d | 3c4d5aacc33f3ed66b6294894a767862170fb4f6 | refs/heads/master | 2020-04-01T20:33:54.125654 | 2019-06-25T09:56:10 | 2019-06-25T09:56:10 | 153,610,015 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 三角形最小路径和.py
"""
给定一个三角形,找出自顶向下的最小路径和。每一步只能移动到下一行中相邻的结点上。
例如,给定三角形:
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
自顶向下的最小路径和为 11(即,2 + 3 + 5 + 1 = 11)。
说明:
如果你可以只使用 O(n) 的额外空间(n 为三角形的总行数)来解决这个问题,那么你的算法会很加分。
"""
__author__ = 'Aiyane'
class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
if not triangle: return 0
vals = [v for v in triangle[-1]]
for line in triangle[:-1][::-1]:
for i, v in enumerate(line):
vals[i] = v + min(vals[i], vals[i+1])
return vals[0]
| [
"2310091880qq@gmail.com"
] | 2310091880qq@gmail.com |
d52dadc7c858dec5f454c2bdd7b914dc1e9870c5 | c90b3ac3e5ad11cb93d4e6b76b9b9c4a19d0f512 | /.history/copytest_20200502125750.py | 3f5f083b6c965e4eb030019b5d6986b066591552 | [] | no_license | rbafna6507/passwordstorageproject | 6465585e36c81075856af8d565fe83e358b4a40a | 480c30e358f7902ac0ef5c4e8d9556cb1d6d33f4 | refs/heads/master | 2022-11-25T12:05:02.625968 | 2020-07-27T21:33:38 | 2020-07-27T21:33:38 | 283,021,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | import pickle
import cryptography
from cryptography.fernet import Fernet
"""def encrypt(message: bytes, key: bytes) -> bytes:
return Fernet(key).encrypt(message)
def decrypt(token: bytes, key: bytes) -> bytes:
return Fernet(key).decrypt(token)
"""
infile = open('jeff.pkl','rb')
z = pickle.load(infile)
key = Fernet.generate_key()
f = Fernet(key)
e_userpass = z
username = input(b"Username: ")
password = input(b"password: ")
website = input("Website: ")
e_username = f.encrypt(username)
e_password = f.encrypt(password)
e_list = [b"Username: " + e_username, b"Password: " + e_password]
e_userpass["Website: " + website] = e_list
outfile = open("jeff.pkl", "wb")
pickle.dump(e_userpass, outfile)
outfile.close()
infile = open('jeff.pkl','rb')
z = pickle.load(infile)
e_userpass = z
j = [e_userpass[k] for k in e_userpass]
for k in j:
for q in k:
decrypt(q)
"""for key, value in d_userpass.items():
print(key, ' : ', value)""" | [
"35872545+rbafna6507@users.noreply.github.com"
] | 35872545+rbafna6507@users.noreply.github.com |
de6d747b4bcee54d33b0dee1e30d34fd54406e16 | 57fc5d54f5df359c7a53020fb903f36479d3a322 | /controllers/.history/supervisor/supervisor_20201129205247.py | 5141061cf25577be2ea6efb0fdd4690a9671d116 | [] | no_license | shenwuyue-xie/webots_testrobots | 929369b127258d85e66c5275c9366ce1a0eb17c7 | 56e476356f3cf666edad6449e2da874bb4fb4da3 | refs/heads/master | 2023-02-02T11:17:36.017289 | 2020-12-20T08:22:59 | 2020-12-20T08:22:59 | 323,032,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,472 | py | import math
import numpy as np
from numpy import random
from numpy.core.fromnumeric import size
from numpy.lib.function_base import meshgrid
import utilities as utils
from deepbots.supervisor.controllers.supervisor_emitter_receiver import \
SupervisorCSV
# # from deepbots.supervisor.wrappers.tensorboard_wrapper import TensorboardLogger
from tensorboardX import SummaryWriter
from models.networks import TD3
from controller import Keyboard
import os
Max_robotnum = 6
OBSERVATION_SPACE = (Max_robotnum-1) * 4 + 7 + 9 * Max_robotnum
ACTION_SPACE = Max_robotnum * 2 + 3
MAX_DSNUM = (Max_robotnum-1) * 4 + 7
DIST_SENSORS_MM = {'min': 0, 'max': 1000}
XPOSITION = {'min':-2, 'max':2}
YPOSITION = {'min':-1.5 , 'max':1.5}
ZPOSITION = {'min': -1, 'max' : 8}
MAX_DISTANCE = {'min':0, 'max':10}
MAX_ANGLE = {'min':-math.pi, 'max':math.pi}
# import ptvsd
# print("waiting for debugger attach")
# ptvsd.enable_attach(address=("127.0.0.1",7788))
# ptvsd.wait_for_attach()
class TaskDecisionSupervisor(SupervisorCSV):
def __init__(self,robot,observation_space,log_dir,v_action,v_observation,v_reward,windows=[10,100,200]):
super(TaskDecisionSupervisor,self).__init__()
self.timestep = int(self.supervisor.getBasicTimeStep())
self.keyboard = Keyboard()
self.keyboard.enable(self.timestep)
self.emitter = self.supervisor.getEmitter('emitter')
self.receiver = self.supervisor.getReceiver('receiver')
self.robot_list = robot
self.robot_handles = []
self.observation = [0 for i in range(observation_space)]
self.findThreshold = 0.2
self.steps = 0
self.steps_threshold = 6000
self.endbattery = [50000 for i in range(Max_robotnum)]
self.final_distance = [50 for i in range(Max_robotnum)]
self.final_target = self.supervisor.getFromDef('final_target')
self.should_done = False
self.startbattery = 50000
self.setuprobots()
self.step_cntr = 0
self.step_global = 0
self.step_reset = 0
self.score = 0
self.score_history = []
self.v_action = v_action
self.v_observation = v_observation
self.v_reward = v_reward
self.windows = windows
self.file_writer = SummaryWriter(log_dir, flush_secs=30)
def setuprobots(self):
for defname in self.robot_list:
self.robot_handles.append(self.supervisor.getFromDef(defname))
def handle_receiver(self):
message = []
for i in range(self.robot_num):
if self.receiver.getQueueLength() > 0:
string_message = self.receiver.getData().decode("utf-8")
string_message = string_message.split(",")
for ms in string_message:
message.append(ms)
self.receiver.nextPacket()
return message
def get_observations(self):
self.ds_values = []
self.final_distance = [50 for i in range(Max_robotnum)]
self.message = [1000 for i in range(MAX_DSNUM)]
self.angles = []
observation = []
message = self.handle_receiver()
self.angles = [0 for i in range(Max_robotnum)]
if len(message) != 0:
for i in range(len(message)):
self.message[i] = float(message[i])
self.ds_values.append(float(message[i]))
for j in range(MAX_DSNUM):
observation.append(utils.normalize_to_range(float(self.message[j]),DIST_SENSORS_MM['min'],DIST_SENSORS_MM['max'], 0, 1))
for k in range(0,self.robot_num):
robot_position = []
robot_position = self.robot_handles[k].getPosition()
robot_rotation = []
robot_rotation = self.robot_handles[k].getOrientation()
observation.append(utils.normalize_to_range(float(robot_position[0]),XPOSITION['min'],XPOSITION['max'],0,1))
observation.append(utils.normalize_to_range(float(robot_position[1]),YPOSITION['min'],YPOSITION['max'],0,1))
observation.append(utils.normalize_to_range(float(robot_position[2]),ZPOSITION['min'],ZPOSITION['max'],0,1))
observation.append(utils.normalize_to_range(float(robot_rotation[0]),-1,1,0,1))
observation.append(utils.normalize_to_range(float(robot_rotation[1]),-1,1,0,1))
observation.append(utils.normalize_to_range(float(robot_rotation[2]),-1,1,0,1))
observation.append(utils.normalize_to_range(float(robot_rotation[3]),-math.pi,math.pi,0,1))
self.final_distance[k] = utils.get_distance_from_target(self.robot_handles[k],self.final_target)
observation.append(utils.normalize_to_range(float(self.final_distance[k]),MAX_DISTANCE['min'],MAX_DISTANCE['max'],0,1))
self.angles[k] = utils.get_angle_from_target(self.robot_handles[k],self.final_target)
observation.append(utils.normalize_to_range(float(self.angles[k]),MAX_ANGLE['min'],MAX_ANGLE['max'],0,1))
for m in range(self.robot_num,Max_robotnum):
for n in range(9):
observation.append(0.5)
else :
observation = [0 for i in range(OBSERVATION_SPACE)]
self.observation = observation
return self.observation
# robot_children = self.robot_handles[k].getField('children')
# frontjoint_node = robot_children.getMFNode(3)
# frontjoint = frontjoint_node.getField('jointParameters')
# frontjoint = frontjoint.getSFNode()
# para = frontjoint.getField('position')
# front_hingeposition = para.getSFFloat()
# observation.append(utils.normalize_to_range(float(front_hingeposition),-math.pi/2,math.pi/2,0,1))
# front_ep = frontjoint_node.getField('endPoint')
# front_ep = front_ep.getSFNode()
# frontrotation_field = front_ep.getField('rotation')
# front_rotation = frontrotation_field.getSFRotation()
# for f in range(3):
# observation.append(utils.normalize_to_range(float(front_rotation[f]),-1,1,0,1))
# observation.append(utils.normalize_to_range(float(front_rotation[3]),-math.pi/2,math.pi/2,0,1))
# robot_children = self.robot_handles[k].getField('children')
# rearjoint_node = robot_children.getMFNode(4)
# rearjoint = rearjoint_node.getField('jointParameters')
# rearjoint = rearjoint.getSFNode()
# para = rearjoint.getField('position')
# rear_hingeposition = para.getSFFloat()
# observation.append(utils.normalize_to_range(float(rear_hingeposition),-math.pi/2,math.pi/2,0,1))
# rear_ep = rearjoint_node.getField('endPoint')
# rear_ep = rear_ep.getSFNode()
# rearrotation_field = rear_ep.getField('rotation')
# rear_rotation = rearrotation_field.getSFRotation()
# for r in range(3):
# observation.append(utils.normalize_to_range(float(rear_rotation[r]),-1,1,0,1))
# observation.append(utils.normalize_to_range(float(rear_rotation[3]),-math.pi/2,math.pi/2,0,1))
# final_position = []
# final_position = self.final_target.getPosition()
# observation.append(utils.normalize_to_range(float(final_position[0]),XPOSITION['min'],XPOSITION['max'],0,1))
# observation.append(utils.normalize_to_range(float(final_position[1]),YPOSITION['min'],YPOSITION['max'],0,1))
# observation.append(utils.normalize_to_range(float(final_position[2]),ZPOSITION['min'],ZPOSITION['max'],0,1))
# final_distance = []
# for d in range(self.robot_num):
# final_distance.append(utils.get_distance_from_target(self.robot_handles[d],self.final_target))
# self.final_distance[d] = final_distance[d]
def get_default_observation(self):
self.observation = [0 for i in range(OBSERVATION_SPACE)]
return self.observation
def empty_queue(self):
self.observation = [0 for i in range(OBSERVATION_SPACE)]
# self.shockcount = 0
self.overrangecount = 0
# self.flagadd = False
# self.flagreduce = False
self.endbattery = [50000 for i in range(Max_robotnum)]
self.dscount = 0
while self.supervisor.step(self.timestep) != -1:
if self.receiver.getQueueLength() > 0:
self.receiver.nextPacket()
else:
break
def get_reward(self,action):
if (self.observation == [0 for i in range(OBSERVATION_SPACE)] or len(self.observation) == 0 ) :
return 0
reward = 0
translations = []
for i in range(len(self.robot_handles)):
translation = self.robot_handles[i].getField('translation').getSFVec3f()
translations.append(translation)
if self.steps >= self.steps_threshold:
return -20
if np.min(self.ds_values) <= 50:
reward = reward -2
self.dscount = self.dscount + 1
if self.dscount > 60:
reward = reward -20
for m in range(Max_robotnum):
consumption = self.startbattery - self.endbattery[m]
reward = reward - float(consumption/self.startbattery) * 6
self.should_done = True
if self.dscount > 30:
reward = reward - 5
if np.min(self.ds_values) <= 150:
reward = reward -1
for j in range(len(self.robot_handles)):
if translations[j][2] <= ZPOSITION['min'] or translations[j][2] >= ZPOSITION['max']:
reward = reward - 2
self.overrangecount = self.overrangecount + 1
if translations[j][0] <= XPOSITION['min'] or translations[j][0] >= ZPOSITION['max']:
reward = reward - 2
self.overrangecount = self.overrangecount + 1
if self.overrangecount >40:
reward = reward -20
for m in range(Max_robotnum):
consumption = self.startbattery - self.endbattery[m]
reward = reward - float(consumption/self.startbattery) * 6
self.should_done = True
if min(self.final_distance) < self.findThreshold:
reward = reward + 100
for m in range(Max_robotnum):
consumption = self.startbattery - self.endbattery[m]
reward = reward - float(consumption/self.startbattery) * 6
return reward
else :
reward = reward - float(min(self.final_distance))
return reward
# """惩罚不停+-+-的行为 """
# if action[-1] > 0.9 :
# if self.flagreduce == True:
# self.shockcount = self.shockcount + 1
# self.flagadd = True
# self.flagreduce = False
# if action[-1] < 0.1:
# if self.flagadd == True:
# self.shockcount = self.shockcount + 1
# self.flagadd = False
# self.flagreduce =True
# if action[-1] >=0.1 and action[-1] <=0.9:
# self.shockcount = self.shockcount - 1
# self.flagadd = False
# self.flagreduce = False
# if self.shockcount >= 8:
# reward = reward - 4
# if self.shockcount >= 12:
# reward = reward - 8
# self.should_done = True
# """如果ban的动作值有十个值出现在动作区域,不稳定给负的reward,训练到100代左右时,模块几乎不再动自己的前后motor"""
# count = 0
# for k in range(12,24):
# action[k] = utils.normalize_to_range(float(action[k]),-0.2,1.2,0,1)
# if action[k] > 0.95 or action[k] < 0.05:
# count = count + 1
# if count > 9 :
# reward = reward - 2
"""something worse need to be modified"""
"""加机器人时还需要考虑rearmotor的位置,测试后发现是hingejoint的jointParameters域的position参数,需要找到这个参数"""
"""可以只改变相对应的hingejoint参数使两者结合,也可以改变模块位置和角度,但是改变模块位置和角度比较复杂"""
# position = abs(get...)
# 改变hingejoint,只需要改变front hingejoint的position参数
# 改变模块位置和角度
# deltax和deltaz可以根据position来计算,主要是rotation要更改,绕x轴旋转(1,0,0,rad)
# 但是之前寻找模块的位置时已经修改过自己的rotation,所以不好更改,并且更改了rotation,translation也要更改,用这套体姿表征体系更改起来特别复杂
# 另外,因为是往后加模块,所以除非尾巴上翘,否则都不能这样加(陷到地底下了)
# 况且,即便尾巴上翘,可以直接加到后ban上,可能也会因为重力原因把整个构型掀翻
# 综上所述,无论是可行性,还是稳定性原因,都建议只修改front_hingejoint的position值
def robot_step(self,action):
# x = np.random.rand()
# e = 0.8 + ep * 0.2/10000
# if x > e :
# action[-1] = np.random.rand()
robot_children = self.robot_handles[-1].getField('children')
rearjoint_node = robot_children.getMFNode(4)
joint = rearjoint_node.getField('jointParameters')
joint = joint.getSFNode()
para = joint.getField('position')
hingeposition = para.getSFFloat()
if hingeposition > 1 or hingeposition < -1:
return
action[-1] = utils.normalize_to_range(float(action[-1]),-0.1,1.1,0,1)
if action[-1] > 0 and action[-1] <= 0.4 and self.robot_num < Max_robotnum:
last_translation = self.robot_handles[-1].getField('translation').getSFVec3f()
last_angle = self.robot_handles[-1].getField('rotation').getSFRotation()[3]
last_rotation = self.robot_handles[-1].getField('rotation').getSFRotation()
Oritation = np.array(self.robot_handles[-1].getOrientation())
Oritation=Oritation.reshape(3,3)
Position = np.array(self.robot_handles[-1].getPosition())
vec = np.array([0, 0, -0.23])
# print(vec)
# print(np.dot(Oritation,vec))
final_position = (np.dot(Oritation,vec)+Position).reshape(-1).tolist()
# print(final_position)
new_translation = []
new_translation.append(final_position[0])
new_translation.append(final_position[1])
new_translation.append(final_position[2])
# delta_z = 0.23 * math.cos(last_angle)
# delta_x = 0.23 * math.sin(last_angle)
# new_translation = []
# new_translation.append(last_translation[0] - delta_x)
# new_translation.append(last_translation[1])
# new_translation.append(last_translation[2] - delta_z)
# if hingeposition > 0.8 or hingeposition < -0.8:
# delta = 0.03 - 0.03 * math.cos(hingeposition)
# delta_z = delta * math.cos(last_angle)
# delta_x = delta * math.sin(last_angle)
# new_translation[0] = new_translation[0] + delta_x
# new_translation[2] = new_translation[2] + delta_z
new_rotation = []
for i in range(4):
new_rotation.append(last_rotation[i])
flag_translation = False
flag_rotation = False
flag_front = False
flag_frontposition = False
flag_frontrotation = False
flag_battery = False
battery_remain = float(self.endbattery[self.robot_num])
importname = "robot_" + str(self.robot_num) + '.wbo'
new_file =[]
with open(importname,'r') as f:
lines = f.readlines()
for line in lines:
if "translation" in line:
if flag_translation == False:
replace = "translation " + str(new_translation[0]) + " " + str(new_translation[1]) + " " + str(new_translation[2])
line = "\t" + replace +'\n'
flag_translation = True
if "rotation" in line:
if flag_rotation == False:
replace = "rotation " + str(new_rotation[0]) + " " + str(new_rotation[1]) + " " + str(new_rotation[2]) + " " \
+str(new_rotation[3])
line = "\t" + replace +'\n'
flag_rotation = True
if 'front HingeJoint' in line:
flag_front = True
if 'position' in line:
if flag_front == True and flag_frontposition ==False:
repalce = "position "+ str(-hingeposition)
line = "\t\t\t\t" + repalce + '\n'
flag_frontposition = True
if 'rotation' in line :
if flag_front == True and flag_frontrotation == False:
replace = "rotation " + str(1)+ ' ' + str(0)+ ' ' + str(0) + ' ' + str(-hingeposition)
line = "\t\t\t\t" + replace + '\n'
flag_frontrotation = True
if "battery" in line :
flag_battery = True
if "50000" in line and flag_battery == True:
line = "\t\t" + str(battery_remain) + "," + " " + str(50000) + '\n'
new_file.append(line)
with open(importname,'w') as f:
for line in new_file:
f.write(line)
rootNode = self.supervisor.getRoot()
childrenField = rootNode.getField('children')
childrenField.importMFNode(-1,importname)
defname = 'robot_' + str(self.robot_num)
self.robot_handles.append(self.supervisor.getFromDef(defname))
self.robot_num = self.robot_num + 1
elif action[-1] >=0.9 and action[-1] <1 and self.robot_num >1:
battery_field = self.robot_handles[-1].getField('battery')
battery_remain = battery_field.getMFFloat(0)
self.endbattery[self.robot_num - 1] = battery_remain
removerobot = self.robot_handles[-1]
removerobot.remove()
self.robot_num = self.robot_num - 1
del(self.robot_handles[-1])
# new_translation_field = self.robot_handles[-1].getField('translation')
# new_translation_field.setSFVec3f(new_translation)
# new_rotation_field = self.robot_handles[-1].getField('rotation')
# new_rotation_field.setSFRotation(new_rotation)
# robot_children = self.robot_handles[-1].getField('children')
# frontjoint_node = robot_children.getMFNode(3)
# joint = frontjoint_node.getField('jointParameters')
# joint = joint.getSFNode()
# para = joint.getField('position')
# para.setSFFloat(-hingeposition)
# battery_remain = float(self.endbattery[self.robot_num - 1])
# battery_field = self.robot_handles[-1].getField('battery')
# battery_field.setMFFloat(0,battery_remain)
# battery_field.setMFFloat(1,self.startbattery)
def step(self,action):
if self.supervisor.step(self.timestep) == -1:
exit()
self.handle_emitter(action)
key = self.keyboard.getKey()
observation = self.get_observations()
reward = self.get_reward(action)
isdone = self.is_done()
info = self.get_info()
if key == Keyboard.CONTROL + ord("A"):
print()
print("Actions: ", action)
if key == ord("R"):
print()
print("Rewards: ", reward)
if key == Keyboard.CONTROL + ord("5"):
print()
print("Observations: ", observation)
if key == Keyboard.CONTROL + ord("M"):
print()
print("message", self.message)
if (self.v_action > 1):
self.file_writer.add_histogram(
"Actions/Per Global Step",
action,
global_step=self.step_global)
if (self.v_observation > 1):
self.file_writer.add_histogram(
"Observations/Per Global Step",
observation,
global_step=self.step_global)
if (self.v_reward > 1):
self.file_writer.add_scalar("Rewards/Per Global Step", reward,
self.step_global)
if (isdone):
self.file_writer.add_scalar(
"Is Done/Per Reset step",
self.step_cntr,
global_step=self.step_reset)
self.file_writer.flush()
self.score += reward
self.step_cntr += 1
self.step_global += 1
return observation,reward,isdone,info
def is_done(self):
self.steps = self.steps + 1
self.file_writer.flush()
if min(self.final_distance) <= self.findThreshold:
print("======== + Solved + ========")
return True
if self.steps >= self.steps_threshold or self.should_done:
return True
# rotation_field = self.robot_handles[0].getField('rotation').getSFRotation()
# """需要计算出模块完全侧边倒的rotation是多少,遇到这种情况直接进行下一次迭代"""
# # if rotation_field[0] < -0.4 and rotation_field[1] > 0.4 and rotation_field[2] > 0.4 and rotation_field[3] < -1.5708:
# # return True
return False
def reset(self):
print("Reset simulation")
self.respawnRobot()
self.steps = 0
self.should_done = False
self.robot_num = 1
"""observation 源代码wrapper有问题"""
self.score_history.append(self.score)
if (self.v_reward > 0):
self.file_writer.add_scalar(
"Score/Per Reset", self.score, global_step=self.step_reset)
for window in self.windows:
if self.step_reset > window:
self.file_writer.add_scalar(
"Score/With Window {}".format(window),
np.average(self.score_history[-window:]),
global_step=self.step_reset - window)
self.file_writer.flush()
self.step_reset += 1
self.step_cntr = 0
self.score = 0
return self.get_default_observation()
def flush(self):
if self._file_writer is not None:
self._file_writer.flush()
def close(self):
if self._file_writer is not None:
self._file_writer.close()
def get_info(self):
pass
def respawnRobot(self):
for robot in self.robot_handles:
robot.remove()
rootNode = self.supervisor.getRoot()
childrenField = rootNode.getField('children')
childrenField.importMFNode(-1,"robot_0.wbo")
# childrenField.importMFNode(-1,"robot_1.wbo")
# childrenField.importMFNode(-1,"robot_2.wbo")
# childrenField.importMFNode(-1,"robot_3.wbo")
# childrenField.importMFNode(-1,"robot_4.wbo")
# childrenField.importMFNode(-1,"robot_5.wbo")
self.robot_handles = []
for defrobotname in self.robot_list:
self.robot_handles.append(self.supervisor.getFromDef(defrobotname))
self.final_target = self.supervisor.getFromDef('final_target')
self.supervisor.simulationResetPhysics()
self._last_message = None
robot_defnames = ['robot_0']
supervisor_env = TaskDecisionSupervisor(robot_defnames, observation_space=OBSERVATION_SPACE,log_dir="logs/results/ddpg", v_action=1,v_observation=1,v_reward=1,windows=[10,\
10000, 2000])
agent = TD3(lr_actor=0.00025,
lr_critic=0.0025,
input_dims= OBSERVATION_SPACE,
gamma=0.99,
tau=0.001,
env=supervisor_env,
batch_size=512,
layer1_size=400,
layer2_size=300,
layer3_size=200,
layer4_size=400,
layer5_size=300,
layer6_size=200,
n_actions=ACTION_SPACE,
load_models=False,
save_dir='./models/saved/ddpg/')
score_history = []
np.random.seed(0)
for i in range(1, 20000):
done = False
score = 0
obs = list(map(float, supervisor_env.reset()))
supervisor_env.empty_queue()
first_iter = True
if i % 10000 == 0:
print("================= TESTING =================")
while not done:
act = agent.choose_action_test(obs).tolist()
supervisor_env.robot_step(act)
new_state, _, done, _ = supervisor_env.step(act)
obs = list(map(float, new_state))
else:
print("================= TRAINING =================")
while not done:
if (not first_iter):
act = agent.choose_action_train(obs).tolist()
else:
first_iter = False
act = [0,0]
for k in range(0,13):
act.append(0.5)
supervisor_env.robot_step(act)
new_state, reward, done, info = supervisor_env.step(act)
agent.remember(obs, act, reward, new_state, int(done))
agent.learn()
score += reward
obs = list(map(float, new_state))
score_history.append(score)
print("===== Episode", i, "score %.2f" % score,
"100 game average %.2f" % np.mean(score_history[-100:]))
if i % 100 == 0:
agent.save_models() | [
"1092673859@qq.com"
] | 1092673859@qq.com |
75c04d89f6a090103d6edb1dd15a4c3ce0c7ac47 | 32b9ed968247fd0f5b2291307059f2de4288a951 | /models/seq2seqGAN/networks.py | 4c7390dead57273a7f30eba2f25ee6fcd24fa0ff | [] | no_license | jshi31/T2ONet | 4aaf57636e2caf8f8d93ba742be8b4ebaaefe30d | 928cdc3311e887f3676a55db5d544fee5ac71a3f | refs/heads/master | 2023-05-25T22:14:54.441849 | 2023-05-10T06:45:02 | 2023-05-10T06:45:02 | 247,373,510 | 18 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,553 | py | import torch
import torch.nn as nn
import functools
from torch.autograd import Variable
import numpy as np
import pdb
###############################################################################
# Functions
###############################################################################
def weights_init(m):
    """Apply DCGAN-style weight initialization to a single module.

    Conv* layers get weights drawn from N(0, 0.02); BatchNorm2d layers get
    weights from N(1, 0.02) and zeroed biases. Any other module type is
    left untouched. Intended to be used via ``net.apply(weights_init)``.
    """
    layer_name = type(m).__name__
    if 'Conv' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm2d' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def get_norm_layer(norm_type='instance'):
    """Return a constructor for the requested normalization layer.

    Args:
        norm_type: 'batch' for affine BatchNorm2d, or 'instance' for
            non-affine InstanceNorm2d.

    Returns:
        A ``functools.partial`` wrapping the chosen layer class.

    Raises:
        NotImplementedError: if ``norm_type`` is not recognized.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False)
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
def define_G(input_nc, output_nc, ngf, netG, n_downsample_global=3, n_blocks_global=9, n_local_enhancers=1,
             n_blocks_local=3, norm='instance', gpu_ids=[]):
    """Factory for the generator networks.

    netG selects 'global' (GlobalGenerator), 'local' (LocalEnhancer) or
    'encoder' (Encoder). The built network is printed, optionally moved to the
    first GPU in gpu_ids, and initialized with weights_init.

    Note: gpu_ids=[] is a mutable default; it is only read here, never
    mutated, so it is safe as written.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    if netG == 'global':
        netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer)
    elif netG == 'local':
        netG = LocalEnhancer(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global,
                             n_local_enhancers, n_blocks_local, norm_layer)
    elif netG == 'encoder':
        netG = Encoder(input_nc, output_nc, ngf, n_downsample_global, norm_layer)
    else:
        # Bug fix: the original `raise('generator not implemented!')` raises a
        # plain str, which is itself a TypeError in Python 3 ("exceptions must
        # derive from BaseException"); raise a real exception type instead.
        raise NotImplementedError('generator [%s] not implemented!' % netG)
    print(netG)
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
        netG.cuda(gpu_ids[0])
    netG.apply(weights_init)
    return netG
def define_D(input_nc, cond_nc, ndf, n_layers_D, norm='instance', use_sigmoid=False, num_D=1, getIntermFeat=False, gpu_ids=[]):
    """Factory for the multi-scale conditional PatchGAN discriminator.

    Builds a MultiscaleDiscriminator, prints it, optionally moves it to the
    first GPU in gpu_ids, and applies the DCGAN-style weight init.
    """
    netD = MultiscaleDiscriminator(input_nc, cond_nc, ndf, n_layers_D,
                                   get_norm_layer(norm_type=norm),
                                   use_sigmoid, num_D, getIntermFeat)
    print(netD)
    if gpu_ids:
        assert torch.cuda.is_available()
        netD.cuda(gpu_ids[0])
    netD.apply(weights_init)
    return netD
def print_network(net):
    """Print a network (the first element if a list is given) followed by its
    total trainable-plus-buffer parameter count."""
    if isinstance(net, list):
        net = net[0]
    total = sum(param.numel() for param in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
##############################################################################
# Losses
##############################################################################
class GANLoss(nn.Module):
    """GAN objective that hides the real/fake target bookkeeping.

    Uses MSE (LSGAN) or BCE. Target tensors filled with the real/fake label
    are cached and rebuilt only when the prediction size changes. The input to
    ``__call__`` is either a list of predictions (last element used) or a list
    of such lists (one per discriminator scale, losses summed).
    """

    def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()

    def get_target_tensor(self, input, target_is_real):
        """Return a cached constant tensor shaped like ``input`` holding the
        real or fake label; recreated when the cached one has the wrong size."""
        if target_is_real:
            stale = (self.real_label_var is None
                     or self.real_label_var.numel() != input.numel())
            if stale:
                filled = self.Tensor(input.size()).fill_(self.real_label)
                self.real_label_var = Variable(filled, requires_grad=False)
            return self.real_label_var
        stale = (self.fake_label_var is None
                 or self.fake_label_var.numel() != input.numel())
        if stale:
            filled = self.Tensor(input.size()).fill_(self.fake_label)
            self.fake_label_var = Variable(filled, requires_grad=False)
        return self.fake_label_var

    def __call__(self, input, target_is_real):
        if not isinstance(input[0], list):
            # Single discriminator: score the last prediction in the list.
            target = self.get_target_tensor(input[-1], target_is_real)
            return self.loss(input[-1], target)
        # Multi-scale discriminator: sum the per-scale losses.
        total = 0
        for per_scale in input:
            pred = per_scale[-1]
            target = self.get_target_tensor(pred, target_is_real)
            total = total + self.loss(pred, target)
        return total
class VGGLoss(nn.Module):
    # Perceptual loss: L1 distance between VGG-19 feature maps of x and y,
    # summed over five relu stages with deeper stages weighted more heavily.
    def __init__(self, gpu_ids):
        # NOTE(review): gpu_ids is unused; the extractor is placed on the
        # default CUDA device unconditionally -- confirm this is intended.
        super(VGGLoss, self).__init__()
        self.vgg = Vgg19().cuda()
        self.criterion = nn.L1Loss()
        # Per-stage weights 1/32 .. 1 (deepest layer weighted most).
        self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]

    def forward(self, x, y):
        # y's features are detached so the loss backpropagates only through x.
        x_vgg, y_vgg = self.vgg(x), self.vgg(y)
        loss = 0
        for i in range(len(x_vgg)):
            loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
        return loss
##############################################################################
# Generator
##############################################################################
class LocalEnhancer(nn.Module):
    """Coarse-to-fine pix2pixHD generator: a GlobalGenerator backbone plus
    ``n_local_enhancers`` enhancer stages operating at successively higher
    resolutions. Each enhancer adds its downsampled-input features to the
    previous stage's output before upsampling.
    """
    def __init__(self, input_nc, output_nc, ngf=32, n_downsample_global=3, n_blocks_global=9,
                 n_local_enhancers=1, n_blocks_local=3, norm_layer=nn.BatchNorm2d, padding_type='reflect'):
        super(LocalEnhancer, self).__init__()
        self.n_local_enhancers = n_local_enhancers

        ###### global generator model #####
        # Coarsest stage reuses GlobalGenerator without its final output convs.
        ngf_global = ngf * (2**n_local_enhancers)
        model_global = GlobalGenerator(input_nc, output_nc, ngf_global, n_downsample_global, n_blocks_global, norm_layer).model
        model_global = [model_global[i] for i in range(len(model_global)-3)] # get rid of final convolution layers
        self.model = nn.Sequential(*model_global)

        ###### local enhancer layers #####
        for n in range(1, n_local_enhancers+1):
            ### downsample
            ngf_global = ngf * (2**(n_local_enhancers-n))
            model_downsample = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf_global, kernel_size=7, padding=0),
                                norm_layer(ngf_global), nn.ReLU(True),
                                nn.Conv2d(ngf_global, ngf_global * 2, kernel_size=3, stride=2, padding=1),
                                norm_layer(ngf_global * 2), nn.ReLU(True)]
            ### residual blocks
            model_upsample = []
            for i in range(n_blocks_local):
                model_upsample += [ResnetBlock(ngf_global * 2, padding_type=padding_type, norm_layer=norm_layer)]

            ### upsample
            model_upsample += [nn.ConvTranspose2d(ngf_global * 2, ngf_global, kernel_size=3, stride=2, padding=1, output_padding=1),
                               norm_layer(ngf_global), nn.ReLU(True)]

            ### final convolution (only the finest enhancer emits an RGB image)
            if n == n_local_enhancers:
                model_upsample += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]

            # Register the two halves by name so forward() can look them up.
            setattr(self, 'model'+str(n)+'_1', nn.Sequential(*model_downsample))
            setattr(self, 'model'+str(n)+'_2', nn.Sequential(*model_upsample))

        self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)

    def forward(self, input):
        ### create input pyramid (index 0 = full resolution)
        input_downsampled = [input]
        for i in range(self.n_local_enhancers):
            input_downsampled.append(self.downsample(input_downsampled[-1]))

        ### output at coarsest level
        output_prev = self.model(input_downsampled[-1])
        ### build up one enhancer stage at a time
        # Fuse the stage's downsampled-input features with the previous output
        # by element-wise addition, then upsample.
        for n_local_enhancers in range(1, self.n_local_enhancers+1):
            model_downsample = getattr(self, 'model'+str(n_local_enhancers)+'_1')
            model_upsample = getattr(self, 'model'+str(n_local_enhancers)+'_2')
            input_i = input_downsampled[self.n_local_enhancers-n_local_enhancers]
            output_prev = model_upsample(model_downsample(input_i) + output_prev)
        return output_prev
class GlobalGenerator(nn.Module):
    """pix2pixHD global generator: 7x7 stem -> n_downsampling stride-2 convs
    -> n_blocks ResNet blocks -> mirrored transposed-conv upsampling -> 7x7
    output conv with tanh (output in [-1, 1]).
    """
    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
                 padding_type='reflect'):
        assert(n_blocks >= 0)
        super(GlobalGenerator, self).__init__()
        activation = nn.ReLU(True)

        model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
        ### downsample (channels double at each step)
        for i in range(n_downsampling):
            mult = 2**i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
                      norm_layer(ngf * mult * 2), activation]

        ### resnet blocks at the bottleneck resolution
        mult = 2**n_downsampling
        for i in range(n_blocks):
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]

        ### upsample (mirror of the downsampling stack)
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
                      norm_layer(int(ngf * mult / 2)), activation]
        model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input):
        return self.model(input)
# Define a resnet block
class ResnetBlock(nn.Module):
    """Two-conv residual block: y = x + F(x), where F is
    pad -> 3x3 conv -> norm -> activation [-> dropout] -> pad -> 3x3 conv -> norm.
    ``padding_type`` selects reflect/replicate pad layers or the conv's own
    zero padding; spatial size is preserved either way.
    """

    def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)

    @staticmethod
    def _pad_and_conv(dim, padding_type, norm_layer):
        """One padded 3x3 conv + norm; explicit pad layer for reflect/replicate,
        implicit conv padding for 'zero'."""
        pad_layers = {
            'reflect': [nn.ReflectionPad2d(1)],
            'replicate': [nn.ReplicationPad2d(1)],
            'zero': [],
        }
        if padding_type not in pad_layers:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_padding = 1 if padding_type == 'zero' else 0
        return pad_layers[padding_type] + [
            nn.Conv2d(dim, dim, kernel_size=3, padding=conv_padding),
            norm_layer(dim),
        ]

    def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
        layers = self._pad_and_conv(dim, padding_type, norm_layer)
        layers.append(activation)
        if use_dropout:
            layers.append(nn.Dropout(0.5))
        layers.extend(self._pad_and_conv(dim, padding_type, norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        return x + self.conv_block(x)
class Encoder(nn.Module):
    """Feature encoder with instance-wise average pooling (pix2pixHD): encode
    the image, then replace every pixel's feature with the mean feature of its
    instance-segmentation region so each object gets one feature vector.
    """
    def __init__(self, input_nc, output_nc, ngf=32, n_downsampling=4, norm_layer=nn.BatchNorm2d):
        super(Encoder, self).__init__()
        self.output_nc = output_nc

        model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
                 norm_layer(ngf), nn.ReLU(True)]
        ### downsample
        for i in range(n_downsampling):
            mult = 2**i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
                      norm_layer(ngf * mult * 2), nn.ReLU(True)]

        ### upsample
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
                      norm_layer(int(ngf * mult / 2)), nn.ReLU(True)]

        model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input, inst):
        # inst: instance-id map aligned with `input` (same spatial size) --
        # the nonzero() indexing below assumes it is a 4-D (1-channel) tensor.
        outputs = self.model(input)

        # instance-wise average pooling
        # For every instance id and batch sample, average the encoded features
        # over that instance's pixels and broadcast the mean back, one output
        # channel at a time (indices[:,1] is the inst channel 0, offset by j).
        outputs_mean = outputs.clone()
        inst_list = np.unique(inst.cpu().numpy().astype(int))
        for i in inst_list:
            for b in range(input.size()[0]):
                indices = (inst[b:b+1] == int(i)).nonzero() # n x 4
                for j in range(self.output_nc):
                    output_ins = outputs[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]]
                    mean_feat = torch.mean(output_ins).expand_as(output_ins)
                    outputs_mean[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]] = mean_feat
        return outputs_mean
class ConditionEncoding(nn.Module):
    """Flatten an RNN hidden state into a (batch, cond_nc) conditioning
    vector via a Linear(1024 -> cond_nc) + BatchNorm1d + LeakyReLU head.
    The flattened hidden size (num_layers * num_dirs * hidden_size) must
    equal 1024.
    """

    def __init__(self, cond_nc):
        super(ConditionEncoding, self).__init__()
        self.fc = nn.Sequential(nn.Linear(1024, cond_nc), nn.BatchNorm1d(cond_nc), nn.LeakyReLU(0.2, True))

    def forward(self, hidden):
        """
        :param hidden: (num_layers * num_dirs, batch, hidden_size)
        :return: (batch, cond_nc)
        """
        batch_first = hidden.transpose(0, 1).contiguous()
        flat = batch_first.view(batch_first.shape[0], -1)
        return self.fc(flat)
class MultiscaleDiscriminator(nn.Module):
    """``num_D`` conditional PatchGAN discriminators applied at successively
    average-pooled scales. The sub-modules of each NLayerDiscriminator are
    re-registered here as 'scale{i}_layer{j}' so forward() can run them step
    by step and (optionally) expose intermediate features.
    """
    def __init__(self, input_nc, cond_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,
                 use_sigmoid=False, num_D=3, getIntermFeat=False):
        super(MultiscaleDiscriminator, self).__init__()
        self.num_D = num_D
        self.n_layers = n_layers
        self.getIntermFeat = getIntermFeat
        self.cond_nc = cond_nc
        # Sub-modules per discriminator: n_layers downsampling convs plus
        # fuse/extra/output convs (and a sigmoid when requested).
        self.n_all_layers = n_layers + (3 if not use_sigmoid else 4)

        for i in range(num_D):
            netD = NLayerDiscriminator(input_nc, cond_nc, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat)
            for j in range(self.n_all_layers):
                setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j)))

        self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)

    def singleD_forward(self, model, input, cond):
        # model: list of layers
        res = [input]
        # do the first n_layers, reduce size by 2 every layer
        for n in range(self.n_layers):
            res.append(model[n](res[-1]))
        # fuse input and condition: broadcast the (bs, cond_nc) vector over the
        # spatial dims and concatenate channel-wise.
        input = res[-1]
        bs, _, h, w = input.shape
        cond = cond.unsqueeze(-1).unsqueeze(-1).expand(bs, self.cond_nc, h, w)
        fuse = torch.cat([input, cond], 1)
        res.append(model[self.n_layers](fuse))
        # pass the final layers
        for n in range(self.n_layers + 1, self.n_all_layers):
            res.append(model[n](res[-1]))
        if self.getIntermFeat:
            return res[1:]
        else:
            return res[-1:]

    def forward(self, input, cond):
        num_D = self.num_D
        result = []
        input_downsampled = input
        # Run the finest-scale discriminator first ('scale{num_D-1}'), then
        # progressively average-pool the input for the coarser scales.
        for i in range(num_D):
            model = [getattr(self, 'scale'+str(num_D-1-i)+'_layer'+str(j)) for j in range(self.n_all_layers)]
            result.append(self.singleD_forward(model, input_downsampled, cond))
            if i != (num_D-1):
                input_downsampled = self.downsample(input_downsampled)
        return result
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
    """Conditional PatchGAN discriminator.

    The first ``n_layers`` stride-2 convolutions downsample the image; the
    condition vector is then broadcast spatially, concatenated channel-wise,
    and the remaining stride-1 convolutions map the fused features down to a
    1-channel patch score map (optionally sigmoid-squashed).
    """
    def __init__(self, input_nc, cond_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, getIntermFeat=False):
        super(NLayerDiscriminator, self).__init__()
        self.getIntermFeat = getIntermFeat
        self.n_layers = n_layers
        self.cond_nc = cond_nc
        self.use_sigmoid = use_sigmoid
        # Total sub-modules: n_layers downsampling convs + fuse conv + extra
        # conv + output conv (+ sigmoid when requested).
        self.n_all_layers = n_layers + (3 if not use_sigmoid else 4)

        kw = 4
        padw = int(np.ceil((kw-1.0)/2))
        sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]]

        nf = ndf
        for n in range(1, n_layers):
            nf_prev = nf
            nf = min(nf * 2, 512)
            sequence += [[
                nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
                norm_layer(nf), nn.LeakyReLU(0.2, True)
            ]]

        nf_prev = nf + cond_nc # add condition channel
        nf = min(nf * 2, 512)
        sequence += [[
            nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
            norm_layer(nf),
            nn.LeakyReLU(0.2, True)
        ]]

        sequence += [[
            nn.Conv2d(nf, nf, kernel_size=kw, stride=1, padding=padw),
            norm_layer(nf),
            nn.LeakyReLU(0.2, True)
        ]]

        sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]

        if use_sigmoid:
            sequence += [[nn.Sigmoid()]]

        # Register each stage as 'model{n}' so MultiscaleDiscriminator can
        # re-use the stages individually.
        for n in range(len(sequence)):
            setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))

    def forward(self, input, cond):
        """
        :param input: image tensor (bs, input_nc, H, W)
        :param cond: condition vector (bs, cond_nc)
        :return: patch score map, or the list of intermediate features when
                 getIntermFeat is set.
        """
        res = [input]
        # Downsampling stack: each of the first n_layers halves spatial size.
        for n in range(self.n_layers):
            model = getattr(self, 'model'+str(n))
            res.append(model(res[-1]))
        # Fuse image features with the broadcast condition vector.
        model = getattr(self, 'model'+str(self.n_layers))
        feat = res[-1]
        # Bug fix: `shape` is a property, not a callable -- the original
        # `input.shape(res[-1])` raised TypeError on every call.
        bs, _, h, w = feat.shape
        # Bug fix: a (bs, cond_nc) vector needs two trailing singleton dims
        # before expand() can broadcast it over (h, w); this mirrors
        # MultiscaleDiscriminator.singleD_forward.
        cond = cond.unsqueeze(-1).unsqueeze(-1).expand(bs, self.cond_nc, h, w)
        fuse = torch.cat([feat, cond], 1)
        res.append(model(fuse))
        # Remaining stride-1 layers (+ optional sigmoid) down to the patch map.
        for n in range(self.n_layers + 1, self.n_all_layers):
            model = getattr(self, 'model'+str(n))
            res.append(model(res[-1]))
        if self.getIntermFeat:
            return res[1:]
        else:
            return res[-1]
from torchvision import models
class Vgg19(torch.nn.Module):
    """Frozen torchvision VGG-19 feature extractor.

    forward() normalizes an RGB input in [0, 1] with the ImageNet mean/std and
    returns the activations after five slices of ``vgg19().features``
    (boundaries 2/7/12/21/30 -- presumably relu1_1..relu5_1; confirm against
    the torchvision layer ordering).
    """
    def __init__(self, requires_grad=False):
        super(Vgg19, self).__init__()
        # Downloads the ImageNet-pretrained weights on first use.
        vgg_pretrained_features = models.vgg19(pretrained=True).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        for x in range(2):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(2, 7):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(7, 12):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 21):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(21, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            # Freeze all VGG weights; this module is a fixed feature extractor.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        # transform. X \in [0, 1] RGB -> ImageNet-normalized.
        mean = torch.tensor([0.485, 0.456, 0.406], device=X.device).view(1, 3, 1, 1)
        std = torch.tensor([0.229, 0.224, 0.225], device=X.device).view(1, 3, 1, 1)
        X = (X - mean)/std
        h_relu1 = self.slice1(X)
        h_relu2 = self.slice2(h_relu1)
        h_relu3 = self.slice3(h_relu2)
        h_relu4 = self.slice4(h_relu3)
        h_relu5 = self.slice5(h_relu4)
        out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
        return out
| [
"j.shi@rochester.edu"
] | j.shi@rochester.edu |
ee82fb7cc4fd156f236ae8bafab57f23b8736e1b | 7b3711d4c6d7284255ba0270d49d120f984bf7c6 | /problems/549_binary_tree_longest_consecutive_sequence_ii.py | 9351f1ab9242d7c976ab632e31e3661c5bf978ce | [] | no_license | loganyu/leetcode | 2d336f30feb55379aaf8bf0273d00e11414e31df | 77c206305dd5cde0a249365ce7591a644effabfc | refs/heads/master | 2023-08-18T09:43:10.124687 | 2023-08-18T00:44:51 | 2023-08-18T00:44:51 | 177,875,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | '''
Given a binary tree, you need to find the length of Longest Consecutive Path in Binary Tree.
Especially, this path can be either increasing or decreasing. For example, [1,2,3,4] and [4,3,2,1] are both considered valid, but the path [1,2,4,3] is not valid. On the other hand, the path can be in the child-Parent-child order, where not necessarily be parent-child order.
Example 1:
Input:
1
/ \
2 3
Output: 2
Explanation: The longest consecutive path is [1, 2] or [2, 1].
Example 2:
Input:
2
/ \
1 3
Output: 3
Explanation: The longest consecutive path is [1, 2, 3] or [3, 2, 1].
Note: All the values of tree nodes are in the range of [-1e7, 1e7].
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def longestConsecutive(self, root: TreeNode) -> int:
        """Return the length of the longest consecutive path in the tree.

        The path may be increasing or decreasing, and may bend through a
        parent (child-parent-child order).
        """
        # Running best, updated by longest_path as it visits every node.
        self.max = 0
        self.longest_path(root)
        return self.max

    def longest_path(self, node):
        # Returns (inr, dcr): lengths of the longest strictly increasing /
        # strictly decreasing consecutive paths starting at `node` and going
        # downward into its subtree.
        if not node:
            return (0, 0)
        inr = dcr = 1
        if node.left:
            l = self.longest_path(node.left)
            if node.val == node.left.val + 1:
                # Child is node.val - 1: path decreases going down.
                dcr = l[1] + 1
            elif node.val == node.left.val - 1:
                # Child is node.val + 1: path increases going down.
                inr = l[0] + 1
        if node.right:
            r = self.longest_path(node.right)
            if node.val == node.right.val + 1:
                dcr = max(dcr, r[1] + 1)
            elif node.val == node.right.val - 1:
                inr = max(inr, r[0] + 1)
        # Best path bending at this node joins its increasing arm and its
        # decreasing arm; the node itself is counted once.
        self.max = max(self.max, dcr + inr - 1)
        return (inr, dcr)
| [
"logan.yu@cadre.com"
] | logan.yu@cadre.com |
39490c4724d2b3f930595661177772125731acc9 | ab79f8297105a7d412303a8b33eaa25038f38c0b | /education/school_transport/wizard/transfer_vehicle.py | 6d509565898bbd75f1f7b57dd95cfd567302c7c2 | [] | no_license | adahra/addons | 41a23cbea1e35079f7a9864ade3c32851ee2fb09 | c5a5678379649ccdf57a9d55b09b30436428b430 | refs/heads/master | 2022-06-17T21:22:22.306787 | 2020-05-15T10:51:14 | 2020-05-15T10:51:14 | 264,167,002 | 1 | 0 | null | 2020-05-15T10:39:26 | 2020-05-15T10:39:26 | null | UTF-8 | Python | false | false | 4,425 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2011-2012 Serpent Consulting Services (<http://www.serpentcs.com>)
# Copyright (C) 2013-2014 Serpent Consulting Services (<http://www.serpentcs.com>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class transfer_vehicle(osv.TransientModel):
    """Wizard that moves a transport participant from one vehicle to another."""
    _name = "transfer.vehicle"
    _description = "transfer vehicle"

    _columns = {
        'name':fields.many2one('student.student','Student Name', readonly=True),
        'participation_id':fields.many2one('transport.participant','Participation', required=True),
        'root_id':fields.many2one('student.transport','Root', required=True),
        'old_vehicle_id':fields.many2one('transport.vehicle','Old Vehicle No', required=True),
        'new_vehicle_id':fields.many2one('transport.vehicle','New Vehicle No', required=True),
    }

    def default_get(self, cr, uid, fields, context=None):
        """Pre-fill the student from the record the wizard was launched on."""
        if context is None:
            context = {}
        result = super(transfer_vehicle, self).default_get(cr, uid, fields, context=context)
        if context.get('active_id'):
            student = self.pool.get('student.student').browse(cr, uid, context.get('active_id'), context=context)
            if 'name' in fields:
                result.update({'name': student.id})
        return result

    def onchange_participation_id(self, cr, uid, ids, transport, context=None):
        """On participation change, derive its route and current vehicle."""
        if not transport:
            return {}
        transport_obj = self.pool.get('transport.participant').browse(cr, uid, transport, context)
        return {'value': {'root_id': transport_obj.transport_id.id, 'old_vehicle_id': transport_obj.vehicle_id.id}}

    def vehicle_transfer(self, cr, uid, ids, context=None):
        """Move the participant off the old vehicle and onto the new one.

        Raises except_osv when source and destination are the same vehicle or
        when the destination vehicle is already at capacity.
        """
        stu_prt_obj = self.pool.get('transport.participant')
        vehi_obj = self.pool.get('transport.vehicle')
        for new_data in self.browse(cr, uid, ids, context=context):
            vehi_data = vehi_obj.browse(cr, uid, new_data.old_vehicle_id.id, context=context)
            vehi_new_data = vehi_obj.browse(cr, uid, new_data.new_vehicle_id.id, context=context)
            #check for transfer in same vehicle
            if new_data.old_vehicle_id.id == new_data.new_vehicle_id.id:
                raise osv.except_osv(_('Error !'),_('Sorry you can not transfer in same vehicle.'))
            # First Check Is there vacancy or not
            # Bug fix: the vacancy test must inspect the *destination* vehicle
            # (vehi_new_data); the original checked the old vehicle, which the
            # participant is leaving anyway.
            person = int(vehi_new_data.participant) + 1
            if vehi_new_data.capacity < person:
                raise osv.except_osv(_('Error !'),_('There is No More vacancy on this vehicle.'))

            #remove entry of participant in old vehicle.
            participants = [prt_id.id for prt_id in vehi_data.vehi_participants_ids]
            participants.remove(new_data.participation_id.id)
            vehi_obj.write(cr, uid, new_data.old_vehicle_id.id, {'vehi_participants_ids':[(6,0,participants)]}, context=context)

            #entry of participant in new vehicle.
            participants = [prt_id.id for prt_id in vehi_new_data.vehi_participants_ids]
            participants.append(new_data.participation_id.id)
            vehi_obj.write(cr, uid, new_data.new_vehicle_id.id, {'vehi_participants_ids':[(6,0,participants)]}, context=context)

            stu_prt_obj.write(cr, uid, new_data.participation_id.id, {'vehicle_id': new_data.new_vehicle_id.id,}, context=context)
        return {}

transfer_vehicle()
| [
"prog1@381544ba-743e-41a5-bf0d-221725b9d5af"
] | prog1@381544ba-743e-41a5-bf0d-221725b9d5af |
990dbb97b825729a03f9e35c2c4534b21c1a05e4 | 2a171178942a19afe9891c2425dce208ae04348b | /kubernetes/client/models/v1alpha1_certificate_signing_request_status.py | 7d4e56624b8742735902dccd5ab749de8d607509 | [
"Apache-2.0"
] | permissive | ouccema/client-python | ac3f1dee1c5ad8d82f15aeecb87a2f5f219ca4f4 | d7f33ec53e302e66674df581904a3c5b1fcf3945 | refs/heads/master | 2021-01-12T03:17:54.274888 | 2017-01-03T22:13:14 | 2017-01-03T22:13:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,830 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.0-snapshot
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1CertificateSigningRequestStatus(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, certificate=None, conditions=None):
        """
        V1alpha1CertificateSigningRequestStatus - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'certificate': 'str',
            'conditions': 'list[V1alpha1CertificateSigningRequestCondition]'
        }

        self.attribute_map = {
            'certificate': 'certificate',
            'conditions': 'conditions'
        }

        self._certificate = certificate
        self._conditions = conditions

    @property
    def certificate(self):
        """
        Gets the certificate of this V1alpha1CertificateSigningRequestStatus.
        If request was approved, the controller will place the issued certificate here.

        :return: The certificate of this V1alpha1CertificateSigningRequestStatus.
        :rtype: str
        """
        return self._certificate

    @certificate.setter
    def certificate(self, certificate):
        """
        Sets the certificate of this V1alpha1CertificateSigningRequestStatus.
        If request was approved, the controller will place the issued certificate here.

        :param certificate: The certificate of this V1alpha1CertificateSigningRequestStatus.
        :type: str
        """
        self._certificate = certificate

    @property
    def conditions(self):
        """
        Gets the conditions of this V1alpha1CertificateSigningRequestStatus.
        Conditions applied to the request, such as approval or denial.

        :return: The conditions of this V1alpha1CertificateSigningRequestStatus.
        :rtype: list[V1alpha1CertificateSigningRequestCondition]
        """
        return self._conditions

    @conditions.setter
    def conditions(self, conditions):
        """
        Sets the conditions of this V1alpha1CertificateSigningRequestStatus.
        Conditions applied to the request, such as approval or denial.

        :param conditions: The conditions of this V1alpha1CertificateSigningRequestStatus.
        :type: list[V1alpha1CertificateSigningRequestCondition]
        """
        self._conditions = conditions

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() replaces six.iteritems: identical iteration behavior on
        # both Python 2 and 3, and drops the six dependency for this class.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Bug fix: the original unconditionally accessed `other.__dict__`,
        # raising AttributeError when compared with None or any object without
        # a __dict__; unrelated types now compare unequal instead.
        if not isinstance(other, V1alpha1CertificateSigningRequestStatus):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
c4e9f6e335fa168175f5e5b8bf42d3dcdb16c8d4 | b59e093876a78054bf58ae16fa245bace5d924a2 | /maxWidthRamp.py | fb91dfb4aae2584c36dc9a7c834098301dea7ae2 | [] | no_license | NeilWangziyu/Leetcode_py | 539551585413e1eebd6e6175ba3105c6bc17e943 | 4105e18050b15fc0409c75353ad31be17187dd34 | refs/heads/master | 2020-04-08T03:50:05.904466 | 2019-10-15T07:13:49 | 2019-10-15T07:13:49 | 158,991,828 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | class Solution(object):
def maxWidthRamp(self, A):
"""
:type A: List[int]
:rtype: int
"""
if not A:
return 0
if len(A) < 2:
return 0
res = 0
for i in range(len(A)):
for j in range(i+1, len(A)):
if A[i] <= A[j]:
res = max(res, j-i)
return res
def maxWidthRamp2(self, A):
if not A:
return 0
if len(A) < 2:
return 0
A_list = sorted(range(len(A)), key=lambda x:A[x])
last = len(A)
res = 0
for each in A_list:
if each < last:
last = each
else:
res = max(res, each - last)
return res
def maxWidthRamp3(self, A):
"""
:type A: List[int]
:rtype: int
"""
re = 0
stack = []
for i in range(len(A)):
if len(stack) == 0 or A[stack[-1]] > A[i]: # 防止下标越界,不用A[i]>A[i+1}
print(A[i])
stack.append(i) # stack中存放下标 ,按值升序
print(stack)
for j in range(len(A) - 1, re - 1, -1): # 最大堆的左端肯定在单调栈内
print(j, stack)
while stack and A[stack[-1]] <= A[j]:
k = j - stack.pop() # 对于栈顶元素来说不可能有更大值, 因此pop出
re = max(re, k) # 找到每个单调递增堆中元素的最大宽度坡,max即为整个数组最终结果
return re
# Quick manual check against the LeetCode 962 examples
# (expected outputs: 4 and 7).
s = Solution()
A = [6,0,8,2,1,5]
# print(s.maxWidthRamp2(A))
print(s.maxWidthRamp3(A))
A = [9,8,1,0,1,9,4,0,4,1]
# print(s.maxWidthRamp2(A))
print(s.maxWidthRamp3(A))
| [
"noreply@github.com"
] | NeilWangziyu.noreply@github.com |
049cb528754542f06ea9fb36b875d4720677fdeb | 5de14b0e96e17822aafdd0658aef846693db2786 | /app/core/tests/tests_admin.py | fbce74a7b8516edecfa5f3236de2aa61dfbd05bd | [
"MIT"
] | permissive | mahinm20/recipe-app-api | 392a6cec214d54522cd7ebbb21bb4a443ab8d6ef | f9d6c69ae71cdd3c265f50b503cb027c6cb307a9 | refs/heads/master | 2022-10-05T01:03:19.799872 | 2020-06-07T21:25:05 | 2020-06-07T21:25:05 | 264,731,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Django admin smoke tests for the custom user model pages."""

    def setUp(self):
        # Log the test client in as a superuser and create one ordinary user
        # for the list/change pages to display.
        # NOTE(review): the superuser credentials look like real personal
        # data; consider replacing with obviously-fake fixture values.
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email = 'mahinmalhotra20@gmail.com',
            password = 'mala2028'
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email = 'testing@alala.com',
            password = 'password123',
            name = 'Frank Costanza',
        )

    def test_users_listed(self):
        """Test that users are listed on the user page"""
        url = reverse('admin:core_user_changelist')
        res = self.client.get(url)

        # assertContains also verifies the response returned HTTP 200.
        self.assertContains(res, self.user.name)
        self.assertContains(res, self.user.email)

    def test_user_page_change(self):
        """Test that the user edit page works"""
        url = reverse('admin:core_user_change', args=[self.user.id])
        res = self.client.get(url)

        self.assertEqual(res.status_code, 200)

    def test_create_user_page(self):
        """Test that the create user page works"""
        url = reverse('admin:core_user_add')
        res = self.client.get(url)

        self.assertEqual(res.status_code, 200)
#
# from django.test import TestCase
# from django.contrib.auth import get_user_model
# from django.urls import reverse
# from django.test import Client
#
#
# class AdminSiteTests(TestCase):
#
# def setUp(self):
# self.client = Client()
# self.admin_user = get_user_model().objects.create_superuser(
# email='admin@londonappdev.com',
# password='password123'
# )
# self.client.force_login(self.admin_user)
# self.user = get_user_model().objects.create_user(
# email='test@londonappdev.com',
# password='password123',
# name='Test User Full Name',
# )
#
# def test_users_listed(self):
# """Test that users are listed on the user page"""
# url = reverse('admin:core_user_changelist')
# res = self.client.get(url)
#
# self.assertContains(res, self.user.name)
# self.assertContains(res, self.user.email)
| [
"mahinmalhotra20@gmail.com"
] | mahinmalhotra20@gmail.com |
486f1f5d15eb52bf0fc58132dc6ea64812ba691a | d274e22b1cc5d546855fe46b089b13cfe2f4047c | /september2020/solutions/day03_RepeatedSubstringPattern.py | bb4d713142470f406a4f140eab73257325d2f299 | [] | no_license | varunkumar032/lockdown-leetcode | ca6b7a8133033110680dd226c897dd8a1482682b | 15a72a53be9005eca816f018cb1b244f2aa4cdfb | refs/heads/master | 2023-06-30T08:31:54.323747 | 2021-07-12T11:29:59 | 2021-07-12T11:29:59 | 260,616,280 | 0 | 0 | null | 2021-05-06T10:24:48 | 2020-05-02T04:52:37 | Python | UTF-8 | Python | false | false | 728 | py | # Given a non-empty string check if it can be constructed by taking a substring of it and
# appending multiple copies of the substring together.
# You may assume the given string consists of lowercase English letters only and its length
# will not exceed 10000.
# Example 1:
# Input: "abab"
# Output: True
# Explanation: It's the substring "ab" twice.
# Example 2:
# Input: "aba"
# Output: False
# Example 3:
# Input: "abcabcabcabc"
# Output: True
# Explanation: It's the substring "abc" four times. (And the substring "abcabc" twice.)
def repeatedSubstringPattern(s):
    """Return True iff s equals some proper substring repeated >= 2 times.

    Tries every candidate unit length up to len(s) // 2; a unit can only work
    if its length divides len(s).
    """
    n = len(s)
    for width in range(1, n // 2 + 1):
        if n % width:
            continue
        unit = s[:width]
        if unit * (n // width) == s:
            return True
    return False
| [
"varunkumar032@gmail.com"
] | varunkumar032@gmail.com |
588a8a5be895bf8a6ad7215fca32c5d78ead3cf1 | 7202b4cf562fcacf2f684c1985b448b5780c4967 | /alds1/05d.py | c2600d1dd7eb418017cba2b350db2a0f9c426f45 | [] | no_license | mskt4440/AOJ | ce523182dbd75e85c1bba43d7d23217711b8e617 | f6d9ca36e77a88ed9ddbeb53340a745bf8cac157 | refs/heads/master | 2021-07-07T00:34:23.034606 | 2020-09-24T02:25:43 | 2020-09-24T02:25:43 | 188,768,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | #
# alds1 05d
#
def merge(A, left, mid, right):
    """Merge the sorted halves A[left:mid] and A[mid:right] in place.

    Returns the number of inversions between the two halves, i.e. the
    number of pairs (a, b) with a in the left half, b in the right half
    and a > b.
    """
    count = 0
    L = A[left:mid]
    R = A[mid:right]
    # Sentinel larger than any valid element (inputs are <= 10^9), so
    # neither half runs dry inside the merge loop.
    L.append(1000000001)
    R.append(1000000001)
    i, j = 0, 0
    for k in range(left, right):
        if L[i] <= R[j]:
            A[k] = L[i]
            i += 1
        else:
            A[k] = R[j]
            j += 1
            # Fix: count only when an element is consumed from the RIGHT
            # half — every element still waiting in L is greater than it,
            # so each forms one inversion. With j already incremented,
            # mid + j - k - 1 == (number of unconsumed left-half elements).
            if L[i] != 1000000001:
                count += mid + j - k - 1
    return count
def mergesort(A, left, right):
    """Sort A[left:right) in place and return its inversion count."""
    if right - left <= 1:
        # Zero or one element: already sorted, nothing to count.
        return 0
    mid = (left + right) // 2
    inversions = mergesort(A, left, mid)
    inversions += mergesort(A, mid, right)
    # merge() contributes the inversions that straddle the two halves.
    return inversions + merge(A, left, mid, right)
def main():
    """Read n and a line of n integers, then print the inversion count."""
    size = int(input())
    values = list(map(int, input().split()))
    print(mergesort(values, 0, size))
# Entry point when executed as a script (online-judge invocation).
if __name__ == '__main__':
    main()
| [
"mskt4440@gmail.com"
] | mskt4440@gmail.com |
8762b8e7d197c08bf7bf83f303877557b9522988 | f50f1aa1f8f139d546db3230a1cb1f53043fd9e6 | /util/admin/utempter/actions.py | 8c7613e8e50b84e6a3887ce3173972ecb7f86f3e | [] | no_license | pars-linux/corporate2 | 7887961d1552d39bc3b0bef4a60fd3413d9b82bb | 14d1eacfc824fb8d0bff8173e7ac06b36b88d10d | refs/heads/master | 2020-05-26T15:02:12.005654 | 2017-02-27T03:07:14 | 2017-02-27T03:07:14 | 82,476,084 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def build():
    """Compile the package (PiSi actions-API build step)."""
    # Link with "-z now": resolve all dynamic symbols at load time (full RELRO).
    shelltools.export("LDFLAGS", "%s -Wl,-z,now" % get.LDFLAGS())
    # Forward the distribution CFLAGS through the RPM_OPT_FLAGS make variable.
    autotools.make('RPM_OPT_FLAGS="%s"' % get.CFLAGS())
def install():
    """Install build artifacts into the package image (PiSi install step)."""
    # Stage the make install into the sandbox root, libs under /usr/lib.
    autotools.rawInstall('RPM_BUILD_ROOT="%s" LIBDIR=/usr/lib' % get.installDIR())
    # Ship the `utmp` helper binary in /usr/bin.
    pisitools.dobin("utmp")
| [
"eki@420bcd57-4a62-4fd6-832e-5ede16c90cc9"
] | eki@420bcd57-4a62-4fd6-832e-5ede16c90cc9 |
54cce0747110b0b1c271b203c1514e4c21cc3016 | 350c6ff3512579a8380b3c4f07e580ec9c69761c | /tests/test_sync_fetch.py | e56659d5b8578846a03394497b8673caeaa45810 | [
"Python-2.0",
"Apache-2.0"
] | permissive | tailhook/edgedb-python | 1928e2fb7e9dbb28c32fd26cde41b8acb6a6806f | c30518918d2ca3f224469d45ff12f47db5762ef4 | refs/heads/master | 2020-09-12T20:30:45.098907 | 2019-11-08T16:35:02 | 2019-11-08T16:35:02 | 222,544,936 | 0 | 0 | Apache-2.0 | 2019-11-18T21:08:32 | 2019-11-18T21:08:32 | null | UTF-8 | Python | false | false | 13,024 | py | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import edgedb
from edgedb import _testbase as tb
class TestSyncFetch(tb.SyncQueryTestCase):
    """Synchronous-API query tests for the EdgeDB Python driver.

    Exercises error recovery after bad queries, single-command execution
    semantics of fetchall/fetchone and their *_json variants, basic
    datatype round-tripping, and positional/named query arguments.
    """
    # Share one connection across test methods instead of isolating each.
    ISOLATED_METHODS = False
    SETUP = '''
        CREATE TYPE test::Tmp {
            CREATE REQUIRED PROPERTY tmp -> std::str;
        };
    '''
    TEARDOWN = '''
        DROP TYPE test::Tmp;
    '''
    def test_sync_parse_error_recover_01(self):
        """Connection stays usable after repeated syntax errors (fetch path)."""
        for _ in range(2):
            with self.assertRaises(edgedb.EdgeQLSyntaxError):
                self.con.fetchall('select syntax error')
            with self.assertRaises(edgedb.EdgeQLSyntaxError):
                self.con.fetchall('select syntax error')
            with self.assertRaisesRegex(edgedb.EdgeQLSyntaxError,
                                        'Unexpected end of line'):
                self.con.fetchall('select (')
            with self.assertRaisesRegex(edgedb.EdgeQLSyntaxError,
                                        'Unexpected end of line'):
                self.con.fetchall_json('select (')
            for _ in range(10):
                self.assertEqual(
                    self.con.fetchall('select 1;'),
                    edgedb.Set((1,)))
        self.assertFalse(self.con.is_closed())
    def test_sync_parse_error_recover_02(self):
        """Connection stays usable after syntax errors (execute path)."""
        for _ in range(2):
            with self.assertRaises(edgedb.EdgeQLSyntaxError):
                self.con.execute('select syntax error')
            with self.assertRaises(edgedb.EdgeQLSyntaxError):
                self.con.execute('select syntax error')
            for _ in range(10):
                # NOTE(review): the trailing comma makes this a one-tuple
                # expression statement; harmless, but likely unintended.
                self.con.execute('select 1; select 2;'),
    def test_sync_exec_error_recover_01(self):
        """Runtime errors (division by zero) don't poison later fetches."""
        for _ in range(2):
            with self.assertRaises(edgedb.DivisionByZeroError):
                self.con.fetchall('select 1 / 0;')
            with self.assertRaises(edgedb.DivisionByZeroError):
                self.con.fetchall('select 1 / 0;')
            for _ in range(10):
                self.assertEqual(
                    self.con.fetchall('select 1;'),
                    edgedb.Set((1,)))
    def test_sync_exec_error_recover_02(self):
        """Runtime errors don't poison later execute() calls."""
        for _ in range(2):
            with self.assertRaises(edgedb.DivisionByZeroError):
                self.con.execute('select 1 / 0;')
            with self.assertRaises(edgedb.DivisionByZeroError):
                self.con.execute('select 1 / 0;')
            for _ in range(10):
                self.con.execute('select 1;')
    def test_sync_exec_error_recover_03(self):
        """Interleave good and bad parameterized fetches on one statement."""
        query = 'select 10 // <int64>$0;'
        for i in [1, 2, 0, 3, 1, 0, 1]:
            if i:
                self.assertEqual(
                    self.con.fetchall(query, i),
                    edgedb.Set([10 // i]))
            else:
                with self.assertRaises(edgedb.DivisionByZeroError):
                    self.con.fetchall(query, i)
    def test_sync_exec_error_recover_04(self):
        """Interleave good and bad literal queries via execute()."""
        for i in [1, 2, 0, 3, 1, 0, 1]:
            if i:
                self.con.execute(f'select 10 // {i};')
            else:
                with self.assertRaises(edgedb.DivisionByZeroError):
                    self.con.fetchall(f'select 10 // {i};')
    def test_sync_exec_error_recover_05(self):
        """execute() rejects parameters; connection still usable afterwards."""
        with self.assertRaisesRegex(edgedb.QueryError,
                                    'cannot accept parameters'):
            self.con.execute(f'select <int64>$0')
        self.assertEqual(
            self.con.fetchall('SELECT "HELLO"'),
            ["HELLO"])
    def test_sync_fetch_single_command_01(self):
        """DDL statements return an empty set from fetchall/fetchall_json."""
        r = self.con.fetchall('''
            CREATE TYPE test::server_fetch_single_command_01 {
                CREATE REQUIRED PROPERTY server_fetch_single_command_01 ->
                    std::str;
            };
        ''')
        self.assertEqual(r, [])
        r = self.con.fetchall('''
            DROP TYPE test::server_fetch_single_command_01;
        ''')
        self.assertEqual(r, [])
        r = self.con.fetchall('''
            CREATE TYPE test::server_fetch_single_command_01 {
                CREATE REQUIRED PROPERTY server_fetch_single_command_01 ->
                    std::str;
            };
        ''')
        self.assertEqual(r, [])
        r = self.con.fetchall('''
            DROP TYPE test::server_fetch_single_command_01;
        ''')
        self.assertEqual(r, [])
        r = self.con.fetchall_json('''
            CREATE TYPE test::server_fetch_single_command_01 {
                CREATE REQUIRED PROPERTY server_fetch_single_command_01 ->
                    std::str;
            };
        ''')
        self.assertEqual(r, '[]')
        r = self.con.fetchall_json('''
            DROP TYPE test::server_fetch_single_command_01;
        ''')
        self.assertEqual(r, '[]')
    def test_sync_fetch_single_command_02(self):
        """SET MODULE / SET ALIAS return empty results; fetchone rejects them."""
        r = self.con.fetchall('''
            SET MODULE default;
        ''')
        self.assertEqual(r, [])
        r = self.con.fetchall('''
            SET ALIAS foo AS MODULE default;
        ''')
        self.assertEqual(r, [])
        r = self.con.fetchall('''
            SET MODULE default;
        ''')
        self.assertEqual(r, [])
        with self.assertRaisesRegex(edgedb.InterfaceError, r'fetchone\(\)'):
            self.con.fetchone('''
                SET ALIAS bar AS MODULE std;
            ''')
        # NOTE(review): this fetchall's result is not assigned, so the
        # assertEqual below re-checks the previous value of `r`.
        self.con.fetchall('''
            SET ALIAS bar AS MODULE std;
        ''')
        self.assertEqual(r, [])
        r = self.con.fetchall_json('''
            SET MODULE default;
        ''')
        self.assertEqual(r, '[]')
        r = self.con.fetchall_json('''
            SET ALIAS bar AS MODULE std;
        ''')
        self.assertEqual(r, '[]')
    def test_sync_fetch_single_command_03(self):
        """Transaction-control statements work via fetchall but not fetchone."""
        qs = [
            'START TRANSACTION',
            'DECLARE SAVEPOINT t0',
            'ROLLBACK TO SAVEPOINT t0',
            'RELEASE SAVEPOINT t0',
            'ROLLBACK',
            'START TRANSACTION',
            'COMMIT',
        ]
        for _ in range(3):
            with self.assertRaisesRegex(
                    edgedb.InterfaceError,
                    r'cannot be executed with fetchone\(\).*'
                    r'not return'):
                self.con.fetchone('START TRANSACTION')
            with self.assertRaisesRegex(
                    edgedb.InterfaceError,
                    r'cannot be executed with fetchone_json\(\).*'
                    r'not return'):
                self.con.fetchone_json('START TRANSACTION')
        for _ in range(3):
            for q in qs:
                r = self.con.fetchall(q)
                self.assertEqual(r, [])
            for q in qs:
                r = self.con.fetchall_json(q)
                self.assertEqual(r, '[]')
        for q in qs:
            with self.assertRaisesRegex(
                    edgedb.InterfaceError,
                    r'cannot be executed with fetchone\(\).*'
                    r'not return'):
                self.con.fetchone(q)
            with self.assertRaisesRegex(
                    edgedb.InterfaceError,
                    r'cannot be executed with fetchone_json\(\).*'
                    r'not return'):
                self.con.fetchone_json(q)
    def test_sync_fetch_single_command_04(self):
        """fetch APIs reject scripts containing more than one statement."""
        with self.assertRaisesRegex(edgedb.ProtocolError,
                                    'expected one statement'):
            self.con.fetchall('''
                SELECT 1;
                SET MODULE blah;
            ''')
        with self.assertRaisesRegex(edgedb.ProtocolError,
                                    'expected one statement'):
            self.con.fetchone('''
                SELECT 1;
                SET MODULE blah;
            ''')
        with self.assertRaisesRegex(edgedb.ProtocolError,
                                    'expected one statement'):
            self.con.fetchall_json('''
                SELECT 1;
                SET MODULE blah;
            ''')
    def test_sync_basic_datatypes_01(self):
        """Tuples, arrays and named tuples round-trip through the binary API."""
        for _ in range(10):
            self.assertEqual(
                self.con.fetchone(
                    'select ()'),
                ())
            self.assertEqual(
                self.con.fetchall(
                    'select (1,)'),
                edgedb.Set([(1,)]))
            self.assertEqual(
                self.con.fetchone(
                    'select <array<int64>>[]'),
                [])
            self.assertEqual(
                self.con.fetchall(
                    'select ["a", "b"]'),
                edgedb.Set([["a", "b"]]))
            self.assertEqual(
                self.con.fetchall('''
                    SELECT {(a := 1 + 1 + 40, world := ("hello", 32)),
                            (a:=1, world := ("yo", 10))};
                '''),
                edgedb.Set([
                    edgedb.NamedTuple(a=42, world=("hello", 32)),
                    edgedb.NamedTuple(a=1, world=("yo", 10)),
                ]))
            with self.assertRaisesRegex(
                    edgedb.InterfaceError,
                    r'query cannot be executed with fetchone\('):
                self.con.fetchone('SELECT {1, 2}')
            with self.assertRaisesRegex(edgedb.NoDataError,
                                        r'\bfetchone_json\('):
                self.con.fetchone_json('SELECT <int64>{}')
    def test_sync_basic_datatypes_02(self):
        """Bytes literals and bytes arguments round-trip unchanged."""
        self.assertEqual(
            self.con.fetchall(
                r'''select [b"\x00a", b"b", b'', b'\na']'''),
            edgedb.Set([[b"\x00a", b"b", b'', b'\na']]))
        self.assertEqual(
            self.con.fetchall(
                r'select <bytes>$0', b'he\x00llo'),
            edgedb.Set([b'he\x00llo']))
    def test_sync_basic_datatypes_03(self):
        """JSON fetch variants serialize the same values as the binary API."""
        for _ in range(10):
            self.assertEqual(
                self.con.fetchall_json(
                    'select ()'),
                '[[]]')
            self.assertEqual(
                self.con.fetchall_json(
                    'select (1,)'),
                '[[1]]')
            self.assertEqual(
                self.con.fetchall_json(
                    'select <array<int64>>[]'),
                '[[]]')
            self.assertEqual(
                json.loads(
                    self.con.fetchall_json(
                        'select ["a", "b"]')),
                [["a", "b"]])
            self.assertEqual(
                json.loads(
                    self.con.fetchone_json(
                        'select ["a", "b"]')),
                ["a", "b"])
            self.assertEqual(
                json.loads(
                    self.con.fetchall_json('''
                        SELECT {(a := 1 + 1 + 40, world := ("hello", 32)),
                                (a:=1, world := ("yo", 10))};
                    ''')),
                [
                    {"a": 42, "world": ["hello", 32]},
                    {"a": 1, "world": ["yo", 10]}
                ])
            self.assertEqual(
                json.loads(
                    self.con.fetchall_json('SELECT {1, 2}')),
                [1, 2])
            self.assertEqual(
                json.loads(self.con.fetchall_json('SELECT <int64>{}')),
                [])
            with self.assertRaises(edgedb.NoDataError):
                self.con.fetchone_json('SELECT <int64>{}')
    def test_sync_args_01(self):
        """Named query arguments are bound by keyword."""
        self.assertEqual(
            self.con.fetchall(
                'select (<array<str>>$foo)[0] ++ (<array<str>>$bar)[0];',
                foo=['aaa'], bar=['bbb']),
            edgedb.Set(('aaabbb',)))
    def test_sync_args_02(self):
        """Positional query arguments are bound in order."""
        self.assertEqual(
            self.con.fetchall(
                'select (<array<str>>$0)[0] ++ (<array<str>>$1)[0];',
                ['aaa'], ['bbb']),
            edgedb.Set(('aaabbb',)))
    def test_sync_args_03(self):
        """Malformed argument sets are rejected with QueryError."""
        with self.assertRaisesRegex(edgedb.QueryError, r'missing \$0'):
            self.con.fetchall('select <int64>$1;')
        with self.assertRaisesRegex(edgedb.QueryError, r'missing \$1'):
            self.con.fetchall('select <int64>$0 + <int64>$2;')
        with self.assertRaisesRegex(edgedb.QueryError,
                                    'combine positional and named parameters'):
            self.con.fetchall('select <int64>$0 + <int64>$bar;')
| [
"yury@magic.io"
] | yury@magic.io |
c72d86150e8893812b060a8c68cf3d14538adb84 | 3952a0faa9085169cca288e22ba934073923613c | /pytorch/pytorchcv/models/others/oth_ntsnet2.py | ebd73d84473c54962119bc50de008d54444b3653 | [
"MIT"
] | permissive | Magic-chao/imgclsmob | 4876651d8db8642dc363f83fe754e828394fa927 | b9281ed56e9705623941498f49ca833805b6a7e0 | refs/heads/master | 2020-05-24T00:27:12.622838 | 2019-05-13T06:27:00 | 2019-05-13T06:27:00 | 187,017,063 | 1 | 0 | null | 2019-05-16T11:47:36 | 2019-05-16T11:47:35 | null | UTF-8 | Python | false | false | 8,710 | py | import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from .oth_nts_resnet import resnet50
__all__ = ['oth_ntsnet']
INPUT_SIZE = (448, 448)  # (w, h)
CAT_NUM = 4       # number of top parts concatenated with the global feature
PROPOSAL_NUM = 6  # number of part proposals kept by NMS
# Anchor pyramid: one entry per feature level (stride, base size, scales,
# aspect ratios). p3/p4 get 2x3 = 6 anchors per cell, p5 gets 3x3 = 9.
_default_anchors_setting = (
    dict(layer='p3', stride=32, size=48, scale=[2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
    dict(layer='p4', stride=64, size=96, scale=[2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
    dict(layer='p5', stride=128, size=192, scale=[1, 2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
)
def generate_default_anchor_maps(anchors_setting=None, input_shape=INPUT_SIZE):
    """
    Generate the default anchor set for an input of shape `input_shape`.

    :param anchors_setting: anchor pyramid description (list of dicts with
        'stride', 'size', 'scale', 'aspect_ratio'); defaults to
        `_default_anchors_setting`
    :param input_shape: shape of input images, e.g. (h, w)
    :return: center_anchors: # anchors * 4 (oy, ox, h, w)
             edge_anchors: # anchors * 4 (y0, x0, y1, x1)
             anchor_areas: # anchors (h * w per anchor)
    """
    if anchors_setting is None:
        anchors_setting = _default_anchors_setting
    center_anchors = np.zeros((0, 4), dtype=np.float32)
    edge_anchors = np.zeros((0, 4), dtype=np.float32)
    anchor_areas = np.zeros((0,), dtype=np.float32)
    input_shape = np.array(input_shape, dtype=int)
    for anchor_info in anchors_setting:
        stride = anchor_info['stride']
        size = anchor_info['size']
        scales = anchor_info['scale']
        aspect_ratios = anchor_info['aspect_ratio']
        # One anchor cell per stride-sized patch of the input.
        output_map_shape = np.ceil(input_shape.astype(np.float32) / stride)
        # Fix: `np.int` was removed in NumPy >= 1.24; use the builtin `int`.
        output_map_shape = output_map_shape.astype(int)
        output_shape = tuple(output_map_shape) + (4,)
        # Cell centers sit at stride/2, 3*stride/2, ...
        ostart = stride / 2.
        oy = np.arange(ostart, ostart + stride * output_shape[0], stride)
        oy = oy.reshape(output_shape[0], 1)
        ox = np.arange(ostart, ostart + stride * output_shape[1], stride)
        ox = ox.reshape(1, output_shape[1])
        center_anchor_map_template = np.zeros(output_shape, dtype=np.float32)
        center_anchor_map_template[:, :, 0] = oy
        center_anchor_map_template[:, :, 1] = ox
        for scale in scales:
            for aspect_ratio in aspect_ratios:
                center_anchor_map = center_anchor_map_template.copy()
                # h = size*scale/sqrt(ar), w = size*scale*sqrt(ar), so h*w is
                # constant per scale while the aspect ratio varies.
                center_anchor_map[:, :, 2] = size * scale / float(aspect_ratio) ** 0.5
                center_anchor_map[:, :, 3] = size * scale * float(aspect_ratio) ** 0.5
                edge_anchor_map = np.concatenate((center_anchor_map[..., :2] - center_anchor_map[..., 2:4] / 2.,
                                                  center_anchor_map[..., :2] + center_anchor_map[..., 2:4] / 2.),
                                                 axis=-1)
                anchor_area_map = center_anchor_map[..., 2] * center_anchor_map[..., 3]
                center_anchors = np.concatenate((center_anchors, center_anchor_map.reshape(-1, 4)))
                edge_anchors = np.concatenate((edge_anchors, edge_anchor_map.reshape(-1, 4)))
                anchor_areas = np.concatenate((anchor_areas, anchor_area_map.reshape(-1)))
    return center_anchors, edge_anchors, anchor_areas
def hard_nms(cdds, topn=10, iou_thresh=0.25):
    """Greedy non-maximum suppression over scored candidate boxes.

    Each row of `cdds` is [score, y0, x0, y1, x1, ...extra]. Rows whose IoU
    with an already-kept, higher-scoring row reaches `iou_thresh` are
    dropped. At most `topn` rows are returned, best score first.
    """
    if not (type(cdds).__module__ == 'numpy' and len(cdds.shape) == 2 and cdds.shape[1] >= 5):
        raise TypeError('edge_box_map should be N * 5+ ndarray')
    # Sort ascending by score so the strongest candidate is the last row.
    remaining = cdds.copy()[np.argsort(cdds[:, 0])]
    kept = []
    while remaining.any():
        best = remaining[-1]
        kept.append(best)
        if len(kept) == topn:
            return np.array(kept)
        remaining = remaining[:-1]
        # Intersection rectangle of `best` against every remaining box.
        overlap_tl = np.maximum(remaining[:, 1:3], best[1:3])
        overlap_br = np.minimum(remaining[:, 3:5], best[3:5])
        extents = overlap_br - overlap_tl
        inter = extents[:, 0] * extents[:, 1]
        # Disjoint boxes yield negative extents: zero their intersection.
        inter[np.logical_or(extents[:, 0] < 0, extents[:, 1] < 0)] = 0
        union = ((remaining[:, 3] - remaining[:, 1]) * (remaining[:, 4] - remaining[:, 2])
                 + (best[3] - best[1]) * (best[4] - best[2]) - inter)
        remaining = remaining[inter / union < iou_thresh]
    return np.array(kept)
class ProposalNet(nn.Module):
    """Region-proposal head scoring NTS-Net anchors at three scales.

    From the 2048-channel backbone map it builds three progressively
    downsampled 128-channel maps (strides 1, 2, 4 relative to the input
    map) and emits one flat score vector per sample covering all anchors.
    """
    def __init__(self):
        super(ProposalNet, self).__init__()
        self.down1 = nn.Conv2d(2048, 128, 3, 1, 1)
        self.down2 = nn.Conv2d(128, 128, 3, 2, 1)
        self.down3 = nn.Conv2d(128, 128, 3, 2, 1)
        self.ReLU = nn.ReLU()
        # 1x1 convs: 6 anchors/cell at the two finer scales, 9 at the coarsest.
        self.tidy1 = nn.Conv2d(128, 6, 1, 1, 0)
        self.tidy2 = nn.Conv2d(128, 6, 1, 1, 0)
        self.tidy3 = nn.Conv2d(128, 9, 1, 1, 0)
    def forward(self, x):
        batch = x.size(0)
        feat1 = self.ReLU(self.down1(x))
        feat2 = self.ReLU(self.down2(feat1))
        feat3 = self.ReLU(self.down3(feat2))
        # Score each scale, then flatten and concatenate per sample.
        scores = [self.tidy1(feat1), self.tidy2(feat2), self.tidy3(feat3)]
        flattened = [s.view(batch, -1) for s in scores]
        return torch.cat(flattened, dim=1)
class NTSNet(nn.Module):
    """NTS-Net fine-grained classifier (200 classes).

    A ResNet-50 backbone produces a global feature; a ProposalNet scores
    anchor regions, hard NMS keeps the topN parts, each part crop is
    re-encoded by the same backbone, and the concatenated part + global
    features feed the final classifier.
    """
    def __init__(self,
                 topN=4,
                 full_output=False):
        super(NTSNet, self).__init__()
        self.topN = topN
        _, edge_anchors, _ = generate_default_anchor_maps()
        # Input is zero-padded by 224 px on every side so part crops near
        # the border stay in bounds; anchors are shifted accordingly.
        self.pad_side = 224
        # NOTE(review): `np.int` was removed in NumPy >= 1.24 — this line
        # fails on modern NumPy; builtin `int` is the drop-in replacement.
        self.edge_anchors = (edge_anchors + 224).astype(np.int)
        self.full_output = full_output
        self.pretrained_model = resnet50(pretrained=True)
        self.pretrained_model.avgpool = nn.AdaptiveAvgPool2d(1)
        self.pretrained_model.fc = nn.Linear(512 * 4, 200)
        self.proposal_net = ProposalNet()
        # Classifier over CAT_NUM part features + 1 global feature.
        self.concat_net = nn.Linear(2048 * (CAT_NUM + 1), 200)
        self.partcls_net = nn.Linear(512 * 4, 200)
    def forward(self, x):
        resnet_out, rpn_feature, feature = self.pretrained_model(x)
        x_pad = F.pad(x, (self.pad_side, self.pad_side, self.pad_side, self.pad_side), mode='constant', value=0)
        batch = x.size(0)
        # we will reshape rpn to shape: batch * nb_anchor
        # detach(): proposal scoring must not backprop into the backbone.
        rpn_score = self.proposal_net(rpn_feature.detach())
        # Per sample: rows of [score, y0, x0, y1, x1, anchor_index].
        all_cdds = [
            np.concatenate((x.reshape(-1, 1), self.edge_anchors.copy(), np.arange(0, len(x)).reshape(-1, 1)), axis=1)
            for x in rpn_score.data.cpu().numpy()]
        top_n_cdds = [hard_nms(x, topn=self.topN, iou_thresh=0.25) for x in all_cdds]
        top_n_cdds = np.array(top_n_cdds)
        # NOTE(review): two more `np.int` usages below — same NumPy issue.
        top_n_index = top_n_cdds[:, :, -1].astype(np.int)
        top_n_index = torch.from_numpy(top_n_index).long().to(x.device)
        top_n_prob = torch.gather(rpn_score, dim=1, index=top_n_index)
        part_imgs = torch.zeros([batch, self.topN, 3, 224, 224]).to(x.device)
        # Crop every selected part from the padded image and resize to 224x224.
        for i in range(batch):
            for j in range(self.topN):
                [y0, x0, y1, x1] = top_n_cdds[i][j, 1:5].astype(np.int)
                part_imgs[i:i + 1, j] = F.interpolate(x_pad[i:i + 1, :, y0:y1, x0:x1], size=(224, 224), mode='bilinear',
                                                      align_corners=True)
        part_imgs = part_imgs.view(batch * self.topN, 3, 224, 224)
        _, _, part_features = self.pretrained_model(part_imgs.detach())
        part_feature = part_features.view(batch, self.topN, -1)
        # Only the CAT_NUM best parts join the global feature.
        part_feature = part_feature[:, :CAT_NUM, ...].contiguous()
        part_feature = part_feature.view(batch, -1)
        # concat_logits have the shape: B*200
        concat_out = torch.cat([part_feature, feature], dim=1)
        concat_logits = self.concat_net(concat_out)
        raw_logits = resnet_out
        # part_logits have the shape: B*N*200
        part_logits = self.partcls_net(part_features).view(batch, self.topN, -1)
        if self.full_output:
            return [raw_logits, concat_logits, part_logits, top_n_index, top_n_prob]
        else:
            return concat_logits
def oth_ntsnet(pretrained=False):
    """Factory for NTS-Net.

    NOTE(review): `pretrained` is accepted for API parity with other model
    factories but is currently ignored — NTSNet always loads pretrained
    backbone weights internally. Confirm whether it should be honored.
    """
    return NTSNet()
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke test: build NTS-Net, check parameter count and output shape.

    Note: instantiating NTSNet downloads pretrained ResNet-50 weights,
    so this requires network access on first run.
    """
    import torch
    from torch.autograd import Variable
    pretrained = False
    models = [
        oth_ntsnet,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter count for the reference implementation.
        assert (model != oth_ntsnet or weight_count == 29033133)
        x = Variable(torch.randn(1, 3, 448, 448))
        y = net(x)
        # y.sum().backward()
        # assert (tuple(y[0].size()) == (1, 200))
        assert (tuple(y.size()) == (1, 200))
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| [
"osemery@gmail.com"
] | osemery@gmail.com |
84090d40ff2316d42b863fad8d472445fab799f6 | 1dca0675aa9c56bc13d2423362e21914c9426cfa | /web_29_jul_dev_8220/wsgi.py | e17ecdf2603d683b1472cb55c3f5b29a649233af | [] | no_license | crowdbotics-apps/web-29-jul-dev-8220 | 9c5a1ba90482dde61d8ec4f0e6ba9bc8f28b71cb | 5583b4280a97c8269a36265bf56c27c3c6515e54 | refs/heads/master | 2022-11-29T05:14:25.319284 | 2020-07-29T05:09:48 | 2020-07-29T05:09:48 | 283,399,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | """
WSGI config for web_29_jul_dev_8220 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web_29_jul_dev_8220.settings")
# Module-level WSGI callable that application servers (gunicorn, uWSGI) load.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
06b9b4cd93422773c9648fa9e62d931e63211375 | 357b233baba79125936def2065cdb7d20008b06e | /scraper/spiders/bilkent_turkish_writings.py | e3f4f5788e139e2e565a4f9229211db4945f1685 | [] | no_license | selimfirat/bilkent-turkish-writings-dataset | 00e7bf8d83f21ce54705022887ad49ab54e8fbcd | be662fd50987a653071af0673e1247fb4c4ce7ed | refs/heads/master | 2021-05-08T14:33:35.472323 | 2018-02-04T15:59:27 | 2018-02-04T15:59:27 | 120,089,498 | 41 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | import scrapy
from scrapy import Request
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from writing_entry import WritingEntry
class BilkentTurkishWritingsSpider(scrapy.Spider):
    """Crawl stars.bilkent.edu.tr and collect Turkish-writing file links.

    Links whose URL contains "ogrenciNo" are treated as downloadable
    writings and emitted as a WritingEntry (handed to FilesPipeline);
    every other in-domain link is followed recursively.
    """
    name = "bilkent_turkish_writings"
    custom_settings = {
        "ITEM_PIPELINES": {
            'scrapy.pipelines.files.FilesPipeline': 100
        },
        # Be polite: 250 ms between requests; downloads land under ../data/.
        "DOWNLOAD_DELAY": 0.25,
        "FILES_STORE": '../data/'
    }
    start_urls = ["https://stars.bilkent.edu.tr/turkce/"]
    allowed_domains = ['stars.bilkent.edu.tr']
    def __init__(self, *args, **kwargs):
        # No-op override: only delegates to scrapy.Spider.__init__.
        super(BilkentTurkishWritingsSpider, self).__init__(*args, **kwargs)
    def parse(self, response):
        """Collect writing URLs on this page and follow all other links."""
        print('Parsing '+response.url)
        file_urls = []
        for link in LxmlLinkExtractor(allow=self.allowed_domains).extract_links(response):
            if "ogrenciNo" in link.url:
                file_urls.append(link.url)
            else:
                yield Request(link.url, self.parse)
        yield WritingEntry(file_urls=file_urls)
| [
"yilmazselimfirat@gmail.com"
] | yilmazselimfirat@gmail.com |
4eb07103153cd420fa53c7b6c7e7f84d5bc4c189 | 9cfdc1373b59b92121a0a4ab795a395ac8440fbf | /python/false_nearest_neighbours.py | c5ca028d352032a8575fd13ac8e1c99f4829885b | [] | no_license | physycom/metnum | ae1da308ba333dd036dea46319c45c2ba81bd1ca | cb114bb49062f9a9ec165e294a05b24663c60f17 | refs/heads/master | 2023-04-01T07:30:21.122348 | 2023-03-22T16:06:27 | 2023-03-22T16:06:27 | 34,463,378 | 4 | 6 | null | 2018-03-23T21:59:19 | 2015-04-23T15:04:48 | C++ | UTF-8 | Python | false | false | 2,205 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 19 22:30:58 2018
@author: NICO
"""
import numpy as np
import matplotlib.pylab as plt
# Fix: import the submodule explicitly — a bare `import scipy` does not
# guarantee that `scipy.spatial.distance` is loaded.
import scipy.spatial.distance
# --- Integrate the Lorenz system with a forward-Euler scheme ---------------
dt = .01        # integration step
it = 10000      # number of samples
sigma = 16.
b = 4.
r = 45.92       # chaotic parameter set
x, y, z = np.empty(shape=(it)), np.empty(shape=(it)), np.empty(shape=(it))
x[0], y[0], z[0] = 10, 1, 1
Vx = lambda x, y, sigma : sigma*(y-x)
Vy = lambda x, y, z, r : -x*z + r*x - y
Vz = lambda x, y, z, b : -b*z + x*y
for i in range(0, it-1):
    x[i+1] = x[i] + dt*Vx(x[i], y[i], sigma)
    y[i+1] = y[i] + dt*Vy(x[i], y[i], z[i], r)
    z[i+1] = z[i] + dt*Vz(x[i], y[i], z[i], b)
# --- False Nearest Neighbours (Kennel et al.) on the y component -----------
RT = 15   # relative-distance threshold (criterion 1)
AT = 2    # attractor-size threshold (criterion 2)
# Fix: use the std of the series actually embedded (y); the original took
# np.std(x) although every embedding vector below is built from y.
sigmay = np.std(y)
maxEmbDim = 10
delay = 1
rEEM = it - (maxEmbDim*delay - delay)
# Delay-embedding matrix: row t -> (y[t], y[t-delay], ..., y[t-(m-1)*delay]).
EEM = np.concatenate([y[delay*maxEmbDim-delay:].reshape(1, len(y[delay*maxEmbDim-delay:])),
                      [y[delay*maxEmbDim-(i+1)*delay:-i*delay] for i in range(1, maxEmbDim)]
                      ], axis=0).T
Ind1 = np.empty(maxEmbDim)
Ind2 = np.empty(maxEmbDim)
embedm = 0 # only for plot
for k in range(1, maxEmbDim+1):
    # Pairwise Euclidean distances in the k-dimensional embedding.
    D = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(EEM[:, :k], "euclidean"))
    np.fill_diagonal(a=D, val=np.inf)
    l = np.argmin(D[:rEEM - maxEmbDim - k, :], axis=1)
    # Criterion 1: distance growth when one more coordinate is revealed.
    fnn1 = np.asarray([abs(y[i + maxEmbDim + k - 1]-y[li + maxEmbDim + k - 1])/D[i, li] for i,li in enumerate(l) if D[i, li] > 0 and li + maxEmbDim + k - 1 < it])
    # Criterion 2: the same growth measured against the attractor size.
    fnn2 = np.asarray([abs(y[i + maxEmbDim + k - 1]-y[li + maxEmbDim + k - 1])/sigmay for i,li in enumerate(l) if D[i, li] > 0 and li + maxEmbDim + k - 1 < it])
    Ind1[k-1] = len(np.where(np.asarray(fnn1) > RT)[0])
    Ind2[k-1] = len(np.where(np.asarray(fnn2) > AT)[0])
    if embedm == 0: # only for plot
        # Accept dimension k once both FNN fractions drop below 10%.
        # Fix: the original tested Ind2[-1], a still-uninitialized cell of
        # np.empty; the current dimension's count is Ind2[k-1].
        if Ind1[k-1] / len(fnn1) < .1 and Ind2[k-1] / len(fnn1) < .1 and Ind1[k-1] != 0:
            embedm = k
            #break # uncomment for true algorithm
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8,8))
ax.plot(np.arange(0, maxEmbDim), Ind1)
ax.set_xlabel('Embedding dimension', fontsize=14)
ax.set_ylabel('% FNN', fontsize=14)
ax.set_title('Optimal Embedding Dimension with FNN', fontsize=16)
# NOTE(review): the curve is plotted against 0-based indices (k-1) while
# `embedm` is the 1-based dimension — the marker may be offset by one.
ax.plot(embedm, Ind1[embedm], 'r.')
plt.text(embedm, Ind1[embedm] + 100, "EmbDim = $%d$"%(embedm))
| [
"nico.curti2@unibo.it"
] | nico.curti2@unibo.it |
09a5308750165fafa7c2e001427ab5f7c4828826 | e861f5fb5f8a8b766bf5633f69990bafe7552d96 | /triage_tickets/TicketList.py | 4f9f538f3cbef5114f965dd443ceab6d5186dda2 | [] | no_license | Lothilius/random_tools | 59d849629cbdd7cb9f469dc2b5fb4c1f2271ff2d | cb085723d474648d63cfcd28790d385bf06bb870 | refs/heads/master | 2020-07-03T21:44:07.508955 | 2019-08-13T03:50:06 | 2019-08-13T03:50:06 | 202,058,255 | 0 | 0 | null | 2019-08-13T03:50:07 | 2019-08-13T03:38:23 | Python | UTF-8 | Python | false | false | 14,775 | py | __author__ = 'Lothilius'
# coding: utf-8
import datetime
import re
import sys
import pandas as pd
from pyprogressbar import Bar
from HelpdeskConnection import HelpdeskConnection as hdc
from HelpdeskConnectionV3 import HelpdeskConnection as hdc3
from Ticket import Ticket
from helper_scripts.misc_helpers.data_manipulation import correct_date_dtype
from time import time
import numpy as np
# Widen pandas console output so ticket frames print without truncation.
pd.set_option('display.width', 340)
pd.set_option('display.max_columns', 50)
class TicketList(object):
""" The ticket list class creates an object that gathers individual tickets that belong to a particular list view.
The list view will need to be specified from the list of view available to the person running the quarry to
gather the tickets.
"""
    def __init__(self, helpdesk_que='Triage', with_resolution=False, with_conversations=False,
                 with_detail=True, last_id=0, version=1, query=None, count_only=False, open_only=False):
        """ Fetch the ticket list immediately on construction.
        :param helpdesk_que: name of the helpdesk view to pull (API v1 path)
        :param with_resolution: include resolution text when fetching detail
        :param with_conversations: include conversation threads in detail
        :param with_detail: fetch per-ticket detail (API v1 path only)
        :param last_id: only fetch detail for tickets with a greater ID
        :param version: helpdesk API version, 1 or 3
        :param query: search query string (API v3 only)
        :param count_only: stop after the first page of results
        :param open_only: restrict to open tickets (API v3 only)
        """
        self.ticket_cursor = 1
        self.total_count = 0
        self.last_ticket_id = last_id
        self.with_resolution = with_resolution
        self.with_conversations = with_conversations
        self.version = version
        self.query = query
        self.count_only = count_only
        self.open_only = open_only
        # v3 queries the search API directly; v1 must resolve a view ID first.
        if version == 3:
            self.tickets = list(self.get_all_tickets(with_detail=False))
        else:
            view_id = self.get_view_id(helpdesk_que)
            self.tickets = list(self.get_all_tickets(view_id, with_detail))
    def __getitem__(self, item):
        # Index/slice straight into the fetched ticket list.
        return self.tickets[item]
    def __str__(self):
        # Render as the plain repr of the underlying ticket list.
        return str(self.tickets)
    @staticmethod
    def get_filter_list():
        """ Retrieve the list of request filters (views) available.
        :return: parsed response describing the available view filters
        """
        url, querystring, headers = hdc.create_api_request()
        url = url + "/filters"
        querystring['OPERATION_NAME'] = "GET_REQUEST_FILTERS"
        # The filters endpoint takes no INPUT_DATA payload.
        del querystring['INPUT_DATA']
        # print querystring
        filter_list = hdc.fetch_from_helpdesk(url, querystring, headers)
        return filter_list
@staticmethod
def get_view_id(view_name=''):
try:
# Get the view ID for the pending view HD
filters = pd.DataFrame(TicketList.get_filter_list())
view__id = filters[filters.VIEWNAME == view_name].VIEWID.iloc[0]
return view__id
except ValueError, e:
print e
view_name = raw_input('Please enter valid view name: ')
TicketList.get_view_id(view_name)
except:
error_result = "Unexpected error 1TL: %s, %s" % (sys.exc_info()[0], sys.exc_info()[1])
print error_result
def aggregate_tickets(self, ticket_list_a, ticket_list_b):
""" Join to lists of helpdesk tickets.
:param ticket_list_a: list
:param ticket_list_b: list
:return: list - helpdesk_tickets
"""
helpdesk_tickets = ticket_list_a + ticket_list_b
return helpdesk_tickets
    def get_100_tickets(self, helpdesk_que='7256000001531681_MyView_7256000001531679', from_value=1): # 7256000001531681_MyView_7256000001531679 7256000001516015_MyView_7256000000736233
        """ Get helpdesk tickets for the respective query, 100 at a time.
        :param helpdesk_que: view ID to page through (API v1 path only)
        :param from_value: 1-based index of the first ticket in the page
        :return: list of dicts - helpdesk_tickets
        """
        if self.version == 3:
            url, querystring, headers = hdc3.create_api_request(from_value, query=self.query, open_only=self.open_only)
            helpdesk_tickets, ticket_info = hdc3.fetch_from_helpdesk(url, querystring, headers)
            # v3 reports the overall match count alongside every page.
            self.total_count = ticket_info['total_count']
            return helpdesk_tickets
        else:
            url, querystring, headers = hdc.create_api_request(helpdesk_que, from_value)
            return hdc.fetch_from_helpdesk(url, querystring, headers)
    def get_all_tickets(self, helpdesk_que='7256000001531681_MyView_7256000001531679', with_detail=True):
        """ Page through the whole query 100 tickets at a time.
        For API v1 with `with_detail`, afterwards fetch per-ticket detail for
        every ticket newer than `self.last_ticket_id` and return the detail
        dicts; otherwise return the summary dicts as fetched.
        :param helpdesk_que: view ID to page through (API v1 path only)
        :param with_detail: fetch per-ticket detail (ignored for v3)
        :return: list of ticket detail dicts, or list of summary dicts
        """
        try:
            # Get first 100 ticket from helpdesk
            helpdesk_tickets = self.get_100_tickets(helpdesk_que=helpdesk_que)
            if self.total_count > 0 and self.total_count > 100:
                print "Retrieving list of Tickets: \n"
                pbar = Bar(self.total_count / 100)
            # Check if more than 100 exist and need to be aggregated.
            # A final page of exactly 100 keeps the loop running until a
            # short (non-multiple-of-100) page comes back.
            if len(helpdesk_tickets) == 100 and not self.count_only:
                # TODO - Make this a recursive method!!!
                while len(helpdesk_tickets) % 100 == 0:
                    self.ticket_cursor = self.ticket_cursor + 100
                    helpdesk_tickets = self.aggregate_tickets(
                        helpdesk_tickets, self.get_100_tickets(helpdesk_que=helpdesk_que,
                                                               from_value=self.ticket_cursor))
                    if self.total_count != 0:
                        pbar.passed()
            if with_detail and self.version != 3:
                ticket_details = []
                try:
                    # Convert helpdesk ticket list to Dataframe
                    helpdesk_df = pd.DataFrame(helpdesk_tickets)
                    helpdesk_df['WORKORDERID'] = pd.to_numeric(helpdesk_df['WORKORDERID'], errors='coerce')
                    # Reduce ticket list to only tickets greater than the last ticket id
                    # Note: if no last ticket ID was given it defaults to 0, so
                    # detail is retrieved for every ticket.
                    detail_list = helpdesk_df[helpdesk_df['WORKORDERID'] > self.last_ticket_id]
                    if self.last_ticket_id != 0:
                        print 'Retrieving ticket detail from Work order ID: %s' % self.last_ticket_id
                    else:
                        print 'Retrieving ticket detail.'
                    # Gather Ticket details for each in the summery dataframe
                    pbar = Bar(len(detail_list['WORKORDERID'].tolist()))
                    for i, each in enumerate(detail_list['WORKORDERID'].tolist()):
                        # print i
                        ticket = Ticket(str(each), self.with_resolution, self.with_conversations)
                        ticket_details.append(ticket.details)
                        pbar.passed()
                except:
                    error_result = "Unexpected error 1TL: %s, %s" % (sys.exc_info()[0], sys.exc_info()[1])
                    print error_result
                    raise Exception(error_result)
                return ticket_details
            else:
                # NOTE(review): bare expression below is a no-op left in place.
                helpdesk_tickets
                return helpdesk_tickets
        except EOFError:
            error_result = "Unexpected error 1TL: %s, %s" % (sys.exc_info()[0], sys.exc_info()[1])
            # TODO -Fix this issue so that error_message is populated!
            print error_result
    @staticmethod
    def convert_time(unicode_series):
        """Given value for date time
        Convert it to a regular datetime string.

        Treats a 10-digit integer as a seconds epoch and a 13-digit integer
        as a milliseconds epoch (truncated to seconds). Values producing a
        year of 2009 or earlier, non-integer values, or conversion failures
        are passed through (as a string or unchanged, respectively).
        """
        # for each in unicode_series:
        #     print unicode_series
        try:
            unicode_series = int(unicode_series)
            if len(str(unicode_series)) == 10 or len(str(unicode_series)) == 13:
                if len(str(unicode_series)) == 10:
                    # Seconds-resolution epoch.
                    unicode_series = str(unicode_series)
                elif len(str(unicode_series)) == 13:
                    # Milliseconds-resolution epoch: drop the last 3 digits.
                    unicode_series = str(unicode_series)[:10]
                date_time_value = datetime.datetime.fromtimestamp(int(unicode_series)).strftime('%Y-%m-%d %H:%M:%S')
                # Sanity check: timestamps from before 2010 are assumed to be
                # plain numbers, not epochs, and returned as strings.
                if int(date_time_value[:4]) > 2009:
                    return date_time_value
                else:
                    return str(unicode_series)
            else:
                return str(unicode_series)
        except:
            # Not numeric (or conversion failed): hand the value back as-is.
            return unicode_series
    @staticmethod
    def reduce_to_year(unicode_series):
        """Truncate a 'YYYY-MM-DD HH:MM:SS' string to a date-only datetime.

        Non-matching values are returned unchanged.
        NOTE(review): despite the name this reduces to the DATE, not the
        year, and the bare except silently returns None on any failure
        (e.g. non-string input) — confirm both are intended.
        """
        try:
            pattern = re.compile("(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})$")
            match = pattern.match(unicode_series)
            if match:
                # Keep only the YYYY-MM-DD prefix and parse it.
                date_only = unicode_series[:10]
                date_only = datetime.datetime.strptime(date_only, '%Y-%m-%d')
                return date_only
            else:
                return unicode_series
        except:
            pass
    @staticmethod
    def reformat_as_dataframe(ticket_details):
        """ Use to reformat responses to a panda data frame.
        NOTE(review): despite the docstring below, the argument is read as a
        TicketList-like object — `.version` is accessed and the object is
        iterated — not a plain array of dicts; confirm intended contract.
        :param ticket_details: Should be in the form of an array of dicts ie [{1,2,...,n},{...}...,{...}]
        :return: returns panda dataframe
        """
        version = ticket_details.version
        ticket_details = pd.DataFrame(list(ticket_details))
        if version == 3:
            # v3 payloads carry nested dicts that must be flattened first.
            ticket_details = TicketList.version3_reformat_helper(ticket_details)
        # Rename the custom user-defined fields to business-friendly names.
        ticket_details = ticket_details.rename(columns={'UDF_CHAR1': 'Department_Group',
                                                        'UDF_CHAR2': 'System',
                                                        'UDF_CHAR11': 'System Component'})
        # Convert epoch values cell-by-cell into datetime strings.
        ticket_details = ticket_details.applymap(TicketList.convert_time)
        # ticket_details = ticket_details.applymap(TicketList.reduce_to_year)
        ticket_details = correct_date_dtype(ticket_details, date_time_format='%Y-%m-%d %H:%M:%S')
        return ticket_details
@staticmethod
def version3_reformat_helper(ticket_details):
    """Convert the raw v3 API data frame to the expected v1-style frame.

    Dict-valued cells are flattened, v3 column names are renamed onto the
    v1 naming schema, and columns that v3 does not always return are
    added as NaN so downstream code sees a stable set of columns.
    """
    # The API encodes missing values as None; pandas works with NaN.
    ticket_details.replace(to_replace={None: np.nan}, inplace=True)
    # Columns whose cells are dicts, mapped to the sub-keys to extract.
    # A 'name'/'value' sub-key collapses the column in place; any other
    # sub-key is promoted to a new top-level column.
    column_replacements = {'udf_fields': ['udf_char1', 'udf_char2', 'udf_char11'], 'mode': ['name'],
                           'template': ['name'], 'requester': ['email_id', 'name'], 'technician': ['name'],
                           'status': ['name'], 'sla': ['name'], 'site': ['name'],
                           'responded_time': ['value'], 'priority': ['name'],
                           'group': ['name'], 'first_response_due_by_time': ['value'], 'due_by_time': ['value'],
                           'department': ['name'], 'created_time': ['value'], 'created_by': ['value'],
                           'completed_time': ['value'], 'category': ['name'], 'resolved_time': ['value'],
                           'item': ['name'], 'level': ['name'], 'subcategory': ['name']}
    for column, subcolumns in column_replacements.items():
        for subcolumn in subcolumns:
            if subcolumn in ['name', 'value']:
                try:
                    ticket_details[column] = ticket_details[column].apply(
                        lambda x: TicketList.version3_null_filler(x, subcolumn))
                except Exception:
                    # Column absent from this payload: leave it alone.
                    # (Was a bare 'except:'; Exception keeps the intent
                    # without also trapping KeyboardInterrupt/SystemExit.)
                    pass
            else:
                ticket_details[subcolumn] = ticket_details[column].apply(
                    lambda x: TicketList.version3_null_filler(x, subcolumn))
    # Remove the field containing custom fields and approval status.
    ticket_details.drop(columns=['udf_fields', 'approval_status', 'first_response_due_by_time'], inplace=True)
    # Add columns that are supposed to come through v3 but sometimes don't.
    for optional_column in ('resolution', 'item', 'level', 'subcategory'):
        if optional_column not in ticket_details.columns.tolist():
            ticket_details[optional_column] = np.nan
    # Rename columns to the v1 naming schema.
    # NOTE(review): 'time_elapsed ' below carries a trailing space; confirm
    # against a real v3 payload before "fixing" it.
    ticket_details = ticket_details.rename(columns={'created_by': 'CREATEDBY', 'created_time': 'CREATEDTIME',
                                                    'description': 'SHORTDESCRIPTION', 'display_id': 'WORKORDERID',
                                                    'due_by_time': 'DUEBYTIME', 'group': 'GROUP',
                                                    'has_attachments': 'HASATTACHMENTS',
                                                    'id': 'LONG_REQUESTID', 'attachments': 'ATTACHMENTS',
                                                    'category': 'CATEGORY', 'completed_time': 'COMPLETEDTIME',
                                                    'deleted_on': 'DELETED_TIME', 'department': 'DEPARTMENT',
                                                    'has_notes': 'HASNOTES', 'mode': 'MODE', 'priority': 'PRIORITY',
                                                    'requester': 'REQUESTER', 'level': 'LEVEL', 'item': 'ITEM',
                                                    'responded_time': 'RESPONDEDTIME', 'email_id': 'REQUESTEREMAIL',
                                                    'site': 'SITE', 'sla': 'SLA',
                                                    'status': 'STATUS', 'technician': 'TECHNICIAN',
                                                    'template': 'TEMPLATEID', 'time_elapsed ': 'TIMESPENTONREQ',
                                                    'udf_char1': 'UDF_CHAR1', 'udf_char2': 'UDF_CHAR2',
                                                    'resolved_time': 'RESOLUTIONLASTUPDATEDTIME',
                                                    'subject': 'SUBJECT', 'resolution': 'RESOLUTION',
                                                    'subcategory': 'SUBCATEGORY', 'udf_char11': 'UDF_CHAR11'})
    # Columns the v1 API had but v3 never returns: create them empty.
    missing_columns = ['DELETED_TIME', 'HASCONVERSATION', 'ISPENDING', 'REQUESTTEMPLATE',
                       'STOPTIMER', 'TIMESPENTONREQ', 'RESOLVER']
    ticket_details = ticket_details.assign(**dict.fromkeys(missing_columns, np.nan))
    return ticket_details
@staticmethod
def version3_null_filler(record, column):
    """Return ``record[column]``, or NaN when the lookup is impossible.

    Handles both a missing key (KeyError) and a cell that is not
    subscriptable at all, e.g. a NaN float (TypeError), in one branch --
    the original had two identical except blocks.
    """
    try:
        return record[column]
    except (KeyError, TypeError):
        return np.nan
# Ad-hoc smoke test: fetch tickets for one requester and time the
# reformatting step.
# NOTE(review): the bare `print x` statements make this Python 2 only.
if __name__ == '__main__':
    start = time()
    try:
        test_tickets = TicketList(version=3, query='martin.valenzuela@bazaarvoice.com', count_only=False,
                                  open_only=True)
    except AttributeError as e:
        # NOTE(review): on this path only `tickets` is assigned, so the
        # `test_tickets` use below would raise NameError -- confirm the
        # constructor's AttributeError contract before relying on this.
        tickets = e.args[0]
    # print type(tickets)
    # print tickets[0]['WORKORDERID']
    test_tickets = TicketList.reformat_as_dataframe(test_tickets)
    # tickets.drop('ATTACHMENTS', axis=1, inplace=True)
    end = time()
    # Elapsed wall-clock time in minutes.
    print (end - start) / 60
    print test_tickets.shape
| [
"martin.valenzuela@bazaarvoice.com"
] | martin.valenzuela@bazaarvoice.com |
c23921ec0bde1e6d104e6e05ace6a9282567bf7a | 18ef7bb35b3029672e4c4072aa7868e3513bfe44 | /P1.py | 73d0507c967519673d3c90287e9f91022857b10e | [
"MIT"
] | permissive | chinmaydas96/CarND-LaneLines-P1 | d7e1235ad8ed2107588881f5dfcaa428b192cb3c | be8e03257962314d6adea68634d053d5f0550510 | refs/heads/master | 2022-04-22T12:31:43.611230 | 2020-04-16T10:39:34 | 2020-04-16T10:39:34 | 255,964,402 | 0 | 0 | MIT | 2020-04-15T15:46:48 | 2020-04-15T15:46:47 | null | UTF-8 | Python | false | false | 19,123 | py | #!/usr/bin/env python
# coding: utf-8
# # Self-Driving Car Engineer Nanodegree
#
#
# ## Project: **Finding Lane Lines on the Road**
# ***
# In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
#
# Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
#
# In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
#
# ---
# Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
#
# **Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
#
# ---
# **The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Transform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
#
# ---
#
# <figure>
# <img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
# </figcaption>
# </figure>
# <p></p>
# <figure>
# <img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
# </figcaption>
# </figure>
# **Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
# ## Import Packages
# In[1]:
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
# ## Read in an Image
# In[2]:
#reading in an image
# Quick sanity check that the test data is readable before building the
# pipeline; mpimg.imread returns an RGB numpy array.
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# ## Ideas for Lane Detection Pipeline
# **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
#
# `cv2.inRange()` for color selection
# `cv2.fillPoly()` for regions selection
# `cv2.line()` to draw lines on an image given endpoints
# `cv2.addWeighted()` to coadd / overlay two images
# `cv2.cvtColor()` to grayscale or change color
# `cv2.imwrite()` to output images to file
# `cv2.bitwise_and()` to apply a mask to an image
#
# **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
# ## Helper Functions
# Below are some helper functions to help get you started. They should look familiar from the lesson!
# In[3]:
import math
def grayscale(img):
    """Return a single-channel grayscale copy of an RGB image.

    Display the result with plt.imshow(gray, cmap='gray'); without the
    cmap, matplotlib applies a false-color map to one-channel images.
    """
    # Images loaded with cv2.imread() are BGR instead of RGB; use
    # cv2.COLOR_BGR2GRAY in that case.
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray
def canny(img, low_threshold, high_threshold):
    """Detect edges with the Canny algorithm using the given hysteresis
    thresholds (gradients above `high_threshold` start an edge, those
    above `low_threshold` extend one)."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Smooth the image with a square Gaussian kernel of the given
    (odd) size; sigma is derived automatically from the kernel size."""
    blurred = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    return blurred
def region_of_interest(img, vertices):
    """Black out everything outside the polygon defined by `vertices`.

    `vertices` must be a numpy array of integer polygon points.  Pixels
    inside the polygon are kept; everything else becomes zero.
    """
    mask = np.zeros_like(img)
    # A color image needs one fill value per channel; grayscale needs one.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    # Paint the polygon interior with the fill color...
    cv2.fillPoly(mask, vertices, fill_color)
    # ...and keep the input only where the mask is non-zero.
    return cv2.bitwise_and(img, mask)
def draw_lines_new(img, lines, color=(255, 0, 0), thickness=6):
    """Draw one averaged, extrapolated lane line per side onto `img`.

    Hough segments are split by slope sign (positive = left lane,
    negative = right lane in this pipeline's convention), each side's
    slope is averaged, every segment is extrapolated to a fixed top
    (y = 320) and to the bottom of the image using the side's mean slope,
    and the extrapolated x positions are averaged into a single solid
    line per side.  `img` is mutated in place; nothing is returned.

    Fixes over the original: vertical segments (x1 == x2) no longer feed
    divide-by-zero infinities into the slope means, horizontal segments
    are no longer silently lumped into the right lane, and a frame with
    no segments on one side draws nothing instead of producing NaNs.
    (The mutable list default for `color` is also replaced by a tuple.)
    """
    y_top = 320
    y_bottom = img.shape[0]

    left_slopes = []
    right_slopes = []
    segments = []  # (x1, y1, x2, y2, slope), verticals/horizontals removed
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # vertical segment: slope undefined, skip it
            m = (y2 - y1) / (x2 - x1)
            if m > 0:
                left_slopes.append(m)
            elif m < 0:
                right_slopes.append(m)
            else:
                continue  # horizontal segment: belongs to neither lane
            segments.append((x1, y1, x2, y2, m))

    # Without at least one segment per side there is nothing to draw.
    if not left_slopes or not right_slopes:
        return

    m_l = sum(left_slopes) / len(left_slopes)
    m_r = sum(right_slopes) / len(right_slopes)

    left_top_xs, left_bottom_xs = [], []
    right_top_xs, right_bottom_xs = [], []
    for x1, y1, x2, y2, m in segments:
        if m > 0:
            mean_m, top_xs, bottom_xs = m_l, left_top_xs, left_bottom_xs
        else:
            mean_m, top_xs, bottom_xs = m_r, right_top_xs, right_bottom_xs
        # Extrapolate both endpoints to y_top / y_bottom along the side's
        # mean slope, then average the two estimates for this segment.
        top_xs.append(int(((x1 + (y_top - y1) / mean_m) + (x2 + (y_top - y2) / mean_m)) / 2))
        bottom_xs.append(int(((x1 + (y_bottom - y1) / mean_m) + (x2 + (y_bottom - y2) / mean_m)) / 2))

    # Average each side's estimates into a single line and draw it.
    x_top_l = int(sum(left_top_xs) / len(left_top_xs))
    x_bottom_l = int(sum(left_bottom_xs) / len(left_bottom_xs))
    x_top_r = int(sum(right_top_xs) / len(right_top_xs))
    x_bottom_r = int(sum(right_bottom_xs) / len(right_bottom_xs))
    cv2.line(img, (x_top_l, y_top), (x_bottom_l, y_bottom), color, thickness)
    cv2.line(img, (x_top_r, y_top), (x_bottom_r, y_bottom), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run the probabilistic Hough transform on a Canny edge image.

    Returns a new black color image of the same height/width with the
    detected lane lines drawn on it.
    """
    segments = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len, maxLineGap=max_line_gap)
    canvas = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines_new(canvas, segments)
    return canvas
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """Blend the line overlay into the original frame.

    Computes initial_img * α + img * β + γ, where `img` is the mostly
    black output of hough_lines().  Both images must share one shape.
    """
    blended = cv2.addWeighted(initial_img, α, img, β, γ)
    return blended
# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**
# In[4]:
import os
os.listdir("test_images/")
# ## Build a Lane Finding Pipeline
#
#
# Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
#
# Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
# In[18]:
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
def preprocess_image(image_path):
    """Run the full lane-finding pipeline on one image file.

    Reads the image, finds edges, masks a trapezoidal region of interest,
    fits lane lines with a Hough transform, and returns the original
    image with the lane lines blended on top.
    """
    image = mpimg.imread(image_path)
    gray_image = grayscale(image)
    # NOTE(review): the original computed gaussian_blur(gray_image, 5) but
    # fed the *unblurred* gray image to Canny; the dead blur call is
    # dropped here (output unchanged).  Routing the blurred image into
    # Canny was probably the intent -- confirm before changing it, since
    # that would alter the rendered output.
    canny_image = canny(gray_image, low_threshold=100, high_threshold=170)
    # Trapezoid roughly covering the current driving lane.
    vertices = np.array([[(80, image.shape[0]), (450, 320), (490, 320),
                          (image.shape[1], image.shape[0])]], dtype=np.int32)
    roi_image = region_of_interest(canny_image, vertices)
    hough_img = hough_lines(roi_image, rho=2, theta=np.pi / 180, threshold=50,
                            min_line_len=100, max_line_gap=160)
    return weighted_img(hough_img, image, α=0.8, β=1., γ=0.)
def process_test_images(source_folder, destination_folder):
    """Process every image in `source_folder` and write the annotated
    results under the same file names into `destination_folder`."""
    # exist_ok removes the race between the exists() check and makedirs().
    os.makedirs(destination_folder, exist_ok=True)
    for file_name in os.listdir(source_folder):
        output = preprocess_image(os.path.join(source_folder, file_name))
        # matplotlib delivers RGB; OpenCV writes BGR, so convert on save.
        cv2.imwrite(os.path.join(destination_folder, file_name),
                    cv2.cvtColor(output, cv2.COLOR_RGB2BGR))
# Run the pipeline over every bundled test image.
process_test_images('test_images','test_images_output')
# In[19]:
# In[20]:
os.listdir('test_images')
# In[21]:
# Checking in an image
plt.figure(figsize=(15,8))
plt.subplot(121)
image = mpimg.imread('test_images/solidYellowCurve.jpg')
plt.imshow(image)
plt.title('Original image')
plt.subplot(122)
# NOTE(review): the "output" panel loads whiteCarLaneSwitch.jpg while the
# "original" panel shows solidYellowCurve.jpg -- the two panels compare
# different photos; one of the file names is probably a leftover typo.
image = mpimg.imread('test_images_output/whiteCarLaneSwitch.jpg')
plt.imshow(image)
plt.title('Output image')
plt.show()
# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
#
# `solidWhiteRight.mp4`
#
# `solidYellowLeft.mp4`
#
# **Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# ```
# **Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
# In[9]:
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
# In[10]:
def process_image(image):
    """moviepy frame handler: return the frame with lane lines drawn.

    The returned image must be a 3-channel color image with the same
    shape as the input frame.
    """
    gray_image = grayscale(image)
    # NOTE(review): the original also computed gaussian_blur(gray_image, 5)
    # but never used it -- Canny ran on the unblurred gray frame.  The
    # dead call is removed here (output unchanged); feeding the blur into
    # Canny was probably the intent, but that would change the rendered
    # video, so confirm first.
    canny_image = canny(gray_image, low_threshold=100, high_threshold=170)
    # Trapezoidal region of interest covering the current lane.
    vertices = np.array([[(80, image.shape[0]), (450, 320), (490, 320),
                          (image.shape[1], image.shape[0])]], dtype=np.int32)
    roi_image = region_of_interest(canny_image, vertices)
    hough_img = hough_lines(roi_image, rho=2, theta=np.pi / 180, threshold=50,
                            min_line_len=100, max_line_gap=160)
    return weighted_img(hough_img, image, α=0.8, β=1., γ=0.)
# Let's try the one with the solid white lane on the right first ...
# In[11]:
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
# fl_image applies process_image to every frame of the clip.
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
# ## Improve the draw_lines() function
#
# **At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
#
# **Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
# Now for the one with the solid yellow lane on the left. This one's more tricky!
# In[13]:
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
# Render the annotated yellow-lane video with the same frame handler.
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
def process_image(image):
    """Frame handler for the harder 'challenge' video (redefines the
    earlier process_image).

    Uses a region of interest hand-tuned to that clip's 1280x720 framing
    and falls back to returning the unmodified frame whenever no lane
    lines can be fitted, so the video render never aborts mid-clip.
    """
    gray_image = grayscale(image)
    # NOTE(review): the original also computed an unused Gaussian blur and
    # dumped two debug images (image_test.jpg / image_test_canny.jpg) to
    # disk on *every frame*; both leftovers are removed here.  Canny still
    # runs on the unblurred gray frame, so the rendered output matches.
    canny_image = canny(gray_image, low_threshold=100, high_threshold=170)
    # Region of interest hand-tuned for the challenge video's framing.
    vertices = np.array([[(300, 680), (620, 460), (720, 460), (1085, 673)]], dtype=np.int32)
    roi_image = region_of_interest(canny_image, vertices)
    try:
        hough_img = hough_lines(roi_image, rho=2, theta=np.pi / 180, threshold=50,
                                min_line_len=100, max_line_gap=160)
        return weighted_img(hough_img, image, α=0.8, β=1., γ=0.)
    except Exception:
        # No usable segments in this frame: keep the raw frame instead of
        # crashing the whole render.  (Was a bare 'except:'.)
        return image
# In[16]:
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
# Uses the redefined process_image tuned for the challenge clip above.
challenge_clip = clip3.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
| [
"chinmaydasbat@gmail.com"
] | chinmaydasbat@gmail.com |
9e5a7e72a9833ec32ef8a92895d30c8b3d688938 | 66b2bccf07754119b9eed320d9f7715fa52f6d44 | /scrapy_learn1/utils/dbbaseutil.py | 2c512ebc43bf825ae27711e81ddf171a5572ea7b | [] | no_license | wangsanshi123/scrapy_learn1 | 678c3708e3765ab26cff8799a55d0153abe9da55 | 16bf80a634484842e9b66db9138c2f4c8769d087 | refs/heads/master | 2020-03-08T07:42:56.260798 | 2018-04-04T03:26:58 | 2018-04-04T03:26:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,566 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import warnings
import sqlite3
import pymysql
warnings.filterwarnings('ignore')
__author__ = 'yangsheng'
class DBUtil(object):
    """Thin convenience wrapper around a sqlite3 connection."""

    def __init__(self, dbpath):
        """Open (or create) the SQLite database file at `dbpath`."""
        self._dbpath = dbpath
        self._conn = sqlite3.connect(self._dbpath)

    def cursor_execute_one(self, sql):
        """Run a query and yield its result rows one at a time.

        Streams rows instead of loading the whole result set; the cursor
        is closed when the rows are exhausted or the generator is
        abandoned (the original leaked it).
        """
        cursor = self._conn.cursor()
        try:
            cursor.execute(sql)
            # sqlite3 cursors are iterable row by row.
            for row in cursor:
                yield row
        finally:
            cursor.close()

    def cursor_execute(self, sql):
        """Run a query and return all rows as a list of tuples."""
        cursor = self._conn.cursor()
        cursor.execute(sql)
        datas = cursor.fetchall()
        cursor.close()
        return datas

    def cursor_execute_nosearch(self, sql, comm=False, datas=None):
        """Execute a non-query statement (DDL/DML); returns nothing.

        :param sql: SQL statement, optionally with '?' placeholders
        :param comm: commit the transaction after executing when True
        :param datas: parameter sequence bound to the placeholders
        """
        cursor = self._conn.cursor()
        try:
            if datas:
                cursor.execute(sql, datas)
            else:
                cursor.execute(sql)
        finally:
            # The original never closed this cursor.
            cursor.close()
        if comm:
            self._conn.commit()

    def commit(self):
        """Commit any pending changes on the shared connection."""
        self._conn.commit()
class MysqlUtil(object):
    """Small convenience wrapper around a pymysql connection.

    Usable as a context manager: the connection is closed on exit.
    """

    def __init__(self, dbhost, dbport, dbname, dbuser, dbpwd, charset='utf8'):
        """Open a MySQL connection with the given credentials."""
        self.dbhost = dbhost
        self.dbport = dbport
        self.dbname = dbname
        self.dbuser = dbuser
        self.dbpwd = dbpwd
        self.charset = charset
        self._conn = pymysql.connect(host=dbhost, port=dbport, db=dbname,
                                     user=dbuser, passwd=dbpwd, charset=charset)

    def cursor_execute_nosearch(self, sql, comm=False):
        """Execute a non-query statement; returns nothing.

        :param sql: the SQL statement to run
        :param comm: commit the transaction afterwards when True
        """
        cur = self._conn.cursor()
        cur.execute(sql)
        cur.close()
        if comm:
            self._conn.commit()

    def cursor_execute(self, sql):
        """Run a query and return every row (tuple of tuples)."""
        cur = self._conn.cursor()
        cur.execute(sql)
        rows = cur.fetchall()
        cur.close()
        return rows

    def commit(self):
        """Commit the current transaction."""
        self._conn.commit()

    def close(self):
        """Close the underlying connection."""
        self._conn.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
| [
"118319592@qq.com"
] | 118319592@qq.com |
b751373ce90366b0f78e58e0b5902ed2dfc9ceb2 | 69d8a912212c1355470c298ac4f2fb716aed9982 | /proyecto/adopcion/models.py | 1f6b436c1ecfcf697d872172f1543322344ffd27 | [] | no_license | juampiludu/almanimal | b7fa92cb34cd8b908ef609036fb647fd1ca328a7 | 033037823252a13fd20514c226dd100837fdc610 | refs/heads/master | 2023-01-20T17:46:57.671217 | 2020-12-02T04:05:03 | 2020-12-02T04:05:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,022 | py | from django.db import models
from django.contrib.auth.models import User
from django.utils.html import mark_safe
from django.templatetags.static import static
# Create your models here.
class Animal(models.Model):
    """Adoptable animal published by a staff member."""

    class Sexo(models.TextChoices):
        MACHO = 'Macho'
        HEMBRA = 'Hembra'
        INDEFINIDO = 'Indefinido'

    class Tamaño(models.TextChoices):
        GRANDE = 'Grande'
        MEDIANO = 'Mediano'
        CHICO = 'Chico'

    class TipoAnimal(models.TextChoices):
        PERRO = 'Perro'
        GATO = 'Gato'
        OTRO = 'Otro'

    class Tiempo(models.TextChoices):
        # Unit for the `edad` (age) value, e.g. "3 Meses".
        DIAS = 'Días'
        SEMANAS = 'Semanas'
        MESES = 'Meses'
        AÑOS = 'Años'

    class Meta:
        verbose_name = 'Animal'
        verbose_name_plural = 'Animales'

    def __str__(self):
        return self.nombre

    # Only staff users may create/own an adoption entry.
    dueño = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='Creador', limit_choices_to={'is_staff':True})
    nombre = models.CharField(verbose_name='Nombre', max_length=255, null=False, blank=False)
    tipo_animal = models.CharField(verbose_name='Tipo de animal', max_length=40, choices=TipoAnimal.choices, null=False, blank=False)
    raza = models.CharField(verbose_name='Raza', max_length=255, null=False, blank=False)
    tamaño = models.CharField(verbose_name='Tamaño', max_length=40, choices=Tamaño.choices, null=False, blank=False)
    foto1 = models.ImageField(verbose_name="Foto 1", blank=True, null=True, upload_to='foto1')
    foto2 = models.ImageField(verbose_name="Foto 2", blank=True, null=True, upload_to='foto2')
    edad = models.IntegerField(verbose_name='Edad', null=False, blank=False)
    tiempo = models.CharField("Tiempo", max_length=50, choices=Tiempo.choices, null=False, blank=False)
    sexo = models.CharField(verbose_name='Sexo', max_length=40, choices=Sexo.choices, null=False, blank=False)
    descripcion = models.TextField(verbose_name='Descripcion', null=False, blank=False)
    caracter = models.TextField(verbose_name='Carácter', null=True, blank=True)
    vacunado = models.BooleanField(verbose_name='Vacunado', null=False, blank=False)
    desparasitado = models.BooleanField(verbose_name='Desparasitado', null=False, blank=False)
    castrado = models.BooleanField(verbose_name='Castrado', null=False, blank=False)
    comentario = models.TextField(verbose_name='Comentarios', null=True, blank=True)
    telefono = models.CharField("Teléfono de contacto", max_length=50)
    email = models.EmailField("Email de contacto", max_length=254)
    publicado = models.BooleanField(verbose_name='Publicado', default=False)
    creado = models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')
    actualizado = models.DateTimeField(auto_now=True,verbose_name='Ultima actualización')

    @staticmethod
    def _thumbnail(photo):
        """Render a 100x100 <img> tag for `photo`, falling back to the
        bundled placeholder image when the field is empty."""
        if photo and hasattr(photo, 'url'):
            src = photo.url
        else:
            src = static("/adopcion/img/no-image.png")
        # src is quoted: the original emitted an unquoted attribute value,
        # which breaks as soon as the URL contains a space or quote.
        return mark_safe(f'<img style="object-fit:cover; height:100px; width:100px" src="{src}" />')

    def image_tag(self):
        """Admin-list thumbnail for the first photo."""
        return self._thumbnail(self.foto1)
    image_tag.short_description = ''

    def image_tag2(self):
        """Admin-list thumbnail for the second photo."""
        return self._thumbnail(self.foto2)
    image_tag2.short_description = ''

    def save(self, *args, **kwargs):
        """Delete replaced image files from storage before saving.

        Without this, updating foto1/foto2 would orphan the old files on
        disk.  On first save there is no previous row, so DoesNotExist is
        expected and ignored (the original used a bare 'except:', which
        also hid genuine errors such as storage failures).
        """
        try:
            previous = Animal.objects.get(id=self.id)
            if previous.foto1 != self.foto1:
                previous.foto1.delete()
            if previous.foto2 != self.foto2:
                previous.foto2.delete()
        except Animal.DoesNotExist:
            pass
        super(Animal, self).save(*args, **kwargs)
| [
"juanzakka@gmail.com"
] | juanzakka@gmail.com |
bb8c3ba0a02d52add159b61061125416627d40e6 | 33c19c13824db26e455177b18864963ea40d8cba | /trtools/tools/tests/test_pload.py | 660140b711338b8cb729a7529b53cf2ab0d9f7a7 | [
"MIT"
] | permissive | jeffamaxey/trtools | 6767375d04d4c6702f61d0ad4839e4ebe6ffe3f8 | 39db1d72269f43e7ba380da5ad28d565137089ed | refs/heads/master | 2023-08-10T23:44:17.640063 | 2021-09-17T18:21:54 | 2021-09-17T18:21:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | import trtools.util.testing as tm
import trtools.io.api as trio
import trtools.tools.pload as pload
| [
"dale@dalejung.com"
] | dale@dalejung.com |
96c5e9e11b6540e09bfed4c444788cc0a3fcee75 | 119f503a2786a929db24937c2b91d63ac9c2af72 | /examples/plot_sig_bkg.py | be5db7ccfca401c916b1cf0e9d086ed26b2c7c44 | [
"BSD-3-Clause"
] | permissive | loopylangur/zfit | f1c1e352eca5c1e58fbe276ba4b65b409f0faa6d | 535b970dfb6611ef687a184746b9e191756506ba | refs/heads/master | 2020-12-20T14:41:30.985958 | 2020-01-12T15:28:54 | 2020-01-12T15:28:54 | 236,109,089 | 0 | 0 | null | 2020-01-25T00:59:17 | 2020-01-25T00:59:16 | null | UTF-8 | Python | false | false | 2,592 | py | # Copyright (c) 2019 zfit
# Signal-plus-background fit example: a Gaussian signal over an
# exponential background, fitted with an unbinned NLL and plotted before
# and after the fit.
import numpy as np
import zfit
import matplotlib.pyplot as plt
# create space
obs = zfit.Space("x", limits=(-10, 10))
# parameters
mu = zfit.Parameter("mu", 1., -4, 6)
sigma = zfit.Parameter("sigma", 1., 0.1, 10)
lambd = zfit.Parameter("lambda", -0.06, -1, -0.01)
frac = zfit.Parameter("fraction", 0.3, 0, 1)
# model building, pdf creation
gauss = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)
exponential = zfit.pdf.Exponential(lambd, obs=obs)
model = zfit.pdf.SumPDF([gauss, exponential], fracs=frac)
# data
n_sample = 10000
# NOTE(review): exp_data and gauss_data are sampled but never used below;
# the fitted data comes from the sampler two lines down.
exp_data = exponential.sample(n=n_sample * (1 - frac)).numpy()
gauss_data = gauss.sample(n=n_sample * frac).numpy()
data = model.create_sampler(n_sample, limits=obs)
data.resample()
# Shift the parameters away from the truth used for sampling, so the fit
# below has real work to do.
mu.set_value(0.5)
sigma.set_value(1.2)
lambd.set_value(-0.05)
frac.set_value(0.07)
# plot the data
data_np = data[:, 0].numpy()
color = 'black'
n_bins = 50
linewidth = 2.5
# Scale the unit-normalised pdfs up to the histogram's counts-per-bin.
plot_scaling = n_sample / n_bins * obs.area()
x = np.linspace(-10, 10, 1000)
# plot the pdf BEFORE fitting
plt.figure()
plt.title("Before fitting")
# plot the data
plt.hist(data_np, color=color, bins=n_bins, histtype="stepfilled", alpha=0.1)
plt.hist(data_np, color=color, bins=n_bins, histtype="step")
# plot the pdfs
y = model.pdf(x).numpy()
y_gauss = (gauss.pdf(x) * frac).numpy() # notice the frac!
y_exp = (exponential.pdf(x) * (1 - frac)).numpy() # notice the frac!
plt.plot(x, y * plot_scaling, label="Sum - Model", linewidth=linewidth * 2)
plt.plot(x, y_gauss * plot_scaling, '--', label="Gauss - Signal", linewidth=linewidth)
plt.plot(x, y_exp * plot_scaling, '--', label="Exponential - Background", linewidth=linewidth)
plt.xlabel("Physical observable")
plt.legend()
# create NLL
nll = zfit.loss.UnbinnedNLL(model=model, data=data)
# create a minimizer
minimizer = zfit.minimize.Minuit()
result = minimizer.minimize(nll)
# do the error calculations, here with minos
param_errors = result.error()
plt.figure()
plt.title("After fitting")
# plot the data
plt.hist(data_np, color=color, bins=n_bins, histtype="stepfilled", alpha=0.1)
plt.hist(data_np, color=color, bins=n_bins, histtype="step")
y = model.pdf(x).numpy() # rerun now after the fitting
y_gauss = (gauss.pdf(x) * frac).numpy()
y_exp = (exponential.pdf(x) * (1 - frac)).numpy()
plt.plot(x, y * plot_scaling, label="Sum - Model", linewidth=linewidth * 2)
plt.plot(x, y_gauss * plot_scaling, '--', label="Gauss - Signal", linewidth=linewidth)
plt.plot(x, y_exp * plot_scaling, '--', label="Exponential - Background", linewidth=linewidth)
plt.xlabel("Physical observable")
plt.legend()
plt.show()
| [
"mayou36@jonas.eschle.com"
] | mayou36@jonas.eschle.com |
9ef8ca178f37669d0f1c6165b9589e22eec12759 | 8a6dbabe023deea0d29e666bc4d6d7a53a51f08d | /tests/test_drug_response.py | aa7473787c40a3b048261fec9ad60fc92a61aba5 | [
"CC-BY-NC-ND-4.0",
"Apache-2.0"
] | permissive | pasmopy/breast_cancer | 01d2c1f387364b71fc99a7a5250b0d39decd7575 | f6113dd286476069d1abc7627475e626e5cbeafc | refs/heads/master | 2023-04-10T03:08:52.806513 | 2022-09-21T15:52:03 | 2022-09-21T15:52:03 | 348,209,066 | 7 | 4 | Apache-2.0 | 2022-09-21T15:52:05 | 2021-03-16T04:05:57 | Python | UTF-8 | Python | false | false | 984 | py | import os
import shutil
import pandas as pd
from drug_response.drug.database import CancerCellLineEncyclopedia
def test_create_figs():
for dir in ["dose_response", "activity_area"]:
if os.path.isdir(dir):
shutil.rmtree(dir)
ccle = CancerCellLineEncyclopedia()
erbb_expression_ratio = pd.read_csv(
os.path.join("drug_response", "data", "ErbB_expression_ratio.csv"),
index_col=0,
)
compounds = list(set(ccle.drug_response_data["Compound"]))
for compound in compounds:
ccle.save_all(erbb_expression_ratio, compound)
for dir in ["dose_response", "activity_area"]:
assert os.path.isfile(
os.path.join(
f"{dir}",
f"{ccle._drug2target(compound)}",
f"{ccle._convert_drug_name(compound)}.pdf",
)
)
def test_cleanup():
for dir in ["dose_response", "activity_area"]:
shutil.rmtree(dir)
| [
"31299606+himoto@users.noreply.github.com"
] | 31299606+himoto@users.noreply.github.com |
72587c53ee9d480ee96056750566064d1ab30347 | 4c82c378774437b4fece5865a469485d11dd5c04 | /games/files_directories.py | 013a16b59ba58486ac2ffae7e1b4f38ad5e817c5 | [] | no_license | Cunarefa/Matrix | 54c4bbfd588d5f1a5d5889228be5224b85889538 | 634a793c1554458ab6b9b65014ba3fde279b4c4d | refs/heads/master | 2023-07-25T17:39:02.795840 | 2021-09-10T11:25:31 | 2021-09-10T11:25:31 | 401,263,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | from abc import ABC, abstractmethod
class Component(ABC):
@abstractmethod
def get_size(self):
pass
def add_component(self, component):
pass
class File(Component):
def __init__(self, size):
self.size = size
def __add__(self, other):
return self.size + other.size
@property
def get_size(self):
return self.size
class Directory(Component):
def __init__(self, components):
self.components = components
self.limit = -1
# def __next__(self):
# if self.limit < len(self.components) - 1:
# self.limit += 1
# return self.components[self.limit]
# else:
# raise StopIteration
# def __iter__(self):
# return self
@property
def get_size(self):
result = 0
for i in self.components:
result += i.get_size
return result
def add_component(self, component):
self.components.append(component)
s = File(50)
d = File(20)
q = Directory([s, d])
dir = Directory([s, q])
print(dir.get_size, '\n')
dir.add_component(d)
print(dir.get_size)
| [
"yevgen.yelik@gmail.com"
] | yevgen.yelik@gmail.com |
ec7974d7ba5104cef543b2d76097554593a51e29 | a1d8b5de6a54dc942f63e2e4a946db174cae0996 | /ctci-making-anagrams/main.py | 5af3a78f3b4833c0e58dbb912cc1a9f0c63d7d09 | [
"MIT"
] | permissive | joaojunior/hackerrank | 5dae64166b6fdbec8b7bd2112443fdfde0e64e74 | a5ee0449e791535930b8659dfb7dddcf9e1237de | refs/heads/master | 2021-11-22T07:53:33.978238 | 2021-09-28T00:35:16 | 2021-09-28T00:35:16 | 116,694,043 | 0 | 1 | MIT | 2018-02-21T00:29:09 | 2018-01-08T15:29:26 | Python | UTF-8 | Python | false | false | 354 | py | def make_anagrams(a, b):
d_a = {}
d_b = {}
qty = 0
for c in a:
d_a[c] = d_a.get(c, 0) + 1
for c in b:
d_b[c] = d_b.get(c, 0) + 1
for c, frequency in d_a.items():
qty += abs(frequency - d_b.get(c, 0))
for c, frequency in d_b.items():
if c not in d_a:
qty += frequency
return qty
| [
"jcajcefet@yahoo.com.br"
] | jcajcefet@yahoo.com.br |
6f070a3bd651d8a3d1201e6e7cd264da3ea66a23 | 60ae058efd4d8c1367af3cf7e5b40d868bfef610 | /config/wsgi.py | 808061fc392eb6922590a6c79f928db37899b7d2 | [
"MIT"
] | permissive | munikarmanish/cms | dc766bac59f4b2a9a38b2962ceedb4b8c3e36a27 | 39cbdd89bed7b9de887b0e8ba545e72e440bf1d2 | refs/heads/master | 2021-03-27T14:47:34.809972 | 2017-01-24T04:30:23 | 2017-01-24T04:30:23 | 79,788,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,468 | py | """
WSGI config for Communication Management System project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"munikarmanish@gmail.com"
] | munikarmanish@gmail.com |
adfe0cf1de9c32946f939923d8014e797f2e7db0 | 01fdd206c8c825b30870bdd3f6e75f0aa113b849 | /test/record/parser/test_response_whois_isoc_org_il_property_status_transfer_allowed.py | 3e8870b0af59fcdedef88641297afba471c8dc7e | [
"MIT"
] | permissive | huyphan/pyyawhois | 0fbc5a7d64a53ae6e3393fdc1c7ff0d0ac5f22b5 | 77fb2f73a9c67989f1d41d98f37037406a69d136 | refs/heads/master | 2021-01-23T22:42:55.989651 | 2015-09-19T16:40:06 | 2015-09-19T16:40:06 | 23,335,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.isoc.org.il/property_status_transfer_allowed
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisIsocOrgIlPropertyStatusTransferAllowed(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.isoc.org.il/property_status_transfer_allowed.txt"
host = "whois.isoc.org.il"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'registered')
def test_available(self):
eq_(self.record.available, False)
def test_registered(self):
eq_(self.record.registered, True)
| [
"dachuy@gmail.com"
] | dachuy@gmail.com |
ba5c8b5a65db1e493db1a3389552f2447aac39b0 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/11110011.py | 6a4ef801f0e27b7b31926ae3ec0e52db71d2bbe9 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/11110011.py generated: Wed, 25 Jan 2017 15:25:20
#
# Event Type: 11110011
#
# ASCII decay Descriptor: {[[B0]nos -> mu+ (tau- -> pi+ pi- pi- nu_tau) (K*(892)0 -> K+ pi-)]cc, [[B0]nos -> (tau+ -> pi+ pi- pi+ anti-nu_tau) mu- (K*(892)0 -> K+ pi-)]cc, [[B0]os -> mu- (tau+ -> pi+ pi- pi+ anti-nu_tau) (K*(892)~0 -> K- pi+)]cc, [[B0]os -> (tau- -> pi+ pi- pi- nu_tau) mu+ (K*(892)~0 -> K- pi+)]cc}
#
from Configurables import Generation
Generation().EventType = 11110011
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_Ksttaumu,3pi=DecProdCut,tauolababar.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
cb8dcbaeebb340b0047d76d8fbbc2286bb66e39b | 21e7753732296bfdfb6dd9a9b58c7c6b8d90a1e5 | /ArraysAndStrings/IsUnique/IsUnique.py | 4841949c494375e7ad7ad4044c532c52a0ddc5b6 | [] | no_license | rongfeng-china/python-algorithms-and-data-structures | eb8514b44d7ff97dd7c4deda2d8ea888a5aa8d04 | a69241bb7b684bc7d00acdd46c2fc214f7b61887 | refs/heads/master | 2020-03-13T09:08:13.375870 | 2015-12-11T07:37:30 | 2015-12-11T07:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # Accepts a string and returns true if all chars in the string are
# unique. Returns false otherwise. Assumes strings made up of
# lowercase letters 'a' through 'z'
def isUniqueChars(str):
checker = 0
for c in str:
value = ord(c) - ord('a')
if (checker & (1 << value)) > 0:
return False
checker |= (1 << value)
return True
# Below implementation assumes ASCII strings - 128 unique chars. This
# helps to achieve both O(1) time and space complexities (actually
# O(128))
def isUniqueCharsASCII(str):
if len(str) > 128:
return false
checker = [False] * 128
for c in str:
value = ord(c) # Returns the ASCII for a char
if checker[value] is True:
return False
checker[value] = True
return True
| [
"prathamt@outlook.com"
] | prathamt@outlook.com |
f2b8894c23d54c6d333e01186a92793a177ca940 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4424/codes/1637_2443.py | da74fa2d94940f25db95693ea2d3556c85cc5da1 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # Teste seu código aos poucos.
# Não teste tudo no final, pois fica mais difícil de identificar erros.
# Use as mensagens de erro para corrigir seu código.
from math import*
r = float(input("Digite um numero: "))
h = float(input("Digite um numero: "))
n = float(input("Digite um numero: "))
if(n==1):
x=((pi*h**2)*(3*r-h))/3
if(n==2):
x=((4*pi*r**3)/3)-((pi*h**2)*(3*r-h))/3
print(round(x, 4)) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
06d7833c5cf9a4d4c3c82424abe3000b5fccce9a | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/212441d0-5cc5-11e4-af55-00155d01fe08.py | ce239a839bec9440133bfd44d09eebdbf2016b3e | [
"MIT"
] | permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | #!/usr/bin/python
################################################################################
# 212441d0-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "212441d0-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
dword = cli.get_reg_dword(r'HKLM:\Software\Policies\Microsoft\Windows NT\Terminal Services', 'DisablePasswordSaving')
# Output Lines
self.output = [r'HKLM:\Software\Policies\Microsoft\Windows NT\Terminal Services', ('DisablePasswordSaving=' + str(dword))]
if dword == 1:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Windows NT'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Windows NT\Terminal Services'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Policies\Microsoft\Windows NT\Terminal Services' -name 'DisablePasswordSaving' -value 1 -Type DWord")
| [
"phnomcobra@gmail.com"
] | phnomcobra@gmail.com |
a33256408c609682eb43687b84b9c799e96b48e9 | 7c44b3e06bd9d212b81e1d237c2bf945940b8893 | /numpy_pandas_matplotlib/matplotlib_and_seaborn_part_1/bar_chart_practice.py | c76603b8b5de4ae08b6b6ee2a954a9fd4a89edfe | [] | no_license | sivaneshl/ai_programming_with_python | e89f9faf566b01b844fe83329dd3e54257141397 | 75801197fcc1ebbb827cc9c8cf7c8ab9e373e1e2 | refs/heads/master | 2022-04-11T07:58:50.148433 | 2020-04-05T05:22:16 | 2020-04-05T05:22:16 | 248,581,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,890 | py | # prerequisite package imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
pokemon = pd.read_csv('pokemon.csv')
print(pokemon.head())
# Task 1: There have been quite a few Pokémon introduced over the series' history. How many were introduced in each
# generation? Create a bar chart of these frequencies using the 'generation_id' column.
base_color = sb.color_palette()[0]
# n_pkmn_gen = pokemon.groupby(['generation_id'])['id'].agg(count=np.size)
# sb.barplot(n_pkmn_gen.index.values, n_pkmn_gen['count'], color=base_color)
sb.countplot(data=pokemon[['generation_id','id']], x='generation_id', color=base_color)
plt.show()
# Task 2: Each Pokémon species has one or two 'types' that play a part in its offensive and defensive capabilities.
# How frequent is each type? The code below creates a new dataframe that puts all of the type counts in a single column.
pkmn_types = pokemon.melt(id_vars=['id','species'],
value_vars=['type_1', 'type_2'],
var_name='type_level',
value_name='type').dropna()
# pkmn_types.head()
# Your task is to use this dataframe to create a relative frequency plot of the proportion of Pokémon with each type,
# sorted from most frequent to least. Hint: The sum across bars should be greater than 100%, since many Pokémon have
# two types. Keep this in mind when considering a denominator to compute relative frequencies.
type_counts = pkmn_types['type'].value_counts()
type_order = type_counts.index
n_pokemon = pokemon.shape[0]
max_type_count = type_counts[0]
max_prop = max_type_count / n_pokemon
ticks_prop = np.arange(0, max_prop, 0.02)
tick_names = ['{:0.2f}'.format(x) for x in ticks_prop]
sb.countplot(data=pkmn_types, y='type', color=base_color, order=type_order)
plt.xticks(ticks_prop * n_pokemon, tick_names)
plt.show() | [
"sivaneshl@virtusa.com"
] | sivaneshl@virtusa.com |
d9e20c42cd6f1cd9777d48d2c582d65d0e74ca28 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/1009.py | d384122c55980468d170b2657602724644c3e86d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | t=input()
m=1
while m<=t:
n,d=map(int,raw_input().split())
ti=[]
while d:
d-=1
a,v=map(int,raw_input().split())
ti.append(float(n-a)/float(v))
ti=max(ti)
print "Case #"+str(m)+": {0:.6f}".format(round(float(n)/float(ti),6))
m+=1 | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
5b370a69edfe4fc04c6aecace8d9746361dbb566 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/loadtesting/azure-mgmt-loadtesting/azure/mgmt/loadtesting/operations/__init__.py | 402d17a0616ec39fabd982a5382b8a74612463d5 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 917 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._operations import Operations
from ._quotas_operations import QuotasOperations
from ._load_tests_operations import LoadTestsOperations
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"Operations",
"QuotasOperations",
"LoadTestsOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
2487c049fae1f807d744beb02551e7d28cde2723 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02723/s554502060.py | c72f25fef3a45b0bd9520c9936493221c77df6e5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | def main():
k = input()
print("Yes" if k[2] == k[3] and k[4] == k[5] else "No")
if __name__ == '__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
38ff0aebf3aeaaff4c9604670e7270beb6611cde | 94ca5a956889a1263bb58fc9b8624455503783cd | /page/storagebox/personinfo.py | 686ff5550dfec556681b1a366cf3509647dcb18c | [] | no_license | xmaimiao/wmAPP | 50bfbd5c50982cae0723fa3ce3f3f8c59314403b | b427f0afaccde0d939b275f9f48724d404257f1e | refs/heads/master | 2023-03-27T05:16:31.920995 | 2021-03-29T07:31:11 | 2021-03-29T07:31:11 | 352,554,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | from appium.webdriver.common.mobileby import MobileBy
from page.basepage import BasePage
from page.storagebox.editpage import EditPage
class PersonInfo(BasePage):
edit_ele = (MobileBy.XPATH,'//button[@class="van-button van-button--primary van-button--large van-button--plain van-button--block"]')
def goto_editpage(self):
# print(f"在個人信息頁面打印url1:{self.driver.current_url}")
# print(f"在個人信息頁面打印上下文1:{self.driver.current_context}")
# print(f"在個人信息頁面打印上下文2:{self.driver.current_context}")
# print(f"在個人信息頁面打印窗口句柄:{self.driver.window_handles}")
# print(f"在個人信息頁面打印當前窗口句柄1:{self.driver.current_window_handle}")
self.driver.switch_to_window(self.driver.window_handles[-1])
# print(f"在個人信息頁面打印窗口句柄:{self.driver.window_handles}")
# print(f"在個人信息頁面打印當前窗口句柄2:{self.driver.current_window_handle}")
# print(f"在個人信息頁面打印上下文3:{self.driver.current_context}")
# print(f"在個人信息頁面打印url2:{self.driver.current_url}")
self.find_and_click(self.edit_ele)
return EditPage(self.driver) | [
"765120214@qq.com"
] | 765120214@qq.com |
27a9b0abcecd91ef70deff9da8a75cd44fa09432 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/web/web_app_slot.py | c8e01b97cda8ccdfa62d6c3523f44080e0913232 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57,034 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WebAppSlotArgs', 'WebAppSlot']
@pulumi.input_type
class WebAppSlotArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
client_affinity_enabled: Optional[pulumi.Input[bool]] = None,
client_cert_enabled: Optional[pulumi.Input[bool]] = None,
client_cert_exclusion_paths: Optional[pulumi.Input[str]] = None,
client_cert_mode: Optional[pulumi.Input['ClientCertMode']] = None,
cloning_info: Optional[pulumi.Input['CloningInfoArgs']] = None,
container_size: Optional[pulumi.Input[int]] = None,
custom_domain_verification_id: Optional[pulumi.Input[str]] = None,
daily_memory_time_quota: Optional[pulumi.Input[int]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
host_name_ssl_states: Optional[pulumi.Input[Sequence[pulumi.Input['HostNameSslStateArgs']]]] = None,
host_names_disabled: Optional[pulumi.Input[bool]] = None,
hosting_environment_profile: Optional[pulumi.Input['HostingEnvironmentProfileArgs']] = None,
https_only: Optional[pulumi.Input[bool]] = None,
hyper_v: Optional[pulumi.Input[bool]] = None,
identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']] = None,
is_xenon: Optional[pulumi.Input[bool]] = None,
key_vault_reference_identity: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
redundancy_mode: Optional[pulumi.Input['RedundancyMode']] = None,
reserved: Optional[pulumi.Input[bool]] = None,
scm_site_also_stopped: Optional[pulumi.Input[bool]] = None,
server_farm_id: Optional[pulumi.Input[str]] = None,
site_config: Optional[pulumi.Input['SiteConfigArgs']] = None,
slot: Optional[pulumi.Input[str]] = None,
storage_account_required: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_network_subnet_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a WebAppSlot resource.
:param pulumi.Input[str] name: Unique name of the app to create or update. To create or update a deployment slot, use the {slot} parameter.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[bool] client_affinity_enabled: <code>true</code> to enable client affinity; <code>false</code> to stop sending session affinity cookies, which route client requests in the same session to the same instance. Default is <code>true</code>.
:param pulumi.Input[bool] client_cert_enabled: <code>true</code> to enable client certificate authentication (TLS mutual authentication); otherwise, <code>false</code>. Default is <code>false</code>.
:param pulumi.Input[str] client_cert_exclusion_paths: client certificate authentication comma-separated exclusion paths
:param pulumi.Input['ClientCertMode'] client_cert_mode: This composes with ClientCertEnabled setting.
- ClientCertEnabled: false means ClientCert is ignored.
- ClientCertEnabled: true and ClientCertMode: Required means ClientCert is required.
- ClientCertEnabled: true and ClientCertMode: Optional means ClientCert is optional or accepted.
:param pulumi.Input['CloningInfoArgs'] cloning_info: If specified during app creation, the app is cloned from a source app.
:param pulumi.Input[int] container_size: Size of the function container.
:param pulumi.Input[str] custom_domain_verification_id: Unique identifier that verifies the custom domains assigned to the app. Customer will add this id to a txt record for verification.
:param pulumi.Input[int] daily_memory_time_quota: Maximum allowed daily memory-time quota (applicable on dynamic apps only).
:param pulumi.Input[bool] enabled: <code>true</code> if the app is enabled; otherwise, <code>false</code>. Setting this value to false disables the app (takes the app offline).
:param pulumi.Input[Sequence[pulumi.Input['HostNameSslStateArgs']]] host_name_ssl_states: Hostname SSL states are used to manage the SSL bindings for app's hostnames.
:param pulumi.Input[bool] host_names_disabled: <code>true</code> to disable the public hostnames of the app; otherwise, <code>false</code>.
If <code>true</code>, the app is only accessible via API management process.
:param pulumi.Input['HostingEnvironmentProfileArgs'] hosting_environment_profile: App Service Environment to use for the app.
:param pulumi.Input[bool] https_only: HttpsOnly: configures a web site to accept only https requests. Issues redirect for
http requests
:param pulumi.Input[bool] hyper_v: Hyper-V sandbox.
:param pulumi.Input['ManagedServiceIdentityArgs'] identity: Managed service identity.
:param pulumi.Input[bool] is_xenon: Obsolete: Hyper-V sandbox.
:param pulumi.Input[str] key_vault_reference_identity: Identity to use for Key Vault Reference authentication.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] location: Resource Location.
:param pulumi.Input['RedundancyMode'] redundancy_mode: Site redundancy mode
:param pulumi.Input[bool] reserved: <code>true</code> if reserved; otherwise, <code>false</code>.
:param pulumi.Input[bool] scm_site_also_stopped: <code>true</code> to stop SCM (KUDU) site when the app is stopped; otherwise, <code>false</code>. The default is <code>false</code>.
:param pulumi.Input[str] server_farm_id: Resource ID of the associated App Service plan, formatted as: "/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:param pulumi.Input['SiteConfigArgs'] site_config: Configuration of the app.
:param pulumi.Input[str] slot: Name of the deployment slot to create or update. The name 'production' is reserved.
:param pulumi.Input[bool] storage_account_required: Checks if Customer provided storage account is required
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] virtual_network_subnet_id: Azure Resource Manager ID of the Virtual network and subnet to be joined by Regional VNET Integration.
This must be of the form /subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if client_affinity_enabled is not None:
pulumi.set(__self__, "client_affinity_enabled", client_affinity_enabled)
if client_cert_enabled is not None:
pulumi.set(__self__, "client_cert_enabled", client_cert_enabled)
if client_cert_exclusion_paths is not None:
pulumi.set(__self__, "client_cert_exclusion_paths", client_cert_exclusion_paths)
if client_cert_mode is not None:
pulumi.set(__self__, "client_cert_mode", client_cert_mode)
if cloning_info is not None:
pulumi.set(__self__, "cloning_info", cloning_info)
if container_size is not None:
pulumi.set(__self__, "container_size", container_size)
if custom_domain_verification_id is not None:
pulumi.set(__self__, "custom_domain_verification_id", custom_domain_verification_id)
if daily_memory_time_quota is not None:
pulumi.set(__self__, "daily_memory_time_quota", daily_memory_time_quota)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if host_name_ssl_states is not None:
pulumi.set(__self__, "host_name_ssl_states", host_name_ssl_states)
if host_names_disabled is not None:
pulumi.set(__self__, "host_names_disabled", host_names_disabled)
if hosting_environment_profile is not None:
pulumi.set(__self__, "hosting_environment_profile", hosting_environment_profile)
if https_only is not None:
pulumi.set(__self__, "https_only", https_only)
if hyper_v is None:
hyper_v = False
if hyper_v is not None:
pulumi.set(__self__, "hyper_v", hyper_v)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if is_xenon is None:
is_xenon = False
if is_xenon is not None:
pulumi.set(__self__, "is_xenon", is_xenon)
if key_vault_reference_identity is not None:
pulumi.set(__self__, "key_vault_reference_identity", key_vault_reference_identity)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if location is not None:
pulumi.set(__self__, "location", location)
if redundancy_mode is not None:
pulumi.set(__self__, "redundancy_mode", redundancy_mode)
if reserved is None:
reserved = False
if reserved is not None:
pulumi.set(__self__, "reserved", reserved)
if scm_site_also_stopped is None:
scm_site_also_stopped = False
if scm_site_also_stopped is not None:
pulumi.set(__self__, "scm_site_also_stopped", scm_site_also_stopped)
if server_farm_id is not None:
pulumi.set(__self__, "server_farm_id", server_farm_id)
if site_config is not None:
pulumi.set(__self__, "site_config", site_config)
if slot is not None:
pulumi.set(__self__, "slot", slot)
if storage_account_required is not None:
pulumi.set(__self__, "storage_account_required", storage_account_required)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if virtual_network_subnet_id is not None:
pulumi.set(__self__, "virtual_network_subnet_id", virtual_network_subnet_id)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Unique name of the app to create or update. To create or update a deployment slot, use the {slot} parameter.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group to which the resource belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="clientAffinityEnabled")
def client_affinity_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
<code>true</code> to enable client affinity; <code>false</code> to stop sending session affinity cookies, which route client requests in the same session to the same instance. Default is <code>true</code>.
"""
return pulumi.get(self, "client_affinity_enabled")
@client_affinity_enabled.setter
def client_affinity_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "client_affinity_enabled", value)
    # Generated input-property accessors: each getter/setter pair proxies the
    # corresponding snake_case key in the pulumi property bag (pulumi.get/pulumi.set).
    @property
    @pulumi.getter(name="clientCertEnabled")
    def client_cert_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        <code>true</code> to enable client certificate authentication (TLS mutual authentication); otherwise, <code>false</code>. Default is <code>false</code>.
        """
        return pulumi.get(self, "client_cert_enabled")
    @client_cert_enabled.setter
    def client_cert_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "client_cert_enabled", value)
    @property
    @pulumi.getter(name="clientCertExclusionPaths")
    def client_cert_exclusion_paths(self) -> Optional[pulumi.Input[str]]:
        """
        client certificate authentication comma-separated exclusion paths
        """
        return pulumi.get(self, "client_cert_exclusion_paths")
    @client_cert_exclusion_paths.setter
    def client_cert_exclusion_paths(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_cert_exclusion_paths", value)
    @property
    @pulumi.getter(name="clientCertMode")
    def client_cert_mode(self) -> Optional[pulumi.Input['ClientCertMode']]:
        """
        This composes with ClientCertEnabled setting.
        - ClientCertEnabled: false means ClientCert is ignored.
        - ClientCertEnabled: true and ClientCertMode: Required means ClientCert is required.
        - ClientCertEnabled: true and ClientCertMode: Optional means ClientCert is optional or accepted.
        """
        return pulumi.get(self, "client_cert_mode")
    @client_cert_mode.setter
    def client_cert_mode(self, value: Optional[pulumi.Input['ClientCertMode']]):
        pulumi.set(self, "client_cert_mode", value)
    @property
    @pulumi.getter(name="cloningInfo")
    def cloning_info(self) -> Optional[pulumi.Input['CloningInfoArgs']]:
        """
        If specified during app creation, the app is cloned from a source app.
        """
        return pulumi.get(self, "cloning_info")
    @cloning_info.setter
    def cloning_info(self, value: Optional[pulumi.Input['CloningInfoArgs']]):
        pulumi.set(self, "cloning_info", value)
    @property
    @pulumi.getter(name="containerSize")
    def container_size(self) -> Optional[pulumi.Input[int]]:
        """
        Size of the function container.
        """
        return pulumi.get(self, "container_size")
    @container_size.setter
    def container_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "container_size", value)
    @property
    @pulumi.getter(name="customDomainVerificationId")
    def custom_domain_verification_id(self) -> Optional[pulumi.Input[str]]:
        """
        Unique identifier that verifies the custom domains assigned to the app. Customer will add this id to a txt record for verification.
        """
        return pulumi.get(self, "custom_domain_verification_id")
    @custom_domain_verification_id.setter
    def custom_domain_verification_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "custom_domain_verification_id", value)
    @property
    @pulumi.getter(name="dailyMemoryTimeQuota")
    def daily_memory_time_quota(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum allowed daily memory-time quota (applicable on dynamic apps only).
        """
        return pulumi.get(self, "daily_memory_time_quota")
    @daily_memory_time_quota.setter
    def daily_memory_time_quota(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "daily_memory_time_quota", value)
    # Generated input-property accessors backed by the pulumi property bag;
    # camelCase wire names are mapped via @pulumi.getter(name=...).
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        <code>true</code> if the app is enabled; otherwise, <code>false</code>. Setting this value to false disables the app (takes the app offline).
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="hostNameSslStates")
    def host_name_ssl_states(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HostNameSslStateArgs']]]]:
        """
        Hostname SSL states are used to manage the SSL bindings for app's hostnames.
        """
        return pulumi.get(self, "host_name_ssl_states")
    @host_name_ssl_states.setter
    def host_name_ssl_states(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HostNameSslStateArgs']]]]):
        pulumi.set(self, "host_name_ssl_states", value)
    @property
    @pulumi.getter(name="hostNamesDisabled")
    def host_names_disabled(self) -> Optional[pulumi.Input[bool]]:
        """
        <code>true</code> to disable the public hostnames of the app; otherwise, <code>false</code>.
        If <code>true</code>, the app is only accessible via API management process.
        """
        return pulumi.get(self, "host_names_disabled")
    @host_names_disabled.setter
    def host_names_disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "host_names_disabled", value)
    @property
    @pulumi.getter(name="hostingEnvironmentProfile")
    def hosting_environment_profile(self) -> Optional[pulumi.Input['HostingEnvironmentProfileArgs']]:
        """
        App Service Environment to use for the app.
        """
        return pulumi.get(self, "hosting_environment_profile")
    @hosting_environment_profile.setter
    def hosting_environment_profile(self, value: Optional[pulumi.Input['HostingEnvironmentProfileArgs']]):
        pulumi.set(self, "hosting_environment_profile", value)
    @property
    @pulumi.getter(name="httpsOnly")
    def https_only(self) -> Optional[pulumi.Input[bool]]:
        """
        HttpsOnly: configures a web site to accept only https requests. Issues redirect for
        http requests
        """
        return pulumi.get(self, "https_only")
    @https_only.setter
    def https_only(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "https_only", value)
    @property
    @pulumi.getter(name="hyperV")
    def hyper_v(self) -> Optional[pulumi.Input[bool]]:
        """
        Hyper-V sandbox.
        """
        return pulumi.get(self, "hyper_v")
    @hyper_v.setter
    def hyper_v(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "hyper_v", value)
    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]:
        """
        Managed service identity.
        """
        return pulumi.get(self, "identity")
    @identity.setter
    def identity(self, value: Optional[pulumi.Input['ManagedServiceIdentityArgs']]):
        pulumi.set(self, "identity", value)
    # Generated input-property accessors backed by the pulumi property bag.
    @property
    @pulumi.getter(name="isXenon")
    def is_xenon(self) -> Optional[pulumi.Input[bool]]:
        """
        Obsolete: Hyper-V sandbox.
        """
        return pulumi.get(self, "is_xenon")
    @is_xenon.setter
    def is_xenon(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_xenon", value)
    @property
    @pulumi.getter(name="keyVaultReferenceIdentity")
    def key_vault_reference_identity(self) -> Optional[pulumi.Input[str]]:
        """
        Identity to use for Key Vault Reference authentication.
        """
        return pulumi.get(self, "key_vault_reference_identity")
    @key_vault_reference_identity.setter
    def key_vault_reference_identity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_vault_reference_identity", value)
    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Location.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="redundancyMode")
    def redundancy_mode(self) -> Optional[pulumi.Input['RedundancyMode']]:
        """
        Site redundancy mode
        """
        return pulumi.get(self, "redundancy_mode")
    @redundancy_mode.setter
    def redundancy_mode(self, value: Optional[pulumi.Input['RedundancyMode']]):
        pulumi.set(self, "redundancy_mode", value)
    @property
    @pulumi.getter
    def reserved(self) -> Optional[pulumi.Input[bool]]:
        """
        <code>true</code> if reserved; otherwise, <code>false</code>.
        """
        return pulumi.get(self, "reserved")
    @reserved.setter
    def reserved(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "reserved", value)
    # Generated input-property accessors backed by the pulumi property bag.
    @property
    @pulumi.getter(name="scmSiteAlsoStopped")
    def scm_site_also_stopped(self) -> Optional[pulumi.Input[bool]]:
        """
        <code>true</code> to stop SCM (KUDU) site when the app is stopped; otherwise, <code>false</code>. The default is <code>false</code>.
        """
        return pulumi.get(self, "scm_site_also_stopped")
    @scm_site_also_stopped.setter
    def scm_site_also_stopped(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "scm_site_also_stopped", value)
    @property
    @pulumi.getter(name="serverFarmId")
    def server_farm_id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID of the associated App Service plan, formatted as: "/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
        """
        return pulumi.get(self, "server_farm_id")
    @server_farm_id.setter
    def server_farm_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "server_farm_id", value)
    @property
    @pulumi.getter(name="siteConfig")
    def site_config(self) -> Optional[pulumi.Input['SiteConfigArgs']]:
        """
        Configuration of the app.
        """
        return pulumi.get(self, "site_config")
    @site_config.setter
    def site_config(self, value: Optional[pulumi.Input['SiteConfigArgs']]):
        pulumi.set(self, "site_config", value)
    @property
    @pulumi.getter
    def slot(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the deployment slot to create or update. The name 'production' is reserved.
        """
        return pulumi.get(self, "slot")
    @slot.setter
    def slot(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "slot", value)
    @property
    @pulumi.getter(name="storageAccountRequired")
    def storage_account_required(self) -> Optional[pulumi.Input[bool]]:
        """
        Checks if Customer provided storage account is required
        """
        return pulumi.get(self, "storage_account_required")
    @storage_account_required.setter
    def storage_account_required(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "storage_account_required", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="virtualNetworkSubnetId")
    def virtual_network_subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        Azure Resource Manager ID of the Virtual network and subnet to be joined by Regional VNET Integration.
        This must be of the form /subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}
        """
        return pulumi.get(self, "virtual_network_subnet_id")
    @virtual_network_subnet_id.setter
    def virtual_network_subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_subnet_id", value)
class WebAppSlot(pulumi.CustomResource):
    """
    A web app, a mobile app backend, or an API app (deployment slot resource).
    API Version: 2020-12-01.
    """
    # Typing-only overload: the body is `...`; actual construction is dispatched
    # by the real __init__ below to _internal_init.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 client_affinity_enabled: Optional[pulumi.Input[bool]] = None,
                 client_cert_enabled: Optional[pulumi.Input[bool]] = None,
                 client_cert_exclusion_paths: Optional[pulumi.Input[str]] = None,
                 client_cert_mode: Optional[pulumi.Input['ClientCertMode']] = None,
                 cloning_info: Optional[pulumi.Input[pulumi.InputType['CloningInfoArgs']]] = None,
                 container_size: Optional[pulumi.Input[int]] = None,
                 custom_domain_verification_id: Optional[pulumi.Input[str]] = None,
                 daily_memory_time_quota: Optional[pulumi.Input[int]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 host_name_ssl_states: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HostNameSslStateArgs']]]]] = None,
                 host_names_disabled: Optional[pulumi.Input[bool]] = None,
                 hosting_environment_profile: Optional[pulumi.Input[pulumi.InputType['HostingEnvironmentProfileArgs']]] = None,
                 https_only: Optional[pulumi.Input[bool]] = None,
                 hyper_v: Optional[pulumi.Input[bool]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
                 is_xenon: Optional[pulumi.Input[bool]] = None,
                 key_vault_reference_identity: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 redundancy_mode: Optional[pulumi.Input['RedundancyMode']] = None,
                 reserved: Optional[pulumi.Input[bool]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 scm_site_also_stopped: Optional[pulumi.Input[bool]] = None,
                 server_farm_id: Optional[pulumi.Input[str]] = None,
                 site_config: Optional[pulumi.Input[pulumi.InputType['SiteConfigArgs']]] = None,
                 slot: Optional[pulumi.Input[str]] = None,
                 storage_account_required: Optional[pulumi.Input[bool]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_network_subnet_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A web app, a mobile app backend, or an API app.
        API Version: 2020-12-01.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] client_affinity_enabled: <code>true</code> to enable client affinity; <code>false</code> to stop sending session affinity cookies, which route client requests in the same session to the same instance. Default is <code>true</code>.
        :param pulumi.Input[bool] client_cert_enabled: <code>true</code> to enable client certificate authentication (TLS mutual authentication); otherwise, <code>false</code>. Default is <code>false</code>.
        :param pulumi.Input[str] client_cert_exclusion_paths: client certificate authentication comma-separated exclusion paths
        :param pulumi.Input['ClientCertMode'] client_cert_mode: This composes with ClientCertEnabled setting.
               - ClientCertEnabled: false means ClientCert is ignored.
               - ClientCertEnabled: true and ClientCertMode: Required means ClientCert is required.
               - ClientCertEnabled: true and ClientCertMode: Optional means ClientCert is optional or accepted.
        :param pulumi.Input[pulumi.InputType['CloningInfoArgs']] cloning_info: If specified during app creation, the app is cloned from a source app.
        :param pulumi.Input[int] container_size: Size of the function container.
        :param pulumi.Input[str] custom_domain_verification_id: Unique identifier that verifies the custom domains assigned to the app. Customer will add this id to a txt record for verification.
        :param pulumi.Input[int] daily_memory_time_quota: Maximum allowed daily memory-time quota (applicable on dynamic apps only).
        :param pulumi.Input[bool] enabled: <code>true</code> if the app is enabled; otherwise, <code>false</code>. Setting this value to false disables the app (takes the app offline).
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HostNameSslStateArgs']]]] host_name_ssl_states: Hostname SSL states are used to manage the SSL bindings for app's hostnames.
        :param pulumi.Input[bool] host_names_disabled: <code>true</code> to disable the public hostnames of the app; otherwise, <code>false</code>.
               If <code>true</code>, the app is only accessible via API management process.
        :param pulumi.Input[pulumi.InputType['HostingEnvironmentProfileArgs']] hosting_environment_profile: App Service Environment to use for the app.
        :param pulumi.Input[bool] https_only: HttpsOnly: configures a web site to accept only https requests. Issues redirect for
               http requests
        :param pulumi.Input[bool] hyper_v: Hyper-V sandbox.
        :param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: Managed service identity.
        :param pulumi.Input[bool] is_xenon: Obsolete: Hyper-V sandbox.
        :param pulumi.Input[str] key_vault_reference_identity: Identity to use for Key Vault Reference authentication.
        :param pulumi.Input[str] kind: Kind of resource.
        :param pulumi.Input[str] location: Resource Location.
        :param pulumi.Input[str] name: Unique name of the app to create or update. To create or update a deployment slot, use the {slot} parameter.
        :param pulumi.Input['RedundancyMode'] redundancy_mode: Site redundancy mode
        :param pulumi.Input[bool] reserved: <code>true</code> if reserved; otherwise, <code>false</code>.
        :param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
        :param pulumi.Input[bool] scm_site_also_stopped: <code>true</code> to stop SCM (KUDU) site when the app is stopped; otherwise, <code>false</code>. The default is <code>false</code>.
        :param pulumi.Input[str] server_farm_id: Resource ID of the associated App Service plan, formatted as: "/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
        :param pulumi.Input[pulumi.InputType['SiteConfigArgs']] site_config: Configuration of the app.
        :param pulumi.Input[str] slot: Name of the deployment slot to create or update. The name 'production' is reserved.
        :param pulumi.Input[bool] storage_account_required: Checks if Customer provided storage account is required
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[str] virtual_network_subnet_id: Azure Resource Manager ID of the Virtual network and subnet to be joined by Regional VNET Integration.
               This must be of the form /subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}
        """
        ...
    # Typing-only overload for the args-object calling convention
    # (resource_name, args=WebAppSlotArgs, opts=...); body is intentionally `...`.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: WebAppSlotArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A web app, a mobile app backend, or an API app.
        API Version: 2020-12-01.
        :param str resource_name: The name of the resource.
        :param WebAppSlotArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WebAppSlotArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       client_affinity_enabled: Optional[pulumi.Input[bool]] = None,
                       client_cert_enabled: Optional[pulumi.Input[bool]] = None,
                       client_cert_exclusion_paths: Optional[pulumi.Input[str]] = None,
                       client_cert_mode: Optional[pulumi.Input['ClientCertMode']] = None,
                       cloning_info: Optional[pulumi.Input[pulumi.InputType['CloningInfoArgs']]] = None,
                       container_size: Optional[pulumi.Input[int]] = None,
                       custom_domain_verification_id: Optional[pulumi.Input[str]] = None,
                       daily_memory_time_quota: Optional[pulumi.Input[int]] = None,
                       enabled: Optional[pulumi.Input[bool]] = None,
                       host_name_ssl_states: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HostNameSslStateArgs']]]]] = None,
                       host_names_disabled: Optional[pulumi.Input[bool]] = None,
                       hosting_environment_profile: Optional[pulumi.Input[pulumi.InputType['HostingEnvironmentProfileArgs']]] = None,
                       https_only: Optional[pulumi.Input[bool]] = None,
                       hyper_v: Optional[pulumi.Input[bool]] = None,
                       identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
                       is_xenon: Optional[pulumi.Input[bool]] = None,
                       key_vault_reference_identity: Optional[pulumi.Input[str]] = None,
                       kind: Optional[pulumi.Input[str]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       redundancy_mode: Optional[pulumi.Input['RedundancyMode']] = None,
                       reserved: Optional[pulumi.Input[bool]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       scm_site_also_stopped: Optional[pulumi.Input[bool]] = None,
                       server_farm_id: Optional[pulumi.Input[str]] = None,
                       site_config: Optional[pulumi.Input[pulumi.InputType['SiteConfigArgs']]] = None,
                       slot: Optional[pulumi.Input[str]] = None,
                       storage_account_required: Optional[pulumi.Input[bool]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       virtual_network_subnet_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Normalize resource options and pin the plugin version when unset.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from the arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = WebAppSlotArgs.__new__(WebAppSlotArgs)
            __props__.__dict__["client_affinity_enabled"] = client_affinity_enabled
            __props__.__dict__["client_cert_enabled"] = client_cert_enabled
            __props__.__dict__["client_cert_exclusion_paths"] = client_cert_exclusion_paths
            __props__.__dict__["client_cert_mode"] = client_cert_mode
            __props__.__dict__["cloning_info"] = cloning_info
            __props__.__dict__["container_size"] = container_size
            __props__.__dict__["custom_domain_verification_id"] = custom_domain_verification_id
            __props__.__dict__["daily_memory_time_quota"] = daily_memory_time_quota
            __props__.__dict__["enabled"] = enabled
            __props__.__dict__["host_name_ssl_states"] = host_name_ssl_states
            __props__.__dict__["host_names_disabled"] = host_names_disabled
            __props__.__dict__["hosting_environment_profile"] = hosting_environment_profile
            __props__.__dict__["https_only"] = https_only
            # Booleans below default to False when the caller leaves them unset.
            if hyper_v is None:
                hyper_v = False
            __props__.__dict__["hyper_v"] = hyper_v
            __props__.__dict__["identity"] = identity
            if is_xenon is None:
                is_xenon = False
            __props__.__dict__["is_xenon"] = is_xenon
            __props__.__dict__["key_vault_reference_identity"] = key_vault_reference_identity
            __props__.__dict__["kind"] = kind
            __props__.__dict__["location"] = location
            # 'name' and 'resource_group_name' are required unless resolving by URN.
            if name is None and not opts.urn:
                raise TypeError("Missing required property 'name'")
            __props__.__dict__["name"] = name
            __props__.__dict__["redundancy_mode"] = redundancy_mode
            if reserved is None:
                reserved = False
            __props__.__dict__["reserved"] = reserved
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if scm_site_also_stopped is None:
                scm_site_also_stopped = False
            __props__.__dict__["scm_site_also_stopped"] = scm_site_also_stopped
            __props__.__dict__["server_farm_id"] = server_farm_id
            __props__.__dict__["site_config"] = site_config
            __props__.__dict__["slot"] = slot
            __props__.__dict__["storage_account_required"] = storage_account_required
            __props__.__dict__["tags"] = tags
            __props__.__dict__["virtual_network_subnet_id"] = virtual_network_subnet_id
            # Output-only properties: initialized to None, resolved by the engine.
            __props__.__dict__["availability_state"] = None
            __props__.__dict__["default_host_name"] = None
            __props__.__dict__["enabled_host_names"] = None
            __props__.__dict__["host_names"] = None
            __props__.__dict__["in_progress_operation_id"] = None
            __props__.__dict__["is_default_container"] = None
            __props__.__dict__["last_modified_time_utc"] = None
            __props__.__dict__["max_number_of_workers"] = None
            __props__.__dict__["outbound_ip_addresses"] = None
            __props__.__dict__["possible_outbound_ip_addresses"] = None
            __props__.__dict__["repository_site_name"] = None
            __props__.__dict__["resource_group"] = None
            __props__.__dict__["slot_swap_status"] = None
            __props__.__dict__["state"] = None
            __props__.__dict__["suspended_till"] = None
            __props__.__dict__["target_swap_slot"] = None
            __props__.__dict__["traffic_manager_host_names"] = None
            __props__.__dict__["type"] = None
            __props__.__dict__["usage_state"] = None
        # Aliases keep state continuity with resources created under the
        # version-specific azure-native:web/vYYYYMMDD type tokens.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:web/v20150801:WebAppSlot"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppSlot"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppSlot"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppSlot"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppSlot"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppSlot"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppSlot"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppSlot"), pulumi.Alias(type_="azure-native:web/v20201201:WebAppSlot"), pulumi.Alias(type_="azure-native:web/v20210101:WebAppSlot"), pulumi.Alias(type_="azure-native:web/v20210115:WebAppSlot"), pulumi.Alias(type_="azure-native:web/v20210201:WebAppSlot")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(WebAppSlot, __self__).__init__(
            'azure-native:web:WebAppSlot',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppSlot':
        """
        Get an existing WebAppSlot resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine populates them from the
        # existing resource's state identified by `id`.
        __props__ = WebAppSlotArgs.__new__(WebAppSlotArgs)
        __props__.__dict__["availability_state"] = None
        __props__.__dict__["client_affinity_enabled"] = None
        __props__.__dict__["client_cert_enabled"] = None
        __props__.__dict__["client_cert_exclusion_paths"] = None
        __props__.__dict__["client_cert_mode"] = None
        __props__.__dict__["container_size"] = None
        __props__.__dict__["custom_domain_verification_id"] = None
        __props__.__dict__["daily_memory_time_quota"] = None
        __props__.__dict__["default_host_name"] = None
        __props__.__dict__["enabled"] = None
        __props__.__dict__["enabled_host_names"] = None
        __props__.__dict__["host_name_ssl_states"] = None
        __props__.__dict__["host_names"] = None
        __props__.__dict__["host_names_disabled"] = None
        __props__.__dict__["hosting_environment_profile"] = None
        __props__.__dict__["https_only"] = None
        __props__.__dict__["hyper_v"] = None
        __props__.__dict__["identity"] = None
        __props__.__dict__["in_progress_operation_id"] = None
        __props__.__dict__["is_default_container"] = None
        __props__.__dict__["is_xenon"] = None
        __props__.__dict__["key_vault_reference_identity"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["last_modified_time_utc"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["max_number_of_workers"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["outbound_ip_addresses"] = None
        __props__.__dict__["possible_outbound_ip_addresses"] = None
        __props__.__dict__["redundancy_mode"] = None
        __props__.__dict__["repository_site_name"] = None
        __props__.__dict__["reserved"] = None
        __props__.__dict__["resource_group"] = None
        __props__.__dict__["scm_site_also_stopped"] = None
        __props__.__dict__["server_farm_id"] = None
        __props__.__dict__["site_config"] = None
        __props__.__dict__["slot_swap_status"] = None
        __props__.__dict__["state"] = None
        __props__.__dict__["storage_account_required"] = None
        __props__.__dict__["suspended_till"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["target_swap_slot"] = None
        __props__.__dict__["traffic_manager_host_names"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["usage_state"] = None
        return WebAppSlot(resource_name, opts=opts, __props__=__props__)
    # Generated read-only output properties (pulumi.Output values resolved by
    # the engine); each delegates to the pulumi property bag.
    @property
    @pulumi.getter(name="availabilityState")
    def availability_state(self) -> pulumi.Output[str]:
        """
        Management information availability state for the app.
        """
        return pulumi.get(self, "availability_state")
    @property
    @pulumi.getter(name="clientAffinityEnabled")
    def client_affinity_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        <code>true</code> to enable client affinity; <code>false</code> to stop sending session affinity cookies, which route client requests in the same session to the same instance. Default is <code>true</code>.
        """
        return pulumi.get(self, "client_affinity_enabled")
    @property
    @pulumi.getter(name="clientCertEnabled")
    def client_cert_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        <code>true</code> to enable client certificate authentication (TLS mutual authentication); otherwise, <code>false</code>. Default is <code>false</code>.
        """
        return pulumi.get(self, "client_cert_enabled")
    @property
    @pulumi.getter(name="clientCertExclusionPaths")
    def client_cert_exclusion_paths(self) -> pulumi.Output[Optional[str]]:
        """
        client certificate authentication comma-separated exclusion paths
        """
        return pulumi.get(self, "client_cert_exclusion_paths")
    @property
    @pulumi.getter(name="clientCertMode")
    def client_cert_mode(self) -> pulumi.Output[Optional[str]]:
        """
        This composes with ClientCertEnabled setting.
        - ClientCertEnabled: false means ClientCert is ignored.
        - ClientCertEnabled: true and ClientCertMode: Required means ClientCert is required.
        - ClientCertEnabled: true and ClientCertMode: Optional means ClientCert is optional or accepted.
        """
        return pulumi.get(self, "client_cert_mode")
    @property
    @pulumi.getter(name="containerSize")
    def container_size(self) -> pulumi.Output[Optional[int]]:
        """
        Size of the function container.
        """
        return pulumi.get(self, "container_size")
    @property
    @pulumi.getter(name="customDomainVerificationId")
    def custom_domain_verification_id(self) -> pulumi.Output[Optional[str]]:
        """
        Unique identifier that verifies the custom domains assigned to the app. Customer will add this id to a txt record for verification.
        """
        return pulumi.get(self, "custom_domain_verification_id")
    @property
    @pulumi.getter(name="dailyMemoryTimeQuota")
    def daily_memory_time_quota(self) -> pulumi.Output[Optional[int]]:
        """
        Maximum allowed daily memory-time quota (applicable on dynamic apps only).
        """
        return pulumi.get(self, "daily_memory_time_quota")
    @property
    @pulumi.getter(name="defaultHostName")
    def default_host_name(self) -> pulumi.Output[str]:
        """
        Default hostname of the app. Read-only.
        """
        return pulumi.get(self, "default_host_name")
    # Generated read-only output properties backed by the pulumi property bag.
    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        <code>true</code> if the app is enabled; otherwise, <code>false</code>. Setting this value to false disables the app (takes the app offline).
        """
        return pulumi.get(self, "enabled")
    @property
    @pulumi.getter(name="enabledHostNames")
    def enabled_host_names(self) -> pulumi.Output[Sequence[str]]:
        """
        Enabled hostnames for the app.Hostnames need to be assigned (see HostNames) AND enabled. Otherwise,
        the app is not served on those hostnames.
        """
        return pulumi.get(self, "enabled_host_names")
    @property
    @pulumi.getter(name="hostNameSslStates")
    def host_name_ssl_states(self) -> pulumi.Output[Optional[Sequence['outputs.HostNameSslStateResponse']]]:
        """
        Hostname SSL states are used to manage the SSL bindings for app's hostnames.
        """
        return pulumi.get(self, "host_name_ssl_states")
    @property
    @pulumi.getter(name="hostNames")
    def host_names(self) -> pulumi.Output[Sequence[str]]:
        """
        Hostnames associated with the app.
        """
        return pulumi.get(self, "host_names")
    @property
    @pulumi.getter(name="hostNamesDisabled")
    def host_names_disabled(self) -> pulumi.Output[Optional[bool]]:
        """
        <code>true</code> to disable the public hostnames of the app; otherwise, <code>false</code>.
        If <code>true</code>, the app is only accessible via API management process.
        """
        return pulumi.get(self, "host_names_disabled")
    @property
    @pulumi.getter(name="hostingEnvironmentProfile")
    def hosting_environment_profile(self) -> pulumi.Output[Optional['outputs.HostingEnvironmentProfileResponse']]:
        """
        App Service Environment to use for the app.
        """
        return pulumi.get(self, "hosting_environment_profile")
    @property
    @pulumi.getter(name="httpsOnly")
    def https_only(self) -> pulumi.Output[Optional[bool]]:
        """
        HttpsOnly: configures a web site to accept only https requests. Issues redirect for
        http requests
        """
        return pulumi.get(self, "https_only")
    @property
    @pulumi.getter(name="hyperV")
    def hyper_v(self) -> pulumi.Output[Optional[bool]]:
        """
        Hyper-V sandbox.
        """
        return pulumi.get(self, "hyper_v")
    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
        """
        Managed service identity.
        """
        return pulumi.get(self, "identity")
    # Generated read-only output properties backed by the pulumi property bag.
    @property
    @pulumi.getter(name="inProgressOperationId")
    def in_progress_operation_id(self) -> pulumi.Output[str]:
        """
        Specifies an operation id if this site has a pending operation.
        """
        return pulumi.get(self, "in_progress_operation_id")
    @property
    @pulumi.getter(name="isDefaultContainer")
    def is_default_container(self) -> pulumi.Output[bool]:
        """
        <code>true</code> if the app is a default container; otherwise, <code>false</code>.
        """
        return pulumi.get(self, "is_default_container")
    @property
    @pulumi.getter(name="isXenon")
    def is_xenon(self) -> pulumi.Output[Optional[bool]]:
        """
        Obsolete: Hyper-V sandbox.
        """
        return pulumi.get(self, "is_xenon")
    @property
    @pulumi.getter(name="keyVaultReferenceIdentity")
    def key_vault_reference_identity(self) -> pulumi.Output[Optional[str]]:
        """
        Identity to use for Key Vault Reference authentication.
        """
        return pulumi.get(self, "key_vault_reference_identity")
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[Optional[str]]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter(name="lastModifiedTimeUtc")
    def last_modified_time_utc(self) -> pulumi.Output[str]:
        """
        Last time the app was modified, in UTC. Read-only.
        """
        return pulumi.get(self, "last_modified_time_utc")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource Location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="maxNumberOfWorkers")
    def max_number_of_workers(self) -> pulumi.Output[int]:
        """
        Maximum number of workers.
        This only applies to Functions container.
        """
        return pulumi.get(self, "max_number_of_workers")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    # Generated read-only output properties backed by the pulumi property bag.
    @property
    @pulumi.getter(name="outboundIpAddresses")
    def outbound_ip_addresses(self) -> pulumi.Output[str]:
        """
        List of IP addresses that the app uses for outbound connections (e.g. database access). Includes VIPs from tenants that site can be hosted with current settings. Read-only.
        """
        return pulumi.get(self, "outbound_ip_addresses")
    @property
    @pulumi.getter(name="possibleOutboundIpAddresses")
    def possible_outbound_ip_addresses(self) -> pulumi.Output[str]:
        """
        List of IP addresses that the app uses for outbound connections (e.g. database access). Includes VIPs from all tenants except dataComponent. Read-only.
        """
        return pulumi.get(self, "possible_outbound_ip_addresses")
    @property
    @pulumi.getter(name="redundancyMode")
    def redundancy_mode(self) -> pulumi.Output[Optional[str]]:
        """
        Site redundancy mode
        """
        return pulumi.get(self, "redundancy_mode")
    @property
    @pulumi.getter(name="repositorySiteName")
    def repository_site_name(self) -> pulumi.Output[str]:
        """
        Name of the repository site.
        """
        return pulumi.get(self, "repository_site_name")
    @property
    @pulumi.getter
    def reserved(self) -> pulumi.Output[Optional[bool]]:
        """
        <code>true</code> if reserved; otherwise, <code>false</code>.
        """
        return pulumi.get(self, "reserved")
    @property
    @pulumi.getter(name="resourceGroup")
    def resource_group(self) -> pulumi.Output[str]:
        """
        Name of the resource group the app belongs to. Read-only.
        """
        return pulumi.get(self, "resource_group")
    @property
    @pulumi.getter(name="scmSiteAlsoStopped")
    def scm_site_also_stopped(self) -> pulumi.Output[Optional[bool]]:
        """
        <code>true</code> to stop SCM (KUDU) site when the app is stopped; otherwise, <code>false</code>. The default is <code>false</code>.
        """
        return pulumi.get(self, "scm_site_also_stopped")
    @property
    @pulumi.getter(name="serverFarmId")
    def server_farm_id(self) -> pulumi.Output[Optional[str]]:
        """
        Resource ID of the associated App Service plan, formatted as: "/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
        """
        return pulumi.get(self, "server_farm_id")
    @property
    @pulumi.getter(name="siteConfig")
    def site_config(self) -> pulumi.Output[Optional['outputs.SiteConfigResponse']]:
        """
        Configuration of the app.
        """
        return pulumi.get(self, "site_config")
@property
@pulumi.getter(name="slotSwapStatus")
def slot_swap_status(self) -> pulumi.Output['outputs.SlotSwapStatusResponse']:
"""
Status of the last deployment slot swap operation.
"""
return pulumi.get(self, "slot_swap_status")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
Current state of the app.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="storageAccountRequired")
def storage_account_required(self) -> pulumi.Output[Optional[bool]]:
"""
Checks if Customer provided storage account is required
"""
return pulumi.get(self, "storage_account_required")
@property
@pulumi.getter(name="suspendedTill")
def suspended_till(self) -> pulumi.Output[str]:
"""
App suspended till in case memory-time quota is exceeded.
"""
return pulumi.get(self, "suspended_till")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetSwapSlot")
def target_swap_slot(self) -> pulumi.Output[str]:
"""
Specifies which deployment slot this app will swap into. Read-only.
"""
return pulumi.get(self, "target_swap_slot")
@property
@pulumi.getter(name="trafficManagerHostNames")
def traffic_manager_host_names(self) -> pulumi.Output[Sequence[str]]:
"""
Azure Traffic Manager hostnames associated with the app. Read-only.
"""
return pulumi.get(self, "traffic_manager_host_names")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="usageState")
def usage_state(self) -> pulumi.Output[str]:
"""
State indicating whether the app has exceeded its quota usage. Read-only.
"""
return pulumi.get(self, "usage_state")
@property
@pulumi.getter(name="virtualNetworkSubnetId")
def virtual_network_subnet_id(self) -> pulumi.Output[Optional[str]]:
"""
Azure Resource Manager ID of the Virtual network and subnet to be joined by Regional VNET Integration.
This must be of the form /subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}
"""
return pulumi.get(self, "virtual_network_subnet_id")
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
fe735cb9f61a0d4d595382311f8f59fc71f50011 | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /src/datadog_api_client/v1/model/i_frame_widget_definition_type.py | 4b9a12abbd72dab5bebb2895c3a106438d65446c | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 857 | py | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from datadog_api_client.model_utils import (
ModelSimple,
cached_property,
)
from typing import ClassVar
class IFrameWidgetDefinitionType(ModelSimple):
"""
Type of the iframe widget.
:param value: If omitted defaults to "iframe". Must be one of ["iframe"].
:type value: str
"""
allowed_values = {
"iframe",
}
IFRAME: ClassVar["IFrameWidgetDefinitionType"]
@cached_property
def openapi_types(_):
return {
"value": (str,),
}
IFrameWidgetDefinitionType.IFRAME = IFrameWidgetDefinitionType("iframe")
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
d22ec5d91e8e2f8415c562b398f5064d16e44272 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02380/s032648197.py | e45329928543031c03cf6ec6f6bb061bf46b86d1 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | import math
a, b, C = map(float, input().split())
S = (a * b * math.sin(math.radians(C))) / 2
L = a + b + (math.sqrt(a**2 + b**2 - 2*a*b*math.cos(math.radians(C))))
h = b * math.sin(math.radians(C))
print("{:.8f}".format(S))
print("{:.8f}".format(L))
print("{:.8f}".format(h))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3e2f28d4cfd271faae64dc9e2da5164da032ab8d | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/bib.py | 6779e0a1defb70e03b19560226f2a5e023953e6b | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'biB':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
9d448fa4788fc7db4f9ad0d3f889f2f0add779c3 | a9937139b1af85180cea706a52d447abce2430f4 | /a/cwiczenie_4-1/BMI.py | e4e109f6ff184f3c7a759b39f3ad213864c556ca | [] | no_license | MirekPz/Altkom | 8f16014d43adb10e87804ae2b5d23151924cb226 | 0a49e75e681593b41d07cbff63dea0723a11756b | refs/heads/master | 2020-09-29T00:12:37.990353 | 2019-12-13T15:52:30 | 2019-12-13T15:52:30 | 226,899,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | wzrost = float(input("Podaj wzrost [m]; "))
waga = float(input("Podaj wagę [kg]; "))
BMI = waga/wzrost**2
print(round(BMI, 2))
if BMI < 16:
print("Wygłodzenie")
elif BMI <=16.99:
print("Wychudzenie")
elif BMI <= 18.49:
print("Niedowaga")
elif BMI <= 24.99:
print("Wartość prawidłowa:")
elif BMI <= 29.99:
print("Nadwaga")
elif BMI <= 34.99:
print("I stopień otyłości")
elif BMI <= 39.99:
print("II stopień otyłości (otyłość kliniczna)")
else:
print("III stopień otyłości (otyłość skrajna)")
print(f"Przedział prawidłowej wagi dla określonego wzrostu: [{round(18.49 * wzrost**2, 1)} , {round(24.99 * wzrost**2, 1)}]")
| [
"mirek@info-tur.pl"
] | mirek@info-tur.pl |
388d322e7321b6221b4e638c5421221acdb06151 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil2978.py | 9e6b9ff316ab2c467b06cfb9f702676e859600a3 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,959 | py | # qubit number=4
# total number=46
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += CNOT(0,3) # number=10
prog += H(3) # number=40
prog += CZ(0,3) # number=41
prog += H(3) # number=42
prog += CNOT(0,3) # number=33
prog += X(3) # number=34
prog += CNOT(0,3) # number=35
prog += CNOT(0,3) # number=25
prog += CNOT(0,3) # number=12
prog += H(2) # number=30
prog += CZ(0,2) # number=31
prog += H(2) # number=32
prog += CNOT(0,2) # number=43
prog += X(2) # number=44
prog += CNOT(0,2) # number=45
prog += H(2) # number=36
prog += CZ(0,2) # number=37
prog += H(2) # number=38
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += H(0) # number=5
prog += H(3) # number=16
prog += CZ(1,3) # number=17
prog += H(3) # number=18
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=9
prog += H(2) # number=39
prog += H(0) # number=26
prog += CZ(3,0) # number=27
prog += H(0) # number=28
prog += CNOT(3,0) # number=14
prog += Y(2) # number=29
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil2978.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
218eff2b76cfe7f74adbaddd19c69e8b4a65b612 | bc441bb06b8948288f110af63feda4e798f30225 | /cmdb_sdk/model/container/hpa_pb2.pyi | ba9ca505ff5c447e180d7f7fc8dc3ec1ccd8f670 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,592 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from cmdb_sdk.model.container.resource_metric_source_pb2 import (
ResourceMetricSource as cmdb_sdk___model___container___resource_metric_source_pb2___ResourceMetricSource,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class HorizontalPodAutoscaler(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class ScaleTargetRef(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
kind = ... # type: typing___Text
name = ... # type: typing___Text
apiVersion = ... # type: typing___Text
def __init__(self,
*,
kind : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
apiVersion : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> HorizontalPodAutoscaler.ScaleTargetRef: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> HorizontalPodAutoscaler.ScaleTargetRef: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"apiVersion",b"apiVersion",u"kind",b"kind",u"name",b"name"]) -> None: ...
class Metrics(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
type = ... # type: typing___Text
@property
def resource(self) -> cmdb_sdk___model___container___resource_metric_source_pb2___ResourceMetricSource: ...
def __init__(self,
*,
type : typing___Optional[typing___Text] = None,
resource : typing___Optional[cmdb_sdk___model___container___resource_metric_source_pb2___ResourceMetricSource] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> HorizontalPodAutoscaler.Metrics: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> HorizontalPodAutoscaler.Metrics: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"resource",b"resource"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"resource",b"resource",u"type",b"type"]) -> None: ...
instanceId = ... # type: typing___Text
resourceName = ... # type: typing___Text
namespace = ... # type: typing___Text
minReplicas = ... # type: builtin___int
maxReplicas = ... # type: builtin___int
@property
def scaleTargetRef(self) -> HorizontalPodAutoscaler.ScaleTargetRef: ...
@property
def metrics(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[HorizontalPodAutoscaler.Metrics]: ...
def __init__(self,
*,
instanceId : typing___Optional[typing___Text] = None,
resourceName : typing___Optional[typing___Text] = None,
namespace : typing___Optional[typing___Text] = None,
scaleTargetRef : typing___Optional[HorizontalPodAutoscaler.ScaleTargetRef] = None,
minReplicas : typing___Optional[builtin___int] = None,
maxReplicas : typing___Optional[builtin___int] = None,
metrics : typing___Optional[typing___Iterable[HorizontalPodAutoscaler.Metrics]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> HorizontalPodAutoscaler: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> HorizontalPodAutoscaler: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"scaleTargetRef",b"scaleTargetRef"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"instanceId",b"instanceId",u"maxReplicas",b"maxReplicas",u"metrics",b"metrics",u"minReplicas",b"minReplicas",u"namespace",b"namespace",u"resourceName",b"resourceName",u"scaleTargetRef",b"scaleTargetRef"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
642b38a413df309b7c1b02b03690ef51953c2841 | b25182d0034468e5e545c6c72e5a2cdd3c43a484 | /.PyCharm2017.2/system/python_stubs/-223353804/lxml/etree/DTD.py | 4a9de7fc7e62c4dfc3e361e63287deb22bbafd98 | [] | no_license | lovewula/config | f9ac16b30082c04be7733969d5359ee6c7258db6 | c0720e5bfd49f579a52f83de36de40c76996ebf6 | refs/heads/master | 2021-08-19T19:31:44.088218 | 2017-11-27T08:04:06 | 2017-11-27T08:04:06 | 111,974,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,892 | py | # encoding: utf-8
# module lxml.etree
# from D:\Python\Python27\lib\site-packages\lxml\etree.pyd
# by generator 1.145
""" The ``lxml.etree`` module implements the extended ElementTree API for XML. """
# imports
import __builtin__ as __builtins__ # <module '__builtin__' (built-in)>
from _Validator import _Validator
class DTD(_Validator):
"""
DTD(self, file=None, external_id=None)
A DTD validator.
Can load from filesystem directly given a filename or file-like object.
Alternatively, pass the keyword parameter ``external_id`` to load from a
catalog.
"""
def elements(self, *args, **kwargs): # real signature unknown
pass
def entities(self, *args, **kwargs): # real signature unknown
pass
def iterelements(self, *args, **kwargs): # real signature unknown
pass
def iterentities(self, *args, **kwargs): # real signature unknown
pass
def __call__(self, etree): # real signature unknown; restored from __doc__
"""
__call__(self, etree)
Validate doc using the DTD.
Returns true if the document is valid, false if not.
"""
pass
def __init__(self, file=None, external_id=None): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
external_id = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
system_url = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__pyx_vtable__ = None # (!) real value is ''
| [
"lovewula1314@gmail.com"
] | lovewula1314@gmail.com |
568c23ea6c150f1790bd62cfcdcca3c4eb2884d9 | cca5ceb42b09e567d79fcb46f298757c1ff04447 | /Requests/ProxyIP.py | 16b5e0c5908fa2e51b4e45542a118d54b3bdf395 | [] | no_license | NishantGhanate/PythonScripts | 92933237720e624a0f672729743a98557bea79d6 | 60b92984d21394002c0d3920bc448c698e0402ca | refs/heads/master | 2022-12-13T11:56:14.442286 | 2022-11-18T14:26:33 | 2022-11-18T14:26:33 | 132,910,530 | 25 | 15 | null | 2022-12-09T09:03:58 | 2018-05-10T14:18:33 | Python | UTF-8 | Python | false | false | 536 | py | import requests
from lxml.html import fromstring
def get_proxies():
url = 'https://free-proxy-list.net/'
response = requests.get(url)
parser = fromstring(response.text)
proxies = set()
for i in parser.xpath('//tbody/tr')[:20]:
if i.xpath('.//td[7][contains(text(),"yes")]'):
#Grabbing IP and corresponding PORT
proxy = ":".join([i.xpath('.//td[1]/text()')[0], i.xpath('.//td[2]/text()')[0]])
proxies.add(proxy)
return proxies
proxies = get_proxies()
print(proxies) | [
"nishant7.ng@gmail.com"
] | nishant7.ng@gmail.com |
d3c160de4501cb84be1bc3e5585762ce8b657d36 | 27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f | /tests/unit/modules/network/fortios/test_fortios_switch_controller_quarantine.py | 4093a700e7f1a2da8709a47b049c6a23e71f795d | [] | no_license | coll-test/notstdlib.moveitallout | eb33a560070bbded5032385d0aea2f3cf60e690b | 0987f099b783c6cf977db9233e1c3d9efcbcb3c7 | refs/heads/master | 2020-12-19T22:28:33.369557 | 2020-01-23T18:51:26 | 2020-01-23T18:51:26 | 235,865,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,077 | py | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible_collections.notstdlib.moveitallout.plugins.modules import fortios_switch_controller_quarantine
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.modules.fortios_switch_controller_quarantine.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_switch_controller_quarantine_creation(mocker):
schema_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_quarantine': {
'quarantine': 'enable',
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_quarantine.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'quarantine': 'enable',
}
set_method_mock.assert_called_with('switch-controller', 'quarantine', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_switch_controller_quarantine_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_quarantine': {
'quarantine': 'enable',
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_quarantine.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'quarantine': 'enable',
}
set_method_mock.assert_called_with('switch-controller', 'quarantine', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_switch_controller_quarantine_idempotent(mocker):
schema_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_quarantine': {
'quarantine': 'enable',
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_quarantine.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'quarantine': 'enable',
}
set_method_mock.assert_called_with('switch-controller', 'quarantine', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_switch_controller_quarantine_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_quarantine': {
'random_attribute_not_valid': 'tag',
'quarantine': 'enable',
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_quarantine.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'quarantine': 'enable',
}
set_method_mock.assert_called_with('switch-controller', 'quarantine', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| [
"wk@sydorenko.org.ua"
] | wk@sydorenko.org.ua |
eec408b4327d41fd937c2ad9c5fbdfce8c90427b | 777844bd87b2e87bb992af18bdf13ca1b2c747fd | /nnvm/tutorials/nlp/from_darknet_rnn.py | bbf70c724bbe43a4aeba64d71b5443cd3a88be94 | [
"Apache-2.0"
] | permissive | FrozenGene/tvm | 5a6d875e39af0f2c00e1508bf33a3f699ae9ac27 | fbd1c16484b5710a48717b9cf50f424326a84cca | refs/heads/master | 2021-06-18T09:14:49.173534 | 2019-04-02T00:10:16 | 2019-04-02T00:10:16 | 155,194,851 | 1 | 2 | Apache-2.0 | 2018-10-29T10:43:18 | 2018-10-29T10:43:18 | null | UTF-8 | Python | false | false | 5,383 | py | """
Compile Darknet Models for RNN
==============================
**Author**: `Siju Samuel <https://siju-samuel.github.io/>`_
This article is an introductory tutorial to deploy darknet rnn models with NNVM.
This script will run a character prediction model
Each module consists of 3 fully-connected layers. The input layer propagates information from the
input to the current state. The recurrent layer propagates information through time from the
previous state to the current one.
The input to the network is a 1-hot encoding of ASCII characters. We train the network to predict
the next character in a stream of characters. The output is constrained to be a probability
distribution using a softmax layer.
Since each recurrent layer contains information about the current character and the past
characters, it can use this context to predict the future characters in a word or phrase.
All the required models and libraries will be downloaded from the internet
by the script.
"""
import random
import numpy as np
import tvm
from tvm.contrib import graph_runtime
from tvm.contrib.download import download_testdata
from nnvm.testing.darknet import __darknetffi__
import nnvm
import nnvm.frontend.darknet
# Set the parameters
# -----------------------
# Set the seed value and the number of characters to predict
#Model name
MODEL_NAME = 'rnn'
#Seed value
seed = 'Thus'
#Number of characters to predict
num = 1000
# Download required files
# -----------------------
# Download cfg and weights file if first time.
CFG_NAME = MODEL_NAME + '.cfg'
WEIGHTS_NAME = MODEL_NAME + '.weights'
REPO_URL = 'https://github.com/dmlc/web-data/blob/master/darknet/'
CFG_URL = REPO_URL + 'cfg/' + CFG_NAME + '?raw=true'
WEIGHTS_URL = REPO_URL + 'weights/' + WEIGHTS_NAME + '?raw=true'
cfg_path = download_testdata(CFG_URL, CFG_NAME, module='darknet')
weights_path = download_testdata(WEIGHTS_URL, WEIGHTS_NAME, module='darknet')
# Download and Load darknet library
DARKNET_LIB = 'libdarknet.so'
DARKNET_URL = REPO_URL + 'lib/' + DARKNET_LIB + '?raw=true'
lib_path = download_testdata(DARKNET_URL, DARKNET_LIB, module='darknet')
DARKNET_LIB = __darknetffi__.dlopen(lib_path)
net = DARKNET_LIB.load_network(cfg_path.encode('utf-8'), weights_path.encode('utf-8'), 0)
dtype = 'float32'
batch_size = 1
# Import the graph to NNVM
# ------------------------
# Import darknet graph definition to nnvm.
#
# Results:
# sym: nnvm graph for rnn model
# params: params converted from darknet weights
print("Converting darknet rnn model to nnvm symbols...")
sym, params = nnvm.frontend.darknet.from_darknet(net, dtype)
# Compile the model on NNVM
data = np.empty([1, net.inputs], dtype)#net.inputs
target = 'llvm'
shape = {'data': data.shape}
print("Compiling the model...")
shape_dict = {'data': data.shape}
dtype_dict = {'data': data.dtype}
with nnvm.compiler.build_config(opt_level=2):
graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, dtype_dict, params)
# Execute the portable graph on TVM
# ---------------------------------
# Now we can try deploying the NNVM compiled model on cpu target.
# Set the cpu context
ctx = tvm.cpu(0)
# Create graph runtime
m = graph_runtime.create(graph, lib, ctx)
# Set the params to runtime
m.set_input(**params)
def _init_state_memory(rnn_cells_count, dtype):
'''Initialize memory for states'''
states = {}
state_shape = (1024,)
for i in range(rnn_cells_count):
k = 'rnn' + str(i) + '_state'
states[k] = tvm.nd.array(np.zeros(state_shape, dtype).astype(dtype))
return states
def _set_state_input(runtime, states):
'''Set the state inputs'''
for state in states:
runtime.set_input(state, states[state])
def _get_state_output(runtime, states):
'''Get the state outputs and save'''
i = 1
for state in states:
data = states[state]
states[state] = runtime.get_output((i), tvm.nd.empty(data.shape, data.dtype))
i += 1
def _proc_rnn_output(out_data):
'''Generate the characters from the output array'''
sum_array = 0
n = out_data.size
r = random.uniform(0, 1)
for j in range(n):
if out_data[j] < 0.0001:
out_data[j] = 0
sum_array += out_data[j]
for j in range(n):
out_data[j] *= float(1.0) / sum_array
r = r - out_data[j]
if r <= 0:
return j
return n-1
print("RNN generaring text...")
out_shape = (net.outputs,)
rnn_cells_count = 3
# Initialize state memory
# -----------------------
states = _init_state_memory(rnn_cells_count, dtype)
len_seed = len(seed)
count = len_seed + num
out_txt = ""
#Initialize random seed
random.seed(0)
c = ord(seed[0])
inp_data = np.zeros([net.inputs], dtype)
# Run the model
# -------------
# Predict character by character till `num`
for i in range(count):
inp_data[c] = 1
# Set the input data
m.set_input('data', tvm.nd.array(inp_data.astype(dtype)))
inp_data[c] = 0
# Set the state inputs
_set_state_input(m, states)
# Run the model
m.run()
# Get the output
tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
# Get the state outputs
_get_state_output(m, states)
# Get the predicted character and keep buffering it
c = ord(seed[i]) if i < len_seed else _proc_rnn_output(tvm_out)
out_txt += chr(c)
print("Predicted Text =", out_txt)
| [
"tqchen@users.noreply.github.com"
] | tqchen@users.noreply.github.com |
9e786306217ad5c74dde5a37eab383d4a736229b | d7a1b26449211f4ea67dce9370f6558f16df992c | /todo/settings.py | 1e4b4da5176e67e3e8e7a02f0936daf740f04dde | [] | no_license | SonerArslan2019/ToDoAPP_Django_BULMA | 8c245b49e2f8f6e35d34b9cf34bdc923fc065ce5 | 8a0c8d97699b856c7634b2a98692b9311a0bf183 | refs/heads/master | 2023-04-15T08:33:19.635367 | 2021-04-25T19:11:18 | 2021-04-25T19:11:18 | 361,483,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,296 | py | """
Django settings for todo project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in source; load it from an environment
# variable (or a secrets file) before deploying.
SECRET_KEY = 'django-insecure-m_+a)4*n05ypamd=ul=$^eiubs7p62elic_g#z%ppfhfpc^*s('

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'todoapp',  # local project app
]

# Order matters: middleware runs top-down for requests, bottom-up for responses.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'todo.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],  # project-level template directory
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'todo.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
# NOTE(review): all password validators are commented out, so any password is
# accepted; re-enable these before production use.

# AUTH_PASSWORD_VALIDATORS = [
#     {
#         'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
#     },
# ]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"soner@arslanyapi.com.tr"
] | soner@arslanyapi.com.tr |
71ec5eb99a91022cff32c68fb9a82d33c1fe8b5a | 2a5c0c49319989a24f9c9f18530f109bc48a8df1 | /CursesEnded/SecondYear/PythonJava(InternetOfThings)/list5/driver2.py | c165f04582e1365ac5166f1e5e28d3e6b1d1892d | [] | no_license | Kamil-IT/Studies | 0ada6dd92d7ecdbd0a3164c7c80080dd715ce8fc | d70343b2b7818ce303d816443bb15d21e03b42e0 | refs/heads/master | 2022-12-22T01:55:35.047783 | 2022-10-20T18:55:29 | 2022-10-20T18:55:29 | 217,039,987 | 0 | 1 | null | 2022-12-10T06:03:55 | 2019-10-23T11:24:33 | Jupyter Notebook | UTF-8 | Python | false | false | 1,105 | py | import json
import paho.mqtt.client as mqtt
import requests
def subscribe_topic(broker_url, topic, on_message):
    """Connect to the MQTT broker, subscribe to *topic*, and block forever.

    *on_message* is the paho callback invoked for every message received
    on the subscribed topic.  This call never returns (loop_forever).
    """
    mqtt_client = mqtt.Client()
    mqtt_client.connect(broker_url)
    mqtt_client.subscribe(topic)
    mqtt_client.on_message = on_message
    mqtt_client.loop_forever()
def on_message_app2_driver(client, userdata, msg):
    """MQTT callback: steer the driver app from the phone's reported latitude.

    Decodes the JSON payload once (the original re-parsed it with json.loads
    on every comparison and print) and posts the matching command to the
    local driver endpoint:
      * latitude > 41.80 -> drive on, in reverse
      * latitude < 41.78 -> drive on, forward
      * otherwise        -> stop
    """
    payload = json.loads(msg.payload.decode('utf-8'))
    # NOTE(review): the prints concatenate the raw JSON value to a str, so the
    # publisher is assumed to send "Latitude" as a string -- confirm upstream.
    latitude = payload["Latitude"]
    if 41.80 < float(latitude):
        requests.post("http://127.0.0.1:6002/status", json={"reverse": "on", "status": "on"})
        print("Latitude too big " + latitude)
    elif float(latitude) < 41.78:
        requests.post("http://127.0.0.1:6002/status", json={"reverse": "off", "status": "on"})
        print("Latitude too small " + latitude)
    else:
        requests.post("http://127.0.0.1:6002/status", json={"status": "off"})
        print("Latitude good")
def main():
    """Entry point: watch phone GPS messages on the local broker forever."""
    broker_host = "127.0.0.1"
    subscribe_topic(broker_host, "phone_location", on_message_app2_driver)


if __name__ == '__main__':
    main()
| [
"kkwolny@vp.pl"
] | kkwolny@vp.pl |
eb13bc5a7c58a79ab899c6c06b92d27c1a45639b | 933ed73cdf117fc6c88c1ebba7a17b82807a16e8 | /docs/00.Python/demo_Chapter01/demo_python_structure/pkg/module1.py | 7fde6166ac45f7f13a4c86150943558d3d96b12d | [
"Apache-2.0"
] | permissive | wan230114/PythonNote | c4fff80f6f3849ed0b0346526d3c6197a4867d2c | f4989a44c03efdcea3f7aa3484e3470e7fd983eb | refs/heads/master | 2023-01-28T14:51:07.304066 | 2023-01-12T18:38:16 | 2023-01-12T18:38:16 | 242,367,069 | 5 | 6 | Apache-2.0 | 2021-01-05T23:35:10 | 2020-02-22T15:45:01 | JavaScript | UTF-8 | Python | false | false | 876 | py | """这是该模块1的说明文档"""
# Describe the module's own contents at import time.  The literal text is
# user-facing output and is intentionally kept in the original Chinese.
print("""这是模块1,它包含:
三个变量: a -- 数值对象, ClassName -- 类对象, func -- 函数对象
一个类: ClassName; 类方法有: __init__, main; 类属性有: self.arg
一个函数: func
五个语句块: 1 * class, 3 * def, 1 * for
七个语句: 5 * print, a = 520, self.arg = arg
""")
# Module-level demo variable (counted in the description above).
a = 520
class ClassName(object):
    """Demo class: stores a single constructor argument and prints a message.

    Attributes are documented via comments; `arg` keeps the constructor's
    argument verbatim.  Printed text is user-facing output and is kept in
    the original Chinese.
    """

    def __init__(self, arg):
        """Remember the constructor argument on the instance."""
        self.arg = arg

    def main(self):
        """Print the demo message for the class method."""
        print("用于执行类方法的具体语句,打印该行字符串")
def func():
    """Print the two demo lines (output kept in the original Chinese)."""
    print("用于打印函数")
    print("用于执行函数的具体语句,打印该行字符串")
# Demo for-loop block: a string is iterable, so this prints one digit
# per line at import time.
for i in "123456":
    print(i)
| [
"1170101471@qq.com"
] | 1170101471@qq.com |
eac26296fc7adb18685967c93a73c56898d63177 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/playground/mehmet/2011/python-pymodel/actions.py | 2f6b2f799e6cfaa6c12c1a1ad5670d9f8917771d | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2011 TUBITAK/BILGEM
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import pisitools
from pisi.actionsapi import pythonmodules
# Target site-packages directory of the distribution's Python 2.7 stack.
instalDir = "/usr/lib/python2.7/site-packages"

def install():
    """Install the PyModel package and expose its bundled tools system-wide.

    Steps: install the Python module, copy the package payload into
    site-packages, symlink each command-line tool into /usr/bin, and ship
    the samples/notes as documentation.
    """
    pythonmodules.install()
    pisitools.insinto("%s/python-pymodel" % instalDir, "pymodel/*")
    # Make every bundled tool reachable on PATH via a symlink.
    for binary in ["pma.py", "pmg.py", "pmt.py", "trun.py", "dotsvg", \
                   "clogdiff", "tpath", "dotps", "wsgirunner.py"]:
        pisitools.dosym("%s/python-pymodel/%s" % (instalDir, binary), \
                        "/usr/bin/%s" % binary)
    pisitools.insinto("/usr/share/doc/python-pymodel", "samples")
    pisitools.insinto("/usr/share/doc/python-pymodel", "notes")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
e3f4ec202b116de4065c3adbc9dff68c89344718 | 7b3009e019e081667df67c6b41328b5db632b898 | /render_video.py | 2c1ab87baf640b84dd9b59daeeae4eb0ce89851e | [
"MIT"
] | permissive | frostburn/multibranch-mandelbrot | d1e2cc6bce6ab8f065b678fb2133bd3057b832d5 | 84e4887ffc90a5338ae448ced6f62fcf40bc11a1 | refs/heads/master | 2023-08-02T18:20:56.671175 | 2021-09-28T09:57:58 | 2021-09-28T09:57:58 | 287,219,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,123 | py | import argparse
from threading import Thread, Lock
import imageio
import progressbar
from pylab import *
from coloring import red_lavender, subharmonics
from mandelbrot import mandelbrot
# Preset video resolutions keyed by the common "<height>p" label, as
# (width, height) pixel pairs (16:9 aspect, widths rounded to integers).
RESOLUTIONS = {
    "2160p": (3840, 2160),
    "1440p": (2560, 1440),
    "1080p": (1920, 1080),
    "720p": (1280, 720),
    "480p": (854, 480),
    "360p": (640, 360),
    "240p": (426, 240),
    "160p": (284, 160),
    "80p": (142, 80),
    "40p": (71, 40),
}
def make_video_frame(rgb, indexing='ij', dither=1.0/256.0):
    """Convert three float channels in [0, 1] into an 8-bit RGB frame.

    rgb      -- iterable of three 2-D float arrays (one per channel).
    indexing -- 'ij' transposes each channel before stacking; anything
                else leaves the orientation untouched.
    dither   -- amplitude of uniform noise added before quantization;
                falsy disables dithering.
    Returns a uint8 array of shape (H, W, 3).
    """
    channels = list(rgb)
    if dither:
        channels = [chan + random(chan.shape) * dither for chan in channels]
    if indexing == 'ij':
        channels = [chan.T for chan in channels]
    frame = clip(stack(channels, axis=-1), 0.0, 1.0)
    return (frame * 255).astype('uint8')
def do_render(args, writer):
    """Render `args.num_frames` frames of a zooming Mandelbrot into `writer`.

    The camera stays centred on a fixed point while the zoom level sweeps
    linearly from -2 to 42 across the video.
    """
    inside_cutoff = 2**9
    color_map = subharmonics
    # Guard against a single-frame render: the original divided by
    # (num_frames - 1), which raises ZeroDivisionError when num_frames == 1.
    # For num_frames > 1 the progression is unchanged.
    denom = max(args.num_frames - 1, 1)
    for n in progressbar.progressbar(range(args.num_frames)):
        t = n / denom  # normalized time in [0, 1]
        x, y = -0.11042608495193805, -1.2321253969758166
        zoom = t * 44 - 2
        image = mandelbrot(args.width, args.height, x, y, zoom, 2.5, 66,
                           color_map=color_map, anti_aliasing=args.anti_aliasing,
                           inside_cutoff=inside_cutoff, clip_outside=True)
        frame = make_video_frame(image, indexing=None)
        writer.append_data(frame)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Render video frames of a Multi-branch Mandelbrot fractal')
    parser.add_argument('outfile', type=str, help='Output file name')
    parser.add_argument('--anti-aliasing', type=int, help='Anti-aliasing pixel subdivisions')
    parser.add_argument('--resolution', choices=RESOLUTIONS.keys(), help='Video and simulation grid resolution')
    parser.add_argument('--width', type=int, help='Video and simulation grid width', metavar='W')
    parser.add_argument('--height', type=int, help='Video and simulation grid height', metavar='H')
    parser.add_argument('--framerate', type=int, help='Video frame rate')
    parser.add_argument('--video-quality', type=int, help='Video quality factor')
    parser.add_argument('--video-duration', type=float, help='Duration of video to render in seconds')
    args = parser.parse_args()

    # Fill in defaults for the optional knobs.
    if not args.anti_aliasing:
        args.anti_aliasing = 2
    if not args.framerate:
        args.framerate = 24
    if not args.video_quality:
        args.video_quality = 10
    # macro_block_size=1 lets the writer accept arbitrary frame dimensions.
    writer = imageio.get_writer(args.outfile, fps=args.framerate, quality=args.video_quality, macro_block_size=1)

    # Compute derived parameters
    # A named --resolution only supplies whichever of width/height the user
    # did not set explicitly, so explicit values win.
    if args.resolution:
        width, height = RESOLUTIONS[args.resolution]
        if not args.width:
            args.width = width
        if not args.height:
            args.height = height
    if (not args.width) or (not args.height):
        raise ValueError("Invalid or missing resolution")
    if not args.video_duration:
        raise ValueError("Missing video duration")
    args.aspect = args.width / args.height
    args.num_frames = int(args.video_duration * args.framerate)
    args.dt = 1.0 / args.num_frames
    do_render(args, writer)
    writer.close()
| [
"lumi.pakkanen@gmail.com"
] | lumi.pakkanen@gmail.com |
9194e098f370acb66cbbfd5d2ba7fb1be66571be | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/streamanalytics/v20211001preview/input.py | b596602c0ad2d333a81941dbb68a5b22a9db6fc3 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,977 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['InputInitArgs', 'Input']
# NOTE: emitted by the Pulumi SDK generator (see the file-header warning);
# regenerate from the provider schema rather than editing by hand.
@pulumi.input_type
class InputInitArgs:
    def __init__(__self__, *,
                 job_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 input_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[Union['ReferenceInputPropertiesArgs', 'StreamInputPropertiesArgs']]] = None):
        """
        The set of arguments for constructing a Input resource.
        :param pulumi.Input[str] job_name: The name of the streaming job.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[str] input_name: The name of the input.
        :param pulumi.Input[str] name: Resource name
        :param pulumi.Input[Union['ReferenceInputPropertiesArgs', 'StreamInputPropertiesArgs']] properties: The properties that are associated with an input. Required on PUT (CreateOrReplace) requests.
        """
        pulumi.set(__self__, "job_name", job_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only recorded when explicitly supplied.
        if input_name is not None:
            pulumi.set(__self__, "input_name", input_name)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)

    @property
    @pulumi.getter(name="jobName")
    def job_name(self) -> pulumi.Input[str]:
        """
        The name of the streaming job.
        """
        return pulumi.get(self, "job_name")

    @job_name.setter
    def job_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "job_name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group. The name is case insensitive.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="inputName")
    def input_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the input.
        """
        return pulumi.get(self, "input_name")

    @input_name.setter
    def input_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "input_name", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Resource name
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input[Union['ReferenceInputPropertiesArgs', 'StreamInputPropertiesArgs']]]:
        """
        The properties that are associated with an input. Required on PUT (CreateOrReplace) requests.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: Optional[pulumi.Input[Union['ReferenceInputPropertiesArgs', 'StreamInputPropertiesArgs']]]):
        pulumi.set(self, "properties", value)
class Input(pulumi.CustomResource):
    # NOTE: emitted by the Pulumi SDK generator (see the file-header warning);
    # regenerate from the provider schema rather than editing by hand.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 input_name: Optional[pulumi.Input[str]] = None,
                 job_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[Union[pulumi.InputType['ReferenceInputPropertiesArgs'], pulumi.InputType['StreamInputPropertiesArgs']]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        An input object, containing all information associated with the named input. All inputs are contained under a streaming job.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] input_name: The name of the input.
        :param pulumi.Input[str] job_name: The name of the streaming job.
        :param pulumi.Input[str] name: Resource name
        :param pulumi.Input[Union[pulumi.InputType['ReferenceInputPropertiesArgs'], pulumi.InputType['StreamInputPropertiesArgs']]] properties: The properties that are associated with an input. Required on PUT (CreateOrReplace) requests.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: InputInitArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        An input object, containing all information associated with the named input. All inputs are contained under a streaming job.

        :param str resource_name: The name of the resource.
        :param InputInitArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch to _internal_init with either the InputInitArgs bundle or
        # the flattened keyword arguments (matching the overloads above).
        resource_args, opts = _utilities.get_resource_args_opts(InputInitArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 input_name: Optional[pulumi.Input[str]] = None,
                 job_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[Union[pulumi.InputType['ReferenceInputPropertiesArgs'], pulumi.InputType['StreamInputPropertiesArgs']]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource": properties must not
        # be supplied in that case, only looked up.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = InputInitArgs.__new__(InputInitArgs)

            __props__.__dict__["input_name"] = input_name
            if job_name is None and not opts.urn:
                raise TypeError("Missing required property 'job_name'")
            __props__.__dict__["job_name"] = job_name
            __props__.__dict__["name"] = name
            __props__.__dict__["properties"] = properties
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["type"] = None
        # Aliases keep URNs created under older API versions resolving here.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:streamanalytics:Input"), pulumi.Alias(type_="azure-native:streamanalytics/v20160301:Input"), pulumi.Alias(type_="azure-native:streamanalytics/v20170401preview:Input"), pulumi.Alias(type_="azure-native:streamanalytics/v20200301:Input")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Input, __self__).__init__(
            'azure-native:streamanalytics/v20211001preview:Input',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Input':
        """
        Get an existing Input resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = InputInitArgs.__new__(InputInitArgs)

        # Output-only placeholders; real values are resolved by the engine.
        __props__.__dict__["name"] = None
        __props__.__dict__["properties"] = None
        __props__.__dict__["type"] = None
        return Input(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[Optional[str]]:
        """
        Resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> pulumi.Output[Any]:
        """
        The properties that are associated with an input. Required on PUT (CreateOrReplace) requests.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type
        """
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
66b835b459e04052f8a621dc21cce79fcd3161d7 | 52877e2b60ed675eb16ea66c7398127294a313d3 | /t2t_bert/utils/wmd/emd_utils.py | 9cf43816e992e02220600b9cacca89a96f46c2b0 | [
"Apache-2.0"
] | permissive | yyht/BERT | 0dc82ea8e141cad4774e638dd7d44f781d77b6c3 | 480c909e0835a455606e829310ff949c9dd23549 | refs/heads/master | 2023-04-07T03:32:28.123608 | 2021-02-17T02:15:58 | 2021-02-17T02:15:58 | 162,232,730 | 37 | 12 | Apache-2.0 | 2022-11-21T21:15:04 | 2018-12-18T05:02:27 | Python | UTF-8 | Python | false | false | 2,167 | py | import numpy as np
from pyemd import emd
def wmd_distance(w2v_model, document1, document2, distance_metric=None):
len_pre_oov1 = len(document1)
len_pre_oov2 = len(document2)
document1 = [token for token in document1 if token in w2v_model]
document2 = [token for token in document2 if token in w2v_model]
diff1 = len_pre_oov1 - len(document1)
diff2 = len_pre_oov2 - len(document2)
dictionary = list(set(document1+document2))
vocab_len = len(dictionary)
if vocab_len == 1:
# Both documents are composed by a single unique token
return 0.0
# Sets for faster look-up.
docset1 = set(document1)
docset2 = set(document2)
# Compute distance matrix.
distance_matrix = np.zeros((vocab_len, vocab_len), dtype=np.double)
for i, t1 in enumerate(dictionary):
if t1 not in docset1:
continue
for j, t2 in enumerate(dictionary):
if t2 not in docset2 or distance_matrix[i, j] != 0.0:
continue
if distance_metric == 'euclidean':
# Compute Euclidean distance between word vectors.
euclidean_distance = sqrt(np.sum((w2v_model[t1] - w2v_model[t2])**2))
distance_matrix[i, j] = distance_matrix[j, i] = euclidean_distance
elif distance_metric == 'cosine':
t1_norm = np.sqrt(np.sum(np.power((w2v_model[t1]), 2)))
t2_norm = np.sqrt(np.sum(np.power((w2v_model[t2]), 2)))
cos_distance = np.sum(w2v_model[t1]*w2v_model[t2]) / (t1_norm*t2_norm+1e-10)
distance_matrix[i, j] = distance_matrix[j, i] = 1 - cos_distance
else:
euclidean_distance = np.sqrt(np.sum((w2v_model[t1] - w2v_model[t2])**2))
distance_matrix[i, j] = distance_matrix[j, i] = euclidean_distance
if np.sum(distance_matrix) == 0.0:
# `emd` gets stuck if the distance matrix contains only zeros.
return 1e-10
keys = dict((e[1], e[0]) for e in enumerate(dictionary))
def nbow(document):
d = np.zeros(vocab_len, dtype=np.double)
for word in document:
d[keys[word]] += 1
doc_len = len(document)
for idx, freq in enumerate(d):
d[idx] = freq / float(doc_len) # Normalized word frequencies.
return d
# Compute nBOW representation of documents.
d1 = nbow(document1)
d2 = nbow(document2)
return emd(d1, d2, distance_matrix) | [
"albert.xht@alibaba-inc.com"
] | albert.xht@alibaba-inc.com |
28c9e7a82531a49e4999185788a456a178e709e1 | e585c3a61b830d3c24a8cec8343d262c84c724e7 | /CDocente/page_objects/certificados/certificado.py | 2f5e659cdb2509b4cb1c522eee4d2652a84b9a52 | [] | no_license | Valupiruiz/AutomationPHP | bb0728b2b6508b017c133a7d560a652033adeaf4 | 9a92634ac9f5b27e46723294f9a4cc83a1f99252 | refs/heads/master | 2023-01-18T17:27:57.819270 | 2020-11-27T15:04:49 | 2020-11-27T15:04:49 | 310,594,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | from page_objects.base_page import BasePage
from .locators import CertificadoLocator
from utils.file_utils import FileUtils
import time
from selenium.common.exceptions import TimeoutException
class Certificado(BasePage):
    """Page object for the certificate-creation form."""

    def __init__(self, driver):
        super().__init__(driver)
        self.__locators = CertificadoLocator()

    def completar_info_basica(self, img, fecha):
        """Fill in the certificate's basic info: date, image and T&C consent.

        Args:
            img: path of the certificate image, selected via the native
                OS file dialog.
            fecha: certificate date string injected into the date input.
        """
        # Fixed waits for the page to settle.
        # NOTE(review): prefer explicit/conditional waits over sleeps.
        time.sleep(5)
        # The date field is set via jQuery because it is driven by a picker
        # widget rather than plain keyboard input.
        time.sleep(5)
        self.driver.execute_script(
            '$("#fecha_certificado").val("'+fecha+'")')
        time.sleep(5)
        self.find_element(self.__locators.AGREGAR_IMG_BTN).click()
        # Drive the native file-picker dialog (outside the browser DOM).
        FileUtils.seleccionar_img_gui(img)
        self.find_element(self.__locators.TERMIN_CONDIC_INP).click()
        self.find_element(self.__locators.ACEPTAR_BTN).click()
        # A confirmation/advisory dialog only appears sometimes; if its
        # button never shows up, ignore the lookup timeout and continue.
        try:
            self.find_element(self.__locators.ACEPTAR_ADV_BTN).click()
        except TimeoutException:
            pass
| [
"tomasmoreira04@gmail.com"
] | tomasmoreira04@gmail.com |
965cb0a1756aaadfefc1748a36c0fbcf6e13f0e5 | 595d901410872617023f773a6dbe66a174187c42 | /DL/Avanzado/Vision-Transformer/model.py | 66c2689836cdda5250d8bbda32997a8963b935bb | [] | no_license | HiroForYou/Deep-Learning-Collection | 70c3b4405bd0f733aa946b71be0292a497bbb947 | 2b199692739fac8929eb144f9556af544f4eb2ac | refs/heads/main | 2023-03-26T21:57:23.581940 | 2021-03-19T20:12:28 | 2021-03-19T20:12:28 | 346,814,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,343 | py | import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_addons as tfa
from dataset import dataset
import matplotlib.pyplot as plt
# Hyper-parameters for the model
num_classes = 10  # CIFAR10
input_original_shape = (32, 32, 3)
image_size = 72  # input images are resized to this size
patch_size = 6  # size of the patches extracted from the input images
num_patches = (image_size // patch_size) ** 2  # (72 // 6)**2 = 144 patches per image
projection_dim = 64
num_heads = 4
transformer_units = [
    projection_dim * 2,
    projection_dim,
]  # sizes of the transformer MLP layers
transformer_layers = 8
mlp_head_units = [2048, 1024]  # sizes of the final classifier's dense layers
def mlp(x, hidden_units, dropout_rate):
    """Apply a stack of GELU Dense layers, each followed by dropout.

    `hidden_units` lists the width of each Dense layer in order; the same
    `dropout_rate` is used after every layer.  Returns the output tensor.
    """
    for width in hidden_units:
        x = layers.Dense(width, activation=tfa.activations.gelu)(x)
        x = layers.Dropout(dropout_rate)(x)
    return x
class Patches(layers.Layer):
    """Keras layer that splits a batch of images into flattened square patches."""

    def __init__(self, patch_size):
        super(Patches, self).__init__()
        self.patch_size = patch_size  # side length (in pixels) of each square patch

    def call(self, images):
        # images: batched image tensor; assumed NHWC as required by
        # tf.image.extract_patches -- TODO confirm at the call site.
        batch_size = tf.shape(images)[0]
        # Extract non-overlapping patch_size x patch_size windows
        # (strides equal the window size, so patches do not overlap).
        patches = tf.image.extract_patches(
            images=images,
            sizes=[1, self.patch_size, self.patch_size, 1],
            strides=[1, self.patch_size, self.patch_size, 1],
            rates=[1, 1, 1, 1],
            padding="VALID",
        )
        patch_dims = patches.shape[-1]
        # Flatten the spatial patch grid into one patches axis per image.
        patches = tf.reshape(patches, [batch_size, -1, patch_dims])
        return patches
class PatchEncoder(layers.Layer):
    """Linearly projects each patch and adds a learned position embedding."""

    def __init__(self, num_patches, projection_dim):
        super(PatchEncoder, self).__init__()
        self.num_patches = num_patches
        # Dense projection of each flattened patch to projection_dim.
        self.projection = layers.Dense(units=projection_dim)
        # One learnable embedding per patch position.
        self.position_embedding = layers.Embedding(
            input_dim=num_patches, output_dim=projection_dim
        )

    def call(self, patch):
        # Position indices 0 .. num_patches - 1, one per patch.
        positions = tf.range(start=0, limit=self.num_patches, delta=1)
        encoded = self.projection(patch) + self.position_embedding(positions)
        return encoded
def create_vit_classifier(data_augmentation):
    """Build the ViT classifier as a Keras functional model.

    Pipeline: augment -> patchify -> encode patches -> `transformer_layers`
    transformer blocks -> flatten -> MLP head -> class logits.
    """
    inputs = layers.Input(shape=input_original_shape)
    # Data augmentation
    augmented = data_augmentation(inputs)
    # Create the patches
    patches = Patches(patch_size)(augmented)
    # Encode the patches
    encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
    # Stack multiple Transformer blocks
    for _ in range(transformer_layers):
        # Layer normalization 1.
        x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
        # Multi-head self-attention.  The commented variant below uses the
        # built-in Keras layer, which needs TF >= 2.4; the tfa
        # implementation is used instead.
        '''
        # solo soportado para TF 2.4
        attention_output = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=projection_dim, dropout=0.1
        )(x1, x1)
        '''
        mha = tfa.layers.MultiHeadAttention(
            head_size=projection_dim, num_heads=num_heads, dropout=0.1
        )
        attention_output = mha([x1, x1])
        # Skip connection 1.
        x2 = layers.Add()([attention_output, encoded_patches])
        # Layer normalization 2.
        x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
        # MLP.
        x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
        # Skip connection 2.
        encoded_patches = layers.Add()([x3, x2])
    # Flatten the per-patch representations into a single vector per image
    # (shape [batch_size, num_patches * projection_dim]).
    representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
    representation = layers.Flatten()(representation)
    representation = layers.Dropout(0.5)(representation)
    # MLP classification head.
    features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.5)
    # Class logits (the final Dense has no activation).
    logits = layers.Dense(num_classes)(features)
    # Assemble the Keras model.
    model = keras.Model(inputs=inputs, outputs=logits)
    return model
if __name__ == "__main__":
    (x_train, _, _, _), data_augmentation = dataset(image_size=image_size)
    model = create_vit_classifier(data_augmentation)

    # Visual sanity check: show one training image and its extracted
    # patches.  (Printed text is user-facing output, kept in Spanish.)
    print("\n\nComprobando funcionamiento de los parches...")
    plt.figure(figsize=(4, 4))
    image = x_train[np.random.choice(range(x_train.shape[0]))]
    plt.imshow(image.astype("uint8"))
    plt.axis("off")
    resized_image = tf.image.resize(
        tf.convert_to_tensor([image]), size=(image_size, image_size)
    )
    patches = Patches(patch_size)(resized_image)
    print(f"Tamaño de la imagen: {image_size} X {image_size}")
    print(f"Tamaño del parche: {patch_size} X {patch_size}")
    print(f"Parche por imagen: {patches.shape[1]}")
    print(f"Elementos por parche: {patches.shape[-1]}")
    # Render the patch grid: sqrt(num_patches) patches per side.
    n = int(np.sqrt(patches.shape[1]))
    plt.figure(figsize=(4, 4))
    for i, patch in enumerate(patches[0]):
        ax = plt.subplot(n, n, i + 1)
        patch_img = tf.reshape(patch, (patch_size, patch_size, 3))
        plt.imshow(patch_img.numpy().astype("uint8"))
        plt.axis("off")
    plt.show()

    # Smoke-test a forward pass on one random input tensor.
    print("Comprobando funcionamiento de ViT_Classifier...")
    input_tensor = tf.random.normal([1, 32, 32, 3])
    output_tensor = model.predict(input_tensor)
    print(input_tensor, end="\n\n")
    print(output_tensor, end="\n")
| [
"csanchezs@uni.pe"
] | csanchezs@uni.pe |
09c8465762b44641a8cc4519d5f269a6dc59a91c | 9f9082b2d84da1ade9952c829b8ec99e23db2b98 | /server/fandogh/user/migrations/0001_initial.py | 82beed1205d25f0e08a64e9190376d7f90145cb6 | [
"MIT"
] | permissive | RezaHaidari/fandogh | 384c79fe7eb26e3a7e7f4bf4597e99fa90227921 | 6173ab9dee0e5b9756edf31149aad9af0e0d3564 | refs/heads/master | 2020-03-22T22:53:09.004039 | 2018-07-09T11:36:26 | 2018-07-09T11:36:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # Generated by Django 2.0.4 on 2018-05-11 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the EarlyAccessRequest table
    (auto primary key plus a unique email field)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='EarlyAccessRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254, unique=True)),
            ],
        ),
    ]
| [
"soroosh.sarabadani@gmail.com"
] | soroosh.sarabadani@gmail.com |
d664dec3a87d05a323144383c5556092f7f21f1b | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/merra882/202-tideGauge.py | 4b1714fee2523770b8179de06ec5eb94a4ef4150 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,041 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a multiple
linear regression model by using the KFOLD method
@author: Michael Tadesse
"""
import os
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
from datetime import datetime
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validate():
    """Run 10-fold cross-validation of a PCA + linear-regression surge model.

    For each tide gauge (tg) in the chosen index range of ``dir_in``:
      1. Load the lagged MERRA predictor CSV and add squared/cubed wind terms.
      2. Standardize the predictors and merge them with the daily-max surge
         series on their common dates.
      3. Reduce the predictors with PCA, retaining 95% of the variance.
      4. Fit/evaluate a linear regression with 10-fold cross-validation,
         keeping the Pearson correlation and RMSE of folds whose correlation
         is significant (p < 0.05).
      5. Append the per-gauge summary row to a CSV written in ``dir_out``
         (re-saved after every gauge so an interruption loses little work).

    Side effects: reads/writes files under the hard-coded lustre paths below
    and changes the process working directory. Returns None.
    """
    # defining directories
    dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
    dir_out = "/lustre/fs0/home/mtadesse/merraLRValidation"
    surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"

    # cd to the lagged predictors directory
    os.chdir(dir_in)

    # tide-gauge slice to process: positions in the directory listing
    # NOTE(review): os.listdir() order is arbitrary on some filesystems —
    # presumably stable here; confirm if reproducibility across runs matters.
    x = 202
    y = 203

    # empty dataframe accumulating one validation row per gauge
    df = pd.DataFrame(columns=['tg', 'lon', 'lat', 'num_year',
                               'num_95pcs', 'corrn', 'rmse'])

    # looping through tide gauges
    for tg in range(x, y):

        os.chdir(dir_in)
        tg_name = os.listdir()[tg]
        print(tg, tg_name)

        ##########################################
        # check if this tg is already taken care of
        ##########################################
        # BUGFIX: was `return "file already analyzed!"`, which aborted ALL
        # remaining gauges; skipping just this gauge is the intended behavior.
        os.chdir(dir_out)
        if os.path.isfile(tg_name):
            print("file already analyzed!")
            continue
        os.chdir(dir_in)

        # load predictor
        pred = pd.read_csv(tg_name)
        pred.drop('Unnamed: 0', axis=1, inplace=True)

        # add squared and cubed wind terms (as in WPI model)
        pickTerms = lambda c: c.startswith('wnd')
        wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
        wnd_sqr = pred[wndTerms]**2
        wnd_cbd = pred[wndTerms]**3
        pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis=1)

        # standardize predictor data (zero mean, unit variance per column)
        dat = pred.iloc[:, 1:]
        scaler = StandardScaler()
        print(scaler.fit(dat))
        dat_standardized = pd.DataFrame(scaler.transform(dat),
                                        columns=dat.columns)
        pred_standardized = pd.concat([pred['date'], dat_standardized], axis=1)

        # load surge data
        os.chdir(surge_path)
        surge = pd.read_csv(tg_name)
        surge.drop('Unnamed: 0', axis=1, inplace=True)

        # remove duplicated surge rows and renumber
        surge.drop(surge[surge['ymd'].duplicated()].index, axis=0, inplace=True)
        surge.reset_index(drop=True, inplace=True)

        # adjust surge time format ('YYYY-MM-DD') to match that of pred
        # ('YYYY-MM-DD HH:MM:SS' strings)
        time_str = lambda s: str(datetime.strptime(s, '%Y-%m-%d'))
        surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])),
                                  columns=['date'])
        time_stamp = lambda s: (datetime.strptime(s, '%Y-%m-%d %H:%M:%S'))
        surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]],
                              axis=1)

        # merge predictors and surge (keeping only 'date' and 'surge' from
        # the surge side) to find their common time frame
        pred_surge = pd.merge(pred_standardized, surge_new.iloc[:, :2],
                              on='date', how='right')
        pred_surge.sort_values(by='date', inplace=True)

        # find rows that have nans and remove them
        row_nan = pred_surge[pred_surge.isna().any(axis=1)]
        pred_surge.drop(row_nan.index, axis=0, inplace=True)
        pred_surge.reset_index(drop=True, inplace=True)

        # in case pred and surge don't overlap at all
        if pred_surge.shape[0] == 0:
            print('-'*80)
            # BUGFIX: original `'don''t'` was two adjacent literals that
            # printed "dont" — the apostrophe needs escaping, not doubling.
            print("Predictors and Surge don't overlap")
            print('-'*80)
            continue

        # convert the date strings to real datetimes for the span computation
        pred_surge['date'] = pd.DataFrame(list(map(time_stamp,
                                                   pred_surge['date'])),
                                          columns=['date'])

        # prepare data for training/testing: all predictor columns
        # (everything between 'date' and 'surge') vs. the surge target
        X = pred_surge.iloc[:, 1:-1]
        y = pd.DataFrame(pred_surge['surge'])
        y = y.reset_index(drop=True)

        # apply PCA, keeping components explaining 95% of the variance
        pca = PCA(.95)
        pca.fit(X)
        X_pca = pca.transform(X)

        # apply 10 fold cross validation
        # BUGFIX: `random_state=29` without shuffle=True raises ValueError in
        # sklearn >= 0.24; shuffle defaults to False so the splits were
        # deterministic anyway — dropping random_state preserves behavior.
        kf = KFold(n_splits=10)
        metric_corr = []
        metric_rmse = []
        for train_index, test_index in kf.split(X):
            X_train, X_test = X_pca[train_index], X_pca[test_index]
            y_train, y_test = y['surge'][train_index], y['surge'][test_index]

            # train regression model
            lm = LinearRegression()
            lm.fit(X_train, y_train)

            # predictions
            predictions = lm.predict(X_test)

            # keep only folds with a significant correlation (p < 0.05)
            if stats.pearsonr(y_test, predictions)[1] >= 0.05:
                print("insignificant correlation!")
                continue
            else:
                print(stats.pearsonr(y_test, predictions))
                metric_corr.append(stats.pearsonr(y_test, predictions)[0])
                print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
                metric_rmse.append(
                    np.sqrt(metrics.mean_squared_error(y_test, predictions)))

        # number of years used to train/test model
        num_years = (pred_surge['date'][pred_surge.shape[0]-1] -
                     pred_surge['date'][0]).days/365
        longitude = surge['lon'][0]
        latitude = surge['lat'][0]
        num_pc = X_pca.shape[1]  # number of principal components retained

        # guard: np.mean([]) warns and yields nan when every fold was
        # insignificant — make the nan explicit and warning-free
        corr = np.mean(metric_corr) if metric_corr else np.nan
        rmse = np.mean(metric_rmse) if metric_rmse else np.nan

        print('num_year = ', num_years, ' num_pc = ', num_pc,
              'avg_corr = ', corr, ' - avg_rmse (m) = ', rmse, '\n')

        # append this gauge's summary row
        new_df = pd.DataFrame([tg_name, longitude, latitude, num_years,
                               num_pc, corr, rmse]).T
        new_df.columns = ['tg', 'lon', 'lat', 'num_year',
                          'num_95pcs', 'corrn', 'rmse']
        df = pd.concat([df, new_df], axis=0)

        # save df as csv - in case of interruption
        os.chdir(dir_out)
        df.to_csv(tg_name)

        # cd back to dir_in for the next gauge
        os.chdir(dir_in)
# run the validation only when executed as a script, not when imported
if __name__ == "__main__":
    validate()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.