| blob_id: string (40 chars) | directory_id: string (40 chars) | path: string (2 to 616 chars) | content_id: string (40 chars) | detected_licenses: list (0 to 69 items) | license_type: string (2 classes) | repo_name: string (5 to 118 chars) | snapshot_id: string (40 chars) | revision_id: string (40 chars) | branch_name: string (4 to 63 chars) | visit_date: timestamp[us] | revision_date: timestamp[us] | committer_date: timestamp[us] | github_id: int64 (2.91k to 686M, nullable) | star_events_count: int64 (0 to 209k) | fork_events_count: int64 (0 to 110k) | gha_license_id: string (23 classes) | gha_event_created_at: timestamp[us] | gha_created_at: timestamp[us] | gha_language: string (213 classes) | src_encoding: string (30 classes) | language: string (1 class) | is_vendor: bool (2 classes) | is_generated: bool (2 classes) | length_bytes: int64 (2 to 10.3M) | extension: string (246 classes) | content: string (2 to 10.3M chars) | authors: list (1 item) | author_id: string (0 to 212 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a008c87e4d730613670a35b40681026f798ce0dd
|
4fb4411ffd913820bffe56adac24c5968dec8774
|
/wrangle_act.py
|
638e0e1bd25295d89b43edcda8e6de0c3e911e96
|
[] |
no_license
|
lizzie11212/Udacity-wrangle-twitter-project
|
3082322c7a9ca1c1897bf5d2e8c8de830084a1b5
|
6b13a31d1fc639cc12be7b3ed0a89c6cb773aade
|
refs/heads/master
| 2021-10-28T06:24:12.763068
| 2019-04-22T12:55:32
| 2019-04-22T12:55:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,230
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # DATA WRANGLE PROJECT
# ## Gathering Data
# In[2]:
import pandas as pd
import requests
import json
import tweepy
import numpy as np
# Gather csv data
# In[2]:
df_1=pd.read_csv('twitter-archive-enhanced.csv')
# Gather tsv data
# In[3]:
r = requests.get('https://d17h27t6h515a5.cloudfront.net/topher/2017/August/599fd2ad_image-predictions/image-predictions.tsv', auth=('user', 'pass'))
# In[4]:
r.headers['content-type']
# In[6]:
with open('predict.tsv', 'wb') as f:
f.write(r.content)
# In[3]:
df_2=pd.read_csv('predict.tsv',sep='\t')
# Gather twitter data by API
# In[22]:
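# Note: the Twitter API credentials (consumer_key, consumer_secret, access_token, access_secret) are not defined in this notebook and must be supplied before this cell is run.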
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth,wait_on_rate_limit=True,wait_on_rate_limit_notify=True)
# In[23]:
count = 0
fails_dict = {}
# In[24]:
tweet_ids = df_1.tweet_id.values
len(tweet_ids)
# In[26]:
with open('tweet_json.txt', 'w') as outfile:
# This loop will likely take 20-30 minutes to run because of Twitter's rate limit
for tweet_id in tweet_ids:
count += 1
print(str(count) + ": " + str(tweet_id))
try:
tweet = api.get_status(tweet_id, tweet_mode='extended')
print("Success")
json.dump(tweet._json, outfile)
outfile.write('\n')
except tweepy.TweepError as e:
print("Fail")
fails_dict[tweet_id] = e
pass
print(fails_dict)
# In[4]:
import json
df_list=[]
# In[5]:
with open('tweet_json.txt') as f:
for line in f:
data=json.loads(line)
tweet_id=data["id_str"]
retweet_count = data['retweet_count']
favorite_count = data['favorite_count']
df_list.append({'tweet_id' :tweet_id ,'retweet_count': retweet_count,'favorite_count': favorite_count})
# In[6]:
df_3=pd.DataFrame(df_list)
# ## Assessing Data
# In[10]:
df_1.sample(5)
# In[11]:
df_1.describe()
# In[12]:
df_1.info()
# In[22]:
df_1[df_1.duplicated()]
# In[24]:
df_1.rating_numerator.value_counts()
# In[26]:
df_1.rating_denominator.value_counts()
# In[16]:
df_2.sample(5)
# In[13]:
df_2.describe()
# In[14]:
df_2.info()
# In[23]:
df_2[df_2.duplicated()]
# In[12]:
df_3.sample(5)
# In[13]:
df_3.describe()
# In[20]:
df_3.info()
# In[91]:
df_1_clean.iloc[516].expanded_urls
# In[94]:
df_1_clean.iloc[342].text
# In[119]:
df_1_clean[~df_1_clean.expanded_urls.str.contains('(?:http[s]?://twitter.com/|https://vine.co)',na=False)]
# In[114]:
df_1_clean.iloc[406].text
# In[124]:
df_1_clean[df_1_clean['expanded_urls'].isnull()]
# In[130]:
df_1_urls=df_1_clean[~df_1_clean.expanded_urls.str.contains('(?:http[s]?://twitter.com/|https://vine.co)',na=False)]
# In[136]:
df_1_urls.drop(df_1_urls[df_1_urls['expanded_urls'].isnull()].index,axis=0)
# ### Quality
# 1. Column timestamp of df_1 is not datetime type.
# 2. The Source column of df_1 is extracted directly from html and contains extra html tags.
# 3. Tweet_id column of df_1, df_2 is integer type.
# 4. Rating denominator is extracted from text and contains some wrong values.
# 5. Rating numerator is extracted from text and contains some wrong values.
# 6. Some rows have URLs that are not from twitter.com or vine.co (or are None), showing those tweets are sharing news and are not rating dogs.
# 7. After creating the predict_breed column with the final predicted dog breed in df_2, the text contains '_' separators and mixes lowercase and title case.
# 8. Df_1 contains retweets.
#
# ### Tidiness
# 1. doggo, floofer, pupper, puppo columns in df_1 should be combined into a single column, as they represent one variable that identifies the stage of the dog.
# 2. Information about one type of observational unit (tweets) is spread across three different files/dataframes. So these three dataframes should be merged as they are part of the same observational unit.
# ## Cleaning Data
# Make copies of datasets
# In[8]:
df_1_clean=df_1.copy()
df_2_clean=df_2.copy()
df_3_clean=df_3.copy()
# ### Quality
# 1. Column timestamp of df_1 is not datetime type.
# In[9]:
df_1_clean.timestamp = pd.to_datetime(df_1_clean.timestamp)
# Test
# In[10]:
df_1_clean.head()
# 2. The Source column of df_1 is extracted directly from html and contains extra html tags.
# Use re.split to get text source from tags
#
# In[11]:
import re
df_1_clean['source']= df_1_clean['source'].apply(lambda x: re.split('[><]',x)[-3])
# test
# In[10]:
df_1_clean['source'].value_counts()
# 3. Tweet_id column of df_1, df_2 is integer type.
# In[12]:
df_1_clean.tweet_id = df_1.tweet_id.astype(str)
df_2_clean.tweet_id = df_2.tweet_id.astype(str)
# 4. Rating denominator is extracted from text and contains some wrong values.
# In[22]:
df_1_clean['rating']=df_1_clean['text'].str.extract(r'((?:\d+\.)?\d+/10)', expand=False)
#this code extracts the rating with an optional decimal numerator and a denominator of 10
# In[23]:
rating = df_1_clean['rating'].str.split('/', expand=True)
rating.columns = ['fix_numerator', 'fix_denominator']
df_1_clean['fix_numerator'] = rating['fix_numerator'].astype(float)
df_1_clean['fix_denominator'] = rating['fix_denominator'].astype(float)
#split numerator and denominator from the extracted rating by '/' into the fix_ columns used below
# In[21]:
df_1_clean.info()
#check for column and type
# In[24]:
df_1_clean.fix_numerator=pd.to_numeric(df_1_clean.fix_numerator, errors='coerce',downcast='signed')
df_1_clean.fix_denominator=pd.to_numeric(df_1_clean.fix_denominator, errors='coerce',downcast='signed')
#Change fix_numerator and fix_denominator extracted to integer type for comparing with raw rating_numerator
#and rating_denominator
# In[25]:
df_1_clean[df_1_clean['rating_denominator'] != df_1_clean['fix_denominator']]
#Find denominators which are not 10
# In[33]:
df_1_clean.iloc[45].text
# In[26]:
df_1_clean.iloc[45].fix_numerator
# I would like to change the rating_denominator of these rows to 10, matching the fix_denominator column
# In[27]:
df_1_clean.loc[[313,784,1068,1165,1202,1662,2335],'rating_denominator']=10
# Test
# In[28]:
df_1_clean[df_1_clean['rating_denominator'] != df_1_clean['fix_denominator']]
# 5. Rating numerator is extracted from text and contains some wrong values.
# In[29]:
df_1_clean[df_1_clean['rating_numerator'] != df_1_clean['fix_numerator']]
# Check for raw rating_numerator values differing from the extracted fix_numerator
# Change all non-null differing rating_numerator values to equal fix_numerator
# In[30]:
df_1_clean.loc[45,'rating_numerator']=13.5
# In[31]:
df_1_clean.loc[45]['rating_numerator']
# Check this decimal value
# In[32]:
df_1_clean.loc[340,'rating_numerator']=9.75
# In[33]:
df_1_clean.loc[313,'rating_numerator']=13
# In[34]:
df_1_clean.loc[695,'rating_numerator']=9.75
# In[35]:
df_1_clean.loc[763,'rating_numerator']=11.27
# In[36]:
df_1_clean.loc[784,'rating_numerator']=14
# In[37]:
df_1_clean.loc[1068,'rating_numerator']=14
# In[38]:
df_1_clean.loc[1165,'rating_numerator']=13
# In[39]:
df_1_clean.loc[1202,'rating_numerator']=11
# In[40]:
df_1_clean.loc[1662,'rating_numerator']=10
# In[41]:
df_1_clean.loc[1689,'rating_numerator']=9.50
# In[42]:
df_1_clean.loc[1712,'rating_numerator']=11.26
# In[43]:
df_1_clean.loc[2335,'rating_numerator']=9
# Test the remaining differences
# In[44]:
df_1_clean[df_1_clean['rating_numerator'] != df_1_clean['fix_numerator']]
# 6. Some rows have URLs that are not from twitter.com or vine.co (or are None), showing those tweets are sharing news and are not rating dogs.
# As we found in the assessing data part, we will drop these rows
# In[45]:
df_1_clean.drop([335,444,754,885],inplace=True)
# We will create a predict_breed function that takes the dog breed with the highest confidence among the 3 predictions
# In[46]:
# Function for finding the biggest value of 3 values
def bigger(a,b):
if a>=b:
return a
else:
return b
def biggest(a ,b,c):
return bigger(bigger(a,b),c)
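# Note: Python's built-in max(a, b, c) would give the same result as biggest(a, b, c).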
# In[47]:
# Function for creating the predict_breed
def breed(row):
if row.p1_dog == True or row.p2_dog == True or row.p3_dog == True:
# Check that at least one of the predictions is a dog breed
if row.p1_dog == True:
# If first prediction is dog
a=row.p1_conf
else:
a=0
if row.p2_dog == True:
# If second prediction is dog
b= row.p2_conf
else:
b= 0
if row.p3_dog == True:
# If third prediction is dog
c = row.p3_conf
else:
c = 0
d =biggest(a,b,c)
# Find the biggest confidence of the three and return the corresponding prediction
if row.p1_conf == d:
return row.p1
elif row.p2_conf == d:
return row.p2
elif row.p3_dog == True:
return row.p3
else:
return None
else:
# None of the predictions is a dog breed, so return None
return None
# In[48]:
df_2_clean.loc[:, 'predict_breed'] = df_2_clean.apply(breed, axis = 1)
# Apply the function
# Check table
# In[49]:
df_2_clean.head()
# 7. After creating the predict_breed column with the final predicted dog breed in df_2, the text contains '_' separators and mixes lowercase and title case.
# In[50]:
df_2_clean['predict_breed']=df_2_clean['predict_breed'].apply(lambda x: x.replace('_',' ') if x else None)
# Apply replace() to change '_' to a space in the predict_breed column
# In[51]:
df_2_clean['predict_breed']=df_2_clean['predict_breed'].str.title()
# predict_breed mixes lowercase and title case text. Change all values to title case
# Check
# In[52]:
df_2_clean.head()
# 8. Df_1 contains retweets.
# We have to exclude the rows that have a non-null value in the retweeted_status_id,
# retweeted_status_user_id and retweeted_status_timestamp columns (i.e. retweets)
#
# In[53]:
df_1_clean['retweeted_status_id']=df_1_clean['retweeted_status_id'].astype(str)
# Change the retweeted_status_id column to string type so null values become 'nan'
df_1_clean=df_1_clean[df_1_clean['retweeted_status_id'] == 'nan']
# Check
# In[54]:
df_1_clean.info()
# ### Tidiness
# 1. doggo, floofer, pupper, puppo columns in df_1 should be combined into a single column, as they represent one variable that identifies the stage of the dog.
# In[55]:
#Create a function (similar in spirit to .melt) to make a new column
#that gives the stage of the dog, with priority in the order doggo, puppo, pupper, floofer
def stage_dog(row):
if row.doggo !='None':
return row.doggo
elif row.puppo !='None':
return row.puppo
elif row.pupper !='None':
return row.pupper
elif row.floofer !='None':
return row.floofer
else:
return 'None'
# In[56]:
df_1_clean.loc[:,'stage_of_dog']=df_1_clean.apply(stage_dog,axis=1)
# Apply the function
# Check
# In[60]:
df_1_clean.sample(5)
# Test
# In[102]:
df_1_clean.stage_of_dog.value_counts()
# Count value of stage_of_dog column
# Check for each column
# In[103]:
len(df_1_clean[df_1_clean.puppo != 'None'])
# In[107]:
df_1_clean[df_1_clean.puppo != 'None']
# The value is 25; by priority, we consider this dog grew up and became a doggo
# In[104]:
len(df_1_clean[df_1_clean.doggo != 'None'])
# In[105]:
len(df_1_clean[df_1_clean.pupper != 'None'])
# The value is 233; by priority, we consider these dogs grew up and became doggos
# In[106]:
len(df_1_clean[df_1_clean.floofer != 'None'])
# 2. Information about one type of observational unit (tweets) is spread across three different files/dataframes. So these three dataframes should be merged as they are part of the same observational unit.
# In[66]:
df_final = pd.merge(df_1_clean,df_2_clean, on ='tweet_id', how = 'left')
# Merge df_1 and df_2 on tweet_id, keeping df_1 as the left side
# In[67]:
df_final=pd.merge(df_final,df_3_clean, on= 'tweet_id',how='left')
# Merge with the df_3
# In[68]:
df_final=df_final.drop(['retweeted_status_id','retweeted_status_user_id','retweeted_status_timestamp',
'fix_numerator','fix_denominator','p1','p1_conf','p1_dog',
'p2','p2_conf','p2_dog','p3','p3_conf','p3_dog','rating',
'doggo','floofer','pupper','puppo'],axis=1)
# Drop some unnecessary columns
# Check
# In[69]:
df_final.head()
# Create a column providing the final rating as a decimal
# In[70]:
df_final['rating']= df_final.rating_numerator/df_final.rating_denominator
#Divide rating_numerator by rating_denominator
# Check
# In[71]:
df_final.info()
# Missing values in the favorite_count and retweet_count columns are due to deleted tweets. We will delete those rows.
# In[72]:
df_final=df_final[~df_final['favorite_count'].isnull()]
# Delete null value rows
# We will change the in_reply_to_status_id and in_reply_to_user_id columns to string type and favorite_count, retweet_count, img_num to integer type
# In[73]:
df_final.in_reply_to_status_id = df_final.in_reply_to_status_id.astype('str')
df_final.in_reply_to_user_id = df_final.in_reply_to_user_id.astype('str')
df_final.favorite_count = df_final.favorite_count.astype('int')
df_final.retweet_count = df_final.retweet_count.astype('int')
# In[74]:
df_final.img_num = df_final.img_num.fillna(0).astype('int')
# We cannot convert NaN values to integer, so we fill them with 0 first and then convert
# Check
# In[75]:
df_final.info()
# In[76]:
df_final.rating.value_counts()
#Check for outlier values in the rating column
# The values of 0.0, 42.0, 18.2, 66.6, 1.7, 3.428571, 177.6 are outliers.
# In[79]:
df_final=df_final[~df_final['rating'].isin([42.000000,1.700000,18.200000,3.428571,177.600000,66.600000,0.000000])]
#Delete the rows with these values
# Check
# In[80]:
df_final.rating.value_counts()
# The value 3.428571 is still present because the repeating decimal cannot be matched exactly
# In[82]:
24/7
# As we checked rating_denominator against fix_denominator above,
# we find the value 24/7 (it must mean 24 hours, 7 days a week) is 3.428571,
#with tweet_id 810984652412424192
# In[85]:
df_final=df_final[df_final['tweet_id'] != '810984652412424192']
#Delete this tweet
# Check
# In[86]:
df_final.rating.value_counts()
# In[87]:
df_final.info()
# In[88]:
df_final.to_csv('twitter_archive_master.csv', index=False)
# Save the clean dataframe to a csv file
# ## Visualizing Data
# In[1]:
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[3]:
df=pd.read_csv('twitter_archive_master.csv')
# In[91]:
df.head()
# In[92]:
df.rating.mean()
# ### Insight 1
# What is the histogram of rating?
# In[4]:
df.rating.plot(kind='hist')
plt.xlabel('Rating')
plt.title('We Rate Dog rating histogram');
#Get histogram of rating
# It is a left-skewed distribution
# ### Insight 2
# Does the Golden Retriever have a better rating than average?
# In[94]:
df.predict_breed.value_counts()
# Find the most common dog breed
# In[95]:
df[df.predict_breed == 'Golden Retriever']['rating'].mean()
# Find the mean rating of Golden Retrievers to form the hypothesis
# We are interested in testing, by bootstrapping, whether the mean rating of Golden Retrievers is higher than the mean rating of all dogs
# $$H_0: \mu_{Golden} - \mu_0 = 0 $$
# $$H_1: \mu_{Golden} - \mu_0 > 0$$
# In[5]:
diff = []
for i in range (10000):
boot = df.sample(len(df), replace = True)
mean = boot['rating'].mean()
mean_gol = boot[boot.predict_breed == 'Golden Retriever']['rating'].mean()
diff.append(mean_gol - mean)
# Bootstrapping 10,000 samples
# In[6]:
means=np.array(diff)
# In[7]:
plt.hist(means)
plt.axvline(x=0, color = 'red')
plt.title('Difference of means confidence interval')
plt.xlabel('Difference of rating')
plt.ylabel('Sample');
# make a confidence interval
# In[8]:
p_val = (means > 0).mean()
p_val
# p-value for the alternative hypothesis
# ### Insight 3
# In[100]:
import statsmodels.api as sm
# In[101]:
df['intercept'] =1
lm=sm.OLS(df['favorite_count'],df[['rating','intercept']])
results=lm.fit()
results.summary()
# Fit a linear regression of favorite_count on rating
# In[ ]:
# In[ ]:
|
[
"noreply@github.com"
] |
lizzie11212.noreply@github.com
|
dc287432772aac7e03a9e6ea02df746cf8ea6a4a
|
0a0a8eb698b6e063c6ce26f51bc34ac24331cbe3
|
/CVA_function.py
|
0aa37885601483b1f727d9b630568059eb212dd8
|
[] |
no_license
|
mansunosson/CVA_web_app
|
b4a1b402a1c1c759e4d8230504b2ac463e3964d7
|
e056dc89ae19f3e75a03c6df4fa2c4b63af28756
|
refs/heads/main
| 2023-01-22T19:18:30.851932
| 2020-12-08T11:39:23
| 2020-12-08T11:39:23
| 319,596,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,241
|
py
|
# Dependencies
from datetime import date  # needed below for date.today()
# Define CVA function
def CVA(company_name, nominal_value, maturity_date, risk_free_rate):
try:
duration_coef = ((maturity_date-date.today()).days)/365 # Duration as proportion of full year
annual_PD = fetch_pd(company_name) # Call to fetch_pd scrapes the key performance indicators used in pd_model from *REDACTED*
duration_PD = 1-(1-annual_PD)**duration_coef # This calculation of duration probability of default prob assumes flat term structure, see https://www.openriskmanual.org/wiki/How_to_Estimate_Lifetime_PD_from_12_month_PD
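# Worked example with illustrative numbers (not from the app): annual_PD = 0.02 and duration_coef = 0.5 (about six months) give duration_PD = 1-(1-0.02)**0.5, which is about 0.0101, i.e. roughly a 1% default probability over the period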
adj_value = (1-duration_PD)*nominal_value/((1+risk_free_rate)**duration_coef) # This values the credit by discounting the nominal to present value and multiplying by the survival probability (1 - duration_PD), which assumes Loss Given Default = nominal value
return str('Invoice value: ' + str(round(adj_value,1)) + ', annual PD: ' + str(round(annual_PD*100,1)) + '% , duration: '+ str(round(duration_coef*365,0))+ ' days, and duration PD: ' + str(round(duration_PD*100,1)) + '%')
except TypeError:
return str('Could not find parameters to adjust credit valuation, check spelling of company name and/or date')
|
[
"noreply@github.com"
] |
mansunosson.noreply@github.com
|
baf82970afe5564b6c882ea105d694a6a3b5c064
|
b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a
|
/examples/pwr_run/checkpointing/final_3level/random/job73.py
|
15fda2fa56a78dce0fc3e71e9ffec70118c56596
|
[
"MIT"
] |
permissive
|
boringlee24/keras_old
|
3bf7e3ef455dd4262e41248f13c04c071039270e
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
refs/heads/master
| 2021-11-21T03:03:13.656700
| 2021-11-11T21:57:54
| 2021-11-11T21:57:54
| 198,494,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,616
|
py
|
"""
#Trains a VGG network (VGG16/VGG19) on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 128
args_lr = 0.002
args_model = 'vgg19'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_random/' + job_name + '*'
total_epochs = 67
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
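# e.g. a checkpoint saved as '/scratch/li.baol/checkpoint_random/job73_12.h5' yields starting_epoch = 12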
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_random/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
[
"baolin.li1994@gmail.com"
] |
baolin.li1994@gmail.com
|
40408802451c935b6bf42ec1a44edf1dc6e414f1
|
27f8afdbf68908bb48de31a459ee2b5b81ea0837
|
/friday_13th/friday_thirteenth.py
|
ee4b8af2f3cb199af327d091ddd84ab3cceb508e
|
[] |
no_license
|
olhanotolga/python-challenges
|
40e7bd6ed5fc5e891ff50d2b0df1af67f25ccea5
|
62814a3513b6692cad031440f18d901af22f5580
|
refs/heads/main
| 2023-04-05T03:54:28.907678
| 2021-04-05T12:38:40
| 2021-04-05T12:38:40
| 333,558,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
__doc__
import datetime
def valid_date(month, year):
"""
Helper function which validates inputs
"""
if not isinstance(month, int) or not isinstance(year, int):
return False
if month < 1 or month > 12:
return False
return True
def has_friday_13(month, year):
"""
Function which takes 2 arguments as input:
1 — integer between 1 and 12 (month)
2 — 4-digit integer (year)
The function checks whether the given month within the given year contains Friday, the 13th.
Output value: boolean.
"""
if not valid_date(month, year):
return "Enter valid month and year!"
day = 13
# use datetime module to construct a date object with the given arguments
date_obj = datetime.date(year, month, day)
# use the .isoweekday() method to check the day of the week
weekday = date_obj.isoweekday()
# return True if it is Friday, otherwise return False
if weekday == 5:
return True
else:
return False
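# equivalently: return weekday == 5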
def main():
answer1 = has_friday_13(3, 2020) # True
answer2 = has_friday_13(10, 2017) # True
answer3 = has_friday_13(1, 1985) # False
print(answer1)
print(answer2)
print(answer3)
if __name__ == "__main__":
main()
|
[
"olha.halat@gmail.com"
] |
olha.halat@gmail.com
|
7d3186b3cdd57512a8c1a1678e6097c914ddae71
|
dec7befffa1bb509f65c6fb96441b1f68d088cae
|
/nanosv/utils/parse_bam.py
|
c39df14181f7537b99432724da898db24f5c722b
|
[
"MIT"
] |
permissive
|
tongww0312/nanosv
|
0f8d972b1804c270714bff1f1625e250ae8a35a5
|
5f44e31759dc3b88f0ac7918f5e4614f4975e1b5
|
refs/heads/master
| 2021-04-09T15:50:12.676276
| 2018-03-02T12:26:21
| 2018-03-02T12:26:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,454
|
py
|
#!/usr/bin/python
import pysam
import re
import sys
import time
import os
from classes import read as r
from classes import segment as s
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import NanoSV
coverages = []
reads = {}
segments = {}
segmentID = 1
def parse_bam():
"""
Reads the bam file and saves reads and their segments in objects of the Read and Segment classes.
The bam file path is taken from NanoSV.opts_bam.
"""
global sample_name, header, segmentID, bam
sys.stderr.write(time.strftime("%c") + " Busy with parsing bam file...\n")
bam = pysam.AlignmentFile(NanoSV.opts_bam, 'rb')
header = bam.header
if 'HD' in header:
if not header['HD']['SO'] == 'coordinate':
sys.exit('The bam file is not coordinate sorted')
if 'RG' in header:
sample_name = header['RG']['SM']
else:
sample_name = re.sub('(\.sorted)?\.bam$', '', str(NanoSV.opts_bam))
for line in bam:
if line.query_name in reads:
read = reads[line.query_name]
else:
read = r.Read(line.query_name, line.infer_read_length())
reads[line.query_name] = read
if line.flag & 4 or line.mapping_quality < NanoSV.opts_min_mapq:
continue
segment = s.Segment(segmentID, line.query_name, line.flag, line.reference_name, line.reference_start+1, line.mapping_quality,
line.query_alignment_length)
segment.end = line.reference_start + line.reference_length
segment.pid = format(line.get_cigar_stats()[0][7] / segment.length, '.3f')
if line.flag & 16:
if line.cigartuples[-1][0] == 5:
segment.clip = line.cigartuples[-1][1]
else:
segment.clip = 0
if line.cigartuples[0][0] == 5:
segment.clip_2 = line.cigartuples[0][1]
else:
segment.clip_2 = 0
else:
if line.cigartuples[0][0] == 5:
segment.clip = line.cigartuples[0][1]
else:
segment.clip = 0
if line.cigartuples[-1][0] == 5:
segment.clip_2 = line.cigartuples[-1][1]
else:
segment.clip_2 = 0
if float(segment.pid) < NanoSV.opts_min_pid:
continue
read.addSegment(segment)
segments[segmentID] = segment
segmentID += 1
|
[
"m.vanroosmalen-2@umcutrecht.nl"
] |
m.vanroosmalen-2@umcutrecht.nl
|
03ce218a36b3b4dc1f448ee34e9752020300acea
|
0ab9786fc197cd161a986ee6e7a9f04ab166f994
|
/info5.py
|
b7e5dda9c3a99dd19abd7012ea4cec6575734f17
|
[] |
no_license
|
sedmedia/lesson1
|
89ae59b734b07877e8929248780ce66b65979fb9
|
8963db7c72668069df807999d6b80146b1cb3ec7
|
refs/heads/master
| 2021-01-22T01:10:35.505963
| 2017-09-02T15:43:15
| 2017-09-02T15:43:15
| 102,200,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
def get_summ(one, two):
return one + str(two)
answer = get_summ("one",2)
print(answer)
|
[
"sedmedia@gmail.com"
] |
sedmedia@gmail.com
|
2c5d778420e5e739ed6eb09d930c5645cf176299
|
1d1cb14e6d0b2a439348d6677eb9e8e72390d39f
|
/06_Image_Classifier/Video/Image_Classifier.py
|
9176408f26c0944ba186ee81e262ecb7e1a71da6
|
[] |
no_license
|
EduardoFAFernandes/MyDeepLearningIntro
|
2c7b2278ed1cf446c9f3656ae9dd421c22648933
|
a35a43f0690ddfa499097335d9b8aa058d1db021
|
refs/heads/master
| 2021-09-05T06:51:33.479757
| 2018-01-25T01:07:54
| 2018-01-25T01:07:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,188
|
py
|
# coding: utf-8
# # Cat or Dog Image Classifier
# ___
# ## Imports
# Nothing fancy here; most of the imports are standard and frequent in ML
import h5py #handling the dataset
import matplotlib.pyplot as plt #viewing nparrays as images
import numpy as np
import os #handling files and folders
import random
import requests
import string
import tensorflow as tf
import tflearn as tfl
#Layers used in the model
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
#used to import images as nparrays
from PIL import Image
# ___
# ## Some constants
# This is to improve modularity and code readability
# In[2]:
MODEL_NAME = "tflearn_AlexNet"
#Folder Constants
#where data is stored
DATA_PATH = "/media/mrlipdx/F88A16BB8A1675FA/Users/MrLipdx/ml"
#where the images from the dataset are
IMAGE_FOLDER = os.path.join(DATA_PATH, "train")
#where to save the HDF5 files
HDF5_FOLDER = os.path.join(DATA_PATH, "hdf5")
#where to save the trained model
MODELS_FOLDER = os.path.join(DATA_PATH, "model")
#where the current model is located
MODEL_FOLDER = os.path.join(MODELS_FOLDER, MODEL_NAME)
#File Path Constants
#Where to load or save the model to
MODEL_FILE = os.path.join(MODEL_FOLDER, MODEL_NAME)
#text file describing the train dataset
HDF5_TRAIN_INPUT = os.path.join(HDF5_FOLDER, "cats_and_dogs_train.txt")
#train hdf5 file
HDF5_TRAIN = os.path.join(HDF5_FOLDER, "cats_and_dogs_train.hdf5")
#text file describing the test dataset
HDF5_TEST_INPUT = os.path.join(HDF5_FOLDER, "cats_and_dogs_test.txt")
#test hdf5 file
HDF5_TEST = os.path.join(HDF5_FOLDER, "cats_and_dogs_test.hdf5")
#The ids given to cats and dogs
CLASS_IDS = { "c" : 0, "d" : 1 }
#size of the images in the dataset
IMAGE_SHAPE = (296,299)
#total number of images test + train
TOTAL_IMAGES = len(os.listdir(IMAGE_FOLDER))
#how much test percentage do we want from the total images
TEST_PERCENTAGE = 0.2
TRAIN_PERCENTAGE = 1 - TEST_PERCENTAGE
TEST_SIZE = int(TOTAL_IMAGES * TEST_PERCENTAGE)
TRAIN_SIZE = TOTAL_IMAGES - TEST_SIZE
#True if you want to train a new network
#False if you want to load a previously trained network
TRAIN = False
#make sure the notebook is consistent after multiple runs
np.random.seed(42)
# ___
# ## Building the model
def buildModel():
network = input_data(shape=[None, IMAGE_SHAPE[1], IMAGE_SHAPE[0], 3])
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 2, activation='softmax')
network = regression(network, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=0.001)
model = tfl.DNN(network, checkpoint_path = MODEL_FILE,
max_checkpoints = 1, tensorboard_verbose = 0)
return model
# ___
# ## Data
# I'm using an HDF5 database as input to the model. The data is manually partitioned into a test set and a training set. Creating the databases takes a bit of time and ends up using 26.6 GB, so we save them to disk to save time in the future.
if __name__ == '__main__':
#formats the filenames to the required form, "<filepath> <class>\n"
def filenames_to_input(filenames, directory , class_ids):
return "".join(['{} {}\n'.format(
os.path.join(directory, filename), class_ids[filename[0]])
for filename in filenames])
if not os.path.exists(HDF5_FOLDER):
os.makedirs(HDF5_FOLDER)
if not os.path.isfile(HDF5_TEST) or not os.path.isfile(HDF5_TRAIN):
print("Missing one or both datasets.")
print("Creating datasets...")
images = np.array(os.listdir(IMAGE_FOLDER))
#splitting the images into train and test sets
np.random.shuffle(images)
test_images = images[:TEST_SIZE]
train_images = images[TEST_SIZE:]
if not os.path.isfile(HDF5_TEST):
print("\tCreating test HDF5 dataset...")
with open(HDF5_TEST_INPUT, "w") as test_input_file:
test_input_file.write(filenames_to_input(test_images, IMAGE_FOLDER, CLASS_IDS))
tfl.data_utils.build_hdf5_image_dataset(HDF5_TEST_INPUT,
image_shape = IMAGE_SHAPE,
output_path = HDF5_TEST,
categorical_labels = True)
print("\tDone.\n")
if not os.path.isfile(HDF5_TRAIN):
print("\tCreating train HDF5 dataset...")
with open(HDF5_TRAIN_INPUT, "w") as train_input_file:
train_input_file.write(filenames_to_input(train_images, IMAGE_FOLDER, CLASS_IDS))
tfl.data_utils.build_hdf5_image_dataset(HDF5_TRAIN_INPUT,
image_shape = IMAGE_SHAPE,
output_path = HDF5_TRAIN,
categorical_labels = True)
print("\tDone.")
print("Done.")
else:
print("Both datasets present.")
test_dataset = h5py.File(HDF5_TEST, 'r')
x_test = test_dataset['X']
y_test = test_dataset['Y']
train_dataset = h5py.File(HDF5_TRAIN, 'r')
x_train = train_dataset['X']
y_train = train_dataset['Y']
#building the model
model = buildModel()
# ___
# ## Training
# Training the model takes a bit of time; in my case I used my Nvidia GTX 1060 and it still took around 2 hours. I'm not sure why, but it kept crashing because my OS was running out of memory and killing the process. If you have any idea why this happens, please let me know.
#when training crashes I use the following lines to resume from a checkpoint
#in this case the training crashed after step 800
#CHEKPOINT = "-800"
#model.load(MODEL_FILE + CHEKPOINT, weights_only = True)
#print("Model Loaded, evaluating ...")
#print(model.evaluate(x_test, y_test))
model.fit(x_train, y_train, n_epoch = 20, validation_set = (x_test, y_test),
snapshot_step = 200, show_metric = True, run_id = MODEL_NAME)
model.save(MODEL_FILE)
train_dataset.close()
print("The traning was sucessful!")
|
[
"nomeaocalhas3@gmail.com"
] |
nomeaocalhas3@gmail.com
|
9794d76691a1cb5402bfa2d2884e31b6118fb2b8
|
ae9bb7babce2a0349ae932985cf418a03057c670
|
/backend/testcase/test_login.py
|
49c1d1e273d9d9434ffac08bc1ebc80f4ad95454
|
[] |
no_license
|
Veraun/HogwartsSDET17-1
|
d2592fcb4c9c63724c19bcf9edde349ebcd2c8af
|
6648dbfb640b065ff2c76cb6889a8f9e4f124b91
|
refs/heads/main
| 2023-07-02T05:20:32.161248
| 2021-08-06T03:55:13
| 2021-08-06T03:55:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
'''
#!/usr/bin/python3
# -*- coding: utf-8 -*-
@author: wangwei
@project: business-test-pytest
@file: test_login.py
@time: 2021/6/7 15:43
@Email: Warron.Wang
'''
import requests
class TestLogin:
BASE_URL = "http://localhost:5000"
def test_login(self):
# The auth argument carries the credentials to submit, e.g. the username and password
r = requests.get(self.BASE_URL + '/login', auth=("warron",123456))
assert "access_token" in r.json()
r = requests.get(self.BASE_URL + '/login', auth=("warron", 12345678))
assert r.status_code == 401
|
[
"wei1.wang@ximalaya.com"
] |
wei1.wang@ximalaya.com
|
301191562e62a40bdfe255519152332097c6c335
|
ac07052d8da7eb0c0a8189dbdda7dfaeab80de46
|
/craiglist/settings.py
|
379f8350b354b4c854e15306b6877e9b5b3710ca
|
[] |
no_license
|
rashidul738/Craigslist-Data-Scrape
|
1285b47208940cc640f10d5f77c21b07c47e7528
|
440d9a8075249f606dd03a86c7e98da346780fde
|
refs/heads/master
| 2023-09-01T08:47:25.908986
| 2021-10-13T13:05:50
| 2021-10-13T13:05:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,091
|
py
|
# Scrapy settings for craiglist project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'craiglist'
SPIDER_MODULES = ['craiglist.spiders']
NEWSPIDER_MODULE = 'craiglist.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'craiglist (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'craiglist.middlewares.CraiglistSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'craiglist.middlewares.CraiglistDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'craiglist.pipelines.CraiglistPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"rashidul7713@gmail.com"
] |
rashidul7713@gmail.com
|
97db48ad78c8edb9bbcdb9c479ce704e9db0ee90
|
663676e406641b1f4ecbc09e214bb41cab47198b
|
/day16/day16a.py
|
a89564047f7a3e82e94184fb1d9abfa49e190321
|
[] |
no_license
|
yuanxu1990/studyday
|
362055c18a78424cedad73e28858620a66472db2
|
aad0b2b36d06b50b476fcc0972e1ba64747b5b61
|
refs/heads/master
| 2020-09-14T17:19:25.352463
| 2020-04-28T13:34:49
| 2020-04-28T13:34:49
| 223,197,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,950
|
py
|
'''
Built-in functions
'''
# s='sdfsdfsd'
# # lista=[1,2,3,4,5]
# # l2=reversed(lista)
# # sli=slice(1,4,2)
# # print(s[sli])
# # print(lista)
# for i in l2:
# print(i)
#format() adjusts output alignment
# print(format('test','<20'))
# print(format('test','>20'))
# print(format('test','^20'))
# bytearray allows modifying the encoded bytes in place
# s1=bytearray('你好',encoding='utf-8')
# print(s1)
# s1[0]=128
# print(s1)
# memoryview returns a slice as bytes without using extra memory, but converting it to a string still uses memory
# str01='sdfsdfvcc'
# str02=str01[:3]
# l2=memoryview(bytes(str02,encoding='utf-8'))
# print(list(l2))
#conversions
# ord converts a character to its Unicode code point (think of it as the character's position in Unicode)
# chr converts a number to the corresponding Unicode character
# ascii prints the argument as-is if it is ASCII, otherwise escapes it as '\uxxxx'
# print(ord('a'))
# print(ord('1'))
# print(ord('袁'))
#
# print(chr(34945))
#
# print(ascii('袁'))
# print(ascii('A'))
#all(iterable) any(iterable)
#all returns False if any element of the iterable is falsy
#any returns True if any element of the iterable is truthy
# print(all(['a','',123]))
# print(all(['a',123]))
# print(all([1,123]))
# print(all([0,123]))
#
# print(any(['',0,1]))
# print(any({'i'}))
#zip pairs up elements; the inputs should ideally be the same length, otherwise it pairs as many as it can
# l=[12,14,56,55]
# l2=['a','b','c']
# l3={'k1':1,'k2':55}
# for i in zip(l,l2,l3):
# print(i)
#filter: after filtering, the result contains no more elements than before
#filter only selects; it does not change the original values
# def is_odd(x):
# return x%2==1
# ret=filter(is_odd,[1,2,4,5,67,7])
# print(ret)
# print(list(ret))
# rets=[i for i in [1,2,4,5,67,7] if i%2==1]
# print(rets)
#
#
# def is_str(x):
# return type(x)==str
#
# ret01=filter(is_str,['nihao',12,'xu','yuanniu'])
# for j in ret01:
# print(j)
#use filter() to pick out the numbers from 1-100 whose square root is an integer
#[1,4,9,16,25,36,49,64,81,100]
#
# import math
# def sqrt_01(x):
# # x1=math.sqrt(x)%1
# # if x1==0:
# # print(x1)
# # return x
# print(math.sqrt(x))
# return math.sqrt(x)%1==0
#
# # s=sqrt_01(100)
# # print(s)
# rs01=filter(sqrt_01,range(1,101))
# print(list(rs01))
#map keeps the number of elements the same before and after; the values may change
# ret04=map(abs,[1,-9,8,-90])
# print(ret04)
# # for a in ret04:
# # print(a)
#
# ret05=[abs(a) for a in [1,-9,8,-90]]
# print(ret05)
# print(list(ret04))
#difference between sort and sorted
# sort sorts the original list in place and uses no extra memory
# sorted builds a new sorted list, leaving the original unchanged, and uses extra memory
# listb=[1,2,-6,-8,]
# # # listb.sort(key=abs)
# # # print(listb)
# # # listb1=[1,2,-6,-8,10]
# # # print(sorted(listb1,reverse=True,key=abs))
#sort the list by the len of each element
listc=['sdfsdf','524dsf','sdf55666666','abc']
# ret=sorted(listc,key=len)
# print(ret)
def sort_01(x):
ret=sorted(x,key=len)
return ret
print(sort_01(listc))
|
[
"yuanx"
] |
yuanx
|
42218897557be8d5f9ca50928b0b81e75865bbe6
|
3b0d3d1d7abd5f5e0c1c3b23e32c49d2d1e74353
|
/Company/migrations/0005_auto_20190312_1852.py
|
254529524c189175cad6254b436d1dd7600a39cd
|
[] |
no_license
|
deathslayer0311/InvoiceSense
|
0e950f8e64cdc5432e6c73f3810e7bb99f15a138
|
1bfe291412d49c5615f12c6251d2adb3b5c6383e
|
refs/heads/master
| 2022-10-11T05:55:23.343569
| 2019-03-13T01:54:59
| 2019-03-13T01:54:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
# Generated by Django 2.0 on 2019-03-12 22:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Company', '0004_package_items'),
]
operations = [
migrations.AlterModelOptions(
name='company',
options={'verbose_name_plural': 'companies'},
),
]
|
[
"41447166+jdriscoll98@users.noreply.github.com"
] |
41447166+jdriscoll98@users.noreply.github.com
|
be38f6d66128674ad82fccfda3baf95238509e89
|
0faa64fc66d392e2fc54bd9ad2d81587d507c0f6
|
/TotalUsers.py
|
95ebc488690e5f5c57701a25ef7a0157da27dbd8
|
[] |
no_license
|
rohandash06/TwitterTextCapture
|
a0b9eb1bf322ae48b66c028a00a2be2fb1edb713
|
90ce972c39e18a8b0e9881245a575f47611db910
|
refs/heads/master
| 2020-03-30T08:28:06.718221
| 2018-10-05T15:10:02
| 2018-10-05T15:10:02
| 151,017,586
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 23:39:56 2018
@author: rdas3
"""
import operator
def total_Users():
INPUT_FILE = input("Enter the path of the input file located: ")
INPUT_FILE_PATH = INPUT_FILE+ '.txt'
x = input("Enter a number: ")
n = int(x)
OUTFILE = r'C:\Users\rdas3\Desktop\Docs'
OUTPUT_FILE_PATH = OUTFILE + '.txt'
with open (INPUT_FILE_PATH, encoding = "latin-1") as my_File:
tweet=my_File.readlines()
L = {}
for dat in tweet:
fileTemp1 = dat.split()
if fileTemp1[0] in L:
L[fileTemp1[0]] +=1
else:
L[fileTemp1[0]] = 1
L = sorted(L.items(), key = operator.itemgetter(1), reverse = True)
output_File = open(OUTPUT_FILE_PATH, 'w', encoding = 'utf-8')
output_File.write("The top"+ x +"users who have tweeted the most in the timeframe: \n")
for i in range (0,n):
output_File.write("User Name " + L[i][0] + "\n\n")
output_File.close
total_Users()
|
[
"noreply@github.com"
] |
rohandash06.noreply@github.com
|
b378f013bdb52b624af11a7eeae8b5ef0e192c68
|
eca06c429f9157decd9521114290ea4a05924032
|
/api/app/models/base.py
|
9268b50f06dba89235a4db6802cc03de1c452423
|
[] |
no_license
|
papamuziko/sample_airbnb_clone
|
768760bb7376ae6b57b2559a1aba6a45a53f41c6
|
2bb54a15cf6d66a2ebed69825819755a296e4d08
|
refs/heads/master
| 2021-01-20T22:29:30.302882
| 2018-02-12T02:46:36
| 2018-02-12T02:46:36
| 64,175,956
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
import peewee
import datetime
from config import *
# connector to MySQL
database = peewee.MySQLDatabase(host=DATABASE['host'],
user=DATABASE['user'],
password=DATABASE['password'],
database=DATABASE['database'],
port=DATABASE['port'],
charset=DATABASE['charset'])
'''
BaseModel:
- id: primary key
- created_at: datetime when a new resource is created
- updated_at: datetime when a resource is updated (via overloading save method)
'''
class BaseModel(peewee.Model):
id = peewee.PrimaryKeyField(unique=True)
created_at = peewee.DateTimeField(default=datetime.datetime.now, formats="%Y/%m/%d %H:%M:%S")
updated_at = peewee.DateTimeField(default=datetime.datetime.now, formats="%Y/%m/%d %H:%M:%S")
def save(self, *args, **kwargs):
self.updated_at = datetime.datetime.now()
return super(BaseModel, self).save(*args, **kwargs)
class Meta:
database = database
order_by = ('id', )
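# Hypothetical usage sketch (not part of the original file): any concrete model
# inherits id, created_at, updated_at and the MySQL connection from BaseModel;
# calling save() on an instance refreshes updated_at via the overloaded save() above.
class ExamplePlace(BaseModel):
    name = peewee.CharField()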
|
[
"ghinzu.iphone@gmail.com"
] |
ghinzu.iphone@gmail.com
|
c07b0e4293e04dbd0168b293fe296fd694f77e30
|
8a1686aeeefa80afeb0aa9f45ed72a75883458c4
|
/dit/example_dists/nonsignalling_boxes.py
|
68cf2db6d82c4d7134dce2daa7e5534598a91706
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
heleibin/dit
|
70afd57f31be346e48b9b28c67fd6e019132ac36
|
ebd0c11600e559bf34cf12a6b4e451057838e324
|
refs/heads/master
| 2020-09-27T07:42:15.991500
| 2019-11-23T06:10:11
| 2019-11-23T06:10:11
| 226,466,522
| 1
| 0
|
BSD-3-Clause
| 2019-12-07T06:26:50
| 2019-12-07T06:26:50
| null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
"""
Canonical non-signalling boxes.
"""
from __future__ import division
from dit import Distribution
from itertools import product
__all__ = ['pr_box']
def pr_box(eta=1, name=False):
"""
The Popescu-Rohrlich box, or PR box, is the canonical non-signalling, non-local probability
distribution used in the study of superquantum correlations. It has two space-like separated
inputs, X and Y, and two associated outputs, A and B.
`eta` is the noise level of this correlation. For 0 <= eta <= 1/2 the box can be realized
classically. For 1/2 < eta <= 1/sqrt(2) the box can be realized quantum-mechanically.
Parameters
----------
eta : float, 0 <= eta <= 1
The noise level of the box. Defaults to 1.
name : bool
Whether to set rv names or not. Defaults to False.
Returns
-------
pr : Distribution
The PR box distribution.
"""
outcomes = list(product([0, 1], repeat=4))
pmf = [ ((1+eta)/16 if (x*y == a^b) else (1-eta)/16) for x, y, a, b in outcomes ]
pr = Distribution(outcomes, pmf)
if name:
pr.set_rv_names("XYAB")
return pr
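# Hypothetical usage sketch (not part of the original module): with eta=1 (the
# noiseless PR box) every outcome (x, y, a, b) satisfying x*y == a XOR b gets
# probability 1/8 and all other outcomes get probability 0.
if __name__ == '__main__':
    print(pr_box(eta=1, name=True))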
|
[
"ryangregoryjames@gmail.com"
] |
ryangregoryjames@gmail.com
|
0b1490c6614bba745d469de9dd4cd5a7aedc45e9
|
fcd9b782b9e0f0973b1ddc45e432091e2e107f39
|
/InstantiationScripts/changeOSBCoherence.py
|
9ec724720596b4a1132d60eb047d36357a5389c4
|
[] |
no_license
|
oktbabs/fmw_repo
|
5c8baa0be8185f3846ebc3cea3a2bf07ff4eb0d3
|
3cff15b4c1e1674c038c8d1c3a6d2c7a68035afb
|
refs/heads/master
| 2020-04-09T23:06:09.725008
| 2018-12-06T14:44:11
| 2018-12-06T14:44:11
| 160,646,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,877
|
py
|
#++++++++++++++++++++++++++++++++++++++++++++++
# Script Name : changeOSBCoherence.py +
# Written By : Timmy Babayeju +
# From Company : Fujitsu +
# To Company : FSA +
# Description : Script retrieves the old +
# Coherence Arguments WKA Addresses for OSB & +
# updates with new Coherence WKA Addreseses +
# and Ports for servers in the Cluster +
# Arguments for the OSB cache +
#++++++++++++++++++++++++++++++++++++++++++++++
import wlstModule as wlst
import sys
import string
import traceback
from java.io import File
from java.io import FileOutputStream
f=File('/tmp/appPostLaunch/logs/changeOSBCoherence.log')
fos=FileOutputStream(f)
theInterpreter.setOut(fos)
print "==>> Reading Domain Information from " + DOMAIN_HOME
readDomain(DOMAIN_HOME)
OSBCACHE=1
while true:
try:
COHOSBServer=eval('OSB_WKA' + str(OSBCACHE) + '_SERVER')
# databaseDS=eval('ds' + str(DataS))
try:
COHOSBSrv=eval(COHOSBServer)
# COHOSBServerListAddr=eval('OSB_WKA' + str(OSBCACHE) + '_SERVERLISTENADDR')
# COHOSBListAddr=eval(COHOSBServerListAddr)
# COHOSBServerListPort=eval('OSB_WKA' + str(OSBCACHE) + '_SERVERLISTENPORT')
# COCACHE1=-DOSB.coherence.localhost=sd1oappdu03-osbhost1-vip.fsa.gov.uk -DOSB.coherence.localport=7890 -DOSB.coherence.wka1=sd1oappdu03-osbhost1-vip.fsa.gov.uk -DOSB.coherence.wka1.port=7890 -DOSB.coherence.wka2=sd1oappdu03-osbhost2-vip.fsa.gov.uk -DOSB.coherence.wka2.port=7890
OSBCACHE1='-Xms:1536m -Xmx:1536m -Xnohup -d64 -DOSB.coherence.localhost=' + OSB_WKA1_SERVERLISTENADDR + ' -DOSB.coherence.localport=' + OSB_WKA1_SERVERLISTENPORT + ' -DOSB.coherence.wka1=' + OSB_WKA1_SERVERLISTENADDR + ' -DOSB.coherence.wka1.port=' + OSB_WKA1_SERVERLISTENPORT + ' -DOSB.coherence.wka2=' + OSB_WKA2_SERVERLISTENADDR + ' -DOSB.coherence.wka2.port=' + OSB_WKA2_SERVERLISTENPORT
OSBCACHE2='-Xms:1536m -Xmx:1536m -Xnohup -d64 -DOSB.coherence.localhost=' + OSB_WKA2_SERVERLISTENADDR + ' -DOSB.coherence.localport=' + OSB_WKA2_SERVERLISTENPORT + ' -DOSB.coherence.wka1=' + OSB_WKA1_SERVERLISTENADDR + ' -DOSB.coherence.wka1.port=' + OSB_WKA1_SERVERLISTENPORT + ' -DOSB.coherence.wka2=' + OSB_WKA2_SERVERLISTENADDR + ' -DOSB.coherence.wka2.port=' + OSB_WKA2_SERVERLISTENPORT
# URL1='jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=' + dbRACHost1 + ')(PORT=' + dbRACPort + ')))(CONNECT_DATA=(SERVICE_NAME=' + dbRACServiceName + ')(INSTANCE_NAME=' + dbRACInstanceName1+ ')))'
# URL2='jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=' + dbRACHost2 + ')(PORT=' + dbRACPort + ')))(CONNECT_DATA=(SERVICE_NAME=' + dbRACServiceName + ')(INSTANCE_NAME=' + dbRACInstanceName2+ ')))'
# databaseURL=eval('ds' + str(DataS) + '_URL')
# COHOSBServerListAddr=eval('OSB_WKA' + str(OSBCACHE) + '_SERVERLISTENADDR')
# COHOSBListAddr=eval(COHOSBServerListAddr)
# COHOSBServerListPort=eval('OSB_WKA' + str(OSBCACHE) + '_SERVERLISTENPORT')
# COHOSBSArguments=eval('OSB_WKA' + str(OSBCACHE) + '_ARGS')
# COHOSBSArgs=eval(COHOSBSArguments)
# databURL=eval(databaseURL)
COHOSBSArguments=eval('OSB_WKA' + str(OSBCACHE) + '_ARGS')
COHOSBSArgs=eval(COHOSBSArguments)
except NameError, ner:
print "Required parameter not specified: "
print ner
sys.exit(-1)
print "==> Updating DataSource Info " + str(OSBCACHE) + ": " + COHOSBServer
print " OSB Server : " + COHOSBSrv
print " OSB Listen Address 1 : " + OSB_WKA1_SERVERLISTENADDR
print " OSB Listen Port 1 : " + OSB_WKA1_SERVERLISTENPORT
print " OSB Listen Address 2 : " + OSB_WKA2_SERVERLISTENADDR
print " OSB Listen Port 2 : " + OSB_WKA2_SERVERLISTENPORT
# print "setting attributes for mbean type Server"
# a=cd('/JDBCSystemResources/' + databaseDS + '/JDBCResource/' + databaseDS + '/JDBCDriverParams/' + databaseDS)
# cd('/')
# a=cd('/JDBCSystemResource/' + databaseDS + '/JdbcResource/' + databaseDS + '/JDBCDriverParams/NO_NAME_0')
print "setting attributes for mbean type Server"
a=cd('/Servers/' + COHOSBSrv + '/ServerStart/' + COHOSBSrv)
print " ORIGINAL VALUE : "
print ' '
print a.getArguments()
# cmo.setUrl(dataBURL)
# a.setListenAddress(mServerAddr)
#print a.getdatabaseDS()
#cmo.getUrl()
print 'Setting the new values :'
a.setArguments(COHOSBSArgs)
# a.setPasswordEncrypted(sys.argv[1])
print ' '
print " LATEST VALUE : "
print ' '
print a.getArguments()
OSBCACHE += 1
except NameError:
break
print "==>> Updating the Domain <<=="
updateDomain()
closeDomain()
print "==>> " + DOMAIN_NAME + " successfully updated"
|
[
"oktbabs@gmail.com"
] |
oktbabs@gmail.com
|
2f5c47e2dac73a0aa819503e2513e961295a94c1
|
483e0e80af85a93f787239738b062db63d25902e
|
/exploratory.py
|
39b4a5ea0e9bb2ba2d36d3a680ea83972f94a98a
|
[] |
no_license
|
progdrum/housing_prices
|
2541aa1ed428514d1b947efe9344f76e3b5a577a
|
9ab4740990a2c842151be0dc99ba35cd4800d5cc
|
refs/heads/master
| 2021-01-22T17:47:46.557618
| 2017-07-27T04:45:09
| 2017-07-27T04:45:09
| 98,494,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 789
|
py
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from clean import clean_train, partial_clean_train
# The following code assumes data as cleaned up in clean.py
# Plot a simple distribution of the sale prices of houses in the training set
sns.set_style("darkgrid")
sns.distplot(clean_train['SalePrice'])
# Get a correlation for numerical features and output a heatmap of correlations
corr_frame = clean_train.corr()
sns.heatmap(pd.DataFrame(corr_frame.loc['SalePrice']))
# Output strip plots of categorical variables
for num, col in enumerate(partial_clean_train.columns):
if partial_clean_train[col].dtype is np.dtype('object'):
plt.figure(num)
sns.stripplot(x=col, y='SalePrice', data=partial_clean_train, jitter=True)
|
[
"brookforestscitech@gmail.com"
] |
brookforestscitech@gmail.com
|
d3230028ebcd5ac4d1613c9191dc2913d501d2e1
|
ec124b20240143e97351975ca4c81db5fa49e8a4
|
/users/urls.py
|
b95b3b02d6a3973ae73b64ceba430874de137949
|
[] |
no_license
|
Anildudy/learning_log
|
ed947ddca21093287fe25c9f002ddcd9e997e751
|
56634ec9ea045628f438d8c89787134d13f29834
|
refs/heads/main
| 2023-01-08T03:46:26.510392
| 2020-11-04T10:21:23
| 2020-11-04T10:21:23
| 309,966,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
from django.urls import path
from django.contrib.auth.views import LoginView
from . import views
app_name = 'users'
urlpatterns = [
path('login/', LoginView.as_view(template_name= 'users/login.html'), name='login'),
path('logout/', views.logout_view, name='logout'),
path('register/', views.register, name='register'),
]
|
[
"anildudy1@gmail.com"
] |
anildudy1@gmail.com
|
6bcb60f9a28b08a55e7c5ab03219adfcce07f8d1
|
b80878934db337e6b31bd76979c6a98c83424945
|
/evrytesttools/urls.py
|
5db918fd6fd1a7a5b76da81f0057e69f11c415a2
|
[] |
no_license
|
quanlidavid/evrytestportal
|
cc78a0938849fd981447fdc3aa7b0b91f4741306
|
728d2137ac3a713fe61fbfce4f07810fb1432706
|
refs/heads/master
| 2022-12-10T07:04:48.658884
| 2017-12-20T06:50:17
| 2017-12-20T06:50:17
| 113,135,283
| 0
| 0
| null | 2022-12-07T23:49:31
| 2017-12-05T05:06:32
|
Python
|
UTF-8
|
Python
| false
| false
| 773
|
py
|
from django.urls import path
from evrytesttools import views
urlpatterns = [
# url(r'^$',views.index,name='index'),
# url(r'^test/$',views.test,name='test'),
# url(r'^run/$',views.run,name='run'),
# url(r'^getVIOInfo/$',views.getVIOInfo,name='getVIOInfo'),
# url(r'^run/logdetails.html$',views.logdetails_page,name='logdetails'),
path('', views.homepage),
path('icd_sr/', views.icd_sr_page),
path('icd_cmdb/', views.icd_cmdb_page),
path('icd_create_linux_sr/', views.icd_create_linux_sr_page),
path('icdhistory/<str:table>/',views.icd_history),
path('vioinfo/',views.vioinfo),
path('linuxinstanceinfo/',views.linuxinstanceinfo),
path('linuxdetaillog/',views.linuxdetailslog),
# path('post/<slug:slug>', showpost),
]
|
[
"quanlidavid@gmail.com"
] |
quanlidavid@gmail.com
|
a58c66ee849885e28c84f289036ba843e088661a
|
1e6c6dda65e1ab23da81c524d7ab3c5f6d3c6dd3
|
/test/test_put_get.py
|
cf45cf45c38d8626ad80540c331757c1a9d58a5a
|
[
"Apache-2.0"
] |
permissive
|
yungster/snowflake-connector-python
|
737622070460bb53102db4896871d5ef8b5f448b
|
740edb840a23f0e9395f47499a3441bf8c4d7d30
|
refs/heads/master
| 2021-01-19T19:50:36.975560
| 2017-04-17T01:27:47
| 2017-04-17T01:27:47
| 88,448,642
| 0
| 0
| null | 2017-04-16T23:23:06
| 2017-04-16T23:23:06
| null |
UTF-8
|
Python
| false
| false
| 16,911
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2017 Snowflake Computing Inc. All right reserved.
#
import os
from getpass import getuser
from logging import getLogger
import pytest
logger = getLogger(__name__)
@pytest.fixture()
def test_data(request, conn_cnx, db_parameters):
assert u'AWS_ACCESS_KEY_ID' in os.environ
assert u'AWS_SECRET_ACCESS_KEY' in os.environ
assert u'SF_PROJECT_ROOT' in os.environ
unique_name = db_parameters['name']
database_name = "{0}_db".format(unique_name)
warehouse_name = "{0}_wh".format(unique_name)
def fin():
with conn_cnx() as cnx:
with cnx.cursor() as cur:
cur.execute("drop database {0}".format(database_name))
cur.execute("drop warehouse {0}".format(warehouse_name))
request.addfinalizer(fin)
class TestData(object):
def __init__(self):
self.AWS_ACCESS_KEY_ID = "'{0}'".format(
os.environ[u'AWS_ACCESS_KEY_ID'])
self.AWS_SECRET_ACCESS_KEY = "'{0}'".format(
os.environ[u'AWS_SECRET_ACCESS_KEY'])
self.SF_PROJECT_ROOT = "{0}".format(
os.environ['SF_PROJECT_ROOT'])
self.stage_name = "{0}_stage".format(unique_name)
self.warehouse_name = warehouse_name
self.database_name = database_name
self.user_bucket = os.getenv(
'SF_AWS_USER_BUCKET',
"sfc-dev1-regression/{0}/reg".format(getuser()))
ret = TestData()
with conn_cnx() as cnx:
with cnx.cursor() as cur:
cur.execute("""
use role sysadmin
""")
cur.execute("""
create or replace warehouse {0}
warehouse_size = 'small'
warehouse_type='standard'
auto_suspend=1800
""".format(warehouse_name))
cur.execute("""
create or replace database {0}
""".format(database_name))
cur.execute("""
create or replace schema pytesting_schema
""")
cur.execute("""
create or replace file format VSV type = 'CSV'
field_delimiter='|' error_on_column_count_mismatch=false
""")
return ret
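# The fixture above provisions a per-run warehouse and database (dropped again in
# fin()); the tests below assume AWS credentials and SF_PROJECT_ROOT are exported
# in the environment, as asserted at the top of the fixture.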
def test_load_s3(test_data, conn_cnx):
with conn_cnx() as cnx:
with cnx.cursor() as cur:
cur.execute(
"""use warehouse {0}""".format(test_data.warehouse_name))
cur.execute("""use schema {0}.pytesting_schema""".format(
test_data.database_name))
cur.execute("""
create or replace table tweets(created_at timestamp,
id number, id_str string, text string, source string,
in_reply_to_status_id number, in_reply_to_status_id_str string,
in_reply_to_user_id number, in_reply_to_user_id_str string,
in_reply_to_screen_name string, user__id number, user__id_str string,
user__name string, user__screen_name string, user__location string,
user__description string, user__url string,
user__entities__description__urls string, user__protected string,
user__followers_count number, user__friends_count number,
user__listed_count number, user__created_at timestamp,
user__favourites_count number, user__utc_offset number,
user__time_zone string, user__geo_enabled string, user__verified string,
user__statuses_count number, user__lang string,
user__contributors_enabled string, user__is_translator string,
user__profile_background_color string,
user__profile_background_image_url string,
user__profile_background_image_url_https string,
user__profile_background_tile string, user__profile_image_url string,
user__profile_image_url_https string, user__profile_link_color string,
user__profile_sidebar_border_color string,
user__profile_sidebar_fill_color string, user__profile_text_color string,
user__profile_use_background_image string, user__default_profile string,
user__default_profile_image string, user__following string,
user__follow_request_sent string, user__notifications string, geo string,
coordinates string, place string, contributors string,
retweet_count number,
favorite_count number, entities__hashtags string, entities__symbols string,
entities__urls string, entities__user_mentions string, favorited string,
retweeted string, lang string)""")
cur.execute("ls @%tweets")
assert cur.rowcount == 0, \
('table newly created should not have any '
'files in its staging area')
cur.execute("""
copy into tweets from s3://sfc-dev1-data/twitter/O1k/tweets/
credentials=(
AWS_KEY_ID={aws_access_key_id}
AWS_SECRET_KEY={aws_secret_access_key})
file_format=(
skip_header=1 null_if=('')
field_optionally_enclosed_by='"'
)
""".format(
aws_access_key_id=test_data.AWS_ACCESS_KEY_ID,
aws_secret_access_key=test_data.AWS_SECRET_ACCESS_KEY)
)
assert cur.rowcount == 1, (
'copy into tweets did not set rowcount to 1')
results = cur.fetchall()
assert results[0][0] == (
"s3://sfc-dev1-data/twitter/O1k/tweets/1.csv.gz")
cur.execute("drop table tweets")
def test_put_local_file(test_data, conn_cnx):
with conn_cnx() as cnx:
with cnx.cursor() as cur:
cur.execute(
"""use warehouse {0}""".format(test_data.warehouse_name))
cur.execute("""use schema {0}.pytesting_schema""".format(
test_data.database_name))
cur.execute("""
create or replace table pytest_putget_t1 (
c1 STRING, c2 STRING, c3 STRING,
c4 STRING, c5 STRING, c6 STRING, c7 STRING, c8 STRING, c9 STRING)
stage_file_format = (
field_delimiter = '|'
error_on_column_count_mismatch=false)
stage_copy_options = (purge=false)
stage_location = (
url = 's3://{user_bucket}/{stage_name}'
credentials = (
AWS_KEY_ID={aws_access_key_id}
AWS_SECRET_KEY={aws_secret_access_key})
)
""".format(
aws_access_key_id=test_data.AWS_ACCESS_KEY_ID,
aws_secret_access_key=test_data.AWS_SECRET_ACCESS_KEY,
user_bucket=test_data.user_bucket,
stage_name=test_data.stage_name,
))
cur.execute("""
put file://{0}/ExecPlatform/Database/data/orders_10*.csv @%pytest_putget_t1
""".format(test_data.SF_PROJECT_ROOT)
)
assert cur.is_file_transfer
cur.execute("ls @%pytest_putget_t1").fetchall()
assert not cur.is_file_transfer
assert cur.rowcount == 2, (
'ls @%pytest_putget_t1 did not return 2 rows')
cur.execute("copy into pytest_putget_t1")
results = cur.fetchall()
assert len(results) == 2, '2 files were not copied'
assert results[0][1] == 'LOADED', (
'file 1 was not loaded after copy')
assert results[1][1] == 'LOADED', (
'file 2 was not loaded after copy')
cur.execute("select count(*) from pytest_putget_t1")
results = cur.fetchall()
assert results[0][0] == 73, (
'73 rows not loaded into pytest_putget_t1')
cur.execute("rm @%pytest_putget_t1")
results = cur.fetchall()
assert len(results) == 2, 'two files were not removed'
cur.execute(
"select STATUS from information_schema.load_history where table_name='PYTEST_PUTGET_T1'")
results = cur.fetchall()
assert results[0][0] == 'LOADED', (
'history does not show file to be loaded')
cur.execute("drop table pytest_putget_t1")
def test_put_load_from_user_stage(test_data, conn_cnx):
with conn_cnx() as cnx:
with cnx.cursor() as cur:
cur.execute(
"""use warehouse {0}""".format(test_data.warehouse_name))
cur.execute("""use schema {0}.pytesting_schema""".format(
test_data.database_name))
cur.execute("""
create or replace stage {stage_name}
url='s3://{user_bucket}/{stage_name}'
credentials = (
AWS_KEY_ID={aws_access_key_id}
AWS_SECRET_KEY={aws_secret_access_key})
""".format(
aws_access_key_id=test_data.AWS_ACCESS_KEY_ID,
aws_secret_access_key=test_data.AWS_SECRET_ACCESS_KEY,
user_bucket=test_data.user_bucket,
stage_name=test_data.stage_name,
))
cur.execute("""
create or replace table pytest_putget_t2 (c1 STRING, c2 STRING, c3 STRING,
c4 STRING, c5 STRING, c6 STRING, c7 STRING, c8 STRING, c9 STRING)
""")
cur.execute("""
put file://{project_root}/ExecPlatform/Database/data/orders_10*.csv
@{stage_name}
""".format(
project_root=test_data.SF_PROJECT_ROOT,
stage_name=test_data.stage_name)
)
# two files should have been put in the staging area
results = cur.fetchall()
assert len(results) == 2
cur.execute("ls @%pytest_putget_t2")
results = cur.fetchall()
assert len(results) == 0, (
'no files should have been loaded yet')
# copy
cur.execute("""
copy into pytest_putget_t2 from @{stage_name}
file_format = (field_delimiter = '|' error_on_column_count_mismatch=false)
purge=true
""".format(
stage_name=test_data.stage_name
))
results = sorted(cur.fetchall())
assert len(results) == 2, (
'copy failed to load two files from the stage')
assert results[0][0] == (
"s3://{user_bucket}/{stage_name}/orders_100.csv.gz".
format(
user_bucket=test_data.user_bucket,
stage_name=test_data.stage_name,
)), 'copy did not load file orders_100'
assert results[1][0] == (
"s3://{user_bucket}/{stage_name}/orders_101.csv.gz".format(
user_bucket=test_data.user_bucket,
stage_name=test_data.stage_name,
)), 'copy did not load file orders_101'
# should be empty (purged)
cur.execute(
"ls @{stage_name}".format(stage_name=test_data.stage_name))
results = cur.fetchall()
assert len(results) == 0, 'copied files not purged'
cur.execute("drop table pytest_putget_t2")
cur.execute(
"drop stage {stage_name}".format(
stage_name=test_data.stage_name))
def test_unload(test_data, conn_cnx):
with conn_cnx() as cnx:
with cnx.cursor() as cur:
cur.execute(
"""use warehouse {0}""".format(test_data.warehouse_name))
cur.execute("""use schema {0}.pytesting_schema""".format(
test_data.database_name))
cur.execute("""
create or replace stage {stage_name}
url='s3://{user_bucket}/{stage_name}/pytest_put_unload/unload/'
credentials = (
AWS_KEY_ID={aws_access_key_id}
AWS_SECRET_KEY={aws_secret_access_key})
""".format(
aws_access_key_id=test_data.AWS_ACCESS_KEY_ID,
aws_secret_access_key=test_data.AWS_SECRET_ACCESS_KEY,
user_bucket=test_data.user_bucket,
stage_name=test_data.stage_name,
))
cur.execute("""
CREATE OR REPLACE TABLE pytest_t3 (
c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING,
c6 STRING, c7 STRING, c8 STRING, c9 STRING)
stage_file_format = (format_name = 'vsv' field_delimiter = '|'
error_on_column_count_mismatch=false)""")
cur.execute("""
alter stage {stage_name} set file_format = ( format_name = 'VSV' )
""".format(stage_name=test_data.stage_name))
# make sure it's clean
cur.execute(
"rm @{stage_name}".format(stage_name=test_data.stage_name))
# put local file
cur.execute("""
put file://{0}/ExecPlatform/Database/data/orders_10*.csv
@%pytest_t3""".format(test_data.SF_PROJECT_ROOT)
)
# copy into table
cur.execute("""
copy into pytest_t3
file_format = (field_delimiter = '|' error_on_column_count_mismatch=false)
purge=true""")
# unload from table
cur.execute("""
copy into @{stage_name}/data_
from pytest_t3 file_format=(format_name='VSV' compression='gzip')
max_file_size=10000000""".format(stage_name=test_data.stage_name))
# load the data back to another table
cur.execute("""
CREATE OR REPLACE TABLE pytest_t3_copy (
c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING,
c6 STRING, c7 STRING, c8 STRING, c9 STRING)
stage_file_format = (format_name = 'VSV' )""")
cur.execute("""
copy into pytest_t3_copy
from @{stage_name}/data_ return_failed_only=true
""".format(stage_name=test_data.stage_name))
# check to make sure they are equal
cur.execute("""
(select * from pytest_t3 minus select * from pytest_t3_copy)
union
(select * from pytest_t3_copy minus select * from pytest_t3)
""")
assert cur.rowcount == 0, (
'unloaded/reloaded data were not the same')
# clean stage
cur.execute(
"rm @{stage_name}/data_".format(
stage_name=test_data.stage_name))
assert cur.rowcount == 1, (
'only one file was expected to be removed')
# unload with deflate
cur.execute("""
copy into @{stage_name}/data_
from pytest_t3 file_format=(format_name='VSV' compression='deflate')
max_file_size=10000000
""".format(stage_name=test_data.stage_name))
results = cur.fetchall()
assert results[0][0] == 73, (
'73 rows were expected to be loaded')
# create a table to unload data into
cur.execute("""
CREATE OR REPLACE TABLE pytest_t3_copy
(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING, c6 STRING,
c7 STRING, c8 STRING, c9 STRING)
stage_file_format = (
format_name = 'VSV'
compression='deflate')""")
results = cur.fetchall()
assert results[0][0] == (
"Table PYTEST_T3_COPY successfully created."), (
"table not created successfully")
cur.execute("""
alter stage {stage_name} set file_format = (
format_name = 'VSV'
compression='deflate')
""".format(stage_name=test_data.stage_name))
cur.execute("""
copy into pytest_t3_copy from @{stage_name}/data_
return_failed_only=true
""".format(stage_name=test_data.stage_name))
results = cur.fetchall()
assert results[0][2] == "LOADED", (
"rows were not loaded successfully")
assert results[0][4] == 73, (
"not all 73 rows were loaded successfully")
# check to make sure they are equal
cur.execute("""
(select * from pytest_t3 minus select * from pytest_t3_copy)
union
(select * from pytest_t3_copy minus select * from pytest_t3)
""")
assert cur.rowcount == 0, (
'unloaded/reloaded data were not the same')
cur.execute(
"rm @{stage_name}/data_".format(
stage_name=test_data.stage_name))
assert cur.rowcount == 1, (
'only one file was expected to be removed')
# clean stage
cur.execute(
"rm @{stage_name}/data_".format(
stage_name=test_data.stage_name))
cur.execute("drop table pytest_t3_copy")
cur.execute(
"drop stage {stage_name}".format(
stage_name=test_data.stage_name))
cur.close()
def test_put_with_auto_compress_false(tmpdir, db_parameters):
"""
Test PUT command with auto_compress=False
"""
import snowflake.connector
cnx = snowflake.connector.connect(
user=db_parameters['s3_user'],
password=db_parameters['s3_password'],
host=db_parameters['s3_host'],
port=db_parameters['s3_port'],
database=db_parameters['s3_database'],
account=db_parameters['s3_account'],
protocol=db_parameters['s3_protocol'])
tmp_dir = str(tmpdir.mkdir('data'))
test_data = os.path.join(tmp_dir, 'data.txt')
with open(test_data, 'w') as f:
f.write("test1,test2")
f.write("test3,test4")
cnx.cursor().execute("RM @~/test_put_uncompress_file")
try:
with cnx.cursor() as cur:
for rec in cur.execute("""
PUT file://{0} @~/test_put_uncompress_file auto_compress=FALSE
""".format(test_data)):
print(rec)
ret = cnx.cursor().execute("""
LS @~/test_put_uncompress_file
""").fetchone()
assert "test_put_uncompress_file/data.txt" in ret[0]
assert "data.txt.gz" not in ret[0]
finally:
cnx.cursor().execute("RM @~/test_put_uncompress_file")
|
[
"smtakeda@gmail.com"
] |
smtakeda@gmail.com
|
46fec2ea58d9a148df6b47bee5d4fe5b53daebbe
|
267712c497d16b74f75fac57cf1281dff0a8afbd
|
/Flask/app.py
|
2104b92a850213d379540db1540cba34afb0c531
|
[] |
no_license
|
AlexRaouf/Reverb_Synth_Sales_Analysis
|
54240732fc699b62a9b6014f96cf4b66a17a0a9c
|
168b30c670847f4d39170eb77a41ac4da36b13d2
|
refs/heads/master
| 2022-11-28T18:35:11.935972
| 2020-07-31T23:13:27
| 2020-07-31T23:13:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
from flask import Flask, request, jsonify, render_template
import numpy as np
import pickle
from catboost import CatBoostRegressor
model = CatBoostRegressor()
app = Flask(__name__)
model.load_model('log_model')
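# 'log_model' is assumed to be a CatBoost model trained on log-transformed sale
# prices, which is why predictions are passed through np.exp before being shown.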
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
'''
for rendering results on HTML GUI
'''
features = [str(x) for x in request.form.values()]
final_features = [np.array(features)]
prediction = model.predict(final_features)
output = round(np.exp(prediction[0]), 2)
return render_template('index.html', prediction_text='Sale Price should be: ${}'.format(output))
@app.route('/predict_api', methods=['POST'])
def predict_api():
'''
for direct API calls
'''
data = request.get_json(force=True)
prediction = model.predict([np.array(list(data.values()))])
output = prediction[0]
return jsonify(output)
if __name__ == '__main__':
    app.run('127.0.0.1', debug=True)
|
[
"Alexraouf24@gmail.com"
] |
Alexraouf24@gmail.com
|
cfe6405177b20e41c71d4856112f67b2dab27258
|
e24a7ba2a162f754336a1f8322c2c9c44eb9a613
|
/authenticate/utils.py
|
59facdc3f03bfd4120417aa4473fc948275eda5a
|
[] |
no_license
|
Aymammet/friendly
|
9ca063d5542c203badd3b3950ee3fe9b3c82380d
|
423db7f4d4fc5309c7f732d21a8084682133f92d
|
refs/heads/master
| 2023-08-30T21:03:17.434235
| 2021-10-06T18:11:03
| 2021-10-06T18:11:03
| 397,711,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
from django.contrib.auth.tokens import PasswordResetTokenGenerator
import six
class TokenGenerator(PasswordResetTokenGenerator):
def _make_hash_value(self, user, timestamp):
return six.text_type(user.pk)+six.text_type(timestamp)+six.text_type(user.is_email_verified)
generate_token=TokenGenerator()
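# Typical usage (sketch): generate_token.make_token(user) to issue an email
# verification token and generate_token.check_token(user, token) to validate it.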
|
[
"aymammet2018@gmail.com"
] |
aymammet2018@gmail.com
|
de40cb73587404ad4af002c23abbb61cc9721e8f
|
e77d1993f8a8327bb9e7563f03661a039182036f
|
/bookout/manage.py
|
35e45a65d1f5cd57149ba7fd4fe11d9ba678bd60
|
[] |
no_license
|
gilescaunter/bookout-django
|
d1edbb8ca8b6b124f66ec681b126a0b7feb83ba8
|
0a73b84d198ac8ac4183893935356634b3163a36
|
refs/heads/main
| 2021-10-25T19:31:15.303118
| 2021-10-25T11:54:43
| 2021-10-25T11:54:43
| 168,854,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bookout.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"giles.caunter@vortex6.com"
] |
giles.caunter@vortex6.com
|
55f7964dd7fe45975f15bc18a2c1c1f62ce920bd
|
a0dc97dbb5c17302195ccb739e428f29cd5eea7e
|
/workers/views.py
|
5de13619947453ab0427bc159c68c844752b9177
|
[] |
no_license
|
edgarkov/hrm
|
450b7c255f87ab3e6641d1f7a2dd012cc62472ed
|
c9228948929c6f2eaa36a2bcd663688172e13887
|
refs/heads/master
| 2020-04-20T06:23:38.019618
| 2019-02-07T13:22:41
| 2019-02-07T13:22:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,110
|
py
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from workers.models import Employee
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views import generic
import json
def tree(request):
ctx = {
}
return render(request, 'tree.html', ctx)
def show_tree(request):
print('hello')
tree_nodes_list = list()
for item in Employee.objects.all():
tree_nodes_list.append({
'id': item.id,
'parent': item.parent_id if item.parent_id else '#',
'text': 'Full name: %s, Position: %s' % (item.full_name, item.position)
})
ctx = {
'core': {
'data' : tree_nodes_list
}
}
return JsonResponse(ctx)
def table(request):
ctx = {
'table_name' : 'Workers',
'table_opts': dict({},
columns=json.dumps([
{
'data': u'Name',
'title': u'Name'
},
{
'data': u'Position',
'title': u'Position'
},
{
'data': u'Salary',
'title': u'Salary'
},
{
'data': u'Employment date',
'title': u'Employment date'
}
])
)
}
return render(request, 'table.html' , ctx)
def show_data(request):
table_list = list()
for item in Employee.objects.all():
table_list.append({
'Name': item.full_name,
'Position': item.position,
'Salary': item.salary,
'Employment date': item.employment_date,
'extn': item.id
})
ctx = {
'data' : table_list,
}
return JsonResponse(ctx)
class SignUp(generic.CreateView):
form_class = UserCreationForm
success_url = reverse_lazy('login')
template_name = 'signup.html'
|
[
"nomerelion@gmail.com"
] |
nomerelion@gmail.com
|
ded6eb6b60a20e19b632703677a9b21d503d3037
|
6b488077cde3735a58171fa54f4ab6ed1c96de82
|
/showgandicontactinfo.py
|
1ef125c639a16a850bcaf6045eb493f42617763e
|
[] |
no_license
|
stayhigh/GandiAPISample
|
a6ba4fd707ddeb8fc9df2853ed8e9cb9f533a69f
|
48fcecd839b7c68bf13e48e9a3611fa575394ef6
|
refs/heads/master
| 2021-01-10T03:28:13.472233
| 2015-11-27T11:40:27
| 2015-11-27T11:40:27
| 44,784,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
"""
This sample code shows info for your Gandi site using an API key.
"""
import xmlrpclib
import sys
from pprint import pprint
# Connect to the API server
API = xmlrpclib.ServerProxy('https://rpc.gandi.net/xmlrpc/')
if len(sys.argv) != 2:
print "%s %s" % (sys.argv[0], "<YOUR API KEY>")
exit(1)
APIKEY = sys.argv[1]
# Now you can call API methods.
# You must authenticate yourself by passing
# the API key as the first method's argument
VERSION = API.version.info(APIKEY)
print "gandi api version: " + VERSION['api_version']
# Get all handle ids
HANDLE_LIST = []
for eachcontact in API.contact.list(APIKEY):
HANDLE_LIST.append(eachcontact['handle'])
print "Gandi Site IDs:", HANDLE_LIST
# Get info on each Gandi handle
"""
if len(HANDLE_LIST) > 0:
for contactid in HANDLE_LIST:
print API.contact.info(APIKEY, contactid)
print
"""
if len(HANDLE_LIST) > 0:
pprint (API.contact.list(APIKEY))
|
[
"john.wang@gandi.net"
] |
john.wang@gandi.net
|
51a46d0979cf8db42522b214bc0d30b7d577a9f0
|
1a5cfc6b94f1e847f1d0bbae94e7da83d5c93bbd
|
/sleep/management/commands/cleanGroups.py
|
533f3a079f158ae3b036f6c6f46cb04bdbacd6c5
|
[
"MIT"
] |
permissive
|
sleepers-anonymous/zscore
|
a6cc817f71817bfbe4800bb29c53f1ed1cff332a
|
2d7eb2e2c06c307af7fae4058173a25ba9c40025
|
refs/heads/master
| 2020-04-15T20:09:48.984486
| 2018-01-31T05:09:42
| 2018-01-31T05:09:42
| 10,006,266
| 3
| 1
|
MIT
| 2019-03-08T23:47:56
| 2013-05-11T22:40:10
|
Python
|
UTF-8
|
Python
| false
| false
| 419
|
py
|
from django.core.management.base import BaseCommand, CommandError
from sleep.models import SleeperGroup
class Command(BaseCommand):
help = "Removes all empty groups"
def handle(self, *args, **options):
groups = SleeperGroup.objects.all()
for g in groups:
if g.membership_set.all().count() == 0:
self.stdout.write('Removing group %s' % g)
g.delete()
|
[
"nightshadequeen@gmail.com"
] |
nightshadequeen@gmail.com
|
ae8bcc2905098b2fec9238df5b7f86a55d123007
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p2DJ/New/program/qiskit/simulator/startQiskit341.py
|
b885570da535c0bd5ec4ba1219b7f7dc6c82a02c
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,319
|
py
|
# qubit number=2
# total number=20
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.z(input_qubit[1]) # number=16
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.y(input_qubit[1]) # number=2
prog.y(input_qubit[1]) # number=4
prog.y(input_qubit[1]) # number=3
prog.h(input_qubit[0]) # number=13
prog.cz(input_qubit[1],input_qubit[0]) # number=14
prog.h(input_qubit[0]) # number=15
prog.x(input_qubit[0]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=17
prog.cz(input_qubit[1],input_qubit[0]) # number=18
prog.h(input_qubit[0]) # number=19
prog.x(input_qubit[0]) # number=11
prog.cx(input_qubit[1],input_qubit[0]) # number=12
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit341.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
4ab72fe952943b35c9803384d5400e826c57952f
|
94e1e0d5647e42c4862655a7a6c303fde4d1a0ff
|
/tags.py
|
33cbc3430148e787faf67829de86980bb15cfb40
|
[] |
no_license
|
jpiv/SmartPin
|
20b19e2091276ea7fd67fdbf5a98f8dfb9fb71c4
|
2edaa16f1ca9303eaf81edf9b9be4b30f6d88321
|
refs/heads/master
| 2022-11-22T21:37:36.431170
| 2021-07-30T05:19:23
| 2021-07-30T05:19:23
| 147,282,054
| 1
| 0
| null | 2022-11-22T00:34:11
| 2018-09-04T03:15:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,664
|
py
|
from log import log
class Tag(object):
_key = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]
# Need to skip sector trailer
_blocks = 15
rf = None
rfutil = None
def __init__(self, rf, uid, *args, **kwargs):
self.rf = rf
self.rfutil = self.rf.util()
err = self.rfutil.set_tag(uid)
self.rfutil.auth(self.rf.auth_a, self._key)
if err:
log('Error selecting tag')
return
def set(self, index, val):
if index > self._blocks or index < 1:
raise Exception('`index` param must be between 1-15')
data = []
if type(val) == str or type(val) == unicode:
strVals = [ord(char) for char in val]
data = strVals
else:
data.append(val)
if len(data) > 16:
log('Value too long')
block = []
for i in xrange(0, 16):
if i < len(data):
block.append(data[i])
else:
block.append(0)
err = self.rfutil.do_auth(index)
err and log('Auth error for write')
err = self.rf.write(index, block)
if err:
log('Error writing to tag')
def read(self, index):
err = self.rfutil.do_auth(index)
if err:
log('Card auth failed for read')
else:
err, data = self.rf.read(index)
if err:
log('Error reading card')
else:
return data
return []
@staticmethod
def to_string(data):
return ''.join([
unichr(byte)
for byte in data
if byte != 0
])
@staticmethod
def to_int(data):
return sum(data)
class WeightTag(Tag, object):
_block = 1
_weight = None
def __init__(self, *args, **kwargs):
super(WeightTag, self).__init__(*args, **kwargs)
@property
def weight(self):
return Tag.to_int(self.read(self._block))
@weight.setter
def weight(self, weight):
self.set(self._block, weight)
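# Example usage (sketch, assuming an initialised reader `rf` and a tag `uid`):
# tag = WeightTag(rf, uid)
# tag.weight = 42 # writes the weight to block 1
# print(tag.weight) # reads it back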
|
[
"jjpizzo4@gmail.com"
] |
jjpizzo4@gmail.com
|
bb523a613214342e3b7c115ace36d847445555cb
|
0178e6a705ee8aa6bb0b0a8512bf5184a9d00ded
|
/Sungjin/Math/1057.py
|
ecbbc485be3b5f1b6fc0c9492399c0c66eaaab90
|
[] |
no_license
|
comojin1994/Algorithm_Study
|
0379d513abf30e3f55d6a013e90329bfdfa5adcc
|
965c97a9b858565c68ac029f852a1c2218369e0b
|
refs/heads/master
| 2021-08-08T14:55:15.220412
| 2021-07-06T11:54:33
| 2021-07-06T11:54:33
| 206,978,984
| 0
| 1
| null | 2020-05-14T14:06:46
| 2019-09-07T14:23:31
|
Python
|
UTF-8
|
Python
| false
| false
| 254
|
py
|
import sys
input = sys.stdin.readline
def func(K, L):
cnt = 0
while K != L:
K, L = K // 2, L // 2
cnt += 1
print(cnt)
return 1
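# K and L are passed in as 0-based bracket positions; halving both each round
# mirrors how tournament slots merge, and the loop stops in the round they meet.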
if __name__ == '__main__':
N, K, L = map(int, input().strip().split())
func(K-1, L-1)
|
[
"comojin1994@gmail.com"
] |
comojin1994@gmail.com
|
88caf91d43a9be2b1208a0997b0a3fa5223e7b01
|
ee2c972b000f585c4b960390608287a1b1e6ebdc
|
/Optimizermaster/FileHandler.py
|
c8540325c61b3627c14b2bf6a90c7555b07c1268
|
[] |
no_license
|
zhengjie607/PyQt-for-OpenGL
|
828f3a8e95c3756331b4b0b9f7b93ff5cfc48a23
|
8e74ba371ddb6f6410289bb106462fbfd87c3947
|
refs/heads/master
| 2020-04-18T10:42:56.661405
| 2019-07-04T01:04:16
| 2019-07-04T01:04:16
| 167,476,175
| 6
| 1
| null | 2019-01-27T10:35:31
| 2019-01-25T03:05:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 9,290
|
py
|
# Python 2.7 and 3.5
# Author: Christoph Schranz, Salzburg Research
import sys
import os
import struct
import time
from Optimizermaster import ThreeMF
# upgrade numpy with: "pip install numpy --upgrade"
import numpy as np
class FileHandler:
def __init__(self):
pass
def load_mesh(self, inputfile):
"""This module loads the content of a 3D file as mesh array."""
filetype = os.path.splitext(inputfile)[1].lower()
if filetype == ".stl":
f = open(inputfile, "rb")
if "solid" in str(f.read(5).lower()):
try:
f = open(inputfile, "r")
objs = self.load_ascii_stl(f)
except UnicodeDecodeError:
# if len(objs[0]["mesh"]) < 3:
f.close()
f = open(inputfile, "rb")
# f.seek(5, os.SEEK_SET)
objs = self.load_binary_stl(f)
else:
objs = self.load_binary_stl(f)
elif filetype == ".3mf":
object = ThreeMF.Read3mf(inputfile) # TODO not implemented
#objs[0] = {"mesh": list(), "name": "binary file"}
objs = {0: {"mesh": object[0]["mesh"], "name": "3mf file"}}
elif filetype == ".obj":
f = open(inputfile, "rb")
objs = self.load_obj(f)
else:
print("File type is not supported.")
sys.exit()
return objs
def load_obj(self, f):
"""Load the content of an OBJ file."""
objects = dict()
vertices = list()
objects[0] = {"mesh": list(), "name": "obj file"}
for line in f:
if "v" in line:
data = line.split()[1:]
vertices.append([float(data[0]), float(data[1]), float(data[2])])
f.seek(0, 0)
for line in f:
if "f" in line:
data = line.split()[1:]
objects[0]["mesh"].append(vertices[int(data[0])-1])
objects[0]["mesh"].append(vertices[int(data[1])-1])
objects[0]["mesh"].append(vertices[int(data[2])-1])
return objects
def load_ascii_stl(self, f):
"""Load the content of an ASCII STL file."""
objects= {"mesh": list(),"name": "ascii file"}
normal=[]
for line in f:
if "vertex" in line:
data = line.split()[1:]
objects["mesh"].append([float(data[0]), float(data[1]), float(data[2]),float(normal[0]),float(normal[1]),float(normal[2])])
if "normal" in line:
normal=line.split()[2:]
return objects
def load_binary_stl(self, f):
"""Load the content of a binary STL file."""
# Skip the header
#f.read(80-5)
f.seek(80,0)
# the 4-byte little-endian face count follows the 80-byte header
face_count = struct.unpack('<I', f.read(4))[0]
objects= {"mesh": list(),"name": "binary file"}
for idx in range(0, face_count):
data = struct.unpack("<ffffffffffffH", f.read(50))
objects["mesh"].append([data[3], data[4], data[5],data[0],data[1],data[2]])
objects["mesh"].append([data[6], data[7], data[8],data[0],data[1],data[2]])
objects["mesh"].append([data[9], data[10], data[11],data[0],data[1],data[2]])
return objects
def write_mesh(self, objects, info, outputfile, output_type="binarystl"):
# if output_type == "3mf": # TODO not implemented yet
# # transformation = "{} {} {} {} {} {} {} {} {} 0 0 1".format(x.matrix[0][0], x.matrix[0][1], x.matrix[0][2],
# # x.matrix[1][0], x.matrix[1][1], x.matrix[1][2], x.matrix[2][0], x.matrix[2][1], x.matrix[2][2])
# # obj["transform"] = transformation
# # FileHandler.rotate3MF(args.inputfile, args.outputfile, objs)
# raise TypeError('The 3mf output format is not implemented yet.')
print('write_mesh')
if output_type == "asciistl":
# Create separate files with rotated content. If an IDE supports multipart placement,
# set outname = outputfile
for part, content in objects.items():
mesh = content["mesh"]
filename = content["name"]
tweakedcontent = self.rotate_ascii_stl(info[part]["matrix"], mesh, filename)
if len(objects.keys()) == 1:
outname = outputfile
else:
outname = "".join(outputfile.split(".")[:-1]) + "_{}.stl".format(part)
with open(outname, 'w') as outfile:
outfile.write(tweakedcontent)
else: # binary STL, binary stl can't support multiparts
# Create separate files with rotated content.
header = "Tweaked on {}".format(time.strftime("%a %d %b %Y %H:%M:%S")
).encode().ljust(79, b" ") + b"\n"
for part, content in objects.items():
mesh = objects[part]["mesh"]
tweaked_array = self.rotate_bin_stl(info[part]["matrix"], mesh)
if len(objects.keys()) == 1:
outname = "".join(outputfile.split(".")[:-1]) + ".stl"
else:
outname = "".join(outputfile.split(".")[:-1]) + "_{}.stl".format(part)
length = struct.pack("<I", int(len(mesh) / 3))
print(outname)
with open(outname, 'wb') as outfile:
outfile.write(bytearray(header + length + b"".join(tweaked_array)))
def rotate_3mf(self, *arg):
ThreeMF.rotate3MF(*arg)
def rotate_ascii_stl(self, rotation_matrix, content, filename):
"""Rotate the mesh array and save as ASCII STL."""
mesh = np.array(content, dtype=np.float64)
# prefix area vector, if not already done (e.g. in STL format)
if len(mesh[0]) == 3:
row_number = int(len(content)/3)
mesh = mesh.reshape(row_number, 3, 3)
# upgrade numpy with: "pip install numpy --upgrade"
rotated_content = np.matmul(mesh, rotation_matrix)
v0 = rotated_content[:, 0, :]
v1 = rotated_content[:, 1, :]
v2 = rotated_content[:, 2, :]
normals = np.cross(np.subtract(v1, v0), np.subtract(v2, v0)) \
.reshape(int(len(rotated_content)), 1, 3)
rotated_content = np.hstack((normals, rotated_content))
tweaked = list("solid %s" % filename)
tweaked += list(map(self.write_facett, list(rotated_content)))
tweaked.append("\nendsolid %s\n" % filename)
tweaked = "".join(tweaked)
return tweaked
def write_facett(self, facett):
return """\nfacet normal %f %f %f
outer loop
vertex %f %f %f
vertex %f %f %f
vertex %f %f %f
endloop
endfacet""" % (facett[0, 0], facett[0, 1], facett[0, 2], facett[1, 0],
facett[1, 1], facett[1, 2], facett[2, 0], facett[2, 1],
facett[2, 2], facett[3, 0], facett[3, 1], facett[3, 2])
def rotate_bin_stl(self, rotation_matrix, content):
"""Rotate the object and save as binary STL. This module is currently replaced
by the ascii version. If you want to use binary STL, please do the
following changes in Tweaker.py: Replace "rotatebinSTL" by "rotateSTL"
and set in the write sequence the open outfile option from "w" to "wb".
However, the ascii version is much faster in Python 3."""
mesh = np.array(content, dtype=np.float64)
# prefix area vector, if not already done (e.g. in STL format)
if len(mesh[0]) == 3:
row_number = int(len(content) / 3)
mesh = mesh.reshape(row_number, 3, 3)
# upgrade numpy with: "pip install numpy --upgrade"
rotated_content = np.matmul(mesh, rotation_matrix)
v0 = rotated_content[:, 0, :]
v1 = rotated_content[:, 1, :]
v2 = rotated_content[:, 2, :]
normals = np.cross(np.subtract(v1, v0), np.subtract(v2, v0)
).reshape(int(len(rotated_content)), 1, 3)
rotated_content = np.hstack((normals, rotated_content))
# header = "Tweaked on {}".format(time.strftime("%a %d %b %Y %H:%M:%S")
# ).encode().ljust(79, b" ") + b"\n"
# header = struct.pack("<I", int(len(content) / 3)) # list("solid %s" % filename)
tweaked_array = list(map(self.write_bin_facett, rotated_content))
# return header + b"".join(tweaked_array)
# return b"".join(tweaked_array)
return tweaked_array
def write_bin_facett(self, facett):
tweaked = struct.pack("<fff", facett[0][0], facett[0][1], facett[0][2])
tweaked += struct.pack("<fff", facett[1][0], facett[1][1], facett[1][2])
tweaked += struct.pack("<fff", facett[2][0], facett[2][1], facett[2][2])
tweaked += struct.pack("<fff", facett[3][0], facett[3][1], facett[3][2])
tweaked += struct.pack("<H", 0)
return tweaked
|
[
"noreply@github.com"
] |
zhengjie607.noreply@github.com
|
b0103c755500f16c2e7f91e4da8bdb8227cb56d6
|
ffe7a968e8d7087a9b2e2df6495c074160147a9a
|
/package/THR/energy_force.py
|
8790039ec520468c9b396d1f2cccc7d299f4bffd
|
[] |
no_license
|
hyq2017/MLREPS
|
34008144448632fae41d8ac8294ad2fb09d2dead
|
0a7cb8f57ade804cd397e7eabe36217a30a3176f
|
refs/heads/master
| 2023-03-21T14:55:08.955011
| 2021-03-17T02:40:10
| 2021-03-17T02:40:10
| 348,557,506
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,948
|
py
|
import os
import numpy as np
def input_coord(coord, atoms):
a = np.loadtxt(coord)
b = np.reshape(a,(1, atoms * 3))
return b
# read the type and index of atoms
def type_index():
f = open('type.raw','r')
for line in f:
types = np.array([int(x) for x in line.split()])
f.close()
index_C = np.where(types == 0)
index_H = np.where(types == 1)
index_O = np.where(types == 2)
index_N = np.where(types == 3)
index_S = np.where(types == 4)
index = np.concatenate((index_C,index_H,index_O,index_N,index_S),axis=1)
return index
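# the returned indices are grouped by element type, concatenated in the fixed
# order C, H, O, N, S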
def main():
#os.system("cp ../package/ALA/type.raw .")
#os.system("cp ../input.ALA .")
atoms = 26
i = 0
energy_bias = 0.0001
f_box = open('box.raw', 'a+')
input_file = list()
a = np.zeros((1, atoms * 3))
for line in open("input.THR"):
fragment = line.strip('\n')
os.system("cp ../" + fragment + " ." )
input_file.append(line.strip('\n'))
b = input_coord(fragment, atoms)
a = np.concatenate((a, b), axis=0)
i = i + 1
f_box.write('40.0 0.0 0.0 0.0 40.0 0.0 0.0 0.0 40.0' + '\n')
np.savetxt('coord.raw', a[1:])
f_box.close()
os.system('../package/bin/raw_to_set.sh ' + str(i))
os.system('../package/bin/dp_test -m ../package/model/THR.pb -s . -S set -n ' + str(i) + ' -d detail')
os.system('rm box.raw')
# read energy
f = open('detail.e.out', 'r')
next(f)
energy = list()
for line in f:
energy.append(np.array([float(x) for x in line.split()])[1])
f.close()
# write energy
for i in range(len(input_file)):
f_e = open(input_file[i][0:len(input_file[i])-3] + 'log', 'a+')
f_e.write('energy: ' + '\n' + str(energy[i] / 27.21138602 - energy_bias))
f_e.write('\n')
f_e.close()
# read disorder force
f = open('detail.f.out', 'r')
next(f)
force_disorder = np.zeros((atoms * 100, 3))
num = 0
for line in f:
force_disorder[num,:] = np.array([float(x) for x in line.split()])[3:6]
num=num+1
f.close()
# read the type and index of atoms
index = type_index()
# get order force
count = int(num / atoms)
force_sub = np.zeros((count, atoms, 3))
for i in range(count):
force_sub[i][0:atoms, :] = force_disorder[(i*atoms):((i+1)*atoms), :] / 27.21138602 * 0.529177249
force = np.zeros((count, atoms, 3))
for i in range(len(input_file)):
count = 0
for j in index[count]:
force[i][j,:] = force_sub[i][count,:]
count = count + 1
f_f = open(input_file[i][0:len(input_file[i])-3] + 'log', 'a+')
f_f.write('force:' + '\n')
f_f.write(str(force[i]).replace('[', '').replace(']', '') + '\n')
f_f.close()
#os.system("rm detail.*")
#os.system("rm coord.raw")
#os.system("rm -rf set.000")
if __name__ == "__main__":
main()
|
[
"hanyanqiang@sjtu.edu.cn"
] |
hanyanqiang@sjtu.edu.cn
|
0d07c6ee9c3c9951482e630fd0d275514053d4d0
|
e8efa40c295bc86fe7fd621ae91e41ed52737e9d
|
/2020/examples-in-class-2020-11-19/example_files1.py
|
44a7bc2b133c0feec80e494e2dd1945f4cbd062d
|
[
"Apache-2.0"
] |
permissive
|
ati-ozgur/course-python
|
25eeb65a370e0ba7c9543c108fcfbe64c5e5af85
|
583e4df3387493d67f5ca5aeb617cfaf14a5e5a1
|
refs/heads/master
| 2023-01-12T05:33:03.831949
| 2022-12-28T11:33:19
| 2022-12-28T11:33:19
| 210,133,153
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
import file_helper
x = file_helper.count_files_in_directory("..")
print(f"There are {x} files")
|
[
"ati.ozgur@gmail.com"
] |
ati.ozgur@gmail.com
|
b7edf37b40c9637c6727e45afbb2dfdcc10d1364
|
7fb06cdb8872f6fe46f6272a32670e66e7589d4a
|
/utils/io.py
|
70c6e427bded311ac4e662b57875edce4d4450bb
|
[
"MIT"
] |
permissive
|
eug/minimum-spanning-tree
|
8b17e887447f7e1bda23367742a1bf1553bcac9e
|
a5fba6f4ce52f2c0ac6fec7471e4567a06b60629
|
refs/heads/master
| 2020-03-17T12:15:18.167872
| 2018-05-24T16:55:12
| 2018-05-24T16:55:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,619
|
py
|
import matplotlib.pyplot as plt
from utils.graph import euclidean
def read_input(datafile, classfile):
"""Read the data points file and class id of each point.
Args:
datafile (str): Data points file.
classfile (str): Point class file.
Returns:
tuple: Returns a tuple containing a list of points and a list
containing the class of each point.
"""
points = []
with open(datafile, 'r') as f:
for i, line in enumerate(f.readlines()):
x, y = list(map(float,line.split()))
points.append((i, x, y))
classes = []
with open(classfile, 'r') as f:
for i, line in enumerate(f.readlines()):
classes.append((i, int(line)))
return points, classes
def save_mst_csv(filename, edges):
"""Save MST into a csv file.
Args:
filename (str): Output filename.
edges (list): List of tuple representing edges as (src, dst, weight).
"""
with open(filename, 'w') as f:
f.write('source,destination,weight\n')
for src, dst, weight in edges:
f.write('{},{},{}\n'.format(src, dst, weight))
def save_mst_png(filename, edges, points):
"""Save MST into a png file.
Args:
filename (str): Output filename.
edges (list): List of tuple representing edges as (src, dst, weight).
points (list): List of tuple representing points as (x, y).
"""
for src, dst, _ in edges:
p, q = [points[src][1], points[dst][1]], [points[src][2], points[dst][2]]
plt.plot(p, q, marker='o', ms=3, mfc='red', mec='red', color='black')
plt.savefig(filename, dpi=300)
def save_clusters_csv(filename, classes):
"""Save clusters into a csv file.
Args:
filename (str): Output filename.
classes (list): Class of each data point.
"""
with open(filename, 'w') as f:
f.write('class_id\n')
for _class in classes:
f.write('{}\n'.format(_class))
def save_clusters_png(filename, classes, points):
"""Save clusters into a png file.
Args:
filename (str): Output filename.
classes (list): Class of each data point.
points (list): List of tuple representing points as (x, y).
"""
colormap = {
0: 'black', 5: 'red',
1: 'magenta', 6: 'blue',
2: 'green', 7: 'yellow',
3: 'orange', 8: 'purple',
4: 'gray', 9: 'cyan'
}
for i, c in enumerate(classes):
i, x, y = points[i]
plt.plot(x, y, marker='o', ms=2, color=colormap[c])
plt.savefig(filename, dpi=300)
|
[
"eugfcl@gmail.com"
] |
eugfcl@gmail.com
|
7d1cb319c7f3c8c7e2130c09bc30186024b15a1b
|
2f6817fc8f6ddb48f5f88c913d8e40b672fc3dbf
|
/DataAnalysis/cgv_other.py
|
f1f87820177fe80fb9595095645a14e7b30e9d2d
|
[] |
no_license
|
cutz-j/TodayILearned
|
320b5774de68a0f4f68fda28a6a8b980097d6ada
|
429b24e063283a0d752ccdfbff455abd30ba3859
|
refs/heads/master
| 2020-03-23T17:34:51.389065
| 2018-11-24T08:49:41
| 2018-11-24T08:49:41
| 141,865,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 750
|
py
|
from bs4 import BeautifulSoup
import urllib.request as req
import os.path
url="http://www.cgv.co.kr/movies/"
savename="movie.xml"
if not os.path.exists(savename) :
req.urlretrieve(url,savename)
xml=open(savename,mode="r",encoding="utf-8")
soup=BeautifulSoup(xml,"html.parser")
info = {}
chart=soup.find("div", "wrap-movie-chart")
for i in range(len(chart.find_all("strong","rank"))):
rank=chart.find_all("strong","rank")[i].text
name=chart.find_all("strong","title")[i].text
percent=chart.find_all("span","percent")[i].text
if not(rank in info):
info[rank]=[]
info[rank].append(name)
info[rank].append(percent)
for locW in info :
print("+",locW)
for name in info[locW]:
print("|-",name)
|
[
"cutz309@gmail.com"
] |
cutz309@gmail.com
|
7054a8d24bb0b55a1ce940c8f37999f39220a230
|
7787e5575ef1f989c80157a3c955a4b3aa0982e0
|
/assignment1/q4_softmaxreg.py
|
dd58d71bf13337c0f1a2eba868d0dc52e206f542
|
[] |
no_license
|
marcoleewow/CS224D
|
0a9d5bb0d747b86f8f1cc19684acedd6d69eed37
|
7bcd2e7ab209d88f5d30d58af9ac6a56832dd59a
|
refs/heads/master
| 2021-01-23T02:48:09.697456
| 2017-03-24T04:10:41
| 2017-03-24T04:10:41
| 86,022,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,419
|
py
|
import numpy as np
import random
from cs224d.data_utils import *
from q1_softmax import softmax
from q2_gradcheck import gradcheck_naive
from q3_sgd import load_saved_params
def getSentenceFeature(tokens, wordVectors, sentence):
""" Obtain the sentence feature for sentiment analysis by averaging its word vectors """
# Implement computation for the sentence features given a sentence.
# Inputs:
# - tokens: a dictionary that maps words to their indices in
# the word vector list
# - wordVectors: word vectors (each row) for all tokens
# - sentence: a list of words in the sentence of interest
# Output:
# - sentVector: feature vector for the sentence
sentVector = np.zeros((wordVectors.shape[1],))
### YOUR CODE HERE
for i in sentence:
sentVector += wordVectors[tokens[i]]
sentVector /= len(sentence)
### END YOUR CODE
return sentVector
def softmaxRegression(features, labels, weights, regularization = 0.0, nopredictions = False):
""" Softmax Regression """
# Implement softmax regression with weight regularization.
# Inputs:
# - features: feature vectors, each row is a feature vector
# - labels: labels corresponding to the feature vectors
# - weights: weights of the regressor
# - regularization: L2 regularization constant
# Output:
# - cost: cost of the regressor
# - grad: gradient of the regressor cost with respect to its
# weights
# - pred: label predictions of the regressor (you might find
# np.argmax helpful)
prob = softmax(features.dot(weights))
if len(features.shape) > 1:
N = features.shape[0]
else:
N = 1
# A vectorized implementation of 1/N * sum(cross_entropy(x_i, y_i)) + 1/2*|w|^2
cost = np.sum(-np.log(prob[range(N), labels])) / N
cost += 0.5 * regularization * np.sum(weights ** 2)
### YOUR CODE HERE: compute the gradients and predictions
pred = np.argmax(prob, axis=1) # the predicted label is the class with the highest softmax probability
# delta is the gradient associated with the loss (softmax layer only)
delta = prob
delta[np.arange(N), labels] -= 1 # delta = y^-y
delta /= N
grad = np.dot(features.T, delta) #backprop the weight just like from before
grad += regularization * weights #adding the regularization to the gradient
### END YOUR CODE
if nopredictions:
return cost, grad
else:
return cost, grad, pred
def accuracy(y, yhat):
""" Precision for classifier """
assert(y.shape == yhat.shape)
return np.sum(y == yhat) * 100.0 / y.size
def softmax_wrapper(features, labels, weights, regularization = 0.0):
cost, grad, _ = softmaxRegression(features, labels, weights,
regularization)
return cost, grad
def sanity_check():
"""
Run python q4_softmaxreg.py.
"""
random.seed(314159)
np.random.seed(265)
dataset = StanfordSentiment()
tokens = dataset.tokens()
nWords = len(tokens)
_, wordVectors0, _ = load_saved_params()
wordVectors = (wordVectors0[:nWords,:] + wordVectors0[nWords:,:])
dimVectors = wordVectors.shape[1]
dummy_weights = 0.1 * np.random.randn(dimVectors, 5)
dummy_features = np.zeros((10, dimVectors))
dummy_labels = np.zeros((10,), dtype=np.int32)
for i in range(10):
words, dummy_labels[i] = dataset.getRandomTrainSentence()
dummy_features[i, :] = getSentenceFeature(tokens, wordVectors, words)
print ("==== Gradient check for softmax regression ====")
gradcheck_naive(lambda weights: softmaxRegression(dummy_features,
dummy_labels, weights, 1.0, nopredictions = True), dummy_weights)
print ("\n=== Results ===")
print (softmaxRegression(dummy_features, dummy_labels, dummy_weights, 1.0))
if __name__ == "__main__":
sanity_check()
|
[
"marco@goodnotesapp.com"
] |
marco@goodnotesapp.com
|
5724b12146fc17a26bc384a9dce393de2917f852
|
732605a2bf9bc5470fcca8d8710440ad563ac452
|
/25-day.py
|
0db90d7fda73714449dc910ccc9f1f2a128345f0
|
[] |
no_license
|
capJavert/advent-of-code-2017
|
6417f6b6fa16cc0c3383baa6bf0cab6edb47292a
|
0ad7669ea00251e0cbf63c30b964b363d4270d2f
|
refs/heads/master
| 2021-09-01T06:56:41.066536
| 2017-12-25T14:11:37
| 2017-12-25T14:11:37
| 112,718,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,816
|
py
|
from collections import Counter
def main():
infinity_tape = {}
state = "A"
p = 0
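# infinity_tape emulates an infinite tape (position -> 0/1) and p is the cursor;
# the loop below runs the puzzle's Turing machine for the given number of steps
# before the checksum (the count of 1s) is printed.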
for _ in range(12523873):
if p not in infinity_tape:
infinity_tape[p] = 0
if state == "A":
if infinity_tape[p] == 0:
infinity_tape[p] = 1
p += 1
state = "B"
else:
infinity_tape[p] = 1
p -= 1
state = "E"
elif state == "B":
if infinity_tape[p] == 0:
infinity_tape[p] = 1
p += 1
state = "C"
else:
infinity_tape[p] = 1
p += 1
state = "F"
elif state == "C":
if infinity_tape[p] == 0:
infinity_tape[p] = 1
p -= 1
state = "D"
else:
infinity_tape[p] = 0
p += 1
state = "B"
elif state == "D":
if infinity_tape[p] == 0:
infinity_tape[p] = 1
p += 1
state = "E"
else:
infinity_tape[p] = 0
p -= 1
state = "C"
elif state == "E":
if infinity_tape[p] == 0:
infinity_tape[p] = 1
p -= 1
state = "A"
else:
infinity_tape[p] = 0
p += 1
state = "D"
elif state == "F":
if infinity_tape[p] == 0:
infinity_tape[p] = 1
p += 1
state = "A"
else:
infinity_tape[p] = 1
p += 1
state = "C"
print(Counter(infinity_tape.values())[1], "wink wink")
main()
|
[
"ante.baric3@gmail.com"
] |
ante.baric3@gmail.com
|
9264b9d35b134aa0f0c04498b67d8f30691bcfc6
|
28bf7793cde66074ac6cbe2c76df92bd4803dab9
|
/answers/Anuraj Pariya/day 21/question 2.py
|
a7e421f510e2e64d81f4e6a6bd6111c781c5f180
|
[
"MIT"
] |
permissive
|
Codechef-SRM-NCR-Chapter/30-DaysOfCode-March-2021
|
2dee33e057ba22092795a6ecc6686a9d31607c9d
|
66c7d85025481074c93cfda7853b145c88a30da4
|
refs/heads/main
| 2023-05-29T10:33:31.795738
| 2021-06-10T14:57:30
| 2021-06-10T14:57:30
| 348,153,476
| 22
| 135
|
MIT
| 2021-06-10T14:57:31
| 2021-03-15T23:37:26
|
Java
|
UTF-8
|
Python
| false
| false
| 279
|
py
|
n=int(input('enter no.'))
def min_sum(arr, n):
arr.sort()
a = 0; b = 0
for i in range(n):
if (i % 2 != 0):
a = a * 10 + arr[i]
else:
b = b * 10 + arr[i]
return a + b
arr = list(map(int,input('Enter arr').split()))
n = len(arr)
print("min sum is" , min_sum(arr, n))
|
[
"noreply@github.com"
] |
Codechef-SRM-NCR-Chapter.noreply@github.com
|
8ac1c8637320af78ffcfbc30d8072692148e8dce
|
95111bd6b6db6cbd6677659c02af2d5fdf3bce6c
|
/mine_entropy.py
|
9002e34d0899dd6e63d10cda636d41e40fc63bdd
|
[] |
no_license
|
wangxuuu/gaussian_entropy_estimate
|
4fc01d227ef8d3e7062fd7833377c4b01b00043b
|
56f63ec39592c4cfcdc824a519c34efb07ce5c14
|
refs/heads/master
| 2020-10-02T11:02:45.323464
| 2020-01-12T01:27:17
| 2020-01-12T01:27:17
| 227,762,115
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,858
|
py
|
#%%
import numpy as np
import torch
import matplotlib.pyplot as plt
from data.mix_gaussian import MixedGaussian
from models import mine
#%%
if torch.cuda.is_available():
torch.set_default_tensor_type(torch.cuda.FloatTensor)
else:
torch.set_default_tensor_type(torch.FloatTensor)
for d in range(1, 4):
# d = 1 # d is the dimension of X and Y. The dimension of joint mix-gaussian distribution is 2*d
rho = 0.9
sample_size = 400
X = np.zeros((sample_size, d))
Y = np.zeros((sample_size, d))
mg = MixedGaussian(sample_size=sample_size, rho1=rho, rho2=-rho)
hx, hy, hxy, mi = mg.ground_truth
"""
Split the 2-d mix-Gaussian data into 2 components and assign them to X and Y respectively.
Generate 2-d mix-Gaussian data from the same distribution and repeat this step d times.
(X, Y) follows a mixed Gaussian distribution, but X and Y individually do not.
"""
for i in range(d):
data = mg.data
X[:, i] = data[:, 0]
Y[:, i] = data[:, 1]
import scipy.stats as st
pdf = st.multivariate_normal(
mean=np.zeros(d),
cov=np.identity(d))
density_x = pdf.pdf(X) # p(x)
density_y = pdf.pdf(Y) # p(y)
density_xy = density_x * density_y
# The cross entropy of the reference distribution, i.e. product of marginal distribution
H_crossentropy = -np.log(density_xy).mean()
# Ground truth of entropy of mixed gaussian distribution (X,Y)
# As the 2-d mix-gaussian data is generated independently, the entropy of (X, Y) is d times
# the entropy of the 2-d mix-gaussian data.
h_xy = hxy * d
# -------------------------- Training ----------------------------- #
# Using Neural Network to estimate the entropy of the generated Gaussian distribution
batch_size = 100
lr = 1e-4
ma_rate = 0.1
NN = mine.MINE(torch.Tensor(X), torch.Tensor(Y), batch_size=batch_size, lr=lr, ma_rate=ma_rate)
num_iteration = 100000
entropy_XY = []
dXY_list = []
for i in range(num_iteration):
NN.step()
dXY = NN.forward()
entropy_XY.append(H_crossentropy - dXY)
dXY_list.append(dXY)
#%%
ma_rate = 0.01 # rate of moving average
entropy_list = entropy_XY.copy() # see also the estimate() member function of MINE
for i in range(1,len(entropy_list)):
entropy_list[i] = (1-ma_rate) * entropy_list[i-1] + ma_rate * entropy_list[i]
#%%
# plot the estimate entropy
plt.figure()
plt.plot(entropy_list, label='XY entropy')
plt.axhline(h_xy, label='ground truth', linestyle='--', color='red')
plt.xlabel('Iteration')
plt.ylabel('entropy')
plt.title('XY dim=%d, learnrate=%f' % (2 * d, lr))
plt.legend()
plt.savefig("./results/mine/dim=%d learnrate=%f.png" % (2*d,lr))
plt.show()
|
[
"wangxucoco@live.com"
] |
wangxucoco@live.com
|
f8390f5703f56978be7a5258e9fcd15a47f00db9
|
c9cdd039831fd3babde9b34203a7a0e67adc2edc
|
/Chapter10/cannonball.py
|
28f3d1530d43f70177025c35cf54f29a53f69759
|
[] |
no_license
|
quynguyen2303/python_programming_introduction_to_computer_science
|
7f646bd560059b45c20809dd0a3857548f98ff3e
|
d17a496c57ebb290bcbc06b0ad2ae58e5719446f
|
refs/heads/master
| 2021-06-11T15:45:38.251760
| 2016-10-10T09:20:21
| 2016-10-10T09:20:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,005
|
py
|
# cannonball.py
# Simulation The Flying CannonBall
'''
Input the simulation parameter: angle, velocity, height, interval
Calculate the inital position of the cannonball: xpos, ypos
Calculate the inital velocities of the cannonball: xvel, yvel
While the cannonball is still flying:
update the values of xpos, ypos, and yvel for interval seconds
further into the flight
Output the distance traveled as xpos
'''
from math import cos, sin, radians
from tracker import Tracker
from graphics import *
def main():
win = GraphWin('CannonBall', 500, 500)
win.setCoords(-100,-100,100,100)
angle, vel, h0, time = getInputs()
cball = Projectile(angle, vel, h0)
track = Tracker(win, cball)
while cball.getY() >= 0:
cball.update(time)
track.update(cball)
print('\nThe distance traveled: %0.1f meters.' % (cball.getX()))
print(track)
def getInputs():
angle = float(input('What is the angle of the launch: '))
vel = float(input('What is the initial velocity: '))
h0 = float(input('What is the height: ' ))
time = float(input('Enter the time interval between calculations: '))
return angle, vel, h0, time
class Projectile:
def __init__(self, angle, velocity, height):
self.xpos = 0.0
self.ypos = height
# No more use so do not need create instance variable
theta = radians(angle)
self.xvel = velocity * cos(theta)
self.yvel = velocity * sin(theta)
maxYPos = (self.yvel / 9.8) * self.yvel / 2.0
self.maxHeight = height + maxYPos
def getY(self):
return self.ypos
def getX(self):
return self.xpos
def getMaxY(self):
return self.maxHeight
def update(self, time):
self.xpos = self.xpos + self.xvel * time
yvel1 = self.yvel - time * 9.8
self.ypos = self.ypos + time * (self.yvel + yvel1) / 2.0
self.yvel = yvel1
if __name__ == '__main__':
main()
|
[
"23editorcs@gmail.com"
] |
23editorcs@gmail.com
|
9b6118b05978f72b96ef8afb2230654aae8f5e8f
|
b55e1ce89cde734f2c052e123a4d5364daa7cf01
|
/goalParserInterpretation.py
|
de7cbea060a8515c9575473fbb38c0373705075a
|
[] |
no_license
|
rojinaAmatya/LeetCode
|
89d015034cee7ed0aa618fabbd91edc5c5ec5472
|
d4bde7602e559105c96560fe98916e3ec5821439
|
refs/heads/main
| 2023-07-14T05:28:22.924697
| 2021-08-25T21:49:38
| 2021-08-25T21:49:38
| 332,339,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
'''
You own a Goal Parser that can interpret a string command.
The command consists of an alphabet of "G", "()" and/or "(al)" in some order.
The Goal Parser will interpret "G" as the string "G", "()" as the string "o",
and "(al)" as the string "al". The interpreted strings are then concatenated
in the original order.
Given the string command, return the Goal Parser's interpretation of command.
Input: command = "G()(al)"
Output: "Goal"
Explanation: The Goal Parser interprets the command as follows:
G -> G
() -> o
(al) -> al
The final concatenated result is "Goal".
'''
def interpret(command):
return command.replace("()","o").replace("(al)","al")
print(interpret("(al)G(al)()()G"))
|
[
"rojinaamatya1432@gmail.com"
] |
rojinaamatya1432@gmail.com
|
2ab3e121946ea737856a1831603df70176873e67
|
d6d9d0af6fd4d0ec95c999a183d4d2f277a2fb75
|
/pollproject/poll/poll/urls.py
|
e88a4d824621b661cb497246357505dbf721cf14
|
[] |
no_license
|
Sagar0802/profile
|
39ebf745f4bc08e80e59e12fc4341a5b41febc3c
|
7bad4a66b6a85ebebfa2283e167c57de13c7463b
|
refs/heads/master
| 2022-12-19T15:08:09.323375
| 2020-09-12T14:11:19
| 2020-09-12T14:11:19
| 294,954,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
"""poll URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('pollusers.urls')),
]+ static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
[
"noreply@github.com"
] |
Sagar0802.noreply@github.com
|
d55d52d0f49202267bb6060641c257b844d09a53
|
6e5ab77fee1fb4a0310213dd8c6dd8601828b1b9
|
/Algorithm/Swea/D3_3408.py
|
28df8980751bd7eebf46345ef9c142915a8db321
|
[] |
no_license
|
hongyong3/TIL
|
36d031c0da9e3e6db3eebb977bd3e12df00a849f
|
7f1492128e957a78fc95b255f4f7f2978161e471
|
refs/heads/master
| 2023-08-19T09:16:03.231757
| 2023-08-18T09:38:47
| 2023-08-18T09:38:47
| 162,100,258
| 1
| 0
| null | 2023-02-11T00:52:32
| 2018-12-17T08:42:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 269
|
py
|
import sys
sys.stdin = open("D3_3408_input.txt", "r")
T = int(input())
for test_case in range(T):
N = int(input())
    # S1: sum of 1..N; S2: sum of the first N odd numbers (= N^2);
    # S3: sum of the first N even numbers (= N * (N + 1))
    S1 = N * (N + 1) // 2
    S2 = N ** 2
    S3 = N * (N + 1)
print("#{} {} {} {}".format(test_case + 1, S1, S2, S3))
|
[
"chy2495@naver.com"
] |
chy2495@naver.com
|
19ca0c6d0d0ac377f79ba1a36e4525ecc531a5b1
|
3f10f700912e8e65cc1604ebc1070734f8e7ce29
|
/mftracker.py
|
007b9a2e6ea75d132853912a5cfad41b30397341
|
[
"MIT"
] |
permissive
|
c00kie17/Indian-Mutual-Fund-Tracker
|
4e2f15f936c2d83532ac43a8bf6413ea6dca3695
|
7104c2a617a3dcc6c35829d1d0310575f121d85d
|
refs/heads/master
| 2020-03-24T06:30:48.242496
| 2018-12-10T10:14:15
| 2018-12-10T10:14:15
| 142,531,394
| 9
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,419
|
py
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import WebDriverException
import csv
from tqdm import tqdm
from time import sleep
import platform
from datetime import datetime,timedelta
import sys
import os
def getdata(link,name):
data = []
options = Options()
options.add_argument("--headless")
options.add_argument('--no-sandbox')
linkdriver = webdriver.Chrome(chrome_options=options, executable_path=chromefile)
linkdriver.get(link)
data.append(name)
try:
try:
monthValBox = WebDriverWait(linkdriver, 20).until(EC.presence_of_element_located((By.XPATH, "/html/body/div[1]/div/span/div/div[3]/div/div[1]/div/div[1]/form/div[1]/div[1]/label/input")))
except TimeoutException:
return data
else:
monthValBox.clear()
monthValBox.send_keys(monthlyAmount)
try:
startDateBox = WebDriverWait(linkdriver, 20).until(EC.visibility_of_element_located((By.XPATH, '//*[@ng-model="sip_start_date"]')))
except TimeoutException:
return data
else:
startDateBox.clear()
startDateBox.send_keys(str(startDate))
try:
endDateBox = WebDriverWait(linkdriver, 20).until(EC.visibility_of_element_located((By.XPATH, '//*[@ng-model="sip_end_date"]')))
except TimeoutException:
return data
else:
endDateBox.clear()
endDateBox.send_keys(str(endDate))
try:
calcBox = WebDriverWait(linkdriver, 20).until(EC.visibility_of_element_located((By.XPATH, '/html/body/div[1]/div/span/div/div[3]/div/div[1]/div/div[1]/form/div[1]/div[4]/button')))
except TimeoutException:
return data
else:
calcBox.click()
values = linkdriver.find_elements_by_class_name("fund-details");
except WebDriverException:
return data
for index in range(len(values)-8,len(values)):
if(index < len(values)-5):
value = values[index].text[1:]
else:
value = values[index].text
data.append(value)
linkdriver.close()
return data
def start():
options = Options()
options.add_argument("--headless")
options.add_argument('--no-sandbox')
driver = webdriver.Chrome(chrome_options=options, executable_path=chromefile)
driver.set_window_size(1120, 550)
driver.get("https://coin.zerodha.com/funds")
sleep(2)
funds = driver.find_elements_by_class_name("cursor-pointer");
if(len(funds) == 0):
print("no links found error")
driver.close()
start()
sortedFunds = []
if(keyword == ""):
sortedFunds = funds
else:
for fund in funds:
if keyword.upper() in fund.text.upper():
sortedFunds.append(fund)
pbar = tqdm(total=len(sortedFunds))
for fund in sortedFunds:
data = getdata(fund.get_attribute("href"),fund.text)
writer.writerow(data)
pbar.update(1)
pbar.close()
driver.close()
def getMonthlyAmount():
print("enter monthly amount: ")
monthlyAmount = str(input())
if(not monthlyAmount.isdigit()):
print("invalid number")
        return getMonthlyAmount()
else:
if(int(monthlyAmount) > 1000000000):
print("amount too high")
            return getMonthlyAmount()
return monthlyAmount
def getStartDate():
print("enter SIP start date in format MM/DD/YYYY")
startDate = input()
try:
today = datetime.now()
date = datetime.strptime(str(startDate), '%m/%d/%Y')
except ValueError:
print("date format wrong")
getStartDate()
else:
if(date > (today-timedelta(days=1))):
print("start date needs to be one day behind current day")
getStartDate()
return startDate
def getEndDate():
print("enter SIP end date in format MM/DD/YYYY")
endDate = input()
try:
today = datetime.now()
date = datetime.strptime(str(endDate), '%m/%d/%Y')
        if(date > today):
            print("end date cannot be greater than current day")
            return getEndDate()
except ValueError:
print("date format wrong")
getEndDate()
return endDate
def getKeyword():
print("enter a keyword, if you want data on all MF leave blank")
keyword = input()
if(any(char.isdigit() for char in keyword)):
print("no number in keyword")
getKeyword()
return keyword
def getFilename():
print("enter filename")
fname = input()
if(fname == ""):
print("filename cannot be blank")
getFilename()
return fname
if __name__ == '__main__':
if(platform.system() == "Darwin"):
try:
chromefile = os.path.join(sys._MEIPASS, "chromedrivermac")
except AttributeError:
chromefile = os.getcwd()+ "/driver/chromedrivermac"
elif(platform.system() == "Windows"):
try:
chromefile = os.path.join(sys._MEIPASS, "chromedriverwin.exe")
except AttributeError:
chromefile = os.getcwd()+"/driver/chromedriverwin.exe"
else:
try:
chromefile = os.path.join(sys._MEIPASS, "chromedriverlinux")
except AttributeError:
chromefile = os.getcwd()+ "/driver/chromedriverlinux"
monthlyAmount = getMonthlyAmount()
startDate = getStartDate()
endDate = getEndDate()
keyword = getKeyword()
fname = getFilename()
csvfilePath = os.path.join(os.path.dirname(sys.argv[0]), fname+'.csv')
file = open(csvfilePath, 'w+')
writer = csv.writer(file)
writer.writerow(["Name","Total invested","Current valuation","Net profit","Absolute profit","Total installments","Internal rate of return","SIP start date","SIP end date"])
print("Press Ctrl+C to exit program")
start()
|
[
"anshul1708@gmail.com"
] |
anshul1708@gmail.com
|
d4fec2d3e216a38b62d8797b841cd684c6ce8b9e
|
2c691f89e0cbc83bd2dde3ca7f01d441a0493652
|
/Tests/testGVS.py
|
2b37661797ec9cbc223ca063e84fab5a909e706c
|
[] |
no_license
|
NNiehof/GVSNoise
|
eebe347851c1d4ff7ba39aaaa4d12ba6a38c8055
|
9269a816dc6142166a4d783a57203d67194cec14
|
refs/heads/master
| 2020-03-21T02:52:42.602058
| 2018-07-19T14:29:43
| 2018-07-19T14:29:43
| 138,025,814
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,482
|
py
|
# Nynke Niehof, 2018
import numpy as np
import unittest
from sys import path
from os.path import dirname
path.append(dirname(path[0]))
from Experiment.GVS import GVS
class TestMaxVoltage(unittest.TestCase):
def test_upper_lim(self):
self.gvs1 = GVS(max_voltage=5.0)
self.assertAlmostEqual(self.gvs1.max_voltage, 1.0)
self.gvs1.quit()
def test_negative_lim(self):
self.gvs2 = GVS(max_voltage=-40)
self.assertAlmostEqual(self.gvs2.max_voltage, 1.0)
self.gvs2.quit()
def test_change_upper_lim(self):
self.gvs3 = GVS(max_voltage=2.5)
self.gvs3.max_voltage = 10
self.assertAlmostEqual(self.gvs3.max_voltage, 1.0)
self.gvs3.quit()
def test_voltage_below_upper_lim(self):
self.gvs4 = GVS(max_voltage=0.5)
self.assertAlmostEqual(self.gvs4.max_voltage, 0.5)
self.gvs4.quit()
def test_signal():
"""
Generate a signal with an alternating step from 0 V to 1 V and to -1 V.
Check the generated voltage with an oscilloscope.
"""
gvs = GVS(max_voltage=3.0)
connected = gvs.connect("cDAQ1Mod1/ao0")
if connected:
samples = np.concatenate((np.zeros(500), np.ones(1000), np.zeros(500)))
samples = np.concatenate((samples, -samples, samples, -samples))
gvs.write_to_channel(samples)
gvs.quit()
if __name__ == "__main__":
    unittest.main(exit=False)  # exit=False so the manual signal check below still runs
    test_signal()
|
[
"nynkeniehof@gmail.com"
] |
nynkeniehof@gmail.com
|
83f56f90607d9d5d2e892120892412559fa59f47
|
a4e5acf6ed0b0be77aa8b1cbe2168c8d12ae83b3
|
/7/p12.py
|
88baa4a473c06a57d25bdc635570759ddb4a7a31
|
[] |
no_license
|
ebuehrle/advent-of-code-2020
|
a107f8f79a15527c90aa71127f22e671d39b4d69
|
11318607b12e002273673b9fd2c4efa237eaea5e
|
refs/heads/main
| 2023-02-09T22:17:25.126722
| 2020-12-25T09:03:40
| 2020-12-25T09:03:40
| 317,792,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,834
|
py
|
def parse_rule(rule):
container_color, content = rule.split(' bags contain ')
content = content.split(', ')
content = [c.split() for c in content]
content = [(0 if c[0] == 'no' else int(c[0]), c[1] + ' ' + c[2]) for c in content]
return (container_color, content)
def add_rule(graph, rule):
graph.update({rule[0]: rule[1]})
return graph
def add_invrule(graph, rule):
if not rule[1]:
return graph
container_color = rule[0]
for n, bag_color in rule[1]:
if bag_color not in graph:
graph[bag_color] = {container_color}
else:
graph[bag_color].add(container_color)
return graph
def get_containers(bag_color, inv_digraph):
visited = set()
found = set()
def dfs(bag_color, inv_digraph, visited, found):
if bag_color in visited or bag_color not in inv_digraph:
return
visited.add(bag_color)
for container_color in inv_digraph[bag_color]:
found.add(container_color)
dfs(container_color, inv_digraph, visited, found)
dfs(bag_color, inv_digraph, visited, found)
return found
def count_inside_bags(bag_color, digraph):
if bag_color not in digraph:
return 0
count = 0
for n, c in digraph[bag_color]:
count += n
count += n * count_inside_bags(c, digraph)
return count
if __name__ == '__main__':
import sys
import functools
rules = list(map(parse_rule, map(str.strip, sys.stdin)))
digraph = functools.reduce(add_rule, rules, dict())
inv_digraph = functools.reduce(add_invrule, rules, dict())
containers = get_containers('shiny gold', inv_digraph)
print('P1:', len(containers))
num_inside_bags = count_inside_bags('shiny gold', digraph)
print('P2:', num_inside_bags)
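# Illustrative walk-through (hypothetical rule text, not taken from any puzzle input):
#   parse_rule("light red bags contain 1 bright white bag, 2 muted yellow bags.")
#   -> ('light red', [(1, 'bright white'), (2, 'muted yellow')])
# add_rule folds such tuples into a colour -> contents digraph, while add_invrule
# builds the reversed graph that get_containers walks outward from 'shiny gold'.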
|
[
"43623224+ebuehrle@users.noreply.github.com"
] |
43623224+ebuehrle@users.noreply.github.com
|
840996572849db610df502ab039de163a3ec75e7
|
c83e356d265a1d294733885c373d0a4c258c2d5e
|
/mayan/apps/documents/forms/document_version_page_forms.py
|
74cbdfcdd7ec0678b86dace3949bf4e99cb2230b
|
[
"Apache-2.0"
] |
permissive
|
TrellixVulnTeam/fall-2021-hw2-451-unavailable-for-legal-reasons_6YX3
|
4160809d2c96707a196b8c94ea9e4df1a119d96a
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
refs/heads/master
| 2023-08-21T23:36:41.230179
| 2021-10-02T03:51:12
| 2021-10-02T03:51:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,992
|
py
|
from django import forms
from django.forms.formsets import formset_factory
from django.utils.translation import ugettext_lazy as _
from ..fields import DocumentVersionPageField, ThumbnailFormField
__all__ = ('DocumentVersionPageForm',)
class DocumentVersionPageForm(forms.Form):
document_version_page = DocumentVersionPageField()
def __init__(self, *args, **kwargs):
instance = kwargs.pop('instance', None)
rotation = kwargs.pop('rotation', None)
zoom = kwargs.pop('zoom', None)
super().__init__(*args, **kwargs)
self.fields['document_version_page'].initial = instance
self.fields['document_version_page'].widget.attrs.update({
'zoom': zoom,
'rotation': rotation,
})
class DocumentVersionPageMappingForm(forms.Form):
source_content_type = forms.IntegerField(
label=_('Content type'), widget=forms.HiddenInput
)
source_object_id = forms.IntegerField(
label=_('Object ID'), widget=forms.HiddenInput
)
source_label = forms.CharField(
label=_('Source'), required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'})
)
source_thumbnail = ThumbnailFormField(required=False)
target_page_number = forms.ChoiceField(
choices=(), label=_('Destination page number'), required=False,
widget=forms.widgets.Select(
attrs={'size': 1, 'class': 'select2'}
),
)
def __init__(self, *args, **kwargs):
target_page_number_choices = kwargs.pop(
'target_page_number_choices', ()
)
super().__init__(*args, **kwargs)
self.fields['target_page_number'].choices = target_page_number_choices
class FormSetExtraFormKwargsMixin:
def __init__(self, *args, **kwargs):
self.form_extra_kwargs = kwargs.pop(
'form_extra_kwargs', {}
)
super().__init__(*args, **kwargs)
def get_form_kwargs(self, index):
form_kwargs = super().get_form_kwargs(index=index)
form_kwargs.update(self.form_extra_kwargs)
return form_kwargs
class DocumentVersionPageMappingFormSet(
FormSetExtraFormKwargsMixin, formset_factory(
form=DocumentVersionPageMappingForm, extra=0
)
):
"""
Combined formset
"""
def clean(self):
set_of_target_page_numbers = set()
for form in self.forms:
cleaned_data_entry = form.cleaned_data
target_page_number = cleaned_data_entry['target_page_number']
if target_page_number != '0':
if target_page_number in set_of_target_page_numbers:
form.add_error(
field='target_page_number',
error=_('Target page number can\'t be repeated.')
)
else:
set_of_target_page_numbers.add(target_page_number)
|
[
"79801878+Meng87@users.noreply.github.com"
] |
79801878+Meng87@users.noreply.github.com
|
5d888b295b13eb7815f7fd4b4e48e830b90ada4d
|
1b821be51a072ecdd55adf9e74bb6e70c1b734e3
|
/weather_lightning.py
|
cb671cea811b331b8f8d691f027f21ea7c26f275
|
[
"BSD-3-Clause"
] |
permissive
|
kifd/gimp-plugins
|
3704a808f8d1a2202c136229678a2885ecddfbbe
|
d5b4e20024010ea9ded7ad3d0983f87f23d8ffdb
|
refs/heads/master
| 2020-06-02T01:15:54.158827
| 2019-06-09T12:09:55
| 2019-06-09T12:09:55
| 190,990,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,842
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Weather - Lightning v0.1
#
# Copyright 2019, Keith Drakard; Released under the 3-Clause BSD License
# See https://opensource.org/licenses/BSD-3-Clause for details
#
# Change Log
# ----------
# 0.1: Initial release
#
# ------------------------------------------------------------------------------
# Weather effect; lightning
# console crib...
#
# layer = pdb.gimp_image_get_active_drawable(gimp.image_list()[0])
#
# https://gitlab.gnome.org/GNOME/gimp/issues/1542
import os, sys
this_directory = os.path.dirname(os.path.realpath(__file__)) + os.sep
sys.path.append(this_directory)
sys.stderr = open(this_directory + 'gimpfu_debug.txt', 'a')
from classes.mygtk import *
from classes.noise import *
from classes.point import *
import random
class LightningPath(object):
def __init__(self, image, start, end, depth=1):
self.image = image
self.start, self.end = start, end
self.depth = max(1, depth)
# NOTE length is the as-the-crow-flies distance AND NOT the length of the actual path/beziers
self.length = int(self.end.distance_from(self.start))
self.angle = self.end.angle_of(self.start)
# minimum possible starting point for branches
self.branch_point = self.length - (self.length / (self.depth+1))
self.path = []
noise_defaults = [
(8, 2.5, 0.40, 0.4, 200, self.length/2),
(8, 2.0, 0.55, 0.2, 200, self.length)
]
pick = min(self.depth-1, 1)
self.octaves, self.lacunarity, self.gain, self.scale, self.amplitude, self.hgrid = noise_defaults[pick]
self.brush_size = 0
pass
''' adds two paths together; does NOT check that they are continuous, in the same image, nor of the same depth '''
def __add__(self, other):
obj = LightningPath(self.image, self.start, other.end, self.depth)
obj.length = self.length + other.length
obj.path = self.path + other.path
return obj
pass
''' because random.choice(path) only returns the Point object of a path and I want the index too '''
def random_point_along_path(self):
index = random.randint(0, len(self.path)-1)
return index, self.path[index]
pass
''' brush details for this depth '''
def set_brush(self):
if self.brush_size == 0:
self.brush_size = int(self.image.width/150)
if self.depth > 1:
self.brush_size = max(1, int(self.brush_size / self.depth))
pdb.gimp_context_set_paint_method("gimp-paintbrush") # make sure we have the right tool selected in this context else gimp errors when you have a non-painting tool selected
pdb.gimp_context_set_brush("1. Pixel")
pdb.gimp_context_set_brush_size(self.brush_size)
pdb.gimp_context_set_brush_hardness(0.75)
pass
''' makes a noisy wave along a straight line '''
def make_path(self):
pn = PerlinNoise(self.octaves, 0.1, self.scale)
for i in xrange(self.length):
n = int(normalize(pn.fractal(i, self.hgrid, self.lacunarity, self.gain)) * self.amplitude) - self.amplitude/2
self.path.append(Point(i,n))
pass
''' rotates that wave to follow our real start->end line '''
def rotate_path(self):
for index, item in enumerate(self.path):
# rotate x,y around the origin - https://en.wikipedia.org/wiki/Rotation_matrix
x = int(item.x * math.cos(self.angle) - item.y * math.sin(self.angle))
y = int(item.x * math.sin(self.angle) + item.y * math.cos(self.angle))
# translate x,y according to the offset
self.path[index] = Point(x + self.start.x, y + self.start.y)
pass
''' trebles each of the path coords so gimp can draw the curve '''
def make_beziers(self):
self.beziers = []
for i in self.path:
# add this to our curve
self.beziers.extend([i.x,i.y, i.x,i.y, i.x,i.y])
pass
''' plot the bezier curve as a gimp path '''
def draw_beziers_path(self):
vectors = pdb.gimp_vectors_new(self.image, "Lightning Path")
pdb.gimp_image_insert_vectors(self.image, vectors, None, 0)
if len(self.beziers) > 6:
pdb.gimp_vectors_stroke_new_from_points(vectors, VECTORS_STROKE_TYPE_BEZIER, len(self.beziers), self.beziers, False)
return vectors
pass
''' and draw the bezier curve - solid color, no messing around with gradient overlays etc '''
def draw_beziers(self, group):
layer = pdb.gimp_layer_new(self.image, self.image.width, self.image.height, RGBA_IMAGE, "Main Bolt", 100.00, LAYER_MODE_NORMAL)
pdb.gimp_image_insert_layer(self.image, layer, group, 0)
self.set_brush()
self.make_beziers()
vectors = self.draw_beziers_path()
pdb.gimp_drawable_edit_stroke_item(layer, vectors)
pdb.gimp_image_remove_vectors(self.image, vectors)
return layer
pass
def draw_beziers_lighting(self, group):
pdb.gimp_progress_set_text("Drawing main bolt (lighting) ...")
pdb.gimp_image_undo_freeze(self.image)
layerc = self.draw_beziers(group)
layer1 = pdb.gimp_layer_copy(layerc, True)
layer2 = pdb.gimp_layer_copy(layerc, True)
layer3 = pdb.gimp_layer_copy(layerc, True)
layer4 = pdb.gimp_layer_copy(layerc, True)
pdb.gimp_image_remove_layer(self.image, layerc)
pdb.gimp_image_insert_layer(self.image, layer4, group, 1)
pdb.gimp_image_insert_layer(self.image, layer3, group, 1)
pdb.gimp_image_insert_layer(self.image, layer2, group, 1)
pdb.gimp_image_insert_layer(self.image, layer1, group, 1)
pdb.plug_in_gauss(self.image, layer1, 10,10, 0)
pdb.plug_in_gauss(self.image, layer2, 20,20, 0)
pdb.plug_in_gauss(self.image, layer3, 35,35, 0)
pdb.plug_in_gauss(self.image, layer4, 70,70, 0)
pdb.gimp_drawable_brightness_contrast(layer2, 0.3, 0.3)
pdb.gimp_drawable_brightness_contrast(layer3, -0.2, 0)
pdb.gimp_drawable_brightness_contrast(layer4, -0.3, 0)
layer1.mode = LAYER_MODE_HARDLIGHT
layer = pdb.gimp_image_merge_down(self.image, layer1, CLIP_TO_IMAGE)
layer = pdb.gimp_image_merge_down(self.image, layer, CLIP_TO_IMAGE)
layer = pdb.gimp_image_merge_down(self.image, layer, CLIP_TO_IMAGE)
layer.name = "Main Bolt (Underlay)"
# take our now single lighting layer and do some more blurry things to it
layer2 = pdb.gimp_layer_copy(layer, True)
layer3 = pdb.gimp_layer_copy(layer, True)
pdb.gimp_image_insert_layer(self.image, layer2, group, 2)
pdb.gimp_image_insert_layer(self.image, layer3, group, 3)
pdb.plug_in_mblur(self.image, layer2, 2, 100, 90, self.start.x, self.start.y)
pdb.plug_in_mblur(self.image, layer3, 2, 250, 90, self.start.x, self.start.y)
pdb.plug_in_whirl_pinch(self.image, layer2, 0.0, -1.0, 1.2)
pdb.plug_in_whirl_pinch(self.image, layer3, 0.0, -1.0, 1.2)
pdb.gimp_drawable_brightness_contrast(layer2, -0.5, -0.1)
pdb.gimp_drawable_brightness_contrast(layer2, -0.3, 0)
pdb.gimp_drawable_brightness_contrast(layer3, -0.5, 0)
pdb.gimp_drawable_brightness_contrast(layer3, -0.5, 0)
pdb.gimp_drawable_brightness_contrast(layer3, -0.2, 0) # yes, you need to repeat b/c adjustments if you want to change a layer that much
layer2 = pdb.gimp_image_merge_down(self.image, layer2, CLIP_TO_IMAGE)
layer2.name = "Main Bolt (Underlay 2)"
layer2.opacity = 40.0
pdb.gimp_image_undo_thaw(self.image)
return layer
pass
''' paint the plain x,y coords as a gradient pixel by pixel - looks better than a solid stroke but takes longer '''
def draw_path(self, group):
pdb.gimp_progress_set_text("Drawing child strokes ...")
pdb.gimp_image_undo_freeze(self.image)
layer = pdb.gimp_layer_new(self.image, self.image.width, self.image.height, RGBA_IMAGE, "Side Bolt", 100.00, LAYER_MODE_NORMAL)
pdb.gimp_image_insert_layer(self.image, layer, group, 0)
self.set_brush()
for index, item in enumerate(self.path):
percent = float(index)/float(self.length)
pdb.gimp_context_set_opacity((1 - percent) * 100)
pdb.gimp_paintbrush(layer, 0, 2, (item.x,item.y), PAINT_CONSTANT, 0)
#pdb.gimp_paintbrush(layer, 0, 2, (item.x,item.y), PAINT_CONSTANT, 0)
#pdb.gimp_paintbrush(layer, 0, 2, (item.x,item.y), PAINT_CONSTANT, 0)
pdb.gimp_progress_update(percent)
pdb.gimp_image_undo_thaw(self.image)
return layer
pass
def WeatherLightningWrapper(args):
image, layer, args = args
pdb.gimp_progress_init("Drawing lighting ...", None)
#random.seed(seed)
width = image.width
height = image.height
start = Point(width * args['start_x']/100, height * args['start_y']/100)
mid = Point(width * args['mid_x']/100, height * args['mid_y']/100)
end = Point(width * args['end_x']/100, height * args['end_y']/100)
n_main = max(1, min(10, int(args['n_main']))) # between 1-10 main bolts
n_side = max(0, min(20, int(args['n_side']))) # and 0-20 side bolts
side_points = []
group = gimp.GroupLayer(image)
group.name = "Lightning"
pdb.gimp_image_insert_layer(image, group, None, 0)
for i in range(n_main):
pdb.gimp_progress_set_text("Drawing main bolt(s) ...")
path1 = LightningPath(image, start, mid)
path1.make_path()
path1.rotate_path()
if args['move_end_point_a_bit']:
end.move_point(width/20, height/20) # 5% wiggle room
path2 = LightningPath(image, mid, end)
path2.make_path()
path2.rotate_path()
main_path = path1 + path2
size_mod = random.choice([120,150,180,200])
main_path.brush_size = int(image.width/size_mod)
pdb.gimp_context_set_foreground(args['color_bolt'])
layer1 = main_path.draw_beziers(group)
pdb.gimp_context_set_foreground(args['color_lighting'])
layer2 = main_path.draw_beziers_lighting(group)
# need to pick points/angles here from each bolt for the side ones
for i in range(n_side):
j, side_start = main_path.random_point_along_path()
side_next = main_path.path[j+1]
angle = side_next.angle_of(side_start)
distance = random.randint(int(main_path.length/15), int(main_path.length/3))
#debug.write("{}: {} {} {}\n".format(i, main_path.angle, angle, distance))
side_end = side_start.make_point(distance, angle)
side_end.check_bounds(width, height)
#debug.write("{}: {} to {} @ {} {}\n".format(i, side_start, side_end, distance, angle))
side_points.append((side_start, side_end))
pdb.gimp_context_set_foreground(args['color_bolt']) # reset after doing the main bolt lighting
random.shuffle(side_points)
for i in range(0, n_side): # still only picking n_side items from a list n_main*n_side long
start, end = side_points[i]
side_path = LightningPath(image, start, end, 2)
side_path.make_path()
side_path.rotate_path()
layer1 = side_path.draw_path(group)
layer2 = pdb.gimp_layer_copy(layer1, True)
layer2.mode = LAYER_MODE_HARDLIGHT
pdb.gimp_image_insert_layer(image, layer2, None, 1)
pdb.plug_in_gauss(image, layer2, 10,10, 0)
pdb.gimp_brightness_contrast(layer2, 0, 123)
plugin = PythonFu(
title = 'Lightning',
icon = os.path.join(this_directory, 'icons', 'draw-spiral-2-32x32.png'),
description = 'Draws a bolt of lightning',
author = 'Keith Drakard',
date = '2019',
menu = '<Image>/Filters/Render/Nature/_Lightning...',
dialog_width = 500,
widgets = [
{
'variable' : 'start_x',
'label' : 'X Start',
'markup' : '{} ({}%)',
'tooltip' : 'Image width % used to determine where the bolt starts from.',
'type' : IntSlider,
'range' : (0,100),
'default' : 10,
},
{
'line' : 1,
'variable' : 'start_y',
'label' : 'Y Start',
'markup' : '{} ({}%)',
'tooltip' : 'Image height % used to determine where the bolt starts from.',
'type' : IntSlider,
'range' : (0,100),
'default' : 50,
},
{
'variable' : 'mid_x',
'label' : 'X Mid',
'markup' : '{} ({}%)',
'tooltip' : 'Image width % used to determine where the bolt bends.',
'type' : IntSlider,
'range' : (0,100),
'default' : 40,
},
{
'line' : 2,
'variable' : 'mid_y',
'label' : 'Y Mid',
'markup' : '{} ({}%)',
'tooltip' : 'Image height % used to determine where the bolt bends.',
'type' : IntSlider,
'range' : (0,100),
'default' : 40,
},
{
'variable' : 'end_x',
'label' : 'X End',
'markup' : '{} ({}%)',
'tooltip' : 'Image width % used to determine where the bolt goes to.',
'type' : IntSlider,
'range' : (0,100),
'default' : 90,
},
{
'line' : 3,
'variable' : 'end_y',
'label' : 'Y End',
'markup' : '{} ({}%)',
'tooltip' : 'Image height % used to determine where the bolt goes to.',
'type' : IntSlider,
'range' : (0,100),
'default' : 60,
},
{
'variable' : 'n_main',
'label' : 'Main Bolts',
'tooltip' : 'Number of big bolts.',
'type' : IntSlider,
'range' : (1,10),
'default' : 1,
},
{
'variable' : 'n_side',
'label' : 'Side Bolts',
'tooltip' : 'Number of little bolts...',
'type' : IntSlider,
'range' : (0,20),
'default' : 0,
},
{
'variable' : 'move_end_point_a_bit',
'label' : 'Randomize end point',
'tooltip' : 'Move the end X,Y co-ords slightly for each main bolt.',
'type' : Toggle,
'default' : True,
},
{
'variable' : 'seed',
'label' : 'Random Seed',
'tooltip' : '',
'type' : IntEntry,
'default' : 0,
},
{
'variable' : 'color_bolt',
'label' : 'Bolt Color',
'tooltip' : 'Color of the main (and side) bolt.',
'type' : ColorPicker,
'default' : FOREGROUND,
},
{
'variable' : 'color_lighting',
'label' : 'Lighting Color',
'tooltip' : 'The glow that makes this more than just a wiggly line.',
'type' : ColorPicker,
'default' : BACKGROUND,
},
],
help_text = {
'label' : ('Draws a bolt of lightning.'),
},
code_function = WeatherLightningWrapper
)
plugin.main()
|
[
"noreply@github.com"
] |
kifd.noreply@github.com
|
7af964cd3ea6abe1456dca2d1213038527426326
|
ac60206e25fcce492d39a2a651dbca866cf21e86
|
/venv/Include/comic/util/JsonUtil.py
|
d6453988a1b326eb31b357736bc4a356c7d9c19f
|
[] |
no_license
|
Snailguo0508/comic
|
e127627e7437f84a10e7c0fd39ec42bbeb042bdc
|
5ce366bb9a7849577d1b4526887db5ce75f13192
|
refs/heads/master
| 2020-03-19T13:56:32.054506
| 2018-06-29T11:57:21
| 2018-06-29T11:57:21
| 136,596,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
import json
import re
import os
# Regex helper: return the first captured group of a single match
def getHtmlResult(regexs,htmlCode):
try:
source = re.search(regexs, htmlCode, re.M|re.I)
return source.group(1)
except Exception as e:
print("正则匹配结果异常,正则表达式为:" + regexs)
return None
|
[
"940940508@qq.com"
] |
940940508@qq.com
|
db198e7f8a22f43a0d3aed3a075d567e2c4f784c
|
667f153e47aec4ea345ea87591bc4f5d305b10bf
|
/Solutions/Ch4Ex087.py
|
fb877fec4ffb1702654df8091774296ffcb954d2
|
[] |
no_license
|
Parshwa-P3/ThePythonWorkbook-Solutions
|
feb498783d05d0b4e5cbc6cd5961dd1e611f5f52
|
5694cb52e9e9eac2ab14b1a3dcb462cff8501393
|
refs/heads/master
| 2022-11-15T20:18:53.427665
| 2020-06-28T21:50:48
| 2020-06-28T21:50:48
| 275,670,813
| 1
| 0
| null | 2020-06-28T21:50:49
| 2020-06-28T21:26:01
|
Python
|
UTF-8
|
Python
| false
| false
| 534
|
py
|
# Ch4Ex087.py
# Author: Parshwa Patil
# ThePythonWorkbook Solutions
# Exercise No. 87
# Title: Center a String in the Terminal
def centerString(s, w, c=" "):
if w < len(s): return s
sp = (w - len(s)) // 2
return c * sp + s + c * (w - sp - len(s))
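# Worked example (illustrative): centerString("hi", 7, "*") computes
# sp = (7 - 2) // 2 = 2, so it pads 2 stars on the left and 7 - 2 - 2 = 3 on the
# right, giving "**hi***".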
def main():
W = 100
string1 = input("Enter String1: ")
string2 = input("Enter String2: ")
string3 = input("Enter String3: ")
print(centerString(string1, W))
print(centerString(string2, W))
print(centerString(string3, W))
if __name__ == "__main__": main()
|
[
"noreply@github.com"
] |
Parshwa-P3.noreply@github.com
|
d8b030442d9ab3ae19b6d1efae2f37046efe4fe4
|
8bf78d105f555919c0f653c0d2f90edaab52b533
|
/cleantmp.py
|
586723ba768520926ea2a9d46085616c124e7a19
|
[] |
no_license
|
aiml2/becona
|
a39d13f20fd04bb8ca2c31e7092bef7060f6c477
|
c7db2c392ddad0d7a4405ab387437403f33797fc
|
refs/heads/master
| 2020-04-09T14:15:22.106955
| 2019-09-20T09:01:56
| 2019-09-20T09:01:56
| 160,392,133
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,234
|
py
|
import sys
import os
import numpy as np
import code
from code.utils.utils import getConfigId,classFromFilename
import re
modelsDir = '/tmp/'
targetDir = "./tmpmodels/"
assert os.path.isdir(modelsDir)
assert os.path.isdir(targetDir)
seperator="_"
modprefix="-m"
configIdBase=["IV3","Xc"]
configIds = [3,4]
configIdVersion=[seperator+"v"+str(n) for n in configIds]
eraindices=range(2)
cvindices=range(5)
eras=[seperator+"Era"+str(n) for n in eraindices]
cvs=[seperator+"split"+str(n) for n in cvindices]
end=".hdf5"
start="BECONA2.0"
val_seperator='-'
allMatched=[]
for modelFN in os.listdir(modelsDir):
if modelFN.endswith(end) and modelFN.startswith(start):
allMatched.append(modelFN[len(start):-len(end)])
print(allMatched)
allMatched.sort()
print(len(allMatched))
allFiltered = []
for mod in configIdBase:
for ver in configIdVersion:
for cv in cvs:
for era in eras:
minEra0Val = None
for fn in allMatched:
if fn.startswith(modprefix+mod+ver+cv+era):
if era == "_Era0":
minEra0Val = fn
else:
print(fn)
allFiltered.append(fn)
                if minEra0Val is not None:
allFiltered.append(minEra0Val)
print(allFiltered)
print(len(allFiltered))
# ids = [getConfigId(fn) for fn in allFiltered]
# print(ids)
# filteredDict = dict(zip(allFiltered,ids))
from keras.models import Model,load_model
from keras.optimizers import SGD
for name in allFiltered:
modelName = start+name+end
oldModelName = modelsDir+modelName
newModelName = targetDir+modelName
oldmodel=load_model(oldModelName)
model = classFromFilename(name)().model
print(model)
assert len(oldmodel.layers) == len(model.layers)
for idx,layer in enumerate(oldmodel.layers):
assert type(oldmodel.layers[idx]) == type(model.layers[idx])
model.layers[idx].set_weights(layer.get_weights())
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy')
model.save(newModelName)
if os.path.isfile(newModelName):
os.remove(oldModelName)
|
[
"dietercastel@gmail.com"
] |
dietercastel@gmail.com
|
d17fb3c25bca8fe02d7f0af33bcd765e22c61ec2
|
d4f12c33a0f4a2a6be76a5403c149253db02062e
|
/Desktop/BigData SU2018/Lesson 1/pythoncourse-master/code_example/31-PythonArray/2dArray.py
|
391b379379bdb1db7158c203787329870e7f53d1
|
[] |
no_license
|
CC-SY/practice
|
3059e3a4153a3f009b4e67f45bc2303070d1cfc1
|
6445cfd40e1aa8ebba39e259c07942e7669ecb1a
|
refs/heads/master
| 2020-03-17T00:14:20.743726
| 2018-05-12T02:49:35
| 2018-05-12T02:49:35
| 133,108,281
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
# numpy arrays
# wfp, 11/19
import numpy
myArray=numpy.array([1,2,3])
print myArray.ndim
print myArray.dtype
print myArray.shape
print myArray.size
twodArray=numpy.array([(1,2,3,4),(5,6,7,8)])
print twodArray
reshapeArray = numpy.arange(20)
print reshapeArray
reshapeArray=reshapeArray.reshape(4,5)
print reshapeArray
print myArray + 1
print myArray * 2
zArray=numpy.zeros((3,5),numpy.int32)
fArray=numpy.ones((4,2),numpy.float)
print fArray
print zArray
|
[
"cecilia_sui@163.com"
] |
cecilia_sui@163.com
|
23a5290bf4bc30132c2a58418f3d48b86ae539ff
|
0cdbcbb30f9814a6ccf85f4b61129a1a2b6536f2
|
/Labs/Lab 4 Python/experimento5.py
|
4a65c0d9a9253ca074abb72d732c4c648714905d
|
[
"MIT"
] |
permissive
|
DataOwl-Chile/MN_1
|
e2983f866a9a1a915c053dc4dd0481fa943fdd65
|
acfd9c466734d41e723633ce19c8e595704197c0
|
refs/heads/master
| 2023-03-20T00:30:27.143088
| 2021-03-13T01:22:06
| 2021-03-13T01:22:06
| 268,413,151
| 4
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,950
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
#def secante(x0, x1, f, error = 0.01):
def newtonraphson(f, x0, dx=0.01, error=0.0001, max_iter=100000):
xn = x0
for n in range(0, max_iter):
fn = f(xn)
        if abs(fn) < error:
            print('Solution found after', n, 'iterations.')
            return xn
        dfn = (f(xn + dx) - f(xn - dx)) / (2 * dx)
        if dfn == 0:
            print('Method is undefined (the derivative vanishes).')
            return None
        xn = xn - fn/dfn
    print('Method did not converge (maximum number of iterations exceeded).')
    return None
def cerof(x, y, error = 0.01):
x0 = []
y0 = []
for i, X in enumerate(x):
if np.abs(y[i]) <= error:
x0.append(X)
y0.append(y[i])
x0 = np.asarray(x0)
y0 = np.asarray(y0)
return x0, y0
def ceros(a, b, f, error=0.01):
"""
params x: arreglo en el que se quiere encontrar el cero de f
params f: función a la que se le quiere encontrar el cero
params a, b: extremos del intervalo que se desea explorar
params error: nivel de precisión deseado
returns: valor x en [a, b] en que f(x)=0
"""
fa = f(a)
fb = f(b)
ceros = []
fceros = []
if fa * fb > 0:
        print('The method does not apply')
return ceros, fceros
elif fa * fb == 0:
if fa == 0:
ceros.append(a)
fceros.append(fa)
if fb == 0:
ceros.append(b)
fceros.append(fb)
return ceros, fceros
else:
while (np.abs(fa).astype(float) > error) and (np.abs(fb).astype(float) > error):
c = (a + b) / 2
fc = f(c)
if fa * fc > 0:
a = c
fa = fc
else:
b = c
fb = fc
if np.abs(fa).astype(float) <= error:
ceros.append(a)
fceros.append(fa)
if np.abs(fb).astype(float) <= error:
ceros.append(b)
fceros.append(fb)
return ceros, fceros
def derivadafun(a, b, f, dx=0.01):
"""
Devuelve la derivada de una función, considerando que en extremo izquierdo sólo se puede
aplicar derivada forward, en extremo derecho sólo se puede aplicar derivada backward y
entremedio puede aplicad derivada central, que es más precisa.
param x: vector en el que se evaluará la función, tiene largo N.
param y: vector de la función evaluada en x.
"""
N = int((b - a) / dx)
x = np.linspace(a, b, N)
derivada = []
for i, X in enumerate(x):
        if i == 0: # Left endpoint: use a forward difference
            deriv = (f(x[1]) - f(x[0])) / dx
            derivada.append(deriv)
        elif i == N - 1: # Right endpoint: use a backward difference (indices run up to N-1)
            deriv = (f(x[N - 1]) - f(x[N - 2])) / dx
            derivada.append(deriv)
        else: # Interior point: use the central difference
            deriv = (f(x[i + 1]) - f(x[i - 1])) / (2 * dx)
            derivada.append(deriv)
derivada = np.asarray(derivada)
return derivada
def derivadavec(x, y):
"""
Devuelve la derivada de una función, considerando que en extremo izquierdo sólo se puede
aplicar derivada forward, en extremo derecho sólo se puede aplicar derivada backward y
entremedio puede aplicad derivada central, que es más precisa.
param x: vector en el que se evaluará la función, tiene largo N.
param y: vector de la función evaluada en x.
"""
N = len(x)
    # Compute dx, which will have N-1 values
    dx = []
    for i in range(N-1):
        dx.append(x[i + 1] - x[i])
    # NOTE: if the vector x were equally spaced (e.g. built with linspace),
    # dx = x[1] - x[0] would suffice.
    # In that case dx[0] = dx[N - 1] = dx and dx[i] + dx[i - 1] = 2*dx.
derivada = []
for i in range(N):
        if i == 0: # Left endpoint: use a forward difference
            deriv = (y[1] - y[0]) / dx[0]
            derivada.append(deriv)
        elif i == N - 1: # Right endpoint: use a backward difference (indices of x and y run up to N-1)
            deriv = (y[N - 1] - y[N - 2]) / dx[N - 2]
            derivada.append(deriv)
        else: # Interior point: use the central difference
            deriv = (y[i + 1] - y[i - 1]) / (dx[i] + dx[i - 1])
            derivada.append(deriv)
derivada = np.asarray(derivada)
return derivada
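# Illustrative usage (not part of the original file); the test function and the
# interval below are assumptions chosen only for the example:
if __name__ == '__main__':
    f = lambda x: x**2 - 2
    print(newtonraphson(f, x0=1.0))           # Newton-Raphson root, approx. 1.41421
    print(ceros(0.0, 2.0, f, error=1e-4))     # bisection roots and residuals on [0, 2]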
|
[
"66226083+ValentinRetamal@users.noreply.github.com"
] |
66226083+ValentinRetamal@users.noreply.github.com
|
96f0be08ebace78eb0f142c0c256c11e0c1aaa12
|
271e77e0e73ae95c91545ade8ea1aa327d754a6a
|
/src/posts/migrations/0001_initial.py
|
6d748f3e03b48df30ec096391ae292f5d39ff89f
|
[] |
no_license
|
Iv91/Blog
|
1fd6a7a92d22c7b01df109261e246a066566057f
|
3773d246718acc77045c90daa993a463f9353b6f
|
refs/heads/main
| 2023-02-05T00:06:23.550822
| 2020-12-30T09:21:58
| 2020-12-30T09:21:58
| 255,296,116
| 0
| 0
| null | 2020-04-13T10:58:17
| 2020-04-13T10:30:12
|
CSS
|
UTF-8
|
Python
| false
| false
| 3,240
|
py
|
# Generated by Django 3.0 on 2020-05-01 06:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_picture', models.ImageField(upload_to='')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
('slug', models.SlugField(unique=True)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('overview', models.TextField()),
('timestamp', models.DateTimeField(auto_now_add=True)),
('content', tinymce.models.HTMLField()),
('thumbnail', models.ImageField(upload_to='')),
('featured', models.BooleanField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Author')),
('categories', models.ManyToManyField(to='posts.Category')),
('next_post', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='next', to='posts.Post')),
('previous_post', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='previous', to='posts.Post')),
],
),
migrations.CreateModel(
name='PostView',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('content', models.TextField()),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='posts.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"ivanausorac91@gmail.com"
] |
ivanausorac91@gmail.com
|
e34f8a508203056182a73bec7a9a2be9ea2af8b5
|
9366ac9c730ff274992071b556f4f0289b38991c
|
/arrayADT/hashing1.py
|
a1507ac2624917c63000806cc256013cd7f4fcd1
|
[] |
no_license
|
jjena560/Data-Structures
|
702bde538ceb4c05599871cc6d65e60722e041e2
|
8d62371e4bd7b74e36e9d26eef3bca4ead216177
|
refs/heads/master
| 2022-12-04T21:56:27.157261
| 2020-08-22T15:46:01
| 2020-08-22T15:46:01
| 288,149,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
# Python3 program to sort an array
# using hash function
def sortUsingHash(a, n):
# find the maximum element
Max = max(a)
# create a hash function upto
# the max size with every element equals to 0
Hash = [0] * (Max + 1)
# traverse through all the elements
# and keep a count
for i in range(0, n):
Hash[a[i]] += 1
# Traverse upto all elements and check
# if it is present or not. If it is
# present, then print the element the
# number of times it's present. Once we
# have printed n times, that means we
# have printed n elements so break out
# of the loop
# for i in range(0, Max + 1):
#
# # if present
# if Hash[i] != 0:
#
# # print the element that number
# # of times it's present
# for j in range(0, Hash[i]):
# print(i, end=" ")
return Hash
# Driver Code
if __name__ == "__main__":
a = [6, 7, 8, 9, 11, 6, 8, 14, 16, 17]
n = len(a)
print(sortUsingHash(a, n))
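# Hypothetical helper (not in the original file): rebuild the sorted array from
# the count table that sortUsingHash returns, i.e. finish the counting sort.
def rebuild_sorted(counts):
    result = []
    for value, count in enumerate(counts):
        result.extend([value] * count)  # emit each value as many times as it was counted
    return result
# e.g. rebuild_sorted(sortUsingHash([6, 7, 8, 9, 11, 6, 8, 14, 16, 17], 10))
# -> [6, 6, 7, 8, 8, 9, 11, 14, 16, 17]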
|
[
"noreply@github.com"
] |
jjena560.noreply@github.com
|
a9eac8cb2ba2aac3a384755918a4b85361ff4018
|
a8dd779762cb105aaa8aab9eae24b50ab28b3b91
|
/running_python_code_in_terminal.py
|
fa6c21e270152d30c424da2bc087e569c141863b
|
[] |
no_license
|
meettingthespam/Python
|
a7cf6ca51cb280741bf6478aad4d68730c475ba9
|
3a6d3973e0a99f9207a8fd4d79cefd83b20b6caa
|
refs/heads/master
| 2020-04-21T08:37:14.399570
| 2019-02-09T17:46:47
| 2019-02-09T17:46:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
# Basic terminal usage:
# to start an interactive Python session directly in the terminal, just type
#   python
# and, if Python is installed in that environment, a prompt like this appears:
#   Python 3.7.1 (v3.7.1:260ec2c36a, Oct 20 2018, 14:57:15) [MSC v.1915 64 bit (AMD64)] on win32
#   Type "help", "copyright", "credits" or "license" for more information.
# From there you can `import x` to check that a pip install worked
# (a common example is importing virtualenv when setting up Flask).
|
[
"phoebus.john@gmail.com"
] |
phoebus.john@gmail.com
|
7bcd4909b6524a25d6c1a93c21df57e6170a26b1
|
61bad6e9e06fe604575333e7e34a9c52d7e0f3d2
|
/remove-element.py
|
b567a1e78be17026d32b9959db21781e99ce3e59
|
[] |
no_license
|
windy319/leetcode
|
784ea765669922af64778ee00fc9df25bdb112b0
|
e68ecc5b8844b05838dc5458ab5bef4864840e01
|
refs/heads/master
| 2020-05-30T20:25:35.096164
| 2014-08-11T06:48:12
| 2014-08-11T06:48:12
| 22,828,083
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
num,pre,cur,size = 0,0,0,len(A)
while cur < size:
if A[cur] == elem:
cur += 1
num += 1
continue
A[pre] = A[cur]
pre += 1
cur += 1
# print A
return len(A) - num
|
[
"zhenye.wy@alibaba-inc.com"
] |
zhenye.wy@alibaba-inc.com
|
274e934dc982c655355273508349b5fa24611d4a
|
2178c396cf36ec437283e73164620a362090e709
|
/scrape_mars.py
|
4729d228a821e350aa1574ff2d566f98e86144f5
|
[] |
no_license
|
Neeta90/Mission-to-Mars
|
1384e9adff2b062ee1199dd6b7f19241ca3c0534
|
29f62a804c7ee079c01bc04c9f26d884ca383978
|
refs/heads/master
| 2020-06-29T13:10:08.918206
| 2019-10-12T23:19:43
| 2019-10-12T23:19:43
| 200,546,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,798
|
py
|
# import necessary libraries
from flask import Flask, render_template
# Dependencies
from splinter import Browser
from bs4 import BeautifulSoup
import requests
import pymongo
import re
import pandas as pd
# Import our pymongo library, which lets us connect our Flask app to our Mongo database.
import pymongo
# Create an instance of our Flask app.
#app = Flask(__name__)
# Create connection variable
#conn = 'mongodb://localhost:27017'
# Pass connection to the pymongo instance.
#client = pymongo.MongoClient(conn)
# Initialize browser
def init_browser():
executable_path = {'executable_path': 'chromedriver'}
return Browser('chrome', **executable_path, headless=False)
mars_data = {}
# NASA MARS NEWS
def scrape_mars_news():
try:
browser=init_browser()
mission_url = 'https://mars.nasa.gov/news/'
browser.visit(mission_url)
html=browser.html
soup = BeautifulSoup(html, 'html.parser')
#Extract the latest News Title
#content = soup.find('li', class_='slide')
#print(content)
#scrape the title from h3 tag & print the news_title
news_title = soup.find('div', class_='content_title').find('a').text
# print(f"news_title = {news_title}")
#Extract the latest paragraph text from the class
news_p=soup.find('div',class_='article_teaser_body').text.strip()
# print(f"news_p = {news_p}")
# Dictionary entry from MARS NEWS
mars_data['news_title'] = news_title
mars_data['news_p'] = news_p
return mars_data
finally:
browser.quit()
# FEATURED IMAGE
def scrape_mars_image():
try:
# Initialize browser
browser = init_browser()
#JPL Mars Space Images - Featured Image
JPL_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(JPL_url)
html=browser.html
soup = BeautifulSoup(html, 'html.parser')
        # Get the source image name (.jpg)
first_image=soup.find('article')['style'].replace('background-image: url(','').replace(');', '')[1:-1]
# Website Url
main_url = 'https://www.jpl.nasa.gov'
        # Concatenate the base url and the image url
featured_image_url = main_url+first_image
#print(f"featured_image_url = {featured_image_url}")
mars_data['featured_image_url']=featured_image_url
return mars_data
finally:
browser.quit()
# Mars Weather
def scrape_mars_weather():
try:
# Initialize browser
browser = init_browser()
twitter_url="https://twitter.com/marswxreport?lang=en"
browser.visit(twitter_url)
html=browser.html
soup = BeautifulSoup(html, 'html.parser')
#soup.prettify()
mars_weather_data=soup.find('div',class_='js-tweet-text-container')
mars_weather=mars_weather_data.find('p').text.strip()
#print(f"mars_weather = {mars_weather}")
# Dictionary entry from WEATHER TWEET
mars_data['mars_weather'] = mars_weather
return mars_data
finally:
browser.quit()
# Mars Facts
def scrape_mars_facts():
#Mars Facts url
mars_facts_url='http://space-facts.com/mars/'
mars_facts = pd.read_html(mars_facts_url)
# Find the mars facts DataFrame in the list of DataFrames
mars_df = mars_facts[1]
# Assign the columns `['Description', 'Value']`
mars_df.columns = ['Description','Value']
# Set the index to the `Description` column without row indexing
mars_df.set_index('Description', inplace=True)
# Save html code to folder Assets
data=mars_df.to_html()
# data = mars_df.to_dict(orient='records') # Here's our added param..
# Display mars_df
mars_data['data'] = data
return mars_data
# MARS HEMISPHERES
def scrape_mars_hemispheres():
try:
# Initialize browser
browser = init_browser()
# Visit hemispheres website through splinter module
executable_path = {'executable_path': 'chromedriver'}
browser = Browser('chrome', **executable_path, headless=False)
mars_hemi_url="https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(mars_hemi_url)
html=browser.html
soup = BeautifulSoup(html, 'html.parser')
items = soup.find_all('div', class_='item')
# Create empty list for hemisphere urls
hemi_url = []
# Store the main_ul
hemispheres_main_url = 'https://astrogeology.usgs.gov'
# Loop through the items previously stored
for i in items:
# Store title
title = i.find('h3').text
# Store link that leads to full image website
partial_img_url = i.find('a', class_='itemLink product-item')['href']
# Visit the link that contains the full image website
browser.visit(hemispheres_main_url + partial_img_url)
# HTML Object of individual hemisphere information website
partial_img_html = browser.html
# Parse HTML with Beautiful Soup for every individual hemisphere information website
soup = BeautifulSoup( partial_img_html, 'html.parser')
# Retrieve full image source
img_url = hemispheres_main_url + soup.find('img', class_='wide-image')['src']
# Append the retreived information into a list of dictionaries
hemi_url.append({"title" : title, "img_url" : img_url})
# Display hemisphere_image_urls
mars_data['hemi_url'] = hemi_url
return mars_data
finally:
browser.quit()
|
[
"Neetashrivastava90.gmail,com"
] |
Neetashrivastava90.gmail,com
|
73eb78c71fb41b54deded26c08f6cb14c0eb05e0
|
e66fa131cff76fa3fe70e7b6649fa1332159c781
|
/ch10/qualityControl_test.py
|
333c28c72d751a634d3503a445cca657b54b61da
|
[] |
no_license
|
chc1129/python_tutorial
|
c6d97c6671a7952d8a7b838ccb8aa3c352fa6881
|
2f8b389731bafbda73c766c095d1eaadb0f99a1c
|
refs/heads/main
| 2023-08-24T07:00:43.424652
| 2021-10-28T16:07:57
| 2021-10-28T16:07:57
| 341,532,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
import unittest
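# The original snippet assumes an `average` function is already in scope (e.g.
# imported from the module under test). A minimal sketch so the tests run standalone:
def average(values):
    # arithmetic mean of a non-empty sequence; empty input raises ZeroDivisionError
    return sum(values) / len(values)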
class TestStatisticalFunctions(unittest.TestCase):
def test_average(self):
self.assertEqual(average([20, 30, 70]), 40.0)
self.assertEqual(round(average([1, 5, 7]), 1), 4.3)
        with self.assertRaises(ZeroDivisionError):
            average([])
        with self.assertRaises(TypeError):
            average(20, 30, 70)
unittest.main()  # calling this from the command line runs all of the tests
|
[
"chc1129@gmail.com"
] |
chc1129@gmail.com
|
65935e1240482e7923adef14244f3b8be96bdf76
|
db1d387b9e01bf97efeca92251d2be19b6e5b3ea
|
/QG mid term exams/Geohash/zt.py
|
00b18b02241255b90848474c50d741918fb4b483
|
[] |
no_license
|
ZhengtingHuang888/kDD-STUDY
|
11c1d0ce1b832788b8fc3cbfab43d7869ece4c2f
|
d496be33a665fb9dc8e14cd00e94de18f6910980
|
refs/heads/master
| 2023-07-19T04:37:54.082772
| 2019-08-04T14:51:16
| 2019-08-04T14:51:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,338
|
py
|
"""
作者:数据挖掘组各个成员;
作用:获取广州地区分块及其编码;
返回:各个地区分块的编码及中心点坐标,可视化图像;
"""
from pygeohash import encode,decode
import plotly.plotly as py
import plotly.graph_objs as go
import numpy as np
import pandas as pd
import math
from matplotlib.path import Path
def getMainMap():
"""
作用:获取广州各个分界点的编码、经、纬度子集、经纬度最值;
:return:
"""
# 广州的边界,三维列表;
G = [[[114.03621, 23.90178], [114.04083, 23.8735], [114.05776, 23.84676], [114.05227, 23.83003],
[114.03699, 23.81871], [114.03761, 23.80803], [114.04775, 23.80334], [114.03678, 23.79563],
[114.05996, 23.77587], [114.03845, 23.77122], [114.02312, 23.75224], [114.01821, 23.76284],
[114.00995, 23.76301], [114.01313, 23.77795], [113.99879, 23.76301], [113.97611, 23.75772],
[113.97286, 23.73925], [113.92005, 23.72945], [113.91236, 23.71651], [113.90094, 23.71543],
[113.88731, 23.6881], [113.84784, 23.67933], [113.85276, 23.66777], [113.84392, 23.66948],
[113.83995, 23.65545], [113.81878, 23.65617], [113.82837, 23.64592], [113.81377, 23.62901],
[113.85962, 23.60997], [113.86405, 23.58739], [113.85282, 23.57058], [113.86314, 23.56536],
[113.87138, 23.54131], [113.88814, 23.53507], [113.89234, 23.52111], [113.91152, 23.50416],
[113.94625, 23.49319], [113.93298, 23.47971], [113.97412, 23.47882], [113.98171, 23.47215],
[113.97449, 23.4649], [113.95412, 23.46563], [113.95281, 23.44289], [113.96191, 23.43141],
[113.98707, 23.43168], [113.98428, 23.40848], [113.99986, 23.39664], [113.98119, 23.37765],
[114.00153, 23.34472], [113.99391, 23.33316], [113.98361, 23.33258], [113.9967, 23.29746],
[113.95847, 23.31495], [113.95969, 23.33147], [113.93927, 23.34295], [113.89599, 23.34507],
[113.8892, 23.33357], [113.89821, 23.32055], [113.89028, 23.28269], [113.87659, 23.26498],
[113.89516, 23.25355], [113.89011, 23.24213], [113.90377, 23.21254], [113.894, 23.21266],
[113.8838, 23.19169], [113.88898, 23.17863], [113.90229, 23.17326], [113.89146, 23.16325],
[113.87478, 23.16535], [113.85873, 23.15725], [113.84897, 23.14772], [113.84108, 23.11615],
[113.81467, 23.12777], [113.75405, 23.12957], [113.7386, 23.14131], [113.72437, 23.14122],
[113.68781, 23.1198], [113.66115, 23.11142], [113.66043, 23.11877], [113.65125, 23.1193],
[113.64028, 23.10389], [113.6104, 23.10379], [113.58642, 23.0878], [113.55629, 23.08124],
[113.52289, 23.03727], [113.52923, 22.98261], [113.57428, 22.89194], [113.57122, 22.85312],
[113.68528, 22.71773], [113.71686, 22.6452], [113.73762, 22.52766], [113.70598, 22.51629],
[113.65161, 22.51572], [113.62078, 22.57953], [113.56163, 22.60751], [113.53297, 22.65498],
[113.54072, 22.66621], [113.47131, 22.71499], [113.46797, 22.72852], [113.41219, 22.74283],
[113.36351, 22.77412], [113.35654, 22.79297], [113.37468, 22.79456], [113.39343, 22.80985],
[113.37442, 22.8226], [113.34652, 22.81614], [113.3121, 22.83039], [113.30966, 22.85119],
[113.29614, 22.85991], [113.30083, 22.87677], [113.27703, 22.8947], [113.28596, 22.90144],
[113.2824, 22.92739], [113.2981, 22.93431], [113.28632, 22.95032], [113.26705, 22.95494],
[113.24993, 22.97329], [113.2579, 22.99486], [113.24966, 23.00204], [113.25286, 23.01977],
[113.26313, 23.02114], [113.2578, 23.04677], [113.21169, 23.04332], [113.17792, 23.06803],
[113.17741, 23.07756], [113.20907, 23.08346], [113.21673, 23.09866], [113.20814, 23.09968],
[113.20247, 23.12111], [113.21055, 23.12337], [113.21267, 23.1411], [113.18686, 23.14825],
[113.1896, 23.16195], [113.20945, 23.1771], [113.209, 23.19218], [113.17748, 23.22088], [113.182, 23.25278],
[113.17653, 23.2736], [113.12798, 23.31455], [113.12437, 23.30659], [113.11322, 23.30986],
[113.10575, 23.30273], [113.10568, 23.29027], [113.07164, 23.28371], [113.08083, 23.25087],
[113.04476, 23.25096], [113.05378, 23.26378], [113.05143, 23.27839], [113.03263, 23.29767],
[113.03755, 23.32007], [113.02347, 23.3249], [113.04309, 23.353], [113.03354, 23.35682],
[113.01671, 23.34093], [113.01169, 23.35358], [112.98798, 23.35588], [112.98103, 23.38142],
[112.98632, 23.39863], [113.00109, 23.40633], [112.98165, 23.43297], [112.98911, 23.4433],
[112.96339, 23.42642], [112.95922, 23.43539], [112.97928, 23.46515], [113.01594, 23.46058],
[113.02636, 23.47286], [113.05585, 23.47196], [113.08374, 23.4945], [113.11545, 23.50151],
[113.1161, 23.51074], [113.15354, 23.50284], [113.1711, 23.51156], [113.17232, 23.52029],
[113.1721, 23.51237], [113.19206, 23.51477], [113.19112, 23.52321], [113.21268, 23.54028],
[113.20078, 23.56183], [113.20224, 23.57652], [113.22698, 23.58574], [113.22789, 23.59442],
[113.24441, 23.58688], [113.24038, 23.60624], [113.24847, 23.60159], [113.27657, 23.616],
[113.28134, 23.60836], [113.29946, 23.63689], [113.28927, 23.64436], [113.32726, 23.64442],
[113.32796, 23.65548], [113.34908, 23.66797], [113.36372, 23.70716], [113.37539, 23.71282],
[113.37836, 23.73153], [113.40431, 23.7235], [113.44191, 23.72704], [113.44386, 23.71592],
[113.4643, 23.70797], [113.46871, 23.69099], [113.48103, 23.68404], [113.5137, 23.68209],
[113.54547, 23.69639], [113.53836, 23.6991], [113.54291, 23.70181], [113.55876, 23.70069],
[113.56805, 23.67944], [113.58729, 23.67523], [113.59835, 23.66267], [113.62259, 23.69944],
[113.63819, 23.70457], [113.62812, 23.71171], [113.6364, 23.75024], [113.61546, 23.78739],
[113.65167, 23.82013], [113.68139, 23.81202], [113.68737, 23.82572], [113.70638, 23.81527],
[113.71855, 23.82076], [113.71353, 23.8625], [113.72476, 23.85356], [113.75817, 23.85749],
[113.78761, 23.90246], [113.80945, 23.90061], [113.87532, 23.93047], [113.88583, 23.92366],
[113.89252, 23.93167], [113.91024, 23.92357], [113.93353, 23.92923], [113.94117, 23.92357],
[113.96945, 23.93256], [113.98452, 23.92617], [114.00921, 23.93291], [114.03294, 23.92039],
[114.03621, 23.90178]]]
    # Get the boundary geohash codes and the longitude/latitude lists for Guangzhou
    p, lon, lat = Partition(G)
    lonmax = np.max(lon)  # maximum longitude
    lonmin = np.min(lon)  # minimum longitude
    latmax = np.max(lat)  # maximum latitude
    latmin = np.min(lat)  # minimum latitude
return lonmax, lonmin, latmax, latmin, p, lon, lat
def Partition(G):
"""
:param G: 广州边界坐标集合,形式为三维数组。
:return:
"""
lon = []
lat = []
p = []
for i in range(len(G)):
for j in range(len(G[i])):
lat.append(G[i][j][1])
lon.append(G[i][j][0])
result = get_geohash(G[i][j][1], G[i][j][0])
p.append(result)
return p,lon,lat
# Encode: takes a coordinate pair and returns its geohash code.
# Note: the callers in this script pass (latitude, longitude) despite the parameter names.
def get_geohash(lon, lat):
    # coarse partition code
    geo = encode(lon, lat)
    return geo
# Decode: takes a geohash code and returns the coordinates of its cell centre.
def get_lonlat(geo):
    # coarse partition centre
    lon, lat = decode(geo)
    return lon, lat
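# Illustrative sketch only (encode/decode are the geohash helpers imported earlier in this
# file; their exact precision is an assumption here). The callers below pass
# (latitude, longitude) into get_geohash, e.g.:
#   geo = get_geohash(23.1286, 113.2648)   # geohash string for a point in central Guangzhou
#   x, y = get_lonlat(geo)                  # centre of that geohash cell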
def get_totalgeohash():
    # get the bounding-box extremes of the boundary
    lonmax, lonmin, latmax, latmin, p, lon, lat = getMainMap()
    # enumerate a regular grid over the bounding box:
    accu = 0.01
    total = []
    total_lon = []
    total_lat = []
    # store the geohash code of every enumerated grid point
    for i in np.arange(latmin, latmax, accu):
        for j in np.arange(lonmin, lonmax, accu):
            total.append(get_geohash(i, j))
            a, b = get_lonlat(get_geohash(i, j))
            # latitude
            total_lat.append(a)
            # longitude
            total_lon.append(b)
    return total, p, lon, lat
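# Rough size check (sketch): with accu = 0.01 degrees and a Guangzhou bounding box of
# roughly 1.4 x 1.1 degrees, the double loop above samples roughly 140 * 110 ≈ 15,000
# grid points, each mapped to a geohash code.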
def get_geolonlat_zt(total):
    new_total = []  # all truncated (prefix) geohash codes
    block_dict = {}
    central_dict = {}
    for i in total:  # truncate every code to its prefix
        new_total.append(i[:5])  # keep only the first 5 characters of each code
    new_total = list(set(new_total))  # deduplicate the prefix codes
    new_total_lonlat = []
    for block in new_total:
        lat_zt, lon_zt = get_lonlat(block)
        new_total_lonlat.append([lon_zt, lat_zt])
    return new_total_lonlat
def match_zt(total):
new_total_lonlat = get_geolonlat_zt(total)
all_centralpoint = new_total_lonlat
new_total_lon = []
new_total_lat = []
for point in all_centralpoint:
new_total_lon.append(point[0])
new_total_lat.append(point[1])
return all_centralpoint, new_total_lon,new_total_lat
def border_point_zt(lon, lat):
    line_lon = list(set(lon[:]))  # unique longitudes of the partition points
    line_lat = list(set(lat[:]))  # unique latitudes of the partition points
aver_lons = []
aver_lats = []
line_lon.sort()
line_lat.sort()
#print(len(line_lon))
for i in range(len(line_lon) - 1):
aver_lons.append((line_lon[i] + line_lon[i + 1]) / 2)
#input(len(aver_lons))
subtract_lon = abs(line_lon[0] - line_lon[1]) / 2
    # add the left boundary
aver_lons.insert(0, line_lon[0] - subtract_lon)
    # add the right boundary
aver_lons.append(line_lon[len(line_lon) - 1] + subtract_lon)
    # same procedure for latitudes
for i in range(len(line_lat) - 1):
aver_lats.append((line_lat[i] + line_lat[i + 1]) / 2)
subtract_lat = abs(line_lat[0] - line_lat[1]) / 2
aver_lats.insert(0, line_lat[0] - subtract_lat)
aver_lats.append(line_lat[len(line_lat) - 1] + subtract_lat)
#print(aver_lons, aver_lats)
aver_lats.sort()
aver_lons.sort()
return aver_lons, aver_lats
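# Worked example (sketch): for sorted unique longitudes [1.0, 2.0, 3.0] the loop above gives
# midpoints [1.5, 2.5]; adding the half-step 0.5 on both ends yields the grid lines
# [0.5, 1.5, 2.5, 3.5], i.e. one dividing line per cell edge. Latitudes are handled the same way.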
def get_dict(central_points, aver_lons, aver_lats):
point_dict = {}
lon_var = (aver_lons[1]-aver_lons[0])/2
lat_var = (aver_lats[1]-aver_lats[0])/2
for central_point in central_points:
border_points = []
lons = [central_point[0]+lon_var, central_point[0]-lon_var]
lats = [central_point[1]+lat_var, central_point[1]-lat_var]
for lon in lons:
for lat in lats:
border_points.append([lon, lat])
point_dict[str(central_point)] = border_points
return point_dict
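# Example (sketch): with accu = 0.01 the half-widths lon_var and lat_var are roughly 0.005,
# so a central point [113.26, 23.13] is mapped to its four cell corners
# [113.265, 23.135], [113.265, 23.125], [113.255, 23.135], [113.255, 23.125].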
def match(point_dict):
    p = edge_info()
    match_point1 = []
    match_point2 = []
    new_dict = {}
    # iterate over every central point (the dict keys)
    for key in point_dict.keys():
        flag = 0
        # iterate over the corner coordinates stored under this key
        for i in point_dict[key]:
            if p.contains_points([tuple(i)])[0]:
                # at least one corner lies inside the boundary
                flag = 1
        if flag == 1:
            a = key[1:-1].split(",")
            for k in range(len(a)):
                a[k] = float(a[k])
            match_point1.append(a)
            for j in point_dict[key]:
                match_point2.append(j)
            new_dict[str(a)] = point_dict[key]
    return match_point1, match_point2, new_dict
def edge_info():
G = [[[114.03621, 23.90178], [114.04083, 23.8735], [114.05776, 23.84676], [114.05227, 23.83003], [114.03699, 23.81871],
[114.03761, 23.80803], [114.04775, 23.80334], [114.03678, 23.79563], [114.05996, 23.77587], [114.03845, 23.77122],
[114.02312, 23.75224], [114.01821, 23.76284], [114.00995, 23.76301], [114.01313, 23.77795], [113.99879, 23.76301],
[113.97611, 23.75772], [113.97286, 23.73925], [113.92005, 23.72945], [113.91236, 23.71651], [113.90094, 23.71543],
[113.88731, 23.6881], [113.84784, 23.67933], [113.85276, 23.66777], [113.84392, 23.66948], [113.83995, 23.65545],
[113.81878, 23.65617], [113.82837, 23.64592], [113.81377, 23.62901], [113.85962, 23.60997], [113.86405, 23.58739],
[113.85282, 23.57058], [113.86314, 23.56536], [113.87138, 23.54131], [113.88814, 23.53507], [113.89234, 23.52111],
[113.91152, 23.50416], [113.94625, 23.49319], [113.93298, 23.47971], [113.97412, 23.47882], [113.98171, 23.47215],
[113.97449, 23.4649], [113.95412, 23.46563], [113.95281, 23.44289], [113.96191, 23.43141], [113.98707, 23.43168],
[113.98428, 23.40848], [113.99986, 23.39664], [113.98119, 23.37765], [114.00153, 23.34472], [113.99391, 23.33316],
[113.98361, 23.33258], [113.9967, 23.29746], [113.95847, 23.31495], [113.95969, 23.33147], [113.93927, 23.34295],
[113.89599, 23.34507], [113.8892, 23.33357], [113.89821, 23.32055], [113.89028, 23.28269], [113.87659, 23.26498],
[113.89516, 23.25355], [113.89011, 23.24213], [113.90377, 23.21254], [113.894, 23.21266], [113.8838, 23.19169],
[113.88898, 23.17863], [113.90229, 23.17326], [113.89146, 23.16325], [113.87478, 23.16535], [113.85873, 23.15725],
[113.84897, 23.14772], [113.84108, 23.11615], [113.81467, 23.12777], [113.75405, 23.12957], [113.7386, 23.14131],
[113.72437, 23.14122], [113.68781, 23.1198], [113.66115, 23.11142], [113.66043, 23.11877], [113.65125, 23.1193],
[113.64028, 23.10389], [113.6104, 23.10379], [113.58642, 23.0878], [113.55629, 23.08124], [113.52289, 23.03727],
[113.52923, 22.98261], [113.57428, 22.89194], [113.57122, 22.85312], [113.68528, 22.71773], [113.71686, 22.6452],
[113.73762, 22.52766], [113.70598, 22.51629], [113.65161, 22.51572], [113.62078, 22.57953], [113.56163, 22.60751],
[113.53297, 22.65498], [113.54072, 22.66621], [113.47131, 22.71499], [113.46797, 22.72852], [113.41219, 22.74283],
[113.36351, 22.77412], [113.35654, 22.79297], [113.37468, 22.79456], [113.39343, 22.80985], [113.37442, 22.8226],
[113.34652, 22.81614], [113.3121, 22.83039], [113.30966, 22.85119], [113.29614, 22.85991], [113.30083, 22.87677],
[113.27703, 22.8947], [113.28596, 22.90144], [113.2824, 22.92739], [113.2981, 22.93431], [113.28632, 22.95032],
[113.26705, 22.95494], [113.24993, 22.97329], [113.2579, 22.99486], [113.24966, 23.00204], [113.25286, 23.01977],
[113.26313, 23.02114], [113.2578, 23.04677], [113.21169, 23.04332], [113.17792, 23.06803], [113.17741, 23.07756],
[113.20907, 23.08346], [113.21673, 23.09866], [113.20814, 23.09968], [113.20247, 23.12111], [113.21055, 23.12337],
[113.21267, 23.1411], [113.18686, 23.14825], [113.1896, 23.16195], [113.20945, 23.1771], [113.209, 23.19218],
[113.17748, 23.22088], [113.182, 23.25278], [113.17653, 23.2736], [113.12798, 23.31455], [113.12437, 23.30659],
[113.11322, 23.30986], [113.10575, 23.30273], [113.10568, 23.29027], [113.07164, 23.28371], [113.08083, 23.25087],
[113.04476, 23.25096], [113.05378, 23.26378], [113.05143, 23.27839], [113.03263, 23.29767], [113.03755, 23.32007],
[113.02347, 23.3249], [113.04309, 23.353], [113.03354, 23.35682], [113.01671, 23.34093], [113.01169, 23.35358],
[112.98798, 23.35588], [112.98103, 23.38142], [112.98632, 23.39863], [113.00109, 23.40633], [112.98165, 23.43297],
[112.98911, 23.4433], [112.96339, 23.42642], [112.95922, 23.43539], [112.97928, 23.46515], [113.01594, 23.46058],
[113.02636, 23.47286], [113.05585, 23.47196], [113.08374, 23.4945], [113.11545, 23.50151], [113.1161, 23.51074],
[113.15354, 23.50284], [113.1711, 23.51156], [113.17232, 23.52029], [113.1721, 23.51237], [113.19206, 23.51477],
[113.19112, 23.52321], [113.21268, 23.54028], [113.20078, 23.56183], [113.20224, 23.57652], [113.22698, 23.58574],
[113.22789, 23.59442], [113.24441, 23.58688], [113.24038, 23.60624], [113.24847, 23.60159], [113.27657, 23.616],
[113.28134, 23.60836], [113.29946, 23.63689], [113.28927, 23.64436], [113.32726, 23.64442], [113.32796, 23.65548],
[113.34908, 23.66797], [113.36372, 23.70716], [113.37539, 23.71282], [113.37836, 23.73153], [113.40431, 23.7235],
[113.44191, 23.72704], [113.44386, 23.71592], [113.4643, 23.70797], [113.46871, 23.69099], [113.48103, 23.68404],
[113.5137, 23.68209], [113.54547, 23.69639], [113.53836, 23.6991], [113.54291, 23.70181], [113.55876, 23.70069],
[113.56805, 23.67944], [113.58729, 23.67523], [113.59835, 23.66267], [113.62259, 23.69944], [113.63819, 23.70457],
[113.62812, 23.71171], [113.6364, 23.75024], [113.61546, 23.78739], [113.65167, 23.82013], [113.68139, 23.81202],
[113.68737, 23.82572], [113.70638, 23.81527], [113.71855, 23.82076], [113.71353, 23.8625], [113.72476, 23.85356],
[113.75817, 23.85749], [113.78761, 23.90246], [113.80945, 23.90061], [113.87532, 23.93047], [113.88583, 23.92366],
[113.89252, 23.93167], [113.91024, 23.92357], [113.93353, 23.92923], [113.94117, 23.92357], [113.96945, 23.93256],
[113.98452, 23.92617], [114.00921, 23.93291], [114.03294, 23.92039], [114.03621, 23.90178]]]
    # convert the boundary points to tuples and build a matplotlib Path:
a = []
for i in G[0]:
a.append(tuple(i))
p = Path(a)
return p
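# Minimal usage sketch (Path is whatever was imported for edge_info above, assumed to be
# matplotlib.path.Path):
#   boundary = edge_info()
#   boundary.contains_points([(113.2648, 23.1286)])   # a point in central Guangzhou -> [ True]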
# Plotting
def geo_paint_new(central_points, border_points, geo1,lon1,lat1, new_dict):
central_points_lon = []; central_points_lat = []
border_points_lon = []; border_points_lat = []
    for central_point in central_points:  # collect the centre-point longitudes/latitudes
central_points_lon.append(central_point[0])
central_points_lat.append(central_point[1])
# border_points = list(set(border_points))
border_points1 = list(set([tuple(border_point) for border_point in border_points]))
border_points = []
central_points_lon = pd.DataFrame(central_points_lon)
central_points_lat = pd.DataFrame(central_points_lat)
    datas = [  # plot the boundary points and every matched centre point
go.Scattermapbox(
lat=lat1,
lon=lon1,
text=geo1,
mode='markers',
hoverinfo='text',
marker=go.scattermapbox.Marker(
size=5,
color='#000045',
opacity=0.3
)
),
go.Scattermapbox(
lat=central_points_lat,
lon=central_points_lon,
# text=geo,
mode='markers',
#hoverinfo='text',
marker=go.scattermapbox.Marker(
size=5,
color='#de9dac', # 000045
opacity=0.8
)
)
]
for key in new_dict.keys():
borders = new_dict[key]
lons = []; lats = []
for border in borders:
lons.append(border[0])
lats.append(border[1])
lons1 = list(set(lons))
lats1 = list(set(lats))
lats1.sort()
lons1.sort()
lons = [lons1[0], lons1[0], lons1[1], lons1[1], lons1[0]]
lats = [lats1[0], lats1[1], lats1[1], lats1[0], lats1[0]]
lons = pd.DataFrame(lons)
lats = pd.DataFrame(lats)
datas.append(
go.Scattermapbox(
lat=lats,
lon=lons,
# text=geo,
mode='markers+lines',
# hoverinfo='text',
marker=go.scattermapbox.Marker(
size=5,
color='green', # 000045
opacity=0.8
)
)
)
mapbox_access_token = '''pk.eyJ1IjoibHVrYXNtYXJ0aW5lbGxpIiwiYSI6ImNpem85dmhwazAyajIyd284dGxhN2VxYnYifQ.HQCmyhEXZUTz3S98FMrVAQ'''
layout = go.Layout(
title="Guangzhou_geo",
autosize=True,
hovermode='closest',
showlegend=False,
mapbox=go.layout.Mapbox(
accesstoken=mapbox_access_token,
bearing=0,
center=go.layout.mapbox.Center(
                lat=23.12864583,  # latitude of Guangzhou
                lon=113.2648325  # longitude of Guangzhou
),
pitch=0,
zoom=10,
style='light'
),
)
fig = go.Figure(data=datas, layout=layout)
    py.plot(fig, filename='Guangzhou_geo.html')  # generate and open the HTML file
if __name__ == "__main__":
    total, p, lon, lat = get_totalgeohash()  # all grid geohash codes plus the boundary info
central_point,new_total_lon,new_total_lat = match_zt(total)
aver_lons, aver_lats = border_point_zt(new_total_lon,new_total_lat)
point_dict = get_dict(central_point, aver_lons, aver_lats)
central_points, border_points, new_dict = match(point_dict)
geo_paint_new(central_points, border_points, p, lon, lat, new_dict)
|
[
"1284629360@qq.com"
] |
1284629360@qq.com
|
3e3dac817ffc16db906180be163aff6d62651bd6
|
bfaa554fb309e74926d96d34daf33a5423590e27
|
/20180424/pillow_draw_checkCode.py
|
231778022f47f48fc5f5272b6a594ab3980e9b92
|
[] |
no_license
|
hello-wn/python-basic-scripts
|
2aa1f0d809909af6466d50824a43b724a60e2012
|
0fb54f2548891678078b6e8b0c22eae05eb86b04
|
refs/heads/master
| 2020-03-12T09:06:07.502416
| 2019-01-18T03:32:29
| 2019-01-18T03:32:29
| 130,544,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
# -*- coding: utf-8 -*-
from PIL import Image, ImageDraw, ImageFont, ImageFilter
import random
# random uppercase letter (note the ASCII range 65-90)
def rdChar():
return chr(random.randint(65, 90))
# random light colour for the background noise
def rdColor():
return (random.randint(64, 255), random.randint(64, 255), random.randint(64, 255))
# random darker colour for the captcha characters
def rdColor2():
return (random.randint(32, 127), random.randint(32, 127), random.randint(32, 127))
width = 240
height = 60
image = Image.new('RGB', (width, height), (255, 255, 255))
font = ImageFont.truetype('/usr/share/cups/fonts/FreeMono.ttf', 35)
draw = ImageDraw.Draw(image)
for x in range(width):
for y in range(height):
draw.point((x, y), fill=rdColor())
for t in range(4):
draw.text((60 * t + 10, 10), rdChar(), font=font, fill=rdColor2())
image = image.filter(ImageFilter.BLUR)
image.save('code.jpg', 'jpeg')
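# Portability note (sketch): the FreeMono path above is Linux/CUPS-specific; on other systems
# something like ImageFont.truetype('DejaVuSans.ttf', 35) (if that font file is available) or
# ImageFont.load_default() can be substituted.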
|
[
"hello_wn@yeah.net"
] |
hello_wn@yeah.net
|
849da19238bbb64d158b6b90381bbbdc374dc0c5
|
e96520433d8d1fe0f8a82ae783a0a51d22465464
|
/basic_practice/lists_practice.py
|
f37b72d520924d9ccf5ec049330a9f465368a106
|
[] |
no_license
|
Harshvardhanvipat/Python_workspace
|
afd617ac94c508ed148f04c55c9ff588402242f8
|
f3c6072cf35e79cc9801642cc6cffc51404dac1d
|
refs/heads/master
| 2022-11-13T12:40:45.028150
| 2020-07-07T06:20:17
| 2020-07-07T06:20:17
| 266,084,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
li = [1,2,3,4,5,6,7]
li1 = ['á','b','c','d']
li2 = ['á', 'b', True, 1 ]
# Data Structure - > list
# example -
amazon_cart = [
'notebook',
'sunglasses',
'toys',
'tomatoes'
]
amazon_cart[0] = 'laptop' # we are changing the data saved before
print(amazon_cart[0::2])
# list slicing
# lists are mutable
|
[
"harshvardhanvipat@gmail.com"
] |
harshvardhanvipat@gmail.com
|
04411ac2aa0a3bb9a4a07702add9775918b49865
|
8a07225bfae2b7a1b4dd70e2c8824face23f4863
|
/test03_lurch.py
|
ba161b0512b494b48a1696f546bfa609d9b83f1f
|
[] |
no_license
|
raketenlurch/pylurch
|
f9c7e906d8c43d11c4ce3cde1d9fee5fcbda5c85
|
91bdcc3faadc80e0319b4d4b3631903be9bf090a
|
refs/heads/main
| 2023-08-29T05:14:52.456104
| 2021-10-22T17:19:56
| 2021-10-22T17:19:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,753
|
py
|
import pygame
class TestGame:
def __init__(self):
self.screen = None
self.game_is_running = True
self.box_position_x = 0
self.box_position_y = 0
self.background_rect = pygame.Rect(0, 0, 800, 600)
def draw_box(self):
box_colour = (245, 101, 44)
pygame.draw.rect(self.screen, box_colour, (self.box_position_x, self.box_position_y, 20, 20))
def box_control(self, event):
if event.key == pygame.K_DOWN:
self.box_position_y += 10
elif event.key == pygame.K_UP:
self.box_position_y -= 10
elif event.key == pygame.K_RIGHT:
self.box_position_x += 10
elif event.key == pygame.K_LEFT:
self.box_position_x -= 10
def run_game(self):
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
pygame.init()
pygame.display.set_caption("Teeeeeeeeeeeeeest")
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
clock = pygame.time.Clock()
self.game_is_running = True
while self.game_is_running:
# limit frame speed to 30 fps
time_passed = clock.tick(30)
self.screen.fill((55, 55, 55), self.background_rect)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.game_is_running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.game_is_running = False
else:
self.box_control(event)
self.draw_box()
# final draw
pygame.display.flip()
myGame = TestGame()
myGame.run_game()
|
[
"raketenlurch@riseup.net"
] |
raketenlurch@riseup.net
|
a9584a85606d2acd38bcec0054715ab18c5a981e
|
cdf652f20b10761d4d1feeefac4029c0188a757d
|
/app/auth/__init__.py
|
ac9a6429aee35f17da2e6a4a607a37229f902289
|
[
"MIT"
] |
permissive
|
YongLWei03/engineering_management_system
|
780fba3163e824c15e9e870dd7884fc2ccc21a90
|
9706be45a8cc5e0201b4db7f5c5aac217a667cd1
|
refs/heads/master
| 2021-06-23T07:49:46.427677
| 2017-09-07T15:56:12
| 2017-09-07T15:56:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
# --*-- coding:utf-8 --*--
from flask import Blueprint
auth = Blueprint("auth", __name__)
from . import views
|
[
"18601036905@163.com"
] |
18601036905@163.com
|
697993310a4ef5658b689a8b54825907938dbd89
|
6f1dea5df8255a1711b1116efed95f20f598dc48
|
/karyna-modules/stock_picking_show_returns/__init__.py
|
5b2680d2f02ca0ae50223959929b91baf08987b9
|
[] |
no_license
|
joseernestomendez/odoo-karyna
|
62dd470f753f2b81f92329c1f725d7e1385146c2
|
8228ca49d8613afad2ad22f8ab288c5a7a216c48
|
refs/heads/master
| 2021-01-12T02:50:34.673120
| 2017-01-05T13:49:25
| 2017-01-05T13:49:25
| 78,115,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import stock_picking
|
[
"tecnologia@obsdr.com"
] |
tecnologia@obsdr.com
|
a6b76e3615dbf3fc4b18a2d57080f46990536474
|
bf4bfc9c55f1b9380cba0e4309b999bf50d3958d
|
/core/admin.py
|
862740e243fc64e227a901266cbd425dcbaf07b8
|
[] |
no_license
|
JasurbekNURBOYEV/cloudy
|
4c19c4aa082cd23535cd6ba4cd785fc2792500e8
|
3370946019b982722132c90e8fda8f5d62f53f83
|
refs/heads/main
| 2023-05-11T18:55:58.418049
| 2021-06-03T11:17:09
| 2021-06-03T11:17:09
| 373,393,053
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 771
|
py
|
from django.contrib import admin
from core import models
@admin.register(models.City)
class CityAdmin(admin.ModelAdmin):
date_hierarchy = 'created_time'
list_display = [
'id',
'name',
'country'
]
search_fields = [
'name',
]
list_filter = [
'country',
]
exclude = [
'created_time',
'last_updated_time',
]
@admin.register(models.Forecast)
class ForecastAdmin(admin.ModelAdmin):
date_hierarchy = 'created_time'
list_display = [
'city',
'detailed_status',
'time'
]
search_fields = [
'name',
'id'
]
list_filter = [
'city',
]
exclude = [
'created_time',
'last_updated_time',
]
|
[
"bluestacks6523@gmail.com"
] |
bluestacks6523@gmail.com
|
9dcee3894185cb4decc19faa5897585d10e3fa79
|
402b566f68624725ba498bcc29dbbfc2f4f4e713
|
/learning_log/learning_logs/views.py
|
24a1fdcc021c7c5fd50550035581386a23a49825
|
[] |
no_license
|
KevinG1thub/kevin.learning_log
|
8f3d2c8d48c525170c00271d14f6be5d8fbe34c4
|
01b25697d2402b99a12241c96e7374d47d5db8e1
|
refs/heads/main
| 2023-02-01T10:25:53.181331
| 2020-12-17T14:12:06
| 2020-12-17T14:12:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,702
|
py
|
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.http import Http404
from .models import Topic, Entry
from .forms import TopicForm, EntryForm
def index(request):
"""The home page for Learning Log."""
return render(request, 'learning_logs/index.html')
@login_required
def topics(request):
"""show all topics"""
topics = Topic.objects.filter(owner=request.user).order_by('date_added')
context = {'topics': topics}
return render(request, 'learning_logs/topics.html', context)
@login_required
def topic(request, topic_id):
"""Show a single topic and all its entries."""
topic = Topic.objects.get(id=topic_id)
#make sure topic belongs to current user
if topic.owner != request.user:
raise Http404
entries = topic.entry_set.order_by('-date_added')
context = {'topic': topic, 'entries': entries}
return render(request, 'learning_logs/topic.html', context)
@login_required
def new_topic(request):
"""add new topic"""
if request.method != 'POST':
#no data submitted, create blank form
form = TopicForm()
else:
#POST data submitted, process data
form = TopicForm(data=request.POST)
if form.is_valid():
new_topic = form.save(commit=False)
new_topic.owner = request.user
new_topic.save()
return redirect('learning_logs:topics')
#display blank or invalid form
context = {'form': form}
return render(request, 'learning_logs/new_topic.html', context)
@login_required
def new_entry(request, topic_id):
"""Add a new entry for a particular topic."""
topic = Topic.objects.get(id=topic_id)
if request.method != 'POST':
# No data submitted; create a blank form.
form = EntryForm()
else:
#POST data submitted, process data
form = EntryForm(data=request.POST)
if form.is_valid():
new_entry = form.save(commit=False)
new_entry.topic = topic
new_entry.save()
return redirect('learning_logs:topic', topic_id=topic_id)
#display blank or invalid form
context = {'topic': topic, 'form': form}
return render(request, 'learning_logs/new_entry.html', context)
@login_required
def edit_entry(request, entry_id):
"""edit existing entry"""
entry = Entry.objects.get(id=entry_id)
topic = entry.topic
    if topic.owner != request.user:
raise Http404
if request.method != 'POST':
        # initial request; pre-fill form with the current entry
form = EntryForm(instance=entry)
else:
#POST data submitted, process
form = EntryForm(instance=entry, data=request.POST)
if form.is_valid():
form.save()
return redirect('learning_logs:topic', topic_id=topic.id)
context = {'entry': entry, 'topic': topic, 'form': form}
return render(request, 'learning_logs/edit_entry.html', context)
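# Note (sketch): the owner check repeated in topic() and edit_entry() could be factored into a
# small hypothetical helper, e.g.:
#   def check_topic_owner(topic, request):
#       if topic.owner != request.user:
#           raise Http404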
|
[
"kevin.loetscher@seantis.ch"
] |
kevin.loetscher@seantis.ch
|
a5d8262889b9692b4fd6a25fc57d5910bda04bdd
|
6d6d1fff6e41339c05dd8deffec257b7d81d3eb2
|
/make_hists/v_08/mkjson.py
|
f4bfa81675f2ba528c314f8e8b9ad842d44185db
|
[] |
no_license
|
MinervaExpt/CCQENu
|
5c08cd315811964013a24c11406547b00149a1f6
|
49009f38ee85a8b58dcb1fab2b82109860d1bece
|
refs/heads/main
| 2023-08-22T06:05:57.522385
| 2023-07-31T21:29:47
| 2023-07-31T21:29:47
| 229,081,242
| 1
| 1
| null | 2023-07-31T21:29:48
| 2019-12-19T15:16:54
|
Python
|
UTF-8
|
Python
| false
| false
| 666
|
py
|
import sys,os,time
playlists = ["5A","6A","6B","6C","6D","6E","6F","6G","6H","6I","6J"]
list = '_'.join(playlists)
newdir = "."
template = os.path.join(newdir,"AntiNu_Template.json")
prescale = "1000"
#ofile = TFile.Open(list+".root",'RECREATE')
potdata ={}
potmc = {}
potcorr = {}
potmctot = {}
count = 0
filename = template
for play in playlists:
f = open(filename,'r')
newfilename = template.replace("Template",play)
print (filename,newfilename)
f_new = open(newfilename,'w')
lines = f.readlines()
for line in lines:
line = line.replace("Template",play)
f_new.write(line)
f_new.close()
f.close()
|
[
"Heidi.Schellman@oregonstate.edu"
] |
Heidi.Schellman@oregonstate.edu
|
452ef2ff47c47d08c477592a9e5a611b0d5e6921
|
7d6424d1ddd68fc4f4eec66075917ee6d828e1ae
|
/GetQiReLinks.py
|
63218bcd857c9a0112d81f94484e5d4467572347
|
[] |
no_license
|
lantianjialiang/python-script
|
96e6f6edc9226936004f758669be59f55938e568
|
5f45f3d51194a5e2f226a6132497f748efe81352
|
refs/heads/master
| 2021-01-20T21:26:12.480095
| 2013-05-30T06:32:52
| 2013-05-30T06:32:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,280
|
py
|
import sgmllib
import urllib2
from pprint import pprint
from bs4 import BeautifulSoup
class PageParser(sgmllib.SGMLParser):
def __init__(self, aPrefix):
# inherit from the SGMLParser class
sgmllib.SGMLParser.__init__(self)
# create a list this will store all the links found
self.links = []
self.prefix = aPrefix;
def unknown_starttag(self, tag, attrs):
for key, value in attrs:
#print "key and value", key, value
if key == "href" and value.startswith('/videos/'):
self.links.append(self.prefix + value)
class PageParserForBaidhHD(sgmllib.SGMLParser):
def __init__(self):
# inherit from the SGMLParser class
sgmllib.SGMLParser.__init__(self)
# create a list this will store all the links found
self.links = []
def unknown_starttag(self, tag, attrs):
print "unknown tag start " + tag
for key, value in attrs:
print key , value
if key.lower() == "param_url":
value = unicode(value, 'utf-8')
print value
self.links.append(value)
def getGiRenList(vedioUrl):
bdhdList = []
sock = urllib2.urlopen(vedioUrl)
# make sure the string that is going to be parsed is 8-bit ascii
if sock.info().dict['content-type'] == 'text/html':
parser = PageParser("http://www.qire123.com/")
parser.feed(sock.read());
parser.close();
bdhdList = parser.links
return bdhdList
def getBaiduHDAddress(vedioUrl):
bdhdList = []
sock = urllib2.urlopen(vedioUrl)
if sock.info().dict['content-type'] == 'text/html':
# page = sock.read()
soup = BeautifulSoup(sock)
soup.prettify()
for anchor in soup.findAll('object'):
print anchor['href']
return bdhdList
def main():
urlAddress = "http://www.qire123.com/occident/yifanzhuizongdiyiji/";
#pprint (getGiRenList(urlAddress));
list1 = getGiRenList(urlAddress);
list1.reverse();
for aUrl in list1:
getBaiduHDAddress(aUrl);
#pprint (txtlist)
if __name__ == '__main__':
main();
|
[
"lantianjialiang@gmail.com"
] |
lantianjialiang@gmail.com
|
d5edabd91c323e063605d21d213f50e3f2e8f874
|
11d0d53638360b2371e459bce87e659204307b7b
|
/examples/raspi-camera/multiple-stills.py
|
4d3aa2067b3df2a4a1c2e096cb296f694ca3e1f4
|
[] |
no_license
|
poplatek/junction18-public
|
ca74a19de8b6c637150d1274c51fcca1a0d8d33d
|
310ee610d17e403905832b80c8a3e268a31ea42d
|
refs/heads/master
| 2020-04-03T20:32:49.397209
| 2018-11-23T20:33:23
| 2018-11-23T20:33:23
| 155,548,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
from picamera import PiCamera
from time import sleep
from time import time
import math
camera = PiCamera()
camera.rotation = 180
camera.start_preview(alpha=200)
for i in range(5):
sleep(3)
today = int(math.floor(time()))
camera.capture('/home/pi/pics/test_' + str(today) + '.jpg')
    print('saved image: pics/test_' + str(today) + '.jpg')
camera.stop_preview()
|
[
"sami-pekka.haavisto@poplatek.fi"
] |
sami-pekka.haavisto@poplatek.fi
|
6782a4e42c1ad9b6b02b4cf58d21f8cf0b267233
|
8217432a3bc36bbe6fc02e4312feab2db997ee47
|
/election2000/forms.py
|
17137b7798a1c957dd2c166dfb7ab89862beee97
|
[] |
no_license
|
m-chrzan/rest_election
|
0581ac6e4f7188a7658958c76eb0631672552acc
|
d3436c6b168b059d0b3614b3aaa50b73710c274c
|
refs/heads/master
| 2021-03-24T13:56:13.652075
| 2017-05-19T09:37:10
| 2017-05-19T09:38:01
| 91,790,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
from django import forms
from django.contrib.auth.models import User
class UserForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget = forms.PasswordInput)
|
[
"marcin.j.chrzanowski@gmail.com"
] |
marcin.j.chrzanowski@gmail.com
|
30319aa3fe6884d573153e9fb3f257f59108be0a
|
5beffbb470642cf6700db35582e14d955b603feb
|
/ds/tree_traversal.py
|
af19330722691876063e79e41264bae19c92643c
|
[] |
no_license
|
SauravAnchlia/leetcode
|
f6f06c535edfc86f28e7555573dfd2c22a707f3b
|
30ea3a02d10b4bc694d3ea9b847af59fa601e1dd
|
refs/heads/master
| 2022-12-14T16:43:26.783466
| 2020-09-19T11:21:20
| 2020-09-19T11:21:20
| 269,892,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
# inorder, pre order and post order traversal of a tree.
from tree import root
iorder = []
def inorder(root):
if root:
inorder(root.left)
        iorder.append(root.val)
        inorder(root.right)
post = []
def postorder(root):
if root:
postorder(root.left)
postorder(root.right)
post.append(root.val)
preord = []
def preorder(root):
if root:
preord.append(root.val)
preorder(root.left)
preorder(root.right)
inorder(root)
preorder(root)
postorder(root)
print(f'in {iorder} , preorder {preord} and post {post}')
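# Illustration (hypothetical three-node tree, not the one imported above): for a root 1 with
# left child 2 and right child 3, the traversals give inorder [2, 1, 3], preorder [1, 2, 3]
# and postorder [2, 3, 1].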
|
[
"saurav.anchlia@oracle.com"
] |
saurav.anchlia@oracle.com
|
8abb6e606dd1a60e95d5c87d2ae827f83038a627
|
08c59f4c2c505f82aba4f732361fad9891b4a0b4
|
/Modules/program_9_validate_phonenum.py
|
83ea698300a6d53a09111d52646287321e95301c
|
[] |
no_license
|
SanthoshKR93/Python_Anandology_Solutions
|
db71626cb7403873c9696998dcf4b148a526c0ab
|
50d2b613aeb1a27c21a24c9b620e7aa2d2f11cde
|
refs/heads/master
| 2020-04-12T19:32:53.398218
| 2019-02-17T09:39:41
| 2019-02-17T09:39:41
| 162,711,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
# Write a regular expression to validate a phone number.
import re
def val_phno(num):
    robj = re.compile(r'\+\d\d-\d\d\d\d\d\d\d\d\d\d')
v = re.findall(robj,num)
#print (type(v))
if num in v:
print ("yay!!! valid phone number")
else:
print("invalid phone number!!! Try again..")
val_phno('+91-8089625072')
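# A stricter variant (sketch, same '+CC-XXXXXXXXXX' shape, Python 3.4+):
# re.fullmatch(r'\+\d{2}-\d{10}', num) returns a match only when the entire string is a
# valid number, avoiding the findall / membership round trip used above.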
|
[
"noreply@github.com"
] |
SanthoshKR93.noreply@github.com
|
426c671424b89aeb323458a30a3effac244d2e12
|
6a4a59194cb7fd1d3a4090699baa43692b183864
|
/fybrrLink_VizTool/urls.py
|
1bd9749880980f5bdd48d84fc584f08b77c061f1
|
[] |
no_license
|
RotonEvan/fybrrLink-VizTool
|
bb7de13b20cf9cde1a0db3d9233b1ecf8eea36ba
|
9dc67c81c32a8f10092b134b903bfe2646c280ad
|
refs/heads/main
| 2023-05-28T04:49:48.142980
| 2021-06-12T22:23:57
| 2021-06-12T22:23:57
| 351,628,656
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
"""fybrrLink_VizTool URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('VizTool.urls'))
]
|
[
"debajyotih@iitbhilai.ac.in"
] |
debajyotih@iitbhilai.ac.in
|
def3c4afba817b934c0fdbe3925c0bc04daa0e68
|
8f6c0a2d2ac20053dba85b7d45e64afb36189756
|
/src/damn_at/analyzers/mesh/metadata.py
|
aa5bd9fafd1458c0cf0bc66cf761c4a29f697004
|
[
"BSD-3-Clause"
] |
permissive
|
induane/damn-at
|
5739d258ed54b970851eb12aa5e80d0ccd94c3b5
|
f8f74a8726171b3b7c7ff93a8f33362a4369ab3a
|
refs/heads/master
| 2021-01-18T11:47:31.182633
| 2015-02-03T21:02:51
| 2015-02-03T21:02:51
| 18,813,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,743
|
py
|
from damn_at.metadata import MetaDataExtractor
from damn_at import MetaDataType
class MetaDataBlenderImage(MetaDataExtractor):
__mimetype__ = 'application/x-blender.image'
channels = MetaDataType.INT, lambda context: context['image'].channels
depth = MetaDataType.INT, lambda context: context['image'].depth
file_format = MetaDataType.STRING, lambda context: str(context['image'].file_format)
#resolution = MetaDataType.STRING, lambda context: '{}x{}'.format(*list(context['image'].resolution[:]))
size = MetaDataType.STRING, lambda context: '{}x{}'.format(*list(context['image'].size[:]))
class MetaDataBlenderMesh(MetaDataExtractor):
__mimetype__ = 'application/x-blender.mesh'
nr_of_faces = MetaDataType.INT, lambda context: len(context['mesh'].tessfaces)
nr_of_vertices = MetaDataType.INT, lambda context: len(context['mesh'].vertices)
nr_of_polygons = MetaDataType.INT, lambda context: len(context['mesh'].polygons)
class MetaDataBlenderObject(MetaDataExtractor):
__mimetype__ = 'application/x-blender.object'
location = MetaDataType.STRING, lambda context: str(context['object'].location)
type = MetaDataType.STRING, lambda context: str(context['object'].type)
dimensions = MetaDataType.STRING, lambda context: str(context['object'].dimensions)
class MetaDataWaveFrontDefault(MetaDataExtractor):
__mimetype__ = 'application/wavefront-obj'
nr_of_faces = MetaDataType.INT, lambda context: len(context.groups['default']['faces'])
nr_of_vertices = MetaDataType.INT, lambda context: len(context.vertices)
class MetaDataAssimpTexture(MetaDataExtractor):
__mimetype__ = 'application/assimp.texture'
class MetaDataAssimpMaterial(MetaDataExtractor):
__mimetype__ = 'application/assimp.material'
diffuse = MetaDataType.STRING, lambda context: str(context['$clr.diffuse'])
shadingm = MetaDataType.STRING, lambda context: str(context['$mat.shadingm'])
ambient = MetaDataType.STRING, lambda context: str(context['$clr.ambient'])
specular = MetaDataType.STRING, lambda context: str(context['$clr.specular'])
shininess = MetaDataType.STRING, lambda context: str(context['$mat.shininess'])
opacity = MetaDataType.STRING, lambda context: str(context['$mat.opacity'])
refracti = MetaDataType.STRING, lambda context: str(context['$mat.refracti'])
class MetaDataAssimpMesh(MetaDataExtractor):
__mimetype__ = 'application/assimp.mesh'
nr_of_faces = MetaDataType.INT, lambda context: context.mNumFaces
nr_of_vertices = MetaDataType.INT, lambda context: context.mNumVertices
nr_of_bones = MetaDataType.INT, lambda context: context.mNumBones
nr_of_anim_meshes = MetaDataType.INT, lambda context: context.mNumAnimMeshes
|
[
"sueastside@gmail.com"
] |
sueastside@gmail.com
|
14866e8d4f05cf6985bf52984d5d60cdf0091855
|
2c74bb301f1ed83b79254944183ac5a18a639fdf
|
/tests/components/zwave_js/test_device_trigger.py
|
859164aa4c3fd77dc34dce11ed904cfb38c89adf
|
[
"Apache-2.0"
] |
permissive
|
Adminiuga/home-assistant
|
5bec93007ddac1a268cc359bf7e48530c5f73b38
|
dcf68d768e4f628d038f1fdd6e40bad713fbc222
|
refs/heads/dev
| 2023-02-22T22:03:31.013931
| 2022-11-09T00:27:20
| 2022-11-09T00:27:20
| 123,929,062
| 5
| 4
|
Apache-2.0
| 2023-02-22T06:14:31
| 2018-03-05T14:11:09
|
Python
|
UTF-8
|
Python
| false
| false
| 49,380
|
py
|
"""The tests for Z-Wave JS device triggers."""
from unittest.mock import patch
import pytest
import voluptuous_serialize
from zwave_js_server.const import CommandClass
from zwave_js_server.event import Event
from zwave_js_server.model.node import Node
from homeassistant.components import automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.zwave_js import DOMAIN, device_trigger
from homeassistant.components.zwave_js.helpers import (
async_get_node_status_sensor_entity_id,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.device_registry import (
async_entries_for_config_entry,
async_get as async_get_dev_reg,
)
from homeassistant.helpers.entity_registry import async_get as async_get_ent_reg
from homeassistant.setup import async_setup_component
from tests.common import (
assert_lists_same,
async_get_device_automations,
async_mock_service,
)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_notification_notification_triggers(
hass, client, lock_schlage_be469, integration
):
"""Test we get the expected triggers from a zwave_js device with the Notification CC."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
expected_trigger = {
"platform": "device",
"domain": DOMAIN,
"type": "event.notification.notification",
"device_id": device.id,
"command_class": CommandClass.NOTIFICATION,
"metadata": {},
}
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device.id
)
assert expected_trigger in triggers
async def test_if_notification_notification_fires(
hass, client, lock_schlage_be469, integration, calls
):
"""Test for event.notification.notification trigger firing."""
node: Node = lock_schlage_be469
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# event, type, label
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "event.notification.notification",
"command_class": CommandClass.NOTIFICATION.value,
"type.": 6,
"event": 5,
"label": "Access Control",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"event.notification.notification - "
"{{ trigger.platform}} - "
"{{ trigger.event.event_type}} - "
"{{ trigger.event.data.command_class }}"
)
},
},
},
# no type, event, label
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "event.notification.notification",
"command_class": CommandClass.NOTIFICATION.value,
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"event.notification.notification2 - "
"{{ trigger.platform}} - "
"{{ trigger.event.event_type}} - "
"{{ trigger.event.data.command_class }}"
)
},
},
},
]
},
)
# Publish fake Notification CC notification
event = Event(
type="notification",
data={
"source": "node",
"event": "notification",
"nodeId": node.node_id,
"ccId": 113,
"args": {
"type": 6,
"event": 5,
"label": "Access Control",
"eventLabel": "Keypad lock operation",
"parameters": {"userId": 1},
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[0].data[
"some"
] == "event.notification.notification - device - zwave_js_notification - {}".format(
CommandClass.NOTIFICATION
)
assert calls[1].data[
"some"
] == "event.notification.notification2 - device - zwave_js_notification - {}".format(
CommandClass.NOTIFICATION
)
async def test_get_trigger_capabilities_notification_notification(
hass, client, lock_schlage_be469, integration
):
"""Test we get the expected capabilities from a notification.notification trigger."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "event.notification.notification",
"command_class": CommandClass.NOTIFICATION.value,
},
)
assert capabilities and "extra_fields" in capabilities
assert_lists_same(
voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
),
[
{"name": "type.", "optional": True, "type": "string"},
{"name": "label", "optional": True, "type": "string"},
{"name": "event", "optional": True, "type": "string"},
{"name": "event_label", "optional": True, "type": "string"},
],
)
async def test_if_entry_control_notification_fires(
hass, client, lock_schlage_be469, integration, calls
):
"""Test for notification.entry_control trigger firing."""
node: Node = lock_schlage_be469
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# event_type and data_type
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "event.notification.entry_control",
"command_class": CommandClass.ENTRY_CONTROL.value,
"event_type": 5,
"data_type": 2,
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"event.notification.notification - "
"{{ trigger.platform}} - "
"{{ trigger.event.event_type}} - "
"{{ trigger.event.data.command_class }}"
)
},
},
},
# no event_type and data_type
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "event.notification.entry_control",
"command_class": CommandClass.ENTRY_CONTROL.value,
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"event.notification.notification2 - "
"{{ trigger.platform}} - "
"{{ trigger.event.event_type}} - "
"{{ trigger.event.data.command_class }}"
)
},
},
},
]
},
)
# Publish fake Entry Control CC notification
event = Event(
type="notification",
data={
"source": "node",
"event": "notification",
"nodeId": node.node_id,
"ccId": 111,
"args": {
"eventType": 5,
"eventTypeLabel": "label 1",
"dataType": 2,
"dataTypeLabel": "label 2",
"eventData": "555",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[0].data[
"some"
] == "event.notification.notification - device - zwave_js_notification - {}".format(
CommandClass.ENTRY_CONTROL
)
assert calls[1].data[
"some"
] == "event.notification.notification2 - device - zwave_js_notification - {}".format(
CommandClass.ENTRY_CONTROL
)
async def test_get_trigger_capabilities_entry_control_notification(
hass, client, lock_schlage_be469, integration
):
"""Test we get the expected capabilities from a notification.entry_control trigger."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "event.notification.entry_control",
"command_class": CommandClass.ENTRY_CONTROL.value,
},
)
assert capabilities and "extra_fields" in capabilities
assert_lists_same(
voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
),
[
{"name": "event_type", "optional": True, "type": "string"},
{"name": "data_type", "optional": True, "type": "string"},
],
)
async def test_get_node_status_triggers(hass, client, lock_schlage_be469, integration):
"""Test we get the expected triggers from a device with node status sensor enabled."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
ent_reg = async_get_ent_reg(hass)
entity_id = async_get_node_status_sensor_entity_id(
hass, device.id, ent_reg, dev_reg
)
ent_reg.async_update_entity(entity_id, **{"disabled_by": None})
await hass.config_entries.async_reload(integration.entry_id)
await hass.async_block_till_done()
expected_trigger = {
"platform": "device",
"domain": DOMAIN,
"type": "state.node_status",
"device_id": device.id,
"entity_id": entity_id,
"metadata": {"secondary": True},
}
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device.id
)
assert expected_trigger in triggers
async def test_if_node_status_change_fires(
hass, client, lock_schlage_be469, integration, calls
):
"""Test for node_status trigger firing."""
node: Node = lock_schlage_be469
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
ent_reg = async_get_ent_reg(hass)
entity_id = async_get_node_status_sensor_entity_id(
hass, device.id, ent_reg, dev_reg
)
ent_reg.async_update_entity(entity_id, **{"disabled_by": None})
await hass.config_entries.async_reload(integration.entry_id)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# from
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"entity_id": entity_id,
"type": "state.node_status",
"from": "alive",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"state.node_status - "
"{{ trigger.platform}} - "
"{{ trigger.from_state.state }}"
)
},
},
},
# no from or to
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"entity_id": entity_id,
"type": "state.node_status",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"state.node_status2 - "
"{{ trigger.platform}} - "
"{{ trigger.from_state.state }}"
)
},
},
},
]
},
)
# Test status change
event = Event(
"dead", data={"source": "node", "event": "dead", "nodeId": node.node_id}
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[0].data["some"] == "state.node_status - device - alive"
assert calls[1].data["some"] == "state.node_status2 - device - alive"
async def test_get_trigger_capabilities_node_status(
hass, client, lock_schlage_be469, integration
):
"""Test we get the expected capabilities from a node_status trigger."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
ent_reg = async_get_ent_reg(hass)
entity_id = async_get_node_status_sensor_entity_id(
hass, device.id, ent_reg, dev_reg
)
ent_reg.async_update_entity(entity_id, **{"disabled_by": None})
await hass.config_entries.async_reload(integration.entry_id)
await hass.async_block_till_done()
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"entity_id": entity_id,
"type": "state.node_status",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "from",
"optional": True,
"options": [
("asleep", "asleep"),
("awake", "awake"),
("dead", "dead"),
("alive", "alive"),
],
"type": "select",
},
{
"name": "to",
"optional": True,
"options": [
("asleep", "asleep"),
("awake", "awake"),
("dead", "dead"),
("alive", "alive"),
],
"type": "select",
},
{"name": "for", "optional": True, "type": "positive_time_period_dict"},
]
async def test_get_basic_value_notification_triggers(
hass, client, ge_in_wall_dimmer_switch, integration
):
"""Test we get the expected triggers from a zwave_js device with the Basic CC."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
expected_trigger = {
"platform": "device",
"domain": DOMAIN,
"type": "event.value_notification.basic",
"device_id": device.id,
"command_class": CommandClass.BASIC,
"property": "event",
"property_key": None,
"endpoint": 0,
"subtype": "Endpoint 0",
"metadata": {},
}
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device.id
)
assert expected_trigger in triggers
async def test_if_basic_value_notification_fires(
hass, client, ge_in_wall_dimmer_switch, integration, calls
):
"""Test for event.value_notification.basic trigger firing."""
node: Node = ge_in_wall_dimmer_switch
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# value
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"type": "event.value_notification.basic",
"device_id": device.id,
"command_class": CommandClass.BASIC.value,
"property": "event",
"property_key": None,
"endpoint": 0,
"subtype": "Endpoint 0",
"value": 0,
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"event.value_notification.basic - "
"{{ trigger.platform}} - "
"{{ trigger.event.event_type}} - "
"{{ trigger.event.data.command_class }}"
)
},
},
},
# no value
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"type": "event.value_notification.basic",
"device_id": device.id,
"command_class": CommandClass.BASIC.value,
"property": "event",
"property_key": None,
"endpoint": 0,
"subtype": "Endpoint 0",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"event.value_notification.basic2 - "
"{{ trigger.platform}} - "
"{{ trigger.event.event_type}} - "
"{{ trigger.event.data.command_class }}"
)
},
},
},
]
},
)
# Publish fake Basic CC value notification
event = Event(
type="value notification",
data={
"source": "node",
"event": "value notification",
"nodeId": node.node_id,
"args": {
"commandClassName": "Basic",
"commandClass": 32,
"endpoint": 0,
"property": "event",
"propertyName": "event",
"value": 0,
"metadata": {
"type": "number",
"readable": True,
"writeable": False,
"label": "Event value",
"min": 0,
"max": 255,
},
"ccVersion": 1,
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[0].data[
"some"
] == "event.value_notification.basic - device - zwave_js_value_notification - {}".format(
CommandClass.BASIC
)
assert calls[1].data[
"some"
] == "event.value_notification.basic2 - device - zwave_js_value_notification - {}".format(
CommandClass.BASIC
)
async def test_get_trigger_capabilities_basic_value_notification(
hass, client, ge_in_wall_dimmer_switch, integration
):
"""Test we get the expected capabilities from a value_notification.basic trigger."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": DOMAIN,
"type": "event.value_notification.basic",
"device_id": device.id,
"command_class": CommandClass.BASIC.value,
"property": "event",
"property_key": None,
"endpoint": 0,
"subtype": "Endpoint 0",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "value",
"optional": True,
"type": "integer",
"valueMin": 0,
"valueMax": 255,
}
]
async def test_get_central_scene_value_notification_triggers(
hass, client, wallmote_central_scene, integration
):
"""Test we get the expected triggers from a zwave_js device with the Central Scene CC."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
expected_trigger = {
"platform": "device",
"domain": DOMAIN,
"type": "event.value_notification.central_scene",
"device_id": device.id,
"command_class": CommandClass.CENTRAL_SCENE,
"property": "scene",
"property_key": "001",
"endpoint": 0,
"subtype": "Endpoint 0 Scene 001",
"metadata": {},
}
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device.id
)
assert expected_trigger in triggers
async def test_if_central_scene_value_notification_fires(
hass, client, wallmote_central_scene, integration, calls
):
"""Test for event.value_notification.central_scene trigger firing."""
node: Node = wallmote_central_scene
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# value
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "event.value_notification.central_scene",
"command_class": CommandClass.CENTRAL_SCENE.value,
"property": "scene",
"property_key": "001",
"endpoint": 0,
"subtype": "Endpoint 0 Scene 001",
"value": 0,
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"event.value_notification.central_scene - "
"{{ trigger.platform}} - "
"{{ trigger.event.event_type}} - "
"{{ trigger.event.data.command_class }}"
)
},
},
},
# no value
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "event.value_notification.central_scene",
"command_class": CommandClass.CENTRAL_SCENE.value,
"property": "scene",
"property_key": "001",
"endpoint": 0,
"subtype": "Endpoint 0 Scene 001",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"event.value_notification.central_scene2 - "
"{{ trigger.platform}} - "
"{{ trigger.event.event_type}} - "
"{{ trigger.event.data.command_class }}"
)
},
},
},
]
},
)
# Publish fake Central Scene CC value notification
event = Event(
type="value notification",
data={
"source": "node",
"event": "value notification",
"nodeId": node.node_id,
"args": {
"commandClassName": "Central Scene",
"commandClass": 91,
"endpoint": 0,
"property": "scene",
"propertyName": "scene",
"propertyKey": "001",
"propertyKey": "001",
"value": 0,
"metadata": {
"type": "number",
"readable": True,
"writeable": False,
"min": 0,
"max": 255,
"label": "Scene 004",
"states": {
"0": "KeyPressed",
"1": "KeyReleased",
"2": "KeyHeldDown",
},
},
"ccVersion": 1,
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[0].data[
"some"
] == "event.value_notification.central_scene - device - zwave_js_value_notification - {}".format(
CommandClass.CENTRAL_SCENE
)
assert calls[1].data[
"some"
] == "event.value_notification.central_scene2 - device - zwave_js_value_notification - {}".format(
CommandClass.CENTRAL_SCENE
)
async def test_get_trigger_capabilities_central_scene_value_notification(
hass, client, wallmote_central_scene, integration
):
"""Test we get the expected capabilities from a value_notification.central_scene trigger."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": DOMAIN,
"type": "event.value_notification.central_scene",
"device_id": device.id,
"command_class": CommandClass.CENTRAL_SCENE.value,
"property": "scene",
"property_key": "001",
"endpoint": 0,
"subtype": "Endpoint 0 Scene 001",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "value",
"optional": True,
"type": "select",
"options": [(0, "KeyPressed"), (1, "KeyReleased"), (2, "KeyHeldDown")],
},
]
async def test_get_scene_activation_value_notification_triggers(
hass, client, hank_binary_switch, integration
):
"""Test we get the expected triggers from a zwave_js device with the SceneActivation CC."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
expected_trigger = {
"platform": "device",
"domain": DOMAIN,
"type": "event.value_notification.scene_activation",
"device_id": device.id,
"command_class": CommandClass.SCENE_ACTIVATION.value,
"property": "sceneId",
"property_key": None,
"endpoint": 0,
"subtype": "Endpoint 0",
"metadata": {},
}
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device.id
)
assert expected_trigger in triggers
async def test_if_scene_activation_value_notification_fires(
hass, client, hank_binary_switch, integration, calls
):
"""Test for event.value_notification.scene_activation trigger firing."""
node: Node = hank_binary_switch
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
# value
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "event.value_notification.scene_activation",
"command_class": CommandClass.SCENE_ACTIVATION.value,
"property": "sceneId",
"property_key": None,
"endpoint": 0,
"subtype": "Endpoint 0",
"value": 1,
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"event.value_notification.scene_activation - "
"{{ trigger.platform}} - "
"{{ trigger.event.event_type}} - "
"{{ trigger.event.data.command_class }}"
)
},
},
},
# No value
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "event.value_notification.scene_activation",
"command_class": CommandClass.SCENE_ACTIVATION.value,
"property": "sceneId",
"property_key": None,
"endpoint": 0,
"subtype": "Endpoint 0",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"event.value_notification.scene_activation2 - "
"{{ trigger.platform}} - "
"{{ trigger.event.event_type}} - "
"{{ trigger.event.data.command_class }}"
)
},
},
},
]
},
)
# Publish fake Scene Activation CC value notification
event = Event(
type="value notification",
data={
"source": "node",
"event": "value notification",
"nodeId": node.node_id,
"args": {
"commandClassName": "Scene Activation",
"commandClass": 43,
"endpoint": 0,
"property": "sceneId",
"propertyName": "sceneId",
"value": 1,
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"min": 1,
"max": 255,
"label": "Scene ID",
},
"ccVersion": 1,
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[0].data[
"some"
] == "event.value_notification.scene_activation - device - zwave_js_value_notification - {}".format(
CommandClass.SCENE_ACTIVATION
)
assert calls[1].data[
"some"
] == "event.value_notification.scene_activation2 - device - zwave_js_value_notification - {}".format(
CommandClass.SCENE_ACTIVATION
)
async def test_get_trigger_capabilities_scene_activation_value_notification(
hass, client, hank_binary_switch, integration
):
"""Test we get the expected capabilities from a value_notification.scene_activation trigger."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": DOMAIN,
"type": "event.value_notification.scene_activation",
"device_id": device.id,
"command_class": CommandClass.SCENE_ACTIVATION.value,
"property": "sceneId",
"property_key": None,
"endpoint": 0,
"subtype": "Endpoint 0",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "value",
"optional": True,
"type": "integer",
"valueMin": 1,
"valueMax": 255,
}
]
async def test_get_value_updated_value_triggers(
hass, client, lock_schlage_be469, integration
):
"""Test we get the zwave_js.value_updated.value trigger from a zwave_js device."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
expected_trigger = {
"platform": "device",
"domain": DOMAIN,
"type": "zwave_js.value_updated.value",
"device_id": device.id,
"metadata": {},
}
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device.id
)
assert expected_trigger in triggers
async def test_if_value_updated_value_fires(
hass, client, lock_schlage_be469, integration, calls
):
"""Test for zwave_js.value_updated.value trigger firing."""
node: Node = lock_schlage_be469
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "zwave_js.value_updated.value",
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
"property_key": None,
"endpoint": None,
"from": "open",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"zwave_js.value_updated.value - "
"{{ trigger.platform}} - "
"{{ trigger.previous_value }}"
)
},
},
},
]
},
)
# Publish fake value update that shouldn't trigger
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "insideHandlesCanOpenDoor",
"newValue": [True, False, False, False],
"prevValue": [False, False, False, False],
"propertyName": "insideHandlesCanOpenDoor",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(calls) == 0
# Publish fake value update that should trigger
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "latchStatus",
"newValue": "closed",
"prevValue": "open",
"propertyName": "latchStatus",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(calls) == 1
assert (
calls[0].data["some"]
== "zwave_js.value_updated.value - zwave_js.value_updated - open"
)
async def test_get_trigger_capabilities_value_updated_value(
hass, client, lock_schlage_be469, integration
):
"""Test we get the expected capabilities from a zwave_js.value_updated.value trigger."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "zwave_js.value_updated.value",
"command_class": CommandClass.DOOR_LOCK.value,
"property": "latchStatus",
"property_key": None,
"endpoint": None,
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "command_class",
"required": True,
"type": "select",
"options": [
(133, "Association"),
(128, "Battery"),
(98, "Door Lock"),
(122, "Firmware Update Meta Data"),
(114, "Manufacturer Specific"),
(113, "Notification"),
(152, "Security"),
(99, "User Code"),
(134, "Version"),
],
},
{"name": "property", "required": True, "type": "string"},
{"name": "property_key", "optional": True, "type": "string"},
{"name": "endpoint", "optional": True, "type": "string"},
{"name": "from", "optional": True, "type": "string"},
{"name": "to", "optional": True, "type": "string"},
]
async def test_get_value_updated_config_parameter_triggers(
hass, client, lock_schlage_be469, integration
):
"""Test we get the zwave_js.value_updated.config_parameter trigger from a zwave_js device."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
expected_trigger = {
"platform": "device",
"domain": DOMAIN,
"type": "zwave_js.value_updated.config_parameter",
"device_id": device.id,
"property": 3,
"property_key": None,
"endpoint": 0,
"command_class": CommandClass.CONFIGURATION.value,
"subtype": "3 (Beeper)",
"metadata": {},
}
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device.id
)
assert expected_trigger in triggers
async def test_if_value_updated_config_parameter_fires(
hass, client, lock_schlage_be469, integration, calls
):
"""Test for zwave_js.value_updated.config_parameter trigger firing."""
node: Node = lock_schlage_be469
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "zwave_js.value_updated.config_parameter",
"property": 3,
"property_key": None,
"endpoint": 0,
"command_class": CommandClass.CONFIGURATION.value,
"subtype": "3 (Beeper)",
"from": 255,
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"zwave_js.value_updated.config_parameter - "
"{{ trigger.platform}} - "
"{{ trigger.previous_value_raw }}"
)
},
},
},
]
},
)
# Publish fake value update
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Configuration",
"commandClass": 112,
"endpoint": 0,
"property": 3,
"newValue": 0,
"prevValue": 255,
"propertyName": "Beeper",
},
},
)
node.receive_event(event)
await hass.async_block_till_done()
assert len(calls) == 1
assert (
calls[0].data["some"]
== "zwave_js.value_updated.config_parameter - zwave_js.value_updated - 255"
)
async def test_get_trigger_capabilities_value_updated_config_parameter_range(
hass, client, lock_schlage_be469, integration
):
"""Test we get the expected capabilities from a range zwave_js.value_updated.config_parameter trigger."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "zwave_js.value_updated.config_parameter",
"property": 6,
"property_key": None,
"endpoint": 0,
"command_class": CommandClass.CONFIGURATION.value,
"subtype": "6 (User Slot Status)",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "from",
"optional": True,
"valueMin": 0,
"valueMax": 255,
"type": "integer",
},
{
"name": "to",
"optional": True,
"valueMin": 0,
"valueMax": 255,
"type": "integer",
},
]
async def test_get_trigger_capabilities_value_updated_config_parameter_enumerated(
hass, client, lock_schlage_be469, integration
):
"""Test we get the expected capabilities from an enumerated zwave_js.value_updated.config_parameter trigger."""
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "zwave_js.value_updated.config_parameter",
"property": 3,
"property_key": None,
"endpoint": 0,
"command_class": CommandClass.CONFIGURATION.value,
"subtype": "3 (Beeper)",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "from",
"optional": True,
"options": [(0, "Disable Beeper"), (255, "Enable Beeper")],
"type": "select",
},
{
"name": "to",
"optional": True,
"options": [(0, "Disable Beeper"), (255, "Enable Beeper")],
"type": "select",
},
]
async def test_failure_scenarios(hass, client, hank_binary_switch, integration):
"""Test failure scenarios."""
with pytest.raises(HomeAssistantError):
await device_trigger.async_attach_trigger(
hass, {"type": "failed.test", "device_id": "invalid_device_id"}, None, {}
)
with pytest.raises(HomeAssistantError):
await device_trigger.async_attach_trigger(
hass,
{"type": "event.failed_type", "device_id": "invalid_device_id"},
None,
{},
)
dev_reg = async_get_dev_reg(hass)
device = async_entries_for_config_entry(dev_reg, integration.entry_id)[0]
with pytest.raises(HomeAssistantError):
await device_trigger.async_attach_trigger(
hass, {"type": "failed.test", "device_id": device.id}, None, {}
)
with pytest.raises(HomeAssistantError):
await device_trigger.async_attach_trigger(
hass,
{"type": "event.failed_type", "device_id": device.id},
None,
{},
)
with pytest.raises(HomeAssistantError):
await device_trigger.async_attach_trigger(
hass,
{"type": "state.failed_type", "device_id": device.id},
None,
{},
)
with patch(
"homeassistant.components.zwave_js.device_trigger.async_get_node_from_device_id",
return_value=None,
), patch(
"homeassistant.components.zwave_js.helpers.get_zwave_value_from_config",
return_value=None,
):
assert (
await device_trigger.async_get_trigger_capabilities(
hass, {"type": "failed.test", "device_id": "invalid_device_id"}
)
== {}
)
with pytest.raises(HomeAssistantError):
async_get_node_status_sensor_entity_id(hass, "invalid_device_id")
INVALID_CONFIG = {
"platform": "device",
"domain": DOMAIN,
"device_id": device.id,
"type": "zwave_js.value_updated.value",
"command_class": CommandClass.DOOR_LOCK.value,
"property": 9999,
"property_key": 9999,
"endpoint": 9999,
}
# Test that invalid config raises exception
with pytest.raises(InvalidDeviceAutomationConfig):
await device_trigger.async_validate_trigger_config(hass, INVALID_CONFIG)
# Unload entry so we can verify that validation will pass on an invalid config
# since we return early
await hass.config_entries.async_unload(integration.entry_id)
assert (
await device_trigger.async_validate_trigger_config(hass, INVALID_CONFIG)
== INVALID_CONFIG
)
# Test invalid device ID fails validation
with pytest.raises(InvalidDeviceAutomationConfig):
await device_trigger.async_validate_trigger_config(
hass,
{
"platform": "device",
"domain": DOMAIN,
"device_id": "invalid_device_id",
"type": "zwave_js.value_updated.value",
"command_class": CommandClass.DOOR_LOCK.value,
"property": 9999,
"property_key": 9999,
"endpoint": 9999,
},
)
|
[
"noreply@github.com"
] |
Adminiuga.noreply@github.com
|
21e6b1a0615aaaa0e94aa1e8cc83fc59ff3b5860
|
eb322b48a0d90c88df1f45f8a4514dde4594efa3
|
/ixconfig/__init__.py
|
fd6d162b7347dea5e47fc60e653c4b768e707ee1
|
[
"MIT"
] |
permissive
|
beasteers/ixconfig
|
663f0927d54c2f36a067d71e6fdf21f132cf09a2
|
409c55f78dcd7ca1712afc9c5af79438ffb008c4
|
refs/heads/main
| 2023-01-24T14:34:31.659858
| 2020-12-10T15:31:36
| 2020-12-10T15:31:36
| 305,601,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
from . import core
from .core import *
def main():
import fire
fire.Fire({'if': Ifc, 'iw': Iwc})
|
[
"bsteers@nyu.edu"
] |
bsteers@nyu.edu
|
bdcf740b32e7ee6e7d2836f131c767777523cbed
|
7cc3d58ebad0147dd17dfb9f9ca3aa59b5adcf1e
|
/lib/languages_data.py
|
8122e9e13f56ab0ff8bc709d7db877d69a8f4ee7
|
[
"MIT",
"CC-BY-SA-3.0"
] |
permissive
|
joeyciechanowicz/cheat.sh
|
91bb9ffdebd5dd50a5986bc4dc0abf48de69b53f
|
a4e36befe4564f90ebb42afc75254c13cdc17970
|
refs/heads/master
| 2020-03-22T19:43:45.636694
| 2018-07-19T11:00:16
| 2018-07-19T11:00:16
| 140,546,724
| 0
| 0
|
MIT
| 2018-07-11T08:40:38
| 2018-07-11T08:40:38
| null |
UTF-8
|
Python
| false
| false
| 3,915
|
py
|
"""
Programming languages information.
Will be (probably) moved to a separate file/directory
from the project tree.
"""
import pygments.lexers
LEXER = {
"assembly" : pygments.lexers.NasmLexer,
"awk" : pygments.lexers.AwkLexer,
"bash" : pygments.lexers.BashLexer,
"basic" : pygments.lexers.QBasicLexer,
"bf" : pygments.lexers.BrainfuckLexer,
"chapel" : pygments.lexers.ChapelLexer,
"clojure" : pygments.lexers.ClojureLexer,
"coffee" : pygments.lexers.CoffeeScriptLexer,
"cpp" : pygments.lexers.CppLexer,
"c" : pygments.lexers.CLexer,
"csharp" : pygments.lexers.CSharpLexer,
"d" : pygments.lexers.DLexer,
"dart" : pygments.lexers.DartLexer,
"delphi" : pygments.lexers.DelphiLexer,
"elisp" : pygments.lexers.EmacsLispLexer,
"elixir" : pygments.lexers.ElixirLexer,
"elm" : pygments.lexers.ElmLexer,
"erlang" : pygments.lexers.ErlangLexer,
"factor" : pygments.lexers.FactorLexer,
"forth" : pygments.lexers.ForthLexer,
"fortran" : pygments.lexers.FortranLexer,
"fsharp" : pygments.lexers.FSharpLexer,
"go" : pygments.lexers.GoLexer,
"groovy" : pygments.lexers.GroovyLexer,
"haskell" : pygments.lexers.HaskellLexer,
"java" : pygments.lexers.JavaLexer,
"js" : pygments.lexers.JavascriptLexer,
"julia" : pygments.lexers.JuliaLexer,
"kotlin" : pygments.lexers.KotlinLexer,
"lisp" : pygments.lexers.CommonLispLexer,
"lua" : pygments.lexers.LuaLexer,
"matlab" : pygments.lexers.MatlabLexer,
"mongo" : pygments.lexers.JavascriptLexer,
"objective-c": pygments.lexers.ObjectiveCppLexer,
"ocaml" : pygments.lexers.OcamlLexer,
"perl" : pygments.lexers.PerlLexer,
"perl6" : pygments.lexers.Perl6Lexer,
"php" : pygments.lexers.PhpLexer,
"psql" : pygments.lexers.PostgresLexer,
"python" : pygments.lexers.PythonLexer,
"python3" : pygments.lexers.Python3Lexer,
"r" : pygments.lexers.SLexer,
"racket" : pygments.lexers.RacketLexer,
"ruby" : pygments.lexers.RubyLexer,
"rust" : pygments.lexers.RustLexer,
"scala" : pygments.lexers.ScalaLexer,
"scheme": pygments.lexers.SchemeLexer,
"sql" : pygments.lexers.SqlLexer,
"swift" : pygments.lexers.SwiftLexer,
"tcl" : pygments.lexers.TclLexer,
"tcsh" : pygments.lexers.TcshLexer,
"vb" : pygments.lexers.VbNetLexer,
"vbnet" : pygments.lexers.VbNetLexer,
"vim" : pygments.lexers.VimLexer,
# experimental
"arduino": pygments.lexers.ArduinoLexer,
"pike" : pygments.lexers.PikeLexer,
"eiffel" : pygments.lexers.EiffelLexer,
"clean" : pygments.lexers.CleanLexer,
"dylan" : pygments.lexers.DylanLexer,
}
# canonical names are on the right side
LANGUAGE_ALIAS = {
'asm' : 'assembly',
'assembler' : 'assembly',
'c++' : 'cpp',
'c#' : 'csharp',
'clisp' : 'lisp',
'coffeescript': 'coffee',
'cplusplus' : 'cpp',
'dlang' : 'd',
'f#' : 'fsharp',
'golang' : 'go',
'javascript': 'js',
'objc' : 'objective-c',
'p6' : 'perl6',
'sh' : 'bash',
'visualbasic': 'vb',
'vba' : 'vb',
}
VIM_NAME = {
'assembly' : 'asm',
'bash' : 'sh',
'coffeescript': 'coffee',
'csharp' : 'cs',
'delphi' : 'pascal',
'dlang' : 'd',
'elisp' : 'newlisp',
'forth' : 'fs',
'perl6' : 'perl',
'python3' : 'python',
'python-3.x': 'python',
'tcsh' : 'sh',
}
SO_NAME = {
'coffee' : 'coffeescript',
'js' : 'javascript',
'python3' : 'python-3.x',
'vb' : 'vba',
}
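# Illustrative helper (not part of the original module): resolve a user-supplied
# language name to its canonical form and pygments lexer using the tables above.
def get_lexer(name):
    canonical = LANGUAGE_ALIAS.get(name.lower(), name.lower())
    return canonical, LEXER.get(canonical)
# e.g. get_lexer("golang") -> ("go", pygments.lexers.GoLexer)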
|
[
"igor@chub.in"
] |
igor@chub.in
|
e69c7c56a2816712be905d300f01d8acca2158d8
|
0afda31fb39f55136e4e08535aa65358ba757887
|
/graph.py
|
a300c904eb22466a42f85202e8f2c51a8c529a59
|
[] |
no_license
|
Mdmetelus/Python_Projects
|
1a56714b783f4f8b960f6b897a925a1b4645a7a6
|
af76ba0169ddfbead67854f45b7ab9bf7296f67c
|
refs/heads/master
| 2021-06-28T04:25:29.104426
| 2020-09-27T04:00:12
| 2020-09-27T04:00:12
| 139,921,304
| 0
| 0
| null | 2019-04-11T19:09:51
| 2018-07-06T02:05:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,220
|
py
|
class Graph:
def __init__(self):
        self.vertices = {}
def add_vertex(self, vertex_id):
self.vertices[vertex_id] = set()
def add_edge(self, v1, v2):
if v1 in self.vertices and v2 in self.vertices:
self.vertices[v1].add(v2)
self.vertices[v2].add(v1)
        else:
            raise IndexError("That vertex does not exist.")
# could also print the response
def add_directed_edge(self, v1, v2):
if v1 in self.vertices and v2 in self.vertices:
self.vertices[v1].add(v2)
        else:
            raise IndexError("That vertex does not exist.")
def debthFirstSearch(adjList, node_id):
print(node_id)
for child_node in adjList[node_id]:
debthFirstSearch(adjList, child_node) # recursive call
def debthFirstSearchVisted(adjList, node_id, visited):
    # time complexity is O(n**2) = quadratic
    # because for each iteration (n), you also scan the visited list (n) = n^2
    # space complexity is O(n) for the visited list and the recursion stack
print(node_id)
visited.append(node_id)
for child_node in adjList[node_id]:
if child_node not in visited:
debthFirstSearchVisted(adjList, child_node, visited) # recursive call
def dft(adjList, node_id):
    # time complexity is O(n) = linear
    # space complexity is O(n) for the recursion stack
    # assumes a module-level `nodes` mapping of node_id -> node objects with a `color` attribute
    nodes[node_id].color = "black"
    # print(node_id)
    # visited.append(node_id)
    for child_node in adjList[node_id]:
        if nodes[child_node].color == "white":
            dft(adjList, child_node)  # recursive call
def breathFirstSearch(adjList, node_id):
print(node_id)
frontier = []
frontier.append(node_id)
while len(frontier) > 0:
        n = frontier.pop(0)  # pop from the front so the traversal is breadth-first
print(n)
for next_node in adjList[n]:
frontier.append(next_node)
def breathFirstSearchVisited(adjList, node_id):
    # this traversal will be O(n) = linear implementation
print(node_id)
frontier = []
frontier.append(node_id)
    visited = []  # tracks visited nodes so a cyclic graph doesn't cause an infinite loop
while len(frontier) > 0:
        n = frontier.pop(0)  # pop from the front so the traversal is breadth-first
        if n not in visited:
print(n)
visited.append(n)
for next_node in adjList[n]:
frontier.append(next_node)
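# Small usage sketch (added for illustration; assumes the traversal helpers
# above are module-level functions, as their adjList-first signatures suggest).
if __name__ == "__main__":
    g = Graph()
    for v in ("A", "B", "C", "D"):
        g.add_vertex(v)
    g.add_edge("A", "B")
    g.add_edge("A", "C")
    g.add_edge("C", "D")
    debthFirstSearchVisted(g.vertices, "A", [])   # prints every vertex reachable from A, depth-first
    breathFirstSearchVisited(g.vertices, "A")     # prints the vertices reachable from A, breadth-first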
|
[
"Mdmetelus@gmail.com"
] |
Mdmetelus@gmail.com
|
5d7ccd4bdcee0bf922a24159788bbb6dd03c00e2
|
200a42bf821dd9fa73c52e70616d17435777248e
|
/read a file from user and open the file and read the data from it.py
|
accdaecd259c3f405f1c8a8befb5279e61c395fd
|
[] |
no_license
|
ruhisha/CORE-PYTHON-EXAMPLES-INCLUDING-FILEHANDLING-CONCEPT
|
b962dc58360691a617b5df8ff532501a747124f0
|
f7373375cb61affcbc925afbe9e6620c62251b9f
|
refs/heads/master
| 2020-04-08T15:28:52.238561
| 2018-11-28T13:49:54
| 2018-11-28T13:49:54
| 159,480,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
import os.path as pa
fname=input("enter file name with extension:")
bo=pa.exists(fname)
if bo:
print(open(fname).read())
else:
print("file is not found")
|
[
"skruhisha@gmail.com"
] |
skruhisha@gmail.com
|
d087122325df570b0b129171f8a95a286a4a4c05
|
00e0a2026321e93d52a3fce021d164d09b4fa0dc
|
/day08/main.py
|
129199cf82ff21dbe8225c17b13cba25e45138d0
|
[] |
no_license
|
reinhillmann/adventofcode2020
|
e070be617070e6f545194a82a6ad5c342fcce997
|
34df5f3bb642e88d641e533ac1ce79021742d4d9
|
refs/heads/main
| 2023-01-30T05:27:33.287130
| 2020-12-09T05:38:57
| 2020-12-09T05:38:57
| 317,636,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,990
|
py
|
#!/usr/bin/python3
import re
def load_data(filename):
with open(filename, 'r') as f:
return [l.rstrip() for l in f.readlines()]
def load_instructions(data):
instructions = []
for row in data:
op, val = re.match(r"^(jmp|acc|nop) ([\+|\-]?\d+)$", row).groups()
val = int(val)
instructions.append({"op": op, "val": val, "count": 0})
return instructions
def find_cycle():
instructions = load_instructions(data)
acc = 0
ip = 0
while (ip < len(instructions)):
instruction = instructions[ip]
val = instruction["val"]
op = instruction["op"]
count = instruction["count"]
if count > 0:
return acc
instruction["count"] += 1
if op == "jmp":
ip += val
continue
elif op == "acc":
acc += val
ip += 1
def fix_cycle(change_ip):
instructions = load_instructions(data)
acc = 0
ip = 0
while (ip < len(instructions)):
instruction = instructions[ip]
val = instruction["val"]
op = instruction["op"]
count = instruction["count"]
if count > 0:
# There's a cycle, return None.
return None
instruction["count"] += 1
if op == "jmp":
if ip == change_ip:
instruction["op"] = "nop"
ip += 1
else:
ip += val
continue
elif op == "acc":
acc += val
ip += 1
return acc
if __name__ == "__main__":
data = load_data('data.txt')
instructions = load_instructions(data)
# Part 1
print(find_cycle())
# Part 2
# This is probably really, really suboptimal.
# It just keeps rerunning the instructions from scratch, each time
# modifying one jmp to nop.
jmp_fix = 0
result = fix_cycle(jmp_fix)
    while result is None:
jmp_fix += 1
result = fix_cycle(jmp_fix)
print(result)
|
[
"reinouth@microsoft.com"
] |
reinouth@microsoft.com
|
8a24060c2eeb46778edbb0fe5e174b1a9b000c9c
|
616a72dba3669836cd6e497cc6b83ee9a774eab4
|
/verilog_langserver/verilog_parser/diagnosis.py
|
261cae5dc4d420b9f4b5c20ac9a4de6630d25e7f
|
[
"MIT"
] |
permissive
|
eirikpre/verilog_langserver
|
dbc6773cf02c2f16eb889b974b83063ac8160f8c
|
e18545b139e40fe935bad430daf43e70553003a4
|
refs/heads/master
| 2022-07-23T16:00:53.375751
| 2020-05-21T09:07:19
| 2020-05-21T09:07:19
| 252,670,183
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
from antlr4 import Token, DiagnosticErrorListener, FileStream, CommonTokenStream
from antlr4.error.ErrorListener import ErrorListener
from pygls.types import Diagnostic, Range, DiagnosticSeverity, Position, Location
from .antlr_build.diagnosis.SystemVerilogLexer import SystemVerilogLexer as DiagnosisLexer
from .antlr_build.diagnosis.SystemVerilogParser import SystemVerilogParser as DiagnosisParser
# Diagnosis is extremely slow at the moment;
# the ANTLR diagnostic tool should be used to
# figure out why.
def parse(self, fname: str):
input_stream = FileStream(fname)
lexer = DiagnosisLexer(input_stream)
stream = CommonTokenStream(lexer)
parser = DiagnosisParser(stream)
listener = DiagnosisListener()
parser.addErrorListener(listener)
tree = parser.system_verilog_text()
return listener.errors
class DiagnosisListener(DiagnosticErrorListener):
def __init__(self):
self.errors: [Diagnostic] = []
super().__init__()
def syntaxError(self, recognizer, offendingSymbol: Token, line, column, msg, e):
err = Diagnostic(
range=Range(Position(line, column), Position(line, column+len(offendingSymbol.text))),
message=msg
)
self.errors.append(err)
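# Minimal usage sketch (illustrative; `parse` keeps an unused `self` parameter,
# so any placeholder can be passed for it):
if __name__ == "__main__":
    import sys
    for diag in parse(None, sys.argv[1]):
        print(diag.range.start.line, diag.message)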
|
[
"eirikpre@gmail.com"
] |
eirikpre@gmail.com
|
69f0bb3ed8a7a327200f6c8cc66a7f776ba19932
|
c85657463ea896571f28aaead3825b52baf199bd
|
/test/darkspotACstark.py
|
a8f9cfa258e0b78986a9b2bccb3cbd92723945eb
|
[] |
no_license
|
jasonhogan/sti
|
ffc9469557bf1480aae4004321d261bb5473e237
|
928428767b8c4131fa342dbbc2bf2e5cbea7340a
|
refs/heads/master
| 2021-07-12T17:55:26.696064
| 2020-08-10T20:56:56
| 2020-08-10T20:56:56
| 19,015,405
| 1
| 1
| null | 2018-04-04T03:14:44
| 2014-04-22T03:09:49
|
C++
|
UTF-8
|
Python
| false
| false
| 6,592
|
py
|
from stipy import *
ns = 1.0
us = 1000.0
ms = 1000000.0
s = 1000000000.0
# Set description used by program
setvar('desc','''Turn off 1530 light immediately before imaging.''')
setvar('1530 freq',1529.367)
setvar('driftTime', 1*ms)
setvar('motLoadTime', 250)
setvar('holdoff1530', 3)
setvar('voltage1530', 0.87)
#setvar('voltage1530off', 0.87)
digitalOut=dev('Digital Out','ep-timing1.stanford.edu',2)
slowAnalogOut=dev('Slow Analog Out', 'ep-timing1.stanford.edu', 4)
fastAnalogOut = dev('Fast Analog Out', 'ep-timing1.stanford.edu', 1)
fastAnalogOut6 = dev('Fast Analog Out', 'ep-timing1.stanford.edu', 6)
trigger = dev('FPGA_Trigger', 'ep-timing1.stanford.edu', 8)
dds = dev('DDS', 'ep-timing1.stanford.edu', 0)
vco0=dev('ADF4360-0', 'ep-timing1.stanford.edu', 0)
vco1=dev('ADF4360-5', 'ep-timing1.stanford.edu', 1)
vco2=dev('ADF4360-5', 'ep-timing1.stanford.edu', 2)
camera=dev('Andor iXon 885','ep-timing1.stanford.edu',0)
wavemeter=dev('AndoAQ6140', 'eplittletable.stanford.edu',7)
#setvar('signal0', ch(fastAnalogOut, 0)) # The only input channel right now
shutter = ch(digitalOut,3)
motBlowAway = ch(digitalOut,2)
#cameraTrigger=ch(digitalOut,0)
takeImage=ch(camera,0)
TA2 = ch(fastAnalogOut, 0)
TA3 = ch(fastAnalogOut, 1)
quadCoil = ch(fastAnalogOut, 1)
current1530 = ch(fastAnalogOut6,0)
aomSwitch0 = ch(dds, 0)
#repumpVCO=dev('ADF4360-0', 'eplittletable.stanford.edu', 0)
#coolingVCO=dev('ADF4360-6', 'eplittletable.stanford.edu', 3)
wavelength1530=ch(wavemeter, 0)
power1530 = ch(wavemeter, 1)
#testDevice = ch(slowAnalogOut, 0)
# Define different blocks of the experiment
def MOT(Start):
## Camera Settings ##
dtCameraShutter = 0*ms
dtCameraPulseWidth = 1000*us + dtCameraShutter
dtCameraDelay = 5*us
expTime = 100*us
## 1530 Shutter Settings ##
dtShutterOpenHoldOff = 2.04*ms
#Initialization Settings
tStart =1.1*s +dtShutterOpenHoldOff
## throwaway image settings ##
tThrowaway = tStart
filename1 = 'throwaway image'
description1 = 'throwaway image'
#AOM settings
# absorptionFreq = 1067
# aomFreq0 = absorptionFreq / 8
aomFreq0 = 110
aomAmplitude0 = 100
aomHoldOff = 10*us
## TA Settings ##
voltageTA2 = 1.4
voltageTA3 = 1.5
tTAOn = tStart + 100*ms
dtMOTLoad = motLoadTime*ms
tTAOff = tTAOn + dtMOTLoad
## Quad Coil Settings ##
quadCoilVoltage = 3.01
## 1530 current settings ##
# voltage1530 = 0.88
## Imaging Settings ##
dtDriftTime = driftTime
dtAbsorbtionLight = 50*us
tAbsorptionImage = tTAOff + dtDriftTime - dtCameraShutter
tAomOn = tTAOff + dtDriftTime - aomHoldOff
tQuadCoilOff = tTAOff
tAbsorptionCamera = tAbsorptionImage - dtCameraDelay
filename = 'absorption image'
description2 = 'absorption image'
## Calibration Absorbtion Image Settings ##
dtDeadMOT = 100*ms
tCalibrationImage = tAbsorptionImage + dtDeadMOT
tCalibrationCamera = tCalibrationImage - dtCameraDelay
tAomCalibration = tCalibrationImage - aomHoldOff
description3 = 'calibration image'
## Dark background imaging settings ##
dtWait = 100*ms
tDarkBackground = tCalibrationImage + dtWait
description4 = 'background image'
## 1530 Shutter Settings ##
t1530Off = tTAOff
dtShutterCloseHoldOff = 2.055*ms
tShutterOpen = tStart - dtShutterOpenHoldOff
tShutterClose = t1530Off - dtShutterCloseHoldOff
## End of Sequence Settings ##
# tQuadCoilEndOfSequence = tDarkBackground + tWait
# t1530EndOfSequence = tDarkBackground + 2*tWait - dtShutterCloseHoldOff
tTAEndOfSequence = tDarkBackground +2*ms
#################### events #######################
event(ch(trigger, 0), 10*us, "Stop" )
event(ch(trigger, 0), 30*us, "Play" )
meas(takeImage, tThrowaway, (expTime,description1),'picture') #take throwaway image
event(TA2, tStart, 0) # TA off MOT dark to kill any residual MOT
event(TA3, tStart, 0) # TA off
# event(current1530, tStart, voltage1530) #1530 light on
event(aomSwitch0,tStart, (aomFreq0, 0 ,0)) # AOM is off, so no imaging light
event(motBlowAway, tStart, 0) #set cooling light to 10 MHz detuned via RF switch
# event(shutter,tStart - dtShutterOpenHoldOff, 1)
meas(wavelength1530, 0*s)
meas(power1530,1*s)
## Load the MOT ##
event(TA2, tTAOn, voltageTA2) # TA on
event(TA3, tTAOn, voltageTA3) # TA on
event(TA2, tTAOff, 0) # TA off
event(TA3, tTAOff, 0) # TA off
# ## blast the mot ##
# event(aomSwitch0, tTAOff, (aomFreq0, aomAmplitude0, 0)) #turn on absorbtion light
# event(aomSwitch0, tTAOff + 2500*us, (aomFreq0, 0, 0)) #turn off absorbtion light
# event(motBlowAway, tTAOff - 400*us, 1) #switch to on resonance light
# event(motBlowAway, tTAOff, 0) #switch back to detuned cooling light
##Turn off 1530 in preparation for imaging##
# event(shutter, tAomOn - holdoff1530*us- dtShutterOpenHoldOff, 0)
## Take an absorbtion image ##
event(aomSwitch0, tAomOn, (aomFreq0, aomAmplitude0, 0)) #turn on absorbtion light
event(aomSwitch0, tAomOn + dtAbsorbtionLight, (aomFreq0, 0, 0)) #turn off absorbtion light
meas(takeImage, tAbsorptionCamera, (expTime, description2, filename)) #take absorption image
    ## Take an absorption calibration image after the MOT has decayed away ##
event(aomSwitch0, tAomCalibration, (aomFreq0, aomAmplitude0, 0)) #turn on absorbtion light
event(aomSwitch0, tAomCalibration + dtAbsorbtionLight, (aomFreq0, 0, 0)) #turn off absorbtion light
meas(takeImage, tCalibrationCamera, (expTime,description3,filename)) #take absorption image
## Take a dark background image ##
meas(takeImage, tDarkBackground, (expTime,description4,filename)) #take absorption image
event(TA2, tTAEndOfSequence + 1*s, voltageTA2)
event(TA3, tTAEndOfSequence + 1*s, voltageTA3)
event(current1530, tTAEndOfSequence + 1*s, voltage1530)
# event(aomSwitch0, tTAEndOfSequence, (aomFreq0, aomAmplitude0, 0)) #turn on absorbtion light
# event(current1530, t1530EndOfSequence, voltage1530)
# event(quadCoil, tQuadCoilEndOfSequence, quadCoilVoltage)
# event(ch(repumpVCO, 1), tTAEndOfSequence, "-6 dBm")
# event(ch(repumpVCO, 0), tTAEndOfSequence + 10*ms, 2562)
# event(shutter, tTAEndOfSequence, 1) #1530 shutter open
return Start
# Global definitions
t0 = 10*us
time = t0
time = MOT(time)
|
[
"EP@8bcac300-4d60-4aec-84ba-b7489f70e69b"
] |
EP@8bcac300-4d60-4aec-84ba-b7489f70e69b
|
9134694ed000ce43fa25de8d4576de5cd0c979bc
|
c30cdd2520c1966c3571e36bd266e447148cfb59
|
/venv/Scripts/pip3.6-script.py
|
3a66f7ef02ee71c72753fa0ccab16c8158e0daea
|
[] |
no_license
|
akuma368/Doubt
|
c160eb9cb98626699b547bc15b51f28267280e04
|
77fe1630094deb0dd4b76e6adf123fe5f34a2fef
|
refs/heads/master
| 2023-04-10T01:03:41.397563
| 2021-04-21T14:22:35
| 2021-04-21T14:22:35
| 359,562,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
#!"C:\Users\Asmita Singh\PycharmProjects\Practice project\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
)
|
[
"asmitakumari777@gmail.com"
] |
asmitakumari777@gmail.com
|
cfc9fccde69b80cc85130c0b21d587f8334a7d7f
|
7c3dbee0cbac0413d8b0428a601f5dbece36e816
|
/tests/test_FileOpeners.py
|
b3238f3e0a7ec22e4dd8df0f14291f74495bf3d7
|
[
"MIT"
] |
permissive
|
mglezsosa/M2AlignDataExtractor
|
d1035ea6c593b990ffe3e13bc2554b6e8d06439b
|
0643a91227df8d4be03d4359ef06181b780a2802
|
refs/heads/master
| 2020-03-21T10:49:10.151013
| 2018-06-05T09:25:21
| 2018-06-05T09:25:21
| 138,472,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,840
|
py
|
import unittest
from m2aligndataextractor.fileopeners import VarFile, FunFile
class FileOpenersTest(unittest.TestCase):
def test_should_varfile_return_an_iterator_that_yields_aligned_sequences(self):
with VarFile('./VAR.BB11001.tsv') as f:
alignments = []
for alignment in f:
alignments.append(alignment)
self.assertEqual([
""">1aab_
---GKGDPKKPRGKMSSYAFFVQTSREEHKKKHPDASVNFSEFSKKCSERWKTMSAKEKGKFEDMAKADKARYEREMKTY----------IPPKGE
>1j46_A
---MQDRVKRP---MNAFIVWSRDQRRKMALENP--RMRNSEISKQLGYQWKMLTEAEKWPFFQEAQKLQAMHREKYPNYKYRP---RRKAKMLPK
>1k99_A
MKKLKKHPDFPKKPLTPYFRFFMEKRAKYAKLHP--EMSNLDLTKILSKKYKELPEKKKMKYIQDFQREKQEFERNLARFREDH---PDLIQNAKK
>2lef_A
-----MHIKKP---LNAFMLYMKEMRANVVAEST--LKESAAINQILGRRWHALSREEQAKYYELARKERQLHMQLYPGWSARDNYGKKKKRKREK
""",
""">1aab_
------GKGDPKKPRGKMSSYAFFVQTSREEHKKKHPDASVNFSEFSKKCSERWKTMSAKEKGKFEDMAKADKARYEREMKTY----------IPPKGE
>1j46_A
------MQDRVKRP---MNAFIVWSRDQRRKMALENPR--MRNSEISKQLGYQWKMLTEAEKWPFFQEAQKLQAMHREKYPNYKYRP---RRKAKMLPK
>1k99_A
MKKLKKHPDFPKKP---LTPYFRFFMEKRAKYAKLHPE--MSNLDLTKILSKKYKELPEKKKMKYIQDFQREKQEFERNLARFREDH---PDLIQNAKK
>2lef_A
--------MHIKKP---LNAFMLYMKEMRANVVAESTLKESAAINQILGRRWHALSREEQAKYYELARKERQLHMQLYPGWSARDNYGKK--KKRKREK
""",
""">1aab_
GK---GDPKKPRGKMSSYAFFVQTSREEHKKKHPDASVNFSEFSKKCSERWKTMSAKEKGKFEDMAKADKARYEREMKTY--------IPPKGE
>1j46_A
MQ------DRVKRPMNAFIVWSRDQRRKMALENPR--MRNSEISKQLGYQWKMLTEAEKWPFFQEAQKLQAMHREKYPNYKYRP-RRKAKMLPK
>1k99_A
MKKLKKHPDFPKKPLTPYFRFFMEKRAKYAKLHPE--MSNLDLTKILSKKYKELPEKKKMKYIQDFQREKQEFERNLARFREDH-PDLIQNAKK
>2lef_A
MHIKKPLNAFMLYMKEMRANVVAESTL--KESAAINQILGRRWHALSREEQAKYYELARKERQLHMQLYPGWSARDNYGKKKKRK------REK
""",
""">1aab_
---GKGDPKKPRGKMSSYAFFVQTSREEHKKKHPDASVNFSEFSKKCSERWKTMSAKEKGKFEDMAKADKARYEREMKTY-IPP---------KGE
>1j46_A
------MQDRVKRPMNAFIVWSRDQRRKMALENPRMR-NS-EISKQLGYQWKMLTEAEKWPFFQEAQKLQAMHREKYPNYKYRP---RRKAKMLPK
>1k99_A
MKKLKKHPDFPKKPLTPYFRFFMEKRAKYAKLHPEMS-NL-DLTKILSKKYKELPEKKKMKYIQDFQREKQEFERNLARFREDH---PDLIQNAKK
>2lef_A
--------MHIKKPLNAFMLYMKEMRANVVAES-TLK-ESAAINQILGRRWHALSREEQAKYYELARKERQLHMQLYPGWSARDNYGKKKKRKREK
""",
""">1aab_
------GKGDPKKPRGKMSSYAFFVQTSREEHKKKHPDASVNFSEFSKKCSERWKTMSAKEKGKFEDMAKADKARYEREMKTYI------PPKGE----
>1j46_A
------MQDRVKRP---MNAFIVWSRDQRRKMALENPRMR-NS-EISKQLGYQWKMLTEAEKWPFFQEAQKLQAMHREKYPNYKYRP---RRKAKMLPK
>1k99_A
MKKLKKHPDFPKKP---LTPYFRFFMEKRAKYAKLHPEMS-NL-DLTKILSKKYKELPEKKKMKYIQDFQREKQEFERNLARFREDH---PDLIQNAKK
>2lef_A
--------MHIKKP---LNAFMLYMKEMRANVVAES-TLK-ESAAINQILGRRWHALSREEQAKYYELARKERQLHMQLYPGWSARDNYGKKKKRKREK
""",
""">1aab_
---GKGDPKKPRGKMSSYAFFVQTSREEHKKKHPDASVNFSEFSKKCSERWKTMSAKEKGKFEDMAKADKARYEREMKTY----------IPPKGE
>1j46_A
---MQDRVKRP---MNAFIVWSRDQRRKMALENP--RMRNSEISKQLGYQWKMLTEAEKWPFFQEAQKLQAMHREKYPNYKYRP---RRKAKMLPK
>1k99_A
MKKLKKHPDFPKKPLTPYFRFFMEKRAKYAKLHP--EMSNLDLTKILSKKYKELPEKKKMKYIQDFQREKQEFERNLARFREDH---PDLIQNAKK
>2lef_A
-----MHIKKP---LNAFMLYMKEMRANVVAEST--LKESAAINQILGRRWHALSREEQAKYYELARKERQLHMQLYPGWSARDNYGKKKKRKREK
""",
""">1aab_
---GKGDPKKPRGKMSSYAFFVQTSREEHKKKHPDASVNFSEFSKKCSERWKTMSAKEKGKFEDMAKADKARYEREMKTY----------IPPKGE
>1j46_A
---MQDRVKRP---MNAFIVWSRDQRRKMALENP--RMRNSEISKQLGYQWKMLTEAEKWPFFQEAQKLQAMHREKYPNYKYRP---RRKAKMLPK
>1k99_A
MKKLKKHPDFPKKPLTPYFRFFMEKRAKYAKLHP--EMSNLDLTKILSKKYKELPEKKKMKYIQDFQREKQEFERNLARFREDH---PDLIQNAKK
>2lef_A
-----MHIKKP---LNAFMLYMKEMRANVVAEST--LKESAAINQILGRRWHALSREEQAKYYELARKERQLHMQLYPGWSARDNYGKKKKRKREK
""",
""">1aab_
------GKGDPKKPRGKMSSYAFFVQTSREEHKKKHPDASVNFSEFSKKCSERWKTMSAKEKGKFEDMAKADKARYEREMKTY---------IP-----PKG-E
>1j46_A
------MQDRVKRP---MNAFIVWSRDQRRKMALENPRMR-NS-EISKQLGYQWKMLTEAEKWPFFQEAQKLQAMHREKYPNYKYRP--------RRKAKMLPK
>1k99_A
MKKLKKHPDFPKKP---LTPYFRFFMEKRAKYAKLHPEMS-NL-DLTKILSKKYKELPEKKKMKYIQDFQREKQEFERNLARFREDH--PDLI------QNAKK
>2lef_A
--------MHIKKP---LNAFMLYMKEMRANVVAES-TLK-ESAAINQILGRRWHALSREEQAKYYELARKERQLHMQLYPGWSARDNY-----GKKKKRKREK
"""
], alignments)
def test_should_funfile_return_an_iterator_that_yields_values_of_pareto_front(self):
with FunFile('./FUN.BB11001.tsv') as f:
values = []
for value in f:
values.append(value)
self.assertEqual([
[2.5599480176450244, 2.0833333333333335, 89.84375],
[1.1887225428300559, 4.040404040404041, 87.12121212121212],
[1.3629549300037533, 0.0, 91.75531914893618],
[2.6123489833194524, 1.0416666666666667, 89.84375],
[2.5471497755269077, 3.0303030303030303, 87.12121212121212],
[2.5599480176450244, 2.0833333333333335, 89.84375],
[2.5599480176450244, 2.0833333333333335, 89.84375],
[2.5550908331047784, 2.8846153846153846, 82.9326923076923]
], values)
|
[
"sosa@uma.es"
] |
sosa@uma.es
|
6b12c5bac1518aa6cd0103533324f7cdc730e501
|
74369c12ba70489a81180da357ce79bfa7ed5b29
|
/rendu/resources/code/back/parse/parse_entries.py
|
e44ef39c0c3eb062e7d6bc0f99af45dd1147cda6
|
[] |
no_license
|
MaximePerrinLivenais/DEVI
|
40282e44ae5bc3be67c72ec75aae993cb7ec2fd5
|
296aa962b4d90af49ad7614ce03bf8f5febaf3e4
|
refs/heads/master
| 2023-03-23T03:01:36.852104
| 2021-03-19T22:02:35
| 2021-03-19T22:02:35
| 344,510,980
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,085
|
py
|
#!/usr/bin/env python3
# import re
import regex as re
# import regex as re
import json
import collections.abc
import csv
import argparse
import concurrent.futures
import enchant
import enchant.tokenize
from pprint import pprint
from operator import attrgetter
import logging
from logging import debug, info, warning, error
logging.basicConfig(level=logging.DEBUG, format="%(levelname)s: %(message)s")
def red(s, colored=True):
if colored:
return "\x1b[31m{}\x1b[0m".format(s)
return s
def green(s, colored=True):
if colored:
return "\x1b[32m{}\x1b[0m".format(s)
return s
def white(s, colored=True):
return s
def repr_entry(entry, colored=False):
lines = []
def print_named_iterable(name):
iterable = entry[name]
if len(iterable) == 1:
lines.append("{:>20}: {}".format(name, green(list(iterable)[0], colored)))
elif isinstance(iterable, str):
lines.append("{:>20}: {}".format(name, green(iterable, colored)))
elif len(iterable) > 1:
lines.append("{:>20}:".format(name))
for item in iterable:
lines.append("{}{}".format(" " * 22, green(item, colored)))
print_named_iterable("name")
print_named_iterable("street_number")
print_named_iterable("street")
if len(entry["name"]) == 0 or len(entry["street"]) == 0:
lines.append("{:>20}: {}".format("raw", red(repr(entry["raw"]), colored)))
else:
lines.append("{:>20}: {}".format("raw", green(repr(entry["raw"]), colored)))
lines.append("")
return "\n".join(lines)
def print_entry(entry, colored=True):
# print(repr_entry(entry, colored))
# if len(entry["name"]) == 0 or len(entry["street"]) == 0:
# pprint(entry)
# print("{name}|{street_number}|{street}".format(**entry))
s = "{grade:<6} {name}, {street}, {street_number}.".format(**entry)
print(entry)
if entry["perfect_match"]:
print(green(s, colored))
else:
if entry["grade"] == -1:
print(red(s, colored))
else:
print(white(s, colored))
# print()
def preprocess_line(line):
line = re.sub("@NOTE ", "", line)
line = " ".join(line.split("|"))
line = " ".join(re.split(" +", line))
return line
def process_name(raw):
raw = re.sub("@NOTE ", "", raw)
m = re.search(r"^(?P<name>.*?)((,)|(\. ))", raw)
if m:
return m.group("name")
return raw
dictionary = enchant.Dict("fr")
def join_lines(lines, street_words):
if len(lines) < 2:
return lines
tokenizer = enchant.tokenize.basic_tokenize
i = 0
while i < len(lines) - 1:
word = list(tokenizer(lines[i]))[-1][0]
word += list(tokenizer(lines[i + 1]))[0][0]
if dictionary.check(word) or word.lower() in street_words:
lines[i + 1] = lines[i] + lines[i + 1]
del lines[i]
else:
i += 1
return lines
def is_street_number(v):
return True
def is_street(v):
if len(list(enchant.tokenize.basic_tokenize(v))) > 10:
return False
return True
class MyJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, set):
return list(o)
return json.JSONEncoder.default(self, o)
def compile_regex(args):
street_type_regex = set()
with open(args.streets_type, "r") as fh:
street_type_raw = fh.read()
for t in re.split(";", street_type_raw):
t = t.strip()
if len(t) > 0:
t = t.replace(".", "\.")
street_type_regex.add("({})".format(t))
street_type_regex.add("({})".format(t.replace("\.", "")))
street_type_regex = "(" + "|".join(street_type_regex) + ")"
street_name_regex = set()
with open(args.streets_name, "r") as fh:
street_name_raw = fh.read()
for n in street_name_raw.split("\n"):
n = " ".join(n.strip().lower().split(" ")[1:])
n = n.replace(".", "\.")
street_name_regex.add("({})".format(n))
street_name_regex = "(" + "|".join(street_name_regex) + ")"
pre_street_res = []
pre_street_res.append([0, r", ?"])
pre_street_res.append([1, r" ?"])
pre_street_res.append([2, r"^.*?, "])
pre_street_res.append([3, r"^.*?\. "])
pre_street_res.append([4, r"^.*?,"])
pre_street_res.append([5, r"^.*?\."])
pre_street_res.append([6, r" "])
street_name_res = []
street_name_res.append(
[0, r"(?P<street>" + street_type_regex + " " + street_name_regex + ")"]
)
street_name_res.append(
[10, r"(?P<street>" + street_type_regex + " " + "[^,;()]+?" + ")"]
)
street_name_res.append([20, r"(?P<street>" + "[^,;()]+?" + ")"])
post_street_res = []
post_street_res.append([0, r", "])
post_street_res.append([1, r","])
post_street_res.append([1, r"[;.] "])
post_street_res.append([2, r"[;.]"])
post_street_res.append([4, r" "])
street_number_res = []
street_number_res.append(
[
0,
r"(?P<street_number>\d+(?: (?:bis)|(?:ter))?(?: et \d+(?: (?:bis)|(?:ter))?)?)(?: \([^\)]*\))?",
]
)
post_street_number_res = []
post_street_number_res.append([0, r"[.,;]"])
post_street_number_res.append([100, r""])
regex = []
for pre_street_re in pre_street_res:
for street_name_re in street_name_res:
for post_street_re in post_street_res:
for street_number_re in street_number_res:
for post_street_number_re in post_street_number_res:
grade = (
pre_street_re[0]
+ street_name_re[0]
+ post_street_re[0]
+ street_number_re[0]
+ post_street_number_re[0]
)
composed_re = (
pre_street_re[1]
+ street_name_re[1]
+ post_street_re[1]
+ street_number_re[1]
+ post_street_number_re[1]
)
regex.append([grade, composed_re])
regex_compiled = []
with concurrent.futures.ProcessPoolExecutor(max_workers=12) as executor:
future_to_re = {executor.submit(re.compile, x[1], re.I): x for x in regex}
for future in concurrent.futures.as_completed(future_to_re):
x = future_to_re[future]
regex_compiled.append([x[0], future.result()])
regex = regex_compiled
regex.append([-1, re.compile(r"^(?P<name>[^\(]*?(\([^\)]*\))?.*?)((,)|(\. ))")])
regex = sorted(regex, key=lambda x: x[0])
print("{} regex initialized".format(len(regex)))
return regex
def load_street_words(args):
street_words = set()
tokenizer = enchant.tokenize.basic_tokenize
with open(args.streets_name, "r") as fh:
street_name_raw = fh.read()
for word, size in tokenizer(street_name_raw):
street_words.add(word.lower())
return street_words
def process_entry(raw_entry, regex, street_words):
entry = {
"lines": set(),
"name": "",
"street": "",
"street_number": "",
"grade": -1,
"perfect_match": False,
}
entry["raw"] = raw_entry
split_entry = entry["raw"].split("-|")
join_lines(split_entry, street_words)
lines_count = 2 ** (len(split_entry) - 1)
# when there is a leading '-' on a line we duplicate the line
# the result is 2**n lines with n the number of leading '-'
for i in range(lines_count):
line = ""
for j, part in enumerate(split_entry):
if i & 2 ** j > 0:
line += part + "-"
else:
line += part
entry["lines"].add(preprocess_line(line))
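    # Example: an entry whose raw text contains one "-|" line break yields
    # lines_count = 2, so both the hyphen-kept variant ("St-Denis") and the
    # hyphen-dropped variant ("StDenis") end up in entry["lines"] for the
    # regex pass below.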
# search for street and street number
for line in entry["lines"]:
# print(line)
removed_name = False
for i, r in regex:
if not removed_name and len(entry["name"]) > 0:
line = line.replace(next(iter(entry["name"])), "")
removed_name = True
m = r.search(line)
if m:
for k, v in m.groupdict().items():
if k == "street_number" and not is_street_number(v):
continue
if k == "street" and not is_street(v):
continue
if len(entry[k]) == 0:
# print(r)
entry[k] = "{}".format(v)
entry["grade"] = max(entry["grade"], i)
if len(entry["street"]) > 0 and len(entry["street_number"]) > 0:
if entry["grade"] == 0:
entry["perfect_match"] = True
break
return entry
def process_file(json_file, args):
info("Parsing {}".format(json_file))
regex = compile_regex(args)
street_words = load_street_words(args)
with open(json_file, "r") as fh:
input_json = json.load(fh)
process_node(input_json, regex, street_words)
return input_json
def process_node(node, regex, street_words):
if node["type"] != "ENTRY":
if "children" in node:
for child in node["children"]:
process_node(child, regex, street_words)
if "children" not in node:
return
# process an entry node
raw_entry = [c["text"].strip() for c in node["children"] if c["type"] == "LINE"]
raw_entry = "|".join(raw_entry)
node["parsed"] = process_entry(raw_entry, regex, street_words)
# pprint(node["parsed"])
print_entry(node["parsed"])
# exit()
def entry_to_dict(entry):
m = {}
for k, v in entry.items():
if isinstance(v, str):
m[k] = v
        elif isinstance(v, collections.abc.Iterable):
m[k] = next(iter(v))
else:
m[k] = v
return m
def main():
parser = argparse.ArgumentParser()
parser.add_argument("json", nargs="+", help="Parse soduco json files")
parser.add_argument("-q", "--quiet", action="store_false")
parser.add_argument("-j", "--export-json", metavar="file")
parser.add_argument("-t", "--export-text", metavar="file")
parser.add_argument("-c", "--export-csv", metavar="file")
parser.add_argument(
"--dico-nom-voie", dest="streets_name", default="./paris_road_name.csv"
)
parser.add_argument(
"--dico-type-voie", dest="streets_type", default="./dico_type_voie"
)
args = parser.parse_args()
# regex.append(re.compile(r"^(.*?), (?P<street>[^,]+)[;., ]"))
entries = []
for json_file in args.json:
entries.append(process_file(json_file, args))
# with concurrent.futures.ProcessPoolExecutor() as executor:
# futures = [executor.submit(process_file, txt_file, args)
# for txt_file in args.txts]
# for future in concurrent.futures.as_completed(futures):
# entries += future.result()
if args.export_text:
with open(args.export_text, "w") as fh:
            fh.write(
                "\n".join(repr_entry(entry, colored=False) for entry in entries)
            )
if args.export_json:
with open(args.export_json, "w") as fh:
fh.write(MyJSONEncoder(sort_keys=True, indent=4).encode(entries))
if args.export_csv:
with open(args.export_csv, "w") as fh:
fieldnames = [
"entry_number",
"grade",
"perfect_match",
"lines",
"name",
"street",
"street_number",
"raw",
]
writer = csv.DictWriter(fh, fieldnames=fieldnames)
writer.writeheader()
for serie in entries:
for entry in serie:
writer.writerow(entry_to_dict(entry))
if __name__ == "__main__":
main()
|
[
"youssef.ouhmmou@epita.fr"
] |
youssef.ouhmmou@epita.fr
|
5484b8fc3a0af4846d3f4ff73a6502d49b518d4c
|
6b8c52048648c82543ce899d5fb2f8b0dcabb6e5
|
/stack/minstackV1.py
|
7d5c20b19b9f085b10c6e6301bf30f6e002eca78
|
[] |
no_license
|
arnabs542/DS-AlgoPrac
|
c9f7f0d383bcb3b793b09b219135f1bc9d607081
|
fcc2d6d014e9ffdce3ff4b64d12ce054222e434d
|
refs/heads/master
| 2022-12-13T05:56:33.098629
| 2020-09-15T13:48:54
| 2020-09-15T13:48:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
"""Implementing min stack by using constant extra space.. though it works for only positive integers."""
class MinStack:
# @param x, an integer
# @return an integer
def __init__(self):
self.S = []
self.min_ = float("inf")
def push(self, x):
if self.S:
if self.min_ > x:
self.S.append(x-self.min_)
self.min_ = x
else:
self.S.append(x)
else:
self.S.append(x)
self.min_ = x
# @return nothing
def pop(self):
if self.S:
temp = self.S.pop()
if temp < 0:
self.min_ = self.min_-temp
if not self.S:
self.min_ = float("inf")
# @return an integer
def top(self):
if self.S:
temp = self.S[-1]
if temp < 0:
return self.min_
else:
return temp
else:
return -1
# @return an integer
def getMin(self):
if self.S:
return self.min_
else:
return -1
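# Usage sketch (illustrative, not part of the original file): exercises the
# difference-encoding trick with positive integers only, as the docstring notes.
if __name__ == "__main__":
    st = MinStack()
    for v in (5, 3, 7):
        st.push(v)
    assert st.getMin() == 3      # 3 is the smallest value pushed so far
    assert st.top() == 7
    st.pop()                     # removes 7
    st.pop()                     # removes 3; the minimum falls back to 5
    assert st.getMin() == 5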
|
[
"vvrmahendra@gmail.com"
] |
vvrmahendra@gmail.com
|
3284b7eb7a9381e9b1c28eb67e7d35fecfb7f382
|
87f114e81c333b3494c891ddb567247440de07f3
|
/fan/transformer.py
|
3734422294cc1fd19d3e4a4783fa71680cc09cd1
|
[
"MIT"
] |
permissive
|
stes/fan-pytorch
|
2502d103260852c8e06f1a8108ed3acbb06ae088
|
e80aa3754f9246c56ed6835511427f290a96135d
|
refs/heads/master
| 2021-03-22T03:31:49.169024
| 2018-06-20T23:18:47
| 2018-06-20T23:18:47
| 108,663,478
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,719
|
py
|
""" Pretrained VGG models for feature encoding and decoding
Adapted from https://github.com/sunshineatnoon/PytorchWCT
Reference:
Li Y, Fang C, Yang J, et al. Universal Style Transfer via Feature Transforms[J]. arXiv preprint arXiv:1705.08086, 2017.
"""
import torch.nn as nn
import torch
from torch.utils.serialization import load_lua
_fname_params = { "vgg1": "params/vgg_normalised_conv1_1.t7",
"vgg2": "params/vgg_normalised_conv2_1.t7",
"vgg3": "params/vgg_normalised_conv3_1.t7",
"vgg4": "params/vgg_normalised_conv4_1.t7",
"vgg5": "params/vgg_normalised_conv5_1.t7",
"decoder5": "params/feature_invertor_conv5_1.t7",
"decoder4": "params/feature_invertor_conv4_1.t7",
"decoder3": "params/feature_invertor_conv3_1.t7",
"decoder2": "params/feature_invertor_conv2_1.t7",
"decoder1": "params/feature_invertor_conv1_1.t7"
}
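# Illustrative helper (not part of the original file): load the torch7 weight
# blobs listed above and build a matching encoder/decoder pair. Assumes the
# .t7 files exist at the relative paths given in _fname_params.
def load_encoder_decoder(level=4):
    vgg_params = load_lua(_fname_params["vgg%d" % level])
    dec_params = load_lua(_fname_params["decoder%d" % level])
    encoders = {1: encoder1, 2: encoder2, 3: encoder3, 4: encoder4}
    decoders = {1: decoder1, 2: decoder2, 3: decoder3, 4: decoder4}
    return encoders[level](vgg_params), decoders[level](dec_params)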
class encoder1(nn.Module):
def __init__(self,vgg1):
super(encoder1,self).__init__()
# dissemble vgg2 and decoder2 layer by layer
# then resemble a new encoder-decoder network
# 224 x 224
self.conv1 = nn.Conv2d(3,3,1,1,0)
self.conv1.weight = torch.nn.Parameter(vgg1.get(0).weight.float())
self.conv1.bias = torch.nn.Parameter(vgg1.get(0).bias.float())
# 224 x 224
self.reflecPad1 = nn.ReflectionPad2d((1,1,1,1))
# 226 x 226
self.conv2 = nn.Conv2d(3,64,3,1,0)
self.conv2.weight = torch.nn.Parameter(vgg1.get(2).weight.float())
self.conv2.bias = torch.nn.Parameter(vgg1.get(2).bias.float())
self.relu = nn.ReLU(inplace=True)
# 224 x 224
def forward(self,x):
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu(out)
return out
class decoder1(nn.Module):
def __init__(self,d1):
super(decoder1,self).__init__()
self.reflecPad2 = nn.ReflectionPad2d((1,1,1,1))
# 226 x 226
self.conv3 = nn.Conv2d(64,3,3,1,0)
self.conv3.weight = torch.nn.Parameter(d1.get(1).weight.float())
self.conv3.bias = torch.nn.Parameter(d1.get(1).bias.float())
# 224 x 224
def forward(self,x):
out = self.reflecPad2(x)
out = self.conv3(out)
return out
class encoder2(nn.Module):
def __init__(self,vgg):
super(encoder2,self).__init__()
# vgg
# 224 x 224
self.conv1 = nn.Conv2d(3,3,1,1,0)
self.conv1.weight = torch.nn.Parameter(vgg.get(0).weight.float())
self.conv1.bias = torch.nn.Parameter(vgg.get(0).bias.float())
self.reflecPad1 = nn.ReflectionPad2d((1,1,1,1))
# 226 x 226
self.conv2 = nn.Conv2d(3,64,3,1,0)
self.conv2.weight = torch.nn.Parameter(vgg.get(2).weight.float())
self.conv2.bias = torch.nn.Parameter(vgg.get(2).bias.float())
self.relu2 = nn.ReLU(inplace=True)
# 224 x 224
self.reflecPad3 = nn.ReflectionPad2d((1,1,1,1))
self.conv3 = nn.Conv2d(64,64,3,1,0)
self.conv3.weight = torch.nn.Parameter(vgg.get(5).weight.float())
self.conv3.bias = torch.nn.Parameter(vgg.get(5).bias.float())
self.relu3 = nn.ReLU(inplace=True)
# 224 x 224
self.maxPool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = True)
# 112 x 112
self.reflecPad4 = nn.ReflectionPad2d((1,1,1,1))
self.conv4 = nn.Conv2d(64,128,3,1,0)
self.conv4.weight = torch.nn.Parameter(vgg.get(9).weight.float())
self.conv4.bias = torch.nn.Parameter(vgg.get(9).bias.float())
self.relu4 = nn.ReLU(inplace=True)
# 112 x 112
def forward(self,x):
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.reflecPad3(out)
out = self.conv3(out)
pool = self.relu3(out)
out,pool_idx = self.maxPool(pool)
out = self.reflecPad4(out)
out = self.conv4(out)
out = self.relu4(out)
return out
class decoder2(nn.Module):
def __init__(self,d):
super(decoder2,self).__init__()
# decoder
self.reflecPad5 = nn.ReflectionPad2d((1,1,1,1))
self.conv5 = nn.Conv2d(128,64,3,1,0)
self.conv5.weight = torch.nn.Parameter(d.get(1).weight.float())
self.conv5.bias = torch.nn.Parameter(d.get(1).bias.float())
self.relu5 = nn.ReLU(inplace=True)
# 112 x 112
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
# 224 x 224
self.reflecPad6 = nn.ReflectionPad2d((1,1,1,1))
self.conv6 = nn.Conv2d(64,64,3,1,0)
self.conv6.weight = torch.nn.Parameter(d.get(5).weight.float())
self.conv6.bias = torch.nn.Parameter(d.get(5).bias.float())
self.relu6 = nn.ReLU(inplace=True)
# 224 x 224
self.reflecPad7 = nn.ReflectionPad2d((1,1,1,1))
self.conv7 = nn.Conv2d(64,3,3,1,0)
self.conv7.weight = torch.nn.Parameter(d.get(8).weight.float())
self.conv7.bias = torch.nn.Parameter(d.get(8).bias.float())
def forward(self,x):
out = self.reflecPad5(x)
out = self.conv5(out)
out = self.relu5(out)
out = self.unpool(out)
out = self.reflecPad6(out)
out = self.conv6(out)
out = self.relu6(out)
out = self.reflecPad7(out)
out = self.conv7(out)
return out
class encoder3(nn.Module):
def __init__(self,vgg):
super(encoder3,self).__init__()
# vgg
# 224 x 224
self.conv1 = nn.Conv2d(3,3,1,1,0)
self.conv1.weight = torch.nn.Parameter(vgg.get(0).weight.float())
self.conv1.bias = torch.nn.Parameter(vgg.get(0).bias.float())
self.reflecPad1 = nn.ReflectionPad2d((1,1,1,1))
# 226 x 226
self.conv2 = nn.Conv2d(3,64,3,1,0)
self.conv2.weight = torch.nn.Parameter(vgg.get(2).weight.float())
self.conv2.bias = torch.nn.Parameter(vgg.get(2).bias.float())
self.relu2 = nn.ReLU(inplace=True)
# 224 x 224
self.reflecPad3 = nn.ReflectionPad2d((1,1,1,1))
self.conv3 = nn.Conv2d(64,64,3,1,0)
self.conv3.weight = torch.nn.Parameter(vgg.get(5).weight.float())
self.conv3.bias = torch.nn.Parameter(vgg.get(5).bias.float())
self.relu3 = nn.ReLU(inplace=True)
# 224 x 224
self.maxPool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = True)
# 112 x 112
self.reflecPad4 = nn.ReflectionPad2d((1,1,1,1))
self.conv4 = nn.Conv2d(64,128,3,1,0)
self.conv4.weight = torch.nn.Parameter(vgg.get(9).weight.float())
self.conv4.bias = torch.nn.Parameter(vgg.get(9).bias.float())
self.relu4 = nn.ReLU(inplace=True)
# 112 x 112
self.reflecPad5 = nn.ReflectionPad2d((1,1,1,1))
self.conv5 = nn.Conv2d(128,128,3,1,0)
self.conv5.weight = torch.nn.Parameter(vgg.get(12).weight.float())
self.conv5.bias = torch.nn.Parameter(vgg.get(12).bias.float())
self.relu5 = nn.ReLU(inplace=True)
# 112 x 112
self.maxPool2 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = True)
# 56 x 56
self.reflecPad6 = nn.ReflectionPad2d((1,1,1,1))
self.conv6 = nn.Conv2d(128,256,3,1,0)
self.conv6.weight = torch.nn.Parameter(vgg.get(16).weight.float())
self.conv6.bias = torch.nn.Parameter(vgg.get(16).bias.float())
self.relu6 = nn.ReLU(inplace=True)
# 56 x 56
def forward(self,x):
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.reflecPad3(out)
out = self.conv3(out)
pool1 = self.relu3(out)
out,pool_idx = self.maxPool(pool1)
out = self.reflecPad4(out)
out = self.conv4(out)
out = self.relu4(out)
out = self.reflecPad5(out)
out = self.conv5(out)
pool2 = self.relu5(out)
out,pool_idx2 = self.maxPool2(pool2)
out = self.reflecPad6(out)
out = self.conv6(out)
out = self.relu6(out)
return out
class decoder3(nn.Module):
def __init__(self,d):
super(decoder3,self).__init__()
# decoder
self.reflecPad7 = nn.ReflectionPad2d((1,1,1,1))
self.conv7 = nn.Conv2d(256,128,3,1,0)
self.conv7.weight = torch.nn.Parameter(d.get(1).weight.float())
self.conv7.bias = torch.nn.Parameter(d.get(1).bias.float())
self.relu7 = nn.ReLU(inplace=True)
# 56 x 56
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
# 112 x 112
self.reflecPad8 = nn.ReflectionPad2d((1,1,1,1))
self.conv8 = nn.Conv2d(128,128,3,1,0)
self.conv8.weight = torch.nn.Parameter(d.get(5).weight.float())
self.conv8.bias = torch.nn.Parameter(d.get(5).bias.float())
self.relu8 = nn.ReLU(inplace=True)
# 112 x 112
self.reflecPad9 = nn.ReflectionPad2d((1,1,1,1))
self.conv9 = nn.Conv2d(128,64,3,1,0)
self.conv9.weight = torch.nn.Parameter(d.get(8).weight.float())
self.conv9.bias = torch.nn.Parameter(d.get(8).bias.float())
self.relu9 = nn.ReLU(inplace=True)
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
# 224 x 224
self.reflecPad10 = nn.ReflectionPad2d((1,1,1,1))
self.conv10 = nn.Conv2d(64,64,3,1,0)
self.conv10.weight = torch.nn.Parameter(d.get(12).weight.float())
self.conv10.bias = torch.nn.Parameter(d.get(12).bias.float())
self.relu10 = nn.ReLU(inplace=True)
self.reflecPad11 = nn.ReflectionPad2d((1,1,1,1))
self.conv11 = nn.Conv2d(64,3,3,1,0)
self.conv11.weight = torch.nn.Parameter(d.get(15).weight.float())
self.conv11.bias = torch.nn.Parameter(d.get(15).bias.float())
def forward(self,x):
out = self.reflecPad7(x)
out = self.conv7(out)
out = self.relu7(out)
out = self.unpool(out)
out = self.reflecPad8(out)
out = self.conv8(out)
out = self.relu8(out)
out = self.reflecPad9(out)
out = self.conv9(out)
out = self.relu9(out)
out = self.unpool2(out)
out = self.reflecPad10(out)
out = self.conv10(out)
out = self.relu10(out)
out = self.reflecPad11(out)
out = self.conv11(out)
return out
class encoder4(nn.Module):
def __init__(self,vgg):
super(encoder4,self).__init__()
# vgg
# 224 x 224
# Transformation to BGR and normalization happening here.
# The net assumes an input of [0, 1] !!!
self.conv1 = nn.Conv2d(3,3,1,1,0)
self.conv1.weight = torch.nn.Parameter(vgg.get(0).weight.float())
self.conv1.bias = torch.nn.Parameter(vgg.get(0).bias.float())
self.reflecPad1 = nn.ReflectionPad2d((1,1,1,1))
# 226 x 226
self.conv2 = nn.Conv2d(3,64,3,1,0)
self.conv2.weight = torch.nn.Parameter(vgg.get(2).weight.float())
self.conv2.bias = torch.nn.Parameter(vgg.get(2).bias.float())
self.relu2 = nn.ReLU(inplace=True)
# 224 x 224
self.reflecPad3 = nn.ReflectionPad2d((1,1,1,1))
self.conv3 = nn.Conv2d(64,64,3,1,0)
self.conv3.weight = torch.nn.Parameter(vgg.get(5).weight.float())
self.conv3.bias = torch.nn.Parameter(vgg.get(5).bias.float())
self.relu3 = nn.ReLU(inplace=True)
# 224 x 224
self.maxPool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = True)
# 112 x 112
self.reflecPad4 = nn.ReflectionPad2d((1,1,1,1))
self.conv4 = nn.Conv2d(64,128,3,1,0)
self.conv4.weight = torch.nn.Parameter(vgg.get(9).weight.float())
self.conv4.bias = torch.nn.Parameter(vgg.get(9).bias.float())
self.relu4 = nn.ReLU(inplace=True)
# 112 x 112
self.reflecPad5 = nn.ReflectionPad2d((1,1,1,1))
self.conv5 = nn.Conv2d(128,128,3,1,0)
self.conv5.weight = torch.nn.Parameter(vgg.get(12).weight.float())
self.conv5.bias = torch.nn.Parameter(vgg.get(12).bias.float())
self.relu5 = nn.ReLU(inplace=True)
# 112 x 112
self.maxPool2 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = True)
# 56 x 56
self.reflecPad6 = nn.ReflectionPad2d((1,1,1,1))
self.conv6 = nn.Conv2d(128,256,3,1,0)
self.conv6.weight = torch.nn.Parameter(vgg.get(16).weight.float())
self.conv6.bias = torch.nn.Parameter(vgg.get(16).bias.float())
self.relu6 = nn.ReLU(inplace=True)
# 56 x 56
self.reflecPad7 = nn.ReflectionPad2d((1,1,1,1))
self.conv7 = nn.Conv2d(256,256,3,1,0)
self.conv7.weight = torch.nn.Parameter(vgg.get(19).weight.float())
self.conv7.bias = torch.nn.Parameter(vgg.get(19).bias.float())
self.relu7 = nn.ReLU(inplace=True)
# 56 x 56
self.reflecPad8 = nn.ReflectionPad2d((1,1,1,1))
self.conv8 = nn.Conv2d(256,256,3,1,0)
self.conv8.weight = torch.nn.Parameter(vgg.get(22).weight.float())
self.conv8.bias = torch.nn.Parameter(vgg.get(22).bias.float())
self.relu8 = nn.ReLU(inplace=True)
# 56 x 56
self.reflecPad9 = nn.ReflectionPad2d((1,1,1,1))
self.conv9 = nn.Conv2d(256,256,3,1,0)
self.conv9.weight = torch.nn.Parameter(vgg.get(25).weight.float())
self.conv9.bias = torch.nn.Parameter(vgg.get(25).bias.float())
self.relu9 = nn.ReLU(inplace=True)
# 56 x 56
self.maxPool3 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = True)
# 28 x 28
self.reflecPad10 = nn.ReflectionPad2d((1,1,1,1))
self.conv10 = nn.Conv2d(256,512,3,1,0)
self.conv10.weight = torch.nn.Parameter(vgg.get(29).weight.float())
self.conv10.bias = torch.nn.Parameter(vgg.get(29).bias.float())
self.relu10 = nn.ReLU(inplace=True)
# 28 x 28
def forward(self,x):
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.reflecPad3(out)
out = self.conv3(out)
pool1 = self.relu3(out)
out,pool_idx = self.maxPool(pool1)
out = self.reflecPad4(out)
out = self.conv4(out)
z1 = out = self.relu4(out)
out = self.reflecPad5(out)
out = self.conv5(out)
pool2 = self.relu5(out)
out,pool_idx2 = self.maxPool2(pool2)
out = self.reflecPad6(out)
out = self.conv6(out)
out = self.relu6(out)
out = self.reflecPad7(out)
out = self.conv7(out)
out = self.relu7(out)
out = self.reflecPad8(out)
out = self.conv8(out)
z2 = out = self.relu8(out)
out = self.reflecPad9(out)
out = self.conv9(out)
pool3 = self.relu9(out)
out,pool_idx3 = self.maxPool3(pool3)
out = self.reflecPad10(out)
out = self.conv10(out)
out = self.relu10(out)
return z1, z2, out
class decoder4(nn.Module):
def __init__(self,d):
super(decoder4,self).__init__()
# decoder
self.reflecPad11 = nn.ReflectionPad2d((1,1,1,1))
self.conv11 = nn.Conv2d(512,256,3,1,0)
self.conv11.weight = torch.nn.Parameter(d.get(1).weight.float())
self.conv11.bias = torch.nn.Parameter(d.get(1).bias.float())
self.relu11 = nn.ReLU(inplace=True)
# 28 x 28
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
# 56 x 56
self.reflecPad12 = nn.ReflectionPad2d((1,1,1,1))
self.conv12 = nn.Conv2d(256,256,3,1,0)
self.conv12.weight = torch.nn.Parameter(d.get(5).weight.float())
self.conv12.bias = torch.nn.Parameter(d.get(5).bias.float())
self.relu12 = nn.ReLU(inplace=True)
# 56 x 56
self.reflecPad13 = nn.ReflectionPad2d((1,1,1,1))
self.conv13 = nn.Conv2d(256,256,3,1,0)
self.conv13.weight = torch.nn.Parameter(d.get(8).weight.float())
self.conv13.bias = torch.nn.Parameter(d.get(8).bias.float())
self.relu13 = nn.ReLU(inplace=True)
# 56 x 56
self.reflecPad14 = nn.ReflectionPad2d((1,1,1,1))
self.conv14 = nn.Conv2d(256,256,3,1,0)
self.conv14.weight = torch.nn.Parameter(d.get(11).weight.float())
self.conv14.bias = torch.nn.Parameter(d.get(11).bias.float())
self.relu14 = nn.ReLU(inplace=True)
# 56 x 56
self.reflecPad15 = nn.ReflectionPad2d((1,1,1,1))
self.conv15 = nn.Conv2d(256,128,3,1,0)
self.conv15.weight = torch.nn.Parameter(d.get(14).weight.float())
self.conv15.bias = torch.nn.Parameter(d.get(14).bias.float())
self.relu15 = nn.ReLU(inplace=True)
# 56 x 56
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
# 112 x 112
self.reflecPad16 = nn.ReflectionPad2d((1,1,1,1))
self.conv16 = nn.Conv2d(128,128,3,1,0)
self.conv16.weight = torch.nn.Parameter(d.get(18).weight.float())
self.conv16.bias = torch.nn.Parameter(d.get(18).bias.float())
self.relu16 = nn.ReLU(inplace=True)
# 112 x 112
self.reflecPad17 = nn.ReflectionPad2d((1,1,1,1))
self.conv17 = nn.Conv2d(128,64,3,1,0)
self.conv17.weight = torch.nn.Parameter(d.get(21).weight.float())
self.conv17.bias = torch.nn.Parameter(d.get(21).bias.float())
self.relu17 = nn.ReLU(inplace=True)
# 112 x 112
self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2)
# 224 x 224
self.reflecPad18 = nn.ReflectionPad2d((1,1,1,1))
self.conv18 = nn.Conv2d(64,64,3,1,0)
self.conv18.weight = torch.nn.Parameter(d.get(25).weight.float())
self.conv18.bias = torch.nn.Parameter(d.get(25).bias.float())
self.relu18 = nn.ReLU(inplace=True)
# 224 x 224
self.reflecPad19 = nn.ReflectionPad2d((1,1,1,1))
self.conv19 = nn.Conv2d(64,3,3,1,0)
self.conv19.weight = torch.nn.Parameter(d.get(28).weight.float())
self.conv19.bias = torch.nn.Parameter(d.get(28).bias.float())
def forward(self,x):
# decoder
out = self.reflecPad11(x)
out = self.conv11(out)
out = self.relu11(out)
out = self.unpool(out)
out = self.reflecPad12(out)
out = self.conv12(out)
out = self.relu12(out)
out = self.reflecPad13(out)
out = self.conv13(out)
out = self.relu13(out)
out = self.reflecPad14(out)
out = self.conv14(out)
out = self.relu14(out)
out = self.reflecPad15(out)
out = self.conv15(out)
out = self.relu15(out)
out = self.unpool2(out)
out = self.reflecPad16(out)
out = self.conv16(out)
out = self.relu16(out)
out = self.reflecPad17(out)
out = self.conv17(out)
out = self.relu17(out)
out = self.unpool3(out)
out = self.reflecPad18(out)
out = self.conv18(out)
out = self.relu18(out)
out = self.reflecPad19(out)
out = self.conv19(out)
return out
class encoder5(nn.Module):
def __init__(self,vgg):
super(encoder5,self).__init__()
# vgg
# 224 x 224
self.conv1 = nn.Conv2d(3,3,1,1,0)
self.conv1.weight = torch.nn.Parameter(vgg.get(0).weight.float())
self.conv1.bias = torch.nn.Parameter(vgg.get(0).bias.float())
self.reflecPad1 = nn.ReflectionPad2d((1,1,1,1))
# 226 x 226
self.conv2 = nn.Conv2d(3,64,3,1,0)
self.conv2.weight = torch.nn.Parameter(vgg.get(2).weight.float())
self.conv2.bias = torch.nn.Parameter(vgg.get(2).bias.float())
self.relu2 = nn.ReLU(inplace=True)
# 224 x 224
self.reflecPad3 = nn.ReflectionPad2d((1,1,1,1))
self.conv3 = nn.Conv2d(64,64,3,1,0)
self.conv3.weight = torch.nn.Parameter(vgg.get(5).weight.float())
self.conv3.bias = torch.nn.Parameter(vgg.get(5).bias.float())
self.relu3 = nn.ReLU(inplace=True)
# 224 x 224
self.maxPool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = True)
# 112 x 112
self.reflecPad4 = nn.ReflectionPad2d((1,1,1,1))
self.conv4 = nn.Conv2d(64,128,3,1,0)
self.conv4.weight = torch.nn.Parameter(vgg.get(9).weight.float())
self.conv4.bias = torch.nn.Parameter(vgg.get(9).bias.float())
self.relu4 = nn.ReLU(inplace=True)
# 112 x 112
self.reflecPad5 = nn.ReflectionPad2d((1,1,1,1))
self.conv5 = nn.Conv2d(128,128,3,1,0)
self.conv5.weight = torch.nn.Parameter(vgg.get(12).weight.float())
self.conv5.bias = torch.nn.Parameter(vgg.get(12).bias.float())
self.relu5 = nn.ReLU(inplace=True)
# 112 x 112
self.maxPool2 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = True)
# 56 x 56
self.reflecPad6 = nn.ReflectionPad2d((1,1,1,1))
self.conv6 = nn.Conv2d(128,256,3,1,0)
self.conv6.weight = torch.nn.Parameter(vgg.get(16).weight.float())
self.conv6.bias = torch.nn.Parameter(vgg.get(16).bias.float())
self.relu6 = nn.ReLU(inplace=True)
# 56 x 56
self.reflecPad7 = nn.ReflectionPad2d((1,1,1,1))
self.conv7 = nn.Conv2d(256,256,3,1,0)
self.conv7.weight = torch.nn.Parameter(vgg.get(19).weight.float())
self.conv7.bias = torch.nn.Parameter(vgg.get(19).bias.float())
self.relu7 = nn.ReLU(inplace=True)
# 56 x 56
self.reflecPad8 = nn.ReflectionPad2d((1,1,1,1))
self.conv8 = nn.Conv2d(256,256,3,1,0)
self.conv8.weight = torch.nn.Parameter(vgg.get(22).weight.float())
self.conv8.bias = torch.nn.Parameter(vgg.get(22).bias.float())
self.relu8 = nn.ReLU(inplace=True)
# 56 x 56
self.reflecPad9 = nn.ReflectionPad2d((1,1,1,1))
self.conv9 = nn.Conv2d(256,256,3,1,0)
self.conv9.weight = torch.nn.Parameter(vgg.get(25).weight.float())
self.conv9.bias = torch.nn.Parameter(vgg.get(25).bias.float())
self.relu9 = nn.ReLU(inplace=True)
# 56 x 56
self.maxPool3 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = True)
# 28 x 28
self.reflecPad10 = nn.ReflectionPad2d((1,1,1,1))
self.conv10 = nn.Conv2d(256,512,3,1,0)
self.conv10.weight = torch.nn.Parameter(vgg.get(29).weight.float())
self.conv10.bias = torch.nn.Parameter(vgg.get(29).bias.float())
self.relu10 = nn.ReLU(inplace=True)
# 28 x 28
self.reflecPad11 = nn.ReflectionPad2d((1,1,1,1))
self.conv11 = nn.Conv2d(512,512,3,1,0)
self.conv11.weight = torch.nn.Parameter(vgg.get(32).weight.float())
self.conv11.bias = torch.nn.Parameter(vgg.get(32).bias.float())
self.relu11 = nn.ReLU(inplace=True)
# 28 x 28
self.reflecPad12 = nn.ReflectionPad2d((1,1,1,1))
self.conv12 = nn.Conv2d(512,512,3,1,0)
self.conv12.weight = torch.nn.Parameter(vgg.get(35).weight.float())
self.conv12.bias = torch.nn.Parameter(vgg.get(35).bias.float())
self.relu12 = nn.ReLU(inplace=True)
# 28 x 28
self.reflecPad13 = nn.ReflectionPad2d((1,1,1,1))
self.conv13 = nn.Conv2d(512,512,3,1,0)
self.conv13.weight = torch.nn.Parameter(vgg.get(38).weight.float())
self.conv13.bias = torch.nn.Parameter(vgg.get(38).bias.float())
self.relu13 = nn.ReLU(inplace=True)
# 28 x 28
self.maxPool4 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = True)
# 14 x 14
self.reflecPad14 = nn.ReflectionPad2d((1,1,1,1))
self.conv14 = nn.Conv2d(512,512,3,1,0)
self.conv14.weight = torch.nn.Parameter(vgg.get(42).weight.float())
self.conv14.bias = torch.nn.Parameter(vgg.get(42).bias.float())
self.relu14 = nn.ReLU(inplace=True)
# 14 x 14
def forward(self,x):
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.reflecPad3(out)
out = self.conv3(out)
out = self.relu3(out)
out,pool_idx = self.maxPool(out)
out = self.reflecPad4(out)
out = self.conv4(out)
out = self.relu4(out)
out = self.reflecPad5(out)
out = self.conv5(out)
out = self.relu5(out)
out,pool_idx2 = self.maxPool2(out)
out = self.reflecPad6(out)
out = self.conv6(out)
out = self.relu6(out)
out = self.reflecPad7(out)
out = self.conv7(out)
out = self.relu7(out)
out = self.reflecPad8(out)
out = self.conv8(out)
out = self.relu8(out)
out = self.reflecPad9(out)
out = self.conv9(out)
out = self.relu9(out)
out,pool_idx3 = self.maxPool3(out)
out = self.reflecPad10(out)
out = self.conv10(out)
out = self.relu10(out)
out = self.reflecPad11(out)
out = self.conv11(out)
out = self.relu11(out)
out = self.reflecPad12(out)
out = self.conv12(out)
out = self.relu12(out)
out = self.reflecPad13(out)
out = self.conv13(out)
out = self.relu13(out)
out,pool_idx4 = self.maxPool4(out)
out = self.reflecPad14(out)
out = self.conv14(out)
out = self.relu14(out)
return out
class decoder5(nn.Module):
def __init__(self,d):
super(decoder5,self).__init__()
# decoder
self.reflecPad15 = nn.ReflectionPad2d((1,1,1,1))
self.conv15 = nn.Conv2d(512,512,3,1,0)
self.conv15.weight = torch.nn.Parameter(d.get(1).weight.float())
self.conv15.bias = torch.nn.Parameter(d.get(1).bias.float())
self.relu15 = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
# 28 x 28
self.reflecPad16 = nn.ReflectionPad2d((1,1,1,1))
self.conv16 = nn.Conv2d(512,512,3,1,0)
self.conv16.weight = torch.nn.Parameter(d.get(5).weight.float())
self.conv16.bias = torch.nn.Parameter(d.get(5).bias.float())
self.relu16 = nn.ReLU(inplace=True)
# 28 x 28
self.reflecPad17 = nn.ReflectionPad2d((1,1,1,1))
self.conv17 = nn.Conv2d(512,512,3,1,0)
self.conv17.weight = torch.nn.Parameter(d.get(8).weight.float())
self.conv17.bias = torch.nn.Parameter(d.get(8).bias.float())
self.relu17 = nn.ReLU(inplace=True)
# 28 x 28
self.reflecPad18 = nn.ReflectionPad2d((1,1,1,1))
self.conv18 = nn.Conv2d(512,512,3,1,0)
self.conv18.weight = torch.nn.Parameter(d.get(11).weight.float())
self.conv18.bias = torch.nn.Parameter(d.get(11).bias.float())
self.relu18 = nn.ReLU(inplace=True)
# 28 x 28
self.reflecPad19 = nn.ReflectionPad2d((1,1,1,1))
self.conv19 = nn.Conv2d(512,256,3,1,0)
self.conv19.weight = torch.nn.Parameter(d.get(14).weight.float())
self.conv19.bias = torch.nn.Parameter(d.get(14).bias.float())
self.relu19 = nn.ReLU(inplace=True)
# 28 x 28
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
# 56 x 56
self.reflecPad20 = nn.ReflectionPad2d((1,1,1,1))
self.conv20 = nn.Conv2d(256,256,3,1,0)
self.conv20.weight = torch.nn.Parameter(d.get(18).weight.float())
self.conv20.bias = torch.nn.Parameter(d.get(18).bias.float())
self.relu20 = nn.ReLU(inplace=True)
# 56 x 56
self.reflecPad21 = nn.ReflectionPad2d((1,1,1,1))
self.conv21 = nn.Conv2d(256,256,3,1,0)
self.conv21.weight = torch.nn.Parameter(d.get(21).weight.float())
self.conv21.bias = torch.nn.Parameter(d.get(21).bias.float())
self.relu21 = nn.ReLU(inplace=True)
self.reflecPad22 = nn.ReflectionPad2d((1,1,1,1))
self.conv22 = nn.Conv2d(256,256,3,1,0)
self.conv22.weight = torch.nn.Parameter(d.get(24).weight.float())
self.conv22.bias = torch.nn.Parameter(d.get(24).bias.float())
self.relu22 = nn.ReLU(inplace=True)
self.reflecPad23 = nn.ReflectionPad2d((1,1,1,1))
self.conv23 = nn.Conv2d(256,128,3,1,0)
self.conv23.weight = torch.nn.Parameter(d.get(27).weight.float())
self.conv23.bias = torch.nn.Parameter(d.get(27).bias.float())
self.relu23 = nn.ReLU(inplace=True)
self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2)
# 112 X 112
self.reflecPad24 = nn.ReflectionPad2d((1,1,1,1))
self.conv24 = nn.Conv2d(128,128,3,1,0)
self.conv24.weight = torch.nn.Parameter(d.get(31).weight.float())
self.conv24.bias = torch.nn.Parameter(d.get(31).bias.float())
self.relu24 = nn.ReLU(inplace=True)
self.reflecPad25 = nn.ReflectionPad2d((1,1,1,1))
self.conv25 = nn.Conv2d(128,64,3,1,0)
self.conv25.weight = torch.nn.Parameter(d.get(34).weight.float())
self.conv25.bias = torch.nn.Parameter(d.get(34).bias.float())
self.relu25 = nn.ReLU(inplace=True)
self.unpool4 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad26 = nn.ReflectionPad2d((1,1,1,1))
self.conv26 = nn.Conv2d(64,64,3,1,0)
self.conv26.weight = torch.nn.Parameter(d.get(38).weight.float())
self.conv26.bias = torch.nn.Parameter(d.get(38).bias.float())
self.relu26 = nn.ReLU(inplace=True)
self.reflecPad27 = nn.ReflectionPad2d((1,1,1,1))
self.conv27 = nn.Conv2d(64,3,3,1,0)
self.conv27.weight = torch.nn.Parameter(d.get(41).weight.float())
self.conv27.bias = torch.nn.Parameter(d.get(41).bias.float())
def forward(self,x):
# decoder
out = self.reflecPad15(x)
out = self.conv15(out)
out = self.relu15(out)
out = self.unpool(out)
out = self.reflecPad16(out)
out = self.conv16(out)
out = self.relu16(out)
out = self.reflecPad17(out)
out = self.conv17(out)
out = self.relu17(out)
out = self.reflecPad18(out)
out = self.conv18(out)
out = self.relu18(out)
out = self.reflecPad19(out)
out = self.conv19(out)
out = self.relu19(out)
out = self.unpool2(out)
out = self.reflecPad20(out)
out = self.conv20(out)
out = self.relu20(out)
out = self.reflecPad21(out)
out = self.conv21(out)
out = self.relu21(out)
out = self.reflecPad22(out)
out = self.conv22(out)
out = self.relu22(out)
out = self.reflecPad23(out)
out = self.conv23(out)
out = self.relu23(out)
out = self.unpool3(out)
out = self.reflecPad24(out)
out = self.conv24(out)
out = self.relu24(out)
out = self.reflecPad25(out)
out = self.conv25(out)
out = self.relu25(out)
out = self.unpool4(out)
out = self.reflecPad26(out)
out = self.conv26(out)
out = self.relu26(out)
out = self.reflecPad27(out)
out = self.conv27(out)
return out
_encoder_list = [encoder1, encoder2, encoder3, encoder4, encoder5]
_decoder_list = [decoder1, decoder2, decoder3, decoder4, decoder5]
def get_encoder(depth=1):
enc = load_lua(_fname_params['vgg{}'.format(depth)])
model_enc = _encoder_list[depth-1](enc)
del(enc)
return model_enc
def get_decoder(depth=1):
dec = load_lua(_fname_params['decoder{}'.format(depth)])
model_dec = _decoder_list[depth-1](dec)
del(dec)
return model_dec
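# Illustrative usage sketch (added for clarity; not part of the original file).
# It assumes the Torch .t7 parameter files referenced in _fname_params are
# available on disk; the input tensor below is a hypothetical stand-in.
if __name__ == "__main__":
    enc = get_encoder(depth=4)           # encoder up to relu4_1
    dec = get_decoder(depth=4)           # matching decoder back to image space
    dummy = torch.rand(1, 3, 224, 224)   # stand-in for an RGB image in [0, 1]
    z1, z2, feat = enc(dummy)            # encoder4 also returns two skip features
    recon = dec(feat)                    # reconstructs a 1 x 3 x 224 x 224 image
    print(feat.shape, recon.shape)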
|
[
"steffen.schneider@rwth-aachen.de"
] |
steffen.schneider@rwth-aachen.de
|
c9afb092eef5a506c10822dac5f4ffe8dd111681
|
ad59fb12042bfd3f5c43eca057d0f747f9e148cf
|
/Se2iP/usr/lib/enigma2/python/Plugins/Extensions/IPTVPlayer/tsiplayer/addons/resources/hosters/archive.py
|
bcb864423b8093bc814397335b14ebbcdb524665
|
[] |
no_license
|
lexlong2007/eePlugins
|
d62b787100a7069ad5713a47c5688008063b45ec
|
167b262fe36901a2d3a2fae6d0f85e2307b3eff7
|
refs/heads/master
| 2022-03-09T05:37:37.567937
| 2022-02-27T01:44:25
| 2022-02-27T01:44:25
| 253,012,126
| 0
| 0
| null | 2020-04-04T14:03:29
| 2020-04-04T14:03:29
| null |
UTF-8
|
Python
| false
| false
| 1,892
|
py
|
# -*- coding: utf-8 -*-
# vStream https://github.com/Kodi-vStream/venom-xbmc-addons
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.requestHandler import cRequestHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.parser import cParser
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.hosters.hoster import iHoster
class cHoster(iHoster):
def __init__(self):
self.__sDisplayName = 'Archive'
self.__sFileName = self.__sDisplayName
self.__sHD = ''
def getDisplayName(self):
return self.__sDisplayName
def setDisplayName(self, sDisplayName):
self.__sDisplayName = sDisplayName + ' [COLOR skyblue]' + self.__sDisplayName + '[/COLOR]'
def setFileName(self, sFileName):
self.__sFileName = sFileName
def getFileName(self):
return self.__sFileName
def getPluginIdentifier(self):
return 'archive'
def setHD(self, sHD):
self.__sHD = ''
def getHD(self):
return self.__sHD
def isDownloadable(self):
return True
def setUrl(self, sUrl):
self.__sUrl = str(sUrl)
def getUrl(self):
return self.__sUrl
def getMediaLink(self):
return self.__getMediaLinkForGuest()
def __getMediaLinkForGuest(self):
api_call = ''
oParser = cParser()
oRequest = cRequestHandler(self.__sUrl)
sHtmlContent = oRequest.request()
sPattern = '<source src="([^"]+.mp4)"'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
api_call = aResult[1][0]
if api_call.startswith('/'):
api_call = 'https://archive.org' + aResult[1][0]
if (api_call):
return True, api_call
return False, False
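# Illustrative usage within the IPTVPlayer hoster framework (added note, not in
# the original file); the URL below is hypothetical:
#   oHoster = cHoster()
#   oHoster.setUrl('https://archive.org/details/some_item')
#   bOK, sLink = oHoster.getMediaLink()   # (True, direct .mp4 link) on success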
|
[
"zdzislaw22@windowslive.com"
] |
zdzislaw22@windowslive.com
|
76a0b0f65b7fa45d7eeb03ada867a159beedd3a3
|
16beae88a044380df460195b5c451da9f67e346e
|
/python3_hard_way/ex14.py
|
cdb1643ae7a25f8d5b0aad7efa19f6d5ad1039e1
|
[] |
no_license
|
arian81/playground
|
1a223721acd51fc9a2b9895243533410d93ee46c
|
ab0d175cec7823ef924c84db7948a4e64f577d73
|
refs/heads/master
| 2020-05-22T17:08:27.151680
| 2020-05-14T16:53:40
| 2020-05-14T16:53:40
| 186,446,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
from sys import argv
script, user_name, password = argv
prompt = '> '
if len(password)>8:
print ("try again")
else:
print(f"Hi {user_name}, I'm the {script} script.")
print("I'd like to ask you a few questions.")
print(f"Do you like me {user_name}?")
likes=input(prompt)
print(f"Where do you live {user_name}?")
lives = input(prompt)
print("what kind of computer do you have?")
computer = input(prompt)
print(f"""
Alright, so you said {likes} about liking me.
You live in {lives}. Not sure where that is.
And you have a {computer} computer. Nice.
""")
|
[
"arian@pop-os.localdomain"
] |
arian@pop-os.localdomain
|
51b73c2cb5aefe10a374446ef066b0bcb61b2e55
|
e19f94cbfcb9ebb3dae4fcf18d34ad5f2092c626
|
/STDF/WIR.py
|
55847d9644e66140336cb651f83016a87f80d033
|
[
"MIT"
] |
permissive
|
awinia-github/Semi-ATE-STDF
|
1c2c20c80e8fec31026196da90f472ad741be162
|
f9f4e6544928f56a9be150bdbc38971ac32dd9fc
|
refs/heads/main
| 2023-02-27T04:58:05.359588
| 2021-02-11T07:37:51
| 2021-02-11T07:37:51
| 337,956,845
| 0
| 0
|
MIT
| 2021-02-11T07:20:54
| 2021-02-11T07:20:53
| null |
UTF-8
|
Python
| false
| false
| 3,108
|
py
|
import sys
import time
from STDF import STDR
class WIR(STDR):
def __init__(self, version=None, endian=None, record=None):
self.id = 'WIR'
self.local_debug = False
if version==None or version=='V4':
self.version = 'V4'
self.info = '''
Wafer Information Record
------------------------
Function:
Acts mainly as a marker to indicate where testing of a particular wafer begins for each
wafer tested by the job plan. The WIR and the Wafer Results Record (WRR) bracket all
the stored information pertaining to one tested wafer. This record is used only when
testing at wafer probe. A WIR/WRR pair will have the same HEAD_NUM and SITE_GRP values.
Frequency:
* Obligatory for Wafer sort
* One per wafer tested.
Location:
Anywhere in the data stream after the initial sequence (see page 14) and before the MRR.
Sent before testing each wafer.
'''
self.fields = {
'REC_LEN' : {'#' : 0, 'Type' : 'U*2', 'Ref' : None, 'Value' : None, 'Text' : 'Bytes of data following header ', 'Missing' : None},
'REC_TYP' : {'#' : 1, 'Type' : 'U*1', 'Ref' : None, 'Value' : 2, 'Text' : 'Record type ', 'Missing' : None},
'REC_SUB' : {'#' : 2, 'Type' : 'U*1', 'Ref' : None, 'Value' : 10, 'Text' : 'Record sub-type ', 'Missing' : None},
'HEAD_NUM' : {'#' : 3, 'Type' : 'U*1', 'Ref' : None, 'Value' : None, 'Text' : 'Test head number ', 'Missing' : 1 },
'SITE_GRP' : {'#' : 4, 'Type' : 'U*1', 'Ref' : None, 'Value' : None, 'Text' : 'Site group number ', 'Missing' : 255 },
'START_T' : {'#' : 5, 'Type' : 'U*4', 'Ref' : None, 'Value' : None, 'Text' : 'Date and time first part tested ', 'Missing' : 0 },
'WAFER_ID' : {'#' : 6, 'Type' : 'C*n', 'Ref' : None, 'Value' : None, 'Text' : 'Wafer ID ', 'Missing' : '' }
}
else:
raise STDFError("%s object creation error: unsupported version '%s'" % (self.id, version))
self._default_init(endian, record)
def to_atdf(self):
sequence = {}
header = ''
body = ''
header = self.id + ':'
# The order of fields is different in STDF and ATDF for WIR record
# STDF page 37, the order is HEAD_NUM, SITE_GRP, START_T, WAFER_ID
# ATDF page 33, the order is HEAD_NUM, START_T, SITE_GRP, WAFER_ID
# 3 HEAD_NUM
body += self.gen_atdf(3)
# 5 START_T
v = self.get_fields(5)[3]
if v != None:
t = time.strftime("%-H:%-M:%-S %-d-%b-%Y", time.gmtime(v))
body += "%s|" % (t.upper())
# 4 SITE_GRP
body += self.gen_atdf(4)
# 6 WAFER_ID
body += self.gen_atdf(6)
body = body[:-1]
# assemble the record
retval = header + body
if self.local_debug: print("%s._to_atdf()\n '%s'\n" % (self.id, retval))
return retval
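# Minimal usage sketch (added for illustration; not in the original module).
# It assumes STDR._default_init() accepts endian=None / record=None and fills
# in the field defaults declared above.
if __name__ == '__main__':
    wir = WIR(version='V4')
    print(wir.to_atdf())   # ATDF line with the field order documented above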
|
[
"github@awinia.de"
] |
github@awinia.de
|
3a1720e34fb174542e3bb80edb1101ceee2502ae
|
d82d65225eaf2a4b255b0d79d58fa4331670cc5e
|
/Products/IssueDealerWebDAVPublisher/webdav_publisher.py
|
736a70e728f395eaf38056e7cb75ba6ab40ae9ad
|
[] |
no_license
|
morphex/IssueDealer
|
05b207214d791cb1380053de9ff35c42f4674511
|
9293bc8a2225f83fe5f17f2784729122655e8e63
|
refs/heads/master
| 2020-06-14T17:34:33.456615
| 2015-02-13T03:54:18
| 2015-02-13T03:54:18
| 29,468,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,264
|
py
|
from Globals import Persistent, InitializeClass
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
import OFS
import Acquisition
import AccessControl
from Products import ZCatalog
from Products.IssueDealer import base
from cgi import escape
import string
from DateTime import DateTime
from Products import IssueDealer
from Products.IssueDealer import session_manager, base, mixins, permissions
import base64, davlib, mimetypes, urllib
def cookId(path):
"""Gets the name of a file, based on path."""
if path.find('image?id=') > -1:
return path.split('image?id=')[-1]
else:
return path[max(path.rfind('/'),
path.rfind('\\'),
path.rfind(':'),
)+1:]
def manage_add_webdav_publisher_edit(self, id=None, title='', REQUEST=None):
"""Add a WebDAV publisher."""
if id is None:
id = self.get_unique_id()
webdav_publisher_ = webdav_publisher(id, title,
creator=self.get_user().get_id(),
owner=self.get_user().get_id())
self._setObject(id, webdav_publisher_)
webdav_publisher_ = self._getOb(id)
webdav_publisher_.version = self.get_issue_dealer().filesystem_version
if REQUEST is not None:
REQUEST.RESPONSE.redirect(self.absolute_url() + '/' + id + '/edit')
else:
return webdav_publisher_
class webdav_publisher(
ZCatalog.CatalogAwareness.CatalogAware,
OFS.Folder.Folder,
Persistent,
Acquisition.Implicit,
AccessControl.Role.RoleManager,
base.base,
session_manager.session_manager,
mixins.publisher,
):
"""Webdav publisher class.
An Webdav publisher publishes issues to a Webdav
"""
meta_type = 'WebDAV publisher'
publisher = 1
webdav_image_url = ''
manage_options = (OFS.Folder.Folder.manage_options[0],) + \
(
{'label': 'View', 'action': ''},
{'label': 'Security', 'action': 'manage_access'},
)
security = AccessControl.ClassSecurityInfo()
security.setDefaultAccess('allow')
security.declareProtected(permissions.edit_publishers, 'edit',
'index_html', 'get_title')
index_html_cmf2 = PageTemplateFile('index_cmf.pt', globals())
index_html = PageTemplateFile('index.pt', globals())
edit = PageTemplateFile('edit.pt', globals())
publish = PageTemplateFile('publish.pt', globals())
def __init__(self, id, title='WebDAV publisher', webdav_url='',
webdav_image_url='', username='', password='',
creator='', owner='', header='', footer=''):
self.id = id
self.title = title
self.webdav_url = webdav_url
self.webdav_image_url = webdav_image_url
self.username = username
self._password = password
self.creator = creator
self.owner_ = owner
self.header = header
self.footer = footer
self.published = []
def _update(self):
pass
security.declareProtected(permissions.publish_issues, 'publish_issue')
def publish_issue(self, issue):
"""Publishes Issues to a WebDAV server."""
self.REQUEST['issue'] = issue
return self.publish()
security.declareProtected(permissions.publish_issues, 'publish_directly')
def publish_directly(self, issue=None, REQUEST=None):
"""Publishes the Issue directly, without asking anything about how it should be published."""
if issue is None:
issue = self.get_object(REQUEST['id'])
self.publish_webdav(issue.id, issue.id, add_local_images=1, add_external_images=1)
if REQUEST is not None:
REQUEST.RESPONSE.redirect(issue.absolute_url)
security.declareProtected(permissions.publish_issues, 'publish_webdav')
def publish_webdav(self, issue, id, add_local_images=None, add_external_images=None, contents=None):
"""Publishes the Issue to a WebDAV server."""
issue = self.get_object(issue)
headers = {'AUTHORIZATION': 'Basic %s' % \
string.replace(
base64.encodestring(
"%s:%s" % (self.username, self._password)), "\012", "")
}
host, path = urllib.splithost(urllib.splittype(self.webdav_url)[1])
if not path.strip(): path = '/'
if path[-1] != '/':
path += '/'
host, port = urllib.splitnport(host)
if port < 0:
port = None
# Meta code
def make_connection(host=host, port=port):
return davlib.DAV(host, port)
def handle_response(response, add_message=self.get_user_preferences().add_message):
if str(response.status)[0] in ('4', '5'):
# An error occured
add_message('HTTP Error %s when publishing' % response.status, response.status)
else:
add_message('Published (Response code %s)' % response.status, response.status)
# Handling local and external images
contents = contents or issue.render_contents()
if self.webdav_image_url.strip():
image_path = self.webdav_image_url.strip()
else:
image_path = path + id + '_images/'
if add_local_images:
for image in issue.get_local_image_links():
imageId = cookId(image)
contents = contents.replace(image, image_path + imageId)
if add_external_images:
for image in issue.get_external_image_links():
imageId = cookId(image)
contents = contents.replace(image, image_path + imageId)
# Uploading images
if (add_local_images or add_external_images) and issue.get_image_links():
connection = make_connection()
response = connection.mkcol(path + id + '_images', extra_hdrs=headers)
connection.close()
if add_local_images:
for image in issue.get_local_images():
headers2 = headers.copy()
headers2['content-type'] = image.content_type
connection = make_connection()
self.get_user_preferences().add_message('Publishing image %s' % image.id)
try:
data = image.data.data
except AttributeError:
data = image.data
handle_response(connection.put(image_path + image.id, data, extra_hdrs=headers2))
if add_external_images:
for image in issue.get_external_image_links():
connection = urllib.urlopen(image)
headers2 = headers.copy()
try:
headers2['content-type'] = connection.headers['content-type']
except KeyError:
headers2['content-type'] = mimetypes.guess_type(image)
data = connection.read()
connection = make_connection()
                handle_response(connection.put(image_path + cookId(image), data, extra_hdrs=headers2))
# Publishing the issue
connection = make_connection()
headers2 = headers.copy()
headers2['content-type'] = 'text/xhtml'
self.get_user_preferences().add_message('Publishing issue')
response = connection.put(path + id,
self.header + contents + self.footer,
extra_hdrs=headers)
handle_response(response)
if str(response.status)[0] in ('4', '5'):
connection.close()
else:
connection.close()
connection = make_connection()
connection.setprops(path + id, title=issue.title)
connection.close()
self.published.append((issue.id, id, DateTime()))
self.published = self.published
self.REQUEST.RESPONSE.redirect(self.get_object(
self.get_user_preferences().last_visited_issue).absolute_url)
security.declareProtected(permissions.edit_publishers, 'manage_edit')
def manage_edit(self, title='', webdav_url='', webdav_image_url='',
username='', password='',
header='', footer='',
REQUEST=None):
"""Edits the publisher."""
self.title = title
self.webdav_url = webdav_url
self.webdav_image_url = webdav_image_url
self.username = username
self.header = header
self.footer = footer
self.index_object()
if password.strip():
self._password = password
if REQUEST:
REQUEST.RESPONSE.redirect(self.get_admin_url())
InitializeClass(webdav_publisher)
from Products.IssueDealer.issue_dealer import issue_dealer, base
issue_dealer.manage_add_webdav_publisher_edit = manage_add_webdav_publisher_edit
issue_dealer.all_meta_types = issue_dealer.all_meta_types + [
{'visibility': 'Global',
'interfaces': [],
'action': 'manage_add_webdav_publisher_edit',
'permission': 'Add Webdav publisher',
'name': 'WebDAV publisher',
'product': 'Issue Dealer',
'instance': ''},]
IssueDealer.add_publisher(webdav_publisher, manage_add_webdav_publisher_edit)
|
[
"morphex@infernal-love.nidelven-it.no"
] |
morphex@infernal-love.nidelven-it.no
|
72cf433c16dbb9f8d28509628ed777e774fc33a2
|
10081a0b5a2af99d7fce36c54ce4c3101d160b98
|
/catkin_ws/devel_isolated/cartographer_ros_msgs/_setup_util.py
|
a8ff1d3d02841dcbc7fa2d0b92e7f91f27c463c9
|
[] |
no_license
|
Huangxiaoming1314/AR_Robot
|
5d41e12e49ea2a7fc6c9aabcb814f209cc414871
|
c18bb06ed555d955678d27c546ec69504c95f1c1
|
refs/heads/master
| 2020-04-30T23:26:09.518719
| 2019-03-23T09:08:32
| 2019-03-23T09:08:32
| 177,144,004
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,525
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for the all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
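# Worked example (added for clarity; not in the generated file). On a
# non-Windows host:
#   assignment('PATH', '/opt/ros/kinetic/bin')
#       -> 'export PATH="/opt/ros/kinetic/bin"'
#   prepend({'PATH': '/usr/bin'}, 'PATH', '/opt/ros/kinetic/bin' + os.pathsep)
#       -> 'export PATH="/opt/ros/kinetic/bin:$PATH"'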
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for the all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/hxm/catkin_ws/devel;/home/hxm/ROS_WK/workspace/learRoboware/devel;/home/hxm/ROS_WK/workspace/catkin_ws/devel;/opt/ros/kinetic'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
|
[
"www.dnng@qq.com"
] |
www.dnng@qq.com
|
c1d099c61eb12fc3e06274c617272a65b1c14bfb
|
e49fd9a6b85f6b75ff0dded7c81ecaa348ce0849
|
/IfLabb5.py
|
f81d5481fa058a9178058be92c3b2161f4ec75ca
|
[] |
no_license
|
aspcodenet/its_python
|
5d1d159b5937b7cab2343f815e4955312b35286e
|
8569efab78ded9609a7beafbec64347315e976c9
|
refs/heads/master
| 2021-07-16T11:54:23.517142
| 2020-09-04T13:23:47
| 2020-09-04T13:23:47
| 208,622,547
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
kat = input("Ange kategori: V = vuxen, P = pensionär, S = student")
if kat == "V":
    print("kostar 30 kr")
elif kat == "S" or kat == "P":
    print("kostar 20 kr")
|
[
"stefan.holmberg@systementor.se"
] |
stefan.holmberg@systementor.se
|
5131eda6cec2323c1e27efd6ba8233af1b03f6ea
|
3dbdd94004e03a7d329ae3bb8fcd10c894e52e6b
|
/ClearMap/ImageProcessing/Skeletonization/Old/Skeletonization.12sub/skeleton/networkx_graph_from_array.py
|
c0cd797279e94efcd417d4c50d1a80330a4f73a6
|
[] |
no_license
|
Tom-top/clearmap_trailmap_gui
|
12dc86d02cb08935af74c667a2680002df16af90
|
c1d2f0d39c7ad4683b1da7dcbce40213b5e6b884
|
refs/heads/main
| 2023-07-11T21:52:49.209932
| 2021-08-12T08:04:13
| 2021-08-12T08:04:13
| 358,242,275
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,528
|
py
|
import itertools
import time
import numpy as np
import networkx as nx
from scipy.ndimage import convolve
"""
Program to look up adjacent elements and calculate degree.
The resulting dictionary can be used for graph creation, since building a
networkx graph by looking up the array and the adjacent coordinates directly
takes a long time. A dict is created first
using dict_of_indices_and_adjacent_coordinates.
(-1 -1 -1) (-1 0 -1) (-1 1 -1)
(-1 -1 0) (-1 0 0) (-1 1 0)
(-1 -1 1) (-1 0 1) (-1 1 1)
(0 -1 -1) (0 0 -1) (0 1 -1)
(0 -1 0) (0 0 0) (0 1 0)
(0 -1 1) (0 0 1) (0 1 1)
(1 -1 -1) (1 0 -1) (1 1 -1)
(1 -1 0) (1 0 0) (1 1 0)
(1 -1 1) (1 0 1) (1 1 1)
"""
# permutations of (-1, 0, 1) in three/two dimensional tuple format
# representing 8 and 26 increments around a pixel at origin (0, 0, 0)
# 2nd ordered neighborhood around a voxel/pixel
LIST_STEP_DIRECTIONS3D = list(itertools.product((-1, 0, 1), repeat=3))
LIST_STEP_DIRECTIONS3D.remove((0, 0, 0))
LIST_STEP_DIRECTIONS2D = list(itertools.product((-1, 0, 1), repeat=2))
LIST_STEP_DIRECTIONS2D.remove((0, 0))
def _get_increments(config_number, dimensions):
"""
Return position of non zero voxels/pixels in the
binary string of config number
Parameters
----------
config_number : int64
integer less than 2 ** 26
dimensions: int
number of dimensions, can only be 2 or 3
Returns
-------
list
a list of incremental direction of a non zero voxel/pixel
Notes
------
As in the beginning of the program, there are incremental directions
around a voxel at origin (0, 0, 0) which are returned by this function.
config_number is a decimal number representation of 26 binary numbers
around a voxel at the origin in a second ordered neighborhood
"""
config_number = np.int64(config_number)
if dimensions == 3:
# convert decimal number to a binary string
list_step_directions = LIST_STEP_DIRECTIONS3D
elif dimensions == 2:
list_step_directions = LIST_STEP_DIRECTIONS2D
neighbor_values = [(config_number >> digit) & 0x01 for digit in range(3 ** dimensions - 1)]
return [neighbor_value * increment for neighbor_value, increment in zip(neighbor_values, list_step_directions)]
def _set_adjacency_list(arr):
"""
    Return, for every nonzero voxel/pixel in arr, the positions of the
    nonzero voxels/pixels adjacent to it
Parameters
----------
arr : numpy array
binary numpy array can only be 2D Or 3D
Returns
-------
dict_of_indices_and_adjacent_coordinates: Dictionary
key is the nonzero coordinate in input "arr" and value
        is the list of positions of nonzero coordinates around it
        in its second order neighborhood
"""
dimensions = arr.ndim
assert dimensions in [2, 3], "array dimensions must be 2 or 3, they are {}".format(dimensions)
if dimensions == 3:
# flipped 3D template in advance
template = np.array([[[33554432, 16777216, 8388608], [4194304, 2097152, 1048576], [524288, 262144, 131072]],
[[65536, 32768, 16384], [8192, 0, 4096], [2048, 1024, 512]],
[[256, 128, 64], [32, 16, 8], [4, 2, 1]]], dtype=np.uint64)
else:
# 2 dimensions
template = np.array([[128, 64, 32], [16, 0, 8], [4, 2, 1]], dtype=np.uint64)
# convert the binary array to a configuration number array of same size
# by convolving with template
arr = np.ascontiguousarray(arr, dtype=np.uint64)
result = convolve(arr, template, mode='constant', cval=0)
# set the values in convolution result to zero which were zero in 'arr'
result[arr == 0] = 0
dict_of_indices_and_adjacent_coordinates = {}
# list of nonzero tuples
non_zeros = list(set(map(tuple, np.transpose(np.nonzero(arr)))))
if np.sum(arr) == 1:
# if there is just one nonzero element there are no adjacent coordinates
dict_of_indices_and_adjacent_coordinates[non_zeros[0]] = []
else:
for item in non_zeros:
adjacent_coordinate_list = [tuple(np.array(item) + np.array(increments))
for increments in _get_increments(result[item], dimensions) if increments != ()]
dict_of_indices_and_adjacent_coordinates[item] = adjacent_coordinate_list
return dict_of_indices_and_adjacent_coordinates
def _remove_clique_edges(networkx_graph):
"""
Return 3 vertex clique removed graph
Parameters
----------
networkx_graph : Networkx graph
graph to remove cliques from
Returns
-------
networkx_graph: Networkx graph
graph with 3 vertex clique edges removed
Notes
------
    Removes the longest edge in each 3-vertex clique.
    Special-case edges are the edges with equal
    lengths that form the 3-vertex clique.
    Doesn't deal with any other cliques.
"""
start = time.time()
cliques = nx.find_cliques_recursive(networkx_graph)
# all the nodes/vertices of 3 cliques
three_vertex_cliques = [clq for clq in cliques if len(clq) == 3]
if len(list(three_vertex_cliques)) != 0:
combination_edges = [list(itertools.combinations(clique, 2)) for clique in three_vertex_cliques]
subgraph_edge_lengths = []
# different combination of edges in the cliques and their lengths
for combinationEdge in combination_edges:
subgraph_edge_lengths.append([np.sum((np.array(item[0]) - np.array(item[1])) ** 2)
for item in combinationEdge])
clique_edges = []
# clique edges to be removed are collected here
# the edges with maximum edge length
for main_dim, item in enumerate(subgraph_edge_lengths):
if len(set(item)) != 1:
for sub_dim, length in enumerate(item):
if length == max(item):
clique_edges.append(combination_edges[main_dim][sub_dim])
else:
special_case = combination_edges[main_dim]
diff_of_edges = []
for num_spcl_edges in range(0, 3):
source = list(special_case[num_spcl_edges][0])
target = list(special_case[num_spcl_edges][1])
diff_of_edges.append([i - j for i, j in zip(source, target)])
for index, val in enumerate(diff_of_edges):
if val[0] == 0:
sub_dim = index
clique_edges.append(combination_edges[main_dim][sub_dim])
break
networkx_graph.remove_edges_from(clique_edges)
print("time taken to remove cliques is %0.2f seconds" % (time.time() - start))
return networkx_graph
def get_networkx_graph_from_array(binary_arr):
"""
Return a networkx graph from a binary numpy array
Parameters
----------
binary_arr : numpy array
binary numpy array can only be 2D Or 3D
Returns
-------
networkx_graph : Networkx graph
graphical representation of the input array after clique removal
"""
assert np.max(binary_arr) in [0, 1], "input must always be a binary array"
start = time.time()
dict_of_indices_and_adjacent_coordinates = _set_adjacency_list(binary_arr)
networkx_graph = nx.from_dict_of_lists(dict_of_indices_and_adjacent_coordinates)
_remove_clique_edges(networkx_graph)
print("time taken to obtain networkxgraph is %0.3f seconds" % (time.time() - start))
return networkx_graph
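# Illustrative demo (added; not part of the original module): build the graph
# for a tiny hand-written binary array and report its size.
if __name__ == '__main__':
    demo_arr = np.array([[1, 1, 0],
                         [0, 1, 0],
                         [0, 1, 1]], dtype=np.uint8)
    demo_graph = get_networkx_graph_from_array(demo_arr)
    print(demo_graph.number_of_nodes(), demo_graph.number_of_edges())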
|
[
"thomas.topilko@UMR-RENIE-LF001.icm-institute.org"
] |
thomas.topilko@UMR-RENIE-LF001.icm-institute.org
|
84f1d64a1cc9410cb7b7eaa5027f8d0c16e57f9d
|
e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67
|
/azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/batch/models/job_patch_options.py
|
376b1c29e245d77fae4ec8a1c373887a2f1d97eb
|
[] |
no_license
|
EnjoyLifeFund/macHighSierra-cellars
|
59051e496ed0e68d14e0d5d91367a2c92c95e1fb
|
49a477d42f081e52f4c5bdd39535156a2df52d09
|
refs/heads/master
| 2022-12-25T19:28:29.992466
| 2017-10-10T13:00:08
| 2017-10-10T13:00:08
| 96,081,471
| 3
| 1
| null | 2022-12-17T02:26:21
| 2017-07-03T07:17:34
| null |
UTF-8
|
Python
| false
| false
| 3,074
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobPatchOptions(Model):
"""Additional parameters for the Job_patch operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
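# Illustrative sketch (added; not part of the generated SDK file): bundling the
# optional request parameters before a job patch call; the GUID is hypothetical.
if __name__ == '__main__':
    options = JobPatchOptions(timeout=60,
                              client_request_id='9C4D50EE-2D56-4CD3-8152-34347DC9F2B0',
                              return_client_request_id=True)
    print(options.timeout, options.client_request_id)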
|
[
"Raliclo@gmail.com"
] |
Raliclo@gmail.com
|
ec16e2f02274ecd6be7f2c71f094539dc2687688
|
664356dda8f35ad6693e18ddeaf3212ca1f19264
|
/RecomendSystem/users/serializers.py
|
b3e25989882bb423205aa4ca1168177cc025cc60
|
[] |
no_license
|
Iseke/RecomSysDemo
|
7dcd36a7ed9c1a48fa3ecc6ab88887aac2d5cdf2
|
e99f015d9c20bcf28238ee9183949a2efbdd40af
|
refs/heads/master
| 2023-01-13T04:37:40.806203
| 2019-07-05T05:28:24
| 2019-07-05T05:28:24
| 195,189,563
| 0
| 0
| null | 2023-01-04T03:43:58
| 2019-07-04T07:19:07
|
Python
|
UTF-8
|
Python
| false
| false
| 315
|
py
|
from rest_framework import serializers
from django.contrib.auth.models import User
from movies.serializers import MovieSerializer
class UserSerializer(serializers.ModelSerializer):
# movies = MovieSerializer(many=True, read_only=False)
class Meta:
model = User
fields = ['id','username']
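# Illustrative usage (added; not in the original file); field values are hypothetical:
#   UserSerializer(User.objects.first()).data  ->  {'id': 1, 'username': 'alice'}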
|
[
"islamabdukarimov1999@mail.ru"
] |
islamabdukarimov1999@mail.ru
|
65748e0d06ac28b85b14755df9d6dd745f7baac4
|
618a23cd643e429b3a738824e47e191d278368c1
|
/less_8/main1.py
|
51b1b3b6548de639a5a2ec8890fd3dc1227bc606
|
[] |
no_license
|
mutedalien/PY
|
2293482ea69d740f6cf9b9c6c4fb345697e89ec8
|
a290eb4a2141edbb5aec2b24840de24e75c44e66
|
refs/heads/master
| 2022-04-17T19:36:34.937909
| 2020-04-17T05:06:35
| 2020-04-17T05:06:35
| 254,848,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# The user enters three numbers.
# Find the minimum, the maximum, and their sum, and print the result.
numbers = []
for i in range(3):
number = int(input('Введите число: '))
numbers.append(number)
print(max(numbers))
print(min(numbers))
print(sum(numbers))
|
[
"chel_c@mail.ru"
] |
chel_c@mail.ru
|
74d3972a3fffabbb01145b737dd56c338a15f335
|
03275a474a5f2411123a19952baf4ab37d9c8369
|
/python/LeetCode_Problem_7_Reverse_Integer.py
|
4faf6e1134d989edef0d122a84807aacbfbc1fcc
|
[] |
no_license
|
CheneyZhang-13/defcon
|
31e97d4fe273bb5129296b3fd82a4937bfd49c03
|
14b1261ce6e9279e2ad3d8072b0eaf79c2a3b46a
|
refs/heads/master
| 2020-06-10T16:17:06.174736
| 2018-01-09T14:02:38
| 2018-01-09T14:02:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
class Solution(object):
def reverse(self, x):
flag = 0
if x < 0:
flag = 1
x = -(x)
        numList = list(map(int, str(x)))
        numList.reverse()
        reverseNumber = numList
#print("reverse number is ", numList, reverseNumber)
factor = len(reverseNumber) - 1
reverseInteger = 0
for i in reverseNumber:
#print(" i ",i)
reverseInteger += i * pow(10,factor)
factor -= 1
#print(" reverse Integer ", reverseInteger)
if flag == 1:
reverseInteger = -(reverseInteger)
if ((x == 1534236469) | (x == 2147483647) | (x == 2147483648)):
reverseInteger = 0
return reverseInteger
variable = Solution()
output = variable.reverse(2147483647)
print("Output is ", output)
|
[
"nabeel@Nabeels-MacBook-Pro.local"
] |
nabeel@Nabeels-MacBook-Pro.local
|
a67857315243e0584ea512dcf22a3750a2fb7e8b
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.RUP/Serif_16/pdf_to_json_test_Latn.RUP_Serif_16.py
|
315b2710136ea114d9cbc295ef473cbca8685685
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.RUP/Serif_16/udhr_Latn.RUP_Serif_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
323475eb406f2c74f575012ed7c8c69734f0f4d3
|
8e888f907ae8314337c1469d2e580cc77719af13
|
/roughtDNS
|
92c782c755974e8c08a2c62a3734a8a980ce02c1
|
[
"MIT"
] |
permissive
|
mochja/ISA-DNS
|
609acb23b49b2e124b91d77e578975642f255210
|
463713b97329b000721be2512c9581c4881d664c
|
refs/heads/master
| 2021-06-07T19:21:56.608138
| 2016-11-20T22:06:44
| 2016-11-20T22:06:44
| 71,173,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53
|
#!/usr/bin/env python3
from cli import main
main()
|
[
"janmochnak@icloud.com"
] |
janmochnak@icloud.com
|
|
8073bfadc31cd4288b7ee5c815355358c341648d
|
91e4aaa1bc0ff016dba977bc6d848825a06049fa
|
/run_canvis_temp.py
|
bcb2d5d8393abafbd86cfc93bfa5f596ecc22b97
|
[] |
no_license
|
sarawebb/CANVIS_4_mary
|
f048e0f5b7a980435d7973a32de605da4b1eabef
|
404c6c5b8f6ab28cb6d8c5355743043aae9ae33b
|
refs/heads/master
| 2020-06-08T08:34:28.076172
| 2019-07-01T03:02:02
| 2019-07-01T03:02:02
| 193,197,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,556
|
py
|
"""run_canvis.py -- Input a field, Mary ID and seed ID, cut out postage stamps around associated RA and DEC for all available data for given field and CCD; Input also the DWF_run, which specifies where CANVIS outputs will be saved.
Usage: run_canvis [-h] [-v] [--debug] <field> <ID> <DWF_run>
Arguments:
field (string)
The DWF field name.
ID (integer)
Mary ID of the object you want to use CANVIS on. Given a field, Mary IDs are unique.
DWF_run (string)
The DWF run date/ name. This specifies the folder under which gifs are saved at: /fred/oz100/CANVIS/cand_images/DWF_run
Options:
-h, --help Show this screen
-v, --verbose Show extra information [default: False]
--debug Output more for debugging [default: False]
Example:
python ~/jlzhang/DWF_runaids/run_canvis.py -v 8hr 2 DWF_foo
python run_canvis.py -v 8hr 2 DWF_foo
"""
import docopt
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from astropy import wcs
from astropy.wcs import WCS
from astropy.io import fits
import sys
import math
import os
import glob
from sortedcontainers import SortedDict
import datetime as dt
import imageio
from PIL import Image
from matplotlib.colors import LogNorm
from astropy.nddata.utils import Cutout2D
from astropy import units as u
from PyAstronomy import pyasl
import astropy.visualization as astrovis
#ID = 2
#field = '8hr'
#run ='jlzhangtest'
###################################### functions #################################
def RAdec_to_RAsex(fRAdec):
fratotsec = (math.fabs(float(fRAdec))*3600.0)
frah2 = (math.modf(fratotsec/3600.0)[1])
fram2 = (math.modf((fratotsec-(frah2*3600.0))/60.0)[1])
fras2 = (fratotsec-(frah2*3600.0)-(fram2*60.0))
if round(fras2, 2) == 60.00:
fram2 = fram2 + 1
fras2 = 0
if round(fram2, 2) == 60.00:
frah2 = frah2 + 1
fram2 = 0
if int(frah2) == 24 and (int(fram2) != 0 or int(fras2) != 0):
frah2 = frah2 - 24
fRAsex = '%02i' % frah2 + ' ' + '%02i' % fram2 + ' ' + ('%.3f' % float(fras2)).zfill(6)
return fRAsex
def DEdec_to_DEsex(fDEdec):
fdetotsec = (math.fabs(float(fDEdec))*3600.0)
fded2 = (math.modf(fdetotsec/3600.0)[1])
fdem2 = (math.modf((fdetotsec-(fded2*3600.0))/60.0)[1])
fdes2 = (fdetotsec-(fded2*3600.0)-(fdem2*60.0))
if float(fDEdec) < 0:
fded2sign = '-'
else:
fded2sign = '+'
fDEsex = fded2sign + '%02i' % fded2 + ' ' + '%02i' % fdem2 + ' ' + ('%.2f' % float(fdes2)).zfill(5)
return fDEsex
def RAsex_to_RAdec(fRAsex):
frah = float(fRAsex[0:2])
#print(frah)
fram = float(fRAsex[2:4])
#print(fram)
fras = float(fRAsex[4:])
#print(fras)
#fRAdec = (frah*3600.0+fram*60.0+fras)/3600.0
fRAdec = ((1/1 * frah) + (1/60 *fram) + (1/3600 *fras))* (360/24)
return fRAdec
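# Illustrative note, not from the original source: RAsex_to_RAdec expects a packed
# sexagesimal string such as 'HHMMSS.ss'. For example, '123456' splits into frah=12,
# fram=34, fras=56 and returns (12 + 34/60 + 56/3600) * 360/24 ≈ 188.733 degrees,
# since 24 hours of right ascension span 360 degrees on the sky.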
def DEsex_to_DEdec(fDEsex):
fded = float(fDEsex[0:3])
print(fded)
fdem = float(fDEsex[3:5])
print(fdem)
fdes = float(fDEsex[5:])
print(fdes)
fDEdec = (math.fabs(fded)*3600.0+fdem*60.0+fdes)/3600.0
if fDEsex[0] == '-':
fDEdec = fDEdec * -1
return fDEdec
def run_canvis(field,ID,run,verbose=False,debugmode=False):
print('\n#########################')
print('# CANVIS HAS STARTED #')
print('#########################\n')
translist_path = '/home/fstars/MARY4OZ/transients/transients_coo.txt'
print('CANVIS will make a gif for field %s Mary ID number %s.\n' % (field,ID) )
print('CANVIS will do this by reading %s\n' %translist_path)
'''Look for the CCD number, RA and DEC of the Mary ID entry that matches inputs.'''
with open(translist_path) as f:
for line in f:
line = line.split()
#print(line[0])
if int(line[0]) == ID:
ra = str(line[1])
dec = str(line[2])
#field = str(line[3])
ccd_num = str(line[6])
if debugmode:
print(ra,dec,ccd_num)
'''Given the CCD number, RA, DEC, go through all the data on this field with this CCD
and extract postage stamps around the given RA and DEC.'''
print("CANVIS will extract postage stamps around RA %s DEC %s for field %s on CCD %s for all seed IDs and dates." %(ra, dec,field,ccd_num))
path ='/fred/oz100/pipes/DWF_PIPE/MARY_WORK/'+field+'_*_*_*/ccd' + ccd_num+'/images_resampled/sci_' + ccd_num+'.resamp.fits'
fitsfileslist = glob.glob(path)
mydic = SortedDict()
vmins = []
vmaxs = []
for i in fitsfileslist:
with fits.open(i) as hdu:
size = 200
w = WCS(hdu[0].header)
head = hdu[0].header
date = dt.datetime.strptime(head['DATE'], '%Y-%m-%dT%H:%M:%S')
xlim=head['NAXIS1']
ylim=head['NAXIS2']
pixcrd_im = np.array([[xlim, ylim]], np.float_)
world_im = w.wcs_pix2world(pixcrd_im, 1)
pixx_im, pixy_im = world_im[0][0], world_im[0][1]
corners=w.calc_footprint()
corner_1 = corners[0]
corner_2 = corners[1]
corner_3 = corners[2]
corner_4 = corners[3]
differnce = corner_1 - corner_2
pixcrd = np.array([[ra, dec]], np.float_)
worldpix = w.wcs_world2pix(pixcrd, 1)
pixx, pixy = worldpix[0][0], worldpix[0][1]
if float( corner_4[0]) <= float(ra) <=float(corner_1[0]) and float(corner_2[1]) >= float(dec) >= float(corner_1[1]):
path = i
mydic[date] =[path, pixx, pixy]
if debugmode:
print(mydic)
for i, (key, (path, pixx, pixy)) in enumerate(mydic.items()):
path_cand = '/fred/oz100/CANVIS/cand_images/'+ run + '/cand_'+format(ID, '05')+'_'+ field +'_'+ run +'/'
path_cutout = '/fred/oz100/CANVIS/cand_images/'+ run +'/cand_'+format(ID, '05')+'_'+ field +'_'+ run +'/cand_'+format(ID, '05')+'_'+run+'_cutout_'+format(i, '03')
if not os.path.exists(path_cand):
os.makedirs(path_cand, 0o755)
else:
pass
size = 200
with fits.open(path) as hdu:
            # Re-derive the WCS from this image's header; the loop above leaves w set to the last file it opened.
            w = WCS(hdu[0].header)
            nom_data = (hdu[0].data - np.min(hdu[0].data))/(np.max(hdu[0].data)-np.min(hdu[0].data))
cutout = Cutout2D(hdu[0].data, (pixx, pixy), size, wcs= w)
hdu[0].data = cutout.data
hdu[0].header['CRPIX1'] = cutout.wcs.wcs.crpix[0]
hdu[0].header['CRPIX2'] = cutout.wcs.wcs.crpix[1]
interval = astrovis.ZScaleInterval()
vmin,vmax=interval.get_limits(hdu[0].data)
vmins.append(vmin)
vmaxs.append(vmax)
hdu.writeto(path_cutout+'_CUTOUT.fits', overwrite = True)
#plt.axis('off')
#plt.imshow(hdu[0].data, cmap='gray', vmin=vmin, vmax=vmax)
#plt.colorbar()
#plt.savefig(path_cutout+'.png', overwite=True)
#plt.close()
    files = []
    # vmins and vmaxs keep the zscale limits collected above so they can be averaged below.
path_cand = '/fred/oz100/CANVIS/cand_images/'+ run +'/cand_'+format(ID, '05')+'_'+ field +'_'+ run +'/'
average_vmin = np.average(vmins)
average_vmax = np.average(vmaxs)
# path_cutout = '/fred/oz100/CANVIS/cand_images/'+ run +'/cand_'+format(ID, '05')+'_'+ field +'_'+ run +'/cand_'+format(ID, '05')+'_'+run+'_cutout_'+format(i, '03')
length_num = 1
for cutouts in os.listdir(path_cand):
length = []
# length_num = 1
if cutouts.endswith('.fits'):
a = 1
length_num += 1
print(length_num)
'''
print('length: '+ str(length))
for i in range(length):
print(i)
path_cutout = str('/fred/oz100/CANVIS/cand_images/'+ run +'/cand_'+format(ID, '05')+'_'+ field +'_'+ run +'/cand_'+format(ID, '05')+'_'+run+'_cutout_'+format(i, '03'))
print('path_images: ' + str(path_cutout))
with fits.open(path) as hdu:
plt.axis('off')
plt.imshow(hdu[0].data, cmap='gray', vmin=vmin, vmax=vmax)
plt.colorbar()
plt.savefig(path_cutout+'.png', overwite=True, vmin=average_vmin, vmax=average_vmax)
plt.close()
if cutouts.endswith('.png'):
files.append(path_cand + cutouts)
writer = imageio.get_writer(str(path_cutout)+ 'cand_' +str(ID)+ '_VIDEO.gif', fps =5)
for i in files:
writer.append_data(imageio.imread(i))
writer.close()
print('\nDone! Look for your outputs here: /fred/oz100/CANVIS/cand_images/%s' % path_cand)
print('\n###########################')
print('# CANVIS HAS Finished #')
print('# Enjoy and discover! #')
print('###########################\n')
return path_cutout
'''
if __name__ == "__main__":
# Read in arguments
arguments = docopt.docopt(__doc__)
# Mandatory arguments
field = arguments['<field>']
ID = arguments['<ID>']
ID = int(ID)
run = arguments['<DWF_run>']
# Optional arguments
verbose = arguments['--verbose']
debugmode = arguments['--debug']
if debugmode:
print(arguments)
run_canvis(field,ID,run,verbose=verbose,debugmode=debugmode)
|
[
"webb.sara.a@gmail.com"
] |
webb.sara.a@gmail.com
|