blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c9d9e59cfb1c0b5c982676b2b0e139cae6120010 | 618bb00f2e647b58ed69295f1d6fd869eab35525 | /preprocess_imgs.py | 0954091fd29b4410558dcec114946d382f3f3ca9 | [] | no_license | hbata/tourstic | a7d807006b0551ce76a64daaf9079eecda254436 | b0bba3c5cbca5debd1c4ca6b06b46151508decd7 | refs/heads/master | 2020-04-09T23:56:18.137971 | 2018-12-06T13:39:01 | 2018-12-06T13:39:01 | 160,670,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,896 | py | import numpy as np
import cv2
import PIL
from PIL import Image
import os
from multiprocessing.pool import Pool
def rename_imgs(data_dir='dataset'):
    """Rename every image in each matching dataset sub-folder to a sequential index.

    Each file keeps its original extension, so ``photo.png`` becomes ``0.png``.
    The ``data_dir`` parameter generalizes the previously hard-coded 'dataset'
    path and defaults to it, so existing no-argument callers are unaffected.
    """
    # Original filter called os.path.isdir(data_dir) (always the parent dir,
    # so always True) and mixed 'and'/'or' without parentheses; check each
    # entry itself and parenthesize the suffix test instead.
    folders = [f for f in os.listdir(data_dir)
               if os.path.isdir(os.path.join(data_dir, f))
               and (f.endswith('h') or f.endswith('r'))]
    print(folders)
    for folder in folders:
        folder_path = os.path.join(data_dir, folder)
        image_files = [ff for ff in os.listdir(folder_path)
                       if os.path.isfile(os.path.join(folder_path, ff))]
        for idx, filename in enumerate(image_files):
            extension = os.path.splitext(filename)[1]
            new_name = str(idx) + extension
            # NOTE(review): this clobbers any pre-existing file already named
            # '<idx><ext>' — acceptable for one-shot dataset preparation.
            os.rename(os.path.join(folder_path, filename),
                      os.path.join(folder_path, new_name))
def image_resize(basewidth, input_dir, outdir):
    """Resize every image in input_dir to basewidth, preserving aspect ratio.

    Results are written under outdir (created if missing) with the same
    file names.
    """
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    images = [ff for ff in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, ff))]
    for img_file in images:
        img = Image.open(os.path.join(input_dir, img_file))
        # Scale the height by the same factor as the width so the aspect
        # ratio is preserved.
        w_perc = basewidth / float(img.size[0])
        hsize = int(float(img.size[1]) * w_perc)
        # Image.LANCZOS replaces PIL.Image.ANTIALIAS, a deprecated alias of
        # the same filter that was removed in Pillow 10.
        img = img.resize((basewidth, hsize), Image.LANCZOS)
        img.save(os.path.join(outdir, img_file))
def image_resize2(img_file, out_dir, basewidth):
    """Resize each image in the list img_file to a basewidth x basewidth square.

    Results are saved into out_dir under the original base name. The
    aspect-ratio computation of the original (w_perc/hsize) was dead code —
    the commented-out resize call was superseded by the square resize — and
    has been removed.
    """
    for im in img_file:
        image = Image.open(im)
        img = image.resize((basewidth, basewidth))
        # os.path.basename is the portable replacement for im.split('/')[-1].
        name = os.path.basename(im)
        print(name)
        img.save(out_dir + '/' + name)
def yielder(input_dir, chunk_size=50):
    """Yield successive lists of up to chunk_size image paths from input_dir.

    Windows shortcut files ('.lnk') are skipped, as in the original. Unlike
    the original while-loop, this never yields a spurious empty chunk when
    the directory contains no images.
    """
    images = [input_dir + '/' + ff for ff in os.listdir(input_dir) if
              os.path.isfile(os.path.join(input_dir, ff)) and not ff.endswith('.lnk')]
    # Simple slice-based chunking: start indices 0, chunk_size, 2*chunk_size, ...
    for start in range(0, len(images), chunk_size):
        yield images[start:start + chunk_size]
def parallel_resize(input_dir, out_dir, basewidth):
    """Resize all images under input_dir into out_dir using 8 worker processes.

    Chunks of file paths come from yielder(); each chunk is handed to
    image_resize2 asynchronously, then all results are awaited.
    """
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    with Pool(processes=8) as pool:
        pending = [pool.apply_async(image_resize2, (chunk, out_dir, basewidth))
                   for chunk in yielder(input_dir)]
        # Block until every chunk has been processed (propagates worker errors).
        for task in pending:
            task.get()
if __name__ == '__main__':
    # rename_imgs()
    # Resize both image folders to a 200px base width, in parallel.
    # NOTE(review): out_dir1 is named "petra_resized" while its input folder
    # is "khazneh" — confirm the output folder name is intentional.
    out_dir1 = 'dataset/petra_resized'
    input_dir1 = 'dataset/khazneh'
    out_dir2 = 'dataset/theater_resized'
    input_dir2 = 'dataset/theater'
    basewidth = 200
    parallel_resize(input_dir1, out_dir1, basewidth)
    parallel_resize(input_dir2, out_dir2, basewidth)
| [
"hesham.bataineh@aiesec.net"
] | hesham.bataineh@aiesec.net |
e9ba71831df58fe8207554f2e696a50742e9640a | 20c81ad33259a381080d0a03c72894fea79fa374 | /ClassLabs/Lab8/Python_Lab8_Home.py | bb733d069074b6f6f45bbfe36ea1bd0d936ee049 | [] | no_license | Verbs2004/Python-Programming-In-Context | c9985c59ad6ea2919ad9f5c0f5d343ee61342347 | 5aa5e571c62e070004190b09b85bd9d2628ec3bf | refs/heads/master | 2021-01-23T14:39:57.094228 | 2013-07-15T14:59:41 | 2013-07-15T14:59:41 | 102,695,111 | 1 | 0 | null | 2017-09-07T05:28:45 | 2017-09-07T05:28:45 | null | UTF-8 | Python | false | false | 2,217 | py | # 1: Write a function that takes a single character digit and returns
# its integer value. For example, if the function name is intval, intval('9')
# will return 9(the integer 9, not string '9')
def intval(i):
    """Return the integer value of a single character digit, e.g. '9' -> 9."""
    return int(i)
# 2: Write the letterToIndex function using ord and chr. letterToIndex is
# defined in page 93 of the textbook.
def letterToIndex(ch):
    """Return the 0-based alphabet index of a lowercase letter (a=0 ... z=25).

    Returns -1 when ch is a single character outside 'a'-'z' (matching the
    miss value of str.find used by the original). Raises TypeError for
    non-single-character input, exactly as the original's ord(ch) did.
    The original converted ch with ord() and straight back with chr() before
    searching the alphabet string — the round-trip was a no-op; this version
    uses ord() directly, as the exercise intends.
    """
    code = ord(ch)  # same TypeError contract as the original
    if ord('a') <= code <= ord('z'):
        return code - ord('a')
    return -1
# 3: Write the indexToLetter function using ord & chr. indexToLetter
# is also defined in page 93.
def index2letter(index):
    """Return the lowercase letter at the given 0-based alphabet index (0 -> 'a').

    Returns '' for an index outside 0-25. The original encoded the alphabet
    as a comma-joined string of ord() codes and tested membership with
    substring search, which is roundabout and fragile for out-of-range
    indexes; chr() computes the letter directly.
    """
    if 0 <= index <= 25:
        return chr(index + ord('a'))
    return ''
# 4: Write a function that takes an exam score from 0-100 and returns the
# corresponding letter grade. Use the same grading scale your professor
# does for this class.
def grade(score=None):
    """Return the letter grade for an exam score from 0-100.

    Parameters
    ----------
    score : int, optional
        The numeric score. When omitted — backward compatible with the
        original no-argument version — the score is read from stdin.

    Returns
    -------
    str
        The letter grade on the course scale.
    """
    if score is None:
        score = int(input('Enter your score: '))
    # (cutoff, grade) pairs from highest to lowest. The original's redundant
    # 'x >= a or x >= b' chains collapse to these single cutoffs; the final
    # return also closes the gap between 59 and 60 that previously left
    # lettergrade unassigned for non-integer scores.
    scale = [(95, "A"), (90, "A-"), (88, "B+"), (83, "B"), (80, "B-"),
             (78, "C+"), (73, "C"), (70, "C-"), (68, "D+"), (63, "D"),
             (60, "D-")]
    for cutoff, lettergrade in scale:
        if score >= cutoff:
            return lettergrade
    return "F"
###################################################################
"""def index2letter(index):
... alphabet = "abcdefghijklmnopqrstuvwxyz"
... letters = ''
... num = 97
... a = index + num
... for x in alphabet:
... letters = letters + "," + str(ord(x))
... b = ''
... if str(a) in letters:
... b = chr(int(a))"""
| [
"thyrusg@thyrusgorges.com"
] | thyrusg@thyrusgorges.com |
b3a045a004cc3bffc590d6b0196b26d6b5724efd | 3a66cdcf904fa317ea6cd590effab1a6136e2ec0 | /Calculator/calculator.py | 326ad42a13f119b0335fe970519a13f1a81e9f16 | [] | no_license | Ronjaek/TDT4113-Programming-Project | 471c0127a3ff7519d029f21b6571b02cb506c02f | eea151adcb00db12892b8b27fc6f0163bc4a6a31 | refs/heads/master | 2022-12-14T13:21:50.114141 | 2020-09-03T17:11:51 | 2020-09-03T17:11:51 | 292,629,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,179 | py | """Hovedklassen i programmet"""
import numpy
import numbers
import re
from function import Function
from operator_ import Operator
from queue import Queue
from stack import Stack
class Calculator:
    """Infix calculator: tokenizes text, converts it to RPN (shunting-yard), evaluates it."""
    def __init__(self):
        """Give the calculator access to the arithmetic operators and functions."""
        # Define the supported functions by linking them to numpy callables.
        self.functions = {"EXP": Function(numpy.exp),
                          "LOG": Function(numpy.log),
                          "SIN": Function(numpy.sin),
                          "COS": Function(numpy.cos),
                          "SQRT": Function(numpy.sqrt)}
        # Define the available operators (Norwegian token names: PLUSS=add,
        # GANGE=multiply, DELE=divide, MINUS=subtract). The second argument
        # is the operator's precedence ("strength").
        self.operators = {"PLUSS": Operator(numpy.add, 0),
                          "GANGE": Operator(numpy.multiply, 1),
                          "DELE": Operator(numpy.divide, 1),
                          "MINUS": Operator(numpy.subtract, 0)}
        # Queue holding the expression in reverse Polish notation (RPN).
        self.output_queue = Queue()
    def rpn(self):
        """Evaluate the RPN expression currently stored in self.output_queue."""
        operator_stack = Stack()
        # range() is evaluated once, so this iterates exactly the queue's
        # initial size while popping it empty element by element.
        for i in range(self.output_queue.size()):
            element = self.output_queue.pop()
            if isinstance(element, numbers.Number):
                # Numbers go straight onto the evaluation stack.
                operator_stack.push(element)
            elif isinstance(element, Function):
                # Unary function: pop one operand and apply the function to it.
                result = element.execute(operator_stack.pop())
                # Push the result back on the stack.
                operator_stack.push(result)
            elif isinstance(element, Operator):
                # Pop two operands off the stack.
                element2 = operator_stack.pop() # popped first -> right operand
                element1 = operator_stack.pop() # popped last -> left operand
                result = element.execute(element1, element2)
                # Push the new result back onto the stack.
                operator_stack.push(result)
            else:
                # Element is neither number, operator nor function.
                print("Something is wrong with your RPN-queue")
                break
        if not (self.output_queue.size() == 0 and operator_stack.size() == 1):
            print("Something is wrong - output_queue should have been empty and operator stack should have one item")
        return operator_stack.pop() # the final result is the last stack item
    def normal_calculator(self, elements):
        """Translate a token list in infix order into RPN via the shunting-yard algorithm."""
        output_queue = Queue() # the RPN queue this method builds and returns via self
        operator_stack = Stack() # scratch stack that orders the operators correctly
        for element in elements:
            if isinstance(element, numbers.Number):
                output_queue.push(element) # numbers pass straight through
                print("numb", output_queue)
            # Checks that element is a Function instance (does not call the Function class).
            elif isinstance(element, Function):
                operator_stack.push(element) # functions wait on the operator stack
                print("func", operator_stack)
            elif element == "(":
                operator_stack.push(element) # opening parens wait on the operator stack
                print("(", operator_stack)
            elif element == ")":
                # Drain operators into the output queue until the matching "(".
                while operator_stack.peek() != "(" and operator_stack.size() != 0:
                    operator = operator_stack.pop()
                    output_queue.push(operator)
                operator_stack.pop() # discard "("; the ")" itself is never pushed
                print(")", "oper: ", operator_stack, "out:", output_queue)
            elif isinstance(element, Operator):
                # Pop stacked operators of equal/higher precedence before pushing this one.
                while operator_stack.size() != 0:
                    # peek() decides whether the top element moves to the output queue.
                    if isinstance(operator_stack.peek(), Operator):
                        if operator_stack.peek().strength < element.strength:
                            break # stop when the stacked operator binds more weakly
                    if operator_stack.peek() == "(" or operator_stack.peek() == ")":
                        # Never pop past a parenthesis boundary.
                        break
                    temp = operator_stack.pop()
                    output_queue.push(temp)
                # Finally push the current operator itself.
                operator_stack.push(element)
                print("operator", operator_stack)
        # Flush every remaining operator from the stack onto the output queue.
        for i in range(operator_stack.size()):
            element = operator_stack.pop()
            output_queue.push(element)
        print("the ouput", output_queue, output_queue.is_empty())
        print("the operator", operator_stack, operator_stack.is_empty())
        self.output_queue = output_queue # store the finished RPN queue on the instance
        print(self.output_queue)
    def text_parser(self, text):
        """Tokenize a text string into numbers/parens/operators/functions for RPN conversion."""
        # Strip spaces and normalise to upper case.
        text = text.replace(" ", "").upper()
        # The method returns a list of tokens.
        return_list = []
        # re.search shortcut: '|' alternation matches any of several prefixes at once.
        functions = "|".join(["^" + func for func in self.functions.keys()])
        operators = "|".join(["^" + oper for oper in self.operators.keys()])
        paranthesis = "^[()]"
        nums = "^[-1234567890.]+"
        while text != "":
            check = re.search(nums, text) # is the next token a number?
            print(check==None)
            if check != None: # check is None when nothing matched
                # check.__getitem__(0) is the matched text
                return_list.append(float(check.__getitem__(0))) # convert the number to float
                print(text)
                text = text[len(check.__getitem__(0))::]
                print("after", text)
                continue
            check = re.search(paranthesis, text) # is it a parenthesis?
            if check != None:
                return_list.append(check.__getitem__(0)) # parentheses stay plain strings
                print(text)
                text = text[1::]
                print("after", text)
                continue
            check = re.search(operators, text) # is it an operator?
            print(check==None)
            if check != None:
                print(check.__getitem__(0))
                return_list.append(self.operators[check.__getitem__(0)]) # e.g.: append(self.operators["GANGE"])
                print(text)
                text = text[len(check.__getitem__(0))::]
                print("after", text)
                continue
            check = re.search(functions, text) # is it a function?
            if check != None:
                return_list.append(self.functions[check.__getitem__(0)]) # e.g.: append(self.functions["EXP"])
                print(text)
                text = text[len(check.__getitem__(0))::]
                print(text)
                continue
            # NOTE(review): if no pattern matched, text was not consumed this
            # iteration, so unrecognised input makes this loop spin forever.
            print("her skjer lite...")
        return return_list
    def test(self):
        """Read an expression from stdin, parse it, evaluate it, and return the result."""
        text = input("Skriv inn en uttrykk som skal regnes ut: ")
        print(text)
        return_list = self.text_parser(text)
        print(return_list)
        self.normal_calculator(return_list)
        result = self.rpn()
        return result
if __name__ == '__main__':
    # Only run the interactive loop when executed as a script, so importing
    # this module no longer blocks on input().
    calc = Calculator()
    calc.test()
"""
# sjekker om instansieringen virker
calc = Calculator()
print(calc.functions["EXP"].execute(
calc.operators["+"].execute(
1, calc.operators["*"].execute(2, 3))))
"""
"""
# tester RPN
calc = Calculator()
calc.output_queue.push(1)
calc.output_queue.push(2)
calc.output_queue.push(3)
calc.output_queue.push(calc.operators["*"])
calc.output_queue.push(calc.operators["+"])
calc.output_queue.push(calc.functions["EXP"])
print(calc.rpn())
"""
"""
# tester fra "vanlig" notasjon til RPN
calc = Calculator()
e = calc.functions["EXP"]
add = calc.operators["+"]
multiply = calc.operators["*"]
test_list = [e, "(", 1, add, 2, multiply, 3, ")"]
calc.normal_calculator(test_list)
"""
"""
# tester tekst-parseren
calc = Calculator()
# text_input = "((15 DELE (7 MINUS (1 PLUSS 1))) GANGE 3) MINUS (2 PLUSS (1 PLUSS 1))"
text_input = "EXP(1 PLUSS 2 GANGE 3)"
return_list = calc.text_parser(text_input)
print(return_list)
calc.normal_calculator(return_list)
result = calc.rpn()
print(result)
"""
| [
"ronjaek@Ronjas-MacBook-Pro.local"
] | ronjaek@Ronjas-MacBook-Pro.local |
b2ca28488c94f3f941e3240dbeca0d5ce18d1b19 | 5288d737d3d68e86b666be03dc62600b324a2910 | /main_app/filters.py | 23ddd9ad6fb44e369f4235f9f2c3cbc845160d64 | [] | no_license | aerastov/SkillFactory_D4 | 8b127baca51c69198e8e76d57d69f2567b5bd6c4 | 11bbc5cb936749483c27d9e4c7212270847eb7b1 | refs/heads/master | 2023-08-27T14:30:14.515050 | 2021-10-26T22:46:06 | 2021-10-26T22:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | from django_filters import FilterSet # импортируем filterset, чем-то напоминающий знакомые дженерики
from .models import Post
from django_filters import DateFilter
from django import forms
class PostFilter(FilterSet):
    """Filter set for Post listings: title substring, exact author, created-after date."""
    class Meta:
        model = Post
        # Maps each model field to the lookup expressions exposed in the filter form.
        fields = {
            'title': ['icontains'],  # case-insensitive substring match
            'author': ['exact'],
            'dateCreation': ['gt'],  # posts created after the given date
        }
| [
"a.erastov@gmail.com"
] | a.erastov@gmail.com |
3ec0749c8b2c8d3d54cf97e11f641dfbced8701d | 6a9b9d2e6f811423ab411c83952b9a2ff69b3471 | /code_snippets/process_data.py | 615cf89eb5b96d9598182b2c793997e5496130fb | [] | no_license | thebeancounter/flask_keras_train_and_prediction | 017848e70bc03cac66143d94d3b18a174b786534 | 55e4b488fe30f98824187dd75c72c915cf50aa34 | refs/heads/master | 2020-03-27T13:20:40.308695 | 2018-11-06T14:26:28 | 2018-11-06T14:26:28 | 146,603,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | from keras.datasets import mnist
import keras

# MNIST digit classification has 10 classes (digits 0-9). This constant was
# referenced below but never defined, which raised a NameError at runtime.
num_classes = 10

(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Flatten 28x28 images to 784-dim vectors and scale pixels to [0, 1].
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# One-hot encode the integer labels.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
| [
"shai2go@gmail.com"
] | shai2go@gmail.com |
8d551813a916a1c05fba0afca7bf63e06802bd88 | 72f43041a2645c10295b2e9c17d72274ddf765ef | /src/test/test_benchmarks.py | 7b70d82b518ec30458cbddb7017d3e0057be845f | [
"MIT",
"BSD-3-Clause"
] | permissive | zhaowill/stata-gtools | 85918134c741a32b534786e57208f7640dc181cd | 6409f703a265aa28ee32da9989a276188e83f46e | refs/heads/master | 2020-05-17T23:33:53.840993 | 2019-04-04T16:45:54 | 2019-04-04T16:45:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# import matplotlib.pyplot as plt
# import pandas as pd
# import numpy as np
# import json
| [
"mauricio.caceres.bravo@gmail.com"
] | mauricio.caceres.bravo@gmail.com |
8b9ad83e2464b381fd7db608eb689d5927b9c029 | cac10d84dc970078f5c4bc31f433cd5c3aa186cf | /bikeshare.py | 2c1278a32198c321ab3d0a79fdb481df82257eda | [] | no_license | sarahg/python-bikeshare-stats | 5a9ee03276c25330a978c040768e6a2e008bac8e | 77578021a58fa43560a55a5db1128d1c96513438 | refs/heads/master | 2022-10-30T13:46:09.359330 | 2020-06-15T17:39:29 | 2020-06-15T17:39:29 | 272,494,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,533 | py | import time
import calendar
import pandas as pd
CSV_PATH = './csvs/'
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
SEPARATOR = '-' * 40
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.

    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    """
    print('Hello! Let\'s explore some US bikeshare data.')

    # Keep prompting until the (lower-cased) city names a known dataset.
    city = input("Choose a city (Chicago, New York City, Washington): ").lower()
    while city not in CITY_DATA:
        print('Unknown city, please try again: ')
        city = input("Choose a city (Chicago, New York City, Washington): ").lower()

    # Accept "all" or any month name from January through June.
    month = input('Enter a month (January-June), or enter "all" for all months: ')
    while month != 'all' and month.capitalize() not in calendar.month_name[:7]:
        print('Please enter a valid month, or "all" for all months: ')
        month = input('Enter a month (January-June), or enter "all" for all months: ')

    # Accept "all" or any weekday name.
    day = input('Enter a day of the week (e.g, Tuesday), or "all" for all days: ')
    while day != 'all' and day.capitalize() not in calendar.day_name:
        print('Please enter a valid weekday, or "all" for all days: ')
        day = input('Enter a day of the week (e.g, Tuesday), or "all" for all days: ')

    print(SEPARATOR)
    return city, month, day
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - pandas DataFrame containing city data filtered by month and day
    """
    df = pd.read_csv(CSV_PATH + CITY_DATA[city])

    # Derive month / weekday-name / hour columns from the parsed start time.
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    df['month'] = df['Start Time'].dt.month
    df['day_of_the_week'] = df['Start Time'].dt.day_name()
    df['hour'] = df['Start Time'].dt.hour

    if month != 'all':
        # Month name -> number via its position in calendar.month_name.
        month_number = list(calendar.month_name).index(month.capitalize())
        df = df[df['month'] == month_number]

    if day != 'all':
        df = df[df['day_of_the_week'] == day.title()]

    return df
def time_stats(df):
    """Displays statistics on the most frequent times of travel."""
    print('\nCalculating The Most Frequent Times of Travel...\n')
    t0 = time.time()

    # Report the modal month, weekday and hour, in that order.
    for column in ('month', 'day_of_the_week', 'hour'):
        print(get_most_popular(df, column))

    print("\nThis took %s seconds." % (time.time() - t0))
    print(SEPARATOR)
def station_stats(df):
    """Displays statistics on the most popular stations and trip."""
    print('\nCalculating The Most Popular Stations and Trip...\n')
    t0 = time.time()

    # Derived column describing each trip as "<start> to <end>".
    df['Station Combination'] = df['Start Station'] + ' to ' + df['End Station']

    for column in ('Start Station', 'End Station', 'Station Combination'):
        print(get_most_popular(df, column))

    print("\nThis took %s seconds." % (time.time() - t0))
    print(SEPARATOR)
def trip_duration_stats(df):
    """Displays statistics on the total and average trip duration."""
    print('\nCalculating Trip Duration...\n')
    t0 = time.time()

    durations = df['Trip Duration']
    print('Total travel time: {} seconds'.format(durations.sum()))
    print('Mean travel time: {} seconds'.format(durations.mean()))

    print("\nThis took %s seconds." % (time.time() - t0))
    print(SEPARATOR)
def user_stats(df, city):
    """Displays statistics on bikeshare users."""
    print('\nCalculating User Stats...\n')
    t0 = time.time()

    print('--- User types ---')
    print(df['User Type'].value_counts().to_string())
    print()

    # Washington's dataset includes neither Gender nor Birth Year columns.
    if city in ('new york city', 'chicago'):
        print('--- Gender ---')
        print(df['Gender'].value_counts().to_string())
        print()

        print('--- Birth year ---')
        birth_years = df['Birth Year']
        print('Earliest birth year: ', int(birth_years.min()))
        print('Latest birth year: ', int(birth_years.max()))
        print('Most common birth year: ', int(birth_years.mode()[0]))

    print("\nThis took %s seconds." % (time.time() - t0))
    print(SEPARATOR)
def get_most_popular(df, stat):
    """Return a 'Most frequent <stat>: <value>' summary for a dataframe column."""
    top = df[stat].mode()[0]
    if stat == 'month':
        # Show the month's name instead of its number.
        top = calendar.month_name[top]
    label = stat.replace("_", " ")
    return 'Most frequent {}: {}'.format(label, top)
def main():
    """Run the interactive filter/report loop until the user declines a restart."""
    keep_going = True
    while keep_going:
        city, month, day = get_filters()
        df = load_data(city, month, day)
        for report in (time_stats, station_stats, trip_duration_stats):
            report(df)
        user_stats(df, city)
        keep_going = input('\nWould you like to restart? Enter yes or no.\n').lower() == 'yes'
if __name__ == "__main__":
main()
| [
"sgerman@pantheon.io"
] | sgerman@pantheon.io |
327cab1f61b7fc63a691fa1106537977cd19c625 | e273ac58c34f6a0fba8360aef75f52a7ef03d5bb | /ansiblemetrics/playbook/num_unique_names.py | 86e83089e546f11c6247dd51d9b902c4e8b68bfe | [
"Apache-2.0"
] | permissive | valeriapontillo/radon-ansible-metrics | e25b6c848fd40eb4b5802f540a6fd1ad20a77ce4 | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | refs/heads/master | 2023-09-06T06:21:43.417616 | 2021-11-04T14:28:04 | 2021-11-04T14:28:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | import re
from collections import Counter
from ansiblemetrics.ansible_metric import AnsibleMetric
from ansiblemetrics.utils import key_value_list
class NumUniqueNames(AnsibleMetric):
    """Measures how many plays and tasks in a playbook have a unique name."""

    def count(self):
        """Return the number of plays and tasks with a unique name.

        Every ``name`` entry found in the playbook is normalised by removing
        all internal whitespace; a name counts as unique when the normalised
        form occurs exactly once.

        Returns
        -------
        int
            number of plays and tasks with a unique name
        """
        normalised = [
            re.sub(r'\s+', '', str(pair[1])).strip()
            for pair in key_value_list(self.playbook)  # pairs of (key, value)
            if pair[0] == 'name'
        ]
        occurrences = Counter(normalised)  # name -> frequency
        return sum(1 for times in occurrences.values() if times == 1)
| [
"stefano.dallapalma0@gmail.com"
] | stefano.dallapalma0@gmail.com |
748b5d931eb15cc7bc4633096be19fd44f691168 | 454c29a8fec880afdc6841b5468aeb89b5f3051c | /project1/urls.py | d0b226636a6d16e748de50d2c4897bdee8ac3437 | [] | no_license | gauravhans8/Lawyered | 24bf5710d5f5b4d89b71c386203c07c06d078431 | 70ee812b1f4887a0cf4b5fb68186ae474758b9a4 | refs/heads/master | 2021-01-11T03:57:02.993476 | 2016-10-18T06:05:43 | 2016-10-18T06:05:43 | 71,244,215 | 1 | 0 | null | 2016-10-18T12:08:26 | 2016-10-18T12:08:26 | null | UTF-8 | Python | false | false | 578 | py | from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    # Examples:
    # url(r'^$', 'project1.views.home', name='home'),
    url(r'^lawyered/', include('lawyered.urls')),
    # url(r'^ath/', include('laath.urls')),
    # NOTE(review): the function-based django.contrib.auth views.login was
    # deprecated in Django 1.11 and removed in 2.1 in favour of
    # auth_views.LoginView — confirm the installed Django version provides it.
    url(r'^login/$', views.login, {'template_name' : 'login.html'}),
    url(r'^admin/', include(admin.site.urls)),
]
# Serve user-uploaded media through Django itself, but only while DEBUG is on.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
"kratigyarastogi0705@gmail.com"
] | kratigyarastogi0705@gmail.com |
89531794da9e58caea0fdbc37413f91c45f8d070 | 9ad903f3873e82c91026066e80d9fe77b02d4bc4 | /graph_generator.py | 18a3346519cb40855a90b3229342235926efc25f | [] | no_license | anirudhSK/drmt | c6fa05c6cc1def571ad7ed1b095654211a85009e | b287d31d50f371284cfb3a1f810990ae26637ae1 | refs/heads/master | 2021-04-29T09:30:02.996494 | 2017-09-10T10:13:27 | 2017-09-10T10:13:27 | 77,659,675 | 12 | 10 | null | 2017-02-24T22:45:14 | 2016-12-30T04:12:46 | Python | UTF-8 | Python | false | false | 3,461 | py | import sys
import math
import matplotlib
import importlib
matplotlib.use('Agg')
matplotlib.rcParams.update({'font.size':18})
import matplotlib.pyplot as plt
# Parse command-line arguments: a results folder, two latency modules
# (imported by name), and an output folder for the generated figures.
if (len(sys.argv) != 5):
    print("Usage: ", sys.argv[0], " <result folder> <drmt latencies> <prmt latencies> <folder for figs>")
    exit(1)
else:
    result_folder = sys.argv[1]
    drmt_latencies = importlib.import_module(sys.argv[2], "*")
    prmt_latencies = importlib.import_module(sys.argv[3], "*")
    fig_folder = sys.argv[4]
# Processor counts swept on the x-axis of every plot.
PROCESSORS=range(1, 51)
progs = ["switch_egress"]
d_archs = ["drmt_ipc_1", "drmt_ipc_2"]
p_archs = ["prmt_coarse", "prmt_fine"]
# Human-readable legend labels for each architecture key.
labels = dict()
labels["drmt_ipc_1"] = "dRMT (IPC=1)"
labels["drmt_ipc_2"] = "dRMT (IPC=2)"
labels["prmt_coarse"]= "RMT"
labels["prmt_fine"] = "RMT fine"
labels["upper_bound"] = "Upper bound"
# Result tables keyed by (program, architecture), scraped from text output.
pipeline_stages = dict()
drmt_min_periods = dict()
drmt_thread_count= dict()
for prog in progs:
    for arch in d_archs + p_archs:
        # NOTE(review): fh is never closed; fine for a short script, but a
        # `with open(...)` block would be safer.
        fh = open(result_folder + "/" + arch + "_" + prog + ".txt", "r")
        for line in fh.readlines():
            if arch.startswith("prmt"):
                if "stages" in line:
                    pipeline_stages[(prog, arch)] = float(line.split()[4])
            elif arch.startswith("drmt"):
                if "achieved throughput" in line:
                    drmt_min_periods[(prog, arch)] = int(line.split()[7])
                if "thread count" in line:
                    drmt_thread_count[(prog, arch)] = int(line.split()[5])
                if "Searching between limits" in line:
                    drmt_min_periods[(prog, "upper_bound")] = int(line.split()[3])
            else:
                print ("Unknown architecture")
                assert(False)
# One throughput-vs-processors step plot per program, saved as a PDF.
for prog in progs:
    plt.figure()
    plt.title("Throughput vs. Processors")
    plt.xlabel("Processors", fontsize = 26)
    plt.ylabel("Packets per cycle", fontsize = 26)
    plt.step(PROCESSORS, [min(1.0, 1.0 / math.ceil(pipeline_stages[(prog, "prmt_coarse")]/n)) for n in PROCESSORS], label = labels["prmt_coarse"], linewidth=4, linestyle = '-')
    plt.step(PROCESSORS, [min(1.0, 1.0 / math.ceil(pipeline_stages[(prog, "prmt_fine")]/n)) for n in PROCESSORS], label = labels["prmt_fine"], linewidth=4, linestyle = ':')
    plt.step(PROCESSORS, [min(1.0, (n * 1.0) / drmt_min_periods[(prog, "drmt_ipc_1")]) for n in PROCESSORS], label = labels["drmt_ipc_1"], linewidth=4, linestyle = '-.')
    plt.step(PROCESSORS, [min(1.0, (n * 1.0) / drmt_min_periods[(prog, "drmt_ipc_2")]) for n in PROCESSORS], label = labels["drmt_ipc_2"], linewidth=4, linestyle = '--')
    plt.legend(loc = "lower right")
    plt.xlim(0, 15)
    plt.tight_layout()
    plt.savefig(fig_folder + "/" + prog + ".pdf")
# Print a fixed-width summary table of thread counts, periods and latencies.
print("drmt thread count")
print("%26s %16s %16s %16s %16s %16s %16s %16s %16s"%(\
    "prog", "ipc_1_lat", "ipc_1_period", "ipc_1_thrs", "ipc_2_lat", "ipc_2_period", "ipc_2_thrs", "drmt:max(dM, dA)", "prmt:dM+dA"))
for prog in progs:
    print("%26s %16d %16d %16d %16d %16d %16d %16d %16d" %(\
    prog,\
    int(drmt_thread_count[(prog, "drmt_ipc_1")]), \
    int(drmt_min_periods[(prog, "drmt_ipc_1")]),\
    int(math.ceil(drmt_thread_count[(prog, "drmt_ipc_1")] / drmt_min_periods[(prog, "drmt_ipc_1")])),\
    int(drmt_thread_count[(prog, "drmt_ipc_2")]), \
    int(drmt_min_periods[(prog, "drmt_ipc_2")]),\
    int(math.ceil(drmt_thread_count[(prog, "drmt_ipc_2")] / drmt_min_periods[(prog, "drmt_ipc_2")])),\
    max(drmt_latencies.dM, drmt_latencies.dA),
    prmt_latencies.dM + prmt_latencies.dA))
| [
"sk.anirudh@gmail.com"
] | sk.anirudh@gmail.com |
4b07fb305ff28845f8393a96c6bf69fafb122469 | d5b526977adfce0ac21588598a61d0fabbd6ed48 | /tkinterBasic.py | 555ee324d4a6d9c89c44116dd5132240a671f8bd | [
"MIT"
] | permissive | anishmo99/Python-Functionality | b001cdd40828fc0b6879d34ad057dab3524de933 | 91e963609b0ce600d0c46073748611ecbab61dae | refs/heads/master | 2022-12-15T18:46:47.534669 | 2020-09-10T07:14:13 | 2020-09-10T07:14:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | import tkinter as tk
# Create the root window, set its title-bar text, then hand control to Tk's
# event loop (blocks until the window is closed).
m=tk.Tk()
m.title('hi anish')
m.mainloop()
"ani10sh@gmail.com"
] | ani10sh@gmail.com |
c0bd9094de5b1e54fc83d99f8152fdce3ed29c0a | 4a9c10f838128a1c401b2df91a458596bdea7cca | /mysite/settings.py | d89cf373db46f9cdfea7b2856710a471d70cb679 | [] | no_license | daichimitsuzawa/djangolesson | fc587005454fa5e49f8dc6282b1d4122e7fe2463 | ea71bdb9cea13831423639b3620fd7915aba908c | refs/heads/master | 2022-09-09T02:39:58.447149 | 2020-06-01T08:46:09 | 2020-06-01T08:46:09 | 268,469,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,119 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 're6a&2z=9rwh(%vusj23g&f^dau#qqv3!8r6yrr(98gn&(v$hz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"mitsuzawadaichi@Daichi-no-MacBook-Air.local"
] | mitsuzawadaichi@Daichi-no-MacBook-Air.local |
cdee9bbc3982ee481c0bf44afc8e682f9146e6db | 216e1d474cf04cd0a9b3cabac22bef95716e08e7 | /Intro to Python/Homework/CSC110_2_Ch04/hw_04_ex_02.py | f9125cdcaff0fefd2985cf256718f7ec7d24022a | [] | no_license | RPMeyer/intro-to-python | 9c36a97dcad39d3060f626ab5a1d789b470ba32c | d2283c8902e0699036ef3b117d272691eaf9b490 | refs/heads/master | 2020-12-03T00:06:59.087698 | 2017-08-17T16:56:01 | 2017-08-17T16:56:01 | 95,989,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | # Write a program to draw this ( 5 concentric squares of decreasing size) Assume the innermost square is 20 units per side
# each successive square is 20 units bigger (size += to 20), per side, than the one inside it.
import turtle
wn = turtle.Screen()
wn.bgcolor("lightgreen")
wn.title("Alex draws a line of squares")
alex = turtle.Turtle()
alex.speed()
def draw_square(t, sz):
    """Trace one square of side length ``sz`` with the turtle-like object ``t``."""
    for _ in range(4):
        t.forward(sz)
        t.left(90.0)
#USES HELPER FUNCTION draw_square(t,sz)
def draw_concentric_squares(t, sz, n):
    """Draw ``n`` concentric squares centred on the origin.

    The innermost square has side ``sz``; each successive square's side
    grows by 20 units.  Uses the helper ``draw_square``.
    """
    side = sz
    for _ in range(n):
        # Re-position (pen up) so each square is centred on (0, 0).
        t.penup()
        t.setpos(-(0.5 * side), -(0.5 * side))
        t.pendown()
        draw_square(t, side)
        side += 20
draw_concentric_squares(alex, 20, 5)
| [
"haru.haru77haruko@gmail.com"
] | haru.haru77haruko@gmail.com |
fb4e8cc6019e4b13f7755cdb495d918f89adcf83 | d24632eb9f35ac144bee042facd9f8a596622b15 | /minimos_cuadrados_regresion_lineal.py | 7b1a2cd38d84804eb58dffdf1a9f742c001a047b | [] | no_license | fiscompunipamplona/taller-interpolacion-RobertoCuellar2019 | 02a7b24db597e3e9e2d2b3e130c07c8fcb0366be | 9129885b332f153f003682d5bfae79530565fb44 | refs/heads/master | 2020-05-23T08:34:59.891349 | 2019-05-17T03:41:15 | 2019-05-17T03:41:15 | 186,690,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 08:16:01 2019
@author: usuario
"""
import numpy as np
import matplotlib.pyplot as pl
from sklearn.linear_model import LinearRegression
from regresion_lineal import regresion
from time import time
###implementacion con libreria
x1 = 1e-9*np.array([5,15,25,35, 45,55,65,75,85,95, 105,115])
x = x1.reshape((-1, 1))
y = np.array([32, 17,21,7.5,8, 7, 5, 2, 4, 3,4, 1.5])
yy =np.log(y)
modelo = LinearRegression()
tiempo1_ini = time()
modelo.fit(x, yy)
modelo = LinearRegression().fit(x, yy)
tiempo1_fin = time()
total = tiempo1_fin-tiempo1_ini
r_sq = modelo.score(x, yy)
print('Chi cuadrado:', r_sq)
print('intercepto:', modelo.intercept_) ###intercepto eje y
print('Pendiente:', modelo.coef_) ### pendiente del modelo
print("Tiempo de ejecución = ",total)
f = np.linspace(np.amin(x),np.amax(x),100)
reg = modelo.coef_*f+modelo.intercept_
pl.scatter(x,yy)
pl.plot(f,reg)
pl.title("Regresión Lineal")
pl.xlabel("Tiempo")
pl.ylabel("N(t)")
reg = regresion()
tiempo2_ini = time()
regresion_lineal = reg.regresion_lineal(x1,np.log(y))
tiempo2_fin = time()
total2 = tiempo2_fin-tiempo2_ini
reg_prop = regresion_lineal[1]*f+regresion_lineal[0]
tau = -1/regresion_lineal[1]
pl.plot(f,reg_prop)
pl.xlim([np.amin(x1),np.amax(x1)])
pl.show()
print("Mi modelo =",tau)
print("Python =",1/modelo.coef_)
print("Tiempo ejecucion propio = ", total2)
####
xi = 0.
for i in range(len(x1)):
sigma = np.sqrt(yy[i])
xi += ((yy[i]-reg_prop[i])/(sigma))**2
print("chi =", xi)
| [
"lozano.roberto.2018@gmail.com"
] | lozano.roberto.2018@gmail.com |
27243d518c4d54514aa0ed20d368f35eaf505dbd | 033fb08be4d1ebd99ff9d4a790dc5c86d82f686c | /landing_page/models.py | 5f0a4b81205b47f558357e621dd5a083c75a2954 | [] | no_license | MasoudMusa/Django-Blog-App | 8a145c68520d50d8384fd48badbebcb097c663ed | 48b8bd31608a9d3a0e65a977b7961be9ad774fa6 | refs/heads/master | 2020-04-25T18:38:32.919036 | 2019-02-27T21:14:38 | 2019-02-27T21:14:38 | 172,991,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | from django.db import models
from django.conf import settings
from datetime import datetime
from django.contrib.auth.models import User
# Create your models here.
class BlogStuff(models.Model):
    """A blog post: title, short description, publish date and optional image."""
    # NOTE(review): TextField's max_length is not enforced at the DB level;
    # CharField may have been intended for these two fields -- confirm.
    title = models.TextField(max_length=200)
    description = models.TextField(max_length=100, default='This is the description...')
    # Callable default: datetime.now is evaluated at save time, not import time.
    date_published = models.DateField(default=datetime.now, blank=True)
    # Uploaded files land under MEDIA_ROOT/images; the field is optional.
    post_image = models.ImageField(upload_to='images', blank=True)
    def __str__(self):
        # Human-readable label used by the admin and shell listings.
        return self.title
class Profile(models.Model):
    """One-to-one extension of the auth ``User`` with a bio and profile picture."""
    # Deleting the User cascades and deletes this Profile as well.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    bio = models.TextField(max_length=500)
    # 'default.jpg' is used until the user uploads their own picture.
    profile_pic = models.ImageField(default='default.jpg', upload_to='profile_pics')
    def __str__(self):
        return self.user.username
"mwendamusa20@gmail.com"
] | mwendamusa20@gmail.com |
4e2542ef9400252a5e53cd863018ba77e7f9302f | 8ec71b40687f2ebabc2d961c9efa2f9a6fc17666 | /src/padronelectoral/views/views_orm.py | e4138bbf85b30898696c30a1cae785b8c0673a2f | [] | no_license | luisza/Training | 37af0c17f2b97bc0a5b812469a5c47d8cd299e2c | 32b901efd0cccc618a26f34f8c1702133f5fac68 | refs/heads/master | 2020-04-15T12:34:08.270263 | 2019-02-25T23:46:11 | 2019-02-25T23:46:11 | 164,680,331 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,339 | py | from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.http import JsonResponse
from crpadron import settings
from django.shortcuts import render, get_object_or_404, redirect
from django.conf import settings
from padronelectoral.forms import ElectorForm
from padronelectoral.models import Elector, Province, Canton, District
def get_electors(request):
    """
    Search electors by id_card (exact match) or full name (substring).

    On POST, the 'input' value is first interpreted as a numeric id_card;
    if it is not numeric, or no elector with that id exists, the view
    falls back to a case-insensitive full-name search ordered by codelec.

    :param request: current HttpRequest
    :return: rendered 'index.html' with matches in context['info']
    """
    # by default, its important to send the actual database that we are using
    context = {'database': settings.ACTIVE_DATABASE}
    if request.method == 'POST':
        elector_list = []
        data = request.POST.get('input')
        # Try the input as an id_card: int() raises ValueError for
        # non-numeric input and get_object_or_404 raises Http404 for an
        # unknown id -- both fall through to the name search below.
        try:
            data_id = int(data)
            elector = get_object_or_404(Elector, pk=data_id)
            elector_list.append(elector)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed; fallback is unchanged.
            elector_list = Elector.objects.filter(fullName__icontains=data).order_by('codelec')
            context['message'] = "NOTE: This search is sorted by province, canton and full name (in that order)"
        context['info'] = elector_list
    return render(request, 'index.html', context)
def get_province_data(request, pk):
    """
    This function is to get the province stats
    :param request:
    :param pk: In this context, pk is the code in Province
    :return: Return the stats of the province filtering by this pk
    """
    province = get_object_or_404(Province, pk=pk)
    # By default, in the database. All the provinces have a -1 in the stats
    # (-1 is the "not yet computed" sentinel; results are cached on the row
    # after the first request).
    if province.stats_total == -1:
        print("--Calculating--")
        # All electors in any canton belonging to this province.
        elector_list_by_canton = Elector.objects.filter(codelec__canton__province=pk)
        province.stats_female = elector_list_by_canton.filter(gender=2).count()  # gender code 2 counted as female here -- TODO confirm mapping
        province.stats_male = elector_list_by_canton.filter(gender=1).count()  # gender code 1 counted as male here -- TODO confirm mapping
        province.stats_total = province.stats_female + province.stats_male
        province.save()  # persist the cached stats
    return render(request, 'stats.html', {'totalM': province.stats_male,
                                          'totalF': province.stats_female,
                                          'totalE': province.stats_total,
                                          'location': province})
def get_canton_data(request, pk):
    """
    Compute (lazily, on first request) and render the gender statistics
    for one canton.

    :param request: current HttpRequest
    :param pk: primary key of the Canton
    :return: rendered 'stats.html' with male/female/total elector counts
    """
    canton = get_object_or_404(Canton, pk=pk)
    # -1 is the "not yet computed" sentinel; counts are cached afterwards.
    if canton.stats_total == -1:
        print("--Calculating--")
        elector_list_by_canton = Elector.objects.filter(codelec__canton=canton)
        canton.stats_female = elector_list_by_canton.filter(gender=2).count()
        canton.stats_male = elector_list_by_canton.filter(gender=1).count()
        canton.stats_total = canton.stats_female + canton.stats_male
        canton.save()
    return render(request, 'stats.html', {'totalM': canton.stats_male,
                                          'totalF': canton.stats_female,
                                          'totalE': canton.stats_total,
                                          'location': canton})
def get_district_data(request, pk):
    """
    Compute (lazily, on first request) and render the gender statistics
    for one district.

    :param request: current HttpRequest
    :param pk: the district pk (its codelec)
    :return: rendered 'stats.html' with male/female/total elector counts
    """
    district = get_object_or_404(District, pk=pk)
    # None marks "not yet computed" for districts (provinces and cantons
    # use -1 as their sentinel instead).  `is None` replaces `== None`:
    # identity comparison is the correct idiom for None checks.
    if district.stats_total is None:
        print("--Calculating--")
        elector_list = Elector.objects.filter(codelec=pk)
        district.stats_female = elector_list.filter(gender=2).count()
        district.stats_male = elector_list.filter(gender=1).count()
        district.stats_total = district.stats_female + district.stats_male
        district.save()  # persist the cached stats
    return render(request, 'stats.html', {'totalM': district.stats_male,
                                          'totalF': district.stats_female,
                                          'totalE': district.stats_total,
                                          'location': district})
def get_district_electors(request, pk):
    """
    Render the district electors page; the template's JavaScript reads the
    ``codelec`` context value (the district pk) to request the table rows.

    :param request: current HttpRequest
    :param pk: district id (codelec)
    :return: rendered 'district-data.html' with the district and its codelec
    """
    district = get_object_or_404(District, pk=pk)
    return render(request, 'district-data.html', {'district': district, 'codelec': pk})
def django_datatable(request, district):
    """
    JSON endpoint feeding the jQuery DataTables widget with the first page
    (10 rows) of electors for ``district``, ordered by full name.

    NOTE(review): ``draw`` is hard-coded to 1 and ``recordsFiltered`` is
    the page length rather than the filtered total -- confirm against the
    DataTables server-side protocol if paging beyond page 1 is needed.
    """
    electors = Elector.objects.filter(codelec=district).order_by('fullName')
    p = Paginator(electors, 10)
    actual = p.page(1)  # only the first page is ever served
    datalist = [[x.idCard, x.fullName, x.gender] for x in actual.object_list]
    data = {
        "draw": 1,
        "recordsTotal": electors.count(),
        "recordsFiltered": len(actual.object_list),
        "data": datalist
    }
    return JsonResponse(data)
@login_required
def createElector(request):
    """
    Create a new Elector via ElectorForm (login required).

    GET renders an empty form; POST validates and saves, redirecting to
    'index' on success, or re-rendering the bound form (with its
    validation errors) otherwise.
    """
    if request.method == 'POST':
        form = ElectorForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('index')
    else:
        form = ElectorForm()
    # Reached on GET (empty form) and on invalid POST (bound form + errors).
    return render(request, 'create_elector.html', {'form': form})
"miguel1796@live.com"
] | miguel1796@live.com |
98f37383c32f1daedec53a0bb197ecf616effd71 | 96a37825fa81ba748edccc0a71b6c78f2a530503 | /wrangle_data/packages/__init__.py | 6851a907cfde581029ac137e0f8ba1be1bc95cf7 | [] | no_license | ErinMa10/gwshm-machine-learning | c0c3ed1c7c5327686b66a0581efb943aa6e10b86 | 02210564dc1a39e297d23413f313bb092c22e86c | refs/heads/master | 2022-04-22T08:20:20.375609 | 2019-02-05T19:36:31 | 2019-02-05T19:36:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | #from .two_ray_model import RayModel | [
"dibgerge@gmail.com"
] | dibgerge@gmail.com |
a45a07dd66cbbfa57b6a3b8f8445747b4300de28 | 1d9e681b204e6ec2d7a710ef45b7dec082239491 | /venv/Lib/site-packages/od_python/models/inline_response_200_33.py | 2f87d5fa2b2a17141b43a2b9c133a4e168221558 | [] | no_license | 1chimaruGin/DotaAnalysis | 0e0b85805cc83e4cc491d46f7eadc014e8d6b1f1 | 6a74cde2ee400fc0dc96305203d60c5e56d7ecff | refs/heads/master | 2020-07-21T20:48:07.589295 | 2019-09-07T12:20:15 | 2019-09-07T12:20:15 | 206,972,180 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,902 | py | # coding: utf-8
"""
OpenDota API
# Introduction The OpenDota API provides Dota 2 related data including advanced match data extracted from match replays. Please keep request rate to approximately 1/s. **Begining 4/22/2018, the OpenDota API will be limited to 50,000 free calls per month.** We'll be offering a Premium Tier with unlimited API calls and higher rate limits. Check out the [API page](https://www.opendota.com/api-keys) to learn more.
OpenAPI spec version: 17.6.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class InlineResponse20033(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'table_name': 'str',
'column_name': 'str',
'data_type': 'str'
}
attribute_map = {
'table_name': 'table_name',
'column_name': 'column_name',
'data_type': 'data_type'
}
def __init__(self, table_name=None, column_name=None, data_type=None):
"""
InlineResponse20033 - a model defined in Swagger
"""
self._table_name = None
self._column_name = None
self._data_type = None
if table_name is not None:
self.table_name = table_name
if column_name is not None:
self.column_name = column_name
if data_type is not None:
self.data_type = data_type
@property
def table_name(self):
"""
Gets the table_name of this InlineResponse20033.
table_name
:return: The table_name of this InlineResponse20033.
:rtype: str
"""
return self._table_name
@table_name.setter
def table_name(self, table_name):
"""
Sets the table_name of this InlineResponse20033.
table_name
:param table_name: The table_name of this InlineResponse20033.
:type: str
"""
self._table_name = table_name
@property
def column_name(self):
"""
Gets the column_name of this InlineResponse20033.
column_name
:return: The column_name of this InlineResponse20033.
:rtype: str
"""
return self._column_name
@column_name.setter
def column_name(self, column_name):
"""
Sets the column_name of this InlineResponse20033.
column_name
:param column_name: The column_name of this InlineResponse20033.
:type: str
"""
self._column_name = column_name
@property
def data_type(self):
"""
Gets the data_type of this InlineResponse20033.
data_type
:return: The data_type of this InlineResponse20033.
:rtype: str
"""
return self._data_type
@data_type.setter
def data_type(self, data_type):
"""
Sets the data_type of this InlineResponse20033.
data_type
:param data_type: The data_type of this InlineResponse20033.
:type: str
"""
self._data_type = data_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, InlineResponse20033):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"kyitharhein18@gmail.com"
] | kyitharhein18@gmail.com |
59b4a7e0dd46c5e83fc418f6782d8c48fd7a89e1 | be27afb51cbdcc321ee7e80d147d62c0c321ebba | /rocon_client_sdk_py/virtual_core/hooks/bootstrap.py | f396c58086d4ef720832affed70f281b92985e6a | [
"MIT"
] | permissive | boklae/rocon_client_sdk_py | 18cf6dadb5782d34e74e013d500c7ad3a5d36fa8 | 47ffd3a466fbbcb43f77338eddd9a9fa32b3a763 | refs/heads/master | 2022-11-29T19:02:20.386179 | 2020-03-04T06:31:52 | 2020-03-04T06:31:52 | 244,824,897 | 0 | 0 | MIT | 2022-11-22T05:22:10 | 2020-03-04T06:29:21 | Python | UTF-8 | Python | false | false | 3,522 | py | import pydash
class HookBootstrap():
def __init__(self, context):
self._iniitializers = {'battery':self._init_battery, 'location': self._init_location}
self._validators = {'battery': self._validate_battery, 'location': self._validate_location}
self._api_config = context.api_configuration
async def load_worker_context(self, uuid, worker_record):
await self.download_maps()
if(not worker_record):
worker_record = {}
print('Cannot found worker ({}) context file. Generate default worker context'.format(uuid))
worker_record['uuid'] = uuid
worker_record['name'] = 'VirtualWorker({})'.format(uuid)
worker_record['type_specific'] = {}
for item in self._iniitializers:
worker_record = await self._iniitializers[item](worker_record)
return worker_record
else:
return await self._patch(worker_record)
return None
async def _init_battery(self, worker_record):
type_specific = worker_record['type_specific']
pydash.set_(type_specific, 'battery',
{
'battery_level': 75,
'charging_status': 0
})
return worker_record
async def _init_location(self, worker_record):
stations = await self._api_config.get_stations()
for s in stations:
map = await self._api_config.get_maps(s['map'])
if map:
type_specific = worker_record['type_specific']
if 'location' not in type_specific:
type_specific['location'] = {}
pydash.set_(type_specific['location'], 'map', s['map'])
pydash.set_(type_specific['location'], 'pose2d', s['pose'])
return worker_record
assert(False)
async def _validate_battery(self, worker_record):
if pydash.get(worker_record['type_specific'], 'battery') is None:
return {'result': False, 'message': 'battery information is not exist'}
return {'result': True}
async def _patch(self, worker_record):
for item in self._validators:
check = await self._validators[item](worker_record)
if check['result'] is False:
worker_record = await self._iniitializers[item](worker_record)
print('validation failed while path() {}:{}'.format(check['message'], {'updated_worker': worker_record}))
return worker_record
async def _validate_location(self, worker_record):
pose = pydash.get(worker_record['type_specific']['location'], 'pose2d')
map = pydash.get(worker_record['type_specific']['location'], 'map')
if pose is None:
return {'result': False, 'message': 'pose information is not loaded correctly'}
if map is None:
return {'result': False, 'message': 'map information is not loaded correctly'}
return {'result': True}
async def download_maps(self):
try:
map_list = await self._api_config.get_maps()
def cb(m):
return pydash.get(m, 'site')
map_list = pydash.filter_(map_list, cb)
if len(map_list) is 0:
print('there are no maps on site configuration')
return False
except Exception as err:
print('failed to download maps')
return False
return True | [
"blcho@yujinrobot.com"
] | blcho@yujinrobot.com |
de18f9e297b8b2c8cb0d4eb962556836670c4046 | a22a76d50a376ab9184df61d73e872cc80ff1a6f | /REGRIDDING/regrid_PRIMAVERA_on_CORDEX.py | 42db1c9ef1e1f7228246440e20a3d85f234ec280 | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | PRIMAVERA-H2020/PrecipDistribution | e3eb7c4a224aab1ed4606b553edce906ad53014e | 92c2faf428d1e8809c34473831ac9fbf0e5a0e0a | refs/heads/master | 2023-01-05T15:57:51.768864 | 2020-11-04T11:02:07 | 2020-11-04T11:02:07 | 227,361,674 | 0 | 1 | Apache-2.0 | 2020-02-24T21:35:11 | 2019-12-11T12:32:40 | Python | UTF-8 | Python | false | false | 5,431 | py | import european_masked_subregion as ems
import iris
import subprocess
import os
from cf_units import Unit
def main():
outdir = '/gws/nopw/j04/primavera3/cache/sberthou/'
country = 'prudence'
frequency = 'd'
n512 = False
other = 'PRIMAVERA'
time_con = iris.Constraint(time=lambda cell: 1950 <= cell.point.year < 2006)
# CORDEX grid, obtained by executing "cdo griddes CORDEX_file.nc > EUROCORDEX_grid.txt"
region_name = 'EUROCORDEX'
new_grid = '{}_grid.txt'.format(region_name)
runlist, _ = ems.get_runlist_region(frequency, n512, country, other=other)
for model in runlist:
#### 1st step: load all the data into one single cube and save it, so that CDO regrid is fast (calculates the weights only once, then applies it to the whole time series. ####
#### at the very end, once you're happy that everything has been regridded, don't forget to delete the large files, as they are enormous! ####
large_cube = '{}/pr_{}'.format(outdir, model)
if not os.path.exists('{}_with_grid.nc'.format(large_cube)):
if not os.path.exists('{}.nc'.format(large_cube)):
cubelist = iris.load(runlist[model], time_con, callback=callback_overwrite)
iris.util.unify_time_units(cubelist)
cube = cubelist.concatenate_cube()
print('cubes loaded')
iris.save(cube, large_cube+'.nc')
print('large cube saved')
elif 'EC-Earth' in model:
cube = iris.load_cube('{}.nc'.format(large_cube))
redefine_spatial_coords(cube)
iris.save(cube, '{}_tmp.nc'.format(large_cube))
cmd = 'mv {}_tmp.nc {}.nc'.format(large_cube, large_cube)
shellcmd(cmd, 'mv ECarth failed')
#### get the grid from the large netCDF file ####
cmd = 'cdo griddes {}.nc > init_grid.txt'.format(large_cube)
shellcmd(cmd, 'cdo griddes didn''t complete')
#### set the grid in the file (not sure why you have to do this, I think you need it mostly for the grids which have 1D lat/lon, because cdo requires 2D lat,lons, calculated with cdo griddes ####
cmd = 'cdo -setgrid,init_grid.txt {}.nc {}_with_grid.nc'.format(large_cube, large_cube)
shellcmd(cmd, 'cdo setgrid didn''t complete')
#### remapping itself ####
if not os.path.exists('{}_regridded_on_{}.nc'.format(large_cube, region_name)):
if not model == 'EC-Earth3P-HR':
cmd = 'cdo remapcon,{} {}_with_grid.nc {}_regridded_on_{}.nc'.format(new_grid, large_cube, large_cube, region_name)
shellcmd(cmd, 'cdo remapcon didn''t complete')
else:
cmd = 'cdo remapcon,{} {}.nc {}_regridded_on_{}.nc'.format(new_grid, large_cube, large_cube, region_name)
shellcmd(cmd, 'cdo remapcon didn''t complete')
def shellcmd(cmd, msg):
    """Run ``cmd`` through the shell, reporting failures to stdout.

    ``msg`` identifies the calling step in the diagnostics.  Never raises:
    OSError from launching the shell is caught and printed.
    """
    try:
        status = subprocess.call(cmd, shell=True)
    except OSError as ex:
        print("Execution failed in " + msg + ": ", ex)
        return
    if status < 0:
        print('syst.cmd terminated by signal', status)
    elif status:
        print('syst.cmd returned in ', msg, '', status)
def callback_overwrite(cube, field, filename):
    """Iris load callback: strip per-file metadata so cubes can concatenate.

    Removes auxiliary coordinates and overwrites/deletes attributes whose
    values differ between source files (histories, creation dates, CDO/CDI
    stamps, ...), since mismatching metadata prevents
    ``CubeList.concatenate_cube`` from merging.  Also renames a 'T'
    coordinate to 'time' (GPCP files).

    :param cube: the cube being loaded (modified in place)
    :param field: raw field (unused; required by the iris callback signature)
    :param filename: source file name (unused; required by the signature)
    """
    # Coordinates that vary from file to file.
    coord2rm = ['forecast_reference_time', 'forecast_period', 'season_number',
                '3hours', 'hours']
    present = {coord.name() for coord in cube.coords()}  # build once: O(1) membership tests
    for co2rm in coord2rm:
        if co2rm in present:
            cube.remove_coord(co2rm)
    # Attributes replaced with a constant placeholder so they compare equal.
    # ('creation_date' appears twice in the original list; harmless.)
    attributes_to_overwrite = ['date_created', 'log', 'converter', 'um_streamid',
                               'creation_date', 'history', 'iris_version', 'prod_date',
                               'CDI', 'CDO', 'ArchiveMetadata.0', 'CoreMetadata.0', 'creation_date', 'tracking_id']
    for att in attributes_to_overwrite:
        # Bug fix: dict.has_key() was removed in Python 3 -- use `in`.
        if att in cube.attributes:
            cube.attributes[att] = 'overwritten'
    # Attributes that must be deleted outright.
    attributes_to_del = ['radar.flags', 'log', 'iris_version', '_NCProperties', 'NCO']
    for att in attributes_to_del:
        if att in cube.attributes:
            del cube.attributes[att]
    if cube.coords('T'):  # for GPCP
        cube.coord('T').standard_name = 'time'
def redefine_spatial_coords(cube):
    """
    Redefines the latitude and longitude points for the EC-Earth3 model
    into single, rather than multi-dimensional, coordinates.

    EC-Earth files carry 2-D latitude/longitude AuxCoords over dummy
    'cell index' DimCoords; this promotes them (in place) to proper 1-D
    DimCoords named 'latitude'/'longitude'.  Assumes latitude is constant
    along the first axis and longitude along the second -- TODO confirm
    this holds for the grids being processed.
    """
    # procedure for handling EC-Earth latitude conversion
    # Column 0 of the 2-D latitude array carries the 1-D latitude values.
    cube.coord('cell index along second dimension').points = cube.coord(
        'latitude').points[:,0]
    cube.remove_coord('latitude') # remove AuxCoord 'latitude'
    cube.coord('cell index along second dimension') \
        .rename('latitude') # assign DimCoord 'latitude'
    cube.coord('latitude').units = Unit('degrees')
    cube.coord('latitude').long_name = 'latitude'
    cube.coord('latitude').var_name = 'lat'
    cube.coord('latitude').guess_bounds()  # NOTE(review): bounds presumably needed by the downstream remapping -- confirm
    # procedure for handling EC-earth longitude conversion
    # Row 0 of the 2-D longitude array carries the 1-D longitude values.
    cube.coord('cell index along first dimension').points = cube.coord(
        'longitude').points[0,:]
    cube.remove_coord('longitude') # remove AuxCoord 'longitude'
    cube.coord('cell index along first dimension') \
        .rename('longitude') # assign DimCoord 'longitude'
    cube.coord('longitude').units = Unit('degrees')
    cube.coord('longitude').long_name = 'longitude'
    cube.coord('longitude').var_name = 'lon'
    cube.coord('longitude').guess_bounds()
if __name__ == '__main__':
main()
| [
"segolene.berthou@metoffice.gov.uk"
] | segolene.berthou@metoffice.gov.uk |
4f594bdd576600a328c0961d7f08f576815b4e3e | 0b3279cc150b85f5daf48e74ed164d91d5b0f522 | /week4/linked_lists/odd_even_linked_list.py | bef041c87b7c695c26acea8361a17d270c8aad75 | [] | no_license | membriux/Wallbreakers-Training | 42a0e4bf64f178e48f5228f6559929b7b0b772b7 | 5ec134f964cc87ae6399027bcf8b573e9ea83503 | refs/heads/master | 2020-06-08T05:24:13.204769 | 2019-07-19T21:19:43 | 2019-07-19T21:19:43 | 193,166,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | '''Given a singly linked list, group all odd nodes together followed by the even nodes. Please note here we are talking about the node number and not the value in the nodes.
You should try to do it in place. The program should run in O(1) space complexity and O(nodes) time complexity.
Example 1:
Input: 1->2->3->4->5->NULL
Output: 1->3->5->2->4->NULL'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def oddEvenList(self, head: ListNode) -> ListNode:
        """Regroup nodes so odd positions precede even positions, in place.

        O(1) extra space, O(n) time; relative order within each group
        is preserved.
        """
        if head is None or head.next is None:
            return head
        odd_tail = head
        even_head = head.next
        even_tail = even_head
        # Weave alternating nodes onto the two tails.
        while even_tail is not None and even_tail.next is not None:
            odd_tail.next = even_tail.next
            odd_tail = odd_tail.next
            even_tail.next = odd_tail.next
            even_tail = even_tail.next
        # Append the even chain after the odd chain.
        odd_tail.next = even_head
        return head
| [
"sanguillermo98@gmail.com"
] | sanguillermo98@gmail.com |
7e8e0c92157894f81c595a9d31c7422fe1804de2 | b80130d763793ee3151b94530adb273ec06bdc04 | /test_synch/optimize/cplx_dream.py | cd9305a6a83f1cbb73ae4df07a71e1ed4a4fc89d | [
"MIT"
] | permissive | mgricci/am_cdbn_mirror | 18d14a354704a2f674758b52b9603a849b66b42d | 6e7539da0a550550b8510bcc84c7063fc90fc701 | refs/heads/master | 2020-04-05T04:36:56.769925 | 2019-03-05T18:04:03 | 2019-03-05T18:04:03 | 156,558,424 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,189 | py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
import numpy as np
import torch
from torchvision import datasets, transforms
from mnist import Net
import scipy.ndimage as nd
from torch.autograd import Variable
from test_utils import cplx_imshow
from tqdm import tqdm
import ipdb
import subprocess
import os
CKPT_PATH='/home/jk/matt/mnist_cnn.pt'
SAVE_PATH='/home/jk/matt/cplx_dreams'
clean_save_path = True
if clean_save_path is True:
clean_string = 'rm {}/*.png &'.format(SAVE_PATH)
subprocess.call(clean_string,shell=True)
img_side = 28
my_net = Net(img_side).double().cuda()
print('Loading model')
my_net.load_state_dict(torch.load(CKPT_PATH))
batch_size = 64
max_iter = 1000
save_every = 10
mu = 0.1307
sigma = 0.3801
origin = -1*mu/sigma
lr = 1e-2
dl = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=False,
download=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((mu,), (sigma,))
])),
batch_size=batch_size, shuffle=True)
def Energy1(out, target=1):
    """Energy = 1 minus the magnitude of the target class activation.

    ``out`` concatenates 10 real and 10 imaginary class activations.
    """
    real_part, imag_part = out[:10], out[10:]
    magnitudes = (real_part ** 2 + imag_part ** 2).sqrt()
    return 1 - magnitudes[target]
def Energy2(out, target=1):
    """Euclidean distance between class-activation magnitudes and a one-hot target.

    ``out`` concatenates 10 real and 10 imaginary class activations;
    requires a CUDA device (the one-hot vector is moved to the GPU).
    """
    magnitudes = (out[:10] ** 2 + out[10:] ** 2).sqrt()
    one_hot = torch.tensor(np.eye(10)[target]).cuda()
    diff = magnitudes - one_hot
    return torch.sum(diff * diff).sqrt()
def clip_norm(z, constraint=None):
    """Clamp the magnitude of the normalized complex image ``z``.

    ``z`` is mapped back to raw pixel space (``z*sigma + mu``), its
    magnitude is capped element-wise at ``constraint`` (denormalized, or
    1.0 when None) while the phase is preserved, and the result is
    re-normalized.

    :param z: complex ndarray in normalized space
    :param constraint: optional real ndarray (normalized space) giving the
        per-pixel magnitude ceiling; ``None`` means a ceiling of 1.0
    :return: complex ndarray in normalized space with clipped magnitudes
    """
    if constraint is None:
        # Bug fix: np.ones_like(z) inherits z's complex dtype, and the
        # `<` comparison below raises TypeError on complex operands.
        # Use a real-valued array of ones instead.
        constraint = np.ones(z.shape, dtype=np.float64)
    else:
        constraint = constraint * sigma + mu
    raw_magnitude = np.abs(z * sigma + mu)  # computed once, reused below
    cond = constraint < raw_magnitude
    z_norm = np.where(cond, constraint, raw_magnitude)
    z_angle = np.angle(z * sigma + mu)
    constrained_z = z_norm * np.exp(1j * z_angle)
    return (constrained_z - mu) / sigma
def init(v, sector=2 * np.pi):
    """Initialise a complex image from real input ``v`` (normalized space).

    Denormalizes ``v``, damps it with uniform random magnitudes in
    [0, 0.1), applies random phases drawn from ±sector/2, then
    re-normalizes.  Expects a 4-D array (batch, channel, height, width).
    """
    z = v.astype(np.complex128) * sigma + mu
    z = z * (.1 * np.random.rand(z.shape[0], z.shape[1], z.shape[2], z.shape[3]))
    phases = sector * np.random.rand(z.shape[0], z.shape[1], z.shape[2], z.shape[3]) - sector / 2
    z = z * np.exp(1j * phases)
    return (z - mu) / sigma
def run(z0,k, model, energy=Energy1, constraint=None):
z0_real = np.real(z0).reshape(1, 1, img_side, img_side)
z0_imag = np.imag(z0).reshape(1, 1, img_side, img_side)
z0_cplx = torch.tensor(np.concatenate((z0_real, z0_imag), axis=1)).cuda()
energies = []
for i in tqdm(range(max_iter)):
z0_variable = Variable(z0_cplx, requires_grad=True)
model.zero_grad()
out = model.forward_cplx(z0_variable).squeeze(0)
E = energy(out,target=k)
energies.append(E.cpu().data.numpy())
E.backward()
ratio = np.abs(z0_variable.grad.data.cpu().numpy()).mean()
lr_use = lr / ratio
z0_variable.data.sub_(z0_variable.grad.data * lr_use)
z0_cplx = z0_variable.data.cpu().numpy() # b, c, h, w
z0 = np.expand_dims(z0_cplx[:,0,:,:] + 1j*z0_cplx[:,1,:,:], axis=0)
z0 = clip_norm(z0,constraint=constraint)
# Shape for input
z0_real = np.real(z0)
z0_imag = np.imag(z0)
z0_cplx = torch.tensor(np.concatenate((z0_real, z0_imag), axis=1)).cuda()
if i == 0 or (i + 1) % save_every == 0:
fig, ax = plt.subplots()
cplx_imshow(ax,z0,remap=(mu,sigma))
plt.savefig(os.path.join(SAVE_PATH, 'dream%04d.png' % i))
plt.close()
return z0, np.array(energies)
def make_gif():
    """Assemble the saved dream frames into an animated GIF (background job)."""
    gif_cmd = 'convert -delay 10 -loop 0 {}/*.png {}/animation.gif &'.format(
        SAVE_PATH, SAVE_PATH)
    subprocess.call(gif_cmd, shell=True)
if __name__=='__main__':
for (batch, target) in dl:
batch_array = batch.cpu().data.numpy()
k = target[0].cpu().data.numpy()
v_prime = np.expand_dims(batch[0,:,:,:], axis=0)
#v_prime = (np.random.rand(1,1,28,28) - mu) / sigma
z0 = init(v_prime,sector=2*np.pi)
print('Optimizing')
cp, energies = run(z0, k, my_net, constraint=v_prime,energy=Energy1)
plt.plot(energies)
plt.ylim([0,1])
plt.savefig(os.path.join(SAVE_PATH,'energy.png'))
plt.close()
make_gif()
ipdb.set_trace()
| [
"junkyung_kim@brown.edu"
] | junkyung_kim@brown.edu |
6b1570a94afa10a3418bbfa5637e016112f5af3d | 3f8cc11f2322a2939a992378e1d3e478684ce770 | /6_2_3_Natürliche_Kubische_Spline_Interpolation_mit_LGS_4_Stützpunkte.py | 4f5ead4b0aec03ae3e7895c8f3f46df7520f29bf | [] | no_license | Finrod-Amandil/hm-scripts | 6b37f0c0df2b6b34a9edd2b8f36ad3dd3731f3d8 | ddd98ba2194933dc4654030eb4fa6a72fdb3b604 | refs/heads/main | 2023-06-10T04:06:02.448179 | 2021-06-27T14:24:39 | 2021-06-27T14:24:39 | 372,249,707 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,578 | py | import numpy as np
import matplotlib.pyplot as plt
# Natural cubic spline interpolation through 4 knots: all 12 spline
# coefficients are found at once by solving one linear system A*coeffs = b.
x = np.array([4, 6, 8, 10], dtype=np.float64)  # knot x-coordinates xi
y = np.array([6, 3, 9, 0], dtype=np.float64)  # knot y-coordinates yi
x_int = 9  # x value to interpolate at
n = x.shape[0] - 1  # number of spline pieces (one per interval)
dim = 3  # polynomial degree of each piece
print('{} Spline-Polynome {}. Grades (je {} Koeffizienten) => {} * {} = {} Unbekannte => Es braucht {} Gleichungen.'.format(n, dim, dim+1, n, dim+1, (dim+1)*n, (dim+1)*n))
# Generic cubic spline piece and its derivatives:
#   Si(x)    = ai + bi(x - xi) + ci(x - xi)^2 + di(x - xi)^3
#   Si'(x)   = bi + ci * 2(x - xi) + di * 3(x - xi)^2
#   Si''(x)  = ci * 2 + di * 6(x - xi)
#   Si'''(x) = di * 6
# Natural cubic spline interpolation with 4 knots:
# 12 unknowns (a0..d2) need 12 conditions, one per matrix row below.
A = np.array([
    # a0 a1 a2 b0 b1 b2 c0 c1 c2 d0 d1 d2
    [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # S0(x0) = y0 (piece 0 passes through first knot)
    [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # S1(x1) = y1
    [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # S2(x2) = y2
    [1, 0, 0, x[1]-x[0], 0, 0, (x[1]-x[0])**2, 0, 0, (x[1]-x[0])**3, 0, 0],  # S0(x1) = y1 (continuity at x1)
    [0, 1, 0, 0, x[2]-x[1], 0, 0, (x[2]-x[1])**2, 0, 0, (x[2]-x[1])**3, 0],  # S1(x2) = y2 (continuity at x2)
    [0, 0, 1, 0, 0, x[3]-x[2], 0, 0, (x[3]-x[2])**2, 0, 0, (x[3]-x[2])**3],  # S2(x3) = y3 (last knot)
    [0, 0, 0, 1, -1, 0, 2*(x[1]-x[0]), 0, 0, 3*(x[1]-x[0])**2, 0, 0],  # S0'(x1) = S1'(x1) (no kink at x1)
    [0, 0, 0, 0, 1, -1, 0, 2*(x[2]-x[1]), 0, 0, 3*(x[2]-x[1])**2, 0],  # S1'(x2) = S2'(x2) (no kink at x2)
    [0, 0, 0, 0, 0, 0, 2, -2, 0, 6*(x[1]-x[0]), 0, 0],  # S0''(x1) = S1''(x1) (equal curvature at x1)
    [0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 6*(x[2]-x[1]), 0],  # S1''(x2) = S2''(x2) (equal curvature at x2)
    [0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0],  # NATURAL spline: S0''(x0) = 0
    [0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 6*(x[3]-x[2])]  # NATURAL spline: S2''(x3) = 0
], dtype=np.float64)
# Right-hand side, matching the conditions above row by row.
b = np.array([
    y[0],  # S0(x0) = y0
    y[1],  # S1(x1) = y1
    y[2],  # S2(x2) = y2
    y[1],  # S0(x1) = y1
    y[2],  # S2(x2) = y2
    y[3],  # S2(x3) = y3
    0,  # S0'(x1) - S1'(x1) = 0
    0,  # S1'(x2) - S2'(x2) = 0
    0,  # S0''(x1) - S1''(x1) = 0
    0,  # S1''(x2) - S2''(x2) = 0
    0,  # S0''(x0) = 0
    0,  # S2''(x3) = 0
], dtype=np.float64)
print('\nLöse LGS Ax = b mit')
print('A = \n{}'.format(A))
print('b = {}'.format(b))
print('LGS wird gelöst...\n')
# Solve for all 12 coefficients at once.
abcd = np.linalg.solve(A, b)
a = abcd[0:3]
b = abcd[3:6]  # NOTE: rebinds b, shadowing the RHS vector above
c = abcd[6:9]
d = abcd[9:]
print('x = {}'.format(abcd))
print('a = {}'.format(a))
print('b = {}'.format(b))
print('c = {}'.format(c))
print('d = {}'.format(d))
print('\nDiese Werte jetzt einsetzen in Si(x) = ai + bi(x - xi) + ci(x - xi)^2 + di(x - xi)^3:')
for i in range(n):
    print('\tS{}(x) = {} + {} * (x - {}) + {} * (x - {})^2 + {} * (x - {})^3'.format(i, a[i], b[i], x[i], c[i], x[i], d[i], x[i]))
print('\nBestimmen, welches Spline-Polynom verwendet werden muss (Vergleich mit den Stützstellen)')
# Pick the piece: largest knot index whose x is still <= x_int.
i = np.max(np.where(x <= x_int))
print('Für x_int = {} muss S{} verwendet werden.'.format(x_int, i))
y_int = a[i] + b[i] * (x_int - x[i]) + c[i] * (x_int - x[i]) ** 2 + d[i] * (x_int - x[i]) ** 3
print('S{}({}) = {}'.format(i, x_int, y_int))
# PLOTTING
xx = np.arange(x[0], x[-1], (x[-1] - x[0]) / 10000)  # plot x values
# Determine the applicable spline piece for every plot x
xxi = [np.max(np.where(x <= xxk)) for xxk in xx]
# Evaluate the matching piece at every plot x
yy = [a[xxi[k]] + b[xxi[k]] * (xx[k] - x[xxi[k]]) + c[xxi[k]] * (xx[k] - x[xxi[k]]) ** 2 + d[xxi[k]] * (xx[k] - x[xxi[k]]) ** 3 for k in range(xx.shape[0])]
plt.figure(1)
plt.grid()
plt.plot(xx, yy, zorder=0, label='spline interpolation')
plt.scatter(x, y, marker='x', color='r', zorder=1, label='measured')
plt.scatter(x_int, y_int, marker='X', color='fuchsia', label='interpolated')
plt.legend()
plt.show()
| [
"severin.zahler@gmail.com"
] | severin.zahler@gmail.com |
fca0f3066fbd42e823817547f2c01715501558b4 | 046d9405f06774ab8d52cd356b137007e6c9599a | /python/python/老师笔记/python/day01_pm/day01/exercise/salary.py | 9b74a9a7b0e9d1efad83c45d995a6132447b113d | [] | no_license | jluocc/jluo2018 | 3a49d9884bd2bc2e02e0d6ef9f066269c2e430a8 | 30e39f93ef65810d39423a4b66dd253cdeaddae5 | refs/heads/master | 2020-04-07T15:24:07.850373 | 2019-01-06T06:05:19 | 2019-01-06T06:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | # 2. 一个学生毕业薪资是10000元,
# Exercise 2: a graduate's starting salary is 10000 and rises by 20%
# every year; print the salary after ten years.
print((1 + 0.2) ** 10 * 10000)
| [
"473145009@qq.com"
] | 473145009@qq.com |
0a38129fb32469a1590e8cefd7d32d74d316bd54 | c2447adecfdbb05a772f1d84e6e5f229de33d0c6 | /talleres/taller04/taller04.py | e6becc912735e6b385b632684b07b48b1b75a4bd | [] | no_license | Juanesfon5/ST0245-respuestas | e9aa433fedc1ef24891ae66cf3f1d06830b5f248 | 04bd676cd20393447427e8a976714be9b73cae8e | refs/heads/master | 2020-03-28T12:46:38.339203 | 2018-08-21T20:08:19 | 2018-08-21T20:08:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | py | #!/usr/bin/python
import random
from matplotlib import pyplot as pl
import time
def array_generator(len):
    """Return a list of `len` random integers in [0, 100).

    Note: the parameter is (unfortunately) named `len`, shadowing the
    builtin inside this function; the name is kept for compatibility.
    """
    return [random.randrange(0, 100) for _ in range(len)]
def array_sum(array, sum=0):
    """Return `sum` plus the total of every element in `array`.

    The second parameter (named `sum`, shadowing the builtin; kept for
    compatibility) acts as the starting value of the accumulation.
    """
    total = sum
    for value in array:
        total += value
    return total
def multiplication_tables(n):
    """Print the 1..n multiplication tables, then a single separator line."""
    for left in range(1, n + 1):
        for right in range(1, n + 1):
            print(str(left) + " * " + str(right) + " = " + str(left * right))
    print("--------------------")
def insertion_sort(list):
    """Sort `list` in place (ascending) via insertion sort; returns None.

    The parameter is named `list` (shadowing the builtin); the name is
    kept so existing keyword callers keep working.
    """
    for index in range(len(list)):
        position = index
        # Sink list[index] leftwards until the prefix is ordered.
        while position > 0 and list[position - 1] > list[position]:
            list[position], list[position - 1] = list[position - 1], list[position]
            position -= 1
def arrayMax(arr):
    """Return the largest element of `arr`, or 0 for an empty `arr`.

    Bug fix: the recursion used to start with a running maximum of 0,
    so an array containing only negative numbers wrongly returned 0.
    The recursion is now seeded with the first element instead.  The
    empty array still yields 0 for backward compatibility.
    """
    if len(arr) == 0:
        return 0
    return arrayMax_aux(arr, 1, arr[0])
def arrayMax_aux(arr, i, max):
    """Recursive helper: fold the maximum of arr[i:] into `max`."""
    if i == len(arr):
        return max
    if arr[i] > max:
        max = arr[i]
    return arrayMax_aux(arr, i + 1, max)
def groupSum_aux(list, start, target):
    """Return True if some subset of list[start:] sums exactly to `target`."""
    if start >= len(list):
        return target == 0
    # Branch 1: include the current element in the subset.
    if groupSum_aux(list, start + 1, target - list[start]):
        return True
    # Branch 2: skip the current element.
    return groupSum_aux(list, start + 1, target)
def groupSum(list, target):
    """Return True if some subset of `list` sums exactly to `target` (subset-sum)."""
    return groupSum_aux(list, 0, target)
#----------------------------Fibonacci---------------------------------#
def fib_r(n):
    """Return the n-th Fibonacci number, naive recursion (exponential cost).

    Deliberately left unmemoized: the benchmark below compares its
    runtime against the iterative version.
    """
    if n <= 1:
        return n
    return fib_r(n - 2) + fib_r(n - 1)
def fib_i(n):
    """Return the n-th Fibonacci number iteratively (linear time)."""
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
# Benchmark both implementations: recursive only up to n=15 (its cost
# grows exponentially), iterative up to n=100.
Xr,Yr,Zr = [],[],[]
Xi,Yi,Zi = [],[],[]
for i in range(15):
    Xr.append(i)
    t = time.time()
    Zr.append(fib_r(i))
    Yr.append(time.time()-t)
for i in range(100):
    Xi.append(i)
    t = time.time()
    Zi.append(fib_i(i))
    Yi.append(time.time()-t)
print(Zr) # all recursive Fibonacci values as a list
print(Zi)
pl.xlabel('Numero de Fibonacci')
pl.ylabel('Tiempo de ejecucion')
pl.title('Recursive fibonacci vs interative fibonacci')
pl.plot(Xr,Yr,'r') # n vs elapsed time, recursive in red
pl.legend(( 'Recursive', ) )
pl.plot(Xi,Yi,'b')
# NOTE(review): ('interative') is a plain string, not a 1-tuple, and this
# second legend() call replaces the first - probably meant one call
# pl.legend(('Recursive', 'interative')) after both plots; confirm.
pl.legend(( 'interative'))
pl.savefig("Fibor.png") # produce a .png file
pl.show()
| [
"noreply@github.com"
] | Juanesfon5.noreply@github.com |
1c517b59b04939c02b21ddefa13d2f904e19f7e0 | 9f549fbac72919299cb032bb33aa6595c8ac5789 | /hackerRank/python/set_d_r_p.py | cd6a678a7ec09b7d13de08d04a60eb833ab41922 | [] | no_license | jerilMJ/onlineCoding | 67a1fe24d79c1c681ab2cbc731444209ea02801b | 1630d9a0869e62f4725bc517e1cd834000b039da | refs/heads/master | 2020-07-19T01:05:51.447936 | 2020-01-02T06:24:20 | 2020-01-02T06:24:20 | 206,348,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | ''' Solution to 'Set .discard(), .remove() & .pop()' under Python in HackerRank '''
# Read the set size, the set itself, and the number of command lines.
n = int(input())
s = set(map(int, input().split()))
numOfCommands = int(input())
commands = []
skip = 0
# Flatten every command line into one token stream, e.g.
# ["remove", "9", "pop", "discard", "0", ...].
for _ in range(numOfCommands):
    command = input().split()
    commands.extend(command)
# Walk the tokens; `skip` marks a token already consumed as the numeric
# argument of the preceding remove/discard command.
for i in range(len(commands)):
    if skip == 1:
        skip = 0
        continue
    if commands[i].lower() == "pop":
        s.pop()  # raises KeyError if the set is already empty (per HackerRank spec)
    elif commands[i].lower() == "remove":
        s.remove(int(commands[i+1]))
        skip = 1
    elif commands[i].lower() == "discard":
        s.discard(int(commands[i+1]))
        skip = 1
# Emit the sum of the surviving elements.
print(sum(s))
| [
"jeril.0ff@gmail.com"
] | jeril.0ff@gmail.com |
7a203d32c16d289fef8f26566ec33d36956c6123 | b11b16bf88d4d9be80986631ba161883cd9a28a4 | /examples/rc/packages/gnu.py | 1d096f2cec2adb7a006c51d7ab8534210bfb4da8 | [
"Apache-2.0"
] | permissive | simone-campagna/zapper | 8ec11f68fdf6904cab3031789cd7553aa71f7869 | fee2aaddcb13f789768a30761670c8c142d2b54d | refs/heads/master | 2020-04-26T01:42:32.180173 | 2013-12-07T14:45:57 | 2013-12-07T14:45:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | from zapper.package_file import *
# Root "gnu" suite: one sub-suite per GNU version, each bundling demo
# libraries, a tool and an application (zapper package definitions).
gnu = Suite('gnu', NULL_VERSION)
# presumably: only one package tagged 'compiler-suite' may be loaded at
# a time - confirm against zapper's conflicting-tag semantics.
gnu.add_conflicting_tag('compiler-suite')
for version in '4.1.2', '4.5.2', '4.7.0':
    # Version-specific sub-suite, e.g. gnu/4_1_2 (dots are not allowed in names).
    version_name = version.replace('.', '_')
    gnu_version = Suite(version_name, NULL_VERSION, suite=gnu)
    gnu_version.add_conflicting_tag('gnu-suite')
    # libfoo in two versions, both exporting FOO_HOME.
    libfoo = PackageFamily('libfoo', 'library')
    libfoo_0_5 = Package(libfoo, '0.5', suite=gnu_version)
    libfoo_0_5.var_set("FOO_HOME", "/gnu-{0}/foo-0.5".format(version))
    libfoo_0_5_3 = Package(libfoo, '0.5.3', suite=gnu_version)
    libfoo_0_5_3.var_set("FOO_HOME", "/gnu-{0}/foo-0.5.3".format(version))
    # libbar, single version, exporting BAR_HOME.
    libbar = PackageFamily('libbar', 'library')
    libbar_1_0_2 = Package(libbar, '1.0.2', suite=gnu_version)
    libbar_1_0_2.var_set("BAR_HOME", "/gnu-{0}/bar-1.0.2".format(version))
    # baz tool: requires libfoo newer than 0.5 and the exact libbar above.
    baz = PackageFamily('baz', 'tool')
    baz_1_1 = Package(baz, '1.1', suite=gnu_version)
    baz_1_1.var_set("BAZ_HOME", "/gnu-{0}/baz-1.1".format(version))
    baz_1_1.requires('libfoo', VERSION > '0.5')
    baz_1_1.requires(libbar_1_0_2)
    # Demo application.
    hello_world = PackageFamily("hello_world", 'application')
    hello_world_0_0_1_beta = Package(hello_world, '0.0.1-beta', suite=gnu_version)
    hello_world_0_0_1_beta.var_set("HELLO_WORLD_HOME", "/gnu-{0}/hello_world-0.0.1-beta".format(version))
"simone.campagna@tiscali.it"
] | simone.campagna@tiscali.it |
80df44273e2f313dce7038b7329a31df34e2b601 | 7358fef64817a640f224f6a1b0ef22f7e4812d4b | /Materi/Materi 8 Fungsi/isGenap.py | 926f99fc36b94ff6225596af70dc71181e8fc136 | [] | no_license | bimarakajati/Dasar-Pemrograman | 8d4124701c61900c2cc41ec89be2b08c492c8541 | af5e7abf122b8b151625504ac6739ab98996fb7f | refs/heads/master | 2023-08-24T19:32:00.591820 | 2021-10-13T20:10:12 | 2021-10-13T20:10:12 | 302,336,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | def is_Genap(i):
"""
diberikan suatu bilangan i dengan tipe
integer untuk mengecek apakah bilangan
tersebut bilangan genap atau bukan
"""
print('keterangan didalam fungsi is_Genap')
return i%2 == 0
is_Genap(4) | [
"bimandugal@gmail.com"
] | bimandugal@gmail.com |
57b3602e7f6251032bfd21f91cf0b5da466d7f53 | a0b53f6638af5b67cc591994eea7eb3aec0e44f9 | /misago/threads/tests/test_floodprotection.py | 9b8d42c5085c7017af247a4b8703dfe8a2cd13ab | [] | no_license | FelixFreelancer/react-django-forum | bb4d4ed3bac1c77694bd0ef50a122e5bd2efc3a6 | 442db7e56714f2baad17aac525e725873bdf1650 | refs/heads/master | 2022-12-12T21:28:50.301365 | 2019-05-09T03:12:56 | 2019-05-09T03:12:56 | 185,715,690 | 1 | 0 | null | 2022-12-08T01:22:57 | 2019-05-09T02:57:02 | Python | UTF-8 | Python | false | false | 1,525 | py | from django.urls import reverse
from misago.acl.testutils import override_acl
from misago.categories.models import Category
from misago.threads import testutils
from misago.users.testutils import AuthenticatedUserTestCase
class PostMentionsTests(AuthenticatedUserTestCase):
    """Checks that flood protection on the post-reply endpoint fails gracefully."""
    def setUp(self):
        """Create a thread in the first category and grant reply permissions."""
        super().setUp()
        self.category = Category.objects.get(slug='first-category')
        self.thread = testutils.post_thread(category=self.category)
        self.override_acl()
        # API endpoint for posting replies to the test thread.
        self.post_link = reverse(
            'misago:api:thread-post-list', kwargs={
                'thread_pk': self.thread.pk,
            }
        )
    def override_acl(self):
        """Patch the cached ACL so the user can see, browse, start and reply."""
        new_acl = self.user.acl_cache
        new_acl['categories'][self.category.pk].update({
            'can_see': 1,
            'can_browse': 1,
            'can_start_threads': 1,
            'can_reply_threads': 1,
        })
        override_acl(self.user, new_acl)
    def test_flood_has_no_showstoppers(self):
        """A rapid second post is rejected with 403, not a server error."""
        response = self.client.post(
            self.post_link, data={
                'post': "This is test response!",
            }
        )
        assert response.status_code == 200 if False else True  # see assertion below
        self.assertEqual(response.status_code, 200)
        # Immediately posting again must trip the flood limit.
        response = self.client.post(
            self.post_link, data={
                'post': "This is test response!",
            }
        )
        self.assertContains(
            response, "You can't post message so quickly after previous one.", status_code=403
        )
"explorerpower@hotmail.com"
] | explorerpower@hotmail.com |
a6866da48739be0c4f8ee125b99b4f00db8ce727 | 90882482926884e9d1bf8db393596b9b392eb3db | /4-repetition/1-while-loop/4-len/bot.py | ca51c1f85e27e69f0df0b92bdf35c8c5544b2f77 | [] | no_license | 4mayet21/COM404 | cc2eee29433c0a5f59985ac4c02af82f0cc01c10 | 377ae5cca76af883c59d208f17178d5697ceeb4b | refs/heads/master | 2020-07-31T09:15:03.783979 | 2019-11-26T12:11:53 | 2019-11-26T12:11:53 | 210,556,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | #asking for input
# Print one "bop " per character of the phrase the user types in.
phrase = input("please enter a phrase: ")
bops = "bop " * len(str(phrase))
print(bops)
"4mayet21@solent.ac.uk"
] | 4mayet21@solent.ac.uk |
2702b2b19ddfb0b10a4f4ce3f46c6c567491e9d2 | dbb4d1de645b16fe900d05d93f1fc31545ba9c99 | /Object_Design/5_class_and_static_decorator.py | f14252fb5c3d823baa15cf45b95dee522e1fc067 | [] | no_license | 0x-Robert/Algo_python_Study | 731d5902aec0e9d73b60b8f96e4f931efdcb28ce | 1940b46e5466e060aa69295b167173a316e3247d | refs/heads/main | 2023-07-31T11:49:49.483596 | 2021-09-08T07:24:51 | 2021-09-08T07:24:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | class A(object):
_hello = True
def foo(self, x):
print("foo{0} {1} 실행".format(self,x))
@classmethod
def class_foo(cls, x):
print("class_foo({0}, {1}) 실행: {2}".format(cls,x, cls._hello))
@staticmethod
def static_foo(x):
print("static_foo({0}) 실행".format(x))
if __name__ == "__main__":
    a = A()
    a.foo(1)
    # Class methods can be invoked on an instance or on the class itself.
    a.class_foo(2)
    A.class_foo(2)
    # Same for static methods.
    a.static_foo(3)
    A.static_foo(3)
"smartdragon417@gmail.com"
] | smartdragon417@gmail.com |
30bfa11e0595824edd8f03e6b69ce3e066a2646d | 389b781aaa933f431ee4f4bcc5b8df256ad0c967 | /kevin.py | 06d95b7a94959fd35deca7ffab80ca24a91d5e97 | [] | no_license | FrankJunkar/FavoriteThings | fcf9c133c1a5147489a26b7aef5b58e4cbf7ca59 | 419fa425f472f04ff7cf3324a87c76fe16d94acf | refs/heads/main | 2023-03-13T10:23:10.824339 | 2021-03-02T00:03:10 | 2021-03-02T00:03:10 | 341,363,777 | 0 | 0 | null | 2021-02-22T23:09:49 | 2021-02-22T23:09:48 | null | UTF-8 | Python | false | false | 427 | py | import random
import binascii
favorite_things = "c3ba01c3b35e2bc3b70e70c38c25307719c29427051dc38dc294c3b1c3b50918c297c2b7c2adc39e17c2b272c38e20c2b3386463c2863ac3a3c2b27450c38bc291c2a724c396c382c390c292"
def printfavs():
    """Decode the hex blob and print the XOR-"encrypted" favourites list."""
    decoded = binascii.unhexlify(favorite_things).decode()
    # Re-seeding replays the exact randint() keystream used to encrypt.
    random.seed("53c437_k3y")
    plaintext = "".join(chr(ord(symbol) ^ random.randint(0, 255)) for symbol in decoded)
    print(plaintext)
| [
"kevin@drymail.com"
] | kevin@drymail.com |
9b42a9e7e46436d0ff8326b0eda12b97ff858ab2 | f1ff4a61c6b850734c58ec114884cb0fa8f5d82a | /Francesco Raco/src/spotifyGroupingService.py | e86efd94089cba27c8991bad03acd1ae076f7e0e | [] | no_license | Fafixxx96/BigDataAnalitycs | 23227a9dab35345cfc6e81ad06c8781e71571979 | 8e21d3ce9c5dc1d9c4dbfc0023b90d0b5bf38994 | refs/heads/main | 2023-07-01T07:04:07.869518 | 2021-08-01T15:44:52 | 2021-08-01T15:44:52 | 323,847,283 | 0 | 0 | null | 2021-08-01T08:48:53 | 2020-12-23T08:35:17 | Python | UTF-8 | Python | false | false | 11,961 | py | import logging
import matplotlib
from numpy import double
logging.basicConfig(level= logging.ERROR)
import numpy
import math
import spotipy
from spotipy.oauth2 import SpotifyOAuth, SpotifyClientCredentials
import json
from pyspark.sql.session import SparkSession
from pyspark.sql.functions import Column
from pyspark.sql import functions
from pyspark.sql.types import StructType, StructField, DoubleType, StringType, IntegerType, ArrayType, DateType
from pyspark.ml.feature import VectorAssembler, MinMaxScaler
from pyspark.ml.clustering import KMeans, KMeansModel, KMeansSummary
from pyspark.ml.evaluation import ClusteringEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml.classification import NaiveBayes, DecisionTreeClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.feature import Bucketizer
from sklearn.metrics import silhouette_score, silhouette_samples
from sklearn.cluster import DBSCAN
from sklearn.cluster import KMeans as km
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedLocator, FixedFormatter
"""
The following is the function that will be utilized, once the clustering is done, to map each track with the proper genre.
"""
def map_features(track):
    """Label one scaled feature row with its nearest k-means centroid.

    Returns the track's feature values as a list with the index of the
    closest centroid appended (the pseudo-genre label).  Relies on the
    module-level `centroids` produced by the clustering step.
    """
    point = numpy.array(list(track), dtype='float64')
    best_label = 0
    best_distance = 100000000  # sentinel larger than any real distance
    for index in range(len(centroids)):
        gap = numpy.linalg.norm(point - numpy.array(centroids[index]))
        if gap < best_distance:
            best_label = index
            best_distance = gap
    labelled = list(track)
    labelled.append(best_label)
    return labelled
"""
A function for saving the audio features of the user's favorite tracks into the same representation of the Kaggle dataset,
in order to use the same preprocessing.
"""
def map_saved_track(track):
    """Flatten one saved Spotify track into a row matching the Kaggle layout.

    Fetches the track's audio features from the Spotify API and returns
    [name, id, <features_columns order minus popularity/key>, popularity,
    key], mirroring the column order used for the genres dataframe.
    """
    popularity = track['popularity']
    result = []
    # One API call per track - slow for large libraries.
    audio_features = client.audio_features(track['id'])[0]
    header = list(features_columns)
    # popularity and key are appended last, so drop them from the middle.
    header.remove('popularity')
    header.remove('key')
    result.append(track['name'])
    result.append(track['id'])
    for elem in header:
        result.append(float(audio_features[elem]))
    result.append(float(popularity))
    result.append(float(audio_features['key']))
    return result
"""
Creation of the SparkSession and loading of the starting datasets into dataframes
"""
# Local Spark session used for all dataframe work below.
spark = SparkSession.builder.\
    master("local[*]").\
    appName("genreClassifier").\
    config("spark.some.config.option", "some-value").\
    getOrCreate()
spark.sparkContext.setLogLevel('ERROR')
# Kaggle dataset locations (hard-coded to the author's machine).
tracks_path = "C:\\Users\\franc\\PycharmProjects\\bdaProject\\data\\tracks.csv"
genres_path = "C:\\Users\\franc\\PycharmProjects\\bdaProject\\data\\genres.csv"
# Schema of the per-genre audio-feature CSV.
genres_schema = StructType([
    StructField("mode", DoubleType(), True),
    StructField("genres", StringType(), True),
    StructField("acousticness", DoubleType(), True),
    StructField("danceability", DoubleType(), True),
    StructField("duration_ms", DoubleType(), True),
    StructField("energy", DoubleType(), True),
    StructField("instrumentalness", DoubleType(), True),
    StructField("liveness", DoubleType(), True),
    StructField("loudness", DoubleType(), True),
    StructField("speechiness", DoubleType(), True),
    StructField("tempo", DoubleType(), True),
    StructField("valence", DoubleType(), True),
    StructField("popularity", DoubleType(), True),
    StructField("key", DoubleType(), True)
])
# Load the genres CSV; the textual genre name is dropped so only
# numeric audio features remain for clustering.
starting_genres_df = spark.\
    read.\
    format("csv").\
    option("header", "true").\
    schema(genres_schema).\
    load(genres_path).\
    drop('genres').cache()
# Schema of the per-track CSV.
tracks_schema = StructType([
    StructField("id", StringType(), True),
    StructField("name", StringType(), True),
    StructField("popularity", DoubleType(), True),
    StructField("duration_ms", DoubleType(), True),
    StructField("explicit", DoubleType(), True),
    StructField("artists", StringType(), True),
    StructField("id_artists", StringType(), True),
    StructField("release_date", StringType(), True),
    StructField("danceability", DoubleType(), True),
    StructField("energy", DoubleType(), True),
    StructField("key", DoubleType(), True),
    StructField("loudness", DoubleType(), True),
    StructField("mode", DoubleType(), True),
    StructField("speechiness", DoubleType(), True),
    StructField("acousticness", DoubleType(), True),
    StructField("instrumentalness", DoubleType(), True),
    StructField("liveness", DoubleType(), True),
    StructField("valence", DoubleType(), True),
    StructField("tempo", DoubleType(), True),
    StructField("time_signature", DoubleType(), True)
])
# Load the tracks CSV, dropping rows with missing values.
starting_tracks_df = spark.\
    read.\
    format("csv").\
    option("header", "true").\
    schema(tracks_schema).\
    load(tracks_path).dropna().cache()
"""
Now we group together the genres features into vectors of doubles, and then we scale them to have values between 0 and 1.
"""
features_columns = starting_genres_df.columns
assembler = VectorAssembler().setInputCols(features_columns).setOutputCol('features')
assembled_df = assembler.transform(starting_genres_df).select('features')
min_max_scaler = MinMaxScaler().setMin(0).setMax(1).setInputCol('features').setOutputCol('scaled_features')
fitted_scaler = min_max_scaler.fit(assembled_df)
scaled_genres_df = fitted_scaler.transform(assembled_df).select('scaled_features')
"""
Lets implement KMeans
"""
print("starting clustering")
chosen_K = 7
X = []
for elem in scaled_genres_df.select('scaled_features').collect():
X.append(list(elem['scaled_features']))
kmeans_sk = km(n_clusters=chosen_K, random_state=1899).fit(X)
centroids = kmeans_sk.cluster_centers_
"""
Let's now implement the classifier.
"""
print("starting classification")
tracks_tmp_df = starting_tracks_df.select(features_columns)
tracks_assembler = VectorAssembler().setInputCols(features_columns).setOutputCol('features')
assembled_tracks_df = tracks_assembler.transform(tracks_tmp_df).select('features')
tracks_scaler = MinMaxScaler().setMin(0).setMax(1).setInputCol('features').setOutputCol('scaled_tracks_features')
fitted_tracks_scaler = tracks_scaler.fit(assembled_tracks_df)
scaled_tracks_df = fitted_tracks_scaler.transform(assembled_tracks_df).select('scaled_tracks_features')
tracks_rdd = scaled_tracks_df.rdd
mapped_rdd = tracks_rdd.map(lambda x: map_features(x))
mapped_df = spark.createDataFrame(mapped_rdd.collect(), schema=['features', 'label'])
training_set, test_set = mapped_df.randomSplit([0.8,0.2])
print("preprocessing done. creating the classifier")
dt = DecisionTreeClassifier().setFeaturesCol('features').setLabelCol('label')
dt_evaluator = MulticlassClassificationEvaluator().setLabelCol('label').setPredictionCol('prediction')
dt_params = ParamGridBuilder().\
addGrid(dt.impurity, ['gini', 'entropy']).\
addGrid(dt.maxDepth,[5, 8, 10]).build()
dt_validator = CrossValidator().setEstimator(dt).setEvaluator(dt_evaluator).setEstimatorParamMaps(dt_params).setNumFolds(10)
fitted_dt = dt_validator.fit(training_set)
dt_prediction = fitted_dt.transform(test_set)
dt_accuracy = dt_evaluator.evaluate(dt_prediction)
print("accuracy of DT classifier for k = " + str(chosen_K) + " is " + str(dt_accuracy))
"""
Define the credentials
"""
client_id = ""
client_secret = ""
redirect_uri = "http://localhost:8085"
scope ="user-library-read"
#"playlist-read-private"
"""
Creation of the Spotify Client
"""
client = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=client_id,
client_secret=client_secret,
redirect_uri=redirect_uri,
scope=scope))
tracks_batch = 50
playlist = client.current_user_saved_tracks(limit=tracks_batch)
tracks = playlist['items']
total_tracks = playlist['total']
remaining_tracks = total_tracks - tracks_batch
offset = 50
while remaining_tracks > 0:
if remaining_tracks >= tracks_batch:
tmp = client.current_user_saved_tracks(limit=tracks_batch, offset=offset)
for elem in tmp['items']:
tracks.append(elem)
offset = offset + tracks_batch
remaining_tracks = remaining_tracks - tracks_batch
else:
tmp = client.current_user_saved_tracks(limit=remaining_tracks,offset=offset)
for elem in tmp['items']:
tracks.append(elem)
offset = offset+remaining_tracks
remaining_tracks = 0
print("\n")
saved_tracks = []
for elem in tracks:
saved_tracks.append(map_saved_track(elem['track']))
for elem in saved_tracks:
print(elem)
# Column order for the favourites dataframe: name, id, then the same
# feature columns used for the genres data.
favorite_tracks_header = list(features_columns)
favorite_tracks_header.insert(0,'id')
favorite_tracks_header.insert(0,'name')
# NOTE(review): this schema is built but never used - createDataFrame
# below infers types from favorite_tracks_header instead; confirm
# whether it was meant to be passed as schema=.
favorite_tracks_schema = StructType([
    StructField("name", StringType(), True),
    StructField("id", StringType(), True),
    StructField("mode", DoubleType(), True),
    StructField("acousticness", DoubleType(), True),
    StructField("danceability", DoubleType(), True),
    StructField("duration_ms", DoubleType(), True),
    StructField("energy", DoubleType(), True),
    StructField("instrumentalness", DoubleType(), True),
    StructField("liveness", DoubleType(), True),
    StructField("loudness", DoubleType(), True),
    StructField("speechiness", DoubleType(), True),
    StructField("tempo", DoubleType(), True),
    StructField("valence", DoubleType(), True),
    StructField("popularity", DoubleType(), True),
    StructField("key", DoubleType(), True)
])
favorite_tracks_df = spark.createDataFrame(data=saved_tracks,schema=favorite_tracks_header)
print("favorite tracks dataframe created")
# Assemble and min-max scale the favourites exactly like the genre data.
favorite_tracks_assembler = VectorAssembler().setInputCols(features_columns).setOutputCol('features')
favorite_tracks_assembled_df = favorite_tracks_assembler.transform(favorite_tracks_df).select('name','id','features')
favorite_tracks_scaler = MinMaxScaler().setMin(0).setMax(1).setInputCol('features').setOutputCol('scaled_favorite_tracks_features')
fitted_favorite_tracks_scaler = favorite_tracks_scaler.fit(favorite_tracks_assembled_df)
scaled_favorite_tracks_df = fitted_favorite_tracks_scaler.transform(favorite_tracks_assembled_df).select('name','id', 'scaled_favorite_tracks_features').withColumnRenamed("scaled_favorite_tracks_features", "features")
scaled_favorite_tracks_df.show()
# Classify each favourite track into one of the pseudo-genre clusters.
only_features_df = scaled_favorite_tracks_df.select('features')
favorite_tracks_predictions = fitted_dt.transform(only_features_df)
# Join predictions back to names/ids via the feature vectors.
final_df = scaled_favorite_tracks_df.join(favorite_tracks_predictions, 'features', 'inner')
print("tracks scaling done")
print("mapping done")
# Create one public playlist per cluster and fill it with the user's
# favourite tracks that were classified into that cluster.
user_id = 'prp468n1n5qp2sdr1ps5hk8t0'
playlists_names = []
scope2 = 'playlist-modify-public'
client2 = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=client_id,
                                                    client_secret=client_secret,
                                                    redirect_uri=redirect_uri,
                                                    scope=scope2))
for i in range(chosen_K):
    current_name = "genre " + str(i)
    current_playlist = client2.user_playlist_create(user=user_id,name = current_name)
    playlists_names.append(current_name)
    print("current playlist's id is: " + str(current_playlist['id']))
    # Track ids of all favourites predicted to belong to cluster i.
    playlist_subset = final_df.where(final_df.prediction == i).select('id').collect()
    id_list = []
    for elem in playlist_subset:
        id_list.append(elem['id'])
    print(id_list)
    if len(id_list) > 0:
        # NOTE(review): playlist_add_items accepts at most 100 items per
        # call - clusters larger than that would need chunking; confirm.
        client2.playlist_add_items(playlist_id=current_playlist['id'], items=id_list)
| [
"noreply@github.com"
] | Fafixxx96.noreply@github.com |
e4a8b726e97e5b7e572f45b81f4f0dcf2eabafe5 | 8a932777c890284c5eabc203c4916516f4e314ce | /examples/example_flask.py | cbe30bd866157ced8d7a031e179630bf396f4a4f | [] | no_license | watxaut/flask-project | da58e5da06a865f2619fac2cc4ef63405cb8a9d0 | a81d0e701fc2845157bb346083149ee74985e989 | refs/heads/master | 2021-06-17T18:27:32.656865 | 2019-11-02T08:56:58 | 2019-11-02T08:56:58 | 169,991,536 | 0 | 0 | null | 2021-03-20T00:34:35 | 2019-02-10T15:16:18 | Python | UTF-8 | Python | false | false | 838 | py | from flask import Flask, jsonify, request
# __name__ == "__main__" if executed. It will give the module name if imported and printed
app = Flask(__name__)
# main page (like 'index.html')
@app.route("/")
def home():
    """Root endpoint: returns a plain-text greeting (no template used)."""
    # Flask automatically looks at the 'templates' folder when render_template
    # is used; here we return a literal string instead.
    return "Hello world!"
# POST - Used to receive data
# GET - Used to send data back only
# POST /endpoint data:
@app.route("/endpoint", methods=['GET'])
def return_something():
d_return = {
"something": "this is a return message",
"type": "dummy one ofc"
}
return jsonify(d_return)
# test with postman/swagger
# POST /post data:
@app.route("/post", methods=['POST'])
def return_post():
data = request.get_json()
return jsonify({"data_received": data})
app.run(port=5000)
| [
"watxaut@gmail.com"
] | watxaut@gmail.com |
ca6f20af4089ca0e8e1ecd2c061a2b5fc46bdd11 | a7ed2f9d4cbdadd159dab7c817d8e3955beed2b5 | /home/migrations/0005_product_url.py | 9fa38e0bfdc6143565ad53cb075a891f5ea4ed02 | [] | no_license | m0rtez4/1 | d8cd47613d666b9598fdf111a0da98b074e624b4 | d291dda1f60628287fc9090256c681f758499ff0 | refs/heads/master | 2023-07-21T10:40:20.145749 | 2021-08-19T20:53:29 | 2021-08-19T20:53:29 | 396,295,471 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # Generated by Django 3.2.5 on 2021-07-13 00:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add an optional `url` CharField to Product."""
    # Must run after the migration that removed Product.total_price.
    dependencies = [
        ('home', '0004_remove_product_total_price'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='url',
            field=models.CharField(blank=True, max_length=200),
        ),
    ]
| [
"m.golalipour@yahoo.com"
] | m.golalipour@yahoo.com |
2a61130e38abfdb993ca7a5d2c63578e361d5ba3 | 7ff908b0d3196d9170e37590654d21288ba0afc7 | /lotto.py | f9fd1a6553693d0bc6e5011740b42c182493c411 | [] | no_license | my0614/python | 9fca95adf7175344e61b13b5c2940284e6cc5fc8 | de87665e8d63c3c878abeef1be55173113c810ad | refs/heads/master | 2021-11-22T23:35:02.103330 | 2021-08-29T05:06:19 | 2021-08-29T05:06:19 | 212,062,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | import random
# Draw 7 "winning" numbers (6 main numbers + 1 bonus, positions 0-6).
lotto_value = []
count = 0
one = 0  # NOTE(review): unused variable
for i in range(7):
    # NOTE(review): randrange can repeat numbers; a real lottery draw
    # samples without replacement (random.sample) - confirm intent.
    a = random.randrange(1,46)
    lotto_value.append(a)
print(lotto_value)
# Read 7 space-separated guesses from the user.
lotto_input = input('로또 번호를 적어주세요').split(' ')
print(lotto_input)
# Count matches among the 6 main numbers.
# NOTE(review): this compares position by position; a real lottery
# counts membership regardless of order - verify intent.
for i in range(0,6):
    if int(lotto_input[i]) == lotto_value[i]:
        count += 1
# b == 1 when the bonus number (7th) matches as well.
if int(lotto_input[6]) == lotto_value[6]:
    b = 1
else:
    b = 0
print('count',count)
# Prize tiers: 6 main matches -> 1st, 5 main + bonus -> 2nd, etc.
if count >= 6:
    print('1등')
elif count + b >= 6:
    print('2등')
elif count +b >= 5:
    print('3등')
elif count + b >= 4:
    print('4등')
elif count + b >= 3:
    print('5등')
"noreply@github.com"
] | my0614.noreply@github.com |
a5182a3592a73630ecc56a742be9063f21eaa18f | 1084f6e49cb5bc119e269074abeb19892936ced0 | /2_estruturas_de_condicao/exercicio_21/exercicio.py | 8c043089dab08a77a37dbb68de6301db7f4ba225 | [
"MIT"
] | permissive | ericsporto/Logica_com_Python | b14c68856156d20e54451064bc2854636a724740 | 6036de44776c69da1381d4b4e05abbfd7523c9f5 | refs/heads/main | 2023-09-04T13:25:14.591405 | 2021-10-22T10:44:16 | 2021-10-22T10:44:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | rodas = int(input("Digite a quantidade de rodas: "))
print(rodas)
if rodas > 2:
print("Pagar pedágio!")
if rodas == 2:
print("Pode passar livremente!")
| [
"89411680+ericsporto@users.noreply.github.com"
] | 89411680+ericsporto@users.noreply.github.com |
cbea8d19bfec7562e32c00134f80b76fc7ac3e63 | 035b9151e3e35e9c697c04eb54aca4a44d4a4cd2 | /polls/views.py | 50f21bedcaca2320703383f31f2dda3beebf35b9 | [] | no_license | ghm8418/mysite | 1139e4c8fc3980c0afdaa7ea9945f58444269697 | 4103faef9df2d921c9124190661353c474bf4ed2 | refs/heads/master | 2020-04-07T06:59:28.660217 | 2018-11-19T07:53:24 | 2018-11-19T07:53:24 | 158,159,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,026 | py | from django.shortcuts import render
from django.http import HttpResponse
from polls.models import Question
# Create your views here.
# def index(request):
# q = Question.objects.all()[0]
# choices = q.choice_set.all()
#
# print(q.question_text)
# print(choices[0].choice_text)
# print(choices[1].choice_text)
# print(choices[2].choice_text)
#
# return HttpResponse('polls index')
def index(request):
    """Render the question list page."""
    # NOTE: the template context key is the singular 'question' even though it
    # carries the whole queryset; the template depends on that exact name.
    context = {'question': Question.objects.all()}
    return render(request, 'polls/index.html', context)
def detail(request, question_id):  # question detail page
    """Render the detail page for one question with its choices.

    Context keys: 'question' (the question text), 'num' (question id),
    'choice' (queryset of the question's Choice objects).
    """
    q = Question.objects.get(id=question_id)
    choices = q.choice_set.all()
    # The old loop concatenating choice_text into a string fed only a
    # commented-out HttpResponse; that dead code is removed.
    return render(request,
                  'polls/detail.html',
                  {'question': q.question_text,
                   'num': q.id,
                   'choice': choices})
def results(request, question_id):
    """Placeholder results page for the given question id."""
    return HttpResponse(
        "You're looking at the results of question %s." % question_id)
def vote(request, question_id):  # voting page
    """Increment the chosen choice's vote counter, then show the results.

    A missing 'select' value, a non-numeric id, or an id that does not belong
    to this question is ignored on purpose (best effort), matching the
    original bare-except behavior — but without hiding unrelated bugs.
    The debug print of the selected id was removed.
    """
    q = Question.objects.get(id=question_id)
    try:
        select = request.POST['select']
        c = q.choice_set.get(id=select)
    except (KeyError, ValueError, q.choice_set.model.DoesNotExist):
        # KeyError: no 'select' in the POST data; ValueError: non-numeric id;
        # DoesNotExist: id is not one of this question's choices.
        pass
    else:
        c.votes += 1
        c.save()
    return render(request,
                  'polls/result.html',
                  {'q': q}
                  )
def detail2(request, num1, num2):  # addition demo
    """Respond with the sum of the two values captured from the URL."""
    total = num1 + num2
    return HttpResponse(total)
def edit(request, question_id):
    """Render the edit form for one question (context key 'q')."""
    question = Question.objects.get(id=question_id)
    return render(request, 'polls/edit.html', {'q': question})
def save(request, question_id):
    """Persist the edited question text posted from the edit form."""
    new_text = request.POST['q']
    question = Question.objects.get(id=question_id)
    question.question_text = new_text
    question.save()
    return HttpResponse('수정완료')
"ghm8418@gmail.com"
] | ghm8418@gmail.com |
d27c1611bd3737bd6b4d8b6aecbf4c536cec23b3 | 015098bcb0e7b5595337c1b3e702942ed5b01272 | /setup.py | 6036b2725a5bdc962a01ccd158e2d7961c4446af | [] | no_license | ASSNAKE/assnake-core-binning | f0e0676aa6bcdc4fc60fa98fcdb49d0f5fa960a7 | 7b7e539722e18226b1dd9cd23231a4fda4ba78c9 | refs/heads/master | 2022-12-25T18:39:22.303737 | 2020-10-09T09:53:07 | 2020-10-09T09:53:07 | 236,690,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
import os, shutil
# Package metadata: registers this package as an assnake plugin through the
# 'assnake.plugins' entry-point group.
setup(
    name='assnake-core-binning',
    version='0.0.1',
    packages=find_packages(),
    entry_points={
        'assnake.plugins': [
            'assnake-core-binning = assnake_core_binning.snake_module_setup:snake_module'
        ]
    },
)
"fedorov.de@gmail.com"
] | fedorov.de@gmail.com |
3a5a5002e2468ce50e405be4a11ab9223ceffb70 | 19d6b853089421142d46e2db26ff639dc4546b6f | /walkietrackie/wsgi.py | 3562f32968095906c9ac6d302791103a0446afc3 | [
"Unlicense"
] | permissive | polmuz/walkietrackie | 478d52857c835d63f69d9c4290e669daca149076 | f498c9b0b833c481292ad60432fc9bc7e15816b3 | refs/heads/master | 2020-04-25T23:30:39.719767 | 2014-12-31T19:19:41 | 2014-12-31T19:19:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | """
WSGI config for walkietrackie project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os

# The settings module must be configured before Django's WSGI machinery is
# pulled in, so this setdefault stays ahead of the imports below.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "walkietrackie.settings")

from django.core.wsgi import get_wsgi_application
from dj_static import Cling

# Wrap the Django app with Cling so dj-static serves the static files.
django_app = get_wsgi_application()
application = Cling(django_app)
| [
"pablomouzo@gmail.com"
] | pablomouzo@gmail.com |
6d3d0232ab5ec62fd9963d3b5f279118a4925235 | beb315cb6c2ef23652d2de7a9adee062e02c5722 | /shop/auth_/views.py | 4ade748693bcef96cde288bf837051476c857524 | [] | no_license | Yessenaly/SoftProject | 8a537038f15542dd5939187d549ee89816811ebc | 4e9cfeab21d47a05bf658267a4304fe0724cc123 | refs/heads/master | 2020-04-23T10:18:36.927426 | 2019-02-21T20:15:21 | 2019-02-21T20:15:21 | 171,100,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | from django.shortcuts import render , redirect
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib import auth
# Create your views here.
def register(request):
    """Show the registration form on GET; create the account on POST.

    POST creates the user as a side effect and redirects to 'home'.
    NOTE(review): duplicate usernames are not handled here — confirm whether
    create_user failures should be surfaced to the visitor.
    """
    if request.method == 'GET':
        return render(request, 'register.html')
    username = request.POST['username']
    password = request.POST['password']
    # The created user object was never used; creation is the side effect.
    User.objects.create_user(username=username, password=password)
    return redirect('home')
def login(request):
    """Authenticate on POST; otherwise just render the login form."""
    if request.method != "POST":
        return render(request, 'login.html')
    username = request.POST['username']
    password = request.POST['password']
    user = auth.authenticate(username=username, password=password)
    if user is not None and user.is_active:
        auth.login(request, user)
        return redirect('products')
    # Invalid credentials or inactive account: re-render with a message.
    error = "username or password incorrect"
    return render(request, 'login.html', {'error': error})
| [
"yessen3103@gmail.com"
] | yessen3103@gmail.com |
ebe0c7c995796cf5a3412cdf516158f9b5ae94b9 | 8ae22e2027a87c26e18d02febb436960321f005c | /core/migrations/0021_causas_tecnico.py | 45db335b9a6856aa892ab5464df19c5111b410ec | [] | no_license | Astralroth/Proyect-Lex | 67b8136d7b7e0824506e76a8f0924f23a61b50e3 | 9ec756cf2733bd937739a9268e0ef4f566a24cfd | refs/heads/main | 2023-06-11T03:45:11.608048 | 2021-07-01T18:10:15 | 2021-07-01T18:10:15 | 380,637,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | # Generated by Django 3.2.4 on 2021-06-30 22:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add a nullable 'tecnico' FK on Causas.

    The foreign key targets the project's swappable user model and cascades
    on delete; null=True keeps existing rows valid.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0020_auto_20210630_1758'),
    ]

    operations = [
        migrations.AddField(
            model_name='causas',
            name='tecnico',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"francosandoval003@gmail.com"
] | francosandoval003@gmail.com |
a2236073f0785781b83840404f7b1e3f300e4246 | d083fcc77d98c9724d8de534f91e6bdd403925d4 | /CodingTemplate/datasets/kittidataset.py | 100e0c4725f23f040fd7b5bafb717457dadb9c1c | [] | no_license | htXin/CodingTemplate | c17c0b913102f380e53bf9856001aacd14a2d237 | f9232abf069c39586dbaa92f5b71c749ce831f8a | refs/heads/main | 2023-04-15T11:00:22.552421 | 2021-04-20T08:34:18 | 2021-04-20T08:34:18 | 359,731,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,587 | py | import os
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
import datasets.datastruct.label_object as label_object
import datasets.datastruct.calib_object as calib_object
import argparse
class KittiDataSet(Dataset):
    """Reader for the KITTI object-detection dataset.

    Expects the standard KITTI layout under <root_dir>/KITTI_DATASET_ROOT
    ('training' or 'testing' sub-tree depending on *split*) and a split file
    ./data_split_file/splitedDataSet/<split>.txt listing the frame ids.
    __getitem__/__len__ are left abstract for task-specific subclasses.
    """

    def __init__(self, root_dir, split):
        is_train = split == 'train'
        self.data_dir = os.path.join(
            root_dir, 'KITTI_DATASET_ROOT', 'training' if is_train else 'testing')
        self.image_dir = os.path.join(self.data_dir, 'image_2')
        self.lidar_dir = os.path.join(self.data_dir, 'velodyne')
        self.label_dir = os.path.join(self.data_dir, 'label_2')
        self.calib_dir = os.path.join(self.data_dir, 'calib')
        # ImageNet normalization statistics (RGB channel order).
        self.mean = [0.485, 0.456, 0.406]
        self.std = [0.229, 0.224, 0.225]
        split_dir = os.path.join(
            os.getcwd(), "data_split_file", 'splitedDataSet', split + '.txt')
        assert os.path.exists(split_dir)
        # Context manager so the split-file handle is not leaked.
        with open(split_dir) as f:
            self.data_idx_list = [line.strip() for line in f]
        self.num_data = len(self.data_idx_list)

    def get_data_idx_list(self):
        """Return the list of frame-id strings from the split file."""
        return self.data_idx_list

    def get_image(self, index):
        """Return image <index> as loaded by cv2 (BGR ndarray)."""
        import cv2  # lazy import: cv2 is only required by this accessor
        imagefile = os.path.join(self.image_dir, '%06d.png' % index)
        assert os.path.exists(imagefile)
        return cv2.imread(imagefile)

    def get_image_shape(self, index):
        """Return (height, width, 3) of image <index> without decoding pixels."""
        imagefile = os.path.join(self.image_dir, '%06d.png' % index)
        assert os.path.exists(imagefile)
        with Image.open(imagefile) as image:  # close the handle after reading size
            w, h = image.size
        return h, w, 3

    def get_image_rgb_norm(self, index):
        """Return image <index> as a zero-padded, ImageNet-normalized RGB array.

        :param index: integer frame id
        :return: float64 array of shape (384, 1280, 3); the image sits in the
                 top-left corner and the remainder stays zero.
                 Assumes source images are no larger than 384x1280 — TODO confirm.
        """
        imagefile = os.path.join(self.image_dir, '%06d.png' % index)
        assert os.path.exists(imagefile)
        with Image.open(imagefile) as im:
            # np.float was removed in NumPy 1.24; float64 is the same dtype.
            img = np.array(im.convert('RGB'), dtype=np.float64)
        img = img / 255.0
        img -= self.mean
        img /= self.std
        imback = np.zeros([384, 1280, 3], dtype=np.float64)
        imback[:img.shape[0], :img.shape[1], :] = img
        return imback

    def get_lidar(self, index):
        """Return the point cloud of frame <index> as an (N, 4) float32 array.

        The .bin file stores points as a flat float32 sequence
        x1, y1, z1, r1, x2, y2, z2, r2, ... where (x, y, z) are coordinates
        and r is the reflectance.
        """
        lidarfile = os.path.join(self.lidar_dir, '%06d.bin' % index)
        assert os.path.exists(lidarfile)
        return np.fromfile(lidarfile, dtype=np.float32).reshape(-1, 4)

    def get_label(self, index):
        """Return the list of Label_Object annotations for frame <index>."""
        labelfile = os.path.join(self.label_dir, '%06d.txt' % index)
        assert os.path.exists(labelfile)
        with open(labelfile, 'r') as f:
            # One annotation object per line (lines keep their newline, as before).
            return [label_object.Label_Object(line) for line in f]

    def get_calib(self, index):
        """Return the Calib_Object holding the calibration of frame <index>."""
        calibfile = os.path.join(self.calib_dir, '%06d.txt' % index)
        assert os.path.exists(calibfile)
        with open(calibfile, 'r') as f:
            lines = f.readlines()
        return calib_object.Calib_Object(lines)

    def __getitem__(self, item):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError
if __name__ == "__main__":
    # Smoke test: parse CLI options, build a dataset, and dump one frame.
    parser = argparse.ArgumentParser(description='get kittidataset root')
    parser.add_argument('--data_set_root', type=str,
                        default='d:', help='kittiDataSet root')
    parser.add_argument('--model', type=str, default='train',
                        help='select model(such as ''trian'')')
    options = parser.parse_args()
    dataset = KittiDataSet(options.data_set_root, options.model)
    frame_id = 0
    img = dataset.get_image_rgb_norm(frame_id)
    lidar = dataset.get_lidar(frame_id)
    label = dataset.get_label(frame_id)
    calib = dataset.get_calib(frame_id)
    print(lidar.shape)
    print(label[0].to_str())
    print(calib.to_str())
    print(len(dataset.get_data_idx_list()))
| [
"noreply@github.com"
] | htXin.noreply@github.com |
56845e58bbe8305e5bc3d7b2e20895a2f7696084 | 6eb0fb83ab926be3835973ab2703fb9fb0f384c7 | /traintest/binary_trainer.py | 65f3055dbaf78d395029d7e151e6a818af5f0259 | [] | no_license | meetsiyuan/MCTS | 26a64434b512303b87787923d6047d9e20b29ed4 | 2d34ec72c2358a5bf4dd0b2855a7900fbb8feae7 | refs/heads/master | 2022-10-13T16:17:29.742075 | 2020-06-07T11:48:35 | 2020-06-07T11:48:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,798 | py | import torch
import numpy as np
import random
import time
from traintest.trainer import Trainer
from utils import *
__all__ = ['BinaryTrainer']
class BinaryTrainer(Trainer):
"""
Binarynet专用
"""
def __init__(self, model, dataloader, criterion, optimizer, device,
vis=None, vis_interval=20, lr_scheduler=None):
super(BinaryTrainer, self).__init__(model, dataloader, criterion, optimizer,
device, vis, vis_interval, lr_scheduler)
def train(self, model=None, epoch=None, train_dataloader=None, criterion=None,
optimizer=None, lr_scheduler=None, vis=None, vis_interval=None):
"""注意:如要更新model必须更新optimizer和lr_scheduler"""
self.update_attr(epoch, model, optimizer, train_dataloader, criterion, vis, vis_interval)
self.model.train() # 训练模式
self.init_meters()
end_time = time.time()
# print("training...")
# pbar = tqdm(
# enumerate(self.train_dataloader),
# total=len(self.train_dataset)/self.config.batch_size,
# )
# for batch_index, (input, target) in pbar:
for batch_index, (input, target) in enumerate(self.train_dataloader):
# measure data loading time
self.dataload_time.update(time.time() - end_time)
# compute output
input, target = input.to(self.device), target.to(self.device)
output = self.model(input)
loss = self.criterion(output, target)
# compute gradient and do SGD step
self.optimizer.zero_grad()
loss.backward()
for param in list(self.model.parameters()):
if hasattr(param, 'org'):
param.data.copy_(param.org)
self.optimizer.step() # 反向传播传的是全精度gradient
for param in list(self.model.parameters()):
if hasattr(param, 'org'):
param.org.copy_(param.data.clamp_(-1, 1))
# meters update
self.upadate_meters(output, target, loss)
# measure elapsed time
self.batch_time.update(time.time() - end_time)
end_time = time.time()
# print log
done = (batch_index+1) * self.train_dataloader.batch_size
percentage = 100. * (batch_index+1) / len(self.train_dataloader)
self.print_log(epoch, done, percentage)
self.visualize_plot(epoch, batch_index, percentage)
print("")
self.visualize_log(epoch)
# update learning rate
if self.lr_scheduler is not None:
self.lr_scheduler.step(epoch=epoch)
return self.model | [
"1934455602@qq.com"
] | 1934455602@qq.com |
8275b4667d792989ce88f4409448f56247504e74 | 74518c42d7d909f134e6958e6aea3e555307d73f | /crawler.py | 1d5722874c768a550cb9ae2d50440f8dfec0d804 | [] | no_license | pariweshsubedi/simple-python-crawler | bc03ae55a76a92ab68b1f4d781b273778daf63a0 | d471c3c2e5d1def259a57046805117ad81eefb6a | refs/heads/master | 2021-01-20T21:06:20.492448 | 2016-06-15T05:32:47 | 2016-06-15T05:39:28 | 61,180,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,152 | py | #coding:utf8
from bs4 import BeautifulSoup
import requests
import json
import re
class Crawler():
"""
A crawler that fetches the latest ads from
site : http://hamrobazaar.com/
with input keyword
to output json in format:
[{
"price" : "",
"ad_url" : "",
"user_profile" : "",
"title" : ""
}]
Note: to be used for eduactional purpose only
"""
def __init__(self,base_url,url,depth):
self.depth = depth #depth of pages to crawl
self.offset = 0 #content offset in a page
self.page = 0
self.url = url
self.base_url = base_url
self.headers = {
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset' : 'utf-8;q=0.7,*;q=0.3',
'Accept-Encoding' : 'gzip,deflate,sdch',
'Accept-Language' : 'en-US,en;q=0.8',
'Connection': 'keep-alive',
'User-Agent' : 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.4 (KHTML, like Gecko) Chrome/22.0.1229.79 Safari/537.4',
'Referer' : self.base_url,
}
def next_page(self):
"""
Paging offset for the search pages
"""
self.offset+=20
self.page+=1
def crawl(self):
data = []
while self.page< self.depth:
try:
self.url = self.url+"&offset="+str(self.offset)
source = self.get_site(url)
soup = self.soup(source.text)
for td in soup.findAll('td', attrs={'height': '115','bgcolor':'#F2F4F9'}):
block = {}
title = td.find('font',{'style':'font-size:15px;font-family:Arial, Helvetica, sans-serif;'})
if title:
block["title"] = title.text
for link in td.findAll('a'):
href = link.get("href")
if re.search("useritems.php?",href):
block["user_profile"] = "http://hamrobazaar.com/" + href
else:
block["ad_url"] = "http://hamrobazaar.com/" + href
while td:
td = td.findNext('td',{'width':'100',"bgcolor":"#F2F4F9"})
if td:
block["price"] = td.find("b").text
data.append(block)
except Exception as e:
print e
self.next_page()
self.fileWriter(data)
def soup(self,plain_text):
return BeautifulSoup(plain_text,"html.parser")
def get_site(self,url):
return requests.get(self.url,headers=self.headers, timeout=10)
def fileWriter(self,data):
with open('urls.json', 'w') as f:
json.dump(data, f)
if __name__ == '__main__':
base_url = "http://hamrobazaar.com/"
url = "http://hamrobazaar.com/search.php?do_search=Search"
keyword = "guitar"
search_keyword_url = url + "&searchword=" + keyword
crawler = Crawler(base_url,search_keyword_url,5)
crawler.crawl() | [
"pariwesh123@gmail.com"
] | pariwesh123@gmail.com |
b86275ae56f9d0014b5c3a45b2b8249d042a0397 | c74b29b68211a51d7283d57b24d7cf83422a8ceb | /historischekranten2folia.py | 49a1dadee9ba395be694155de271a6c80da1c684 | [] | no_license | proycon/nlpsandbox | 63359e7cdd709dd81d66aed9bf1437f8ecf706a0 | 22e5f85852b7b2a658c6b94c3dedd425a5d6396f | refs/heads/master | 2020-12-09T19:37:10.040962 | 2019-04-23T17:17:15 | 2019-04-23T17:17:15 | 2,347,265 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | #!/usr/bin/env python3
import csv
import sys
from bs4 import BeautifulSoup
from pynlpl.formats import folia
for filename in sys.argv[1:]:
with open(filename, 'r',encoding='utf-8') as f:
reader = csv.DictReader(f, delimiter='\t', quotechar='"')
for row in reader:
docid = "historischekranten_" + row['id'] + '_' + row['article_id'] + '_' + row['paper_id']
print("Processing " + docid,file=sys.stderr)
doc = folia.Document(id=docid)
for key in ('id', 'article_id', 'article_title', 'paper_id', 'paper_title', 'date','article', 'err_text_type', 'colophon', 'colophon_text'):
doc.metadata[key] = row[key]
doc.declare(folia.Paragraph, "https://raw.githubusercontent.com/proycon/folia/master/setdefinitions/nederlab-historischekranten-par.ttl")
body = doc.append(folia.Text(doc, id=docid+".text"))
div = body.append(folia.Division, id=docid+".div")
if row['header'].strip():
head = div.append(folia.Head, BeautifulSoup(row['header'].strip(),'lxml').text, id=docid+".text.head")
if row['subheader'].strip():
div.append(folia.Paragraph, BeautifulSoup(row['subheader'].strip(), 'lxml').text, id=docid+".text.subheader", cls="subheader")
for i, partext in enumerate(row['article_text'].split('\n\n')):
partext = BeautifulSoup(partext.replace("=\n","").replace("\n"," "), "lxml").text.strip()
if partext:
paragraph = div.append(folia.Paragraph, partext, id=docid+".text.p." + str(i+1), cls="normal")
doc.save(docid + ".folia.xml")
| [
"proycon@anaproy.nl"
] | proycon@anaproy.nl |
b776e05c4aebbeae77ba412fb2ebf0fec81ef261 | d3aef2ce0ee88c92516e64018f6d9f880911438c | /demo/urls.py | 0137d9575b7afac8bf893f382ea0ac49ae67e9f8 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | Apkawa/django-material | 648451d28a21270ddff937abf92931592ab9a56e | 426e845ac27db0e1351bbb7f68377949581dfbd7 | refs/heads/master | 2021-01-15T17:51:49.304338 | 2016-02-26T10:34:15 | 2016-02-26T10:34:15 | 52,146,120 | 0 | 0 | null | 2016-02-20T09:29:25 | 2016-02-20T09:29:24 | null | UTF-8 | Python | false | false | 6,434 | py | from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views import generic
from django.shortcuts import render
from formtools.wizard.views import SessionWizardView
from material.frontend import urls as frontend_urls
from . import forms, widget_forms
def index_view(request):
context = {
'login': forms.LoginForm(),
'registration': forms.RegistrationForm(),
'checkout': forms.CheckoutForm(),
'order': forms.OrderForm(),
'comment': forms.CommentForm(),
'bank': forms.BankForm(),
'hospital': forms.HospitalRegistrationForm(),
}
return render(request, 'index.html', context)
class Wizard(SessionWizardView):
form_list = [forms.WizardForm1, forms.WizardForm2]
def done(self, form_list, **kwargs):
return render(self.request, 'formtools/wizard/wizard_done.html', {
'form_data': [form.cleaned_data for form in form_list],
})
class WidgetFormView(generic.FormView):
template_name = 'widgets_demo.html'
def form_valid(self, form):
return self.render_to_response(
self.get_context_data(form=form))
urlpatterns = [
url(r'^$', index_view),
# demo
url(r'^demo/login/$', generic.FormView.as_view(
form_class=forms.LoginForm, success_url='/demo/login/', template_name="demo.html")),
url(r'^demo/registration/$', generic.FormView.as_view(
form_class=forms.RegistrationForm, success_url='/demo/registration/', template_name="demo.html")),
url(r'^demo/contact/$', generic.FormView.as_view(
form_class=forms.ContactForm, success_url='/demo/contact/', template_name="demo.html")),
url(r'^demo/order/$', generic.FormView.as_view(
form_class=forms.OrderForm, success_url='/demo/order/', template_name="demo.html")),
url(r'^demo/checkout/$', generic.FormView.as_view(
form_class=forms.CheckoutForm, success_url='/demo/checkout/', template_name="demo.html")),
url(r'^demo/comment/$', generic.FormView.as_view(
form_class=forms.CommentForm, success_url='/demo/comment/', template_name="demo.html")),
url(r'^demo/bank/$', generic.FormView.as_view(
form_class=forms.BankForm, success_url='/demo/bank/', template_name="demo.html")),
url(r'^demo/wizard/$', Wizard.as_view()),
url(r'^demo/hospital/$', generic.FormView.as_view(
form_class=forms.HospitalRegistrationForm, success_url='/demo/hospital/', template_name="demo.html")),
url(r'^foundation/basic/', generic.RedirectView.as_view(url='/?cache=no', permanent=False)),
# widget test
url(r'^demo/widget/boolean/$', WidgetFormView.as_view(form_class=widget_forms.BooleanFieldForm)),
url(r'^demo/widget/char/$', WidgetFormView.as_view(form_class=widget_forms.CharFieldForm)),
url(r'^demo/widget/choice/$', WidgetFormView.as_view(form_class=widget_forms.ChoiceFieldForm)),
url(r'^demo/widget/date/$', WidgetFormView.as_view(form_class=widget_forms.DateFieldForm)),
url(r'^demo/widget/datetime/$', WidgetFormView.as_view(form_class=widget_forms.DateTimeFieldForm)),
url(r'^demo/widget/decimal/$', WidgetFormView.as_view(form_class=widget_forms.DecimalFieldForm)),
url(r'^demo/widget/duration/$', WidgetFormView.as_view(form_class=widget_forms.DurationFieldForm)),
url(r'^demo/widget/email/$', WidgetFormView.as_view(form_class=widget_forms.EmailFieldForm)),
url(r'^demo/widget/file/$', WidgetFormView.as_view(form_class=widget_forms.FileFieldForm)),
url(r'^demo/widget/filepath/$', WidgetFormView.as_view(form_class=widget_forms.FilePathFieldForm)),
url(r'^demo/widget/float/$', WidgetFormView.as_view(form_class=widget_forms.FloatFieldForm)),
url(r'^demo/widget/image/$', WidgetFormView.as_view(form_class=widget_forms.ImageFieldForm)),
url(r'^demo/widget/integer/$', WidgetFormView.as_view(form_class=widget_forms.IntegerFieldForm)),
url(r'^demo/widget/ipaddress/$', WidgetFormView.as_view(form_class=widget_forms.GenericIPAddressFieldForm)),
url(r'^demo/widget/multiplechoice/$', WidgetFormView.as_view(form_class=widget_forms.MultipleChoiceFieldForm)),
url(r'^demo/widget/nullbolean/$', WidgetFormView.as_view(form_class=widget_forms.NullBooleanFieldForm)),
url(r'^demo/widget/regex/$', WidgetFormView.as_view(form_class=widget_forms.RegexFieldForm)),
url(r'^demo/widget/slug/$', WidgetFormView.as_view(form_class=widget_forms.SlugFieldForm)),
url(r'^demo/widget/time/$', WidgetFormView.as_view(form_class=widget_forms.TimeFieldForm)),
url(r'^demo/widget/url/$', WidgetFormView.as_view(form_class=widget_forms.URLFieldForm)),
url(r'^demo/widget/uuid/$', WidgetFormView.as_view(form_class=widget_forms.UUIDField)),
url(r'^demo/widget/combo/$', WidgetFormView.as_view(form_class=widget_forms.ComboFieldForm)),
url(r'^demo/widget/splitdatetime/$', WidgetFormView.as_view(form_class=widget_forms.SplitDateTimeFieldForm)),
url(r'^demo/widget/modelchoice/$', WidgetFormView.as_view(form_class=widget_forms.ModelChoiceFieldForm)),
url(r'^demo/widget/modelmultichoice/$', WidgetFormView.as_view(form_class=widget_forms.ModelMultipleChoiceFieldForm)),
url(r'^demo/widget/password/$', WidgetFormView.as_view(form_class=widget_forms.PasswordInputForm)),
url(r'^demo/widget/hidden/$', WidgetFormView.as_view(form_class=widget_forms.HiddenInputForm)),
url(r'^demo/widget/textarea/$', WidgetFormView.as_view(form_class=widget_forms.TextareaForm)),
url(r'^demo/widget/radioselect/$', WidgetFormView.as_view(form_class=widget_forms.RadioSelectForm)),
url(r'^demo/widget/checkboxmultiple/$', WidgetFormView.as_view(
form_class=widget_forms.CheckboxSelectMultipleForm)),
url(r'^demo/widget/fileinput/$', WidgetFormView.as_view(form_class=widget_forms.FileInputForm)),
url(r'^demo/widget/splithiddendatetime/$', WidgetFormView.as_view(
form_class=widget_forms.SplitHiddenDateTimeWidgetForm)),
url(r'^demo/widget/selectdate/$', WidgetFormView.as_view(form_class=widget_forms.SelectDateWidgetForm)),
# admin
url(r'^admin/', include(admin.site.urls)),
# frontend
url(r'^frontend/$', generic.RedirectView.as_view(url='/frontend/accounting/', permanent=False), name="index"),
url(r'', include(frontend_urls)),
]
if 'zinnia' in settings.INSTALLED_APPS:
urlpatterns += [url(r'^weblog/', include('zinnia.urls', namespace='zinnia'))]
| [
"kmmbvnr@gmail.com"
] | kmmbvnr@gmail.com |
a08dc9693ab281fca9297639b0196989edb536ed | 64f368cdf1555260bcdbdd556cefa4aac99aea77 | /armgcc/aarch64-cortexa53-linux-gnu/lib64/libstdc++.so.6.0.22-gdb.py | ca3f171e4176b78758cca5835b650befc7053c70 | [
"MIT"
] | permissive | best008/BC_ARMGCC_S5P6818 | a3d01e574d4118439b0c922bb25920469c44f7db | 570f410a7f4a10be3416da0aac022443f3bffd24 | refs/heads/master | 2020-04-11T10:46:43.957194 | 2018-12-14T03:23:52 | 2018-12-14T03:23:52 | 161,725,902 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | ../../aarch64-cortexa53-linux-gnu/sysroot/lib64/libstdc++.so.6.0.22-gdb.py | [
"2362317758@qq.com"
] | 2362317758@qq.com |
d69a60ce1ac5bc784e7be8caf9b4d419321c1239 | c0a1c7d0df229cd0236311f8344ae85d20560868 | /Numpy/August-23-2019/calculating_Volume_Weighted_Average_Price.py | 090e9e9a978f33964952f4ee7cd4fbde0578c60c | [] | no_license | edaaydinea/python-libraries-study | a67cc72a954723da92e798e3cd7f10105ad30b6c | 1431f778f6edab0b7209ddbbe4a4adb5805cb251 | refs/heads/master | 2020-07-06T02:39:20.677670 | 2019-09-11T19:47:15 | 2019-09-11T19:47:15 | 202,862,334 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from __future__ import print_function
import numpy as np
c,v = np.loadtxt("data.csv",delimiter=",",usecols=(6,7),unpack=True)
vwap= np.average(c,weights=v)
print("VMAP=", vwap) | [
"noreply@github.com"
] | edaaydinea.noreply@github.com |
1adbedc4b372ae1eec8d80d473a2248610c0e861 | 41288c85ba5bbb644efd36310858034153893bb2 | /k_medoids.py | cde790f76334ea1421fdd4ecc4f264cd2cab8554 | [] | no_license | suhamida/pythonPractice | 796247652069d5d73c865bbd923c101ee08f6071 | 6345015b66c370a26fb3918b8dd3725a0df42d11 | refs/heads/main | 2023-07-06T19:36:43.915297 | 2021-08-08T07:15:27 | 2021-08-08T07:15:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,879 | py |
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from IPython import embed
import time
def _get_init_centers(n_clusters, n_samples):
#return random points as initial centers'''
init_ids = []
while len(init_ids) < n_clusters:
_ = np.random.randint(0, n_samples)
if not _ in init_ids:
init_ids.append(_)
return init_ids
def _get_distance(data1, data2):
#example distance function'''
return np.sqrt(np.sum((data1 - data2) ** 2))
def _get_cost(X, centers_id, dist_func):
'''return total cost and cost of each cluster'''
st = time.time()
dist_mat = np.zeros((len(X), len(centers_id)))
# compute distance matrix
for j in range(len(centers_id)):
center = X[centers_id[j], :]
for i in range(len(X)):
if i == centers_id[j]:
dist_mat[i, j] = 0.
else:
dist_mat[i, j] = dist_func(X[i, :], center)
# print 'cost ', -st+time.time()
mask = np.argmin(dist_mat, axis=1)
members = np.zeros(len(X))
costs = np.zeros(len(centers_id))
for i in range(len(centers_id)):
mem_id = np.where(mask == i)
members[mem_id] = i
costs[i] = np.sum(dist_mat[mem_id, i])
return members, costs, np.sum(costs), dist_mat
def _kmedoids_run(X, n_clusters, dist_func, max_iter=1000, tol=0.001, verbose=True):
#run algorithm return centers, members, and etc.'''
# Get initial centers
n_samples, n_features = X.shape
init_ids = _get_init_centers(n_clusters, n_samples)
if verbose:
print('Initial centers are ', init_ids)
centers = init_ids
members, costs, tot_cost, dist_mat = _get_cost(X, init_ids, dist_func)
cc, SWAPED = 0, True
while True:
SWAPED = False
for i in range(n_samples):
if not i in centers:
for j in range(len(centers)):
centers_ = deepcopy(centers)
centers_[j] = i
members_, costs_, tot_cost_, dist_mat_ = _get_cost(X, centers_, dist_func)
if tot_cost_ - tot_cost < tol:
members, costs, tot_cost, dist_mat = members_, costs_, tot_cost_, dist_mat_
centers = centers_
SWAPED = True
if verbose:
print('Change centers to ', centers)
if cc > max_iter:
if verbose:
print('End Searching by reaching maximum iteration', max_iter)
break
if not SWAPED:
if verbose:
print('End Searching by no swaps')
break
cc += 1
return centers, members, costs, tot_cost, dist_mat
class KMedoids(object):
def __init__(self, n_clusters, dist_func=_get_distance, max_iter=10000, tol=0.0001):
self.n_clusters = n_clusters
self.dist_func = dist_func
self.max_iter = max_iter
self.tol = tol
def fit(self, X, plotit=True, verbose=True):
centers, members, costs, tot_cost, dist_mat = _kmedoids_run(
X, self.n_clusters, self.dist_func, max_iter=self.max_iter, tol=self.tol, verbose=verbose)
if plotit:
fig, ax = plt.subplots(1, 1)
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
if self.n_clusters > len(colors):
raise ValueError('we need more colors')
for i in range(len(centers)):
X_c = X[members == i, :]
ax.scatter(X_c[:, 0], X_c[:, 1], c=colors[i], alpha=0.5, s=30)
ax.scatter(X[centers[i], 0], X[centers[i], 1], c=colors[i], alpha=1., s=250, marker='*')
return
def predict(self, X):
raise NotImplementedError()
print(kM)
### https://github.com/shenxudeu/K_Medoids/blob/master/k_medoids.py ### | [
"hamida.cse@gmail.com"
] | hamida.cse@gmail.com |
8f6074736677b40ad3abc447c437659f71c7eb0f | fbf4f26a2b97d4fe35aa7b66e9cfed4cd0224e89 | /chlamdb/eutils/sequence_exact_match.py | 83e231040a3d492e29ca337400d07a339d4fe140 | [] | no_license | metagenlab/chlamdb | a100ab93407e15c33684b8d7175873adc6720d0b | f1829cf19ac1ded032d65689fbbff2d37489f739 | refs/heads/master | 2023-03-07T05:30:02.793914 | 2023-03-02T10:30:57 | 2023-03-02T10:30:57 | 179,291,344 | 6 | 1 | null | 2022-11-01T07:01:54 | 2019-04-03T13:02:40 | HTML | UTF-8 | Python | false | false | 4,816 | py | #!/usr/bin/env python
def process_tag(tag):
return tag.split('}')[-1]
def get_UPI(seq):
for element in seq:
if element.tag == '{http://model.picr.ebi.ac.uk}UPI':
return element.text
def get_hit_attributes(hit):
accession = ''
version = ''
taxon_id = ''
db_name = ''
for element in hit:
if element.tag == '{http://model.picr.ebi.ac.uk}accession':
accession = element.text
if element.tag == '{http://model.picr.ebi.ac.uk}accessionVersion':
version = element.text
if element.tag == '{http://model.picr.ebi.ac.uk}databaseName':
db_name = element.text
if element.tag == '{http://model.picr.ebi.ac.uk}taxonId':
taxon_id = element.text
return {"%s.%s" % (accession, version) : [db_name, taxon_id]}
def accession2exact_matches(sequence, target_databases):
'''
Givent an input AA sequence and target(s) database name(s), return:
- the uniparc accession of the sequence (if exists)
- a dictionary with accession(s) of identical sequence(s) and their taxon ID and source database.
(Accession.version keys)
Return None if no identical squence was found.
:param sequence: input AA sequence
:param target_databases: Input database name (see http://www.ebi.ac.uk/Tools/picr/)
'''
import urllib2
import xml.etree.cElementTree as ElementTree
database_string = '&database=' .join(target_databases)
link = "http://www.ebi.ac.uk/Tools/picr/rest/getUPIForSequence?sequence=%s&database=%s&includeattributes=true" % (sequence,
database_string)
print link
req = urllib2.Request(link)
try:
page = urllib2.urlopen(req)
tree = ElementTree.parse(page)
except:
import time
print 'connexion problem, trying again...'
time.sleep(60)
db2seq = {}
root = tree.getroot()
seq = root.find('{http://www.ebi.ac.uk/picr/AccessionMappingService}getUPIForSequenceReturn')
if seq is None:
return None
UPI = get_UPI(seq)
identical_seqs = seq.findall('{http://model.picr.ebi.ac.uk}identicalCrossReferences')
for seq in identical_seqs:
db2seq.update(get_hit_attributes(seq))
return UPI, db2seq
def fasta_corresp(fasta_file, target_database, n_keep=1):
from Bio import SeqIO
import sys
print 'keep', n_keep
with open(fasta_file, 'r') as f:
records = SeqIO.parse(f, 'fasta')
for record in records:
picr = accession2exact_matches(record.seq,
target_database)
if picr is None:
sys.stdout.write('%s\t%s\t%s\t%s\n' % (record.name, 'None', 'None', 'None'))
else:
uniparc_accession, matches = picr
database2count = {}
for accession in matches:
if matches[accession][0] not in database2count:
database2count[matches[accession][0]] = 1
else:
if database2count[matches[accession][0]] < n_keep:
database2count[matches[accession][0]] += 1
else:
break
sys.stdout.write('%s\t%s\t%s\t%s\n' % (record.name,
uniparc_accession,
accession,
matches[accession][1]))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-p", '--protein_seq', type=str, help="Protein sequence")
parser.add_argument("-d", '--database', type=str, help="Target database(s): 'REFSEQ', 'TREMBL', ...", nargs='+', default= ['TREMBL', 'SWISSPROT'])
parser.add_argument("-f", '--fasta_file', type=str, help="Fasta file")
parser.add_argument("-k", '--keep', type=int, help="Number of hit(s) to keep (default: 1)", default=1)
args = parser.parse_args()
if args.protein_seq and args.fasta_file:
raise(IOError('Input either a fasta file or a protein seqience, not both!'))
elif args.protein_seq:
picr = accession2exact_matches(args.protein_seq,
args.database)
if picr is not None:
uniparc_accession, matches = picr
print uniparc_accession, matches
else:
if len(args.database) > 1:
raise(IOError('Fasta file match is only possible for a single database!'))
else:
fasta_corresp(args.fasta_file, args.database, n_keep=args.keep)
| [
"trestan.pillonel@gmail.com"
] | trestan.pillonel@gmail.com |
4d11d38be8a5327f5b2570194e363b7d16a02a24 | 05ef845deda7863134c65da01679b92196a6dab2 | /configs/models/classifier-basic.py | c6cdd3eb98ef2e7375cca5517508a66cd5d40cfe | [
"MIT"
] | permissive | gleb-t/S4 | 6dc6dd417519a8a93b5ef9ac52f412857776eb54 | e88abf5f630aae7f2a825a7f686c23db6066d6ff | refs/heads/master | 2023-01-24T22:26:50.756351 | 2022-04-24T09:51:35 | 2022-04-24T09:51:35 | 399,844,486 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | from typing import *
from Siamese.config import SiameseConfig
# noinspection DuplicatedCode
def build_classifier(config: SiameseConfig, inputShape: Tuple[int, ...]) -> 'keras.models.Sequential':
    """Build the basic pair classifier: a single dense sigmoid unit
    (i.e. logistic regression) over the given input shape.

    Note: ``config`` is accepted for interface parity but not used here.
    """
    import keras

    layers = [keras.layers.Dense(1, activation='sigmoid', input_shape=inputShape)]
    return keras.models.Sequential(layers)
| [
"gleb-t@users.noreply.github.com"
] | gleb-t@users.noreply.github.com |
4f03dcbb3a4981e832ea2ada8019b8427ffa6e1e | 7173d5322427947dcf09174e61193f99901ffb40 | /common/samples-generator.py | 4c47a6f5b8170a21f364dd79e55b8fb6435c433d | [
"MIT"
] | permissive | mksoc/ISA-filter-design | 8b917822e196ebd6496407760723b81e0c4ebfa8 | bc2af7f8ca85a06ffa2e9c3a572c9093ef42cef3 | refs/heads/master | 2020-03-31T12:27:44.488771 | 2019-03-21T10:12:26 | 2019-03-21T10:12:26 | 152,216,761 | 1 | 0 | MIT | 2018-11-11T17:18:37 | 2018-10-09T08:37:41 | Verilog | UTF-8 | Python | false | false | 1,275 | py | #! /usr/bin/python3
from sys import exit
import random
# define possible modes
# Menu of generator modes shown to the user; keys are what the user types.
modes = {1:"Random", 2:"Special (only 0 and extremes)"}
for key, value in modes.items():
    print("{} - {}".format(key, value))
# prompt user for mode selection
try:
    mode = int(input("Select the generator mode: "))
except ValueError:
    print("Error. Invalid option.")
    exit(1)
else:
    # only modes 1 and 2 exist
    if not (1 <= mode <= 2):
        print("Error. Invalid option.")
        exit(1)
# prompt user for number of samples
samples = input("Type number of samples (default is 201): ")
if not samples:
    # empty input -> use the default advertised in the prompt
    samples = 201
else:
    try:
        samples = int(samples)
    except ValueError:
        print("Error. Invalid option.")
        exit(1)
    else:
        if samples < 0:
            print("Error. Invalid option.")
            exit(1)
# generate samples
# NB: sample word width in bits; values span the symmetric signed range
# [-(2**(NB-1) - 1), 2**(NB-1) - 1] (the most negative two's-complement
# value is deliberately excluded).
NB = 12
print("Generating samples...")
with open('py-samples.txt', 'w') as outFile:
    for i in range(samples):
        if mode == 1:
            outFile.write('{}\n'.format(random.randint(-2**(NB-1) + 1, 2**(NB-1) - 1)))
        elif mode == 2:
            # special mode: only zero and the two range extremes
            outFile.write('{}\n'.format(random.choice([0, -2**(NB-1) + 1, 2**(NB-1) - 1])))
        else:
            # unreachable given the validation above, kept as a safety net
            print("Error. Unknown error. Exiting.")
            exit(1)
print("Done.")
| [
"27402195+mksoc@users.noreply.github.com"
] | 27402195+mksoc@users.noreply.github.com |
06cf5691c097f29cecbf4016aadddc46e96c8ec1 | 6b520836bf5caedb064af02bc7b9ec9c828f0885 | /03 Num Dates Times/C04binhexdecoct.py | 6610c06e7b6cfbaf416801949101b4c708c49cf1 | [] | no_license | tomtom0516/python-cookbook-3rd-edition | bc2c641133f96b4d5718335f8d17eee3d4426114 | e72545f99622b1f46fd05ff63e3c8d3d6841ab42 | refs/heads/master | 2022-03-10T22:10:19.179123 | 2019-12-05T02:25:04 | 2019-12-05T02:25:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | x = 1234
bin(x)  # binary string of x, e.g. '0b10011010010' for 1234
hex(x)  # hexadecimal string, e.g. '0x4d2'
oct(x)  # octal string, e.g. '0o2322'
int(0x4d2)  # hex literal -> 1234
int(0b10011010010)  # binary literal -> 1234
int(0o2322)  # octal literal -> 1234
int('4d2', 16)  # parse a string in base 16 -> 1234
int('10011010010', 2) | [
"pyptom76@163.com"
] | pyptom76@163.com |
7b5f8b2dd459f5a56b4dde5383683cdeec6ffa7a | e7324a597faef537e96d0c1d27b1877d229591bb | /odoo-venv/bin/pysassc | 12341ce28b1a5fff3024ced4898cc0812035101b | [] | no_license | ledonhomeweb1/ming | 6a95c02e456c808b061fd5e7862d51504ba0fca8 | c663fbfe6c2b3d7b0b9f7e5b94dddaca9fd8b7ed | refs/heads/master | 2022-12-16T10:51:23.767786 | 2020-09-14T08:59:02 | 2020-09-14T08:59:02 | 295,359,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | #!/opt/odoo13ming/odoo-venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pysassc import main
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ("-script.py(w)" / ".exe") from
    # argv[0] so pysassc reports a clean program name, then propagate its
    # exit status.
    program_name = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.argv[0] = program_name
    sys.exit(main())
| [
"ledonhome.web1@gmail.com"
] | ledonhome.web1@gmail.com | |
077d1f303b1d8e4453ccf710e00fdc43e75bd68c | 1f38f3cd0ba6d42dd73f273e3dc9df4ebdc0dc9d | /BuzzScoreSite/manage.py | 5463761455fffd46cd530a6382b889c5bc5c5ee1 | [] | no_license | MenshovSergey/BuzzScore | 2a5f8cfd9b46a85665455c2a5cfa298c9a3a698b | 348d1b2feb76a892e489016682f16e7a70a504a9 | refs/heads/master | 2021-01-16T19:49:40.996213 | 2013-10-12T09:43:46 | 2013-10-12T09:43:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the project settings unless the caller configured them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BuzzScoreSite.settings")

    # Imported after the settings module is selected.
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
| [
"snailandmail@gmail.com"
] | snailandmail@gmail.com |
faebc71ced6cff2f4f66945a372fd278748ac68c | 52e3f68d59615dab6928befa4b31f8bcf23595fa | /algorithm/재미있는 오셀로 게임.py | 9870c96a88804b09da5615cef24c5993cf78dad0 | [] | no_license | talkin24/python | 0b40f2ab8893b3137195987e149a9dc79493392a | b12ba8f97dbadf41fe534a57c57d1600852e00d2 | refs/heads/master | 2023-01-06T14:37:45.480144 | 2020-10-29T08:46:34 | 2020-10-29T08:46:34 | 281,813,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,521 | py | import sys
# Redirect stdin so input() reads the bundled test-case file.
sys.stdin = open("재미있는 오셀로 게임.txt", "r")
T = int(input())
for t in range(1, T + 1):
    print(f"#{t}", end=" ")
    N, M = input().split()
    N, M = int(N), int(M)
    # M moves, each given as "column row color" (1 = white, 2 = black).
    acts = [list(map(int, input().split())) for _ in range(M)]
    # (N+2)x(N+2) board with a one-cell border of 0s so the direction
    # scans below never index past the edge.  (Removed the dead
    # `b, w = 0, 0` counters; the final tally uses ws/bs.)
    board = [[0] * (N + 2) for i in range(N + 2)]  # add padding
    # Standard opening: two white (1) and two black (2) stones centred.
    board[N // 2][N // 2 + 1] = board[N // 2 + 1][N // 2] = 1
    board[N // 2][N // 2] = board[N // 2 + 1][N // 2 + 1] = 2
    # The 8 compass directions as (row, col) deltas.
    drs = [-1, -1, 0, 1, 1, 1, 0, -1]
    dcs = [ 0, 1, 1, 1, 0, -1, -1, -1]
    for act in acts:
        for d in range(len(drs)):
            c = act[0]
            r = act[1]
            board[r][c] = act[2]
            # Adjacent opponent stone in direction d -> scan for a bracket.
            if (board[r + drs[d]][c + dcs[d]] != 0) and (board[r + drs[d]][c + dcs[d]] != act[2]):
                r0 = r + drs[d]
                c0 = c + dcs[d]
                # Walk along the run of opponent stones (stop on empty).
                while board[r + drs[d]][c + dcs[d]] != act[2]:
                    if board[r + drs[d]][c + dcs[d]] == 0:
                        break
                    else:
                        r += drs[d]
                        c += dcs[d]
                # Bracketed by one of our own stones: flip the whole run.
                if board[r + drs[d]][c + dcs[d]] == act[2]:
                    while board[r0][c0] != act[2]:
                        board[r0][c0] = act[2]
                        r0 += drs[d]
                        c0 += dcs[d]
                    # NOTE(review): this break stops after the FIRST direction
                    # that flips, so a move flipping several directions only
                    # flips one.  Looks like a bug, but kept as-is to preserve
                    # the program's current output.
                    break
    # Final score: count white (1) and black (2) stones.
    # (Removed the redundant list(row) copies: row is already a list.)
    ws, bs = 0, 0
    for row in board:
        ws += row.count(1)
        bs += row.count(2)
print(ws, bs) | [
"35341110+talkin24@users.noreply.github.com"
] | 35341110+talkin24@users.noreply.github.com |
443fdd4993d3dad8cc8a84552bb38aad7f19f7ec | e0a991bbc1cc6ff2c595ad4952315028dd5d83a1 | /memory_ver3/finPID_by_processName.py | fca50bc27f77600c7b3d3b7307ae07ade32da906 | [] | no_license | fordivil/python | 1ff40e6995622e0fb3a13cc1e137fc8d528c35b1 | 4d2634158226c4c5ac4881d56ac1eac9253cdcd9 | refs/heads/master | 2021-01-21T14:01:25.461498 | 2016-05-26T04:53:06 | 2016-05-26T04:53:06 | 55,599,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | import psutil
def find_Process_Name(process_name):
    """Return the PID of the first running process whose name equals
    *process_name*, or 0 when no such process is found."""
    for proc in psutil.process_iter():
        # proc is already a psutil.Process -- no need to re-wrap its PID
        # in psutil.Process(proc.pid) as the original did.
        try:
            pname = proc.name()  # the process name
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # The process exited (or is inaccessible) between iteration and
            # the name lookup -- skip it instead of crashing.
            continue
        if pname == process_name:
            return proc.pid
    return 0
print(find_Process_Name("LolClient.exe")) | [
"fordivil1@gmail.com"
] | fordivil1@gmail.com |
731b016384695c7fbf09a1241bfce9fa6e6acf50 | 2ca90c4da88dd233fd1102d75c6a023263c1d891 | /netbox/netbox_inventory.py | 1633f238f3270a58cfed414e4e1e9ccdcc59026f | [
"Apache-2.0"
] | permissive | mtbutler07/netcfgbu | 499b93d7ee346e6a01dfc10929894d569dc68db6 | d3c5945f50f49c9e646fda47954bd92014282087 | refs/heads/master | 2022-10-07T19:51:17.790289 | 2020-06-10T00:46:29 | 2020-06-10T00:46:29 | 271,167,326 | 0 | 0 | Apache-2.0 | 2020-06-10T03:12:32 | 2020-06-10T03:12:31 | null | UTF-8 | Python | false | false | 4,296 | py | #!/usr/bin/env python
#
# This script is used to retrieve the device inventory from a Netbox system and
# emit the CSV file to either stdout (default) or a filename provided
#
# The following Environment variables are REQUIRED:
#
# NETBOX_ADDR: the URL to the NetBox server
# "https://my-netbox-server"
#
# NETBOX_TOKEN: the NetBox login token
# "e0759aa0d6b4146-from-netbox-f744c4489adfec48f"
#
# The following Environment variables are OPTIONAL:
#
# NETBOX_INVENTORY_OPTIONS
# Same as the options provided by "--help"
#
import sys
import argparse
import os
import csv
import requests # noqa
from urllib3 import disable_warnings # noqa
disable_warnings()
# CLI/env options controlling which NetBox devices end up in the inventory
# (parsed from NETBOX_INVENTORY_OPTIONS or the command line, see main()).
options_parser = argparse.ArgumentParser()
options_parser.add_argument("--site", action="store", help="limit devices to site")
options_parser.add_argument("--region", action="store", help="limit devices to region")
# append-style options may be given multiple times
options_parser.add_argument("--role", action="append", help="limit devices with role")
options_parser.add_argument(
    "--exclude-role", action="append", help="exclude devices with role"
)
options_parser.add_argument(
    "--exclude-tag", action="append", help="exclude devices with tag"
)
# destination for the generated CSV; defaults to stdout
options_parser.add_argument(
    "--output", type=argparse.FileType("w+"), default=sys.stdout
)
class NetBoxSession(requests.Session):
    """requests.Session bound to one NetBox instance.

    Stores the base URL and the token authorization header, and prefixes
    every request path with the base URL in prepare_request().
    """

    def __init__(self, url, token):
        super().__init__()  # modernized from super(NetBoxSession, self)
        self.url = url
        self.headers["authorization"] = "Token %s" % token
        # NOTE(security): TLS certificate verification is disabled (and the
        # urllib3 warnings are silenced at module import).  Acceptable for
        # self-signed lab instances, but consider making this configurable.
        self.verify = False

    def prepare_request(self, request):
        # Callers pass only the API path (e.g. "/api/dcim/devices/"); turn
        # it into an absolute URL against the configured NetBox server.
        request.url = self.url + request.url
        return super().prepare_request(request)
def main():
    """Fetch the device inventory from NetBox and emit it as CSV.

    Requires NETBOX_ADDR and NETBOX_TOKEN in the environment; optional
    filters come from NETBOX_INVENTORY_OPTIONS (";"-separated) or argv.
    """
    try:
        nb_url = os.environ["NETBOX_ADDR"]
        nb_token = os.environ["NETBOX_TOKEN"]
    except KeyError as exc:
        # typo fix: "envirnoment" -> "environment"
        sys.exit(f"ERROR: missing environment variable: {exc.args[0]}")

    # Options may come from the environment (";"-separated) or from argv.
    nb_env_opts = os.environ.get("NETBOX_INVENTORY_OPTIONS")
    opt_arg = nb_env_opts.split(";") if nb_env_opts else None
    nb_opts = options_parser.parse_args(opt_arg)

    # Only active devices that have a primary IP; limit=0 disables paging.
    params = dict(limit=0, status=1, has_primary_ip="true")
    if nb_opts.site:
        params["site"] = nb_opts.site

    if nb_opts.region:
        params["region"] = nb_opts.region

    netbox = NetBoxSession(url=nb_url, token=nb_token)
    res = netbox.get("/api/dcim/devices/", params=params)
    if not res.ok:
        sys.exit("FAIL: get inventory: " + res.text)

    body = res.json()
    device_list = body["results"]

    # -------------------------------------------------------------------------
    # User Filters
    # -------------------------------------------------------------------------
    # If Caller provided an explicit list of device-roles, then filter the
    # device list based on those roles before creating the inventory

    filter_functions = []

    if nb_opts.role:

        def filter_role(dev_dict):
            return dev_dict["device_role"]["slug"] in nb_opts.role

        filter_functions.append(filter_role)

    if nb_opts.exclude_role:

        def filter_ex_role(dev_dict):
            return dev_dict["device_role"]["slug"] not in nb_opts.exclude_role

        filter_functions.append(filter_ex_role)

    if nb_opts.exclude_tag:
        ex_tag_set = set(nb_opts.exclude_tag)

        def filter_ex_tag(dev_dict):
            # keep devices sharing no tag with the exclusion set
            return not set(dev_dict["tags"]) & ex_tag_set

        filter_functions.append(filter_ex_tag)

    def apply_filters():
        # generator yielding only devices passing ALL active filters
        for dev_dict in device_list:
            if all(fn(dev_dict) for fn in filter_functions):
                yield dev_dict

    # -------------------------------------------------------------------------
    # Create Inventory from device list
    # -------------------------------------------------------------------------

    csv_wr = csv.writer(nb_opts.output)
    csv_wr.writerow(["host", "ipaddr", "os_name"])

    for device in apply_filters() if filter_functions else device_list:
        hostname = device["name"]
        # primary_ip address comes as CIDR ("10.0.0.1/24"); keep the host part
        ipaddr = device["primary_ip"]["address"].split("/")[0]

        # if the platform value is not assigned, then skip this device.
        if not (platform := device["platform"]):
            continue

        csv_wr.writerow([hostname, ipaddr, platform["slug"]])
# Script entry point.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | mtbutler07.noreply@github.com |
bca0bdb8b08da22691ffa39b5e1104087b4180c0 | 9b79dc0b4b2f13dea85a1d29177e5eb266b6e7f7 | /var/lib/python-support/python2.6/rdflib/store/REGEXMatching.py | b7500a712c707ce0fccfdef7843481cac2b31775 | [] | no_license | haniokasai/netwalker-rootfs | 0bc87efc0ae478338b6326fd9118befcbcc5cd06 | d08f7bf370a82b6970387bb9f165d374a9d9092b | refs/heads/master | 2021-01-10T11:04:34.436513 | 2016-01-12T06:09:50 | 2016-01-12T06:09:50 | 36,504,146 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | /usr/share/python-support/python-rdflib/rdflib/store/REGEXMatching.py | [
"htek@haniokasai.com"
] | htek@haniokasai.com |
a25684a49262357be881aa5757e6209498b9ed21 | 51199e2c76498e67049a8e8d8308f551044c262a | /Python/GUI/PyQtLayout.py | 7763f69da7b2716765f481b486f3a7c04b0e08d0 | [
"MIT"
] | permissive | yishantao/DailyPractice | 9ab7129b7a573a9ff5f018bc293a40a6278675b8 | ee26859af3faf48e63d6c2850db1d895a8a88fb1 | refs/heads/master | 2020-03-18T18:59:11.153508 | 2018-08-02T10:58:50 | 2018-08-02T10:58:50 | 135,126,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,552 | py | # -*-coding:utf-8-*-
"""Arrange multiple labels using layout widgets."""
import sys
from PyQt5 import QtWidgets, QtCore
# Create the window class by subclassing QtWidgets.QWidget
class MyWindow(QtWidgets.QWidget):
    """Demo window: five labels arranged with nested box layouts in a grid."""

    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        self.setWindowTitle('PyQT')  # set the window title
        self.resize(300, 200)  # set the window size
        # Layout hierarchy: grid -> (hbox1 with label1, hbox2 with two vboxes)
        grid_layout = QtWidgets.QGridLayout()
        hbox_layout1 = QtWidgets.QHBoxLayout()
        hbox_layout2 = QtWidgets.QHBoxLayout()
        vbox_layout1 = QtWidgets.QVBoxLayout()
        vbox_layout2 = QtWidgets.QVBoxLayout()
        label1 = QtWidgets.QLabel('Label1', self)
        label1.setAlignment(QtCore.Qt.AlignCenter)
        label2 = QtWidgets.QLabel('Label2')
        label3 = QtWidgets.QLabel('Label3')
        label4 = QtWidgets.QLabel('Label4')
        label5 = QtWidgets.QLabel('Label5')
        # label1 alone in the top row; labels 2-5 in two stacked columns below
        hbox_layout1.addWidget(label1)
        vbox_layout1.addWidget(label2)
        vbox_layout1.addWidget(label3)
        vbox_layout2.addWidget(label4)
        vbox_layout2.addWidget(label5)
        hbox_layout2.addLayout(vbox_layout1)
        hbox_layout2.addLayout(vbox_layout2)
        grid_layout.addLayout(hbox_layout1, 0, 0)
        grid_layout.addLayout(hbox_layout2, 1, 0)
        # give the lower row most of the vertical space
        grid_layout.setRowMinimumHeight(1, 180)
        self.setLayout(grid_layout)
app = QtWidgets.QApplication(sys.argv)  # create the QApplication object
my_window = MyWindow()
my_window.show()  # show the window
# exec_ enters the event loop; sys.exit ensures a clean application exit
sys.exit(app.exec_())
| [
"38241025+yishantao@users.noreply.github.com"
] | 38241025+yishantao@users.noreply.github.com |
b81d4344b6186530702ceb706cebe876dde0b167 | 8259611d9ad49cca99e26ce07d1d792ab8c28b13 | /geopandas/_vectorized.py | 82e3d08d21a48de2dda8b0db3faed73dcbef410d | [
"BSD-3-Clause"
] | permissive | maifeeulasad/geopandas2 | 83218432bae245d691b592cb4f1ca8814221f23b | 12038d99dade80cacc7af6a55c42bcce8d9964a9 | refs/heads/master | 2023-06-03T23:21:21.857793 | 2021-06-30T05:42:55 | 2021-06-30T05:42:55 | 381,581,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,294 | py | """
Compatibility shim for the vectorized geometry operations.
Uses PyGEOS if available/set, otherwise loops through Shapely geometries.
"""
import warnings
import numpy as np
import shapely.geometry
import shapely.geos
import shapely.wkb
import shapely.wkt
from shapely.geometry.base import BaseGeometry
import _compat as compat
try:
    import pygeos
except ImportError:
    # BUG FIX: this previously bound the unrelated name ``geos`` and left
    # ``pygeos`` undefined, so any accidental use of the pygeos code paths
    # raised NameError instead of a clear error.  Bind the real name.
    pygeos = None
# Map pygeos GeometryType enum member names to the geometry-type strings
# exposed by geom_type(); MISSING and NAG ("not a geometry") have no
# string representation.
_names = {
    "MISSING": None,
    "NAG": None,
    "POINT": "Point",
    "LINESTRING": "LineString",
    "LINEARRING": "LinearRing",
    "POLYGON": "Polygon",
    "MULTIPOINT": "MultiPoint",
    "MULTILINESTRING": "MultiLineString",
    "MULTIPOLYGON": "MultiPolygon",
    "GEOMETRYCOLLECTION": "GeometryCollection",
}
if compat.USE_PYGEOS:
    # Lookup tables used by geom_type(): the sorted array of pygeos type ids
    # and the matching array of type-name strings.
    type_mapping = {p.value: _names[p.name] for p in pygeos.GeometryType}
    geometry_type_ids = list(type_mapping.keys())
    geometry_type_values = np.array(list(type_mapping.values()), dtype=object)
else:
    type_mapping, geometry_type_ids, geometry_type_values = None, None, None
def _isna(value):
    """
    Check if scalar value is NA-like (None or np.nan).

    Custom version that only works for scalars (returning True or False),
    as `pd.isna` also works for array-like input returning a boolean array.
    """
    if value is None:
        return True
    # NaN check only makes sense for floats; other types are never NA here.
    return bool(isinstance(value, float) and np.isnan(value))
def _pygeos_to_shapely(geom):
    """Convert a single pygeos geometry (or None) to a shapely geometry."""
    if geom is None:
        return None

    if compat.PYGEOS_SHAPELY_COMPAT:
        # Fast path: clone the underlying GEOS geometry pointer directly
        # and wrap it in a shapely object (no serialization round-trip).
        geom = shapely.geos.lgeos.GEOSGeom_clone(geom._ptr)
        return shapely.geometry.base.geom_factory(geom)

    # fallback going through WKB
    if pygeos.is_empty(geom) and pygeos.get_type_id(geom) == 0:
        # empty point does not roundtrip through WKB
        return shapely.wkt.loads("POINT EMPTY")
    else:
        return shapely.wkb.loads(pygeos.to_wkb(geom))
def _shapely_to_pygeos(geom):
    """Convert a single shapely geometry (or None) to a pygeos geometry."""
    if geom is None:
        return None

    if compat.PYGEOS_SHAPELY_COMPAT:
        return pygeos.from_shapely(geom)

    # fallback going through WKB
    if geom.is_empty and geom.geom_type == "Point":
        # empty point does not roundtrip through WKB
        return pygeos.from_wkt("POINT EMPTY")
    else:
        return pygeos.from_wkb(geom.wkb)
def from_shapely(data):
    """
    Convert a list or array of shapely objects to an object-dtype numpy
    array of validated geometry elements.

    Accepts shapely geometries, pygeos geometries (when enabled), objects
    exposing ``__geo_interface__`` and NA values (None/NaN -> None).
    Raises TypeError for anything else.
    """
    # First try a fast path for pygeos if possible, but do this in a try-except
    # block because pygeos.from_shapely only handles Shapely objects, while
    # the rest of this function is more forgiving (also __geo_interface__).
    if compat.USE_PYGEOS and compat.PYGEOS_SHAPELY_COMPAT:
        if not isinstance(data, np.ndarray):
            arr = np.empty(len(data), dtype=object)
            with compat.ignore_shapely2_warnings():
                arr[:] = data
        else:
            arr = data
        try:
            return pygeos.from_shapely(arr)
        except TypeError:
            # mixed/unsupported input; fall through to the per-element loop
            pass

    out = []

    for geom in data:
        if compat.USE_PYGEOS and isinstance(geom, pygeos.Geometry):
            out.append(geom)
        elif isinstance(geom, BaseGeometry):
            if compat.USE_PYGEOS:
                out.append(_shapely_to_pygeos(geom))
            else:
                out.append(geom)
        elif hasattr(geom, "__geo_interface__"):
            geom = shapely.geometry.shape(geom)
            if compat.USE_PYGEOS:
                out.append(_shapely_to_pygeos(geom))
            else:
                out.append(geom)
        elif _isna(geom):
            out.append(None)
        else:
            raise TypeError("Input must be valid geometry objects: {0}".format(geom))

    if compat.USE_PYGEOS:
        return np.array(out, dtype=object)
    else:
        # numpy can expand geometry collections into 2D arrays, use this
        # two-step construction to avoid this
        aout = np.empty(len(data), dtype=object)
        with compat.ignore_shapely2_warnings():
            aout[:] = out
        return aout
def to_shapely(data):
    """Convert an array of geometries back to shapely objects.

    With the shapely backend the input already holds shapely geometries
    and is returned unchanged.
    """
    if not compat.USE_PYGEOS:
        return data
    out = np.empty(len(data), dtype=object)
    with compat.ignore_shapely2_warnings():
        out[:] = [_pygeos_to_shapely(g) for g in data]
    return out
def from_wkb(data):
    """
    Convert a list or array of WKB byte strings to a np.ndarray[geoms].

    Empty/None entries become None.
    """
    if compat.USE_PYGEOS:
        return pygeos.from_wkb(data)

    import shapely.wkb

    geoms = [
        shapely.wkb.loads(g) if g is not None and len(g) else None for g in data
    ]
    aout = np.empty(len(data), dtype=object)
    with compat.ignore_shapely2_warnings():
        aout[:] = geoms
    return aout
def to_wkb(data, hex=False, **kwargs):
    """Serialize geometries to WKB (bytes, or hex strings when ``hex``)."""
    if compat.USE_PYGEOS:
        return pygeos.to_wkb(data, hex=hex, **kwargs)
    attr = "wkb_hex" if hex else "wkb"
    out = [None if geom is None else getattr(geom, attr) for geom in data]
    return np.array(out, dtype=object)
def from_wkt(data):
    """
    Convert a list or array of WKT strings (str or utf-8 bytes) to a
    np.ndarray[geoms].  Empty/None entries become None.
    """
    if compat.USE_PYGEOS:
        return pygeos.from_wkt(data)

    import shapely.wkt

    geoms = []
    for geom in data:
        if geom is None or not len(geom):
            geoms.append(None)
            continue
        if isinstance(geom, bytes):
            geom = geom.decode("utf-8")
        geoms.append(shapely.wkt.loads(geom))
    aout = np.empty(len(data), dtype=object)
    with compat.ignore_shapely2_warnings():
        aout[:] = geoms
    return aout
def to_wkt(data, **kwargs):
    """Serialize geometries to WKT strings (None stays None)."""
    if not compat.USE_PYGEOS:
        out = [None if geom is None else geom.wkt for geom in data]
        return np.array(out, dtype=object)
    return pygeos.to_wkt(data, **kwargs)
def _points_from_xy(x, y, z=None):
    """Shapely fallback helper: build a list of Point geometries from
    coordinate sequences (2D, or 3D when *z* is given)."""
    if len(x) != len(y):
        raise ValueError("x and y arrays must be equal length.")
    if z is None:
        return [shapely.geometry.Point(i, j) for i, j in zip(x, y)]
    if len(z) != len(x):
        raise ValueError("z array must be same length as x and y.")
    return [shapely.geometry.Point(i, j, k) for i, j, k in zip(x, y, z)]
def points_from_xy(x, y, z=None):
    """Build an array of Point geometries from coordinate arrays.

    Coordinates are coerced to float64; *z* is optional.
    """
    x = np.asarray(x, dtype="float64")
    y = np.asarray(y, dtype="float64")
    if z is not None:
        z = np.asarray(z, dtype="float64")

    if compat.USE_PYGEOS:
        return pygeos.points(x, y, z)
    else:
        out = _points_from_xy(x, y, z)
        aout = np.empty(len(x), dtype=object)
        # FIX: every other shapely->object-array assignment in this module
        # suppresses the shapely 2.0 deprecation warnings; this one did not.
        with compat.ignore_shapely2_warnings():
            aout[:] = out
        return aout
# -----------------------------------------------------------------------------
# Helper methods for the vectorized operations
# -----------------------------------------------------------------------------
def _binary_method(op, left, right, **kwargs):
    # type: (str, np.array[geoms], [np.array[geoms]/BaseGeometry]) -> array-like
    """Dispatch a binary operation to the pygeos function of the same name,
    converting a scalar shapely *right* to a pygeos geometry first."""
    if isinstance(right, BaseGeometry):
        (right,) = from_shapely([right])
    return getattr(pygeos, op)(left, right, **kwargs)
def _binary_geo(op, left, right):
    # type: (str, np.array[geoms], [np.array[geoms]/BaseGeometry]) -> np.array[geoms]
    """Element-wise geometry-returning binary operation (shapely backend).

    Supports difference, symmetric_difference, intersection and union.

    Parameters
    ----------
    op: string
    right: np.array[geoms] or single shapely BaseGeoemtry
    """
    if isinstance(right, BaseGeometry):
        # intersection can return empty GeometryCollections; numpy would
        # coerce a list of only those into an empty 2D array, hence the
        # two-step object-array construction.
        data = np.empty(len(left), dtype=object)
        with compat.ignore_shapely2_warnings():
            data[:] = [
                None if s is None or right is None else getattr(s, op)(right)
                for s in left
            ]
        return data
    if isinstance(right, np.ndarray):
        if len(left) != len(right):
            raise ValueError(
                "Lengths of inputs do not match. Left: {0}, Right: {1}".format(
                    len(left), len(right)
                )
            )
        data = np.empty(len(left), dtype=object)
        with compat.ignore_shapely2_warnings():
            data[:] = [
                None if a is None or b is None else getattr(a, op)(b)
                for a, b in zip(left, right)
            ]
        return data
    raise TypeError("Type not known: {0} vs {1}".format(type(left), type(right)))
def _binary_predicate(op, left, right, *args, **kwargs):
    # type: (str, np.array[geoms], np.array[geoms]/BaseGeometry, args/kwargs)
    # -> array[bool]
    """Element-wise boolean binary predicate (shapely backend).

    Supports contains, disjoint, intersects, touches, crosses, within,
    overlaps, covers, covered_by and equals.

    Parameters
    ----------
    op: string
    right: np.array[geoms] or single shapely BaseGeoemtry
    """
    # empty geometries are handled by shapely (all give False except disjoint)
    if isinstance(right, BaseGeometry):
        data = [
            False if s is None else getattr(s, op)(right, *args, **kwargs)
            for s in left
        ]
        return np.array(data, dtype=bool)
    if isinstance(right, np.ndarray):
        data = [
            False
            if a is None or b is None
            else getattr(a, op)(b, *args, **kwargs)
            for a, b in zip(left, right)
        ]
        return np.array(data, dtype=bool)
    raise TypeError("Type not known: {0} vs {1}".format(type(left), type(right)))
def _binary_op_float(op, left, right, *args, **kwargs):
    # type: (str, np.array[geoms], np.array[geoms]/BaseGeometry, args/kwargs)
    # -> array
    """Element-wise float-returning binary operation (shapely backend).

    Used for distance: missing or empty geometries yield np.nan instead of
    the 0.0 shapely currently returns
    (https://github.com/Toblerity/Shapely/issues/498).
    """

    def _missing(geom):
        return geom is None or geom.is_empty

    if isinstance(right, BaseGeometry):
        data = [
            np.nan if _missing(s) or right.is_empty
            else getattr(s, op)(right, *args, **kwargs)
            for s in left
        ]
        return np.array(data, dtype=float)
    if isinstance(right, np.ndarray):
        if len(left) != len(right):
            raise ValueError(
                "Lengths of inputs do not match. Left: {0}, Right: {1}".format(
                    len(left), len(right)
                )
            )
        data = [
            np.nan if _missing(a) or _missing(b)
            else getattr(a, op)(b, *args, **kwargs)
            for a, b in zip(left, right)
        ]
        return np.array(data, dtype=float)
    raise TypeError("Type not known: {0} vs {1}".format(type(left), type(right)))
def _binary_op(op, left, right, *args, **kwargs):
    # type: (str, np.array[geoms], np.array[geoms]/BaseGeometry, args/kwargs)
    # -> array
    """Element-wise binary operation returning a plain ndarray (shapely).

    Only ``project`` (float results) and ``relate`` (string results) are
    supported; empties are passed straight through to shapely.
    """
    if op == "project":
        null_value, dtype = np.nan, float
    elif op == "relate":
        null_value, dtype = None, object
    else:
        raise AssertionError("wrong op")

    if isinstance(right, BaseGeometry):
        data = [
            null_value if s is None else getattr(s, op)(right, *args, **kwargs)
            for s in left
        ]
        return np.array(data, dtype=dtype)
    if isinstance(right, np.ndarray):
        if len(left) != len(right):
            raise ValueError(
                "Lengths of inputs do not match. Left: {0}, Right: {1}".format(
                    len(left), len(right)
                )
            )
        data = [
            null_value
            if a is None or b is None
            else getattr(a, op)(b, *args, **kwargs)
            for a, b in zip(left, right)
        ]
        return np.array(data, dtype=dtype)
    raise TypeError("Type not known: {0} vs {1}".format(type(left), type(right)))
def _affinity_method(op, left, *args, **kwargs):
    # type: (str, np.array[geoms], ...) -> np.array[geoms]
    """Apply a shapely.affinity transform element-wise.

    Not all shapely.affinity functions accept empty geometries
    (rotate/scale/skew try to unpack the bounds), so empty or missing
    inputs are passed through unchanged for consistency.
    """
    left = to_shapely(left)
    transform = getattr(shapely.affinity, op)
    out = [
        geom if geom is None or geom.is_empty else transform(geom, *args, **kwargs)
        for geom in left
    ]
    data = np.empty(len(left), dtype=object)
    with compat.ignore_shapely2_warnings():
        data[:] = out
    return from_shapely(data)
# -----------------------------------------------------------------------------
# Vectorized operations
# -----------------------------------------------------------------------------
#
# Unary operations that return non-geometry (bool or float)
#
def _unary_op(op, left, null_value=False):
    # type: (str, np.array[geoms], Any) -> np.array
    """Read attribute *op* from each element; *null_value* substitutes for
    missing elements and also fixes the result dtype."""
    dtype = np.dtype(type(null_value))
    return np.array([getattr(g, op, null_value) for g in left], dtype=dtype)
def is_valid(data):
    """Element-wise geometry validity as a bool array."""
    if not compat.USE_PYGEOS:
        return _unary_op("is_valid", data, null_value=False)
    return pygeos.is_valid(data)
def is_empty(data):
    """Element-wise emptiness test as a bool array."""
    if not compat.USE_PYGEOS:
        return _unary_op("is_empty", data, null_value=False)
    return pygeos.is_empty(data)
def is_simple(data):
    """Element-wise simplicity test as a bool array."""
    if not compat.USE_PYGEOS:
        return _unary_op("is_simple", data, null_value=False)
    return pygeos.is_simple(data)
def is_ring(data):
    """Element-wise ``is_ring`` test; polygons are evaluated on their
    exterior ring (hence currently True -- see the warning below)."""
    if "Polygon" in geom_type(data):
        warnings.warn(
            "is_ring currently returns True for Polygons, which is not correct. "
            "This will be corrected to False in a future release.",
            FutureWarning,
            stacklevel=3,
        )
    if compat.USE_PYGEOS:
        # also test the exterior ring so polygons count, matching the
        # (deprecated) behaviour announced in the warning above
        return pygeos.is_ring(data) | pygeos.is_ring(pygeos.get_exterior_ring(data))
    else:
        # for polygons operates on the exterior, so can't use _unary_op()
        results = []
        for geom in data:
            if geom is None:
                results.append(False)
            elif geom.type == "Polygon":
                results.append(geom.exterior.is_ring)
            elif geom.type in ["LineString", "LinearRing"]:
                results.append(geom.is_ring)
            else:
                # other geometry types (points, multi-parts, ...) -> False
                results.append(False)
        return np.array(results, dtype=bool)
def is_closed(data):
    """Element-wise closedness test as a bool array."""
    if not compat.USE_PYGEOS:
        return _unary_op("is_closed", data, null_value=False)
    return pygeos.is_closed(data)
def has_z(data):
    """Element-wise test for a z coordinate as a bool array."""
    if not compat.USE_PYGEOS:
        return _unary_op("has_z", data, null_value=False)
    return pygeos.has_z(data)
def geom_type(data):
    """Element-wise geometry type names (e.g. ``"Point"``) as object array."""
    if not compat.USE_PYGEOS:
        return _unary_op("geom_type", data, null_value=None)
    type_ids = pygeos.get_type_id(data)
    # translate numeric type ids to name strings via the module lookup tables
    return geometry_type_values[np.searchsorted(geometry_type_ids, type_ids)]
def area(data):
    """Element-wise area as a float array (NaN for missing geometries)."""
    if not compat.USE_PYGEOS:
        return _unary_op("area", data, null_value=np.nan)
    return pygeos.area(data)
def length(data):
    """Element-wise length as a float array (NaN for missing geometries)."""
    if not compat.USE_PYGEOS:
        return _unary_op("length", data, null_value=np.nan)
    return pygeos.length(data)
#
# Unary operations that return new geometries
#
def _unary_geo(op, left, *args, **kwargs):
    # type: (str, np.array[geoms]) -> np.array[geoms]
    """Read geometry-valued attribute *op* per element into an object array
    (None for missing elements)."""
    # two-step object-array construction keeps the output strictly 1D
    out = np.empty(len(left), dtype=object)
    with compat.ignore_shapely2_warnings():
        out[:] = [getattr(g, op, None) for g in left]
    return out
def boundary(data):
    """Element-wise topological boundary of each geometry."""
    if not compat.USE_PYGEOS:
        return _unary_geo("boundary", data)
    return pygeos.boundary(data)
def centroid(data):
    """Element-wise centroid of each geometry."""
    if not compat.USE_PYGEOS:
        return _unary_geo("centroid", data)
    return pygeos.centroid(data)
def convex_hull(data):
    """Element-wise convex hull of each geometry."""
    if not compat.USE_PYGEOS:
        return _unary_geo("convex_hull", data)
    return pygeos.convex_hull(data)
def envelope(data):
    """Element-wise bounding envelope of each geometry."""
    if not compat.USE_PYGEOS:
        return _unary_geo("envelope", data)
    return pygeos.envelope(data)
def exterior(data):
    """Element-wise exterior ring of each (polygonal) geometry."""
    if not compat.USE_PYGEOS:
        return _unary_geo("exterior", data)
    return pygeos.get_exterior_ring(data)
def interiors(data):
    """Return an object array with, per element, the list of interior rings
    (polygons) or None (any other geometry type, with a warning)."""
    data = to_shapely(data)
    has_non_poly = False
    inner_rings = []
    for geom in data:
        interior_ring_seq = getattr(geom, "interiors", None)
        # polygon case
        if interior_ring_seq is not None:
            inner_rings.append(list(interior_ring_seq))
        # non-polygon case
        else:
            has_non_poly = True
            inner_rings.append(None)
    if has_non_poly:
        # warn once per call rather than per offending element
        warnings.warn(
            "Only Polygon objects have interior rings. For other "
            "geometry types, None is returned."
        )
    data = np.empty(len(data), dtype=object)
    data[:] = inner_rings
    return data
def representative_point(data):
    """Element-wise point guaranteed to lie on each geometry."""
    if compat.USE_PYGEOS:
        return pygeos.point_on_surface(data)
    # representative_point() is a method, not a property, so _unary_geo
    # cannot be reused here.
    out = np.empty(len(data), dtype=object)
    out[:] = [
        None if geom is None else geom.representative_point() for geom in data
    ]
    return out
#
# Binary predicates
#
def covers(data, other):
    """Element-wise test whether each geometry covers ``other``."""
    if not compat.USE_PYGEOS:
        return _binary_predicate("covers", data, other)
    return _binary_method("covers", data, other)
def covered_by(data, other):
    """Element-wise test whether each geometry is covered by ``other``
    (pygeos backend only)."""
    if not compat.USE_PYGEOS:
        raise NotImplementedError(
            "covered_by is only implemented for pygeos, not shapely"
        )
    return _binary_method("covered_by", data, other)
def contains(data, other):
    """Element-wise test whether each geometry contains ``other``."""
    if not compat.USE_PYGEOS:
        return _binary_predicate("contains", data, other)
    return _binary_method("contains", data, other)
def crosses(data, other):
    """Element-wise test whether each geometry crosses ``other``."""
    if not compat.USE_PYGEOS:
        return _binary_predicate("crosses", data, other)
    return _binary_method("crosses", data, other)
def disjoint(data, other):
    """Element-wise test whether each geometry is disjoint from ``other``."""
    if not compat.USE_PYGEOS:
        return _binary_predicate("disjoint", data, other)
    return _binary_method("disjoint", data, other)
def equals(data, other):
    """Element-wise topological equality test against ``other``."""
    if not compat.USE_PYGEOS:
        return _binary_predicate("equals", data, other)
    return _binary_method("equals", data, other)
def intersects(data, other):
    """Element-wise test whether each geometry intersects ``other``."""
    if not compat.USE_PYGEOS:
        return _binary_predicate("intersects", data, other)
    return _binary_method("intersects", data, other)
def overlaps(data, other):
    """Element-wise test whether each geometry overlaps ``other``."""
    if not compat.USE_PYGEOS:
        return _binary_predicate("overlaps", data, other)
    return _binary_method("overlaps", data, other)
def touches(data, other):
    """Elementwise 'touches' predicate, dispatched to the active backend."""
    if not compat.USE_PYGEOS:
        return _binary_predicate("touches", data, other)
    return _binary_method("touches", data, other)
def within(data, other):
    """Elementwise 'within' predicate, dispatched to the active backend."""
    if not compat.USE_PYGEOS:
        return _binary_predicate("within", data, other)
    return _binary_method("within", data, other)
def equals_exact(data, other, tolerance):
    """Elementwise exact-equality test within the given tolerance."""
    if not compat.USE_PYGEOS:
        return _binary_predicate("equals_exact", data, other, tolerance=tolerance)
    return _binary_method("equals_exact", data, other, tolerance=tolerance)
def almost_equals(self, other, decimal):
    """Approximate-equality test to ``decimal`` decimal places of precision."""
    if not compat.USE_PYGEOS:
        return _binary_predicate("almost_equals", self, other, decimal=decimal)
    # pygeos has no almost_equals; express it via equals_exact instead.
    return self.equals_exact(other, 0.5 * 10 ** (-decimal))
#
# Binary operations that return new geometries
#
def difference(data, other):
    """Elementwise geometric difference for the active backend."""
    if not compat.USE_PYGEOS:
        return _binary_geo("difference", data, other)
    return _binary_method("difference", data, other)
def intersection(data, other):
    """Elementwise geometric intersection for the active backend."""
    if not compat.USE_PYGEOS:
        return _binary_geo("intersection", data, other)
    return _binary_method("intersection", data, other)
def symmetric_difference(data, other):
    """Elementwise symmetric difference for the active backend."""
    if not compat.USE_PYGEOS:
        return _binary_geo("symmetric_difference", data, other)
    return _binary_method("symmetric_difference", data, other)
def union(data, other):
    """Elementwise geometric union for the active backend."""
    if not compat.USE_PYGEOS:
        return _binary_geo("union", data, other)
    return _binary_method("union", data, other)
#
# Other operations
#
def distance(data, other):
    """Elementwise distance between geometry pairs (float array)."""
    if not compat.USE_PYGEOS:
        return _binary_op_float("distance", data, other)
    return _binary_method("distance", data, other)
def buffer(data, distance, resolution=16, **kwargs):
    """Buffer every geometry by ``distance`` (scalar or per-geometry array)."""
    if compat.USE_PYGEOS:
        return pygeos.buffer(data, distance, quadsegs=resolution, **kwargs)
    out = np.empty(len(data), dtype=object)
    if isinstance(distance, np.ndarray):
        # One distance per geometry: lengths must line up.
        if len(distance) != len(data):
            raise ValueError(
                "Length of distance sequence does not match "
                "length of the GeoSeries"
            )
        with compat.ignore_shapely2_warnings():
            out[:] = [
                None
                if geom is None
                else geom.buffer(dist, resolution, **kwargs)
                for geom, dist in zip(data, distance)
            ]
        return out
    # Scalar distance applied uniformly.
    with compat.ignore_shapely2_warnings():
        out[:] = [
            None
            if geom is None
            else geom.buffer(distance, resolution, **kwargs)
            for geom in data
        ]
    return out
def interpolate(data, distance, normalized=False):
    """Interpolate a point along each (line) geometry at ``distance``."""
    if compat.USE_PYGEOS:
        return pygeos.line_interpolate_point(data, distance, normalize=normalized)
    out = np.empty(len(data), dtype=object)
    if isinstance(distance, np.ndarray):
        # One distance per geometry: lengths must line up.
        if len(distance) != len(data):
            raise ValueError(
                "Length of distance sequence does not match "
                "length of the GeoSeries"
            )
        out[:] = [
            g.interpolate(d, normalized=normalized)
            for g, d in zip(data, distance)
        ]
        return out
    out[:] = [g.interpolate(distance, normalized=normalized) for g in data]
    return out
def simplify(data, tolerance, preserve_topology=True):
    """Simplify each geometry to within ``tolerance``."""
    if compat.USE_PYGEOS:
        # Careful: pygeos' own default for preserve_topology differs.
        return pygeos.simplify(data, tolerance, preserve_topology=preserve_topology)
    # simplify is a method, not a property, so _unary_geo cannot be used.
    out = np.empty(len(data), dtype=object)
    with compat.ignore_shapely2_warnings():
        out[:] = [
            g.simplify(tolerance, preserve_topology=preserve_topology)
            for g in data
        ]
    return out
def _shapely_normalize(geom):
    """
    Small helper function for now because it is not yet available in Shapely.
    """
    # Reach into shapely's ctypes bindings to call GEOS' normalize directly.
    from shapely.geos import lgeos
    from shapely.geometry.base import geom_factory
    from ctypes import c_void_p, c_int
    # Declare the C signature explicitly before calling; ctypes' defaults
    # would otherwise misinterpret the handle arguments.
    lgeos._lgeos.GEOSNormalize_r.restype = c_int
    lgeos._lgeos.GEOSNormalize_r.argtypes = [c_void_p, c_void_p]
    # Normalize a clone so the caller's geometry is left untouched.
    geom_cloned = lgeos.GEOSGeom_clone(geom._geom)
    lgeos._lgeos.GEOSNormalize_r(lgeos.geos_handle, geom_cloned)
    return geom_factory(geom_cloned)
def normalize(data):
    """Return canonically ordered copies of each geometry (None-safe)."""
    if compat.USE_PYGEOS:
        return pygeos.normalize(data)
    out = np.empty(len(data), dtype=object)
    with compat.ignore_shapely2_warnings():
        out[:] = [
            None if geom is None else _shapely_normalize(geom) for geom in data
        ]
    return out
def project(data, other, normalized=False):
    """Distance along each line geometry to the point nearest ``other``."""
    if not compat.USE_PYGEOS:
        return _binary_op("project", data, other, normalized=normalized)
    return pygeos.line_locate_point(data, other, normalize=normalized)
def relate(data, other):
    """Return the DE-9IM relation string for each geometry pair."""
    left = to_shapely(data)
    right = to_shapely(other) if isinstance(other, np.ndarray) else other
    return _binary_op("relate", left, right)
def unary_union(data):
    """Merge all geometries in ``data`` into a single geometry."""
    if not compat.USE_PYGEOS:
        return shapely.ops.unary_union(data)
    return _pygeos_to_shapely(pygeos.union_all(data))
#
# Coordinate related properties
#
def get_x(data):
    """Return the x coordinate of each point geometry (NaN for missing)."""
    if not compat.USE_PYGEOS:
        return _unary_op("x", data, null_value=np.nan)
    return pygeos.get_x(data)
def get_y(data):
    """Return the y coordinate of each point geometry (NaN for missing)."""
    if not compat.USE_PYGEOS:
        return _unary_op("y", data, null_value=np.nan)
    return pygeos.get_y(data)
def get_z(data):
    """Return the z coordinate of each geometry (NaN where no z present)."""
    if compat.USE_PYGEOS:
        return pygeos.get_z(data)
    zs = [g.z if g.has_z else np.nan for g in data]
    return np.array(zs, dtype=np.dtype(float))
def bounds(data):
    """Return an (n, 4) float array of (minx, miny, maxx, maxy) per geometry."""
    if compat.USE_PYGEOS:
        return pygeos.bounds(data)
    # Ensure that for empty input the result still has the correct 2D shape.
    if len(data) == 0:
        return np.empty((0, 4), dtype="float64")
    # Empty geometries return an empty tuple from .bounds, which would not
    # stack into a 2D array, so substitute NaN rows for missing/empty ones.
    rows = []
    for geom in data:
        if geom is None or geom.is_empty:
            rows.append((np.nan, np.nan, np.nan, np.nan))
        else:
            rows.append(geom.bounds)
    return np.array(rows)
#
# Coordinate transformation
#
def transform(data, func):
    """Apply the coordinate transformation ``func`` to every geometry."""
    if compat.USE_PYGEOS:
        coords = pygeos.get_coordinates(data)
        new_coords = func(coords[:, 0], coords[:, 1])
        return pygeos.set_coordinates(data.copy(), np.array(new_coords).T)
    from shapely.ops import transform

    result = np.empty(len(data), dtype=object)
    for i, geom in enumerate(data):
        # Missing geometries pass through unchanged.
        result[i] = geom if _isna(geom) else transform(func, geom)
    return result
| [
"maifeeulasad@gmail.com"
] | maifeeulasad@gmail.com |
9fdf1fcd02e82e69e482cbdf80c02a24fcb02aef | 01200401ef046a917df1205268fa92f23cfd28d8 | /tests/test_histogram.py | f59279d6ed0771159428df32ce8d3a52d8e06d3d | [
"BSD-3-Clause"
] | permissive | murodin/pyclesperanto_prototype | 5fa8922dcbbc98aa69e1aab779c62a326a6937d7 | 4687e3085a5f8bc12e798bf25acd295ee249fb5e | refs/heads/master | 2023-01-20T14:34:47.858014 | 2020-11-30T11:56:47 | 2020-11-30T11:56:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | import pyclesperanto_prototype as cle
import numpy as np
def test_histogram():
    """A 5-bin histogram of a 2D image equals the hand-counted frequencies."""
    image = cle.push_zyx(np.asarray([
        [1, 2, 4, 4, 2, 3],
        [3, 3, 4, 4, 5, 5]
    ]))
    expected = [1, 2, 3, 4, 2]
    hist = cle.histogram(image, num_bins=5)
    print(hist)
    assert np.allclose(cle.pull(hist), expected)
def test_histogram_3d():
    """A 5-bin histogram of a 2x1x6 volume equals the hand-counted frequencies."""
    volume = cle.push_zyx(np.asarray([
        [[1, 2, 4, 4, 2, 3]],
        [[3, 3, 4, 4, 5, 5]]
    ]))
    expected = [1, 2, 3, 4, 2]
    hist = cle.histogram(volume, num_bins=5)
    print(hist)
    assert np.allclose(cle.pull(hist), expected)
def test_histogram_3d_2():
    """A 5-bin histogram of a 2x2x3 volume equals the hand-counted frequencies."""
    volume = cle.push_zyx(np.asarray([
        [[1, 2, 4],
         [4, 2, 3]],
        [[3, 3, 4],
         [4, 5, 5]]
    ]))
    expected = [1, 2, 3, 4, 2]
    hist = cle.histogram(volume, num_bins=5)
    print(hist)
    assert np.allclose(cle.pull(hist), expected)
def test_histogram_against_scikit_image():
    """GPU histogram agrees with skimage.exposure.histogram on 'camera'."""
    from skimage.data import camera
    from skimage import exposure
    image = camera()
    ref_hist, _bin_centers = exposure.histogram(image.ravel(), 256, source_range='image')
    print(str(ref_hist))
    gpu_hist = cle.histogram(cle.push(image), num_bins=256)
    print(str(cle.pull_zyx(gpu_hist)))
    assert np.allclose(ref_hist, cle.pull_zyx(gpu_hist))
| [
"haesleinhuepf@users.noreply.github.com"
] | haesleinhuepf@users.noreply.github.com |
1f3b8ca61fb87703a1d5b7ee53112abbc6cca035 | 80e3a4fd8921ea6794f31f2f3dda13d049675f7d | /utils/__init__.py | 15f6d6d9659925861c4be53aa16ef68ac9c94192 | [
"MIT"
] | permissive | dan-kez/lambda-ftp-to-gsheet-transform | e228ce9d6948aa9f15a547a519988c6676c8bef0 | e6707542719ed133ee90addc6b73c9c057b7898a | refs/heads/master | 2023-02-02T18:37:56.947431 | 2020-04-04T20:50:01 | 2020-04-04T20:50:01 | 237,648,380 | 2 | 0 | MIT | 2023-01-24T01:17:05 | 2020-02-01T17:09:14 | Python | UTF-8 | Python | false | false | 37 | py | from .s3 import get_matching_s3_keys
| [
"daniel.kez@gmail.com"
] | daniel.kez@gmail.com |
555e2b6b866cd6328a6db4f4f3185a895ce25130 | 9f517ba528665be4f1065b95ed13b414b19b4580 | /Tutorials/MachineLearning/SirajologyMusicDemo/tf_music_generation_demo.py | c436288ce6c8053baa70f0c7919ef031c2516ab1 | [] | no_license | bgenchel/practice | d6120bf2b047ac49c0741040cbe17f17e4846883 | ddd10410165bf63ef9786af8421d3d39bae8d153 | refs/heads/master | 2020-06-18T00:53:04.717907 | 2016-11-28T10:29:43 | 2016-11-28T10:29:43 | 74,961,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,092 | py | # Generate Music in TensorFlow
# https://www.youtube.com/watch?v=ZE7qWXX05T0
# Trains a Restricted Boltzmann Machine on MIDI songs and samples new
# note-state vectors from it, writing them out as MIDI files.
# NOTE(review): tf.mul/tf.sub/tf.initialize_all_variables are TF 1.x-era
# APIs; this script will not run on TensorFlow 2.x without migration.
import numpy as np
import pandas as pd
import tensorflow as tf
from tqdm import tqdm
import midi_manipulation
from rbm_chords import *
## 4 steps to generate music! (Using restricted boltzmann machine)
##############################
### Step 1 - HyperParameters
##############################
# MIDI pitch window; note_range is the number of distinct pitches modeled.
lowest_note = midi_manipulation.lowerBound
highest_note = midi_manipulation.upperBound
note_range = highest_note - lowest_note
# number of timesteps that we will create at a time
num_timesteps = 15
# this is the size of the visible layer
n_visible = 2*note_range*num_timesteps
# this is the size of the hidden layer
n_hidden = 50
# the number of training epochs that we are going to run.
# for each epoch, we go through the entire data set
num_epochs = 200
# the number of training examples that we are going to send through the
# RBM at a time.
batch_size = 100
# the learning rate of our model
lr = tf.constant(0.005, tf.float32)
##################################
### Step 2 - Tensorflow Variables
##################################
# the placeholder variable that holds our data
x = tf.placeholder(tf.float32, [None, n_visible], name='x')
# the weight matrix that stores the edge weights
W = tf.Variable(tf.random_normal([n_visible, n_hidden], 0.01), name="W")
# the bias vector for the hidden layer
# NOTE(review): name="bh"/"bv" is passed to tf.zeros, not tf.Variable, so
# the variables themselves are not actually named - confirm intent.
bh = tf.Variable(tf.zeros([1, n_hidden], tf.float32, name="bh"))
# the bias vector for the visible layer
bv = tf.Variable(tf.zeros([1, n_visible], tf.float32, name="bv"))
######################################
### Step 3 - Our Generative Algorithm
######################################
# the sample of x
# gibbs_sample/sample presumably come from the rbm_chords star-import.
x_sample = gibbs_sample(1)
# the sample of the hidden nodes, starting from the visible state of x
h = sample(tf.sigmoid(tf.matmul(x, W) + bh))
# the sample of the hidden nodes, starting from the visible state of x_sample
h_sample = sample(tf.sigmoid(tf.matmul(x_sample, W) + bh))
# Next we update the values of W, bh, and bv;
# based on the difference between the samples that
# we drew and the original values
# (contrastive-divergence style update, scaled by the batch size)
size_bt = tf.cast(tf.shape(x)[0], tf.float32)
W_adder = tf.mul(
    lr/size_bt,
    tf.sub(
        tf.matmul(tf.transpose(x), h),
        tf.matmul(tf.transpose(x_sample), h_sample)
    )
)
bv_adder = tf.mul(lr/size_bt, tf.reduce_sum(tf.sub(x, x_sample), 0, True))
bh_adder = tf.mul(lr/size_bt, tf.reduce_sum(tf.sub(h, h_sample), 0, True))
# When we do sess.run(updt), TensorFlow will run all 3 update steps
update = [W.assign_add(W_adder), bv.assign_add(bv_adder), bh.assign_add(bh_adder)]
###############################################
### Step 4 (Final) - Run the Computation Graph
###############################################
with tf.Session() as sess:
    # initialize the variables of the model
    init = tf.initialize_all_variables()
    sess.run(init)
    # Run through all of the training data num_epochs times
    # (`songs` is presumably provided by the rbm_chords star-import)
    for epoch in tqdm(range(num_epochs)):
        for song in songs:
            # the songs are stored in a time x notes format.
            # the size of each song is timesteps_in_song x 2*note_range
            # Here we reshape the songs so that each training example
            # is a vector with num_timesteps x 2*note_range elements
            song = np.array(song)
            # train the RBM on batch_size samples at a time
            for i in range(1, len(song), batch_size):
                tr_x = song[i:i+batch_size]
                sess.run(update, feed_dict={x: tr_x})
    # now the model is fully trained, so let's make some music!
    # run a gibbs chain where the visible nodes are initialized to 0
    sample = gibbs_sample(1).eval(session=sess, feed_dict={x: np.zeros((10, n_visible))})
    for i in range(sample.shape[0]):
        # skip all-zero samples (nothing to write out)
        if not any(sample[i, :]):
            continue
        # Here we reshape the vector to be time x notes, and
        # then save the vector as a midi file.
        S = np.reshape(sample[i, :], (num_timesteps, 2*note_range))
        midi_manipulation.noteStateMatrixToMidi(S, "generated_chord{}".format(i))
"benjiegenchel@gmail.com"
] | benjiegenchel@gmail.com |
c03058bdab453ed37f520ba3c345fbea4559e0f9 | 2a2777d02a0c99206d19e637a1c13ccf0f8e8fbb | /djangoWeb/iqc/migrations/0012_iqcuploadrecord.py | 0cbf6e98bbc65489a2f59073af241cdb0d7c4cf0 | [] | no_license | huangle63/DjangoWeb | 38360d190feb9c201cf1cfe8e820634d6e09662c | 7861e012c3788fd2e88927c0897c7ac8c4082b9c | refs/heads/master | 2021-01-13T11:20:20.111551 | 2017-03-10T03:13:09 | 2017-03-10T03:13:09 | 77,189,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-03-06 15:14
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: create the IQCUploadRecord model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('iqc', '0011_auto_20170228_1438'),
    ]
    operations = [
        migrations.CreateModel(
            name='IQCUploadRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): a DateTimeField with a verbose_name meaning
                # "upload quantity" looks like it should be an integer field -
                # confirm against the current model before relying on it.
                ('upload_num', models.DateTimeField(verbose_name='上传数量')),
                ('upload_time', models.DateTimeField(auto_now_add=True, verbose_name='上传时间')),
                ('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='person_upload_record', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"huangle63@163.com"
] | huangle63@163.com |
05646c325d4d03bded9609b2dd0420a49fcb62fa | 1a806c868d43fc87999a89e892b9a446b433c58f | /assignment/material/migrations/0013_auto_20201203_1126.py | e4321548bb7f2cd1a8b8e26200eb04f917dca58a | [] | no_license | MalsawmaKhiangte/material_management | ec0281eb0da4a8067e37d4823577d8b0b273acfa | dc31a7eb53b139db533effa379b7210ab58b30ae | refs/heads/main | 2023-01-25T02:23:56.912193 | 2020-12-06T18:18:09 | 2020-12-06T18:18:09 | 318,254,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # Generated by Django 3.1.3 on 2020-12-03 05:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: make District.email nullable (max 30)."""
    dependencies = [
        ('material', '0012_auto_20201203_1126'),
    ]
    operations = [
        migrations.AlterField(
            model_name='district',
            name='email',
            field=models.EmailField(max_length=30, null=True),
        ),
    ]
| [
"mskhiangte00@gmail.com"
] | mskhiangte00@gmail.com |
8e6a40aabb5d98acecdf713ba9a997923ae08b27 | 7bf617f77a55d8ec23fa8156c1380b563a5ac7f6 | /CG/SciPy/mm_color_cluster.py | c069d4d292db408ca47cdbeff36617ac590abb43 | [] | no_license | anyatran/school | c06da0e08b148e3d93aec0e76329579bddaa85d5 | 24bcfd75f4a6fe9595d790808f8fca4f9bf6c7ec | refs/heads/master | 2021-06-17T10:45:47.648361 | 2017-05-26T12:57:23 | 2017-05-26T12:57:23 | 92,509,148 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | # Auto-clustering, suggested by Matt Terry
from skimage import io, color, exposure
from sklearn import cluster, preprocessing
import numpy as np
import matplotlib.pyplot as plt
# Download the M&M's demo image once, then cache it locally.
# NOTE(review): Python 2 code (print statements, urllib2).
url = 'http://blogs.mathworks.com/images/steve/2010/mms.jpg'
import os
if not os.path.exists('mm.png'):
    print "Downloading M&M's..."
    import urllib2
    u = urllib2.urlopen(url)
    f = open('mm.png', 'w')
    f.write(u.read())
    f.close()
print "Image I/O..."
# Work in CIE Lab space; `ab` keeps only the two chroma channels.
mm = io.imread('mm.png')
mm_lab = color.rgb2lab(mm)
ab = mm_lab[..., 1:]
print "Mini-batch K-means..."
# Cluster every pixel's (a, b) chroma pair into 6 color groups.
X = ab.reshape(-1, 2)
kmeans = cluster.MiniBatchKMeans(n_clusters=6)
y = kmeans.fit(X).labels_
labels = y.reshape(mm.shape[:2])
N = labels.max()
def no_ticks(ax):
    """Strip all x- and y-axis tick marks from the given axes."""
    for hide_ticks in (ax.set_xticks, ax.set_yticks):
        hide_ticks([])
# Display all clusters
# Bottom row: one panel per cluster, chroma zeroed outside the cluster mask.
for i in range(N):
    mask = (labels == i)
    mm_cluster = mm_lab.copy()
    mm_cluster[..., 1:][~mask] = 0
    ax = plt.subplot2grid((2, N), (1, i))
    ax.imshow(color.lab2rgb(mm_cluster))
    no_ticks(ax)
# Top-left: the original image, spanning two columns.
ax = plt.subplot2grid((2, N), (0, 0), colspan=2)
ax.imshow(mm)
no_ticks(ax)
# Display histogram
# 2D histogram of the (a, b) chroma values over [-100, 100).
L, a, b = mm_lab.T
left, right = -100, 100
bins = np.arange(left, right)
H, x_edges, y_edges = np.histogram2d(a.flatten(), b.flatten(), bins,
                                     normed=True)
ax = plt.subplot2grid((2, N), (0, 2))
H_bright = exposure.rescale_intensity(H, in_range=(0, 5e-4))
ax.imshow(H_bright,
          extent=[left, right, right, left], cmap=plt.cm.gray)
ax.set_title('Histogram')
ax.set_xlabel('b')
ax.set_ylabel('a')
# Voronoi diagram
# Predict the cluster for every (a, b) bin center to show decision regions.
mid_bins = bins[:-1] + 0.5
L = len(mid_bins)
yy, xx = np.meshgrid(mid_bins, mid_bins)
Z = kmeans.predict(np.column_stack([xx.ravel(), yy.ravel()]))
Z = Z.reshape((L, L))
ax = plt.subplot2grid((2, N), (0, 3))
ax.imshow(Z, interpolation='nearest',
          extent=[left, right, right, left],
          cmap=plt.cm.Spectral, alpha=0.8)
ax.imshow(H_bright, alpha=0.2,
          extent=[left, right, right, left],
          cmap=plt.cm.gray)
ax.set_title('Clustered histogram')
no_ticks(ax)
plt.show()
| [
"panhtran249@gmail.com"
] | panhtran249@gmail.com |
16620636c556a91a3c0e8574830ea3e9a9f2ee90 | 9db2c871d1715d4068cc638458e573d58567d0f2 | /labs-neural-networks/hw-lab6/mnist-improved.py | 5e91ede4c5f63b28357f13c39b6c2de7769a6ad3 | [
"MIT"
] | permissive | xR86/ml-stuff | ec2bff6e57aaa89ca8740cece562a4e7b8cfe685 | 2a1b79408897171b78032ff2531ab6f8b18be6c4 | refs/heads/master | 2021-05-01T10:36:22.362741 | 2021-04-08T22:22:20 | 2021-04-08T22:22:20 | 63,726,853 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,159 | py | import cPickle, gzip, numpy as np
# Load the pickled MNIST splits and open a log file for the error curve.
# NOTE(review): Python 2 code (cPickle, print statements).
f = gzip.open('mnist.pkl.gz', 'rb')
f2 = open('matplot-text.txt', 'w+')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
print 'train_set length: ', len(train_set[0])
# Global training hyper-parameters used by the classes below.
epochs = 1
learning_rate = 0.1
class MLP(object):
    """Two-layer perceptron network over the module-level MNIST sets.

    NOTE(review): both layers are built with 784-dimensional weight
    vectors, although the output layer consumes the hidden layer's
    (mutated) train_set - confirm the intended forward-pass wiring.
    """
    def __init__(self):
        # Layer init =>
        # tag, no_elements, activation, weight, bias, train_set, valid_set, test_set
        # 100 neurons, sigmoid activation
        hidden_node_no = 100
        self.hidden_layer = PerceptronLayer(
            "hidden",
            hidden_node_no,
            "sigmoid",
            [np.random.uniform(0, 1, size=784) for i in range(hidden_node_no)],
            np.zeros(hidden_node_no),
            train_set,
            valid_set,
            test_set
        ) # 784 -> 100 ?
        # 10 neurons, softmax activation
        perceptron_node_no = 10
        self.perceptron_layer = PerceptronLayer(
            "output",
            perceptron_node_no,
            "softmax",
            [np.random.uniform(0, 1, size=784) for i in range(perceptron_node_no)],
            np.zeros(perceptron_node_no),
            # the hidden layer's train_set is rewritten in-place during its
            # training, which is how activations reach the output layer
            self.hidden_layer.train_set,
            valid_set,
            test_set
        )
        self.hidden_layer.init_layer()
        self.perceptron_layer.init_layer()
    def train(self):
        # Train the hidden layer first, then the output layer.
        self.hidden_layer.train_network()
        # self.hidden_layer.test()
        self.perceptron_layer.train_network()
    def test(self):
        # Accuracy is evaluated on the output layer only.
        self.perceptron_layer.test()
#class Layer(object):
# pass
class PerceptronLayer(object):
def __init__(self, tag, no_elements, activation, weight, bias, train_set, valid_set, test_set):
self.tag = tag
self.no_elements = no_elements
self.activation = activation
self.weight = weight
self.bias = bias
self.train_set = train_set
self.valid_set = valid_set
self.test_set = test_set
self.layer = []
self.ok_rate = 0
self.error_rate = 0
def init_layer(self):
for i in range(self.no_elements):
# print [numpy.random.uniform(0, 1, size=784) for i in range(10)]
# print len(self.train_set[0])
# shapes (50000,) and (784,) not aligned: 50000 (dim 0) != 784 (dim 0) => np.random.rand(1, len(self.train_set[0]))
self.layer.append(Perceptron(i, self)) #HACK: 1 - should only expose: layer_name, train_set, activation, weight, bias
# check for initialization
for perceptron in self.layer:
print perceptron, perceptron.digit # , i.weight, i.bias
def train_network(self):
for perceptron in self.layer:
print 'queued train: ', perceptron.digit
perceptron.train()
def test(self):
self.train_network()
ok = 0
clock_counter = 0
for i in range(len(self.test_set[0])):
maximum = -1
digitmax = -1
for digit in range(10):
z = np.dot(self.test_set[0][i], self.weight[digit]) + self.bias[digit]
if z > maximum:
maximum = z
digitmax = digit
if digitmax == self.test_set[1][i]:
ok += 1
self.ok_rate = ok * 1.0 / len(self.test_set[0]) * 100
self.error_rate = 100 - self.ok_rate
str = "%d, %f" % (clock_counter, self.error_rate)
print "clock, error: ", str
print clock_counter, self.error_rate
f2.write(str + "\n")
clock_counter += 10
#self.ok_rate = ok * 1.0 / len(self.test_set[0]) * 100
#self.error_rate = 100 - self.ok_rate
self.error_rate = 100 - (ok * 1.0 / len(self.test_set[0]) * 100)
self.ok_rate = ok * 1.0 / len(self.test_set[0]) * 100
print "Final result: ", self.ok_rate, "%"
print "Error rate: ", self.error_rate, "%"
f2.close
class Perceptron(object):
    """One digit-unit inside a PerceptronLayer (shares the layer's state)."""
    #error = 1
    #errorRate = 100 - perceptron_layer.ok_rate
    def __init__(self, digit, parent):
        # digit: index of this unit / the class it detects.
        self.digit = digit
        # HACK-1: - should only expose: layer_name, train_set, activation, weight, bias
        self.parent = parent #should work (pass-by-assignment)
    def description(self):
        print "This is a perceptron object"
    # Functions used for activation of the neuron
    # NOTE(review): sigmoid/softmax below are clamped to 0 for outputs
    # <= 0.5, which is not the standard definition - confirm intent.
    def activation_step(self, input):
        # Step activation function
        if input > 0: return 1
        return 0
    def activation_sigmoid(self, input):
        # Sigmoid activation function.
        f = 1.0 / (1.0 + np.exp(-input))
        if f > 0.5: return f
        return 0
    def activation_sigmoid_deriv(self, input):
        # Derivative of the sigmoid activation function.
        f = self.activation_sigmoid(input) * (1 - self.activation_sigmoid(input))
        if f > 0.5: return f
        return 0
    def activation_softmax(self, input):
        #Compute softmax values for each sets of scores in x.
        f = np.exp(input) / np.sum(np.exp(input), axis=0)
        if f > 0.5: return f
        return 0
    def expected(self, value):
        # One-hot target: 1 if this unit's digit matches the label, else 0.
        if self.digit == value:
            return 1
        return 0
    def train(self):
        # Perceptron-style update over the whole training set: move the
        # weight/bias toward (expected - output) * learning_rate.
        print(self.parent.tag + "###Train neuron: " + str(self.digit))
        for i in range(len(self.parent.train_set[0])):
            z = np.dot(self.parent.train_set[0][i], self.parent.weight[self.digit]) + self.parent.bias[self.digit]
            output = getattr(self, 'activation_' + self.parent.activation)(z) #self.activation_step(z)
            x = np.array(self.parent.train_set[0][i]).dot((self.expected(self.parent.train_set[1][i]) - output) * learning_rate)
            self.parent.weight[self.digit] = np.add(self.parent.weight[self.digit], x)
            self.parent.bias[self.digit] += (self.expected(self.parent.train_set[1][i]) - output) * learning_rate
            # change self.parent.train_set such that it will be propagated forward
            # NOTE(review): this overwrites each 784-vector input with the
            # scalar z, and every unit of the layer does so in turn - verify
            # this is the intended "forward propagation".
            self.parent.train_set[0][i] = z
        print("---Digit trained: " + str(self.digit))
"""
############# Main code #############
"""
# Build the two-layer network, train it on the MNIST sets loaded above,
# and report its accuracy / error rate on the test set.
mlp = MLP()
mlp.train()
mlp.test()
| [
"9412441+xR86@users.noreply.github.com"
] | 9412441+xR86@users.noreply.github.com |
e43483e4f83e7c1f95abb3cc02b7dce519a7661a | b0f0f72f60f4d0524a696eb67818f7bdec64f8fd | /places/migrations/0004_auto_20200827_1343.py | 7457b304f34cd3240427202b307ba3e875942940 | [] | no_license | denisorionov/where_to_go | 7fa9c15a831f32a7e8ac1e59775b58ddb44aeecd | 54107ce61de9a08cf61e3768a6f6de04cb549a4b | refs/heads/master | 2022-12-18T21:56:28.200461 | 2020-10-01T04:47:58 | 2020-10-01T04:47:58 | 290,710,605 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | # Generated by Django 3.1 on 2020-08-27 10:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: change Image.img upload_to='picture'."""
    dependencies = [
        ('places', '0003_image'),
    ]
    operations = [
        migrations.AlterField(
            model_name='image',
            name='img',
            field=models.ImageField(upload_to='picture'),
        ),
    ]
| [
"orionovdv@gmail.com"
] | orionovdv@gmail.com |
936a5b37ef17d1036536797c91be42c6e52793c9 | 58563dbe063c2ed612a59ca78a99faa3611a0144 | /SteetVector.py | 19144468c684db0a1d91347e644adb8443ef6333 | [
"MIT"
] | permissive | DieuwerH/AE3537 | 13b2f087a0af6414460be3c3cbcfe1079e978acc | 3957a58374b5783725565fc874ed1829c6208106 | refs/heads/master | 2020-04-07T14:27:43.658590 | 2019-02-14T19:17:29 | 2019-02-14T19:17:29 | 158,448,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | import numpy as np
import datetime
import time_helper
class StateVector:
    """Spacecraft state (position/velocity) in ECI coordinates at an epoch.

    On construction, the equivalent ECEF position and velocity are derived
    by rotating the ECI vectors through the Earth rotation angle for the
    epoch (obtained from ``time_helper.datetime2rad``).
    """

    def __init__(self, pos=None, vel=None, time=None):
        # None sentinels replace the original eager/mutable defaults:
        # a shared np.array default could be mutated across instances, and
        # datetime.datetime.now() as a default was evaluated once at class
        # definition time, giving every instance the same stale timestamp.
        self.pos_eci = np.array([0, 0, 0] if pos is None else pos)
        self.vel_eci = np.array([0, 0, 0] if vel is None else vel)
        self.time = datetime.datetime.now() if time is None else time
        self.pos_ecef, self.vel_ecef = self.calculate_ecef()

    def calculate_ecef(self):
        """Return (pos_ecef, vel_ecef) derived from the stored ECI state."""
        angle = time_helper.datetime2rad(self.time)
        return self.calculate_ecef_pos(angle), self.calculate_ecef_vel(angle)

    def calculate_ecef_pos(self, angle):
        """Rotate the ECI position about the z-axis by ``angle`` (radians)."""
        rotation_matrix = np.array([
            [np.cos(angle), np.sin(angle), 0],
            [-np.sin(angle), np.cos(angle), 0],
            [0, 0, 1]])
        return rotation_matrix.dot(self.pos_eci)

    def calculate_ecef_vel(self, angle):
        """Rotate the ECI velocity and add the Earth-rotation transport term."""
        rotation_matrix = np.array([
            [np.cos(angle), np.sin(angle), 0],
            [-np.sin(angle), np.cos(angle), 0],
            [0, 0, 1]
        ])
        # The time derivative of the rotation matrix equals
        # omega_earth * P . R with this antisymmetric helper matrix P.
        plus1min1 = np.array([
            [0, 1, 0],
            [-1, 0, 0],
            [0, 0, 0]
        ])
        omega_earth = 7.2921158553e-5  # Earth rotation rate [rad/s]
        derivative_matrix = (omega_earth * plus1min1).dot(rotation_matrix)
        return rotation_matrix.dot(self.vel_eci) + derivative_matrix.dot(self.pos_eci)

    def get_range(self):
        """Distance from the Earth's center: Euclidean norm of pos_ecef."""
        return np.linalg.norm(self.pos_ecef)
| [
"dieuwer.hondelink@gmail.com"
] | dieuwer.hondelink@gmail.com |
e2fa124d83cd3c760b0eff2d53eef09fec49c3aa | 7e266469a84e06e3551a7ba0dca25e894f2f3111 | /Bloomy_Core_CreateQualityInspection_TestCase/test_createqualityinspection_testcase.py | 7f7b7aeeb1520353cfd2c98e5bd56a96ac51aa33 | [] | no_license | Bloomstack-Test-Automation/Bloomstack-Test-Automation | 43862b6761951effee5f17d7428f5be0c34b4499 | 2450df2018715cf6f0ec080ca1dc0751a230d969 | refs/heads/main | 2023-06-06T10:52:57.695175 | 2021-06-30T11:33:30 | 2021-06-30T11:33:30 | 368,438,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,647 | py | import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from src.testproject.classes import DriverStepSettings, StepSettings
from src.testproject.decorator import report_assertion_errors
from src.testproject.enums import SleepTimingType
from src.testproject.sdk.drivers import webdriver
import pytest
"""
This pytest test was automatically generated by TestProject
Project: Bloomy_Core
Package: TestProject.Generated.Tests.BloomyCore
Test: CreateQualityInspection_TestCase
Generated by: Rahul Prakash (rahulprakash0862@gmail.com)
Generated on 05/26/2021, 10:11:04
"""
@pytest.fixture()
def driver():
    """Yield a TestProject-instrumented Chrome driver; quit it on teardown."""
    # NOTE(review): this developer token is committed in source control -
    # it should be rotated and injected via configuration instead.
    driver = webdriver.Chrome(token="5o-UXmLZug6gaKmDcoeI6tT7NM19XyG1qnolFybLul4",
                              project_name="Bloomy_Core",
                              job_name="CreateQualityInspection_TestCase")
    # Every step waits up to 15s and sleeps 500ms before executing.
    step_settings = StepSettings(timeout=15000,
                                 sleep_time=500,
                                 sleep_timing_type=SleepTimingType.Before)
    with DriverStepSettings(driver, step_settings):
        yield driver
    driver.quit()
@report_assertion_errors
def test_main(driver):
    """End-to-end flow: log in, create and submit a Quality Inspection, log out.

    Auto-generated by TestProject (author: Rahul); selectors and step
    numbering come from the recorder, so elements are re-located before
    each interaction.
    """
    # Test Parameters
    # Auto generated application URL parameter
    ApplicationURL = "https://epitest-demo.bloomstack.io/"
    # 1. Navigate to '{ApplicationURL}'
    # Navigates the specified URL (Auto-generated)
    driver.get(f'{ApplicationURL}')
    # --- Login phase ---
    # 2. Is 'Login' visible?
    login = driver.find_element(By.XPATH,
                                "//a[. = 'Login']")
    assert login.is_displayed()
    # 3. Click 'Login'
    login = driver.find_element(By.XPATH,
                                "//a[. = 'Login']")
    login.click()
    # 4. Click 'Email Address'
    email_address = driver.find_element(By.CSS_SELECTOR,
                                        "#login_email")
    email_address.click()
    # 5. Type 'testautomationuser@bloomstack.com' in 'Email Address'
    email_address = driver.find_element(By.CSS_SELECTOR,
                                        "#login_email")
    email_address.send_keys("testautomationuser@bloomstack.com")
    # 6. Click 'Password'
    password = driver.find_element(By.CSS_SELECTOR,
                                   "#login_password")
    password.click()
    # 7. Type 'epi@123' in 'Password'
    password = driver.find_element(By.CSS_SELECTOR,
                                   "#login_password")
    password.send_keys("epi@123")
    # 8. Click 'Login1'
    login1 = driver.find_element(By.XPATH,
                                 "//button[. = '\n\t\t\t\tLogin']")
    login1.click()
    # --- Navigate to the Quality Inspection list ---
    # 9. Click 'Search or type a command (Ctrl + G)'
    search_or_type_a_command_ctrl_g_ = driver.find_element(By.CSS_SELECTOR,
                                                           "#navbar-search")
    search_or_type_a_command_ctrl_g_.click()
    # 10. Type 'quality ins' in 'Search or type a command (Ctrl + G)'
    search_or_type_a_command_ctrl_g_ = driver.find_element(By.CSS_SELECTOR,
                                                           "#navbar-search")
    search_or_type_a_command_ctrl_g_.send_keys("quality ins")
    # 11. Click 'Quality Inspection List'
    quality_inspection_list = driver.find_element(By.XPATH,
                                                  "//span[. = 'Quality Inspection List']")
    quality_inspection_list.click()
    # 12. Does 'Quality Inspection1' contain 'Quality Inspection'?
    quality_inspection1 = driver.find_element(By.XPATH,
                                              "//div[. = 'Quality Inspection']")
    step_output = quality_inspection1.text
    assert step_output and ("Quality Inspection" in step_output)
    time.sleep(2)
    # --- Create and fill a new Quality Inspection ---
    # 13. Click 'New6'
    new6 = driver.find_element(By.XPATH,
                               "//button[. = 'New']")
    new6.click()
    # 14. Is 'New Quality Inspection4' visible?
    new_quality_inspection4 = driver.find_element(By.XPATH,
                                                  "//h4[. = 'New Quality Inspection']")
    assert new_quality_inspection4.is_displayed()
    # 15. Click 'SELECT19'
    select19 = driver.find_element(By.XPATH,
                                   "//div[3]/div/div[2]//select")
    select19.click()
    # 16. Select the 'Incoming' option in 'SELECT19'
    select19 = driver.find_element(By.XPATH,
                                   "//div[3]/div/div[2]//select")
    Select(select19).select_by_value("Incoming")
    # 17. Click 'SELECT19'
    select19 = driver.find_element(By.XPATH,
                                   "//div[3]/div/div[2]//select")
    select19.click()
    # 18. Click 'INPUT84'
    input84 = driver.find_element(By.XPATH,
                                  "//div[4]/div/div[2]//input")
    input84.click()
    # 19. Click 'P15'
    p15 = driver.find_element(By.XPATH,
                              "//div/div/div/ul/li[1]/a/p")
    p15.click()
    # 20. Click 'INPUT12'
    input12 = driver.find_element(By.XPATH,
                                  "//div[5]/div/div[2]//input")
    input12.click()
    # 21. Type '3.00' in 'INPUT12'
    input12 = driver.find_element(By.XPATH,
                                  "//div[5]/div/div[2]//input")
    input12.send_keys("3.00")
    # 22. Click 'SELECT2'
    select2 = driver.find_element(By.XPATH,
                                  "//div[7]//select")
    select2.click()
    # 23. Select the 'Internal' option in 'SELECT2'
    select2 = driver.find_element(By.XPATH,
                                  "//div[7]//select")
    Select(select2).select_by_value("Internal")
    # 24. Click 'SELECT2'
    select2 = driver.find_element(By.XPATH,
                                  "//div[7]//select")
    select2.click()
    # --- Save, submit, and log out ---
    # 25. Click 'Save12'
    save12 = driver.find_element(By.XPATH,
                                "//button[. = 'Save']")
    save12.click()
    # 26. Click 'Submit7'
    submit7 = driver.find_element(By.XPATH,
                                  "//button[. = 'Submit']")
    submit7.click()
    # 27. Click 'Settings1'
    settings1 = driver.find_element(By.XPATH,
                                    "//span[. = ' Settings']")
    settings1.click()
    # 28. Click 'Logout'
    logout = driver.find_element(By.XPATH,
                                 "//a[. = ' Logout']")
    logout.click()
    # 29. Does 'Login' contain 'Login'?
    login = driver.find_element(By.XPATH,
                                "//a[. = 'Login']")
    step_output = login.text
    assert step_output and ("Login" in step_output)
| [
"noreply@github.com"
] | Bloomstack-Test-Automation.noreply@github.com |
afad18cb8a7156f474cc091590422f26708cce8a | 417a35ad8b20c4ef5a8f1f346163811e514b84d6 | /app/main/__init__.py | 8cb55b9004a6418394bef59469386a16ab5ff076 | [] | no_license | Heaveniost/Flask_Web | c045fb783f8d200ddb7ef2f5c742136eed5f1e04 | c14438f9743b6b60055cc7c1871156350cc61c5d | refs/heads/master | 2022-12-12T21:38:52.217316 | 2018-06-23T03:33:58 | 2018-06-23T03:33:58 | 136,014,275 | 1 | 0 | null | 2022-12-08T02:17:44 | 2018-06-04T11:19:06 | Python | UTF-8 | Python | false | false | 216 | py | from flask import Blueprint
main = Blueprint('main',__name__)
from . import views,errors
from ..models import Permission
@main.app_context_processor
def inject_permisssions():
return dict(Permission=Permission) | [
"304090717@qq.com"
] | 304090717@qq.com |
fdec849742b2c5fc11f45cd91469855165bf18e9 | c6b1d783937d5b77ac25de55f3e8a17b503fbada | /sheet-3/LTH/exercise-12.py | 7d7ca5f814d4540b8140e30fa87d44e9d76d2ac7 | [] | no_license | thierschi/python-exercise | 5a136c990d1a7ec269511b997a98bd245a26aeba | 975cfbc9a48eb985ffd584e30cc95273e456af6a | refs/heads/main | 2023-06-26T02:18:42.348957 | 2021-07-20T13:47:31 | 2021-07-20T13:47:31 | 358,187,095 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | # ------------------------------------------------------------------------------
# Sheet 3 Exercise 12
# Niklas Markert - 1611460 / bt70985
# Lukas Thiersch - 1607110 / bt708626
# ------------------------------------------------------------------------------
# a) What happens to variables in a local scope when the function call returns?
# When the function call returns the local-scope-variable is getting removed
# from memory, thus it is only accessible from the point where it gets
# declared until the point where the function returns.
# ------------------------------------------------------------------------------
# b) Write a function collatz(int_number) that takes as parameter an integer
# int_number. If int_number is even, then the function should print and return
# int_number // 2. If int_number is odd, then the function should print and
# return 3 * int_number + 1. Then let a user type in an integer number and store
# it in a variable num. Call the collatz function on num and save the result in
# the variable num. Keep doing this until the collatz function returns the value
# 1. Hint: Use a while loop for the second part.
def collatz(int_number):
if int_number % 2 == 0:
new_number = int_number // 2
else:
new_number = 3 * int_number + 1
print(new_number)
return new_number
num = int(input("Type a number: "))
while num != 1:
num = collatz(num)
# ------------------------------------------------------------------------------
| [
"lukas.thiersch@lansco.de"
] | lukas.thiersch@lansco.de |
757d03bce27b39fac9e2f1c38ab480780eb5d862 | 9194df1ff1de9425af605b84e59334832b577de8 | /litter/q2.py | 16daa943a2990f67c257b6f0688299d35672ff92 | [] | no_license | wuc6602fx/ws | 42aa8b7fcdd460b6847694be7d3d093b5067abfd | 3b2da54dc031c299e5908225464bf8d8a8b4ffc3 | refs/heads/master | 2021-09-14T22:26:06.081344 | 2018-05-21T08:48:42 | 2018-05-21T08:48:42 | 107,844,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | #ques2
from collections import OrderedDict
#tuple
myTuple = list()
with open('example.csv') as f:
for line in f.read().splitlines(): #avoid \r\n
myTuple.append(line.split(', '))
myTuple = sorted(myTuple) #by alphblet
myTuple.pop(len(myTuple)-1) #remove header
print 'Tuple = ', myTuple
#dict
scores = [x[2] for x in myTuple]
counts = [0, 0, 0, 0]
for score in scores:
if int(score)/10 == 9:
counts[3] = counts[3] + 1
elif int(score)/10 == 8:
counts[2] = counts[2] + 1
elif int(score)/10 == 7:
counts[1] = counts[1] + 1
elif int(score)/10 == 6:
counts[0] = counts[0] + 1
myRange = ['60~69', '70~79', '80~89', '90~99']
myDict = OrderedDict(zip(myRange, counts)) #70~79 90~99 80~89 60~69, if no use ordereddict
print myDict
| [
"cat5530xm@gmail.com"
] | cat5530xm@gmail.com |
eac4f52c73cf9c889363316c2fbb245274207d84 | 713711cce4d1ab1bd17439d9ab7913c0147a54f7 | /pyton.py | 4facf2c583a18183b4885f5e92a1d9b4c0eaf771 | [] | no_license | IliaLuksha/labs | eb05a0cdb2438d56b77df4b8a02d9c7d61b5d4a4 | 220bc86f272d0cddf83b9888847190a4c742ddd5 | refs/heads/master | 2021-07-24T08:22:20.035558 | 2021-06-08T20:43:01 | 2021-06-08T20:43:01 | 252,173,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | def fibonachi(n):
numb = 1
if n > 2:
numb = fibonachi(n - 1) + fibonachi(n - 2)
return numb
number = input('Enter the number: ')
number = int(number)
answer = fibonachi(number)
print(' sequence element = ' + str(answer)) | [
"ilich.02@yandex.ru"
] | ilich.02@yandex.ru |
b9ad5d3f538a6de721c9603acde868d0da3788d0 | bc167f434158921bcf2c678155c5cdfec1c9b0c9 | /PI_code/simulator/behaviourGeneration/firstGenScripts_preyHunter/behav388.py | 2b4f02b7722c2854864b1dddca35aacd975e7d93 | [] | no_license | s0217391/DifferentProjects | 6450efc89c64ecd21b86c705737e89e5c69433a6 | 7f4da153660817b6cbf72d2e823aa29c0c2f95a9 | refs/heads/master | 2021-01-17T02:58:46.219240 | 2015-05-26T22:45:46 | 2015-05-26T22:45:46 | 34,995,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/python
import sys
def compute(prey):
temp0 = prey[0] + prey[1]
if temp0 > prey[0]:
temp1 = min(temp0, temp0)
else:
if prey[1] != 0:
temp1 = prey[1] % prey[1]
else:
temp1 = prey[1]
temp0 = max(prey[0], temp0)
temp0 = temp0 - temp1
if prey[1] != 0:
temp2 = prey[0] / prey[1]
else:
temp2 = prey[1]
temp0 = prey[0] - prey[0]
temp1 = min(temp1, temp0)
temp0 = min(prey[1], prey[0])
temp3 = min(prey[0], temp0)
temp0 = min(prey[0], temp2)
temp0 = prey[1] + temp1
if prey[0] > temp1:
temp2 = prey[0] + temp0
else:
temp2 = -1 * prey[1]
if temp2 != 0:
temp2 = prey[0] / temp2
else:
temp2 = temp2
return [temp1, temp1]
| [
"i7674211@bournemouth.ac.uk"
] | i7674211@bournemouth.ac.uk |
ab074c76f1cd3c7b21498b686f78ed8ea7f39077 | 0fa7eaca5da443628461b429d7d1bcb4f2cbb7e2 | /code_review.py | 100be4af32af533c7020f3c01c313700c50a09d0 | [] | no_license | Boehrer/technical-screen | f45b285c143c9fca63c17c5359787cdd748171a6 | b4144b2a8041a8b9bf9d491caf900d94e3673bd3 | refs/heads/main | 2023-08-13T22:27:20.771354 | 2021-09-22T19:00:49 | 2021-09-22T19:00:49 | 356,362,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,855 | py | import unittest
import pandas as pd
ETLS = {
"us_spending": {
"steps": [
{
"function": "download_csv",
"args": {
"path": "https://www.usgovernmentspending.com/rev/usgs_downchart_csv.php?year=1990_2026&state=US&units=b&view=2&fy=fy21&chart=F0-fed&stack=1&local=s&thing=",
"header": 1
}
},
{
"function": "crop_csv",
"args": {}
},
{
"function": "write_csv",
"args": {
"path": "s3://pipeline-data/us_spending.csv"
}
}
]
},
"population_density": {
"steps": [
{
"function": "download_csv",
"args": {
"path": "http://data.un.org/_Docs/SYB/CSV/SYB63_1_202009_Population,%20Surface%20Area%20and%20Density.csv"
}
},
{
"function": "transform_csv",
"args": {}
},
{
"function": "write_csv",
"args": {
"path": "s3://pipeline-data/population_density.csv"
}
}
]
}
}
def download_csv(path, upstream_data, header=None):
return pd.read_csv(path, header=header)
def crop_csv(upstream_data):
return upstream_data.iloc[0:37]
def transform_csv(upstream_data):
transformed_records = []
for name, record in df.iterrows():
record["foo"] = record["foo"] * 100
transformed_records.append(record)
return pd.DataFrame.from_records(transformed_records)
def write_csv(path, upstream_data):
upstream_data.to_csv(path)
class Pipeline:
def __init__(self, functions):
self.functions = functions
def run_step(self, step, upstream_data):
"""
runs a step from an etl
"""
function = self.functions[step["function"]]
return function(**step["args"], upstream_data=upstream_data)
def run_etl(self, etl):
"""
runs all steps in an etl
"""
data = None
for step in etl["steps"]:
data = self.run_step(step, data)
def run_etls(self, etls):
"""
runs multiple etls
"""
for etl in etls:
data = None
for step in etl["steps"]:
data = self.run_step(step, data)
class TestPipeline(unittest.TestCase):
pass
if __name__ == "__main__":
pipeline = Pipeline(
functions={
"download_csv": download_csv,
"transform_csv": transform_csv,
"crop_csv": crop_csv
}
)
for name, etl in ETLS.items():
print(f"running etl {name}")
pipeline.run(etl)
| [
"jack@kevalaanalytics.com"
] | jack@kevalaanalytics.com |
721c16210b081c6ce406706a8bf7b814db33d02e | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayDataAiserviceHellobikeSiteQueryModel.py | dc06ebbcb58713b563f9fd0994a388c9ab3da002 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 896 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayDataAiserviceHellobikeSiteQueryModel(object):
def __init__(self):
self._plan_id = None
@property
def plan_id(self):
return self._plan_id
@plan_id.setter
def plan_id(self, value):
self._plan_id = value
def to_alipay_dict(self):
params = dict()
if self.plan_id:
if hasattr(self.plan_id, 'to_alipay_dict'):
params['plan_id'] = self.plan_id.to_alipay_dict()
else:
params['plan_id'] = self.plan_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayDataAiserviceHellobikeSiteQueryModel()
if 'plan_id' in d:
o.plan_id = d['plan_id']
return o
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
b02cacc52666b9eb02ec53c25a36fbf0ce87c48c | 2b86c931e2a85b285897f5ef6b120ea3dbfe79e7 | /core/settings.py | fff98839188d485665ccaa568ec075a8ffe8b912 | [] | no_license | mobinkazak/django-blog | bae4ed47045e3a304e4507ef9da972c562ddaf8b | bf1d15a30a51cccb03a7ff18124658306f1efff6 | refs/heads/main | 2023-06-22T18:30:17.018487 | 2021-07-25T06:10:51 | 2021-07-25T06:10:51 | 430,067,686 | 1 | 0 | null | 2021-11-20T10:05:51 | 2021-11-20T10:05:51 | null | UTF-8 | Python | false | false | 3,505 | py | """
Django settings for core project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-*oxwtde1rm57yony01u3kk+(8&g!t0v)45*b5jdr90kp4mk^)z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
'accounts.apps.AccountsConfig',
'ckeditor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
| [
"kzmasut@gmail.com"
] | kzmasut@gmail.com |
41adb72dc70a7653c92e9b5e7b57f5a021b21354 | 4129c8f94e113ea9bb18986a1fabefe7b08d5022 | /biggerDataHeatmiser.py | 911a228d97c08c59461c008fd0b4b1e4fe27b415 | [] | no_license | NylaWorker/AI_assignmentOne | f225d49d18d934dbddc271899e2bd7ed32ef2690 | 3bbba4e500f0a54a7cc84d691d09315952ef9606 | refs/heads/master | 2021-05-11T08:15:42.264189 | 2018-01-19T05:32:35 | 2018-01-19T05:32:35 | 118,046,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,728 | py | import numpy as np
import math
import decimal
import csv
'''
Generating initial conditions of the 12 rooms.
'''
maxTem = 75.
minTem = 65.
maxHum = 55.
minHum = 45.
temps = np.random.uniform(low=minTem, high=maxTem, size=(12,))[np.newaxis]
humidity = np.random.uniform(low=minHum, high=maxHum, size=(12,))[np.newaxis]
rooms = np.stack((temps.T, humidity.T), axis=1)
print("The initial condition of the offices")
print(rooms.T)
'''
Robot Actions
'''
def lowTemp(ofNum):
rooms[ofNum][0] = rooms[ofNum][0] - 1.
def raiseTemp(ofNum):
rooms[ofNum][0] = rooms[ofNum][0] + 1.
def raiseHum(ofNum):
rooms[ofNum][1] = rooms[ofNum][1] + 1.
def lowHum(ofNum):
rooms[ofNum][1] = rooms[ofNum][1] - 1.
def stdRooms(opt="Both"):
if opt == "Tem":
return np.std(rooms.T, axis=2)[0][0]
elif opt == "Hum":
return np.std(rooms.T, axis=2)[0][1]
else:
return np.std(rooms.T, axis=2)
def averageRooms(opt="Both"):
if opt == "Tem":
return np.average(rooms.T, axis=2)[0][0]
elif opt == "Hum":
return np.average(rooms.T, axis=2)[0][1]
else:
return np.average(rooms.T, axis=2)
def baseAlgorithm(curTem, curHum, temPercent, humPercent, office):
if curHum > 48. and curTem > 73.:
if temPercent > humPercent:
lowTemp(office)
else:
lowHum(office)
elif curHum < 47. and curTem < 73.:
if temPercent > humPercent:
raiseTemp(office)
else:
raiseHum(office)
elif curHum < 47. and curTem > 73.:
if temPercent > humPercent:
lowTemp(office)
else:
raiseHum(office)
elif curHum > 48. and curTem < 73.:
if temPercent > humPercent:
raiseTemp(office)
else:
lowHum(office)
def basicSolution(curTem, curHum, office):
if math.fabs(curTem - 73) > math.fabs(curHum - 48):
if curTem > 73.:
lowTemp(office)
else:
raiseTemp(office)
else:
if curHum > 48.:
lowHum(office)
else:
raiseHum(office)
'''
Simmulation
'''
trialCount = 0
office = 0
'''
In this solution HeatMiser will change the temperature or humidity of each room it visits, so long it is not in the desired range.
The distance is evaluated in each room. Which one is farther from the ideal average?
For that the following formula is used:
abs(idealAverage - current office value)/abs(maximum value and minimum value)
'''
runs = 0
with open('dataHeatNew1.csv', 'w') as csvfile:
datawriter = csv.writer(csvfile)
datawriter.writerow(["Number of Run"," Trial Count", "Initial Average Hum","Average hum after algorithm", " Initial Standard hum deviation", "Standard deviation hum after algorithm", "Initial Average tem","Average tem after algorithm", " Initial Standard tem deviation", "Standard deviation tem after algorithm", "humAveIss ", "humStdIss","temAveIss","temStdIss"])
for y in range(0,5):
runs = 0
for x in range(0,100):
temps = np.random.uniform(low=minTem, high=maxTem, size=(12,))[np.newaxis]
humidity = np.random.uniform(low=minHum, high=maxHum, size=(12,))[np.newaxis]
rooms = np.stack((temps.T, humidity.T), axis=1)
trialCount = 0
office = 0
prevRooms = rooms
initialStdHum = stdRooms("Hum")
initialAveragesHum = averageRooms("Hum")
initialStdTem = stdRooms("Tem")
initialAveragesTem = averageRooms("Tem")
humAveIss = 0
temAveIss = 0
humStdIss = 0
temStdIss = 0
while (
math.ceil(averageRooms("Tem")) != 73 or math.ceil(averageRooms("Hum")) != 48 or stdRooms(
"Tem") > 1.5 or stdRooms("Hum") > 1.7):
curTem = rooms[office][0][0]
curHum = rooms[office][1][0]
temPercent = math.fabs(73. - curTem) / math.fabs(maxTem - minTem + 4.)
humPercent = math.fabs(48. - curHum) / math.fabs(maxHum - minHum + 4.)
if trialCount == 100:
runs = runs +1
break
'''This is the base algorithm'''
if trialCount < 20:
baseAlgorithm(curTem, curHum, temPercent, humPercent, office)
else:
if math.ceil(averageRooms("Tem")) == 73 and math.ceil(averageRooms("Hum")) == 48 and stdRooms("Tem") > 1.5 and stdRooms("Hum") < 1.7:
if math.fabs(curTem - 73) > .5: #desired stdtemp =1.5
if curTem >73:
lowTemp(office)
if(math.ceil(averageRooms("Tem")) != 73):
raiseTemp(office)
elif curTem < 73:
raiseTemp(office)
if (math.ceil(averageRooms("Tem")) != 73):
lowTemp(office)
elif math.ceil(averageRooms("Tem")) == 73 or math.ceil(averageRooms("Hum")) == 48 or stdRooms("Tem") < 1.5 or stdRooms("Hum") > 1.7:
if math.fabs(curHum - 48) > .5:
if curHum > 48:
lowHum(office)
if (math.ceil(averageRooms("Hum")) != 48):
raiseHum(office)
elif curHum < 48:
raiseHum(office)
if (math.ceil(averageRooms("Hum")) != 48):
lowHum(office)
elif math.ceil(averageRooms("Tem")) != 73 or math.ceil(averageRooms("Hum")) == 48 or stdRooms("Tem") < 1.5 or stdRooms("Hum") < 1.7:
if math.fabs(curTem - 73) < 1.5:
if curTem > 73:
lowTemp(office)
if (stdRooms("Tem") > 1.5):
raiseTemp(office)
elif curTem < 73:
raiseTemp(office)
if (stdRooms("Tem") > 1.5):
lowTemp(office)
elif math.ceil(averageRooms("Tem")) == 73 or math.ceil(averageRooms("Hum")) != 48 or stdRooms("Tem") < 1.5 or stdRooms("Hum") < 1.7:
if math.fabs(curHum - 48) < 1.7:
if curHum > 48:
lowHum(office)
if ( stdRooms("Hum") > 1.7):
raiseHum(office)
elif curHum < 48:
raiseHum(office)
if ( stdRooms("Hum") > 1.7):
lowHum(office)
else:
basicSolution(curTem, curHum, office)
office += 1
if office == 11:
trialCount += 1
office = 0
if x+1 == 100:
datawriter.writerow( [x + 1, trialCount, initialAveragesHum, averageRooms("Hum"), initialStdHum, stdRooms("Hum"),initialAveragesTem, averageRooms("Tem"), initialStdTem, stdRooms("Tem"), humAveIss, humStdIss,temAveIss, temStdIss,y,runs])
else:
datawriter.writerow([x+1,trialCount,initialAveragesHum, averageRooms("Hum"), initialStdHum, stdRooms("Hum"),initialAveragesTem, averageRooms("Tem"), initialStdTem, stdRooms("Tem"),humAveIss,humStdIss,temAveIss,temStdIss])
| [
"nyla.worker@gmail.com"
] | nyla.worker@gmail.com |
7578264ef7177ae35cfa708b9ce8fceb34121745 | 6065db223f746b5c65ad33945e88d2105526d30d | /Leetcode/85-Maximal-Rectangle/solution.py | 0d5cf2eac4dc57a085c4ab57450d479f9d58768e | [] | no_license | chocoluffy/leetcode-cpp | ce245a18c24ec5818e0b493c0360f0f6c73f991f | 7f7e8325500b335ffc35c41b3d41b01472cce7cb | refs/heads/master | 2020-03-16T06:11:15.057166 | 2018-12-05T15:24:08 | 2018-12-05T15:24:08 | 132,548,814 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,459 | py | """
- Construct a matrix by a bottom-up fashion, each entry record (m, n), meaning
the consecutive 1s in two directions as bottom and right. Such as at position
(i, j), there are m consecutive 1s from current position towards the bottom,
and n such consecutive 1s towards the right.
- then one more pass to find out the maximum area from the matrix entry.
# challenge:
- inner list being updated at the same time. but not individually! mind such
reference error.
- handle the base case in the matrix form, especially the case:
- empty matrix.
- single row matrix.
"""
import pprint
class Solution(object):
def maximalRectangle_better(self, matrix):
if not matrix or not matrix[0]:
return 0
heights = [0] * (len(matrix[0])+1)
max_area = 0
for row in matrix:
for i in xrange(len(row)): # update height
heights[i] = heights[i] + 1 if row[i] == '1' else 0
stack = [-1]
for i in xrange(len(matrix[0])+1):
while heights[i] < height[stack[-1]]:
height = heights[stack.pop()]
l_index = stack[-1]
max_area = max(max_area, height * (i - 1 - l_index))
stack.append(i)
return max_area
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]] :rtype: int
"""
if not matrix:
return 0
table = [[(0, 0)] * (len(matrix[0]) + 1)
for _ in range(len(matrix) + 1)]
# update right direction count.
for i in range(len(matrix) - 1, -1, -1):
for j in range(len(matrix[0]) - 1, -1, -1):
this_lst = list(table[i][j])
if matrix[i][j] == '1':
this_lst[1] = table[i][j+1][1] + 1
else:
this_lst[1] = 0
table[i][j] = tuple(this_lst)
# update bottom direction count.
for i in range(len(matrix[0]) - 1, -1, -1):
for j in range(len(matrix) - 1, -1, -1):
this_lst = list(table[j][i])
if matrix[j][i] == '1':
this_lst[0] = table[j+1][i][0] + 1
else:
this_lst[0] = 0
table[j][i] = tuple(this_lst)
max_area = 0
for i in range(len(matrix)):
for j in range(len(matrix[0])):
x = float('inf') if j+1 == len(matrix[0]) else table[i][j+1][0]
y = float('inf') if i+1 == len(matrix) else table[i+1][j][1]
validator = (x, y)
# print i, j, validator
if table[i][j][0] == 1 or table[i][j][1] == 1 or (table[i][j][0] <= validator[0] and table[i][j][1] <= validator[1]):
current_area = table[i][j][0] * table[i][j][1]
max_area = current_area if current_area > max_area else max_area
pprint.pprint(table)
return max_area
# this_matrix = [["1","0","1","0","0"], ["1","0","1","1","1"],
# ["1","1","1","1","1"], ["1","0","0","1","0"]
# ]
# this_matrix = [["1", "1"]]
# this_matrix = [["1", "0"], ["1", "0"]]
this_matrix = [["1","1","1","1","1","1","1","1"],["1","1","1","1","1","1","1","0"],["1","1","1","1","1","1","1","0"],["1","1","1","1","1","0","0","0"],["0","1","1","1","1","0","0","0"]]
pprint.pprint(this_matrix)
print Solution().maximalRectangle(this_matrix)
| [
"yushunzhe951104@gmail.com"
] | yushunzhe951104@gmail.com |
4b96a0e33cc538fef822e3e29414a224c8b0e26e | 86048feeeb4f741061637e69ab1a6bc59fffa553 | /methcomp/regression.py | 2c16f0b2c6e1143bec2832bbea3589f24759185f | [
"MIT"
] | permissive | FloEll/methcomp | 258f65a2d84335b16e757a43936c3f7d81b0d191 | 920c51b2f799cd6041c7013ad7ed6c2444cbff3f | refs/heads/master | 2020-09-29T16:26:55.800583 | 2019-12-09T19:54:42 | 2019-12-09T19:54:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,820 | py | import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import math
import numpy as np
from collections import Iterable
__all__ = ["deming", "passingbablok"]
class _Deming(object):
"""Internal class for drawing a Deming regression plot"""
def __init__(self, method1, method2,
vr, sdr, bootstrap,
x_label, y_label, title,
CI, line_reference, line_CI, legend,
color_points, color_deming):
self.method1: np.array = np.asarray(method1)
self.method2: np.array = np.asarray(method2)
self.vr = vr
self.sdr = sdr
self.bootstrap = bootstrap
self.x_title = x_label
self.y_title = y_label
self.graph_title = title
self.color_points = color_points
self.color_deming = color_deming
self.CI = CI
self.line_reference = line_reference
self.line_CI = line_CI
self.legend = legend
self._check_params()
self._derive_params()
def _check_params(self):
if len(self.method1) != len(self.method2):
raise ValueError('Length of method 1 and method 2 are not equal.')
if self.bootstrap is not None and not isinstance(self.bootstrap, int):
raise ValueError('Bootstrap argument should either be None or an integer.')
if self.CI is not None and (self.CI > 1 or self.CI < 0):
raise ValueError('Confidence interval must be between 0 and 1.')
if any([not isinstance(x, str) for x in [self.x_title, self.y_title]]):
raise ValueError('Axes labels arguments should be provided as a str.')
def _derive_params(self):
def _deming(x, y, lamb):
ssdx = np.var(x, ddof=1) * (self.n - 1)
ssdy = np.var(y, ddof=1) * (self.n - 1)
spdxy = np.cov(x, y)[1][1] * (self.n - 1)
beta = (ssdy - lamb * ssdx + math.sqrt((ssdy - lamb * ssdx) ** 2 + 4 * lamb * (ssdy ** 2))) / (
2 * spdxy)
alpha = y.mean() - beta * x.mean()
ksi = (lamb * x + beta * (y - alpha)) / (lamb + beta ** 2)
sigmax = lamb * sum((x - ksi) ** 2) + sum((y - alpha - beta * ksi) ** 2) / (
(self.n - 2) * beta)
sigmay = math.sqrt(lamb * sigmax)
sigmax = math.sqrt(sigmax)
return alpha, beta, sigmax, sigmay
self.n = len(self.method1)
if self.vr is not None:
_lambda = self.vr
elif self.sdr is not None:
_lambda = self.sdr
else:
_lambda = 1
params = _deming(self.method1, self.method2, _lambda)
if self.bootstrap is None:
self.alpha = params[0]
self.beta = params[1]
self.sigmax = params[2]
self.sigmay = params[3]
else:
_params = np.zeros([self.bootstrap, 4])
for i in range(self.bootstrap):
idx = np.random.choice(range(self.n), self.n, replace=True)
_params[i] = _deming(np.take(self.method1, idx), np.take(self.method2, idx), _lambda)
_paramsdf = pd.DataFrame(_params, columns=['alpha', 'beta', 'sigmax', 'sigmay'])
se = np.sqrt(np.diag(np.cov(_paramsdf.cov())))
t = np.transpose(
np.apply_along_axis(np.quantile, 0, _params, [0.5, (1 - self.CI) / 2, 1 - (1 - self.CI) / 2]))
self.alpha = [t[0][0], se[0], t[0][1], t[0][2]]
self.beta = [t[1][0], se[1], t[0][1], t[0][2]]
self.sigmax = [t[2][0], se[2], t[0][1], t[0][2]]
self.sigmay = [t[3][0], se[3], t[0][1], t[0][2]]
def plot(self, ax):
# plot individual points
ax.scatter(self.method1, self.method2, s=20, alpha=0.6, color=self.color_points)
# plot reference line
if self.line_reference:
ax.plot([0, 1], [0, 1], label='Reference',
color='grey', linestyle='--', transform=ax.transAxes)
# plot Deming-line
_xvals = np.array(ax.get_xlim())
if self.bootstrap is None:
_yvals = self.alpha + self.beta * _xvals
ax.plot(_xvals, _yvals, label=f'{self.alpha:.2f} + {self.beta:.2f} * Method 1',
color=self.color_deming, linestyle='-')
else:
_yvals = [self.alpha[s] + self.beta[0] * _xvals for s in range(0, 4)]
ax.plot(_xvals, _yvals[0], label=f'{self.alpha[0]:.2f} + {self.beta[0]:.2f} * Method 1',
color=self.color_deming, linestyle='-')
ax.fill_between(_xvals, _yvals[2], _yvals[3], color=self.color_deming, alpha=0.2)
if self.line_CI:
ax.plot(_xvals, _yvals[2], linestyle='--')
ax.plot(_xvals, _yvals[3], linestyle='--')
if self.legend:
ax.legend(loc='upper left', frameon=False)
ax.set_ylabel(self.y_title)
ax.set_xlabel(self.x_title)
if self.graph_title is not None:
ax.set_title(self.graph_title)
def deming(method1, method2,
           vr=None, sdr=None, bootstrap=1000,
           x_label='Method 1', y_label='Method 2', title=None,
           CI=0.95, line_reference=True, line_CI=False, legend=True,
           color_points='#000000', color_deming='#008bff',
           square=False, ax=None):
    """Plot a Deming regression comparing two measurement methods.

    This is an Axis-level function: it draws onto ``ax`` when provided,
    otherwise onto the currently active matplotlib Axes.

    Parameters
    ----------
    method1, method2 : array, or list
        Paired measurements obtained from the two methods.
    vr : float, optional
        Assumed known ratio of the (residual) variance of the ys
        relative to that of the xs. Defaults to 1.
    sdr : float, optional
        Assumed known standard deviations. ``vr`` takes precedence when
        both are supplied. Defaults to 1.
    bootstrap : int, optional
        Number of bootstrap estimates used for standard errors and
        confidence intervals; pass None to skip bootstrapping.
        Defaults to 1000.
    x_label : str, optional
        Label for the X-axis; a standard label is used by default.
    y_label : str, optional
        Label for the Y-axis; a standard label is used by default.
    title : str, optional
        Title of the plot; no title is drawn when None.
    CI : float, optional
        Confidence level for the interval estimates. Defaults to 0.95.
    line_reference : bool, optional
        If True, draw a grey y=x reference line. Defaults to True.
    line_CI : bool, optional
        If True, draw dashed lines at the confidence-interval
        boundaries. Defaults to False.
    legend : bool, optional
        If True, show a legend with the fitted Deming equation.
        Defaults to True.
    color_points : str, optional
        Matplotlib-compatible color for the individual points.
    color_deming : str, optional
        Matplotlib-compatible color for the regression line.
    square : bool, optional
        If True, force an equal aspect ratio so the Axes is square.
    ax : matplotlib Axes, optional
        Axes to draw into; the currently active Axes is used when None.

    Returns
    -------
    ax : matplotlib Axes
        Axes object containing the Deming regression plot.

    See Also
    -------
    Koopmans, T. C. (1937). Linear regression analysis of economic time series. DeErven F. Bohn, Haarlem, Netherlands.
    Deming, W. E. (1943). Statistical adjustment of data. Wiley, NY (Dover Publications edition, 1985).
    """
    # Fit first so argument validation errors surface before any drawing.
    regression = _Deming(method1, method2,
                         vr, sdr, bootstrap,
                         x_label, y_label, title,
                         CI, line_reference, line_CI, legend,
                         color_points, color_deming)

    # Resolve the target Axes and render the fitted regression onto it.
    target = ax if ax is not None else plt.gca()
    if square:
        target.set_aspect('equal')

    regression.plot(target)
    return target
class _PassingBablok(object):
"""Internal class for drawing a Passing-Bablok regression plot"""
def __init__(self, method1, method2,
x_label, y_label, title,
CI, line_reference, line_CI, legend,
color_points, color_paba):
self.method1: np.array = np.asarray(method1)
self.method2: np.array = np.asarray(method2)
self.x_title = x_label
self.y_title = y_label
self.graph_title = title
self.CI = CI
self.color_points = color_points
self.color_paba = color_paba
self.line_reference = line_reference
self.line_CI = line_CI
self.legend = legend
self._check_params()
self._derive_params()
def _check_params(self):
if len(self.method1) != len(self.method2):
raise ValueError('Length of method 1 and method 2 are not equal.')
if self.CI is not None and (self.CI > 1 or self.CI < 0):
raise ValueError('Confidence interval must be between 0 and 1.')
if any([not isinstance(x, str) for x in [self.x_title, self.y_title]]):
raise ValueError('Axes labels arguments should be provided as a str.')
def _derive_params(self):
self.n = len(self.method1)
self.sv = []
for i in range(self.n - 1):
for j in range(i + 1, self.n):
self.sv.append((self.method2[i] - self.method2[j]) /
(self.method1[i] - self.method1[j]))
self.sv.sort()
n = len(self.sv)
k = math.floor(len([a for a in self.sv if a < 0]) / 2)
if n % 2 == 1:
self.slope = self.sv[int((n + 1) / k + 2)]
else:
self.slope = math.sqrt(self.sv[int(n / 2 + k)] * self.sv[int(n / 2 + k + 1)])
_ci = st.norm.ppf(1 - (1 - self.CI) / 2) * math.sqrt((self.n * (self.n - 1) * (2 * self.n + 5)) / 18)
_m1 = int(round((n - _ci) / 2))
_m2 = n - _m1 - 1
self.slope = [self.slope, self.sv[k + _m1], self.sv[k + _m2]]
self.intercept = [np.median(self.method2 - self.slope[0] * self.method1),
np.median(self.method2 - self.slope[1] * self.method1),
np.median(self.method2 - self.slope[2] * self.method1)]
def plot(self, ax):
# plot individual points
ax.scatter(self.method1, self.method2, s=20, alpha=0.6, color=self.color_points)
# plot reference line
if self.line_reference:
ax.plot([0, 1], [0, 1], label='Reference',
color='grey', linestyle='--', transform=ax.transAxes)
# plot PaBa-line
_xvals = np.array(ax.get_xlim())
_yvals = [self.intercept[s] + self.slope[s] * _xvals for s in range(0, 3)]
ax.plot(_xvals, _yvals[0], label=f'{self.intercept[0]:.2f} + {self.slope[0]:.2f} * Method 1',
color=self.color_paba, linestyle='-')
ax.fill_between(_xvals, _yvals[1], _yvals[2], color=self.color_paba, alpha=0.2)
if self.line_CI:
ax.plot(_xvals, _yvals[1], linestyle='--')
ax.plot(_xvals, _yvals[2], linestyle='--')
if self.legend:
ax.legend(loc='upper left', frameon=False)
ax.set_ylabel(self.y_title)
ax.set_xlabel(self.x_title)
if self.graph_title is not None:
ax.set_title(self.graph_title)
def passingbablok(method1, method2,
                  x_label='Method 1', y_label='Method 2', title=None,
                  CI=0.95, line_reference=True, line_CI=False, legend=True,
                  color_points='#000000', color_paba='#008bff',
                  square=False, ax=None):
    """Provide a method comparison using Passing-Bablok regression.

    This is an Axis-level function which will draw the Passing-Bablok plot
    onto the current active Axis object unless ``ax`` is provided.

    Parameters
    ----------
    method1, method2 : array, or list
        Values obtained from both methods, preferably provided in a np.array.
    x_label : str, optional
        The label which is added to the X-axis. If None is provided, a standard
        label will be added.
    y_label : str, optional
        The label which is added to the Y-axis. If None is provided, a standard
        label will be added.
    title : str, optional
        Title of the Passing-Bablok plot. If None is provided, no title will be plotted.
    CI : float, optional
        The confidence interval employed in the mean difference and limit of agreement
        lines. Defaults to 0.95.
    line_reference : bool, optional
        If True, a grey reference line at y=x will be plotted in the plot.
        Defaults to true.
    line_CI : bool, optional
        If True, dashed lines will be plotted at the boundaries of the confidence intervals.
        Defaults to false.
    legend : bool, optional
        If True, will provide a legend containing the computed Passing-Bablok equation.
        Defaults to true.
    color_points : str, optional
        Color of the individual differences that will be plotted.
        Color should be provided in format compatible with matplotlib.
    color_paba : str, optional
        Color of the mean difference line that will be plotted.
        Color should be provided in format compatible with matplotlib.
    square : bool, optional
        If True, set the Axes aspect to "equal" so each cell will be
        square-shaped.
    ax : matplotlib Axes, optional
        Axes in which to draw the plot, otherwise use the currently-active
        Axes.

    Returns
    -------
    ax : matplotlib Axes
        Axes object with the Passing-Bablok plot.

    References
    ----------
    Passing H and Bablok W. J Clin Chem Clin Biochem, vol. 21, no. 11, 1983, pp. 709 - 720
    """
    plotter: _PassingBablok = _PassingBablok(method1, method2,
                                             x_label, y_label, title,
                                             CI, line_reference, line_CI, legend,
                                             color_points, color_paba)

    # Draw the plot and return the Axes
    if ax is None:
        ax = plt.gca()
    if square:
        ax.set_aspect('equal')
    plotter.plot(ax)
    return ax
| [
"wptmdoorn@gmail.com"
] | wptmdoorn@gmail.com |
5feb48a56df0215523400f5f2217468fc2545777 | 2a610b820d7964601fcf5e3228ad7b31dd2fe98d | /Reddit_Files_Scrapper copy.py | 9cb0dcc14429a0215bb700489c43fda9d7def3a2 | [] | no_license | adnanhaider/RedditScrapper | f7e075d5ff0f52103a5f8f45f8140bee495f7922 | 334a63a09da150e039b25901824a6c1673c51784 | refs/heads/master | 2023-03-14T02:27:10.940190 | 2021-03-13T08:39:25 | 2021-03-13T08:39:25 | 338,262,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,015 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import StaleElementReferenceException
import numpy as np
from time import sleep
import time
import io
import datetime
import csv
import pandas as pd
import json
# import unicodecsv as csv
# from io import BytesIO
import os
import pathlib
base_dir = pathlib.Path(__file__).parent.absolute()
def createDriver():
    """Build and return a Chrome WebDriver configured for unattended scraping."""
    opts = Options()
    # Same switches as before: quiet UI, maximized window, container-friendly flags.
    for flag in ("--disable-infobars", "start-maximized", "--disable-extensions",
                 "--disable-dev-shm-usage", "--no-sandbox"):
        opts.add_argument(flag)
    # Keep the browser open after the script detaches.
    opts.add_experimental_option("detach", True)
    # Suppress site notification permission prompts.
    opts.add_experimental_option("prefs", {
        "profile.default_content_setting_values.notifications": 2})
    return webdriver.Chrome(executable_path='chromedriver', options=opts)
def new_process(url):
    """Open *url* in a fresh browser, scrape its /new listing, then close the browser."""
    browser = createDriver()
    browser.get(url)
    scraped = scrape_new(browser)
    browser.close()
    return scraped
def scrape_new(driver):
    """Collect posts from a subreddit's /new listing until stale posts appear.

    Walks the post cards currently on the page, scraping each via
    get_post_data and scrolling to the next; stops once three posts in a row
    are older than one hour.

    Returns a dict with keys 'online_users', 'number_of_posts' and 'data'
    (the list of per-post dicts produced by get_post_data).
    """
    scrolling = True
    posts = dict()
    data = []
    # Run length of consecutive posts older than one hour; three ends the scrape.
    consecutive_post_count_older_than_one_hour = 0
    is_consecutive = True
    try:
        # The second element with this class holds the "users online" counter.
        online_users = driver.find_elements_by_xpath('//div[@class="_3XFx6CfPlg-4Usgxm0gK8R"]')[1].text
    except:
        online_users = 0
    while scrolling:
        sleep(5)
        page_cards = driver.find_elements_by_css_selector('.Post')
        for i, card in enumerate(page_cards):
            if i+1 < len(page_cards):
                post = get_post_data(driver, card, i)
                scroll_to_element(driver, page_cards[i+1]) # scrolling to next post
                sleep(5)
                if post and not more_than_hour_ago(post['timestamp']):
                    # Fresh post: reset the stale-run counter and keep the data.
                    is_consecutive = False
                    consecutive_post_count_older_than_one_hour = 0
                    data.append(post)
                elif is_consecutive:
                    consecutive_post_count_older_than_one_hour +=1
                # NOTE(review): is_consecutive is re-armed for every card, so the
                # elif guard above only skips the increment on the card right after
                # a fresh post — original indentation was lost, confirm this
                # nesting is what was intended.
                is_consecutive = True
                if consecutive_post_count_older_than_one_hour >= 3:
                    scrolling = False
                    break
    posts['online_users'] = online_users
    posts['number_of_posts'] = len(data)
    posts['data'] = data
    return posts
def scroll_to_element(driver, element):
    """Mimics human scrolling behavior and will put the element with 70 pixels of the center of the current viewbox."""
    viewport_height = driver.execute_script("return window.innerHeight")
    current_top = driver.execute_script("return document.documentElement.scrollTop")
    # Target offset that puts the element in the middle of the viewport.
    target_top = element.location['y'] - viewport_height / 2
    # Scroll direction is fixed up-front from the initial positions.
    direction = np.sign(target_top - current_top)
    while np.abs(current_top - target_top) > 70:
        step = np.random.uniform(2, 69) * direction
        driver.execute_script("window.scrollBy(0, {})".format(step))
        current_top = driver.execute_script("return document.documentElement.scrollTop")
        # Brief, jittered pause between steps so the motion looks human.
        sleep(np.abs(np.random.normal(0.0472, 0.003)))
def get_post_data(driver, card, i):
    """Extract timestamp and comment data for one post card.

    Returns a dict with 'timestamp', 'comments' (count string) and
    'comments_on_post' (list of comment texts), or None when the post has
    zero comments or any scraping step fails.
    """
    try:
        post_data = dict()
        try:
            post_data['timestamp'] = card.find_element_by_css_selector('a[data-click-id="timestamp"]').text
        except:
            print('could not find the timestamp id')
        try:
            # First token of the label is the comment count (e.g. "3 comments").
            post_data['comments'] = card.find_element_by_css_selector('span.FHCV02u6Cp2zYL0fhQPsO').text.split()[0]
        except:
            # Retry once after a short wait; the element may not have rendered yet.
            time.sleep(3)
            post_data['comments'] = card.find_element_by_css_selector('span.FHCV02u6Cp2zYL0fhQPsO').text.split()[0]
        if post_data['comments'] == '0':
            # Nothing to collect from a comment-less post (implicitly returns None).
            return
        post_data['comments_on_post'] = get_post_comments(driver, i)
        return post_data
    except:
        # Any unexpected scraping error is swallowed; callers treat None as "skip".
        return None
def get_post_comments(driver, i):
    """Open the i-th post's comment page in a new tab and return its comment texts.

    Returns a list of comment strings, or None if anything goes wrong.
    """
    try:
        url = driver.find_elements_by_css_selector('a[data-click-id="comments"]')[i].get_attribute('href')
        # Open the comments page in a second tab and switch focus to it.
        driver.execute_script(f"window.open('{url}');")
        driver.switch_to.window(driver.window_handles[1])
        sleep(3)
        comments_on_post = []
        try:
            # Expand the full thread when Reddit collapses the discussion.
            driver.find_element_by_xpath("//button[contains(text(), 'View Entire Discussion')]").click()
        except:
            pass
        sleep(3)
        for comment in driver.find_elements_by_css_selector('.P8SGAKMtRxNwlmLz1zdJu.Comment'):
            try:
                comments_on_post.append(comment.find_element_by_css_selector('._3tw__eCCe7j-epNCKGXUKk ._3cjCphgls6DH-irkVaA0GM ._292iotee39Lmt0MkQZ2hPV.RichTextJSON-root ._1qeIAgB0cPwnLhDF9XSiJM').text)
            except:
                # Skip comments whose markup does not match the expected selector.
                pass
        # Close the tab and return focus to the original listing page.
        driver.close()
        driver.switch_to.window(driver.window_handles[0])
        return comments_on_post
    except:
        return None
def more_than_hour_ago(timestamp):
    """Return True if a Reddit relative timestamp is at least one hour old.

    Works on strings like "7 minutes ago", "2 hours ago", "1 day ago".
    Anything phrased in minutes/seconds (or unrecognized) returns False.

    FIX: the original marker list had 'days ago' but not 'day ago', so
    "1 day ago" was wrongly treated as recent. The singular/plural pairs are
    both needed because neither is a substring of the other.
    """
    markers = ('hour ago', 'hours ago',
               'day ago', 'days ago',
               'week ago', 'weeks ago',
               'month ago', 'months ago',
               'year ago', 'years ago')
    return any(marker in timestamp for marker in markers)
def first_project_functionality():
    """Hourly scrape/aggregate loop.

    For each subreddit, writes one temporary "<name>.csv" holding this hour's
    metrics (hour number, timestamp, online users, post count, post data,
    total votes — one value per line), then merges every such file into a
    single row of output/main.csv and deletes the temporaries.

    NOTE(review): the live scraping calls are commented out and replaced with
    a hard-coded `result` / `total_vote` stub, presumably for testing —
    re-enable new_process()/hot_process() for real runs.
    """
    urls_new = [
        # 'https://reddit.com/r/btc/new/',
        'https://reddit.com/r/bitcoin/new/',
        # 'https://reddit.com/r/ethereum/new/',
        # 'https://reddit.com/r/monero/new/',
        # 'https://reddit.com/r/dashpay/new/',
        # 'https://reddit.com/r/ethtrader/new/',
        # 'https://reddit.com/r/ethfinance/new/',
        # 'https://reddit.com/r/xmrtrader/new/',
    ]
    urls_hot = [
        # 'https://reddit.com/r/btc/hot/',
        'https://reddit.com/r/bitcoin/hot/',
        # 'https://reddit.com/r/ethereum/hot/',
        # 'https://reddit.com/r/monero/hot/',
        # 'https://reddit.com/r/dashpay/hot/',
        # 'https://reddit.com/r/ethtrader/hot/',
        # 'https://reddit.com/r/ethfinance/hot/',
        # 'https://reddit.com/r/xmrtrader/hot/',
    ]
    not_ran = True  # NOTE(review): never read — candidate for removal
    running = True
    counter = 1
    hour = 1
    main_csv = 'output/main.csv'
    while running:
        if counter == 2: # here you will put 24
            running = False
        counter += 1
        for i, _ in enumerate(urls_new):
            # result = new_process(urls_new[i])
            # total_vote = hot_process(urls_hot[i])
            # Hard-coded stand-ins for the scraper output (see docstring note).
            result = {"online_users":'1.5k',"number_of_posts": '3','data':[{'timestamp': '7 minutes ago', 'comments': '3', 'comments_on_post': ['hello how are you doing.','what is the rate for bitcoin today?', 'blahblahablah']}, {'timestamp': '7 minutes ago', 'comments': '3', 'comments_on_post': ['hello how are you doing.','what is the rate for bitcoin today?', 'blahblahablah']},{'timestamp': '7 minutes ago', 'comments': '3', 'comments_on_post': ['hello how are you doing.','what is the rate for bitcoin today?', 'blahblahablah']}]}
            total_vote = 999
            # Subreddit name, e.g. 'bitcoin' from 'https://reddit.com/r/bitcoin/hot/'.
            web_name = urls_hot[i].split("/")[-3]
            with open(web_name+".csv", 'a+', newline='', buffering=1,encoding='utf-8') as f:
                row = []  # NOTE(review): never used — candidate for removal
                # One metric per line; merged into a single main.csv row below.
                f.write(str(hour))
                f.write('\n')
                dt = datetime.datetime.now()
                f.write(str(dt.strftime("%m/%d/%Y %H:%M")))
                f.write('\n')
                f.write(str(result['online_users']))
                f.write('\n')
                f.write(str(result['number_of_posts']))
                f.write('\n')
                # Post data is wrapped in double quotes so its commas do not
                # split the merged CSV row.
                f.write('"')
                f.write(str(result['data']))
                f.write('"')
                f.write('\n')
                f.write(str(total_vote))
        # Gather every per-subreddit CSV produced above.
        files = os.listdir(base_dir)
        files = list(filter(lambda f: f.endswith('.csv'), files))
        with open(main_csv , 'a+', newline='', buffering=1, encoding='utf-8') as f:
            text_to_write_in_main_csv_file = []
            header_text_of_all_files = []
            for _file in files:
                with open(_file, 'r',encoding='utf-8') as read_obj:
                    file_name = _file.split('.csv')[0]
                    # Six header columns per subreddit, mirroring the six lines
                    # written into each temporary file.
                    header_text_of_all_files.append(file_name+'hour')
                    header_text_of_all_files.append(file_name+'_time_&_date')
                    header_text_of_all_files.append(file_name+'_online_users')
                    header_text_of_all_files.append(file_name+'__number_of_post')
                    header_text_of_all_files.append(file_name+'_comments')
                    header_text_of_all_files.append(file_name+'_total_votes')
                    lis = [line.split('\n') for line in read_obj]
                    for i, x in enumerate(lis):
                        # x[0] is the line with its trailing newline stripped.
                        text_to_write_in_main_csv_file.append(x[0])
            header_text_of_all_files = ",".join(str(x) for x in header_text_of_all_files)
            text_to_write_in_main_csv_file = ",".join(str(x) for x in text_to_write_in_main_csv_file)
            # Write the header only when main.csv is brand new (size 0).
            if os.stat(main_csv).st_size == 0:
                f.write(header_text_of_all_files)
                f.write('\n')
            f.write(text_to_write_in_main_csv_file)
            f.write('\n')
        # Temporaries are merged; remove them so the next hour starts clean.
        for _file in files:
            os.remove(_file)
        hour += 1
        sleep(3) # how long to wait between runs
def hot_process(url):
    """Open *url* in a fresh browser, tally the /hot listing's votes, then close it."""
    browser = createDriver()
    browser.get(url)
    vote_total = scrape_hot(browser)
    browser.close()
    return vote_total
def scrape_hot(driver):
    """Scroll a subreddit's /hot listing until >50 posts load and sum their votes.

    Vote strings such as '1.5k' are converted to integers (1500); posts whose
    vote element cannot be found or parsed are skipped.
    """
    data = []
    screen_height = driver.execute_script("return window.screen.height;")  # scroll step size
    i = 1
    scroll_pause_time = 2
    while True:
        # Scroll one screen height per iteration to trigger Reddit's lazy loading.
        driver.execute_script("window.scrollTo(0, {screen_height}*{i});".format(screen_height=screen_height, i=i))
        i += 1
        time.sleep(scroll_pause_time)
        posts = driver.find_elements_by_css_selector('.Post')
        print(len(posts),'----------')
        if len(posts) > 50:
            break
    posts = driver.find_elements_by_css_selector('.Post')
    print(len(posts),'<-- posts scraped ')
    for i, post in enumerate(posts):
        # FIX: reset per iteration — previously `vote` was unbound if the very
        # first lookup failed (NameError) and stale on later failures.
        vote = None
        try:
            vote = post.find_element_by_css_selector('div._23h0-EcaBUorIHC-JZyh6J div._1E9mcoVn4MYnuBQSVDt1gC div._1rZYMD_4xY3gRcSS3p8ODO').text
        except Exception:
            print('this post does not seen to have a valid class name for vote.')
        if vote:
            data.append(vote)
    total_votes = 0
    for value in data:
        try:
            if 'k' in value:
                # FIX: '1.5k' means 1500. The old string surgery stripped the
                # dot before turning 'k' into '000', yielding 15000 (10x high).
                value = int(float(value.rstrip('k')) * 1000)
            else:
                value = int(value)
            total_votes = total_votes + value
        except Exception:
            print('post vote value was not convertable to int')
    return total_votes
# Script entry point: run the hourly scrape/aggregate loop.
if __name__ == "__main__":
    first_project_functionality()
| [
"adnanhaider530@gmail.com"
] | adnanhaider530@gmail.com |
b495e97bd11ddc3e52cae162040ef09ee78e3920 | cc9b87da7d710fc0c873e9a968b1b578a460c3d1 | /Practical 6/prac6_bubble_sort.py | fc2094f2dcebc248a73ca0215b0ce6f83400f907 | [] | no_license | nikmalviya/Python | 480434edce8480ac826ca72dc03b7a3017048861 | a03f1d325ebe62282de91bc3eee57f84db9965b3 | refs/heads/master | 2020-04-25T05:34:53.726313 | 2019-03-26T15:27:10 | 2019-03-26T15:27:10 | 172,548,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | import random as r
# Demo data: ten random integers in [1, 50] (randint is inclusive on both ends).
nums = [r.randint(1, 50) for x in range(10)]
print('list : ', nums)
def bubble_sort(nums):
    """Sort *nums* in place with bubble sort and return the same list object.

    Improvement: an early-exit flag — if a full inner pass performs no swap
    the list is already sorted, giving O(n) behavior on sorted input instead
    of the unconditional O(n^2) of the original. Results are identical.
    """
    for i in range(len(nums)):
        swapped = False
        # After pass i, the largest i+1 elements sit in their final positions,
        # so the inner scan can stop i elements earlier each time.
        for j in range(len(nums) - i - 1):
            if nums[j] > nums[j+1]:
                nums[j], nums[j+1] = nums[j+1], nums[j]
                swapped = True
        if not swapped:
            break
    return nums
print('list sorted : ', bubble_sort(nums)) | [
"nikhilmalviya1662000@gmail.com"
] | nikhilmalviya1662000@gmail.com |
825ee6d09c56019645eeafe4b5e563ef3a057a23 | 251c7aa5b09cd3c260acb6eb37332755358e34f8 | /Python_DataAnalysis/02reading_writing.py | adf1af8a0f7f472228b4d489c2259f00b4419a9a | [] | no_license | izaromanowska/ABM_tutorials | 3efc82eb2f483d761f09d3ec6d06843098ff29dd | f0d8b0425345d31fc454f953588d0e5ffca6f361 | refs/heads/master | 2021-01-19T00:26:18.266606 | 2017-10-12T10:42:31 | 2017-10-12T10:42:31 | 87,171,411 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 13 18:41:21 2015
@author: user
"""
# import libraries
import pandas as pd
import matplotlib.pyplot as plt
#__________ READ A FILE __________
Acheul_Afr = pd.read_csv("Acheulean.csv", header = 0) # read in the file
print Acheul_Afr.head() # check how the data looks like
#__________ PLOT THE DATA __________
plt.style.use('ggplot') # this line makes the graphs prettier
Acheul_Afr.hist(figsize = (10,10)) # summary plot of the data
#__________ WRITE TO FILE __________
Location_output = 'Acheul_Afr.csv' # specify where to save the file to
Acheul_Afr.to_csv(Location_output) # save the .csv | [
"izaromanowska@gmail.com"
] | izaromanowska@gmail.com |
d574e853ce03ff5093d2f5b1f586476ab7c17043 | 17c897fa76df9fb5d8106649327d68f54ed7ef40 | /Python/Flask/python_stack/flask_fundamentals/flask_mysql/server.py | c948051547e0f0bd9f20daeb024b21813ff2b4bf | [] | no_license | gitRobV/DojoAssignments | 5c23b75b2d8d57c92b4eccb04395091d1cf3127a | 472d13c0461847973abc57cac345047811066157 | refs/heads/master | 2021-01-21T20:11:56.534898 | 2017-06-19T04:48:54 | 2017-06-19T04:48:54 | 92,205,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | from flask import Flask, render_template, request, redirect, session, flash
from mysqlconnection import MySQLConnector
import re
from validation import Validation
app = Flask(__name__)
# NOTE(review): session secret hard-coded in source — load it from an
# environment variable or config file before deploying.
app.secret_key = 'This is some secure issh'
# Query helper bound to the 'registration' MySQL database.
mysql = MySQLConnector(app, 'registration')
@app.route('/')
def index():
    """Render the landing page listing every registered user."""
    all_users = mysql.query_db("SELECT * FROM users")
    return render_template('index.html', users=all_users)
@app.route('/users/<user_id>')
def user(user_id):
    """Show the page for a single user looked up by primary key."""
    query = 'SELECT * FROM users WHERE id = :specific_id'
    data = {
        'specific_id': user_id
    }
    user = mysql.query_db(query, data)
    # FIX: guard against an unknown id — query_db yields an empty result and
    # user[0] would raise IndexError (an HTTP 500). Fall back to the index page.
    if not user:
        return redirect('/')
    return render_template('/index.html', user = user[0])
@app.route('/users', methods=['POST'])
def add_user():
    """Validate the registration form and insert a new user on success.

    On failure the submitted values are stashed in the session (so the form
    can be re-filled) and each validation error is flashed.
    """
    # Tuples of (rule, field, value, ...) handed to the project's Validation
    # helper; 'pass_check' additionally carries the confirmation and an
    # apparent 8-16 length range.
    request_form = [
        ('alpha','first_name', request.form['first_name']),
        ('alpha','last_name', request.form['last_name']),
        ('email','email', request.form['email']),
        ('pass_check','password', request.form['password'],request.form['confirm_password'], 8,16)
    ]
    sanitize = Validation(request_form)
    if len(sanitize.errors) == 0:
        query = 'INSERT INTO users (first_name, last_name, email, password, created_at, updated_at) VALUES ( :first_name, :last_name, :email, :password, now(), now())'
        data = sanitize.data
        user_id = mysql.query_db(query, data)
        # NOTE(review): message has a typo ("You user ID") — left untouched
        # because runtime strings are out of scope for a documentation pass.
        flash("You have successfully registered! You user ID is " + str(user_id) + ".")
        # Drop any previously stashed form data now that registration succeeded.
        if 'data' in session:
            session.pop('data')
        return redirect('/')
    else:
        # Preserve the submitted values and surface every validation error.
        session['data'] = sanitize.data
        for error in sanitize.errors:
            flash(error)
        return redirect('/')
app.run(debug=True)
| [
"robertv1979@gmail.com"
] | robertv1979@gmail.com |
59a80fd32689ab9609a62db3fbe8835124d957d7 | b36eace4a335f2518988f5929ffdba6e48c9e97e | /mysite/settings.py | 81460bc3bd4c4677073bbf0441cd2810bd8e8926 | [] | no_license | berkyaglioglu/Post-Sharing-Application | d3c4187a8b05cf34c65fee5cc90ab6b85740e754 | c169af6d27dd55ea711207e3e4b04cc61e678dca | refs/heads/master | 2020-06-22T22:42:56.720956 | 2019-08-01T08:40:04 | 2019-08-01T08:40:04 | 198,419,497 | 0 | 0 | null | 2019-08-01T08:40:06 | 2019-07-23T11:46:57 | Python | UTF-8 | Python | false | false | 3,318 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '!taz!xt^=+9=xh4$4b_0e6qqg$a(*^t_jc01h&@b!k5jdh#$ew'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty is acceptable while DEBUG is True; real hostnames are required in production.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'posts', 'users',  # project apps
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Europe/Istanbul'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/

STATIC_URL = '/static/'
MEDIA_URL = '/media/'

STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# Replace Django's default user model with this project's custom one.
AUTH_USER_MODEL = 'users.User'
| [
"berkyaglioglu@sabanciuniv.edu"
] | berkyaglioglu@sabanciuniv.edu |
99599038e3f80650a765e87ef0ff275f2dcf95a0 | 21d89e598ceae51ff889f3788c728b25cdbc9dbc | /critical_critiques/critical_critiques/views.py | 0f1633895ac0c2e01b1c57e268f1891ebcf1a1c6 | [
"MIT"
] | permissive | team-stroller/critical_critiques | 5723938f49dcfff07f6cfd914ae8202a4354b4d5 | 99f3b073abbfda3ad62ea01f72d39756a1e5b088 | refs/heads/master | 2016-08-08T03:20:21.607260 | 2013-05-11T17:00:08 | 2013-05-11T17:00:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | from django.contrib.auth import logout
from django.views.generic.base import RedirectView
from django.core.urlresolvers import reverse
class LogoutView(RedirectView):
    """Log out the current user, then redirect to the site root ("/")."""
    url = "/"

    def get(self, request, *args, **kwargs):
        # Clear the session before RedirectView issues the redirect.
        logout(request)
        return super(LogoutView, self).get(request, *args, **kwargs)
| [
"mike@mike-burns.com"
] | mike@mike-burns.com |
8679364a43e61b2d99d04acba5b49a59559584f4 | 60e4ce6fd4089e481dc130f0f0e7e95ae3de2e71 | /知识图谱11 数据大清洗1.1.py | 7c10902ddad998c6ae693eb6d3b40c615c47ae19 | [] | no_license | hhxx2015/KnowledgeGraph1.1 | fd6aa1c176eb827fcdfb872b4473cf2c6c6cb308 | a2077762e4d12d5724600dfa8680e98f7dc560c4 | refs/heads/main | 2023-02-03T17:01:06.351425 | 2020-12-28T08:59:15 | 2020-12-28T08:59:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | with open('./data/zstp11_sjdqx_ALL_LABLE.csv', 'w', encoding='utf-8') as ff:
ff.close()
with open('./data/zstp11_sjdqx_ALL_LABLE.csv', 'a+', encoding='utf-8') as ff:
with open('./data/zstp10_gxcz1d2_ALL_LABLE.csv', 'r', encoding='utf-8') as f:
count=0
for line in f:
count+=1
stt=line.strip('\n').replace(' ','').replace('"','').replace("'",'').split(',')
if stt[0]!='' and stt[1]!='' and stt[2]!='':
if count!=1:
ff.write(stt[0]+","+stt[1]+","+stt[2]+'\n') | [
"noreply@github.com"
] | hhxx2015.noreply@github.com |
b039e6acf62b07e17f702ba4b667476922ffc9f5 | ea003fc657c1da911d3389bd627459f7460335f7 | /wagtail_env/lib/python3.5/site-packages/wagtail/core/middleware.py | 3c0181223bd3631ccbcbd22bd2e602db25bc6ad0 | [] | no_license | saikrishnasri/wagtail_project | db839c393ab48001947bd437bd2d7606cd6ed7c0 | f749813943871bd57951636259d451464be581be | refs/heads/main | 2023-02-26T23:59:23.063936 | 2021-02-03T12:14:29 | 2021-02-03T12:14:29 | 335,604,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | import warnings
from django.utils.deprecation import MiddlewareMixin
from wagtail.core.models import Site
from wagtail.utils.deprecation import RemovedInWagtail211Warning
# Deprecation notice emitted at import time: importing this module at all
# means the project still references the removed SiteMiddleware pattern.
warnings.warn(
    'wagtail.core.middleware.SiteMiddleware and the use of request.site is deprecated. '
    'Please update your code to use Site.find_for_request(request) in place of request.site, '
    'and remove wagtail.core.middleware.SiteMiddleware from MIDDLEWARE',
    RemovedInWagtail211Warning
)
class SiteMiddleware(MiddlewareMixin):
    """Deprecated middleware that attaches the matched Site to each request."""

    def process_request(self, request):
        """
        Set request.site to contain the Site object responsible for handling this request,
        according to hostname matching rules
        """
        try:
            request.site = Site.find_for_request(request)
        except Site.DoesNotExist:
            # No site matches the request's hostname; leave an explicit None.
            request.site = None
| [
"saikrishnakerla@gmail.com"
] | saikrishnakerla@gmail.com |
ef3871e6e0bd3f1773995a76bbdcc659792a2f07 | 440f84cb332dc4a9970143a8eac45cfb34dbcc62 | /Semana6_pythonF/ej6.py | dc0dd63f6b19d2380a93eb4b9cef793d6074ff73 | [] | no_license | INS125/Laboratorio | 1c73dc1df50739ad3c8293bbaec30d8210d37dd7 | 4dc2d967a28032e07a4f05e67f1089cdc6cfce38 | refs/heads/main | 2021-07-14T07:59:00.647876 | 2021-06-26T22:33:25 | 2021-06-26T22:33:25 | 356,095,644 | 12 | 16 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | from functools import reduce
lista = [10, 20, 30, 40 , 50, 60]
reduccion = reduce(lambda x,y: x+y, filter(lambda x: x!= 10, lista))
print(reduccion)
red_mult = reduce(lambda x,y: x*y, lista)
print(red_mult)
lista_de_listas = [ [1,2], [3,4], [5,6], [7,8,9] ]
#[ [1,2], [3,4], [5,6], [7,8,9] ]
#[ [1,2,3,4], [5,6], [7,8,9] ]
#[ [1,2,3,4,5,6], [7,8,9] ]
#[ [1,2,3,4,5,6,7,8,9] ]
#[1,2,3,4,5,6,7,8,9]
nueva_lista = reduce(lambda x,y: x + y, lista_de_listas)
print(nueva_lista)
lista_de_listas = [ [1,2], [3,4], [5,6], [7,8,9] ]
otra_lista = map(lambda x_: [i*2 for i in x_], lista_de_listas)
print(list(otra_lista))
otra_lista_map = map(lambda lista_: map(lambda x: x*2, lista_), lista_de_listas)
#map(lambda x: print(list(x)) , otra_lista_map)
for i in otra_lista_map:
print(list(i))
otra_lista_map_2 = map(lambda lista_: map(lambda x: x*2, lista_), lista_de_listas)
print(list(map(lambda x: list(x) , otra_lista_map_2))) | [
"nhormazabal@geovictoria.com"
] | nhormazabal@geovictoria.com |
029601d73023044b3cbf50c8f580bd1ea21d97fb | 37e49d0866f2a3668333e438f89d91c5de181f3b | /singlepage2/singlepage2/urls.py | 1648b5a218ce4d5e55d0a83d6f4d2db94aa70a92 | [] | no_license | pireats-abhi/user-interface-js-django | f6146f128bdb522db4f2b08356b29d5736ffb173 | 56128bea7b1128974401c1ff190f89438dd9e077 | refs/heads/master | 2022-12-23T20:22:53.796265 | 2020-10-02T07:11:48 | 2020-10-02T07:11:48 | 300,529,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | """singlepage2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
    path('admin/', admin.site.urls),          # Django admin site
    path('', include('singlepage.urls'))      # everything else goes to the singlepage app
]
| [
"pireats.abhi@gmail.com"
] | pireats.abhi@gmail.com |
a07c34f83feb8c45f87b09c7e1834de971aaafb7 | 65306b41168a5afa6fc80904cc0fbf737939a01a | /scale/recipe/seed/recipe_connection.py | 6c80d4edab2353b587342b6305fb77cb63e50ceb | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | kfconsultant/scale | 9e5df45cd36211d1bc5e946cf499a4584a2d71de | 28618aee07ceed9e4a6eb7b8d0e6f05b31d8fd6b | refs/heads/master | 2020-12-07T00:04:37.737556 | 2020-01-06T12:57:03 | 2020-01-06T12:57:03 | 232,587,229 | 0 | 0 | Apache-2.0 | 2020-01-08T14:53:36 | 2020-01-08T14:53:35 | null | UTF-8 | Python | false | false | 10,510 | py | """Defines connections that will provide data to execute recipes"""
from __future__ import unicode_literals
from recipe.configuration.data.exceptions import InvalidRecipeConnection
from recipe.configuration.data.recipe_data import ValidationWarning
from storage.media_type import UNKNOWN_MEDIA_TYPE
class RecipeConnection(object):
"""Represents a connection that will provide data to execute recipes. This class contains the necessary description
needed to ensure the data provided by the connection will be sufficient to execute the given recipe.
"""
def __init__(self):
"""Constructor
"""
self.param_names = set()
self.properties = []
self.files = {} # Param name -> (multiple, media types, optional)
self.workspace = False
def add_input_file(self, file_name, multiple, media_types, optional):
"""Adds a new file parameter to this connection
:param file_name: The file parameter name
:type file_name: str
:param multiple: Whether the file parameter provides multiple files (True)
:type multiple: bool
:param media_types: The possible media types of the file parameter (unknown if None or [])
:type media_types: list of str
:param optional: Whether the file parameter is optional and may not be provided (True)
:type optional: bool
"""
if file_name in self.param_names:
raise Exception('Connection already has a parameter named %s' % file_name)
if not media_types:
media_types = [UNKNOWN_MEDIA_TYPE]
self.param_names.add(file_name)
self.files[file_name] = (multiple, media_types, optional)
def add_property(self, property_name):
"""Adds a new property parameter to this connection
:param property_name: The property parameter name
:type property_name: str
"""
if property_name in self.param_names:
raise Exception('Connection already has a parameter named %s' % property_name)
self.param_names.add(property_name)
self.properties.append(property_name)
def add_workspace(self):
"""Indicates that this connection provides a workspace for storing output files
"""
self.workspace = True
def has_workspace(self):
"""Indicates whether this connection provides a workspace for storing output files
:returns: True if this connection provides a workspace, False otherwise
:rtype: bool
"""
return self.workspace
def validate_input_files(self, files):
"""Validates the given file parameters to make sure they are valid with respect to the recipe definition.
:param files: Dict of file parameter names mapped to a tuple with three items: whether the parameter is required
(True), if the parameter is for multiple files (True), and the description of the expected file meta-data
:type files: dict of str ->
tuple(bool, bool, :class:`job.configuration.interface.scale_file.ScaleFileDescription`)
:returns: A list of warnings discovered during validation.
:rtype: list[:class:`recipe.configuration.data.recipe_data.ValidationWarning`]
:raises :class:`recipe.configuration.data.exceptions.InvalidRecipeConnection`: If there is a configuration
problem
"""
warnings = []
for name in files:
required = files[name][0]
multiple = files[name][1]
file_desc = files[name][2]
if name not in self.files:
if required:
raise InvalidRecipeConnection('Data input %s is required and was not provided' % name)
continue
conn_file = self.files[name]
conn_multiple = conn_file[0]
conn_media_types = conn_file[1]
conn_optional = conn_file[2]
if conn_optional and required:
raise InvalidRecipeConnection('Data input %s is required and data from connection is optional' % name)
if not multiple and conn_multiple:
raise InvalidRecipeConnection('Data input %s only accepts a single file' % name)
for conn_media_type in conn_media_types:
if not file_desc.is_media_type_allowed(conn_media_type):
warn = ValidationWarning('media_type',
'Invalid media type for data input: %s -> %s' % (name, conn_media_type))
warnings.append(warn)
return warnings
def validate_input_json(self, property_names):
    """Verify that every required property is provided by this connection.

    :param property_names: Property name -> whether the property is required
    :type property_names: dict of str -> bool
    :returns: Warnings discovered during validation (currently always empty)
    :rtype: list[:class:`recipe.configuration.data.recipe_data.ValidationWarning`]
    :raises :class:`recipe.configuration.data.exceptions.InvalidRecipeConnection`:
        If a required property is missing
    """
    warnings = []
    for name, required in property_names.items():
        if required and name not in self.properties:
            raise InvalidRecipeConnection('Property %s is required and was not provided' % name)
    return warnings
class SeedRecipeConnection(object):
    """Describes the data a connection can supply to execute Seed jobs.

    Holds enough metadata about file and property parameters (and whether a
    workspace is available) to validate, ahead of time, that the connection
    can satisfy a job's input interface.
    """

    def __init__(self):
        """Create a connection with no parameters and no workspace."""
        self.param_names = set()
        self.properties = []
        # Param name -> (multiple, media types, optional, partial)
        self.files = {}
        self.workspace = False

    def add_input_file(self, file_name, multiple, media_types, optional, partial):
        """Register a new file parameter on this connection.

        :param file_name: The file parameter name
        :type file_name: str
        :param multiple: Whether the parameter provides multiple files
        :type multiple: bool
        :param media_types: Possible media types (unknown if None or empty)
        :type media_types: list of str
        :param optional: Whether the parameter may not be provided
        :type optional: bool
        :param partial: Whether only a small portion of the file is required
        :type partial: bool
        """
        if file_name in self.param_names:
            raise Exception('Connection already has a parameter named %s' % file_name)
        self.param_names.add(file_name)
        # Fall back to the unknown-media-type marker when none were given.
        self.files[file_name] = (multiple, media_types or [UNKNOWN_MEDIA_TYPE], optional, partial)

    def add_property(self, property_name):
        """Register a new property parameter on this connection.

        :param property_name: The property parameter name
        :type property_name: str
        """
        if property_name in self.param_names:
            raise Exception('Connection already has a parameter named %s' % property_name)
        self.param_names.add(property_name)
        self.properties.append(property_name)

    def add_workspace(self):
        """Mark this connection as supplying a workspace for output files."""
        self.workspace = True

    def has_workspace(self):
        """Report whether this connection supplies a workspace for output files.

        :rtype: bool
        """
        return self.workspace

    def validate_input_files(self, files):
        """Validate the given Seed file inputs against this connection.

        :param files: List of file inputs
        :type files: [:class:`job.seed.types.SeedInputFiles`]
        :returns: Warnings discovered during validation
        :rtype: list[:class:`job.configuration.data.job_data.ValidationWarning`]
        :raises :class:`job.configuration.data.exceptions.InvalidConnection`:
            If the connection cannot satisfy an input
        """
        warnings = []
        for seed_file in files:
            if seed_file.name not in self.files:
                if seed_file.required:
                    raise InvalidRecipeConnection('Data input %s is required and was not provided' % seed_file.name)
                continue
            entry = self.files[seed_file.name]
            provides_multiple, media_types, is_optional = entry[0], entry[1], entry[2]
            if is_optional and seed_file.required:
                raise InvalidRecipeConnection('Data input %s is required and data from connection is optional' %
                                              seed_file.name)
            if provides_multiple and not seed_file.multiple:
                raise InvalidRecipeConnection('Data input %s only accepts a single file' % seed_file.name)
            for media_type in media_types:
                if not seed_file.is_media_type_allowed(media_type):
                    warnings.append(ValidationWarning(
                        'media_type',
                        'Invalid media type for data input: %s -> %s' %
                        (seed_file.name, media_type)))
        return warnings

    def validate_properties(self, property_names):
        """Verify that every required property is provided by this connection.

        :param property_names: Property name -> whether the property is required
        :type property_names: dict of str -> bool
        :returns: Warnings discovered during validation (currently always empty)
        :rtype: list[:class:`job.configuration.data.job_data.ValidationWarning`]
        :raises :class:`job.configuration.data.exceptions.InvalidConnection`:
            If a required property is missing
        """
        warnings = []
        for name, required in property_names.items():
            if required and name not in self.properties:
                raise InvalidRecipeConnection('Property %s is required and was not provided' % name)
        return warnings
| [
"jon@gisjedi.com"
] | jon@gisjedi.com |
90a81f052f088c36ebd6b1e883906c583d2493e2 | 96b55c342ca2269270d7f6623c050fac10651aa7 | /valid-palindrome.py | 3267eaad299a06784671bc60cecebf642d768614 | [] | no_license | nancy-cai/Data-Structure-and-Algorithm-in-Python | bbb5e54b9c8fc23669b8e4df9daeb1ee0480ff39 | 84258f5d848d1d9aed5a0ea98af0a06b70b716f4 | refs/heads/master | 2022-07-19T04:04:27.861406 | 2022-06-24T06:49:02 | 2022-06-24T06:49:02 | 199,798,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # https://leetcode.com/problems/valid-palindrome/
# def isPalindrome(s):
# trimmedS = ''.join(c for c in s if c.isalnum()).lower()
# reversedString = ""
# for i in range(len(trimmedS),0,-1):
# reversedString += trimmedS[i-1]
# print(reversedString)
# print (trimmedS)
# if trimmedS == reversedString:
# return True
# else:
# return False
def isPalindrome(s):
    """Return True when s is a palindrome, ignoring case and non-alphanumerics."""
    kept = [ch.lower() for ch in s if ch.isalnum()]
    return kept == kept[::-1]
print(isPalindrome("A man, a plan, a canal: Panama"))
# Fastest solution, O(n)
def isPalindromeTwoPointers(s):
l = 0
r = len(s)-1
while l <= r:
if not s[l].isalnum():
l += 1
elif not s[r].isalnum():
r -= 1
else:
if s[l].lower() == s[r].lower():
l += 1
r -= 1
else:
return False
return True
print(isPalindromeTwoPointers("A man, a plan,,^ a canal: Panama"))
| [
"noreply@github.com"
] | nancy-cai.noreply@github.com |
6a19fed144487fa7d519f1231fecc8f5b63bc650 | 830bcc71b7924094644f36a1e3f670086a8ea109 | /deep_cluster/triplets/triplets_gui.py | 7295591f07163a9076a557d3c766497efc442060 | [] | no_license | ysterin/deep_cluster | 550ec9b6a4266603cf275e5094b2d435426b0359 | e1d5f8d01332d3bcb9c7de75340a68e06e14e88e | refs/heads/master | 2023-06-13T08:48:25.566379 | 2021-07-11T17:53:15 | 2021-07-11T17:53:15 | 294,084,774 | 0 | 0 | null | 2020-12-22T17:47:53 | 2020-09-09T10:54:11 | Jupyter Notebook | UTF-8 | Python | false | false | 17,920 | py | import sys
# sys.path.append('..')
import os
import numpy as np
import cv2 as cv
from scipy import signal as sig
from collections import defaultdict
from pathlib import Path
from contextlib import contextmanager
from deep_cluster.dataloader import LandmarkDataset, SequenceDataset
from matplotlib import pyplot as plt
from collections import Counter
import torch
from torch.utils.data import ConcatDataset
import pickle
import re
import math
import time
import tkinter as tk
from PIL import ImageTk, Image
from threading import Thread, Event
from multiprocessing import Process
from deep_cluster.triplets.landmarks_video import Video, LandmarksVideo
from deep_cluster.triplets.sample_triplets import Segment, triplets_segments_gen, load_segments
import math
from contextlib import contextmanager
@contextmanager
def timethis(label):
    """Context manager that prints how long its body took, tagged with *label*."""
    started = time.time()
    yield
    elapsed = time.time() - started
    print(f"{label} took {elapsed} seconds")
def get_seg_clip(vid, seg, n_frames, fps=120):
    """Extract exactly `n_frames` frames for segment `seg` from `vid`.

    Frames are subsampled to approximately `fps`; the clip is then trimmed
    evenly from both ends if too long, or padded by repeating the first/last
    frame if too short. Returns a stacked numpy array of frames.
    """
    step = int(math.ceil(vid.fps / fps))
    clip = list(vid[seg.start_frame: seg.start_frame + seg.n_frames: step])
    extra = len(clip) - n_frames
    if extra > 0:
        # Too long: drop floor(extra/2) from the front, ceil(extra/2) from the back.
        drop_head = extra // 2
        drop_tail = extra - drop_head
        clip = clip[drop_head: -drop_tail]
    missing = n_frames - len(clip)
    head_pad = missing // 2
    tail_pad = missing - head_pad
    clip = [clip[0]] * head_pad + clip + [clip[-1]] * tail_pad
    return np.stack(clip)
def get_random_clips(vid, duration=0.5, max_n_clips=100, fps=60):
    """Yield up to `max_n_clips` groups of three random clips from `vid`.

    Each group is built from three uniformly random start indices in
    [0, 3e5); each clip spans `duration` seconds of the source video,
    subsampled to approximately `fps`.
    """
    step = int(math.ceil(vid.fps / fps))
    frames_per_clip = int(duration * vid.fps)
    for _ in range(max_n_clips):
        starts = np.random.randint(0, 3 * 10 ** 5, size=(3,))
        yield [vid[start: start + frames_per_clip: step] for start in starts]
'''
A widget for displaying animation.
root: parent of widget
frames: list of frames to animate, each frame is a numpy array.
n_frames: number of frames to show in the animation - if less then length of frames, discard frames from beginning
and end as needed. if more, pad with same frame in beginning and end.
'''
class Animation(tk.Canvas):
    # Tkinter canvas that plays a list of numpy-array frames as a looping
    # animation on a background thread until destroy() is called.
    def __init__(self, root, frames, n_frames=None, fps=30, *args, **kwargs):
        # self.n_frames = len(frames)
        self.interval = 1 / fps  # seconds between displayed frames
        self.root = root
        self.stop = Event()  # set to tell the animation thread to exit
        if 'width' in kwargs:
            width = kwargs['width']
            height = kwargs['height']
        else:
            # Derive the canvas size from the first frame's array shape.
            height, width, *_ = frames[0].shape
            if 'rescale' in kwargs:
                height, width = int(height * kwargs['rescale']), int(width * kwargs['rescale'])
        tk.Canvas.__init__(self, root, width=width, height=height, *args)
        self.n_frames = n_frames if n_frames else len(frames)
        if self.n_frames < len(frames):
            # Too many frames: discard evenly from the beginning and end.
            n_frames_to_discard = len(frames) - self.n_frames
            n_frames_to_discard_beginning = math.floor(n_frames_to_discard / 2)
            n_frames_to_discard_end = math.ceil(n_frames_to_discard / 2)
            frames = frames[n_frames_to_discard_beginning: -n_frames_to_discard_end]
        self.images = [ImageTk.PhotoImage(Image.fromarray(frame).resize((width, height))) for frame in frames]
        # Too few frames: pad by repeating the first/last image.
        n_pad = self.n_frames - len(frames)
        self.pad_beginning, self.pad_ending = math.floor(n_pad / 2), math.ceil(n_pad / 2)
        self.images = [self.images[0]] * self.pad_beginning + self.images + [self.images[-1]] * self.pad_ending
        self.pack()
        self.thread = Thread(target=self.animation)
        # self.thread.setDaemon(True)
        self.root.after(0, self.thread.start)

    def animation(self):
        # Thread body: cycle through the images until `stop` is set.
        try:
            while not self.stop.is_set():
                for i in range(self.n_frames):
                    time.sleep(self.interval)
                    if self.stop.is_set():
                        return
                    self.create_image(0, 0, image=self.images[i], anchor='nw')
                    self.update()
        except tk.TclError as e:
            # Widget was destroyed while the thread was still drawing.
            print("[INFO] caught a RuntimeError")

    def destroy(self):
        # Release the images, signal the thread to stop, and tear down the widget.
        del self.images
        self.stop.set()
        self.thread.join(0.1)
        super().destroy()
class ClipsDisplay(tk.Frame):
    # Frame showing the anchor clip beside two candidate clips, each candidate
    # with a radio button so the annotator can pick the more similar one.
    # @profile
    def __init__(self, root, clips, fps=30, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        self.window = tk.Frame(self)
        # Use the mean clip length so all three animations stay in sync.
        n_frames = int(np.mean([len(clip) for clip in clips]))
        self.anchor_anim = Animation(self, clips[0], fps=fps, n_frames=n_frames, rescale=1.5)  # , width=100, height=240)
        self.anchor_anim.pack(side=tk.LEFT)
        pos_frame = tk.Frame(self.window)
        neg_frame = tk.Frame(self.window)
        choice_var = tk.IntVar(self)
        self.choice_var = choice_var  # 0 = neither, 1 = left clip, 2 = right clip
        radio_button_1 = tk.Radiobutton(pos_frame, var=choice_var, value=1)
        radio_button_2 = tk.Radiobutton(neg_frame, var=choice_var, value=2)
        self.pos_anim = Animation(pos_frame, clips[1], fps=fps, n_frames=n_frames, rescale=1.5)  # , width=100, height=200)
        self.neg_anim = Animation(neg_frame, clips[2], fps=fps, n_frames=n_frames, rescale=1.5)  # , width=100, height=200)
        self.pos_anim.pack()
        self.neg_anim.pack()
        radio_button_1.pack(side=tk.BOTTOM)
        radio_button_2.pack(side=tk.BOTTOM)
        pos_frame.pack(side=tk.LEFT)
        neg_frame.pack(side=tk.LEFT)
        self.window.pack()
        self.pack()

    def destroy(self):
        # Stop all three animation threads before tearing down the widget.
        self.anchor_anim.destroy()
        self.pos_anim.destroy()
        self.neg_anim.destroy()
        super().destroy()
import pandas as pd
class App(tk.Frame):
    # Annotation GUI: repeatedly shows an anchor clip plus two randomly sampled
    # candidate clips from `video`, records which candidate the annotator judged
    # more similar, and periodically appends the choices to a CSV file.
    def __init__(self, root, video, encoded=None, save_file=None, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        self.root = root
        self.encoded = encoded  # optional per-frame embeddings used to display distances
        # self.df = pd.DataFrame(columns=['video_file', 'anchor', 'sample1', 'sample2', 'selected'], index=pd.Index(np.arange(100)))
        self.saved_triplets = []  # buffered annotation rows, flushed to CSV by save()
        self.video = video
        if not save_file:
            save_file = 'data/selected_triplets.csv'
        self.save_file = save_file
        # self.triplets_gen = triplet_segment_gen
        self.i = 1  # 1-based counter of triplets shown so far
        self.display = tk.Frame()
        self.display.pack()
        self.next_button = tk.Button(self, command=self.next, text="NEXT")
        self.next_button.pack(side=tk.BOTTOM)
        self.ilabel = tk.Label(self, text=str(self.i), font=("Courier", 32))
        self.ilabel.pack(side=tk.LEFT)
        # NOTE(review): `or True` forces this branch on, so the labels always
        # exist; reload_display() writes to them unconditionally.
        if self.encoded is not None or True:
            self.dist_label1 = tk.Label(self, text='distance from anchor to positive:')
            self.dist_label1.pack(side=tk.BOTTOM)
            self.dist_label2 = tk.Label(self, text='distance from anchor to negative:')
            self.dist_label2.pack(side=tk.BOTTOM)
            self.dist_label3 = tk.Label(self, text='distance from negative to positive:')
            self.dist_label3.pack(side=tk.BOTTOM)
        self.bind('<Return>', lambda event: self.save())
        self.bind('<space>', self.next)
        self.bind('w', self.next)
        self.bind('q', lambda event: self.quit())
        self.focus_set()
        self.load_clips()
        self.reload_display()
        # Keyboard shortcuts mirroring the radio buttons: left/right candidate, none.
        self.bind('<Left>', lambda event: self.display.choice_var.set(1))
        self.bind('<Right>', lambda event: self.display.choice_var.set(2))
        self.bind('<Down>', lambda event: self.display.choice_var.set(0))
        self.bind('a', lambda event: self.display.choice_var.set(1))
        self.bind('d', lambda event: self.display.choice_var.set(2))
        self.bind('s', lambda event: self.display.choice_var.set(0))
        self.bind('e', lambda event: self.save())
        self.pack()

    def filter_segment(self, segment):
        # Accept a candidate segment only if landmark tracking is confident
        # (>0.9 likelihood on >=80% of frames, tail points excluded) and the
        # low-frequency motion energy is above a threshold.
        segment_df = self.video.landmarks.df.loc[segment].drop(['tail1', 'tail2', 'tail3'], axis=1, level=0)
        confidence = np.prod(segment_df.xs('likelihood', level=1, axis=1, drop_level=True) > 0.9, axis=1)
        if confidence.mean() < 0.8:
            return False
        data = segment_df.drop('likelihood', level=1, axis=1).values.astype(np.float32)
        ff, Pxx = sig.periodogram(data.T, fs=segment_df.attrs['fps'])
        energy = Pxx[:, :10].mean()
        if energy < 2e0:
            return False
        return True

    def sample_random_triplets(self, n_frames=60, fps=120):
        # Draw random segments until three pass filter_segment().
        selected_segments = []
        n_tries = 0
        while len(selected_segments) < 3:
            n_tries += 1
            start_idx = np.random.randint(len(self.video) - self.video.fps)
            segment = slice(start_idx, start_idx + n_frames, int(math.ceil(self.video.fps / fps)))
            if self.filter_segment(segment):
                selected_segments.append(segment)
            if n_tries > 1000:
                raise Exception("too many tries to sample segments")
        self.segments = selected_segments
        # enc_segments = [slice(idx // 4 + 15, (idx + n_frames) // 4 - 15 + 1) for idx in random_idxs]
        if self.encoded is not None:
            # NOTE(review): `random_idxs` is not defined in this method (leftover
            # from an older revision) — this line raises NameError whenever
            # `encoded` is supplied. TODO: derive indices from self.segments.
            self.clip_encodeings = [self.encoded[idx // int(len(self.video) / len(self.encoded))] for idx in random_idxs]
            # print(self.clip_encodeings[0].shape)
        if hasattr(self, 'clips'):
            del self.clips
        self.clips = [self.video[seg] for seg in self.segments]

    def next(self, event=None):
        # Record the current choice and advance to the next triplet.
        self.save_triplet()
        self.i += 1
        self.reload_display()

    def reload_display(self):
        # Show the triplet loaded last time, then pre-load the next one.
        self.display.destroy()
        self.display = ClipsDisplay(self, self.clips, fps=60)
        self.display.pack(side=tk.TOP)
        if self.encoded is not None:
            dist1 = np.linalg.norm(self.clip_encodeings[0] - self.clip_encodeings[1])
            dist2 = np.linalg.norm(self.clip_encodeings[0] - self.clip_encodeings[2])
            dist3 = np.linalg.norm(self.clip_encodeings[2] - self.clip_encodeings[1])
            print(dist1, dist2, dist3)
        # self.dist_label1['text'] = f"distance between anchor and sample1: {diffs[0]:.2f}"
        # self.dist_label2['text'] = f"distance between anchor and sample2: {diffs[1]:.2f}"
        # self.dist_label3['text'] = f"distance between sample2 and sample1: {diffs[2]:.2f}"
        # Per-segment motion score: mean frame-to-frame displacement of the
        # low-pass-filtered landmark positions.
        diffs = []
        for i in range(3):
            seg = self.segments[i]
            data = self.video.landmarks[seg].T
            filt = sig.butter(4, 3, btype='low', output='ba', fs=self.video.fps)
            filtered = sig.filtfilt(*filt, data).T
            diffs.append(np.linalg.norm(np.diff(filtered, axis=0), axis=-1).mean())
        self.dist_label1['text'] = f"anchor: {diffs[0]:.4f}"
        self.dist_label2['text'] = f"sample 1: {diffs[1]:.4f}"
        self.dist_label3['text'] = f"sample2: {diffs[2]:.4f}"
        self.load_clips()

    def save_triplet(self):
        # Buffer the current triplet plus the annotator's choice; flush every 20.
        self.saved_triplets.append({'video_file': self.video.video_file,
                                    'anchor': (self.segments[0].start, self.segments[0].stop),
                                    'sample1': (self.segments[1].start, self.segments[1].stop),
                                    'sample2': (self.segments[2].start, self.segments[2].stop),
                                    'selected': self.display.choice_var.get()})
        if self.i % 20 == 0:
            self.save()

    # @profile
    def load_clips(self):
        # Sample the next triplet of clips and update the counter label.
        print("start load clips")
        # anchor, pos, neg = next(self.triplets_gen)
        # self.clips = [get_seg_clip(self.video, seg, n_frames=60, fps=120) for seg in [anchor, pos, neg]]
        self.sample_random_triplets()
        self.ilabel['text'] = self.i
        print("finish load clips")

    def save(self):
        # Append buffered triplets to the CSV file, then clear the buffer.
        df = pd.DataFrame.from_records(self.saved_triplets)
        # save_path = 'data/selected_triplets.csv'
        # import pdb; pdb.set_trace()
        mode = 'a' if os.path.exists(self.save_file) else 'w'
        df.to_csv(path_or_buf=self.save_file, mode=mode)
        self.saved_triplets = []

    def quit(self):
        # Flush any unsaved annotations before shutting down the main loop.
        print("quitting")
        self.save()
        self.root.quit()
def decode_seg_string(seg_string):
    """Parse a "(start, end)" string into a (start, end) tuple of ints.

    :raises ValueError: if the inner text is not two comma-separated integers
    """
    first, second = seg_string[1:-1].split(',')
    return (int(first), int(second))
class VerificationApp(tk.Frame):
    # Second-pass GUI: replays triplets previously recorded in `df` so an
    # annotator can re-select, for measuring annotation agreement.
    def __init__(self, root, video, df, encoded=None, to_save=True, start_idx=-1, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        self.root = root
        self.df = df  # dataframe of previously annotated triplets
        self.to_save = to_save  # when False, save() is a no-op
        self.encoded = encoded
        # self.df = pd.DataFrame(columns=['video_file', 'anchor', 'sample1', 'sample2', 'selected'], index=pd.Index(np.arange(100)))
        self.saved_triplets = []
        self.video = video
        self.i = start_idx  # row index into df; new_triplet() pre-increments
        self.display = tk.Frame()
        self.display.pack()
        self.next_button = tk.Button(self, command=self.next, text="NEXT")
        self.next_button.pack(side=tk.BOTTOM)
        self.bind('<Return>', lambda event: self.save())
        self.bind('<space>', self.next)
        self.bind('w', self.next)
        self.bind('q', lambda event: self.quit())
        if self.encoded is not None:
            self.dist_label1 = tk.Label(self, text='distance from anchor to positive:')
            self.dist_label1.pack(side=tk.BOTTOM)
            self.dist_label2 = tk.Label(self, text='distance from anchor to negative:')
            self.dist_label2.pack(side=tk.BOTTOM)
            self.dist_label3 = tk.Label(self, text='distance from negative to positive:')
            self.dist_label3.pack(side=tk.BOTTOM)
        self.focus_set()
        self.load_clips()
        self.reload_display()
        # Keyboard shortcuts mirroring the radio buttons.
        self.bind('<Left>', lambda event: self.display.choice_var.set(1))
        self.bind('<Right>', lambda event: self.display.choice_var.set(2))
        self.bind('<Down>', lambda event: self.display.choice_var.set(0))
        self.bind('a', lambda event: self.display.choice_var.set(1))
        self.bind('d', lambda event: self.display.choice_var.set(2))
        self.bind('s', lambda event: self.display.choice_var.set(0))
        self.bind('e', lambda event: self.save())
        self.pack()

    def next(self, event=None):
        # Record the re-selection and advance to the next stored triplet.
        self.save_triplet()
        self.reload_display()

    def new_triplet(self, fps=120):
        # Advance to the next dataframe row and decode its three segments.
        self.i += 1
        row = self.df.iloc[self.i]
        sample_names = ['anchor', 'sample1', 'sample2']
        try:
            segments = [decode_seg_string(row[sample]) for sample in sample_names]
        except ValueError as e:
            # Malformed row: report it and keep the previous clips.
            print(e)
            # import pdb; pdb.set_trace()
            return
        self.segments = [slice(seg[0], seg[1], int(math.ceil(self.video.fps / fps))) for seg in segments]
        if self.encoded is not None:
            # NOTE(review): `random_idxs` is undefined here (copied from an older
            # App revision) — this raises NameError whenever `encoded` is given.
            self.clip_encodeings = [self.encoded[idx // int(len(self.video) / len(self.encoded))] for idx in random_idxs]
        self.clips = [self.video[seg] for seg in self.segments]

    def reload_display(self):
        # Show the triplet loaded last time, then pre-load the next one.
        self.display.destroy()
        self.display = ClipsDisplay(self, self.clips, fps=30)
        self.display.pack(side=tk.TOP)
        if self.encoded is not None:
            dist1 = np.linalg.norm(self.clip_encodeings[0] - self.clip_encodeings[1])
            dist2 = np.linalg.norm(self.clip_encodeings[0] - self.clip_encodeings[2])
            dist3 = np.linalg.norm(self.clip_encodeings[2] - self.clip_encodeings[1])
            print(dist1, dist2, dist3)
            self.dist_label1['text'] = f"distance between anchor and sample1: {dist1:.2f}"
            self.dist_label2['text'] = f"distance between anchor and sample2: {dist2:.2f}"
            self.dist_label3['text'] = f"distance between sample2 and sample1: {dist3:.2f}"
        self.load_clips()

    def save_triplet(self):
        # Record both the original selection and the verification selection.
        self.saved_triplets.append({'video_file': self.video.video_file,
                                    'anchor': (self.segments[0].start, self.segments[0].stop),
                                    'sample1': (self.segments[1].start, self.segments[1].stop),
                                    'sample2': (self.segments[2].start, self.segments[2].stop),
                                    'selected': self.df.iloc[self.i]['selected'],
                                    'selected_verification': self.display.choice_var.get()})

    def load_clips(self):
        # Pre-load the next stored triplet.
        print("start load clips")
        # anchor, pos, neg = next(self.triplets_gen)
        # self.clips = [get_seg_clip(self.video, seg, n_frames=60, fps=120) for seg in [anchor, pos, neg]]
        self.new_triplet()
        print("finish load clips")

    def save(self):
        # Append buffered rows to the verification CSV unless saving is disabled.
        if not self.to_save:
            return
        print("saving dataframe...")
        df = pd.DataFrame.from_records(self.saved_triplets)
        path = 'triplets/data/selected_triplets_verificatio1.csv'
        mode = 'a' if os.path.exists(path) else 'w'
        df.to_csv(path_or_buf=path, mode=mode)
        self.saved_triplets = []

    def quit(self):
        print("quitting")
        self.save()
        self.root.quit()
# data_root = Path("/home/orel/Storage/Data/K6/2020-03-26/Down")
#data_root = Path("/mnt/storage2/shuki/data/THEMIS/0015")
# landmark_file = data_root/'2020-03-23'/'Down'/'0008DeepCut_resnet50_Down2May25shuffle1_1030000.h5'
# video_file = data_root/'2020-03-23'/'Down'/'0008.MP4'
def __main__():
    # Launch the triplet-annotation GUI on one hard-coded session directory.
    print(os.getcwd())
    data_root = Path("/mnt/Storage1/Data/K7/")
    root = tk.Tk()
    # Pick the sixth "Down" camera session under the data root.
    vid_dir = list(data_root.glob('2020-*/Down/'))[5]
    print(vid_dir)
    video = LandmarksVideo(vid_dir, include_landmarks=True)
    print(video.fps)
    app = App(root, video, save_file='data/robust_triplets1.csv')
    root.mainloop()


if __name__ == '__main__':
    __main__()
| [
"shukistern@gmail.com"
] | shukistern@gmail.com |
77858201d3e474368bf11eb53a147fa09d12a3b1 | cc4269b6b2c7ca8a9869a36452237d22ae251754 | /backend/app/api/routes/root.py | a8dafb1437439f1968fd823ab8f07f72f777cfad | [] | no_license | AKCEJIEPATOP/peak_flow_meter | f6ef7ac559d6941d2e8b375ca3e2288ac2b18769 | ad4aedd61ddcc5027d052437ed3a7ec28670f9d7 | refs/heads/master | 2023-01-19T14:54:12.063563 | 2020-12-02T23:48:02 | 2020-12-02T23:48:02 | 297,611,999 | 0 | 0 | null | 2020-12-01T06:09:33 | 2020-09-22T10:17:40 | JavaScript | UTF-8 | Python | false | false | 210 | py | from fastapi import APIRouter
from . import user
router = APIRouter()
router.include_router(user.router, prefix='/user', tags=['User'])
@router.get('/ping')
async def ping():
    """Health-check endpoint: always returns the literal string 'pong'."""
    return 'pong'
| [
"zhek26rus@google.com"
] | zhek26rus@google.com |
8f6d37f5508564a602db569ef8a1464aa1e84a87 | 53c983bbae20ec053ac6edfacbdf8f85b304911b | /tfomics/model_custom.py | eb243b858ee904170acc8ffac5de6836d65adb24 | [
"MIT"
] | permissive | p-koo/tfomics | 4efcad5c94c77a2522e004d9eb169aead893118a | 3db5e7ae7fd379b3c1a26f693504e28d4e3dceb0 | refs/heads/master | 2022-06-17T09:57:29.843982 | 2021-12-05T03:48:43 | 2021-12-05T03:48:43 | 269,372,383 | 4 | 7 | MIT | 2022-05-10T21:23:36 | 2020-06-04T13:50:15 | Python | UTF-8 | Python | false | false | 2,382 | py | from tensorflow import keras
import tensorflow as tf
import numpy as np
class CustomModel(keras.Model):
    """ Example of a custom model in keras """
    # Overrides train_step/test_step so fit()/evaluate() use a hand-written
    # forward/backward pass while keeping compile()-configured loss and metrics.

    def train_step(self, data):
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        if len(data) == 3:
            x, y, sample_weight = data
        else:
            x, y = data
            sample_weight = None
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)  # Forward pass
            # Compute the loss value.
            # The loss function is configured in `compile()`.
            loss = self.compiled_loss(
                y,
                y_pred,
                sample_weight=sample_weight,
                regularization_losses=self.losses,
            )
        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Update the metrics.
        # Metrics are configured in `compile()`.
        self.compiled_metrics.update_state(y, y_pred, sample_weight=sample_weight)
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {m.name: m.result() for m in self.metrics}

    def test_step(self, data):
        # Unpack the data
        x, y = data
        # Compute predictions
        y_pred = self(x, training=False)
        # Updates the metrics tracking the loss
        self.compiled_loss(y, y_pred, regularization_losses=self.losses)
        # Update the metrics.
        self.compiled_metrics.update_state(y, y_pred)
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {m.name: m.result() for m in self.metrics}

    # @property
    # def metrics(self):
    # We list our `Metric` objects here so that `reset_states()` can be
    # called automatically at the start of each epoch
    # or at the start of `evaluate()`.
    # If you don't implement this property, you have to call
    # `reset_states()` yourself at the time of your choosing.
    # return [loss_tracker, mae_metric]
| [
"koolaboratory.gmail.com"
] | koolaboratory.gmail.com |
dfc012c6ff96ced457c4637729049e7122b56867 | b186998016df23c04a2104537f16adad6b76bce2 | /turtle dot using loop.py | 19721c21c769c439c4fde7781fb67ff47ada6319 | [] | no_license | SoyabulIslamLincoln/Home-Practice | 13f7fbc7e2320477876f44231efd8640b7e9e031 | 09183ce5a5a0808c14b2eba273ccf5799c37316d | refs/heads/master | 2022-12-12T09:31:24.699697 | 2020-09-03T15:59:18 | 2020-09-03T15:59:18 | 292,612,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | Python 3.7.0 (v3.7.0:1bf9cc5093, Jun 27 2018, 04:59:51) [MSC v.1914 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> import turtle
>>> turtle.shape("turtle")
>>> turtle.speed(1)
>>> for i in range(20):
turtle.forward(10)
turtle.penup()
turtle.forward(2)
turtle.pendown()
>>> turtle.exitonclick()
Traceback (most recent call last):
File "<pyshell#9>", line 1, in <module>
turtle.exitonclick()
File "<string>", line 5, in exitonclick
turtle.Terminator
>>>
| [
"noreply@github.com"
] | SoyabulIslamLincoln.noreply@github.com |
efe09aef39889d38f44489e3f2c1b0308a60216d | 39c7c59020e823b3cff15537a8e336d4bcd8d801 | /getSolidotContentByDate.py | d45905d1b5d92bf11b9e965540d62e9026cbb830 | [] | no_license | glodsky/curlSolidotNewByDate | 7638a24797a96600432fc5fd683bf664ef031255 | 423c7cb1a7fdfe186bd6c574c0e7b7af945b6a64 | refs/heads/master | 2020-04-08T18:53:37.052279 | 2018-12-04T04:35:00 | 2018-12-04T04:35:00 | 159,629,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,493 | py | # -*- coding: utf-8 -*-
# python version : 3.6
import urllib.request
import pandas as pd
import os
import json
import datetime
import random
from lxml import etree
# Global pool of proxy dictionaries, each shaped {protocol: "ip:port"}.
Proxies_POOLs = []


def init_proxiesPOOLs():
    """Populate the global proxy pool from ./prxies_pools.csv.

    Each CSV row is expected to be "ip,port,protocol"; extra columns are
    ignored. Every row is appended to Proxies_POOLs as {protocol: "ip:port"}.
    Improvements over the original: the redundant manual f.close() inside the
    `with` block is gone, and the file is iterated directly instead of via
    readlines() + range(len(...)).
    """
    with open('./prxies_pools.csv', 'r') as f:
        for line in f:
            details = line.split(',')
            # The protocol column may carry a trailing newline when it is last.
            proxy = {details[2].strip('\n'): "%s:%s" % (details[0], details[1])}
            Proxies_POOLs.append(proxy)


def get_OneProxy():
    """Return one proxy dict picked uniformly at random from the pool."""
    return random.choice(Proxies_POOLs)
def use_proxy(url):
    # Fetch `url` with a browser User-Agent and return the body decoded as
    # UTF-8 (undecodable bytes are ignored).
    # NOTE(review): proxy_addr is hard-coded to None (get_OneProxy() is
    # commented out), so ProxyHandler receives None and the scraped proxy pool
    # is never actually used — confirm whether this is intentional.
    req = urllib.request.Request(url)
    proxy_addr = None  # get_OneProxy()
    req.add_header("User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0")
    proxy = urllib.request.ProxyHandler(proxy_addr)
    opener = urllib.request.build_opener(proxy, urllib.request.HTTPHandler)
    urllib.request.install_opener(opener)
    data = urllib.request.urlopen(req).read().decode('utf-8', 'ignore')
    return data
def get_OneDayInformation(url, peroid):
    # Scrape one Solidot daily-issue page, print each story, and write the
    # collected stories to ./SolidotNews_<peroid>.json.
    # NOTE(review): the output is written with str(day_news), i.e. Python repr,
    # not json.dumps — despite the .json extension. Confirm downstream readers.
    content = use_proxy(url)
    # print(content)
    day_news = []
    try:
        html = etree.HTML(content)
        titles = html.xpath('//div[@class="bg_htit"]/h2/a/text()')
        talk_times = html.xpath('//div[@id="center"]/div[@class="block_m"]/div[@class="talk_time"]/text()')
        div_mainnews = html.xpath('//div[@id="center"]/div[@class="block_m"]/div[@class="p_content"]/div[@class="p_mainnew"]')
        # news[0]=titles, news[1]=timestamps, news[2]=bodies, news[3]=keyword links
        news = [[] for i in range(4)]
        for title in titles:
            news[0].append(title)
        for talk_time in talk_times:
            tt = talk_time.strip()
            if tt != '\r\n' and tt != '':
                news[1].append(tt)
        for mainnew in div_mainnews:
            mn = mainnew.xpath('string(.)')
            news[2].append(mn)
            # extract keywords and links
            keywords = mainnew.xpath('.//a/text()')
            keywords_link = mainnew.xpath('.//a/@href')
            link_nums = len(keywords)
            key_links = []
            for k in range(link_nums):
                key_links.append({'keyword': keywords[k], 'link': keywords_link[k]})
            if len(key_links) > 0:
                news[3].append(key_links)
            else:
                news[3].append([])
        # print('Length : news[0] = %s news[1] = %s news[2] = %s news[3]=%s'%(len(news[0]),len(news[1]),len(news[2]),len(news[3]) ))
        nums = len(news[0])
        if (nums > 0):
            print("Found %5d news" % nums)
        else:
            print("Not Found")
            return 0
        for i in range(nums):
            day_news.append({'title': news[0][i], 'talk_time': news[1][i], 'mainnews': news[2][i], 'keywords_links': news[3][i]})
            # print("title:%s\ntalk_time: %s\nmainnewes: %s\n"%(news[0][i],news[1][i],news[2][i]))
            print("%s\n%s\n%s" % (news[0][i], news[1][i], news[2][i]))
            ## print("For details visit:")
            ## tar = news[3][i]
            ## for x in range(len(tar)):
            ##     print("\t%s"%(tar[x]["link"]))
            print("\n")
        fname = './SolidotNews_%s.json' % peroid
        with open(fname, 'w', encoding='utf-8') as f:
            f.write(str(day_news))
            f.close()
    except etree.ParserError as e:
        print("At url=%s \nError type = %s" % (url, e))
def get_NewsFromDateRange():
    """Scrape Solidot for every day from 2018-11-01 to 2018-12-01 inclusive.

    Bug fix: get_OneDayInformation() takes the period string as a required
    second argument (it names the output JSON file); the previous code called
    it with only the URL, which raised TypeError on every iteration.
    """
    peroid_range = pd.period_range('11/01/2018', '12/01/2018', freq='D')
    for day in peroid_range:
        peroid = str(day).replace('-', '')
        url = "https://www.solidot.org/?issue=%s" % peroid
        print(url)
        get_OneDayInformation(url, peroid)
def main():
    # Entry point: load the proxy pool, then scrape Solidot news for the most
    # recent ResentDaysNews days (today first) and write one JSON file per day.
    init_proxiesPOOLs()
    ResentDaysNews = 3  # number of most recent days to fetch; 1 means today only
    ResentDaysNews_list = []
    for i in range(ResentDaysNews):
        # Dates formatted as YYYYMMDD, matching Solidot's ?issue= parameter.
        ResentDaysNews_list.append((datetime.datetime.now() + datetime.timedelta(days=-(i))).strftime("%Y%m%d"))
    print(ResentDaysNews_list)
    for day in ResentDaysNews_list:
        url = "https://www.solidot.org/?issue=%s" % (day)
        get_OneDayInformation(url, day)
    return
    # get_NewsFromDateRange()


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | glodsky.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.