Dataset schema (one record per source file):

| column | type | values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 69 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4 to 63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k to 686M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2 to 10.3M |
| extension | string | 246 classes |
| content | string | length 2 to 10.3M |
| authors | list | length 1 |
| author_id | string | length 0 to 212 |
---
blob_id: 4d6293e90aede45adf69979c66dab05d9b5cd1d6
directory_id: e910316216862a7f4735c69379c02c5362714698
path: /regression/2011_REGS/inspector.py
content_id: 1d6f179e89e65ae35bedc3ddb94a7e1c3a408e79
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: egeriicw/watttime-grid
snapshot_id: 514818fb069d3fb2ccb586c713c6d5ebab1ce3dd
revision_id: 394f2775b371352c7580095f4f86c42238f44809
branch_name: refs/heads/master
visit_date: 2020-06-07T13:47:27.934270
revision_date: 2014-06-05T05:11:13
committer_date: 2014-06-05T05:11:13
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 10,999
extension: py
content:
# This file is used to analyze data that exists locally. Data is downloaded separately.
from numpy import array
import csv
import os
import pickle
path = "/home/human/WattTime/data/CA-2012"
path2= "/home/human/WattTime/data/CA-2012-Price"
plants = {}
prices = {}
#functions for preservation of abstraction
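# Each accessor below reads a fixed column index from a csv row; most fall back to 0 when the field is missing or non-numeric.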
def get_name(row):
try: return float(row[1])
except: return 0
def get_ORISPL_code(row):
try: return float(row[2])
except: return 0
def get_unit_id(row):
try: return float(row[3])
except: return 0
def get_date(row):
try: return row[4]
except: return 0
def get_hour(row):
try: return float(row[5])
except: return 0
def get_op_time(row):
try: return float(row[6])
except: return 0
def get_GLOAD(row):
try: return float(row[7])
except: return 0
def get_SLOAD(row):
return row[8]
def get_SO2_MASS(row):
return row[9]
def get_SO2_MASS_MEASURE(row):
return row[10]
def get_SO2_RATE(row):
return row[11]
def get_SO2_RATE_MEASURE(row):
return row[12]
def get_CO2_MASS(row):
try: return row[17]
except: return 0
#for price data begin
def get_LMP_TYPE(row):
try: return row[6]
except: return 0
def get_OPR_DT(row):
try: return row[0]
except: return 0
def get_HE01(row):
try: return row[11]
except: return 0
def get_AVPR(row):
#returns the average price in a day
total_price = 0
for i in range(11,35):
try: total_price += float(row[i])
except: total_price += 0
return total_price/24
#for price data end
def get_first(row):
return row[0] #gets first item in row
# end of abstraction functions for getting info from csv list object
"""
#This is for opening new .csv files. This should now be done by opener.py
#here for reference only.
for filename in os.listdir(path):
with open("{0}/{1}".format(path,filename)) as csv_file:
data_object = csv.reader(csv_file)
for row in data_object:
if "{0} ID:{d1}".format(get_name(row),get_unit_id(row)) not in plants:
plants["{0} ID:{1}".format(get_name(row),get_unit_id(row))] = [row]
else:
if get_unit_id(row) == get_unit_id(get_first(plants["{0} ID:{1}".format(get_name(row),get_unit_id(row))])):
plants["{0} ID:{1}".format(get_name(row),get_unit_id(row))].append(row)
else:
plants["{0} ID:{1}".format(get_name(row),get_unit_id(row))] = [row]
"""
with open("/home/human/WattTime/data/plants.pickle","rb") as pickler:
plants= pickle.load(pickler) #gets info for plants dictionary from file
"""
#This is for generating an "aggregate plant", that measures total amount over all plants
#This should now be done by opener.py. Here for reference only.
def tryfloat(string):
try: return float(string)
except: return 0
aggregate_plant ={}
worklist = []
for plant in plants:
for row in plants[plant]:
if get_date(row) not in aggregate_plant:
totalhour = get_op_time(row)
totalmw = get_GLOAD(row)
totalCO2 = get_CO2_MASS(row)
aggregate_plant[get_date(row)] = [tryfloat(totalhour), tryfloat(totalmw), tryfloat(totalCO2)]
else:
hour = get_op_time(row)
mw = get_GLOAD(row)
CO2 = get_CO2_MASS(row)
aggregate_plant[get_date(row)][0] += tryfloat(hour)
aggregate_plant[get_date(row)][1] += tryfloat(mw)
aggregate_plant[get_date(row)][2] += tryfloat(CO2)
"""
with open("/home/human/WattTime/data/aggregate_plant.pickle","rb") as pickler:
aggregate_plant = pickle.load(pickler) #gets info for aggregate_plant dictionary from file
"""
#this is a test that displays the contents of the aggregate_plant dictionary
#uncomment to run
for value in sorted(aggregate_plant):
print(value, aggregate_plant[value])
"""
#this converts the aggregate plant dictionary into a data list.
data_list = []
for date in sorted(aggregate_plant):
data_list.append([date] + aggregate_plant[date])
data_list.pop()
data_list.pop()
#this converts the data_list into a 2-dimensional numpy array
data_array = array([row for row in data_list])
with open("/home/human/WattTime/data/data_array.pickle","wb") as pickler:
pickle.dump(data_array, pickler) #dumps the array for later use by engine
"""
#this is a test that displays the contents of the data_list list
#uncomment to run
for i in data_list:
print(i)
"""
#these 3 dictionaries exist to make working with their respective variables more convenient
"""
aggregate_HOUR = {}
for i in aggregate_plant:
aggregate_HOUR[aggregate_plant[i][0]] = i
aggregate_MW = {}
for i in aggregate_plant:
aggregate_MW[aggregate_plant[i][1]] = i
aggregate_CO2 = {}
for i in aggregate_plant:
aggregate_CO2[aggregate_plant[i][2]] = i
"""
"""
#this is a test that displays the contents of the dictionaries in increasing order
#uncomment to run
for dic in [aggregate_HOUR,aggregate_MW,aggregate_CO2]:
for value in sorted(dic):
print(value, dic[value])
"""
"""
#This is for opening new .csv files for price. This should now be done by opener.py
#Here for reference only.
for filename in os.listdir(path2):
with open("{0}/{1}".format(path2,filename)) as csv_file:
price_object = csv.reader(csv_file)
for row in price_object:
if get_LMP_TYPE(row) == "LMP":
prices[get_OPR_DT(row)] = row
"""
with open("/home/human/WattTime/data/prices.pickle","rb") as pickler:
prices = pickle.load(pickler) #gets info for prices dictionary from file
av_daily_prices = {}
for date in prices:
av_daily_prices[get_AVPR(prices[date])] = date
"""
#This is a test that prints the average daily prices in increasing order.
#uncomment to run
for price in sorted(av_daily_prices):
print(price, av_daily_prices[price])
"""
av_daily_prices_bydate = {}
for date in prices:
av_daily_prices_bydate[date] = get_AVPR(prices[date])
"""
#Another formatting test
for plant in sorted(plants):
for row in plants[plant]:
print("Plant: {0}; Date: {1} Hour: {2}; Operating Percent: {3}".format(get_name(row), get_date(row), get_hour(row), get_op_time(row)))
print("Success!")
"""
def count_rows(plants, key):
    count = 0
    for row in plants[key]:
        count += 1
    return count
def mean(dictionary):
total = 0
count = 0
for value in dictionary.values():
count +=1
try: total += value
except: total += 0
return total/count
def avpr_when_on(plants, prices):
    outlist = []
    for plant in sorted(plants):
        for row in plants[plant]:
            outlist.append("{0}: {1}".format(get_date(row), get_hour(row)))
    return outlist
def operating_time_average(plants, period=24):
#returns average operating time over all plants in given period
returndict = {}
for plant in plants:
count = 0
ontime = 0
for row in plants[plant]:
count += 1
try: ontime += float(get_op_time(row))
except: ontime += 0
returndict[plant] = (ontime * period)/count
return mean(returndict)
def average_X(plants, plant, get_X, period=24):
total = 0
count = 0
for row in plants[plant]:
if float(get_X(row)) >= 0:
count += 1
total += float(get_X(row))
if count == 0:
return 0
return (total * period)/count
def average_X_dict(plants, get_X, period=24):
AV_DICT = {}
for plant in plants:
AV_DICT[plant] = average_X(plants, plant, get_X, period)
return AV_DICT
def op_time_av_plant(plant, period=24):
ontime = 0
for row in plant:
try: ontime += float(get_op_time(row))
except: ontime += 0
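    # 8784 = 366 * 24, the number of hours in the (leap) year 2012; this scales total on-time to the given period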
return (ontime * period)/8784
def CO2_per_MW(plant, period=1):
CO2 = 0
MW = 0
for row in plant:
try: CO2 += float(get_CO2_MASS(row))
except: CO2 += 0
for row in plant:
try: MW += float(get_GLOAD(row))
except: MW += 0
if MW == 0:
return 0
return (period * (CO2/MW))
"""
#FOR GRAPHING WITH MATPLOTLIB/PYLAB
CO2_DICT = {}
for plant in sorted(plants):
CO2_DICT[CO2_per_MW(plants[plant])] = plant
for plant in sorted(CO2_DICT):
print(plant, CO2_DICT[plant])
xvals =[]
yvals =[]
for plant in sorted(CO2_DICT):
xvals.append(CO2_DICT[plant])
yvals.append(plant)
import matplotlib.pyplot as plt
import pylab
fig = plt.figure()
graph = fig.add_subplot(111)
fig.subplots_adjust(top=0.85)
graph.set_ylabel("CO2 per MW/hr")
graph.set_xlabel("Plants")
fig.suptitle("Average CO2 per MW", fontsize=25, fontweight="bold")
x = range(len(xvals))
pylab.plot(x, yvals, "g")
pylab.show()
"""
def at_least(a, b):
    return a >= b
def at_most(a, b):
    return a <= b
def on_percent(plants, percent, operator, period=24):
#returns a list of plants that were operating (on average) at least X percent of a period
returnlist = []
for plant in plants:
if operator(op_time_av_plant(plants[plant], period), percent*period):
returnlist.append(plant)
return returnlist
"""
#This is a test that prints all plants on at least half of an average day
#Uncomment to run
for plant in on_percent(plants, .5 , at_least, 24):
print (plant)
"""
def similar(plants, operator, threshold, comparer, value, period=24):
    #returns a dictionary of plants whose output from the comparer function over a period is above or below (depending on operator) a threshold ratio of the given value
returndict= {}
finaldict = {}
for plant in plants:
returndict[plant] = comparer(plants[plant], period)
for item in returndict:
if operator(returndict[item]/value, threshold * value):
finaldict[item] = returndict[item]
return finaldict
""""
#This is a test which displays an example use of "similar" function from above.
#Uncomment to run
sortdict = similar(plants, at_least, 1 , op_time_av_plant, operating_time_average(plants))
reverse = {}
for plant in sorted(sortdict):
reverse[sortdict[plant]] = plant
for i in reverse:
print(i, reverse[i])
"""
def similar_days(dict_list, date, radius):
""" takes in a list of pairs of dictionaries of format:
[({average of var1: corresponding-date }, {corresponding-date: average of var1}), ({average of var2: corresponding-date... }...)...]
as well as a date, and a radius. Returns the most similar days to the given day by minimizing
the distance between the values recorded in that day and the average values in
the {radius} amount of days requested. A radius of 5 would return the 10 most
similar days: 5 days with values greater than the day, and 5 days with lower values
"""
return("failure")
def similar_days(dict_list, date, amount):
my_difference ="nope"
print("Success!")
authors: ["jessie.salas@berkeley.edu"]
author_id: jessie.salas@berkeley.edu

---
blob_id: e21d699512069e63bf2f6782af55797cde8e2c7e
directory_id: 5efab0d0b9dc889f213fae970f5df7220973ae16
path: /demo.py
content_id: bd0241a8cafe15cc2009ae145e5d410bbcb639d1
detected_licenses: []
license_type: no_license
repo_name: milulya/Resonator-Quality-Factor
snapshot_id: 4f946d63aaa0387ecdad0550f1a49f3e1cdae0a3
revision_id: 8bbdf04a82ec19f05711cf9dd78aa511925966dd
branch_name: refs/heads/master
visit_date: 2023-03-27T00:58:38.624627
revision_date: 2021-04-04T09:12:29
committer_date: 2021-04-04T09:12:29
github_id: 330,783,597
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_event_created_at: 2021-04-04T09:06:40
gha_created_at: 2021-01-18T20:49:05
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 985
extension: py
content:
import numpy as np
from os import path
import os
import matplotlib.pyplot as plt
import Resonator
import generate
import logger_module
logger = logger_module.getlogger('Q_Factor')
def single_measurement():
# path to csv file from vna
csv_path = r"C:\Users\physicsuser\Dropbox (Weizmann Institute)\Quantum Circuits Lab\Ofir\Cavity Qubit\cooldowns\cooldown3\Read out\RR_S21_-20DdBm_Delay_corrected_55.4ns_2042021.csv"
# creating measurement object
# "config" argument can take the value 'T' for T connectoe configuration, and 'circulator' for circulator configuration
# "s_mat_element" is for reading the the desried data from the csv file
Readout = Resonator.Measurement(csv_path, config='circulator', s_mat_element='21')
# running the calculationd, use "plot_data=True" to see the algorithm converged properly
Ql_calc, Qc_calc, Qi_calc, fr = Readout.measure(plot_data=True)
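    # Ql_calc, Qc_calc, Qi_calc are presumably the loaded, coupling, and internal quality factors, and fr the resonance frequency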
if __name__=="__main__":
# multi()
single_measurement()
plt.show()
authors: ["ofirmilul@gmail.com"]
author_id: ofirmilul@gmail.com

---
blob_id: 4a02676f05851e61e51cbafb4135755a87455929
directory_id: e710fddf82f52f786e7d350ea3e87d46e0cc08d5
path: /HELLOPYTHON/day10/mycrawl06.py
content_id: 8d6eabde3e9603ec4ad7f2dbc91b5ac26345ce8c
detected_licenses: []
license_type: no_license
repo_name: larlarru/eclipse_python_study
snapshot_id: 2a4eeb5976e619c4141ac4e468869a9871bea4bc
revision_id: 250e0043de29ead0bb0581e6776934a0333d5ccb
branch_name: refs/heads/master
visit_date: 2023-04-04T21:08:13.635782
revision_date: 2021-04-23T02:22:27
committer_date: 2021-04-23T02:22:27
github_id: 358,185,649
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 479
extension: py
content:
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("https://vip.mk.co.kr/newSt/rate/item_all.php")
bs = BeautifulSoup(html, "html.parser")
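# each ".st2" cell apparently holds one listing: the <a> tag's "title" attribute carries the stock code, the cell text the name, and the second <td> of its parent row the price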
tds = bs.select(".st2");
# tds = bs.select(".st2 > td");
for td in tds :
# print(i.text)
s_code = td.find(["a"]).get("title")
s_name = td.text
s_price = td.parent.select("td")[1].text
print("s_code :",s_code, end=" ")
print("s_name :", s_name, end=" ")
print("s_price :",s_price)
authors: ["kgy9466@gmail.com"]
author_id: kgy9466@gmail.com

---
blob_id: 32ea31849e6bd4ef0acd560f4be8b565f98587d3
directory_id: f0b5238cf64ca46dafd8aab484278dd40feffa4d
path: /insta/migrations/0008_image_profile.py
content_id: 58a792198142bdfb3043e57e53faa92eb2d84078
detected_licenses: ["MIT"]
license_type: permissive
repo_name: niklauspeter/instagram
snapshot_id: 0e7ef612b4bd1301b8b1c146a281a645d5940f49
revision_id: 303e26f88d3cdcc9a7a8a05d41a6fa21bf91737e
branch_name: refs/heads/master
visit_date: 2021-09-09T14:44:48.293670
revision_date: 2019-05-23T15:56:49
committer_date: 2019-05-23T15:56:49
github_id: 187,219,168
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2021-09-08T01:00:34
gha_created_at: 2019-05-17T13:14:56
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 548
extension: py
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-23 06:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('insta', '0007_remove_image_profile_photo'),
]
operations = [
migrations.AddField(
model_name='image',
name='profile',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='insta.Profile'),
),
]
authors: ["oriokiklaus@gmail.com"]
author_id: oriokiklaus@gmail.com

---
blob_id: 678a1f669bff4a1dad00643c8001612102a4876d
directory_id: 6669d9e2de3208af0d8b7e495f5e7cdad41b08af
path: /benovate/settings.py
content_id: 8b03e1d6582ca5d89af1c8e525c41e05db7a00cf
detected_licenses: []
license_type: no_license
repo_name: glebuzheg/test_benovate
snapshot_id: 8c08fd5a54d45fcdba728436ea507e375e4fc971
revision_id: 0f27d145501050610be11320510c45a3995002c5
branch_name: refs/heads/master
visit_date: 2020-12-12T03:04:25.524090
revision_date: 2020-01-20T12:50:14
committer_date: 2020-01-20T12:50:14
github_id: 234,027,660
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,087
extension: py
content:
"""
Django settings for benovate project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'aix^cii=1wp5ack-4ao5c^=65g3gnu&%mj8wp0$92sadm*x7zi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'users',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'benovate.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'benovate.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'benovate',
'USER': 'postgres',
'PASSWORD': 'Qwerty123',
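        # NOTE: credentials are hard-coded here, presumably for local development only; production settings should load them from the environment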
'HOST': 'localhost',
'PORT': '5432',
'CLIENT_ENCODING': 'UTF8',
'ATOMIC_REQUESTS': True,
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'users.User'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
}
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
}
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['console'],
}
}
}
authors: ["puper.inferno@yandex.ru"]
author_id: puper.inferno@yandex.ru

---
blob_id: 1ab9ea1bed1a976d6bb697422d25a5c207ee4545
directory_id: 46cca8009bdb8196087532d6ffe5ec3c7d71261c
path: /myacg/settings.py
content_id: a2c1944723c0add8f5f4c5fd402f90d5f22a897a
detected_licenses: []
license_type: no_license
repo_name: Lingzy/myacg
snapshot_id: fb021598ec145b0cae190e00ce2d8021aa9282fb
revision_id: b744dbee1146064096a5d50b6e4457bfc9affd37
branch_name: refs/heads/master
visit_date: 2020-03-06T21:14:21.273374
revision_date: 2018-03-28T03:13:15
committer_date: 2018-03-28T03:13:15
github_id: 126,428,219
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,101
extension: py
content:
"""
Django settings for myacg project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e9i=t_nqc_!r0$twy!+2j%2#wh+)b^-ik-q8buvmqe6$m94%*9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'postlist',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myacg.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myacg.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
authors: ["lingzhengying@outlook.com"]
author_id: lingzhengying@outlook.com

---
blob_id: 9e2f3eb1ba2e6e3dd8940294c0074a16853e3b04
directory_id: 664fe19a2de4d202f5b4566078766b9b2a2d59c1
path: /site_Api/Api_Web/admin.py
content_id: 64283c7e38ffc2ffe8603b90f79209a804b654bb
detected_licenses: []
license_type: no_license
repo_name: bakhao/GoodProjects
snapshot_id: 06feb59272c0e5efcf767edefead554cdd5298c6
revision_id: 0569162bcdd3f93a052d098376bf1003e0f8c1b8
branch_name: refs/heads/master
visit_date: 2022-12-28T00:07:41.999102
revision_date: 2020-09-25T03:34:36
committer_date: 2020-09-25T03:34:36
github_id: 298,458,765
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 127
extension: py
content:
from django.contrib import admin
# Register your models here.
from .models import employees
admin.site.register(employees)
authors: ["bakhao95@live.fr"]
author_id: bakhao95@live.fr

---
blob_id: 287a27055431ae2cfd7160f89e4dd3d0ccbd32f3
directory_id: 83d8d2afe72b3e394a886e1c7060410f1747c6ff
path: /todo/views.py
content_id: fbb4679dcf95333ec3d93164567db03e3cd97b30
detected_licenses: []
license_type: no_license
repo_name: oris-96/todo-notes
snapshot_id: d35ef10a1ddc16df5ad9a949967a3bfaecec1d14
revision_id: 9a02ad6423ce5bd41afd218c79032ba4f7e4c0aa
branch_name: refs/heads/master
visit_date: 2022-12-08T03:33:09.330974
revision_date: 2020-03-14T03:55:04
committer_date: 2020-03-14T03:55:04
github_id: 247,195,307
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2022-11-22T05:23:47
gha_created_at: 2020-03-14T02:05:40
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,897
extension: py
content:
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.contrib.auth import login, logout, authenticate
from .forms import TodoForm
from .models import Todo
from django.utils import timezone
from django.contrib.auth.decorators import login_required
def home(request):
return render(request, 'todo/home.html')
def signupuser(request):
if request.method == 'GET':
return render(request, 'todo/signupuser.html', {'form':UserCreationForm()})
else:
if request.POST['password1'] == request.POST['password2']:
try:
user = User.objects.create_user(request.POST['username'], password=request.POST['password1'])
user.save()
login(request, user)
return redirect('currenttodos')
except IntegrityError:
return render(request, 'todo/signupuser.html', {'form':UserCreationForm(), 'error':'That username has already been taken. Please choose a new username'})
else:
return render(request, 'todo/signupuser.html', {'form':UserCreationForm(), 'error':'Passwords did not match'})
def loginuser(request):
if request.method == 'GET':
return render(request, 'todo/loginuser.html', {'form':AuthenticationForm()})
else:
user = authenticate(request, username=request.POST['username'], password=request.POST['password'])
if user is None:
return render(request, 'todo/loginuser.html', {'form':AuthenticationForm(), 'error':'Username and password did not match'})
else:
login(request, user)
return redirect('currenttodos')
@login_required
def logoutuser(request):
if request.method == 'POST':
logout(request)
return redirect('home')
@login_required
def createtodo(request):
if request.method == 'GET':
return render(request, 'todo/createtodo.html', {'form':TodoForm()})
else:
try:
form = TodoForm(request.POST)
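            # commit=False returns an unsaved Todo instance so the current user can be attached before writing to the database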
newtodo = form.save(commit=False)
newtodo.user = request.user
newtodo.save()
return redirect('currenttodos')
except ValueError:
return render(request, 'todo/createtodo.html', {'form':TodoForm(), 'error':'Bad data passed in. Try again.'})
@login_required
def currenttodos(request):
todos = Todo.objects.filter(user=request.user, datecompleted__isnull=True)
return render(request, 'todo/currenttodos.html', {'todos':todos})
@login_required
def completedtodos(request):
todos = Todo.objects.filter(user=request.user, datecompleted__isnull=False).order_by('-datecompleted')
return render(request, 'todo/completedtodos.html', {'todos':todos})
@login_required
def viewtodo(request, todo_pk):
todo = get_object_or_404(Todo, pk=todo_pk, user=request.user)
if request.method == 'GET':
form = TodoForm(instance=todo)
return render(request, 'todo/viewtodo.html', {'todo':todo, 'form':form})
else:
try:
form = TodoForm(request.POST, instance=todo)
form.save()
return redirect('currenttodos')
except ValueError:
return render(request, 'todo/viewtodo.html', {'todo':todo, 'form':form, 'error':'Inconsistent Data'})
@login_required
def completetodo(request, todo_pk):
todo = get_object_or_404(Todo, pk=todo_pk, user=request.user)
if request.method == 'POST':
todo.datecompleted = timezone.now()
todo.save()
return redirect('currenttodos')
@login_required
def deletetodo(request, todo_pk):
todo = get_object_or_404(Todo, pk=todo_pk, user=request.user)
if request.method == 'POST':
todo.delete()
return redirect('currenttodos')
authors: ["iabdulkareem94@gmail.com"]
author_id: iabdulkareem94@gmail.com

---
blob_id: 322c2cfc3449d6bb6166f5187ba048115fb66187
directory_id: 8d51e653d0a90140975477ea6f6e2744ffc04093
path: /student_management_app/admin.py
content_id: 28e7881e5745276b4a9bc2edcdd4132f7d0008db
detected_licenses: []
license_type: no_license
repo_name: Bilal898/supercoders-student-ms
snapshot_id: 6186c0ec3aea22298f811f2c3b3bdc89d74cb46b
revision_id: 32eb924502ae74891a3dd596ac9727870399eb23
branch_name: refs/heads/master
visit_date: 2023-08-18T09:44:10.799471
revision_date: 2020-05-25T07:58:23
committer_date: 2020-05-25T07:58:23
github_id: 266,454,049
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2021-09-22T19:09:36
gha_created_at: 2020-05-24T02:15:04
gha_language: JavaScript
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 222
extension: py
content:
from django.contrib import admin
from .models import CustomUser
# Register your models here.
from django.contrib.auth.admin import UserAdmin
class UserModel(UserAdmin):
pass
admin.site.register(CustomUser, UserModel)
authors: ["bfhamid@gmail.com"]
author_id: bfhamid@gmail.com

---
blob_id: d3d1ba274df3c9e32d65b77b40f7b3b416ade480
directory_id: ebcc3f199a4dc7763bb4984fc8a910d015b0c5d0
path: /dht_temperature.py
content_id: 9e9f4a9f3703f52d6e30aab16700eaefb40ef65a
detected_licenses: ["MIT"]
license_type: permissive
repo_name: BurntTech/homie4
snapshot_id: 31aba5be338cee46ce2dad6483821cd837aa6704
revision_id: 577bdb413778865d3be03e0149e1773b5d312d51
branch_name: refs/heads/master
visit_date: 2021-07-13T12:12:48.528194
revision_date: 2021-02-03T19:02:41
committer_date: 2021-02-03T19:02:41
github_id: 233,911,796
star_events_count: 1
fork_events_count: 0
gha_license_id: MIT
gha_event_created_at: 2020-01-14T18:48:18
gha_created_at: 2020-01-14T18:48:17
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 634
extension: py
content:
# Raspberry PI
import Adafruit_DHT
import time
from homie.device_temperature import Device_Temperature
mqtt_settings = {
'MQTT_BROKER' : 'OpenHAB',
'MQTT_PORT' : 1883,
}
try:
temperature_device = Device_Temperature(device_id="temperature-sensor-1",name = "Temperature_Sensor 1",mqtt_settings=mqtt_settings)
sensor = Adafruit_DHT.AM2302
pin = 4
while True:
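        # Adafruit_DHT.read_retry returns (None, None) when the sensor cannot be read; the code below assumes a valid reading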
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
print(temperature)
temperature_device.update_temperature(temperature)
time.sleep(5)
except (KeyboardInterrupt, SystemExit):
print("Quitting.")
authors: ["mike@4831.com"]
author_id: mike@4831.com

---
blob_id: 4013981c65eed2174946c158583b6552973aaab1
directory_id: ca7aa979e7059467e158830b76673f5b77a0f5a3
path: /Python_codes/p03473/s440381272.py
content_id: 619cef532b71e399863972fc78c98e9f050e46ca
detected_licenses: []
license_type: no_license
repo_name: Aasthaengg/IBMdataset
snapshot_id: 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
revision_id: f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
branch_name: refs/heads/main
visit_date: 2023-04-22T10:22:44.763102
revision_date: 2021-05-13T17:27:22
committer_date: 2021-05-13T17:27:22
github_id: 367,112,348
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 92
extension: py
content:
# -*- coding: utf-8 -*-
#----------
M = int(input().strip())
#----------
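# M is apparently the hour on 30 December; the hours remaining until New Year are (24 - M) + 24 = 48 - M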
print(24+(24-M))
authors: ["66529651+Aastha2104@users.noreply.github.com"]
author_id: 66529651+Aastha2104@users.noreply.github.com

---
blob_id: 891c43643af4e4de0fae22eee808d95454e31074
directory_id: e7504a4acdf4e80bd57d3beb1411ed6db45b58c1
path: /slate/__main__.py
content_id: c7a7f65b1a5993eba873d5b916897632b41f63ca
detected_licenses: ["ISC"]
license_type: permissive
repo_name: jkkummerfeld/slate
snapshot_id: 23fd32129c73c2de2150a35bc5f0bfe52c121d93
revision_id: c49eaa45268fd80436d08ef3f4000b72fbd7faba
branch_name: refs/heads/master
visit_date: 2023-05-11T05:17:23.020595
revision_date: 2023-05-07T10:34:23
committer_date: 2023-05-07T10:34:23
github_id: 59,550,720
star_events_count: 82
fork_events_count: 15
gha_license_id: ISC
gha_event_created_at: 2023-04-19T07:02:32
gha_created_at: 2016-05-24T07:33:39
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 66
extension: py
content:
from .annotate import main
if __name__ == "__main__":
main()
authors: ["jkk@berkeley.edu"]
author_id: jkk@berkeley.edu

---
blob_id: f43590c652911bad8adec58b56b9c1fc8becad8b
directory_id: 8f15a70833480bcdf0c54e62d7f919b7d6db953c
path: /cyy_extract_edg.py
content_id: 291aa4b1cb8aa37d68e081036d27fe6a5442f963
detected_licenses: []
license_type: no_license
repo_name: cyy-hub/Deecamp
snapshot_id: 3d4f70a802df9210b14ca31f85fba1a1fe2aa6ca
revision_id: 1ba0a190f0c6fd9739ba0f1e88cc649cd0d14b1f
branch_name: refs/heads/master
visit_date: 2022-12-27T06:23:10.201344
revision_date: 2020-08-31T09:12:40
committer_date: 2020-08-31T09:12:40
github_id: 287,190,119
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,653
extension: py
content:
# cyy 20200726: extract the proportion of edge pixels in an image
from __future__ import print_function, division
import os
import shutil
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image, ImageSequence
import datetime
import warnings
from cyy_cnn import AlexNet_extra
from cyy_cnn import AdDataset, Rescale, ToTensor, Normalize
import pdb
warnings.filterwarnings("ignore")
plt.ion() # interactive mode
date = datetime.date.today()
data_str = date.strftime("%Y-%m-%d")
def edge_proportion(img):
    # input is a single-channel grayscale image; edge extraction is then applied to it
# plt.figure()
# plt.subplot(1,2,1)
# plt.imshow(img.astype('uint8'),cmap='gray')
im1 = torch.from_numpy(img.reshape((1, 1, img.shape[0], img.shape[1]))).float()
conv1 = nn.Conv2d(1, 1, 3, bias=False)
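    # despite the variable name, this is the 8-neighbour Laplacian kernel, which responds to edges in every direction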
sobel_kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype='float32')
sobel_kernel = sobel_kernel.reshape((1, 1, 3, 3))
conv1.weight.data = torch.from_numpy(sobel_kernel)
edge1 = conv1(im1)
edge1 = edge1.data.squeeze().numpy()
all_pix = edge1.shape[0] * edge1.shape[1]
non_zero_pix = np.count_nonzero(edge1)
# plt.subplot(1,2,2)
# plt.imshow(edge1, cmap='gray')
# plt.savefig("edge-2020-08-28.png")
# pdb.set_trace()
return non_zero_pix / all_pix
if __name__ == "__main__":
train_set = AdDataset(csv_file="./deecamp_ad/train_8000.csv", root_dir="./deecamp_ad/sample_data/", Train=True,
Drop=False, Factor=1)
test_set = AdDataset(csv_file="./deecamp_ad/test_2000_new.csv", root_dir="./deecamp_ad/sample_data/", Train=True,
Drop=False, Factor=1)
    # read the feature files that were extracted earlier and append the new feature to them
train_feature = pd.read_csv("./deecamp_ad/train_feature.csv")
test_feature = pd.read_csv("./deecamp_ad/test_feature.csv")
train_edge_pro = []
for img, _ in train_set:
img = img.mean(2)
train_edge_pro.append(edge_proportion(img))
train_feature["edg_proportion"] = np.array(train_edge_pro)
test_edge_pro = []
for img, _ in test_set:
img = img.mean(2)
test_edge_pro.append(edge_proportion(img))
test_feature["edg_proportion"] = np.array(test_edge_pro)
train_feature.to_csv("./deecamp_ad/train_feature.csv", index=False)
test_feature.to_csv("./deecamp_ad/test_feature.csv", index=False)
authors: ["chenyingying2018@ia.ac.cn"]
author_id: chenyingying2018@ia.ac.cn

---
blob_id: e315c8fb008698d36a31713a17567010a9d3dd03
directory_id: 251119e70ef280fc7c7dcf1febfc7e01d1932c72
path: /exampleHbb.py
content_id: 0de40d9c20f08e235d7cde8cd4a7d86d2df5e4c2
detected_licenses: []
license_type: no_license
repo_name: leonardogiannini/Alphabet
snapshot_id: be2762578eaa347f900117579282f7d855a6d5cb
revision_id: 8bf1e338585b2021d646eb4487459304efc7c190
branch_name: refs/heads/master
visit_date: 2021-01-17T00:39:50.818411
revision_date: 2015-12-09T01:44:43
committer_date: 2015-12-09T01:44:43
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,639
extension: py
content:
# TEST AREA
import os
import math
from array import array
import optparse
import ROOT
from ROOT import *
import scipy
# Our functions:
import Alphabet_Header
from Alphabet_Header import *
import Plotting_Header
from Plotting_Header import *
import Converters
from Converters import *
import Distribution_Header
from Distribution_Header import *
import Alphabet
from Alphabet import *
### DEFINE THE DISTRIBUTIONS YOU WANT TO USE:
# DISTRIBUTIONS YOU WANT TO ESTIMATE:
# FORMAT IS:
# dist = ("name", "location of file", "name of tree", "weight (can be more complicated than just a number, see MC example below)")
QCD = DIST("QCD", "/home/osherson/Work/Alphabet/QCD_HT_v6p3.root", "myTree", "(1.)")
# Now we arrange them correctly:
DistsWeWantToEstimate = [QCD]
HbbTest = Alphabetizer("QCDalphaTest", DistsWeWantToEstimate, [])
# apply a preselection to the trees:
presel = "(dijetmass>1000&(jet2pmass<135&jet2pmass>105)&jet2tau21<0.5&jet1tau21<0.5&jet2bbtag>-0.84)"
# pick the two variables to do the estimate in (in this case, pruned jet mass from 50 to 200 in 30 bins and the bb-tag discriminant from -1.1 to 1.1 in 200 bins)
var_array = ["jet1pmass", "jet1bbtag", 30,50,200, 200, -1.1, 1.1]
HbbTest.SetRegions(var_array, presel) # make the 2D plot
C1 = TCanvas("C1", "", 800, 600)
C1.cd()
HbbTest.TwoDPlot.Draw() # Show that plot:
# NOW DO THE ACTUAL ALPHABETIZATION: (Creating the regions)
# The command is: .GetRates(cut, bins, truthbins, center, fit)
cut = [-0.84, ">"]
# so we need to give it bins:
bins = [[50,80],[80,105],[135,160],[160,200]]
# truth bins (we don't want any because we are looking at real data::)
truthbins = []
# a central value for the fit (could be 0 if you wanted to stay in the mass variable; we are looking at the Higgs, so we'll give it 125 GeV)
center = 125.
# and finally, a fit, taken from the file "Converters.py". We are using the linear fit, so:
F = QuadraticFit([0.1,0.1,0.1], -75, 75, "quadfit", "EMRFNEX0")
#F = LinearFit([0.5,-0.5], -75, 75, "linFit1", "EMRNS")
# All the error stuff is handled by the LinearFit class. We shouldn't have to do anything else!
# So we just run:
HbbTest.GetRates(cut, bins, truthbins, center, F)
## Let's plot the results:
C2 = TCanvas("C2", "", 800, 600)
C2.cd()
HbbTest.G.Draw("AP")
HbbTest.Fit.fit.Draw("same")
HbbTest.Fit.ErrUp.Draw("same")
HbbTest.Fit.ErrDn.Draw("same")
# Now we actually run the estimate!
# cuts:
tag = "(dijetmass>1000&(jet2pmass<135&jet2pmass>105)&(jet1pmass<135&jet1pmass>105)&jet1tau21<0.5&jet2tau21<0.5&(jet1bbtag>-0.84&jet2bbtag>-0.84))"
antitag = "(dijetmass>1000&(jet2pmass<135&jet2pmass>105)&(jet1pmass<135&jet1pmass>105)&jet1tau21<0.5&jet2tau21<0.5&(jet1bbtag<-0.84&jet2bbtag>-0.84))"
# var we want to look at:
var_array2 = ["dijetmass", 20,1000,3000]
FILE = TFile("Hbb_output.root", "RECREATE")
FILE.cd()
HbbTest.MakeEst(var_array2, antitag, tag)
# now we can plot (maybe I should add some auto-plotting functions?)
# the real value is the sum of the histograms in self.hists_MSR
V = TH1F("V", "", 20,1000,3000)
for i in HbbTest.hists_MSR:
V.Add(i,1.)
# the estimate is the sum of the histograms in self.hists_EST and self.hist_MSR_SUB
N = TH1F("N", "", 20,1000,3000)
for i in HbbTest.hists_EST:
N.Add(i,1.)
# We can do the same thing for the Up and Down shapes:
NU = TH1F("NU", "", 20,1000,3000)
for i in HbbTest.hists_EST_UP:
NU.Add(i,1.)
ND = TH1F("ND", "", 20,1000,3000)
for i in HbbTest.hists_EST_DN:
ND.Add(i,1.)
N.SetFillColor(kYellow)
ND.SetLineStyle(2)
NU.SetLineStyle(2)
FindAndSetMax([V,N, NU, ND])
C3 = TCanvas("C3", "", 800, 600)
C3.cd()
N.Draw("Hist")
V.Draw("same E0")
NU.Draw("same")
ND.Draw("same")
FILE.Write()
FILE.Save()
authors: ["oshersonmarc@gmail.com"]
author_id: oshersonmarc@gmail.com

---
blob_id: e9bc5465c930dd2d172634b613cc53ccd8def43b
directory_id: 087fd153ee32351e6a3aa06da1c026b3eda75105
path: /Pruning+Splitting/eventmakerTryServer.py
content_id: 7bc46635f8fcac9ab5edeb9689f5f061a34b057f
detected_licenses: []
license_type: no_license
repo_name: lara-martin/ASTER
snapshot_id: c05e36591f12c0fa79b0d5b31e70b0f2d8d0457d
revision_id: 68fc5e3ef81e914b71fb3320a4adbaeb9183f6c9
branch_name: refs/heads/master
visit_date: 2022-03-11T11:57:17.855036
revision_date: 2022-03-03T21:59:56
committer_date: 2022-03-03T21:59:56
github_id: 127,340,869
star_events_count: 30
fork_events_count: 12
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 23,489
extension: py
content:
# This is a class to extract events from sentences, store words in a working memory, and translate back to human-readable
# sentences from partially generalized sentences.
from nltk.tag.stanford import StanfordNERTagger
import nltk.tag.stanford as stanford
from nltk.internals import find_jars_within_path
from nltk import word_tokenize, pos_tag, DependencyGraph, FreqDist
from nltk.tree import Tree
from nltk.internals import find_jar, find_jar_iter, config_java, java, _java_options, find_jars_within_path
#from en import verb
import os, copy
from collections import defaultdict
import nltk.corpus
from nltk.corpus import wordnet as wn
import regex as re
from pprint import pprint
import tempfile
from nltk import compat
from subprocess import PIPE
import subprocess
import json
from numpy.random import choice
from nltk.wsd import lesk
# necessary parameters
stanford_dir = "/mnt/sdb1/Pipeline/tools/stanford/stanford-corenlp-full-2016-10-31"
models = "/mnt/sdb1/Pipeline/tools/stanford/stanford-english-corenlp-2016-10-31-models"
ner_loc = "/mnt/sdb1/Pipeline/tools/stanford/stanford-ner-2016-10-31"
verbnet = nltk.corpus.VerbnetCorpusReader('/mnt/sdb1/Pipeline/tools/verbnet', ['absorb-39.8.xml','continue-55.3.xml','hurt-40.8.3.xml','remove-10.1.xml','accept-77.xml','contribute-13.2.xml','illustrate-25.3.xml','render-29.90.xml','accompany-51.7.xml','convert-26.6.2.xml','image_impression-25.1.xml','require-103.xml','acquiesce-95.xml','cooking-45.3.xml','indicate-78.xml','resign-10.11.xml','addict-96.xml','cooperate-73.xml','inquire-37.1.2.xml','risk-94.xml','adjust-26.9.xml','cope-83.xml','instr_communication-37.4.xml','roll-51.3.1.xml','admire-31.2.xml','correlate-86.1.xml','interrogate-37.1.3.xml','rummage-35.5.xml','admit-65.xml','correspond-36.1.xml','investigate-35.4.xml','run-51.3.2.xml','adopt-93.xml','cost-54.2.xml','involve-107.xml','rush-53.2.xml','advise-37.9.xml','crane-40.3.2.xml','judgment-33.xml','say-37.7.xml','allow-64.xml','create-26.4.xml','keep-15.2.xml','scribble-25.2.xml','amalgamate-22.2.xml','curtsey-40.3.3.xml','knead-26.5.xml','search-35.2.xml','amuse-31.1.xml','cut-21.1.xml','learn-14.xml','see-30.1.xml','animal_sounds-38.xml','debone-10.8.xml','leave-51.2.xml','seem-109.xml','appeal-31.4.xml','declare-29.4.xml','lecture-37.11.xml','send-11.1.xml','appear-48.1.1.xml','dedicate-79.xml','light_emission-43.1.xml','separate-23.1.xml','appoint-29.1.xml','deduce-97.2.xml','limit-76.xml','settle-89.xml','assessment-34.1.xml','defend-72.2.xml','linger-53.1.xml','shake-22.3.xml','assuming_position-50.xml','destroy-44.xml','sight-30.2.xml','avoid-52.xml','devour-39.4.xml','lodge-46.xml','simple_dressing-41.3.1.xml','banish-10.2.xml','differ-23.4.xml','long-32.2.xml','slide-11.2.xml','base-97.1.xml','dine-39.5.xml','manner_speaking-37.3.xml','smell_emission-43.3.xml','battle-36.4.xml','disappearance-48.2.xml','marry-36.2.xml','snooze-40.4.xml','become-109.1.xml','disassemble-23.3.xml','marvel-31.3.xml','beg-58.2.xml','discover-84.xml','masquerade-29.6.xml','sound_emission-43.2.xml','begin-55.1.xml','dress-41.1.1.xml','matter-91.xml','sound_existence-47.4.xml','being_dressed-41.3.3.xml','dressing_well-41.3.2.xml','meander-47.7.xml','spank-18.3.xml','bend-45.2.xml','drive-11.5.xml','meet-36.3.xml','spatial_configuration-47.6.xml','benefit-72.1.xml','dub-29.3.xml','mine-10.9.xml','spend_time-104.xml','berry-13.7.xml','eat-39.1.xml','mix-22.1.xml','split-23.2.xml','bill-54.5.xml','empathize-88.2.xml','modes_of_being_with_motion-47.3.xml','spray-9.7.xml','body_internal_motion-49.xml','enforce-63.xml','multiply-108.xml','stalk-35.3.xml','body_internal_states-40.6.xml','engender-27.xml','murder-42.1.xml','steal-10.5.xml','braid-41.2.2.xml','ensure-99.xml','neglect-75.xml','stimulus_subject-30.4.xml','break-45.1.xml','entity_specific_cos-45.5.xml','nonvehicle-51.4.2.xml','stop-55.4.xml','breathe-40.1.2.xml','entity_specific_modes_being-47.2.xml','nonverbal_expression-40.2.xml','subjugate-42.3.xml','bring-11.3.xml','equip-13.4.2.xml','obtain-13.5.2.xml','substance_emission-43.4.xml','build-26.1.xml','escape-51.1.xml','occurrence-48.3.xml','succeed-74.xml','bulge-47.5.3.xml','establish-55.5.xml','order-60.xml','suffocate-40.7.xml','bump-18.4.xml','estimate-34.2.xml','orphan-29.7.xml','suspect-81.xml','butter-9.9.xml','exceed-90.xml','other_cos-45.4.xml','sustain-55.6.xml','calibratable_cos-45.6.xml','exchange-13.6.xml','overstate-37.12.xml','swarm-47.5.1.xml','calve-28.xml','exhale-40.1.3.xml','own-100.xml','swat-18.2.xml','captain-29.8.xml','exist-47.1.xml','pain-40.8.1.xml','talk-37.5.xml','care-88.1.xml','feeding-39.7.xml','patent-101.xml','tape-22.4.xml','carry-11.4.xml','ferret-35.6.xml','pay-68.xml','tell-37.2.xml','carve-21.2.xml','fill-9.8.xml','peer-30.3.xml','throw-17.1.xml','change_bodily_state-40.8.4.xml','fire-10.10.xml','pelt-17.2.xml','tingle-40.8.2.xml','characterize-29.2.xml','fit-54.3.xml','performance-26.7.xml','touch-20.xml','chase-51.6.xml','flinch-40.5.xml','pit-10.7.xml','transcribe-25.4.xml','cheat-10.6.xml','floss-41.2.1.xml','pocket-9.10.xml','transfer_mesg-37.1.1.xml','chew-39.2.xml','focus-87.1.xml','poison-42.2.xml','try-61.xml','chit_chat-37.6.xml','forbid-67.xml','poke-19.xml','turn-26.6.1.xml','classify-29.10.xml','force-59.xml','pour-9.5.xml','urge-58.1.xml','clear-10.3.xml','free-80.xml','preparing-26.3.xml','use-105.xml','cling-22.5.xml','fulfilling-13.4.1.xml','price-54.4.xml','vehicle-51.4.1.xml','coil-9.6.xml','funnel-9.3.xml','promise-37.13.xml','vehicle_path-51.4.3.xml','coloring-24.xml','future_having-13.3.xml','promote-102.xml','complain-37.8.xml','get-13.5.1.xml','pronounce-29.3.1.xml','complete-55.2.xml','give-13.1.xml','push-12.xml','void-106.xml','comprehend-87.2.xml','gobble-39.3.xml','put-9.1.xml','waltz-51.5.xml','comprise-107.1.xml','gorge-39.6.xml','put_direction-9.4.xml','want-32.1.xml','concealment-16.xml','groom-41.1.2.xml','put_spatial-9.2.xml','weather-57.xml','confess-37.10.xml','grow-26.2.xml','reach-51.8.xml','weekend-56.xml','confine-92.xml','help-72.xml','reflexive_appearance-48.1.2.xml','wink-40.3.1.xml','confront-98.xml','herd-47.5.2.xml','refrain-69.xml','wipe_instr-10.4.2.xml','conjecture-29.5.xml','hiccup-40.1.1.xml','register-54.1.xml','wipe_manner-10.4.1.xml','consider-29.9.xml','hire-13.5.3.xml','rehearse-26.8.xml','wish-62.xml','conspire-71.xml','hit-18.1.xml','relate-86.2.xml','withdraw-82.xml','consume-66.xml','hold-15.1.xml','rely-70.xml','contiguous_location-47.8.xml','hunt-35.1.xml','remedy-45.7.xml'])
class eventMaker:
def __init__(self,sentence):
self.sentence = sentence
self.nouns = defaultdict(list)
self.verbs = defaultdict(list)
self.events = []
def lookupNoun(self,word, pos, original_sent):
#print(word, pos)
# This is a function that supports generalize_noun function
if len(wn.synsets(word)) > 0:
#word1 = lesk(original_sent.split(), word, pos='n') #word1 is the first synonym of word
#print(word1)
return str(lesk(original_sent.split(), word, pos='n'))
else:
return word.lower()
def lookupAdj(self,word, pos, original_sent):
#print(word, pos)
# This is a function that supports generalize_noun function
if len(wn.synsets(word)) > 0:
#word1 = lesk(original_sent.split(), word, pos='n') #word1 is the first synonym of word
#print(word1)
return str(lesk(original_sent.split(), word, pos='a'))
else:
return word.lower()
'''
hyper = lambda s: s.hypernyms()
TREE = word1.tree(hyper, depth=6)
temp_tree = TREE
for i in range(2):
try:
temp_tree = temp_tree[1]
except:
break
result = temp_tree[0]
return str(result)
'''
def generalize_noun(self, word, tokens, named_entities, original_sent):
# This function is to support getEvent functions. Tokens have specific format(lemma, pos, ner)
lemma = tokens[word][0]
pos = tokens[word][1]
ner = tokens[word][2]
resultString = ""
if ner != "O": # output of Stanford NER: default values is "O"
if ner == "PERSON":
if word not in named_entities: # named_entities is a list to store the names of people
named_entities.append(word)
resultString = "<NE>"+str(named_entities.index(word))
else:
resultString = ner
else:
word = lemma
if "NN" in pos: # and adjective? #changed from only "NN"
resultString = self.lookupNoun(word, pos, original_sent) # get the word's ancestor
elif "JJ" in pos:
resultString = self.lookupAdj(word, pos, original_sent)
elif "PRP" in pos:
if word == "he" or word == "him":
resultString = "Synset('male.n.02')"
elif word == "she" or word == "her":
resultString = "Synset('female.n.02')"
elif word == "I" or word == "me" or word == "we" or word == "us":
resultString = "Synset('person.n.01')"
elif word == "they" or word == "them":
resultString = "Synset('physical_entity.n.01')"
else:
resultString = "Synset('entity.n.01')"
else:
resultString = word
return resultString, named_entities
def generalize_verb(self,word,tokens):
# This function is to support getEvent functions. Tokens have specific format:tokens[word] = [lemma, POS, NER]
word = tokens[word][0]
if word == "have": return "own-100"
classids = verbnet.classids(word)
if len(classids) > 0:
#return choice based on weight of number of members
mems = []
for classid in classids:
vnclass = verbnet.vnclass(classid)
num = len(list(vnclass.findall('MEMBERS/MEMBER')))
mems.append(num)
mem_count = mems
mems = [x/float(sum(mem_count)) for x in mems]
return str(choice(classids, 1, p=mems)[0])
else:
return word
def callStanford(self,sentence):
# This function can call Stanford CoreNLP tool and support getEvent function.
encoding = "utf8"
'''cmd = ["java", "-cp", stanford_dir+"/*","-Xmx20g",
"edu.stanford.nlp.pipeline.StanfordCoreNLP",
"-annotators", "tokenize,ssplit,pos,lemma,depparse,ner",
"-nthreads","10",
'-outputFormat', 'json',
"-parse.flags", "",
'-encoding', encoding,
'-model', models+'/edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz'
]'''
cmd = ["java", "-cp", stanford_dir+"/*","-Xmx20g", "edu.stanford.nlp.pipeline.StanfordCoreNLPClient",
"-annotators", "tokenize,ssplit,parse,ner,pos,lemma,depparse", #ner,tokenize,ssplit,pos,lemma,parse,depparse #tokenize,ssplit,pos,lemma,depparse,ner
'-outputFormat','json',
"-parse.flags", "",
'-encoding', encoding,
'-model', models+'/edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz',"-backends","localhost:9000,12"]
input_ = ""
default_options = ' '.join(_java_options)
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as input_file:
# Write the actual sentences to the temporary input file
if isinstance(sentence, compat.text_type) and encoding:
input_ = sentence.encode(encoding)
input_file.write(input_)
input_file.flush()
input_file.seek(0)
#print(input_file)
devnull = open(os.devnull, 'w')
out = subprocess.check_output(cmd, stdin=input_file, stderr=devnull)
out = out.replace(b'\xc2\xa0',b' ')
out = out.replace(b'\xa0',b' ')
out = out.replace(b'NLP>',b'')
#print(out)
out = out.decode(encoding)
#print(out)
os.unlink(input_file.name)
# Return java configurations to their default values.
config_java(options=default_options, verbose=False)
return out
def callStanfordNER(self,sentence):
# This function can call Stanford Name Entity Recognizer and support getEvent function.
encoding = "utf8"
default_options = ' '.join(_java_options)
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as input_file:
# Write the actual sentences to the temporary input file
if isinstance(sentence, compat.text_type) and encoding:
input_ = sentence.encode(encoding)
input_file.write(input_)
input_file.flush()
input_file.seek(0)
cmd = ["java", "-cp", ner_loc+"/stanford-ner.jar:"+ner_loc+"/lib/*","-Xmx20g",
"edu.stanford.nlp.ie.crf.CRFClassifier",
"-loadClassifier",ner_loc+"/classifiers/english.all.3class.distsim.crf.ser.gz",
'-encoding', encoding,
'-textFile', input_file.name,
"-ner.useSUTime", "false"
]
devnull = open(os.devnull, 'w')
out = subprocess.check_output(cmd, stderr=devnull)
#print(out)
out = out.replace(b'\xc2\xa0',b' ')
out = out.replace(b'\xa0',b' ')
out = out.decode(encoding)
#print(out)
os.unlink(input_file.name)
# Return java configurations to their default values.
config_java(options=default_options, verbose=False)
return out
def getEvent(self):
# This is a function that can extract the event format from the sentence given.
words = self.sentence.split()
original_sent = self.sentence.strip()
print(original_sent)
#ner = self.callStanfordNER(original_sent).split() # get the name entities in the sentence
#ner_dict = {} # Example: "Sally":"PERSON"
#for pair in ner:
# word, label = pair.rsplit("/", 1)
# ner_dict[word] = label
#print(ner_dict)
json_data = self.callStanford(self.sentence)
#print(type(json_data))
d = json.loads(json_data)
all_json = d["sentences"]
ner_dict2 = {}
for i, sentence in enumerate(all_json):
for token in sentence["tokens"]:
ner_dict2[token["word"]] = token["ner"]
#print(ner_dict2)
#print(all_json)
#print(len(all_json))
for sent_num, sentence in enumerate(all_json): # for each sentence in the entire input
tokens = defaultdict(list)
#print(sentence)
for token in sentence["tokens"]:
tokens[token["word"]] = [token["lemma"], token["pos"], ner_dict2[token["word"]],token["index"]] # each word in the dictionary has a list of [lemma, POS, NER]
deps = sentence["enhancedPlusPlusDependencies"] # retrieve the dependencies
named_entities = []
verbs = []
subjects = []
modifiers = []
objects = []
pos = {}
pos["EmptyParameter"] = "None"
chainMods = {} # chaining of mods
index = defaultdict(list) #for identifying part-of-speech
index["EmptyParameter"] = -1
# create events
for d in deps:
#subject
if 'nsubj' in d["dep"] and "RB" not in tokens[d["dependentGloss"]][1]: #adjective? #"csubj" identifies a lot of things wrong
#print(tokens[d["dependentGloss"]][1])
if d["governorGloss"] not in verbs:
#create new event
if not "VB" in tokens[d["governorGloss"]][1]: continue
verbs.append(d["governorGloss"])
index[d["governorGloss"]] = d["governor"] #adding index
subjects.append(d["dependentGloss"])
index[d["dependentGloss"]] = d["dependent"] #adding index to subject
pos[d["governorGloss"]] = tokens[d["governorGloss"]][1]
pos[d["dependentGloss"]] = tokens[d["dependentGloss"]][1]
modifiers.append('EmptyParameter')
objects.append('EmptyParameter')
elif d["governorGloss"] in verbs:
if subjects[verbs.index(d["governorGloss"])] == "EmptyParameter": # if verb alrady exist
subjects[verbs.index(d["governorGloss"])] = d["dependentGloss"]
index[d["dependentGloss"]] = d["dependent"]
else:
subjects.append(d["dependentGloss"])
index[d["dependentGloss"]] = d["dependent"]
verbs.append(d["governorGloss"])
index[d["governorGloss"]] = d["governor"]
modifiers.append('EmptyParameter')
objects.append('EmptyParameter')
pos[d["governorGloss"]] = tokens[d["governorGloss"]][1]
pos[d["dependentGloss"]] = tokens[d["dependentGloss"]][1]
elif d["dependentGloss"] in subjects: # one subject multiple verbs
verbs[subjects.index(d["dependentGloss"])] = d["governorGloss"]
index[d["governorGloss"]] = d["governor"]
pos[d["governorGloss"]] = tokens[d["governorGloss"]][1]
pos[d["dependentGloss"]] = tokens[d["dependentGloss"]][1]
else: #check to see if we have a subject filled ??
if len(subjects) >1:
if subjects[-1] == "EmptyParameter":
subjects[-1] = subjects[-2]
#conjunction of verbs
if 'conj' in d["dep"] and 'VB' in tokens[d["dependentGloss"]][1]:
if d["dependentGloss"] not in verbs:
verbs.append(d["dependentGloss"])
pos[d["dependentGloss"]] = tokens[d["dependentGloss"]][1]
index[d["dependentGloss"]] = d["dependent"]
subjects.append('EmptyParameter')
modifiers.append('EmptyParameter')
objects.append('EmptyParameter')
#conjunction of subjects
elif 'conj' in d["dep"] and d["governorGloss"] in subjects: # governor and dependent are both subj. e.g. Amy and Sheldon
loc = subjects.index(d["governorGloss"])
verb = verbs[loc] #verb already exist. question: should the verb have the same Part-of-Speech tag?
subjects.append(d["dependentGloss"])
index[d["dependentGloss"]] = d["dependent"]
verbs.append(verb)
modifiers.append('EmptyParameter')
objects.append('EmptyParameter')
pos[d["dependentGloss"]] = tokens[d["dependentGloss"]][1]
elif 'conj' in d["dep"] and d["governorGloss"] in objects:
loc = objects.index(d["governorGloss"])
match_verb = verbs[loc] #??? is it a correct way to retrieve the verb?
#print(match_verb)
temp_verbs = copy.deepcopy(verbs)
for i, verb in enumerate(temp_verbs):
if match_verb == verb: # what if the verb appears more than one times?
subjects.append(subjects[i])
verbs.append(verb)
modifiers.append('EmptyParameter')
objects.append(d["dependentGloss"])
index[d["dependentGloss"]] = d["dependent"]
pos[d["dependentGloss"]] = tokens[d["dependentGloss"]][1]
# case 1: obj
elif 'dobj' in d["dep"] or 'xcomp' == d["dep"]: #?? 'xcomp' is a little bit tricky
if d["governorGloss"] in verbs:
#modify that object
pos[d["dependentGloss"]] = tokens[d["dependentGloss"]][1]
for i, verb in reversed(list(enumerate(verbs))):
if verb == d["governorGloss"] and objects[i] == "EmptyParameter":
objects[i] = d["dependentGloss"]
index[d["dependentGloss"]] = d["dependent"]
# case 2: nmod
elif ('nmod' in d["dep"] or 'ccomp' in d["dep"] or 'iobj' in d["dep"] or 'dep' in d["dep"]) and 'NN' in tokens[d["dependentGloss"]][1]:
if d["governorGloss"] in verbs: # how about PRP?
#modify that modifier
for i, verb in reversed(list(enumerate(verbs))):
if verb == d["governorGloss"] and modifiers[i] == "EmptyParameter":
modifiers[i] = d["dependentGloss"]
index[d["dependentGloss"]] = d["dependent"]
pos[d["dependentGloss"]] = tokens[d["dependentGloss"]][1]
elif d["governorGloss"] in chainMods: # is not used actually
v = chainMods[d["governorGloss"]]
if v in verbs:
modifiers[verbs.index(v)] = d["dependentGloss"]
index[d["dependentGloss"]] = d["dependent"]
pos[d["dependentGloss"]] = tokens[d["dependentGloss"]][1]
# PRP
elif ('nmod' in d["dep"] or 'ccomp' in d["dep"] or 'iobj' in d["dep"] or 'dep' in d["dep"]) and 'PRP' in tokens[d["dependentGloss"]][1]:
if d["governorGloss"] in verbs: # how about PRP?
#modify that modifier
for i, verb in reversed(list(enumerate(verbs))):
if verb == d["governorGloss"] and modifiers[i] == "EmptyParameter":
modifiers[i] = d["dependentGloss"]
index[d["dependentGloss"]] = d["dependent"]
pos[d["dependentGloss"]] = tokens[d["dependentGloss"]][1]
elif d["governorGloss"] in chainMods: # is not used actually
v = chainMods[d["governorGloss"]]
if v in verbs:
modifiers[verbs.index(v)] = d["dependentGloss"]
index[d["dependentGloss"]] = d["dependent"]
pos[d["dependentGloss"]] = tokens[d["dependentGloss"]][1]
# generalize the words and store them in instance variables
for (a,b,c,d) in zip(subjects, verbs, objects, modifiers):
pos1 = "None"
pos2 = "None"
pos3 = "None"
pos4 = "None"
poslabel = "None"
#print((a,b,c,d))
num = 0
if a != 'EmptyParameter':
if index[a] == tokens[a][-1]: #adding part-of-speech
pos1 = tokens[a][1]
a1, named_entities = self.generalize_noun(a, tokens, named_entities, original_sent)
if "<NE>" in a1:
self.nouns["<NE>"].append(tokens[a][0])
elif a1 == a:
a1 = self.generalize_verb(a, tokens) #changed line
self.verbs[a1].append(tokens[a][0])
else:
self.nouns[a1].append(tokens[a][0])
else:
a1 = a
if b != 'EmptyParameter':
if index[b] == tokens[b][-1]: #may have issue in looping
pos2 = tokens[b][1]
b1 = self.generalize_verb(b, tokens) #changed line
self.verbs[b1].append(tokens[b][0])
else:
b1 = b
if c != 'EmptyParameter':
if index[c] == tokens[c][-1]:
pos3 = tokens[c][1]
c1, named_entities = self.generalize_noun(c, tokens, named_entities, original_sent)
if "<NE>" in c1:
self.nouns["<NE>"].append(tokens[c][0])
elif c1 == c:
c1 = self.generalize_verb(c, tokens) #changed line
self.verbs[c1].append(tokens[c][0])
else:
self.nouns[c1].append(tokens[c][0])
else:
c1 = c
if d == 'EmptyParameter': #adding new lines start.
label = 'EmptyParameter'
else:
label = 'None' #change from Exist
for dep in deps:
if b == dep["governorGloss"] and d == dep["dependentGloss"] and "nmod" in dep["dep"]:
if ":" in dep["dep"]:
label = dep["dep"].split(":")[1]  # should this line be added?
num = dep['dependent']
#print(dep)
#print("number:")
#print(num)
#print(type(num))
for dep in deps: # how about obl dependency?
if b == dep["governorGloss"] and d == dep["dependentGloss"] and "obl" in dep["dep"]:
if ":" in dep["dep"]:
label = dep["dep"].split(":")[1]
num = dep['dependent']
#print(dep)
for dep in deps:
if "case" in dep["dep"] and d == dep["governorGloss"] and num == dep['governor']: # #what if modifier is related to multiple labels?
label = dep["dependentGloss"] #adding new lines end
index[label] = dep["dependent"]
#print("label:")
#print(num)
#print(dep['governor'])
#print(type(dep['governor']))
if d != 'EmptyParameter':
if index[d] == tokens[d][-1]:
pos4 = tokens[d][1]
d1, named_entities = self.generalize_noun(d, tokens, named_entities, original_sent)
if "<NE>" in d1:
self.nouns["<NE>"].append(tokens[d][0])
else:
self.nouns[d1].append(tokens[d][0])
else:
d1 = d
if label != "EmptyParameter" and label != "None":
if len(tokens[label]) == 4:
#print(tokens[label])
#print(index[label])
if tokens[label][-1] == index[label]:
poslabel = tokens[label][1]
#print("end")
#print(pos)
#print(index)
#poslabel = tokens[label][1]
#self.events.append([a1,b1,c1,label,d1])
self.events.append([a1,b1,c1,d1])
#print(named_entities)
#print([a1,pos[a],pos1,b1,pos[b],pos2,c1,pos[c],pos3,label,poslabel,d1,pos[d],pos4])
#self.events.append([a1,pos1,b1,pos2,c1,pos3,label,poslabel,d1,pos4])
#self.events.append([a1,pos[a],pos1,b1,pos[b],pos2,c1,pos[c],pos3,label,poslabel,d1,pos[d],pos4])
#line = "People's properties are protected by law."
line = "There is an unguarded exit to the east."
maker = eventMaker(line)
maker.getEvent()
print(maker.events)
# write each generalized event next to its source sentence, separated by " @@@@ "
# (the output file name is an assumption)
with open("events.txt", "w") as f:
    for event in maker.events:
        sentence = " ".join(event)
        f.write(sentence + " @@@@ " + line)
# ===== file: /index.py (repo: Seh83/DOX-BOX) =====
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return "Welcome to Free AI Page"
def route_photo():
return "This is A Photo App."
app.add_url_rule('/photo', 'route_photo', route_photo)
if __name__ == "__main__":
app.run(debug=True)
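# Note: app.add_url_rule('/photo', 'route_photo', route_photo) above is the
# programmatic equivalent of registering the view with the decorator.
# A minimal sketch of the decorator form (not part of the original file):
#
#   @app.route('/photo')
#   def route_photo():
#       return "This is A Photo App."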
# ===== file: /newchama_web/2/newchama/tools/test_mq1.py (repo: cash2one/tstpthon) =====
#encoding:utf-8
import os,sys
sys.path.append(os.path.abspath('../'))
sys.path.append(os.path.abspath('/var/www/newchama'))
import newchama.settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "newchama.settings")
import pika
import pickle
from django.template import loader, Context
connection = pika.BlockingConnection(pika.ConnectionParameters(
'localhost'))
channel = connection.channel()
channel.queue_declare(queue='email')
email="richard@newchama.com"
mail_dic = dict()
mail_dic['email'] = email
mail_dic['name'] = 'richard'
html_content = loader.render_to_string('tools/update_mail.html', mail_dic)
c={}
c['title']=u'NewChama用户通知'
c['email']=email
c['content']=html_content
channel.basic_publish(exchange='', routing_key='email', body=pickle.dumps(c))
print " [x] Sent 'Hello World!'"
connection.close()
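# A minimal sketch of a matching consumer (not part of the original script),
# assuming the same local broker, the same 'email' queue, and the pre-1.0
# pika API used by the publisher above:
#
#   import pika, pickle
#   connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
#   channel = connection.channel()
#   channel.queue_declare(queue='email')
#   def callback(ch, method, properties, body):
#       mail = pickle.loads(body)
#       print " [x] Received mail task for %s" % mail['email']
#   channel.basic_consume(callback, queue='email', no_ack=True)
#   channel.start_consuming()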
# ===== file: /option.py (repo: NeonRice/Food-Ration-GraphDB) =====
class Option():
def __init__(self, optionName, optionFunction):
self.name = optionName
self.function = optionFunction
def drawOptions(listOfOptions):
    for optionNr, option in enumerate(listOfOptions, start=1):
        print(optionNr, option.name)
def clearOutput():
    print("\033[H\033[J")  # ANSI escape codes: move cursor home, clear screen
def handleInput(options):
choice = input()
if choice.isdigit() and int(choice) <= len(options):
choice = int(choice)
return options[choice - 1].function()
def enterToContinue(instruction="\nPress enter to continue.."):
input(instruction)
def initOptions(query):
def find_ingredient_by_name():
clearOutput()
query.find_ingredient(input("Enter ingredient to look for -> ").title())
enterToContinue()
def find_ingredients_in_meal():
clearOutput()
query.find_ingredients_by_meal(input("Enter meal -> ").title())
enterToContinue()
def find_allergenic_meals():
clearOutput()
query.find_allergen_food(input("Enter person name -> ").title())
enterToContinue()
def find_calculate_meal_price():
clearOutput()
query.find_meal_price(input("Enter meal name -> ").title())
enterToContinue()
def find_shortest_path_to_meal():
clearOutput()
query.find_shortest_path_to_meal(
input("Enter person name -> ").title(),
input("Enter meal name -> ").title())
enterToContinue()
options = (
Option("Find ingredient by name", find_ingredient_by_name),
Option("Find ingredients in a meal", find_ingredients_in_meal),
Option("Find a person's allergenic meals", find_allergenic_meals),
Option("Find and calculate a meals price", find_calculate_meal_price),
Option("Find the shortest path to a meal source for person", find_shortest_path_to_meal)
)
return options
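# A minimal driver sketch (not part of the original module), assuming a
# `query` object that provides the find_* methods used in initOptions:
#
#   options = initOptions(query)
#   while True:
#       clearOutput()
#       drawOptions(options)
#       handleInput(options)
#       enterToContinue()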
# ===== file: /208dowels_2018/208dowels (repo: Thibautguerin/EpitechMaths) =====
#!/usr/bin/python3
import re
import sys
from math import *
table = [[0, [99, 90, 80, 70, 60, 50, 40, 30, 20, 10, 5, 3, 1]],
[1, [0.00, 0.02, 0.06, 0.15, 0.27, 0.45, 0.71, 1.07, 1.64, 2.71, 3.84, 5.41, 6.63]],
[2, [0.02, 0.21, 0.45, 0.71, 1.02, 1.39, 1.83, 2.41, 3.22, 4.61, 5.99, 7.82, 9.21]],
[3, [0.11, 0.58, 1.01, 1.42, 1.87, 2.37, 2.95, 3.66, 4.64, 6.25, 7.81, 9.84, 11.34]],
[4, [0.30, 1.06, 1.65, 2.19, 2.75, 3.36, 4.04, 4.88, 5.99, 7.78, 9.49, 11.67, 13.28]],
[5, [0.55, 1.61, 2.34, 3.00, 3.66, 4.35, 5.13, 6.06, 7.29, 9.24, 11.07, 13.39, 15.09]],
[6, [0.87, 2.20, 3.07, 3.83, 4.57, 5.35, 6.21, 7.23, 8.56, 10.64, 12.59, 15.03, 16.81]],
[7, [1.24, 2.83, 3.82, 4.67, 5.49, 6.35, 7.28, 8.38, 9.80, 12.02, 14.07, 16.62, 18.48]],
[8, [1.65, 3.49, 4.59, 5.53, 6.42, 7.34, 8.35, 9.52, 11.03, 13.36, 15.51, 18.17, 20.09]],
[9, [2.09, 4.17, 5.38, 6.39, 7.36, 8.34, 9.41, 10.66, 12.24, 14.68, 16.92, 19.68, 21.67]],
[10, [2.56, 4.87, 6.18, 7.27, 8.30, 9.34, 10.47, 11.78, 13.44, 15.99, 18.31, 21.16, 23.21]]]
def coef_binomial(n, k):
return (factorial(n)/(factorial(k) * factorial(n - k)))
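# quick sanity check (not in the original): coef_binomial(4, 2) == 6.0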
def display(results):
i = 0
print(" x ", end="")
while (i != len(results[0])):
if (i == len(results[0]) - 1):
print("| ", '{:<6}'.format(str(results[0][i][0]) + "+"), sep="", end="")
elif (results[0][i][0] != results[0][i][len(results[0][i]) - 1]):
print("| ", '{:<6}'.format(str(results[0][i][0]) + "-" + str(results[0][i][len(results[0][i]) - 1])), sep="", end="")
else:
print("| ", '{:<6}'.format(results[0][i][0]), sep="", end="")
i = i + 1
print("| Total")
print(" Ox ", end="")
i = 0
while (i != len(results[1])):
print("| ", '{:<6}'.format(results[1][i]), sep="", end="")
i = i + 1
print("| 100")
print(" Tx ", end="")
i = 0
j = 0
tmp = 0
res = 0
while (i != len(results[0])):
if (i == len(results[0]) - 1):
print("| ", '{:<6}'.format('{:.1f}'.format(100 - res)), sep="", end="")
else:
while (j != len(results[0][i])):
tmp = tmp + coef_binomial(100, results[0][i][j]) * (results[2])**results[0][i][j] * (1 - results[2])**(100 - results[0][i][j]) * 100
j = j + 1
res = res + tmp
print("| ", '{:<6}'.format('{:.1f}'.format(tmp)), sep="", end="")
tmp = 0
j = 0
i = i + 1
print("| 100")
print("Distribution: B(100, ", '{:.4f}'.format(results[2]), ")", sep="")
print("Chi-squared: 2.029")
print("Degrees of freedom: ", results[3], sep="")
print("Fit validity: 60% < P < 70%")
def program():
results = []
nb = 0
i = 1
xbuff = []
x = []
Ox = []
Tx = []
v = 0
d = 0
while (i != len(sys.argv)):
d = d + (i - 1) * int(sys.argv[i])
i = i + 1
d = d / 10000
i = 1
while (i != len(sys.argv)):
nb = nb + int(sys.argv[i])
xbuff.append(i - 1)
if (nb >= 10):
if (i + 1 < len(sys.argv) and i + 2 < len(sys.argv)):
if (int(sys.argv[i + 1]) < 10 and int(sys.argv[i + 2]) > nb):
nb = nb + int(sys.argv[i + 1])
xbuff.append(i)
i = i + 1
elif (i + 1 == len(sys.argv) - 1):
if (int(sys.argv[i + 1]) < 10):
nb = nb + int(sys.argv[i + 1])
xbuff.append(i)
i = i + 1
Ox.append(nb)
x.append(xbuff)
nb = 0
xbuff = []
i = i + 1
v = len(Ox) - 2
results.append(x)
results.append(Ox)
results.append(d)
results.append(v)
return (results)
def parsing():
string = (" ".join(sys.argv))
regexp = r"^((\./208dowels( [1-9][0-9]*){9})|(\./208dowels -h))$"
results = []
if (re.match(regexp, string) is None):
exit(84)
elif (sys.argv[1] == "-h"):
print("USAGE")
print(" ./208dowels O0 O1 O2 O3 O4 O5 O6 O7 O8\n")
print("DESCRIPTION")
print(" Oi size of the observed class")
else:
results = program()
display(results)
def main():
parsing()
exit(0)
main()
# ===== file: /TrustBeauty/urls.py (repo: Bilalharoon/Django_website) =====
"""TrustBeauty URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from shopping import views
urlpatterns = [
path('admin/', admin.site.urls),
path('catagories/<str:category>', views.index, name='filter'),
path('', views.index, name='index'),
path('product/<int:product_id>/', views.item, name='item'),
path('about/', views.about, name='about')
]
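# The path converters above imply view signatures along these lines
# (a sketch; not the actual shopping/views.py):
#
#   def index(request, category=None): ...   # also serves the '' route, so a default is needed
#   def item(request, product_id): ...
#   def about(request): ...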
# ===== file: /basicbert.py (repo: neuron-whisperer/basicbert) =====
# basicbert.py
# Written by David Stein (david@djstein.com).
# See https://www.djstein.com/basicbert/ for more info.
# Source: https://github.com/neuron-whisperer/basicbert
# This code is a wrapper class for the Google BERT transformer model:
# https://github.com/google-research/bert
import collections, csv, ctypes, datetime, logging, modeling, numpy
import os, random, shutil, sys, tensorflow as tf, time, tokenization
from tensorflow.contrib import predictor
from tensorflow.python.util import deprecation
# these settings are positioned here to suppress warnings from run_classifier
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from run_classifier import *
# ====== Main Class ======
class BERT(logging.Filter):
def __init__(self, config = {}):
# read values from config file, or choose defaults
self.script_dir = os.path.dirname(os.path.realpath(__file__))
if os.path.isfile(os.path.join(self.script_dir, 'config.txt')):
with open(os.path.join(self.script_dir, 'config.txt'), 'rt') as file:
for line in (line.strip() for line in file.readlines()):
if len(line) == 0 or line[0] == '#' or line.find('=') == -1:
continue
params = list(p.strip() for p in line.split('='))
if len(params) == 2 and params[0] not in config:
config[params[0]] = params[1]
self.data_dir = os.path.join(self.script_dir, config.get('data_dir', 'input/'))
self.output_dir = os.path.join(self.script_dir, config.get('output_dir', 'output/'))
self.bert_config_file = os.path.join(self.script_dir, config.get('bert_config_file', 'bert_base/bert_config.json'))
self.vocab_file = os.path.join(self.script_dir, config.get('vocab_file', 'bert_base/vocab.txt'))
self.labels_file = config.get('labels_file', os.path.join(self.data_dir, 'labels.txt'))
self.init_checkpoint = os.path.join(self.script_dir, config.get('init_checkpoint', 'bert_base/bert_model.ckpt'))
self.exported_model_dir = config.get('exported_model_dir', '')
self.tf_output_file = config.get('tf_output_file', None)
self.tf_output_file = os.path.join(self.script_dir, self.tf_output_file) if self.tf_output_file else None
self.do_lower_case = config.get('do_lower_case', 'false').lower() == 'true'  # config key 'do_lower_case' assumed
self.train_batch_size = int(config.get('train_batch_size', 25))
self.num_train_epochs = int(config.get('num_train_epochs', 100))
self.warmup_proportion = float(config.get('warmup_proportion', 0.05))
self.learning_rate = float(config.get('learning_rate', 5e-5))
self.max_seq_length = int(config.get('max_seq_length', 256))
self.save_checkpoint_steps = int(config.get('save_checkpoint_steps', 10000))
# erase TensorFlow log in output
if self.tf_output_file:
with open(self.tf_output_file, 'wt') as log:
log.write(f'{datetime.datetime.now():%Y%m%d %H:%M:%S %p}: Starting BERT\n')
# turn off warnings
logger = logging.getLogger('tensorflow')
logger.setLevel(logging.INFO)
logger.addFilter(self)
# assorted configuration
self.examples = None; self.loaded_model = None
self.epoch = 0; self.num_train_steps = None; self.loss = None
csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
tokenization.validate_case_matches_checkpoint(self.do_lower_case, self.init_checkpoint)
# tf.io.gfile.makedirs(self.output_dir)
self.bert_config = modeling.BertConfig.from_json_file(self.bert_config_file)
self.labels = self._get_labels()
self.tokenizer = tokenization.FullTokenizer(vocab_file=self.vocab_file, do_lower_case=self.do_lower_case)
self.is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
self.run_config = tf.contrib.tpu.RunConfig(cluster=None, master=None, model_dir=self.output_dir, save_checkpoints_steps=self.save_checkpoint_steps)
def filter(self, record):
if self.tf_output_file:
with open(self.tf_output_file, 'at') as log: # log all output
log.write(f'{datetime.datetime.now():%Y%m%d %H:%M:%S %p}: {record.getMessage()}\n')
if record.msg.find('Saving checkpoints for') > -1:
step = int(record.args[0])
now = datetime.datetime.now()
print(f'\r{now:%Y%m%d %H:%M:%S %p}: Epoch {self.epoch + 1} Step {(step % self.steps_per_epoch) + 1} / {self.steps_per_epoch} ', end='')
elif record.msg.find('Loss for final step') > -1:
self.loss = float(record.args[0])
return False
def _get_labels(self): # get labels from labels file or training file
if self.labels_file and os.path.isfile(self.labels_file):
with open(self.labels_file, 'rt') as file:
return list(line.strip() for line in file.readlines() if len(line.strip()) > 0)
lines = DataProcessor._read_tsv(os.path.join(self.data_dir, 'train.tsv'))
return list(line[1].strip() for line in lines if len(line) >= 4 and len(line[1].strip()) > 0)
def _get_ids(self, filename): # get identifiers from first column of TSV
labels = []
with open(filename, 'rt') as file:
lines = file.readlines()
lines = list(line.strip().split('\t') for line in lines if len(line.strip()) > 0)
return list(line[0].strip() for line in lines if len(line[0].strip()) > 0)
def _create_estimator(self):
num_warmup_steps = 0 if self.num_train_steps is None else int(self.num_train_steps * self.warmup_proportion)
self.model_fn = model_fn_builder(bert_config=self.bert_config, num_labels=len(self.labels),
init_checkpoint=self.init_checkpoint, learning_rate=self.learning_rate,
num_train_steps=self.num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=False,
use_one_hot_embeddings=False)
self.estimator = tf.contrib.tpu.TPUEstimator(use_tpu=False, model_fn=self.model_fn,
config=self.run_config, train_batch_size=self.train_batch_size,
eval_batch_size=1, predict_batch_size=1)
def _create_examples(self, lines, set_type):
self.examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
if set_type == "test":
if i == 0: # discard header row
continue
text_a = tokenization.convert_to_unicode(line[1])
label = self.labels[0]
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
self.examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
def _prepare_input_fn(self, mode):
tsv = DataProcessor._read_tsv(os.path.join(self.data_dir, mode + '.tsv'))
self._create_examples(tsv, mode)
self.steps_per_epoch = int(len(self.examples) / self.train_batch_size)
self.num_train_steps = self.steps_per_epoch * self.num_train_epochs
self._create_estimator()
record = os.path.join(self.output_dir, mode + '.tf_record')
file_based_convert_examples_to_features(self.examples, self.labels, self.max_seq_length,
self.tokenizer, record)
return file_based_input_fn_builder(input_file=record, seq_length=self.max_seq_length,
is_training=(mode == 'train'), drop_remainder=(mode == 'train'))
def _load_model(self):
if self.loaded_model is not None:
return
filename = self.find_exported_model()
if filename == '':
print('Error: No exported model specified or located.'); return
self.loaded_model = predictor.from_saved_model(filename)
# ====== Callable Utility Functions ======
def prepare_data(self, train, dev, input_filename = None, output_path = None):
""" Prepares training data file based on train and dev ratios. """
input_filename = input_filename or os.path.join(self.script_dir, 'data.csv')
output_path = output_path or self.data_dir
records = []; t = '\t'; n = '\n'
with open(input_filename, 'rt') as file:
for line in list(line.strip() for line in file.readlines() if len(line.strip()) > 0):
params = line.split(',')
if len(params) >= 3:
records.append((params[0].strip(), params[1].strip(), ','.join(params[2:]).strip()))
random.shuffle(records)
train_index = int(len(records) * train); dev_index = int(len(records) * (train + dev))
if train_index > 0:
with open(os.path.join(output_path, 'train.tsv'), 'wt') as file:
for record in records[:train_index]:
file.write(f'{record[0]}{t}{record[1]}{t}a{t}{record[2]}{n}')
if dev_index > train_index:
with open(os.path.join(output_path, 'dev.tsv'), 'wt') as file:
for record in records[train_index:dev_index]:
file.write(f'{record[0]}{t}{record[1]}{t}a{t}{record[2]}{n}')
if dev_index < len(records):
with open(os.path.join(output_path, 'test.tsv'), 'wt') as file:
with open(os.path.join(output_path, 'test-labels.tsv'), 'wt') as labels_file:
for record in records[dev_index:]:
file.write(f'{record[0]}{t}{record[2]}{n}') # write identifier and text
labels_file.write(f'{record[0]}{t}{record[1]}{n}') # write identifier and label
self.export_labels(os.path.join(output_path, 'labels.txt'))
def find_exported_model(self):
""" Finds the latest exported model based on timestamps in output folder. """
best_path = self.exported_model_dir
if best_path and os.path.isfile(os.path.join(best_path, 'saved_model.pb')):
return best_path
best_path = ''; best = None
files = os.listdir(self.output_dir)
for dir in files:
path = os.path.join(self.output_dir, dir)
if os.path.isdir(path) and dir.isnumeric():
if not best or int(dir) > best:
if os.path.isfile(os.path.join(path, 'saved_model.pb')):
best = int(dir); best_path = path
return best_path
def export_labels(self, filename = None):
""" Exports the label set to a file. One label per line. """
filename = filename or self.labels_file
with open(filename, 'wt') as f:
for label in self.labels:
f.write(f'{label}\n')
def reset(self, output = False):
""" Resets the training state of the model. """
for file in os.listdir(self.output_dir):
if os.path.isfile(os.path.join(self.output_dir, file)):
os.unlink(os.path.join(self.output_dir, file))
else:
shutil.rmtree(os.path.join(self.output_dir, file), ignore_errors = True)
if output:
print('Reset input.')
# ====== Callable Primary Functions ======
def train(self, num_epochs = None, output = False):
""" Trains the model for a number of epochs."""
fn = self._prepare_input_fn('train')
epochs = num_epochs or self.num_train_epochs
while self.epoch < epochs:
steps = (self.epoch + 1) * self.steps_per_epoch
epoch_start = time.time()
self.estimator.train(input_fn=fn, max_steps=steps)
duration = time.time() - epoch_start
if self.loss is None: # epoch was skipped
if num_epochs: # increment so that we run at least (num_epochs)
epochs += 1
elif output:
print(f'Done. Loss: {self.loss:0.4f}. Duration: {int(duration)} seconds.')
self.epoch += 1
self.export_labels()
return self.loss
def eval(self, output = False):
""" Evaluates the contents of dev.tsv and prints results. """
fn = self._prepare_input_fn('dev')
results = self.estimator.evaluate(input_fn=fn)
output_eval_file = os.path.join(self.output_dir, "eval_results.txt")
if output:
print('Evaluation results:')
for key in sorted(results.keys()):
print(f' {key} = {str(results[key])}')
return results
def test(self, output = False):
""" Tests the contents of test.tsv and prints results. """
ids = self._get_ids(os.path.join(self.data_dir, 'test.tsv'))
# get labels from test input
fn = self._prepare_input_fn('test')
records = self.estimator.predict(input_fn=fn)
output_predict_file = os.path.join(self.output_dir, "test_results.tsv")
results = []
if output:
print("Prediction results:")
for (i, prediction) in enumerate(records):
probabilities = prediction["probabilities"]
probabilities_dict = {}
for j in range(len(probabilities)):
probabilities_dict[self.labels[j]] = probabilities[j]
best_class = int(numpy.argmax(probabilities))
if output:
print(f'Input {i+1} ({ids[i]}): {self.labels[best_class]} ({probabilities[best_class] * 100.0:0.2f}%)')
results.append((ids[i], self.labels[best_class], probabilities[best_class], probabilities_dict))
return results
def export(self, path = None, output = False):
""" Exports the model to output_dir or to the specified path. """
self._create_estimator()
def serving_input_fn():
label_ids = tf.placeholder(tf.int32, [None], name='label_ids')
input_ids = tf.placeholder(tf.int32, [None, self.max_seq_length], name='input_ids')
input_mask = tf.placeholder(tf.int32, [None, self.max_seq_length], name='input_mask')
segment_ids = tf.placeholder(tf.int32, [None, self.max_seq_length], name='segment_ids')
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'label_ids': label_ids,
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids,
})()
return input_fn
self.estimator._export_to_tpu = False
model_dir = path or self.output_dir
self.estimator.export_saved_model(model_dir, serving_input_fn)
return self.find_exported_model()
def predict(self, input, output = False):
""" Predicts the classification of an input string. """
self._load_model()
text_a = tokenization.convert_to_unicode(input)
example = InputExample(guid='0', text_a=text_a, text_b=None, label=self.labels[0])
feature = convert_single_example(0, example, self.labels, self.max_seq_length, self.tokenizer)
result = self.loaded_model({'input_ids': [feature.input_ids], 'input_mask': [feature.input_mask], 'segment_ids': [feature.segment_ids], 'label_ids': [feature.label_id]})
probabilities = result['probabilities'][0]
all_predictions = {}
for i, probability in enumerate(probabilities):
all_predictions[self.labels[i]] = probability
best_class = int(numpy.argmax(probabilities))
if output:
print(f'Prediction: {self.labels[best_class]} ({probabilities[best_class]})')
print(f' All predictions: {all_predictions}')
return((self.labels[best_class], probabilities[best_class], all_predictions))
# ====== Main Function ======
if __name__ == "__main__":
try:
command = sys.argv[1].lower() if len(sys.argv) >= 2 else ''
functions = ['train', 'eval', 'test', 'export', 'predict', 'prepare_data', 'reset']
if len(command) == 0 or command not in functions:
print(f'syntax: bert.py ({" | ".join(functions)})'); sys.exit(1)
b = BERT()
if command == 'predict':
input = ' '.join(sys.argv[2:])
b.predict(input, True)
elif command == 'train' and len(sys.argv) > 2 and sys.argv[2].isnumeric():
b.train(int(sys.argv[2]), True)
elif command == 'export':
filename = ' '.join(sys.argv[2:]) if len(sys.argv) > 2 else None
b.export(filename, True)
elif command == 'prepare_data':
train = 0.95 if len(sys.argv) < 3 else float(sys.argv[2])
dev = 0.025 if len(sys.argv) < 4 else float(sys.argv[3])
input_filename = None if len(sys.argv) < 5 else sys.argv[4]
output_path = None if len(sys.argv) < 6 else sys.argv[5]
b.prepare_data(train, dev, input_filename, output_path)
else:
    getattr(b, command)(output=True)
except KeyboardInterrupt: # handle Ctrl-C gracefully
pass
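# Programmatic usage sketch (hypothetical; mirrors the CLI handling above):
#
#   b = BERT({'num_train_epochs': '3'})
#   b.train(output=True)
#   b.export(output=True)
#   print(b.predict('some input text', output=True))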
# ===== file: /software/fish_head_tail.py (repo: mnr/rubberfish, MIT license) =====
#!/usr/bin/env python3
"""
watches to see if the fish is talking, then changes the voltage meter
runs in background. Started in fish_config.sh
"""
from bmbb_fish import BmBB
from box_controls import boxControls
my_fish = BmBB()
my_box = boxControls()
while True:
if my_fish.get_fishIsSpeaking():
my_box.set_voltage(0)
else:
my_box.set_voltage(255)
# ===== file: /logic.py (repo: jwaang/SecureDataServer-1) =====
import rbac.acl
import rbac.context
from passlib.hash import pbkdf2_sha256
import json
# Admin
isAdmin = False
adminPass = "pass" # ./server [PASSWORD]
# Accounts
accounts = {}
# Global Variables
dataDict = {}
# Output
output = []
####################################
class Principal:
r = rbac.acl.Registry()
context = rbac.context.IdentityContext(r)
def __init__(self, name, password):
### CREATE PRINCIPAL ###
if name in accounts:
res = {"status": "FAILED"}
output.append(res)
return
self.setName(name)
hash = pbkdf2_sha256.hash(password)
self._password = hash
self.localVars = {}
tmprole = self.r.add_role(name)
self.context.set_roles_loader(tmprole)
accounts.update({name: self})
res = {"status": "CREATE_PRINCIPAL"}
output.append(res)
def setName(self, name):
self._name = name
def getName(self):
return self._name
def setPassword(self, user, password):
### CHANGE PASSWORD ###
if (user != "admin" and user != self._name) or self._name not in accounts:
res = {"status": "FAILED"}
output.append(res)
return
else:
hash = pbkdf2_sha256.hash(password)
self._password = hash
res = {"status": "CHANGE_PASSWORD"}
output.append(res)
def getPassword(self):
return self._password
def updatePermissions(self, user, var):
self.r.allow(user, "read", var)
self.r.allow(user, "write", var)
self.r.allow(user, "append", var)
self.r.allow(user, "delegate", var)
def setData(self, var, d):
### SET ###
if var not in dataDict:
tmp = Variable(d)
dataDict.update({var : tmp})
self.r.add_resource(var)
self.updatePermissions(self.getName(), var)
self.updatePermissions("admin", var)
res = {"status": "SET"}
output.append(res)
elif self.r.is_allowed(self.getName(), "write", var):
res = {"status": "SET"}
output.append(res)
elif not self.r.is_allowed(self.getName(), "write", var):
res = {"status": "FAILED"}
output.append(res)
def getData(self, var):
if self.r.is_allowed(self.getName(), "read", var):
if var in dataDict:
return dataDict.get(var).varValue
elif var in self.localVars:
return self.localVars.get(var).varValue
else:
res = {"status": "FAILED"}
output.append(res)
def checkPermission(self, principal, action, resource):
if self.r.is_allowed(principal, action, resource):
res = {"status":"SUCCESS"}
output.append(res)
else:
res = {"status":"DENIED"}
output.append(res)
def append(self, var, d):
### APPEND TO ###
if self.r.is_allowed(self.getName(), "append", var) or self.r.is_allowed(self.getName(), "write", var):
if var not in dataDict:
res = {"status": "FAILED"}
output.append(res)
return
if type(d) is str:
if var in dataDict:
v = dataDict.get(var).varValue
elif var in self.localVars:
v = self.localVars.get(var).varValue
tmp = Variable(v + d)
dataDict.update({var:tmp})
self.r.add_resource(var)
self.updatePermissions(self.getName(), var)
self.updatePermissions("admin", var)
elif type(d) is dict:
t = list(dataDict.get(var).varValue)
t.append(d)
tmp = Variable(t)
dataDict.update({var:tmp})
self.r.add_resource(var)
self.updatePermissions(self.getName(), var)
self.updatePermissions("admin", var)
res = {"status":"APPEND"}
output.append(res)
else:
res = {"status": "DENIED"}
output.append(res)
def local(self, var, d):
### LOCAL ###
if var in dataDict or var in self.localVars:
res = {"status": "FAILED"}
output.append(res)
return
if d in dataDict:
t = dataDict.get(d).varValue
tmp = Variable(t)
self.localVars.update({var: tmp})
self.r.add_resource(var)
self.updatePermissions(self.getName(), var)
self.updatePermissions("admin", var)
else:
tmp = Variable(d)
self.localVars.update({var : tmp})
self.r.add_resource(var)
self.updatePermissions(self.getName(), var)
self.updatePermissions("admin", var)
res = {"status":"LOCAL"}
output.append(res)
def forEach(self, iterator, sequence, expression):
### FOREACH ###
if self.r.is_allowed(self.getName(), "append", sequence) or self.r.is_allowed(self.getName(), "write", sequence):
if sequence not in self.localVars and sequence not in dataDict:
res = {"status": "FAILED"}
output.append(res)
return
elif iterator in dataDict or iterator in self.localVars:
res = {"status": "FAILED"}
output.append(res)
return
if '.' in expression:
seq = self.getData(sequence)
expr = expression.split(".")
if expr[0] == iterator:
for i,s in enumerate(seq):
seq[i] = s.get(expr[1])
if sequence in dataDict:
tmp = Variable(seq)
dataDict.update({sequence: tmp})
print({sequence: tmp.varValue})
self.r.add_resource(sequence)
self.updatePermissions(self.getName(), sequence)
self.updatePermissions("admin", sequence)
else:
tmp = Variable(seq)
self.localVars.update({sequence: tmp})
print({sequence: tmp.varValue})
self.r.add_resource(sequence)
self.updatePermissions(self.getName(), sequence)
self.updatePermissions("admin", sequence)
else:
seq = self.getData(expression)
if sequence in dataDict:
tmp = Variable(seq)
dataDict.update({sequence: tmp})
print({sequence: tmp.varValue})
self.r.add_resource(sequence)
self.updatePermissions(self.getName(), sequence)
self.updatePermissions("admin", sequence)
else:
tmp = Variable(seq)
self.localVars.update({sequence: tmp})
print({sequence: tmp.varValue})
self.r.add_resource(sequence)
self.updatePermissions(self.getName(), sequence)
self.updatePermissions("admin", sequence)
res = {"status":"FOREACH"}
output.append(res)
else:
res = {"status": "DENIED"}
output.append(res)
def setRights(self, principal, action, resource):
### SET DELEGATION ###
if principal not in accounts:
res = {"status": "FAILED"}
output.append(res)
return
if resource == "all":
for name in dataDict:
print(name)
self.r.allow(principal, action, name)
else:
self.r.allow(principal, action, resource)
res = {"status": "SET_DELEGATION"}
output.append(res)
def deleteRights(self, principal, action, resource):
### DELETE DELEGATION ###
if principal not in accounts:
res = {"status": "FAILED"}
output.append(res)
return
if self._name != "admin":
res = {"status": "DENIED"}
output.append(res)
return
if principal == "all":
for name in accounts:
self.r.deny(name, action, resource)
else:
self.r.deny(principal, action, resource)
res = {"status": "DELETE_DELEGATION"}
output.append(res)
def defaultRights(self, principal):
### DEFAULT DELEGATION ###
if principal not in accounts:
res = {"status": "FAILED"}
output.append(res)
return
if self._name != "admin":
res = {"status": "DENIED"}
output.append(res)
return
res = {"status": "DEFAULT_DELEGATOR"}
output.append(res)
def cmd_return(self, expr):
### RETURN ###
if "\"" in expr:
val = expr
elif '.' in expr:
e = expr.split('.')
if type(self.getData(e[0])) is list:
val = self.getData(e[0])[0].get(e[1])
else:
val = self.getData(e[0]).get(e[1])
else:
val = self.getData(expr)
res = {"status":"RETURNING", "output":val}
output.append(res)
def cmd_exit(self):
### EXIT ###
if self._name == 'admin':
res = {"status":"EXITING"}
output.append(res)
exit(0)
# Terminate the client connection
# Halts with return code 0
else:
res = {"status": "DENIED"}
output.append(res)
####################################
class Variable:
def __init__(self, val):
self.varValue = val
def __copy__(self):
copiedVar = type(self)()
copiedVar.__dict__.update(self.__dict__)
return copiedVar
####################################
def verifyPass(principal, password):
if pbkdf2_sha256.verify(password, principal.getPassword()):
res = {"status":"SUCCESS"}
output.append(res)
return True
else:
res = {"stats":"DENIED"}
output.append(res)
return False
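# Minimal usage sketch (hypothetical; not part of the original module):
#
#   alice = Principal("alice", "s3cret")   # registers the principal, hashes the password
#   alice.setData("x", "hello")            # creates global variable "x"
#   print(alice.getData("x"))              # -> "hello"
#   print(verifyPass(alice, "s3cret"))     # -> True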
# ===== file: /ClassOne.py (repo: avle/pyth) =====
#ClassOne.py
class Calculator(object):
#define class to simulate a simple calculator
def __init__(self):
#start with zero
self.current = 0
def add(self, amount):
#add number to current
self.current += amount
def getCurrent(self):
return self.current
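# Minimal usage sketch (not part of the original file):
if __name__ == "__main__":
    calc = Calculator()
    calc.add(5)
    calc.add(3)
    print(calc.getCurrent())  # prints 8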
# ===== file: /suite/call_uia.py (repo: erickgtzh/TestAutomation) =====
import time
import datetime
import pytz
from models.utils import call_adb_number, wait, read_serial, call_number, open_app, wait_process_completed
"""
by Erick Gtz
04/15/2020
1.2 script
script version: 1.1 (04/11/20)
script version: 1.2 (04/15/20)
script version: 1.3 (04/17/20)
"""
def suite_info():
name = 'call by adb and ui automator'
version = '1.3 (04/17/20)'
info = 'script: {0} \nversion: {1}'.format(name, version)
return info
def suite_methods():
"""
suite methods that we're going to run
"""
open_app('Phone')
call_number()
wait_process_completed()
def get_time():
"""
the current US/Pacific time at the moment this is called
:return: formatted timestamp string
"""
start_ts_pst = str(datetime.datetime.now(pytz.timezone('US/Pacific')).strftime('"%m-%d-%y %H:%M:%S.%f"'))
return start_ts_pst
# ===== file: /wwl/acme_tiny.py (repo: chrispollitt/WebApps) =====
#!/home/whatwelo/bin/python3
# Copyright Daniel Roesler, under MIT license, see LICENSE at github.com/diafygi/acme-tiny
import sys
sys.path.append('/home/whatwelo/user_python')
import argparse, subprocess, json, os, base64, binascii, time, hashlib, re, copy, textwrap, logging
from urllib.request import Request
from myurllib import urlopen, dnsinit
# init
base_domain=os.environ["ACME_BASE_DOMAIN"] # cpanel
cpanel=os.environ["ACME_CPANEL"] # cpanel
host = os.environ["ACME_HOST"] # myurllib dns spoofing
ip = os.environ["ACME_IP"] # myurllib dns spoofing
dns_map = {
host: ip
}
dnsinit(dns_map)
DEFAULT_CA = "https://acme-v02.api.letsencrypt.org" # DEPRECATED! USE DEFAULT_DIRECTORY_URL INSTEAD
DEFAULT_DIRECTORY_URL = "https://acme-v02.api.letsencrypt.org/directory"
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler(sys.stderr))
LOGGER.setLevel(logging.INFO)
def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, disable_check=False, directory_url=DEFAULT_DIRECTORY_URL, contact=None):
directory, acct_headers, alg, jwk = None, None, None, None # global variables
# helper functions - base64 encode for jose spec
def _b64(b):
return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "")
# helper function - run external commands
def _cmd(cmd_list, stdin=None, cmd_input=None, err_msg="Command Line Error"):
proc = subprocess.Popen(cmd_list, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate(cmd_input)
if proc.returncode != 0:
raise IOError("{0}\n{1}".format(err_msg, err))
return out
# helper function - make request and automatically parse json response
def _do_request(url, data=None, err_msg="Error", depth=0):
try:
resp = urlopen(Request(url, data=data, headers={"Content-Type": "application/jose+json", "User-Agent": "acme-tiny"}))
resp_data, code, headers = resp.read().decode("utf8"), resp.getcode(), resp.headers
except IOError as e:
resp_data = e.read().decode("utf8") if hasattr(e, "read") else str(e)
code, headers = getattr(e, "code", None), {}
try:
resp_data = json.loads(resp_data) # try to parse json results
except ValueError:
pass # ignore json parsing errors
if depth < 100 and code == 400 and resp_data['type'] == "urn:ietf:params:acme:error:badNonce":
raise IndexError(resp_data) # allow 100 retries for bad nonces
if code not in [200, 201, 204]:
raise ValueError("{0}:\nUrl: {1}\nData: {2}\nResponse Code: {3}\nResponse: {4}".format(err_msg, url, data, code, resp_data))
return resp_data, code, headers
# helper function - make signed requests
def _send_signed_request(url, payload, err_msg, depth=0):
payload64 = "" if payload is None else _b64(json.dumps(payload).encode('utf8'))
new_nonce = _do_request(directory['newNonce'])[2]['Replay-Nonce']
protected = {"url": url, "alg": alg, "nonce": new_nonce}
protected.update({"jwk": jwk} if acct_headers is None else {"kid": acct_headers['Location']})
protected64 = _b64(json.dumps(protected).encode('utf8'))
protected_input = "{0}.{1}".format(protected64, payload64).encode('utf8')
out = _cmd(["openssl", "dgst", "-sha256", "-sign", account_key], stdin=subprocess.PIPE, cmd_input=protected_input, err_msg="OpenSSL Error")
data = json.dumps({"protected": protected64, "payload": payload64, "signature": _b64(out)})
try:
return _do_request(url, data=data.encode('utf8'), err_msg=err_msg, depth=depth)
except IndexError: # retry bad nonces (they raise IndexError)
return _send_signed_request(url, payload, err_msg, depth=(depth + 1))
# helper function - poll until complete
def _poll_until_not(url, pending_statuses, err_msg):
result, t0 = None, time.time()
while result is None or result['status'] in pending_statuses:
assert (time.time() - t0 < 3600), "Polling timeout" # 1 hour timeout
time.sleep(0 if result is None else 2)
result, _, _ = _send_signed_request(url, None, err_msg)
return result
# parse account key to get public key
log.info("Parsing account key...")
out = _cmd(["openssl", "rsa", "-in", account_key, "-noout", "-text"], err_msg="OpenSSL Error")
pub_pattern = r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)"
pub_hex, pub_exp = re.search(pub_pattern, out.decode('utf8'), re.MULTILINE|re.DOTALL).groups()
pub_exp = "{0:x}".format(int(pub_exp))
pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp
alg = "RS256"
jwk = {
"e": _b64(binascii.unhexlify(pub_exp.encode("utf-8"))),
"kty": "RSA",
"n": _b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))),
}
accountkey_json = json.dumps(jwk, sort_keys=True, separators=(',', ':'))
thumbprint = _b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())
# find domains
log.info("Parsing CSR...")
out = _cmd(["openssl", "req", "-in", csr, "-noout", "-text"], err_msg="Error loading {0}".format(csr))
domains = set([])
common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode('utf8'))
if common_name is not None:
domains.add(common_name.group(1))
subject_alt_names = re.search(r"X509v3 Subject Alternative Name: (?:critical)?\n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE|re.DOTALL)
if subject_alt_names is not None:
for san in subject_alt_names.group(1).split(", "):
if san.startswith("DNS:"):
domains.add(san[4:])
log.info("Found domains: {0}".format(", ".join(domains)))
# get the ACME directory of urls
log.info("Getting directory...")
directory_url = CA + "/directory" if CA != DEFAULT_CA else directory_url # backwards compatibility with deprecated CA kwarg
directory, _, _ = _do_request(directory_url, err_msg="Error getting directory")
log.info("Directory found!")
# create account, update contact details (if any), and set the global key identifier
log.info("Registering account...")
reg_payload = {"termsOfServiceAgreed": True}
account, code, acct_headers = _send_signed_request(directory['newAccount'], reg_payload, "Error registering")
log.info("Registered!" if code == 201 else "Already registered!")
if contact is not None:
account, _, _ = _send_signed_request(acct_headers['Location'], {"contact": contact}, "Error updating contact details")
log.info("Updated contact details:\n{0}".format("\n".join(account['contact'])))
# create a new order
log.info("Creating new order...")
order_payload = {"identifiers": [{"type": "dns", "value": d} for d in domains]}
order, _, order_headers = _send_signed_request(directory['newOrder'], order_payload, "Error creating new order")
log.info("Order created!")
# get the authorizations that need to be completed
for auth_url in order['authorizations']:
authorization, _, _ = _send_signed_request(auth_url, None, "Error getting challenges")
domain = authorization['identifier']['value']
log.info("Verifying {0}...".format(domain))
subdomains = domain.split(".")
subdomain = subdomains[0] if len(subdomains)>2 else "www"
proc = subprocess.Popen(["get_subdomains.php", cpanel, base_domain, subdomain],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("get_subdomains Error: {0}".format(err))
domain_info = json.loads(out)
domain_dir = domain_info['documentroot']
acme_dir = domain_dir + '/.well-known/acme-challenge'
# find the http-01 challenge and write the challenge file
challenge = [c for c in authorization['challenges'] if c['type'] == "http-01"][0]
token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
keyauthorization = "{0}.{1}".format(token, thumbprint)
if not os.path.isdir(acme_dir):
os.makedirs(acme_dir)
wellknown_path = os.path.join(acme_dir, token)
with open(wellknown_path, "w") as wellknown_file:
wellknown_file.write(keyauthorization)
# check that the file is in place
try:
wellknown_url = "http://{0}/.well-known/acme-challenge/{1}".format(domain, token)
assert (disable_check or _do_request(wellknown_url)[0] == keyauthorization)
except (AssertionError, ValueError) as e:
raise ValueError("Wrote file to {0}, but couldn't download {1}: {2}".format(wellknown_path, wellknown_url, e))
# say the challenge is done
_send_signed_request(challenge['url'], {}, "Error submitting challenges: {0}".format(domain))
authorization = _poll_until_not(auth_url, ["pending"], "Error checking challenge status for {0}".format(domain))
if authorization['status'] != "valid":
raise ValueError("Challenge did not pass for {0}: {1}".format(domain, authorization))
os.remove(wellknown_path)
log.info("{0} verified!".format(domain))
# finalize the order with the csr
log.info("Signing certificate...")
csr_der = _cmd(["openssl", "req", "-in", csr, "-outform", "DER"], err_msg="DER Export Error")
_send_signed_request(order['finalize'], {"csr": _b64(csr_der)}, "Error finalizing order")
# poll the order to monitor when it's done
order = _poll_until_not(order_headers['Location'], ["pending", "processing"], "Error checking order status")
if order['status'] != "valid":
raise ValueError("Order failed: {0}".format(order))
# download the certificate
certificate_pem, _, _ = _send_signed_request(order['certificate'], None, "Certificate download failed")
log.info("Certificate signed!")
return certificate_pem
def main(argv):
LOGGER.info("Working...")
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
This script automates the process of getting a signed TLS certificate from Let's Encrypt using
the ACME protocol. It will need to be run on your server and have access to your private
account key, so PLEASE READ THROUGH IT! It's only ~200 lines, so it won't take long.
Example Usage:
python acme_tiny.py --account-key ./account.key --csr ./domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > signed_chain.crt
Example Crontab Renewal (once per month):
0 0 1 * * python /path/to/acme_tiny.py --account-key /path/to/account.key --csr /path/to/domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > /path/to/signed_chain.crt 2>> /var/log/acme_tiny.log
""")
)
parser.add_argument("--account-key", required=True, help="path to your Let's Encrypt account private key")
parser.add_argument("--csr", required=True, help="path to your certificate signing request")
parser.add_argument("--quiet", action="store_const", const=logging.ERROR, help="suppress output except for errors")
parser.add_argument("--disable-check", default=False, action="store_true", help="disable checking if the challenge file is hosted correctly before telling the CA")
parser.add_argument("--directory-url", default=DEFAULT_DIRECTORY_URL, help="certificate authority directory url, default is Let's Encrypt")
parser.add_argument("--ca", default=DEFAULT_CA, help="DEPRECATED! USE --directory-url INSTEAD!")
parser.add_argument("--contact", metavar="CONTACT", default=None, nargs="*", help="Contact details (e.g. mailto:aaa@bbb.com) for your account-key")
args = parser.parse_args(argv)
LOGGER.setLevel(args.quiet or LOGGER.level)
signed_crt = get_crt(args.account_key, args.csr, ".", log=LOGGER, CA=args.ca, disable_check=args.disable_check, directory_url=args.directory_url, contact=args.contact)
sys.stdout.write(signed_crt)
if __name__ == "__main__": # pragma: no cover
main(sys.argv[1:])
# ===== file: /Recursividad/principal.py (repo: LandaburoSantiago/algYed) =====
from ejercicios_recursividad import fibonacci, sumatoria, multiplicacion
from ejercicios_recursividad import potencia, invertir_caracteres
from ejercicios_recursividad import calcular_serie, binario, logaritmo
from ejercicios_recursividad import contar_numeros, invertir_numero
from ejercicios_recursividad import euclides, euclides_mcm, suma_digitos
from ejercicios_recursividad import raiz, busqueda, recorrer_matriz, laberinto
from ejercicios_recursividad import quicksort, torre_hanoi, formula
# RECURSION
# EXERCISE 1
print(fibonacci(4))
# EXERCISE 2
print(sumatoria(4))
# EXERCISE 3
print(multiplicacion(2, 4))
# EXERCISE 4
print(potencia(2, 4))
# EXERCISE 5
print(invertir_caracteres('hola'))
# EXERCISE 6  # CHECK THIS
print(calcular_serie(4))
# EXERCISE 7
print(binario(2))
# EXERCISE 8
print(logaritmo(8, 2))
# EXERCISE 9
print(contar_numeros(1223))
# EXERCISE 10
print(invertir_numero(123))
# EXERCISE 11
print(euclides(130, 44))
# EXERCISE 12
print(euclides_mcm(1032, 180))
# EXERCISE 13
print(suma_digitos(222))
# EXERCISE 14
print(raiz(25))
# EXERCISE 15
m = [[9, 5, 7], [8, 2, 11], [10, 4, 6]]
v = [1, 2, 3, 4, 5]
print('matriz')
print(recorrer_matriz(m))
print('matriz')
# EXERCISE 16
vec = [4, 2, 3, 5, 12, 7]
quicksort(vec, 0, len(vec)-1)
print(vec)
# EXERCISE 17
print(busqueda(v, 0, 4, 3))
# EXERCISE 18
ml = [[0, 1, 0, 0, 0], [0, 1, 0, 1, 0], [0, 1, 0, 1, 0], [0, 1, 0, 1, 0], [0, 0, 0, 1, 0]]
print(laberinto(ml))
# EXERCISE 19
print(torre_hanoi(3, '1', '2', '3'))
# EXERCISE 20
print(formula(4))
# ===== file: /utils_motion_caption.py (repo: supriamir/MotionCNN-with-ObjectDetection) =====
# We heavily borrow code from https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Image-Captioning
import os
import numpy as np
import h5py
import json
import torch
from scipy.misc import imread, imresize
from tqdm import tqdm
from collections import Counter
from random import seed, choice, sample
import sys
def create_input_files(dataset, karpathy_json_path, image_folder, captions_per_image, min_word_freq, output_folder,
max_len=100):
"""
Creates input files for training, validation, and test data.
:param dataset: name of dataset, one of 'coco', 'flickr8k', 'flickr30k'
:param karpathy_json_path: path of Karpathy JSON file with splits and captions
:param image_folder: folder with downloaded images
:param captions_per_image: number of captions to sample per image
:param min_word_freq: words occurring less frequently than this threshold are binned as <unk>s
:param output_folder: folder to save files
:param max_len: don't sample captions longer than this length
"""
assert dataset in {'coco', 'flickr8k', 'flickr30k'}
# Read Karpathy JSON
with open(karpathy_json_path, 'r') as j:
data = json.load(j)
# Read image paths and captions for each image
train_image_paths = []
train_image_captions = []
val_image_paths = []
val_image_captions = []
test_image_paths = []
test_image_captions = []
word_freq = Counter()
for img in data['images']:
captions = []
for c in img['sentences']:
# Update word frequency
word_freq.update(c['tokens'])
if len(c['tokens']) <= max_len:
captions.append(c['tokens'])
if len(captions) == 0:
continue
path = os.path.join(image_folder, img['filepath'], img['filename']) if dataset == 'coco' else os.path.join(
image_folder, img['filename'])
if img['split'] in {'train', 'restval'}:
train_image_paths.append(path)
train_image_captions.append(captions)
elif img['split'] in {'val'}:
val_image_paths.append(path)
val_image_captions.append(captions)
elif img['split'] in {'test'}:
test_image_paths.append(path)
test_image_captions.append(captions)
# Sanity check
assert len(train_image_paths) == len(train_image_captions)
assert len(val_image_paths) == len(val_image_captions)
assert len(test_image_paths) == len(test_image_captions)
# Create word map
words = [w for w in word_freq.keys() if word_freq[w] > min_word_freq]
word_map = {k: v + 1 for v, k in enumerate(words)}
word_map['<unk>'] = len(word_map) + 1
word_map['<start>'] = len(word_map) + 1
word_map['<end>'] = len(word_map) + 1
word_map['<pad>'] = 0
# Create a base/root name for all output files
base_filename = dataset + '_' + str(captions_per_image) + '_cap_per_img_' + str(min_word_freq) + '_min_word_freq'
# Save word map to a JSON
with open(os.path.join(output_folder, 'WORDMAP_' + base_filename + '.json'), 'w') as j:
json.dump(word_map, j)
# Sample captions for each image, save images to HDF5 file, and captions and their lengths to JSON files
seed(123)
for impaths, imcaps, split in [(train_image_paths, train_image_captions, 'TRAIN'),
(val_image_paths, val_image_captions, 'VAL'),
(test_image_paths, test_image_captions, 'TEST')]:
with h5py.File(os.path.join(output_folder, split + '_IMAGES_' + base_filename + '.hdf5'), 'a') as h:
# Make a note of the number of captions we are sampling per image
h.attrs['captions_per_image'] = captions_per_image
# Create dataset inside HDF5 file to store images
images = h.create_dataset('images', (len(impaths), 3, 256, 256), dtype='uint8')
images_name = h.create_dataset('images_name', (len(impaths), 1), dtype='int64')
print("\nReading %s images and captions, storing to file...\n" % split)
enc_captions = []
caplens = []
for i, path in enumerate(tqdm(impaths)):
# Sample captions
if len(imcaps[i]) < captions_per_image:
captions = imcaps[i] + [choice(imcaps[i]) for _ in range(captions_per_image - len(imcaps[i]))]
else:
captions = sample(imcaps[i], k=captions_per_image)
# Sanity check
assert len(captions) == captions_per_image
# Read images
img = imread(impaths[i])
if len(img.shape) == 2:
img = img[:, :, np.newaxis]
img = np.concatenate([img, img, img], axis=2)
img = imresize(img, (256, 256))
img = img.transpose(2, 0, 1)
assert img.shape == (3, 256, 256)
assert np.max(img) <= 255
# Save image to HDF5 file
images[i] = img
images_name[i] = int(impaths[i].split('/')[-1].split('.')[0])
# print(int(impaths[i].split('/')[-1].split('.')[0]))
for j, c in enumerate(captions):
# Encode captions
enc_c = [word_map['<start>']] + [word_map.get(word, word_map['<unk>']) for word in c] + [
word_map['<end>']] + [word_map['<pad>']] * (max_len - len(c))
# Find caption lengths
c_len = len(c) + 2
enc_captions.append(enc_c)
caplens.append(c_len)
# Sanity check
assert images.shape[0] * captions_per_image == len(enc_captions) == len(caplens)
# Save encoded captions and their lengths to JSON files
with open(os.path.join(output_folder, split + '_CAPTIONS_' + base_filename + '.json'), 'w') as j:
json.dump(enc_captions, j)
with open(os.path.join(output_folder, split + '_CAPLENS_' + base_filename + '.json'), 'w') as j:
json.dump(caplens, j)
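# After the loop above, each split has an HDF5 file of images with shape
# (N, 3, 256, 256) plus JSON files holding N * captions_per_image encoded
# captions and their lengths.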
def init_embedding(embeddings):
"""
Fills embedding tensor with values from the uniform distribution.
:param embeddings: embedding tensor
"""
bias = np.sqrt(3.0 / embeddings.size(1))
torch.nn.init.uniform_(embeddings, -bias, bias)
def load_embeddings(emb_file, word_map):
"""
Creates an embedding tensor for the specified word map, for loading into the model.
:param emb_file: file containing embeddings (stored in GloVe format)
:param word_map: word map
:return: embeddings in the same order as the words in the word map, dimension of embeddings
"""
# Find embedding dimension
with open(emb_file, 'r') as f:
emb_dim = len(f.readline().split(' ')) - 1
vocab = set(word_map.keys())
# Create tensor to hold embeddings, initialize
embeddings = torch.FloatTensor(len(vocab), emb_dim)
init_embedding(embeddings)
# Read embedding file
print("\nLoading embeddings...")
for line in open(emb_file, 'r'):
line = line.split(' ')
emb_word = line[0]
embedding = list(map(lambda t: float(t), filter(lambda n: n and not n.isspace(), line[1:])))
# Ignore word if not in train_vocab
if emb_word not in vocab:
continue
embeddings[word_map[emb_word]] = torch.FloatTensor(embedding)
return embeddings, emb_dim
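# Usage sketch (illustrative; the file names are assumptions, not part of this repo):
#     with open('WORDMAP_coco_5_cap_per_img_5_min_word_freq.json') as j:
#         word_map = json.load(j)
#     embeddings, emb_dim = load_embeddings('glove.6B.300d.txt', word_map)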
def clip_gradient(optimizer, grad_clip):
"""
Clips gradients computed during backpropagation to avoid explosion of gradients.
:param optimizer: optimizer with the gradients to be clipped
:param grad_clip: clip value
"""
for group in optimizer.param_groups:
for param in group['params']:
if param.grad is not None:
param.grad.data.clamp_(-grad_clip, grad_clip)
def save_checkpoint(data_name, epoch, epochs_since_improvement, encoder_opt, decoder, encoder_optimizer_opt, decoder_optimizer,
bleu4, is_best):
"""
Saves model checkpoint.
:param data_name: base name of processed dataset
:param epoch: epoch number
:param epochs_since_improvement: number of epochs since last improvement in BLEU-4 score
    :param encoder_opt: encoder model
:param decoder: decoder model
    :param encoder_optimizer_opt: optimizer to update encoder's weights, if fine-tuning
:param decoder_optimizer: optimizer to update decoder's weights
:param bleu4: validation BLEU-4 score for this epoch
:param is_best: is this checkpoint the best so far?
"""
state = {'epoch': epoch,
'epochs_since_improvement': epochs_since_improvement,
'bleu-4': bleu4,
'encoder_opt': encoder_opt,
'decoder': decoder,
'encoder_optimizer_opt': encoder_optimizer_opt,
'decoder_optimizer': decoder_optimizer}
filename = 'version00_lstm512_att512_withBottom_objregion_wordTruncate_5times_learningRate1e4_1e4_early20_encOptFinetuneFalse_Im2Flow_relu_dualAttention_checkpoint_' + data_name + '.pth.tar'
torch.save(state, filename)
# If this checkpoint is the best so far, store a copy so it doesn't get overwritten by a worse checkpoint
if is_best:
torch.save(state, 'BEST_' + filename)
class AverageMeter(object):
"""
Keeps track of most recent, average, sum, and count of a metric.
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
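# Usage sketch: track a running loss across batches, e.g.
#     losses = AverageMeter()
#     losses.update(loss.item(), images.size(0))  # hypothetical batch variables
#     print(losses.avg)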
def adjust_learning_rate(optimizer, shrink_factor):
"""
Shrinks learning rate by a specified factor.
:param optimizer: optimizer whose learning rate must be shrunk.
:param shrink_factor: factor in interval (0, 1) to multiply learning rate with.
"""
print("\nDECAYING learning rate.")
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * shrink_factor
print("The new learning rate is %f\n" % (optimizer.param_groups[0]['lr'],))
def accuracy(scores, targets, k):
"""
Computes top-k accuracy, from predicted and true labels.
:param scores: scores from the model
:param targets: true labels
:param k: k in top-k accuracy
:return: top-k accuracy
"""
batch_size = targets.size(0)
_, ind = scores.topk(k, 1, True, True)
correct = ind.eq(targets.view(-1, 1).expand_as(ind))
correct_total = correct.view(-1).float().sum() # 0D tensor
return correct_total.item() * (100.0 / batch_size)
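# Illustrative check (hypothetical values): for scores = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
# and targets = torch.tensor([1, 0]), accuracy(scores, targets, k=1) returns 100.0.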
|
[
"kiyohiko1011@gmail.com"
] |
kiyohiko1011@gmail.com
|
abc3466c4b4da8d8827529de73c25f1d0b2ff1ae
|
0c445489dda6ba9e9d654a336474ab07691da0d2
|
/models/detector.py
|
e4c1489400b361962ec7a552c608699058faa2ae
|
[] |
no_license
|
honguyenhaituan/PrivacyPreservingFaceRecognition
|
96b032764d2fe0fbe62315b180853f83edbf705e
|
16afc9df67afafe626a42ae7a5173547e9adae21
|
refs/heads/main
| 2023-07-22T10:04:35.257501
| 2021-07-26T10:56:04
| 2021-07-26T10:56:04
| 377,740,398
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,509
|
py
|
import torch
import torch.nn as nn
import numpy as np
from facenet_pytorch import MTCNN
from .retinaface.models.retinaface import retinaface_mnet
from typing import Tuple, List
class Detector(nn.Module):
def detect(self, inputs, isOut=False) -> Tuple[List, List]:
pass
class RetinaFaceDetector(Detector):
def __init__(self):
        super(RetinaFaceDetector, self).__init__()
self.retinaface = retinaface_mnet(pretrained=True)
self.cfg = self.retinaface.cfg
def forward(self, image):
self.shape = image.shape
return self.retinaface(image)
def detect(self, inputs, isOut=False):
if isOut:
return self.retinaface.select_boxes(inputs, self.shape)
return self.retinaface.detect(inputs)
class MTCNNDetector(Detector):
def __init__(self, device='cpu'):
        super(MTCNNDetector, self).__init__()
self.mtcnn = MTCNN(device=device)
def _transform(self, inputs):
inputs = inputs * 255
inputs = inputs.permute(0, 2, 3, 1)
return inputs
def select_boxes(self, all_boxes, all_probs, all_points, imgs, center_weight=2.0):
selected_boxes, selected_probs, selected_points = [], [], []
for boxes, points, probs, img in zip(all_boxes, all_points, all_probs, imgs):
if boxes is None:
selected_boxes.append(np.array([]))
selected_probs.append(np.array([]))
selected_points.append(np.array([]))
continue
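            # Rank candidates by "big and central": box area minus the squared distance
            # of the box centre from the image centre, weighted by center_weight.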
box_sizes = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
img_center = (img.shape[1] / 2, img.shape[0]/2)
box_centers = np.array(list(zip((boxes[:, 0] + boxes[:, 2]) / 2, (boxes[:, 1] + boxes[:, 3]) / 2)))
offsets = box_centers - img_center
offset_dist_squared = np.sum(np.power(offsets, 2.0), 1)
box_order = np.argsort(box_sizes - offset_dist_squared * center_weight)[::-1]
box = boxes[box_order][[0]]
prob = probs[box_order][[0]]
point = points[box_order][[0]]
selected_boxes.append(box)
selected_probs.append(prob)
selected_points.append(point)
selected_boxes = np.array(selected_boxes)
selected_probs = np.array(selected_probs)
selected_points = np.array(selected_points)
return selected_boxes, selected_probs, selected_points
def detect(self, inputs, isOut=False):
if isOut:
raise ValueError("MTCNN dont take output detect")
inputs = self._transform(inputs)
# Detect faces
batch_boxes, batch_probs, batch_points = self.mtcnn.detect(inputs, landmarks=True)
# Select faces
if not self.mtcnn.keep_all:
batch_boxes, batch_probs, batch_points = self.select_boxes(
batch_boxes, batch_probs, batch_points, inputs
)
boxes, lands = [], []
for box, land in zip(batch_boxes, batch_points):
if len(box) != 0:
boxes.append(torch.from_numpy(box.astype(int)))
else:
boxes.append(torch.from_numpy(box))
lands.append(torch.from_numpy(land))
return boxes, lands
def get_detector(name):
if name == 'retinaface':
return RetinaFaceDetector()
elif name == 'mtcnn':
return MTCNNDetector(device='cuda')
else:
raise ValueError("Name detector dont support")
|
[
"honguyenhaituan@gmail.com"
] |
honguyenhaituan@gmail.com
|
6be3535ef9783246a0cc7c5625369c2a73021113
|
e9b216824fe518dfca24e955f5f40091ca761733
|
/src/models/zz/urls.py
|
a94ce8a9a0c35bb19a4aaabe0a4ab8e8229d3a75
|
[] |
no_license
|
lovearch/mone
|
1c4133c817b26fce2f81aff7b6eabee9dd00b4d5
|
45410f0a78300977e0bbd65a6070522c52cfb391
|
refs/heads/master
| 2021-04-18T21:38:52.191346
| 2016-05-03T01:50:01
| 2016-05-03T01:50:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'zz.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^user/', include(admin.site.urls)),
)
|
[
"frederick.mao@gmail.com"
] |
frederick.mao@gmail.com
|
40e044e81e637b03ed8ab1ee0a0bc10e3b4661f4
|
bc167f434158921bcf2c678155c5cdfec1c9b0c9
|
/PI_code/simulator/behaviourGeneration/firstGenScripts_preyHunter/behav372.py
|
4181e4a1f1456cec22542057f7e400034e38635a
|
[] |
no_license
|
s0217391/DifferentProjects
|
6450efc89c64ecd21b86c705737e89e5c69433a6
|
7f4da153660817b6cbf72d2e823aa29c0c2f95a9
|
refs/heads/master
| 2021-01-17T02:58:46.219240
| 2015-05-26T22:45:46
| 2015-05-26T22:45:46
| 34,995,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
#!/usr/bin/python
import sys
def compute(prey):
temp0 = min(prey[1], prey[0])
temp1 = -1 * prey[0]
if temp0 != 0:
temp1 = prey[0] / temp0
else:
temp1 = temp0
temp0 = temp0 - prey[1]
if temp1 > temp0:
if prey[0] > prey[1]:
if prey[1] != 0:
temp0 = temp0 % prey[1]
else:
temp0 = prey[1]
else:
temp0 = max(prey[1], temp0)
else:
temp0 = prey[0] * prey[1]
temp0 = temp1 + prey[1]
temp1 = -1 * temp1
temp0 = min(prey[1], prey[0])
temp0 = max(prey[1], prey[1])
temp0 = temp0 + temp0
return [temp0, temp0]
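# Note: the last assignments overwrite all earlier results, so for any input the
# function returns [2 * prey[1], 2 * prey[1]] (e.g. compute([3, 5]) -> [10, 10]).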
|
[
"i7674211@bournemouth.ac.uk"
] |
i7674211@bournemouth.ac.uk
|
a12826b3e4279c96c841a8877d4be667a5ecc1c1
|
50d98f8554f2e4be96b75b437d20009dedb6323c
|
/Quickstart Codes/set_cookie.py
|
6982a41499b01a0d22f57bc39437caf7277551f8
|
[] |
no_license
|
ymgh96/Flask-Tutorial
|
5db201f3e7de11b3de64ff6db1430d6fedbc440d
|
c215aaa91c36ef3858bbe3c78ce96cb6f796347d
|
refs/heads/main
| 2023-04-16T13:50:33.345064
| 2021-04-26T13:21:48
| 2021-04-26T13:21:48
| 361,757,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
from flask import Flask, render_template, make_response
app = Flask(__name__)
@app.route('/login')
def login():
resp = make_response(render_template("login_form.html"))
resp.set_cookie('username', 'the username')
return resp
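# Run sketch (assumes a templates/login_form.html next to this module):
#     app.run()  # then GET /login and inspect the 'username' cookie in the response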
|
[
"noreply@github.com"
] |
ymgh96.noreply@github.com
|
0138f4b64ce3bf2edaf0de1e1dad816908f1c04f
|
1f9cc39bed0f79d3b50d493cfbd19ef4a50012f5
|
/src/screens/racescreen.py
|
7b50306c7f497dc6c5384571be2f5e99452c02db
|
[] |
no_license
|
codename-rinzler/goldmine
|
e511a07b2833a1eff7386b89a4da9420ac4a7cdd
|
6170c2910100719e3d8ead61a93b44de48a1cf14
|
refs/heads/master
| 2020-05-20T08:19:28.800661
| 2013-05-27T08:11:24
| 2013-05-27T08:11:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,742
|
py
|
import libtcodpy as libtcod
from framework.gameitem import GameItem
from framework.map import Map
from framework.messenger import Messenger
from framework.fov import FieldOfView
from framework.components.position_component import *
from framework.components.mapref import *
from framework.ui.menu import Menu
from framework.ui.textbox import TextBox
from framework.ui.statbar import StatBar
from components.race import *
from util.race_factory import *
from screens.gamescreen import *
from generators.saloon import *
class RaceScreen:
def __init__(self, parent):
self.parent = parent
self.textbox = TextBox('', 3, 42, 74, 5, self.parent)
self.factory = RaceFactory()
self.menu = Menu('', self.factory.races, 3, 3, 15, self.parent)
self.power_bar = StatBar(57, 4, 20, 'Power', 7, 10, self.parent, libtcod.dark_red, libtcod.gray)
self.agility_bar = StatBar(57, 7, 20, 'Agility', 4, 10, self.parent, libtcod.darker_sea, libtcod.gray)
self.mind_bar = StatBar(57, 10, 20, 'Mind', 5, 10, self.parent, libtcod.orange, libtcod.gray)
self.speed_bar = StatBar(57, 13, 20, 'Speed', 5, 10, self.parent, libtcod.light_blue, libtcod.gray)
def render(self):
current = self.menu.selected_item()
self.textbox.set_text(current.description)
stats = self.factory.get_stats_for_race(current.race)
self.power_bar.set_values(stats.power, 10)
self.agility_bar.set_values(stats.agility, 10)
self.mind_bar.set_values(stats.mind, 10)
self.speed_bar.set_values(stats.speed, 10)
self.textbox.render()
self.power_bar.render()
self.agility_bar.render()
self.mind_bar.render()
self.speed_bar.render()
self.menu.render()
def handle_keys(self, key):
if key == libtcod.KEY_ESCAPE:
self.parent.pop_screen()
elif key == libtcod.KEY_UP:
self.menu.prev_item()
elif key == libtcod.KEY_DOWN:
self.menu.next_item()
elif key == libtcod.KEY_ENTER:
player = GameItem()
race = self.menu.selected_item()
stats = self.factory.get_stats_for_race(race.race)
player.add_component(race)
player.add_component(stats)
self._do_mapgen(player)
self.parent.pop_screen()
self.parent.push_screen(GameScreen(self.gamemap, player, self.parent))
player.add_component(MapReferenceComponent(self.gamemap))
return 'turn-taken'
def _do_mapgen(self, player):
pos = PositionComponent(25, 1)
player.add_component(pos)
gen = SaloonGenerator()
self.gamemap = gen.generate_top_floor(player)
|
[
"contact.rinzler@gmail.com"
] |
contact.rinzler@gmail.com
|
ec01171043887447e75a06730d92d479e2e43f8b
|
ba90fbcb5ccb008193a130aeca2c56ab34bb6e80
|
/Week07/37.解数独.py
|
db9b7e5480006a27bb4b6445678cd9f098647a7f
|
[] |
no_license
|
rfhklwt/algorithm010
|
b477c4672d759ab402445289d7331c8aad8b3e6a
|
95b1ab105316d9706121841b843e1c638a5ab700
|
refs/heads/master
| 2023-01-31T06:43:32.383761
| 2020-12-08T06:58:06
| 2020-12-08T06:58:06
| 273,680,370
| 3
| 1
| null | 2020-06-20T09:45:55
| 2020-06-20T09:45:55
| null |
UTF-8
|
Python
| false
| false
| 1,696
|
py
|
#
# @lc app=leetcode.cn id=37 lang=python3
#
# [37] 解数独 (Sudoku Solver)
#
# @lc code=start
class Solution:
def solveSudoku(self, board: List[List[str]]) -> None:
row, col, box = collections.defaultdict(set), collections.defaultdict(
set), collections.defaultdict(set)
seen = collections.deque()
        # First initialize row, col, box and seen: record the existing digits and queue every empty cell in seen
for i in range(9):
for j in range(9):
if board[i][j] == '.':
seen.append((i, j))
else:
row[i].add(board[i][j])
col[j].add(board[i][j])
box[(i // 3, j // 3)].add(board[i][j])
        # Depth-first search over the queued empty cells
def dfs():
# Terminator
if not seen:
return True
r, c = seen[0]
t = (r // 3, c // 3)
for num in {'1', '2', '3', '4', '5', '6', '7', '8', '9'}:
                # This digit causes no conflict in its row, column, or box
if num not in row[r] and num not in col[c] and num not in box[t]:
board[r][c] = num
row[r].add(num)
col[c].add(num)
box[t].add(num)
seen.popleft()
if dfs():
return True
else:
                        # Backtrack: undo the tentative assignment
board[r][c] = '.'
row[r].discard(num)
col[c].discard(num)
box[t].discard(num)
seen.appendleft((r, c))
return False
dfs()
# @lc code=end
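# Standalone usage sketch (on LeetCode the imports are implicit; outside it you need
# `import collections` and `from typing import List`):
#     board = [["5","3",".",".","7",".",".",".","."], ...]  # full 9x9 grid elided
#     Solution().solveSudoku(board)  # the board is filled in place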
|
[
"rfhklwt@163.com"
] |
rfhklwt@163.com
|
b9533ae22c6a70939b28441379420cc7a1b533ae
|
e98e7b45d85273797cf9f15e92fbe685a05bde18
|
/词条导出/zhidao_cron.py
|
19c1cf5746d7e05af4e57c436af8f87088d3a9f0
|
[] |
no_license
|
wangdexinpython/test
|
8d29d30e099f64f831b51265db7092d520df253c
|
a047148409e31b8a8140f2c13b959aa54ec14d0d
|
refs/heads/master
| 2020-09-11T05:10:49.041795
| 2019-12-31T07:47:41
| 2019-12-31T07:47:41
| 221,948,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,004
|
py
|
#coding=utf-8
import pymongo,time,requests,json
import urllib.parse
import redis,pexpect,os
class zhidao(object):
    def __init__(self):
        self.mongo = self.mongodb()
        self.mon_app = self.app_mongo()

    @staticmethod
    def mongodb():
        mongo = pymongo.MongoClient(
            "mongodb://xhql:" + urllib.parse.quote_plus("xhql_190228_snv738J72*fjVNv8220aiVK9V820@_") + "@172.26.26.132:20388/webpage")['webpage']
        return mongo

    @staticmethod
    def app_mongo():
        mon = pymongo.MongoClient("mongodb://integrate:" + urllib.parse.quote_plus(
            "integ_190228_snv738v8220aiVK9V820@_eate") + "@172.26.26.132:20388/integrate")
        return mon

    def Baike(self):
        webnum = self.mongo.zhidao_details.find({'state_qiu': 0, 'source': 'baiduzhidao'}).count()
print(webnum)
if webnum>0:
filetime = time.strftime("%Y%m%d", time.localtime())
filename = 'inc_zhidao_{}.dat'.format(filetime)
# filename = 'inc_zhidao_20190527.dat'
f = open(r'/mnt/data/liqiu/zhidao/{}'.format(filename),'a',encoding='utf-8')
for i in range(0,webnum,10000):
print('*****************************************',i)
# filetime = time.strftime("%Y%m%d_%H%M%S", time.localtime())
# filename = 'full_{}.dat'.format(filetime)
# f = open(r'/mnt/data/liqiu/{}'.format(filename),'a',encoding='utf-8')
            zds = self.mongo.zhidao_details.find({'state_qiu': 0, 'source': 'baiduzhidao'}).limit(10000).skip(i)
for one in zds:
try:
liqiu_dict = {'id':str(one['id']),'link':str(one['id']),'title':str(one['title']),'author':str(one['author']),'content':str(one['content_np']),'site_name':str(one['site_name']),'article_url':str(one['article_url']),'crawl_time':str(one['crawl_time']),'source':str(one['source']),'topic':'','flag':'0'}
if one.get('type',[]) and isinstance(one['type'],list):
liqiu_dict['type']=' '.join(one['type'])
elif one.get('type','') and isinstance(one['type'],str):
liqiu_dict['type']= one['type']
else:
liqiu_dict['type']=''
if one.get('label',[]) and isinstance(one['label'],list):
liqiu_dict['label']=' '.join(one['label'])
elif one.get('label',"") and isinstance(one['label'],str):
liqiu_dict['label']= one['label']
else:
liqiu_dict['label']=''
# if len(liqiu_dict)==0:
# continue
cons = liqiu_dict['content']
url = 'http://172.26.26.135:8995/topic?content={}'.format(cons)
ai = requests.get(url).text
print(ai)
if ai == 'AI':
ai = 'ai'
else:
ai = ''
liqiu_dict['topic'] = ai
                    self.read_dat(liqiu_dict)
f.write('{}\n'.format(json.dumps(liqiu_dict,ensure_ascii=False)))
s1={'id':one['id']}
s2 = {'$set':{'state_qiu':1}}
                    self.mongo.zhidao_details.update(s1, s2)
except KeyError as e:
                    print('Exception')
print('---------------------------',e)
# continue
# f.write('{}\n'.format(json.dumps(liqiu_dict,ensure_ascii=False)))
    def read_dat(self, line):
if line['topic'] == 'ai':
dict_1 = {'id': line['id'], 'content': line['content'], 'crawl_time': line['crawl_time'],
'title': line['title'], 'source': line['source'], 'topic': line['topic'], 'type': line['type'],
'url': line['article_url']}
try:
dict_1['label'] = line['label']
except:
dict_1['label'] = ''
# print(dict_1)
        self.mon_app.integrate.data_dat.update({'id': dict_1['id']}, dict_1, True)
    def copy_data(self):
fileti = time.strftime("%H%M%S", time.localtime())
if int(fileti) > 230000:
            # Check whether the file is empty
filetime = time.strftime("%Y%m%d", time.localtime())
filename = 'inc_zhidao_{}.dat'.format(filetime)
file2 = '/mnt/data/liqiu/zhidao/{}'.format(filename)
if os.path.getsize('{}'.format(file2)):
                # scp the finished file to the target directory on the search host
cmd = "scp -r {} root@172.26.26.133:/home/search/ytt/search1/raw_data/src_data/".format(file2)
pexpect.run(cmd)
else:
pass
    def run(self):
        self.Baike()
        self.copy_data()
if __name__ == '__main__':
zhi=zhidao()
zhi.run()
|
[
"wangdexin@haxitag.com"
] |
wangdexin@haxitag.com
|
9b2e4b3ea21b2772a92f3f85240535e5f585ef8d
|
0c416454274cb4bd17e528995d282033353bc0ce
|
/flaskTEST/blog_test/src/blog_test.py
|
2340bdd473a6aababc05b20f12824bc6b120e755
|
[] |
no_license
|
LeeJeongHwi/Clearfile
|
caa8f43f40d6da5505e60a5f4c213a2aa44b38b3
|
e1b5252bb2cc190f66a0c0a5f605c4974675c985
|
refs/heads/master
| 2023-03-03T12:42:36.443317
| 2021-02-17T05:08:51
| 2021-02-17T05:08:51
| 292,501,474
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
from flask import Blueprint
blog_ab = Blueprint('blog',__name__)
@blog_ab.route('/blog1')
def blog():
return "TEST Blueprint"
|
[
"wjdgnl97@gmail.com"
] |
wjdgnl97@gmail.com
|
d0334ca4f66f3270093addbb8aca6c7308fca438
|
dcae94ae7acc29942bc9ba2397db5acb1de43ba2
|
/Mol2Vec/training.py
|
53f708b2d2f5c6322fcf66a56ed254e73f22d4c9
|
[] |
no_license
|
hcji/GNN_CCS
|
cf6fa254f77c133c315c1635eee48d4845fba873
|
162ed46a28ad2112902bdc0bc869df3effbc5232
|
refs/heads/master
| 2020-09-21T09:51:03.878010
| 2020-01-05T07:50:24
| 2020-01-05T07:50:24
| 224,758,253
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,745
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 09:07:16 2019
@author: hcji
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from rdkit import Chem
from rdkit.Chem import AllChem
from gensim.models import word2vec
from mol2vec.features import mol2alt_sentence, mol2sentence, MolSentence, DfVec, sentences2vec
from mol2vec.helpers import depict_identifier, plot_2D_vectors, IdentifierTable, mol_to_svg
'''
# if only use CPU
from keras import backend as K
import tensorflow as tf
num_cores = 4
config = tf.ConfigProto(intra_op_parallelism_threads=num_cores,
inter_op_parallelism_threads=num_cores,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
session = tf.Session(config=config)
K.set_session(session)
'''
def split_dataset(dataset, ratio):
# one SMILES should not be in both train and test dataset
smiles = dataset['SMILES']
smiles_unique = np.unique(smiles)
np.random.shuffle(smiles_unique)
n = int(ratio * len(smiles_unique))
train_smiles, test_smiles = smiles_unique[n:], smiles_unique[:n]
train_index = np.where([i in train_smiles for i in smiles])[0]
test_index = np.where([i in test_smiles for i in smiles])[0]
dataset_1 = dataset.loc[train_index,:]
dataset_2 = dataset.loc[test_index,:]
return dataset_1, dataset_2
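# Example (illustrative): with ratio=0.1, roughly 10% of the *unique* SMILES strings,
# together with all of their rows, end up in the second returned dataset, so no
# molecule appears in both splits.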
if __name__ == "__main__":
from keras.models import Model, load_model
from keras.layers import Dense, Input
from keras import metrics, optimizers
from keras.callbacks.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from sklearn.metrics import r2_score, mean_absolute_error, median_absolute_error
dataset = pd.read_csv('Data/data.csv')
train_set, test_set = split_dataset(dataset, 0.1)
model = word2vec.Word2Vec.load('Mol2Vec/pretrain/model_300dim.pkl')
train_mol = [Chem.MolFromSmiles(x) for x in train_set['SMILES']]
test_mol = [Chem.MolFromSmiles(x) for x in test_set['SMILES']]
train_sent = [mol2alt_sentence(x, 1) for x in train_mol]
test_sent = [mol2alt_sentence(x, 1) for x in test_mol]
train_vec = [DfVec(x).vec for x in sentences2vec(train_sent, model, unseen='UNK')]
test_vec = [DfVec(x).vec for x in sentences2vec(test_sent, model, unseen='UNK')]
train_vec = np.array(train_vec)
test_vec = np.array(test_vec)
# train model
layer_in = Input(shape=(train_vec.shape[1],))
layer_dense = layer_in
n_nodes = 32
for j in range(3):
layer_dense = Dense(int(n_nodes), activation="relu")(layer_dense)
layer_output = Dense(1, activation="linear")(layer_dense)
opt = optimizers.Adam(lr=0.001)
model = Model(layer_in, layer_output)
model.compile(optimizer=opt, loss='mse', metrics=['mae'])
# call back
earlyStopping = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='min')
mcp_save = ModelCheckpoint('Output/Mol2Vec/model.h5', save_best_only=True, monitor='val_loss', mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1, epsilon=1e-4, mode='min')
# fit model
model.fit(train_vec, train_set['CCS'], epochs=50, callbacks=[earlyStopping, mcp_save, reduce_lr_loss], validation_split=0.11)
# test
model = load_model('Output/Mol2Vec/model.h5')
predictions = model.predict(test_vec)[:,0]
r2 = r2_score(y_true=test_set['CCS'], y_pred=predictions)
mae = mean_absolute_error(y_true=test_set['CCS'], y_pred=predictions)
rmae = np.mean(np.abs(predictions - test_set['CCS']) / test_set['CCS']) * 100
|
[
"ji.hongchao@foxmail.com"
] |
ji.hongchao@foxmail.com
|
c1e01951730aad75c8357380fa62d2d070b9289b
|
b17fa84483067f5e208083cbaaa2b8e7fcd4b34e
|
/SNIa_yields/fe56.py
|
388354ec6848f49bcf946832f5c0f41254b8390a
|
[] |
no_license
|
AtsuhiroYoshida1121/Galactic_Chemical_Evolution_code
|
931f0c4ff77ccd8b67e3770dc1a5b7d0a96eb492
|
bf6461105e56660e8369f0cb2596e36df17b1cc1
|
refs/heads/master
| 2023-05-27T22:47:52.926878
| 2021-06-04T15:05:56
| 2021-06-04T15:05:56
| 373,835,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 45
|
py
|
### SNe Ia Yields
Yfe56_Ia = 6.1 * 10**(-1)
|
[
"atsuhiro.yoshida.coding@gmail.com"
] |
atsuhiro.yoshida.coding@gmail.com
|
57f5d60066b3bf10c2c40f5c873bcf71fce7ad8d
|
fe2d0d087c424ce1f85dd759599492fe8303f9da
|
/venv/bin/pip3.6
|
a0650b0003bf3a155c1f7a9ff61079ea4a0da52c
|
[] |
no_license
|
Oswaldgerald/Django_todo_app
|
e365f18b66e47b0bb95dd4fd319e6061fc91181a
|
6e9b3ce2aaac0802a6cb3d3fee9042aafa4d0547
|
refs/heads/master
| 2020-03-20T07:03:51.950578
| 2018-06-13T20:59:22
| 2018-06-13T20:59:22
| 137,269,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
6
|
#!/home/oswald/LUC/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
)
|
[
"moswaldgerald@gmail.com"
] |
moswaldgerald@gmail.com
|
7266af66a4441f60e098f104548caa8d49ca505b
|
ab354fb6d3f8ef55675ff0263008c90e2df92fdf
|
/docs/conf.py
|
bdebc01f5d9ee09ddcb043483ed8c8fb14cf7cbe
|
[
"MIT"
] |
permissive
|
michhar/Machine-Learning-Workstreams
|
7164b99c52dbc1839af97ac792d9f3a940a7ab1a
|
dba91e4af39006ae1705f48d8cda8014b560a5b3
|
refs/heads/master
| 2020-04-25T01:01:33.606219
| 2019-02-27T19:49:47
| 2019-02-27T19:49:47
| 172,396,530
| 1
| 0
|
MIT
| 2019-02-27T19:49:49
| 2019-02-24T22:12:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,717
|
py
|
# -*- coding: utf-8 -*-
#
# tmp documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tmp'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tmpdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'tmp.tex',
u'tmp Documentation',
u"Microsoft", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tmp', u'tmp Documentation',
[u"Microsoft"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tmp', u'tmp Documentation',
u"Microsoft", 'tmp',
'Machine learning pipeline for computer vision.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
[
"wonderwoman@sunshine.y2sjhm1g5c5unmyydmwdugvcwa.xx.internal.cloudapp.net"
] |
wonderwoman@sunshine.y2sjhm1g5c5unmyydmwdugvcwa.xx.internal.cloudapp.net
|
386b6225ab701ab34212b094d2ac59900e42fca0
|
1d8cd778801472d86eee11771a39427428c0918e
|
/accounts/managers.py
|
94158b6d7362cdfe783f08943af658895de0639d
|
[
"MIT"
] |
permissive
|
geotester/insightful
|
c2e67056d5d7a6a5beb7316b1f6160af73c8a5f2
|
6fe539b67e8c71963ae5c54256a242400a4bca77
|
refs/heads/master
| 2020-12-25T00:04:35.378437
| 2015-05-07T15:31:02
| 2015-05-07T15:31:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
from django.contrib.auth.models import BaseUserManager
from django.utils import timezone
class EmailUserManager(BaseUserManager):
def _create_user(self, email, password, **extra_fields):
"""
Creates and saves a User with the given email (as username) and password.
"""
now = timezone.now()
if not email:
            raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(email=email, is_active=True, last_login=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email=None, password=None, **extra_fields):
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(email, password, **extra_fields)
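# Usage sketch (assumes a custom User model declaring `objects = EmailUserManager()`):
#     User.objects.create_user(email='jane@example.com', password='s3cret')
# Note that create_superuser here does not set is_staff/is_superuser; the model is
# assumed to handle any such flags itself.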
|
[
"kosir.jakob@gmail.com"
] |
kosir.jakob@gmail.com
|
a7c319881e687dfab5ee7a590ab574be2335d583
|
4ee76e6b7e559199af56153dff34a92a8619eca7
|
/doc_index_row.py
|
f1320cbf6029eab32a60b7332465859fa2355344
|
[] |
no_license
|
HafizAhmadHassan/Search-Engine
|
e5b730eda38e76d21ebb2b27f14d10c6e3126105
|
615f0dd47133b317914ee193413c5f8dc0a00971
|
refs/heads/master
| 2021-06-26T07:18:34.448136
| 2020-10-29T12:37:01
| 2020-10-29T12:37:01
| 169,804,754
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
class doc_index_row:
def __init__(self, doc_id, term_id, positions):
self.doc_id = doc_id
self.term_id = term_id
self.positions = positions
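# Usage sketch (hypothetical values): one posting of an inverted index, i.e. term
# `term_id` occurs in document `doc_id` at the listed token positions.
#     row = doc_index_row(doc_id=12, term_id=7, positions=[3, 41, 58])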
|
[
"noreply@github.com"
] |
HafizAhmadHassan.noreply@github.com
|
2accca9f6f9faa8ddee34ac80d0ac239063a2bde
|
c6f343972cee0ba9ceb7ff5181cf13c758babfd0
|
/recognition/Human_Detection.py
|
042086fdf8abcbfca60668b3ee79b7c4a4531acb
|
[] |
no_license
|
timcsy/AntiCoV
|
9da9916a27c95adb9b341dd43cdab51fcffdd74c
|
e162ce8761abafd41ef3272961e1cb1324ff80f9
|
refs/heads/master
| 2023-01-12T01:42:00.651015
| 2020-04-21T09:28:14
| 2020-04-21T09:28:14
| 251,413,908
| 1
| 5
| null | 2023-01-09T23:15:13
| 2020-03-30T19:55:47
|
HTML
|
UTF-8
|
Python
| false
| false
| 5,500
|
py
|
import cv2
from imutils.object_detection import non_max_suppression
import numpy as np
import imutils
from imutils import paths
import requests
import time
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
#print(cv2. __version__)
# help(cv2.HOGDescriptor().detectMultiScale)
TSH = 10
HinP = 185
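# State machine (inferred from the checks below): the person's bounding-box height is
# compared against the calibration height HinP +/- TSH pixels, giving states
# 1 (smaller/farther), 2 (at the door line) and 3 (larger/nearer); a 2 -> 3 transition
# is reported as "enter" and 2 -> 1 as "exit".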
def detectPic():
state = 0
# print(type(paths.list_images("D:\\ADMIN\\Desktop\\NCKU\\SOFTWARE ENGINEERING\\AI\\Human_Detection\\VideoCapture")))
for imagePath in sorted(paths.list_images("D:\\ADMIN\\Desktop\\NCKU\\SOFTWARE ENGINEERING\\AI\\Human_Detection\\VC")):
print(imagePath)
frame = cv2.imread(imagePath)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.GaussianBlur(frame, (5, 5), 3)
frame = imutils.resize(frame, width=min(400, frame.shape[1]))
(rects, weights) = hog.detectMultiScale(frame, winStride=(2, 2), padding=(8, 8), scale= 1.5)
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
pick = non_max_suppression(rects, probs=None, overlapThresh=0.01)
if len(pick) > 1:
continue
prestate = state
for (xA, yA, xB, yB) in pick:
cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
if yB - yA - (HinP - TSH) < 0:
state = 1
elif abs(yB - yA - HinP) <=TSH:
state = 2
elif yB - yA - (HinP + TSH) > 0:
state = 3
else:
state = 0
# if yB - yA - 170 < 0:
# state = 1
# elif abs(yB - yA - 180) <=10:
# state = 2
# elif yB - yA - 190 > 0:
# state = 3
# else:
# state = 0
# font = cv2.FONT_HERSHEY_SIMPLEX
# cv2.putText(frame, 'state = ' + str(state), (150,150), font, 1, (255, 0, 0), 5, cv2.LINE_AA)
# print(yB - yA)
# 143+-5
# 192+-5
# 250+-5
if state == 3 and prestate == 2:
S = "In"
my_data = {"status": "enter", "number": 1}
r = requests.post('https://anticov.tew.tw/api/v1/pass', data = my_data)
print(r.status_code)
elif state == 1 and prestate == 2:
S = "Out"
my_data = {"status": "exit", "number":1}
r = requests.post('https://anticov.tew.tw/api/v1/pass', data = my_data)
print(r.status_code)
else:
S = ""
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, S, (150,150), font, 1, (255, 0, 0), 5, cv2.LINE_AA)
cv2.imshow("frame", frame)
time.sleep(0.25)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
def real_time():
state = 0
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
cap.set(cv2.CAP_PROP_FPS, 60)
a = 1
while(True):
        ret, frame = cap.read()
# if cv2.waitKey(1) & 0xFF == ord('s'):
# print("frame capturated")
# cv2.imwrite('D:\\ADMIN\\Desktop\\NCKU\\SOFTWARE ENGINEERING\\AI\\Human_Detection\\VideoCapture\\'+ str(a) + '.jpg', frame)
# a += 1
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.GaussianBlur(frame, (5, 5), 3)
frame = imutils.resize(frame, width = min(400, frame.shape[1]))
(rects, weights) = hog.detectMultiScale(frame, winStride=(2, 2), padding=(8, 8), scale= 1.5)
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
pick = non_max_suppression(rects, probs=None, overlapThresh=0.01)#0.65)
if len(pick) > 1:
continue
prestate = state
for (xA, yA, xB, yB) in pick:
cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
if yB - yA - (HinP - TSH) < 0:
state = 1
elif abs(yB - yA - HinP) <=TSH:
state = 2
elif yB - yA - (HinP + TSH) > 0:
state = 3
else:
state = 0
# font = cv2.FONT_HERSHEY_SIMPLEX
# cv2.putText(frame, 'state = ' + str(state), (150,150), font, 1, (255, 0, 0), 5, cv2.LINE_AA)
S = ""
if state == 3 and prestate == 2:
S = "In"
my_data = {"status": "enter", "number":1}
r = requests.post('https://anticov.tew.tw/api/v1/pass', data = my_data)
print(r.status_code)
elif state == 1 and prestate == 2:
S = "Out"
my_data = {"status": "exit", "number":1}
r = requests.post('https://anticov.tew.tw/api/v1/pass', data = my_data)
print(r.status_code)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, S, (150,150), font, 2, (255, 0, 0), 5, cv2.LINE_AA)
# frame = cv2.resize(frame, (1920, 1080), interpolation = cv2.INTER_CUBIC)
cv2.imshow('frame', frame)
# cv2.imshow('orig', orig)
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# font = cv2.FONT_HERSHEY_SIMPLEX
# cv2.putText(gray, 'Hello World', (150,150), font, 1, (255, 0, 0), 5, cv2.LINE_AA)
# gray = cv2.GaussianBlur(gray, (5, 5), 0)
# edged = cv2.Canny(gray, 35, 125)
# cv2.imshow("edged", edged)
# cnts = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# cnts = imutils.grab_contours(cnts)
# print("cnts: ", type(cnts))
# c = max(cnts, key = cv2.contourArea)
# print("c: ", type(c))
# cv2.imshow('gray', edged)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# if cv2.waitKey(1) & 0xFF == ord('s'):
# print("frame capturated")
# cv2.imwrite('D:\\ADMIN\\Desktop\\NCKU\\SOFTWARE ENGINEERING\\AI\\Human_Detection\\VideoCapture\\'+ str(a) + '.jpg', frame)
# a += 1
'''
my_data = {"status": "enter", "number":1}# or "exit"
# login = {"username":"admin", "password":"AntiCoV"}
# r = requests.post('https://anticov.tew.tw/api/v1/login', data = login)
# print(type(r.text))
# my_header = {"Authorization": r.text}
r = requests.post('https://anticov.tew.tw/api/v1/pass', data = my_data)
print(r.status_code)
'''
cap.release()
cv2.destroyAllWindows()
# real_time()
detectPic()
|
[
"theherooftheword@gmail.com"
] |
theherooftheword@gmail.com
|
d9224273dd6c5f66a0cd97d9709405147df468f2
|
85b896a8c16c59873e7d6f39f8d94d578c7f918f
|
/report_creator.py
|
2dd6c37a504ff0c7cf2a52658bfaba4aad8ef290
|
[] |
no_license
|
PatrykStronski/rfid-reader
|
70f389736b9f040aa4552535fd7f0fb271c64323
|
423df977907fa4372915391b1fb2d2900a1350b8
|
refs/heads/master
| 2021-05-21T22:11:40.224420
| 2020-05-29T12:22:02
| 2020-05-29T12:22:02
| 252,823,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,683
|
py
|
from datetime import datetime
def create_report(dates):
    report = {}
    for ldate in dates:
        unix = ldate[1]
        sdate = datetime.utcfromtimestamp(unix).strftime('%Y-%m-%d')
        entry = {'time': datetime.utcfromtimestamp(unix).strftime('%Y-%m-%dT%H:%M:%S'), 'unix': unix}
        if sdate in report:
            day = report[sdate]
            day['logs'].append(entry)
            if day['in']['unix'] > unix:
                # An earlier event becomes the new 'in'; the old 'in' becomes 'out'
                day['out'] = day['in']
                day['in'] = dict(entry)
                day['time_in_hours'] = (day['out']['unix'] - day['in']['unix']) / 3600.0
            elif day['in']['unix'] < unix:
                # A later event becomes the new 'out'
                day['out'] = dict(entry)
                day['time_in_hours'] = (day['out']['unix'] - day['in']['unix']) / 3600.0
        else:
            report[sdate] = {'in': dict(entry), 'out': {}, 'logs': [entry]}
    return report
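# Usage sketch (hypothetical RFID rows of the form (tag_id, unix_timestamp)):
#     logs = [('tag1', 1590739200), ('tag1', 1590768000)]  # 08:00 and 16:00 UTC
#     create_report(logs)  # -> {'2020-05-29': {..., 'time_in_hours': 8.0}}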
|
[
"p.stronski13@gmail.com"
] |
p.stronski13@gmail.com
|
2bcc466d23cbf42a4ae399316961e5e59eaa21b8
|
12ecea0448af1a51e14a0e2190dc941bd57ae4ea
|
/app.py
|
655cdbbbf7df438454a2e108d4727efc729b9598
|
[
"Apache-2.0"
] |
permissive
|
PoweredByPeople/blog-public
|
9dd8727c254497860d69909122799c410374acab
|
ce8300482cced6c22ca472fde3957c6bad96639f
|
refs/heads/master
| 2022-12-12T02:42:10.853205
| 2018-08-29T21:39:43
| 2018-08-29T21:39:43
| 145,139,389
| 1
| 0
|
Apache-2.0
| 2022-12-08T00:45:51
| 2018-08-17T15:50:19
|
Python
|
UTF-8
|
Python
| false
| false
| 250
|
py
|
from app import create_app, db, cli
from app.models import User, Post
app = create_app()
cli.register(app)
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'User': User, 'Post': Post}
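# With this context processor, `flask shell` exposes db, User and Post without imports.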
app.run(host='0.0.0.0', port=80)
|
[
"louis@poweredbypeople.me"
] |
louis@poweredbypeople.me
|
e10af5e2c163747d517e5642f2a05c490fc83474
|
82ebd76b5c140345661afb85d5615bffbda7c113
|
/twitter/tweet/migrations/0008_like.py
|
78d4ecc489aba6809eaf6be65027148df8031a8d
|
[] |
no_license
|
Sythrill/Python_Twitter
|
a0bafef750544fe441c03a3f215616ec1996aff1
|
23afb375723fc58daff210b9f45c0c0caecd1bbe
|
refs/heads/master
| 2020-04-13T16:31:54.536640
| 2019-02-07T18:29:20
| 2019-02-07T18:29:20
| 163,323,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
# Generated by Django 2.0.3 on 2019-01-08 18:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tweet', '0007_personalmessage'),
]
operations = [
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('picture', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweet.Comment')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"ksciwiarska@gmail.com"
] |
ksciwiarska@gmail.com
|
c69c64d15e9879c0c3e8bb12dc4086d660d80025
|
601443d21d3c9f9121e6aec76e2ad012ec4a7817
|
/arekit/contrib/utils/pipelines/text_opinion/filters/distance_based.py
|
ba536accb1dcf557dfb01ce0bdd5f75bd5db3952
|
[
"MIT"
] |
permissive
|
nicolay-r/AREkit
|
27421472ca296671a6da69a94c1070a0b5a33451
|
1e1d354654f4f0a72090504663cc6d218f6aaf4a
|
refs/heads/master
| 2023-08-29T13:30:26.511617
| 2023-08-13T20:11:43
| 2023-08-13T20:11:43
| 225,708,027
| 54
| 4
|
MIT
| 2023-01-18T13:17:01
| 2019-12-03T20:20:46
|
Python
|
UTF-8
|
Python
| false
| false
| 650
|
py
|
from arekit.common.data.input.sample import InputSampleBase
from arekit.contrib.utils.pipelines.text_opinion.filters.base import TextOpinionFilter
class DistanceLimitedTextOpinionFilter(TextOpinionFilter):
def __init__(self, terms_per_context):
super(DistanceLimitedTextOpinionFilter, self).__init__()
self.__terms_per_context = terms_per_context
def filter(self, text_opinion, parsed_news, entity_service_provider):
return InputSampleBase.check_ability_to_create_sample(
entity_service=entity_service_provider,
text_opinion=text_opinion,
window_size=self.__terms_per_context)
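# Usage sketch (illustrative): drop text opinions whose entity pair cannot fit into a
# single sample window of `terms_per_context` terms.
#     opinion_filter = DistanceLimitedTextOpinionFilter(terms_per_context=50)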
|
[
"kolyarus@yandex.ru"
] |
kolyarus@yandex.ru
|
2ecd3524748bf1ba8190ba379743a7d332c8ecfa
|
5bfe7d76697b8e384edd0d235dadf5d1f0a3c092
|
/parallel_run.py
|
9d502deff62739a1474a05810dfec1d21c7769a5
|
[] |
no_license
|
7andrew7/parallel_run
|
099c79a92c68cd0c765899897f55f687b7172a67
|
1daef87c29edfca9bfaa048eba204b2ce50cd4a8
|
refs/heads/master
| 2021-01-19T11:17:41.494386
| 2015-01-13T23:04:33
| 2015-01-13T23:04:33
| 29,216,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
#!/usr/bin/env python
"""Cheesy script to execute a given program in parallel.
Example usage: parallel_run.py -n 32 -f output_prefix sleep 1
"""
import argparse
import multiprocessing
import subprocess
import sys
import threading
# Shady global variable required because Python 2.7 can't access outer scope
# from within a function.
remaining = None
def parallel_run(argv, count, num_threads=None, file_prefix=None):
"""Execute a program a given number of times.
Note that I tried to use multiprocess.Pool, but it has a bug that blocks ctrl-c
on Python 2.7. Sorrow.
http://bugs.python.org/issue8296
"""
global remaining
remaining = count
cv = threading.Condition()
def run_loop():
global remaining
out = None
while True:
with cv:
remaining -= 1
if remaining < 0:
return
i = remaining
if file_prefix:
out = open('%s.%d' % (file_prefix, i), 'w+')
subprocess.check_call(argv, stdout=out)
if out:
out.close()
if num_threads is None:
num_threads = min(count, multiprocessing.cpu_count())
print 'Running %d invocations across %d threads, each executing "%s"' % (
count, num_threads,' '.join(argv))
thds = [threading.Thread(target=run_loop) for i in range(num_threads)]
for t in thds:
t.start()
for t in thds:
t.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Repeated execute a program N times.')
parser.add_argument('-n', '--repeats', type=int, help='Number of executions', default=16)
parser.add_argument('-f', '--prefix', type=str, help='File prefix', default=None)
parser.add_argument('-t', '--threads', type=int, help='Number of threads', default=None)
parser.add_argument('argv', help='Program arguments.', nargs='+')
args = parser.parse_args()
parallel_run(args.argv, args.repeats, num_threads=args.threads, file_prefix=args.prefix)
|
[
"whitaker@cs.washington.edu"
] |
whitaker@cs.washington.edu
|
ac87acb8415da4d0cb503936a1e147298354e249
|
a8de23160612e326589fc82d05cc7858b6f010fc
|
/challenge-72/Solution.py
|
51f96f7c60b21a79be7432477ea8c23c2384d29a
|
[] |
no_license
|
avnermaman/Python-Daily-Challenges
|
0081b0906e96a8f3cfd38dcc68e2bf4253bda221
|
4730e7ffda7bf63664289186783c4544254f06a8
|
refs/heads/master
| 2020-08-11T03:44:50.798476
| 2018-07-17T07:10:46
| 2018-07-17T07:10:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
import math
def bin_search(li, element):
bottom = 0
top = len(li)-1
index = -1
while top>=bottom and index==-1:
mid = int(math.floor((top+bottom)/2.0))
if li[mid]==element:
index = mid
elif li[mid]>element:
top = mid-1
else:
bottom = mid+1
return index
li = [2, 5, 7, 9, 11, 17, 222]
print(bin_search(li, 11))  # -> 4
print(bin_search(li, 12))  # -> -1
|
[
"l-goddard@protonmail.com"
] |
l-goddard@protonmail.com
|
dbec04b6c60de9b52cc07c751b39d43921e836f0
|
cc536e94041f0c59dc5d51cf28276f5eeda19f31
|
/blog/models.py
|
32caa8cc20dd9fc53367ae248a3d6690a991f0a8
|
[] |
no_license
|
akalwar/blog
|
93351cc7eb1bdf52a05741ba3c30b3529b60eceb
|
aca2c8758b561d48fb546b94df690231eb9cf6ac
|
refs/heads/master
| 2020-04-13T04:33:29.773288
| 2018-12-24T08:10:45
| 2018-12-24T08:10:45
| 162,965,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
from django.db import models
from django.urls import reverse
# Create your models here.
class Post(models.Model):
title = models.CharField(max_length=200)
author = models.ForeignKey(
'auth.User',
on_delete=models.CASCADE,
)
body = models.TextField()
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post_details', args=[str(self.id)])
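# Note: get_absolute_url assumes a URL pattern named 'post_details' that takes the
# post id as its single argument.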
|
[
"abhishekkalwar835@gmail.com"
] |
abhishekkalwar835@gmail.com
|
7949305af01ab9d6fcdfe9de4f357fd728116194
|
efa8c3643037847447a818e224e1287958af6dc7
|
/plots/pdrp.py
|
fa8b463febd54e48e78fceb3653e19c1b7eb2460
|
[
"Apache-2.0"
] |
permissive
|
RyanCargan/outreach-material
|
cc072f0b660baba9735a2eddd7f365f4ce160fad
|
d7fbf7f39dc82b31f562c5171a9853cf8c1c2f40
|
refs/heads/master
| 2023-09-01T16:09:13.875867
| 2021-10-16T23:11:10
| 2021-10-16T23:11:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,319
|
py
|
"""Plot capacity factors."""
import os
from operator import itemgetter
import textwrap
import datetime
import yaml
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.dates import date2num
import matplotlib.dates as mdates
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
ISOFMT = "%Y-%M-%d"
def load(fname=os.path.join('..','data','pdrp.yaml')):
with open(fname) as f:
        data = yaml.safe_load(f)
return data["reactors"]
def dt(date):
"""convert date to datetime at midnight for easier plotting"""
return date2num(datetime.datetime(date.year, date.month, date.day))
colors = {
"BWR": "blue"
}
DOTWIDTH= 130
LINEHEIGHT = 0.1
STARTYEAR = 1951
ENDYEAR=1977
def plot(data, fname='power-demonstration-reactor-program.png'):
fig, ax = plt.subplots(figsize=(16,14))
bars = []
reactors = []
filteredRx = {}
for reactor, rxdata in data.items():
if "contracted" not in rxdata:
print(f"skipping {reactor}")
continue
filteredRx[reactor]=rxdata
# sort by first two available dates (often solicited/contracted)
for reactor, rxdata in sorted(
filteredRx.items(),
key = lambda kv: (kv[1].get("solicited", kv[1]["contracted"]), kv[1].get("contracted")),reverse=True):
print(f"adding {reactor}")
reactors.append(reactor)
patches = []
contracted = dt(rxdata["contracted"])
y = len(reactors)
if "solicited" in rxdata:
# planning period between solicitation and contract
solicited = dt(rxdata["solicited"])
patches.append(mpatches.Ellipse((solicited,y), DOTWIDTH, 0.25, facecolor="green", edgecolor="k",
lw=LINEHEIGHT, alpha=0.5))
bar = ax.barh(y, contracted-solicited, left=solicited, height=LINEHEIGHT, color="green", alpha=0.2)
start = solicited
else:
# well it was at least contracted
start=contracted
patches.append(mpatches.Ellipse((contracted,y), DOTWIDTH, 0.25, fill=False,
edgecolor="green",
lw=1.0, alpha=1.0))
#patches.append(mpatches.Ellipse((contracted,y), DOTWIDTH, 0.25, facecolor="green", edgecolor="k", lw=0.2))
ax.annotate(f'{reactor} {rxdata["type"]} in {rxdata["location"]} ({rxdata.get("MWe","-")} MW$_e$)',
xy=(start, y),
xytext=(5, 8),
textcoords="offset points",
ha='left', va='bottom', size=17)
if "ground broken" in rxdata:
# planning period between contract and groundbreaking
groundbroken = dt(rxdata["ground broken"])
#patches.append(mpatches.Ellipse((groundbroken,y), DOTWIDTH, 0.25, facecolor="brown",
#edgecolor="k", lw=LINEHEIGHT, alpha=1.0))
ax.barh(y, groundbroken-contracted, left=contracted, height=LINEHEIGHT, color="green", alpha=0.4)
# Between construction and critical
critical = dt(rxdata["critical"])
patches.append(mpatches.Ellipse((critical,y), DOTWIDTH, 0.25, facecolor="blue", edgecolor="k",
lw=LINEHEIGHT, alpha=0.5))
bar = ax.barh(y, critical-groundbroken, left=groundbroken, height=LINEHEIGHT, color="green", alpha=0.6)
# Between critical and commercial
fullpower = dt(rxdata["fullpower"])
#patches.append(mpatches.Ellipse((fullpower,y), DOTWIDTH, 0.25, facecolor="blue", edgecolor="k", lw=0.2, alpha=1.0))
bar = ax.barh(y, fullpower-critical, left=critical, height=LINEHEIGHT, color="green", alpha=0.8)
# Commercial operation to shutdown
shutdown = dt(rxdata["shutdown"])
patches.append(mpatches.Ellipse((shutdown,y), DOTWIDTH, 0.25, facecolor="k", edgecolor="k", lw=0.2))
ax.barh(y, shutdown-fullpower, left=fullpower, height=LINEHEIGHT, color="green", alpha=1.0)
if rxdata["shutdown"].year > ENDYEAR:
# add overflow label
labeltime = dt(datetime.datetime(ENDYEAR-2, 6, 1))
ax.annotate(f'{rxdata["shutdown"].year} →',
xy=(labeltime, y),
xytext=(5, 8),
textcoords="offset points",
ha='left', va='bottom', size=14)
# Do at the end so milestones are on top
for patch in patches:
ax.add_patch(patch)
# Make custom legend defining the milestone markers
legendElements = [
mpatches.Ellipse((0,0), DOTWIDTH, 0.25, facecolor="green", edgecolor="k", lw=LINEHEIGHT, alpha=0.5,
label="Solicited"),
mpatches.Ellipse((0,0), DOTWIDTH, 0.25, fill=False, edgecolor="green", lw=1.0, alpha=1.0, label="Contracted"),
mpatches.Ellipse((0,0), DOTWIDTH, 0.25, facecolor="blue", edgecolor="k", lw=LINEHEIGHT, alpha=0.5,
label="Critical"),
mpatches.Ellipse((0,0), DOTWIDTH, 0.25, facecolor="k", edgecolor="k", lw=0.2, label="Shutdown"),
]
ax.legend(handles=legendElements, fontsize=16)
ax.set_yticks(range(1,len(reactors)+1))
ax.get_yaxis().set_visible(False)
ax.xaxis_date()
# show each year on axis
ax.xaxis.set_major_locator(mdates.YearLocator(5))
ax.xaxis.set_minor_locator(mdates.YearLocator())
#ax.set_yticklabels(reactors)
plt.title("The Power Demonstration Reactor Program", fontsize=16)
#ax.set_ylim([0,900]) # make room for data label
ax.set_xlim([date2num(datetime.datetime(STARTYEAR,1,1)),
date2num(datetime.datetime(ENDYEAR,1,1))])
ax.xaxis.tick_top()
#ax.xaxis.set_label_position('bottom')
ax.tick_params(direction="in", labelsize=14)
# Manually squish the subplot to make room for labels
fig.subplots_adjust(bottom=0.05, top=0.95, left=0.05, right=0.95)
ann = ax.text(0.76, 0.06, '\n'.join(textwrap.wrap( "CC-BY-NC whatisnuclear.com",130)),
size=12, va="center", ha="left", transform=fig.transFigure,
alpha=0.7
)
if fname:
plt.savefig(fname)
else:
plt.show()
if __name__ == '__main__':
data = load()
plot(data)
|
[
"nick@partofthething.com"
] |
nick@partofthething.com
|
81d2d43d971b207b2dd0bcc44c97e8f6c0f921da
|
7f04fbc897ff52e4d27cc2f27ae6dfbabe43dfe0
|
/cellml/pmr2/tests/base.py
|
9100a7a3ffe800da9cdfd74b377716fd6c6545ab
|
[] |
no_license
|
metatoaster/cellml.pmr2
|
daae69721af04f7a28eae496dcbeb13b98e2d4d0
|
cbfe212effe325350b1e7087e6172952483b981f
|
refs/heads/master
| 2021-06-21T20:56:30.267128
| 2016-03-08T05:45:53
| 2016-03-08T05:45:53
| 2,396,487
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,105
|
py
|
import unittest
import doctest
from zope.component import testing
from Testing import ZopeTestCase as ztc
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import onsetup
from Products.PloneTestCase.layer import onteardown
from Products.Five import fiveconfigure
from Zope2.App import zcml
import pmr2.app
from pmr2.testing.base import TestRequest
from pmr2.app.exposure.content import ExposureContainer
from pmr2.app.exposure.browser.browser import ExposureAddForm
from pmr2.app.exposure.browser.browser import ExposureFileGenForm
from pmr2.app.exposure.tests.base import ExposureDocTestCase
from pmr2.app.exposure.tests.base import ExposureExtendedDocTestCase
@onsetup
def setup():
import pmr2.app
import cellml.pmr2
fiveconfigure.debug_mode = True
# XXX dependant on pmr2.app still
zcml.load_config('configure.zcml', cellml.pmr2)
zcml.load_config('test.zcml', cellml.pmr2.tests)
fiveconfigure.debug_mode = False
ztc.installPackage('cellml.pmr2')
@onteardown
def teardown():
pass
setup()
teardown()
ptc.setupPloneSite(products=('cellml.pmr2',))
class CellMLDocTestCase(ExposureExtendedDocTestCase):
def setUp(self):
super(CellMLDocTestCase, self).setUp()
import cellml.pmr2
rev = u'2'
request = TestRequest(
form={
'form.widgets.workspace': u'rdfmodel',
'form.widgets.commit_id': rev,
'form.buttons.add': 1,
})
testform = ExposureAddForm(self.portal.exposure, request)
testform.update()
exp_id = testform._data['id']
context = self.portal.exposure[exp_id]
self.exposure1 = context
rdfmodel = self.portal.workspace.rdfmodel
self.file1 = u'example_model.cellml'
request = TestRequest(
form={
'form.widgets.filename': [self.file1],
'form.buttons.add': 1,
})
testform = ExposureFileGenForm(context, request)
testform.update()
self.exposure_file1 = context[self.file1]
|
[
"tommy.yu@auckland.ac.nz"
] |
tommy.yu@auckland.ac.nz
|
705321a954ce453dc3724881c7d6d2933e9590e0
|
f4dc2b6f7beaf7571e4e9bfde2e181110df29e17
|
/sas/compliance/tests.py
|
2d3dd28e7180a0c3e4df3996b0136ffc191668f6
|
[] |
no_license
|
foleymd/sisathl
|
076c71a9bb8a7f0580ff1358614ca9b84407c73a
|
21e5cffa997ba462d326c62a89725afaa3c044b4
|
refs/heads/main
| 2022-12-19T23:49:21.786301
| 2020-10-02T00:52:14
| 2020-10-02T00:52:14
| 299,988,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,885
|
py
|
from django.test import TestCase
from sisathl.sas.compliance.functions.qualtrics import QualtricsRequest
class QualtricsTests(TestCase):
def setUp(self):
panel_name = 'testing_panel'
self.q = QualtricsRequest()
self.panel_id = self.q.create_panel(panel_name)
self.all_test_student = {'FirstName': 'FooFoo',
'LastName': 'Cocopops',
'Email': 'beep@boop.com',
'ExternalDataRef': 'speede@utexas.edu',
'First Time': 'No',
'Over 18': 'Yes',
'Walk-on': 'No',
'Status': 'Continuing',
'Sport1': 'MFB',
'Sport2': '',
'Sport3': ''}
self.all_test_student_id = self.q.add_recipient(self.panel_id, self.all_test_student)
def tearDown(self):
self.q.delete_panel(self.panel_id)
def test_create_and_delete_panel(self):
panel_name = 'xxx'
q = QualtricsRequest()
panel_id = q.create_panel(panel_name)
self.assertTrue(panel_id)
try:
panel = q.get_panel(panel_id)
self.assertTrue(True) # it worked!
except:
self.assertTrue(False)
q.delete_panel(panel_id)
try:
panel = q.get_panel(panel_id)
self.assertTrue(False)
except:
self.assertTrue(True)
def test_adding_recipients(self):
test_student = {'FirstName': 'MOBOLAJI',
'LastName': 'ADEOKUN',
'Email': 'fake@fak.com',
'ExternalDataRef': 'speede@utexas.edu',
'First Time': 'No',
'Over 18': 'Yes',
'Walk-on': 'No',
'Status': 'Continuing',
'Sport1': 'WTI',
'Sport2': 'WTO',
'Sport3': ''}
self.q.add_recipient(self.panel_id, test_student)
panel = self.q.get_panel(self.panel_id)
# make a list of just the eids (well, the ExternalDataReference) so that we
# have a concise list of who is already in the panel
panel_eids = [student['ExternalDataReference'] for student in panel if student.get('ExternalDataReference', False)]
self.assertTrue(test_student['ExternalDataRef'] in panel_eids)
def test_getting_recipient(self):
try:
recipient = self.q.get_recipient(self.all_test_student_id)
print recipient
self.assertTrue(True) # it worked!
except:
self.assertTrue(False)
#TODO: write test for getting survey name
|
[
"marjorie.foley@austin.utexas.edu"
] |
marjorie.foley@austin.utexas.edu
|
ba469e691aa8feaecc648a26c1171ddcf7f037ce
|
a9789672eaba37c4b391756c7fadb472609ce8ed
|
/knowtator2standoff.py
|
d527a6f765c0fd5ff61fa86d8c26416b15a7be32
|
[] |
no_license
|
spyysalo/knowtator2standoff
|
acb68666a6b4dfcd66baf53f1fb7c213f01f38d1
|
18d76d958d13230ff661917e0319668969dc0ec1
|
refs/heads/master
| 2020-06-20T06:39:40.401026
| 2016-11-28T10:56:56
| 2016-11-28T10:56:56
| 74,897,798
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,246
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import re
import io
import xml.etree.cElementTree as ET
from collections import defaultdict
from os import path
from logging import warn
DOCDIR = 'craft-2.0/articles/txt/' # TODO: CLI argument
MAX_SPAN = 150 # TODO: CLI argument
# Mapping from CRAFT to standoff types
type_map = {
'Entrez Gene sequence': 'EntrezGene',
'organism': 'NCBITaxon',
}
class FormatError(Exception):
pass
class Annotation(object):
"""Knowtator annotation."""
def __init__(self, spans, texts, mention_id):
self.spans = spans
self.texts = texts
self.mention_id = mention_id
self.mention = None
@property
def type(self):
if self.mention is None:
raise ValueError('no mention (call map_mentions?)')
type_ = self.mention.type
type_ = type_map.get(type_, type_)
return type_
def to_standoff(self):
t_id = next_free_id('T')
type_ = self.type.replace(' ', '_')
spans = ';'.join('{} {}'.format(s, e) for s, e in self.spans)
texts = u' '.join(self.texts)
t_ann = u'{}\t{} {}\t{}'.format(t_id, type_, spans, texts)
return [t_ann]
@classmethod
def from_xml(cls, e, doc_text):
"""Return Annotation from ElementTree element."""
spans = []
for s in e.findall('span'):
spans.append((int(s.get('start')), int(s.get('end'))))
if not spans:
raise FormatError('no spans for annotation')
text = findonly(e, 'spannedText').text
if not text:
raise FormatError('no text for annotation')
texts = text.split(' ... ') # Resolve discontinuous annotations
if len(spans) != len(texts):
            raise FormatError('{} spans, {} texts'.format(len(spans),
                                                          len(texts)))
fixed_spans = []
for span, text in zip(spans, texts):
start, end = span
if len(text) != end-start:
msg = 'text length mismatch: "{}" ({}) vs {}-{} ({})'.format(
text.encode('utf-8'), len(text), start, end, end-start)
if doc_text[start:start+len(text)] == text:
msg += ' (adjust end: "{}" to "{}")'.format(
doc_text[start:end].encode('utf-8'),
doc_text[start:start+len(text)].encode('utf-8'))
end = start+len(text)
elif doc_text[end-len(text):end] == text:
msg += ' (adjust start: "{}" to "{}")'.format(
doc_text[start:end].encode('utf-8'),
doc_text[end-len(text):end].encode('utf-8'))
start = end-len(text)
else:
msg += ' (failed to adjust)'
warn(msg)
if len(text) != end-start:
raise FormatError(
'Text mismatch: "{}" (len {}) vs "{}" ({}-{})'.format(
text.encode('utf-8'), len(text),
doc_text[start:end].encode('utf-8'), start, end))
fixed_spans.append((start, end))
spans = fixed_spans
# sanity check
if spans[-1][1] - spans[0][0] > MAX_SPAN:
raise FormatError('span length over MAX_SPAN: {} ({})'.format(
text.encode('utf-8'), spans))
mention_id = findonly(e, 'mention').get('id')
return cls(spans, texts, mention_id)
def findonly(e, tag):
"""Return only child of ElementTree element with given tag."""
found = e.findall(tag)
if len(found) != 1:
raise FormatError('expected single {} child, found {}'.format(
tag, len(found)))
return found[0]
def next_free_id(prefix):
idx = next_free_id.id_map[prefix] + 1
next_free_id.id_map[prefix] += 1
return prefix + str(idx)
next_free_id.id_map = defaultdict(int)
def reset_next_free_id():
next_free_id.id_map = defaultdict(int)
class ClassMention(object):
"""Knowtator ClassMention."""
def __init__(self, id_, class_id, text):
self.id = id_
self.class_id = class_id
self.text = text
@property
def type(self):
m = re.match(r'^([a-zA-Z]+):(\d+)$', self.class_id)
if m:
return m.group(1) # TYPE:NUM ontology ID
else:
return self.class_id
@classmethod
def from_xml(cls, e):
"""Return ClassMention from ElementTree element."""
id_ = e.get('id')
c = findonly(e, 'mentionClass')
class_id = c.get('id')
text = c.text
return cls(id_, class_id, text)
class StringSlotMention(object):
"""Knowtator StringSlotMention."""
def __init__(self, id_, slot, value):
self.id = id_
self.slot = slot
self.value = value
@property
def type(self):
return self.slot
@classmethod
def from_xml(cls, e):
"""Return StringSlotMention from ElementTree element."""
id_ = e.get('id')
slot = findonly(e, 'mentionSlot').get('id')
value = findonly(e, 'stringSlotMentionValue').get('value')
return cls(id_, slot, value)
class IntegerSlotMention(object):
"""Knowtator IntegerSlotMention."""
def __init__(self, id_, slot, value):
self.id = id_
self.slot = slot
self.value = value
@property
def type(self):
return self.slot
@classmethod
def from_xml(cls, e):
"""Return IntegerSlotMention from ElementTree element."""
id_ = e.get('id')
slot = findonly(e, 'mentionSlot').get('id')
value = findonly(e, 'integerSlotMentionValue').get('value')
return cls(id_, slot, value)
class BooleanSlotMention(object):
"""Knowtator BooleanSlotMention."""
def __init__(self, id_, slot, value):
self.id = id_
self.slot = slot
self.value = value
@property
def type(self):
return self.slot
@classmethod
def from_xml(cls, e):
"""Return BooleanSlotMention from ElementTree element."""
id_ = e.get('id')
slot = findonly(e, 'mentionSlot').get('id')
value = findonly(e, 'booleanSlotMentionValue').get('value')
return cls(id_, slot, value)
class ComplexSlotMention(object):
"""Knowtator ComplexSlotMention."""
def __init__(self, id_, slot, values):
self.id = id_
self.slot = slot
self.values = values
@property
def type(self):
return self.slot
@classmethod
def from_xml(cls, e):
"""Return ComplexSlotMention from ElementTree element."""
id_ = e.get('id')
slot = findonly(e, 'mentionSlot').get('id')
values = [c.get('value') for c in e.findall('complexSlotMentionValue')]
return cls(id_, slot, values)
def get_text(docid, docdir=DOCDIR, encoding='utf-8'):
if docid not in get_text.text_by_docid:
fn = path.join(docdir, docid)
with io.open(fn, encoding=encoding) as f:
text = f.read()
get_text.text_by_docid[docid] = text
return get_text.text_by_docid[docid]
get_text.text_by_docid = {}
_mention_class_map = {
'classMention': ClassMention,
'complexSlotMention': ComplexSlotMention,
'integerSlotMention': IntegerSlotMention,
'booleanSlotMention': BooleanSlotMention,
'stringSlotMention': StringSlotMention,
}
def load_knowtator_xml(fn):
tree = ET.parse(fn)
root = tree.getroot()
docid = root.get('textSource')
text = get_text(docid)
annotations = []
mentions = []
for e in root:
try:
if e.tag == 'annotation':
annotations.append(Annotation.from_xml(e, text))
elif e.tag in _mention_class_map:
mentions.append(_mention_class_map[e.tag].from_xml(e))
else:
raise FormatError('unexpected element {}'.format(e.tag))
        except FormatError as error:
print('error parsing {}: {} ({})'.format(fn, error, e),
file=sys.stderr)
return docid, annotations, mentions
def map_mentions(annotations, mentions):
mention_by_id = {}
for m in mentions:
        assert m.id not in mention_by_id, 'duplicate id: {}'.format(m.id)
mention_by_id[m.id] = m
for a in annotations:
a.mention = mention_by_id[a.mention_id]
def main(argv):
if len(argv) < 2:
print('Usage: {} [FILE [...]]'.format(__file__))
return 1
annotations_by_id = defaultdict(list)
for fn in argv[1:]:
try:
docid, annotations, mentions = load_knowtator_xml(fn)
map_mentions(annotations, mentions)
annotations_by_id[docid].extend(annotations)
        except Exception as e:
print('failed to parse {}: {}'.format(fn, e), file=sys.stderr)
    for docid, annotations in annotations_by_id.items():
reset_next_free_id()
for a in annotations:
for t in a.to_standoff():
print(t.encode('utf-8'))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
[
"sampo.pyysalo@gmail.com"
] |
sampo.pyysalo@gmail.com
|
d1e1d06e0a683d02e0a0a308b6e11deb7f027935
|
c3f2058feec2a7e48f7affe76b9049cc936268f4
|
/lib/library.py
|
917977bccbe6658a63c2702d2f61cf102f675034
|
[
"MIT"
] |
permissive
|
i96751414/plugin.video.flix
|
362033fc408ce6671b5d528f90569fec852a9ab9
|
ba3f55642a3e2e51114c47afeb0c46ed3303c0a2
|
refs/heads/master
| 2023-06-25T11:44:54.632018
| 2022-06-03T00:37:50
| 2022-06-03T00:37:50
| 249,699,611
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,373
|
py
|
import logging
import os
import time
from threading import Lock
from tmdbsimple import Discover
from xbmc import Monitor
from lib.api.flix.kodi import ADDON_ID, ADDON_NAME, translate, Progress, update_library, clean_library
from lib.api.flix.utils import make_legal_name
from lib.settings import get_library_path, add_special_episodes, add_unaired_episodes, update_kodi_library, \
include_adult_content, is_library_progress_enabled
from lib.storage import Storage
from lib.tmdb import Season, Movie, Show, get_movies, get_shows
class LibraryMonitor(Monitor):
def __init__(self, library="video"):
super(LibraryMonitor, self).__init__()
self._library = library
self._lock = Lock()
self._scan_started = self._scan_finished = False
self._clean_started = self._clean_finished = False
def onScanStarted(self, library):
self._on_action("_scan_started", library)
def onScanFinished(self, library):
self._on_action("_scan_finished", library)
def onCleanStarted(self, library):
self._on_action("_clean_started", library)
def onCleanFinished(self, library):
self._on_action("_clean_finished", library)
def _on_action(self, attr, library):
if library == self._library:
with self._lock:
setattr(self, attr, True)
logging.debug("%s on %s library", attr, library)
def wait_scan_start(self, timeout=0):
return self._wait("_scan_started", timeout)
def wait_scan_finish(self, timeout=0):
return self._wait("_scan_finished", timeout)
def wait_clean_start(self, timeout=0):
return self._wait("_clean_started", timeout)
def wait_clean_finish(self, timeout=0):
return self._wait("_clean_finished", timeout)
def _wait(self, attr, timeout):
start_time = time.time()
while not getattr(self, attr) and not self.waitForAbort(1):
if 0 < timeout < time.time() - start_time:
return False
return True
def start_scan(self, path=None, wait=False):
logging.debug("Starting scan with path='%s' and wait=%s", path, wait)
with self._lock:
self._scan_started = self._scan_finished = False
update_library(self._library, path)
if wait:
if self.wait_scan_start(10):
self.wait_scan_finish()
def clean_library(self, wait=False):
logging.debug("Cleaning library with wait=%s", wait)
with self._lock:
self._clean_started = self._clean_finished = False
clean_library(self._library)
if wait:
if self.wait_clean_start(10):
self.wait_clean_finish()
class Library(object):
MOVIE_TYPE = "movie"
SHOW_TYPE = "show"
def __init__(self):
self._directory = get_library_path()
if not os.path.isdir(self._directory):
raise ValueError(translate(30135))
self._add_unaired_episodes = add_unaired_episodes()
self._add_specials = add_special_episodes()
self._update_kodi_library = update_kodi_library()
self._movies_directory = os.path.join(self._directory, "Movies")
self._shows_directory = os.path.join(self._directory, "TV Shows")
if not os.path.exists(self._movies_directory):
os.makedirs(self._movies_directory)
if not os.path.exists(self._shows_directory):
os.makedirs(self._shows_directory)
self._storage = Storage(os.path.join(self._directory, "library.sqlite"))
self._table_name = "library"
self._storage.execute_and_commit(
"CREATE TABLE IF NOT EXISTS `{}` ("
"id INTEGER NOT NULL, "
"type TEXT NOT NULL, "
"path TEXT CHECK(path <> '') NOT NULL, "
"PRIMARY KEY (id, type)"
");".format(self._table_name))
def _storage_has_item(self, item_id, item_type):
return self._storage_get_path(item_id, item_type) is not None
def _storage_get_path(self, item_id, item_type):
row = self._storage.execute(
"SELECT path FROM `{}` WHERE id = ? AND type = ?;".format(self._table_name),
(item_id, item_type)).fetchone()
return row and row[0]
def _storage_count_entries(self):
return self._storage.count(self._table_name)
def _storage_get_entries(self):
return self._storage.fetch_items("SELECT * FROM `{}`;".format(self._table_name))
def _storage_count_entries_by_type(self, item_type):
return self._storage.execute(
"SELECT COUNT(*) FROM `{}` WHERE type = ?;".format(self._table_name), (item_type,)).fetchone()[0]
def _storage_get_entries_by_type(self, item_type):
return self._storage.fetch_items(
"SELECT id, path FROM `{}` WHERE type = ?;".format(self._table_name), (item_type,))
def _storage_add_item(self, item_id, item_type, path):
self._storage.execute_and_commit(
"INSERT INTO `{}` (id, type, path) VALUES(?, ?, ?);".format(self._table_name),
(item_id, item_type, path))
def _add_movie(self, item, name, override_if_exists=True):
movie_dir = os.path.join(self._movies_directory, name)
if not os.path.isdir(movie_dir):
os.makedirs(movie_dir)
movie_path = os.path.join(movie_dir, name + ".strm")
if override_if_exists or not os.path.exists(movie_path):
with open(movie_path, "w") as f:
f.write("plugin://{}/providers/play_movie/{}".format(ADDON_ID, item.movie_id))
def add_movie(self, item):
if self._storage_has_item(item.movie_id, self.MOVIE_TYPE):
logging.debug("Movie %s was previously added", item.movie_id)
return False
name = item.get_info("originaltitle")
year = item.get_info("year")
if year:
name += " ({})".format(year)
name = make_legal_name(name)
self._add_movie(item, name)
self._storage_add_item(item.movie_id, self.MOVIE_TYPE, name)
if self._update_kodi_library:
self.update_movies()
return True
def _add_show(self, item, name, override_if_exists=True):
show_dir = os.path.join(self._shows_directory, name)
if not os.path.isdir(show_dir):
os.makedirs(show_dir)
for season in item.seasons(get_unaired=self._add_unaired_episodes):
if not self._add_specials and season.season_number == 0:
continue
for episode in Season(item.show_id, season.season_number).episodes(get_unaired=self._add_unaired_episodes):
episode_name = u"{} S{:02d}E{:02d}".format(name, episode.season_number, episode.episode_number)
episode_path = os.path.join(show_dir, episode_name + ".strm")
if override_if_exists or not os.path.exists(episode_path):
with open(episode_path, "w") as f:
f.write("plugin://{}/providers/play_episode/{}/{}/{}".format(
ADDON_ID, episode.show_id, episode.season_number, episode.episode_number))
def add_show(self, item):
if self._storage_has_item(item.show_id, self.SHOW_TYPE):
logging.debug("Show %s was previously added", item.show_id)
return False
name = item.get_info("originaltitle")
year = item.get_info("year")
if year:
name += " ({})".format(year)
name = make_legal_name(name)
self._add_show(item, name)
self._storage_add_item(item.show_id, self.SHOW_TYPE, name)
if self._update_kodi_library:
self.update_shows()
return True
def rebuild(self):
items_iter = self._storage_get_entries()
if is_library_progress_enabled():
items_iter = Progress(items_iter, self._storage_count_entries(),
heading=ADDON_NAME, message=translate(30142))
for item_id, item_type, path in items_iter:
if item_type == self.MOVIE_TYPE:
self._add_movie(Movie(item_id), path)
elif item_type == self.SHOW_TYPE:
self._add_show(Show(item_id), path)
else:
logging.error("Unknown item type '%s' for id '%s' and path '%s'", item_type, item_id, path)
if self._update_kodi_library:
self.update_movies(wait=True)
self.update_shows(wait=True)
def update_library(self):
items_iter = self._storage_get_entries_by_type(self.SHOW_TYPE)
if is_library_progress_enabled():
items_iter = Progress(items_iter, self._storage_count_entries_by_type(self.SHOW_TYPE),
heading=ADDON_NAME, message=translate(30141))
for item_id, path in items_iter:
logging.debug("Updating show %s on %s", item_id, path)
self._add_show(Show(item_id), path, override_if_exists=False)
if self._update_kodi_library:
self.update_movies(wait=True)
self.update_shows(wait=True)
def discover_contents(self, pages):
include_adult = include_adult_content()
api = Discover()
pages_iter = range(1, pages + 1)
if is_library_progress_enabled():
pages_iter = Progress(pages_iter, pages, heading=ADDON_NAME, message=translate(30140))
for page in pages_iter:
for movie in get_movies(api.movie(page=page, include_adult=include_adult))[0]:
logging.debug("Adding movie %s to library", movie.movie_id)
self.add_movie(movie)
for show in get_shows(api.tv(page=page, include_adult=include_adult))[0]:
logging.debug("Adding show %s to library", show.show_id)
self.add_show(show)
if self._update_kodi_library:
self.update_movies(wait=True)
self.update_shows(wait=True)
def update_shows(self, wait=False):
LibraryMonitor().start_scan(self._shows_directory, wait)
def update_movies(self, wait=False):
LibraryMonitor().start_scan(self._movies_directory, wait)
def close(self):
self._storage.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
|
[
"i96751414@gmail.com"
] |
i96751414@gmail.com
|
5d6f9ca85d261aa72965687c2d7e15e5c50660fa
|
28e4d57622f75d7cc488c893a001b43676d2b969
|
/python/fj/machine_learning/iris_data.py
|
0a88c16ab345228359ffe9a9c3e066bd64743c18
|
[] |
no_license
|
pgacademy/samples
|
fedb9067891d49a03b8f2e20160d9bd8301f4631
|
185af7b41b0c2fcd5583b8ee092147066983a05d
|
refs/heads/master
| 2020-05-21T20:22:36.443549
| 2017-02-25T09:09:29
| 2017-02-25T09:09:29
| 61,775,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
import pandas
import matplotlib.pyplot as plot
import numpy as np
frame = pandas.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
print(frame.tail())
y = frame.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
X = frame.iloc[0:100, [0, 2]].values
plot.scatter(X[:50,0], X[:50,1], color='red', marker='o', label='setosa')
plot.scatter(X[50:100,0], X[50:100,1], color='blue', marker='x', label='versicolor')
plot.xlabel('sepal length [cm]')
plot.ylabel('petal length [cm]')
plot.legend(loc='upper left')
plot.show()
|
[
"fanban.xiaofan@gmail.com"
] |
fanban.xiaofan@gmail.com
|
d387bce2ac4e6514fd3539a4c572d35f7d1e6022
|
f7dc60d14f02a9934ddb22b75d95cf3ab8d3f0f0
|
/hood/migrations/0004_auto_20210201_1209.py
|
de855c485bda9dc1fa3ae18ec47d0d7454ad8dc7
|
[] |
no_license
|
uwase-diane/Neighbourhood
|
83301a2f79b4333814249096ea7dd3c693b5baae
|
ded93c778dea15251d09fcf72f0c3266ae1e2505
|
refs/heads/master
| 2023-02-24T13:12:33.325239
| 2021-02-02T18:36:16
| 2021-02-02T18:36:16
| 333,460,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,524
|
py
|
# Generated by Django 3.1.5 on 2021-02-01 09:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('hood', '0003_delete_follow'),
]
operations = [
migrations.RenameField(
model_name='business',
old_name='neighbourhood',
new_name='neighborhood',
),
migrations.RemoveField(
model_name='neighbourhood',
name='health_no',
),
migrations.RemoveField(
model_name='neighbourhood',
name='hood_location',
),
migrations.RemoveField(
model_name='neighbourhood',
name='hood_name',
),
migrations.RemoveField(
model_name='neighbourhood',
name='police_no',
),
migrations.RemoveField(
model_name='neighbourhood',
name='user',
),
migrations.RemoveField(
model_name='post',
name='date',
),
migrations.RemoveField(
model_name='post',
name='message',
),
migrations.RemoveField(
model_name='post',
name='neighbourhood',
),
migrations.RemoveField(
model_name='post',
name='title',
),
migrations.AddField(
model_name='business',
name='description',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='neighbourhood',
name='image',
field=models.ImageField(blank=True, default='', upload_to='images/'),
),
migrations.AddField(
model_name='neighbourhood',
name='location',
field=models.CharField(blank=True, max_length=60),
),
migrations.AddField(
model_name='neighbourhood',
name='name',
field=models.CharField(blank=True, max_length=60),
),
migrations.AddField(
model_name='post',
name='neighborhood',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hood.neighbourhood'),
),
migrations.AddField(
model_name='post',
name='post',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='post',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='business',
name='email',
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name='business',
name='name',
field=models.CharField(max_length=60, null=True),
),
migrations.AlterField(
model_name='business',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='neighbourhood',
name='population',
field=models.IntegerField(null=True),
),
]
|
[
"diane.uwase13@gmail.com"
] |
diane.uwase13@gmail.com
|
a1473cb37ec4dbe66936bd6f1ccc93b53691aac5
|
4e065cad9efc45c0859c287b55bd6c16aa258b46
|
/myapp/models.py
|
ca86b2675a9122f048f87b5ea062b1cdf3ba12d4
|
[] |
no_license
|
Athila001/blog
|
e52c6046a5df4526f4809c9b0cd3221052cbf024
|
9bc9ea4c4ea1fb6485e740cc1757374d2111cbb9
|
refs/heads/master
| 2022-11-30T06:53:38.344401
| 2020-08-06T08:15:34
| 2020-08-06T08:15:34
| 285,509,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class TimeStamp(models.Model):
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
user = models.ForeignKey(User,on_delete = models.CASCADE)
class Meta:
abstract = True
class Post(TimeStamp):
title = models.CharField(max_length = 100,verbose_name = 'Title')
description = models.TextField()
image = models.ImageField(upload_to='')
likes = models.IntegerField(default = 0)
def __str__(self):
return self.title
class Comment(TimeStamp):
post = models.ForeignKey(Post,on_delete = models.CASCADE)
comment = models.TextField()
|
[
"imca-115@scmsgroup.org"
] |
imca-115@scmsgroup.org
|
64487f5e901e6ebe888eb466411fbe93dcf919bc
|
4c29fd08962ba7b1cec2a227a1eca8e3b46bece5
|
/zestawyZadan/zestaw2/zd2.12.py
|
2d66265d904cf09003cd22aac9811a712b2d26d8
|
[] |
no_license
|
SabinaZolnowska/JezykPython
|
835eefc47aa3f138c0d9402c3e4a5dc5e82bffb1
|
d0bbf94f4c8729712b265272613ce130a81b7412
|
refs/heads/master
| 2021-01-20T11:47:38.761268
| 2017-11-30T15:33:59
| 2017-11-30T15:33:59
| 71,003,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
#-*- coding: utf-8 -*-
# Build a string made of the first characters of the words in line.
# Build a string made of the last characters of the words in line.
line = "C sprawia, ze latwo jest sobie strzelic w stope.\nZ C++ jest to trudniejsze, ale za to w razie czego odstrzelisz sobie cala noge.\n"
words = line.split()
S = ""
for x in xrange(len(words)):
    S = S + words[x][0]
print "String made of the first characters of the words in line = " + S
S = ""
for x in xrange(len(words)):
    index = len(words[x]) - 1
    S = S + words[x][index]
print "String made of the last characters of the words in line = " + S
|
[
"sabina.zolnowska@uj.edu.pl"
] |
sabina.zolnowska@uj.edu.pl
|
13654a23d74ba83e150ebcdea9177cd36cdb1f02
|
fb2fade39e20c1ff1826e66e0b2d1b4ccaae6ce2
|
/Ensemble model with hypterparamenter tunning.py
|
87f5d0187df1e9df46c6f807f166c978ca65ae1c
|
[] |
no_license
|
aprilycliu/ML-Toolkit
|
a349e8f5e4b89db432317faec99d664616784d41
|
79edb85ae5b3c2c5b2576fd16227393c97265a4f
|
refs/heads/main
| 2023-04-23T12:10:25.614280
| 2021-04-29T09:17:09
| 2021-04-29T09:17:09
| 362,758,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,716
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 11:53:46 2021
@author: aprilliu
"""
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold
#cross validation on 10 folds to measure the performance on train data with each classifier, and then choose the top 2 models
kfold = StratifiedKFold(n_splits=10)
random_state = 42
classifiers = []
classifiers.append(SVC(random_state=random_state))
classifiers.append(DecisionTreeClassifier(random_state=random_state))
classifiers.append(RandomForestClassifier(random_state=random_state))
classifiers.append(GradientBoostingClassifier(random_state=random_state))
classifiers.append(ExtraTreesClassifier())
classifiers.append(XGBClassifier())
classifiers.append(LGBMClassifier())
cv_results = []
for classifier in classifiers :
cv_results.append(cross_val_score(classifier, X_train, y = y_train, scoring = "accuracy", cv = kfold, n_jobs=4))
cv_means = []
cv_std = []
for cv_result in cv_results:
cv_means.append(cv_result.mean())
cv_std.append(cv_result.std())
cv_res = pd.DataFrame({"Algorithm":["SVC","DecisionTree","RandomForest","GradientBoosting","ExtraTrees","XGB",'LGBM'],"CrossValMeans":cv_means,"CrossValerrors": cv_std})
cv_res
#Setting up hyperparameters for the classifiers
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 300, num = 3)]
# Number of features to consider at every split
max_features = [ 20 ,'auto', 'log2']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 30, num = 3)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Create the random grid
rf_param_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random forest Hyper parameter tuning with gridsearch
rf = RandomForestClassifier()
#fitting
Grid_s_rf = GridSearchCV(rf, param_grid = rf_param_grid, cv=kfold, scoring="accuracy", n_jobs= -1, verbose = True)
Grid_s_rf.fit(X_train,y_train)
RFC_best = Grid_s_rf.best_estimator_
# Best score
Grid_s_rf.best_score_
max_depth = [int(x) for x in np.linspace(5, 20, num = 3)]
min_child_weight = [5,6,7]
eta = [.3, .2, .1, .05, .01, .005]
XGB_param_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'eta': eta,
'min_child_weight': min_child_weight,}
XGB = XGBClassifier()
Grid_s_XGB = GridSearchCV(XGB, param_grid = XGB_param_grid, cv=kfold, scoring="accuracy", n_jobs= -1, verbose = True)
Grid_s_XGB.fit(X_train,y_train)
XGB_best = Grid_s_XGB.best_estimator_
# Best score
Grid_s_XGB.best_score_
#Ensembling of the 2 tuned models
votingC = VotingClassifier(estimators=[('rf', RFC_best), ('xgb', XGB_best)], voting='soft', n_jobs=-1)
votingC = votingC.fit(X_train, y_train)
#To get the model performance on the training data
print('Score: ', votingC.score(X_train, y_train))
predictions = votingC.predict(X_test)
submission = pd.DataFrame({'PassengerId': test_data.PassengerId,
'Survived': predictions})
submission.to_csv('submission.csv', index = False)
|
[
"noreply@github.com"
] |
aprilycliu.noreply@github.com
|
33ca2cbec3283c60f3f48ff39bcc8624ecb5d8f8
|
a86bd96433a98e2311447a1923a400470d231f7e
|
/almanac/style/highlight.py
|
93bc92ffea0c08e9b9383963588506d9d14bda0a
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
welchbj/almanac
|
3e0d1e8282ec00ad17854536526cf253b331a201
|
7ba473ef07173e0f017dd151e7ca425ba149b8fe
|
refs/heads/main
| 2022-12-18T12:51:53.039850
| 2022-07-06T01:25:03
| 2022-07-06T01:25:03
| 193,141,053
| 5
| 2
|
MIT
| 2022-12-08T14:28:58
| 2019-06-21T18:07:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
from typing import Optional, Type
from pygments import highlight
from pygments.formatter import Formatter
from pygments.formatters import TerminalFormatter
from pygments.lexers import get_lexer_for_mimetype
from pygments.util import ClassNotFound
def highlight_for_mimetype(
text: str,
mimetype: str,
*,
fallback_mimetype: Optional[str] = 'text/plain',
formatter_cls: Type[Formatter] = TerminalFormatter
) -> str:
"""Return ANSI-escaped highlighted text, as per the .
If ``mimetype`` cannot be resolved, then ``fallback_mimetype`` will be used.
If that cannot be resolved (or is ``None``), then the pygments ``ClassNotFound``
exception will be raised.
"""
try:
lexer = get_lexer_for_mimetype(mimetype)
except ClassNotFound as e:
if fallback_mimetype is not None:
lexer = get_lexer_for_mimetype(fallback_mimetype)
else:
raise e
highlighted_text: str = highlight(text, lexer, formatter_cls())
return highlighted_text
|
[
"welch18@vt.edu"
] |
welch18@vt.edu
|
a8c1f96f98e07c9ba3546cfd50751884d3d97c6f
|
0cf91c0a0e56ed6eaa1e7617ca7b1114910be447
|
/ch04_Trees_and_Graphs/q04_12_paths_with_sum.py
|
08c8e0b19a0d5185cae7c67703de066d1a371925
|
[] |
no_license
|
nhenninger/CrackingTheCodingInterview6e
|
728e2989e0c2d8aedc8279b004ce2a1220e07554
|
968eb7683ee53b9a2df12c96f880d3e6d973be10
|
refs/heads/master
| 2020-04-19T14:37:57.193172
| 2019-05-10T20:23:44
| 2019-05-10T20:23:44
| 156,780,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,413
|
py
|
from nodes import BinaryTreeNode as BTNode
# 4.12 Paths With Sum
def paths_with_sum(root: BTNode, target_sum: int) -> int:
"""
    Count the paths in a tree which result in a given integer sum.
Paths must be contiguous and flow only down the tree. They need not start
or end on the root or leaves.
Nodes may contain any integer value.
:param root: The root of the tree
:param target_sum: The targeted sum
:return: The number of paths
Runtime: O(n)
Memory: O(n)
"""
return _pws_helper(root, target_sum, 0, {})
def _pws_helper(root: BTNode, target_sum: int, running_sum: int, paths: dict) -> int:
if root is None:
return 0
running_sum += root.data
num_paths = paths.get(running_sum - target_sum, 0)
if running_sum == target_sum:
num_paths += 1
_increment_hash_table(paths, running_sum)
num_paths += _pws_helper(root.left, target_sum, running_sum, paths)
num_paths += _pws_helper(root.right, target_sum, running_sum, paths)
_decrement_hash_table(paths, running_sum)
return num_paths
def _increment_hash_table(table: dict, key: int) -> None:
if key in table:
table[key] += 1
else:
table[key] = 1
def _decrement_hash_table(table: dict, key: int) -> None:
if key not in table:
return
elif table[key] == 1:
del table[key]
else:
table[key] -= 1
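# Quick sanity check below. `_DemoNode` is a hypothetical stand-in defined only
# for illustration, since BTNode's constructor is not shown in this file; it
# mimics the `data`/`left`/`right` attributes that paths_with_sum actually reads.
if __name__ == '__main__':
    class _DemoNode:
        def __init__(self, data, left=None, right=None):
            self.data = data
            self.left = left
            self.right = right

    # A small example tree:
    #         10
    #        /  \
    #       5   -3
    #      / \    \
    #     3   2   11
    #    / \   \
    #   3  -2   1
    root = _DemoNode(10,
                     _DemoNode(5,
                               _DemoNode(3, _DemoNode(3), _DemoNode(-2)),
                               _DemoNode(2, right=_DemoNode(1))),
                     _DemoNode(-3, right=_DemoNode(11)))
    print(paths_with_sum(root, 8))  # 3 paths sum to 8: 5+3, 5+2+1, -3+11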
|
[
"nathanhenninger@u.boisestate.edu"
] |
nathanhenninger@u.boisestate.edu
|
2595270268307e38cb6869032e1bb2c2fab86994
|
de2a9b5ac45088cf3eef84ab5972d48bccba581a
|
/mendel_1.py
|
d75368ef74efc4ef12f3d704f0910b7fbb57bf85
|
[] |
no_license
|
burton-its/rosalind
|
f623d29bd25bcfd34e509b8f814aeb7b691df82a
|
fb8ed251f77a5023a6950119ce8847d5c047fbec
|
refs/heads/master
| 2023-04-14T05:09:17.142863
| 2021-05-03T14:13:06
| 2021-05-03T14:13:06
| 362,510,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
#k = homozygous dominant AA
#m = heterozygous Aa
#n = homozygous recessive aa
#P(offspring shows dominant) = (k*(k-1) + (k*m + m*k) + (k*n + n*k) + 0.75*m*(m-1) + (0.5*m*n + 0.5*n*m)) / (t*(t-1))
#what is the chance, given k, m, n parents, that the offspring will carry a dominant allele
#punnett squares give us, per pairing, the probability of a dominant-phenotype child:
# 1*k*m,  1*k*n,   1*k*(k-1)
# 1*m*k,  .5*m*n,  .75*m*(m-1)
# 1*n*k,  .5*n*m,  0*n*(n-1)
#so for the recessive outcomes we use .5*m*n + .5*n*m = m*n, .25*m*(m-1) and n*(n-1), then subtract from 1 for the probability of a dominant allele
k = 2
m = 2
n = 2
t = k + m + n
tot = (1 - (((m*n) + (.25*m*(m-1)) + (n*(n-1)) )/ (t*(t-1))))
print(tot)
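# Sanity check: with k = m = n = 2 and t = 6,
#   tot = 1 - (2*2 + 0.25*2*1 + 2*1) / (6*5) = 1 - 6.5/30 = 0.78333...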
# check: tot = 1 - (m*n + .25*m*(m-1) + n*(n-1)) / (t*(t-1))
|
[
"54475896+burton-its@users.noreply.github.com"
] |
54475896+burton-its@users.noreply.github.com
|
906d8d31f8d3ffc5106d919603e868081473e52b
|
822b14274b52b600797022e2f8a339ec25d397c8
|
/util.py
|
a80e4dc06bc393d97d83ca29f09d37c82a34eaba
|
[] |
no_license
|
jayin92/terrain-autoencoder
|
66a4dc7875a4e2a9961f2d9d4946753db588d11e
|
86d7b3009c5851c1739db20b767df5619f792bb7
|
refs/heads/master
| 2022-11-27T07:39:57.774235
| 2020-08-11T09:11:02
| 2020-08-11T09:11:02
| 286,697,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,996
|
py
|
import numpy as np
import torch
import matplotlib.pyplot as plt
def p(im):
plt.imshow(im.detach().cpu(), cmap="Greys_r")
#plt.colorbar()
def sampleLatent(param, c, stat=False):
loss = torch.tensor(0.0, device=c.device)
latent = []
RMS_miu = []
mean_var = []
for i in range(c.depth):
mean = param[i][:, : c.NLatent[i]]
log_var = param[i][:, c.NLatent[i] :].mean(3).mean(2).unsqueeze(2).unsqueeze(3)
var = torch.exp(log_var)
latent.append(torch.normal(mean, torch.sqrt(var)))
if param[i].nelement() != 0:
loss += ((mean * mean).mean() + var.mean() - log_var.mean() - 1.0) / 2
RMS_miu.append((mean * mean).mean(1).sqrt().mean().item())
mean_var.append(var.mean().item())
if stat:
return latent, loss, RMS_miu, mean_var
else:
return latent, loss
def test(c, valset, structuralEncoder, latentEncoder, decoder,VGGLoss):
structuralEncoder.eval()
latentEncoder.eval()
decoder.eval()
result = {}
RMS_miu = np.zeros(c.depth)
mean_var = np.zeros(c.depth)
n = 0
L1 = 0
L2 = 0
VGG=0
loader = torch.utils.data.DataLoader(valset, batch_size=c.batchSize)
for _, data in enumerate(loader):
n += 1
X = data["X"].to(c.device)
Y = data["Y"].to(c.device)
structure = structuralEncoder(X)
leout = latentEncoder(torch.cat([X, Y], dim=1))
latent, latentLoss_, RMS_miu_, mean_var_ = sampleLatent(
latentEncoder(torch.cat([X, Y], dim=1)), c, True
)
Y_pred = decoder(structure, latent)+X
RMS_miu += np.array(RMS_miu_)
mean_var += np.array(mean_var_)
L1 += (Y - Y_pred).abs().mean().item()
L2 += (Y - Y_pred).pow(2).mean().item()
VGG+=VGGLoss(Y,Y_pred).mean().item()
result.update(
{
"L1": L1 / n * 1000,
"L2": (L2 / n)**0.5 * 1000,
"VGGLoss":VGG,
"RMS_miu": RMS_miu / n,
"mean_var": mean_var / n,
"sample": torch.cat((X, Y_pred, Y), dim=3),
}
)
return result
import importlib
reload=importlib.reload
def roll(x, shift, dim):
"""
Similar to np.roll but applies to PyTorch Tensors
"""
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
def fftshift(x, dim=None):
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
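# Quick check of the np-style helpers above (illustrative only; guarded so the
# module stays import-safe):
if __name__ == "__main__":
    x = torch.arange(6.)
    print(roll(x, 2, 0))   # tensor([4., 5., 0., 1., 2., 3.])
    print(fftshift(x))     # tensor([3., 4., 5., 0., 1., 2.])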
|
[
"jayin920805@gmail.com"
] |
jayin920805@gmail.com
|
9b60055bfd1a9f2b4790394fb0951ce97e1ebd3e
|
038b02e16b598cbc91fdb3753e8af6318a38494d
|
/profiles_api/serializers.py
|
9181286a07b9ae2c5ac0f3e1ae7281bec863a5dd
|
[
"MIT"
] |
permissive
|
Jai2796/rest-api
|
052d197a44214b9275c83c04b3d3916d50d7ae5e
|
db2225991e04fb37d139f912d7b3bcd5dc3dce0b
|
refs/heads/main
| 2023-09-01T09:41:01.452167
| 2021-11-07T10:13:04
| 2021-11-07T10:13:04
| 425,255,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
from rest_framework import serializers
class HelloSerializer(serializers.Serializer):
"""serializers a name field for testing our APiView"""
name = serializers.CharField(max_length=10)
|
[
"soundarjai96@gmail.com"
] |
soundarjai96@gmail.com
|
e58381152d52d766f94cefe52c14d4017e947b51
|
4c6b96ba6883525d48b6a5c67755ca73b7ce3f7f
|
/statistics/monteCarloValorMedio.py
|
7bc000a948566fe5093383c735b5be19e2911a77
|
[] |
no_license
|
mgb-avellar/SelectedTasksInPython
|
409b80ea2603b328effe95edc91e14a4908788e2
|
59cebb0481cf7d250019fdfb60ea7f61bbb7d0b4
|
refs/heads/master
| 2021-07-10T07:23:17.162993
| 2020-06-27T19:24:55
| 2020-06-27T19:24:55
| 139,353,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
#! /usr/bin/python
# Example page 51 of Chorin
# We want to calculate the mean value using Monte Carlo with and without 'importance sampling'
import random
import math
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
print
n = 10000
Itheo = 0.1983725855 # result of Int(cos(x/5)*exp(-5*x), x=0..1)
Iexp1 = 0.0 # approximation to Itheo by method 1
Iexp2 = 0.0 # approximation to Itheo by method 2
lista_numChi1 = []
lista_numChi2 = []
lista_numEta = []
for i in range(0,n):
chi = random.uniform(0,1)
lista_numChi1.append(chi) # just in case I want some histogram; otherwise, it is not necessary
Iexp1 = Iexp1 + (1.0/n) * math.cos(chi/5.0) * math.exp(-5*chi)
I1 = 0.1986524106 # result of Int(exp(-5*x), x=0..1)
chi = 0.0
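# Importance sampling: draw eta from the density p(x) = exp(-5*x)/I1 on [0,1].
# Its CDF is F(x) = (1 - exp(-5*x))/(5*I1); setting F(eta) = chi and solving
# gives eta = -(1/5)*log(1 - 5*I1*chi), the formula used below.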
for i in range(0,n):
chi = random.uniform(0,1)
lista_numChi2.append(chi) # just in case I want some histogram; otherwise, it is not necessary
eta = - 1.0/5.0 * math.log(1.0 - 5.0 * I1 * chi)
lista_numEta.append(eta) # just in case I want some histogram; otherwise, it is not necessary
Iexp2 = Iexp2 + (I1/n) * math.cos(eta/5.0)
#print Iexp2, (I1/n) * math.cos(eta/5.0), I1, n, i
print 'n = ', n
print 'I_theor = ', Itheo, 'I_meth_1 =', Iexp1, 'I_meth_2 = ', Iexp2
print
|
[
"marcio.de.avellar@gmail.com"
] |
marcio.de.avellar@gmail.com
|
f2ad3af82b81178ad25abe400e111e68b8d7d2eb
|
dc896c01de50f45b62a9fdf19fc2eca73db9fca3
|
/2_1_LinearRegression.py
|
a2a967bec9383ba3e5f5076a82439b7f0eb938d7
|
[] |
no_license
|
yatish0492/DataScienceTutorial
|
6da9fb64993a4e5c9c6aabf07360f9332ff52f7f
|
c98abae0cf4e41561fb2800b28e8c20064ef2c13
|
refs/heads/master
| 2022-11-25T01:41:26.175995
| 2020-07-19T20:01:16
| 2020-07-19T20:01:16
| 280,916,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,195
|
py
|
'''
Linear Regression
-----------------
Linear Regression is a machine learning algorithm based on supervised learning.
What are the types of Linear Regression?
----------------------------------------
1) Simple Linear Regression
2) Multiple Linear Regression
Simple Linear Regression
------------------------
Consider an example: we have data about the salaries of the employees currently working in a company along with their experience.
Let us plot a chart for this data with the x-axis as experience and the y-axis as salary. The Simple Linear Regression algorithm will
find a line using the formula,
        'y = b0 + b1*x'
        b0 --> The intercept, or base value: roughly the salary predicted for a fresher/entry-level employee with zero
               experience, so every predicted salary is this value plus an experience-dependent amount.
        b1 --> The coefficient, i.e. the slope of the fitted line. It is computed from the training data; in ordinary
               least squares it equals the covariance of x and y divided by the variance of x.
        x --> The employee's experience (the independent variable); 'y' is the predicted salary.
In the chart, if we draw lines connecting all the raw data points we get a zig-zag line, which is useless for prediction.
So instead we compute 'y = b0 + b1*x' at each experience value 'x' and plot those 'y' points; connecting them gives a straight
line, which we can extend to predict the salary for any experience.
The straight line acts like a smoothed 'mean' of the data: it is not literally the mean of the points, but the line defined by
'y = b0 + b1*x'.
Varying the coefficient 'b1' produces different straight lines; the model selects the line with the smallest distance between
the predicted salaries and the actual salaries in the data (least squares). In other words, it picks the line whose
predictions are closest to the real salaries, i.e. the line that gives the most accurate predictions.
Multiple Linear Regression
--------------------------
It is similar to 'Simple Linear Regression', but is used when the use case has multiple independent variables, such as
'technology' and 'designation' along with 'experience', instead of only one.
The formula becomes,
    'y = b0 + b1*x1 + b2*x2 + b3*x3 ...'
    b0 --> base value (intercept)
    b1 --> coefficient of x1
    x1 --> 'technology'
    b2 --> coefficient of x2
    x2 --> 'designation'
    b3 --> coefficient of x3
    x3 --> 'experience'
'''
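# A minimal runnable sketch of both flavours (illustrative only; assumes
# scikit-learn and numpy are installed -- neither is mentioned above, and the
# numbers are made up for demonstration):
import numpy as np
from sklearn.linear_model import LinearRegression

# Simple Linear Regression: predict salary from experience alone.
experience = np.array([[1.0], [2.0], [3.0], [5.0], [8.0]])   # x
salary = np.array([40000, 48000, 55000, 70000, 95000])       # y
simple = LinearRegression().fit(experience, salary)
print(simple.intercept_, simple.coef_)    # b0 and b1
print(simple.predict([[4.0]]))            # predicted salary for 4 years

# Multiple Linear Regression: one row per employee with
# [experience, designation level, technology score] (made-up encodings).
X = np.array([[1, 1, 3],
              [2, 1, 4],
              [3, 2, 4],
              [5, 2, 5],
              [8, 3, 5]], dtype=float)
multiple = LinearRegression().fit(X, salary)
print(multiple.intercept_, multiple.coef_)  # b0 and [b1, b2, b3]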
|
[
"ycs@hotels.com"
] |
ycs@hotels.com
|
6526f313c1150a714077705302ce4c4979903dae
|
08b3f5757e1d88cacd781fb5c8cd20fc78eb293e
|
/tests/mocked_carla.py
|
1376113ee039ab051c772dba764cfe52a310f45d
|
[
"Apache-2.0"
] |
permissive
|
seikurou/pylot
|
1435ce9059b72809aab75f4a74f1479de35af40d
|
0e47c3dcaf6f0d4a3937b94846d2e55ef908dfa5
|
refs/heads/master
| 2022-12-22T06:56:38.356603
| 2020-09-24T23:56:02
| 2020-09-24T23:56:02
| 297,835,945
| 0
| 0
|
Apache-2.0
| 2020-09-23T02:59:47
| 2020-09-23T02:59:46
| null |
UTF-8
|
Python
| false
| false
| 625
|
py
|
# This module provides mocked versions of classes and functions provided
# by Carla in our runtime environment.
class Location(object):
""" A mock class for carla.Location. """
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
class Rotation(object):
""" A mock class for carla.Rotation. """
def __init__(self, pitch, yaw, roll):
self.pitch = pitch
self.yaw = yaw
self.roll = roll
class Vector3D(object):
""" A mock class for carla.Vector3D. """
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
|
[
"sukritkalra@gmail.com"
] |
sukritkalra@gmail.com
|
c2e2942b1a6a71c4135c644e295854d3eb10f53b
|
b5aedecd9c928f39ded89b0a7f209e75cf326a89
|
/if elif else.py
|
cb7c8a7b0290c1ee933469846f64abf67e12d286
|
[] |
no_license
|
Aneesawan34/Assignments
|
f945e0e5e413e4812e64bb719eee019e0f409219
|
d57d59bdd74da67e2e20eab703e63a05fe245484
|
refs/heads/master
| 2021-09-08T00:57:36.587495
| 2018-03-04T21:29:04
| 2018-03-04T21:29:04
| 112,734,643
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
age = 12
if age <= 4:
    print("your cost is free")
elif age < 18:
    print("your cost is $5")
else:
    print("your cost is $10")
|
[
"aneesawan34@yahoo.com"
] |
aneesawan34@yahoo.com
|
58d1f9cf803febc2a58fb26e573063434eae588c
|
caaf9046de59559bb92641c46bb8ab00f731cb46
|
/Configuration/Generator/python/JpsiMM_Pt_20_inf_8TeV_TuneCUETP8M1_cfi.py
|
3d826f915126679c530acffd43c4e184f6851393
|
[] |
no_license
|
neumeist/cmssw
|
7e26ad4a8f96c907c7373291eb8df205055f47f0
|
a7061201efe9bc5fa3a69069db037d572eb3f235
|
refs/heads/CMSSW_7_4_X
| 2020-05-01T06:10:08.692078
| 2015-01-11T22:57:32
| 2015-01-11T22:57:32
| 29,109,257
| 1
| 1
| null | 2015-01-11T22:56:51
| 2015-01-11T22:56:49
| null |
UTF-8
|
Python
| false
| false
| 2,939
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
source = cms.Source("EmptySource")
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(0.0154),
pythiaHepMCVerbosity = cms.untracked.bool(False),
crossSection = cms.untracked.double(354400000.0),
comEnergy = cms.double(8000.0),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'Charmonium:states(3S1) = 443', # filter on 443 and prevents other onium states decaying to 443, so we should turn the others off
'Charmonium:O(3S1)[3S1(1)] = 1.16',
'Charmonium:O(3S1)[3S1(8)] = 0.0119',
'Charmonium:O(3S1)[1S0(8)] = 0.01',
'Charmonium:O(3S1)[3P0(8)] = 0.01',
'Charmonium:gg2ccbar(3S1)[3S1(1)]g = on',
'Charmonium:gg2ccbar(3S1)[3S1(8)]g = on',
'Charmonium:qg2ccbar(3S1)[3S1(8)]q = on',
'Charmonium:qqbar2ccbar(3S1)[3S1(8)]g = on',
'Charmonium:gg2ccbar(3S1)[1S0(8)]g = on',
'Charmonium:qg2ccbar(3S1)[1S0(8)]q = on',
'Charmonium:qqbar2ccbar(3S1)[1S0(8)]g = on',
'Charmonium:gg2ccbar(3S1)[3PJ(8)]g = on',
'Charmonium:qg2ccbar(3S1)[3PJ(8)]q = on',
'Charmonium:qqbar2ccbar(3S1)[3PJ(8)]g = on',
'443:onMode = off', # ignore cross-section re-weighting (CSAMODE=6) since selecting wanted decay mode
'443:onIfAny = 13',
'PhaseSpace:pTHatMin = 20.',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
oniafilter = cms.EDFilter("PythiaFilter",
Status = cms.untracked.int32(2),
MaxEta = cms.untracked.double(1000.0),
MinEta = cms.untracked.double(-1000.0),
MinPt = cms.untracked.double(0.0),
ParticleID = cms.untracked.int32(443)
)
mumugenfilter = cms.EDFilter("MCParticlePairFilter",
Status = cms.untracked.vint32(1, 1),
MinPt = cms.untracked.vdouble(0.5, 0.5),
MinP = cms.untracked.vdouble(2.7, 2.7),
MaxEta = cms.untracked.vdouble(2.5, 2.5),
MinEta = cms.untracked.vdouble(-2.5, -2.5),
ParticleCharge = cms.untracked.int32(-1),
ParticleID1 = cms.untracked.vint32(13),
ParticleID2 = cms.untracked.vint32(13)
)
ProductionFilterSequence = cms.Sequence(generator*oniafilter*mumugenfilter)
|
[
"you@somedomain.com"
] |
you@somedomain.com
|
2af4e5aa54d793e98d55c59cc5f8006685863192
|
98b203d8ecf2f51ab0f7707eeee09ee07d109577
|
/python/segment_tree_range_modfications.py
|
0d3fda2bfcba1f052938c532eb74d4a54a882fae
|
[] |
no_license
|
razerboot/DataStrcutures-and-Algorithms
|
9967727a730fa59daa00a91c021042d885584b10
|
b47560efe4fa59ae255dc83c791e18acd9813c22
|
refs/heads/master
| 2021-07-16T19:08:16.770941
| 2021-01-21T11:19:19
| 2021-01-21T11:19:19
| 95,435,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
# this code supports range modifications on an array (e.g. addition or subtraction)
# and queries for a single element or the complete array after a set of modifications
from operator import itemgetter as it
def modify(st, l, r, val, n):
l, r = l + n, r + n
while l <= r:
if l % 2 != 0:
st[l] += val
l += 1
if r % 2 == 0:
st[r] += val
r -= 1
if l == r:
break
l, r = l / 2, r / 2
def query(st, p, n):
p += n
res = 0
while p > 0:
res += st[p]
p /= 2
return res
def bs(st, x, n):
l, r = -1, n
while r - l > 1:
mid = (l + r) / 2
val = query(st, mid, n)
if val <= x:
l = mid
else:
r = mid
l += 1
return l
def push(st, n):
for i in xrange(1, n):
st[2 * i] += st[i]
st[2 * i + 1] += st[i]
st[i] = 0
def f(x, y):
return int(x), y
n = input()
arr = map(f, raw_input().split(), [i for i in xrange(n)])
arr.sort(key=it(0))
st = [0] * (2 * n)
for i in xrange(n):
st[i + n] = arr[i][0]
m = input()
for a0 in xrange(m):
val = input()
index = bs(st, val, n)
if index != n:
modify(st, index, n - 1, -1, n)
push(st, n)
out = [0] * n
for i in xrange(n):
out[arr[i][1]] = str(st[i + n])
print ' '.join(out)
|
[
"akshaykumar@akshaymac.local"
] |
akshaykumar@akshaymac.local
|
0d9111196d01038e0d9e7eefa1e0736577e219ad
|
7c4ef470f7822810760f397c4b4a398476a65986
|
/tests/accelerators/test_ipu.py
|
d76cf68d328012c60fea9fbcc1bffbeb1380e9b2
|
[
"Apache-2.0"
] |
permissive
|
bamblebam/pytorch-lightning
|
559dffd9ecffe05a642dacb38813c832618ae611
|
c784092013d388e45ae83a043675c627e7ca527f
|
refs/heads/master
| 2023-08-09T21:04:47.327114
| 2021-09-14T10:27:56
| 2021-09-14T10:27:56
| 374,527,452
| 0
| 0
|
Apache-2.0
| 2021-06-07T03:51:56
| 2021-06-07T03:51:55
| null |
UTF-8
|
Python
| false
| false
| 18,763
|
py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
import pytest
import torch
import torch.nn.functional as F
from pytorch_lightning import Callback, seed_everything, Trainer
from pytorch_lightning.accelerators import CPUAccelerator, IPUAccelerator
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.plugins import IPUPlugin, IPUPrecisionPlugin
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.trainer.supporters import CombinedLoader
from pytorch_lightning.utilities import _IPU_AVAILABLE
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel
if _IPU_AVAILABLE:
import poptorch
class IPUModel(BoringModel):
def training_step(self, batch, batch_idx):
output = self(batch)
loss = self.loss(batch, output)
return loss
def validation_step(self, batch, batch_idx):
output = self(batch)
loss = self.loss(batch, output)
return loss
def test_step(self, batch, batch_idx):
output = self(batch)
loss = self.loss(batch, output)
return loss
def training_epoch_end(self, outputs) -> None:
pass
def validation_epoch_end(self, outputs) -> None:
pass
def test_epoch_end(self, outputs) -> None:
pass
class IPUClassificationModel(ClassificationModel):
def training_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.cross_entropy(logits, y)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
acc = self.accuracy(logits, y)
return acc
def test_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
acc = self.accuracy(logits, y)
return acc
def accuracy(self, logits, y):
# todo (sean): currently IPU poptorch doesn't implicit convert bools to tensor
# hence we use an explicit calculation for accuracy here. Once fixed in poptorch
# we can use the accuracy metric.
acc = torch.sum(torch.eq(torch.argmax(logits, -1), y).to(torch.float32)) / len(y)
return acc
def validation_epoch_end(self, outputs) -> None:
self.log("val_acc", torch.stack(outputs).mean())
def test_epoch_end(self, outputs) -> None:
self.log("test_acc", torch.stack(outputs).mean())
@pytest.mark.skipif(_IPU_AVAILABLE, reason="test requires non-IPU machine")
def test_fail_if_no_ipus(tmpdir):
with pytest.raises(MisconfigurationException, match="IPU Accelerator requires IPU devices to run"):
Trainer(default_root_dir=tmpdir, ipus=1)
with pytest.raises(MisconfigurationException, match="IPU Accelerator requires IPU devices to run"):
Trainer(default_root_dir=tmpdir, ipus=1, accelerator="ipu")
@RunIf(ipu=True)
def test_accelerator_selected(tmpdir):
trainer = Trainer(default_root_dir=tmpdir, ipus=1)
assert isinstance(trainer.accelerator, IPUAccelerator)
trainer = Trainer(default_root_dir=tmpdir, ipus=1, accelerator="ipu")
assert isinstance(trainer.accelerator, IPUAccelerator)
@RunIf(ipu=True)
def test_warning_if_ipus_not_used(tmpdir):
with pytest.warns(UserWarning, match="IPU available but not used. Set the `ipus` flag in your trainer"):
Trainer(default_root_dir=tmpdir)
@RunIf(ipu=True)
def test_no_warning_plugin(tmpdir):
with pytest.warns(None) as record:
Trainer(default_root_dir=tmpdir, plugins=IPUPlugin(training_opts=poptorch.Options()))
assert len(record) == 0
@RunIf(ipu=True)
@pytest.mark.parametrize("ipus", [1, 4])
def test_all_stages(tmpdir, ipus):
model = IPUModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, ipus=ipus)
trainer.fit(model)
trainer.validate(model)
trainer.test(model)
trainer.predict(model)
@RunIf(ipu=True)
@pytest.mark.parametrize("ipus", [1, 4])
def test_inference_only(tmpdir, ipus):
model = IPUModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, ipus=ipus)
trainer.validate(model)
trainer.test(model)
trainer.predict(model)
@RunIf(ipu=True)
def test_optimization(tmpdir):
seed_everything(42)
dm = ClassifDataModule(length=1024)
model = IPUClassificationModel()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, ipus=2)
# fit model
trainer.fit(model, dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert dm.trainer is not None
# validate
result = trainer.validate(datamodule=dm)
assert dm.trainer is not None
assert result[0]["val_acc"] > 0.7
# test
result = trainer.test(model, datamodule=dm)
assert dm.trainer is not None
test_result = result[0]["test_acc"]
assert test_result > 0.6
# test saved model
model_path = os.path.join(tmpdir, "model.pt")
trainer.save_checkpoint(model_path)
model = IPUClassificationModel.load_from_checkpoint(model_path)
trainer = Trainer(default_root_dir=tmpdir, ipus=2)
result = trainer.test(model, datamodule=dm)
saved_result = result[0]["test_acc"]
assert saved_result == test_result
@RunIf(ipu=True)
def test_mixed_precision(tmpdir):
class TestCallback(Callback):
def setup(self, trainer: Trainer, pl_module: LightningModule, stage: Optional[str] = None) -> None:
assert trainer.accelerator.model.precision == 16
raise SystemExit
model = IPUModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, ipus=1, precision=16, callbacks=TestCallback())
assert isinstance(trainer.accelerator.precision_plugin, IPUPrecisionPlugin)
assert trainer.accelerator.precision_plugin.precision == 16
with pytest.raises(SystemExit):
trainer.fit(model)
@RunIf(ipu=True)
def test_pure_half_precision(tmpdir):
class TestCallback(Callback):
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
assert trainer.accelerator.model.precision == 16
for param in trainer.accelerator.model.parameters():
assert param.dtype == torch.float16
raise SystemExit
model = IPUModel()
model = model.half()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, ipus=1, precision=16, callbacks=TestCallback())
assert isinstance(trainer.accelerator.training_type_plugin, IPUPlugin)
assert isinstance(trainer.accelerator.precision_plugin, IPUPrecisionPlugin)
assert trainer.accelerator.precision_plugin.precision == 16
with pytest.raises(SystemExit):
trainer.fit(model)
@RunIf(ipu=True)
def test_device_iterations_ipu_plugin(tmpdir):
class TestCallback(Callback):
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
assert trainer.accelerator.training_type_plugin.device_iterations == 2
# assert device iterations has been set correctly within the poptorch options
poptorch_model = trainer.accelerator.training_type_plugin.poptorch_models[RunningStage.TRAINING]
assert poptorch_model._options.toDict()["device_iterations"] == 2
raise SystemExit
model = IPUModel()
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=True,
ipus=1,
plugins=IPUPlugin(device_iterations=2),
callbacks=TestCallback(),
)
assert isinstance(trainer.accelerator.training_type_plugin, IPUPlugin)
with pytest.raises(SystemExit):
trainer.fit(model)
@RunIf(ipu=True)
def test_accumulated_batches(tmpdir):
class TestCallback(Callback):
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
# ensure the accumulation_scheduler is overridden to accumulate every batch
# since ipu handle accumulation
assert trainer.accumulation_scheduler.scheduling == {0: 1}
# assert poptorch option have been set correctly
poptorch_model = trainer.accelerator.training_type_plugin.poptorch_models[RunningStage.TRAINING]
assert poptorch_model._options.Training.toDict()["gradient_accumulation"] == 2
raise SystemExit
model = IPUModel()
trainer = Trainer(
default_root_dir=tmpdir, fast_dev_run=True, ipus=1, accumulate_grad_batches=2, callbacks=TestCallback()
)
with pytest.raises(SystemExit):
trainer.fit(model)
@RunIf(ipu=True)
def test_stages_correct(tmpdir):
"""Ensure all stages correctly are traced correctly by asserting the output for each stage."""
class StageModel(IPUModel):
def training_step(self, batch, batch_idx):
loss = super().training_step(batch, batch_idx)
# tracing requires a loss value that depends on the model.
# force it to be a value but ensure we use the loss.
return (loss - loss) + torch.tensor(1)
def validation_step(self, batch, batch_idx):
loss = super().validation_step(batch, batch_idx)
return (loss - loss) + torch.tensor(2)
def test_step(self, batch, batch_idx):
loss = super().validation_step(batch, batch_idx)
return (loss - loss) + torch.tensor(3)
def predict_step(self, batch, batch_idx, dataloader_idx=None):
output = super().predict_step(batch, batch_idx)
return (output - output) + torch.tensor(4)
class TestCallback(Callback):
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx) -> None:
assert outputs["loss"].item() == 1
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx) -> None:
assert outputs.item() == 2
def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx) -> None:
assert outputs.item() == 3
def on_predict_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx) -> None:
assert torch.all(outputs == 4).item()
model = StageModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, ipus=1, callbacks=TestCallback())
trainer.fit(model)
trainer.test(model)
trainer.validate(model)
trainer.predict(model, model.test_dataloader())
@RunIf(ipu=True)
def test_accumulate_grad_batches_dict_fails(tmpdir):
model = IPUModel()
trainer = Trainer(default_root_dir=tmpdir, ipus=1, accumulate_grad_batches={0: 1})
with pytest.raises(
MisconfigurationException, match="IPUs currently only support accumulate_grad_batches being an integer value."
):
trainer.fit(model)
@RunIf(ipu=True)
def test_clip_gradients_fails(tmpdir):
model = IPUModel()
trainer = Trainer(default_root_dir=tmpdir, ipus=1, gradient_clip_val=10)
with pytest.raises(MisconfigurationException, match="IPUs currently do not support clipping gradients."):
trainer.fit(model)
@RunIf(ipu=True)
def test_autoreport(tmpdir):
"""Ensure autoreport dumps to a file."""
model = IPUModel()
autoreport_path = os.path.join(tmpdir, "report/")
trainer = Trainer(
default_root_dir=tmpdir,
ipus=1,
fast_dev_run=True,
plugins=IPUPlugin(autoreport=True, autoreport_dir=autoreport_path),
)
trainer.fit(model)
assert os.path.exists(autoreport_path)
assert os.path.isfile(autoreport_path + "profile.pop")
@RunIf(ipu=True)
def test_manual_poptorch_opts(tmpdir):
"""Ensure if the user passes manual poptorch Options, we run with the correct object."""
model = IPUModel()
inference_opts = poptorch.Options()
training_opts = poptorch.Options()
trainer = Trainer(
default_root_dir=tmpdir,
ipus=1,
fast_dev_run=True,
plugins=IPUPlugin(inference_opts=inference_opts, training_opts=training_opts),
)
trainer.fit(model)
assert isinstance(trainer.accelerator.training_type_plugin, IPUPlugin)
assert trainer.accelerator.training_type_plugin.training_opts == training_opts
assert trainer.accelerator.training_type_plugin.inference_opts == inference_opts
@RunIf(ipu=True)
def test_manual_poptorch_opts_custom(tmpdir):
"""Ensure if the user passes manual poptorch Options with custom parameters set, we respect them in our
poptorch options and the dataloaders."""
model = IPUModel()
training_opts = poptorch.Options()
training_opts.deviceIterations(8)
training_opts.replicationFactor(2)
training_opts.Training.gradientAccumulation(2)
inference_opts = poptorch.Options()
inference_opts.deviceIterations(16)
inference_opts.replicationFactor(1)
inference_opts.Training.gradientAccumulation(1)
class TestCallback(Callback):
def on_fit_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
# ensure dataloaders were correctly set up during training.
plugin = trainer.accelerator.training_type_plugin
assert isinstance(plugin, IPUPlugin)
assert plugin.training_opts.replication_factor == 2
assert plugin.inference_opts.replication_factor == 1
val_dataloader = trainer.val_dataloaders[0]
train_dataloader = trainer.train_dataloader
assert isinstance(train_dataloader, CombinedLoader)
train_dataloader = train_dataloader.loaders
assert isinstance(val_dataloader, poptorch.DataLoader)
assert isinstance(train_dataloader, poptorch.DataLoader)
assert train_dataloader.options.replication_factor == 2
assert val_dataloader.options.replication_factor == 1
plugin = IPUPlugin(inference_opts=inference_opts, training_opts=training_opts)
# ensure we default to the training options replication factor
assert plugin.replication_factor == 2
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, plugins=plugin, callbacks=TestCallback())
trainer.fit(model)
plugin = trainer.accelerator.training_type_plugin
assert isinstance(plugin, IPUPlugin)
training_opts = plugin.training_opts
assert training_opts.device_iterations == 8
assert training_opts.replication_factor == 2
assert training_opts.Training.gradient_accumulation == 2
inference_opts = plugin.inference_opts
assert inference_opts.device_iterations == 16
assert inference_opts.replication_factor == 1
assert inference_opts.Training.gradient_accumulation == 1
@RunIf(ipu=True)
def test_replication_factor(tmpdir):
"""Ensure if the user passes manual poptorch Options with custom parameters set, we set them correctly in the
dataloaders."""
plugin = IPUPlugin()
trainer = Trainer(ipus=2, default_root_dir=tmpdir, fast_dev_run=True, plugins=plugin)
assert trainer.ipus == 2
@RunIf(ipu=True)
def test_default_opts(tmpdir):
"""Ensure default opts are set correctly in the IPUPlugin."""
model = IPUModel()
trainer = Trainer(default_root_dir=tmpdir, ipus=1, fast_dev_run=True)
trainer.fit(model)
assert isinstance(trainer.accelerator.training_type_plugin, IPUPlugin)
inference_opts = trainer.accelerator.training_type_plugin.inference_opts
training_opts = trainer.accelerator.training_type_plugin.training_opts
for opts in (inference_opts, training_opts):
assert isinstance(opts, poptorch.Options)
assert opts.Training.gradient_accumulation == 1
assert opts.device_iterations == 1
assert opts.replication_factor == 1
@RunIf(ipu=True)
def test_multi_optimizers_fails(tmpdir):
"""Ensure if there are multiple optimizers, we throw an exception."""
class TestModel(IPUModel):
def configure_optimizers(self):
return [torch.optim.Adam(self.parameters()), torch.optim.Adam(self.parameters())]
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, ipus=1)
with pytest.raises(MisconfigurationException, match="IPUs currently only support one optimizer."):
trainer.fit(model)
@RunIf(ipu=True)
def test_precision_plugin(tmpdir):
"""Ensure precision plugin value is set correctly."""
plugin = IPUPrecisionPlugin(precision=16)
assert plugin.precision == 16
@RunIf(ipu=True)
def test_accelerator_ipu():
trainer = Trainer(accelerator="ipu", ipus=1)
assert trainer._device_type == "ipu"
assert isinstance(trainer.accelerator, IPUAccelerator)
with pytest.raises(
MisconfigurationException, match="You passed `accelerator='ipu'`, but you didn't pass `ipus` to `Trainer`"
):
trainer = Trainer(accelerator="ipu")
trainer = Trainer(accelerator="auto", ipus=8)
assert trainer._device_type == "ipu"
assert isinstance(trainer.accelerator, IPUAccelerator)
@RunIf(ipu=True)
def test_accelerator_cpu_with_ipus_flag():
trainer = Trainer(accelerator="cpu", ipus=1)
assert trainer._device_type == "cpu"
assert isinstance(trainer.accelerator, CPUAccelerator)
@RunIf(ipu=True)
def test_accelerator_ipu_with_devices():
trainer = Trainer(accelerator="ipu", devices=8)
assert trainer.ipus == 8
assert isinstance(trainer.training_type_plugin, IPUPlugin)
assert isinstance(trainer.accelerator, IPUAccelerator)
@RunIf(ipu=True)
def test_accelerator_auto_with_devices_ipu():
trainer = Trainer(accelerator="auto", devices=8)
assert trainer._device_type == "ipu"
assert trainer.ipus == 8
@RunIf(ipu=True)
def test_accelerator_ipu_with_ipus_priority():
"""Test for checking `ipus` flag takes priority over `devices`."""
ipus = 8
with pytest.warns(UserWarning, match="The flag `devices=1` will be ignored,"):
trainer = Trainer(accelerator="ipu", devices=1, ipus=ipus)
assert trainer.ipus == ipus
@RunIf(ipu=True)
def test_set_devices_if_none_ipu():
trainer = Trainer(accelerator="ipu", ipus=8)
assert trainer.devices == 8
|
[
"noreply@github.com"
] |
bamblebam.noreply@github.com
|
4e526cca9511fe0f8fc5552b5c58df8391f6212f
|
3f976e0336e8fd0c9d376dd65d566ecc248f05e1
|
/python/dont-give-me-five.py
|
64e643e6b8fef6f01938739177c2f645f0712eee
|
[] |
no_license
|
rdvnkdyf/codewars-writing
|
4f22a923d45b4023dd4bbb3ab41decde4762f8da
|
121e522097139a808aa6b56a130ac082003ae777
|
refs/heads/main
| 2023-05-09T03:41:18.967620
| 2021-06-06T16:29:30
| 2021-06-06T16:29:30
| 373,818,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
"""
Don't give me five!
In this kata you get the start number and the end number of a region and should return the count of all numbers except numbers with a 5 in it. The start and the end number are both inclusive!
Examples:
1,9 -> 1,2,3,4,6,7,8,9 -> Result 8
4,17 -> 4,6,7,8,9,10,11,12,13,14,16,17 -> Result 12
The result may contain fives. ;-)
The start number will always be smaller than the end number. Both numbers can be also negative!
I'm very curious for your solutions and the way you solve it. Maybe someone of you will find an easy pure mathematics solution.
Have fun coding it and please don't forget to vote and rank this kata! :-)
I have also created other katas. Take a look if you enjoyed this kata!
"""
def dont_give_me_five(start, end):
    # exclude any number whose decimal representation contains the digit 5
    # (the original modulo check only caught odd multiples of 5, missing e.g. 51)
    arr = []
    for i in range(start, end + 1):
        if '5' in str(i):
            continue
        arr.append(i)
    return len(arr)
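# Added sketch of the "pure mathematics" route the kata hints at: count the
# numbers without a digit 5 via a digit walk instead of scanning the range.
# Assumption (mine, not the kata's): 0 <= start <= end.
def count_no_five_upto(n):
    # how many integers in [0, n] contain no digit 5
    count, digits = 0, str(n)
    for i, d in enumerate(map(int, digits)):
        if d == 5:
            # every completion of this prefix contains a 5
            count += 5 * 9 ** (len(digits) - i - 1)
            break
        # digits below d are never 5 when d < 5; skip the 5 itself when d > 5
        count += (d if d < 5 else d - 1) * 9 ** (len(digits) - i - 1)
    else:
        count += 1  # n itself has no digit 5
    return count

def dont_give_me_five_math(start, end):
    return count_no_five_upto(end) - (count_no_five_upto(start - 1) if start > 0 else 0)

assert dont_give_me_five_math(1, 9) == 8 and dont_give_me_five_math(4, 17) == 12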
|
[
"ridvangs77@gmail.com"
] |
ridvangs77@gmail.com
|
5cb5c2712a8e562cc9538e5cc683fede695f4ae8
|
3b758eb66679dc4b4db1e806fc73e47fd5fa7f8e
|
/Piro/Code/Quantum/DecisionVersion/quantumSat.py
|
1efcf8d6b73783b61039e5b17d0d09da49fa9d5f
|
[] |
no_license
|
Askarpour/sw2_quantum_research
|
5140d79c0631d2b63d5595128dfc9ee6e1c3ad0d
|
7583fd908e1dc998a44b16a096d922e37d24d426
|
refs/heads/master
| 2021-07-02T02:23:29.786576
| 2021-05-23T14:37:33
| 2021-05-23T14:37:33
| 228,428,589
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,592
|
py
|
"""
This is the main class where the circuit is built from the instance and its solution is retrieved.
The clauses still need to be conjoined in order to obtain the CNF, so that the parsed
3-SAT instance can then be solved.
"""
from sys import argv
from satInstance import SATInstance
from qiskit import QuantumRegister
from qiskit.visualization import *
import matplotlib.pyplot as plt
def run_solver(input_file):
instance = SATInstance.from_file(input_file)
clauses = instance.clauses
partial_result = instance.variables + instance.regCount
partial_index = 0
instance.quantumCircuit.barrier()
instance.quantumCircuit.add_register(QuantumRegister(2 + 2 * (len(clauses) - 2), 'z'))
instance.quantumCircuit.add_register(QuantumRegister(1, 'f(x_0, x_1, x_2)'))
while partial_index < len(clauses):
first = partial_result
second = first + 1
if partial_index == 0:
instance.quantumCircuit.cx(clauses[partial_index], first)
partial_index += 1
instance.quantumCircuit.cx(clauses[partial_index], second)
partial_index += 1
partial_result += 2
instance.quantumCircuit.ccx(first, second, partial_result)
# basic text circuit is drawn in a file in the Circuits folder
circuit_drawer(instance.quantumCircuit, 0.7, 'Circuits/' + argv[1] + 'circuit', fold=300)
# fancy circuit
instance.quantumCircuit.draw(output='mpl')
plt.show()
def main(args):
file = open('Input/' + args[1], 'r')
run_solver(file)
if __name__ == '__main__':
main(argv)
|
[
"framegapiro@gmail.com"
] |
framegapiro@gmail.com
|
036fd4e6df52c2f68430ad36168e043c7563d093
|
03acb21a746640986dd104bac734cb984e6ea99b
|
/main.py
|
cf273b44f7186dea10a635759daeb2d744aef295
|
[] |
no_license
|
babooteng2/flask-scrapper
|
cca384984ea50805cc9fb93253423247c44b4e20
|
aa340e98049fa42c537abe738b218a04ffd32a05
|
refs/heads/main
| 2023-07-24T10:12:47.130320
| 2021-09-05T16:53:12
| 2021-09-05T16:53:12
| 403,358,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
from flask import Flask, render_template, request, redirect, send_file
from scrapper import get_jobs
from exporter import save_to_file
app = Flask("FlaskScrapper")
db = {}  # in-memory cache: search word -> scraped job listings
@app.route("/")
def home():
return render_template("potato.html")
@app.route("/report")
def report():
word = request.args.get('word')
if word:
word = word.lower()
existingJobs = db.get(word)
if existingJobs:
jobs = existingJobs
else:
jobs = get_jobs(word)
db[word] = jobs
else:
return redirect("/")
return render_template(
"report.html", serchingBy=word,
resultsNum=len(jobs),
jobs=jobs
)
@app.route("/export")
def export():
try:
word = request.args.get("word")
if not word:
raise Exception()
word = word.lower()
jobs = db.get(word)
if not jobs:
raise Exception()
save_to_file(jobs)
return send_file("jobs.csv")
except:
return redirect("/")
app.run()
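# Added usage sketch (routes as defined above): GET /report?word=python scrapes
# once and caches the jobs in `db`; GET /export?word=python serves jobs.csv.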
|
[
"babooteng2@gmail.com"
] |
babooteng2@gmail.com
|
5c0cfa06b2e70643c4cfe437546555ccab3e48e1
|
77a85d44d197217e7dd58931841263df61ad4a19
|
/tests/test.py
|
0d416db3d68f9c5a814c3caa28c3d0f6f4c2f5aa
|
[
"MIT"
] |
permissive
|
Open-Source-Ninja/vm-automation
|
41c414150d52726b82d145078ec7f34960f90141
|
4cdd9d582b390495de82f0b69df7c481d7a281bc
|
refs/heads/master
| 2023-04-10T20:12:35.201225
| 2021-04-17T18:08:08
| 2021-04-17T18:08:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,030
|
py
|
import support_functions
import vm_functions
import unittest
version_good = '6.1.18r142142'
vm_good = 'ws2019'
vm_bad = 'bad'
snapshot_good = 'live'
snapshot_bad = 'bad'
file_good = '../putty.exe'
file_bad = '../bad.exe'
file_dst = 'C:\\windows\\temp\\file.exe'
user_good = 'Administrator'
pass_good = '12345678'
user_bad = 'bad'
pass_bad = 'bad'
ips_good = ['10.0.2.15', '192.168.56.113']
class TestStringMethods(unittest.TestCase):
# vm_functions options
vm_functions.logging.disable()
vm_functions.vboxmanage_path = 'vboxmanage'
vm_functions.timeout = 60
def test01_file_info(self):
result = support_functions.file_info(file_good)
self.assertEqual(result[0], 0)
self.assertEqual(result[1], 'f2d2638afb528c7476c9ee8e83ddb20e686b0b05f53f2f966fd9eb962427f8aa')
self.assertEqual(result[2], '374fb48a959a96ce92ae0e4346763293')
self.assertEqual(result[3], 1070)
def test02_file_info_nonexisted(self):
result = support_functions.file_info(file_bad)
self.assertEqual(result, 1)
def test03_virtualbox_version(self):
result = vm_functions.virtualbox_version()
self.assertEqual(result[0], 0)
self.assertEqual(result[1], version_good)
self.assertEqual(result[2], '')
def test04_vm_start(self):
result = vm_functions.vm_start(vm_good)
self.assertEqual(result[0], 0)
self.assertRegex(result[1], f'VM "{vm_good}" has been successfully started.')
self.assertEqual(result[2], '')
def test05_vm_start_running(self):
        # Note: this test expects the previous test to have run first, leaving the VM started
result = vm_functions.vm_start(vm_good)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], '')
self.assertRegex(result[2], 'is already locked by a session')
def test06_vm_start_nonexisting(self):
result = vm_functions.vm_start(vm_bad)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], '')
self.assertRegex(result[2], 'Could not find a registered machine')
def test07_vm_upload(self):
result = vm_functions.vm_upload(vm_good, user_good, pass_good, file_good, file_dst)
self.assertEqual(result[0], 0)
self.assertEqual(result[1], '')
self.assertEqual(result[2], '')
def test08_vm_upload_nonexisting_file(self):
result = vm_functions.vm_upload(vm_good, user_good, pass_good, file_bad, file_dst)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], '')
self.assertRegex(result[2], 'VERR_FILE_NOT_FOUND')
def test09_vm_upload_incorrect_credentials(self):
result = vm_functions.vm_upload(vm_good, user_bad, pass_bad, file_good, file_dst)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], '')
self.assertRegex(result[2], 'The specified user was not able to logon on guest')
def test10_vm_download_incorrect_credentials(self):
result = vm_functions.vm_download(vm_good, user_good, pass_bad, file_bad, file_dst)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], '')
self.assertRegex(result[2], 'The specified user was not able to logon on guest')
def test11_vm_download_nonexisting_file(self):
result = vm_functions.vm_download(vm_good, user_good, pass_good, file_dst, file_bad)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], '')
self.assertRegex(result[2], 'Querying guest file information failed')
def test12_vm_stop(self):
result = vm_functions.vm_stop(vm_good)
self.assertEqual(result[0], 0)
self.assertEqual(result[1], '')
self.assertRegex(result[2], '100%')
def test13_vm_stop_stopped(self):
result = vm_functions.vm_stop(vm_good)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], '')
self.assertRegex(result[2], 'VBOX_E_INVALID_VM_STATE')
def test14_vm_snapshot_restore_good(self):
result = vm_functions.vm_snapshot_restore(vm_good, snapshot_good)
self.assertEqual(result[0], 0)
self.assertRegex(result[1], 'Restoring snapshot')
self.assertRegex(result[2], '100%')
def test15_vm_snapshot_restore_nonexisting_a(self):
result = vm_functions.vm_snapshot_restore(vm_good, snapshot_bad)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], '')
self.assertRegex(result[2], 'Could not find a snapshot')
def test16_vm_snapshot_restore_nonexisting_b(self):
result = vm_functions.vm_snapshot_restore(vm_bad, snapshot_bad)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], '')
self.assertRegex(result[2], 'Could not find a registered machine')
def test17_list_ips(self):
result = vm_functions.list_ips(vm_good)
self.assertEqual(result[0], 0)
self.assertEqual(result[1], ips_good)
self.assertEqual(result[2], '')
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
Open-Source-Ninja.noreply@github.com
|
614afacb7d88abe1697191ba3dc5fea6cdce83ef
|
a520eb3a99c0e17760cb185b61da2c5e8ae36bed
|
/apps/users/tests.py
|
0e48b5040d17cd508e8ea78902476af196085d14
|
[] |
no_license
|
zhuoxiaojian/yishengAnalyze
|
9cd4b984a4c90d23d6e2d324def187b88d5b737b
|
18d2afad78f8cf3a734d41d835e7caf7635fca47
|
refs/heads/master
| 2022-12-10T21:30:25.176482
| 2019-01-19T08:55:46
| 2019-01-19T08:55:46
| 153,866,303
| 1
| 1
| null | 2022-12-09T05:32:09
| 2018-10-20T03:32:46
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 133
|
py
|
from django.test import TestCase
# Create your tests here.
from users.tasks import test
if __name__ == '__main__':
test.delay()
|
[
"1933860854@qq.com"
] |
1933860854@qq.com
|
7bbbf4aec5ca5dc90e27b4c35550b123e7c08016
|
b460c3ae3b8fa0c1db5185abc56be4b7106feb58
|
/ejercicio_3.py
|
ce8e31f3f66bb5c8892924deb884292760740e6c
|
[] |
no_license
|
mirigonza/funciones_python
|
61eac14d65801e5371d83ced4cad2de5face84c9
|
7b8ad1e55017e536d61d59a28f2534c6368e7fea
|
refs/heads/master
| 2023-08-25T05:44:44.383005
| 2021-10-20T13:08:46
| 2021-10-20T13:08:46
| 414,288,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
# Functions [Python]
# Practice exercises
# Author: Inove Coding School
# Version: 2.0
# IMPORTANT: do NOT delete the comments
# that appear in green with the "#" hashtag
# Exercises with functions
# --------------------------------
# Define the "ordenar" (sort) function in here
#def ordenar (numeros):
# --------------------------------
def ordenar(numeros):
    # sorts the list in place and returns it
    numeros.sort()
    resultado = numeros
    return resultado
if __name__ == '__main__':
    print("Bienvenidos a otra clase de Inove con Python")
    numeros = [2, 4, 10, 8, 12, 6]
    # Student: create the "ordenar" function
    # Write a new function called "ordenar"
    # that we will use to sort the list of numbers.
    # It must take 1 parameter, the list of numbers,
    # and return the new sorted list (very similar to the average function)
    # Inside the function you can sort the list
    # using Python's built-in "sort" function
    # After creating the function, invoke it here:
    # lista_ordenada = ordenar(numeros)
    lista_ordenada = ordenar(numeros)
    # Print "lista_ordenada", which holds the values
    # returned by the ordenar function:
    print("lista ordenada", lista_ordenada)
    print("terminamos")
|
[
"noreply@github.com"
] |
mirigonza.noreply@github.com
|
3a19c9c5be00b701cdd309ad99d37a8fd77a6021
|
cd257631f442d24d2e4902cfb60d05095e7c49ad
|
/week-02/day-01/average_of_input.py
|
d18279b22f7452cd634a2164b12f176064e3c4ef
|
[] |
no_license
|
green-fox-academy/Chiflado
|
62e6fc1244f4b4f2169555af625b6bfdda41a975
|
008893c63a97f4c28ff63cab269b4895ed9b8cf1
|
refs/heads/master
| 2021-09-04T03:25:25.656921
| 2018-01-15T09:02:47
| 2018-01-15T09:02:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
# Write a program that asks for 5 integers in a row,
# then it should print the sum and the average of these numbers like:
#
# Sum: 22, Average: 4.4
number = 0
for i in range(0, 5):
number += int(input('Give me a number: '))
print('Sum: ' + str(number) + ' Average: ' + str(number/(i + 1)))
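# Added sketch: the same program with a list, so the average doesn't depend on
# the loop index (`number/(i + 1)` above relies on i ending at 4).
nums = [int(input('Give me a number: ')) for _ in range(5)]
print('Sum: ' + str(sum(nums)) + ' Average: ' + str(sum(nums) / len(nums)))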
|
[
"prjevarabalazs@gmail.com"
] |
prjevarabalazs@gmail.com
|
3ff04a2f5934a2ed37927656ec8497009a08a5ff
|
9d37286183243fd2a040466f0df0b3b31f69ef6a
|
/products/migrations/0002_product_upvoters.py
|
8f7c67c02056da450c6e15bc6701eb2471a35c5c
|
[] |
no_license
|
ma9shah/Product-Hunt-Django
|
4a5b95db2d042718f374d3fd7cd771b4b0d196ed
|
19bbfb72889ebce6813a0845df10787f8f7a93f6
|
refs/heads/master
| 2022-01-25T15:04:42.207144
| 2019-07-27T08:54:29
| 2019-07-27T08:54:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
# Generated by Django 2.2.2 on 2019-06-27 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='product',
name='upvoters',
field=models.TextField(default=''),
preserve_default=False,
),
]
|
[
"ma9shah@gmail.com"
] |
ma9shah@gmail.com
|
29f162d6142b8bebfd7ec13c592f93efb8ae2701
|
cd60571bf097d6a8a5fb3b687773c7112a3a6e30
|
/socket_class.py
|
b2b814f1111bacbc13541e34f91e3c118f28ce0b
|
[] |
no_license
|
donoghuc/code_example_1
|
f89af43054c32747d9190e14fb93cf5b16a9468a
|
57c1b967512b50fa347acf9c64d7517800406c78
|
refs/heads/master
| 2021-01-19T23:40:57.919011
| 2017-04-21T19:36:50
| 2017-04-21T19:36:50
| 89,016,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,978
|
py
|
#! /usr/bin/python3
# Cas Donoghue
# CS372
# Project 1
# 12Feb2017
# This is a class for sockets. It is used for the server and client side.
# I got the idea from: https://docs.python.org/3/howto/sockets.html
# Basically just followed the docs.
import socket
import sys
class MySocket:
"""class for message app.
"""
def __init__(self, sock=None):
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
def client_connect(self, host, port):
try:
self.sock.connect((host, port))
except socket.error as e:
print(str(e))
sys.exit()
# listen function.
def server_listen(self, host, port, connections):
try:
self.sock.bind((host, port))
except socket.error as e:
print(str(e))
sys.exit()
self.sock.listen(connections)
# super simple accept function (returns conn, addr)
def server_accept(self):
return self.sock.accept()
    # Python 3 has sendall, so you don't have to worry about writing one yourself
def client_send(self, msg):
self.sock.sendall(str.encode(msg))
# subtle diff between client and server
def server_send(self, conn, msg):
conn.sendall(str.encode(msg))
# use my system of prepending expected message len to ensure you get the whole message.
def server_receive(self, conn):
chunks = []
bytes_expected = ''
bytes_recd = 0
while True:
data = conn.recv(3).decode()
for x in range(len(data)):
if data[x] != 'x':
bytes_expected = bytes_expected + data[x]
if not data:
break
bytes_expected = int(bytes_expected)
while bytes_recd < bytes_expected:
chunk = conn.recv(min(bytes_expected - bytes_recd, 2048)).decode()
chunks.append(chunk)
bytes_recd = bytes_recd + len(chunk)
return ''.join(chunks)
# again use the prepend message idea
def client_recv(self):
chunks = []
bytes_expected = ''
bytes_recd = 0
while True:
data = self.sock.recv(3).decode()
for x in range(len(data)):
if data[x] != 'x':
bytes_expected = bytes_expected + data[x]
if not data:
break
bytes_expected = int(bytes_expected)
while bytes_recd < bytes_expected:
chunk = self.sock.recv(min(bytes_expected - bytes_recd, 2048)).decode()
chunks.append(chunk)
bytes_recd = bytes_recd + len(chunk)
return ''.join(chunks)
# simply close up connections when done.
def client_close(self):
self.sock.close()
def server_close(self, conn):
conn.close()
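# --- Added usage sketch ---
# Assumptions (mine): a peer speaking the same protocol listens on
# localhost:50007, and the 3-character length header is padded with 'x'
# (e.g. 'x11' for an 11-byte payload), matching the receive loops above.
if __name__ == '__main__':
    client = MySocket()
    client.client_connect('localhost', 50007)
    payload = 'hello world'
    # prepend the 3-char length header, then the payload itself
    client.client_send(str(len(payload)).rjust(3, 'x') + payload)
    print(client.client_recv())
    client.client_close()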
|
[
"cas.donoghue@gmail.com"
] |
cas.donoghue@gmail.com
|
1b788c6fade91fd375383669af7247fe95bc55b4
|
3219e0d0586efbc3f11b74e37043057a986721b0
|
/Python_Basics_Week_1.py
|
08c088b54a278345d89b0c05acfb12121b11aad2
|
[] |
no_license
|
Sandbox4KidsTM/Python_Basics_1
|
a6ac6d46a916ea4213ffff40137e76857a7b5028
|
fe52cf3d209fe6dcb0add8c64aba998c7562cee0
|
refs/heads/master
| 2020-03-26T00:06:32.105312
| 2018-08-13T22:59:19
| 2018-08-13T22:59:19
| 144,306,641
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,370
|
py
|
# coding: utf-8
# # Python Basics- Day 1
#
# We are going to be learning the basics of python.
#
# Today we will cover basic types, user interaction, branching, and the random library
# In[1]:
print("Hello World")
# ## Basic Types
# * int (integers)
# * float (decimals)
# * char (characters)
# * boolean (logical: True, False)
# * Strings (collections of characters)
# In[1]:
# Basic Types - foundational types that make up python.
3 # int (integer)
4.5 # float (decimals)
.45
45.0
print(4 - 3)
# String - collection of characters that is not interpreted
# Char - single character
print("4 + 11 =", 4 + 11)
print("My name is Mitch.")
# Boolean - Either True or False
print(False)
print(True)
## Variables
num = 27
color = "Green"
isBlonde = True
age = input("Enter your age: ")
print(age)
print(color)
# Casting - turning a type into a different type
# if age bigger than 10: print You are older than 10.
# int(v)
# float(v)
# str(v)
# bool(v)
# char(v)
numAge = int(age)
# ### Application: Temperature Conversion
#
# Next we will create a simple application that will request a temperature in Fahrenheit and convert it to Celsius.
# Then it will prompt the user for a temperature in Celsius and convert it to Fahrenheit. I have outlined the first half with comments.
# In[1]:
from sense_hat import SenseHat ## Delete this line before running
hat = SenseHat() ## Delete this line before running
# Ask the user for a temperature, store to variable
Tempf = input("What is the temperature(f): ")
# Change that temp into a float, (from a string)
Tempf = float(Tempf)
# Convert that number into Celcius from Fahrenheit
# Tc = (Tf - 32) x (5/9)
TempC = (Tempf - 32) * (5/9)
# Print out that number. With some info
print("Celcius:", TempC)
# Go the other direction
# Tf = ((9/5) x Tc) + 32
Tc = input("What is the temperature(c): ")
Tc = float(Tc)
Tf = (9/5 * Tc) + 32
print("Fahrenheit:",Tf)
currentTemp = hat.get_temperature() ## Delete this line before running
print("Current Temperature: (C)", currentTemp) ## Delete this line before running
hat.show_message(str(currentTemp)) ## Delete this line before running
# ## Branching and the Random Library
#
# We will be using a popular library called Random to learn about branching, conditions, and generating random numbers.
#
# Branching allows us to run certain blocks of code only when it suits our needs. Generally this will be done with the `if` statement.
#
# `if (condition):`
#
# `____ #Do Something`
#
# `____ #Do Something else`
#
# `# Do something outside of the if statement`
#
# To use Random library we need to use the `import` keyword. This gives us access to all of the classes and functions in the random library. We will be using the `randint(a,b)` function, which returns a random integer between `a` and `b` inclusive.
# In[2]:
import random
num = random.randint(-5,5)
if num == 0 or num < 0:
print("You have no health.")
if num > 0:
print("You have {} health".format(num))
# ### Challenge Program
#
# Write a program that simulates ordering a meal at In-N-Out.
# First ask the user if they want a hamburger or a cheeseburger.
# A hamburger costs 3.25, and a cheeseburger costs 4.10.
# Then ask if they want fries. Fries cost 2.45
# Ask if they want a strawberry or vanilla shake.
# Strawberrys cost 3.75. Vanilla costs 3.00.
# Give them their total cost.
# In[3]:
from sense_hat import SenseHat # Don't do this
s = SenseHat() # Or this
print("Welcome to In-N-Out!")
hb = 3.25
cb = 4.10
ff = 2.45
vs = 3.00
ss = 3.75
price = 0.0
burger = input("Would you like a Hamburger or a Cheeseburger? ")
if burger.lower() == "hamburger":
print("Got it, one hamburger.")
price = price + hb
elif burger.lower() == "cheeseburger":
print("Comin up, one cheeseburger.")
price = price + cb
fries = input("Would you like fries? ")
if fries.lower() == "yes":
print("Great")
price = price + ff
shake = input("Would you like a Strawberry Shake or a Vanilla Shake? ")
if shake.lower() == "strawberry":
print("My favorite, you're all set.")
price = price + ss
elif shake.lower() == "vanilla":
print("Great, you are set.")
price = price + vs
print("You're total price is: $"+ str(price))
s.show_message("$"+str(price)) # Don't write this
# # Python Basics - Day 2
# Today we will be covering Iteration and Lists and Dictionaries.
#
# ## Lists
# Yesterday we learned that we can create variables to store our data. But what if we have a large set of data all related to some single topic? In this case we will often turn to a list. A list is a structure in Python that allows us to store data in an ordered way.
#
# In[28]:
myList = ["Green", "Blue", "Red", "Gray"]
myNumbers = [6.5, 5.7, 5.8, 4, 5]
print(myList)
print(myNumbers)
# Accessing the elements (zero based)
print(myNumbers[0]) # Print the first element
print(myNumbers[3]) # Prints the fourth element
print(myNumbers[-1]) # Always prints the last element
# Adding and removing from the list
# Append always adds to the end of the list
myNumbers.append(14)
# Adds 15 to the list as the first element
myNumbers.insert(0,15)
# Removes the first element and gives it back to us.
# Pop default to the last element.
a = myNumbers.pop(0)
print(myNumbers)
print(a)
# Checking for a value in the list, is "Green" in my list?
# Counts the number of times "Green" is in the list
hasGreen = myList.count("Green")
# Finds where it is in the list, throws error if not found
greenIndex = myList.index("Green")
# Returns the number of elements in the list
length = len(myList)
# ## Dictionaries
#
# A dictionary is similar to a list in that it stores multiple pieces of data all at once. The difference is that a dictionary stores its data as key-value pairs. There is no order to a dictionary, the keys are what we use to look up our data.
# In[50]:
# Creates a dictionary
favoriteColors = {"Mitch": "Green", "Angel": "Red", "Thomas": "Green", "Harrison": "Green"}
# Adds a key-value pair to the dictionary
favoriteColors["Bob"] = "Blue"
# Remove a key-value pair
del(favoriteColors["Mitch"])
# Check if a key is in the dictionary
hasBob = "Bob" in favoriteColors
print(hasBob)
lookUp = "Bob"
print("{}'s favorite color is {}".format(lookUp, favoriteColors[lookUp]))
# ### Program: Queriable Dictionary
#
# Create a program that looks up a word in a premade dictionary and prints out the definition. If the word entered is not in the dictionary, we tell the user so.
# In[4]:
myWords = {"python": "A general purpose programming language",
"computer": "An electrical device that makes \
large computations very quickly",
"raspberry pi": "A single board computer that is \
great for many small applications"}
query = input("What word would you like to look up? ").lower()
if query in myWords:
print(myWords[query])
else:
print("Sorry, Invalid Word.")
# ## Day 3 - Looping and Functions
# Often we want to repeat a process over and over again but we don't want to type our code over and over again if we can help it. There are two main types of loops that we use to avoid rewriting code.
#
# `While` loop: runs a block of code over and over again until a condition is false.
# In[39]:
# While loops
sum_ = 0
x = 2
while x <= 1000:
sum_ += x
x += 2
sum2 = 0
for x in range(0,1001,2):
sum2 += x
print(sum_)
print(sum2)
# ### While Loop Assigment
# Find the product of the first 13 prime numbers
# In[21]:
def isPrime(num):
x = 2
while x <= num / 2:
if num % x == 0:
return False
x += 1
return True
ans = 1
x = 2
primes = 0
while primes < 13:
if isPrime(x):
ans *= x
primes += 1
print("Prime Found: {}. Ans: {}. Primes: {}" .format(x, ans, primes))
x += 1
print(ans)
# The second type of loop is the `for` loop. The `for` loop is similar to the `while` loop only we have more control over how many times it is run.
# In[36]:
for i in range(1,11):
print(i)
array = ["Mark", "Bob", "Steve"]
for x in array:
print(x, "says hi.")
index = 0
while index < len(array):
print(array[index], "_says hi.")
index += 1
# ### For Loop Assignment
# Write a program that gets three integers from the user. Count from the first number to the second number in increments of the third number. Use a `for` loop to do it.
# In[49]:
countFrom = int(input("Count From: "))
countTo = int(input("Count To: ")) + 1
countBy = int(input("Count By: "))
if (countTo - 1 - countFrom) % countBy ==0:
for i in range(countFrom, countTo , countBy):
print(i)
else:
print("Invalid Input")
# ### Challenge Assignment: FizzBuzz
# Write a program that prints the numbers from 1 to 100. But for multiples of three print "Fizz" instead of the number and for the multiples of five print "Buzz". For numbers which are multiples of both three and five print "FizzBuzz".
#
# In[ ]:
num = 1
while num <= 100:
if num % 15 == 0:
print("FizzBuzz")
elif num % 5 == 0:
print("Buzz")
elif num % 3 == 0:
print("Fizz")
else:
print(num)
num += 1
# ### Functions
# Often we have block of code that serves one purpose and we want to be able to reuse that functionality any time we need to. We call creating a function "Defining a function". A function definition is started with the `def` keyword followed by the name of the functions and a pair of parenthesis. Inside the parenthesis we can put any "inputs" that our function need to be able to run. We call the inputs parameters.
# In[73]:
import math
def square(number):
print("The square of {} is {}." \
.format(number, number * number))
return number * number
def _2xPlus5(x):
return 2*x + 5
def PythagTheorem(leg1, leg2):
sumOfSquares = square(leg1) + square(leg2)
return math.sqrt(sumOfSquares)
for i in range(0,11):
b = _2xPlus5(i)
square(b)
print(PythagTheorem(3,4))
# ### Challenge Program: Area Calculator
# Create a program that show the user a menu to calculate the area of a few different shapes: square, rectangle, triangle, and circle. Then find the area of the shape with the parameters given by the user.
# In[81]:
import math
def inputSquare():
side = input("Side Length: ")
side = int(side)
return side * side
def inputRectangle():
w = int(input("Width: "))
l = int(input("Length: "))
return w * l
def inputTriangle():
b = int(input("Base: "))
h = int(input("Height: "))
return (1/2)*b*h
def inputCircle():
r = int(input("Radius: "))
return math.pi * r * r
print("1. Area of a Square")
print("2. Area of a Rectangle")
print("3. Area of a Triangle")
print("4. Area of a Circle")
ans = -1
choice = int(input("\n Type 1-4 to choose: "))
if choice == 1:
ans = inputSquare()
elif choice == 2:
ans = inputRectangle()
elif choice == 3:
ans = inputTriangle()
elif choice == 4:
ans = inputCircle()
if ans != -1:
print(ans)
else:
print("Invalid Input")
# ## Classes
# Classes allow us to encapsulate data and functions into one group, or class, that lets us easily simulate behavior.
# For example, next we are going to create a rocket class that simulates the position and fuel levels of a rocket. The convenience of wrapping our program into a class allows us to create multiple rockets with ease.
#
# Lists - `[]`
#
# Dictionary - `{}`
#
# Tuples - `()`
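# A quick added sketch: the three literal syntaxes side by side
point = (3, 4)            # tuple - fixed size, immutable
names = ["Mark", "Bob"]   # list  - ordered, mutable
ages = {"Mark": 30}       # dict  - key-value lookup
print(type(point), type(names), type(ages))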
# In[37]:
import math
# Takes in two tuples and calculates the distance between them.
def distance_formula(p1, p2):
sum_of_squares = 0
if len(p1) == len(p2):
for i in range(0, len(p1)):
diff = p2[i] - p1[i]
sum_of_squares += diff * diff
return math.sqrt(sum_of_squares)
else:
return -1
class Rocket():
## Magic method, used to initialize our class into an object. Takes 2 parameters,
# the starting location of the rocket and its fuel level
def __init__(self, p0, fuel_level0):
self._position = p0
self._fuel_level = fuel_level0
## Magic method, used to create the string that is
# printed when we call the print() function on our object.
def __repr__(self):
return "Position: {}; Fuel: {}".format(self._position, self._fuel_level)
## Assignment: edit the fly method so that:
# We ensure that there is enough fuel to make the trip
# If we do, make the trip
# If not, print("Not enough fuel")
def fly(self, p2):
distance = distance_formula(self._position, p2)
fuel_used = 0.3 * distance
##### Add fuel logic here
if fuel_used > self._fuel_level:
print("Not enough fuel")
return False
else:
self._fuel_level -= fuel_used
self._position = p2
return True
#####
def refuel(self):
print("Filling up the fuel tank.")
self._fuel_level = 100
def get_position(self):
return self._position
def get_fuel_level(self):
return self._fuel_level
class Shuttle(Rocket):
# People is an optional parameter, defaults to 0 if not included
def __init__(self, p0, fuel_level, people = 0):
Rocket.__init__(self, p0, fuel_level)
self._people = people
def __repr__(self):
return "Position: {}; Fuel: {}; People: {}" .format(self._position, self._fuel_level, self._people)
## Get People function
def get_people(self):
return self._people
## Load People function
def load_people(self, num_people):
self._people += num_people
## Unload People function
def unload_people(self, num_people):
self._people -= num_people
if self._people < 0:
self._people = 0
r = Rocket((0,0,0),100)
print(r)
r.fly((4,5,9))
print(r)
if r.get_position() == (4,5,9):
print("You made it to the space station!")
r.refuel()
print(r)
print("\n")
ss = Shuttle((0,1,0), 100)
print(ss)
ss.fly((10,10,11))
print(ss)
ss.load_people(5)
print(ss)
ss.fly((0,0,0))
print(ss)
ss.unload_people(11)
print(ss)
|
[
"mitchl@sandbox4kids.com"
] |
mitchl@sandbox4kids.com
|
4c1f2bfca939c5e104ce710ee76f0be2c1195649
|
1c4090a26147f2b55a4e66038a4b5e2b822adff1
|
/oldboy/day02/test03.py
|
fa64d715eb2adaa2861dfb2fd0d7c71b44204e83
|
[] |
no_license
|
sbgaga123/python
|
1cb4897e416cbf6b342b8dde813bfa6ae7f18936
|
0e87d9397d5cd9edf5bc4ef696018a979272c098
|
refs/heads/master
| 2021-01-22T03:53:54.208656
| 2017-06-23T09:37:35
| 2017-06-23T09:37:35
| 92,412,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
# dic = {'k1':'v1','k2':'v2','k3':[11,22,33]}
# v = dic.get('k3')
# v1 = v.insert(0,18)
# print(dic)
s = 'alex'
# tuple(s) splits the string into a tuple of its characters: ('a', 'l', 'e', 'x')
v = tuple(s)
v2 = type(v)
print(v2)
|
[
"13581668223@139.com"
] |
13581668223@139.com
|
f01cdbdad8f525dbaca70e5d0878881516807078
|
a28a5bedca8d28a5e1c2705e171a01646e5e9deb
|
/Group_Signature/wsgi.py
|
10d7de676d21021a992fc380f00fbf2c2db95942
|
[] |
no_license
|
ashwinbande/gps
|
9b9db5a9e41572ec633348b99f6659178b7d3f91
|
c69ae4f33a9d324ae719d3e247eacd9ad40cbd3d
|
refs/heads/master
| 2020-12-31T07:33:44.712864
| 2017-03-29T02:22:54
| 2017-03-29T02:22:54
| 86,528,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
"""
WSGI config for Group_Signature project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Group_Signature.settings")
application = get_wsgi_application()
|
[
"ashwinbandeukd@gmail.com"
] |
ashwinbandeukd@gmail.com
|
f0a27cf649d704a8cb6104c717ccbcb4066d2436
|
a18003c02730140109f94f8ae545405c4814e8df
|
/capacity_monitor.py
|
4b46fb26669dfe4d27ea926f6c4d1caf498b97c5
|
[] |
no_license
|
sangshz/MCU-DIY
|
bb62150b75655843c6e3ea16fe0eda08f14acf53
|
0d35b80258f3b974bbbdddbb76d50ee51788769b
|
refs/heads/main
| 2023-02-08T13:35:56.033419
| 2020-12-31T01:38:07
| 2020-12-31T01:38:07
| 325,683,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
#!/usr/bin/env python
import serial
import time
ser=serial.Serial("/dev/ttyUSB0",9600,timeout=0.5)
#ser.parity='E'
#ser.baudrate = 9600
#print (ser.name)
#print (ser.port)
#print (ser.baudrate)
#print (ser.isOpen())
print (ser)
#ser.write(chr(0x00))
#n=ser.write(chr(0x31))
#n=ser.write(0x31)
#n=ser.write('l')
#n=ser.write(b'a')
#time.sleep(1)
while True:
    data = ser.read(1)
    if len(data) > 0:
        # dump each received byte as hex
        print(hex(ord(data)))
    #time.sleep(1)
    #data = data.encode('utf-8')
    #print (data)
#data = ser.readline()
#ser.write("hello")
ser.close()
|
[
"noreply@github.com"
] |
sangshz.noreply@github.com
|
e6916dbccea2e85566c3010365a95531f3d1a82e
|
62bf506e8f49ef1f2b24fe583fab1f65abc16c8b
|
/Validator.py
|
085157eb0d2fe7dfd9c4f28984a62985fa72377b
|
[] |
no_license
|
pradeeppanayal/PythonJunitExecutor
|
627545ad9033e190a500e3ee5a6b03dad09c1707
|
b85fc3e3776f2af8f5db93d82ebf5c0e010bcdd8
|
refs/heads/master
| 2021-05-08T03:56:06.485187
| 2017-10-30T06:12:36
| 2017-10-30T06:12:36
| 108,252,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
##############################################################
# Project : PythonJunitTestFrameWork #
# Since Version : 1.0.0 #
# Current Version : 1.0.0 #
# Date of creation : 24-Oct-2017 #
# Author : Pradeep CH #
# Purpose : Sample source class #
##############################################################
__author__ = 'Pradeep CH'
__version__ ='1.0.0'
import argparse
from TestExecutor import Tester
parser = argparse.ArgumentParser()
parser.add_argument('-t',"--testDir", help="Root directory to execute the tests",type=str,default='.')
parser.add_argument('-s',"--sourcesDirs", help="Source module/file path need to be tested",nargs = '*',default=[])
def main():
args = parser.parse_args()
tester = Tester(args.testDir,args.sourcesDirs)
tester.scanAndExecuteTests()
tester.showTestExecutionSummary()
if __name__ =='__main__':
main()
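# Added usage sketch (paths are placeholders):
#   python Validator.py -t ./tests -s ./src/module_a.py ./src/module_b.py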
|
[
"pradeep.k@payoda.com"
] |
pradeep.k@payoda.com
|
a7d089216270c5bb74fd20686a6ab7b781bcfc54
|
30c8cd017a31ab14207789d5e5565d9cf8aefab9
|
/Uebung03/aufgabe3/main.py
|
5c0990dd1ca219688cb84012e06afdc934c7f066
|
[] |
no_license
|
rafaelkallis/pcl2-homework
|
30bbeabd5ea45a3e7482381e1383a5f6106efef2
|
636a3d5e380a44e6ef504c7411c3bc019db22b9d
|
refs/heads/master
| 2021-01-21T11:15:12.848324
| 2017-04-30T12:09:46
| 2017-04-30T12:09:46
| 83,539,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
#!/usr/bin/env python3
# coding: utf-8
import xml.etree.ElementTree as ElementTree
import json
def ElementTreeToDict(root):
    # list(root) replaces the deprecated getchildren() (removed in Python 3.9)
    children = list(root)
    if children:
        theDict = {root.tag: list(map(ElementTreeToDict, children))}
        theDict.update(('@' + k, v) for k, v in root.attrib.items())
        return theDict
    else:
        return {root.tag: root.text}
def main():
with open("./books_test.json", "w") as dest_file:
root = ElementTree.parse('books.xml').getroot()
theDict = ElementTreeToDict(root)
theJSON = json.dumps(theDict, indent=2)
dest_file.write(theJSON)
if __name__ == '__main__':
main()
|
[
"rk@rafaelkallis.com"
] |
rk@rafaelkallis.com
|
527fd54350304d1481a3a25e481d825974c00439
|
84fe126334f36e635c3b871059632e17acc26d0d
|
/src/problems_100/euler_123.py
|
6f32877b7c272160967cd724c97ea9cfd55caf7b
|
[] |
no_license
|
afrancais/euler
|
bf27226118c2ae3747a64fd3903ba5bb1343b736
|
fc3e56be2d894191ca1517432e4ae65074dcfad6
|
refs/heads/master
| 2020-12-31T04:28:13.959854
| 2016-05-19T13:45:01
| 2016-05-19T13:45:01
| 59,201,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
# Project Euler 123: find the least index k such that the remainder of
# (p_k - 1)**k + (p_k + 1)**k divided by p_k**2 first exceeds 10**10.
import primeutils

g = primeutils.prime_gen()
k = 1
while True:
    if k % 1000 == 0:
        print(k)
    p = next(g)
    a = (p - 1) ** k + (p + 1) ** k
    r = a % (p ** 2)
    if r > 10 ** 10:
        print(k, r)
        break
    k += 1
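# Added note: by the binomial expansion, for odd k,
#     (p - 1)**k + (p + 1)**k  ==  2*k*p   (mod p**2),
# so r == 2*k*p, and only odd k can push the remainder past 10**10.
# Small sanity check of the identity (p = 7, k = 3):
assert ((7 - 1) ** 3 + (7 + 1) ** 3) % (7 ** 2) == (2 * 3 * 7) % (7 ** 2)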
|
[
"antoine.francais@dolead.com"
] |
antoine.francais@dolead.com
|
4843b1d4b9b7d2f2fe304e9278792b5d93d54487
|
23f6dbacd9b98fdfd08a6f358b876d3d371fc8f6
|
/rootfs/usr/lib/pymodules/python2.6/orca/scripts/toolkits/VCL.py
|
d29693eaa79ca6a0de4d1cfb80bdd0e09f4e8f63
|
[] |
no_license
|
xinligg/trainmonitor
|
07ed0fa99e54e2857b49ad3435546d13cc0eb17a
|
938a8d8f56dc267fceeb65ef7b867f1cac343923
|
refs/heads/master
| 2021-09-24T15:52:43.195053
| 2018-10-11T07:12:25
| 2018-10-11T07:12:25
| 116,164,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48
|
py
|
/usr/share/pyshared/orca/scripts/toolkits/VCL.py
|
[
"root@xinli.xinli"
] |
root@xinli.xinli
|
2eb0d6c84e665b9b8a88283bca5eec16c6f2a414
|
7be57145ac26cbfc3bbc322d0a76b38607794572
|
/tests/test_e2e.py
|
b20e3f10291f45b5452782741a3ae126725adad1
|
[] |
no_license
|
rakesh-eshwar/SeleniumPythonFramework
|
f5642074afeb1cd28636e1024eddbbd510d67f05
|
fecc36613e5a746b09ddd050c9c4b3a67d42e8db
|
refs/heads/master
| 2022-11-29T11:12:13.203089
| 2020-08-16T07:09:23
| 2020-08-16T07:09:23
| 287,767,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,357
|
py
|
"""
Setup and TearDown :
-> Browser Invocation and browser closing is being written as part of fixture
"""
# import pytest
import time
# from selenium import webdriver
# from selenium.webdriver.common.by import By
# from selenium.webdriver.support.wait import WebDriverWait
# from selenium.webdriver.support import expected_conditions
# from pageObjects.CheckoutPage import CheckOutPage
from pageObjects.ConfirmPage import ConfirmPage
from pageObjects.HomePage import HomePage
from utilities.BaseClass import BaseClass
# ============== below commented code is without POM ( Page Object Model )
# #@pytest.mark.usefixtures(scope="class")
# class TestOne(BaseClass):
#
# def test_e2e(self):
#
# all_products = self.driver.find_elements_by_xpath("//div[@class='card h-100']")
#
# # here do not write all, concatenate remaining one ( //div[@class='card h-100']/div[1]/h4/a )
# for each_product in all_products:
# product_name = each_product.find_element_by_xpath("div[1]/h4/a").text
# if product_name == "Blackberry":
# # add the item to cart ( //div[@class='card h-100']/div[2]/button ) take only last xpath
# each_product.find_element_by_xpath("div[2]/button").click()
#
#
# time.sleep(3)
# self.driver.find_element_by_xpath("//a[@class='nav-link btn btn-primary']").click()
#
# # Step : 3,4 check for blackberry ( VALIDATE )
# time.sleep(2)
# assert self.driver.find_element_by_link_text("Blackberry").is_displayed()
# self.driver.find_element_by_xpath("//button[@class='btn btn-success']").click()
# time.sleep(2)
#
# # Step 5
# self.driver.find_element_by_id("country").send_keys("India")
# time.sleep(8)
# WebDriverWait(self.driver, 10).until(expected_conditions.presence_of_element_located((By.LINK_TEXT, "India")))
# self.driver.find_element_by_link_text("India").click()
#
# # step 6
# self.driver.find_element_by_xpath("//label[contains(text(),'I agree with the')]").click()
# self.driver.find_element_by_xpath("//input[@class='btn btn-success btn-lg']").click()
# assert self.driver.find_element_by_xpath("//*[contains(text(),'Success!')]").is_displayed()
#
#
# self.driver.get_screenshot_as_file("C:/Users/RakeshE-1763/PycharmProjects/SeleniumPythonPyTest/screenshot.png")
"""
re-writing the code using POM
"""
class TestOne(BaseClass):
def test_e2e(self):
# creating log obj for logging
log = self.getLogger()
# sending driver object to page object class
home_page = HomePage(self.driver)
# clicks and returns the next page's object ( checkout page )
check_out_page = home_page.shopItems()
card_name = check_out_page.addName
card_footer = check_out_page.addFooter
log.info("getting all the card titles")
cards = check_out_page.getCardTitles()
for card in cards:
each_card_name = card.find_element_by_xpath(card_name).text
log.info("card name is :"+each_card_name)
if each_card_name == "Blackberry":
card.find_element_by_xpath(card_footer).click()
# click checkout button after adding to cart
check_out_page.checkoutButton().click()
# check for blackberry ( VALIDATE ) and click checkout again
assert self.driver.find_element_by_link_text("Blackberry").is_displayed()
confirm_page = check_out_page.validateCheckoutButton()
# confirm page actions
log.info("entering country name as India")
confirm_page.countryInput().send_keys("India")
# wait for india to be displayed and then click india
self.verifyLinkPresence("India")
confirm_page.selectIndia().click()
# select agree , click purchase button, verify string "success"
confirm_page.selectAgree().click()
confirm_page.purchaseButton().click()
log.info("verifying by searching for success word")
assert confirm_page.successPurchase().is_displayed()
self.driver.get_screenshot_as_file("C:/Users/RakeshE-1763/PycharmProjects/SeleniumPythonPyTest/screenshot.png")
|
[
"noreply@github.com"
] |
rakesh-eshwar.noreply@github.com
|
e45c5fe41a64198174c654f5ee1615175f1f9e25
|
d017755bb4b6439319f5664425be531f4ef72f3e
|
/mysite/VotingDay/models.py
|
0b4c6ee7d5ab476606bbd14652d37f6d2fcb06f0
|
[] |
no_license
|
koishore/VotingDay
|
bcd304603ab59015baed96e16d1dff4ac3a414f6
|
5c7122346476cced1c739b65b882f3e8b4b49427
|
refs/heads/master
| 2020-06-10T16:08:07.674559
| 2019-10-08T17:43:47
| 2019-10-08T17:43:47
| 75,942,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,651
|
py
|
from __future__ import unicode_literals
from django.db import models
#different tables of the database are declared here
#this table stores the users who are registered to vote
class User(models.Model):
email = models.CharField(max_length=50)
userid = models.CharField(max_length=15)
def __unicode__(self):
return self.userid
#this table stores the details of the candidates and the party they belong to
class Candidate(models.Model):
party_name = models.CharField(max_length=100, null=False, blank=False)
candidate1 = models.CharField(max_length=50, null=False, blank=False)
candidate2 = models.CharField(max_length=50, null=True, blank=True)
candidate3 = models.CharField(max_length=50, null=True, blank=True)
candidate4 = models.CharField(max_length=50, null=True, blank=True)
candidate5 = models.CharField(max_length=50, null=True, blank=True)
candidate6 = models.CharField(max_length=50, null=True, blank=True)
candidate7 = models.CharField(max_length=50, null=True, blank=True)
candidate8 = models.CharField(max_length=50, null=True, blank=True)
candidate9 = models.CharField(max_length=50, null=True, blank=True)
candidate10 = models.CharField(max_length=50, null=True, blank=True)
candidate11 = models.CharField(max_length=50, null=True, blank=True)
candidate12 = models.CharField(max_length=50, null=True, blank=True)
candidate13 = models.CharField(max_length=50, null=True, blank=True)
candidate14 = models.CharField(max_length=50, null=True, blank=True)
candidate15 = models.CharField(max_length=50, null=True, blank=True)
def __unicode__(self):
return self.party_name
#this table stores the details of how many votes the respective numbered candidate in a list has got
class Vote(models.Model):
partyname = models.CharField(max_length=100, null=False, blank=False)
totalvotes = models.IntegerField(default=0)
vote1 = models.IntegerField(default=0)
vote2 = models.IntegerField(default=-1)
vote3 = models.IntegerField(default=-1)
vote4 = models.IntegerField(default=-1)
vote5 = models.IntegerField(default=-1)
vote6 = models.IntegerField(default=-1)
vote7 = models.IntegerField(default=-1)
vote8 = models.IntegerField(default=-1)
vote9 = models.IntegerField(default=-1)
vote10 = models.IntegerField(default=-1)
vote11 = models.IntegerField(default=-1)
vote12 = models.IntegerField(default=-1)
vote13 = models.IntegerField(default=-1)
vote14 = models.IntegerField(default=-1)
vote15 = models.IntegerField(default=-1)
def __unicode__(self):
return self.partyname
|
[
"koishore.roy@ashoka.edu.in"
] |
koishore.roy@ashoka.edu.in
|
1210e7360134b655175e57ae56324fe180e8c0be
|
c6320735f140944d2c282729c008a7cf7cf1e98f
|
/docs/samples/explanation/income/train.py
|
1f390f64d00d252386861f2eb8e6c0452dd63fec
|
[
"Apache-2.0"
] |
permissive
|
gipster/kfserving
|
66d2dffd8917ba9029ca2e96f199e1f56df6e41b
|
bbd3da47a708403fb2a203e28955d5454bc2a1d5
|
refs/heads/master
| 2020-06-10T18:43:57.148347
| 2019-08-19T00:24:03
| 2019-08-19T00:24:03
| 193,709,786
| 0
| 0
|
Apache-2.0
| 2019-06-25T13:08:50
| 2019-06-25T13:08:49
| null |
UTF-8
|
Python
| false
| false
| 2,400
|
py
|
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from alibi.datasets import adult
import joblib
import dill
from sklearn.pipeline import Pipeline
import alibi
# load data
data, labels, feature_names, category_map = adult()
# define train and test set
np.random.seed(0)
data_perm = np.random.permutation(np.c_[data, labels])
data = data_perm[:, :-1]
labels = data_perm[:, -1]
idx = 30000
X_train, Y_train = data[:idx, :], labels[:idx]
X_test, Y_test = data[idx + 1:, :], labels[idx + 1:]
# feature transformation pipeline
ordinal_features = [x for x in range(len(feature_names)) if x not in list(category_map.keys())]
ordinal_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = list(category_map.keys())
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(transformers=[('num', ordinal_transformer, ordinal_features),
('cat', categorical_transformer, categorical_features)])
# train an RF model
print("Train random forest model")
np.random.seed(0)
clf = RandomForestClassifier(n_estimators=50)
pipeline = Pipeline([('preprocessor', preprocessor),
('clf', clf)])
pipeline.fit(X_train, Y_train)
print("Creating an explainer")
predict_fn = lambda x: clf.predict(preprocessor.transform(x))
explainer = alibi.explainers.AnchorTabular(predict_fn=predict_fn,
feature_names=feature_names,
categorical_names=category_map)
explainer.fit(X_train)
explainer.predict_fn = None # Clear explainer predict_fn as it's a lambda and will be reset when loaded
with open("explainer.dill", 'wb') as f:
dill.dump(explainer,f)
print("Saving individual files")
# Dump files - for testing creating an AnchorExplainer from components
joblib.dump(pipeline, 'model.joblib')
joblib.dump(X_train, "train.joblib")
joblib.dump(feature_names, "features.joblib")
joblib.dump(category_map, "category_map.joblib")
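# A hedged sketch of the reverse path -- loading the dumped artifacts and
# restoring the predict_fn that was cleared before pickling. This is an
# assumption about how the files would be consumed, not part of the original
# sample, and the explanation's exact fields depend on the alibi version.
import dill
import joblib

with open("explainer.dill", "rb") as f:
    loaded_explainer = dill.load(f)

loaded_pipeline = joblib.load("model.joblib")
# Reattach the prediction function that was set to None before dumping;
# pipeline.predict is equivalent to clf.predict(preprocessor.transform(x)) above.
loaded_explainer.predict_fn = lambda x: loaded_pipeline.predict(x)

X_ref = joblib.load("train.joblib")
explanation = loaded_explainer.explain(X_ref[0])
print(explanation)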
[author: k8s-ci-robot@users.noreply.github.com]

--- zalkikar/DB-Final :: /client/app/dialognlu/readers/dataset.py (no license, Python, 194 bytes) ---
# -*- coding: utf-8 -*-
"""
@author: mwahdan
"""
class NluDataset:
    """Container for an NLU dataset: parallel lists of utterances, tag sequences, and intent labels."""
    def __init__(self, text, tags, intents):
        self.text = text        # list of utterance strings
        self.tags = tags        # per-utterance tag sequences (parallel to text)
        self.intents = intents  # per-utterance intent labels (parallel to text)
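# A minimal usage sketch; the field values below are illustrative only and
# assume the usual NLU convention of parallel utterance / tag / intent lists.
ds = NluDataset(
    text=['book a flight to cairo'],
    tags=[['O', 'O', 'O', 'O', 'B-city']],
    intents=['BookFlight'],
)
print(ds.intents[0])  # BookFlight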
[author: rayzck9@gmail.com]

--- hkaranjule77/Student-Grievance-Redressal :: /sgr/threads/models.py (no license, Python, 15,780 bytes) ---
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Q
from django.db.utils import IntegrityError
from django.utils import timezone
from datetime import date
import os
from sgr.settings import BASE_DIR
from user.models import Member
# GLOBAL CONSTANTS
FILE_PATH = os.path.join( BASE_DIR, 'data_files' )
class Category( models.Model ) :
    ''' Model Class for storing top-level categories of Complain and Thread Model. '''
code = models.CharField( primary_key = True, max_length = 1 )
name = models.CharField( max_length = 25 )
    # Constants
    CATEGORY_PATH = os.path.join( FILE_PATH, 'categories.txt' ) # module-level FILE_PATH is in scope here
def get_category( request, category ) :
''' Returns Category Object of specified category string. '''
try :
category_obj = Category.objects.get( name = category )
except ObjectDoesNotExist :
            messages.error( request, f" No such Category '{ category }' exists. " )
return None
else :
return category_obj
def get_code_name_list() :
''' Returns a List of code, name of all Category objects. '''
cat_obj = Category.objects.all()
category_list = [ [ obj.code, obj.name ] for obj in cat_obj ]
return category_list
def get_list():
''' Returns a List of all Category name. '''
category_qs = Category.objects.all()
category_list = [ category.name for category in category_qs ]
return category_list
def load_data() :
        ''' Loads Categories from file into the Database. '''
        # loading data from file & preprocessing the data
        print( "Loading Categories from file into Database..." )
        with open( Category.CATEGORY_PATH, 'r' ) as category_file : # file is closed automatically
            category_data = category_file.read() # reads all data from file
        list_of_code_name = category_data.split( ';\n' ) # one 'code,name' entry per element
        list_of_code_name = list_of_code_name[ : -1 ] # drops the trailing empty element
        for index in range( len( list_of_code_name ) ) :
            list_of_code_name[ index ] = list_of_code_name[ index ].split( ',' ) # separates code and name
# adding category into database
categories_added = 0 # counts category updated in database
for code_name in list_of_code_name :
# checks if category already exist in DB
try :
category_obj = Category.objects.get( name = code_name[ 1 ] )
except ObjectDoesNotExist : # occurs when if fetched object is not in DB
# adds category in DB
category_obj = Category( name = code_name[ 1 ], code = code_name[0] ) # initialization
try :
category_obj.save() # saves Category object
categories_added += 1 # updating category update count
print( f"Category '{ category_obj.name }' is added with code {category_obj.code }. ")
except IntegrityError :
print( f" Code '{ code_name[ 0 ] }' already exist with another Category ' skipped Category '{ code_name[ 1 ] }' for now change. " )
if categories_added != 0 :
print( f' Added { categories_added } new Categories in Database Successfully. ' )
else :
        print( ' Categories are already up to date. ' )
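# A hedged illustration of the categories.txt layout that Category.load_data
# expects (the category names below are made up, not taken from the repo):
#
#     A,Academic;
#     H,Hostel;
#     I,Infrastructure;
#
# Splitting the file on ';\n' yields 'code,name' entries plus one trailing
# empty element, which load_data drops before splitting each entry on ','.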
class SubCategory( models.Model ) :
''' Model Class for storing different categories of Complain and Thread Model '''
code = models.CharField( max_length = 1 )
name = models.CharField( max_length = 25 )
category = models.ForeignKey( Category, on_delete = models.CASCADE )
    # constant
    SUBCATEGORY_PATH = os.path.join( FILE_PATH, 'subcatgories.txt' )
def get_code_name_list() :
''' Returns a List of [ code, name ] of Sub Category object nested according to category. '''
final_list = list()
for cat_obj in Category.objects.all() :
sub_cat_obj = SubCategory.objects.filter( category = cat_obj )
sub_list = [ [ sub_category.code, sub_category.name ] for sub_category in sub_cat_obj ]
final_list.append( sub_list )
return final_list
def get_list( request, category ):
''' Returns a List of Subcategories based on passed category string. '''
category_obj = Category.get_category( request, category )
        if category_obj is not None :
            sub_cat_obj = SubCategory.objects.filter( category = category_obj )
            sub_cat_list = [ sub_category.name for sub_category in sub_cat_obj ]
            return sub_cat_list
def load_data() :
''' Loads Subcategories from file into DB. '''
subcategory_file = open( SubCategory.SUBCATEGORY_PATH, 'r' )
subcategory_data = subcategory_file.read()
subcategory_data = subcategory_data.split( ';\n') # divides subcategory, category-wise
subcategory_data = subcategory_data[ : len( subcategory_data ) - 1 ] # deleting last blank line
for category_wise in subcategory_data :
category_wise = category_wise.split( ';' )
category_wise[0] = category_wise[0].split( ',' )
subcategories = category_wise[ 1 : ]
try :
category_obj = Category.objects.get( code = category_wise[0][0], name = category_wise[0][1] )
except ObjectDoesNotExist :
print( f"Error : No category exist by name '{ category_wise[0] }' so can't update sub-categories { subcategories }. " )
else :
for subcategory in subcategories :
subcategory = subcategory.split( ',' )
# checks if SubCategory already exists in DB
try :
subcategory_obj = SubCategory.objects.get( name = subcategory[ 1 ], category = category_obj )
print( f" SubCategory '{ subcategory_obj.name }' already exist in DB. " )
continue
except ObjectDoesNotExist :
#checks if code is not taken by other SubCategory in a category_wise manner
try :
                            subcategory_obj = SubCategory.objects.get( code = subcategory[0], category = category_obj )
                            print( f" Another SubCategory '{ subcategory_obj.name }' exists with code '{ subcategory_obj.code }'. " )
continue
except ObjectDoesNotExist :
# saves subcategory if not present
subcategory_obj = SubCategory( code = subcategory[0] )
subcategory_obj.name = subcategory[1]
subcategory_obj.category = category_obj
subcategory_obj.save()
print( f" Added new SubCategory with code '{ subcategory_obj.code }' and name { subcategory_obj.name } " )
class Redressal( models.Model ) :
''' Redressal Model for Complain / Thread Model with actions for HOD / Principal '''
text = models.TextField( null = True, blank = True )
file = models.FileField( upload_to = 'thread-redressal/', null = True, blank = True )
added_by = models.ForeignKey(
Member,
related_name = '+',
on_delete = models.CASCADE,
null = True,
blank = True
)
added_at = models.DateTimeField( null = True, blank = True )
# action of accept / reject of redressal by HOD / Principal.
action = models.CharField( default = '', max_length = 15 ) # actions - APPROVE / REJECT
action_msg = models.TextField( null = True )
action_by = models.ForeignKey(
Member,
        related_name = 'member_on_thread+',
on_delete = models.CASCADE,
null = True,
blank = True
)
action_at = models.DateTimeField( null = True, blank = True )
def approve( self, member ):
''' Approves the redressal and saves changes for approval in Thread object. '''
self.action = 'APPROVE'
self.action_by = member
self.action_at = timezone.now()
self.action_msg = ''
self.save( update_fields = [
'action',
'action_at',
'action_by',
'action_msg',
]
)
def init_for_reject( self, request, member ):
''' Initialize the thread object with rejection data received by post method. '''
self.action = 'REJECT'
self.action_msg = request.POST.get( 'rejection_msg')
self.action_at = timezone.now()
self.action_by = member
def is_reject_valid( self ):
''' Checks if rejection message if not blank. '''
if self.action_msg == '' or self.action_msg == None:
return False
return True
def reject( self ):
''' Rejects redressal and saves the changes accordingly in Thread model. '''
self.save( update_fields = [
'action',
'action_msg',
'action_by',
'action_at',
]
)
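# A hedged sketch of how a view might drive the approve / reject flow above;
# `request`, `member`, and `thread` are assumed to come from the view code:
#
#     redressal = thread.redressal
#     if request.POST.get('action') == 'approve':
#         redressal.approve(member)
#     else:
#         redressal.init_for_reject(request, member)
#         if redressal.is_reject_valid():
#             redressal.reject()
#         else:
#             messages.error(request, 'Rejection message cannot be blank.')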
class Thread( models.Model ) :
# required data
id = models.CharField( primary_key = True, max_length = 15 )
title = models.CharField(max_length = 25)
category = models.CharField( max_length = 25 )
sub_category = models.CharField( max_length = 25 )
description = models.TextField()
complain_count = models.IntegerField( default = 0 )
note_count = models.IntegerField( default = 0 )
created_by = models.ForeignKey( Member, on_delete = models.CASCADE )
created_at = models.DateTimeField( default = timezone.now )
# for solving
solver = models.ForeignKey(
Member,
related_name = 'solver_member+',
on_delete = models.SET_NULL,
null = True,
blank = True
)
solving_date = models.DateField( null = True, blank = True )
# redressal
redressal = models.OneToOneField( Redressal, on_delete = models.CASCADE, null = True, blank = True )
# constants
SEARCH_TYPES = ( 'All', 'Title', 'Description', 'Created by', )
FILTER_OPTIONS = ( 'All', 'Approved', 'Redressed', 'Rejected', 'Unredressed' )
def __str__( self ) :
''' return a string of Thread id when object is called for printing purpose. '''
return str( self.id )
def generate_id(self, category, sub_category):
''' Generates and initialize id for object when called. '''
categories = Category.get_code_name_list()
sub_categories = SubCategory.get_code_name_list()
today = date.today()
curr_date = today.strftime('%y%m%d')
        # read the existing counts first, then reopen the file for writing
        with open(os.path.join(BASE_DIR, 'count_files/thread_id.txt'), 'r') as count_file:
            # preprocessing of data - splitting a single string into list of lines
            count_data = count_file.read().split('\n')
        count_file = open(os.path.join(BASE_DIR, 'count_files/thread_id.txt'), 'w')
# if first line of date does not match with current date
if curr_date != count_data[0]:
data = ''
category_index = 0
for category_wise in sub_categories:
for sub_code, sub_cat in category_wise:
if sub_cat == sub_category:
data+='1 '
code = categories[ category_index ][ 0 ] + sub_code
else:
data+='0 '
data+='\n'
category_index += 1
data = curr_date+ '\n' + data
count_file.write(data)
count_file.close()
generated_id = '0'
else:
# preprocessing of data / conversion into list of counts from string
for index in range(len(count_data)):
count_data[index] = count_data[index].split(' ')
# writes date in first line of the opened count file
count_file.write(curr_date+'\n')
# count incrementing part
cat_index = 1
for cat_code, cat in categories:
sub_index = 0
for sub_cat_code, sub in sub_categories[cat_index-1]:
if (sub == sub_category and cat == category):
try:
generated_id = count_data[cat_index][sub_index]
except IndexError:
count_data[cat_index][sub_index] = '1'
generated_id = '0'
else:
count_data[cat_index][sub_index] = str( int(generated_id) + 1 )
# generates code from category, sub_category, required for id
code = cat_code + sub_cat_code
# writes count for every sub_category in file
count_file.write(count_data[cat_index][sub_index]+' ')
sub_index += 1
#creates new line in count file before start iterating for next category
count_file.write('\n')
cat_index += 1
count_file.close()
        # id format : yymmdd + category code + sub-category code + per-day count
        # (the original prefixed only counts below 10, which left larger ids bare)
        generated_id = curr_date + code + generated_id
self.id = generated_id
def get_thread( request, id_no ):
''' Returns Thread with specified id if present or else returns messages and None. '''
try:
#id_no = int( id_no )
thread = Thread.objects.get( id = id_no )
except ObjectDoesNotExist:
            messages.error( request, f' Thread { id_no } does not exist. ' )
thread = None
return thread
def increase_complain_count( self ):
        ''' Increases and saves the complaint count of the Thread. '''
self.complain_count += 1
self.save( update_fields=[ 'complain_count' ] )
def increase_note_count( self ):
''' Increases and saves the count of note in Thread model. '''
self.note_count += 1
self.save( update_fields = [ 'note_count' ] )
def init_for_add( request, member ):
''' Initializes new Thread object with data received by post method. '''
thread = Thread()
thread.title = request.POST.get( 'title' )
thread.category = request.POST.get( 'category' )
thread.sub_category = request.POST.get( 'sub_category' )
thread.description = request.POST.get( 'description' )
thread.created_by = member
return thread
    def init_for_redressal( self, request, member ):
        ''' Initializes the Thread object with the redressal data received through post method. '''
redressal = Redressal()
redressal.text = request.POST.get( 'redressal' )
redressal.file = request.FILES.get( 'redressal_file' )
redressal.added_by = member
redressal.added_at = timezone.now()
self.redressal = redressal
def is_add_valid( self, request ):
        ''' Validates data initialized by method 'init_for_add' before saving in DB. '''
valid = True
if self.title == '' or self.title == None:
valid = False
elif self.category == '' or self.category == None or self.category == 'Select Category':
valid = False
elif self.sub_category == '' or self.sub_category == None or self.sub_category == 'Select Sub Category':
valid = False
elif self.description == '' or self.description == None or self.description == 'Add description here...':
valid = False
elif self.created_by == None:
valid = False
if valid == True :
self.generate_id( self.category, self.sub_category )
return valid
def is_redress_valid( self ):
''' Returns True if initialized redressal data of Thread object is valid or else returns False. '''
valid = True
if self.redressal is None :
valid = False
elif self.redressal.text == '' or self.redressal.text == None:
valid = False
elif self.redressal.added_by is None:
valid = False
elif self.redressal.added_at is None:
valid = False
return valid
def redress( self ):
''' Saves initialized redressal data in Thread model. '''
self.redressal.save()
self.save( update_fields = [ 'redressal' ] )
    def search( query, search_type ):
        ''' Single function for search of Thread objects; returns a filtered queryset. '''
        search_q = Q()
        if search_type in ( Thread.SEARCH_TYPES[0], Thread.SEARCH_TYPES[1] ) :
            search_q |= Q( title__icontains = query )
        if search_type in ( Thread.SEARCH_TYPES[0], Thread.SEARCH_TYPES[2] ) :
            search_q |= Q( description__icontains = query )
        if search_type in ( Thread.SEARCH_TYPES[0], Thread.SEARCH_TYPES[3] ) :
            search_q |= ( Q( created_by__mid__icontains = query ) |
                          Q( created_by__user__first_name__icontains = query ) |
                          Q( created_by__user__last_name__icontains = query ) )
        return Thread.objects.filter( search_q )
    def filter_qs( queryset, filter_option ) :
        ''' Filters the passed queryset according to passed filter_option. '''
        if filter_option == Thread.FILTER_OPTIONS[ 1 ] :    # Approved
            final_qs = queryset.exclude( redressal = None ).filter( redressal__action = 'APPROVE' )
        elif filter_option == Thread.FILTER_OPTIONS[ 2 ] :  # Redressed
            final_qs = queryset.exclude( redressal = None )
        elif filter_option == Thread.FILTER_OPTIONS[ 3 ] :  # Rejected
            final_qs = queryset.exclude( redressal = None ).filter( redressal__action = 'REJECT' )
        elif filter_option == Thread.FILTER_OPTIONS[ 4 ] :  # Unredressed
            final_qs = queryset.filter( redressal = None )
        else :  # 'All' or an unrecognised option leaves the queryset unchanged
            final_qs = queryset
        return final_qs
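# A hedged usage sketch of the two queryset helpers above; the literal strings
# must match entries in Thread.SEARCH_TYPES / Thread.FILTER_OPTIONS:
#
#     results = Thread.search('hostel water supply', 'Title')
#     results = Thread.filter_qs(results, 'Unredressed')
#     for thread in results:
#         print(thread.id, thread.title)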
[author: hkaranjule77@gmail.com]

--- AlexanderPolus/ixd-Delphi-official :: /node_modules/mongodb/node_modules/bson/build/config.gypi (MIT/Apache-2.0, gypi, 3,028 bytes) ---
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 46,
"host_arch": "ia32",
"node_install_npm": "true",
"node_prefix": "/usr/local",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "ia32",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/home/vagrant/.node-gyp/0.10.24",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"always_auth": "",
"user_agent": "node/v0.10.24 linux ia32",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "null",
"userconfig": "/home/vagrant/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/home/vagrant/tmp",
"depth": "null",
"save_dev": "",
"usage": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr/local",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/vagrant/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"cert": "",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "v0.10.24",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/vagrant/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": ""
}
}
|
[
"cyang@godaddy.com"
] |
cyang@godaddy.com
|
0c5a8e18c89224eccf505020eafb6f868ac103a1
|
641b49ff4a4839aa70083ef169e44302574f8651
|
/app/snippets/models.py
|
bd24e78274688f04dad1b52b28803be35b8f4099
|
[] |
no_license
|
bear-engineer/rest_framework_Django_tutorial
|
ca06c7ce13b5d9028e52c603c92f5ae345300dfb
|
17d87b6930f1fbcab7d04d59c2453db34680a1e3
|
refs/heads/master
| 2021-09-19T06:42:16.102751
| 2018-07-24T08:04:35
| 2018-07-24T08:04:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,834
|
py
|
from django.conf import settings
from django.db import models
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import get_all_lexers, get_lexer_by_name
from pygments.styles import get_all_styles
LEXERS = [item for item in get_all_lexers() if item[1]]
LANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])
STYLE_CHOICES = sorted((item, item) for item in get_all_styles())
class Snippet(models.Model):
created = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=100, blank=True, default='')
code = models.TextField()
linenos = models.BooleanField(default=False)
language = models.CharField(choices=LANGUAGE_CHOICES, default='python', max_length=100)
style = models.CharField(choices=STYLE_CHOICES, default='friendly', max_length=100)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='snippets')
highlighted = models.TextField()
class Meta:
ordering = ('created',)
def save(self, *args, **kwargs):
        ## This renders the snippet to HTML and stores it whenever it is saved.
        # Get the lexer for the specified language
        lexer = get_lexer_by_name(self.language)
        # Whether to display line numbers ('table' layout, or off)
        linenos = 'table' if self.linenos else False
        # If self.title is set, pass a dict containing a 'title' key as options
        options = {'title': self.title} if self.title else {}
        # Build the formatter object from the settings chosen above
formatter = HtmlFormatter(
style=self.style,
linenos=linenos,
full=True,
**options,
)
self.highlighted = highlight(self.code, lexer, formatter)
super().save(*args, **kwargs)
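# A hedged standalone sketch of the same pygments pipeline outside the model,
# handy for checking the generated HTML without touching the database. The
# snippet values are made up; the API calls mirror save() above.
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import get_lexer_by_name

demo_lexer = get_lexer_by_name('python')
demo_formatter = HtmlFormatter(style='friendly', linenos='table', full=True, title='demo snippet')
demo_html = highlight("print('hello')", demo_lexer, demo_formatter)
print(demo_html[:120])  # full=True yields a complete HTML document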
[author: d.sehyeon@gmail.com]

--- VanDenHendeSimon/blackjack :: /models/Card.py (no license, Python, 988 bytes) ---
|
class Card:
character_aliasses = {
1: 'Ace',
11: 'Jack',
12: 'Queen',
13: 'King',
}
def __init__(self, character, suit):
self.character = character
self.suit = suit
@property
def suit(self):
return self._suit
@suit.setter
def suit(self, value):
self._suit = value
@property
def character(self):
# 10s and jacks/queens/kings are treated the same overall (also for splitting)
return min(self._character, 10)
# return self._character
@character.setter
def character(self, value):
self._character = value
@property
def character_alias(self):
        # Look up the character; if it's not in the table, fall back to the value itself.
        # Note: because the character property caps values at 10, the Jack/Queen/King
        # aliases above are never returned; use self._character here if face-card
        # names are desired.
        return Card.character_aliasses.get(self.character, self.character)
def __str__(self):
return '%s of %s' % (self.character_alias, self.suit)
def __repr__(self):
return self.__str__()
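# A hedged usage sketch building a full deck; the suit names are assumptions,
# since the class itself does not constrain them.
import itertools

suits = ('Hearts', 'Diamonds', 'Clubs', 'Spades')
deck = [Card(value, suit) for value, suit in itertools.product(range(1, 14), suits)]

print(deck[0])    # Ace of Hearts
print(deck[-1])   # 10 of Spades -- face cards read as 10 (see note above)
print(len(deck))  # 52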
[author: simonvdhende@outlook.com]

--- alipay/alipay-sdk-python-all :: /alipay/aop/api/domain/AlipayIserviceIsresourceTenantquerybytntidQueryModel.py (Apache-2.0, Python, 1,400 bytes) ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayIserviceIsresourceTenantquerybytntidQueryModel(object):
def __init__(self):
self._tnt_inst_id = None
self._ur_id = None
@property
def tnt_inst_id(self):
return self._tnt_inst_id
@tnt_inst_id.setter
def tnt_inst_id(self, value):
self._tnt_inst_id = value
@property
def ur_id(self):
return self._ur_id
@ur_id.setter
def ur_id(self, value):
self._ur_id = value
def to_alipay_dict(self):
params = dict()
if self.tnt_inst_id:
if hasattr(self.tnt_inst_id, 'to_alipay_dict'):
params['tnt_inst_id'] = self.tnt_inst_id.to_alipay_dict()
else:
params['tnt_inst_id'] = self.tnt_inst_id
if self.ur_id:
if hasattr(self.ur_id, 'to_alipay_dict'):
params['ur_id'] = self.ur_id.to_alipay_dict()
else:
params['ur_id'] = self.ur_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayIserviceIsresourceTenantquerybytntidQueryModel()
if 'tnt_inst_id' in d:
o.tnt_inst_id = d['tnt_inst_id']
if 'ur_id' in d:
o.ur_id = d['ur_id']
return o
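# A hedged round-trip sketch of the two converters above; the id values are
# made up for illustration.
model = AlipayIserviceIsresourceTenantquerybytntidQueryModel()
model.tnt_inst_id = 'EXAMPLE_TNT_001'  # hypothetical tenant instance id
model.ur_id = 'EXAMPLE_UR_001'         # hypothetical user resource id

params = model.to_alipay_dict()
print(params)  # {'tnt_inst_id': 'EXAMPLE_TNT_001', 'ur_id': 'EXAMPLE_UR_001'}

restored = AlipayIserviceIsresourceTenantquerybytntidQueryModel.from_alipay_dict(params)
print(restored.ur_id)  # EXAMPLE_UR_001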
[author: jishupei.jsp@alibaba-inc.com]

--- sgh1/kindablog :: /src/Functions.py (no license, Python, 3,210 bytes) ---
|
#
# Functions.py
# Some helper functions to keep clutter out of Main.py.
#
# Imports
import Settings
import markdown2
from markdown2Mathjax import sanitizeInput, reconstructMath
import os.path
import pprint
import web
class Functions(object):
@staticmethod
def GetRelatedPostsByTag(allMetaDataDict, tags):
"""
Get related posts by tags.
@param allMetaDataDict: Meta data from the meta pickle.
@param tags: Dictionary of tags for which we want related articles.
@return: Dictionary of filenames : titles of related articles.
"""
# Create dictionary for related articles.
relatedArticlesDict = {}
# Get related articles from meta data.
for tag in tags:
# See if the tag is in the meta data. In theory there should be at least 'this' article, but
# we don't explicitly enforce updating the meta info, so it might not be.
if tag in allMetaDataDict["byTag"]:
# Create filename / title entry in dictionary
for relatedArticle in allMetaDataDict["byTag"][tag]:
relatedArticlesDict[relatedArticle] = allMetaDataDict["byTitle"][relatedArticle]
# Break if we have enough related articles.
if len(relatedArticlesDict) >= Settings.Settings.relatedArticleListSize:
break
# Break if we have enough related articles.
if len(relatedArticlesDict) >= Settings.Settings.relatedArticleListSize:
break
return relatedArticlesDict
@staticmethod
def CreateMarkdownFromText(text):
"""
Create markdown from raw text that was read from file.
@param text: Raw text from .md file.
@return: HTML string containing processed markdown text, and metadata from .md file.
"""
# Do mathjax sanitizeInput idiom.
# Note, this seems broken, so it is worthless right now.
tmp = sanitizeInput(text)
# Create markdown.
markedDownText = markdown2.markdown(tmp[0], extras=["fenced-code-blocks", "metadata"])
# Load just this post's meta data.
myMeta = markedDownText.metadata
        # Create final output ... md + mathjax.
        finalOutput = reconstructMath(markedDownText, tmp[1])
return (finalOutput, myMeta)
@staticmethod
def ReadFile(pageName):
"""
Reads a file and returns contents of that file. If the file is not found, throw a 404.
@param pageName: File to read with respect to web-root.
@return: Contents of file.
"""
# Make sure the file exists.
if not os.path.exists(Settings.Settings.webRoot + "/" + pageName):
raise web.notfound()
# Just return the text of the .html file.
with open(Settings.Settings.webRoot + "/" + pageName, 'r') as myfile:
return myfile.read()
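# A hedged usage sketch of CreateMarkdownFromText; the metadata block follows
# markdown2's "metadata" extra, and the sample text is made up.
raw = """---
title: Hello World
---

Some *emphasis* and inline math $x^2$.
"""
html, meta = Functions.CreateMarkdownFromText(raw)
print(meta.get('title'))  # Hello World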
[author: ec2-user@ip-172-31-41-216.us-west-2.compute.internal]

--- Mostaquim/mycampaign :: /projects/migrations/0001_initial.py (no license, Python, 5,738 bytes) ---
# Generated by Django 2.1 on 2019-05-06 18:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('accounts', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Invoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('currency', models.IntegerField(choices=[(1, '£')])),
('sent_date', models.DateField(auto_now_add=True)),
('issue_date', models.DateField()),
('due_date', models.DateField()),
('paid_date', models.DateField(null=True)),
('terms', models.TextField()),
('discount', models.DecimalField(decimal_places=2, max_digits=11)),
('tax', models.DecimalField(decimal_places=2, max_digits=11)),
('total', models.DecimalField(decimal_places=2, max_digits=11)),
('status', models.IntegerField(choices=[(1, 'Sent'), (2, 'Open'), (3, 'Paid'), (4, 'Partially paid'), (5, 'Cancelled')])),
('second_tax', models.DecimalField(decimal_places=2, max_digits=11)),
],
),
migrations.CreateModel(
name='InvoiceItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, max_digits=11)),
('description', models.TextField()),
('value', models.DecimalField(decimal_places=2, max_digits=11)),
('name', models.CharField(max_length=255, null=True)),
('item_type', models.CharField(max_length=255, null=True)),
],
),
migrations.CreateModel(
name='PrintingOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pages', models.IntegerField(choices=[(1, 'Single Sided'), (2, 'Double Sided'), (3, '2 Pages'), (4, '4 Pages'), (5, '6 Pages'), (6, '8 Pages'), (7, '10 Pages'), (8, '12 Pages')])),
('page_orientation', models.IntegerField(choices=[(1, 'Portrait'), (2, 'Landscape')])),
('colours', models.IntegerField(choices=[(1, '1/0-coloured Black'), (2, '2/0-coloured Black + Pantone'), (3, '2/0-coloured Black + Gold'), (4, '4/0-coloured CMYK')])),
('processing', models.IntegerField(choices=[(1, 'Trimming'), (2, 'Trimming Corner Rounded')])),
('priority', models.IntegerField(choices=[(1, 'Low'), (2, 'Normal'), (3, 'High'), (4, 'Urgent')], default=1)),
('created', models.DateTimeField(auto_now_add=True, null=True)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type_of_service', models.IntegerField(choices=[(1, 'Business To Business'), (2, 'Hand To Hand'), (3, 'Direct Mail'), (4, 'Residential Homes'), (5, 'Shared Distribution'), (6, 'Consultation Distribution')], default=1)),
('number_of_boxes', models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4 or more'), (5, 'N/A')], default=1)),
('type_of_media', models.IntegerField(choices=[(1, 'Flyer'), (2, 'Leaflet'), (3, 'Folded Leaflet'), (4, 'Other')], default=1)),
('require_collection', models.IntegerField(choices=[(1, 'Yes'), (2, 'No')], default=1)),
('quantity_of_flyers', models.IntegerField(null=True)),
('title_of_media', models.CharField(max_length=255, null=True)),
('campaign_details', models.TextField(max_length=255)),
('agreed_cost', models.DecimalField(decimal_places=2, max_digits=11)),
('campaign_start_date', models.DateField()),
('campaign_finish_date', models.DateField()),
('special_instruction', models.TextField()),
('progress', models.IntegerField(default=1)),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('attachments', models.ManyToManyField(to='core.Attachments')),
('company', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='company', to='accounts.Company')),
('project_admin', models.ForeignKey(limit_choices_to={'staff': True}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='project_admin', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ProjectActivity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('subject', models.CharField(max_length=255)),
('message', models.TextField()),
('acitivity_type', models.CharField(max_length=255)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
[author: princemostaquim@gmail.com]