blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
30d323492b66e275e6cd4f00ff0c36c3b7b6df57 | 33e8dd262bd52681eae5f79b9424a09733d1591c | /swetha/Tutorial_2/Operator_Precedence/app.py | ab051c0ee038fc1c5bf5543c35e059d10ef8e811 | [] | no_license | pradeep122/LearnPython | 50e2dd1b5b4c300a0d8b60b8cdea53bc3426375f | 2cd5c4b0845bfa5f8daee778d69cb01ca077eee9 | refs/heads/master | 2020-03-19T00:07:07.973077 | 2019-05-18T17:51:47 | 2019-05-18T17:51:47 | 135,454,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | # Operator Precedence which means order of operations
# Order of Operations
# 1. Parenthesis takes higher priority
# 2. Exponentiation 2 ** 3
# 3. Multiplication or Division
# 4. Addition or Subtraction
x = 10 + 3 * 2
print(x) # 16 first multiplication and then addition(10 + 6)
x = 10 + 3 * 2 ** 2
print(x) # 22
# Exercise
x = (2 + 3) * 10 - 3
print(x) # 47
| [
"iswetha522@gmail.com"
] | iswetha522@gmail.com |
913947a5c6adf7a1e73a1da9ff5898b07a1e6405 | 22cc26c10943bc25f50f36cb2b36a28948ef885b | /alphavantage01.py | 70ddefb04ababbf652d48fe5de6d746ff920d825 | [
"MIT"
] | permissive | jpsura/pyapi | aef4c74d7e916f2fb00ac1b4ab2af3041091d421 | 4dce3eede001abd217ff9ff5101219e13efb6a3d | refs/heads/master | 2020-06-26T02:07:58.104760 | 2019-08-01T20:15:15 | 2019-08-01T20:15:15 | 199,493,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | #!/usr/bin/env python3
import requests
import alpha_vantage
from pprint import pprint
API_URL = "https://www.alphavantage.co/query"
data = {
"function": "TIME_SERIES_DAILY",
"symbol": "vz",
"outputsize": "compact",
"datatype": "json",
"apikey": "NJTO68R6JE6UHSO0",
}
response = requests.get(API_URL, data)
pprint(response.json())
| [
"joshua.p.sura@verizon.com"
] | joshua.p.sura@verizon.com |
63a2bb85fdf0fe9f7bfdb8230c87617f54d240cd | d24aba61db9f0a2bfd3d2c417d873b0e60529fd9 | /commands/images.py | 4a3153ce3b609aadf53a39a4a0cf2601a0782995 | [] | no_license | rafarafa200/DiscordBot-TocaMandelao | 8fb428808fc10d154f1c5b4244684cd169bd67ae | 6edf1b48ae16f4a8b8cc1576a1ef8cf7de1d05ae | refs/heads/main | 2023-08-30T12:19:54.781446 | 2021-10-31T14:11:05 | 2021-10-31T14:11:05 | 423,175,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | from discord.ext import commands
import discord
class Images(commands.Cog):
    """Discord cog that serves random images from the picsum.photos API."""
    def __init__(self, bot):
        # Keep a reference to the bot so embeds can show its name/avatar.
        self.bot = bot
    @commands.command(name="foto")
    async def get_random_image(self, ctx):
        """Reply to `!foto` with an embed containing a random 1920x1080 image.

        The URL itself is random on picsum's side — every fetch of the same
        URL returns a different picture.
        """
        url_image = "https://picsum.photos/1920/1080"
        # User-facing strings are Portuguese by design — do not translate.
        embed_image = discord.Embed(
            title = "Resultado da Busca por Imagem",
            description = "A busca é totalmente aleatória",
            color = 0x0000FF,
        )
        embed_image.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar_url)
        embed_image.set_footer(text="Feito por " + self.bot.user.name, icon_url=self.bot.user.avatar_url)
        embed_image.add_field(name="API", value="Usamos a API do https://picsum.photos")
        embed_image.add_field(name="Parâmetros", value="{largura}/{altura}")
        embed_image.add_field(name="Exemplo", value=url_image, inline=False)
        embed_image.set_image(url=url_image)
        await ctx.send(embed=embed_image)
def setup(bot):
bot.add_cog(Images(bot)) | [
"rafa_menucci@hotmail.com"
] | rafa_menucci@hotmail.com |
c3ad296986ee71c397a1a7186f93d3b260045db8 | 44c85fd2786ae79b11f709b5b02296f158214e9f | /katahack/settings.py | deb583c5a38fc4ded5f71e566dec9558c41cec19 | [] | no_license | bagus-aulia/img-to-text | 377261bcf9c15da93d3805b84c019895d8deac7b | 4485f14dd1f5771c24b0c1871691a4a581b3e638 | refs/heads/master | 2020-08-14T19:23:58.245403 | 2019-10-30T00:42:06 | 2019-10-30T00:42:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-@uhh2&et_+p(+(_ms62ua&!ziwbbcyau-k)de++vj=7^i#usf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'chats',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'katahack.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'katahack.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Jakarta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets'),
) | [
"bagusaulia40@gmail.com"
] | bagusaulia40@gmail.com |
71bed782d65e16e57cce9c58488bb482bc0bb31b | 5a651475ccf9f56174516bc8fee7c9b76bc83677 | /tableread/writer.py | 159de878a217ee9eeb16e7dc0f39259e5e9d4c7e | [] | no_license | bradsbrown/tableread | 43a9ae5015060d6f288d5b66525a05c222318cab | 566507bbf090a187695cf4c3f9dca6bc04488711 | refs/heads/master | 2020-06-24T08:03:41.287935 | 2019-07-24T16:09:31 | 2019-07-24T16:09:31 | 191,936,188 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,270 | py | """Tableread module to write a text file from a Python object."""
import io
import os
from typing import List, Tuple
RowData = List[dict]


class SimpleRSTTableWriteable(object):
    """A single reStructuredText "simple table", pre-formatted for writing."""

    divider_char = "="
    title_marker = "~"

    def __init__(self, title: str, row_data: RowData):
        """Pre-compute column widths and padded text rows for *row_data*.

        The keys of the first row define the column set and column order.
        """
        self.title = title
        self._headers = list(row_data[0].keys())
        self.col_widths = self._col_widths(row_data)
        self.col_mappings = list(zip(self._headers, self.col_widths))
        self.rows = self._dict_to_lines(row_data)

    def _format_row(self, row: dict):
        # Pad each cell to its column width; cells are joined by one space.
        cells = []
        for key, width in self.col_mappings:
            cells.append("{:{c}}".format(row.get(key, ""), c=width))
        return " ".join(cells)

    def _dict_to_lines(self, row_data: RowData):
        # One pre-padded text line per input row dict.
        return [self._format_row(entry) for entry in row_data]

    def _col_widths(self, row_data: RowData):
        # A column is as wide as its header or its widest cell, whichever wins.
        widths = []
        for col in self._headers:
            cell_lengths = [len(str(entry.get(col, ""))) for entry in row_data]
            widths.append(max([len(col)] + cell_lengths))
        return widths

    @property
    def headers(self):
        """Header row formatted as a single space-padded string."""
        return self._format_row({name: name for name in self._headers})

    @property
    def divider(self):
        """Divider row: a run of '=' per column, matching each column width."""
        return " ".join(self.divider_char * width for width in self.col_widths)

    def write_table(self, writer: io.TextIOBase):
        """Write the title block, fenced headers, data rows and a final divider.

        Args:
            writer: file-like object to be written to
        """
        self._write_title(writer)
        self._write_headers(writer)
        for line in self.rows:
            self._write(writer, line)
        self._write(writer, self.divider)

    def _write_title(self, writer: io.TextIOBase):
        # Title, a '~' underline of matching length, then a blank separator.
        self._write(writer, self.title)
        self._write(writer, self.title_marker * len(self.title))
        self._write(writer, "")

    def _write_headers(self, writer: io.TextIOBase):
        # Header text fenced by divider rows above and below.
        self._write(writer, self.divider)
        self._write(writer, self.headers)
        self._write(writer, self.divider)

    def _write(self, writer: io.TextIOBase, string: str):
        # Every emitted line gets a trailing newline.
        writer.write("{}\n".format(string))
class SimpleRSTWriter(object):
    """Write a .rst file from a list of tables."""

    def __init__(self, file_path: str, *tables: Tuple[str, RowData]):
        """Accept a list of table information and write to file.

        Args:
            file_path: the path to write the output file
            tables: Each a tuple of table title (str) and list of row dicts
        """
        self.file_path = file_path
        self.tables = [SimpleRSTTableWriteable(title, rows) for title, rows in tables]

    def write_tables(self):
        """Write provided tables out to .rst file, replacing any existing copy."""
        if os.path.exists(self.file_path):
            os.remove(self.file_path)
        dirname = os.path.dirname(self.file_path)
        # Bug fix: a bare filename has an empty dirname, and os.makedirs("")
        # raises FileNotFoundError — only create the tree when one is named.
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        with open(self.file_path, "a") as writer:
            for idx, table in enumerate(self.tables):
                if idx:
                    # Two blank lines between consecutive tables.
                    writer.write("\n")
                    writer.write("\n")
                table.write_table(writer)
| [
"brad@bradsbrown.com"
] | brad@bradsbrown.com |
ef40ec48bf2a0cb2ff75da74ffa77734efd92e46 | 7e34f45c4c046f01764583b6317f85200ddf2bcf | /tests/settings.py | 6b203371768fca78fd5a3bcd4c863f83fbb1ae04 | [
"BSD-3-Clause"
] | permissive | MarkusH/django-jellyglass | 3953de9fb840320db23a8b748df089da2aeb1013 | 2b7c8fcaac76f8833f2880b10f687552530a3ccb | refs/heads/master | 2021-01-18T15:09:08.904899 | 2018-12-03T16:41:06 | 2018-12-03T16:41:06 | 49,637,243 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ":memory:",
}
}
DEBUG = True
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'jellyglass.apps.JellyGlassConfig',
]
LANGUAGE_CODE = 'en-us'
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tests.urls'
SECRET_KEY = 'test-secret-key'
STATIC_URL = '/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
| [
"info@markusholtermann.eu"
] | info@markusholtermann.eu |
8f204d1087c6d7598468d38b33dfb2e66a262381 | c78957074e53fa4bcf58f9c609fb95741707b0fc | /catalog/migrations/0003_book_description.py | 863cdf8cbd3c7d09a49fa31bfe390d9aab83c86b | [] | no_license | nutakoooye/django_local_library | 07a61e97caaddbf1df3b6c0d93aaff5cd7292507 | 470992039248b0e38182bcd60c2721c0e0264f61 | refs/heads/master | 2023-08-11T14:08:06.923049 | 2021-10-09T18:24:29 | 2021-10-09T18:24:29 | 415,385,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # Generated by Django 3.2.7 on 2021-10-06 10:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds an optional free-text `description` to Book."""
    # Must be applied after the previous catalog migration.
    dependencies = [
        ('catalog', '0002_auto_20211005_1751'),
    ]
    operations = [
        migrations.AddField(
            model_name='book',
            name='description',
            # Nullable and blankable: existing rows need no default value.
            field=models.TextField(blank=True, null=True),
        ),
    ]
| [
"andrkool151@gmail.com"
] | andrkool151@gmail.com |
26617b9cdb5592690aa2142c5ca215abaa4c7587 | b40c4c8478c86a8b1ac792429a385523d9e27f77 | /Python Spider/pyquery/6 pyquery foreach.py | 575af2e78cc59bcb67869e66d7ae534fafc9c040 | [
"Apache-2.0"
] | permissive | CodingGorit/Coding-With-Python | 2f100a393b8222354cf319b5e1ea0c3ffe7351df | b0f1d5d704b816a85b0ae57b46d00314de2a67b9 | refs/heads/master | 2020-12-19T13:17:28.129447 | 2020-05-31T12:29:43 | 2020-05-31T12:29:43 | 235,744,210 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | #!/usr/bin/python
# -*- coding: utf-8 --
#@File: 6 pyquery foreach.py
#@author: Gorit
#@contact: gorit@qq.com
#@time: 2020/5/27 0:03
'''
遍历
pyquery 的选择结果可能是多个节点,也有可能是单个节点
'''
from pyquery import PyQuery as pq
doc = pq(filename="test.html")
# 通过 items 获得每一项, 得到一个生成器
lis = doc('li').items()
print(type(lis))
for li in lis:
print(li.text(), type(li)) | [
"gorit@qq.com"
] | gorit@qq.com |
2c5521c3de850327de5dcd01344f18bbb8b19f26 | 260a46e010562fd49386cf4fe831c5a3e13aff8f | /Logistic/logisticRegression.py | 33be4bcb01f1b02edf2b68a0897d53ec0becf0a8 | [] | no_license | NanWangAC/MLCoding | bfc15807b3db49cccce53cc358e169d508779eba | 1517ce45956907b6a4cc63f7ece35fa40f7193da | refs/heads/master | 2020-03-10T13:21:26.442020 | 2018-10-02T14:23:48 | 2018-10-02T14:23:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,650 | py | from numpy import *
# Load the 2-D toy data set from 'testSet.txt'.
def loadDataSet():
    """Read 'testSet.txt' and return (feature rows, labels).

    Each feature row is [1.0, x1, x2]; the constant 1.0 stands in for the
    bias term x0.  Labels are the integer class tags from column 3.

    Bug fix: the original opened the file and never closed it — the handle
    is now managed with a `with` block.
    """
    dataMat = []; labelMat = []
    with open('testSet.txt') as fr:
        # Each line: "<x1>\t<x2>\t<label>" (whitespace-separated).
        for line in fr:
            lineArr = line.strip().split()
            dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
            labelMat.append(int(lineArr[2]))
    return dataMat, labelMat
# Logistic sigmoid, numerically stabilised against overflow.
def sigmoid(inX):
    """Return 1 / (1 + e^-x) for a scalar or numpy array.

    Bug fix: the argument is clipped to [-500, 500] before exponentiation
    so exp() cannot overflow — the original emitted RuntimeWarnings for
    large-magnitude negative inputs.
    """
    return 1.0 / (1 + exp(-clip(inX, -500, 500)))


# Full-batch gradient ascent for the logistic-regression log-likelihood.
# @dataMatIn: list of feature rows, each beginning with the bias term x0 = 1
# @classLabels: list of 0/1 class labels
def gradAscent(dataMatIn, classLabels):
    """Fit and return the (n, 1) logistic-regression weight matrix.

    Runs a fixed 500 iterations at learning rate 0.001; each step moves the
    weights along X^T * (y - h), the gradient of the log-likelihood.
    """
    dataMatrix = mat(dataMatIn)                # m x n sample matrix
    labelMat = mat(classLabels).transpose()    # m x 1 label column
    m, n = shape(dataMatrix)
    alpha = 0.001                              # learning rate
    maxCycles = 500                            # fixed iteration budget
    weights = ones((n, 1))                     # start from all-ones weights
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)      # m x 1 predicted probabilities
        error = (labelMat - h)                 # residual per sample
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights
# Plot the data set together with the fitted decision boundary.
def plotBestFit(weights):
    """Scatter both classes of the 2-D toy data and draw the line where
    w0 + w1*x1 + w2*x2 = 0, i.e. where the predicted probability is 0.5."""
    import matplotlib.pyplot as plt
    dataMat,labelMat=loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    # Coordinates of class-1 and class-0 samples, collected separately.
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i])== 1:
            xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
        else:
            xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Red squares for class 1, green dots for class 0.
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = arange(-3.0, 3.0, 0.1)
    # Boundary: solve w0 + w1*x + w2*y = 0 for y.
    y = (-weights[0]-weights[1]*x)/weights[2]
    ax.plot(x, y)
    plt.xlabel('X1'); plt.ylabel('X2');
    plt.show()
# Plain stochastic gradient ascent: one sweep, one sample per update.
def stocGradAscent0(dataMatrix, classLabels):
    """Update the logistic-regression weights once per training sample.

    Makes a single in-order pass over the data with a fixed 0.01 learning
    rate and returns the length-n weight vector.
    """
    m, n = shape(dataMatrix)
    learning_rate = 0.01
    # All weights start at 1.0.
    weights = ones(n)
    for idx in range(m):
        prediction = sigmoid(sum(dataMatrix[idx] * weights))
        residual = classLabels[idx] - prediction
        weights = weights + learning_rate * residual * dataMatrix[idx]
    return weights
# Improved stochastic gradient ascent.
# @dataMatrix: data set (array of feature rows)
# @classLabels: label list
# @numIter: number of epochs, default 150
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    """Fit logistic-regression weights by randomized stochastic ascent.

    Each epoch visits every sample exactly once in random order, with a
    step size decaying as 4 / (1 + epoch + step) + 0.0001 so late updates
    settle down instead of oscillating around the optimum.

    Bug fix: the original drew `randIndex` from the *shrinking* index list
    but then used it directly as a row number (`dataMatrix[randIndex]`),
    biasing which samples were visited.  The random position is now mapped
    through `dataIndex` to the actual sample row.
    """
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))
        for i in range(m):
            # Dynamic step size: decays with both epoch and step count.
            alpha = 4 / (1.0 + j + i) + 0.0001
            # Pick a random position among the not-yet-used samples ...
            randIndex = int(random.uniform(0, len(dataIndex)))
            # ... and translate it to the real row number.
            sampleIndex = dataIndex[randIndex]
            h = sigmoid(sum(dataMatrix[sampleIndex] * weights))
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            # Remove the position so each sample is used once per epoch.
            del(dataIndex[randIndex])
    return weights
# Decision rule: class 1 when P(y=1 | x) exceeds 0.5.
def classifyVector(inX, weights):
    """Return 1.0 or 0.0 for feature vector *inX* under *weights*."""
    probability = sigmoid(sum(inX * weights))
    return 1.0 if probability > 0.5 else 0.0
# Logistic-regression prediction on the horse-colic data set.
def colicTest():
    """Train on horseColicTraining.txt, evaluate on horseColicTest.txt.

    Prints and returns the test-set error rate.

    Bug fix: both files were opened and never closed — they are now
    managed with `with` blocks.
    """
    trainingSet = []; trainingLabels = []
    # Parse the training file: 21 tab-separated features, label in column 21.
    with open('horseColicTraining.txt') as frTrain:
        for line in frTrain:
            currLine = line.strip().split('\t')
            lineArr = [float(currLine[i]) for i in range(21)]
            trainingSet.append(lineArr)
            trainingLabels.append(float(currLine[21]))
    # Fit the weights with randomized stochastic gradient ascent.
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 5000)
    # Count mispredictions over the test file.
    errorCount = 0; numTestVec = 0.0
    with open('horseColicTest.txt') as frTest:
        for line in frTest:
            numTestVec += 1.0
            currLine = line.strip().split('\t')
            lineArr = [float(currLine[i]) for i in range(21)]
            # Compare the model's prediction against the true label.
            if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[21]):
                errorCount += 1
    errorRate = (float(errorCount)/numTestVec)
    print("the error rate of this test is: %f" % errorRate)
    return errorRate
# Average the prediction error over several independent runs.
def multiTest():
    """Run colicTest() ten times and print the mean error rate."""
    numTests = 10
    errorSum = sum(colicTest() for _ in range(numTests))
    print("after %d iterations the average error rate is: %f" % (numTests, errorSum/float(numTests)))
| [
"794202109@qq.com"
] | 794202109@qq.com |
3ce09fb2ccef610d6748908940c731ae6bd65fc4 | 3758a30f9e7dceed1eb3f1bc5624324384b1e795 | /augmentation/mask_morph_question.py | 73c7ee0212326c77a2aad9f670616e835fd38562 | [] | no_license | boostcampaitech2/mrc-level2-nlp-05 | 6682134c45f545126c724d09d0d5527530b854d8 | e648d5ec6dd2af5aa2b463c4960511a1f69ec230 | refs/heads/master | 2023-09-04T18:36:01.213469 | 2021-11-10T15:31:09 | 2021-11-10T15:31:09 | 416,144,706 | 1 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | from transformers import AutoTokenizer
import numpy as np
from datasets import load_from_disk
from pororo import Pororo
import logging
logger = logging.getLogger(__name__)
model_name = "klue/roberta-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
MASK_TOKEN = tokenizer.mask_token
MASK_RATIO = 0.2
MAX_MASK_NUM = 2
LOAD_DIR = "/opt/ml/data/train_dataset"
SAVE_DIR_WORD = "/opt/ml/data/mask_morph_q_train_dataset"
ner = Pororo(task="ner", lang="ko")
def mask_ner_tag(examples):
    """Batch-map function: mask up to MAX_MASK_NUM named entities per question.

    Uses the module-level Pororo `ner` tagger; tokens whose tag is not 'O'
    (i.e. actual entities) are replaced with the tokenizer's mask token.
    Masking positions beyond MAX_MASK_NUM are un-masked at random.
    """
    questions = examples["question"]
    aug_questions = []
    q_mark = '?'
    for question in questions:
        # pairs: list of (token, tag) from the NER model.
        pairs = ner(question)
        # True where the token is a named entity ('O' = outside any entity).
        mask = [False if tag == 'O' else True for _, tag in pairs]
        mask = np.array(mask)
        # set maximum num of masks to MAX_MASK_NUM
        if sum(mask) > MAX_MASK_NUM:
            mask_idx = np.where(mask)
            # Randomly un-mask the surplus positions (non-deterministic —
            # depends on the global numpy RNG state).
            set_false_pos = np.random.choice(mask_idx[0], sum(mask) - MAX_MASK_NUM, replace=False)
            mask[set_false_pos] = False
        # NOTE: the comprehension variable `mask` shadows the array above
        # inside the comprehension scope only.
        masked_text = [MASK_TOKEN if mask else pair[0] for pair, mask in zip(pairs, mask)]
        # NOTE(review): assumes `pairs` is non-empty — mask[-1] would raise
        # IndexError for an empty question; confirm upstream filtering.
        if mask[-1]: masked_text.append(q_mark) # if the last word is masked, append '?'
        aug_questions.append("".join(masked_text))
    return {"question": aug_questions}
# load & map augmentation
datasets = load_from_disk("/opt/ml/data/train_dataset")
train_dataset = datasets["train"]
# train_dataset = train_dataset.select(range(30)) # sample
train_dataset_aug = train_dataset.map(
mask_ner_tag,
batched=True,
batch_size=8,
# num_proc=4
)
# save datasets
train_dataset_aug.save_to_disk(SAVE_DIR_WORD)
logger.info(f'Created a new train dataset of size {len(train_dataset_aug)}')
| [
"kkmjkim1528@gmail.com"
] | kkmjkim1528@gmail.com |
c757d357b25a4544e138f19d00005eba273480de | cc32c03739d4692a9e60a6cc178fbad75e15d735 | /src/fib_common/fib_msg.py | defc5028bbb6e3bde725d99b99fd75550728d536 | [] | no_license | lvrixp/fib_service | 3431210236b322815fe1e0a1ccab0496380fc8c7 | 82970b2864fc29d93a45c6c36ee9acf0eb33f019 | refs/heads/master | 2021-01-10T07:39:25.690998 | 2015-06-04T05:43:10 | 2015-06-04T05:43:10 | 36,613,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | '''Simple message definition for the client-server communication
TODO:
Add more sophicated schema for the message
Eg, for server-client message, add return code, error mssage.
'''
import json
class FibMsg(object):
    """Abstract base for messages exchanged by the Fibonacci service."""
    def serialize(self):
        """Serialize to a JSON string; subclasses provide the real body."""
        pass
class FibCliSrvMsg(FibMsg):
    """Request message: client asks the server for the first N numbers.

    Schema:
        {
            'N' : <integer>  # front N numbers are requested
        }
    """
    def __init__(self, n = 0):
        self.N = int(n)
    def serialize(self):
        payload = {'N': self.N}
        return json.dumps(payload)
    @staticmethod
    def deserialize(msg):
        payload = json.loads(msg)
        return FibCliSrvMsg(payload['N'])
class FibSrvCliMsg(FibMsg):
    """Reply message: server returns the first N Fibonacci numbers.

    Schema:
        {
            'N' : <integer>      # original value sent by the client
            'result' : <string>  # front N fibonacci numbers
        }
    """
    def __init__(self, n = 0, res = ''):
        self.N = int(n)
        self.result = res
    def serialize(self):
        payload = {'N': self.N, 'result': self.result}
        return json.dumps(payload)
    @staticmethod
    def deserialize(msg):
        payload = json.loads(msg)
        return FibSrvCliMsg(payload['N'], payload['result'])
| [
"lvrixp@163.com"
] | lvrixp@163.com |
06a9fdc73d9799df7dacc77be61a90f5f6e0389e | 645d86b54a777f2b3762206c4fdfaa276acbf30c | /gui.py | 2b54ac36ccf352dfb0e3f3b5c700214b648829a0 | [] | no_license | jacobgb24/airport522 | 15218004ce1eab1290f9f04fba17c535244d4782 | d6bc37797bcde8d342573128009ad011286c9995 | refs/heads/master | 2021-03-28T20:16:07.687928 | 2020-04-21T22:54:11 | 2020-04-21T22:54:11 | 247,892,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,399 | py | import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from dash.dependencies import Input, Output, State
import utils
import logging
from typing import Union, List
import time
from radio import BaseRadio
from message import Message
from aircraft import Aircraft
# suppress logging of POST requests
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
class GUIData:
    """ Class for holding global data for display on GUI. This helps with loading data after refresh """
    # All state is class-level so every Dash callback sees the same data.
    aircraft: List[Aircraft] = []
    msg_log: List[str] = []
    radio: Union[None, BaseRadio] = None

    @classmethod
    def add_message(cls, msg: Message) -> None:
        """ add message to data. Updates msg_log and relevant aircraft """
        # Newest message goes to the front of the log.
        cls.msg_log.insert(0, str(msg))
        cls._update_aircraft(msg)
        # NOTE(review): messages are inserted at index 0, so slicing [100:]
        # drops the 100 *newest* entries once the log exceeds 1000 —
        # trimming the tail may have been intended; confirm.
        if len(cls.msg_log) > 1000:
            cls.msg_log = cls.msg_log[100:]

    @classmethod
    def _update_aircraft(cls, msg: Message) -> None:
        # Update the matching aircraft in place, or prepend a new one.
        # NOTE(review): `craft == msg.icao` presumably relies on
        # Aircraft.__eq__ comparing against ICAO strings — confirm in aircraft.py.
        for craft in cls.aircraft:
            if craft == msg.icao:
                craft.update(msg.data)
                break
        else:
            cls.aircraft.insert(0, Aircraft(msg.icao, msg.data))
        # NOTE(review): same head-slicing concern as in add_message — [25:]
        # discards the 25 most recently *added* aircraft.
        if len(cls.aircraft) > 100:
            cls.aircraft = cls.aircraft[25:]

    @classmethod
    def remove_old(cls) -> List[str]:
        """ Removes any aircraft that haven't recently changed. Returns list of removed ICAOs"""
        to_remove = []
        for craft in cls.aircraft:
            # Stale after 180 s without an update (last_update in epoch seconds).
            if round(time.time()) - craft.last_update > 180:
                to_remove.append(craft)
        for craft in to_remove:
            cls.aircraft.remove(craft)
        return [c.icao for c in to_remove]

    @classmethod
    def get_msg_log(cls) -> str:
        # Newline-joined log text, newest message first.
        return '\n'.join(cls.msg_log)
plot_map = go.Figure()
app = dash.Dash('Airport 522', assets_folder='gui_assets')
app.title = 'Airport 522'
app.layout = html.Div(style={'display': 'flex', 'flexDirection': 'column', 'width': '100vw', 'height': '100vh'},
children=[
html.Div(style={'flex': 8, 'display': 'flex'}, children=[
html.Div(style={'flex': 1, 'padding': 8, 'display': 'flex', 'flexDirection': 'column'},
children=[
html.H2('Aircraft'),
html.Ul(id='aircraft-list',
style={'flex': 1, 'borderStyle': 'solid', 'borderWidth': 2,
'margin': 0,
'list-style-type': 'none', 'padding': 0}, children=[])
]),
html.Div(style={'flex': 1, 'padding': 8, 'display': 'flex', 'flexDirection': 'column'},
children=[
html.H2('Map View'),
dcc.Graph(id='map', style={'flex': 1}, figure=plot_map)
])
]),
html.Div(style={'flex': 2, 'padding': 8}, children=[
html.H2('Raw Message Log'),
dcc.Textarea(id='message-log',
style={'height': '70%', 'width': '100%', 'padding': 0, 'resize': 'none'},
placeholder='Raw Messages appear here', readOnly=True)
]),
dcc.Interval(id='interval', interval=1000, n_intervals=0)
])
@app.callback([Output('message-log', 'value'), Output('map', 'figure'), Output('aircraft-list', 'children')],
              [Input('interval', 'n_intervals')],
              [State('message-log', 'value'), State('map', 'figure'), State('aircraft-list', 'children')])
def get_messages(n, old_msgs, fig, craft_list):
    """
    Main callback function for dash, fired once per interval tick
    :param n: unused, but needed since we trigger function via interval
    :param old_msgs: string value inside raw message log. Used for no-op calls to keep log the same
    :param fig: the map. Can update data dictionary directly
    :param craft_list: the children of the aircraft list. Used to determine if refresh occurred
    :return: message log, map figure, aircraft list (list of <li> elements)
    """
    # Drain everything the radio has queued since the last tick.
    msgs = GUIData.radio.get_all_queue()
    valid = [m for m in msgs if m is not None and m.valid]
    # refresh data to remove old aircraft and delete them from map
    for r in GUIData.remove_old():
        remove_aircraft_map(fig, r)
    # No new messages and no page refresh pending: keep the log unchanged.
    if len(valid) == 0 and not (len(craft_list) < len(GUIData.aircraft)):
        # print('No msgs')
        return old_msgs, fig, build_aircraft_ul()
    print(f"[{' '.join([m.icao for m in valid])}]")
    # add messages/aircraft to global data (oldest first so ordering holds)
    for m in reversed(valid):
        GUIData.add_message(m)
    # set locations on map for all known aircraft
    for craft in GUIData.aircraft:
        if craft['lat'].value_str != 'Unknown':
            update_aircraft_map(fig, craft['lat'].value, craft['lon'].value, craft.icao)
    return GUIData.get_msg_log(), fig, build_aircraft_ul()
def build_aircraft_ul() -> List[html.Li]:
    """
    Builds the aircraft list for the GUI
    :return: a list of <li> elements
    """
    children = []
    # One <li> per known aircraft; each aircraft's ICAO hex doubles as a
    # color code (f'#{icao}') so the list entry matches its map marker.
    for craft in GUIData.aircraft:
        li = html.Li(id=f'li-{craft.icao}', style={'display': 'flex', 'padding': 8, 'border-bottom': '2px solid gray'},
                     children=[
                         # Left column: model/operator header plus flight data.
                         html.Div(style={'flex': 3}, children=[
                             html.P(style={'margin': 0}, children=[
                                 html.H3(f'{craft.model}', title="Model", style={'display': 'inline', 'margin': 0}),
                                 html.H4(f' | {craft.operator}', title='Operator', style={'display': 'inline'}),
                                 html.I(f' (Updated: {round(time.time()) - craft.last_update}s ago)',
                                        style={'display': 'inline'})
                             ]),
                             html.P(style={'margin': 0, 'marginLeft': 12}, children=[
                                 html.P(f'ID: {craft["id"].value}', title='Flight ID',
                                        style={'display': 'inline', 'margin': 0}),
                                 html.P(f' ({craft.icao})', title='ICAO ID', style={'display': 'inline'}),
                                 html.P(f'Horz. Vel.: {craft["horz_vel"].value_str} {craft["horz_vel"].unit}',
                                        title='Horizontal Velocity', style={'margin': 0}),
                                 html.P(f'Vert. Vel.: {craft["vert_vel"].value_str} {craft["vert_vel"].unit}',
                                        title='Vertical Velocity', style={'margin': 0}),
                                 html.P(f'Heading: {craft["heading"].value_str} {craft["heading"].unit}',
                                        title='Heading', style={'margin': 0})
                             ])
                         ]),
                         # Right column: position block, underlined in the
                         # aircraft's marker color.
                         html.P(style={'flex': '0 1 auto', 'borderBottom': f'6px solid #{craft.icao}', 'height': 'auto',
                                       'textAlign': 'right'}, children=[
                             html.P(f'Lat: {craft["lat"].value_str} {craft["lat"].unit}', style={'margin': 0}),
                             html.P(f'Lon: {craft["lon"].value_str} {craft["lon"].unit}', style={'margin': 0}),
                             html.P(f'Alt: {craft["alt"].value} {craft["alt"].unit}', style={'margin': 0})
                         ])
                     ])
        children.append(li)
    return children
def update_aircraft_map(fig: go.Figure, lat: float, lon: float, icao_id: str) -> None:
    """Move the aircraft's map marker to (lat, lon), creating the trace if new."""
    # Round to 4 decimal places (~11 m precision) for display.
    lat = round(lat, 4)
    lon = round(lon, 4)
    existing = next((t for t in fig['data'] if t['name'] == icao_id), None)
    if existing is not None:
        existing['lat'] = [lat]
        existing['lon'] = [lon]
    else:
        # New aircraft: its ICAO hex doubles as the marker color.
        marker = dict(size=12, color=f'#{icao_id}')
        fig['data'].append(go.Scattermapbox(lat=[lat], lon=[lon], mode='markers',
                                            hoverinfo='lat+lon+name',
                                            marker=marker, name=icao_id))
def remove_aircraft_map(fig: go.Figure, icao_id: str) -> None:
    """Delete the trace belonging to *icao_id* from the map figure."""
    survivors = []
    for trace in fig['data']:
        if trace['name'] != icao_id:
            survivors.append(trace)
    fig['data'] = survivors
def run_gui(radio: BaseRadio, debug: bool):
    """ Runs the GUI: registers the radio, seeds the map, starts the server """
    # The Dash callbacks pull messages from this radio instance.
    GUIData.radio = radio
    # Fixed red marker at the receiver's reference location.
    plot_map.add_trace(go.Scattermapbox(lat=[utils.REF_LAT], lon=[utils.REF_LON], mode='markers',
                                        marker=dict(size=16, color='rgb(255,0,0)'), hoverinfo='lat+lon+name',
                                        name='Ref. Loc.'))
    # Center the map on the reference location; legend pinned top-left.
    plot_map.update_layout(mapbox=dict(style='stamen-terrain', bearing=0, zoom=8,
                                       center=dict(lat=utils.REF_LAT, lon=utils.REF_LON)),
                           legend=dict(x=0, y=1, bgcolor='rgba(224,224,224,0.85)'),
                           margin=dict(l=0, r=0, t=0, b=0, pad=0))
    # Blocking call: serves the Dash app until interrupted.
    app.run_server(debug=debug)
| [
"jacobgb24@yahoo.com"
] | jacobgb24@yahoo.com |
573cb4a3fb8baf2bee9bb13770e07a47308e1816 | c404669933e62c085f8c0c9f765b1ccc199be5bd | /MyWebsite/read/migrations/0001_initial.py | 5016507dce65b5d1993cf04fa7dcd99d7e65f8aa | [] | no_license | fenghuoxiguozu/Django | c32d14f6762a29b7cacd998ac19bba61322397f9 | de240c89eeb21b3463d84f40f4f18260981d6dcd | refs/heads/master | 2022-10-01T09:40:15.745046 | 2020-06-09T16:42:12 | 2020-06-09T16:42:12 | 263,092,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | # Generated by Django 2.2.1 on 2020-06-06 22:10
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='ReadNum',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('read_num', models.IntegerField(default=0)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'verbose_name': '文章阅读量',
'verbose_name_plural': '文章阅读量',
'db_table': 'ReadNum',
},
),
migrations.CreateModel(
name='ReadDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('read_num', models.IntegerField(default=0)),
('read_date', models.DateField(default=django.utils.timezone.now)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'verbose_name': '文章详情阅读量',
'verbose_name_plural': '文章详情阅读量',
'db_table': 'ReadDetailNum',
},
),
]
| [
"1058247664@qq.com"
] | 1058247664@qq.com |
bdf1103898da8016469f1560497ec30b9ef7d229 | 6d2b0400ff1673619cdf050625d9e2f14a0c8c2b | /Algorithms/Directed_Graph.py | 80d238b3d176c0906f758cd76b4e30a20ebd28bb | [] | no_license | Anekjain/Programming-Solution | 1ec1f868eda600608488a4cbbe2f76c03257167d | b8c2073021c7ddac1271e32ba817c8be63d7a3a4 | refs/heads/master | 2022-11-24T21:59:01.996337 | 2020-08-04T18:19:19 | 2020-08-04T18:19:19 | 280,907,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | #GRAPH IMPLEMENTATION [DIRECTED GRAPH] USING ADJACENCY LIST
def add_vertex(v):
    """Add vertex *v* to the module-level adjacency list ``graph``.

    On success the vertex gets an empty outgoing-edge list and the
    module-level ``vertices_no`` counter is incremented; if the vertex
    already exists the graph is left unchanged and a warning is printed.
    """
    global graph
    global vertices_no
    if v in graph:
        # BUGFIX: corrected typo in the user-facing message ("alreadu").
        print("Vertex ", v, " already exists.")
    else:
        vertices_no += 1
        graph[v] = []  # no outgoing edges yet
def add_edge(v1, v2, e):
    """Append a directed edge v1 -> v2 with weight *e* to the global graph.

    Prints a warning and does nothing if either endpoint is missing.
    """
    global graph
    for endpoint in (v1, v2):
        if endpoint not in graph:
            print("Vertex ", endpoint, " not in Graph")
            return
    graph[v1].append([v2, e])
def print_graph():
    """Print every edge of the global graph, one line per edge."""
    global graph
    for src, edge_list in graph.items():
        for dst, weight in edge_list:
            print(src, " -> ", dst, " edges weight: ", weight)
if __name__ == "__main__":
    # Module-level state mutated by the helper functions above.
    graph = {}
    vertices_no = 0
    #ADDING VERTICES
    add_vertex(1)
    add_vertex(2)
    add_vertex(3)
    add_vertex(4)
    #ADDING EDGES (directed, weighted)
    add_edge(1, 2, 1)
    add_edge(1, 3, 1)
    add_edge(2, 3, 3)
    add_edge(3, 4, 4)
    add_edge(4, 1, 5)
    #PRINT GRAPH
    print_graph()
    #PRINTING INTERNAL GRAPH
    print("Internal representation: ", graph)
| [
"jainanek@gmail.com"
] | jainanek@gmail.com |
a1b11d09ddf6368b66e674379e2446e23184d0ec | 9e536df5a2f8fc79e2e7f813dd3f53d77c18f1c7 | /python/excel/bankRecSecondary/bankRecSecondary3.py | d234e24fdfb378ba7f6fc48a3dbe0ee04bc70796 | [] | no_license | creednaylor/publicProjects | 4b2d82b2c7aa5fbd6afbcf8bf1c911c0c4c89265 | 3a9498f5df023d9f857e6352028f361444c5e63f | refs/heads/master | 2023-03-16T01:21:49.483451 | 2022-09-06T04:54:17 | 2022-09-06T04:54:17 | 217,166,925 | 0 | 0 | null | 2023-03-06T07:49:12 | 2019-10-23T22:47:20 | Python | UTF-8 | Python | false | false | 4,219 | py | #on comparison sheet, could write only the necessary columns
#on comparison sheet, could write only the necessary columns
#local application imports
from pathlib import Path
import sys
pathToThisPythonFile = Path(__file__).resolve()
sys.path.append(str(pathToThisPythonFile.parents[3]))
import herokuGorilla.backend.python.myPythonLibrary._myPyFunc as _myPyFunc
startTime = _myPyFunc.printElapsedTime(False, "Starting code")
from pprint import pprint as pp
import win32com.client
# Attach to Excel via COM; keep it visible, suppress dialog prompts while the
# script manipulates workbooks.
excelApp = win32com.client.gencache.EnsureDispatch('Excel.Application')
excelApp.Visible = True
excelApp.DisplayAlerts = False
# pp("Manual printout: " + str(Path.cwd().parents[3]) + "\\privateData\\python\\excel\\bankRecSecondary")
filePath = _myPyFunc.replacePartOfPath(pathToThisPythonFile.parents[0], 'publicProjects', 'privateData')
fileName = "Bank Rec"
fileExtension = ".xlsx"
# Save a backup copy of the workbook before modifying anything.
excelApp.Workbooks.Open(Path(filePath, fileName + fileExtension))
excelApp.Calculation = win32com.client.constants.xlCalculationManual
excelBackupWb = excelApp.Workbooks(fileName + fileExtension)
excelBackupWb.SaveAs(Filename=str(Path(filePath, fileName + " Before Running 2" + fileExtension)), FileFormat=51)
excelApp.Calculation = win32com.client.constants.xlCalculationAutomatic
excelBackupWb.Close()
# Re-open the real workbook and grab the three sheets used below; manual
# calculation keeps the big copy loop fast.
excelApp.Workbooks.Open(Path(filePath, fileName + fileExtension))
excelApp.Calculation = win32com.client.constants.xlCalculationManual
excelWb = excelApp.Workbooks(fileName + fileExtension)
excelGPTableSheet = excelWb.Worksheets("GP Table")
excelBankTableSearchSheet = excelWb.Worksheets("Bank Table Search")
excelCompSheet = excelWb.Worksheets("Comparison")
excelCompSheet.UsedRange.Clear()
# Layout constants: column counts and the search-key column on each sheet.
rowAfterHeader = 2
bankColumns = 8
bankTableSearchCol = 8
gpColumns = 17
gpSearchValueCol = 6
splitTime = _myPyFunc.printElapsedTime(startTime, "Finished importing modules and intializing variables")
# Copy both header rows onto the Comparison sheet, side by side.
excelBankTableSearchSheet.Range(excelBankTableSearchSheet.Cells(1, 1), excelBankTableSearchSheet.Cells(1, bankColumns)).Copy(excelCompSheet.Cells(1, 1))
excelGPTableSheet.Range(excelGPTableSheet.Cells(1, 1), excelGPTableSheet.Cells(1, gpColumns)).Copy(excelCompSheet.Cells(1, bankColumns + 1))
gpRow = rowAfterHeader
while excelGPTableSheet.Cells(gpRow, 1).Value:
    #put in GP data
    excelGPTableSheet.Range(excelGPTableSheet.Cells(gpRow, 1), excelGPTableSheet.Cells(gpRow, gpColumns)).Copy(excelCompSheet.Cells(gpRow, bankColumns + 1))
    #check bank data
    # Collect every bank row whose search column matches this GP row's value.
    rowsToCheck = []
    startingSearchRow = 2
    endingSearchRow = excelBankTableSearchSheet.Cells(2, bankTableSearchCol).End(win32com.client.constants.xlDown).Row
    searchText = excelGPTableSheet.Cells(gpRow, gpSearchValueCol).Value
    # NOTE(review): endingSearchRow is computed twice with the same start row
    # value (2 == rowAfterHeader) -- the first assignment appears redundant.
    endingSearchRow = excelBankTableSearchSheet.Cells(rowAfterHeader, bankTableSearchCol).End(win32com.client.constants.xlDown).Row
    while startingSearchRow <= endingSearchRow:
        startingSearchCell = excelBankTableSearchSheet.Cells(startingSearchRow, bankTableSearchCol)
        endingSearchCell = excelBankTableSearchSheet.Cells(startingSearchRow, bankTableSearchCol).End(
            win32com.client.constants.xlDown)
        # pp(startingSearchCell.Address)
        # pp(endingSearchCell.Address)
        foundRange = excelBankTableSearchSheet.Range(startingSearchCell, endingSearchCell).Find(What=searchText, LookAt=win32com.client.constants.xlWhole)
        pp("foundRange is " + str(foundRange))
        if foundRange:
            rowsToCheck.append(foundRange.Row)
            startingSearchRow = foundRange.Row + 1
            pp(foundRange)
        else:
            break
    # Only move the bank row over when the match is unambiguous (exactly one).
    if len(rowsToCheck) == 1:
        excelBankTableSearchSheet.Range(excelBankTableSearchSheet.Cells(rowsToCheck[0], 1), excelBankTableSearchSheet.Cells(rowsToCheck[0], bankColumns)).Copy(excelCompSheet.Cells(gpRow, 1))
        excelBankTableSearchSheet.Cells(rowsToCheck[0], 1).EntireRow.Delete()
    gpRow = gpRow + 1
# Tidy up, restore Excel settings, and save the workbook.
excelCompSheet.Cells.EntireColumn.AutoFit()
excelApp.DisplayAlerts = True
excelApp.Calculation = win32com.client.constants.xlCalculationAutomatic
excelWb.Save()
excelApp.Visible = True
_myPyFunc.printElapsedTime(startTime, "Total time to run code")
| [
"16467643+creednaylor@users.noreply.github.com"
] | 16467643+creednaylor@users.noreply.github.com |
032be5db944974a1f32618e9395669e88e00c17e | 5dfbfa153f22b3f58f8138f62edaeef30bad46d3 | /old_ws/build/catkin_generated/order_packages.py | e6748be72199deb25e033f5d33a964cf1bf10700 | [] | no_license | adubredu/rascapp_robot | f09e67626bd5a617a569c9a049504285cecdee98 | 29ace46657dd3a0a6736e086ff09daa29e9cf10f | refs/heads/master | 2022-01-19T07:52:58.511741 | 2019-04-01T19:22:48 | 2019-04-01T19:22:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | # generated from catkin/cmake/template/order_packages.context.py.in
# Workspace configuration values emitted by catkin's template for package
# ordering; the conditional expressions turn empty template strings into [].
source_root_dir = "/home/bill/ros_ws/src"
# Package-name filters (empty in this workspace).
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
# Chained result spaces searched for already-built dependencies.
underlay_workspaces = "/home/bill/ros_ws/devel;/opt/ros/kinetic".split(';') if "/home/bill/ros_ws/devel;/opt/ros/kinetic" != "" else []
| [
"alphonsusbq436@gmail.com"
] | alphonsusbq436@gmail.com |
801d95f3536632bed21bd2dd124eb95aae92e76c | d0c66cfc37d0f7652874fc95cf1c4fef348169e1 | /chembase/migrations/0003_auto_20170728_1055.py | fcb9ef24fbb89ec1184ba54188dacdc97ca1d714 | [] | no_license | mpustula/ChemBase | 1b9c56ea5f988d27f13e099be0f5294419f3cc9c | 4171d99e176464ff451cb26d8fdebf2de1204e2d | refs/heads/master | 2020-03-12T22:50:16.351950 | 2019-07-10T20:10:21 | 2019-07-10T20:10:21 | 130,854,464 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-28 10:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Rename ``Groups`` -> ``Group`` and attach each item to a group."""

    dependencies = [
        ('chembase', '0002_auto_20170728_0645'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Groups',
            new_name='Group',
        ),
        migrations.AddField(
            model_name='item',
            name='group',
            # PROTECT: a group cannot be deleted while items reference it.
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, to='chembase.Group'),
            preserve_default=False,
        ),
    ]
| [
"pustula.marcin@gmail.com"
] | pustula.marcin@gmail.com |
f2ab4a4f65e8c7144dc70fe2702bc225a23aa402 | 54be402879d53a484ac22ba283ee87aeb602f5ec | /src/lab1/nodes/pursuer.py | dc8fcdccba56d389b0e77c7dcdfd72e634631613 | [] | no_license | fazildgr8/robot_algorithms_projects | 15924a20ccf55f1cc6ef161bbff5d8e8bad7b5a6 | b5489a96d9275644f92bc73abf2beda8bf3f145a | refs/heads/main | 2023-07-19T14:45:19.001534 | 2021-09-08T04:58:02 | 2021-09-08T04:58:02 | 305,238,567 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,042 | py | #!/usr/bin/python
import roslib
roslib.load_manifest('lab1')
import rospy
import tf
import math
import random
import geometry_msgs.msg
from sensor_msgs.msg import LaserScan
import numpy as np
import time
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion, quaternion_from_euler
# Shared state updated by the ROS callbacks below and read by the main loop.
left = 0
right = 0
front = 0
prev_location = [0,0] #x,y
pursuer_location = [3,3] #x,y
pursuer_rot = [0,0,0] #roll, pitch, yaw
distance = 1000
prev_heading = 0
def callback(msg):
    """Laser-scan handler: refresh the global left/front/right mean ranges."""
    global front, right, left
    ranges = np.array(msg.ranges)
    right = np.mean(ranges[0:120])    # mean of the rightmost 120 beams
    front = np.mean(ranges[121:240])  # mean of the centre beams
    left = np.mean(ranges[241:])      # mean of the leftmost beams
def callback_odom(msg):
    """Odometry handler: refresh the global pursuer pose (position + RPY)."""
    global pursuer_location, pursuer_rot
    pose = msg.pose.pose
    pursuer_location = [pose.position.x, pose.position.y]
    quat = [pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]
    pursuer_rot = list(euler_from_quaternion(quat))
# Persuer Velocity Computation
def vel_compute(trans,rot,goal_loc):
    """Proportional controller: velocities to drive the pursuer toward a goal.

    trans/rot are the pursuer's current (x, y) translation and
    (roll, pitch, yaw); goal_loc is the target (x, y).  Returns
    (linear, angular) velocity.  Also updates the module globals
    ``distance``, ``theta_d`` and ``prev_heading`` as side effects.
    """
    global distance, theta_d, prev_heading
    # Propotional Controller
    # Parameters
    d_tolerence = 0.01  # stop correcting once within this distance of the goal
    v_max = 10          # linear-velocity cap
    kl = 1 # Linear V tune
    ka = 4 # Angular V tune
    X = goal_loc[0]
    Y = goal_loc[1]
    x = trans[0]
    y = trans[1]
    theta = rot[2]
    v_x = 0
    theta_d = 0
    v_theta = 0
    err_theta = 0
    d = math.sqrt(((X-x)**2)+((Y-y)**2)) # Distance
    quad = quadrant_check(trans,goal_loc) # Quadrant Find
    # atan() only spans (-pi/2, pi/2); shift the heading by pi when the goal
    # lies in quadrant 2 or 3 so the error stays comparable.
    if(quad == 3):
        theta = theta + math.pi
    if(quad == 2):
        theta = theta - math.pi
    if((X-x)==0):
        # Goal directly above/below: avoid division by zero in atan().
        theta_d = 0
        err_theta = 0
    else:
        theta_d = math.atan((Y-y)/(X-x))
        err_theta = theta_d-theta
    # NOTE(review): this repeats the assignment above and also overwrites the
    # err_theta = 0 branch for the vertical case -- confirm it is intentional.
    err_theta = theta_d-theta
    distance = d
    if(d>=d_tolerence):
        # Linear Velocity
        v_x = kl*d
        if(v_x>v_max):
            v_x = v_max
        # Angular Velocity
        v_theta = ka*err_theta
    print('v_x - ',v_x,' v_theta - ',v_theta,'Distance -',distance,' error -',err_theta)
    print('Current Heading-',math.degrees(theta),'Goal Heeading -',math.degrees(theta_d))
    print('Current - ',trans,'Goal -',goal_loc )
    print('Quad -',quad)
    print('\n')
    prev_heading = theta_d
    return v_x,v_theta # linear velocity Angular Velocity
def Obstacle_avoid(vel_1):
    """Steer away from the nearer wall by publishing one angular Twist."""
    print('Obstacle Pursuer')
    turn_gain = 1  # obstacle-avoidance turn factor
    cmd = geometry_msgs.msg.Twist()
    if left < right:
        # Wall closer on the left: turn clockwise, scaled by right clearance.
        cmd.angular.z = -1 * turn_gain * right
        vel_1.publish(cmd)
    elif right < left:
        # Wall closer on the right: turn counter-clockwise.
        cmd.angular.z = turn_gain * left
        vel_1.publish(cmd)
# 360 degree turn
def turn():
begin=rospy.Time.now()
angular = random.choice([3.14,-3.14]) # 180 degrees in 0.5 seconds
linear = 0
cmd.angular.z = angular
cmd.linear.x = linear
# print('U Turn')
cmd.angular.z = angular
cmd.linear.x = linear
while((rospy.Time.now()-begin) < rospy.Duration(0.5)):
vel_1.publish(cmd)
t = 1
if(front < t or right < t or left < t):
turn()
if(front < t or right < t):
turn()
if(front < t or left < t):
turn()
# Check the Quadrant of Goal Pose wrt Robot
def quadrant_check(robot_pose,goal_pose):
x = robot_pose[0]
y = robot_pose[1]
X = goal_pose[0]
Y = goal_pose[1]
if(X-x > 0 and Y-y > 0):
return 1
if(X-x < 0 and Y-y > 0):
return 2
if(X-x < 0 and Y-y < 0):
return 3
if(X-x > 0 and Y-y < 0):
return 4
if(X-x == 0 and Y-y == 0):
return 0
def uturn(vel_1):
    """Spin roughly 360 degrees in place by publishing for half a second."""
    spin = geometry_msgs.msg.Twist()
    begin = rospy.Time.now()
    spin.angular.z = random.choice([6.3, -6.3])  # ~full turn over 0.5 s
    spin.linear.x = 0
    while (rospy.Time.now() - begin) < rospy.Duration(0.5):
        vel_1.publish(spin)
if __name__ == '__main__':
    rospy.init_node('pursuer')
    listener = tf.TransformListener() # Listening to transformation msg from stage
    sub = rospy.Subscriber('/robot_1/base_scan', LaserScan, callback) # Receive Laser Msg from /stage
    sub_odom = rospy.Subscriber('/robot_1/odom', Odometry, callback_odom)
    vel_1 = rospy.Publisher('/robot_1/cmd_vel', geometry_msgs.msg.Twist,queue_size=10) # Publish Command to robot_1
    rate = rospy.Rate(10.0)
    begin = rospy.get_time()
    while not rospy.is_shutdown():
        try:
            # World-frame poses of the evader (robot_0) and pursuer (robot_1).
            (trans_0,rot_0) = listener.lookupTransform('world', '/robot_0/odom', rospy.Time(0)) # robot_0 translation and rotation
            (trans_1,rot_1) = listener.lookupTransform('world', '/robot_1/odom', rospy.Time(0)) # robot_1 translation and rotation
            # print('robot_0 Trans-',trans_0,' Rot-',rot_0)
            # print('robot_1 Trans-',trans_1,' Rot-',rot_1)
            # print('\n')
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            continue
        # Update Previous Location every second
        if((rospy.get_time()-begin) > 0.5):
            prev_location = [trans_0[0],trans_0[1]]
            begin = rospy.get_time()
        # Pursuer Obstacle Avoidance System
        obstacle_tolerence = 1
        if(front < obstacle_tolerence or right < obstacle_tolerence or left < obstacle_tolerence):
            Obstacle_avoid(vel_1)
        # Persuer Velocity Computation: chase the evader's last known spot.
        cmd = geometry_msgs.msg.Twist()
        linear,angular = vel_compute(pursuer_location,pursuer_rot,prev_location)
        cmd.linear.x = linear
        cmd.angular.z = angular
        vel_1.publish(cmd)
        rate.sleep()
    # rospy.spin()
| [
"mohamedfazilsulaiman@gmail.com"
] | mohamedfazilsulaiman@gmail.com |
b6878adbe31ff098d212e885005ceb7b80104e5d | f99240b18be16cce9b01704948fb670ddea9a100 | /main.py | 3b20512c66ec7a656be4ddc473eb2934aca0f6c6 | [] | no_license | dogusural/trade-summary | 419972f93a97d60eb08831be866dc544c468ca4a | e7be5c35b1930cbbc66f6d4dea5fdbf81d28aa51 | refs/heads/master | 2023-02-13T21:32:39.502217 | 2021-01-04T22:53:14 | 2021-01-04T22:53:14 | 326,517,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | import hmac
import hashlib
import time
import requests
import json
import configs
# Query Binance's signed "myTrades" endpoint for one pair and pretty-print
# the JSON response.
setup = configs.setup()
API_URL = "api/v3/myTrades"
PAIR = "ETHUSDT"
# Signed Binance endpoints require a millisecond timestamp.
TIMESTAMP = int(round(time.time() * 1000))
# BUGFIX: the query string was corrupted -- the "timestamp" parameter name had
# collapsed into a single multiplication-sign character (an HTML-entity
# mangling), so the mandatory timestamp parameter was never sent.
AVAX_BODY = "symbol=" + PAIR + "&recvWindow=60000&timestamp=" + str(TIMESTAMP)
# HMAC-SHA256 of the query string with the API secret, as Binance requires.
signature = hmac.new(setup.get_api_secret().encode(), AVAX_BODY.encode(), hashlib.sha256).hexdigest()
BODY = setup.get_api_url() + API_URL + "?" + AVAX_BODY + "&signature=" + signature
x = requests.get(BODY, headers={"X-MBX-APIKEY": setup.get_api_key()})
# Round-trip through json for stable, human-readable output.
json_object = json.loads(x.text)
json_formatted_str = json.dumps(json_object, sort_keys=True, indent=2)
print(json_formatted_str)
| [
"dogusural@gmail.com"
] | dogusural@gmail.com |
fb5386db562a566e74bd8eb37a6cae500e17cb19 | 821e093f396a675a2c141ec4c5b510870ce58841 | /image_classifier.py | d63719d08a1d5c5e63d00261f2c9c493ad2680b2 | [] | no_license | cyrusclarke/sad | 5a7ceec2dd9c5b31625342042c3b483e8f3da906 | 0d5b0826cf3346ec94663a2e9e4bdf3475be023f | refs/heads/master | 2021-08-07T18:21:38.835382 | 2017-11-08T18:06:52 | 2017-11-08T18:06:52 | 109,606,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | import numpy as np
import base64
from flask import Flask, abort, jsonify, render_template, request, make_response
import cPickle as pickle
from io import BytesIO
import vgg16
from vgg16 import Vgg16
from skimage import io as skio
from skimage.transform import resize
app = Flask(__name__)
# Load the pickled classifier once at startup.  BUGFIX: use a context manager
# so the file handle is closed promptly instead of leaking.
with open("data.pkl", "rb") as model_file:
    my_model = pickle.load(model_file)
#upload image template
@app.route("/")
def index():
    """Serve the image-upload form."""
    template_name = "upload.html"
    return render_template(template_name)
#call api
@app.route('/api', methods=['POST'])
def make_predict():
    """Decode a base64 image from the POSTed JSON and return the model's
    prediction as a plain-text response."""
    # get the base64-encoded image from the POSTed JSON body
    data = request.get_json(silent=True)['image']
    # BUGFIX: "console.log(data)" was JavaScript that leaked into this Python
    # handler and raised NameError at runtime.
    print(data)
    # Decode the payload and keep a single channel, then scale to the 224x224
    # input size the model expects.
    # NOTE(review): [:, :, 3] selects the alpha channel -- confirm this is the
    # intended channel for the classifier.
    img = skio.imread(BytesIO(base64.b64decode(data)))[:, :, 3]
    img = resize(img, (224, 224))
    # BUGFIX: the original called my_model.predict(x, True) but "x" was never
    # defined; the preprocessed image is what should be classified.
    number = my_model.predict(img, True)
    print(number)
    return make_response(str(number))
if __name__ == '__main__':
    # Start Flask's development server when run as a script.
    app.run()
"cyrusjclarke@gmail.com"
] | cyrusjclarke@gmail.com |
fef6724427f4484ae33fcae3bc0446018c9f46c8 | 8bf3706b166a780b1257190c7c04b3c715ed9bb9 | /prikaz.py | f0fac7413dc2c8fb8863a014fdc8becc141c3eb9 | [] | no_license | filipbartek6417/cviceniaMaturitySEN | 8ae1a38b7ecff4b4ed9801fa11881540fb6d6427 | e99b428b4ae8f7ef9bce4725cf604817cfef5758 | refs/heads/master | 2023-03-29T20:55:53.675486 | 2021-04-04T07:21:48 | 2021-04-04T07:21:48 | 351,505,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | class Prikaz():
def __init__(self,**kwargs):
for key in kwargs.keys():
self.__setattr__(key,kwargs[key])
| [
"66736221+filipbartek6417@users.noreply.github.com"
] | 66736221+filipbartek6417@users.noreply.github.com |
9735e43307e1d83c7caaae220326c7390fa42417 | 2662d3a5d2f19248089d346850197f47d09ed80d | /LevelEditor/scripts/world/layer.py | 5259ac5a6938bbe85be68e5d0bd05ecbfbb86c03 | [] | no_license | pratripat/Zombie-World | c5ad1dbf5aad61dfbee68c81b2a5548fd1825ae5 | 166cb9878ef148d338bf84a90684ed151a093b95 | refs/heads/master | 2023-03-31T17:30:01.531287 | 2021-04-09T08:01:11 | 2021-04-09T09:51:43 | 352,869,435 | 1 | 0 | null | 2021-04-09T09:51:44 | 2021-03-30T04:25:55 | Python | UTF-8 | Python | false | false | 2,742 | py | from LevelEditor.settings import *
from .image import Image
class Layer:
    """One tile layer of the level editor: holds placed images, supports
    painting, flood fill, autotiling and a cooldown-limited undo history.

    Relies on module globals from LevelEditor.settings (``screen``, ``res``,
    ``scroll``, ``selection``).
    """

    def __init__(self, n):
        # n: this layer's index/ordering within the level.
        self.images = []
        self.updates = []
        self.undo_cooldown = 0
        self.initial_undo_cooldown = 50
        self.n = n

    def show(self, surface=screen):
        """Render every image and tick down the undo cooldown."""
        #Renders images
        for image in self.images:
            image.show(surface)
        if self.undo_cooldown > 0:
            self.undo_cooldown -= 1

    def add_image(self, position):
        """Place the currently selected image at the given screen position.

        Returns the placed Image, replacing any image already on that tile.
        Returns None when nothing is selected in the selector panel.
        """
        #Adds an image if there is already a selected image from the selector panel
        if selection['image']:
            # Convert screen position + scroll into tile coordinates.
            j, i = (position[0]+scroll[0])//res, (position[1]+scroll[1])//res
            image = self.get_image_with_index(i, j)
            if image:
                img = Image(j, i, j*res, i*res)
                self.images.append(img)
                self.images.remove(image)
                return img
            else:
                image = Image(j, i, j*res, i*res)
                self.images.append(image)
                return image

    def fill(self, position):
        """Place an image at *position*, then flood-fill from it."""
        #Fills images at the required location
        image = self.add_image(position)
        image.fill(self.images)

    def autotile(self, images, selector_panel):
        """Auto-tile each given image against its selector-panel variants."""
        #Auto tile all the images within the rectangle
        for image in images:
            selector_panel_images = selector_panel.get_image_with_id(image.id)
            image.autotile(self.images, selector_panel_images)

    def update(self):
        """Snapshot the current images for later undoing."""
        #Adds a copy of images for later undoing
        if selection['image']:
            images = []
            for image in self.images:
                img = Image(image.j, image.i, image.position[0], image.position[1], {'id': image.id, 'image': image.image, 'index': image.index})
                img.image = image.image
                img.id = image.id
                img.autotile_config = image.autotile_config
                images.append(img)
            self.updates.append(images)
            # NOTE(review): this slice is a no-op -- it was probably meant to
            # be ``self.updates = self.updates[-200:]`` to cap the history.
            self.updates[-200:]

    def undo(self):
        """Restore the most recent snapshot, rate-limited by the cooldown."""
        #Undos
        if self.undo_cooldown == 0 and len(self.updates) != 0:
            self.images = self.updates.pop()
            self.undo_cooldown = self.initial_undo_cooldown

    def remove(self, images):
        """Remove each of the given images from this layer, if present."""
        #Removes images within the rectangle
        for image in images:
            if image in self.images[:]:
                self.images.remove(image)

    def get_image_with_index(self, i, j):
        """Return the image occupying tile (i, j), or None."""
        #Returns images within rectangle
        for image in self.images:
            if image.i == i and image.j == j:
                return image
        return None

    def is_empty(self):
        """True when the layer currently holds no images."""
        #Returns if the layer has no images currently
        return len(self.images) == 0
| [
"pratyusht2006@gmail.com"
] | pratyusht2006@gmail.com |
9d7cf6aff297506f636428324119d19515dbf1c1 | 4a5999ce904734a0d7beeeec04edf63f92bc1ddf | /CodeCraft-2019/src/data_temp.py | 17e6ce9d0f8e0718cf1b7613655faed5c24d041c | [] | no_license | xixiU/HuaWeiCodecraft | 746b47e0cf2c0de232c2d7b802e5c3c291e01ae0 | 21bcd94c3c59fc96ee465951904b43a29fda31d5 | refs/heads/master | 2023-01-23T10:09:33.904085 | 2019-04-20T02:35:03 | 2019-04-20T02:35:03 | 176,402,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,141 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-03-20 09:29:45
# @Author : yuan
# @Version : 1.0.0
# @describe:
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 16 09:35:21 2019
@author: ym
"""
import pandas as pd
#from dijkstra import DijkstraExtendPath
import numpy as np
from io import StringIO
class DijkstraExtendPath():
    """Callable shortest-path solver over a dense adjacency matrix.

    ``node_map[i][j]`` holds the edge weight from node i to node j; 0 (or any
    falsy value) means "no edge".  Calling the instance with
    ``(from_node, to_node)`` returns the cheapest path as a list of
    ``(node_index, cumulative_cost)`` tuples, start first.

    NOTE(review): the recursive relaxation only terminates once enough nodes
    have been visited, so callers must supply a graph where the target is
    reachable from the start.
    """

    def __init__(self, node_map):
        self.node_map = node_map
        self.node_length = len(node_map)
        self.used_node_list = []
        self.collected_node_dict = {}

    def __call__(self, from_node, to_node):
        """Return the cheapest path from *from_node* to *to_node*."""
        self.from_node = from_node
        self.to_node = to_node
        # BUGFIX: reset the search state so the same instance can be called
        # more than once; previously stale state from an earlier query made a
        # second call recurse without bound.
        self.used_node_list = []
        self.collected_node_dict = {}
        self._init_dijkstra()
        return self._format_path()

    def _init_dijkstra(self):
        # Seed the search with the start node (cost 0, no parent) and its
        # direct neighbours, then relax until done.
        self.used_node_list.append(self.from_node)
        self.collected_node_dict[self.from_node] = [0, -1]
        for index1, node1 in enumerate(self.node_map[self.from_node]):
            if node1:
                self.collected_node_dict[index1] = [node1, self.from_node]
        self._foreach_dijkstra()

    def _foreach_dijkstra(self):
        # Recursively relax edges out of every newly reached node.
        if len(self.used_node_list) == self.node_length - 1:
            return
        # Iterate a snapshot: the dict is mutated during relaxation below.
        temp_dic = self.collected_node_dict.copy()
        for key, val in temp_dic.items():  # visit nodes that already have a tentative cost
            if key not in self.used_node_list and key != self.to_node:
                self.used_node_list.append(key)
            else:
                continue
            for index1, node1 in enumerate(self.node_map[key]):  # relax each outgoing edge
                # Improve an existing tentative cost, or record a first one.
                if node1 and index1 in self.collected_node_dict and self.collected_node_dict[index1][0] > node1 + val[0]:
                    self.collected_node_dict[index1][0] = node1 + val[0]
                    self.collected_node_dict[index1][1] = key
                elif node1 and index1 not in self.collected_node_dict:
                    self.collected_node_dict[index1] = [node1 + val[0], key]
        self._foreach_dijkstra()

    def _format_path(self):
        # Walk parent links back from the target (-1 marks the start node),
        # then reverse so the path reads start -> target.
        node_list = []
        temp_node = self.to_node
        node_list.append((temp_node, self.collected_node_dict[temp_node][0]))
        while self.collected_node_dict[temp_node][1] != -1:
            temp_node = self.collected_node_dict[temp_node][1]
            node_list.append((temp_node, self.collected_node_dict[temp_node][0]))
        node_list.reverse()
        return node_list
class solve:
    """Load the car/cross/road tables and compute a route for every car."""

    def __init__(self, car_txt, cross_txt, road_txt):
        # Rows look like "#(a,b,...)"; the regex separator splits on commas
        # and strips the wrapping hash/parentheses (raw strings keep the
        # backslashes literal for the regex engine).
        sep = r',|\#\(|\(|\)'
        self.car = pd.read_csv(car_txt, sep=sep, engine='python')
        self.cross = pd.read_csv(cross_txt, sep=sep, engine='python')
        self.road = pd.read_csv(road_txt, sep=sep, engine='python')
        self.node = self.cross['id'].tolist()
        self.node_map_dic = {}  # (from_idx, to_idx) -> cached path
        # Dense adjacency matrix, filled lazily per car in get_path().
        self.node_map = [[0 for _ in range(len(self.node))] for _ in range(len(self.node))]

    def processFirstLine(self, filepath):
        """Alternative loader: strip '()#' characters, then parse as CSV."""
        with open(filepath, 'r') as f_r:
            data = f_r.read()
        data = data.replace('(', '')
        data = data.replace(')', '')
        data = data.replace('#', '')
        return pd.read_csv(StringIO(data))

    def grt_map(self, one_car):
        """Build this car's weighted edge list: (from, to, travel_time)."""

        def travel_time(speed, lim_spd, length):
            # Time on a road = length / effective speed (capped by the limit).
            return length / min(speed, lim_spd)

        node_list = []
        for i in range(len(self.road)):
            # BUGFIX: DataFrame.ix was removed from pandas; .iloc keeps the
            # positional row lookup.
            row = self.road.iloc[i]
            node_list.append((row['from'], row['to'], travel_time(one_car['speed'], row['speed'], row['length'])))
            if row['isDuplex'] == 1:
                # Two-way road: add the reverse edge as well.
                node_list.append((row['to'], row['from'], travel_time(one_car['speed'], row['speed'], row['length'])))
        return node_list

    def get_path(self, one_car):
        """Return (and cache) the shortest path for *one_car*."""
        from_node = self.node.index(one_car['from'])
        to_node = self.node.index(one_car['to'])
        if (from_node, to_node) in self.node_map_dic:
            return self.node_map_dic[(from_node, to_node)]
        node = self.node
        node_list = self.grt_map(one_car)

        def set_node_map(node_map, node, node_list):
            # Write each edge weight into the dense adjacency matrix.
            for x, y, val in node_list:
                node_map[node.index(x)][node.index(y)] = val

        set_node_map(self.node_map, node, node_list)
        dijkstrapath = DijkstraExtendPath(self.node_map)
        path = dijkstrapath(from_node, to_node)
        self.node_map_dic[(from_node, to_node)] = path
        return path

    def get_pathes(self):
        """Shortest path for every car, in table order."""
        # BUGFIX: .ix removed from pandas; .iloc keeps the positional lookup.
        return [self.get_path(self.car.iloc[i]) for i in range(len(self.car))]
def main_process(car_path,road_path,cross_path,answer_path):
    """Compute a route for every car and write the answer file.

    Output format: one '(carId,StartTime,RoadId...)' tuple per line, where
    each consecutive cross pair on the path is translated to the road id
    shared by both crossings.  A random 0-999 offset is added to every
    car's plan time to spread out departures.
    """
    Solve=solve(car_path,cross_path,road_path)#car_txt,cross_txt,road_txt
    pathes=Solve.get_pathes()
    #(carId,StartTime,RoadId...)
    result =[]
    car_list = Solve.car['id']
    planTime_list = Solve.car['planTime']
    # getRoadList = lambda id :Solve.cross.loc[Solve.cross['id']==id].iloc[:,2:6].values.astype(np.int64)[0].tolist()
    # cross_road =dict()
    # for key,carvalue in enumerate(pathes):
    # one_path=[]
    # one_path.extend([car_list[key],planTime_list[key]+np.random.randint(0,10000)])
    # prevalue = carvalue[0][0]
    # for current_value,_time in carvalue[1:]:
    # if tuple([prevalue+1,current_value+1]) in cross_road.keys() or tuple([current_value+1,prevalue+1]) in cross_road.keys():
    # roadId = cross_road[tuple([prevalue+1,current_value+1])] if tuple([prevalue+1,current_value+1]) in cross_road.keys() else cross_road[tuple([current_value+1,prevalue+1])]
    # else:
    # roadId = list(set(getRoadList(prevalue+1))&set(getRoadList(current_value+1)))
    # cross_road[tuple([prevalue+1,current_value+1])] = roadId
    # if -1 in roadId:
    # roadId.remove(-1)
    # one_path.extend(roadId)
    # prevalue = current_value
    # result.append(tuple(one_path))
    # planTime_list = Solve.car['planTime']
    # Cache of (cross_a, cross_b) -> connecting road id(s).
    road_cross_id={}
    # Columns 2:6 of the cross table hold the four road ids at a crossing
    # (-1 marks an unused slot).
    getRoadList = lambda id :Solve.cross.loc[Solve.cross['id']==id].iloc[:,2:6].values.astype(np.int64)[0].tolist()
    for key,carvalue in enumerate(pathes):
        one_path=[]
        # Randomly delay the start time to spread departures out.
        one_path.extend([car_list[key],planTime_list[key]+np.random.randint(0,1000)])
        prevalue = carvalue[0][0]
        for current_value,_time in carvalue[1:]:
            # Path nodes are 0-based indices; cross ids are 1-based (+1).
            if (prevalue+1,current_value+1) in road_cross_id.keys():
                roadId=road_cross_id[(prevalue+1,current_value+1)]
            else:
                # The road joining two crossings is in both road lists.
                roadId = list(set(getRoadList(prevalue+1))&set(getRoadList(current_value+1)))
                road_cross_id[(prevalue+1,current_value+1)]=roadId
            if -1 in roadId:
                roadId.remove(-1)
            one_path.extend(roadId)
            prevalue = current_value
        result.append(tuple(one_path))
    with open(answer_path,'w') as f_w:
        f_w.write('#(carId,StartTime,RoadId...)\n')
        for t in result:
            f_w.write('('+ ','.join(str(s) for s in t) +')'+ '\n')
if __name__=='__main__':
    # NOTE(review): main_process declares (car_path, road_path, cross_path,
    # answer_path) but this call passes car, CROSS, ROAD -- the cross and
    # road files appear swapped relative to the parameter names; confirm
    # which ordering the tables are actually loaded with.
    main_process('~/Documents/code/competition/2019huawei/2019软挑-初赛-SDK/SDK/SDK_python/CodeCraft-2019/config/car.txt','~/Documents/code/competition/2019huawei/2019软挑-初赛-SDK/SDK/SDK_python/CodeCraft-2019/config/cross.txt','~/Documents/code/competition/2019huawei/2019软挑-初赛-SDK/SDK/SDK_python/CodeCraft-2019/config/road.txt','/home/xi/Documents/code/competition/2019huawei/2019软挑-初赛-SDK/SDK/SDK_python/CodeCraft-2019/src/answer.txt')
    # Solve=solve('../config/car.txt', '../config/road.txt',' ../config/cross.txt',' ../config/answer.txt')
    # pathes=Solve.get_pathes()
    # #(carId,StartTime,RoadId...)
    # result =[]
    # car_list = Solve.car['id']
    # planTime_list = Solve.car['planTime']
    # getRoadList = lambda id :Solve.cross.loc[Solve.cross['id']==id].iloc[:,2:6].values.astype(np.int64)[0].tolist()
    # for key,carvalue in enumerate(pathes):
    # one_path=[]
    # one_path.extend([car_list[key],planTime_list[key]])
    # prevalue = carvalue[0][0]
    # for current_value,_time in carvalue[1:]:
    # roadId = list(set(getRoadList(prevalue+1))&set(getRoadList(current_value+1)))
    # if -1 in roadId:
    # roadId.remove(-1)
    # one_path.extend(roadId)
    # prevalue = current_value
    # result.append(tuple(one_path))
    # with open('answer.txt','w') as f_w:
    # f_w.write('#(carId,StartTime,RoadId...)\n')
    # for t in result:
    # f_w.write('('+ ','.join(str(s) for s in t) +')'+ '\n')
| [
"2086774733@qq.com"
] | 2086774733@qq.com |
d76c92d2e6fa470195d605267db18d17b85826d6 | f16341f66d467cf21e4ff9826e508267770080b3 | /tasks.py | 84e3e3a5293ca05fcb8a18e0f06729a282f1d4c0 | [] | no_license | ianonavy/celery-redis-example | 37b27ebbe43ae63cf6bafbd2309a1d15a68439ad | 6bf873ffa81048cee5fa89c22baff504886dbaf2 | refs/heads/master | 2020-03-11T15:22:32.768660 | 2018-04-18T15:21:04 | 2018-04-18T15:21:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | import celery
celery_app = celery.Celery()
# Broker (task queue) and result backend both live on the "celery_redis"
# host, using separate Redis databases (0 and 1).
celery_app.conf.broker_url = 'redis://celery_redis:6379/0'
celery_app.conf.result_backend = 'redis://celery_redis:6379/1'
@celery_app.task
def add(a, b):
    """Demo task: wait five seconds, then return the sum of the operands."""
    print("Starting task 1")
    import time
    time.sleep(5)
    print("Finished task 1")
    total = a + b
    return total
@celery_app.task
def step2(total):
    """Demo follow-up task: wait five seconds, then negate the total."""
    import time
    time.sleep(5)
    return -total
| [
"ian@everquote.com"
] | ian@everquote.com |
74c1f4bbb34d65beac68174e7c7ab0e18c3f36e6 | f63c4eb29ce57319441f5469d1d049b63bc220de | /swu_cycle_variance/run333.py | 96b5bc1ea46f182e5b80cfcab1a1271e253d1f12 | [] | no_license | a-co/diversion_models | 0237642153668b16035699e9e734ff0538568582 | 69eed2687b1cd2b48f5717d15919eccd24a0eabc | refs/heads/main | 2023-05-02T19:04:26.333677 | 2020-06-18T20:50:18 | 2020-06-18T20:50:18 | 216,904,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,361 | py | SIMULATION = {'simulation': {'agent': [{'name': 'deployer_military', 'prototype': 'military_deployer'}, {'name': 'deployer_civilian', 'prototype': 'civilian_deployer'}, {'name': 'deployer_shared', 'prototype': 'shared_deployer'}], 'archetypes': {'spec': [{'lib': 'cycamore', 'name': 'DeployInst'}, {'lib': 'cycamore', 'name': 'Source'}, {'lib': 'cycamore', 'name': 'Sink'}, {'lib': 'cycamore', 'name': 'Storage'}, {'lib': 'cycamore', 'name': 'Reactor'}, {'lib': 'cycamore', 'name': 'Separations'}, {'lib': 'cycamore', 'name': 'Enrichment'}]}, 'control': {'duration': '144', 'explicit_inventory': 'true', 'startmonth': '1', 'startyear': '2020'}, 'prototype': [{'config': {'Source': {'inventory_size': '1e30', 'outcommod': 'u_ore', 'outrecipe': 'r_u_ore', 'throughput': '1e10'}}, 'name': 'mine'}, {'config': {'Separations': {'feed_commod_prefs': {'val': ['1.0', '10.0', '100.0']}, 'feed_commods': {'val': ['u_ore', 'u_ore1', 'u_ore2']}, 'feedbuf_size': '2e8', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'u_nat', 'info': {'buf_size': '150000', 'efficiencies': {'item': [{'comp': 'U', 'eff': '.99'}, {'comp': 'O', 'eff': '.99'}]}}}}, 'throughput': '2e8'}}, 'name': 'milling'}, {'config': {'Separations': {'feed_commod_prefs': {'val': '1.0'}, 'feed_commods': {'val': 'u_nat'}, 'feedbuf_size': '200000', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'uf6', 'info': {'buf_size': '200000', 'efficiencies': {'item': {'comp': 'U', 'eff': '.99'}}}}}, 'throughput': '200000'}}, 'name': 'conversion'}, 
{'config': {'Enrichment': {'feed_commod_prefs': {'val': '1'}, 'feed_commods': {'val': 'uf6'}, 'feed_recipe': 'r_uox', 'max_feed_inventory': '20000', 'product_commod': 'mil_fiss', 'swu_capacity': '17314.83583510541', 'tails_assay': '0.003', 'tails_commod': 'mil_u_dep'}}, 'name': 'mil_enrichment'}, {'config': {'Storage': {'in_commods': {'val': 'mil_u_dep'}, 'out_commods': {'val': 'mil_u_dep_str'}, 'residence_time': '0'}}, 'name': 'mil_str_u_dep'}, {'config': {'Storage': {'in_commod_prefs': {'val': '1'}, 'in_commods': {'val': 'uf6'}, 'in_recipe': 'r_mil_uox', 'max_inv_size': '30000', 'out_commods': {'val': 'mil_uox'}, 'residence_time': '0'}}, 'name': 'mil_uox_fabrication'}, {'config': {'Reactor': {'assem_size': '14000', 'cycle_time': '-6', 'fuel_incommods': {'val': 'mil_uox'}, 'fuel_inrecipes': {'val': 'r_mil_uox'}, 'fuel_outcommods': {'val': 'mil_uox_spent'}, 'fuel_outrecipes': {'val': 'r_mil_uox_spent'}, 'fuel_prefs': {'val': '1'}, 'n_assem_batch': '1', 'n_assem_core': '1', 'power_cap': '0.15', 'refuel_time': '0'}}, 'lifetime': '960', 'name': 'mil_lwr'}, {'config': {'Storage': {'in_commods': {'val': 'mil_mox_spent'}, 'out_commods': {'val': 'mil_mox_spent_str'}, 'residence_time': '60'}}, 'name': 'mil_str_mox_spent'}, {'config': {'Separations': {'feed_commod_prefs': {'val': '1.0'}, 'feed_commods': {'val': 'mil_uox_spent'}, 'feedbuf_size': '30000000000', 'leftover_commod': 'waste', 'streams': {'item': {'commod': 'mil_fiss', 'info': {'buf_size': '3000000000', 'efficiencies': {'item': {'comp': 'Pu', 'eff': '.95'}}}}}, 'throughput': '1e100'}}, 'name': 'reprocessing'}, {'config': {'Storage': {'in_commod_prefs': {'val': '10'}, 'in_commods': {'val': 'mil_fiss'}, 'in_recipe': 'r_mil_heu', 'max_inv_size': '1e100', 'out_commods': {'val': 'mil_heu'}, 'residence_time': '0'}}, 'name': 'mil_str_fiss'}, {'config': {'Enrichment': {'feed_commod_prefs': {'val': ['1', '20']}, 'feed_commods': {'val': ['uf6', 'mil_uf6']}, 'feed_recipe': 'r_natl_u', 'max_feed_inventory': '100000', 
'product_commod': 'civ_leu', 'swu_capacity': '35000', 'tails_assay': '0.003', 'tails_commod': 'u_dep'}}, 'name': 'civ_enrichment'}, {'config': {'Storage': {'in_commods': {'val': 'u_dep'}, 'out_commods': {'val': 'u_dep_str'}, 'residence_time': '0'}}, 'name': 'civ_str_u_dep'}, {'config': {'Storage': {'in_commod_prefs': {'val': '1000'}, 'in_commods': {'val': 'civ_leu'}, 'in_recipe': 'r_uox', 'max_inv_size': '30000', 'out_commods': {'val': 'uox'}, 'residence_time': '1'}}, 'name': 'civ_fabrication'}, {'config': {'Reactor': {'assem_size': '29565', 'cycle_time': '18', 'fuel_incommods': {'val': 'uox'}, 'fuel_inrecipes': {'val': 'r_uox'}, 'fuel_outcommods': {'val': 'uox_spent'}, 'fuel_outrecipes': {'val': 'r_uox_spent'}, 'n_assem_batch': '1', 'n_assem_core': '3', 'power_cap': '900', 'refuel_time': '0'}}, 'lifetime': '960', 'name': 'civ_lwr'}, {'config': {'Storage': {'in_commods': {'val': 'uox_spent'}, 'out_commods': {'val': 'uox_spent_str'}, 'residence_time': '60'}}, 'name': 'civ_str_uox_spent'}, {'config': {'DeployInst': {'build_times': {'val': ['37', '37', '61', '73']}, 'n_build': {'val': ['1', '1', '1', '1']}, 'prototypes': {'val': ['mil_enrichment', 'mil_str_u_dep', 'mil_uox_fabrication', 'mil_str_fiss']}}}, 'name': 'military_deployer'}, {'config': {'DeployInst': {'build_times': {'val': ['121', '121', '121', '145', '157', '169']}, 'n_build': {'val': ['1', '1', '1', '1', '1', '1']}, 'prototypes': {'val': ['civ_enrichment', 'civ_str_u_dep', 'civ_fabrication', 'civ_lwr', 'civ_str_uox_spent', 'civ_lwr']}}}, 'name': 'civilian_deployer'}, {'config': {'DeployInst': {'build_times': {'val': ['1', '1', '1']}, 'n_build': {'val': ['1', '1', '1']}, 'prototypes': {'val': ['mine', 'milling', 'conversion']}}}, 'name': 'shared_deployer'}], 'recipe': [{'basis': 'mass', 'name': 'r_u_ore', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}, {'comp': '999', 'id': '120240000'}]}, {'basis': 'mass', 'name': 'r_natl_u', 'nuclide': [{'comp': '0.0071', 'id': 
'922350000'}, {'comp': '0.9929', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_uox', 'nuclide': [{'comp': '0.05', 'id': '922350000'}, {'comp': '0.95', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_uox_spent', 'nuclide': [{'comp': '0.01', 'id': '922350000'}, {'comp': '0.94', 'id': '922380000'}, {'comp': '0.01', 'id': '942390000'}, {'comp': '0.001', 'id': '952410000'}, {'comp': '0.03', 'id': '551350000'}]}, {'basis': 'mass', 'name': 'r_mil_uox', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9929', 'id': '922380000'}]}, {'basis': 'mass', 'name': 'r_mil_uox_spent', 'nuclide': [{'comp': '0.0071', 'id': '922350000'}, {'comp': '0.9919', 'id': '922380000'}, {'comp': '0.001', 'id': '942390000'}]}, {'basis': 'mass', 'name': 'r_mil_heu', 'nuclide': [{'comp': '0.90', 'id': '922350000'}, {'comp': '0.10', 'id': '922380000'}]}]}} | [
"acaldwel@wellesley.edu"
] | acaldwel@wellesley.edu |
8d1e038e0f78317b6d28ea2cc9af6ca9ec4a162f | d61420b44407ada9cf9dd469621f57e945c86f96 | /ubuntu18-helper-scripts-from-gitlab/helper-scripts-master/speedTestDataScripts/networkDataGatherer.py | 60aed7034084bca575aeac41f100b23312c605d5 | [] | no_license | lowkeyop/helper-scripts | 892ed8dd0722ce958e86233411c84e0f3204d6b9 | 046026b89a2c7a93873dd6d4ea8fa2f82597c402 | refs/heads/master | 2021-10-20T15:44:55.163679 | 2021-10-17T19:18:45 | 2021-10-17T19:18:45 | 112,807,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | import subprocess
# One-shot collector: runs speedtest-cli and appends the JSON result to a
# dated file under /share/data/networkData/.
# NOTE(review): Python 2 script (print statements); `import subprocess` is
# duplicated at the top of the file.
import os #used to build the output file path and create the data directory
import subprocess #to invoke the speedtest-cli command
import time #to timestamp the output file name (one file per day)
timeStamp=time.strftime("%m-%d-%Y")
baseDir="/share/data/networkData/"
fileName = "speedTest" + timeStamp + ".json"
filePath = baseDir + fileName
print "Getting speed test data"
if not os.path.exists(baseDir):
    print "creating missing directory to store data"
    os.makedirs(baseDir)
# --json makes speedtest-cli emit machine-readable output; --share also
# requests a shareable result link in that output.
cmd = ["speedtest-cli", "--json", "--share"]
p = subprocess.Popen( cmd, stdout=subprocess.PIPE).communicate()[0]
out = p
# 'a+' appends, so repeated runs on the same day accumulate in one file.
f = open( filePath, 'a+')
f.write(p)
f.close()
print out
| [
"ckennerly9@gmail.com"
] | ckennerly9@gmail.com |
854b2d6ef9b6a28a86fbd279304f53ccb47bc58f | 428a163914797f736f6010a7122c713ec808735d | /main/foodcourt/migrations/0007_auto_20210709_1314.py | ba90b79de3952ae62ae20cbe755c3abee7e03a2e | [] | no_license | jasshanda1/foodcourt | 04f4cd18900fdfcf8a2adaeea0c48cb10b61e5c9 | 230dd5a43a2eafde13ad92a0e1790d6d82689ca6 | refs/heads/main | 2023-06-20T01:01:43.737378 | 2021-07-13T17:32:55 | 2021-07-13T17:32:55 | 384,517,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | # Generated by Django 3.2.5 on 2021-07-09 13:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: link orders to users and re-declare Order.name.

    NOTE(review): default=0 on the new FK assumes a User row with pk 0 exists
    when existing Order rows are backfilled - confirm against the data.
    """
    dependencies = [
        ('foodcourt', '0006_alter_order_name'),
    ]
    operations = [
        # New FK from Order to User; CASCADE deletes orders with their user.
        migrations.AddField(
            model_name='order',
            name='username',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='foodcourt.user'),
        ),
        # Re-declare Order.name as a plain CharField(max_length=100).
        migrations.AlterField(
            model_name='order',
            name='name',
            field=models.CharField(max_length=100),
        ),
    ]
| [
"jaskaranhanda3099@gmail.com"
] | jaskaranhanda3099@gmail.com |
7935285b6302c1b7277f4c9d4939535c9636fe0d | 159da3fc63ccf20b80dc17bb44b53e9a5578bcfd | /arkav_is_api/arkavauth/migrations/0005_refactor_auth.py | f92058d0d724b71e6dab65b111c6f5e8e2a4a7d7 | [
"MIT"
] | permissive | arkavidia5/arkav-is | 4338829e7c0a9446393545316e46395e9df111fd | 6c6e8d091ead5bfff664d86f7903c62209800031 | refs/heads/master | 2021-07-16T03:49:15.900812 | 2019-02-08T18:08:32 | 2019-02-08T18:08:32 | 149,406,261 | 3 | 2 | MIT | 2018-11-09T16:49:17 | 2018-09-19T06:58:16 | Python | UTF-8 | Python | false | false | 2,458 | py | import arkav_is_api.arkavauth.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auth refactor: normalise confirmation-attempt naming and add email tracking.

    Renames the two attempt models to *ConfirmationAttempt, switches boolean
    fields to an is_* prefix, and adds an email_last_sent_at timestamp so
    confirmation emails can be rate-limited/re-sent.
    """
    dependencies = [
        ('arkavauth', '0004_user_email_confirmed'),
    ]
    operations = [
        # Model renames to the new *ConfirmationAttempt scheme.
        migrations.RenameModel('EmailConfirmationAttempt', 'RegistrationConfirmationAttempt'),
        migrations.RenameModel('PasswordResetAttempt', 'PasswordResetConfirmationAttempt'),
        # Boolean fields renamed to is_* style.
        migrations.RenameField(
            model_name='user',
            old_name='email_confirmed',
            new_name='is_email_confirmed',
        ),
        migrations.RenameField(
            model_name='passwordresetconfirmationattempt',
            old_name='used',
            new_name='is_confirmed',
        ),
        migrations.RenameField(
            model_name='registrationconfirmationattempt',
            old_name='confirmed',
            new_name='is_confirmed',
        ),
        # Track when the confirmation email was last sent (nullable: never sent).
        migrations.AddField(
            model_name='passwordresetconfirmationattempt',
            name='email_last_sent_at',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='registrationconfirmationattempt',
            name='email_last_sent_at',
            field=models.DateTimeField(blank=True, null=True),
        ),
        # Tokens now default to the shared generator; each user gets at most one
        # attempt of each kind (OneToOneField).
        migrations.AlterField(
            model_name='passwordresetconfirmationattempt',
            name='token',
            field=models.CharField(default=arkav_is_api.arkavauth.models.generate_email_confirmation_token, max_length=30),
        ),
        migrations.AlterField(
            model_name='passwordresetconfirmationattempt',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='password_reset_confirmation_attempt', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='registrationconfirmationattempt',
            name='token',
            field=models.CharField(default=arkav_is_api.arkavauth.models.generate_email_confirmation_token, max_length=30),
        ),
        migrations.AlterField(
            model_name='registrationconfirmationattempt',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='registration_confirmation_attempt', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"jonathan.christopher@outlook.com"
] | jonathan.christopher@outlook.com |
15b02c9afb099bd7fd8c150df2fecd2731735b0c | 05513b528119d92863c092bfa836508998cb92be | /theory/truthtable.py | c6ce2563be6658234daef0a719114bde4e0764d0 | [] | no_license | lola-lambda/Sprint-Challenge--Hash-Theory | 62e10c458beb52cf870cc79643435693dd27775a | 812d3705414bcdd4ef692b2c0445203c940493fb | refs/heads/master | 2020-05-15T06:45:43.604726 | 2019-04-01T21:25:34 | 2019-04-01T21:25:34 | 182,129,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | # !(A || B) || ( (A || C) && !(B || !C) )
# Python translation of the C-style boolean expression in the header comment:
# not (A or B) or ((A or C) and not (B or not C))
# Enumerate all 2**3 combinations of A, B, C and print one truth-table row each.
for A in [False, True]:
    for B in [False, True]:
        for C in [False, True]:
            print(f"A: {A} B: {B} C: {C} ---- {not (A or B) or ((A or C) and not (B or not C))}")
"lolahef@gmail.com"
] | lolahef@gmail.com |
d57bafa6b041e14b363221f5424fcc938e2a081a | 4d21da5a3d07f4d05b997e80119cd79692ac0d25 | /Leetcode/201-300/259. 3Sum Smaller.py | fc6828a904244248c20e44b9f93c23460bea2b66 | [] | no_license | ErinC123/Algorithm | 92b2789ec3b36c49f9e65f2e7a702bb4b732e8ba | 4544fee91e811a6625000921c32ad054df550f1e | refs/heads/master | 2021-06-17T14:03:33.955233 | 2017-06-18T21:20:55 | 2017-06-18T21:20:55 | 75,894,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | class Solution(object):
def threeSumSmaller(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
nums.sort()
ret = 0
for i in range(len(nums)):
j, k = i+1, len(nums)-1
while j < k:
if nums[i]+nums[j]+nums[k] < target:
ret += k-j
j += 1
else:
k -= 1
return ret | [
"zhencao93@gmail.com"
] | zhencao93@gmail.com |
bd953c2791a25f282deb6ac1ef7635111c439e37 | 9109eac9b0907a9e586129b49fd1d84d6bb9dc16 | /baseline.py | fe540177edd6f2f524b8612d32c2f82abbe44a8d | [] | no_license | mjp92067/text-generation | 5d34edcb9b1e4a7039fc3916695eefacf1d84424 | 20000eb95d776cc17dba607c983be37f38264a6c | refs/heads/master | 2023-05-24T08:41:13.504014 | 2019-07-05T18:02:52 | 2019-07-05T18:02:52 | 195,446,016 | 0 | 0 | null | 2023-05-22T22:16:31 | 2019-07-05T17:28:42 | Python | UTF-8 | Python | false | false | 2,244 | py | import os
import json
import fire
import numpy as np
from scipy import sparse
from sklearn.model_selection import PredefinedSplit, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
def _load_split(data_dir, source, split, n=np.inf):
    """Load up to `n` text samples from `<data_dir>/<source>.<split>.jsonl`.

    Each line is a JSON object with a 'text' field; returns the text values
    in file order.
    """
    path = os.path.join(data_dir, f'{source}.{split}.jsonl')
    texts = []
    # Context manager closes the handle deterministically; the original
    # iterated a bare open() and leaked the file descriptor.
    with open(path) as f:
        for i, line in enumerate(f):
            if i >= n:
                break
            texts.append(json.loads(line)['text'])
    return texts
def load_split(data_dir, source, split, n=np.inf):
    """Build a balanced detection split: n//2 real 'webtext' samples (label 0)
    followed by n//2 generated `source` samples (label 1)."""
    webtext = _load_split(data_dir, 'webtext', split, n=n//2)
    gen = _load_split(data_dir, source, split, n=n//2)
    texts = webtext + gen
    labels = [0]*len(webtext) + [1]*len(gen)
    return texts, labels
def main(data_dir, log_dir, source='xl-1542M-k40', n_train=500000, n_valid=10000, n_jobs=None, verbose=False):
    """Train and evaluate a TF-IDF + logistic-regression detector for `source`.

    Loads train/valid/test splits from `data_dir`, grid-searches the inverse
    regularisation strength C on the predefined train/valid split, refits the
    best model on the training set only, and writes the accuracies to
    `<log_dir>/<source>.json`.
    """
    train_texts, train_labels = load_split(data_dir, source, 'train', n=n_train)
    valid_texts, valid_labels = load_split(data_dir, source, 'valid', n=n_valid)
    test_texts, test_labels = load_split(data_dir, source, 'test')
    # Word uni/bigrams, vocabulary capped at 2**21 features.
    vect = TfidfVectorizer(ngram_range=(1, 2), min_df=5, max_features=2**21)
    train_features = vect.fit_transform(train_texts)
    valid_features = vect.transform(valid_texts)
    test_features = vect.transform(test_texts)
    model = LogisticRegression(solver='liblinear')
    params = {'C': [1/64, 1/32, 1/16, 1/8, 1/4, 1/2, 1, 2, 4, 8, 16, 32, 64]}
    # -1 marks rows only ever used for training; 0 marks the one validation fold.
    # NOTE(review): assumes the jsonl files really contain n_train/n_valid
    # examples; PredefinedSplit's length would mismatch otherwise - confirm.
    split = PredefinedSplit([-1]*n_train+[0]*n_valid)
    search = GridSearchCV(model, params, cv=split, n_jobs=n_jobs, verbose=verbose, refit=False)
    search.fit(sparse.vstack([train_features, valid_features]), train_labels+valid_labels)
    # Refit with the best C on the training features only.
    model = model.set_params(**search.best_params_)
    model.fit(train_features, train_labels)
    valid_accuracy = model.score(valid_features, valid_labels)*100.
    test_accuracy = model.score(test_features, test_labels)*100.
    data = {
        'source':source,
        'n_train':n_train,
        'valid_accuracy':valid_accuracy,
        'test_accuracy':test_accuracy
    }
    print(data)
    # Context manager flushes and closes the log file; the original passed a
    # bare open() to json.dump and leaked the handle.
    with open(os.path.join(log_dir, f'{source}.json'), 'w') as f:
        json.dump(data, f)
if __name__ == '__main__':
fire.Fire(main) | [
"woomniazhko@gmail.com"
] | woomniazhko@gmail.com |
78532231f0563364803c38e1461c1364b169d31a | e25dc1f801ff9ae8d2baaffc9c726339437a7ff8 | /dbms_submissions/dbms_assignment_004/query.py | 92ac497b519520830a1f12d678bca3e0f6a3152e | [] | no_license | naresh508/dbms | e724636fb26f493b1e61eeaf5182e5c98dd3c0e7 | d0733743137a86ca0c1101a7ae6527e39b12d393 | refs/heads/master | 2022-08-24T07:34:00.634978 | 2020-05-27T11:56:45 | 2020-05-27T11:56:45 | 267,305,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | Q1="SELECT COUNT(id) from Movie WHERE year=2002 AND name LIKE 'ha%' AND rank>2;"
# Assignment answer queries against the movie database. These are plain string
# constants; they are executed elsewhere.
# Q2: highest rank among 'Autom...' movies released in 1983 or 1994.
Q2="SELECT MAX(rank) from Movie WHERE name LIKE 'Autom%' AND (year=1983 OR year=1994);"
# Q3: count of male actors whose first name ends in 'ei' or last name starts with 'ei'.
Q3="SELECT COUNT(id) from Actor WHERE gender='M' AND (fname LIKE '%ei' OR lname LIKE 'ei%');"
# Q4: average rank of well-rated (>= 4.2) movies from 1993, 1995 or 2000.
Q4="SELECT AVG(rank) from movie WHERE (year=1993 OR year=1995 OR year=2000) AND rank>=4.2;"
# Q5: sum of ranks for '...HAry...' titles from 1981-1984 with rank below 9.
Q5="SELECT SUM(rank) from movie WHERE name LIKE '%HAry%' AND (year BETWEEN 1981 AND 1984) AND rank<9;"
# Q6: earliest year with a movie ranked exactly 5.
Q6="SELECT MIN(year) from Movie WHERE rank=5;"
# Q7: female actors whose first and last names match.
# NOTE(review): '==' is non-standard SQL (SQLite accepts it, MySQL does not) -
# confirm the target engine.
Q7="SELECT COUNT(id) from ACTOR WHERE gender='F' AND fname==lname;"
# Q8: first 100 distinct first names of actors whose last name ends in 'ei'.
Q8="SELECT DISTINCT fname from Actor WHERE lname LIKE '%ei' ORDER BY fname ASC LIMIT 100;"
# Q9: 25 movies (id, title, year) from the listed years.
Q9="SELECT id,name AS movie_title,year from Movie WHERE year IN(2001,2002,2005,2006) LIMIT 25 ;"
# Q10: first 5 distinct last names of directors with the listed first names.
Q10="SELECT DISTINCT lname from Director WHERE fname IN('Yeud','Wolf','Vicky') ORDER BY lname ASC LIMIT 5;"
"ec2-user@ip-172-31-22-170.ap-southeast-1.compute.internal"
] | ec2-user@ip-172-31-22-170.ap-southeast-1.compute.internal |
4638fdbdc7160647895db0fde527788b32829a33 | bc95e6b5d5edbde9cdb9ea06cc51f2a171eb1939 | /Ejercicios_Lambda/ejercicios3.py | 00ebf77d94b422f7d5ade7993eee360c19c05cdf | [] | no_license | Pradas137/Python | 18e0418c735765da5964319cbe85bed9ac9cce6b | ccd3624bf95ca2379593ef7c9085abd122b5dd37 | refs/heads/main | 2023-03-12T01:54:24.463332 | 2021-03-02T20:00:18 | 2021-03-02T20:00:18 | 338,836,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | """
Ordenar una llista de strings per la segona lletra de cada string, utilitzant el mètode sort/sorted i la funció lambda:
Exemple:
mesos = ['gener', 'març', 'abril', 'juny', 'agost']
SOLUCIO:
mesos = ['març', 'abril', 'gener', 'agost', 'juny']
"""
mesos = ['gener', 'març', 'abril', 'juny', 'agost']
# Order by each month name's second character (index 1); sorted() returns a
# new list and leaves `mesos` untouched.
ordenar = sorted(mesos, key=lambda nom: nom[1])
print(ordenar)
| [
"pradas137@gmail.com"
] | pradas137@gmail.com |
c6866ffcb6663df60970fd0041ee61d604f921a5 | 930c207e245c320b108e9699bbbb036260a36d6a | /BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/Differential_Pressure_Load_Shed_Status.py | 4f44b20dc9c3108fb3505cc8f10804105b148f22 | [] | no_license | InnovationSE/BRICK-Generated-By-OLGA | 24d278f543471e1ce622f5f45d9e305790181fff | 7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2 | refs/heads/master | 2021-07-01T14:13:11.302860 | 2017-09-21T12:44:17 | 2017-09-21T12:44:17 | 104,251,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Load_Shed_Status import Load_Shed_Status
class Differential_Pressure_Load_Shed_Status(Load_Shed_Status):
    """RDFAlchemy mapping for the Brick 1.0.2 Differential_Pressure_Load_Shed_Status
    class (generated code): subclass of Load_Shed_Status whose rdf_type points
    at the corresponding term in the Brick 1.0.2 namespace.
    """
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Differential_Pressure_Load_Shed_Status
| [
"Andre.Ponnouradjane@non.schneider-electric.com"
] | Andre.Ponnouradjane@non.schneider-electric.com |
32deaed41a4e6581445f42876563cf802299ebe7 | da7149f3182d2421e046828d30fc1c91c14d496d | /chapter16/coro_exc_demo.py | c9aa826582f4e4609e85504e132a7eb87f93559b | [] | no_license | tangkaiyang/fluent_python | 6db2825cfadccb70a886cb822026d69be4b03cc9 | 5f07072d8db5ddf43bfe913b3262b325a8f1ad35 | refs/heads/master | 2020-05-02T20:21:00.404872 | 2019-04-18T02:35:55 | 2019-04-18T02:35:55 | 178,188,495 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,724 | py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/4/12 16:18
# @Author : tangky
# @Site :
# @File : coro_exc_demo.py
# @Software : PyCharm
# Example 16-8  coro_exc_demo.py: test code for studying exception handling inside a coroutine
class DemoException(Exception):
    """An exception type defined for this demonstration."""
def demo_exc_handling():
    """Coroutine that survives DemoException; any other exception terminates it."""
    print('-> coroutine started')
    while True:
        try:
            received = yield
            # Normal path: nothing was thrown in, just echo the received value.
            print('-> coroutine received: {!r}'.format(received))
        except DemoException:
            # Only DemoException gets special treatment ('DemoExceptiion' typo
            # is in the original output and is kept as-is).
            print('*** DemoExceptiion handled. Continuing...')
    raise RuntimeError('This line should never run.')  # unreachable: the loop never falls through
# The last line of Example 16-8 never executes: only an unhandled exception can
# break out of that infinite loop, and an unhandled exception terminates the
# coroutine immediately.
# Example 16-9: activate and close demo_exc_handling without any exception.
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
exc_coro.send(22)
exc_coro.close()
from inspect import getgeneratorstate
print(getgeneratorstate(exc_coro))
# Example 16-10: throwing DemoException into demo_exc_handling does not
# terminate the coroutine (it is handled inside the loop).
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
exc_coro.throw(DemoException)
print(getgeneratorstate(exc_coro))
# If an exception thrown into the coroutine is not handled, the coroutine
# stops, i.e. its state becomes 'GEN_CLOSED'.
# Example 16-11: the coroutine terminates when it cannot handle the thrown exception.
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
try:
    exc_coro.throw(ZeroDivisionError)
except Exception:
    print(ZeroDivisionError)
print(getgeneratorstate(exc_coro))
| [
"945541696@qq.com"
] | 945541696@qq.com |
9444d9f3d6dc9cbef1fc27bece5d2b1d18411707 | dbc543e60bea327f32fd1549fe0ccf090e82952f | /tests/drawing_test.py | 521fe7e0b96366a27ce6252c42ae07cc4da572da | [
"MIT"
] | permissive | kcdbaba/consoledraw | 22dc772e4f98057652c7a3ef4806ac77ba397918 | bb5524743d7980ab877b7ed5d7d3677300984d42 | refs/heads/master | 2021-08-24T02:07:29.309627 | 2017-12-07T15:37:44 | 2017-12-07T15:37:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,023 | py | from drawing import *
import unittest
class TestDrawing(unittest.TestCase):
    """Unit tests for the console drawing primitives imported star-style from
    `drawing`: canvas, point, line, rectangle, bucket_fill.

    Expected pictures are written as '|...|' strings; assert_helper_pretty
    converts them to the canonical list-of-lists form using the module's
    DEFAULT_EMPTY / DEFAULT_COLOUR constants. Coordinates appear 1-based:
    0 and 6 fall outside a 5x5 canvas in the *_external tests.
    """
    # nose-style class fixtures: shorthand aliases for the colour constants
    # (presumably exported by `drawing`'s star import - confirm).
    @classmethod
    def setup_class(cls):
        cls.C = DEFAULT_COLOUR
        cls.E = DEFAULT_EMPTY
    @classmethod
    def teardown_class(cls):
        # NOTE(review): only C is deleted here; E is left behind.
        del cls.C
    def setUp(self):
        # Fresh 5x5 canvas for every test.
        self.canvas_5_5 = canvas(5, 5)
    def tearDown(self):
        del self.canvas_5_5
    def assert_helper(self, actual, expected):
        """
        switches argument order from intuitive one to nose's canonical form
        """
        self.assertEqual(expected, actual)
    def assert_helper_pretty(self, actual, expected):
        """
        builds from expected pattern into canonical form:
        - list of lists of colours
        - with correct empty and default string colour
        and applies assertEquals against actual
        """
        expected = [list(x.strip("|"). replace(" ", self.E).replace("x", self.C)) for x in expected]
        self.assertEqual(expected, actual)
    # --- canvas construction ---
    def test_canvas_1_1(self):
        self.assert_helper(canvas(1, 1), [[self.E]])
    def test_canvas_5_5(self):
        self.assert_helper(canvas(5, 5), [[self.E] * 5] * 5)
    # --- point: corners, centre, and out-of-bounds ---
    def test_point_top_left(self):
        point(self.canvas_5_5, 1, 1)
        expected = ["|x    |",
                    "|     |",
                    "|     |",
                    "|     |",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_point_top_right(self):
        point(self.canvas_5_5, 5, 1)
        expected = ["|    x|",
                    "|     |",
                    "|     |",
                    "|     |",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_point_bottom_left(self):
        point(self.canvas_5_5, 1, 5)
        expected = ["|     |",
                    "|     |",
                    "|     |",
                    "|     |",
                    "|x    |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_point_bottom_right(self):
        point(self.canvas_5_5, 5, 5)
        expected = ["|     |",
                    "|     |",
                    "|     |",
                    "|     |",
                    "|    x|"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_point_center(self):
        point(self.canvas_5_5, 3, 3)
        expected = ["|     |",
                    "|     |",
                    "|  x  |",
                    "|     |",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_point_external(self):
        point(self.canvas_5_5, 6, 1)
        self.assert_helper(self.canvas_5_5, [[self.E] * 5] * 5)
    # --- line: validation, borders, and clipping at the canvas edge ---
    def test_not_vertical_and_not_horizontal_line(self):
        with self.assertRaises(LineTypeError):
            line(self.canvas_5_5, 1, 1, 2, 2)
    def test_line_upper_border(self):
        line(self.canvas_5_5, 1, 1, 5, 1)
        expected = ["|xxxxx|",
                    "|     |",
                    "|     |",
                    "|     |",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_line_lower_border(self):
        line(self.canvas_5_5, 1, 5, 5, 5)
        expected = ["|     |",
                    "|     |",
                    "|     |",
                    "|     |",
                    "|xxxxx|"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_line_left_border(self):
        line(self.canvas_5_5, 1, 1, 1, 5)
        expected = ["|x    |",
                    "|x    |",
                    "|x    |",
                    "|x    |",
                    "|x    |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_line_right_border(self):
        line(self.canvas_5_5, 5, 1, 5, 5)
        expected = ["|    x|",
                    "|    x|",
                    "|    x|",
                    "|    x|",
                    "|    x|"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_line_horizontal_row_3(self):
        line(self.canvas_5_5, 2, 3, 4, 3)
        expected = ["|     |",
                    "|     |",
                    "| xxx |",
                    "|     |",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_line_vertical_col_3(self):
        line(self.canvas_5_5, 3, 2, 3, 4)
        expected = ["|     |",
                    "|  x  |",
                    "|  x  |",
                    "|  x  |",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    # NOTE(review): despite the names, the next two cases pass vertical
    # (x constant) and horizontal (y constant) coordinates respectively.
    def test_line_horizontal_external(self):
        line(self.canvas_5_5, 6, 2, 6, 4)
        self.assert_helper(self.canvas_5_5, [[self.E] * 5] * 5)
    def test_line_vertical_external(self):
        line(self.canvas_5_5, 2, 6, 4, 6)
        self.assert_helper(self.canvas_5_5, [[self.E] * 5] * 5)
    def test_line_horizontal_partial_external_right(self):
        line(self.canvas_5_5, 2, 5, 6, 5)
        expected = ["|     |",
                    "|     |",
                    "|     |",
                    "|     |",
                    "| xxxx|"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_line_horizontal_partial_external_left(self):
        line(self.canvas_5_5, 0, 5, 4, 5)
        expected = ["|     |",
                    "|     |",
                    "|     |",
                    "|     |",
                    "|xxxx |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_line_horizontal_partial_external_left_right(self):
        line(self.canvas_5_5, 0, 5, 6, 5)
        expected = ["|     |",
                    "|     |",
                    "|     |",
                    "|     |",
                    "|xxxxx|"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_line_vertical_partial_external_top(self):
        line(self.canvas_5_5, 5, 0, 5, 4)
        expected = ["|    x|",
                    "|    x|",
                    "|    x|",
                    "|    x|",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_line_vertical_partial_external_bottom(self):
        line(self.canvas_5_5, 5, 2, 5, 6)
        expected = ["|     |",
                    "|    x|",
                    "|    x|",
                    "|    x|",
                    "|    x|"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_line_vertical_partial_external_top_bottom(self):
        line(self.canvas_5_5, 5, 0, 5, 6)
        expected = ["|    x|",
                    "|    x|",
                    "|    x|",
                    "|    x|",
                    "|    x|"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    # --- rectangle: validation, borders, optional fill, and clipping ---
    def test_not_ordered_coordinates_rectangle(self):
        with self.assertRaises(RectangleTypeError):
            rectangle(self.canvas_5_5, 2, 2, 1, 1)
    def test_rectangle_canvas_border(self):
        rectangle(self.canvas_5_5, 1, 1, 5, 5)
        expected = ["|xxxxx|",
                    "|x   x|",
                    "|x   x|",
                    "|x   x|",
                    "|xxxxx|"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_rectangle_internal(self):
        rectangle(self.canvas_5_5, 2, 2, 4, 4)
        expected = ["|     |",
                    "| xxx |",
                    "| x x |",
                    "| xxx |",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_rectangle_fill_internal(self):
        rectangle(self.canvas_5_5, 2, 2, 4, 4, self.C, True, "y")
        expected = ["|     |",
                    "| xxx |",
                    "| xyx |",
                    "| xxx |",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_rectangle_external(self):
        rectangle(self.canvas_5_5, 6, 1, 7, 2)
        self.assert_helper(self.canvas_5_5, [[self.E] * 5] * 5)
    def test_rectangle_external_surrounding(self):
        rectangle(self.canvas_5_5, 0, 0, 6, 6)
        self.assert_helper(self.canvas_5_5, [[self.E] * 5] * 5)
    def test_rectangle_fill_external(self):
        rectangle(self.canvas_5_5, 6, 1, 7, 2, self.C, True, "y")
        self.assert_helper(self.canvas_5_5, [[self.E] * 5] * 5)
    def test_rectangle_fill_external_surrounding(self):
        # A filled rectangle fully surrounding the canvas floods every cell.
        rectangle(self.canvas_5_5, 0, 0, 6, 6, self.C, True, "y")
        self.assert_helper(self.canvas_5_5, [["y"] * 5] * 5)
    def test_rectangle_partial_external_top_left(self):
        rectangle(self.canvas_5_5, 0, 0, 4, 2)
        expected = ["|   x |",
                    "|xxxx |",
                    "|     |",
                    "|     |",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_rectangle_partial_external_top_right(self):
        rectangle(self.canvas_5_5, 2, 0, 6, 2)
        expected = ["| x   |",
                    "| xxxx|",
                    "|     |",
                    "|     |",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_rectangle_partial_external_bottom_right(self):
        rectangle(self.canvas_5_5, 3, 4, 6, 6)
        expected = ["|     |",
                    "|     |",
                    "|     |",
                    "|  xxx|",
                    "|  x  |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_rectangle_partial_external_bottom_left(self):
        rectangle(self.canvas_5_5, 0, 4, 3, 6)
        expected = ["|     |",
                    "|     |",
                    "|     |",
                    "|xxx  |",
                    "|  x  |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    # --- bucket_fill: flood fill inside and around existing shapes ---
    def test_bucket_rectangle_fill_size_1_1(self):
        rectangle(self.canvas_5_5, 2, 2, 4, 4)
        bucket_fill(self.canvas_5_5, 3, 3, ".")
        expected = ["|     |",
                    "| xxx |",
                    "| x.x |",
                    "| xxx |",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_bucket_rectangle_fill_size_1_2(self):
        rectangle(self.canvas_5_5, 2, 2, 4, 5)
        bucket_fill(self.canvas_5_5, 3, 3, ".")
        expected = ["|     |",
                    "| xxx |",
                    "| x.x |",
                    "| x.x |",
                    "| xxx |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_bucket_rectangle_fill_size_1_2_other_point(self):
        rectangle(self.canvas_5_5, 2, 2, 4, 5)
        bucket_fill(self.canvas_5_5, 3, 4, ".")
        expected = ["|     |",
                    "| xxx |",
                    "| x.x |",
                    "| x.x |",
                    "| xxx |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_bucket_rectangle_fill_size_2_1(self):
        rectangle(self.canvas_5_5, 2, 2, 5, 4)
        bucket_fill(self.canvas_5_5, 3, 3, ".")
        expected = ["|     |",
                    "| xxxx|",
                    "| x..x|",
                    "| xxxx|",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_bucket_rectangle_fill_size_2_1_other_point(self):
        rectangle(self.canvas_5_5, 2, 2, 5, 4)
        bucket_fill(self.canvas_5_5, 4, 3, ".")
        expected = ["|     |",
                    "| xxxx|",
                    "| x..x|",
                    "| xxxx|",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_bucket_rectangle_fill_size_3_3(self):
        rectangle(self.canvas_5_5, 1, 1, 5, 5)
        bucket_fill(self.canvas_5_5, 3, 3, ".")
        expected = ["|xxxxx|",
                    "|x...x|",
                    "|x...x|",
                    "|x...x|",
                    "|xxxxx|"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_bucket_fill_rectangle_border(self):
        # Filling starting on the border recolours the connected border itself.
        rectangle(self.canvas_5_5, 2, 2, 4, 4)
        bucket_fill(self.canvas_5_5, 4, 4, "!")
        expected = ["|     |",
                    "| !!! |",
                    "| ! ! |",
                    "| !!! |",
                    "|     |"]
        self.assert_helper_pretty(self.canvas_5_5, expected)
    def test_bucket_fill_complex(self):
        # Builds up a scene step by step, checking the canvas after each shape,
        # then floods the outer region; enclosed pockets stay untouched.
        c = canvas(20, 4)
        line(c, 1, 2, 6, 2)
        expected = ["|                    |",
                    "|xxxxxx              |",
                    "|                    |",
                    "|                    |"]
        self.assert_helper_pretty(c, expected)
        line(c, 6, 3, 6, 4)
        expected = ["|                    |",
                    "|xxxxxx              |",
                    "|     x              |",
                    "|     x              |"]
        self.assert_helper_pretty(c, expected)
        rectangle(c, 14, 1, 18, 3)
        expected = ["|             xxxxx  |",
                    "|xxxxxx       x   x  |",
                    "|     x       xxxxx  |",
                    "|     x              |"]
        self.assert_helper_pretty(c, expected)
        bucket_fill(c, 10, 3, "o")
        expected = ["|oooooooooooooxxxxxoo|",
                    "|xxxxxxooooooox   xoo|",
                    "|     xoooooooxxxxxoo|",
                    "|     xoooooooooooooo|"]
        self.assert_helper_pretty(c, expected)
| [
"kunal.chowdhury@gmail.com"
] | kunal.chowdhury@gmail.com |
376cb7e68c7bab2659ec8cc35ce8928d99e1644e | 6004fd6067f49b2eb74c650fb847ab1a8799788f | /codepkg/Charting/weixutu.py | 05ea79d12b57adc3d71949cb0c57ebee55994007 | [] | no_license | zhongxiangpku/python_gradproj | 4bf2d41f4075160cc1f84f7dfca6c3901517e3f2 | a4f1f2d8a4dc55f1ee3e8488945ab080b60c2f45 | refs/heads/master | 2020-07-19T11:28:15.640497 | 2017-06-20T02:08:36 | 2017-06-20T02:08:36 | 73,766,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,424 | py | # encoding: UTF-8
import codecs
import os
import sys
import MySQLdb
import string
from codepkg import mod_config
reload(sys)
sys.setdefaultencoding( "utf-8" )
def mappingUserCountRankPlot(file):
    """Export per-city user counts, ranked by count descending, to `file`.

    Writes one CRLF-terminated 'rank;city_name;user_count' line per city
    (UTF-8). Python 2 module.
    """
    fs = codecs.open(file, 'w+', encoding='utf8')
    try:
        db = MySQLdb.connect(mod_config.dbhost, mod_config.dbuser, mod_config.dbpassword, mod_config.dbname,
                             charset=mod_config.dbcharset)
        try:
            cursor = db.cursor()
            queryUserCountSQL = 'select cname,usercount from city order by usercount desc'
            cursor.execute(queryUserCountSQL)
            rows = cursor.fetchall()
            index = 1
            for row in rows:
                key = str(row[0])
                fs.write(str(index) + ";" + key + ";" + str(row[1]) + "\r\n")
                index += 1
        finally:
            # The original never closed the connection, leaking it even on success.
            db.close()
    finally:
        fs.close()
def mappingMobileCountRankPlot(file):
    """Export per-origin-city counts of inter-city trips, ranked descending.

    Writes one CRLF-terminated 'rank;from_city;trip_count' line per city
    (UTF-8). Python 2 module.
    NOTE(review): column name 'toccity' in the query looks like a typo for
    'tocity' - confirm against the citytravel schema.
    """
    fs = codecs.open(file, 'w+', encoding='utf8')
    try:
        db = MySQLdb.connect(mod_config.dbhost, mod_config.dbuser, mod_config.dbpassword, mod_config.dbname,
                             charset=mod_config.dbcharset)
        try:
            cursor = db.cursor()
            queryUserCountSQL = 'select fromcity,count(*) from pythondb.citytravel where fromcity != toccity group by fromcity having count(*) > 0 order by count(*) desc'
            cursor.execute(queryUserCountSQL)
            rows = cursor.fetchall()
            index = 1
            for row in rows:
                key = str(row[0])
                fs.write(str(index) + ";" + key + ";" + str(row[1]) + "\r\n")
                index += 1
        finally:
            # The original never closed the connection, leaking it even on success.
            db.close()
    finally:
        fs.close()
# Resolve the project root by climbing two directory levels up from the
# current working directory (Python 2 print statement below).
pwd = os.getcwd()
pwd = os.path.dirname(pwd)
pwd = os.path.dirname(pwd)
print pwd
def mappingCityVisitCountRankPlot(file):
    """Export per-abode averages of gyration and visited-city count to `file`.

    Writes one CRLF-terminated 'index;abode;avg_gyration;avg_visitcity' line
    per abode (UTF-8), for users that visited at least one city. Python 2 module.
    """
    fs = codecs.open(file, 'w+', encoding='utf8')
    try:
        db = MySQLdb.connect(mod_config.dbhost, mod_config.dbuser, mod_config.dbpassword, mod_config.dbname,
                             charset=mod_config.dbcharset)
        try:
            cursor = db.cursor()
            queryUserCountSQL = 'select obode,avg(gyration),avg(visitcity) from pythondb.user where visitcity>=1 group by obode'
            cursor.execute(queryUserCountSQL)
            rows = cursor.fetchall()
            index = 1
            for row in rows:
                key = str(row[0])
                fs.write(str(index) + ";" + key + ";" + str(row[1]) + ";" + str(row[2]) + "\r\n")
                index += 1
        finally:
            # The original never closed the connection, leaking it even on success.
            db.close()
    finally:
        fs.close()
# Earlier export runs, kept for reference (uncomment to regenerate):
# rankUserCountFile = pwd + '\\rankusercountplot.txt'
# mappingUserCountRankPlot(rankUserCountFile)
# rankMobileCountFile = pwd + '\\rankMobileCount.txt'
# mappingMobileCountRankPlot(rankMobileCountFile)
# NOTE(review): '\\' hard-codes a Windows separator; os.path.join would be portable.
rankCityVisitCountFile = pwd + '\\rankCityVisitCount.txt'
mappingCityVisitCountRankPlot(rankCityVisitCountFile)
| [
"zhongxiang@pku.edu.cn"
] | zhongxiang@pku.edu.cn |
c284691cf68ff52a518de0b7adbd913d0ce9e4b4 | 953d68ce72a8b4fd82280a00361cec3848769a79 | /code/17_decision_trees_nb.py | 566835ef8ed0584c0219e2cdbe11213b6382671a | [] | no_license | JamesByers/DAT8 | b28256c04d4b26c61d107a2c6501013a509cee66 | d7cc9d044b32ffbadaa5db1d11f28a99e0785a0d | refs/heads/master | 2021-01-20T01:35:34.214391 | 2015-10-09T14:57:29 | 2015-10-09T14:57:29 | 43,983,236 | 0 | 0 | null | 2015-10-09T22:32:54 | 2015-10-09T22:32:54 | null | UTF-8 | Python | false | false | 18,518 | py | # # Decision Trees
#
# *Adapted from Chapter 8 of [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/)*
# Why are we learning about decision trees?
#
# - Can be applied to both regression and classification problems
# - Many useful properties
# - Very popular
# - Basis for more sophisticated models
# - Have a different way of "thinking" than the other models we have studied
# ## Lesson objectives
#
# Students will be able to:
#
# - Explain how a decision tree is created
# - Build a decision tree model in scikit-learn
# - Tune a decision tree model and explain how tuning impacts the model
# - Interpret a tree diagram
# - Describe the key differences between regression and classification trees
# - Decide whether a decision tree is an appropriate model for a given problem
# # Part 1: Regression trees
#
# Major League Baseball player data from 1986-87:
#
# - **Years** (x-axis): number of years playing in the major leagues
# - **Hits** (y-axis): number of hits in the previous year
# - **Salary** (color): low salary is blue/green, high salary is red/yellow
# 
# Group exercise:
#
# - The data above is our **training data**.
# - We want to build a model that predicts the Salary of **future players** based on Years and Hits.
# - We are going to "segment" the feature space into regions, and then use the **mean Salary in each region** as the predicted Salary for future players.
# - Intuitively, you want to **maximize** the similarity (or "homogeneity") within a given region, and **minimize** the similarity between different regions.
#
# Rules for segmenting:
#
# - You can only use **straight lines**, drawn one at a time.
# - Your line must either be **vertical or horizontal**.
# - Your line **stops** when it hits an existing line.
# 
# Above are the regions created by a computer:
#
# - $R_1$: players with **less than 5 years** of experience, mean Salary of **\$166,000 **
# - $R_2$: players with **5 or more years** of experience and **less than 118 hits**, mean Salary of **\$403,000 **
# - $R_3$: players with **5 or more years** of experience and **118 hits or more**, mean Salary of **\$846,000 **
#
# **Note:** Years and Hits are both integers, but the convention is to use the **midpoint** between adjacent values to label a split.
#
# These regions are used to make predictions on **out-of-sample data**. Thus, there are only three possible predictions! (Is this different from how **linear regression** makes predictions?)
#
# Below is the equivalent regression tree:
# 
# The first split is **Years < 4.5**, thus that split goes at the top of the tree. When a splitting rule is **True**, you follow the left branch. When a splitting rule is **False**, you follow the right branch.
#
# For players in the **left branch**, the mean Salary is \$166,000, thus you label it with that value. (Salary has been divided by 1000 and log-transformed to 5.11.)
#
# For players in the **right branch**, there is a further split on **Hits < 117.5**, dividing players into two more Salary regions: \$403,000 (transformed to 6.00), and \$846,000 (transformed to 6.74).
# 
# **What does this tree tell you about your data?**
#
# - Years is the most important factor determining Salary, with a lower number of Years corresponding to a lower Salary.
# - For a player with a lower number of Years, Hits is not an important factor determining Salary.
# - For a player with a higher number of Years, Hits is an important factor determining Salary, with a greater number of Hits corresponding to a higher Salary.
#
# **Question:** What do you like and dislike about decision trees so far?
# ## Building a regression tree by hand
#
# Your **training data** is a tiny dataset of [used vehicle sale prices](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/vehicles_train.csv). Your goal is to **predict price** for testing data.
#
# 1. Read the data into a Pandas DataFrame.
# 2. Explore the data by sorting, plotting, or split-apply-combine (aka `group_by`).
# 3. Decide which feature is the most important predictor, and use that to create your first splitting rule.
# - Only binary splits are allowed.
# 4. After making your first split, split your DataFrame into two parts, and then explore each part to figure out what other splits to make.
# 5. Stop making splits once you are convinced that it strikes a good balance between underfitting and overfitting.
# - Your goal is to build a model that generalizes well.
# - You are allowed to split on the same variable multiple times!
# 6. Draw your tree, labeling the leaves with the mean price for the observations in that region.
# - Make sure nothing is backwards: You follow the **left branch** if the rule is true, and the **right branch** if the rule is false.
# ## How does a computer build a regression tree?
#
# **Ideal approach:** Consider every possible partition of the feature space (computationally infeasible)
#
# **"Good enough" approach:** recursive binary splitting
#
# 1. Begin at the top of the tree.
# 2. For **every feature**, examine **every possible cutpoint**, and choose the feature and cutpoint such that the resulting tree has the lowest possible mean squared error (MSE). Make that split.
# 3. Examine the two resulting regions, and again make a **single split** (in one of the regions) to minimize the MSE.
# 4. Keep repeating step 3 until a **stopping criterion** is met:
# - maximum tree depth (maximum number of splits required to arrive at a leaf)
# - minimum number of observations in a leaf
# ### Demo: Choosing the ideal cutpoint for a given feature
# vehicle data
# Load the tiny used-vehicle training set straight from GitHub (needs network
# access). Exported from a notebook: bare expressions like `train` below only
# display output in a notebook and are no-ops in a plain script.
import pandas as pd
url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/vehicles_train.csv'
train = pd.read_csv(url)
# before splitting anything, just predict the mean of the entire dataset
train['prediction'] = train.price.mean()
train
# calculate RMSE for those predictions
from sklearn import metrics
import numpy as np
np.sqrt(metrics.mean_squared_error(train.price, train.prediction))
# define a function that calculates the RMSE for a given split of miles
def mileage_split(miles):
    """RMSE of a single-split model on the module-global ``train`` frame.

    Rows with ``miles`` below the cutpoint are predicted with the low-mileage
    mean price, the rest with the high-mileage mean price.
    Side effect: overwrites ``train['prediction']``.
    """
    below = train.miles < miles
    train['prediction'] = np.where(below,
                                   train.price[below].mean(),
                                   train.price[~below].mean())
    return np.sqrt(metrics.mean_squared_error(train.price, train.prediction))
# calculate RMSE for tree which splits on miles < 50000
# (fix: the original used Python 2 "print" statements, a syntax error on
# Python 3; converted to the print() function — printed output is unchanged)
print('RMSE:', mileage_split(50000))
train
# calculate RMSE for tree which splits on miles < 100000
print('RMSE:', mileage_split(100000))
train
# check all possible mileage splits, stepping every 1000 miles
mileage_range = range(train.miles.min(), train.miles.max(), 1000)
RMSE = [mileage_split(miles) for miles in mileage_range]
# allow plots to appear in the notebook
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (6, 4)
plt.rcParams['font.size'] = 14
# plot mileage cutpoint (x-axis) versus RMSE (y-axis)
plt.plot(mileage_range, RMSE)
plt.xlabel('Mileage cutpoint')
plt.ylabel('RMSE (lower is better)')
# **Recap:** Before every split, this process is repeated for every feature, and the feature and cutpoint that produces the lowest MSE is chosen.
# ## Building a regression tree in scikit-learn
# encode car as 0 and truck as 1
# define X and y
# instantiate a DecisionTreeRegressor (with random_state=1)
# use leave-one-out cross-validation (LOOCV) to estimate the RMSE for this model
# ## What happens when we grow a tree too deep?
#
# - Left: Regression tree for Salary **grown deeper**
# - Right: Comparison of the **training, testing, and cross-validation errors** for trees with different numbers of leaves
# 
# The **training error** continues to go down as the tree size increases (due to overfitting), but the lowest **cross-validation error** occurs for a tree with 3 leaves.
# ## Tuning a regression tree
#
# Let's try to reduce the RMSE by tuning the **max_depth** parameter:
# try different values one-by-one
treereg = DecisionTreeRegressor(max_depth=1, random_state=1)  # a decision "stump": one split only
# NOTE(review): modern scikit-learn renamed this scorer to 'neg_mean_squared_error';
# 'mean_squared_error' only works on old versions — confirm against the pinned sklearn.
scores = cross_val_score(treereg, X, y, cv=14, scoring='mean_squared_error')
np.mean(np.sqrt(-scores))  # scores are negative MSEs, so negate before taking the sqrt
# Or, we could write a loop to try a range of values:
# list of values to try
max_depth_range = range(1, 8)
# list to store the average RMSE for each value of max_depth
RMSE_scores = []
# use LOOCV with each value of max_depth
for depth in max_depth_range:
    treereg = DecisionTreeRegressor(max_depth=depth, random_state=1)
    # NOTE(review): 'mean_squared_error' is the legacy scorer name; newer
    # scikit-learn expects 'neg_mean_squared_error'.
    MSE_scores = cross_val_score(treereg, X, y, cv=14, scoring='mean_squared_error')
    RMSE_scores.append(np.mean(np.sqrt(-MSE_scores)))  # negate: the scorer returns negative MSE
# plot max_depth (x-axis) versus RMSE (y-axis)
plt.plot(max_depth_range, RMSE_scores)
plt.xlabel('max_depth')
plt.ylabel('RMSE (lower is better)')
# max_depth=3 was best, so fit a tree using that parameter
treereg = DecisionTreeRegressor(max_depth=3, random_state=1)
treereg.fit(X, y)
# "Gini importance" of each feature: the (normalized) total reduction of error brought by that feature
pd.DataFrame({'feature':feature_cols, 'importance':treereg.feature_importances_})
# ## Creating a tree diagram
# create a Graphviz file
from sklearn.tree import export_graphviz
# Write a Graphviz .dot description of the fitted regression tree
# (convert with: dot -Tpng tree_vehicles.dot -o tree_vehicles.png).
export_graphviz(treereg, out_file='tree_vehicles.dot', feature_names=feature_cols)
# At the command line, run this to convert to PNG:
# dot -Tpng tree_vehicles.dot -o tree_vehicles.png
# 
# Reading the internal nodes:
#
# - **samples:** number of observations in that node before splitting
# - **mse:** MSE calculated by comparing the actual response values in that node against the mean response value in that node
# - **rule:** rule used to split that node (go left if true, go right if false)
#
# Reading the leaves:
#
# - **samples:** number of observations in that node
# - **value:** mean response value in that node
# - **mse:** MSE calculated by comparing the actual response values in that node against "value"
# ## Making predictions for the testing data
# read the testing data
# **Question:** Using the tree diagram above, what predictions will the model make for each observation?
# use fitted model to make predictions on testing data
# calculate RMSE
# calculate RMSE for your own tree!
# # Part 2: Classification trees
#
# **Example:** Predict whether Barack Obama or Hillary Clinton will win the Democratic primary in a particular county in 2008:
# 
# **Questions:**
#
# - What are the observations? How many observations are there?
# - What is the response variable?
# - What are the features?
# - What is the most predictive feature?
# - Why does the tree split on high school graduation rate twice in a row?
# - What is the class prediction for the following county: 15% African-American, 90% high school graduation rate, located in the South, high poverty, high population density?
# - What is the predicted probability for that same county?
# ## Comparing regression trees and classification trees
#
# |regression trees|classification trees|
# |---|---|
# |predict a continuous response|predict a categorical response|
# |predict using mean response of each leaf|predict using most commonly occurring class of each leaf|
# |splits are chosen to minimize MSE|splits are chosen to minimize Gini index (discussed below)|
# ## Splitting criteria for classification trees
#
# Common options for the splitting criteria:
#
# - **classification error rate:** fraction of training observations in a region that don't belong to the most common class
# - **Gini index:** measure of total variance across classes in a region
# ### Example of classification error rate
#
# Pretend we are predicting whether someone buys an iPhone or an Android:
#
# - At a particular node, there are **25 observations** (phone buyers), of whom **10 bought iPhones and 15 bought Androids**.
# - Since the majority class is **Android**, that's our prediction for all 25 observations, and thus the classification error rate is **10/25 = 40%**.
#
# Our goal in making splits is to **reduce the classification error rate**. Let's try splitting on gender:
#
# - **Males:** 2 iPhones and 12 Androids, thus the predicted class is Android
# - **Females:** 8 iPhones and 3 Androids, thus the predicted class is iPhone
# - Classification error rate after this split would be **5/25 = 20%**
#
# Compare that with a split on age:
#
# - **30 or younger:** 4 iPhones and 8 Androids, thus the predicted class is Android
# - **31 or older:** 6 iPhones and 7 Androids, thus the predicted class is Android
# - Classification error rate after this split would be **10/25 = 40%**
#
# The decision tree algorithm will try **every possible split across all features**, and choose the split that **reduces the error rate the most.**
# ### Example of Gini index
#
# Calculate the Gini index before making a split:
#
# $$1 - \left(\frac {iPhone} {Total}\right)^2 - \left(\frac {Android} {Total}\right)^2 = 1 - \left(\frac {10} {25}\right)^2 - \left(\frac {15} {25}\right)^2 = 0.48$$
#
# - The **maximum value** of the Gini index is 0.5, and occurs when the classes are perfectly balanced in a node.
# - The **minimum value** of the Gini index is 0, and occurs when there is only one class represented in a node.
# - A node with a lower Gini index is said to be more "pure".
#
# Evaluating the split on **gender** using Gini index:
#
# $$\text{Males: } 1 - \left(\frac {2} {14}\right)^2 - \left(\frac {12} {14}\right)^2 = 0.24$$
# $$\text{Females: } 1 - \left(\frac {8} {11}\right)^2 - \left(\frac {3} {11}\right)^2 = 0.40$$
# $$\text{Weighted Average: } 0.24 \left(\frac {14} {25}\right) + 0.40 \left(\frac {11} {25}\right) = 0.31$$
#
# Evaluating the split on **age** using Gini index:
#
# $$\text{30 or younger: } 1 - \left(\frac {4} {12}\right)^2 - \left(\frac {8} {12}\right)^2 = 0.44$$
# $$\text{31 or older: } 1 - \left(\frac {6} {13}\right)^2 - \left(\frac {7} {13}\right)^2 = 0.50$$
# $$\text{Weighted Average: } 0.44 \left(\frac {12} {25}\right) + 0.50 \left(\frac {13} {25}\right) = 0.47$$
#
# Again, the decision tree algorithm will try **every possible split**, and will choose the split that **reduces the Gini index (and thus increases the "node purity") the most.**
# ### Comparing classification error rate and Gini index
#
# - Gini index is generally preferred because it will make splits that **increase node purity**, even if that split does not change the classification error rate.
# - Node purity is important because we're interested in the **class proportions** in each region, since that's how we calculate the **predicted probability** of each class.
# - scikit-learn's default splitting criteria for classification trees is Gini index.
#
# Note: There is another common splitting criteria called **cross-entropy**. It's numerically similar to Gini index, but slower to compute, thus it's not as popular as Gini index.
# ## Building a classification tree in scikit-learn
# We'll build a classification tree using the Titanic data:
# read in the data
url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/titanic.csv'
titanic = pd.read_csv(url)
# encode female as 0 and male as 1
titanic['Sex'] = titanic.Sex.map({'female':0, 'male':1})
# fill in the missing values for age with the median age
titanic.Age.fillna(titanic.Age.median(), inplace=True)
# create a DataFrame of dummy variables for Embarked
embarked_dummies = pd.get_dummies(titanic.Embarked, prefix='Embarked')
# drop the first dummy column to avoid perfect collinearity (k-1 dummies for k levels)
embarked_dummies.drop(embarked_dummies.columns[0], axis=1, inplace=True)
# concatenate the original DataFrame and the dummy DataFrame
titanic = pd.concat([titanic, embarked_dummies], axis=1)
# print the updated DataFrame
titanic.head()
# - **Survived:** 0=died, 1=survived (response variable)
# - **Pclass:** 1=first class, 2=second class, 3=third class
# - What will happen if the tree splits on this feature?
# - **Sex:** 0=female, 1=male
# - **Age:** numeric value
# - **Embarked:** C or Q or S
# define X and y
feature_cols = ['Pclass', 'Sex', 'Age', 'Embarked_Q', 'Embarked_S']
X = titanic[feature_cols]
y = titanic.Survived
# fit a classification tree with max_depth=3 on all data
from sklearn.tree import DecisionTreeClassifier
treeclf = DecisionTreeClassifier(max_depth=3, random_state=1)
treeclf.fit(X, y)
# create a Graphviz file
export_graphviz(treeclf, out_file='tree_titanic.dot', feature_names=feature_cols)
# At the command line, run this to convert to PNG:
# dot -Tpng tree_titanic.dot -o tree_titanic.png
# ![Titanic tree](images/tree_titanic.png)
# Notice the split in the bottom right: the **same class** is predicted in both of its leaves. That split didn't affect the **classification error rate**, though it did increase the **node purity**, which is important because it increases the accuracy of our predicted probabilities.
# compute the feature importances
pd.DataFrame({'feature':feature_cols, 'importance':treeclf.feature_importances_})
# # Part 3: Comparing decision trees with other models
#
# **Advantages of decision trees:**
#
# - Can be used for regression or classification
# - Can be displayed graphically
# - Highly interpretable
# - Can be specified as a series of rules, and more closely approximate human decision-making than other models
# - Prediction is fast
# - Features don't need scaling
# - Automatically learns feature interactions
# - Tends to ignore irrelevant features
# - Non-parametric (will outperform linear models if relationship between features and response is highly non-linear)
# 
# **Disadvantages of decision trees:**
#
# - Performance is (generally) not competitive with the best supervised learning methods
# - Can easily overfit the training data (tuning is required)
# - Small variations in the data can result in a completely different tree (high variance)
# - Recursive binary splitting makes "locally optimal" decisions that may not result in a globally optimal tree
# - Doesn't tend to work well if the classes are highly unbalanced
# - Doesn't tend to work well with very small datasets
| [
"justmarkham@gmail.com"
] | justmarkham@gmail.com |
723adae948b66dc0086ffea876f8075c4c995a52 | 95a8d3b8448f40303541c463eb930b6d74348c9d | /Updated_encrypt.py | 288257085d94fb0d38b71c431cf8417595bda57f | [] | no_license | empero6/CyberSecurity | 9aedf54c81abc2a466b06c71be47150a9460acb9 | ca392395bad4565b028ef3a92cb6ad38aafea766 | refs/heads/master | 2020-07-29T00:20:38.182700 | 2019-12-16T20:36:32 | 2019-12-16T20:36:32 | 209,595,009 | 0 | 1 | null | 2019-11-21T14:21:06 | 2019-09-19T16:01:15 | Python | UTF-8 | Python | false | false | 10,316 | py | from PIL import Image
from scipy.integrate import odeint
import numpy as np
import random as rand
import os, hashlib
def encrypt(filepath, key):
    """Encrypt the image at ``filepath`` using the integer ``key``.

    Pipeline: (1) tile the image into overlapping SxS blocks, (2) rewrite each
    block's pixels along a keyed "double scan" path whose start offsets come
    from a Henon-map sequence (``makeDnF``), (3) XOR the whole image with a
    chaotic keystream (``secretMatrix``) seeded from the original image's
    content (``luMapKeys``).  The per-image parameters are appended to
    keys.txt because ``decrypt`` needs them again.  Writes
    "(encrypted)<name>" next to the input and returns the encrypted array.
    """
    dbImage = np.array(Image.open(filepath))  # working copy that gets scrambled
    luImage = np.array(Image.open(filepath))  # pristine copy; used to derive the keystream seed
    outputfile = os.path.join(os.path.dirname(filepath), "(encrypted)"+os.path.basename(filepath))
    M = dbImage.shape[0]; N = dbImage.shape[1]
    # Geometry comes from the *unseeded* RNG, so S/t_x/t_y differ each run;
    # they are persisted to keys.txt below so decryption remains possible.
    rand.seed()
    S = rand.randint(32, min(M,N))  # block side length
    t_x = rand.randint(1, S-1); t_y = rand.randint(1, S-1)  # horizontal / vertical block overlap
    # Blocks per axis: exact division, otherwise one extra (clipped) block.
    n_x = int((N-t_x)/(S-t_x)) if (N-t_x)%(S-t_x) == 0 else ((N-t_x)//(S-t_x))+1
    n_y = int((M-t_y)/(S-t_y)) if (M-t_y)%(S-t_y) == 0 else ((M-t_y)//(S-t_y))+1
    n_total = n_x*n_y
    Y, X, Z = blockCoordinates(S, t_y, t_x, n_y, n_x, M, N)  # Z[i] = (row, col) origin of block i
    index, order = randOrder(key, n_total)  # keyed block-processing permutation
    # Seed the Henon map deterministically from the key so the per-block scan
    # start offsets (D, F) are reproducible at decryption time.
    rand.seed(key)
    key_henx = rand.uniform(0,1)
    key_heny = rand.uniform(0,1)
    D, F = makeDnF(np.array([key_heny]), np.array([key_henx]), S, n_total)
    for key_ord in order:
        i = order[key_ord]
        block = makeBlock(dbImage, Z[i, 0], Z[i, 1], S)
        block = doublescan(block, D[i], F[i])  # permute pixels along the keyed scan path
        dbImage = swapblock(dbImage, block, Z[i, 0], Z[i, 1])
        print("Block:{} of {} done".format(key_ord+1, len(order)))
    # Diffusion stage: XOR with a chaotic keystream seeded from image content.
    key_lumap = luMapKeys(luImage)
    luImage = secretMatrix(luImage.shape, key_lumap)
    dbImage = np.bitwise_xor(dbImage, luImage)
    # NOTE(review): appending the decryption parameters to a plaintext keys.txt
    # defeats the encryption outside of a demo setting.
    with open("keys.txt", "a") as keyfile:
        keyfile.write("File: {}, Key: {}, S: {}, t_y: {}, t_x: {}, x: {}, y: {}, z: {}\n"
                      .format(outputfile, key, S, t_y, t_x, key_lumap[0], key_lumap[1], key_lumap[2]))
    image = Image.fromarray(np.uint8(dbImage))
    image.show  # NOTE(review): missing "()" — this is a no-op attribute access; image.show() was presumably intended
    image.save(outputfile)
    print(outputfile)
    return dbImage
def decrypt(filepath, key, S, t_y, t_x, x, y, z):
    """Invert ``encrypt``: parameters S/t_y/t_x/x/y/z come from keys.txt.

    Order is the reverse of encryption: undo the XOR keystream first, then
    un-scramble the blocks in reverse processing order with
    ``reversedoublescan``.  Writes "(decrypted)<name>" next to the input and
    returns the decrypted array.
    """
    dbImage = np.array(Image.open(filepath))
    luImage = np.array(Image.open(filepath))  # only its .shape is used below
    outputfile = os.path.join(os.path.dirname(filepath), "(decrypted)"+os.path.basename(filepath))
    M = dbImage.shape[0]; N = dbImage.shape[1]
    # Re-derive the same block grid as encrypt() from the persisted S/t_x/t_y.
    n_x = int((N-t_x)/(S-t_x)) if (N-t_x)%(S-t_x) == 0 else ((N-t_x)//(S-t_x))+1
    n_y = int((M-t_y)/(S-t_y)) if (M-t_y)%(S-t_y) == 0 else ((M-t_y)//(S-t_y))+1
    n_total = n_x*n_y
    Y, X, Z = blockCoordinates(S, t_y, t_x, n_y, n_x, M, N)
    index, order = randOrder(key, n_total)
    # Same key-seeded Henon offsets as encryption.
    rand.seed(key)
    key_henx = rand.uniform(0,1)
    key_heny = rand.uniform(0,1)
    D, F = makeDnF(np.array([key_heny]), np.array([key_henx]), S, n_total)
    # Undo the XOR diffusion stage first (XOR is its own inverse).
    luImage = secretMatrix(luImage.shape, [x, y, z])
    dbImage = np.bitwise_xor(dbImage, luImage)
    for key_ord in order:
        # Walk the permutation backwards: blocks overlap, so they must be
        # un-scrambled in exactly the reverse of the encryption order.
        i = order[(len(order)-1)-key_ord]
        block = makeBlock(dbImage, Z[i, 0], Z[i, 1], S)
        block = reversedoublescan(block, D[i], F[i])
        dbImage = swapblock(dbImage, block, Z[i, 0], Z[i, 1])
        print("Block:{} of {} done".format(key_ord+1, len(order)))
    print("Decrypted file saved as: "+outputfile)
    image = Image.fromarray(np.uint8(dbImage))
    image.show  # NOTE(review): missing "()" — no-op; image.show() was presumably intended
    image.save(outputfile)
    return dbImage
def swapblock(image, block, startY, startX):
    """Copy ``block`` into ``image`` with its top-left corner at
    (startY, startX), mutating ``image`` in place.

    Returns ``image`` (the same, mutated object) for call-chaining.
    Improvement: replaces the per-pixel Python double loop with a single
    vectorized slice assignment — identical result, O(1) Python overhead.
    """
    height = block.shape[0]
    width = block.shape[1]
    image[startY:startY + height, startX:startX + width] = block
    return image
def doublescan(block, startY, startX):
    """Flatten ``block`` by reading its pixels along a keyed "double scan":
    starting on row ``startY`` at column ``startX``, spiral outward over the
    upper-left region and the lower region, then sweep the remaining interior;
    the collected values are reshaped back to the block's shape.

    Inverse: ``reversedoublescan`` writes pixels back in the same visit order.
    NOTE(review): indentation of this function was lost in extraction; the
    loop/branch nesting below is a reconstruction — verify against history.
    NOTE(review): repeated np.append makes this O(n^2); collecting into a
    list and concatenating once would be linear.
    """
    result = np.empty(0, dtype= np.uint8)
    minX = minY = 0
    maxX = block.shape[1]-1; maxY = block.shape[0]-1
    # Clamp the keyed start position inside the block (partial edge blocks
    # can be smaller than S, while D/F offsets are computed modulo S).
    startY = maxY if startY > maxY else startY; startX = maxX if startX > maxX else startX
    initialX = startX
    x1 = 0; x2 = maxX
    y1 = startY; y2 = startY
    # Start row: from the start column to the right edge.
    for i in range(initialX, x2+1):
        result = np.append(result, block[y2, i])
    y2 -= 1
    if(y2 > minY):
        # First ring of the upper region: up the right edge ...
        for i in range(y2, minY-1, -1):
            result = np.append(result, block[i, x2])
        x2 -= 1
        # ... left along the top row ...
        for i in range(x2, minX-1, -1):
            result = np.append(result, block[minY, i])
        minY += 1
        if(minX < initialX):
            # ... and down the full left edge; the lower sweep then skips col 0.
            for i in range(minY, maxY+1):
                result = np.append(result, block[i, minX])
            minX += 1
            x1 += 1
    # Lower region: spiral inward from the bottom edge.
    while(y1 < maxY):
        for i in range(x1, maxX+1):
            result = np.append(result, block[maxY, i])
        maxY -= 1
        if(y1 < maxY and x1 < maxX):
            for i in range(maxY, y1, -1):
                result = np.append(result, block[i, maxX])
            maxX -= 1
        if(x1 < maxX and y1 < maxY):
            for i in range(maxX, x1-1, -1):
                result = np.append(result, block[y1+1, i])
            y1 += 1
        if(y1 < maxY and x1 < maxX):
            for i in range(y1+1, maxY+1):
                result = np.append(result, block[i, x1])
            x1 += 1
    # Left-over part of the start row, to the left of the start column.
    if((initialX)> minX):
        for i in range(initialX-1, minX-1, -1):
            result = np.append(result, block[y2+1, i])
    # Remaining interior of the upper region: spiral inward.
    while(x2 >= minX and y2 >= minY):
        for i in range(y2, minY-1, -1):
            result = np.append(result, block[i, minX])
        minX += 1
        if(minX < x2 and minY < y2):
            for i in range(minX, x2+1):
                result = np.append(result, block[minY, i])
            minY += 1
        if(minY < y2 and minX < x2):
            for i in range(minY, y2+1):
                result = np.append(result, block[i, x2])
            x2 -= 1
        if(minX < x2 and minY < y2):
            for i in range(x2, minX-1, -1):
                result = np.append(result, block[y2, i])
            y2 -= 1
    # np.append flattened the per-pixel channel vectors; restore block shape.
    result = result.reshape(block.shape)
    return result
def reversedoublescan(block, startY, startX):
    """Inverse of ``doublescan``: walk the same keyed scan path, but instead of
    reading pixels in path order, write the block's row-major pixels back into
    the positions the path visits.

    NOTE(review): indentation of this function was lost in extraction; the
    loop/branch nesting below mirrors the reconstruction used in
    ``doublescan`` — verify against history.
    """
    result = np.empty(block.shape, dtype= block.dtype)
    minX = minY = 0
    maxX = block.shape[1]-1; maxY = block.shape[0]-1
    # Same clamping of the keyed start position as in doublescan.
    startY = maxY if startY > maxY else startY; startX = maxX if startX > maxX else startX
    initialX = startX
    x1 = 0; x2 = maxX
    y1 = startY; y2 = startY
    # Source pixels in row-major order; reshape(-1, 3) assumes 3 channels.
    flat = block.reshape(-1,3)
    j = 0
    # Start row: start column to the right edge.
    for i in range(initialX, x2+1):
        result[y2, i] = flat[j]
        j += 1
    y2 -= 1
    if(y2 > minY):
        # First ring of the upper region (right edge, top row, left edge).
        for i in range(y2, minY-1, -1):
            result[i, x2] = flat[j]
            j += 1
        x2 -= 1
        for i in range(x2, minX-1, -1):
            result[minY, i] = flat[j]
            j += 1
        minY += 1
        if(minX < initialX):
            for i in range(minY, maxY+1):
                result[i, minX] = flat[j]
                j += 1
            minX += 1
            x1 += 1
    # Lower region: spiral inward from the bottom edge.
    while(y1 < maxY):
        for i in range(x1, maxX+1):
            result[maxY, i] = flat[j]
            j += 1
        maxY -= 1
        if(y1 < maxY and x1 < maxX):
            for i in range(maxY, y1, -1):
                result[i, maxX] = flat[j]
                j += 1
            maxX -= 1
        if(x1 < maxX and y1 < maxY):
            for i in range(maxX, x1-1, -1):
                result[y1+1, i] = flat[j]
                j += 1
            y1 += 1
        if(y1 < maxY and x1 < maxX):
            for i in range(y1+1, maxY+1):
                result[i, x1] = flat[j]
                j += 1
            x1 += 1
    # Left-over part of the start row, to the left of the start column.
    if((initialX)> minX):
        for i in range(initialX-1, minX-1, -1):
            result[y2+1, i] = flat[j]
            j += 1
    # Remaining interior of the upper region: spiral inward.
    while(x2 >= minX and y2 >= minY):
        for i in range(y2, minY-1, -1):
            result[i, minX] = flat[j]
            j += 1
        minX += 1
        if(minX < x2 and minY < y2):
            for i in range(minX, x2+1):
                result[minY, i] = flat[j]
                j += 1
            minY += 1
        if(minY < y2 and minX < x2):
            for i in range(minY, y2+1):
                result[i, x2] = flat[j]
                j += 1
            x2 -= 1
        if(minX < x2 and minY < y2):
            for i in range(x2, minX-1, -1):
                result[y2, i] = flat[j]
                j += 1
            y2 -= 1
    return result
def randOrder(key, n_t):
    """Derive a keyed processing permutation for ``n_t`` blocks.

    Returns ``(index, order)`` where ``index`` is the sorted array of block
    ids (always 0..n_t-1, since the sample is a full permutation) and
    ``order`` maps position -> block id according to the key-seeded shuffle.
    """
    rand.seed(key)
    permutation = np.array(rand.sample(range(n_t), n_t))
    order_map = dict(enumerate(permutation))
    return np.sort(permutation), order_map
def makeDnF(Y, X, S, n_t):
    """Iterate the Henon map (a=1.4, b=0.3) from the key-derived seeds in
    ``X``/``Y`` and turn each of the ``n_t`` iterates into a scan start
    offset in [0, S): take 48 fractional bits, reduce modulo S, floor.

    Returns ``(D, F)`` — int32 arrays of length ``n_t`` with the per-block
    column/row start offsets used by doublescan/reversedoublescan.

    Fix: the original had a trailing comma after the ``F = np.empty(...)``
    line, which made ``F`` a 1-tuple; np.append only worked on it by
    accident (numpy coerced the tuple back to an array). Values produced
    are unchanged.
    """
    for i in range(n_t):
        # Henon map: x_{n+1} = 1 - 1.4*x_n^2 + y_n ; y_{n+1} = 0.3*x_n
        X = np.append(X, 1 - (1.4 * (X[i] ** 2)) + Y[i])
        Y = np.append(Y, 0.3 * X[i])
    D = np.empty(0, dtype=np.uint64)
    F = np.empty(0, dtype=np.uint64)
    for i in range(1, n_t + 1):
        # Scale into the integer domain (2**48), wrap modulo S, drop fraction.
        D = np.append(D, ((X[i] * (2 ** 48)) % S) // 1)
        F = np.append(F, ((Y[i] * (2 ** 48)) % S) // 1)
    return D.astype("int32"), F.astype("int32")
def blockCoordinates(S, t_y, t_x, n_y, n_x, M, N):
    """Compute the top-left origins of the overlapping SxS block grid.

    Adjacent blocks overlap by t_y rows / t_x columns, so origins step by
    (S - t) per axis.  When the image dimension divides the grid exactly,
    one extra origin is emitted on that axis.  Returns ``(Y, X, Z)`` with
    Z[i] = (row, col) of block i, in row-major block order.
    """
    count_y = n_y + 1 if (M - t_y) % (S - t_y) == 0 else n_y
    count_x = n_x + 1 if (N - t_x) % (S - t_x) == 0 else n_x
    Y = np.array([k * (S - t_y) for k in range(count_y)])
    X = np.array([k * (S - t_x) for k in range(count_x)])
    Z = np.array([[row, col] for row in Y for col in X]).reshape(Y.size * X.size, 2)
    return Y, X, Z
def makeBlock(image, startY, startX, S):
    """Return a copy of the up-to SxS sub-block of ``image`` whose top-left
    corner is (startY, startX); the block is clipped at the image edges.
    """
    M = image.shape[0]
    N = image.shape[1]
    stopY = M if startY + S > M else startY + S
    stopX = N if startX + S > N else startX + S
    # Slice-and-copy replaces the original nested list-comprehension copy;
    # same values, shape and dtype, returned as an independent array.
    return image[startY:stopY, startX:stopX].copy()
def luMapKeys(image):
    """Derive the (x, y, z) chaotic-system seed from image content.

    x = (sum of all pixel values / (M*N)) / 255
    y = (XOR of all pixel values) / 255
    z = x + y

    Improvement: the original iterated every pixel/channel in three nested
    Python loops twice (O(M*N*C) interpreter work); ``ndarray.sum`` and
    ``np.bitwise_xor.reduce`` compute the same quantities in C.  XOR is
    commutative/associative, so reduction order does not matter.
    """
    M = image.shape[0]
    N = image.shape[1]
    sums = image.sum(dtype=np.int64)                 # total of all channel values
    xor = np.bitwise_xor.reduce(image, axis=None)    # XOR-fold over every element
    x = (sums / (M * N)) / 255
    y = xor / 255
    z = x + y
    return x, y, z
def f(state, t):
    """Right-hand side of the Chen chaotic system (a=36, b=3, c=20).

    ``t`` is unused but required by scipy's odeint callback signature.
    Returns (dx/dt, dy/dt, dz/dt).
    """
    x, y, z = state
    dx = 36 * (y - x)
    dy = 20 * y - x * z
    dz = x * y - 3 * z
    return dx, dy, dz
def secretMatrix(shape, key):
    """Generate the chaotic keystream volume used for the XOR stage.

    Integrates the Chen system ``f`` from initial state ``key`` over M*N
    time points (step 0.02 up to M*N/50), yielding an (M*N, 3) trajectory
    that is reshaped to ``shape`` and truncated to uint8.
    """
    M = shape[0]
    N = shape[1]
    timeline = np.arange(0, (M * N) / 50, .02)
    trajectory = odeint(f, key, timeline)
    return trajectory.reshape(shape).astype(np.uint8)
| [
"noreply@github.com"
] | noreply@github.com |
20f656e4053fc210247251dfcef186225ccfd050 | a08ea65c1ef699171e11c10da75f8291cb0c743c | /Numpy code/numpy1.py | de5eae1343112cb085c004920070b96f1d5495af | [] | no_license | hussainMansoor876/Numpy-And-Falsk-Exercise | 476551257d940965eadbea27f5c61d978475ed1f | 1930dee5ac07dc9a18c30c45e196060cf73095d0 | refs/heads/master | 2020-03-28T19:23:50.718360 | 2018-09-16T07:17:39 | 2018-09-16T07:17:39 | 148,971,926 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | # import numpy as np
from functools import partial
# def numpysum(n):
# a = np.arange(n) ** 2
# b = np.arange(n) ** 3
# c = a + b
# return c
# numpysum(10)
# print(numpysum(10))
# a = np.arange(5)
# print(a.dtype)
# # print(a)
# n=[1,2,3,4,5]
# a,b,c,d,e=n
# print(a)
# print(b)
# print(c)
# print(d)
# print(e)
# Build the list of integers 1..10 and print their sum (55).
s = [value for value in range(1, 11)]
print(sum(s))
| [
"“hussainmansoor876@gmail.com”"
] | “hussainmansoor876@gmail.com” |
cdd5a31a1454daea675c492521e6a22eed8d06bc | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /unistream_examples/get_agent_point_info.py | 0278d0f98ae5b4b9aca91d42c4b6f9d5fe4f01f8 | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""
Получение информации о точке предоставления услуги.
GET https://test.api.unistream.com/v1/agents/{agentId}/poses/{posId}
"""
if __name__ == '__main__':
    # Project helpers: RFC 1123 date stamp and the Unistream signature header.
    from utils import get_today_RFC1123_date, get_authorization_header
    from config import APPLICATION_ID, SECRET
    # Partner (agent) identifier
    AGENT_ID = -1
    # Identifier of the point of service
    POS_ID = -1
    params = {
        'agentId': AGENT_ID,
        'posId': POS_ID,
    }
    URL = 'https://test.api.unistream.com/v1/agents/{agentId}/poses/{posId}'.format(**params)
    TODAY_DATE = get_today_RFC1123_date()
    # The Date header is set first because it participates in the signature
    # computed by get_authorization_header.
    headers = dict()
    headers['Date'] = TODAY_DATE
    headers['Authorization'] = get_authorization_header(APPLICATION_ID, SECRET, TODAY_DATE, URL, headers)
    import requests
    rs = requests.get(URL, headers=headers)
    print(rs)       # response status line
    print(rs.text)  # raw response body
| [
"gil9red@gmail.com"
] | gil9red@gmail.com |
f68e80676e72be2e4597cabb98f6e8312c69fc60 | d9cd697f76565e8230a98909204a5c516437f977 | /tutorial/tutorial/settings.py | 7b7807db8743db25310455fe110dcac0eed68dba | [] | no_license | huazhicai/webspider | be20d0d3a248ef8cbfaab8e3d1fd0e8ac7551352 | a1defa3778956accbb7617c9a3798d02e0b175f6 | refs/heads/master | 2020-03-22T09:00:23.518744 | 2019-07-11T14:53:37 | 2019-07-11T14:53:37 | 139,807,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | # -*- coding: utf-8 -*-
# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Scrapy project identity: bot name, plus where spiders live and where
# "scrapy genspider" puts new ones.
BOT_NAME = 'tutorial'
SPIDER_MODULES = ['tutorial.spiders']
NEWSPIDER_MODULE = 'tutorial.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
# Obey robots.txt rules
# NOTE(review): robots.txt is deliberately ignored here (the project-template
# default is True); confirm the target sites permit this crawling.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'tutorial.middlewares.TutorialSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'tutorial.middlewares.TutorialDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# Lower number = earlier in the pipeline chain: TextPipeline runs before
# MongoPipeline, which presumably persists items using the MONGO_* settings
# below — verify against tutorial/pipelines.py.
ITEM_PIPELINES = {
    'tutorial.pipelines.TextPipeline': 300,
    'tutorial.pipelines.MongoPipeline': 400,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# MongoDB connection settings (host/IP and database name) read by the
# project's Mongo pipeline.
MONGO_URI = '192.168.11.138'
MONGO_DB = 'tutorial'
| [
"936844218@qq.com"
] | 936844218@qq.com |
382095ec11a67627f57ad1c851361509c9a2110d | 1fdf153c14eb70eba8ad59b41ac0e53cf7611bd0 | /backend/scholar/settings.py | bf15410d2201d4650f825e1a4f37869f823929eb | [] | no_license | kegeer/xsv2 | b0814ff853e54c260701b6945f1e3083d66169f1 | 1a8bdea0fe946727ae8283c5a5346db88ddeb783 | refs/heads/master | 2022-12-15T00:22:24.467965 | 2018-04-12T23:48:52 | 2018-04-12T23:48:52 | 129,317,782 | 0 | 0 | null | 2022-12-08T01:00:45 | 2018-04-12T22:27:28 | JavaScript | UTF-8 | Python | false | false | 5,660 | py | """
Django settings for scholar project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded and committed to the repository; load
# it from an environment variable before any production deployment.
SECRET_KEY = 'w*&68njj_@0s#xkv_as1rm5+q$rn0vmnn)^#hz#@r&ut7)v-(&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# '*' accepts any Host header — acceptable for development only.
ALLOWED_HOSTS = ['*',]
# Application definition
INSTALLED_APPS = [
    # Django contrib apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps
    'corsheaders',
    'django_extensions',
    'rest_framework',
    'polymorphic',
    # Project apps
    'scholar.apps.profiles',
    'scholar.apps.authentication',
    'scholar.apps.projects',
    'scholar.apps.highlights',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware sits above CommonMiddleware, as django-cors-headers requires.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'scholar.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],  # no project-level template dirs; app templates only
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'scholar.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    # 'default': {
    #     'ENGINE': 'django.db.backends.sqlite3',
    #     'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    # },
    # PostgreSQL with credentials from the environment; HOST 'postgres' is
    # presumably the docker-compose service name — verify against the compose file.
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME':os.environ["POSTGRES_DB"],
        'USER':os.environ["POSTGRES_USER"],
        'PASSWORD':os.environ["POSTGRES_PASSWORD"],
        'HOST': 'postgres',
        'PORT': '',
    },
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): permits cross-origin requests from any origin — development only.
CORS_ORIGIN_ALLOW_ALL = True
""" 自定义的用户表格 """
# Custom user model (the bare string above means "custom user table").
AUTH_USER_MODEL = 'authentication.User'
# Rest framework definition
REST_FRAMEWORK = {
    # 'DEFAULT_FILTER_BACKENDS': (
    #     'django_filters.rest_framework.DjangoFilterBackend',
    #     'rest_framework.filters.SearchFilter',
    #     'rest_framework.filters.OrderingFilter',
    # ),
    # JSON:API request/response handling via djangorestframework-jsonapi.
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework_json_api.parsers.JSONParser',
    ),
    # Project JWT backend (see scholar.apps.authentication.backends).
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'scholar.apps.authentication.backends.JWTAuthentication',
    ),
    'DEFAULT_METADATA_CLASS':
        'rest_framework_json_api.metadata.JSONAPIMetadata',
    'EXCEPTION_HANDLER':
        'rest_framework_json_api.exceptions.exception_handler',
    'DEFAULT_PAGINATION_CLASS':
        'rest_framework_json_api.pagination.PageNumberPagination',
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework_json_api.renderers.JSONRenderer',
    ),
}
""" JOSNAPI 设置 """
# JSON:API naming conventions (the bare string above means "JSON API settings";
# "JOSNAPI" is a typo in the original note).
JSON_API_FORMAT_KEYS = 'dasherize'
JSON_API_FORMAT_TYPES = 'dasherize'
JSON_API_PLURALIZE_TYPES = True
""" 文献识别模块 """
# Docker compose 配置
GROBID_HOST = 'http://grobid:8070/api'
# 本地开发配置
# GROBID_HOST = 'http://localhost:8070/api'
GROBID_THREADS = 4
""" 上传目录,会逐步更改为OSS """
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads/')
# Celery settings
# CELERY_BROKER_URL = 'redis://localhost:6379/0'
CELERY_BROKER_URL = 'redis://redis/0'
# use json format for everything
# CELERY_ACCEPT_CONTENT = ['json']
# CELERY_TASK_SERIALIZER = 'jso
# CELERY_RESULT_SERIALIZER = 'json'
# Aliyun Api Setting
ALIYUN_ACCESSKEYID = 'LTAIVcC4QysIyRpu'
ALIYUN_ACCESSKEYSECRET = '8XDh1d1t8ebgSV2JMYB0SdygIkJzaJ'
ALIYUN_DM_ACCOUNT = 'dm.aliyuncs.com'
ALIYUN_DM_ENDPOINT = 'test@test.com'
ALIYUN_SMS_SIGNNAME = 'xueshu'
ALIYUN_SMS_ENDPOINT = 'sms.aliyuncs.com'
ALIYUN_OSS_END_POINT = 'oss-cn-beijing.aliyuncs.com'
ALIYUN_OSS_END_INTERNAL_POINT = 'oss-cn-beijing-internal.aliyuncs.com'
ALIYUN_OSS_DOMAIN = 'oss.linkick.com'
| [
"zhangkeger@qq.com"
] | zhangkeger@qq.com |
807c84ca2a5d2e55897e76ad73c802210aa7bde3 | 2764319f07b328d0c0bb4bf7300d8b499621b121 | /Missions_to_Mars/scrape_mars.py | bd128cfe23fc2a09536dda2e8dd0e6fdcdd1cf5f | [] | no_license | erik-hernandez-25/web-scraping-challenge | 8a098fc62474544ed98e72d69002f15090a27ae4 | d086059035ba143e7642d892d1b147f8313132f2 | refs/heads/main | 2023-04-06T23:57:17.983696 | 2021-04-10T23:46:21 | 2021-04-10T23:46:21 | 356,629,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,211 | py | import pandas as pd
from bs4 import BeautifulSoup
from splinter import Browser
from webdriver_manager.chrome import ChromeDriverManager
def init_browser():
executable_path = {'executable_path': ChromeDriverManager().install()}
return Browser('chrome', **executable_path, headless=False)
def scrape():
browser = init_browser()
# Visit the following URL
url = "https://redplanetscience.com/"
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
current_news_title = soup.find('div', id='news').find('div', class_='content_title').text
current_news_p = soup.find('div', id='news').find('div', class_='article_teaser_body').text
# # EXTRACT NASA SPACE IMAGES
url = "https://spaceimages-mars.com"
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
# Image source
img_source = soup.findAll('img','headerimage fade-in')
image_url =[]
for image in img_source:
#print image source
image_url.append(image['src'])
#Build Complete Path
base_url = "https://spaceimages-mars.com/"
# the image URL
featured_image_url = base_url + image_url[0]
# # EXTRACT MARS FACTS
url = "https://galaxyfacts-mars.com"
browser.visit(url)
#html = browser.html
#soup = BeautifulSoup(html, "html.parser")
#table = soup.find_all('table')[0]
mars_facts = pd.read_html(url)[0]
# Converting table to html
mars_table = mars_facts.to_html()
# # MARS HEMISPHERES PICTURES
base_url = "https://marshemispheres.com/"
cerberus_emisphere_url = "https://marshemispheres.com/cerberus.html"
schiaparelli_emisphere_url = "https://marshemispheres.com/schiaparelli.html"
syrtis_emisphere_url = "https://marshemispheres.com/syrtis.html"
valles_emisphere_url= "https://marshemispheres.com/valles.html"
# Image source
browser.visit(cerberus_emisphere_url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
hemispheres = []
img_source = soup.find('div',class_='downloads').a['href']
hemispheres.append(base_url + img_source)
# Image source
browser.visit(schiaparelli_emisphere_url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
img_source = soup.find('div',class_='downloads').a['href']
hemispheres.append(base_url + img_source)
# Image source
browser.visit(syrtis_emisphere_url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
img_source = soup.find('div',class_='downloads').a['href']
hemispheres.append(base_url + img_source)
# Image source
browser.visit(valles_emisphere_url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
img_source = soup.find('div',class_='downloads').a['href']
hemispheres.append(base_url + img_source)
#Python dictionary to store the data using the keys `img_url` and `title`.
hemispheres_list =['Cerberus Hemisphere', 'Schiaparelli Hemisphere','Syrtis Major Hemisphere','Valles Marineris']
hemisphere_data_1 = [{'title': hemispheres_list[0], 'img_url':hemispheres[0]}]
hemisphere_data_2 = [{'title': hemispheres_list[1], 'img_url':hemispheres[1]}]
hemisphere_data_3 = [{'title': hemispheres_list[2], 'img_url':hemispheres[2]}]
hemisphere_data_4 = [{'title': hemispheres_list[3], 'img_url':hemispheres[3]}]
#Close the browser after scraping
browser.quit()
#Dictionary
mars_data = {
"mars_news_headlines": current_news_title,
"mars_news_abstract": current_news_p,
"mars_featured_image": featured_image_url,
"mars_facts":mars_table,
"mars_hemisphere_1_name": hemispheres_list[0],
"mars_hem_1_url": hemispheres[0],
"mars_hemisphere_2_name": hemispheres_list[1],
"mars_hem_2_url": hemispheres[1],
"mars_hemisphere_3_name": hemispheres_list[2],
"mars_hem_3_url": hemispheres[2],
"mars_hemisphere_4_name": hemispheres_list[3],
"mars_hem_4_url": hemispheres[3]
}
return mars_data
#if __name__ == "__main__":
# data = scrape()
# print(data)
| [
"erik.hernandez.enriquez@gmail.com"
] | erik.hernandez.enriquez@gmail.com |
2068e5ba0f55d91f5d12e1ef5b42fb4d6fbf661d | f03394e5840ee71ef9d1471718375d076d8e7ead | /wiki/encyclopedia/views.py | 371ac0333d52261f84793b994e858ba905835137 | [] | no_license | kane090/project1-cs50 | bef230d91dce573063cee3e2c5d22c40fc6235a8 | 8133d99cdbd70ed54efee46929d6345d22aa1726 | refs/heads/master | 2023-06-05T01:41:31.610041 | 2021-07-03T21:06:01 | 2021-07-03T21:06:01 | 371,736,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,544 | py | from django.http.response import HttpResponseRedirect
from django.shortcuts import render
from markdown2 import markdown
from django.urls import reverse
from random import randrange
from . import util
def index(request):
page_name = request.GET.get('q')
if page_name != None:
return search(request, page_name)
else:
return render(request, "encyclopedia/index.html", {
"entries": util.list_entries()
})
def entry(request, page_name):
try:
return render(request, "encyclopedia/entry.html", {
"title": page_name,
"entry": markdown(util.get_entry(page_name))
})
except TypeError:
return render(request, "encyclopedia/error.html")
def search(request, page_name):
subset_entries = []
og_entries_list = util.list_entries()
entries_list = [entry.lower() for entry in og_entries_list]
if (page_name in entries_list) or (page_name in og_entries_list):
return entry(request, page_name)
else:
for i in range(len(entries_list)):
if page_name in entries_list[i]:
subset_entries.append(og_entries_list[i])
return render(request, "encyclopedia/search_results.html", {
"entries": subset_entries
})
def new_entry(request):
current_entries = [entry.lower() for entry in util.list_entries()]
title = request.POST.get('title')
content = request.POST.get('markdown')
if title != None:
lower_title = title.lower()
if lower_title in current_entries:
return render(request, "encyclopedia/error_alreadyexists.html")
else:
content = content.replace("\r", "")
util.save_entry(title, content)
return HttpResponseRedirect(reverse("entry", args=[title]))
else:
return render(request, "encyclopedia/new.html")
def edit_entry(request, page_name):
content = util.get_entry(page_name)
new_content = request.POST.get('markdown')
if new_content != None:
new_content = new_content.replace("\r", "")
util.save_entry(page_name, new_content)
return HttpResponseRedirect(reverse("entry", args=[page_name]))
else:
return render(request, "encyclopedia/edit.html", {
"title": page_name,
"content": content
})
def random_page(request):
current_entries = util.list_entries()
index = randrange(0, len(current_entries))
return HttpResponseRedirect(reverse("entry", args=[current_entries[index]]))
| [
"rishu.ks@gmail.com"
] | rishu.ks@gmail.com |
a8fa9cda3bebfc92ff9f07a8a13bbb90dcc9400b | b1df343d5dbc1066913e6133b191f05706c71f0e | /17.9.1.py | 10f9696c1b51a72e09aa3bef9ac32d61d7209d38 | [] | no_license | artemovalex/pubic_qap-13 | c41958a737e8fa753c6138e15b84ce2d741d89b1 | 7d444fdbe24cef989f940f1624cada8e60479432 | refs/heads/master | 2023-04-25T11:39:24.675277 | 2021-05-24T10:12:07 | 2021-05-24T10:12:07 | 323,099,153 | 2 | 1 | null | 2020-12-24T08:21:41 | 2020-12-20T15:08:35 | null | UTF-8 | Python | false | false | 1,924 | py | string = input("Введите числа через пробелов:")
my_list = list(map(int, string.split())) # cписок чисел
element =int(input("Введите любое число:"))
def sort(array): # функциия сортировки массива по возрастанию
for i in range(1, len(array)):
x = array[i]
idx = i
while idx > 0 and array[idx-1] > x:
array[idx] = array[idx-1]
idx -= 1
array[idx] = x
def binary_search(array, element, left, right): # функция поиска индекса введоного числа
if left > right: # если левая граница превысила правую,
return False # значит элемент отсутствует
middle = (right + left) // 2 # находимо середину
if array[middle] == element: # если элемент в середине,
return middle # возвращаем этот индекс
elif element < array[middle]: # если элемент меньше элемента в середине
# рекурсивно ищем в левой половине
return binary_search(array, element, left, middle - 1)
else: # иначе в правой
return binary_search(array, element, middle + 1, right)
sort(my_list)
ind = binary_search(my_list, element, 0, len(my_list))
if ind == 0:
print("Нет элемента меньше введеного")
else:
print("Позиция элемента, который меньше введенного Вами числа: ",ind - 1)
if ind == len(my_list)-1:
print("Нет элемента больше введеного")
else:
print("Позиция элемента, который больше или равен введенному Вами числу: ",ind + 1) | [
"noreply@github.com"
] | noreply@github.com |
01d3691dce55255c364bb881f05bb97a3c770ca9 | 5982cd8db693927e83cd99f8ea1acf4fc90b8b9b | /Configurations/ControlRegions/WgS/torqueBatch/configuration1.py | 7d9ce4b429ce76b6bac43226395ae897e4ab9636 | [] | no_license | cedricpri/PlotsConfigurations | 61fc78ce9f081fd910a25f8101ea8150a7312f25 | 5cb0a87a17f89ea89003508a87487f91736e06f4 | refs/heads/master | 2021-01-17T09:46:55.026779 | 2016-09-01T09:30:09 | 2016-09-01T09:30:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | # example of configuration file
tag = 'WgS'
# used by mkShape to define output directory for root files
outputDir = 'rootFile1'
# file with list of variables
variablesFile = '../variables.py'
# file with list of cuts
cutsFile = '../cuts.py'
# file with list of samples
samplesFile = 'samples1.py'
# file with list of samples
plotFile = 'plot.py'
# luminosity to normalize to (in 1/fb)
# lumi = 2.264
#lumi = 2.318
lumi = 2.6
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plotWgS'
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'
# structure file for datacard
structureFile = 'structure.py'
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
| [
"d4space@gmail.com"
] | d4space@gmail.com |
f872ee5f774e2e558b2c18dfd74ac182fe9c8e89 | fc6d68e54506f285a8b537a3e3a10f6645301ac2 | /fixture/mail.py | 5abf23d2bd0de65285df193f979682606c06c517 | [] | no_license | alpikin63/pyton_lessons | 94df17a7456ae3efdf8d63dfa843d165f3aba59c | c6fce6f9d2884ef79ee80d43d10acb6709951ea5 | refs/heads/master | 2020-04-09T20:07:47.997333 | 2019-02-02T09:52:52 | 2019-02-02T09:52:52 | 160,564,777 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,221 | py | from __future__ import print_function
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from apiclient import errors
# If modifying these scopes, delete the file token.json.
class Gmail:
SCOPES = 'https://www.googleapis.com/auth/gmail.modify'
SCOPES_FILTER = 'https://www.googleapis.com/auth/gmail.settings.basic'
def __init__(self):
"""Shows basic usage of the Gmail API.
Lists the user's Gmail labels.
"""
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# # time.
store = file.Storage('../token.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('../credentials.json', self.SCOPES)
creds = tools.run_flow(flow, store)
self.service = build('gmail', 'v1', http=creds.authorize(Http()))
def auth_settings(self):
store = file.Storage('../token_auth.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('../credentials_auth.json', self.SCOPES_FILTER)
creds = tools.run_flow(flow, store)
self.service = build('gmail', 'v1', http=creds.authorize(Http()))
print(self.service.users().settings().filters().list(userId='me').execute())
def get_labels(self):
# Call the Gmail API
results = self.service.users().labels().list(userId='me').execute()
labels = results.get('labels', [])
if not labels:
print('No labels found.')
else:
print(labels)
return labels
def createLabel(self, label_name=''):
label_object = self.MakeLabel(label_name=label_name)
label = self.service.users().labels().create(userId='me', body=label_object).execute()
return label
def MakeLabel(self, label_name, mlv='show', llv='labelShow'):
"""Create Label object."""
label = {'messageListVisibility': mlv,
'name': label_name,
'labelListVisibility': llv}
return label
def deletelabel(self, lable_name):
label_list = self.get_labels()
lable_id = ''
for label in label_list:
if label['name'] == lable_name:
lable_id = label['id']
self.service.users().labels().delete(userId='me', id=lable_id).execute()
def makeFilter(self, sender, label):
filter = {
"id": 'test',
"criteria": {
"from": sender,
},
"action": {
"addLabelIds": [
label
]
}
}
return filter
def createFilter(self, sender, labdel):
filterobj = self.makeFilter(sender, labdel)
filter = self.service.users().settings().filters().create(userId='me', body=filterobj).execute()
return filter
a = Gmail()
#a.get_labels()
a.auth_settings()
#a.createFilter("pikin@aeroidea.ru", 'Label_2')
#a.createLabel(label_name='test1234')
#a.deletelabel('test1234')
| [
"alpikin63@gmail.com"
] | alpikin63@gmail.com |
fad12f4564f15cf7dbae7692278ba828cd5fd714 | efb6c4b02f29d164bc66f07eeda3c8026c797899 | /ec2stuff/email_when_fin.py | 651ef541bf5251f54dc6552296acb4887408161e | [] | no_license | swederik/AmazonEC2scripts | 6e2c7742300e077c68b8e205ea732696ee8a3fd6 | b74504388d6b937c9dbf27c345609b38ae04eab2 | refs/heads/master | 2016-09-05T17:38:34.579620 | 2013-07-14T23:26:14 | 2013-07-14T23:26:14 | 11,410,111 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | import smtplib
#itisfinishedprocessing@gmail.com.
def send_email(TO=["erik.ziegler@ulg.ac.be"], SUBJECT="DONE", TEXT="Finished"):
SERVER = "smtp.gmail.com:587"
username = "itisfinishedprocessing"
password = "throwawaypassword"
FROM = "itisfinishedprocessing@gmail.com"
#TEXT = "This message was sent with Python's smtplib."
# Prepare actual message
message = """\
From: %s
To: %s
Subject: %s
%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
# Send the mail
server = smtplib.SMTP(SERVER)
server.starttls()
server.login(username,password)
server.sendmail(FROM, TO, message)
server.quit() | [
"erik.ziegler@ulg.ac.be"
] | erik.ziegler@ulg.ac.be |
e70c2c624145508c69b85b1532869a38a84ba988 | 5ea4d0bcb8d493f37cc1d04bdec6a99b9afd38ed | /django_config/settings.py | 5dfa2afce3733969d20e13e1f52f2a29fa271052 | [] | no_license | youssriaboelseod/ERMS_Django_Web_Multi_DB | e5042ae20d42855143a9f3b3ecd28913cdae99f8 | 8f2354c095412859a15b3f5072c193fa74e3c6e3 | refs/heads/master | 2022-04-19T22:33:31.243690 | 2020-04-16T09:23:38 | 2020-04-16T09:23:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,421 | py | """
Django settings for django_config project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!_fmhyco5=5!iv4lfq2s2c&9f$ub6r*kq0gwdb(^(i(^s#4m64'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app_django',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASE_ROUTERS=['app_django.dbrouters.DBRouter']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
'admindata': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'admindb'),
},
'empdata':{
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'empdb'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"lalithaprasad@Lalithas-MBP.domain.name"
] | lalithaprasad@Lalithas-MBP.domain.name |
fcdcbeba752542d4e128ddebf54c68d5df123be8 | 385c01f7337cf5031093147f6731251bfbf17430 | /lms/level/containers/get_by_id.py | d7e05c88e2f38349997208a7d052eb38bf54862a | [] | no_license | lucassimon/lmswebaula | 23a73d6d2d43c78a2f9e3b552113cf50a11a3587 | 671276426685968458f240faa93b313427fa32d9 | refs/heads/master | 2021-01-19T13:26:12.352308 | 2017-08-16T21:07:43 | 2017-08-16T21:07:43 | 88,088,474 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import six
class GetByIdRQ(object):
_level_id = None
_lms_level_id = None
def __init__(self, lms_level_id=None, level_id=None):
if lms_level_id:
if not isinstance(lms_level_id, six.integer_types):
raise ValueError(
'O lms id do nivel precisa ser um inteiro'
)
self._lms_level_id = lms_level_id
if level_id:
self._level_id = level_id
@property
def lms_level_id(self):
return self._lms_level_id
@lms_level_id.setter
def lms_level_id(self, value):
if not isinstance(value, six.integer_types):
raise ValueError(
'O lms id do nível precisa ser um inteiro'
)
self._lms_level_id = value
@property
def level_id(self):
return self._level_id
@level_id.setter
def level_id(self, value):
self._level_id = value
| [
"lucassrod@gmail.com"
] | lucassrod@gmail.com |
ae734d529bcbe273e29551f3ccd8c250513c04ad | f0aba1aa9949cc6a8d3678c0b3ecb5503b470c17 | /dtc/__init__.py | d9f20e7ca90c628c2cccd70a6d6bceb53e2bd4f4 | [] | no_license | hugosenari/dtc | 788eafc1a92701332ae54e2f2d74491566d635dd | 9bb2e6f4f9180b7291a5daf6a35903e5c59e3fc4 | refs/heads/master | 2020-12-24T17:44:45.474422 | 2012-08-03T20:02:28 | 2012-08-03T20:02:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,477 | py | '''
Created on Jun 30, 2012
@author: hugosenari
'''
import plugnplay
import logging
from os import path
from dtc.core import interfaces
from dtc.core.interfaces.module import _CoreModule
from dtc.core.interfaces.mainloop import MainLoop
from dtc import modules
class Dtc(object):
def __init__(self, dirs = []):
self.loaded_dirs = []
def set_plugin_dirs(arg, dirpath, files):
logging.debug('add dir: %s to path', dirpath)
self.loaded_dirs.append(dirpath)
path.walk(interfaces.__path__[0], set_plugin_dirs, None)
path.walk(modules.__path__[0], set_plugin_dirs, None)
for directory in dirs:
path.walk(directory, set_plugin_dirs, None)
plugnplay.set_plugin_dirs(*self.loaded_dirs)
logging.debug('Set up plugnplay')
def run(self, logger=logging, *args, **vargs):
logging.debug('load modules')
plugnplay.load_plugins(logger)
#get mainloop implementation
mainloop = vargs.get('mainloop', None)
if not mainloop:
loops = MainLoop.implementors()
if len(loops) > 0:
mainloop = loops[0]
vargs['mainloop'] = mainloop
for module in _CoreModule.implementors():
logging.debug('execute core module: %s', module)
module.execute_modules(*args, **vargs)
if mainloop:
mainloop.run() | [
"hugosenari@gmail.com"
] | hugosenari@gmail.com |
05f1381ac472766b4cd06fbd8153c99cc502c0e1 | 84d891b6cb6e1e0d8c5f3e285933bf390e808946 | /Demo/PO_V6/TestCases/test_login_pytest.py | 174dcfab88ddcee38ccb2eb6b5ea51c6f4e0d99d | [] | no_license | zzlzy1989/web_auto_test | 4df71a274eb781e609de1067664264402c49737e | 3e20a55836144e806496e99870f5e8e13a85bb93 | refs/heads/master | 2020-05-24T10:37:29.709375 | 2019-10-28T06:14:31 | 2019-10-28T06:14:31 | 187,230,775 | 2 | 0 | null | 2019-06-20T11:06:32 | 2019-05-17T14:29:11 | null | UTF-8 | Python | false | false | 2,400 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Name: test_login_pytest
# Author: 简
# Time: 2019/6/20
from Demo.PO_V6.PageObjects.login_page import LoginPage
from Demo.PO_V6.PageObjects.index_page import IndexPage
from Demo.PO_V6.TestDatas import login_datas as ld
from Demo.PO_V6.TestDatas import Comm_Datas as cd
import pytest
# pytestmark = pytest.mark.model # 模块级别的标签名
@pytest.mark.demo
@pytest.mark.usefixtures("session_action")
def test_demo():
print("111111111111111")
@pytest.mark.parametrize("a,b,c",[(1,3,4),(10,35,45),(22.22,22.22,44.44)])
def test_add(a,b,c):
res = a + b
assert res == c
# 用例三步曲:前置 、步骤 、 断言
# @ddt.ddt
# @pytest.mark.login # 整个TestLogin类里面,所有测试用例都有login标签。
@pytest.mark.usefixtures("open_url") # 使用函数名称为open_url的fixture
@pytest.mark.usefixtures("refresh_page")
class TestLogin:
pytestmark=pytest.mark.login # 整个TestLogin类里面,所有测试用例都有
# 异常用例 -....
@pytest.mark.parametrize("data", ld.wrong_datas)
def test_login_0_failed_by_wrong_datas(self, data):
# 步骤 - 登陆操作 - 登陆页面 - 密码为空 18684720553
LoginPage(self.driver).login(data["user"], data["passwd"])
# 断言 - 页面的提示内容为:请输入密码
self.assertEqual(data["check"], LoginPage(self.driver).get_error_msg_from_loginForm())
# 正常用例 - 登陆+首页
@pytest.mark.smoke
def test_login_2_success(self,open_url): # open_url = driver
# logging.info("用例1-正常场景-登陆成功-使用到测试数据-")
# 步骤 - 登陆操作 - 登陆页面 - 18684720553、python
LoginPage(open_url).login(ld.success_data["user"],ld.success_data["passwd"]) # 测试对象+测试数据
# 断言 - 页面是否存在 我的帐户 元素 元素定位+元素操作
assert IndexPage(open_url).check_nick_name_exists() == True # 测试对象+测试数据
# url跳转
assert open_url.current_url == ld.success_data["check"] # 测试对象+测试数据 # # 正常用例 - 登陆+首页
class TestTT:
pytestmark = pytest.mark.demo
# pytestmark = [pytest.mark.demo,pytest.mark.demo2]
def test_add(self):
c = 100 +200
assert c == 300
def test_demo(self):
print("demo!!!") | [
"394845369@qq.com"
] | 394845369@qq.com |
bcf22f01407a86941d648b53d961048f0bed20bc | 397f7b9f3e2381a1e6726cc2704c137ce86289a6 | /scripts/original_submission_scripts/z_old_sgv.Fig2C.py | 1e341832317f9af2e161d76c4eb7188c9512b93c | [] | no_license | Ken-A-Thompson/SVS | f7f0c9e39d86e9c1d33b8e21d68a5701ea9a5283 | 93562c2fce3769df4390f65034408b6b2dd3e969 | refs/heads/master | 2021-01-22T19:54:24.785353 | 2019-07-25T21:21:14 | 2019-07-25T21:21:14 | 85,253,057 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,984 | py | #Authors: Matthew Osmond <mmosmond@zoology.ubc.ca> & Ken A. Thompson <ken.thompson@zoology.ubc.ca>
#Description: The role of standing genetic variance in speciation
#Function: This script has populations adapt in parallel while varying d and n. Goal is to determine the value of n that minimizes variance load for a given d
import numpy as np
import time
import csv
import random
# import matplotlib.pyplot as plt
######################################################################
##FUNCTIONS##
######################################################################
def open_output_files(n, N, alpha, u, sigma, data_dir):
"""
This function opens the output files and returns file
handles to each.
"""
sim_id = 'n%d_N%d_alpha%.4f_u%.4f_sigma%.4f' %(n, N, alpha, u, sigma)
outfile_A = open("%s/Fig2B_%s.csv" %(data_dir, sim_id), "w")
return outfile_A
def write_data_to_output(fileHandles, data):
"""
This function writes a (time, data) pair to the
corresponding output file. We write densities
not abundances.
"""
writer = csv.writer(fileHandles)
writer.writerow(data)
def close_output_files(fileHandles):
"""
This function closes all output files.
"""
fileHandles.close()
def found(n_muts, ancestor_muts, ancestor_freqs, N_adapt, n):
"""
This function creates a founding population from an ancestral one
"""
#make ancestor
if n_muts > 0:
seg_id = [ancestor_freqs < 1] #indices for segregating mutations in ancestor
nmuts_max = np.sum(seg_id) #number of segregating mutations in ancestor
probs = ancestor_freqs[seg_id]/ancestor_freqs[seg_id].sum() #probability of choosing each mutation in sgv (make pdf)
mut_choice = np.random.choice(nmuts_max, size=n_muts, replace=False, p=probs) #indices of mutations to take from ancestor
mutfound = (ancestor_muts[seg_id])[mut_choice] #mutational effects
p_mut = (ancestor_freqs[seg_id])[mut_choice] #expected frequency of these mutations
popfound = np.random.binomial(1, p_mut, (N_adapt, n_muts)) #p_mut chance of having each of n_muts mutations, for all K_adapt individuals
fix_id = [ancestor_freqs == 1] #indices for fixed mutations in ancestor
mutfound = np.append(mutfound, ancestor_muts[fix_id], axis=0) #add fixed mutations to founding mutation matrix
addpop = np.array([1]*N_adapt*np.sum(fix_id)).reshape(N_adapt,np.sum(fix_id)) #matrix of 1s for fixed mutations
popfound = np.append(addpop, popfound, axis=1) #add fixed mutations to founding pop matrix
else: #de novo only, even if p_mut>0
popfound = np.array([[1]] * N_adapt)
mutfound = np.array([[0] * n])
return [popfound, mutfound]
def fitness(phenos, theta, sigma):
"""
This function determines relative fitness
"""
dist = np.linalg.norm(phenos - theta, axis=1) #phenotypic distance from optimum
w = np.exp(-0.5 * sigma * dist**2) #fitness
return w
def recomb(surv):
"""
This function creates offspring through pairing of parents (haploid) and recombination (i.e, meiosis)
"""
pairs = np.resize(np.random.choice(len(surv), size=len(surv), replace=False), (int(len(surv)/2), 2)) #random mate pairs (each mates at most once and not with self)
rand2 = np.random.randint(2, size=(len(pairs), len(surv[0]))) #from which parent each offspring inherits each allele (free recombination, fair transmission)
rec = np.resize(np.append(rand2, 1-rand2, axis=1),(len(rand2), 2, len(rand2[0]))) #reshape
off_1 = np.sum(surv[pairs] * rec, axis=1) #one product of meiosis
off_2 = np.sum(surv[pairs] * (1-rec), axis=1) #other product of meiosis
off = np.append(off_1, off_2, axis=0) #each product of meiosis
return off
def mutate(off, u, alpha, n, mut):
"""
This function creates mutations and updates population
"""
rand3 = np.random.uniform(size = len(off)) #random uniform number in [0,1] for each offspring
nmuts = sum(rand3 < u) # mutate if random number is below mutation rate; returns number of new mutations
whomuts = np.where(rand3 < u) #indices of mutants
newmuts = np.random.normal(0, alpha, (nmuts, n)) #phenotypic effect of new mutations
pop = np.append(off, np.transpose(np.identity(len(off), dtype=int)[whomuts[0]]), axis=1) #add new loci and identify mutants
mut = np.append(mut, newmuts, axis=0) #append effect of new mutations to mutation list
return [pop, mut]
def remove_muts(remove, remove_lost, pop, mut, mutfound):
"""
This function creates mutations and updates population
"""
if remove_lost:
if remove == 'any':
keep = pop.any(axis=0)
mut = mut[keep]
pop = pop[:, keep]
elif remove == 'derived':
segregating = pop.any(axis=0)
ancestral = np.array(range(len(mut))) < len(mutfound)
keep = np.add(segregating, ancestral)
mut = mut[keep]
pop = pop[:, keep]
return [pop, mut]
######################################################################
##UNIVERSAL PARAMETERS##
######################################################################
nreps = 1 #number of replicates for each set of parameters
ns = [2] #phenotypic dimensions (positive integer >=1)
data_dir = 'data'
######################################################################
##PARAMETERS OF ANCESTOR##
######################################################################
n_reps = 1 #number of reps of ancestor that exist
N = 10**3 #number of individuals (positive integer >=1)
alpha = 10**(-2) #mutational sd (positive real number)
u = 10**(-3) #mutation probability per generation per genome (0<u<1)
sigma = 10**(0) #selection strength
burn_dir = 'data/'
rrep = np.random.choice(n_reps, nreps, replace = False) #randomly assign each rep an ancestor, without or with replacement (i.e., unique ancestor for each sim or not)
######################################################################
##PARAMETERS FOR ADAPTING POPULATIONS##
######################################################################
# n_mut_list = list(np.arange(0, 51, 3)) #starting nmuts, final n_muts, interval
n_mut_list = [[10]] #list for each n value
N_adapts = [10**3] #number of haploid individuals (positive integer)
alpha_adapt = alpha #mutational sd (positive real number)
u_adapt = u #mutation probability per generation per genome (0<u<1)
sigma_adapts = [10**0] #selection strengths
# opt_dists = list(np.arange(0.2, 1.01, 0.025)) #distances to optima
opt_dists = [0.1] #distances to optima
# selection = 'divergent' #divergent selection (angle = 180 deg)
selection = 'parallel' #parallel selection (angle = 0)
# selection = 'both' #both divergent and parallel selection
# maxgen = 2000 #total number of generations populations adapt for
maxgen = 10**3 #total number of generations populations adapt for
remove_lost = True #If true, remove mutations that are lost (0 for all individuals)
remove = 'derived' #.. any derived (not from ancestor) mutation that is lost
######################################################################
##PARAMETERS FOR HYBRIDS##
######################################################################
nHybrids = 100 #number of hybrids to make at end of each replicate
######################################################################
##FUNCTION FOR POPULATIONS TO ADAPT##
######################################################################
def main():
	"""
	Sweep the parameter grid and run replicate adaptation simulations.

	For every combination of population size (N_adapts), selection strength
	(sigma_adapts), trait dimensionality (ns), selection style (parallel /
	divergent, chosen by the module-level `selection` flag), optimum distance
	(opt_dists) and number of ancestral mutations (n_mut_list): load a
	burned-in ancestor from burn_dir, found two identical populations, evolve
	each under Wright-Fisher sampling with recombination and mutation for
	maxgen generations, form nHybrids hybrids between them, and record
	segregation variance and expected heterozygosity via write_data_to_output.

	NOTE(review): relies on module-level globals defined outside this chunk
	(N_adapts, sigma_adapts, ns, opt_dists, n_mut_list, nreps, maxgen,
	selection, burn_dir, data_dir, N, alpha, u, sigma, rrep, u_adapt,
	alpha_adapt, remove, remove_lost) and on helpers found / fitness /
	recomb / mutate / remove_muts / open_output_files / close_output_files /
	write_data_to_output - confirm they are in scope before running.
	"""
	#loop over population size
	i_N = 0
	while i_N < len(N_adapts):
		N_adapt = N_adapts[i_N]
		#loop over selection strength
		i_sigma = 0
		while i_sigma < len(sigma_adapts):
			sigma_adapt = sigma_adapts[i_sigma]
			#loop over dimensions
			l = 0
			while l < len(ns):
				n = ns[l]
				# open output files
				fileHandles = open_output_files(n, N_adapt, alpha_adapt, u_adapt, sigma_adapt, data_dir)
				if selection == 'both':
					k = 0
					kmax = 1
				elif selection == 'parallel':
					k = 0
					kmax = 0
				elif selection == 'divergent':
					k = 1
					kmax = 1
				#loop of selection styles
				while k < kmax + 1:
					#loop over optima
					j = 0
					while j < len(opt_dists):
						#set optima
						theta1 = np.append(opt_dists[j],[0]*(n-1)) #set one optima
						if k == 0: #parallel
							theta2 = theta1
						elif k == 1: #divergent
							theta2 = np.append(-opt_dists[j],[0]*(n-1))
						# #set up plot of hybrid load versus number of ancestral mutations (n_muts)
						# plt.axis([0, max(n_mut_list)+1, 0, 0.1])
						# plt.ylabel('hybrid load at generation %d (mean $\pm$ SD of %d replicates)' %(maxgen,nreps))
						# plt.xlabel('number of ancestral mutations')
						# plt.ion()
						#loop over all n_muts values
						i = 0
						while i < len(n_mut_list[l]):
							n_muts = n_mut_list[l][i] #set number of mutations in ancestor (ie how much SGV)
							# hyloads = [0] * nreps #initialize vector to store hybrid loads in from each replicate
							#loop over all replicates
							rep = 0
							while rep < nreps:
								#load ancestor
								burn_id = 'n%d_N%d_alpha%.4f_u%.4f_sigma%.4f_rep%d' %(n, N, alpha, u, sigma, rrep[rep]+1)
								filename = "%s/Muts_%s.npy" %(burn_dir, burn_id)
								ancestor_muts = np.load(filename) #load mutations
								filename = "%s/Freqs_%s.npy" %(burn_dir, burn_id)
								ancestor_freqs = np.load(filename) #load frequencies
								#found adapting populations
								# [popfound1, mutfound1] = found(n_muts, nmuts_max, ancestor_muts, ancestor_freqs, K, n)
								# [popfound2, mutfound2] = found(n_muts, nmuts_max, ancestor_muts, ancestor_freqs, K, n)
								#initialize adapting populations
								# [pop1, mut1] = [popfound1, mutfound1]
								# [pop2, mut2] = [popfound2, mutfound2]
								#found identical populations
								[popfound, mutfound] = found(n_muts, ancestor_muts, ancestor_freqs, N_adapt, n)
								[pop1, mut1] = [popfound, mutfound]
								[pop2, mut2] = [popfound, mutfound]
								#initialize generation counter
								gen = 0
								#run until maxgen
								while gen < maxgen + 1:
									# genotype to phenotype
									phenos1 = np.dot(pop1, mut1) #sum mutations held by each individual
									phenos2 = np.dot(pop2, mut2) #sum mutations held by each individual
									# phenotype to fitness
									w1 = fitness(phenos1, theta1, sigma_adapt)
									w2 = fitness(phenos2, theta2, sigma_adapt)
									# wright-fisher (multinomial) sampling
									parents1 = np.random.multinomial(N_adapt, w1/sum(w1)) #number of times each parent chosen
									off1 = np.repeat(pop1, parents1, axis=0) #offspring genotypes
									parents2 = np.random.multinomial(N_adapt, w2/sum(w2)) #number of times each parent chosen
									off2 = np.repeat(pop2, parents2, axis=0) #offspring genotypes
									# mating and recombination
									off1 = recomb(off1)
									off2 = recomb(off2)
									# mutation and population update
									[pop1, mut1] = mutate(off1, u_adapt, alpha_adapt, n, mut1)
									[pop2, mut2] = mutate(off2, u_adapt, alpha_adapt, n, mut2)
									# remove lost mutations (all zero columns in pop)
									[pop1, mut1] = remove_muts(remove, remove_lost, pop1, mut1, mutfound)
									[pop2, mut2] = remove_muts(remove, remove_lost, pop2, mut2, mutfound)
									# go to next generation
									gen += 1
								#make variables to hold offspring phenotypes
								offphenos = dict()
								offpheno = []
								#make each of nHybrids hybrids
								for m in range(nHybrids):
									# choose random parents
									randpar1 = pop1[np.random.choice(len(pop1))]
									randpar2 = pop2[np.random.choice(len(pop2))]
									# get random parent phenotypes
									phenpar1 = np.dot(randpar1, mut1)
									phenpar2 = np.dot(randpar2, mut2)
									# get mutations held by random parents
									mutpar1 = mut1 * randpar1[:, None]
									mutpar2 = mut2 * randpar2[:, None]
									setA = set(tuple(x) for x in mutpar1)
									setB = set(tuple(x) for x in mutpar2)
									# find mutations shared by two parents (all in offspring)
									sharedmuts = np.array([x for x in setA & setB])
									if len(sharedmuts) < 1:
										sharedmuts = np.array([[0] * n]) #give something in case empty
									# find mutations not shared by two parents
									unsharedmuts = np.array([x for x in setA ^ setB])
									# which unshared mutations in offspring (free recombination between all loci, therefore gets each with 0.5 probability)
									randmuts = np.random.randint(2, size = (len(unsharedmuts)))
									unsharedoffmuts = unsharedmuts * randmuts[:, None]
									if len(unsharedoffmuts) < 1:
										unsharedoffmuts = np.array([[0] * n]) #give something in case empty
									# offspring phenotype is collection of shared and random unshared mutations
									offpheno.append(sum(np.append(sharedmuts, unsharedoffmuts, axis = 0)))
								offpheno = np.array(offpheno) #reformat correctly
								dist = np.linalg.norm(offpheno - np.mean(offpheno, axis=0), axis=1) #phenotypic distance from mean hybrid
								# hyload = np.log(1*B) - np.mean(np.log(survival(dist)*B)) #hybrid load as defined by Chevin et al 2014
								segvar = np.mean(np.var(offpheno, axis = 0))
								#calculate genetic parallelism across ancestrally-segregating loci that have been segregating in adapting populations since divergence
								p = sum(pop1[:, len(mutfound)-n_muts:len(mutfound)]) / N_adapt #frequency of derived alleles in pop1
								q = sum(pop2[:, len(mutfound)-n_muts:len(mutfound)]) / N_adapt #frequency of derived alleles in pop2
								EH = np.mean(p*(1-q)+(1-p)*q) #expected heterozygosity in hybrids
								#calculate genetic parallelism across ancestrally-shared segregating that have been segregating in adapting populations since divergence plus those loci that have mutations unique to one adapting population
								p = sum(pop1[:, len(mutfound)-n_muts:len(mutfound)]) / N_adapt #frequency of derived alleles in pop1
								q = sum(pop2[:, len(mutfound)-n_muts:len(mutfound)]) / N_adapt #frequency of derived alleles in pop2
								EH_1 = p*(1-q)+(1-p)*q #expected heterozygosities at those loci
								p = sum(pop1[:, len(mutfound):]) / N_adapt #frequency of unique derived alleles in pop1 = expected heterozygosity at loci with mutations unique to pop1
								q = sum(pop2[:, len(mutfound):]) / N_adapt #frequency of unique derived alleles in pop2 = expected heterozygosity at loci with mutations unique to pop2
								EH_2 = np.append(p,q) #list of expected heterozygosities at unique loci
								EH_all = np.mean(np.append(EH_1,EH_2)) #expected heterozygosity across all loci considered
								#print an update
								print('N=%d, sigma=%.2f, n=%d, opt1=%r, opt2=%r, rep=%d, n_muts=%d, distance=%.3f, selection=%r, segregation variance=%.3f, expected heterozygosity (shared)=%.4f, expected heterozygosity (all)=%.4f' %(N_adapt, sigma_adapt, n, theta1, theta2, rep+1, n_muts, opt_dists[j], ['parallel','divergent'][k], segvar, EH, EH_all))
								#save data
								write_data_to_output(fileHandles, [theta1, theta2, rep+1, n_muts, opt_dists[j], ['parallel','divergent'][k], segvar, EH, EH_all])
								# hyloads[rep] = hyload #save hybrid load for this replicate
								# go to next rep
								rep += 1
							# #plot mean and SD hybrid load over all replicates for this n_muts value
							# plt.errorbar(n_mut_list[i], np.mean(hyloads), yerr=np.var(hyloads)**0.5, fmt='o', color='k')
							# plt.pause(0.01)
							#go to next n_muts value
							i += 1
						# plt.pause(1) #pause on finished plot for a second
						# plt.savefig('Figs/HLvsNMUT.png') #save finished plot
						#go to next optima
						j += 1
					#go to next type of selection
					k += 1
				# cleanup
				close_output_files(fileHandles)
				#go to next dimension
				l += 1
			#go to next sigma value
			i_sigma += 1
		#go to next N value
		i_N += 1
######################################################################
##RUNNING ADAPTATION FUNCTION##
######################################################################
start = time.time()
main()
end = time.time()
print('this took %.2f seconds to complete' %(end-start))
| [
"ken.thompson@zoology.ubc.ca"
] | ken.thompson@zoology.ubc.ca |
67ddb0e2a54f5eb15dcef4077a242b6570051ce6 | c1887805b9cfc7fbe495579151e099727ec5e7e1 | /lib/python2.6/site-packages/tw.jquery-0.9.9-py2.6.egg/tw/jquery/samples.py | e19ed82a9c4b333531d8d87063e82fb8d24a57df | [] | no_license | desarrollo1/tg2env | 2b684ed15c4a946ade343c12c6ab045e89d10539 | 6f8fa6517d7201c38a0769d5e253c70899be3413 | refs/heads/master | 2021-03-12T19:20:32.848029 | 2011-01-31T04:48:26 | 2011-01-31T04:48:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,469 | py | # Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from tw.jquery.widgets import FlotWidget
class DemoFlotWidget(FlotWidget):
    """Demo widget that feeds :class:`FlotWidget` with generated sample series."""
    demo_for = FlotWidget

    @staticmethod
    def data():
        """
        Build the sample data series that feed the demo widget.

        Since ``data`` is a parameter (listed at :attr:`params`) and can be
        called without arguments, :meth:`update_params` calls it to provide
        fresh values on every request.

        Returns a list of Flot series dicts: a filled sine line, a constant
        bar series, a cosine point series, and two square-root lines.
        """
        import math
        # x grid 0.0, 0.5, ..., 12.5 (the original tracked this with a
        # manually incremented counter; the unused datetime import is gone)
        xs = [step * 0.5 for step in range(26)]
        d1 = [(x, math.sin(x)) for x in xs]
        d2 = [[0, 3], [4, 8], [8, 5], [9, 13]]
        d3 = [(x, math.cos(x)) for x in xs]
        d4 = [(x, math.sqrt(x * 10)) for x in xs]
        d5 = [(x, math.sqrt(x)) for x in xs]
        return [
            { 'data' : d1, 'lines' : { 'show' : 'true', 'fill' : 'true' } },
            { 'data' : d2, 'bars' : { 'show' : 'true' } },
            { 'data' : d3, 'points' : { 'show' : 'true' } },
            { 'data' : d4, 'lines' : { 'show' : 'true' } },
            { 'data' : d5, 'lines' : { 'show' : 'true' }, 'points' : { 'show' : 'true' } }
        ]
| [
"jaglame@gmail.com"
] | jaglame@gmail.com |
6da75afc662601dd4bc0b2aaf0413dede6a4ac94 | 6df18031547b1fde808944b4c8f83d2766c95251 | /UoM_databases_with_python/assignment_2/tracks.py | 094f7bf6650220142fd1d777f5317ba3710277e3 | [] | no_license | skreynolds/UoM_data_science | 6edce9b3d3bf03b6dab6471346e40965464d6adb | 9636c0a784079445f585b830a1d093acea608d6a | refs/heads/master | 2020-05-20T23:06:36.560299 | 2019-06-01T02:30:09 | 2019-06-01T02:30:09 | 185,794,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py | import xml.etree.ElementTree as ET
import sqlite3
conn = sqlite3.connect('../databases/sqldb_5.sqlite')
cur = conn.cursor()
# Make some fresh tables using executescript()
cur.executescript('''
DROP TABLE IF EXISTS Artist;
DROP TABLE IF EXISTS Genre;
DROP TABLE IF EXISTS Album;
DROP TABLE IF EXISTS Track;
CREATE TABLE Artist (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Genre (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Album (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
artist_id INTEGER,
title TEXT UNIQUE
);
CREATE TABLE Track (
id INTEGER NOT NULL PRIMARY KEY
AUTOINCREMENT UNIQUE,
title TEXT UNIQUE,
album_id INTEGER,
genre_id INTEGER,
len INTEGER, rating INTEGER, count INTEGER
);
''')
fname = input('Enter file name: ')
if ( len(fname) < 1 ) : fname = '../raw_data/Library.xml'
# <key>Track ID</key><integer>369</integer>
# <key>Name</key><string>Another One Bites The Dust</string>
# <key>Artist</key><string>Queen</string>
def lookup(d, key):
    """Return the text of the element that immediately follows <key>key</key>.

    Scans the children of `d` (an iTunes-style plist dict element) and,
    once a <key> child whose text equals `key` is seen, returns the text of
    the very next child.  Returns None when the key is absent or last.
    """
    grab_next = False
    for node in d:
        if grab_next:
            return node.text
        grab_next = (node.tag == 'key' and node.text == key)
    return None
stuff = ET.parse(fname)
all = stuff.findall('dict/dict/dict')
print('Dict count:', len(all))
for entry in all:
if ( lookup(entry, 'Track ID') is None ) : continue
name = lookup(entry, 'Name')
artist = lookup(entry, 'Artist')
album = lookup(entry, 'Album')
genre = lookup(entry, 'Genre')
count = lookup(entry, 'Play Count')
rating = lookup(entry, 'Rating')
length = lookup(entry, 'Total Time')
if name is None or artist is None or album is None or genre is None :
continue
print(name, artist, album, genre, count, rating, length)
cur.execute('''INSERT OR IGNORE INTO Artist (name)
VALUES ( ? )''', ( artist, ) )
cur.execute('SELECT id FROM Artist WHERE name = ? ', (artist, ))
artist_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Genre (name)
VALUES ( ? )''', (genre, ) )
cur.execute('SELECT id FROM Genre WHERE name = ?', (genre, ))
genre_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Album (title, artist_id)
VALUES ( ?, ? )''', ( album, artist_id ) )
cur.execute('SELECT id FROM Album WHERE title = ? ', (album, ))
album_id = cur.fetchone()[0]
cur.execute('''INSERT OR REPLACE INTO Track
(title, album_id, genre_id, len, rating, count)
VALUES ( ?, ?, ?, ?, ?, ? )''',
( name, album_id, genre_id, length, rating, count ) )
conn.commit()
| [
"shane.k.reynolds@gmail.com"
] | shane.k.reynolds@gmail.com |
8d3122e20690347d74a45893dbb6e1f218b56713 | 5474939d012128f31f1df0516cfa3af856bc5691 | /ProjMgmt/wsgi.py | 6c20ff12d0f4fcb7abe3ed01edd8dbf233dee72a | [] | no_license | Ckirk617/RemProjMgmt | aa765ed225372b2e830c637e32770f416596be68 | 7aa87665c58c79669dc58384677e103d38f0a150 | refs/heads/master | 2020-04-06T16:17:17.933760 | 2018-11-14T21:35:30 | 2018-11-14T21:35:30 | 157,610,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for ProjMgmt project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before bootstrapping.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ProjMgmt.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"cmariekirk@Christinas-MacBook-Pro.local"
] | cmariekirk@Christinas-MacBook-Pro.local |
a582091097bfd7e00fa4a9f6681f38995dda1943 | 37313e86e23cd6ad46afa84ca5399c7535079dd7 | /TheLab_03_TextInput/main.py | 100caf6ce96b04119c4991b15929bf3887a42744 | [] | no_license | liloFashion/TheLab_firstSteps | 5d84234c514fceb92735062145f657f460acd24e | c41632f928f2211902aeb53bbd2926985c9c48dc | refs/heads/master | 2023-08-26T12:10:01.128075 | 2021-11-11T11:05:24 | 2021-11-11T11:05:24 | 426,171,357 | 0 | 0 | null | 2021-11-11T11:05:25 | 2021-11-09T09:44:06 | Python | UTF-8 | Python | false | false | 1,529 | py | # from io import StringIO
import os
import sys
from pathlib import Path
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
# from kivy.uix.togglebutton import ToggleButton
from kivy.properties import StringProperty, BooleanProperty
root_path: Path = os.path.split((os.path.dirname(__file__)))[0]
sys.path.append(root_path)
# for import of Fonts dir
font_path: Path = os.path.join(root_path, f'Main/Fonts{os.sep}')
image_path: Path = os.path.join(root_path, f'Main/images{os.sep}')
class WidgetsExamples(GridLayout):
    """Root widget of the demo UI: a counter button, toggle/switch handlers,
    and a text input, presumably wired to the matching .kv layout file —
    TODO confirm against the kv bindings."""
    # class-level state exposed to the kv bindings via Kivy properties
    count = 0
    count_enabled = BooleanProperty(False)
    my_text = StringProperty("0")
    text_input_str = StringProperty("foo")
    # slider_value_txt = StringProperty("0")
    # resolved asset paths (font_path / image_path are module-level constants)
    font_lcd: Path = os.path.join(font_path, "Lcd.ttf")
    image_bg1: Path = os.path.join(image_path, "bg1.jpg")
    print(image_bg1)  # runs once at class-creation time (debug output)
    def on_button_click(self):
        # Increment and display the counter, but only while counting is enabled.
        # print("Button clicked")
        if self.count_enabled:
            self.count += 1
            self.my_text = str(self.count)
    def on_toggle_button_state(self, widget):
        # Relabel the toggle and enable/disable counting to match its state.
        # print("toggle state" + widget.state)
        if widget.state == "normal":
            widget.text = "OFF"
            self.count_enabled = False
        else:
            widget.text = "ON"
            self.count_enabled = True
    def on_switch_active(self, widget):
        # Debug: log the switch position.
        print("Switch: " + str(widget.active))
    def on_text_validate(self, widget):
        # Mirror the submitted text into the bound property.
        self.text_input_str = widget.text
class TheLabApp(App):
    """Application entry class; Kivy derives the layout file name from the
    class name (TheLabApp -> thelab.kv) — standard Kivy convention."""
    pass
TheLabApp().run()
| [
"lisaloerinci@gmail.com"
] | lisaloerinci@gmail.com |
d55719fc88b337b0fec3f2afbaeb087d1156c0a9 | 316dbb3baac4a2084dfc5693677fa4baaa8b76b5 | /DSandALGOs/DP/edit_distance.py | d24c79ecc72ef7a49615020cddcaaaf4c325ad11 | [] | no_license | nsjethani/Python | 27748ff5e420823c6a1a73183aead612e3e6db4d | 7d1cf6382d18942bd5ddd91b476fae009060b961 | refs/heads/master | 2020-03-10T02:14:19.836172 | 2019-03-05T21:58:46 | 2019-03-05T21:58:46 | 129,131,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | def minDistance(word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
# from one word to another
if not word1:
return len(word2) + 1
if not word2:
return len(word1) + 1
buffer_array = [[0] * (len(word2)+1) for i in range(len(word1)+1)]
for i in range(len(word1) + 1):
for j in range(len(word2) + 1):
if i is 0 and j is 0:
buffer_array[i][j] = 0
elif i is 0:
buffer_array[i][j] = buffer_array[i][j - 1] + 1
elif j is 0:
buffer_array[i][j] = buffer_array[i - 1][j] + 1
else:
if word1[i - 1] == word2[j - 1]:
buffer_array[i][j] = buffer_array[i - 1][j - 1]
else:
buffer_array[i][j] = min(buffer_array[i - 1][j - 1],
buffer_array[i - 1][j],
buffer_array[i][j - 1]) + 1
return buffer_array[-1][-1]
ans = minDistance("abc","ab")
print(ans) | [
"neha.jethani6627@gmail.com"
] | neha.jethani6627@gmail.com |
3ad2e3be5bbf20a847adee6493f968ebf3c1c699 | f3685b0ec3ed8302d92e8b3d7cb6984bf6b4ec2f | /black_jack_auto.py | a6a5b0f7b58a8168eb2fcd76313fcdbe1f65d45b | [
"Unlicense"
] | permissive | TomareUtsuZo/python_learning | 86a9ff97ebe129e31f78b1dfa39ca1a9ac89d928 | fb73abc4efb8d2352a2280c3043c43af6746491b | refs/heads/main | 2023-07-07T23:36:08.837648 | 2021-08-07T14:05:57 | 2021-08-07T14:05:57 | 389,968,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,010 | py | from random import shuffle
def cls():
    """Crudely "clear" the console by printing 100 blank lines."""
    print(100 * "\n")
BLACK_JACK_PAYS = 2.0
class Player:
    """A blackjack participant (player or dealer).

    Cards are [suit, value] lists, e.g. ['Heart', '10'] or ['Spade', 'Ace'].
    The bankroll (money) and the current bet are tracked per player.
    """
    def __init__(self, name, hand=None, money=100):
        """Create a player.

        :param name: display name
        :param hand: optional starting hand (list of [suit, value] cards);
            a fresh list is used when omitted.  (The original used a mutable
            default argument, so every player created without a hand shared
            one list.)
        :param money: starting bankroll
        """
        self.name = name
        self.hand = hand if hand is not None else []
        self.score = 0
        self.money = money
        self.bet = 0
    def __str__(self):
        # NOTE: printing inside __str__ is a side effect, kept because the
        # existing game loop relies on print(player) showing the name line.
        print(f'{self.name} has: ')
        #self.Print_hand(self.hand)
        return ("and a score of: " + str(self.score))
    def Set_Score(self):
        """Recompute self.score from self.hand; return True when busted.

        Aces count as 11 until the total exceeds 21, then they are demoted
        to 1 one at a time (the original demoted at most one ace per check,
        so multi-ace hands could report a bogus score).
        """
        self.score = 0   # recalculate score from zero
        soft_aces = 0    # aces currently counted as 11
        for card in self.hand:
            try:
                self.score += int(card[1])   # numbered card
            except ValueError:               # face card or ace
                if card[1][0] == "A":
                    self.score += 11
                    soft_aces += 1
                else:                        # J, Q, K
                    self.score += 10
        while self.score > 21:
            if soft_aces > 0:
                self.score -= 10             # demote one ace from 11 to 1
                soft_aces -= 1
            else:
                return True                  # busted
        return False
    def Hit(self, card):
        """Add one dealt card to the hand and refresh the score."""
        self.hand.append(card)
        self.Set_Score()
    def Print_hand(self, hand):
        """Print the given hand as side-by-side ASCII-art cards."""
        art = [self.Ascii_Card_List(card) for card in hand]
        for row in range(4):
            for card_art in art:
                print(card_art[row], end=' ')
            print('')
    def Ascii_Card_List(self, card):
        """Return a 4-line ASCII representation of one [suit, value] card."""
        if card[1] == '10':  # '10' is the only two-character rank
            return ['-----',f'|{card[1]} |',f'| {card[0][0]} |', '-----']
        return ['-----', f'| {card[1][0]} |', f'| {card[0][0]} |', '-----']
    def Play(self, new_hand):
        """Replace the hand for a new round and rescore it."""
        self.hand = new_hand
        self.Set_Score()
    def Bet_Money(self, amount):
        """Move `amount` from the bankroll into the current bet."""
        self.money -= amount
        self.bet += amount
    def Win(self, dealer_score, dealer_hand):
        """Settle the round against the dealer.

        :param dealer_score: dealer's final score
        :param dealer_hand: dealer's final hand (list of cards)
        :return: True when the player lost the bet, False otherwise

        Bug fixes vs. the original: `dealer_hand == 2` compared a list to an
        int (never true) and is now `len(dealer_hand) == 2`; payouts used
        `self.money = ...`, overwriting the bankroll, and now add to it;
        a lost bet is cleared instead of carrying into the next round.
        """
        lost = False
        if self.score > 21:  # player busted
            lost = True
        elif dealer_score > self.score and not dealer_score > 21:
            lost = True      # dealer outscored the player without busting
        elif dealer_score == self.score:  # tie on points
            player_bj = self.score == 21 and len(self.hand) == 2
            dealer_bj = len(dealer_hand) == 2
            if dealer_bj and player_bj:
                self.money += self.bet  # push: return the stake
            elif player_bj:
                self.money += self.bet * BLACK_JACK_PAYS  # blackjack bonus
            elif dealer_bj:
                lost = True  # dealer blackjack beats a plain 21
            else:
                self.money += self.bet  # plain push
        else:  # player outscored the dealer, or the dealer busted
            self.money += self.bet * 2
        self.bet = 0  # the round is settled either way
        return lost
    def Print_Dealer(self, card):
        """Print the dealer's face-up card and the score it shows."""
        self.Print_hand([card])
        dealer_score = 0
        try:
            # The original called int(card) on the whole [suit, value] list,
            # which always raised, so every numbered card scored as 10.
            dealer_score += int(card[1])
        except ValueError:
            if card[1][0] == "A":
                dealer_score += 11
            else:
                dealer_score += 10
        print(f'Dealer show\'s a score of: {dealer_score}')
def create_deck(shuffle=True):
    """Build a standard 52-card deck as [suit, value] lists.

    :param shuffle: when truthy, shuffle the deck in place before returning.
        (The original parameter shadowed the imported ``shuffle`` function,
        so the first shuffle attempt always raised a TypeError that was then
        rescued by a chain of bare ``except`` blocks.)
    :return: list of 52 [suit, value] cards, ordered Heart..Diamond / 2..Ace
        when not shuffled
    """
    card_valu = ['2', '3', '4', '5', '6', '7', '8', '9', '10',
                 'Jack', 'Queen', 'King', 'Ace']
    card_suit = ['Heart', 'Spade', 'Club', 'Diamond']
    deck = [[suit, valu] for suit in card_suit for valu in card_valu]
    if shuffle:
        # local import: the `shuffle` parameter shadows the module-level name
        import random
        random.shuffle(deck)
    return deck
game_deck = create_deck()
carl = Player('Carl', [game_deck.pop(0), game_deck.pop(0)], 0)
dealer = Player('Dealer', [game_deck.pop(), game_deck.pop()])
for i in range(100000000):
carl.Set_Score()
#dealer.Print_Dealer(dealer.hand[0])
carl.Bet_Money(1)
#print(carl)
while carl.score < 21:
hit_it = 'y' if carl.score < 17 else 'n'
if hit_it.lower() == 'y':
carl.Hit(game_deck.pop())
else:
break
#dealer.Print_Dealer(dealer.hand[0])
carl.Set_Score()
#print(carl)
dealer.Set_Score()
while dealer.score < 17:
dealer.Hit(game_deck.pop())
dealer.Set_Score()
# if not (carl.Win(dealer.score, dealer.hand)):
# print(f'You win! Your score was {carl.score}, and the dealers score was {dealer.score}!')
# print(f'You have ${carl.money}')
# else:
# print(f'You lost! Your score was {carl.score}, and the dealers score was {dealer.score}.')
# print(f'You have ${carl.money}')
game_deck = create_deck()
carl.Play([game_deck.pop(), game_deck.pop()])
dealer.Play([game_deck.pop(),game_deck.pop()])
print(carl)
| [
"noreply@github.com"
] | noreply@github.com |
d5102b50bc26edc64a913b2c41843f190397541c | 6a710916cf2b49e1210b108e03b114779967a5a8 | /crop-AI/cropAI/dashboard/views.py | a82d4760a6a32cff30464e999dfb41c4527fcafa | [
"MIT"
] | permissive | selvvm/crop-recommendation-system | 58f2b83f7ceba01fa35fe49adf22570734268a72 | bc8f91cb222ebce36aec881f72fdd85e5054e528 | refs/heads/main | 2022-12-30T22:27:00.253575 | 2020-10-16T21:07:00 | 2020-10-16T21:07:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the dashboard app's landing page."""
    return render(request, 'dashboard/landingpage.html') | [
"sanket.mhatre@vit.edu.in"
] | sanket.mhatre@vit.edu.in |
3e108d215330ee3c14ab7f7957e3cbc55dfcb5f9 | aecad2b0e89d72aca6c80bf63c424ee7904257ce | /pending_deletes/named_entity_recognition/NER_co_occurrence.py | deeeaa377fae611e679786374f32a94ecd4dcd2c | [] | no_license | humlab/text_analytic_tools | fdf4ba814263672b05ec188aac9a059b55d085b6 | 32fc444ed11649a948a7bf59653ec792396f06e3 | refs/heads/master | 2022-03-02T06:56:29.223039 | 2019-10-28T13:06:49 | 2019-10-28T13:06:49 | 74,679,680 | 2 | 1 | null | 2019-10-26T21:33:23 | 2016-11-24T14:19:46 | Python | UTF-8 | Python | false | false | 2,705 | py |
import pandas as pd
# %%
writer = pd.ExcelWriter('C:\\TEMP\\papacy.xlsx')
# %%
#pope = 'benedict-xvi'
#pope = 'francesco'
pope = 'john-paul-ii'
#df = pd.read_excel('./Data/' + pope + '.xlsx', 'Data')
df = pd.read_excel('C:\\Users\\roma0050\\Documents\\Projects\\papacy_scraper\\data\\' + pope + '.xlsx', 'Data', dtype={'Concept': 'str'})
# %%
df_locations = df.loc[(df.Classifier=='LOCATION')]
# %%
df_place_occurrences_counts = df_locations.groupby(['Document', 'Year', 'Genre', 'Concept'])[['Count']].sum().reset_index()
df_place_occurrences_counts.columns = ['Document', 'Year', 'Genre', 'Concept', 'PlaceOccurenceCount']
df_place_distinct_counts = df_locations.groupby(['Document', 'Year', 'Genre'])[['Count']].sum().reset_index()
df_place_distinct_counts.columns = ['Document', 'Year', 'Genre', 'PlaceCount']
# %%
df_place_counts = pd.merge(df_place_distinct_counts, df_place_occurrences_counts, left_on="Document", right_on="Document")[['Document', 'Year_x', 'Concept', 'PlaceOccurenceCount', 'PlaceCount']]
df_place_counts.columns = ['Document', 'Year', 'Concept', 'PlaceOccurenceCount', 'PlaceCount']
df_place_counts['Weight'] = df_place_counts['PlaceOccurenceCount'] / df_place_counts['PlaceCount']
# %%
#df_place_counts.loc[(df_place_counts.Document=='benedict-xvi_en_travels_2008_trav-ben-xvi-usa-program-20080415')]
df_place_cooccurrence_document = pd.merge(df_place_counts,
df_place_counts,
left_on=["Document", "Year"],
right_on=["Document", "Year"])[[ 'Document', 'Year', 'Concept_x', 'Concept_y', 'Weight_x', 'Weight_y' ]]
# %%
df_place_cooccurrence_document['Weight'] = df_place_cooccurrence_document['Weight_x'] * df_place_cooccurrence_document['Weight_y']
# Note: Concept had set as string to allow for comparison below, i.e. to use '<'
df_place_cooccurrence_document = df_place_cooccurrence_document.loc[(df_place_cooccurrence_document.Concept_x < df_place_cooccurrence_document.Concept_y)]
df_place_cooccurrence_document = df_place_cooccurrence_document [['Document', 'Year', 'Concept_x', 'Concept_y', 'Weight']]
# %%
df_place_cooccurrence_document.to_excel(writer, pope + '_cooc_doc')
# %%
df_place_cooccurrence_document = df_place_cooccurrence_document.set_index(['Concept_x', 'Concept_y'])
# %%
df_place_cooccurrence_corpus = df_place_cooccurrence_document.groupby(['Concept_x', 'Concept_y'])[['Weight']].sum().reset_index()
# %%
#df_place_cooccurrence_corpus = df_place_cooccurrence_document [['Document', 'Year', 'Concept_x', 'Concept_y', 'Weight']]
df_place_cooccurrence_corpus.to_excel(writer, pope + '_cooc_corpus')
#%%
writer.save() | [
"roger.mahler@hotmail.com"
] | roger.mahler@hotmail.com |
53014b08b5a0417f9cdad166774bd2d3dddbf4a7 | c0a1da7cd7bcc987f28d746679664a163ea84e6a | /app/forms.py | 0cef18f88bc9b986212729252be10ed4a9698605 | [] | no_license | pavan19a97/flask-todolist-login | fcbf1d96dd0f586d5e399645e1faedfd3cb4f28f | e05a2e7bdaae8bcf4567a3ae3e90aab62ef068d6 | refs/heads/master | 2023-02-26T03:06:58.972469 | 2021-01-30T16:51:56 | 2021-01-30T16:51:56 | 333,224,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from app.models import User
class LoginForm(FlaskForm):
    """Sign-in form: required username and password plus a remember-me flag."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember Me')
    submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
    """New-account form; the validate_<field> methods are picked up by
    WTForms as inline validators and enforce unique username / e-mail."""
    username = StringField('Username', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    password2 = PasswordField(
        'Repeat Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')
    def validate_username(self, username):
        """Reject usernames that already exist in the database."""
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError('Please use a different username.')
    def validate_email(self, email):
        """Reject e-mail addresses that already exist in the database."""
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError('Please use a different email address.')
| [
"pavan19a97@gmail.com"
] | pavan19a97@gmail.com |
75f03f05b3f59195d3d3e9487faea50be5eac82f | c28b86c750612e88deb1de6fc43d2a340d09f5ed | /s2lib/forcefields/eam.py | fa6bf922a13f8a1471b28f20752da4c558869e54 | [] | no_license | vlcekl/s2lib | db2e99ae05cf9148215a93fddaf731be8e5b2e5d | 47f70ec27e917c41ce63c2f6ca15b235878e7ce3 | refs/heads/master | 2022-11-04T11:58:02.302899 | 2020-06-21T03:11:55 | 2020-06-21T03:11:55 | 273,824,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,476 | py | from __future__ import print_function #, unicode_literals
from __future__ import absolute_import, division
try:
xrange = xrange
# We have Python 2
except:
xrange = range
# We have Python 3
"""
Collection of EAM functions
"""
import copy
import numpy as np
from .potentials import f_spline3
# Functional form for the embedding potential and its scaled form
f_embed = lambda d, a: a[0]*d**0.5 + a[1]*d**2
f_embed_lin = lambda d, a: a[0]*d**0.5 + a[1]*d + a[2]*d**2
# NOTE(review): S and C below are not defined in this chunk — presumably
# module-level gauge/scaling constants set elsewhere; confirm before use.
f_embed_s = lambda d, a: f_embed_lin(d/S, a) - C*d/S
# Density function and its scaled form (cubic spline via f_spline3)
f_dens = lambda x, dens_a, dens_r: f_spline3(x, dens_a, dens_r)
f_dens_s = lambda x, dens_a, dens_r: f_spline3(x, dens_a, dens_r)*S
def u_equi(r, pair_a, pair_r, dens_a, dens_r):
    """
    Equilibrium (normal, adjustable) part of the pair potential
    based on cubic splines.

    Parameters
    ----------
    r: float
        atom pair distance
    pair_a, pair_r: lists of floats
        coefficients and knots of the pair-potential spline
    dens_a, dens_r: lists of floats
        coefficients and knots of the density-function spline

    Returns
    -------
    u: float
        pair energy at pair distance r
    """
    # cubic spline pair potential (the original evaluated an undefined
    # name `x` here instead of the `r` argument, a guaranteed NameError)
    u = f_spline3(r, pair_a, pair_r)
    # gauge transformation into regular form
    # NOTE(review): C is a gauge constant not defined in this chunk —
    # confirm it is set at module scope before this is called.
    u += 2*C*f_spline3(r, dens_a, dens_r)
    return u
# Define the core parts of the potential (kept constant)
def u_core(r, za=74, zb=74):
    """
    Repulsive (screened-Coulomb) potential of the atomic cores.
    Default atomic numbers correspond to W.

    Parameters
    ----------
    r: float
        atom pair distance
    za, zb: floats
        atomic numbers of the two atoms

    Returns
    -------
    u: float
        pair energy at pair distance r (0.0 for non-positive r)
    """
    qe_sq = 14.3992  # squared electron charge
    # screening length and reduced (dimensionless) distance
    screen_len = 0.4683766/(za**(2/3) + zb**(2/3))**0.5
    xi = r/screen_len
    energy = 0.0
    if r > 0.0:
        # four-exponential fit of the screening function
        amps = (0.1818, 0.5099, 0.2802, 0.02817)
        decays = (3.2, 0.9423, 0.4029, 0.2016)
        screening = sum(amp*np.exp(-dec*xi) for amp, dec in zip(amps, decays))
        energy = screening * (za*zb*qe_sq/r)
    return energy
def f_pair(r, param_a, param_r, za=78, zb=78, ri=1.0, ro=2.0):
    """
    Overall EAM pair potential combining inner, transition, and outer parts.
    The inner part is the fixed screened-Coulomb core; the outer part is the
    adjustable spline-based equilibrium potential; the transition blends the
    two smoothly on [ri, ro].

    Parameters
    ----------
    r: float
        pair distance
    param_a, param_r: lists of floats
        coefficients and knots of the cubic spline for the pair potential
    za, zb: floats
        atomic numbers of the two atoms (defaults are Z=78)
    ri, ro: floats
        inner and outer boundary of the transition region

    Returns
    -------
    u: float
        value of the pair potential at pair distance r
    """
    # NOTE(review): dens_a/dens_r are neither parameters nor defined in this
    # chunk — confirm they exist at module scope before calling.
    if r < ri:
        u = u_core(r, za, zb)
    elif r < ro:
        # quintic blend: eta goes 1 at r=ri down to 0 at r=ro
        x = (ro + ri - 2*r)/(ro - ri)
        eta = 3/16*x**5 - 5/8*x**3 + 15/16*x + 1/2
        unucl = u_core(r, za, zb)
        # the original passed the undefined names pair_a/pair_r here while
        # the param_a/param_r arguments went unused
        uequi = u_equi(r, param_a, param_r, dens_a, dens_r)
        u = uequi + eta*(unucl - uequi)
    else:
        u = u_equi(r, param_a, param_r, dens_a, dens_r)
    return u
def utot_EAM_per_atom(params, ustats, hparams=None):
    """
    Calculates configurational energy from EAM sufficient statistics and model parameters

    Parameters
    ----------
    params : list of lists and floats
        EAM interaction parameters: params[0:2] are the embedding-function
        coefficients, followed by pair-spline and density-spline coefficients
    ustats : list of lists and floats
        Sufficient statistics for a trajectory of configurations;
        ustats[i][2] holds per-sample pair-spline statistics and
        ustats[i][3] holds per-atom density statistics (arrays per knot)
    hparams: dict of lists
        hyperparameters - spline knots for pair potential ('pair') and
        density function ('edens')

    Returns
    -------
    u_total: ndarray of floats
        total configurational energy (sum of pair and manybody interactions)
        for each configuration in the trajectory
    """
    n_sample = len(ustats)
    # assign parameters to different functions
    if not hparams:  # no hparams given
        hp = params[2:]  # pair interaction coefficients
        hd = [1.0]       # electronic density coefficients (default single 1.0)
    else:
        # pair interaction coefficients
        npair = len(hparams['pair'])
        hp = params[2:2+npair]
        # electronic density coefficients. The first coefficient is always 1
        ndens = len(hparams['edens'])
        if ndens < 1:
            hd = [1.0]
        else:
            if len(params[2+npair:]) == ndens:
                hd = params[2+npair:2+npair+ndens]
            else:
                assert 2+npair+ndens-1 == len(params), "Wrong number of parameters: {} vs. {}".format(len(params), 2+npair+ndens-1)
                hd = np.concatenate((params[2+npair:2+npair+ndens-1], [1.0]))
        # !!! not general !!! limit last edens parameter to positive values
        # to preserve a positive discriminant
        # NOTE(review): the slice assignment below only works when params is
        # a numpy array; a plain list would raise TypeError here.
        if hd[-1] < 0.0:
            hd[-1] = 0.0
            params[2+npair:2+npair+ndens] = 0.0
    # pair interactions (per box): sum over spline components for each sample
    u_pair = np.array([sum([a*s for a, s in zip(hp, ustats[i][2][:])]) for i in range(n_sample)])
    # cycle over samples for manybody interactions
    embed_r = []
    embed_2 = []
    for i in range(n_sample):
        # electronic density on each atom (shape (n_atom,)); each spline-knot
        # coefficient multiplies the per-atom statistic for that knot
        rho_func = sum([p*s for p, s in zip(hd, ustats[i][3][:])])
        # cap the density to avoid unphysical embedding values
        rhomax = 70.51
        rho_func = np.where(rho_func > rhomax, rhomax, rho_func)
        # (the original assert message referenced the undefined name
        # `rho_func_shape`, turning any failing assert into a NameError)
        assert rho_func.shape[0] == ustats[i][3][0].shape[0], "rho_func shape {} does not match number of atoms == ustats shape {}".format(rho_func.shape[0], ustats[i][3][0].shape[0])
        # sqrt and squared atom contributions to the embedding function
        embed_r.append(np.sum(np.sqrt(rho_func)))
        embed_2.append(np.sum(rho_func**2))
    # manybody interactions from embedding parameters and the accumulated statistics
    u_many = np.array([params[0]*embed_r[i] + params[1]*embed_2[i] for i in range(n_sample)])
    assert u_pair.shape == u_many.shape, "Shapes of u_pair ({}) and u_many ({}) do not match.".format(u_pair.shape, u_many.shape)
    u_total = 0.5*u_pair + u_many
    return u_total
def utot_EAM_per_box(params, ustats, hparams=[-1]):
    """
    Calculates configurational energy from EAM sufficient statistics and model parameters.

    Only supports a single electronic-density knot: the per-box embedding
    statistics (ustats[i][0][0] and ustats[i][1][0]) are precomputed for a
    fixed density function, so the density coefficient is implicitly 1.

    Parameters
    ----------
    params : sequence of float
        params[0], params[1] -- embedding-function coefficients (sqrt and
        squared density terms); params[2:2+npair] -- pair spline coefficients.
    ustats : list
        Sufficient statistics for a trajectory of configurations.
        ustats[i][2] holds per-knot pair-interaction statistics;
        ustats[i][0][0] and ustats[i][1][0] hold the sqrt/squared embedding sums.
    hparams : dict
        Hyperparameters with 'pair' and 'edens' knot lists.
        NOTE(review): the default value [-1] is a mutable, wrong-typed
        placeholder; hparams['pair'] would raise if it were actually used,
        so callers must always pass a dict.

    Returns
    -------
    u_total : ndarray of float
        Total configurational energy (0.5*pair + manybody) for each
        configuration in the trajectory.
    """
    n_sample = len(ustats)
    # pair interaction coefficients, one per pair-spline knot
    npair = len(hparams['pair'])
    hp = params[2:2+npair]
    # this routine is only valid for a single electronic-density knot
    # (the density coefficient is then fixed at 1 by convention)
    assert len(hparams['edens']) == 1, 'number of edens knots not equal to 1: {0}'.format(len(hparams['edens']))
    # bugfix note: the original guarded a params-length check with
    # "if len(hparams['edens']) != 1", which can never be true after the
    # assert above; that dead branch has been removed.
    # pair interactions from array of spline coefficients and corresponding statistic
    u_pair = np.array([sum([a*s for a, s in zip(hp, ustats[i][2][:])]) for i in range(n_sample)])
    # manybody interactions from embedding function parameters and corresponding statistics
    u_many = np.array([params[0]*ustats[i][0][0] + params[1]*ustats[i][1][0] for i in range(n_sample)])
    # pair term halved to avoid double counting of i-j / j-i contributions
    u_total = 0.5*u_pair + u_many
    return u_total
def ftot_EAM(params, fstats):
    """
    Calculates atomic forces from EAM sufficient statistics and model parameters.

    Parameters
    ----------
    params : list of lists and floats
        EAM interaction parameters (embedding coefficients in params[0:2],
        pair spline coefficients in params[2:])
    fstats : list of lists and floats
        Sufficient statistics; for sample i, fstats[i][0][-1]/fstats[i][1][-1]
        are embedding force statistics and fstats[i][2] per-knot pair statistics

    Returns
    -------
    f_total : ndarray of float
        One row of length 6N+1 per sample: a leading 0, then the 3N force
        components, then their negatives.
    """
    f_total = []
    for stat in fstats:
        # pair forces: weighted sum of per-knot spline statistics
        pair_forces = sum(coef * comp for coef, comp in zip(params[2:], stat[2][:]))
        # manybody forces from the two embedding-function terms
        many_forces = params[0] * stat[0][-1] + params[1] * stat[1][-1]
        atoms = stat[0][0].shape[0]
        combined = 0.5 * pair_forces.flatten() + many_forces.flatten()
        # lay out as (0, +f, -f): a 6N+1 vector per configuration
        row = np.zeros(6 * atoms + 1, dtype=float)
        row[1:3 * atoms + 1] = combined
        row[3 * atoms + 1:] = -combined
        f_total.append(row)
    return np.array(f_total)
def sd2_loss(params_test, targets, stats, utot_func, ftot_func=None, dl=0.05, verbose=0, params_fix=[]):
    """
    Calculates squared statistical distance loss function for configurational energies and forces.
    Parameters
    ----------
    params_test : ndarray of floats
        EAM interaction parameters being optimized (spline coefficients array
        and embedding function parameters)
    targets: dict of dicts
        target energies and forces (per trajectory key; may carry 'beta',
        'energy', 'forces', 'weight')
    stats : dict of dicts
        Statistics describing particle configurations; must contain
        'hyperparams' plus one entry per trajectory key
    utot_func: function
        takes parameters and statistics and returns configurational energies
    ftot_func: function
        takes parameters and statistics and returns atomic forces
    dl: float
        coordinate perturbation magnitude for energy calculation from forces: du = f*dl
    verbose: int
        if > 0, print the loss components each call
    params_fix: list
        if non-empty, used as a fixed parameter template whose first two
        (embedding) entries are replaced by params_test[0:2]; the list is
        copied and never mutated, so the mutable default is safe here
    Returns
    -------
    float
        sd2 + sd2f: the sum of the energy-based and force-based squared
        statistical distances between model and target
    """
    if params_fix:
        # same embedding
        # only the two embedding coefficients are taken from the trial vector;
        # everything else is held at the fixed template values
        params = copy.copy(params_fix)
        params[0:2] = params_test[0:2]
        #params[2:6] = params_test[0:4]
        #params[23:26] = params_test[4:7]
    else:
        params = params_test
    # apply bounds on parametes
    #p = np.where(p < -1.0, -1.0, p)
    #p = np.where(p > 1.0, 1.0, p)
    hparams = stats['hyperparams']
    # cycle over target system trajectories and statistics to determine SD
    sd2 = sd2f = 0.0
    for key in targets.keys():
        targ = targets[key]
        stat = stats[key]
        beta = np.mean(targ['beta']) # system inverse temperature
        u_targ = np.array(targ['energy']) # target energies
        u_stat = stat['energy'] # energy statistics
        n_sample = len(u_targ)
        w = targ.get('weight', 1.0)
        #print('len', len(u_targ), len(u_stat))
        if w == 0.0:
            continue
        # energy diference array for a given target trajectory
        #print('all hparams:', key, hparams)
        uuu = beta*(utot_func(params, u_stat, hparams) - u_targ) # array(n_sample)
        # shift by the mean for numerical stability of the exponentials below
        uuu -= np.mean(uuu)
        eee = np.exp(-uuu)
        # are we using forces?
        # NOTE(review): the key tested is 'forcesx' while the data key read
        # below is 'forces' -- this looks like a deliberately disabled force
        # branch; confirm before relying on force-based distances
        if (not ftot_func) or ('forcesx' not in targ):
            # energy-based free energy difference and statistical distance
            ge = -np.log(np.mean(eee)) # free energy difference (shifted)
            cb = np.mean(np.exp(-0.5*(uuu - ge))) # Bhattacharyya coefficient
            sd2 += w*np.arccos(cb)**2 # statistical distance
        else:
            betad = beta*dl # beta * dl
            f_targ = targ['forces'] # target forces (n_sample, 1+6N) (0, 3Nf, -3Nf)
            f_stat = stat['forces'] # force statistics (n_sample, npars, 3N)
            eeh = np.exp(-0.5*uuu)
            fff = ftot_func(params, f_stat) # n_sample *(6N + 1) force contributions
            # target and model force terms
            fpave = np.mean([np.mean(np.exp(betad*f_targ[i])) for i in range(n_sample)])
            fqave = np.mean([eee[i]*np.mean(np.exp(betad*fff[i])) for i in range(n_sample)])
            fhave = np.mean([eeh[i]*np.mean(np.exp(0.5*betad*(fff[i]+f_targ[i]))) for i in range(n_sample)])
            # force-based free energy difference and statistical distance
            # NOTE(review): gef is computed but never used afterwards
            gef = -np.log(fqave/fpave)
            cb = fhave/(fqave*fpave)**0.5
            # clamp: rounding can push the Bhattacharyya coefficient above 1,
            # which would make arccos return NaN
            if cb > 1: cb = 1
            sd2f += w*np.arccos(cb)**2
    if verbose > 0:
        print('loss', sd2+sd2f, sd2, sd2f)
        #print('params', params, type(params))
    # add regularization condition
    return sd2 + sd2f
def udif_print(params, targets, stats, utot_func):
    """
    Evaluate model energies alongside target energies for every trajectory.

    Parameters
    ----------
    params : list of lists and floats
        EAM interaction parameters (spline coefficients array and embedding
        function parameters)
    targets : dict of dicts
        target data per trajectory key; each entry carries an 'energy' list
    stats : dict of dicts
        sufficient statistics; 'hyperparams' plus one entry per trajectory key
    utot_func : function
        takes parameters, energy statistics and hyperparameters and returns
        configurational energies

    Returns
    -------
    opti_out, targ_out : dict of lists of floats
        model and target configurational energies, keyed by trajectory name
    """
    hparams = stats['hyperparams']
    opti_out = {}
    targ_out = {}
    # walk the target trajectories and evaluate the model on each one
    for name, target in targets.items():
        opti_out[name] = list(utot_func(params, stats[name]['energy'], hparams))
        targ_out[name] = list(target['energy'])
    return opti_out, targ_out
def u_components(all_params, stats):
    """
    Calculates configurational energy components from EAM sufficient statistics
    and model parameters.

    Parameters
    ----------
    all_params : dict
        'params': dict with 'pair', 'edens' and 'embed' coefficient lists.
        ('hyperparams' may also be present but is not needed here.)
    stats : dict
        'energy': sufficient statistics for a trajectory of configurations.
        For sample i: ustats[i][2] are per-knot pair statistics and
        ustats[i][3] are per-knot electronic-density statistics
        (arrays over atoms).

    Returns
    -------
    u_parts : dict
        'u_pair'  -- pair-interaction energy per configuration (ndarray)
        'edens'   -- mean electronic density per atom per configuration (list)
        'u_many'  -- manybody (embedding) energy per configuration (ndarray)
        'u_total' -- 0.5*u_pair + u_many per configuration (ndarray)
    """
    params = all_params['params']
    ustats = stats['energy']
    n_sample = len(ustats)
    hp = params['pair']    # pair interaction coefficients
    hd = params['edens']   # electronic density coefficients (last is 1 by convention)
    he = params['embed']   # embedding function coefficients (sqrt and squared terms)
    # pair interactions: weighted sum over spline knots, one entry per sample
    u_pair = np.array([sum([a*s for a, s in zip(hp, ustats[i][2][:])]) for i in range(n_sample)])
    # cycle over samples for manybody interactions
    embed_r = []
    embed_2 = []
    edens = []
    for i in range(n_sample):
        # electronic density for each atom; rho_func.shape == (n_atom,)
        rho_func = sum([p*s for p, s in zip(hd, ustats[i][3][:])])
        edens.append(np.sum(rho_func)/rho_func.shape[0])
        # bugfix: the assertion message previously referenced an undefined
        # name (rho_func_shape) and the wrong statistic index (ustats[i][2]),
        # so a failing assert raised NameError instead of reporting the
        # mismatch; it now reports the values actually being compared.
        assert rho_func.shape[0] == ustats[i][3][0].shape[0], "rho_func shape {} does not match number of atoms == ustats shape {}".format(rho_func.shape[0], ustats[i][3][0].shape[0])
        # sum sqrt and squared atom contributions to the embedding function
        embed_r.append(np.sum(np.sqrt(rho_func)))
        embed_2.append(np.sum(rho_func**2))
    u_many = np.array([he[0]*embed_r[i] + he[1]*embed_2[i] for i in range(n_sample)])
    assert u_pair.shape == u_many.shape, "Shapes of u_pair ({}) and u_many ({}) do not match.".format(u_pair.shape, u_many.shape)
    # pair term halved to avoid double counting
    u_total = 0.5*u_pair + u_many
    u_parts = {'u_pair': u_pair, 'edens': edens, 'u_many': u_many, 'u_total': u_total}
    return u_parts
def u_components_per_box(all_params, stats, knot_id):
    """
    Calculates configurational energy from EAM sufficient statistics and model parameters,
    using a single per-box density statistic selected by knot_id.
    Parameters
    ----------
    all_params : dict of lists of floats
        EAM interaction parameters and hperparameters ('params' holds the
        'pair', 'edens' and 'embed' coefficient lists)
    stats : list of lists and floats
        Sufficient statistics for a trajectory of configurations
    knot_id : int
        index of the pair-statistic knot used as the per-box electronic
        density (note: read from ustats[i][2], not the per-atom density
        statistics in ustats[i][3])
    Returns
    -------
    u_parts: dict of lists of floats
        components of total energy (+electronic density) for each
        configuration in trajectory
    """
    hparams = all_params['hyperparams']
    params = all_params['params']
    ustats = stats['energy']
    n_sample = len(ustats)
    # pair interaction coefficients
    npair = len(hparams['pair'])
    hp = params['pair']
    #print('hp', hp)
    #print(ustats)#), len(ustats[0][2]))
    # electronic density coefficients. The first coefficient is always 1
    ndens = len(hparams['edens'])
    hd = params['edens']
    # embedding function
    he = params['embed']
    #print('n', n_sample, npair, ndens)
    # pair interactions (per box) from array of spline coefficeints and corresponding statistic
    # sum over spline components, make array over samples
    u_pair = np.array([sum([a*s for a, s in zip(hp, ustats[i][2][:])]) for i in range(n_sample)])
    # cycle over samples for manybody interactions
    embed_r = []
    embed_2 = []
    edens = []
    for i in range(n_sample):
        # calculate electronic density for each atom
        # coefficient for the last spline section is 1 by definition
        # rho_func.shape should be (n_atom, )
        #rho_func = sum([p*s for p, s in zip(hd, ustats[i][3][:])])
        #print('type', type(rho_func), rho_func.shape)
        #edens.append(np.sum(rho_func)/rho_func.shape[0])
        # per-box variant: take the single pair statistic at knot_id as the density
        rho_func = ustats[i][2][knot_id]
        # NOTE(review): 128 is presumably a hard-coded atom count for this
        # box -- TODO confirm and parameterize
        edens.append(rho_func/128)
        #assert rho_func.shape[0] == ustats[i][3][0].shape[0], f"rho_func shape {rho_func_shape[0]} does not match number of atoms == ustats shape {ustats[i][2][0].shape[0]}"
        #assert rho_func.shape[0] == ustats[i][3][0].shape[0], "rho_func shape {} does not match number of atoms == ustats shape {}".format(rho_func_shape[0], ustats[i][2][0].shape[0])
        # sum sqrt and squared atom contributions to embedding function
        embed_r.append(np.sum(np.sqrt(rho_func)))
        embed_2.append(np.sum(rho_func**2))
    u_many = np.array([he[0]*embed_r[i] + he[1]*embed_2[i] for i in range(n_sample)])
    assert u_pair.shape == u_many.shape, "Shapes of u_pair ({}) and u_many ({}) do not match.".format(u_pair.shape, u_many.shape)
    # pair term halved to avoid double counting
    u_total = 0.5*u_pair + u_many
    u_parts = {'u_pair':u_pair, 'edens':edens, 'u_many':u_many, 'u_total': u_total}
    return u_parts
| [
"lukas_vlcek@yahoo.com"
] | lukas_vlcek@yahoo.com |
4fd59a1ac585a5f6e6b546108569a098170199e8 | 3c8df871fe6d24872bc0fdf8d43d4b8e25cc7a79 | /SkillFile1.py | 8ddef8cd615412b598c7ddbd2df0ad6d2b15ddaa | [] | no_license | Wechtomka/SkillBox | 218ac19718db00e99e80ca3ae342a0a3ab407e6e | 31eb2ee03ce5267c8b766ad9e5acb4d5e640364e | refs/heads/master | 2021-03-05T01:12:59.659956 | 2020-03-09T15:44:25 | 2020-03-09T15:44:25 | 246,083,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | import random
# build a list of n random integers in [0, 10000]
n = 25
a = [random.randint(0, 10000) for _ in range(n)]
print(a)
# vot
| [
"valeravechtomov@gmail.com"
] | valeravechtomov@gmail.com |
f1b9d705860e3e5f69e290b188025d10c52789f1 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow/python/autograph/core/unsupported_features_checker.py | 9ecab32e6c5f01db35c77233cc55c757a9f80212 | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d0644b74e1e6d4d41084b1c1d32d62fc2a1adb15cd7c6141bd2a62448c182854
size 1815
| [
"github@cuba12345"
] | github@cuba12345 |
6b36768e7c4ad8bb652b0eac3b2eef7fd91dc64a | dfd43751bc89a8662a3ee37120412176992bb5e4 | /codes/flower_cls/solver.py | 8694129db89b8e2bdbfd9a8530cd30c27bfa29d7 | [] | no_license | AbinayaKumar25/pytorch-exercise | 90ea7c20bd07a90a204eb78f8785c1f876a65879 | 219d8818703687687a4de562c737c833e6fe534d | refs/heads/master | 2022-05-27T12:24:32.338401 | 2020-05-03T15:24:48 | 2020-05-03T15:24:48 | 258,814,500 | 0 | 0 | null | 2020-04-25T15:51:54 | 2020-04-25T15:51:53 | null | UTF-8 | Python | false | false | 3,087 | py | import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from net import Net
from dataset import Dataset
class Solver():
    """Training/evaluation driver for the flower classifier.

    Builds the train/test datasets, data loader, network, loss and optimizer
    from the parsed command-line ``args``; ``fit`` runs the training loop,
    periodically evaluating and checkpointing the model.
    """
    def __init__(self, args):
        """Construct datasets, loader, model, loss and optimizer.

        Also creates ``args.ckpt_dir`` if it does not exist yet.
        """
        # prepare a dataset
        self.train_data = Dataset(train=True,
                                  data_root=args.data_root,
                                  size=args.image_size)
        self.test_data = Dataset(train=False,
                                 data_root=args.data_root,
                                 size=args.image_size)
        self.train_loader = DataLoader(self.train_data,
                                       batch_size=args.batch_size,
                                       num_workers=1,
                                       shuffle=True, drop_last=True)
        # turn on the CUDA if available
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.net = Net().to(self.device)
        self.loss_fn = torch.nn.CrossEntropyLoss()
        self.optim = torch.optim.Adam(self.net.parameters(), args.lr)
        self.args = args
        if not os.path.exists(args.ckpt_dir):
            os.makedirs(args.ckpt_dir)
    def fit(self):
        """Train for ``args.max_epochs`` epochs.

        Every ``args.print_every`` epochs, accuracy is reported on both
        splits and a checkpoint is saved.
        """
        args = self.args
        for epoch in range(args.max_epochs):
            # re-enable training mode each epoch (evaluate() switches to eval)
            self.net.train()
            for step, inputs in enumerate(self.train_loader):
                images = inputs[0].to(self.device)
                labels = inputs[1].to(self.device)
                preds = self.net(images)
                loss = self.loss_fn(preds, labels)
                # standard backprop step: clear grads, backward, update
                self.optim.zero_grad()
                loss.backward()
                self.optim.step()
            if (epoch+1) % args.print_every == 0:
                train_acc = self.evaluate(self.train_data)
                test_acc = self.evaluate(self.test_data)
                print("Epoch [{}/{}] Loss: {:.3f} Train Acc: {:.3f}, Test Acc: {:.3f}".
                    format(epoch+1, args.max_epochs, loss.item(), train_acc, test_acc))
                self.save(args.ckpt_dir, args.ckpt_name, epoch+1)
    def evaluate(self, data):
        """Return classification accuracy of the current model on ``data``."""
        args = self.args
        loader = DataLoader(data,
                            batch_size=args.batch_size,
                            num_workers=1,
                            shuffle=False)
        # eval mode: freeze dropout/batch-norm statistics during scoring
        self.net.eval()
        num_correct, num_total = 0, 0
        with torch.no_grad():
            for inputs in loader:
                images = inputs[0].to(self.device)
                labels = inputs[1].to(self.device)
                outputs = self.net(images)
                # predicted class = argmax over logits
                _, preds = torch.max(outputs.detach(), 1)
                num_correct += (preds == labels).sum().item()
                num_total += labels.size(0)
        return num_correct / num_total
    def save(self, ckpt_dir, ckpt_name, global_step):
        """Write model weights to <ckpt_dir>/<ckpt_name>_<global_step>.pth."""
        save_path = os.path.join(
            ckpt_dir, "{}_{}.pth".format(ckpt_name, global_step))
        torch.save(self.net.state_dict(), save_path)
| [
"nmhkahn@gmail.com"
] | nmhkahn@gmail.com |
5d9e0075253122f9ebcf9e7151e8c02a54560e84 | 6ae747b190fd05491a865b55a5c8c5577b9c3e88 | /options.py | 058e1888bd7b6f1b214ec9d1915026f9ba99fb4d | [] | no_license | Ponts/MAESTRO | c720938317ef6bd1ce9a4d3f4ff13ffde6fbaff4 | cea40e3ff0bd59fea19deec07f1f7f9797e90b33 | refs/heads/master | 2020-04-30T12:49:56.920146 | 2019-06-14T06:43:44 | 2019-06-14T06:43:44 | 176,836,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | options = {
"filter_width": 2,
"sample_rate": 16000,
"dilations": [1, 2, 4, 8, 16, 32, 64, 128, 256, 512,
1, 2, 4, 8, 16, 32, 64, 128, 256, 512,
1, 2, 4, 8, 16, 32, 64, 128, 256, 512,
1, 2, 4, 8, 16, 32, 64, 128, 256, 512,
1, 2, 4, 8, 16, 32, 64, 128, 256, 512],
"residual_channels": 32,
"dilation_channels": 32,
"quantization_channels": 1,
#"skip_channels": 512,
"skip_channels":256,
"use_biases": False, #Maybe change?
"scalar_input": True,
"initial_filter_width": 32,
"batch_size" : 8, #32 gives OOM
"noise_dimensions" : 100,
"noise_variance" : 1.0,
"noise_mean" : 0.0,
"sample_size" : 1000,
"final_layer_size" : 256,
} | [
"pbrink@kth.se"
] | pbrink@kth.se |
7b55e1953289ae47bc0cf1c4b4d91e477896cb45 | c9638b7a7898ffdd86ec31a778b0e52ded5fb68a | /Assignment05_Starter.py | ab4bf12633cd67e6ad0df25da7af61c8ec3eb8e4 | [] | no_license | roslynm/IntroToProg-Python | 9660469365ef78fceddafb328f7ae806af154467 | 6c5db22811b5c47bf67e3fc862ddd7c86d5100e1 | refs/heads/master | 2022-07-13T22:05:28.399021 | 2020-05-16T18:24:33 | 2020-05-16T18:24:33 | 264,487,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,862 | py | # ------------------------------------------------------------------------ #
# Title: Assignment 05
# Description: Working with Dictionaries and Files
# When the program starts, load each "row" of data
# in "ToDoToDoList.txt" into a python Dictionary.
# Add the each dictionary "row" to a python list "table"
# ChangeLog (Who,When,What):# RRoot,1.1.2030,Created started script
# Roslyn Melookaran,5/14/20,Added code to complete assignment 5
# ------------------------------------------------------------------------ #
# -- Data -- #
# Declare variables and constants
strFile = "ToDoList.txt" # File Name
lstData = "" # A row of text data from the file, ** Note, I changed this from the initial "strData" because I am reading in a row from file as a list and not a string
dicRow = {} # A row of data separated into elements of a dictionary {Task,Priority}
lstTable = [] # A list that acts as a 'table' of rows
strMenu = "" # A menu of user options
strChoice = "" # A Capture the user option selection
ObjFile= None # Object that represents the file (NOTE(review): capital-O name is unused; objFile below is the one actually used)
# Seed the ToDoList.txt file with three example tasks (overwrites the file)
objFile=open(strFile, "w")
dicRow = {"Task":"Laundry","Priority": "High"}
objFile.write(dicRow["Task"]+ ',' + dicRow["Priority"] + '\n')
dicRow = {"Task":"Dishes","Priority": "Low"}
objFile.write(dicRow["Task"]+ ',' + dicRow["Priority"] + '\n')
dicRow = {"Task":"Homework","Priority": "Medium"}
objFile.write(dicRow["Task"]+ ',' + dicRow["Priority"] + '\n')
objFile.close()
# Define menu of user options
strMenu = """
    Menu of Options
    1) Show current data
    2) Add a new item.
    3) Remove an existing item.
    4) Save Data to File
    5) Exit Program    """
# -- Processing -- #
# Step 1 - When the program starts, load any data you have in the text file named by strFile into a python list of dictionary rows (like Lab 5-2)
objFile=open(strFile, "r")
for row in objFile:
    lstData = row.split(",") # Returns row as a list
    dicRow = {"Task": lstData[0].strip(), "Priority": lstData[1].strip()} # Note strip function to get rid of extra spaces and carraige return
    lstTable.append(dicRow)
objFile.close()
# -- Input/Output -- #
# Step 2 - Display a menu of choices to the user (loops until option 5)
while (True):
    print(strMenu)
    strChoice = str(input("Which option would you like to perform? [1 to 5] - "))
    print() # adding a new line for looks
    # Step 3 - Show the current items in the table
    if (strChoice.strip() == '1'):
        for row in lstTable:
            print("Task- "+row.get("Task")+", Priority- "+row.get("Priority"))
        continue
    # Step 4 - Add a new item to the list/Table
    elif (strChoice.strip() == '2'):
        strTask=input("Please enter a task: ")
        strPriority=input("Please enter a priority: ")
        dicRow={"Task":strTask,"Priority":strPriority}
        lstTable.append(dicRow)
        continue
    # Step 5 - Remove an existing item from the list/Table
    # (only the first matching task is removed; no message if no match)
    elif (strChoice.strip() == '3'):
        removeChoice=input("Please type in the task you would like to remove: ")
        for i in range(len(lstTable)):
            if lstTable[i]['Task'] == removeChoice:
                print("Task: " + lstTable[i]["Task"] +", Priority: " + lstTable[i]["Priority"]+", has been removed")
                del lstTable[i]
                break
        continue
    # Step 6 - Save tasks to the ToDoList.txt file (overwrites it)
    elif (strChoice.strip() == '4'):
        objFile = open(strFile, "w")
        print("Your tasks have been written to the file!")
        for row in lstTable:
            objFile.write(row.get("Task") + ", " + row.get("Priority")+"\n")
        objFile.close()
        continue
    # Step 7 - Exit program
    elif (strChoice.strip() == '5'):
        print("Thanks for using this program!")
break # and Exit the program | [
"noreply@github.com"
] | noreply@github.com |
f954afca286ead0f30eadda260fb7ed77017edd1 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/FJZJJMM/YW_FJZJJMM_SZSJ_258.py | e1df1921dc57e77398bc293709c609467ced5724 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,338 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from env_restart import *
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_FJZJJMM_SZSJ_258(xtp_test_case):
    """Auto-generated trading regression case: buy Shenzhen-A with the
    'best of own side' (forward-best) price type when available funds exactly
    equal the order amount plus fees."""
    def setUp(self):
        # reset the test account's fund assets for this case and restart the
        # Shenzhen environment so every run starts from a known state
        sql_transfer = SqlData_Transfer()
        sql_transfer.transfer_fund_asset('YW_FJZJJMM_SZSJ_258')
        clear_data_and_restart_sz()
        # re-login so the fresh account state is picked up by the session
        Api.trade.Logout()
        Api.trade.Login()
    def test_YW_FJZJJMM_SZSJ_258(self):
        title = '可用资金正好-深A本方最优买(可用资金=下单金额+费用)'
        # Define the expected outcome of this test case.
        # Expected order status is one of: initial, not traded, partially
        # traded, fully traded, partial-cancel reported, partially cancelled,
        # reported pending cancel, cancelled, rejected, cancel-rejected,
        # internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': ['未成交','全成','部成'][trade_type],
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters ------------------------------------------
        # Args: ticker, market, security type, security status, trading status,
        # side (B=buy / S=sell), expected status, Api
        stkparm = QueryStkPriceQty('151133', '2', '24', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, fail the case
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            # order request: cash buy on Shenzhen-A with forward-best pricing
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':trade_type + 1,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
                'price': stkparm['涨停价'],
                'quantity': 200,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            # run the order through the service layer and compare with the goal
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True) # 5
# allow running this case directly via the unittest runner
if __name__ == '__main__':
    unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
62b3e8c98f69413f4928500748c9d216dbf81623 | 9ac31b906922cfddc223354f90aea8d9c2d91877 | /puntuacionCondicional.py | a3435ff1de3bccdf74dff03a3bdf673bdfe8f86a | [] | no_license | VAEH/TiempoLibre | 93210c80e2457d358500b790ac795975997fc7f3 | 0994cd3abb9f815cd8e9eaa485f53135f46088ca | refs/heads/master | 2023-05-01T16:42:23.355973 | 2021-05-17T06:25:08 | 2021-05-17T06:25:08 | 298,337,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | try:
    # read the score; float() raises ValueError on non-numeric input, which
    # falls through to the except block below
    score = float (input("Enter Score: "))
    # grade only scores inside the valid [0.0, 1.0] range
    if score >= 0.0 and score <=1.0:
        if score>=0.9:
            print('A')
        elif score >=0.8:
            print('B')
        elif score >= 0.7:
            print('C')
        elif score >= 0.6:
            print('D')
        else:
            print('F')
    else:
        print('Casi pero no, no es el Rango ')
except:
    # NOTE(review): bare except catches everything (even KeyboardInterrupt);
    # consider narrowing to "except ValueError:"
    print('Por Favor ingrese el Rango Correspondiente')
| [
"jpnewton12@gmail.com"
] | jpnewton12@gmail.com |
6a7750d3d923482fa6bc1912c5e162ef293f28e1 | 74a4d13ad5228cfe8cc2ffeb4c795a8ca16ed960 | /Apriori/apyori.py | 2543aa8cb9cb58faa2a7a9833f2a4be3a20b0de7 | [] | no_license | kaustubsinha/Machine-Learning-Basic | 6e18e4cdc8b5a7100400cec0e831d4ef226b363f | e3b4639c70e800c2e85f04884a7d6edd78aa8dc5 | refs/heads/Linear-Regresssion-Algorithm | 2020-03-21T10:01:16.159560 | 2019-12-19T11:08:15 | 2019-12-19T11:08:15 | 138,429,935 | 1 | 0 | null | 2019-12-19T10:56:18 | 2018-06-23T20:02:20 | Python | UTF-8 | Python | false | false | 14,995 | py | #!/usr/bin/env python
"""
a simple implementation of Apriori algorithm by Python.
"""
import sys
import csv
import argparse
import json
import os
from collections import namedtuple
from itertools import combinations
from itertools import chain
# Meta informations.
__version__ = '1.1.1'
__author__ = 'Yu Mochizuki'
__author_email__ = 'ymoch.dev@gmail.com'
################################################################################
# Data structures.
################################################################################
class TransactionManager(object):
    """
    Transaction managers.

    Maintains an inverted index mapping each item to the set of transaction
    indexes containing it, so supports are computed by intersecting index
    sets instead of rescanning transactions.
    """

    def __init__(self, transactions):
        """
        Initialize.

        Arguments:
            transactions -- A transaction iterable object
                            (eg. [['A', 'B'], ['B', 'C']]).
        """
        self.__count = 0
        self.__ordered_items = []
        self.__index_of = {}
        for transaction in transactions:
            self.add_transaction(transaction)

    def add_transaction(self, transaction):
        """
        Add a transaction.

        Arguments:
            transaction -- A transaction as an iterable object (eg. ['A', 'B']).
        """
        for item in transaction:
            occurrences = self.__index_of.get(item)
            if occurrences is None:
                # first sighting: remember insertion order and start its index set
                self.__ordered_items.append(item)
                occurrences = self.__index_of[item] = set()
            occurrences.add(self.__count)
        self.__count += 1

    def calc_support(self, items):
        """
        Returns a support for items.

        Arguments:
            items -- Items as an iterable object (eg. ['A', 'B']).
        """
        # The empty item set is contained in every transaction.
        if not items:
            return 1.0

        # With no transactions, nothing is supported.
        if not self.__count:
            return 0.0

        shared = None
        for item in items:
            occurrences = self.__index_of.get(item)
            if occurrences is None:
                # An unknown item makes the whole set unsupported.
                return 0.0
            shared = occurrences if shared is None else shared & occurrences
        return float(len(shared)) / self.__count

    def initial_candidates(self):
        """
        Returns the initial candidates.
        """
        return [frozenset([item]) for item in self.items]

    @property
    def num_transaction(self):
        """
        Returns the number of transactions.
        """
        return self.__count

    @property
    def items(self):
        """
        Returns the item list that the transaction is consisted of.
        """
        return sorted(self.__ordered_items)

    @staticmethod
    def create(transactions):
        """
        Create the TransactionManager with a transaction instance.
        If the given instance is a TransactionManager, this returns itself.
        """
        if isinstance(transactions, TransactionManager):
            return transactions
        return TransactionManager(transactions)
# Ignore name errors because these names are namedtuples.
# SupportRecord: a frequent itemset and its support value.
SupportRecord = namedtuple( # pylint: disable=C0103
    'SupportRecord', ('items', 'support'))
# RelationRecord: a SupportRecord extended with its ordered statistics (rules).
RelationRecord = namedtuple( # pylint: disable=C0103
    'RelationRecord', SupportRecord._fields + ('ordered_statistics',))
# OrderedStatistic: one association rule (base -> add) with confidence/lift.
OrderedStatistic = namedtuple( # pylint: disable=C0103
    'OrderedStatistic', ('items_base', 'items_add', 'confidence', 'lift',))
################################################################################
# Inner functions.
################################################################################
def create_next_candidates(prev_candidates, length):
    """
    Returns the apriori candidates as a list.

    Arguments:
        prev_candidates -- Previous candidates as a list (or set) of frozensets.
        length -- The lengths of the next candidates.
    """
    # Solve the items (sorted for a deterministic combination order).
    items = sorted(frozenset(chain.from_iterable(prev_candidates)))

    # Create the temporary candidates. These will be filtered below.
    tmp_next_candidates = (frozenset(x) for x in combinations(items, length))

    # Return all the candidates if the length of the next candidates is 2
    # because their subsets are the same as items.
    if length < 3:
        return list(tmp_next_candidates)

    # Apriori pruning: keep a candidate only if every (length-1)-subset is
    # already a previous candidate (any frequent set has frequent subsets).
    # (idiom fix: the redundant "True if ... else False" wrapper is gone.)
    next_candidates = [
        candidate for candidate in tmp_next_candidates
        if all(
            frozenset(x) in prev_candidates
            for x in combinations(candidate, length - 1))
    ]
    return next_candidates
def gen_support_records(transaction_manager, min_support, **kwargs):
    """
    Yield SupportRecord instances for every itemset whose support reaches
    min_support, level by level (itemset size 1, 2, ...).

    Arguments:
        transaction_manager -- Transactions as a TransactionManager instance.
        min_support -- A minimum support (float).

    Keyword arguments:
        max_length -- The maximum length of relations (integer).
    """
    max_length = kwargs.get('max_length')

    # Test hook: allows injecting a replacement candidate generator.
    _create_next_candidates = kwargs.get(
        '_create_next_candidates', create_next_candidates)

    candidates = transaction_manager.initial_candidates()
    length = 1
    while candidates:
        frequent = set()
        for candidate in candidates:
            support = transaction_manager.calc_support(candidate)
            if support < min_support:
                continue
            itemset = frozenset(candidate)
            frequent.add(itemset)
            yield SupportRecord(itemset, support)
        length += 1
        if max_length and length > max_length:
            break
        candidates = _create_next_candidates(frequent, length)
def gen_ordered_statistics(transaction_manager, record):
    """
    Yield OrderedStatistic instances (base -> add rules) for a support record.

    Arguments:
        transaction_manager -- Transactions as a TransactionManager instance.
        record -- A support record as a SupportRecord instance.
    """
    items = record.items
    sorted_items = sorted(items)
    # every (n-1)-subset becomes a rule base; the remaining item is the add side
    for base_combination in combinations(sorted_items, len(items) - 1):
        items_base = frozenset(base_combination)
        items_add = frozenset(items.difference(items_base))
        base_support = transaction_manager.calc_support(items_base)
        confidence = record.support / base_support
        lift = confidence / transaction_manager.calc_support(items_add)
        yield OrderedStatistic(
            frozenset(items_base), frozenset(items_add), confidence, lift)
def filter_ordered_statistics(ordered_statistics, **kwargs):
    """
    Filter OrderedStatistic objects.
    Arguments:
        ordered_statistics -- A OrderedStatistic iterable object.
    Keyword arguments:
        min_confidence -- The minimum confidence of relations (float).
        min_lift -- The minimum lift of relations (float).
    """
    min_confidence = kwargs.get('min_confidence', 0.0)
    min_lift = kwargs.get('min_lift', 0.0)
    for statistic in ordered_statistics:
        # Keep only statistics clearing both thresholds.
        if statistic.confidence >= min_confidence and statistic.lift >= min_lift:
            yield statistic
################################################################################
# API function.
################################################################################
def apriori(transactions, **kwargs):
    """
    Executes Apriori algorithm and returns a RelationRecord generator.
    Arguments:
        transactions -- A transaction iterable object
                        (eg. [['A', 'B'], ['B', 'C']]).
    Keyword arguments:
        min_support -- The minimum support of relations (float).
        min_confidence -- The minimum confidence of relations (float).
        min_lift -- The minimum lift of relations (float).
        max_length -- The maximum length of the relation (integer).
    """
    min_support = kwargs.get('min_support', 0.1)
    min_confidence = kwargs.get('min_confidence', 0.0)
    min_lift = kwargs.get('min_lift', 0.0)
    max_length = kwargs.get('max_length', None)
    if min_support <= 0:
        raise ValueError('minimum support must be > 0')
    # Hook points for tests.
    _gen_support_records = kwargs.get(
        '_gen_support_records', gen_support_records)
    _gen_ordered_statistics = kwargs.get(
        '_gen_ordered_statistics', gen_ordered_statistics)
    _filter_ordered_statistics = kwargs.get(
        '_filter_ordered_statistics', filter_ordered_statistics)
    manager = TransactionManager.create(transactions)
    # For every frequent itemset, derive its candidate rules, filter them
    # by confidence/lift, and emit a record only when rules remain.
    for support_record in _gen_support_records(
            manager, min_support, max_length=max_length):
        statistics = list(
            _filter_ordered_statistics(
                _gen_ordered_statistics(manager, support_record),
                min_confidence=min_confidence,
                min_lift=min_lift,
            )
        )
        if statistics:
            yield RelationRecord(
                support_record.items, support_record.support, statistics)
################################################################################
# Application functions.
################################################################################
def parse_args(argv):
    """
    Parse commandline arguments.
    Arguments:
        argv -- An argument list without the program name.

    Returns the parsed argparse.Namespace, augmented with an
    ``output_func`` attribute resolved from ``--out-format``.
    """
    # Maps each --out-format choice to the function that writes a record.
    output_funcs = {
        'json': dump_as_json,
        'tsv': dump_as_two_item_tsv,
    }
    default_output_func_key = 'json'
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-v', '--version', action='version',
        version='%(prog)s {0}'.format(__version__))
    # Zero or more input files; falls back to stdin when none are given.
    parser.add_argument(
        'input', metavar='inpath', nargs='*',
        help='Input transaction file (default: stdin).',
        type=argparse.FileType('r'), default=[sys.stdin])
    parser.add_argument(
        '-o', '--output', metavar='outpath',
        help='Output file (default: stdout).',
        type=argparse.FileType('w'), default=sys.stdout)
    parser.add_argument(
        '-l', '--max-length', metavar='int',
        help='Max length of relations (default: infinite).',
        type=int, default=None)
    parser.add_argument(
        '-s', '--min-support', metavar='float',
        help='Minimum support ratio (must be > 0, default: 0.1).',
        type=float, default=0.1)
    parser.add_argument(
        '-c', '--min-confidence', metavar='float',
        help='Minimum confidence (default: 0.5).',
        type=float, default=0.5)
    parser.add_argument(
        '-t', '--min-lift', metavar='float',
        help='Minimum lift (default: 0.0).',
        type=float, default=0.0)
    parser.add_argument(
        '-d', '--delimiter', metavar='str',
        help='Delimiter for items of transactions (default: tab).',
        type=str, default='\t')
    parser.add_argument(
        '-f', '--out-format', metavar='str',
        help='Output format ({0}; default: {1}).'.format(
            ', '.join(output_funcs.keys()), default_output_func_key),
        type=str, choices=output_funcs.keys(), default=default_output_func_key)
    args = parser.parse_args(argv)
    # Resolve the chosen format name to its writer function once, here.
    args.output_func = output_funcs[args.out_format]
    return args
def load_transactions(input_file, **kwargs):
    """
    Load transactions and returns a generator for transactions.
    Arguments:
        input_file -- An input file.
    Keyword arguments:
        delimiter -- The delimiter of the transaction.
    """
    delimiter = kwargs.get('delimiter', '\t')
    reader = csv.reader(input_file, delimiter=delimiter)
    for row in reader:
        if row:
            yield row
        else:
            # A blank line parses to an empty row; normalise it to ['']
            # so every yielded transaction has at least one item.
            yield ['']
def dump_as_json(record, output_file):
    """
    Dump an relation record as a json value.
    Arguments:
        record -- A RelationRecord instance to dump.
        output_file -- A file to output.
    """
    def _serialize(value):
        """Convert JSON-unfriendly values; frozensets become sorted lists."""
        if isinstance(value, frozenset):
            return sorted(value)
        raise TypeError(repr(value) + " is not JSON serializable")
    # Replace the nested namedtuples with plain dicts before dumping.
    stats_as_dicts = [stat._asdict() for stat in record.ordered_statistics]
    serializable = record._replace(ordered_statistics=stats_as_dicts)
    json.dump(
        serializable._asdict(), output_file,
        default=_serialize, ensure_ascii=False)
    # One record per line.
    output_file.write(os.linesep)
def dump_as_two_item_tsv(record, output_file):
    """
    Dump a relation record as TSV only for 2 item relations.
    Arguments:
        record -- A RelationRecord instance to dump.
        output_file -- A file to output.
    """
    for stat in record.ordered_statistics:
        # Only emit simple one-to-one rules (1 base item, 1 added item).
        if len(stat.items_base) == 1 and len(stat.items_add) == 1:
            output_file.write('{0}\t{1}\t{2:.8f}\t{3:.8f}\t{4:.8f}{5}'.format(
                list(stat.items_base)[0], list(stat.items_add)[0],
                record.support, stat.confidence, stat.lift,
                os.linesep))
def main(**kwargs):
    """
    Executes Apriori algorithm and print its result.

    Keyword arguments (test hooks):
        _parse_args -- replacement for parse_args.
        _load_transactions -- replacement for load_transactions.
        _apriori -- replacement for apriori.
    """
    # For tests.
    _parse_args = kwargs.get('_parse_args', parse_args)
    _load_transactions = kwargs.get('_load_transactions', load_transactions)
    _apriori = kwargs.get('_apriori', apriori)
    args = _parse_args(sys.argv[1:])
    # Concatenate all input files into one transaction stream.
    transactions = _load_transactions(
        chain(*args.input), delimiter=args.delimiter)
    result = _apriori(
        transactions,
        max_length=args.max_length,
        min_support=args.min_support,
        min_confidence=args.min_confidence,
        # Bug fix: -t/--min-lift was parsed but never forwarded, so the
        # option silently had no effect on the generated rules.
        min_lift=args.min_lift)
    for record in result:
        args.output_func(record, args.output)
# Script entry point: run the Apriori CLI when executed directly.
if __name__ == '__main__':
    main()
"noreply@github.com"
] | noreply@github.com |
2ef45d0901e7aa9952c147ec2d1daccaef373028 | e5d130e183b5dea1b7aad23a047c703fa0d2b3bf | /lightbus/transports/pool.py | b680ac6858890624f59a6c14e96bddbe072a9cae | [
"Apache-2.0"
] | permissive | adamcharnock/lightbus | 4a86428b8203bfe98f77a32375ac961ef398ce16 | cf892779a9a9a8f69c789ffa83c24acfb7f9a336 | refs/heads/master | 2023-08-26T04:19:39.395735 | 2023-08-23T11:07:44 | 2023-08-23T11:07:44 | 94,617,214 | 193 | 22 | Apache-2.0 | 2023-08-10T21:21:51 | 2017-06-17T10:39:23 | Python | UTF-8 | Python | false | false | 7,438 | py | import threading
from inspect import iscoroutinefunction, isasyncgenfunction
from typing import NamedTuple, List, TypeVar, Type, Generic, TYPE_CHECKING
from lightbus.exceptions import (
TransportPoolIsClosed,
CannotShrinkEmptyPool,
CannotProxySynchronousMethod,
CannotProxyPrivateMethod,
CannotProxyProperty,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,cyclic-import
from lightbus.config import Config
from lightbus.transports.base import Transport
VT = TypeVar("VT", bound=Transport)
else:
VT = TypeVar("VT")
class TransportPool(Generic[VT]):
    """Pool for managing access to transports

    This pool will function as a transparent proxy to the underlying transports.
    In most cases you shouldn't need to access the underlying transports. If you
    do you can use the context manager as follows:

        async with transport_pool as transport:
            transport.send_event(...)

    Note that this pool will only perform pooling within the thread in which the
    pool was created. If another thread uses the pool then the pool will be bypassed.
    In this case, a new transport will always be created on checkout, and this
    transport will then be immediately closed when checked back in.

    This is because the pool will normally be closed sometime after the thread has
    completed, at which point each transport in the pool will be closed. However, closing
    the transport requires access to the event loop for the specific transport, but that
    loop would have been closed when the thread shut down. It therefore becomes impossible to
    close the transport cleanly. Therefore, in the case of threads, we create new transports on
    checkout, and close and discard the transport on checkin.

    This will have some performance impact for non-async user-provided-callables which need to
    access the bus. These callables are run in a thread, and so will need fresh connections.
    """
    def __init__(self, transport_class: Type[VT], transport_config: NamedTuple, config: "Config"):
        self.transport_class = transport_class
        self.transport_config = transport_config
        self.config = config
        # Set by close(); once True, checkout() refuses new requests.
        self.closed = False
        # Re-entrant lock guarding pool / checked_out mutation.
        self.lock = threading.RLock()
        # Open transports available for checkout.
        self.pool: List[VT] = []
        # Transports currently loaned out (home-thread checkouts only).
        self.checked_out = set()
        # Transports handed out via the async context manager protocol.
        self.context_stack: List[VT] = []
        # Pooling only applies to the thread that created the pool; other
        # threads get throwaway transports (see class docstring).
        self.home_thread = threading.current_thread()
    def __repr__(self):
        return f"<Pool of {self.transport_class.__name__} at 0x{id(self):02x} to {self}>"
    def __hash__(self):
        # Pools are interchangeable if they produce the same transport class
        # with the same configuration.
        return hash((self.transport_class, self.transport_config))
    def __eq__(self, other):
        return hash(self) == hash(other)
    def __str__(self):
        # Here we create an un-opened transport and stringify it.
        # This means we can display nice redis URLs when displaying the pool
        # for debugging output.
        transport = self._instantiate_transport()
        return str(transport)
    async def grow(self):
        """Add one freshly-opened transport to the pool."""
        with self.lock:
            new_transport = await self._create_transport()
            self.pool.append(new_transport)
    async def shrink(self):
        """Remove and close the oldest idle transport in the pool."""
        with self.lock:
            try:
                old_transport = self.pool.pop(0)
            except IndexError:
                raise CannotShrinkEmptyPool(
                    "Transport pool is already empty, cannot shrink it further"
                )
            await self._close_transport(old_transport)
    async def checkout(self) -> VT:
        """Loan a transport out of the pool (or create one off-thread)."""
        if self.closed:
            raise TransportPoolIsClosed("Cannot get a connection, transport pool is closed")
        if threading.current_thread() != self.home_thread:
            # Foreign thread: bypass the pool entirely (see class docstring).
            return await self._create_transport()
        else:
            with self.lock:
                if not self.pool:
                    await self.grow()
                transport = self.pool.pop(0)
                self.checked_out.add(transport)
                return transport
    async def checkin(self, transport: VT):
        """Return a previously checked-out transport."""
        if threading.current_thread() != self.home_thread:
            # Foreign-thread transports are throwaway: close immediately.
            return await self._close_transport(transport)
        else:
            with self.lock:
                self.checked_out.discard(transport)
                self.pool.append(transport)
                if self.closed:
                    # close() was called while this transport was loaned out;
                    # finish the shutdown now that it is back.
                    await self._close_all()
    @property
    def free(self) -> int:
        # Number of idle transports available for checkout.
        return len(self.pool)
    @property
    def in_use(self) -> int:
        # Number of transports currently checked out.
        return len(self.checked_out)
    @property
    def total(self) -> int:
        # Total transports managed by this pool (idle + loaned out).
        return self.free + self.in_use
    async def __aenter__(self) -> VT:
        transport = await self.checkout()
        self.context_stack.append(transport)
        return transport
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # NOTE(review): assumes context entries/exits are strictly nested and
        # not interleaved across tasks — confirm before concurrent use.
        transport = self.context_stack.pop()
        await self.checkin(transport)
    async def close(self):
        """Mark the pool closed and close all idle transports.

        Transports still checked out are closed later, on checkin."""
        with self.lock:
            self.closed = True
            await self._close_all()
    async def _close_all(self):
        # Drain and close every idle transport currently in the pool.
        with self.lock:
            while self.pool:
                await self._close_transport(self.pool.pop())
    def _instantiate_transport(self) -> VT:
        """Instantiate a transport without opening it"""
        return self.transport_class.from_config(
            config=self.config, **self.transport_config._asdict()
        )
    async def _create_transport(self) -> VT:
        """Return an opened transport"""
        new_transport = self._instantiate_transport()
        await new_transport.open()
        return new_transport
    async def _close_transport(self, transport: VT):
        """Close a specific transport"""
        await transport.close()
    def __getattr__(self, item):
        # Transparent proxy: unknown attributes are resolved against the
        # transport class and, when they are async callables, wrapped so each
        # call checks a transport out, runs, and checks it back in.
        async def fn_pool_wrapper(*args, **kwargs):
            # Wraps a coroutine method in a checkout/checkin cycle.
            async with self as transport:
                return await getattr(transport, item)(*args, **kwargs)
        async def gen_pool_wrapper(*args, **kwargs):
            # Wraps an async generator method; the transport stays checked
            # out for the lifetime of the iteration.
            async with self as transport:
                async for value in getattr(transport, item)(*args, **kwargs):
                    yield value
        attr = getattr(self.transport_class, item, None)
        if not attr:
            raise AttributeError(
                f"Neither the transport pool {repr(self)} nor the transport with class "
                f"{repr(self.transport_class)} has an attribute named {item}"
            )
        elif item[0] == "_":
            raise CannotProxyPrivateMethod(
                f"Cannot proxy private method calls to transport. Use the pool's async context or "
                f"checkout() method if you really need to access private methods. (Private methods "
                f"are ones whose name starts with an underscore)"
            )
        elif not callable(attr):
            raise CannotProxyProperty(
                f"Cannot proxy property access on transports. Use the pool's async context or "
                f"checkout() method to get access to a transport directly."
            )
        else:
            if iscoroutinefunction(attr):
                return fn_pool_wrapper
            elif isasyncgenfunction(attr):
                return gen_pool_wrapper
            else:
                raise CannotProxySynchronousMethod(
                    f"{self.transport_class.__name__}.{item}() is synchronous "
                    "and must be accessed directly and not via the pool"
                )
"adam@adamcharnock.com"
] | adam@adamcharnock.com |
7933e38da4d9057e66aacf8c9acc9ba0b3e8b4e3 | af61c369e3550643d47fba2445d9f279e412e15c | /basicSprite.py | 2f0bfb2df06b261977e9f782873c230385348b8d | [] | no_license | Rabidza/pygame_learningpython | 45e900b5a8458a14e7df317de16a9e7cd18737fa | ef58d9ca977e2ea1406200ce04c3a32a440be66a | refs/heads/master | 2020-06-03T15:13:38.419015 | 2015-02-18T16:16:30 | 2015-02-18T16:16:30 | 30,924,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import pygame
from helpers import *
class Sprite(pygame.sprite.Sprite):
    """A minimal sprite positioned by the center point of its image."""
    def __init__(self, centerPoint, image):
        pygame.sprite.Sprite.__init__(self)
        self.image = image
        # Derive the bounding rect from the image, then center it on the
        # requested point before storing it.
        rect = image.get_rect()
        rect.center = centerPoint
        self.rect = rect
class Pellet(pygame.sprite.Sprite):
    """A pellet sprite anchored by its top-left corner.

    When no surface is supplied, the default pellet art is loaded from
    disk via helpers.load_image.
    """
    def __init__(self, top_left, image = None):
        pygame.sprite.Sprite.__init__(self)
        # Fix: compare to None with `is`, not `==` (PEP 8); `==` could be
        # hijacked by a surface type defining __eq__.
        if image is None:
            self.image, self.rect = load_image('pellet.png',-1)
        else:
            self.image = image
            self.rect = image.get_rect()
        self.rect.topleft = top_left
"neillhenning@gmail.com"
] | neillhenning@gmail.com |
ca72163d672b64afaf83f1f5891e2c4f1c2d573c | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Shapely_numpy/source/numpy/doc/structured_arrays.py | 5289e6d0bd859f00231e416fc338c3c4d6e6ee3e | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 11,442 | py | """
=================
Structured Arrays
=================
Introduction
============
NumPy provides powerful capabilities to create arrays of structured datatype.
These arrays permit one to manipulate the data by named fields. A simple
example will show what is meant.: ::
>>> x = np.array([(1,2.,'Hello'), (2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> x
array([(1, 2.0, 'Hello'), (2, 3.0, 'World')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
Here we have created a one-dimensional array of length 2. Each element of
this array is a structure that contains three items, a 32-bit integer, a 32-bit
float, and a string of length 10 or less. If we index this array at the second
position we get the second structure: ::
>>> x[1]
(2,3.,"World")
Conveniently, one can access any field of the array by indexing using the
string that names that field. ::
>>> y = x['bar']
>>> y
array([ 2., 3.], dtype=float32)
>>> y[:] = 2*y
>>> y
array([ 4., 6.], dtype=float32)
>>> x
array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
In these examples, y is a simple float array consisting of the 2nd field
in the structured type. But, rather than being a copy of the data in the structured
array, it is a view, i.e., it shares exactly the same memory locations.
Thus, when we updated this array by doubling its values, the structured
array shows the corresponding values as doubled as well. Likewise, if one
changes the structured array, the field view also changes: ::
>>> x[1] = (-1,-1.,"Master")
>>> x
array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')],
dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
>>> y
array([ 4., -1.], dtype=float32)
Defining Structured Arrays
==========================
One defines a structured array through the dtype object. There are
**several** alternative ways to define the fields of a record. Some of
these variants provide backward compatibility with Numeric, numarray, or
another module, and should not be used except for such purposes. These
will be so noted. One specifies record structure in
one of four alternative ways, using an argument (as supplied to a dtype
function keyword or a dtype object constructor itself). This
argument must be one of the following: 1) string, 2) tuple, 3) list, or
4) dictionary. Each of these is briefly described below.
1) String argument.
In this case, the constructor expects a comma-separated list of type
specifiers, optionally with extra shape information. The fields are
given the default names 'f0', 'f1', 'f2' and so on.
The type specifiers can take 4 different forms: ::
a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f2, f4, f8, c8, c16, a<n>
(representing bytes, ints, unsigned ints, floats, complex and
fixed length strings of specified byte lengths)
b) int8,...,uint8,...,float16, float32, float64, complex64, complex128
(this time with bit sizes)
c) older Numeric/numarray type specifications (e.g. Float32).
Don't use these in new code!
d) Single character type specifiers (e.g H for unsigned short ints).
Avoid using these unless you must. Details can be found in the
NumPy book
These different styles can be mixed within the same string (but why would you
want to do that?). Furthermore, each type specifier can be prefixed
with a repetition number, or a shape. In these cases an array
element is created, i.e., an array within a record. That array
is still referred to as a single field. An example: ::
>>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64')
>>> x
array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])],
dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))])
By using strings to define the record structure, it precludes being
able to name the fields in the original definition. The names can
be changed as shown later, however.
2) Tuple argument: The only relevant tuple case that applies to record
structures is when a structure is mapped to an existing data type. This
is done by pairing in a tuple, the existing data type with a matching
dtype definition (using any of the variants being described here). As
an example (using a definition using a list, so see 3) for further
details): ::
>>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
>>> x
array([0, 0, 0])
>>> x['r']
array([0, 0, 0], dtype=uint8)
In this case, an array is produced that looks and acts like a simple int32 array,
but also has definitions for fields that use only one byte of the int32 (a bit
like Fortran equivalencing).
3) List argument: In this case the record structure is defined with a list of
tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field
('' is permitted), 2) the type of the field, and 3) the shape (optional).
For example::
>>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
>>> x
array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])],
dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))])
4) Dictionary argument: two different forms are permitted. The first consists
of a dictionary with two required keys ('names' and 'formats'), each having an
equal sized list of values. The format list contains any type/shape specifier
allowed in other contexts. The names must be strings. There are two optional
keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to
the required two where offsets contain integer offsets for each field, and
titles are objects containing metadata for each field (these do not have
to be strings), where the value of None is permitted. As an example: ::
>>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[('col1', '>i4'), ('col2', '>f4')])
The other dictionary form permitted is a dictionary of name keys with tuple
values specifying type, offset, and an optional title. ::
>>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')])
Accessing and modifying field names
===================================
The field names are an attribute of the dtype object defining the structure.
For the last example: ::
>>> x.dtype.names
('col1', 'col2')
>>> x.dtype.names = ('x', 'y')
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')])
>>> x.dtype.names = ('x', 'y', 'z') # wrong number of names
<type 'exceptions.ValueError'>: must replace all names at once with a sequence of length 2
Accessing field titles
====================================
The field titles provide a standard place to put associated info for fields.
They do not have to be strings. ::
>>> x.dtype.fields['x'][2]
'title 1'
Accessing multiple fields at once
====================================
You can access multiple fields at once using a list of field names: ::
>>> x = np.array([(1.5,2.5,(1.0,2.0)),(3.,4.,(4.,5.)),(1.,3.,(2.,6.))],
dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
Notice that `x` is created with a list of tuples. ::
>>> x[['x','y']]
array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)],
dtype=[('x', '<f4'), ('y', '<f4')])
>>> x[['x','value']]
array([(1.5, [[1.0, 2.0], [1.0, 2.0]]), (3.0, [[4.0, 5.0], [4.0, 5.0]]),
(1.0, [[2.0, 6.0], [2.0, 6.0]])],
dtype=[('x', '<f4'), ('value', '<f4', (2, 2))])
The fields are returned in the order they are asked for.::
>>> x[['y','x']]
array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)],
dtype=[('y', '<f4'), ('x', '<f4')])
Filling structured arrays
=========================
Structured arrays can be filled by field or row by row. ::
>>> arr = np.zeros((5,), dtype=[('var1','f8'),('var2','f8')])
>>> arr['var1'] = np.arange(5)
If you fill it in row by row, it takes a tuple
(but not a list or array!)::
>>> arr[0] = (10,20)
>>> arr
array([(10.0, 20.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)],
dtype=[('var1', '<f8'), ('var2', '<f8')])
Record Arrays
=============
For convenience, numpy provides "record arrays" which allow one to access
fields of structured arrays by attribute rather than by index. Record arrays
are structured arrays wrapped using a subclass of ndarray,
:class:`numpy.recarray`, which allows field access by attribute on the array
object, and record arrays also use a special datatype, :class:`numpy.record`,
which allows field access by attribute on the individual elements of the array.
The simplest way to create a record array is with :func:`numpy.rec.array`: ::
>>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> recordarr.bar
array([ 2., 3.], dtype=float32)
>>> recordarr[1:2]
rec.array([(2, 3.0, 'World')],
dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
>>> recordarr[1:2].foo
array([2], dtype=int32)
>>> recordarr.foo[1:2]
array([2], dtype=int32)
>>> recordarr[1].baz
'World'
numpy.rec.array can convert a wide variety of arguments into record arrays,
including normal structured arrays: ::
>>> arr = array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
>>> recordarr = np.rec.array(arr)
The numpy.rec module provides a number of other convenience functions for
creating record arrays, see :ref:`record array creation routines
<routines.array-creation.rec>`.
A record array representation of a structured array can be obtained using the
appropriate :ref:`view`: ::
>>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
>>> recordarr = arr.view(dtype=dtype((np.record, arr.dtype)),
... type=np.recarray)
For convenience, viewing an ndarray as type `np.recarray` will automatically
convert to `np.record` datatype, so the dtype can be left out of the view: ::
>>> recordarr = arr.view(np.recarray)
>>> recordarr.dtype
dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
To get back to a plain ndarray both the dtype and type must be reset. The
following view does so, taking into account the unusual case that the
recordarr was not a structured type: ::
>>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
Record array fields accessed by index or by attribute are returned as a record
array if the field has a structured type but as a plain ndarray otherwise. ::
>>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))],
... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
>>> type(recordarr.foo)
<type 'numpy.ndarray'>
>>> type(recordarr.bar)
<class 'numpy.core.records.recarray'>
Note that if a field has the same name as an ndarray attribute, the ndarray
attribute takes precedence. Such fields will be inaccessible by attribute but
may still be accessed by index.
"""
from __future__ import division, absolute_import, print_function
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
38a8e20fc7ef3ca1544ce24c9cc622c673959db8 | 889567668a39d069793bec6fc91b7a8b7b998f3a | /courses/migrations/0007_video_learn_times.py | 51e4cfd6a6a2544fb32c9710d99878ee20ec0edc | [] | no_license | SheriffHood/yxonline | 4b5637fd685492bb8d99b9228ac182b2bc7b7dfd | 476d864ecfc924d392ae5478f9f68493057f37fa | refs/heads/master | 2020-03-23T17:26:14.331322 | 2019-01-13T13:42:16 | 2019-01-13T13:42:16 | 141,859,919 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | # Generated by Django 2.0.7 on 2018-10-30 22:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``learn_times`` counter field to the ``Video`` model."""

    # Must be applied after migration 0006 of the courses app.
    dependencies = [
        ('courses', '0006_auto_20181023_0612'),
    ]

    operations = [
        migrations.AddField(
            model_name='video',
            name='learn_times',
            # New rows start with zero accumulated learning minutes.
            field=models.IntegerField(default=0, verbose_name='学习时长(分钟数)'),
        ),
    ]
"h77max@163.com"
] | h77max@163.com |
a0b40c1e4cfc595d8bc11fa49ffb5e77e2d600c3 | 238ebc43c3d54d2842de75fd8ddf0b0b0261906e | /SimulateData.py | eb704550b17512faa02f5b718ec6ed67b6f373b5 | [] | no_license | johndowen/CrossMgr | 17c114ab80382b24ce0cdd228782bd000f513ea8 | fc9eaf8ae5d4919cef3f1a3680c169be70cf356b | refs/heads/master | 2021-06-28T03:14:41.682880 | 2017-09-17T00:35:26 | 2017-09-17T00:35:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,686 | py | import random
import bisect
from Names import GetNameTeam
def SimulateData( riders=200 ):
	"""Generate a simulated race: random lap times plus category metadata.

	Arguments:
		riders -- total number of riders to simulate (split into two
		          speed groups with staggered starts).

	Returns a dict with keys 'raceMinutes', 'lapTimes' (sorted list of
	(time, rider_number) tuples), 'categories' and 'riderInfo'.

	NOTE(review): this is Python 2 code (xrange); port before running on
	Python 3.
	"""
	# Generate random rider events.
	random.seed( 10101021 )
	raceMinutes = 8
	mean = 8*60.0 / 8		# Average lap time.
	var = mean/20.0			# Variance between riders.
	lapsTotal = int(raceMinutes * 60 / mean + 3)
	raceTime = mean * lapsTotal
	errorPercent = 1.0/25.0
	# Pick the smallest "round" midpoint number that exceeds the rider count,
	# so bib numbers look realistic for the field size.
	for nMid in (10,100,200,500,1000,2000,5000,10000,20000,50000):
		if nMid >= riders:
			break
	numStart = nMid - riders//2
	startOffset = 10
	lapTimes = []
	riderInfo = []
	for num in xrange(numStart,numStart+riders+1):
		t = 0
		if num < numStart + riders // 2:
			# First half of the field: the faster group.
			mu = random.normalvariate( mean, mean/20.0 )	# Rider's random average lap time.
			riderInfo.append( [num] + list(GetNameTeam(True)) )
		else:
			mu = random.normalvariate( mean * 1.15, mean/20.0 )	# These riders are slower, on average.
			riderInfo.append( [num] + list(GetNameTeam(False)) )
			t += startOffset	# Account for offset start.
		for laps in xrange(lapsTotal):
			t += random.normalvariate( mu, var/2.0 )	# Rider's lap time.
			if random.random() > errorPercent:			# Respect error rate.
				lapTimes.append( (t, num) )
	lapTimes.sort()
	# Get the times and leaders for each lap.
	leaderTimes = [lapTimes[0][0]]
	leaderNums = [lapTimes[0][1]]
	numSeen = set()
	for t, n in lapTimes:
		if n in numSeen:
			# Seeing a repeated rider number means a new lap has started.
			leaderTimes.append( t )
			leaderNums.append( n )
			numSeen.clear()
		numSeen.add( n )
	# Find the leader's time after the end of the race.
	iLast = bisect.bisect_left( leaderTimes, raceMinutes * 60.0, hi = len(leaderTimes) - 1 )
	if leaderTimes[iLast] < raceMinutes * 60.0:
		iLast += 1
	# Trim out everything except next arrivals after the finish time.
	tLeaderLast = leaderTimes[iLast]
	numSeen = set()
	afterLeaderFinishEvents = [evt for evt in lapTimes if evt[0] >= tLeaderLast]
	lapTimes = [evt for evt in lapTimes if evt[0] < tLeaderLast]
	# Find the next unique arrival of all finishers.
	lastLapFinishers = []
	tStop = raceMinutes * 60.0
	numSeen = set()
	for t, n in afterLeaderFinishEvents:
		if n not in numSeen:
			numSeen.add( n )
			lastLapFinishers.append( (t, n) )
	lapTimes.extend( lastLapFinishers )
	# Two categories: Junior (first half of bib range, no offset) and
	# Senior (second half, delayed start by startOffset seconds).
	categories = [
		{'name':'Junior', 'catStr':'{}-{}'.format(nMid-riders//2,nMid-1), 'startOffset':'00:00', 'distance':0.5, 'gender':'Men', 'numLaps':5},
		{'name':'Senior', 'catStr':'{}-{}'.format(nMid,nMid+riders//2), 'startOffset':'00:{:02d}'.format(startOffset), 'distance':0.5, 'gender':'Women', 'numLaps':4}
	]
	return {
		'raceMinutes':	raceMinutes,
		'lapTimes':		lapTimes,
		'categories':	categories,
		'riderInfo':	riderInfo,
	}
# Script entry point (Python 2 print statement): dump the simulated riders.
if __name__ == '__main__':
	print SimulateData()['riderInfo']
| [
"edward.sitarski@gmail.com"
] | edward.sitarski@gmail.com |
bdb391dcd7e614ad7a0fca39d8eb44b3a36c675e | d4a2ec804605c3adc1748be482d9f4ada770d2f0 | /packaging/models.py | 8a86a591afd49ca1fc61fb0fe2b0a5ec45213393 | [] | no_license | premierrufus/localbrewery | 2af9cc568f7fbdbf1c5e90a066d567859c626e9c | 58d87be09877dd0966cb1e291fa5c90449bb8a8d | refs/heads/master | 2021-09-06T18:26:38.693888 | 2018-02-09T17:10:00 | 2018-02-09T17:10:00 | 119,450,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,760 | py | from django.db import models
from django.utils import timezone
from decimal import *
from brewing.models import BrewingEvent
# Global Variables
SEP = ' | '
# Methods
def to_bbl(f, q):
    """
    Convert a packaged quantity to barrels (bbl).

    Takes two arguments: format(f), quantity(q)
    f -- a format name, one of the keys below (e.g. '1/2bbl', '12oz/CS').
    q -- the quantity of that format (int, str or Decimal).

    Returns value(v) as a Decimal object, rounded to two places.
    Raises ValueError for an unknown format name (the previous
    implementation silently returned None, which crashed later when the
    caller stringified or compared the result).
    """
    # Barrels per single unit of each packaged format.
    BBL_CONVERSION_MAPPING = {
        '12oz/CS': 0.07258064516129033,
        '16oz/CS': 0.0967741935483871,
        '375ml/CS': 0.038387096774193545,
        '500ml/CS': 0.05112903225806451,
        '750ml/CS': 0.07677419354838709,
        '1/6bbl': 0.16666666666666666,
        '1/4bbl': 0.25,
        '1/2bbl': 0.5,
        '50l': 0.426,
        'Firkin (10.8g)': 0.34838709677419355,
        'Pin (5.4g)': 0.17419354838709677
    }
    if f not in BBL_CONVERSION_MAPPING:
        raise ValueError('unknown packaging format: {!r}'.format(f))
    v = Decimal(q) * Decimal(BBL_CONVERSION_MAPPING[f])
    return v.quantize(Decimal('1.00'))
def get_many_objects(queryset):
    """
    Render every object of a queryset (ManyToManyField) as one
    comma-separated string.
    """
    parts = [str(obj) for obj in queryset]
    return ", ".join(parts)
# Classes
class Format(models.Model):
    """
    Generic Format Model
    """
    # Allowed packaged-beer formats; keys must match BBL_CONVERSION_MAPPING
    # in to_bbl() above so conversions succeed for every choice.
    TYPE = (
        ('12oz/CS', '12oz/CS'),
        ('16oz/CS', '16oz/CS'),
        ('375ml/CS', '375ml/CS'),
        ('500ml/CS', '500ml/CS'),
        ('750ml/CS', '750ml/CS'),
        ('1/6bbl', '1/6bbl'),
        ('1/4bbl', '1/4bbl'),
        ('1/2bbl', '1/2bbl'),
        ('50l', '50l'),
        ('Firkin (10.8g)', 'Firkin (10.8g)'),
        ('Pin (5.4g)', 'Pin (5.4g)'),
    )
    # NOTE: type is a reserved python word.
    format_type = models.CharField("format type", max_length=20, choices=TYPE)
    def __str__(self):
        return self.format_type
class PackagedFormat(Format):
    """
    An instance of Format used for packaging events
    """
    quantity = models.DecimalField("quantity", max_digits=10, decimal_places=2,
        help_text="Total packaged quantity for this format.")
    def __str__(self):
        # Shows the format plus its volume converted to barrels,
        # e.g. "1/2bbl: 1.50".
        return self.format_type + ': ' + str(to_bbl(self.format_type, self.quantity))
        #return str(to_bbl(self.format_type, self.quantity))
class PackagingEvent(models.Model):
    """A single packaging run: who packaged which brew into which formats."""
    # Staff members allowed as packager.
    NAME = (
        ("tyler", "Tyler"),
        ("ron", "Ron"),
        ("chris", "Chris"),
        ("shea", "Shea"),
        ("ian", "Ian"),
        ("brandon", "Brandon")
    )
    packager = models.CharField("packager", max_length=100, choices=NAME, blank=True, null=True)
    brewing_event = models.ForeignKey('brewing.BrewingEvent', help_text="BrewingEvent associated with this packaging event.",
        blank=True, null=True, verbose_name="Packaging Source")
    packaging_date = models.DateField(default=timezone.now, blank=True, verbose_name="Date Packaged")
    formats = models.ManyToManyField('PackagedFormat', blank=True, null=True, help_text="Zero or more formats")
    def package(self):
        # Persist the event; thin wrapper kept for a readable call site.
        self.save()
    def get_formats(self):
        # Admin list-display helper: render all formats on one line.
        #return "\n".join([str(p) for p in self.formats.all()])
        return ", ".join([str(p) for p in self.formats.all()])
    get_formats.short_description = 'Format: Volume(bbl)'
    def __str__(self):
        # packaged_quantity_bbl = to_bbl(self.packaged_beer_format, self.packaged_quantity)
        return str(self.brewing_event) + SEP + self.packager + SEP + get_many_objects(self.formats.all())
        # + SEP + str(to_bbl(self.packaged_beer_format, self.packaged_quantity)) + ' bbl'
        # return str(type(self.packaged_beer_format)) + str(type(self.packaged_quantity))
        # return str(to_bbl(self.packaged_beer_format, self.packaged_quantity))
    class Meta:
        verbose_name = "Packaging Event"
"banded.gabriel@gmail.com"
] | banded.gabriel@gmail.com |
07acda20de91fccf2fd365140062d327451bc05f | d6ea1431a1809ca532b4ac61e4e1191abe129e45 | /flaskr/blog.py | 5836f95edabbf05ea162617ac548afe9e3c380cf | [] | no_license | jpeter17/Blog-Flask-MongoDB | 414b4556d4e492185c5deefe4b90d747cb7e7aa9 | 4a5ddd967517726fe0a9658683781b70a4524f3d | refs/heads/master | 2020-05-18T20:32:54.690536 | 2019-05-05T02:49:02 | 2019-05-05T02:49:02 | 184,637,267 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from flaskr.auth import login_required
from flaskr.db import get_db
import datetime
from bson import ObjectId
bp = Blueprint('blog', __name__)
@bp.route('/')
def index():
    """Render the post list, newest first.

    Also backfills the string ``id`` field on any legacy post document
    that only carries the ObjectId ``_id``.
    """
    db = get_db()
    for post in db.posts.find({}):
        if not post.get('id'):
            pid = post['_id']
            # Collection.update() is deprecated (and removed in modern
            # PyMongo); update_one is the single-document replacement.
            db.posts.update_one({'_id': pid}, {'$set': {'id': str(pid)}})
    # The {'$query': ..., '$orderby': ...} wire form is deprecated MongoDB
    # syntax; sorting on the cursor is the supported equivalent.
    posts = db.posts.find({}).sort('created', -1)
    return render_template('blog/index.html', posts=posts)
@bp.route('/create', methods=('GET', 'POST'))
@login_required
def create():
    """Show the post-creation form; on POST, validate and insert it."""
    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']
        error = 'Title is required.' if not title else None
        if error is not None:
            flash(error)
        else:
            get_db().posts.insert_one({
                'title': title,
                'body': body,
                'username': g.user['username'],
                'author_id': g.user['_id'],
                'created': datetime.datetime.now(),
            })
            return redirect(url_for('blog.index'))
    return render_template('blog/create.html')
def get_post(id, check_author=True):
    """Fetch a post by ObjectId, aborting with 404 when missing and 403
    when it belongs to another user (unless check_author is False)."""
    record = get_db().posts.find_one({'_id': id})
    if record is None:
        abort(404, "Post id {0} doesn't exist.".format(id))
    if check_author and record['author_id'] != g.user['_id']:
        abort(403)
    return record
@bp.route('/<string:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
    """Edit an existing post owned by the current user."""
    oid = ObjectId(id)
    post = get_post(oid)
    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']
        if not title:
            flash('Title is required.')
        else:
            get_db().posts.update_one(
                {'_id': oid},
                {'$set': {'title': title, 'body': body}})
            return redirect(url_for('blog.index'))
    return render_template('blog/update.html', post=post)
@bp.route('/<string:id>/delete', methods=('POST',))
@login_required
def delete(id):
    """Remove a post (author-only, enforced by get_post) and go home."""
    oid = ObjectId(id)
    get_post(oid)  # 404/403 guard; the returned document is not needed
    get_db().posts.delete_one({'_id': oid})
    return redirect(url_for('blog.index'))
"jakepetersen1221@gmail.com"
] | jakepetersen1221@gmail.com |
3198301171ba3b98d74bf2f7f604ae48fff451a0 | 80dde7133759f3f7f006babc90c377edfbda2390 | /what conv layers see/Activation_maximization_using_keras_vis_toolkit.py | d8804b86e356cf285603839dee3e6c0041ff6abf | [] | no_license | akash027/CNN | b8146e062e8901557edbc473a8c2607cf736adeb | 79cab06e12cf5da52c7c9c32456c791e4df2c9e2 | refs/heads/master | 2021-05-23T09:51:35.662520 | 2020-07-16T13:16:49 | 2020-07-16T13:16:49 | 253,229,652 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,086 | py | import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation, Input
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
# Training hyper-parameters: mini-batch size, number of MNIST classes,
# and a single epoch (enough for this visualization demo).
batch_size = 128
num_classes = 10
epochs =1
# Input image dimensions (MNIST digits are 28x28 grayscale).
img_rows, img_cols = 28, 28
# The data, shuffled and split between train and test sets.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Reshape to add the channel axis where the backend expects it.
# BUG FIX: Keras reports the ordering as 'channels_first'/'channels_last';
# the original compared against 'channel_first' (missing 's'), so the
# first branch was unreachable and channels-first backends received
# wrongly shaped input.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# Scale pixel values from [0, 255] ints to [0, 1] floats.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape: ', x_train.shape)
print(x_train.shape[0], " train samples")
print(x_test.shape[0], " test samples")
# Convert integer class labels to one-hot vectors.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Model: two conv blocks -> max-pool -> dropout -> dense classifier.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
# The output layer is named 'preds' so the visualization code below can
# look it up by name with utils.find_layer_idx.
model.add(Dense(num_classes, activation='softmax', name='preds'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
print(model.summary())
model.fit(x_train,y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1)
###########################################
## VISUALIZE OUR DENSE LAYERS (activation maximization via keras-vis)
from vis.visualization import visualize_activation
from vis.utils import utils
from keras import activations
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (18,6)
# Look up the output layer by the 'preds' name given above.
# (Alternatively this could be -1, since it is the last layer.)
layer_idx = utils.find_layer_idx(model, 'preds')
# Swap softmax with linear: maximizing a softmax output is ill-posed, so
# keras-vis recommends optimizing the pre-softmax (linear) activation.
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)
# This is the output node (digit class) we want to maximize.
filter_idx = 0
img = visualize_activation(model, layer_idx,filter_indices=filter_idx)
plt.imshow(img[...,0] )
# Same, but constraining the synthesized input to the [0, 1] pixel range
# the network was trained on.
img = visualize_activation(model, layer_idx,filter_indices=filter_idx,input_range=(0.,1.), verbose=True)
plt.imshow(img[...,0] )
# Repeat for every digit class, one figure each.
for output_idx in np.arange(10):
    img = visualize_activation(model, layer_idx,filter_indices=output_idx,input_range=(0.,1.))
    plt.figure()
    plt.title('Networks perception of {}'.format(output_idx))
    plt.imshow(img[..., 0])
| [
"noreply@github.com"
] | noreply@github.com |
7f3905346e32c93dbb08ca229d4d8268b9dfdb00 | 7f71d8ee1fbb15b44e202230ddde32133b9f386a | /backend/backend/stock_api/builders/__init__.py | c0e756957d428896e89b90526ecf315d6f0b2aee | [] | no_license | exe01/vkstock | 358229405694a75584a463dcdbbc975e3f84ac56 | 2711ddf11d6c2a071940405060c26f4c8878efde | refs/heads/master | 2023-04-04T14:36:34.438216 | 2021-04-18T15:11:41 | 2021-04-18T15:11:41 | 273,328,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,978 | py | from PIL import Image, ImageDraw, ImageFont
from backend.stock_api.utils.censor import PartialCensor
from backend.stock_api.constants import *
from backend.stock_api.models import (
Post,
Comment,
RenderedPost,
RenderedImage,
)
from django.core.files import File
import io
import random
import string
import textwrap
import re
class ImageBuilder:
    """Incrementally composes a tall image out of text and image sections,
    stacking each new section below the previous ones on a white
    background. Usage: reset() -> add_text()/add_image()* -> build()."""

    # Default font size in points, and the assumed pixel width of one
    # glyph at that size (used to estimate characters per line for a
    # monospace font).
    FONT_POINTS = 40
    SYMBOL_WIDTH = 22

    # Work-in-progress PIL image; (re)created by reset().
    _image = None

    def reset(self, width=800):
        """Start a new composition of the given width (1px-tall seed)."""
        self._image = Image.new('RGB', (width, 1), color="white")

    def build(self, format='jpeg'):
        """Return the composed image, tagging it with the given format.

        NOTE(review): Image.format is normally set by PIL's loader;
        assigning it here lets callers do image.save(buf,
        format=image.format) — confirm before changing.
        """
        self._image.format = format
        return self._image

    def add_text(self, text, text_margin=30, font_name='anonymouspro.ttf', align='left', location='center', points=None):
        """Render ``text`` as a wrapped multi-line block and append it
        below the current image.

        :param text_margin: padding (px) around the text block.
        :param font_name: file name looked up under the 'fonts/' folder.
        :param align: multiline text alignment within the block.
        :param location: horizontal placement of the block ('left',
            'center' or 'right').
        :param points: font size; defaults to FONT_POINTS.
        """
        if text == '':
            return
        points = points if points else self.FONT_POINTS
        font = ImageFont.truetype('fonts/' + font_name, points)
        # Estimate how many characters fit on a line from the assumed
        # per-glyph width, then hard-wrap the text to that count.
        text_width = self._image.width - 2 * text_margin
        symbols_per_line = text_width / self.SYMBOL_WIDTH
        text = self._separate_text_by_lines(text, symbols_per_line)
        real_width, real_height = self._textsize(text, font)
        # NOTE(review): full_text_block_height is computed but never used.
        full_text_block_height = real_height + 2 * text_margin
        # Draw the text on its own tightly-sized white canvas...
        background_of_text = Image.new('RGB', (real_width, real_height), color="white")
        background_of_text_drawer = ImageDraw.Draw(background_of_text)
        background_of_text_drawer.multiline_text(
            (0, 0),
            text,
            fill=(0, 0, 0),
            font=font,
            align=align
        )
        # ...then place that canvas horizontally and append it.
        background_of_text = self._locate(location,
                                          background_of_text,
                                          self._image.width,
                                          margin_left=text_margin,
                                          margin_top=text_margin,
                                          margin_bot=text_margin,
                                          margin_right=text_margin)
        self.vertically_concatenate_image(background_of_text)

    def add_image(self, image, width=800, location='center', margin=30):
        """Resize ``image`` to ``width`` (keeping aspect ratio), place it
        horizontally, and append it below the current image. No-op when
        image is None."""
        if image is None:
            return
        resized_img = self._resize_img_by_width(image, width)
        resized_img = self._locate(location,
                                   resized_img,
                                   self._image.width,
                                   margin_left=margin,
                                   margin_top=margin,
                                   margin_bot=margin,
                                   margin_right=margin)
        self.vertically_concatenate_image(resized_img)

    def vertically_concatenate_image(self, img):
        """Grow the working image downward by pasting ``img`` under it."""
        result_img = Image.new(
            'RGB',
            (self._image.width, self._image.height + img.height),
            color="white",
        )
        result_img.paste(
            self._image,
            (0, 0)
        )
        result_img.paste(
            img,
            (0, self._image.height)
        )
        self._image = result_img

    def _resize_img_by_width(self, img, width: int):
        """Scale ``img`` to the given width, preserving aspect ratio."""
        orig_width, orig_height = img.size
        scale_factor = orig_width / width
        height = int(orig_height / scale_factor)
        resized_img = img.resize((width, height))
        return resized_img

    def _textsize(self, text, font):
        """Measure the rendered size of multi-line ``text`` in ``font``
        by drawing on a throwaway 1x1 image."""
        test_img = Image.new('RGB', (1, 1), color="white")
        test_img_driwer = ImageDraw.Draw(test_img)
        width, height = test_img_driwer.multiline_textsize(text, font=font)
        height += 10  # serif: extra room so descenders are not clipped
        return width, height

    def _separate_text_by_lines(self, text, symbols_per_line):
        """Hard-wrap each paragraph of ``text`` to at most
        ``symbols_per_line`` characters, preserving paragraph breaks."""
        paragraphs = text.split('\n')
        new_paragraphs = []
        for paragraph in paragraphs:
            paragraph_lines = textwrap.wrap(paragraph, width=symbols_per_line)
            new_paragraph = '\n'.join(paragraph_lines)
            new_paragraphs.append(new_paragraph)
        return '\n'.join(new_paragraphs)

    def _locate(self, location, *args, **kwargs):
        """Dispatch to the horizontal placement helper named by
        ``location`` ('center'/'left'/'right'; anything else -> left)."""
        if location == 'center':
            return self._center(*args, **kwargs)
        elif location == 'left':
            return self._left(*args, **kwargs)
        elif location == 'right':
            return self._right(*args, **kwargs)
        return self._left(*args, **kwargs)

    def _center(self, img, width, margin_top=0, margin_bot=0, margin_left=0, margin_right=0):
        """Return a ``width``-wide white strip with ``img`` centered in it
        (left-flush when img is wider than the strip)."""
        centered_img = Image.new('RGB', (width, img.height + margin_bot + margin_top), color="white")
        img_x = (width - img.width) / 2
        img_x = int(img_x)
        if img_x < 0:
            centered_img.paste(img, (0, margin_top))
        else:
            centered_img.paste(img, (img_x, margin_top))
        return centered_img

    def _left(self, img, width, margin_top=0, margin_bot=0, margin_left=0, margin_right=0):
        """Return a ``width``-wide white strip with ``img`` left-aligned."""
        left_img = Image.new('RGB', (width, img.height + margin_bot + margin_top), color="white")
        left_img.paste(img, (margin_left, margin_top))
        return left_img

    def _right(self, img, width, margin_top=0, margin_bot=0, margin_left=0, margin_right=0):
        """Return a ``width``-wide white strip with ``img`` right-aligned
        (left-flush when img is wider than the strip)."""
        right_img = Image.new('RGB', (width, img.height + margin_bot + margin_top), color="white")
        img_x = (width - img.width - margin_right)
        img_x = int(img_x)
        if img_x < 0:
            right_img.paste(img, (0, margin_top))
        else:
            right_img.paste(img, (img_x, margin_top))
        return right_img
class TextFormatter:
    """Small text utilities: combining reply/quoted text, random file
    names, emoji stripping, and profanity masking."""

    def format_text(self, text, ref_text='', wrapper='*'):
        """Capitalize and combine a comment with the text it replies to.

        When both parts are non-empty, each is wrapped in ``wrapper`` and
        the reference comes first, separated by a blank line. When only
        one part is present it is returned unwrapped; '' when both empty.
        """
        text, ref_text = text.capitalize(), ref_text.capitalize()
        if text and ref_text:
            return '{}\n\n{}'.format(
                self.wrap_text(ref_text, wrapper),
                self.wrap_text(text, wrapper),
            )
        # At most one part is non-empty here.
        return text or ref_text

    def wrap_text(self, text, wrapper=''):
        """Surround ``text`` with ``wrapper`` on both sides."""
        return f'{wrapper}{text}{wrapper}'

    def get_random_name(self, length=32, format=None):
        """Return a random alphanumeric name, with an optional lowercase
        extension appended when ``format`` is given."""
        alphabet = string.ascii_letters + string.digits
        name = ''.join(random.choice(alphabet) for _ in range(length))
        if format:
            name = '{}.{}'.format(name, format.lower())
        return name

    def delete_emoji(self, text):
        """Strip emoji / pictograph code points from ``text``.

        Best-effort: any failure returns the input unchanged (bare except
        kept deliberately to preserve the original behavior).
        """
        try:
            pattern = re.compile("["
                                 u"\U0001F600-\U0001F64F"  # emoticons
                                 u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                                 u"\U0001F680-\U0001F6FF"  # transport & map symbols
                                 u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                                 u"\U00002500-\U00002BEF"  # chinese char
                                 u"\U00002702-\U000027B0"
                                 u"\U00002702-\U000027B0"
                                 u"\U000024C2-\U0001F251"
                                 u"\U0001f926-\U0001f937"
                                 u"\U00010000-\U0010ffff"
                                 u"\u2640-\u2642"
                                 u"\u2600-\u2B55"
                                 u"\u200d"
                                 u"\u23cf"
                                 u"\u23e9"
                                 u"\u231a"
                                 u"\ufe0f"  # dingbats
                                 u"\u3030"
                                 "]+", flags=re.UNICODE)
            return pattern.sub(r'', text)
        except:
            return text

    def censor(self, text, replace='*'):
        """Mask profanity via the project's PartialCensor helper."""
        return PartialCensor.censor(text, repl=replace)
class PostTypeIsNotDefined(Exception):
    """Raised by PostCreator.create when the config's POST_TYPE is
    neither the original nor the rendered post type."""
    pass
class PostCreator:
    """Builds RenderedPost records (and their composed images) from a
    post configuration dictionary."""

    # Shared, class-level helpers: every PostCreator instance reuses the
    # same builder objects.
    image_builder = ImageBuilder()
    text_builder = TextFormatter()

    def create(self, post_config):
        """Create a new post. Take info from post config
        :param post_config: Configuration for building of post
        :return: Rendered post (saved, with images attached when rendered)
        :raises PostTypeIsNotDefined: when POST_TYPE is unrecognized.
        """
        # Resolve the source: either an original Post or an existing
        # RenderedPost (whose original is reached through post_id).
        if post_config[POST_TYPE] == POST_TYPE_ORIGINAL:
            post = Post.objects.get(id=post_config[POST_ID])
            original_post = post
        elif post_config[POST_TYPE] == POST_TYPE_RENDERED:
            post = RenderedPost.objects.get(id=post_config[POST_ID])
            original_post = post.post_id
        else:
            raise PostTypeIsNotDefined()
        project = original_post.source_id.project_id
        # Either replace the existing rendered post in place (dropping its
        # old images) or start a fresh one.
        if post_config[POST_TYPE] == POST_TYPE_RENDERED and post_config[REPLACE] == 1:
            rendered_post = post
            rendered_post.images.all().delete()
        else:
            rendered_post = RenderedPost(
                post_id=original_post,
                project_id=project,
            )
        # "As original": copy the source text and images verbatim.
        if post_config[AS_ORIGINAL]:
            self._create_post_as_original(rendered_post, original_post)
            return rendered_post
        # Build a composed image for the post.
        if post_config[IMG] == 1:
            # Multi-image posts: just attach the original images.
            if post_config[IMG_COUNT] > 1:
                rendered_post.save()
                # NOTE(review): this passes the IMG_COUNT *constant* as the
                # count argument rather than post_config[IMG_COUNT]; the
                # `num == count` comparison in _add_original_images then
                # presumably never matches, so all images are copied —
                # confirm intent.
                self._add_original_images(rendered_post, original_post, IMG_COUNT)
                return rendered_post
            width = post_config[IMG_WIDTH]
            original_text = original_post.text
            original_text = self.text_builder.delete_emoji(original_text)
            comment_text, comment = self._build_comment_text(original_post, post_config)
            comment_text = self.text_builder.delete_emoji(comment_text)
            post_img = original_post.get_pillow_first_image()
            comment_img = comment.get_pillow_image() if comment else None
            # Text-only posts in auto mode get a wider canvas.
            if post_config[AUTO] and post_img is None and comment_img is None:
                width = 1000
            # Compose sections top-to-bottom per the config flags.
            self.image_builder.reset(width=width)
            if post_config[IMG_WITH_ORIGINAL_TEXT]:
                censored_original_text = self.text_builder.censor(original_text)
                self.image_builder.add_text(censored_original_text)
            if post_config[IMG_WITH_POST_IMG]:
                self.image_builder.add_image(post_img)
            if post_config[IMG_WITH_COMMENT] and post_config[IMG_WITH_COMMENT_TEXT]:
                censored_comment_text = self.text_builder.censor(comment_text)
                self.image_builder.add_text(censored_comment_text)
            if post_config[IMG_WITH_COMMENT] and post_config[IMG_COMMENT_WITH_IMG]:
                self.image_builder.add_image(comment_img, width=600)
            # Add the project-name watermark in the bottom-right corner.
            self.image_builder.add_text(project.name, location='right', text_margin=5, points=30)
            rendered_img = self.image_builder.build()
            # Serialize the composed image into a Django File and attach
            # it to the rendered post under a random file name.
            img_reader = io.BytesIO()
            rendered_img.save(img_reader, format=rendered_img.format)
            rendered_post.save()
            img_of_rendered_post = RenderedImage(
                rendered_post_id=rendered_post
            )
            img_of_rendered_post.image.save(
                self.text_builder.get_random_name(format=rendered_img.format),
                File(img_reader)
            )
            img_of_rendered_post.save()
            return rendered_post
        else:
            # No image requested: persist the bare rendered post.
            rendered_post.save()
            return rendered_post

    def _build_comment_text(self, original_post, post_config):
        """Pick the comment to render (explicit id, else top-rated) and
        return its formatted text plus the comment object (or '' and
        None when the post has no comments)."""
        image_text = ''
        comment = None
        if post_config[IMG_COMMENT_ID] is not None:
            comment_id = post_config[IMG_COMMENT_ID]
            comment = Comment.objects.get(id=comment_id)
        else:
            original_comments = original_post.comments.all().order_by('-rating')
            if len(original_comments) > 0:
                comment = original_comments[0]
        if comment:
            if post_config[IMG_COMMENT_TEXT_WITH_REF]:
                image_text = self.text_builder.format_text(comment.text, comment.ref_text)
            else:
                image_text = self.text_builder.format_text(comment.text)
        return image_text, comment

    def _create_post_as_original(self, rendered_post, original_post):
        """Copy the original post's text and all of its images onto the
        rendered post, replacing any existing images."""
        rendered_post.text = original_post.text
        rendered_post.images.all().delete()
        rendered_post.save()
        self._add_original_images(rendered_post, original_post)

    def _add_original_images(self, rendered_post, original_post, count=-1):
        """Attach up to ``count`` of the original post's images to the
        rendered post (-1 means all of them)."""
        for num, img in enumerate(original_post.images.all()):
            if num == count:
                break
            rendered_img = RenderedImage(
                rendered_post_id=rendered_post
            )
            rendered_img.image = img.image
            rendered_img.save()
| [
"skupov2d@mail.ru"
] | skupov2d@mail.ru |
3e3bc699374bb3c06ea7bdac37bd5b7b1e0caeac | 95947b689079ef8af9a9233e244599b3f5b06300 | /install.py | 083d661404384a50be5bbcaa7e13be68346d0604 | [] | no_license | DaveDaCoda/YAKSR | 87f96ba526ec4bb14ab48f26a64c6508f886df75 | 7201c0ed6a24fe6f1b4520016f75ae2fdffc7c22 | refs/heads/master | 2021-01-01T06:19:00.828800 | 2017-07-07T21:03:06 | 2017-07-07T21:03:06 | 97,404,700 | 2 | 0 | null | 2017-07-16T19:03:51 | 2017-07-16T19:03:51 | null | UTF-8 | Python | false | false | 846 | py | #!/usr/bin/python
import os
from subprocess import call
# Directory this installer was launched from; all paths below are
# relative to it.
currentdir=os.getcwd()
os.chdir(str(currentdir))
# NOTE(review): "+777" is a nonstandard chmod mode string; plain "777"
# (or a symbolic mode like "a+rwx") is presumably intended — verify.
os.system("chmod +777 AptGetInstalls.sh")
call(["sh", "AptGetInstalls.sh"])
# Create the ToolBox output directory on first run.
if (os.path.isdir(str(currentdir)+"/ToolBox")) is False:
    os.makedirs(str(currentdir)+"/ToolBox")
os.chdir(str(currentdir)+ "/Lists")
# Every .txt file under Lists/ describes one tool: each line is a shell
# command executed inside ToolBox/<list name>/.
files = [x for x in os.listdir(currentdir+str("/Lists")) if x.endswith('.txt')]
for filename in files:
    dirname= os.path.splitext(str(filename))[0]
    os.chdir(str(currentdir)+"/ToolBox/")
    # NOTE(review): this condition tests the ToolBox directory itself
    # (created above, so normally True) rather than the per-tool
    # subdirectory, so the rm -rf branch looks unreachable. Probably
    # meant to test .../ToolBox/<dirname> — confirm before changing.
    if (os.path.isdir(str(currentdir)+"/ToolBox/")) is False:
        os.system("rm -rf "+str(dirname))
    os.makedirs(str(currentdir)+"/ToolBox/"+str(dirname))
    os.chdir(str(currentdir)+"/ToolBox/"+str(dirname))
    # Run the tool's commands line by line.
    f=open(str(currentdir)+ "/Lists/"+str(filename))
    line=f.readline()
    while line:
        os.system(str(line))
        line=f.readline()
    f.close()
| [
"noreply@github.com"
] | noreply@github.com |
cce96bf74aaa3c84217177403fdeb84fb37ffa2d | f066cdd363fcf0c90575cc7c6ed3931c9687077f | /pow.py | c920234683e60a75a88f3e862a6d0cbb01fec281 | [] | no_license | kalaiselvikandasamy/python | 33efa21d031625be6805e61ebdf43e842f88ece4 | 6eaef0a72d9cc58fc38bedae2f35ddca00b85db4 | refs/heads/master | 2020-06-28T09:06:32.208812 | 2019-08-16T09:01:00 | 2019-08-16T09:01:00 | 200,194,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | x=int(input("enter the number:"))
# Read the exponent; the base ``x`` was read from stdin just above.
n=int(input("enter the times:"))
# Built-in pow(x, n) computes x raised to the n-th power.
z=pow(x,n)
print("the value:",z)
| [
"noreply@github.com"
] | noreply@github.com |
7ceab98892b6175d04fc3b8f5a0ecab1b1ebf58c | 102c11d9412f54eb8eddb987c45785a2fb78a45a | /cnn_lstm.py | 278826b04b88f494da336244bdaff9b109caee29 | [] | no_license | hujianhang2996/neural_style_transfer_tensorflow | fd83ea2161225eacf016587f7f2e25e892e7fdf9 | 27075fb38872b96284ca31fc8061e380ddee08bc | refs/heads/master | 2021-05-14T00:35:51.041463 | 2018-02-07T12:15:41 | 2018-02-07T12:15:41 | 116,544,479 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | import tensorflow as tf
import numpy as np
from tensorflow.contrib.rnn import BasicLSTMCell
seq_length = 48
X = np.random.rand(10, seq_length, 1)
Y = np.random.rand(10, 2, 1)
df_graph = tf.Graph()
with df_graph.as_default():
batch_size = tf.placeholder(tf.int32, [])
input_X = tf.placeholder(tf.float32, (None, seq_length, 1), 'input_X')
input_Y = tf.placeholder(tf.float32, (None, 2, 1), 'input_Y')
conv1_out = tf.layers.conv1d(input_X, 2, 13, activation=tf.nn.relu, name='conv1')
conv2_out = tf.layers.conv1d(conv1_out, 2, 13, activation=tf.nn.relu, name='conv2')
pooling_out = tf.layers.average_pooling1d(conv2_out, 2, 2, name='pooling')
conv3_out = tf.layers.conv1d(pooling_out, 4, 5, activation=tf.nn.relu, name='conv3')
conv4_out = tf.layers.conv1d(conv3_out, 4, 5, activation=tf.nn.relu, name='conv4')
pooling1_out = tf.layers.average_pooling1d(conv4_out, 2, 2, name='pooling1')
resort_out = tf.transpose(pooling1_out, [1, 0, 2], name='resort')
lstm_layer = BasicLSTMCell(1)
state = lstm_layer.zero_state(batch_size, tf.float32)
out = []
for i in range(2):
output, state = lstm_layer(resort_out[i], state)
out.append(output)
out_gather = [conv4_out, pooling1_out, resort_out, out]
init_op = tf.global_variables_initializer()
sess = tf.Session(graph=df_graph)
sess.run(init_op)
train_writer = tf.summary.FileWriter('./cnn_lstm', sess.graph, flush_secs=5)
out_run = sess.run(out_gather, feed_dict={input_X: X, input_Y: Y, batch_size: X.shape[0]})
[print(np.array(x).shape) for x in out_run]
| [
"hujianhang2996@sina.com"
] | hujianhang2996@sina.com |
5ef27843efde23d377b3ecb8efc33cea986f42d5 | 498b0eb2c59efa970fe7d7d1ef9e99511ddae4ce | /server/vcr-server/subscriptions/utils.py | ac1c99a20c0050e2c685850b2cf26e12a7d33124 | [
"Apache-2.0"
] | permissive | nrempel/indy-catalyst | dba130879f88de00ef137d85242d1f4209c53d98 | c54e8258d49b6cad5d9f175d6b2beddbb80d16f7 | refs/heads/master | 2021-07-23T03:41:30.394817 | 2020-05-19T22:18:04 | 2020-05-19T22:18:04 | 168,226,204 | 2 | 0 | Apache-2.0 | 2020-01-13T18:57:49 | 2019-01-29T20:48:15 | Python | UTF-8 | Python | false | false | 1,757 | py | import logging
import os
import threading
from enum import Enum
from .models.CredentialHookStats import CredentialHookStats
LOGGER = logging.getLogger(__name__)
class HookStep(Enum):
    """Stage of a web hook delivery attempt, used when recording
    per-worker delivery statistics."""
    FIRST_ATTEMPT = "first_attempt"
    RETRY = "retry_attempt"
    RETRY_FAIL = "retry_fail"
class TooManyRetriesException(Exception):
    """Raised when a web hook delivery has exhausted its retry budget."""
    pass
def log_webhook_execution_result(success, hook_step=None, json_data=None):
    """Update the per-worker CredentialHookStats counters for one web
    hook delivery attempt.

    :param success: whether the delivery succeeded.
    :param hook_step: stage of the retry cycle for a failure (None for a
        plain, non-retried failure); ignored for successes.
    :param json_data: unused here; kept for call-site compatibility.
    """
    worker_name = os.environ["CELERY_WORKER_NAME"]
    if worker_name is None:
        # NOTE(review): os.environ[...] raises KeyError when unset, so
        # this None check only fires if the variable was explicitly set
        # to None by other means — confirm the intended fallback.
        LOGGER.warning(
            "No name set for current worker, falling back to using OS-assigned thread ID"
        )
        worker_name = threading.get_native_id()

    # Stats row for the current worker, created lazily on first use.
    current_stats = (
        CredentialHookStats.objects.filter(worker_id=worker_name).first()
        or CredentialHookStats(worker_id=worker_name)
    )

    # BUG FIX: the first branch previously read
    #   `success is False or hook_step is HookStep.FIRST_ATTEMPT`
    # which captured *every* failure, making the RETRY / RETRY_FAIL /
    # plain-failure branches below unreachable (and routed successful
    # first attempts away from success_count). Using `and` matches the
    # parallel structure of the other failure branches.
    if success is False and hook_step is HookStep.FIRST_ATTEMPT:
        current_stats.attempt_count = current_stats.attempt_count + 1
        current_stats.total_count = current_stats.total_count + 1
    elif success is False and hook_step is HookStep.RETRY:
        current_stats.retry_count = current_stats.retry_count + 1
        current_stats.total_count = current_stats.total_count + 1
    elif success is False and hook_step is HookStep.RETRY_FAIL:
        current_stats.retry_fail_count = current_stats.retry_fail_count + 1
    elif success is False and hook_step is None:
        current_stats.fail_count = current_stats.fail_count + 1
    elif success is True:
        current_stats.success_count = current_stats.success_count + 1
    else:
        LOGGER.warning(
            f"Unexpected argument combination: success={success}, hook_step={hook_step}"
        )
    current_stats.save()
| [
"emiliano.sune@gmail.com"
] | emiliano.sune@gmail.com |
b4d58ab6626006d3f890942f673b4ca330f006bd | 8144255b44c1c1f8cab341606a7a611fca201f3e | /e3.py | 7af04d458309c8379d5d6f08a63d23dc19272d45 | [
"Apache-2.0"
] | permissive | piyushptiwari/python-programs | 720e9c28e3fd4bceddc9795c699c869ee6e9a06c | 5dba0ef4e77f1d2528f510327de4224b60b1d4ba | refs/heads/master | 2021-09-17T09:56:11.141134 | 2018-06-30T10:55:41 | 2018-06-30T10:55:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | try:
    raise NameError("hi there")  # Raise Error: deliberately trigger the handler below
except NameError:
    # Demonstrates catching a specific exception type.
    print("an exception")
| [
"noreply@github.com"
] | noreply@github.com |
67a20ce0e3e82ec860d6764209e162fd4fe77b4d | 00cb405170a6a9572bef0ec8f373813eada08c03 | /Game Structure/geometry/version5/myentitygroup.py | 48d7e8cdcb9636189cb9f36e9451420877bcb85d | [] | no_license | MarcPartensky/Python-Games | c0ad2857be5832d6029642bb0a96bc8e403a12e3 | ebfcaaf4a028eddb36bbc99184eb3f7a86eb24ed | refs/heads/master | 2022-09-03T00:04:16.402288 | 2022-08-12T17:10:22 | 2022-08-12T17:10:22 | 166,606,022 | 2 | 1 | null | 2021-03-07T16:20:15 | 2019-01-19T23:56:04 | Python | UTF-8 | Python | false | false | 11,746 | py | from mymanager import Manager
from myentity import Entity
from mymotion import Motion
from mygroup import Group
import numpy as np
import random
class EntityGroup(Group):
    """A Group of entities with entity-specific features: spawning,
    activation, event dispatch, collision helpers and rendering."""

    @staticmethod
    def getCollided(group1, group2):
        """Return the list of (e1, e2) entity pairs of the two groups that
        collide, using the born radii as a cheap broad-phase filter before
        the exact cross() test."""
        collisions = []
        for e1 in group1:
            for e2 in group2:
                # Broad phase: bounding circles must overlap first.
                if (e1.position - e2.position).norm < e1.born + e2.born:
                    if e1.cross(e2):
                        collisions.append((e1, e2))
        return collisions

    @staticmethod
    def killOnCollision(group1, group2):
        """Kill every crossing pair. We suppose the entities of group1 and
        group2 alive."""
        for e1 in group1:
            for e2 in group2:
                if (e1.position - e2.position).norm < e1.born + e2.born:
                    if e1.cross(e2):
                        e1.die()
                        e2.die()

    @classmethod
    def randomOfType(cls, etype, n=0, **kwargs):
        """Create a group of n random entities of type 'etype'."""
        entities = [etype.random() for _ in range(n)]
        return cls(*entities, **kwargs)

    @classmethod
    def randomOfTypes(cls, *types, n=0, **kwargs):
        """Create a group of n random entities, each combining all the
        given types.

        BUG FIX: ``n`` is now forwarded to randomOfType; it used to be
        silently dropped, so the group was always empty.
        """
        class etype(*types):
            pass
        return cls.randomOfType(etype, n=n, **kwargs)

    @classmethod
    def random(cls, n=10, **kwargs):
        """Create a group of n random base entities."""
        entities = [Entity.random() for _ in range(n)]
        return cls(*entities, **kwargs)

    @classmethod
    def randomWithSizeSparse(cls, n, size, sparse, **kwargs):
        """Create a random group, then scale the anatomies by 'size' and
        spread the motions by 'sparse'."""
        g = super().random(n, **kwargs)
        g.enlarge(size)
        g.spread(sparse)
        return g

    def __init__(self, *entities, alive=False, active=False, activate=False):
        """Create an entity group; optionally activate it immediately."""
        super().__init__(*entities)
        self.active = active
        self.alive = alive
        if activate:
            self.activate()

    # Binding the entities to the elements of the underlying Group.
    entities = property(Group.getElements, Group.setElements, Group.delElements)

    def randomEntity(self):
        """Return a random entity of the group, descending recursively
        into nested groups."""
        candidates = [
            entity.randomEntity() if isinstance(entity, EntityGroup) else entity
            for entity in self.entities
        ]
        return random.choice(candidates)

    def spawn(self):
        """Mark the group alive and spawn each entity."""
        self.alive = True
        for entity in self:
            entity.spawn()

    def updateActivation(self):
        """The group is active iff any of its entities is active."""
        self.active = any(entity.active for entity in self)

    def activate(self):
        """Reactivate the group and all its entities."""
        self.active = True
        for entity in self:
            entity.activate()

    def deactivate(self):
        """Deactivate the group and all its entities."""
        self.active = False
        for entity in self:
            entity.deactivate()

    def reactKeyDown(self, key):
        """Dispatch a key-down event to each active entity."""
        for entity in self:
            if entity.active:
                entity.reactKeyDown(key)

    def reactMouseMotion(self, position):
        """Dispatch a mouse-motion event to each active entity."""
        for entity in self:
            if entity.active:
                entity.reactMouseMotion(position)

    def reactMouseButtonDown(self, button, position):
        """Dispatch a mouse-button-down event to each active entity."""
        for entity in self:
            if entity.active:
                entity.reactMouseButtonDown(button, position)

    def respawn(self):
        """Respawn all dead entities."""
        for entity in self:
            entity.respawn()

    def clean(self):
        """Delete all dead entities, recursing into alive nested groups.

        Manual index loop on purpose: elements are deleted in place while
        scanning, so the index only advances past survivors.
        """
        i = 0
        while i < len(self):
            if self[i].alive:
                if isinstance(self[i], EntityGroup):
                    self[i].clean()
                i += 1
            else:
                del self[i]

    def show(self, context):
        """Show all entities on the given context."""
        for entity in self:
            entity.show(context)

    def showBorn(self, context):
        """Show the bounding circle of each entity."""
        for entity in self:
            entity.showBorn(context)

    def __str__(self, name=None):
        """Return the group's string form, defaulting the displayed name
        to the concrete class name."""
        if name is None:
            name = type(self).__name__
        return super().__str__(name)

    def update(self, dt):
        """Advance every entity by the time step dt."""
        for entity in self:
            entity.update(dt)

    def setFriction(self, friction):
        """Set the friction of every entity."""
        for entity in self:
            entity.setFriction(friction)

    def enlarge(self, n):
        """Enlarge the anatomies of the entities by factor n."""
        for entity in self:
            entity.enlarge(n)

    def spread(self, n):
        """Spread the motions of the entities by factor n."""
        for entity in self:
            entity.spread(n)

    def control(self, controller):
        """Return the entity addressed by the controller index path,
        descending one level per index."""
        if len(controller) > 1:
            return self[controller[0]].control(controller[1:])
        else:
            return self[controller[0]]
class AliveEntityGroup:
    """Group of entities, keyed by id, that drives its own lifecycle:
    spawning, updating, collision detection and killing."""

    @classmethod
    def random(cls, n=5, np=3, nm=2, nv=2, dv=2):
        """Create a random entity group of n entities; np/nm/nv/dv are
        forwarded to Entity.random."""
        entities = [Entity.random(n=np, nm=nm, nv=nv, d=dv) for i in range(n)]
        return cls(dict(enumerate(entities)))

    def __init__(self, entities):
        """Create the group from an {id: entity} dictionary."""
        self.entities = entities
        self.updateAlives()
        self.updateMaxBorn()

    def updateAlives(self):
        """Refresh the cached {id: entity} dict of alive entities.

        Recurrent data kept explicitly for efficiency; call after any
        operation that may kill entities.
        """
        self.alives = {key: entity for (key, entity)
                       in self.entities.items() if entity.alive}

    @property
    def deads(self):
        """Return the {id: entity} dict of dead entities."""
        return {k: v for k, v in self.entities.items() if k not in self.alives}

    def spawnEach(self):
        """Spawn every entity and mark them all alive.

        BUG FIX: this used to assign ``self.entities.keys()`` (a keys
        view) to ``self.alives``; any later ``self.alives.values()`` call
        then raised AttributeError. Keep alives as an id -> entity dict.
        """
        for entity in self.entities.values():
            entity.spawn()
        self.alives = dict(self.entities)

    def update(self, dt):
        """Step each alive entity, then resolve circle collisions and
        drop the killed entities from the alive cache."""
        self.updateEach(dt)
        collisions = self.getCollisionsWithCircles()
        if len(collisions) > 0:
            collided = self.getCollided(collisions)
            if len(collided) != 0:
                self.killEach(collided)
                self.updateAlives()

    def updateEach(self, dt):
        """Update each alive entity by the time step dt."""
        for entity in self.alives.values():
            entity.update(dt)

    def showEach(self, context):
        """Show each alive entity on the given context."""
        for entity in self.alives.values():
            entity.show(context)

    def respawnDeads(self):
        """Respawn each dead entity."""
        for entity in self.deads.values():
            entity.respawn()

    def getCollisions(self):
        """Return the list of (id1, id2) pairs of alive entities whose
        exact cross() test reports a collision."""
        collisions = []
        keys = list(self.alives.keys())
        n = len(keys)
        for i in range(n):
            for j in range(i + 1, n):
                id1, id2 = keys[i], keys[j]
                if self.alives[id1].cross(self.alives[id2]):
                    collisions.append((id1, id2))
        return collisions

    def getCollided(self, collisions):
        """Return {id: entity} for every id involved in a collision pair."""
        ids = set(np.reshape(collisions, 2 * len(collisions)))
        return {key: self.entities[key] for key in ids}

    def killEach(self, collided):
        """Kill the given {id: entity} entities."""
        for entity in collided.values():
            entity.die()

    def spread(self, n=10):
        """Give every entity a fresh random motion scaled by n."""
        for entity in self.entities.values():
            entity.motion = n * Motion.random()

    def followEach(self, point):
        """Make each alive entity follow the given point."""
        for entity in self.alives.values():
            entity.follow(point)

    def getMaxBorn(self):
        """Return the cached largest bounding radius."""
        return self._max_born

    def updateMaxBorn(self):
        """Cache the largest born (bounding radius) among alive entities.

        Uses default=0 so an all-dead (or empty) group no longer raises
        ValueError.
        """
        self._max_born = max((e.born for e in self.alives.values()), default=0)

    def getCollisionsWithCircles(self):
        """Like getCollisions, but with a bounding-circle broad phase
        before the exact cross() test."""
        collisions = []
        keys = list(self.alives.keys())
        n = len(keys)
        for i in range(n):
            for j in range(i + 1, n):
                id1, id2 = keys[i], keys[j]
                e1, e2 = self.alives[id1], self.alives[id2]
                if (e1.position - e2.position).norm < e1.born + e2.born:
                    if e1.cross(e2):
                        collisions.append((id1, id2))
        return collisions

    @property
    def alive(self):
        """Return True if any entity is alive."""
        return len(self.alives) != 0

    @property
    def dead(self):
        """Return True if all entities are dead."""
        return len(self.alives) == 0
class GroupManager(Manager):
    """Manager driving an entity group through its update/show cycle.

    NOTE(review): random() builds an EntityGroup, yet update() calls the
    AliveEntityGroup API (getCollisions/getCollided(pairs)/updateAlives);
    this mismatch is preserved from the original — confirm which group
    type is intended.
    """

    @classmethod
    def random(cls, **kwargs):
        """Build a manager around a randomly generated entity group."""
        return cls(EntityGroup.random(**kwargs))

    def __init__(self, group, **kwargs):
        """Wrap the given group, forwarding extra options to Manager."""
        super().__init__(**kwargs)
        self.group = group

    def update(self):
        """Resolve collisions, drop the killed entities, then step the
        survivors by the manager's time step."""
        group = self.group
        pairs = group.getCollisions()
        group.killEach(group.getCollided(pairs))
        group.updateAlives()
        group.updateEach(self.dt)

    def show(self):
        """Draw every alive entity on the manager's context."""
        self.group.showEach(self.context)
class GroupTester(GroupManager):
    """Interactive tester: spreads the group, then makes it chase the cursor."""

    def __init__(self, *args, **kwargs):
        """Initialize like GroupManager, then scatter the entities.

        Bug fix: **kwargs were accepted but silently dropped
        (``super().__init__(*args)``); they are now forwarded.
        """
        super().__init__(*args, **kwargs)
        self.group.spread(100)
        self.following = True

    def update(self):
        """Per-frame update; delegates to the collision-aware variant."""
        # self.group.updateEach(self.dt)
        self.updateWithCollisions()

    def updateWithCollisions(self):
        """Follow the pointer, log collisions, kill collided entities, advance."""
        self.group.followEach(self.context.point())
        collisions = self.group.getCollisionsWithCircles()
        if len(collisions) > 0:
            self.context.console.append(collisions)
        collided = self.group.getCollided(collisions)
        if len(collided) != 0:
            self.group.killEach(collided)
            self.group.updateAlives()
        self.group.updateEach(self.dt)
if __name__ == "__main__":
    # Ad-hoc manual smoke test: build two random groups, enlarge one and
    # print their concatenation. Earlier experiments kept commented out.
    # bm = SpaceShipTester.random(following=True, dt=0.1)
    # bm()
    # gt = GroupTester.random(n=50)
    # print(gt.group.alives)
    # gt()
    b1 = EntityGroup.random()
    b2 = EntityGroup.random()
    b1.enlarge(100)
    print(b1 + b2)
| [
"marc.partensky@gmail.com"
] | marc.partensky@gmail.com |
49359e0023eda45fba60e91197856055de571f96 | aa9f771c21c6d6b40be5f079f7ad7144a5129a7a | /optimus/input/variable_input_type_adapter.py | efbc561a4f6450b9933a5ffc9fff30bce4fe70de | [
"MIT"
] | permissive | sobhanlenka/optimus | 9b9476519c6c31c29fdb62ef8637f39f9eeae084 | ed02bffc4afdde2c80db29e1ac3356f27e8d5475 | refs/heads/master | 2022-10-31T16:34:48.068594 | 2018-06-22T18:33:41 | 2018-06-22T18:33:41 | 138,263,902 | 0 | 1 | MIT | 2022-10-20T13:49:25 | 2018-06-22T06:30:00 | Python | UTF-8 | Python | false | false | 1,890 | py | from __future__ import unicode_literals
from optimus.input import InputAdapter
from optimus.conversation import Statement
class VariableInputTypeAdapter(InputAdapter):
JSON = 'json'
TEXT = 'text'
OBJECT = 'object'
VALID_FORMATS = (JSON, TEXT, OBJECT, )
def detect_type(self, statement):
import sys
if sys.version_info[0] < 3:
string_types = basestring # NOQA
else:
string_types = str
if hasattr(statement, 'text'):
return self.OBJECT
if isinstance(statement, string_types):
return self.TEXT
if isinstance(statement, dict):
return self.JSON
input_type = type(statement)
raise self.UnrecognizedInputFormatException(
'The type {} is not recognized as a valid input type.'.format(
input_type
)
)
def process_input(self, statement):
input_type = self.detect_type(statement)
# Return the statement object without modification
if input_type == self.OBJECT:
return statement
# Convert the input string into a statement object
if input_type == self.TEXT:
return Statement(statement)
# Convert input dictionary into a statement object
if input_type == self.JSON:
input_json = dict(statement)
text = input_json['text']
del input_json['text']
return Statement(text, **input_json)
class UnrecognizedInputFormatException(Exception):
"""
Exception raised when an input format is specified that is
not in the VariableInputTypeAdapter.VALID_FORMATS variable.
"""
def __init__(self, value='The input format was not recognized.'):
self.value = value
def __str__(self):
return repr(self.value)
| [
"sobhanlenka@gmail.com"
] | sobhanlenka@gmail.com |
91a0cbaa942ca70868ca7d37008a1923d0fb2ee7 | 9a8ee2d10e54229b0a7d528133398bff95456dcb | /Python/Desafio015.py | 3e221c43332407f0d788e30c81692f323327d333 | [] | no_license | ArielKollross/curso_em_video | c7a381e4caa1ee4a0e5fda7d87d71fe83b5fe2c7 | 978b5d77359edf046977f722c215eaf435723716 | refs/heads/master | 2020-07-04T19:38:52.017538 | 2020-02-14T02:44:16 | 2020-02-14T02:44:16 | 202,391,908 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | # Desafio 015: leia quantidade de dias e Km rodado, o custo R$ 60/dia além do custo de R$ 0.15/km
# Read the number of rental days and kilometres driven, then print the
# total cost: R$ 60 per day plus R$ 0.15 per km.  (Prompts are Portuguese
# runtime strings and are kept intact.)
dias = int(input('Numero de dias utilizados: '))
km = float(input('Numero de Km rodados: '))
alugel = 60*dias + km*0.15
print('O valor total a ser pago é de: {:.2f}'.format(alugel))
"noreply@github.com"
] | noreply@github.com |
bf342befc93f6e874f5a82c83db670ea0dcd7f9b | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/numpy/2016/8/system_info.py | 014223b745ac3bc735add615d462bac5093d83af | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 84,885 | py | #!/bin/env python
"""
This file defines a set of system_info classes for getting
information about various resources (libraries, library directories,
include directories, etc.) in the system. Currently, the following
classes are available:
atlas_info
atlas_threads_info
atlas_blas_info
atlas_blas_threads_info
lapack_atlas_info
lapack_atlas_threads_info
atlas_3_10_info
atlas_3_10_threads_info
atlas_3_10_blas_info,
atlas_3_10_blas_threads_info,
lapack_atlas_3_10_info
lapack_atlas_3_10_threads_info
blas_info
lapack_info
openblas_info
blis_info
blas_opt_info # usage recommended
lapack_opt_info # usage recommended
fftw_info,dfftw_info,sfftw_info
fftw_threads_info,dfftw_threads_info,sfftw_threads_info
djbfft_info
x11_info
lapack_src_info
blas_src_info
numpy_info
numarray_info
numpy_info
boost_python_info
agg2_info
wx_info
gdk_pixbuf_xlib_2_info
gdk_pixbuf_2_info
gdk_x11_2_info
gtkp_x11_2_info
gtkp_2_info
xft_info
freetype2_info
umfpack_info
Usage:
info_dict = get_info(<name>)
where <name> is a string 'atlas','x11','fftw','lapack','blas',
'lapack_src', 'blas_src', etc. For a complete list of allowed names,
see the definition of get_info() function below.
Returned info_dict is a dictionary which is compatible with
distutils.setup keyword arguments. If info_dict == {}, then the
asked resource is not available (system_info could not find it).
Several *_info classes specify an environment variable to specify
the locations of software. When setting the corresponding environment
variable to 'None' then the software will be ignored, even when it
is available in system.
Global parameters:
system_info.search_static_first - search static libraries (.a)
in precedence to shared ones (.so, .sl) if enabled.
system_info.verbosity - output the results to stdout if enabled.
The file 'site.cfg' is looked for in
1) Directory of main setup.py file being run.
2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
3) System wide directory (location of this file...)
The first one found is used to get system configuration options The
format is that used by ConfigParser (i.e., Windows .INI style). The
section ALL has options that are the default for each section. The
available sections are fftw, atlas, and x11. Appropriate defaults are
used if nothing is specified.
The order of finding the locations of resources is the following:
1. environment variable
2. section in site.cfg
3. ALL section in site.cfg
Only the first complete match is returned.
Example:
----------
[ALL]
library_dirs = /usr/lib:/usr/local/lib:/opt/lib
include_dirs = /usr/include:/usr/local/include:/opt/include
src_dirs = /usr/local/src:/opt/src
# search static libraries (.a) in preference to shared ones (.so)
search_static_first = 0
[fftw]
fftw_libs = rfftw, fftw
fftw_opt_libs = rfftw_threaded, fftw_threaded
# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs
[atlas]
library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
# for overriding the names of the atlas libraries
atlas_libs = lapack, f77blas, cblas, atlas
[x11]
library_dirs = /usr/X11R6/lib
include_dirs = /usr/X11R6/include
----------
Authors:
Pearu Peterson <pearu@cens.ioc.ee>, February 2002
David M. Cooke <cookedm@physics.mcmaster.ca>, April 2002
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import copy
import warnings
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
from ConfigParser import NoOptionError
from ConfigParser import RawConfigParser as ConfigParser
else:
from configparser import NoOptionError
from configparser import RawConfigParser as ConfigParser
# It seems that some people are importing ConfigParser from here so is
# good to keep its class name. Use of RawConfigParser is needed in
# order to be able to load path names with percent in them, like
# `feature%2Fcool` which is common on git flow branch names.
from distutils.errors import DistutilsError
from distutils.dist import Distribution
import distutils.sysconfig
from distutils import log
from distutils.util import get_platform
from numpy.distutils.exec_command import \
find_executable, exec_command, get_pythonexe
from numpy.distutils.misc_util import is_sequence, is_string, \
get_shared_lib_extension
from numpy.distutils.command.config import config as cmd_config
from numpy.distutils.compat import get_exception
import distutils.ccompiler
import tempfile
import shutil
# Determine number of bits
# platform_bits is 32 or 64, matching the running interpreter's architecture;
# it selects e.g. whether '/usr/lib64' variants are searched (see libpaths).
import platform
_bits = {'32bit': 32, '64bit': 64}
platform_bits = _bits[platform.architecture()[0]]
def libpaths(paths, bits):
    """Return a list of library paths valid on 32 or 64 bit systems.

    Parameters
    ----------
    paths : sequence of str
        Candidate library directories.
    bits : int
        Pointer size of the target platform; must be 32 or 64.

    Returns
    -------
    list of str
        On 32 bits, `paths` unchanged; on 64 bits, each entry is preceded
        by its '64'-suffixed variant, e.g. '/usr/lib' -> ['/usr/lib64',
        '/usr/lib'].

    Raises
    ------
    ValueError
        If `bits` is neither 32 nor 64.
    """
    if bits not in (32, 64):
        raise ValueError("Invalid bit size in libpaths: 32 or 64 only")

    # Nothing to add on 32-bit platforms.
    if bits == 32:
        return paths

    # 64-bit: interleave the '64'-suffixed variant before each path.
    return [variant for p in paths for variant in (p + '64', p)]
# Platform-dependent default search paths for libraries, headers and source
# trees.  These module-level lists seed system_info.__init__'s ConfigParser
# defaults and may be extended below (multiarch, sys.prefix).
if sys.platform == 'win32':
    default_lib_dirs = ['C:\\',
                        os.path.join(distutils.sysconfig.EXEC_PREFIX,
                                     'libs')]
    default_runtime_dirs = []
    default_include_dirs = []
    default_src_dirs = ['.']
    default_x11_lib_dirs = []
    default_x11_include_dirs = []
else:
    default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',
                                 '/opt/local/lib', '/sw/lib'], platform_bits)
    default_runtime_dirs = []
    default_include_dirs = ['/usr/local/include',
                            '/opt/include', '/usr/include',
                            # path of umfpack under macports
                            '/opt/local/include/ufsparse',
                            '/opt/local/include', '/sw/include',
                            '/usr/include/suitesparse']
    default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']

    default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',
                                     '/usr/lib'], platform_bits)
    default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include',
                                '/usr/include']

    # Debian/Ubuntu-style multiarch layout: libX11 may live under
    # /usr/lib/<triplet>/ rather than /usr/lib directly.
    if os.path.exists('/usr/lib/X11'):
        globbed_x11_dir = glob('/usr/lib/*/libX11.so')
        if globbed_x11_dir:
            x11_so_dir = os.path.split(globbed_x11_dir[0])[0]
            default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])
            default_x11_include_dirs.extend(['/usr/lib/X11/include',
                                             '/usr/include/X11'])

    # Ask gcc for the multiarch triplet (e.g. x86_64-linux-gnu) and add the
    # corresponding /usr/lib subdirectory when the option is supported.
    import subprocess as sp
    tmp = None
    try:
        # Explicitly open/close file to avoid ResourceWarning when
        # tests are run in debug mode Python 3.
        tmp = open(os.devnull, 'w')
        p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE,
                     stderr=tmp)
    except (OSError, DistutilsError):
        # OSError if gcc is not installed, or SandboxViolation (DistutilsError
        # subclass) if an old setuptools bug is triggered (see gh-3160).
        pass
    else:
        triplet = str(p.communicate()[0].decode().strip())
        if p.returncode == 0:
            # gcc supports the "-print-multiarch" option
            default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
            default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
    finally:
        if tmp is not None:
            tmp.close()

# Prefer the interpreter's own prefix (virtualenv/conda installs).
if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
    default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
    default_include_dirs.append(os.path.join(sys.prefix, 'include'))
    default_src_dirs.append(os.path.join(sys.prefix, 'src'))

# Keep only directories that actually exist on this machine.
default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]
default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]
default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]
default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]

# Platform-specific shared-library suffix ('.so', '.pyd', ...).
so_ext = get_shared_lib_extension()
def get_standard_file(fname):
    """Return existing files named `fname`, searched in this order:

    1) the directory holding this module (system-wide file),
    2) the user's home directory,
    3) the current working directory.
    """
    found = []

    # 1) Next to this module.  NOTE(review): when __file__ is undefined the
    # sys.argv[0] fallback is computed but never used -- quirk preserved.
    try:
        here = __file__
    except NameError:
        here = sys.argv[0]
    else:
        candidate = os.path.join(os.path.split(os.path.abspath(here))[0],
                                 fname)
        if os.path.isfile(candidate):
            found.append(candidate)

    # 2) The user's home directory (expanduser may raise KeyError when no
    # home can be determined).
    try:
        home = os.path.expanduser('~')
    except KeyError:
        pass
    else:
        candidate = os.path.join(home, fname)
        if os.path.isfile(candidate):
            found.append(candidate)

    # 3) The current working directory.
    if os.path.isfile(fname):
        found.append(os.path.abspath(fname))

    return found
def get_info(name, notfound_action=0):
    """Return resource info for `name` as a numpy.distutils.setup-compatible
    dict (empty dict when the resource is unavailable).

    notfound_action:
      0 - do nothing
      1 - display warning message
      2 - raise error
    """
    # Dispatch table from resource name to its system_info subclass; unknown
    # names fall back to the base system_info (which reports NOT AVAILABLE).
    cl = {'atlas': atlas_info,  # use lapack_opt or blas_opt instead
          'atlas_threads': atlas_threads_info,                # ditto
          'atlas_blas': atlas_blas_info,
          'atlas_blas_threads': atlas_blas_threads_info,
          'lapack_atlas': lapack_atlas_info,  # use lapack_opt instead
          'lapack_atlas_threads': lapack_atlas_threads_info,  # ditto
          'atlas_3_10': atlas_3_10_info,  # use lapack_opt or blas_opt instead
          'atlas_3_10_threads': atlas_3_10_threads_info,                # ditto
          'atlas_3_10_blas': atlas_3_10_blas_info,
          'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
          'lapack_atlas_3_10': lapack_atlas_3_10_info,  # use lapack_opt instead
          'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info,  # ditto
          'mkl': mkl_info,
          # openblas which may or may not have embedded lapack
          'openblas': openblas_info,  # use blas_opt instead
          # openblas with embedded lapack
          'openblas_lapack': openblas_lapack_info,  # use blas_opt instead
          'blis': blis_info,  # use blas_opt instead
          'lapack_mkl': lapack_mkl_info,  # use lapack_opt instead
          'blas_mkl': blas_mkl_info,  # use blas_opt instead
          'x11': x11_info,
          'fft_opt': fft_opt_info,
          'fftw': fftw_info,
          'fftw2': fftw2_info,
          'fftw3': fftw3_info,
          'dfftw': dfftw_info,
          'sfftw': sfftw_info,
          'fftw_threads': fftw_threads_info,
          'dfftw_threads': dfftw_threads_info,
          'sfftw_threads': sfftw_threads_info,
          'djbfft': djbfft_info,
          'blas': blas_info,  # use blas_opt instead
          'lapack': lapack_info,  # use lapack_opt instead
          'lapack_src': lapack_src_info,
          'blas_src': blas_src_info,
          'numpy': numpy_info,
          'f2py': f2py_info,
          'Numeric': Numeric_info,
          'numeric': Numeric_info,
          'numarray': numarray_info,
          'numerix': numerix_info,
          'lapack_opt': lapack_opt_info,
          'blas_opt': blas_opt_info,
          'boost_python': boost_python_info,
          'agg2': agg2_info,
          'wx': wx_info,
          'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
          'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
          'gdk_pixbuf_2': gdk_pixbuf_2_info,
          'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
          'gdk': gdk_info,
          'gdk_2': gdk_2_info,
          'gdk-2.0': gdk_2_info,
          'gdk_x11_2': gdk_x11_2_info,
          'gdk-x11-2.0': gdk_x11_2_info,
          'gtkp_x11_2': gtkp_x11_2_info,
          'gtk+-x11-2.0': gtkp_x11_2_info,
          'gtkp_2': gtkp_2_info,
          'gtk+-2.0': gtkp_2_info,
          'xft': xft_info,
          'freetype2': freetype2_info,
          'umfpack': umfpack_info,
          'amd': amd_info,
          }.get(name.lower(), system_info)
    # Instantiate fresh each call; results are cached in
    # system_info.saved_results keyed by class name.
    return cl().get_info(notfound_action)
# Base class of every resource-lookup failure below.  NOTE: the subclass
# docstrings are user-visible -- get_info() passes __doc__ to warnings.warn()
# or raises it, so they must not be reworded casually.
class NotFoundError(DistutilsError):
    """Some third-party program or library is not found."""
# Docstring is the user-facing warning/error text -- keep intact.
class AtlasNotFoundError(NotFoundError):
    """
    Atlas (http://math-atlas.sourceforge.net/) libraries not found.
    Directories to search for the libraries can be specified in the
    numpy/distutils/site.cfg file (section [atlas]) or by setting
    the ATLAS environment variable."""
# Docstring is the user-facing warning/error text -- keep intact.
class LapackNotFoundError(NotFoundError):
    """
    Lapack (http://www.netlib.org/lapack/) libraries not found.
    Directories to search for the libraries can be specified in the
    numpy/distutils/site.cfg file (section [lapack]) or by setting
    the LAPACK environment variable."""
# Docstring is the user-facing warning/error text -- keep intact.
class LapackSrcNotFoundError(LapackNotFoundError):
    """
    Lapack (http://www.netlib.org/lapack/) sources not found.
    Directories to search for the sources can be specified in the
    numpy/distutils/site.cfg file (section [lapack_src]) or by setting
    the LAPACK_SRC environment variable."""
# Docstring is the user-facing warning/error text -- keep intact.
class BlasNotFoundError(NotFoundError):
    """
    Blas (http://www.netlib.org/blas/) libraries not found.
    Directories to search for the libraries can be specified in the
    numpy/distutils/site.cfg file (section [blas]) or by setting
    the BLAS environment variable."""
# Docstring is the user-facing warning/error text -- keep intact.
class BlasSrcNotFoundError(BlasNotFoundError):
    """
    Blas (http://www.netlib.org/blas/) sources not found.
    Directories to search for the sources can be specified in the
    numpy/distutils/site.cfg file (section [blas_src]) or by setting
    the BLAS_SRC environment variable."""
# Docstring is the user-facing warning/error text -- keep intact.
class FFTWNotFoundError(NotFoundError):
    """
    FFTW (http://www.fftw.org/) libraries not found.
    Directories to search for the libraries can be specified in the
    numpy/distutils/site.cfg file (section [fftw]) or by setting
    the FFTW environment variable."""
# Docstring is the user-facing warning/error text -- keep intact.
class DJBFFTNotFoundError(NotFoundError):
    """
    DJBFFT (http://cr.yp.to/djbfft.html) libraries not found.
    Directories to search for the libraries can be specified in the
    numpy/distutils/site.cfg file (section [djbfft]) or by setting
    the DJBFFT environment variable."""
# Docstring is the user-facing warning/error text -- keep intact.
class NumericNotFoundError(NotFoundError):
    """
    Numeric (http://www.numpy.org/) module not found.
    Get it from above location, install it, and retry setup.py."""
# Docstring is the user-facing warning/error text -- keep intact.
class X11NotFoundError(NotFoundError):
    """X11 libraries not found."""
# Docstring is the user-facing warning/error text -- keep intact.
class UmfpackNotFoundError(NotFoundError):
    """
    UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/)
    not found. Directories to search for the libraries can be specified in the
    numpy/distutils/site.cfg file (section [umfpack]) or by setting
    the UMFPACK environment variable."""
class system_info(object):

    """ get_info() is the only public method. Don't use others.
    """
    # Defaults that subclasses override to pick their site.cfg section and
    # the environment variable naming their install prefix.
    section = 'ALL'
    dir_env_var = None
    search_static_first = 0  # XXX: disabled by default, may disappear in
                             # future unless it is proved to be useful.
    verbosity = 1
    # Class-level cache: class name -> info dict, shared by get_info().
    saved_results = {}

    notfounderror = NotFoundError

    def __init__(self,
                  default_lib_dirs=default_lib_dirs,
                  default_include_dirs=default_include_dirs,
                  verbosity=1,
                  ):
        # Load site.cfg (+ ~/.numpy-site.cfg) on top of the platform defaults.
        self.__class__.info = {}
        self.local_prefixes = []
        defaults = {'library_dirs': os.pathsep.join(default_lib_dirs),
                    'include_dirs': os.pathsep.join(default_include_dirs),
                    'runtime_library_dirs': os.pathsep.join(default_runtime_dirs),
                    'rpath': '',
                    'src_dirs': os.pathsep.join(default_src_dirs),
                    'search_static_first': str(self.search_static_first),
                    'extra_compile_args': '', 'extra_link_args': ''}
        self.cp = ConfigParser(defaults)
        self.files = []
        self.files.extend(get_standard_file('.numpy-site.cfg'))
        self.files.extend(get_standard_file('site.cfg'))
        self.parse_config_files()

        if self.section is not None:
            self.search_static_first = self.cp.getboolean(
                self.section, 'search_static_first')
        assert isinstance(self.search_static_first, int)

    def parse_config_files(self):
        # Read all config files; make sure our section exists so later
        # cp.get(self.section, ...) calls fall through to the defaults.
        self.cp.read(self.files)
        if not self.cp.has_section(self.section):
            if self.section is not None:
                self.cp.add_section(self.section)

    def calc_libraries_info(self):
        # Re-check each configured library on disk, splitting results into
        # library_dirs (link-time) and runtime_library_dirs (run-time).
        libs = self.get_libraries()
        dirs = self.get_lib_dirs()
        # The extensions use runtime_library_dirs
        r_dirs = self.get_runtime_lib_dirs()
        # Intrinsic distutils use rpath, we simply append both entries
        # as though they were one entry
        r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))

        info = {}
        for lib in libs:
            i = self.check_libs(dirs, [lib])
            if i is not None:
                dict_append(info, **i)
            else:
                log.info('Library %s was not found. Ignoring' % (lib))

            if r_dirs:
                i = self.check_libs(r_dirs, [lib])
                if i is not None:
                    # Swap library keywords found to runtime_library_dirs
                    # the libraries are insisting on the user having defined
                    # them using the library_dirs, and not necessarily by
                    # runtime_library_dirs
                    del i['libraries']
                    i['runtime_library_dirs'] = i.pop('library_dirs')
                    dict_append(info, **i)
                else:
                    log.info('Runtime library %s was not found. Ignoring' % (lib))

        return info

    def set_info(self, **info):
        # Cache the result; an empty call (no kwargs) records NOT AVAILABLE.
        if info:
            lib_info = self.calc_libraries_info()
            dict_append(info, **lib_info)
            # Update extra information
            extra_info = self.calc_extra_info()
            dict_append(info, **extra_info)
        self.saved_results[self.__class__.__name__] = info

    def has_info(self):
        # True once a (possibly empty) result has been cached for this class.
        return self.__class__.__name__ in self.saved_results

    def calc_extra_info(self):
        """ Updates the information in the current information with
        respect to these flags:
          extra_compile_args
          extra_link_args
        """
        info = {}
        for key in ['extra_compile_args', 'extra_link_args']:
            # Get values
            opt = self.cp.get(self.section, key)
            if opt:
                tmp = {key : [opt]}
                dict_append(info, **tmp)
        return info

    def get_info(self, notfound_action=0):
        """ Return a dictionary with items that are compatible
        with numpy.distutils.setup keyword arguments.
        """
        flag = 0
        if not self.has_info():
            flag = 1
            log.info(self.__class__.__name__ + ':')
            if hasattr(self, 'calc_info'):
                self.calc_info()
            if notfound_action:
                if not self.has_info():
                    if notfound_action == 1:
                        warnings.warn(self.notfounderror.__doc__)
                    elif notfound_action == 2:
                        raise self.notfounderror(self.notfounderror.__doc__)
                    else:
                        raise ValueError(repr(notfound_action))

            if not self.has_info():
                log.info('  NOT AVAILABLE')
                self.set_info()
            else:
                log.info('  FOUND:')

        res = self.saved_results.get(self.__class__.__name__)
        if self.verbosity > 0 and flag:
            for k, v in res.items():
                v = str(v)
                # Truncate very long source/library listings in the log.
                if k in ['sources', 'libraries'] and len(v) > 270:
                    v = v[:120] + '...\n...\n...' + v[-120:]
                log.info('    %s = %s', k, v)
            log.info('')

        return copy.deepcopy(res)

    def get_paths(self, section, key):
        # Combine section paths with the directory pointed to by
        # self.dir_env_var (if set); 'None' disables the resource entirely.
        dirs = self.cp.get(section, key).split(os.pathsep)
        env_var = self.dir_env_var
        if env_var:
            if is_sequence(env_var):
                # Several candidate env vars: take the first one that is set,
                # falling back to the last entry.
                e0 = env_var[-1]
                for e in env_var:
                    if e in os.environ:
                        e0 = e
                        break
                if not env_var[0] == e0:
                    log.info('Setting %s=%s' % (env_var[0], e0))
                env_var = e0
        if env_var and env_var in os.environ:
            d = os.environ[env_var]
            if d == 'None':
                log.info('Disabled %s: %s',
                         self.__class__.__name__, '(%s is None)'
                         % (env_var,))
                return []
            if os.path.isfile(d):
                # Env var may name a library file directly; use its directory
                # and, for single-library resources, adopt its base name.
                dirs = [os.path.dirname(d)] + dirs
                l = getattr(self, '_lib_names', [])
                if len(l) == 1:
                    b = os.path.basename(d)
                    b = os.path.splitext(b)[0]
                    if b[:3] == 'lib':
                        log.info('Replacing _lib_names[0]==%r with %r' \
                              % (self._lib_names[0], b[3:]))
                        self._lib_names[0] = b[3:]
            else:
                # Env var holds one or more prefixes; also try their
                # include/ and lib/ subdirectories.
                ds = d.split(os.pathsep)
                ds2 = []
                for d in ds:
                    if os.path.isdir(d):
                        ds2.append(d)
                        for dd in ['include', 'lib']:
                            d1 = os.path.join(d, dd)
                            if os.path.isdir(d1):
                                ds2.append(d1)
                dirs = ds2 + dirs
        default_dirs = self.cp.get(self.section, key).split(os.pathsep)
        dirs.extend(default_dirs)
        ret = []
        # De-duplicate while preserving order; warn about bogus entries.
        for d in dirs:
            if len(d) > 0 and not os.path.isdir(d):
                warnings.warn('Specified path %s is invalid.' % d)
                continue

            if d not in ret:
                ret.append(d)

        log.debug('( %s = %s )', key, ':'.join(ret))
        return ret

    def get_lib_dirs(self, key='library_dirs'):
        return self.get_paths(self.section, key)

    def get_runtime_lib_dirs(self, key='runtime_library_dirs'):
        path = self.get_paths(self.section, key)
        if path == ['']:
            path = []
        return path

    def get_include_dirs(self, key='include_dirs'):
        return self.get_paths(self.section, key)

    def get_src_dirs(self, key='src_dirs'):
        return self.get_paths(self.section, key)

    def get_libs(self, key, default):
        # Read a comma-separated library list from site.cfg, falling back
        # to `default` (string or sequence) when the option is absent.
        try:
            libs = self.cp.get(self.section, key)
        except NoOptionError:
            if not default:
                return []
            if is_string(default):
                return [default]
            return default
        return [b for b in [a.strip() for a in libs.split(',')] if b]

    def get_libraries(self, key='libraries'):
        if hasattr(self, '_lib_names'):
            return self.get_libs(key, default=self._lib_names)
        else:
            return self.get_libs(key, '')

    def library_extensions(self):
        # Candidate file extensions, ordered by shared/static preference.
        static_exts = ['.a']
        if sys.platform == 'win32':
            static_exts.append('.lib')  # .lib is used by MSVC
        if self.search_static_first:
            exts = static_exts + [so_ext]
        else:
            exts = [so_ext] + static_exts
        if sys.platform == 'cygwin':
            exts.append('.dll.a')
        if sys.platform == 'darwin':
            exts.append('.dylib')
        return exts

    def check_libs(self, lib_dirs, libs, opt_libs=[]):
        """If static or shared libraries are available then return
        their info dictionary.

        Checks for all libraries as shared libraries first, then
        static (or vice versa if self.search_static_first is True).
        """
        exts = self.library_extensions()
        info = None
        for ext in exts:
            info = self._check_libs(lib_dirs, libs, opt_libs, [ext])
            if info is not None:
                break
        if not info:
            log.info('  libraries %s not found in %s', ','.join(libs),
                     lib_dirs)
        return info

    def check_libs2(self, lib_dirs, libs, opt_libs=[]):
        """If static or shared libraries are available then return
        their info dictionary.

        Checks each library for shared or static.
        """
        exts = self.library_extensions()
        info = self._check_libs(lib_dirs, libs, opt_libs, exts)
        if not info:
            log.info('  libraries %s not found in %s', ','.join(libs),
                     lib_dirs)
        return info

    def _find_lib(self, lib_dir, lib, exts):
        # Return the (possibly adjusted) library name if a matching file
        # exists in lib_dir, else False.
        assert is_string(lib_dir)
        # under windows first try without 'lib' prefix
        if sys.platform == 'win32':
            lib_prefixes = ['', 'lib']
        else:
            lib_prefixes = ['lib']
        # for each library name, see if we can find a file for it.
        for ext in exts:
            for prefix in lib_prefixes:
                p = self.combine_paths(lib_dir, prefix + lib + ext)
                if p:
                    break
            if p:
                assert len(p) == 1
                # ??? splitext on p[0] would do this for cygwin
                # doesn't seem correct
                if ext == '.dll.a':
                    lib += '.dll'
                return lib

        return False

    def _find_libs(self, lib_dirs, libs, exts):
        # make sure we preserve the order of libs, as it can be important
        found_dirs, found_libs = [], []
        for lib in libs:
            for lib_dir in lib_dirs:
                found_lib = self._find_lib(lib_dir, lib, exts)
                if found_lib:
                    found_libs.append(found_lib)
                    if lib_dir not in found_dirs:
                        found_dirs.append(lib_dir)
                    break
        return found_dirs, found_libs

    def _check_libs(self, lib_dirs, libs, opt_libs, exts):
        """Find mandatory and optional libs in expected paths.

        Missing optional libraries are silently forgotten.
        """
        if not is_sequence(lib_dirs):
            lib_dirs = [lib_dirs]
        # First, try to find the mandatory libraries
        found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts)
        if len(found_libs) > 0 and len(found_libs) == len(libs):
            # Now, check for optional libraries
            opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts)
            found_libs.extend(opt_found_libs)
            for lib_dir in opt_found_dirs:
                if lib_dir not in found_dirs:
                    found_dirs.append(lib_dir)
            info = {'libraries': found_libs, 'library_dirs': found_dirs}
            return info
        else:
            return None

    def combine_paths(self, *args):
        """Return a list of existing paths composed by all combinations
        of items from the arguments.
        """
        return combine_paths(*args, **{'verbosity': self.verbosity})
class fft_opt_info(system_info):
    """Combined FFT resource: an FFTW flavour plus (optionally) djbfft."""

    def calc_info(self):
        # An FFTW implementation is mandatory; djbfft is only a bonus.
        fftw = get_info('fftw3') or get_info('fftw2') or get_info('dfftw')
        djbfft = get_info('djbfft')
        if not fftw:
            return
        combined = {}
        dict_append(combined, **fftw)
        if djbfft:
            dict_append(combined, **djbfft)
        self.set_info(**combined)
class fftw_info(system_info):
    #variables to override
    section = 'fftw'
    dir_env_var = 'FFTW'
    notfounderror = FFTWNotFoundError
    # Version descriptors tried in order by calc_info(); first hit wins.
    ver_info = [{'name':'fftw3',
                 'libs':['fftw3'],
                 'includes':['fftw3.h'],
                 'macros':[('SCIPY_FFTW3_H', None)]},
                {'name':'fftw2',
                 'libs':['rfftw', 'fftw'],
                 'includes':['fftw.h', 'rfftw.h'],
                 'macros':[('SCIPY_FFTW_H', None)]}]

    def calc_ver_info(self, ver_param):
        """Returns True on successful version detection, else False"""
        lib_dirs = self.get_lib_dirs()
        incl_dirs = self.get_include_dirs()
        incl_dir = None
        libs = self.get_libs(self.section + '_libs', ver_param['libs'])
        info = self.check_libs(lib_dirs, libs)
        if info is not None:
            # Libraries found; now require ALL headers in one include dir.
            flag = 0
            for d in incl_dirs:
                if len(self.combine_paths(d, ver_param['includes'])) \
                   == len(ver_param['includes']):
                    dict_append(info, include_dirs=[d])
                    flag = 1
                    incl_dirs = [d]
                    break
            if flag:
                dict_append(info, define_macros=ver_param['macros'])
            else:
                # Headers missing: discard the partial result.
                info = None
        if info is not None:
            self.set_info(**info)
            return True
        else:
            log.info('  %s not found' % (ver_param['name']))
            return False

    def calc_info(self):
        # Try each version descriptor until one is detected.
        for i in self.ver_info:
            if self.calc_ver_info(i):
                break
# FFTW 2.x only: same detection logic as fftw_info, restricted to one entry.
class fftw2_info(fftw_info):
    #variables to override
    section = 'fftw'
    dir_env_var = 'FFTW'
    notfounderror = FFTWNotFoundError
    ver_info = [{'name':'fftw2',
                 'libs':['rfftw', 'fftw'],
                 'includes':['fftw.h', 'rfftw.h'],
                 'macros':[('SCIPY_FFTW_H', None)]}
                ]
# FFTW 3.x only: separate [fftw3] section and FFTW3 environment variable.
class fftw3_info(fftw_info):
    #variables to override
    section = 'fftw3'
    dir_env_var = 'FFTW3'
    notfounderror = FFTWNotFoundError
    ver_info = [{'name':'fftw3',
                 'libs':['fftw3'],
                 'includes':['fftw3.h'],
                 'macros':[('SCIPY_FFTW3_H', None)]},
                ]
# Double-precision FFTW 2.x build (dfftw/drfftw libraries and headers).
class dfftw_info(fftw_info):
    section = 'fftw'
    dir_env_var = 'FFTW'
    ver_info = [{'name':'dfftw',
                 'libs':['drfftw', 'dfftw'],
                 'includes':['dfftw.h', 'drfftw.h'],
                 'macros':[('SCIPY_DFFTW_H', None)]}]
# Single-precision FFTW 2.x build (sfftw/srfftw libraries and headers).
class sfftw_info(fftw_info):
    section = 'fftw'
    dir_env_var = 'FFTW'
    ver_info = [{'name':'sfftw',
                 'libs':['srfftw', 'sfftw'],
                 'includes':['sfftw.h', 'srfftw.h'],
                 'macros':[('SCIPY_SFFTW_H', None)]}]
# Threaded variant of the default-precision FFTW 2.x libraries.
class fftw_threads_info(fftw_info):
    section = 'fftw'
    dir_env_var = 'FFTW'
    ver_info = [{'name':'fftw threads',
                 'libs':['rfftw_threads', 'fftw_threads'],
                 'includes':['fftw_threads.h', 'rfftw_threads.h'],
                 'macros':[('SCIPY_FFTW_THREADS_H', None)]}]
# Threaded, double-precision FFTW 2.x variant.
class dfftw_threads_info(fftw_info):
    section = 'fftw'
    dir_env_var = 'FFTW'
    ver_info = [{'name':'dfftw threads',
                 'libs':['drfftw_threads', 'dfftw_threads'],
                 'includes':['dfftw_threads.h', 'drfftw_threads.h'],
                 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}]
# Threaded, single-precision FFTW 2.x variant.
class sfftw_threads_info(fftw_info):
    section = 'fftw'
    dir_env_var = 'FFTW'
    ver_info = [{'name':'sfftw threads',
                 'libs':['srfftw_threads', 'sfftw_threads'],
                 'includes':['sfftw_threads.h', 'srfftw_threads.h'],
                 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]
class djbfft_info(system_info):
    """Detect djbfft: either a raw djbfft.a object archive or libdjbfft."""
    section = 'djbfft'
    dir_env_var = 'DJBFFT'
    notfounderror = DJBFFTNotFoundError

    def get_paths(self, section, key):
        """Extend the inherited paths with any 'djbfft' subdirectories."""
        base_dirs = system_info.get_paths(self, section, key)
        expanded = []
        for base in base_dirs:
            expanded.extend(self.combine_paths(base, ['djbfft']) + [base])
        return [path for path in expanded if os.path.isdir(path)]

    def calc_info(self):
        """Find the library or object archive, then require both headers."""
        lib_dirs = self.get_lib_dirs()
        incl_dirs = self.get_include_dirs()
        info = None
        for libdir in lib_dirs:
            # djbfft ships a bare object archive; prefer linking it directly.
            candidate = self.combine_paths(libdir, ['djbfft.a'])
            if candidate:
                info = {'extra_objects': candidate}
                break
            candidate = self.combine_paths(libdir, ['libdjbfft.a',
                                                    'libdjbfft' + so_ext])
            if candidate:
                info = {'libraries': ['djbfft'], 'library_dirs': [libdir]}
                break
        if info is None:
            return
        for incdir in incl_dirs:
            if len(self.combine_paths(incdir, ['fftc8.h', 'fftfreq.h'])) == 2:
                dict_append(info, include_dirs=[incdir],
                            define_macros=[('SCIPY_DJBFFT_H', None)])
                self.set_info(**info)
                return
        return
class mkl_info(system_info):
    """Detect Intel MKL via MKLROOT, LD_LIBRARY_PATH or /etc/ld.so.conf."""
    section = 'mkl'
    dir_env_var = 'MKLROOT'
    _lib_mkl = ['mkl_rt']

    def get_mkl_rootdir(self):
        """Return the MKL installation root, or None if it cannot be found."""
        mklroot = os.environ.get('MKLROOT', None)
        if mklroot is not None:
            return mklroot
        # Fall back to scanning the dynamic-linker search paths.
        paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
        ld_so_conf = '/etc/ld.so.conf'
        if os.path.isfile(ld_so_conf):
            # Fix: use a context manager so the file handle is closed
            # deterministically (the original leaked it).
            with open(ld_so_conf, 'r') as f:
                for d in f:
                    d = d.strip()
                    if d:
                        paths.append(d)
        # NOTE: a previously computed 'intel_mkl_dirs' list was never read
        # anywhere; the dead code has been removed.
        for path in paths:
            candidates = glob(os.path.join(path, 'mkl', '*'))
            candidates += glob(os.path.join(path, 'mkl*'))
            for candidate in candidates:
                if os.path.isdir(os.path.join(candidate, 'lib')):
                    return candidate
        return None

    def __init__(self):
        """Point the default search dirs at the detected MKL install."""
        mklroot = self.get_mkl_rootdir()
        if mklroot is None:
            system_info.__init__(self)
        else:
            from .cpuinfo import cpu
            # MKL's library subdirectory name depends on the architecture.
            if cpu.is_Itanium():
                plt = '64'
            elif cpu.is_Intel() and cpu.is_64bit():
                plt = 'intel64'
            else:
                plt = '32'
            system_info.__init__(
                self,
                default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],
                default_include_dirs=[os.path.join(mklroot, 'include')])

    def calc_info(self):
        """Publish MKL link information if the libraries are present."""
        lib_dirs = self.get_lib_dirs()
        incl_dirs = self.get_include_dirs()
        mkl_libs = self.get_libs('mkl_libs', self._lib_mkl)
        info = self.check_libs2(lib_dirs, mkl_libs)
        if info is None:
            return
        dict_append(info,
                    define_macros=[('SCIPY_MKL_H', None),
                                   ('HAVE_CBLAS', None)],
                    include_dirs=incl_dirs)
        if sys.platform == 'win32':
            pass  # win32 has no pthread library
        else:
            dict_append(info, libraries=['pthread'])
        self.set_info(**info)
class lapack_mkl_info(mkl_info):
    # mkl_rt also exports LAPACK, so the plain MKL probe suffices.
    pass
class blas_mkl_info(mkl_info):
    # mkl_rt also exports BLAS/CBLAS, so the plain MKL probe suffices.
    pass
class atlas_info(system_info):
    """Probe for ATLAS (<= 3.9): BLAS plus a possibly-incomplete LAPACK."""
    section = 'atlas'
    dir_env_var = 'ATLAS'
    _lib_names = ['f77blas', 'cblas']
    # FreeBSD ships the re-entrant variants under different names.
    if sys.platform[:7] == 'freebsd':
        _lib_atlas = ['atlas_r']
        _lib_lapack = ['alapack_r']
    else:
        _lib_atlas = ['atlas']
        _lib_lapack = ['lapack']
    notfounderror = AtlasNotFoundError

    def get_paths(self, section, key):
        """Also descend into common ATLAS install subdirectories."""
        pre_dirs = system_info.get_paths(self, section, key)
        dirs = []
        for d in pre_dirs:
            dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*',
                                               'sse', '3dnow', 'sse2']) + [d])
        return [d for d in dirs if os.path.isdir(d)]

    def calc_info(self):
        """Locate ATLAS BLAS and, if possible, its LAPACK companion."""
        lib_dirs = self.get_lib_dirs()
        info = {}
        atlas_libs = self.get_libs('atlas_libs',
                                   self._lib_names + self._lib_atlas)
        lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)
        atlas = None
        lapack = None
        atlas_1 = None  # first ATLAS hit, kept as fallback if no LAPACK found
        for d in lib_dirs:
            atlas = self.check_libs2(d, atlas_libs, [])
            lapack_atlas = self.check_libs2(d, ['lapack_atlas'], [])
            if atlas is not None:
                lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])
                lapack = self.check_libs2(lib_dirs2, lapack_libs, [])
                if lapack is not None:
                    break
            if atlas:
                atlas_1 = atlas
        log.info(self.__class__)
        if atlas is None:
            atlas = atlas_1
        if atlas is None:
            return
        include_dirs = self.get_include_dirs()
        # cblas.h may live next to the libraries or in an include dir.
        h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
        h = h[0]
        if h:
            h = os.path.dirname(h)
            dict_append(info, include_dirs=[h])
        info['language'] = 'c'
        if lapack is not None:
            dict_append(info, **lapack)
            dict_append(info, **atlas)
        elif 'lapack_atlas' in atlas['libraries']:
            # Only ATLAS's own (partial) LAPACK routines are available.
            dict_append(info, **atlas)
            dict_append(info,
                        define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)])
            self.set_info(**info)
            return
        else:
            dict_append(info, **atlas)
            dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])
            message = """
*********************************************************************
    Could not find lapack library within the ATLAS installation.
*********************************************************************
"""
            warnings.warn(message)
            self.set_info(**info)
            return

        # Check if lapack library is complete, only warn if it is not.
        lapack_dir = lapack['library_dirs'][0]
        lapack_name = lapack['libraries'][0]
        lapack_lib = None
        lib_prefixes = ['lib']
        if sys.platform == 'win32':
            lib_prefixes.append('')
        for e in self.library_extensions():
            for prefix in lib_prefixes:
                fn = os.path.join(lapack_dir, prefix + lapack_name + e)
                if os.path.exists(fn):
                    lapack_lib = fn
                    break
            if lapack_lib:
                break
        if lapack_lib is not None:
            sz = os.stat(lapack_lib)[6]  # st_size in bytes
            # A complete LAPACK build is several MB; a tiny archive means
            # ATLAS only provides its optimized subset of routines.
            if sz <= 4000 * 1024:
                message = """
*********************************************************************
    Lapack library (from ATLAS) is probably incomplete:
      size of %s is %sk (expected >4000k)

    Follow the instructions in the KNOWN PROBLEMS section of the file
    numpy/INSTALL.txt.
*********************************************************************
""" % (lapack_lib, sz / 1024)
                warnings.warn(message)
            else:
                info['language'] = 'f77'

        atlas_version, atlas_extra_info = get_atlas_version(**atlas)
        dict_append(info, **atlas_extra_info)
        self.set_info(**info)
class atlas_blas_info(atlas_info):
    """ATLAS probe restricted to the BLAS part (no LAPACK search)."""
    _lib_names = ['f77blas', 'cblas']

    def calc_info(self):
        """Locate ATLAS BLAS libraries and cblas.h; publish build info."""
        lib_dirs = self.get_lib_dirs()
        wanted = self.get_libs('atlas_libs',
                               self._lib_names + self._lib_atlas)
        atlas = self.check_libs2(lib_dirs, wanted, [])
        if atlas is None:
            return
        info = {'language': 'c',
                'define_macros': [('HAVE_CBLAS', None)]}
        # Record the directory holding cblas.h, if one can be found.
        headers = self.combine_paths(lib_dirs + self.get_include_dirs(),
                                     'cblas.h')
        if headers and headers[0]:
            dict_append(info, include_dirs=[os.path.dirname(headers[0])])
        version, extra = get_atlas_version(**atlas)
        dict_append(atlas, **extra)
        dict_append(info, **atlas)
        self.set_info(**info)
class atlas_threads_info(atlas_info):
    # Threaded ATLAS (<= 3.9): same probe, pthread-enabled library names.
    dir_env_var = ['PTATLAS', 'ATLAS']
    _lib_names = ['ptf77blas', 'ptcblas']
class atlas_blas_threads_info(atlas_blas_info):
    # BLAS-only probe for the threaded ATLAS (<= 3.9) libraries.
    dir_env_var = ['PTATLAS', 'ATLAS']
    _lib_names = ['ptf77blas', 'ptcblas']
class lapack_atlas_info(atlas_info):
    # Prefer ATLAS's own partial LAPACK library in the search order.
    _lib_names = ['lapack_atlas'] + atlas_info._lib_names
class lapack_atlas_threads_info(atlas_threads_info):
    # Threaded variant of lapack_atlas_info.
    _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names
class atlas_3_10_info(atlas_info):
    # ATLAS >= 3.10 ships everything in a single serial 'satlas' library.
    _lib_names = ['satlas']
    _lib_atlas = _lib_names
    _lib_lapack = _lib_names
class atlas_3_10_blas_info(atlas_3_10_info):
    """BLAS-only probe for ATLAS >= 3.10 (single 'satlas' library)."""
    _lib_names = ['satlas']

    def calc_info(self):
        """Find the serial ATLAS 3.10 library and its cblas.h header."""
        lib_dirs = self.get_lib_dirs()
        wanted = self.get_libs('atlas_libs', self._lib_names)
        atlas = self.check_libs2(lib_dirs, wanted, [])
        if atlas is None:
            return
        info = {'language': 'c',
                'define_macros': [('HAVE_CBLAS', None)]}
        # Record the directory holding cblas.h, if one can be found.
        headers = self.combine_paths(lib_dirs + self.get_include_dirs(),
                                     'cblas.h')
        if headers and headers[0]:
            dict_append(info, include_dirs=[os.path.dirname(headers[0])])
        version, extra = get_atlas_version(**atlas)
        dict_append(atlas, **extra)
        dict_append(info, **atlas)
        self.set_info(**info)
class atlas_3_10_threads_info(atlas_3_10_info):
    # Threaded ATLAS >= 3.10: single 'tatlas' library.
    dir_env_var = ['PTATLAS', 'ATLAS']
    _lib_names = ['tatlas']
    _lib_atlas = _lib_names
    _lib_lapack = _lib_names
class atlas_3_10_blas_threads_info(atlas_3_10_blas_info):
    # BLAS-only probe for the threaded ATLAS >= 3.10 library.
    dir_env_var = ['PTATLAS', 'ATLAS']
    _lib_names = ['tatlas']
class lapack_atlas_3_10_info(atlas_3_10_info):
    # ATLAS >= 3.10 bundles LAPACK in the same library; nothing to add.
    pass
class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):
    # Threaded ATLAS >= 3.10 bundles LAPACK too; nothing to add.
    pass
class lapack_info(system_info):
    """Probe for a plain (reference) LAPACK library."""
    section = 'lapack'
    dir_env_var = 'LAPACK'
    _lib_names = ['lapack']
    notfounderror = LapackNotFoundError

    def calc_info(self):
        """Search the library dirs for liblapack and record it as f77."""
        candidates = self.get_libs('lapack_libs', self._lib_names)
        found = self.check_libs(self.get_lib_dirs(), candidates, [])
        if found is None:
            return
        found['language'] = 'f77'  # reference LAPACK is Fortran 77
        self.set_info(**found)
class lapack_src_info(system_info):
    """Probe for a LAPACK *source* tree; yields lists of Fortran files."""
    section = 'lapack_src'
    dir_env_var = 'LAPACK_SRC'
    notfounderror = LapackSrcNotFoundError

    def get_paths(self, section, key):
        """Also look inside LAPACK*/SRC and SRC subdirectories."""
        pre_dirs = system_info.get_paths(self, section, key)
        dirs = []
        for d in pre_dirs:
            dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))
        return [d for d in dirs if os.path.isdir(d)]

    def calc_info(self):
        """Build the full list of LAPACK .f sources found on disk."""
        src_dirs = self.get_src_dirs()
        src_dir = ''
        for d in src_dirs:
            # dgesv.f marks a genuine LAPACK source directory.
            if os.path.isfile(os.path.join(d, 'dgesv.f')):
                src_dir = d
                break
        if not src_dir:
            #XXX: Get sources from netlib. May be ask first.
            return
        # The following is extracted from LAPACK-3.0/SRC/Makefile.
        # Added missing names from lapack-lite-3.1.1/SRC/Makefile
        # while keeping removed names for Lapack-3.0 compatibility.
        allaux = '''
        ilaenv ieeeck lsame lsamen xerbla
        iparmq
        '''  # *.f
        laux = '''
        bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1
        laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2
        lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre
        larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4
        lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1
        lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf
        stebz stedc steqr sterf

        larra larrc larrd larr larrk larrj larrr laneg laisnan isnan
        lazq3 lazq4
        '''  # [s|d]*.f
        lasrc = '''
        gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak
        gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv
        gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2
        geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd
        gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal
        gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd
        ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein
        hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0
        lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb
        lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp
        laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv
        lartv larz larzb larzt laswp lasyf latbs latdf latps latrd
        latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv
        pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2
        potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri
        pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs
        spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv
        sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2
        tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs
        trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs
        tzrqf tzrzf

        lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5
        '''  # [s|c|d|z]*.f
        sd_lasrc = '''
        laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l
        org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr
        orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3
        ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx
        sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd
        stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd
        sygvx sytd2 sytrd
        '''  # [s|d]*.f
        cz_lasrc = '''
        bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev
        heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv
        hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd
        hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf
        hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7
        laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe
        laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv
        spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq
        ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2
        unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr
        '''  # [c|z]*.f
        #######
        # Expand the name fragments into per-precision source lists.
        sclaux = laux + ' econd '  # s*.f
        dzlaux = laux + ' secnd '  # d*.f
        slasrc = lasrc + sd_lasrc  # s*.f
        dlasrc = lasrc + sd_lasrc  # d*.f
        clasrc = lasrc + cz_lasrc + ' srot srscl '  # c*.f
        zlasrc = lasrc + cz_lasrc + ' drot drscl '  # z*.f
        oclasrc = ' icmax1 scsum1 '  # *.f
        ozlasrc = ' izmax1 dzsum1 '  # *.f
        sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \
                  + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \
                  + ['c%s.f' % f for f in (clasrc).split()] \
                  + ['z%s.f' % f for f in (zlasrc).split()] \
                  + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]
        sources = [os.path.join(src_dir, f) for f in sources]
        # Lapack 3.1:
        src_dir2 = os.path.join(src_dir, '..', 'INSTALL')
        sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']
        # Lapack 3.2.1:
        sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']
        sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']
        sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']
        # Should we check here actual existence of source files?
        # Yes, the file listing is different between 3.0 and 3.1
        # versions.
        sources = [f for f in sources if os.path.isfile(f)]
        info = {'sources': sources, 'language': 'f77'}
        self.set_info(**info)
atlas_version_c_text = r'''
/* This file is generated from numpy/distutils/system_info.py */
void ATL_buildinfo(void);
int main(void) {
ATL_buildinfo();
return 0;
}
'''
_cached_atlas_version = {}
def get_atlas_version(**config):
    """Determine the ATLAS version for the given link configuration.

    Returns ``(version_string, info_dict)`` where *info_dict* carries
    extra macros (ATLAS_INFO / NO_ATLAS_INFO / ATLAS_REQUIRES_GFORTRAN)
    to add to the build.  Results are memoized in _cached_atlas_version.
    """
    libraries = config.get('libraries', [])
    library_dirs = config.get('library_dirs', [])

    key = (tuple(libraries), tuple(library_dirs))
    if key in _cached_atlas_version:
        return _cached_atlas_version[key]

    c = cmd_config(Distribution())
    atlas_version = None
    info = {}
    try:
        # Link and run the probe program; its output contains the version.
        s, o = c.get_output(atlas_version_c_text,
                            libraries=libraries, library_dirs=library_dirs,
                            use_tee=(system_info.verbosity > 0))
        if s and re.search(r'undefined reference to `_gfortran', o, re.M):
            # ATLAS was built with gfortran; retry with it on the link line.
            s, o = c.get_output(atlas_version_c_text,
                                libraries=libraries + ['gfortran'],
                                library_dirs=library_dirs,
                                use_tee=(system_info.verbosity > 0))
            if not s:
                warnings.warn("""
*****************************************************
Linkage with ATLAS requires gfortran. Use

  python setup.py config_fc --fcompiler=gnu95 ...

when building extension libraries that use ATLAS.
Make sure that -lgfortran is used for C++ extensions.
*****************************************************
""")
                dict_append(info, language='f90',
                            define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
    except Exception:  # failed to get version from file -- maybe on Windows
        # look at directory name
        for o in library_dirs:
            m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
            if m:
                atlas_version = m.group('version')
            if atlas_version is not None:
                break

        # final choice --- look at ATLAS_VERSION environment
        #                   variable
        if atlas_version is None:
            atlas_version = os.environ.get('ATLAS_VERSION', None)
        if atlas_version:
            dict_append(info, define_macros=[(
                'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
            ])
        else:
            dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
        return atlas_version or '?.?.?', info

    if not s:
        # The probe ran; parse the version out of its output.
        m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
        if m:
            atlas_version = m.group('version')
    if atlas_version is None:
        if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
            # Very old ATLAS releases lack ATL_buildinfo entirely.
            atlas_version = '3.2.1_pre3.3.6'
        else:
            log.info('Status: %d', s)
            log.info('Output: %s', o)

    if atlas_version == '3.2.1_pre3.3.6':
        dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
    else:
        dict_append(info, define_macros=[(
            'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
        ])
    result = _cached_atlas_version[key] = atlas_version, info
    return result
class lapack_opt_info(system_info):
    """Meta-probe choosing the best available LAPACK implementation.

    Preference order: MKL, OpenBLAS-with-LAPACK, ATLAS (3.10 threaded
    down to serial), OSX Accelerate/vecLib, then plain LAPACK/BLAS
    libraries, and finally the Fortran sources as a last resort.
    """
    notfounderror = LapackNotFoundError

    def calc_info(self):
        lapack_mkl_info = get_info('lapack_mkl')
        if lapack_mkl_info:
            self.set_info(**lapack_mkl_info)
            return

        openblas_info = get_info('openblas_lapack')
        if openblas_info:
            self.set_info(**openblas_info)
            return

        # Try the ATLAS variants from newest/threaded to oldest/serial.
        atlas_info = get_info('atlas_3_10_threads')
        if not atlas_info:
            atlas_info = get_info('atlas_3_10')
        if not atlas_info:
            atlas_info = get_info('atlas_threads')
        if not atlas_info:
            atlas_info = get_info('atlas')

        if sys.platform == 'darwin' and not atlas_info:
            # Use the system lapack from Accelerate or vecLib under OSX
            args = []
            link_args = []
            if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
               'x86_64' in get_platform() or \
               'i386' in platform.platform():
                intel = 1
            else:
                intel = 0
            if os.path.exists('/System/Library/Frameworks'
                              '/Accelerate.framework/'):
                if intel:
                    args.extend(['-msse3'])
                else:
                    args.extend(['-faltivec'])
                link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
            elif os.path.exists('/System/Library/Frameworks'
                                '/vecLib.framework/'):
                if intel:
                    args.extend(['-msse3'])
                else:
                    args.extend(['-faltivec'])
                link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
            if args:
                self.set_info(extra_compile_args=args,
                              extra_link_args=link_args,
                              define_macros=[('NO_ATLAS_INFO', 3),
                                             ('HAVE_CBLAS', None)])
                return

        need_lapack = 0
        need_blas = 0
        info = {}
        if atlas_info:
            l = atlas_info.get('define_macros', [])
            # ATLAS found, but it may lack a complete LAPACK.
            if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
               or ('ATLAS_WITHOUT_LAPACK', None) in l:
                need_lapack = 1
            info = atlas_info
        else:
            warnings.warn(AtlasNotFoundError.__doc__)
            need_blas = 1
            need_lapack = 1
            dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])

        if need_lapack:
            lapack_info = get_info('lapack')
            #lapack_info = {} ## uncomment for testing
            if lapack_info:
                dict_append(info, **lapack_info)
            else:
                warnings.warn(LapackNotFoundError.__doc__)
                lapack_src_info = get_info('lapack_src')
                if not lapack_src_info:
                    warnings.warn(LapackSrcNotFoundError.__doc__)
                    return
                dict_append(info, libraries=[('flapack_src', lapack_src_info)])

        if need_blas:
            blas_info = get_info('blas')
            if blas_info:
                dict_append(info, **blas_info)
            else:
                warnings.warn(BlasNotFoundError.__doc__)
                blas_src_info = get_info('blas_src')
                if not blas_src_info:
                    warnings.warn(BlasSrcNotFoundError.__doc__)
                    return
                dict_append(info, libraries=[('fblas_src', blas_src_info)])

        self.set_info(**info)
        return
class blas_opt_info(system_info):
    """Meta-probe choosing the best available BLAS implementation.

    Preference order: MKL, BLIS, OpenBLAS, ATLAS (3.10 threaded down to
    serial), OSX Accelerate/vecLib, then a plain BLAS library, and the
    Fortran sources as a last resort.
    """
    notfounderror = BlasNotFoundError

    def calc_info(self):
        blas_mkl_info = get_info('blas_mkl')
        if blas_mkl_info:
            self.set_info(**blas_mkl_info)
            return

        blis_info = get_info('blis')
        if blis_info:
            self.set_info(**blis_info)
            return

        openblas_info = get_info('openblas')
        if openblas_info:
            self.set_info(**openblas_info)
            return

        # Try the ATLAS variants from newest/threaded to oldest/serial.
        atlas_info = get_info('atlas_3_10_blas_threads')
        if not atlas_info:
            atlas_info = get_info('atlas_3_10_blas')
        if not atlas_info:
            atlas_info = get_info('atlas_blas_threads')
        if not atlas_info:
            atlas_info = get_info('atlas_blas')

        if sys.platform == 'darwin' and not atlas_info:
            # Use the system BLAS from Accelerate or vecLib under OSX
            args = []
            link_args = []
            if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
               'x86_64' in get_platform() or \
               'i386' in platform.platform():
                intel = 1
            else:
                intel = 0
            if os.path.exists('/System/Library/Frameworks'
                              '/Accelerate.framework/'):
                if intel:
                    args.extend(['-msse3'])
                else:
                    args.extend(['-faltivec'])
                args.extend([
                    '-I/System/Library/Frameworks/vecLib.framework/Headers'])
                link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
            elif os.path.exists('/System/Library/Frameworks'
                                '/vecLib.framework/'):
                if intel:
                    args.extend(['-msse3'])
                else:
                    args.extend(['-faltivec'])
                args.extend([
                    '-I/System/Library/Frameworks/vecLib.framework/Headers'])
                link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
            if args:
                self.set_info(extra_compile_args=args,
                              extra_link_args=link_args,
                              define_macros=[('NO_ATLAS_INFO', 3),
                                             ('HAVE_CBLAS', None)])
                return

        need_blas = 0
        info = {}
        if atlas_info:
            info = atlas_info
        else:
            warnings.warn(AtlasNotFoundError.__doc__)
            need_blas = 1
            dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])

        if need_blas:
            blas_info = get_info('blas')
            if blas_info:
                dict_append(info, **blas_info)
            else:
                warnings.warn(BlasNotFoundError.__doc__)
                blas_src_info = get_info('blas_src')
                if not blas_src_info:
                    warnings.warn(BlasSrcNotFoundError.__doc__)
                    return
                dict_append(info, libraries=[('fblas_src', blas_src_info)])

        self.set_info(**info)
        return
class blas_info(system_info):
    """Probe for a plain BLAS library, detecting CBLAS support if possible."""
    section = 'blas'
    dir_env_var = 'BLAS'
    _lib_names = ['blas']
    notfounderror = BlasNotFoundError

    def calc_info(self):
        lib_dirs = self.get_lib_dirs()
        blas_libs = self.get_libs('blas_libs', self._lib_names)
        info = self.check_libs(lib_dirs, blas_libs, [])
        if info is None:
            return
        if platform.system() == 'Windows':
            # The check for windows is needed because has_cblas uses the
            # same compiler that was used to compile Python and msvc is
            # often not installed when mingw is being used. This rough
            # treatment is not desirable, but windows is tricky.
            info['language'] = 'f77'  # XXX: is it generally true?
        else:
            lib = self.has_cblas(info)
            if lib is not None:
                info['language'] = 'c'
                info['libraries'] = [lib]
                info['define_macros'] = [('HAVE_CBLAS', None)]
        self.set_info(**info)

    def has_cblas(self, info):
        """Return 'cblas' or 'blas' if a CBLAS link test succeeds, else None.

        Compiles a tiny program against cblas.h and tries to link it
        first with -lcblas and, failing that, with -lblas.
        """
        # primitive cblas check by looking for the header and trying to link
        # cblas or blas
        res = False
        c = distutils.ccompiler.new_compiler()
        c.customize('')
        tmpdir = tempfile.mkdtemp()
        s = """#include <cblas.h>
int main(int argc, const char *argv[])
{
    double a[4] = {1,2,3,4};
    double b[4] = {5,6,7,8};
    return cblas_ddot(4, a, 1, b, 1) > 10;
}"""
        src = os.path.join(tmpdir, 'source.c')
        try:
            with open(src, 'wt') as f:
                f.write(s)

            try:
                # check we can compile (find headers)
                obj = c.compile([src], output_dir=tmpdir,
                                include_dirs=self.get_include_dirs())

                # check we can link (find library)
                # some systems have separate cblas and blas libs. First
                # check for cblas lib, and if not present check for blas lib.
                try:
                    c.link_executable(obj, os.path.join(tmpdir, "a.out"),
                                      libraries=["cblas"],
                                      library_dirs=info['library_dirs'],
                                      extra_postargs=info.get('extra_link_args', []))
                    res = "cblas"
                except distutils.ccompiler.LinkError:
                    c.link_executable(obj, os.path.join(tmpdir, "a.out"),
                                      libraries=["blas"],
                                      library_dirs=info['library_dirs'],
                                      extra_postargs=info.get('extra_link_args', []))
                    res = "blas"
            except distutils.ccompiler.CompileError:
                res = None
        finally:
            shutil.rmtree(tmpdir)
        return res
class openblas_info(blas_info):
    """Probe for OpenBLAS (provides CBLAS; LAPACK check is a no-op here)."""
    section = 'openblas'
    dir_env_var = 'OPENBLAS'
    _lib_names = ['openblas']
    notfounderror = BlasNotFoundError

    def check_embedded_lapack(self, info):
        """Plain OpenBLAS is accepted without testing for LAPACK symbols."""
        return True

    def calc_info(self):
        """Find the OpenBLAS library and record it as a CBLAS provider."""
        lib_dirs = self.get_lib_dirs()
        # New-style 'libraries' option, falling back to the 1.8.0-era key.
        wanted = self.get_libs('libraries', self._lib_names)
        if wanted == self._lib_names:
            wanted = self.get_libs('openblas_libs', self._lib_names)
        found = self.check_libs(lib_dirs, wanted, [])
        if found is None:
            return
        # Merge any user-supplied extra flags before the LAPACK check.
        dict_append(found, **self.calc_extra_info())
        if not self.check_embedded_lapack(found):
            return
        found['language'] = 'c'
        found['define_macros'] = [('HAVE_CBLAS', None)]
        self.set_info(**found)
class openblas_lapack_info(openblas_info):
    """OpenBLAS probe that additionally verifies LAPACK is embedded.

    Some OpenBLAS builds omit the LAPACK routines; this subclass accepts
    the library only if a link test against a LAPACK symbol succeeds.
    """
    section = 'openblas'
    dir_env_var = 'OPENBLAS'
    _lib_names = ['openblas']
    notfounderror = BlasNotFoundError

    def check_embedded_lapack(self, info):
        """Return True if the located OpenBLAS exports LAPACK routines.

        Compiles and links a tiny C program referencing ``zungqr_`` (the
        Fortran-mangled name of a LAPACK routine) against the libraries
        described by *info*.
        """
        res = False
        c = distutils.ccompiler.new_compiler()
        c.customize('')
        tmpdir = tempfile.mkdtemp()
        # BUG FIX: declare the trailing-underscore symbol that is actually
        # called; the old code declared ``zungqr`` but called ``zungqr_``,
        # relying on an implicit declaration (invalid since C99).
        s = """void zungqr_();
int main(int argc, const char *argv[])
{
    zungqr_();
    return 0;
}"""
        src = os.path.join(tmpdir, 'source.c')
        out = os.path.join(tmpdir, 'a.out')
        # 'extra_link_args' is optional in info; only KeyError is expected.
        try:
            extra_args = info['extra_link_args']
        except KeyError:
            extra_args = []
        try:
            with open(src, 'wt') as f:
                f.write(s)
            obj = c.compile([src], output_dir=tmpdir)
            try:
                c.link_executable(obj, out, libraries=info['libraries'],
                                  library_dirs=info['library_dirs'],
                                  extra_postargs=extra_args)
                res = True
            except distutils.ccompiler.LinkError:
                res = False
        finally:
            # Always clean up the scratch directory, even on failure.
            shutil.rmtree(tmpdir)
        return res
class blis_info(blas_info):
    """Probe for the BLIS BLAS implementation."""
    section = 'blis'
    dir_env_var = 'BLIS'
    _lib_names = ['blis']
    notfounderror = BlasNotFoundError

    def calc_info(self):
        """Find libblis and its headers; publish CBLAS-capable build info."""
        wanted = self.get_libs('libraries', self._lib_names)
        if wanted == self._lib_names:
            # Fall back to the section-specific option name.
            wanted = self.get_libs('blis_libs', self._lib_names)
        found = self.check_libs2(self.get_lib_dirs(), wanted, [])
        if found is None:
            return
        dict_append(found,
                    language='c',
                    define_macros=[('HAVE_CBLAS', None)],
                    include_dirs=self.get_include_dirs())
        self.set_info(**found)
class blas_src_info(system_info):
    """Probe for a reference-BLAS *source* tree; yields Fortran file lists."""
    section = 'blas_src'
    dir_env_var = 'BLAS_SRC'
    notfounderror = BlasSrcNotFoundError

    def get_paths(self, section, key):
        """Also look inside 'blas' subdirectories of each search path."""
        pre_dirs = system_info.get_paths(self, section, key)
        dirs = []
        for d in pre_dirs:
            dirs.extend([d] + self.combine_paths(d, ['blas']))
        return [d for d in dirs if os.path.isdir(d)]

    def calc_info(self):
        """Build the list of BLAS level 1/2/3 .f sources found on disk."""
        src_dirs = self.get_src_dirs()
        src_dir = ''
        for d in src_dirs:
            # daxpy.f marks a genuine BLAS source directory.
            if os.path.isfile(os.path.join(d, 'daxpy.f')):
                src_dir = d
                break
        if not src_dir:
            #XXX: Get sources from netlib. May be ask first.
            return
        blas1 = '''
        caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot
        dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2
        srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg
        dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax
        snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap
        scabs1
        '''
        blas2 = '''
        cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv
        chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv
        dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv
        sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger
        stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc
        zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2
        ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv
        '''
        blas3 = '''
        cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k
        dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm
        ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm
        '''
        sources = [os.path.join(src_dir, f + '.f') \
                   for f in (blas1 + blas2 + blas3).split()]
        #XXX: should we check here actual existence of source files?
        sources = [f for f in sources if os.path.isfile(f)]
        info = {'sources': sources, 'language': 'f77'}
        self.set_info(**info)
class x11_info(system_info):
    """Probe for the X11 client library and headers (non-Windows only)."""
    section = 'x11'
    notfounderror = X11NotFoundError

    def __init__(self):
        system_info.__init__(self,
                             default_lib_dirs=default_x11_lib_dirs,
                             default_include_dirs=default_x11_include_dirs)

    def calc_info(self):
        """Locate libX11 and the X11/X.h header; record them if found."""
        if sys.platform in ['win32']:
            return
        wanted = self.get_libs('x11_libs', ['X11'])
        found = self.check_libs(self.get_lib_dirs(), wanted, [])
        if found is None:
            return
        # Attach the first include directory that actually holds X11/X.h.
        for incdir in self.get_include_dirs():
            if self.combine_paths(incdir, 'X11/X.h'):
                dict_append(found, include_dirs=[incdir])
                break
        self.set_info(**found)
class _numpy_info(system_info):
    """Base probe for a Numeric/numarray/numpy installation.

    Determines the include directories and version macros for the array
    package named by ``modulename``.
    """
    section = 'Numeric'
    modulename = 'Numeric'
    notfounderror = NumericNotFoundError

    def __init__(self):
        include_dirs = []
        try:
            module = __import__(self.modulename)
            prefix = []
            # Derive the installation prefix from the module's file path.
            for name in module.__file__.split(os.sep):
                if name == 'lib':
                    break
                prefix.append(name)

            # Ask numpy for its own include path before attempting
            # anything else
            try:
                include_dirs.append(getattr(module, 'get_include')())
            except AttributeError:
                pass

            include_dirs.append(distutils.sysconfig.get_python_inc(
                prefix=os.sep.join(prefix)))
        except ImportError:
            pass
        py_incl_dir = distutils.sysconfig.get_python_inc()
        include_dirs.append(py_incl_dir)
        py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
        if py_pincl_dir not in include_dirs:
            include_dirs.append(py_pincl_dir)
        for d in default_include_dirs:
            d = os.path.join(d, os.path.basename(py_incl_dir))
            if d not in include_dirs:
                include_dirs.append(d)
        system_info.__init__(self,
                             default_lib_dirs=[],
                             default_include_dirs=include_dirs)

    def calc_info(self):
        try:
            module = __import__(self.modulename)
        except ImportError:
            return
        info = {}
        macros = []
        # Record a <MODULE>_VERSION macro from whichever attribute exists.
        for v in ['__version__', 'version']:
            vrs = getattr(module, v, None)
            if vrs is None:
                continue
            macros = [(self.modulename.upper() + '_VERSION',
                       '"\\"%s\\""' % (vrs)),
                      (self.modulename.upper(), None)]
            break
        dict_append(info, define_macros=macros)
        include_dirs = self.get_include_dirs()
        inc_dir = None
        for d in include_dirs:
            # The directory must contain <modulename>/arrayobject.h.
            if self.combine_paths(d,
                                  os.path.join(self.modulename,
                                               'arrayobject.h')):
                inc_dir = d
                break
        if inc_dir is not None:
            dict_append(info, include_dirs=[inc_dir])
        if info:
            self.set_info(**info)
        return
class numarray_info(_numpy_info):
    # Probe for the legacy numarray package.
    section = 'numarray'
    modulename = 'numarray'
class Numeric_info(_numpy_info):
    # Probe for the legacy Numeric package.
    section = 'Numeric'
    modulename = 'Numeric'
class numpy_info(_numpy_info):
    # Probe for numpy itself.
    section = 'numpy'
    modulename = 'numpy'
class numerix_info(system_info):
    """Select an array package (numpy/Numeric/numarray) via $NUMERIX.

    Falls back to whichever of the three packages imports successfully,
    preferring numpy; the chosen name is written back to os.environ.
    """
    section = 'numerix'

    def calc_info(self):
        which = None, None
        if os.getenv("NUMERIX"):
            which = os.getenv("NUMERIX"), "environment var"
        # If all the above fail, default to numpy.
        if which[0] is None:
            which = "numpy", "defaulted"
            try:
                import numpy
                which = "numpy", "defaulted"
            except ImportError:
                msg1 = str(get_exception())
                try:
                    import Numeric
                    which = "numeric", "defaulted"
                except ImportError:
                    msg2 = str(get_exception())
                    try:
                        import numarray
                        which = "numarray", "defaulted"
                    except ImportError:
                        # None of the three imported: log all failures.
                        msg3 = str(get_exception())
                        log.info(msg1)
                        log.info(msg2)
                        log.info(msg3)
        which = which[0].strip().lower(), which[1]
        if which[0] not in ["numeric", "numarray", "numpy"]:
            raise ValueError("numerix selector must be either 'Numeric' "
                             "or 'numarray' or 'numpy' but the value obtained"
                             " from the %s was '%s'." % (which[1], which[0]))
        os.environ['NUMERIX'] = which[0]
        self.set_info(**get_info(which[0]))
class f2py_info(system_info):
    """Expose the f2py runtime source (fortranobject.c) as build info."""

    def calc_info(self):
        """Record the fortranobject.c source shipped with numpy.f2py."""
        try:
            import numpy.f2py as f2py
        except ImportError:
            return
        src_root = os.path.join(os.path.dirname(f2py.__file__), 'src')
        self.set_info(sources=[os.path.join(src_root, 'fortranobject.c')],
                      include_dirs=[src_root])
class boost_python_info(system_info):
    """Probe for a Boost.Python source tree, exposed as an in-tree library."""
    section = 'boost_python'
    dir_env_var = 'BOOST'

    def get_paths(self, section, key):
        """Also descend into 'boost*' subdirectories of each search path."""
        found = []
        for base in system_info.get_paths(self, section, key):
            found.extend([base] + self.combine_paths(base, ['boost*']))
        return [p for p in found if os.path.isdir(p)]

    def calc_info(self):
        """Collect the Boost.Python .cpp sources for in-tree compilation."""
        src_dir = ''
        for d in self.get_src_dirs():
            # A Boost tree is recognized by libs/python/src/module.cpp.
            if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',
                                           'module.cpp')):
                src_dir = d
                break
        if not src_dir:
            return
        py_incl_dirs = [distutils.sysconfig.get_python_inc()]
        plat_inc = distutils.sysconfig.get_python_inc(plat_specific=True)
        if plat_inc not in py_incl_dirs:
            py_incl_dirs.append(plat_inc)
        srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
        sources = glob(os.path.join(srcs_dir, '*.cpp'))
        sources += glob(os.path.join(srcs_dir, '*', '*.cpp'))
        self.set_info(libraries=[('boost_python_src',
                                  {'include_dirs': [src_dir] + py_incl_dirs,
                                   'sources': sources})],
                      include_dirs=[src_dir])
class agg2_info(system_info):
    """Probe for an Anti-Grain Geometry (agg2) source tree."""
    section = 'agg2'
    dir_env_var = 'AGG2'

    def get_paths(self, section, key):
        """Also descend into 'agg2*' subdirectories of each search path."""
        found = []
        for base in system_info.get_paths(self, section, key):
            found.extend([base] + self.combine_paths(base, ['agg2*']))
        return [p for p in found if os.path.isdir(p)]

    def calc_info(self):
        """Collect agg2 sources plus the platform backend as a library."""
        src_dir = ''
        for d in self.get_src_dirs():
            # The tree is recognized by src/agg_affine_matrix.cpp.
            if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):
                src_dir = d
                break
        if not src_dir:
            return
        if sys.platform == 'win32':
            sources = glob(os.path.join(src_dir, 'src', 'platform',
                                        'win32', 'agg_win32_bmp.cpp'))
        else:
            sources = glob(os.path.join(src_dir, 'src', '*.cpp'))
            sources += [os.path.join(src_dir, 'src', 'platform',
                                     'X11',
                                     'agg_platform_support.cpp')]
        inc = os.path.join(src_dir, 'include')
        self.set_info(libraries=[('agg2_src',
                                  {'sources': sources,
                                   'include_dirs': [inc]})],
                      include_dirs=[inc])
class _pkg_config_info(system_info):
    """Base class for probes that query a pkg-config style executable.

    Subclasses set ``section`` / ``append_config_exe`` (the package name
    passed to the tool) and optionally the version/release macro names;
    this class runs the config tool and translates its output into
    distutils build info (macros, libraries, dirs, extra args).
    """
    section = None
    config_env_var = 'PKG_CONFIG'
    default_config_exe = 'pkg-config'
    append_config_exe = ''
    version_macro_name = None
    release_macro_name = None
    version_flag = '--modversion'
    cflags_flag = '--cflags'

    def get_config_exe(self):
        """Name of the config executable (env var override, else default)."""
        if self.config_env_var in os.environ:
            return os.environ[self.config_env_var]
        return self.default_config_exe

    def get_config_output(self, config_exe, option):
        """Run ``config_exe <package> <option>``; return stdout on success."""
        cmd = config_exe + ' ' + self.append_config_exe + ' ' + option
        s, o = exec_command(cmd, use_tee=0)
        if not s:
            return o

    def calc_info(self):
        exe_name = self.get_config_exe()
        config_exe = find_executable(exe_name)
        if not config_exe:
            # BUG FIX: report the executable we searched for rather than
            # the None that find_executable returned.
            log.warn('File not found: %s. Cannot determine %s info.' \
                  % (exe_name, self.section))
            return
        info = {}
        macros = []
        libraries = []
        library_dirs = []
        include_dirs = []
        extra_link_args = []
        extra_compile_args = []
        version = self.get_config_output(config_exe, self.version_flag)
        if version:
            macros.append((self.__class__.__name__.split('.')[-1].upper(),
                           '"\\"%s\\""' % (version)))
            if self.version_macro_name:
                macros.append((self.version_macro_name + '_%s'
                               % (version.replace('.', '_')), None))
        if self.release_macro_name:
            release = self.get_config_output(config_exe, '--release')
            if release:
                macros.append((self.release_macro_name + '_%s'
                               % (release.replace('.', '_')), None))
        opts = self.get_config_output(config_exe, '--libs')
        if opts:
            for opt in opts.split():
                if opt[:2] == '-l':
                    libraries.append(opt[2:])
                elif opt[:2] == '-L':
                    library_dirs.append(opt[2:])
                else:
                    extra_link_args.append(opt)
        opts = self.get_config_output(config_exe, self.cflags_flag)
        if opts:
            for opt in opts.split():
                if opt[:2] == '-I':
                    include_dirs.append(opt[2:])
                elif opt[:2] == '-D':
                    if '=' in opt:
                        # BUG FIX: split only on the first '=' so macro
                        # values that themselves contain '=' survive.
                        n, v = opt[2:].split('=', 1)
                        macros.append((n, v))
                    else:
                        macros.append((opt[2:], None))
                else:
                    extra_compile_args.append(opt)
        if macros:
            dict_append(info, define_macros=macros)
        if libraries:
            dict_append(info, libraries=libraries)
        if library_dirs:
            dict_append(info, library_dirs=library_dirs)
        if include_dirs:
            dict_append(info, include_dirs=include_dirs)
        if extra_link_args:
            dict_append(info, extra_link_args=extra_link_args)
        if extra_compile_args:
            dict_append(info, extra_compile_args=extra_compile_args)
        if info:
            self.set_info(**info)
        return
class wx_info(_pkg_config_info):
section = 'wx'
config_env_var = 'WX_CONFIG'
default_config_exe = 'wx-config'
append_config_exe = ''
version_macro_name = 'WX_VERSION'
release_macro_name = 'WX_RELEASE'
version_flag = '--version'
cflags_flag = '--cxxflags'
class gdk_pixbuf_xlib_2_info(_pkg_config_info):
section = 'gdk_pixbuf_xlib_2'
append_config_exe = 'gdk-pixbuf-xlib-2.0'
version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'
class gdk_pixbuf_2_info(_pkg_config_info):
section = 'gdk_pixbuf_2'
append_config_exe = 'gdk-pixbuf-2.0'
version_macro_name = 'GDK_PIXBUF_VERSION'
class gdk_x11_2_info(_pkg_config_info):
section = 'gdk_x11_2'
append_config_exe = 'gdk-x11-2.0'
version_macro_name = 'GDK_X11_VERSION'
class gdk_2_info(_pkg_config_info):
section = 'gdk_2'
append_config_exe = 'gdk-2.0'
version_macro_name = 'GDK_VERSION'
class gdk_info(_pkg_config_info):
section = 'gdk'
append_config_exe = 'gdk'
version_macro_name = 'GDK_VERSION'
class gtkp_x11_2_info(_pkg_config_info):
section = 'gtkp_x11_2'
append_config_exe = 'gtk+-x11-2.0'
version_macro_name = 'GTK_X11_VERSION'
class gtkp_2_info(_pkg_config_info):
section = 'gtkp_2'
append_config_exe = 'gtk+-2.0'
version_macro_name = 'GTK_VERSION'
class xft_info(_pkg_config_info):
section = 'xft'
append_config_exe = 'xft'
version_macro_name = 'XFT_VERSION'
class freetype2_info(_pkg_config_info):
section = 'freetype2'
append_config_exe = 'freetype2'
version_macro_name = 'FREETYPE2_VERSION'
class amd_info(system_info):
section = 'amd'
dir_env_var = 'AMD'
_lib_names = ['amd']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
amd_libs = self.get_libs('amd_libs', self._lib_names)
info = self.check_libs(lib_dirs, amd_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, 'amd.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_AMD_H', None)],
swig_opts=['-I' + inc_dir])
self.set_info(**info)
return
class umfpack_info(system_info):
section = 'umfpack'
dir_env_var = 'UMFPACK'
notfounderror = UmfpackNotFoundError
_lib_names = ['umfpack']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
umfpack_libs = self.get_libs('umfpack_libs', self._lib_names)
info = self.check_libs(lib_dirs, umfpack_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_UMFPACK_H', None)],
swig_opts=['-I' + inc_dir])
amd = get_info('amd')
dict_append(info, **get_info('amd'))
self.set_info(**info)
return
def combine_paths(*args, **kws):
""" Return a list of existing paths composed by all combinations of
items from arguments.
"""
r = []
for a in args:
if not a:
continue
if is_string(a):
a = [a]
r.append(a)
args = r
if not args:
return []
if len(args) == 1:
result = reduce(lambda a, b: a + b, map(glob, args[0]), [])
elif len(args) == 2:
result = []
for a0 in args[0]:
for a1 in args[1]:
result.extend(glob(os.path.join(a0, a1)))
else:
result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:]))
verbosity = kws.get('verbosity', 1)
log.debug('(paths: %s)', ','.join(result))
return result
language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3}
inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'}
def dict_append(d, **kws):
languages = []
for k, v in kws.items():
if k == 'language':
languages.append(v)
continue
if k in d:
if k in ['library_dirs', 'include_dirs',
'extra_compile_args', 'extra_link_args',
'runtime_library_dirs', 'define_macros']:
[d[k].append(vv) for vv in v if vv not in d[k]]
else:
d[k].extend(v)
else:
d[k] = v
if languages:
l = inv_language_map[max([language_map.get(l, 0) for l in languages])]
d['language'] = l
return
def parseCmdLine(argv=(None,)):
import optparse
parser = optparse.OptionParser("usage: %prog [-v] [info objs]")
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False,
help='be verbose and print more messages')
opts, args = parser.parse_args(args=argv[1:])
return opts, args
def show_all(argv=None):
import inspect
if argv is None:
argv = sys.argv
opts, args = parseCmdLine(argv)
if opts.verbose:
log.set_threshold(log.DEBUG)
else:
log.set_threshold(log.INFO)
show_only = []
for n in args:
if n[-5:] != '_info':
n = n + '_info'
show_only.append(n)
show_all = not show_only
_gdict_ = globals().copy()
for name, c in _gdict_.items():
if not inspect.isclass(c):
continue
if not issubclass(c, system_info) or c is system_info:
continue
if not show_all:
if name not in show_only:
continue
del show_only[show_only.index(name)]
conf = c()
conf.verbosity = 2
r = conf.get_info()
if show_only:
log.info('Info classes not defined: %s', ','.join(show_only))
if __name__ == "__main__":
show_all()
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
343b803e15102a3375ee6ccb4bb67ca91238249e | 562c79cf793f4f138cff206d718b5ce4a48f2add | /Catalog/urls.py | 3059dc2b7a9a20e23a43caa37b26b7a7780a3ecc | [] | no_license | iamhunter/Worthwhile_Django | 5bd151b13503809a6076ae3bba23b267ad9b299c | a7f72dab288637e5d107331bde76850656593188 | refs/heads/master | 2021-01-21T11:24:00.939507 | 2017-03-02T19:15:26 | 2017-03-02T19:15:26 | 83,571,339 | 0 | 0 | null | 2017-03-01T19:24:23 | 2017-03-01T15:42:08 | Python | UTF-8 | Python | false | false | 463 | py | from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
url(r'^courses/$', views.index, name='index'),
url(r'^add/$', views.add, name='add'),
url(r'^courses/(?P<course_id>[0-9]+)$', views.detail, name='detail'),
url(r'^courses/(?P<course_id>[0-9]+)/change/$', views.change, name='change'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"andr3whunter@gmail.com"
] | andr3whunter@gmail.com |
a6a29c15db48347b902092bbe69ff4cf8a047f4b | 5346d8161e1cfcc57da37b09736f46d00fc58460 | /fasta_majority_consensus_DIR.py | 57e1eac74bb11d9f9c1b84700745f1d366b6ba09 | [] | no_license | mscharmann/tools | b4dbf7a99f9609ea7968faf3acf0808898aa2202 | 8c7fad9e38ece5b8f57176db6e605ea6a520fffd | refs/heads/master | 2022-10-26T13:50:36.869517 | 2022-09-27T15:47:26 | 2022-09-27T15:47:26 | 130,751,495 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py |
import sys, os
def read_fasta (infile):
fasta_dict = {}
seq = ""
name = "dummy"
with open(infile, "r") as F:
for line in F:
if line.startswith(">"):
fasta_dict[name] = seq
name = line.lstrip(">").rstrip("\n")
seq = ""
else:
seq += line.strip("\n")
# last record:
fasta_dict[name] = seq
del fasta_dict["dummy"]
seqs = list( fasta_dict.values() )
return seqs
def make_maj_consensus (seqs):
consensus_seq = ""
for i in range(len(seqs[0])):
NUCs = [x[i] for x in seqs if not x[i] == "-"]
unique_NUCs = list(set(NUCs))
counts = []
for A in unique_NUCs:
counts.append( NUCs.count(A) )
maxidx = counts.index(max(counts))
consensus_seq += unique_NUCs[maxidx]
return consensus_seq
####
if len(sys.argv) != 2:
print ( "usage: python translate_CDS_to_PEP.py cds_fasta_dir" )
exit()
infiles = [x for x in os.listdir(sys.argv[1]) if x. endswith(".CDS.aln")]
outlines = []
cnt = 0
for f in infiles:
cnt += 1
print(cnt)
seqs = read_fasta(sys.argv[1] + "/" + f)
if len(seqs) > 0:
cons = make_maj_consensus(seqs)
outlines.append(">" + f.strip(".CDS.aln"))
outlines.append(cons)
else:
with open("maj_cons_problems_log.txt", "a") as O:
O.write(f + "\n")
print ( f )
with open("maj_cons.fasta", "w") as O:
O.write("\n".join(outlines) + "\n")
| [
"scharmann@Papyrus-Scharmann.local"
] | scharmann@Papyrus-Scharmann.local |
b7455aa65c1f8d4d64eb46619c21e6a3c2fc508c | 29adee7d539002d8adf56b9772958c2fec1e66a0 | /datascience/farToCel.py | 69cecc9a23031b5941aa20e9ad5e13adeb6676aa | [] | no_license | ChivukulaVirinchi/python-virinchi | 4e71227c162b8d8eac2796187f257e136b710038 | 5e7a39f42b42900ecf54fe169100764b7ac79d23 | refs/heads/master | 2022-04-23T10:23:24.090397 | 2020-04-18T11:55:26 | 2020-04-18T11:55:26 | 256,741,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | far = float(input('Enter the temperature in Farenheit\n>>>'))
cel = (far - 32) *5//9
print('You entered Farenheit as', far, 'and the celcius value is', cel) | [
"virinchi@gurujada.com"
] | virinchi@gurujada.com |
a7259e3e503b786aac94055c070c70da31487417 | 5af1b57c6e883b62f51de0def33cf5a313f9e549 | /add_bottle.py | 91e5a5fd40f3ab4a38092e7efd6f18e728697d97 | [] | no_license | jayanth0906/testscriptsims | ad32a8beab707e8defb35a86e7b1ae534a0611bd | c6cc0ae2091e0bf92eb9d86ecabb50135b57093c | refs/heads/master | 2020-10-02T02:17:13.834030 | 2019-12-12T19:12:09 | 2019-12-12T19:12:09 | 227,678,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,867 | py | import unittest
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
class ims_Test6(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome()
def test_login(self):
driver = self.driver
driver.maximize_window()
driver.get("https://maverickims.herokuapp.com/")
elem = driver.find_element_by_xpath('//*[@ id = "navbarSupportedContent"] / ul[2] / li / a')
elem.click()
time.sleep(0.5)
elem = driver.find_element_by_id("id_username")
elem.send_keys("warehouse")
elem = driver.find_element_by_id("id_password")
elem.send_keys("winventory")
elem = driver.find_element_by_xpath(" /html/body/div/form/p[3]/input")
elem.click()
time.sleep(3)
elem = driver.find_element_by_xpath(" /html/body/div[2]/div/div[3]/a")
elem.click()
time.sleep(2)
elem = driver.find_element_by_xpath("/html/body/div/div/div/div/div/p[2]/a")
elem.click()
time.sleep(2)
elem = driver.find_element_by_xpath("/html/body/div/div/div/div/div/p[1]/a")
elem.click()
time.sleep(2)
elem = driver.find_element_by_id("id_emp_init")
elem.send_keys("JT")
elem = driver.find_element_by_id("id_fill")
elem.send_keys("Argon")
elem = driver.find_element_by_id("id_serial_number")
elem.send_keys("12")
elem = driver.find_element_by_id("id_barcode")
elem.send_keys("12")
elem = driver.find_element_by_xpath("/html/body/div/div/div/div/form/div[7]/button")
elem.click()
time.sleep(2)
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main()
| [
"42557399+jayanth0906@users.noreply.github.com"
] | 42557399+jayanth0906@users.noreply.github.com |
23d6d5c985449a9537a51ae1e5dcbd2e1c388212 | 893a83cf9b42ebd6b66d92fe037af10952ac671b | /PythonPC/catchdy/into_mysql.py | 3e84fd1954ed02e07bd03aecdcff9110e5f30968 | [] | no_license | Nigihayamiim/StudyPython | 777f3cd7b55eaafb08f0f8e227a59fe1e87efafd | d2fe2e1fed74df8fdee30839d575421e0e0856fe | refs/heads/master | 2023-01-08T12:55:47.726155 | 2020-11-11T08:57:16 | 2020-11-11T08:57:16 | 264,388,409 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,727 | py | import re
import warnings
from queue import Queue
from threading import Thread
from time import sleep
from random import randint
import pymysql as pymysql
import requests
from fake_useragent import UserAgent
class CrawlInfo1(Thread):
def __init__(self, url_queue):
Thread.__init__(self)
self.url_queue = url_queue
def run(self):
headers = {
"User-Agent": UserAgent().chrome,
"Cookie": "_ga=GA1.2.157089973.1589035569; remember_web_59ba36addc2b2f9401580f014c7f58ea4e30989d=eyJpdiI6Ik96REdIclMzb3BWXC8xNDBMRjNSSzRBPT0iLCJ2YWx1ZSI6IlRrc01cL1VtNUxMRktEUmU5MkxzM0VYbm9PV1pGWGVnNUpJOG1xS0taeFBCN0g2Zlp2Z2J5eFpMNk5KMmZmbkp0SE8yZUNJeXFBSEhsekpUd1p3cHZ5MjdZc0xQakFMTitEcU5sYk95bVkrcHVOSFVYaVBRSXgxM1pwZWlvcFkyRk9jUFg4c3Q1Rit4M3VKeEVZSXBwM1E9PSIsIm1hYyI6IjM1MzM0ZmIzMWVkOTAwMmNhZWIxNjk3MDg3OGQwYTMwYTFlNTk3MGYyOTg1MzVkZmZmZmM3Mjg0MmYyNzdiYTAifQ%3D%3D; __gads=ID=03d2175f76b8ad70:T=1589035656:S=ALNI_MYaduCxeGubch0zBJrX4ZV9ULPFxg; Hm_lvt_09720a8dd79381f0fd2793fad156ddfa=1590381533,1590477750,1590551460,1590816005; _gid=GA1.2.1541124885.1590816005; XSRF-TOKEN=eyJpdiI6IjFKRWtcL2V0Q3lHVHJ1blREa3ZmRDR3PT0iLCJ2YWx1ZSI6IkdkMFdyUmJrNWwwVERrRXdkTnhBUW5Ma1gzRVwveVRIVmJyeURBb2FiSnN4eFFpK1F3MzJCTTdWSWo2Yk5qVmR4IiwibWFjIjoiN2Y5ZTczNTg1MjdhZjZjNmFmNDQzNmQyZDhiZDI2ZTJkZDYzNWI0MzI4ZmVlZjNkMGRlMDlhMjgwZDI2ZGJiMyJ9; toobigdata_session=eyJpdiI6IkJYa1wvdzg5bWJnSTl4dUpXd3lWS3ZBPT0iLCJ2YWx1ZSI6IlVidUMwbXJaMzFxMVMrWUVEc0FEa3E3ZDk5NWJpSFBxZWV2QlwvRjBHOEIyUWlVdVZGVmdNMm11d1JFSnpaT0xvIiwibWFjIjoiZTU3YWFhNzRmMDU2ZDJlNmQzYTYxMjk0MWEwZTk2ZWNkYzE3MDRlYThkNjQyMmRmNDViZTZiZjExMzE3ODU2MiJ9; Hm_lpvt_09720a8dd79381f0fd2793fad156ddfa=1590816095; Hm_cv_09720a8dd79381f0fd2793fad156ddfa=1*email*odgLGvxs0_rWSDfFPs8fBjO2SxIQ%40wechat.com!*!*!1*role*free"
}
proxies = {
"http": "http://0502fq1t1m:0502fq1t1m@59.55.158.225:65000",
"https": "http://0502fq1t1m:0502fq1t1m@59.55.158.225:65000"
}
num = 1
while not self.url_queue.empty():
try:
response = requests.get(self.url_queue.get(), proxies=proxies, headers=headers)
except Exception:
print("第"+str(num)+"页数据出现超时,尝试再次连接")
response = requests.get(self.url_queue.get(), proxies=proxies, headers=headers)
if response.status_code == 200:
response.encoding = "utf-8"
info = response.text
# print(info)
infos = re.findall(
r'<div class="col-md-2">\s+<a href="/douyin/promotion/g/(\d{19})" target="_blank"',
info)
for shop_id in infos:
try:
result = cursor.execute(sql_goods_id, [shop_id])
except Exception as e:
client.rollback()
if result:
shop_url_queue.put(base_shop_url.format(shop_id))
print("第" + str(num) + "页")
num += 1
class CrawlInfo2(Thread):
def __init__(self, shop_url_queue, end_no, start_no):
Thread.__init__(self)
self.shop_url_queue = shop_url_queue
self.end_no = end_no
self.start_no = start_no
def run(self):
headers = {
"User-Agent": UserAgent().chrome
}
proxies = {
"http": "http://0502fq1t1m:0502fq1t1m@59.55.158.225:65000",
"https": "http://0502fq1t1m:0502fq1t1m@59.55.158.225:65000"
}
num = 1
count = 1
while not self.shop_url_queue.empty():
shop_url = self.shop_url_queue.get()
try:
response = requests.get(shop_url, headers=headers, proxies=proxies)
except Exception:
print("第"+str(count)+"条数据出现超时,尝试再次连接")
response = requests.get(shop_url, headers=headers, proxies=proxies)
code = response.status_code
print("这是第" + str(count) + "条数据,地址为:" + shop_url)
sleep(randint(0, 1))
count += 1
if code == 200:
response.encoding = "utf-8"
shop = response.text
shop_position = ''.join(re.findall(r'"product_province_name":"(.*?)"', shop))
shop_id = ''.join(re.findall(r'"shop_id":"(.*?)"', shop))
shop_name = ''.join(re.findall(r'"shop_name":"(.*?)"', shop))
shop_tel = ''.join(re.findall(r'"shop_tel":"(.*?)"', shop))
product_id = ''.join(re.findall(r'"product_id":"(.*?)"', shop))
good_name = ''.join(re.findall(r'"name":"(.*?)"', shop))
## result = cursor.execute(sql_goods, [product_id, good_name, shop_id])
result = cursor.execute(sql_shop, [shop_id, shop_name, shop_tel, shop_position, product_id])
client.commit()
if result:
print("成功添加了"+str(num)+"条数据")
num += 1
print(str(start_no) + "页到" + str(end_no) + "页的内容搜集完毕")
if __name__ == '__main__':
base_url = "https://toobigdata.com/douyin/promotions?page={}"
base_shop_url = "https://ec.snssdk.com/product/fxgajaxstaticitem?id={}&b_type_new=0&device_id=0"
client = pymysql.connect(host='49.233.3.208', port=3306, user='root', password='x1113822624', charset='utf8',
db='forTel')
cursor = client.cursor()
sql_goods = 'insert ignore into goods values (%s, %s, %s)'
sql_shop = 'insert ignore into shop(shop_id,shop_name,shop_tel,shop_position,good_id) values (%s,%s,%s,%s,%s)'
sql_goods_id = 'insert ignore into goods_id values (%s)'
warnings.filterwarnings("ignore")
start_No = 1069
for i in range(1, 11):
print("开始咯!")
url_queue = Queue()
shop_url_queue = Queue()
start_no = start_No
end_no = start_no + 5
for pn in range(start_no, end_no):
url_queue.put(base_url.format(pn))
# crawl1_list = []
# for i in range(0, 3):
# crawl1 = CrawlInfo1(url_queue)
# crawl1_list.append(crawl1)
# crawl1.start()
# for crawl1s in crawl1_list:
# crawl1s.join()
#
# for i in range(0, 3):
# crawl2 = CrawlInfo2(shop_url_queue)
# crawl2.start()
crawl1 = CrawlInfo1(url_queue)
crawl1.start()
crawl1.join()
crawl2 = CrawlInfo2(shop_url_queue, end_no, start_no)
crawl2.start()
crawl2.join()
start_No += 5
sleep(randint(1, 5))
| [
"xzyim@outlook.com"
] | xzyim@outlook.com |
3799a3db294967a1c44716bf46c71fb49f651f0c | 3f1c4553c1b2e39351a501148022a66b6f5e884a | /blog/migrations/0001_initial.py | 8e4f641285bbce9294bec4b4b8cf16432c4a5e6b | [] | no_license | 6110110556/3SB04 | 94ccf5b1ecd69fe33d2c66d02913e20b99197ef8 | 46291a04a057b697d48df62de214480e630ac6ec | refs/heads/master | 2023-02-19T04:46:35.500450 | 2021-01-23T13:46:18 | 2021-01-23T13:46:18 | 332,205,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # Generated by Django 2.2.17 on 2021-01-23 11:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"6110110556@psu.ac.th"
] | 6110110556@psu.ac.th |
18ae13ab5fe9a37d93a1481fdd931968fef0aec0 | 76393fbde45bf9261ee884f61a3f1caa3ee4ecc4 | /Assignment2_AustinGailey.py | af5bb122262763b70625885757a2013cfcd12eab | [] | no_license | AustinGailey/CS3120 | ff3f410cf1d10541cff776eafe39134c897102cd | 0767cb0bcab763f176584192f445ee9d8d46e52b | refs/heads/main | 2023-06-25T10:10:14.452775 | 2021-07-31T00:52:43 | 2021-07-31T00:52:43 | 362,570,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,949 | py | # HW 2
# Austin Gailey
# 03/16/21
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import precision_score
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve, RocCurveDisplay
from sklearn.metrics import auc
from sklearn.metrics import classification_report
# Data Column Names
col_names = ['pregnant', 'glucose', 'bp', 'skin', 'insulin', 'bmi', 'pedigree', 'age', 'label']
# Load Dataset
pima = pd.read_csv("downloads/pima-indians-diabetes.csv", header=None, names=col_names)
# Selected Columns
feature_cols = ['glucose', 'skin', 'insulin', 'bmi', 'age']
print("Columns Selected: ")
print(feature_cols)
X = pima[feature_cols]
y = pima.label
X_train, X_test, y_train, y_test= train_test_split(X, y, test_size=0.4, random_state=0)
# instantiate the model (using the default parameters)
logreg = LogisticRegression()
# Fit model
logreg.fit(X_train,y_train)
# Y Score
y_score = logreg.decision_function(X_test)
# Predict Y values
y_pred = logreg.predict(X_test)
# Confusion Matrix Display
cm = confusion_matrix(y_test, y_pred)
display = ConfusionMatrixDisplay(confusion_matrix=cm)
display.plot()
plt.show()
# Classification Report
print("Accuracy: ", accuracy_score(y_test, y_pred))
print("Precision: ", precision_score(y_test, y_pred))
print("Recall: ", recall_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
# Receiver Operating Characteristic
fpr, tpr, thresholds = roc_curve(y_test, y_score)
#ROC AUC Score
roc_auc = auc(fpr, tpr)
#ROC Curve
display = RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc, estimator_name="ROC Curve")
display.plot()
plt.show() | [
"austingailey@yahoo.com"
] | austingailey@yahoo.com |
a6016ad52f16aa6305c950af46cef83b7d1156de | 220c292659676409854d9b35f23cefd31ea418d5 | /JAN 19/Assignment/Conditional statement & Loop/Q6.py | 400d0c8e759e53478e421a2e8fbe2873932638fe | [] | no_license | nisheshthakuri/Python-Workshop | df096c3de717b91ec4452341e8de77e54778f53d | 22fb9522076042551a6028d9b0a306d1917f21dc | refs/heads/master | 2021-10-21T02:50:04.406258 | 2019-03-02T13:59:10 | 2019-03-02T13:59:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | Q.Program which takes two digits m (row) and n (column) as input and
generates a two-dimensional array. The element value in the i-th row and j-th column of the
array should be i*j.
Solution:
row_m = int(input("Input number of rows: "))
col_n = int(input("Input number of columns: "))
_list = [[0 for col in range(col_n)] for row in range(row_m)]
for row in range(row_m):
for col in range(col_n):
_list[row][col]= row*col
print(_list)
| [
"noreply@github.com"
] | noreply@github.com |
5570b7f2b7f760b10e2f1f04b14e793a57114f75 | 2635ed5a8fef847f4740ae6d40769ff859563a45 | /python-tooling/2-handlerror-tool/handlerror | 06e13e559927ecdb4d59089d378903e80e1fd92a | [] | no_license | LuisDio/Cloud-infra-templating | 19db9c5000f5b5a24fd283f287f4c530021e7d68 | dee785290e1d0dc2d1fa4cd463febf9a47f259ef | refs/heads/master | 2022-10-18T22:53:49.646853 | 2020-06-12T20:05:43 | 2020-06-12T20:05:43 | 259,060,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | #!/usr/bin/env python3.6
import argparse
# Build Parser object
parser = argparse.ArgumentParser(description='Read a file in reverse')
parser.add_argument('filename', help='the file to read')
parser.add_argument('--limit', '-l', type=int, help='the number of lines to read')
parser.add_argument('--version', '-v', action='version', version='%(prog)s 1.0')
# Parse the argument
args = parser.parse_args()
# Carry out the script as mentioned below
# Read the file, reverse the content and print
try:
f = open(args.filename)
limit = args.limit
except FileNotFoundError as err:
print(f"Error: {err}")
else:
with f:
lines = f.readlines()
lines.reverse()
if args.limit:
lines = lines[:limit]
for line in lines:
print(line.strip()[::-1])
| [
"luis.diolindo@gmail.com"
] | luis.diolindo@gmail.com | |
c8da18d00d1a1e9fb12e345f57a5586080b20056 | 212ad01b0951118001eb7a1643441f6492e5e070 | /ask.py | d7971de0d936f76d9756bd1e16fd4f5920895afb | [] | no_license | przywartya/sms-web | f4ca036f783e61cdbe0b05838396eb4509a2ff6b | a2d371703d8dfdbf8411eef957718ba9ea6787a2 | refs/heads/master | 2021-01-10T04:42:15.745098 | 2016-03-20T16:50:20 | 2016-03-20T16:50:20 | 54,328,152 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,507 | py | """ Main part of Ask """
from werkzeug.wrappers import Request, Response
import logging
import requests
from reply import get_reply
# Set up output file for logs
logging.basicConfig(filename='/Apps/MAMP/logs/request.log', level=logging.INFO)
# Make requests output only warnings or errors
logging.getLogger('requests').setLevel(logging.WARNING)
def application(environ, start_response):
"""
Main WSGI callable - the function the server calls upon
receiveing a request.
Receives an SMS message, calls get_reply (which interprets
the message and returns an adequate reply) and dispatches
a reply SMS to the SMSGateway app server. (via a HTTP GET request)
"""
request = Request(environ)
method = request.method
# Our server receives an SMS using a GET Request.
# I'd use POST if it was my choice, but the
# creator of SMS Gateway chose GET :(.
if method == 'GET':
params = request.args
phone = str(params.get('phone'))
user_message = str(params.get('text'))
log("Received", phone, user_message, begin=True)
# call the reply function
reply_message = get_reply(user_message, phone)
log("Sent", phone, reply_message, end=True)
if __name__ != '__main__':
send_sms(phone, reply_message)
#response = Response('', mimetype='text/html')
response = Response(reply_message, mimetype='text/html')
return response(environ, start_response)
def send_sms(phone, message):
""" send_sms(string, string) -> void
Dispatches a HTTP GET request containing the SMS
to SMSGateway's server.
"""
# sms_gateway_url = 'http://192.168.0.102:9090/sendsms'
sms_gateway_url = 'http://192.168.43.78:6969/sendsms'
message_params = {'phone': phone, 'text': message}
# Again - not my idea, just what SMSG requires :(
requests.get(sms_gateway_url, params=message_params)
# might make it more versatile in the future
def log(event, phone, message, begin=False, end=False, line_len=20):
"""
Logs the number and message. Optional args begin and end
add some separators and spacing.
"""
word_len = len(event) + 2 #for two spaces
left = (line_len - word_len) // 2
right = line_len - (left + word_len)
if begin: logging.info(' ' + '*'*line_len)
logging.info(" {L} {ev} {R} ".format( L=left*'-', ev=event, R=right*'-' ))
logging.info(" " + phone)
logging.info(" " + message)
if end: logging.info('\n\n')
if __name__ == '__main__':
from wsgiref.simple_server import make_server
server = make_server('localhost', 80, application)
server.serve_forever() | [
"przywarty.it@gmail.com"
] | przywarty.it@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.