| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f662d955f122cb26eb9042c12234d75832957de7
|
0007ba97130140d0b9d608ece9879323c6dc5f85
|
/53.py
|
bf6019caef1bcaedc7713c5b111bd521a6f0a775
|
[] |
no_license
|
Ashgomathi/ash
|
d0a4fb79fc8b15bb286d19afc121671a0ca8b79c
|
83c879c570e8abc261069574ee671ddee042664a
|
refs/heads/master
| 2020-06-10T10:32:58.875287
| 2019-07-27T09:07:40
| 2019-07-27T09:07:40
| 193,635,194
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
gta8=input()
number=0
for i in range(0,len(gta8)):
    number+=int(gta8[i])
print(number)
|
[
"noreply@github.com"
] |
Ashgomathi.noreply@github.com
|
f02dec514ce9a82d6c5ff24abbd45981bfda669f
|
cd7722c0c75513cc896320a73a2eac960f2f340d
|
/custom_components/mosenergosbyt/sensor.py
|
a90662d061be73c2d389713bea3d706f22ebf75e
|
[] |
no_license
|
kkuryshev/ha_mosenergosbyt
|
faeab92571b45f8c1a7c531375e8ebac592eacc6
|
dfd9e35520e956ae0651b1e922ff91e0702d01eb
|
refs/heads/master
| 2022-09-13T04:05:32.377587
| 2020-05-31T22:10:59
| 2020-05-31T22:10:59
| 268,356,051
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,896
|
py
|
"""Platform for sensor integration."""
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
import logging
from homeassistant.const import CONF_NAME
from datetime import datetime
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    if discovery_info is None:
        return
    client = hass.data[DOMAIN]
    meter_list = discovery_info.items()
    if not meter_list:
        return
    entities = []
    for meter in meter_list:
        sensor = MosenergoSensor(
            client,
            meter[0]
        )
        entities.append(sensor)
    _LOGGER.debug(f'Счетчики мосэнергосбыт добавлены {entities}')
    async_add_entities(entities, update_before_add=True)
class MosenergoSensor(Entity):
    """Representation of a Sensor."""
    def __init__(self, client, meter_id):
        """Initialize the sensor."""
        self.client = client
        self._device_class = 'power'
        self._unit = 'kw'
        self._icon = 'mdi:speedometer'
        self._available = True
        self._name = meter_id
        self._state = None
        self.meter_id = meter_id
        self.update_time = None
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def state(self):
        """Return the state of the sensor."""
        if self._state:
            return self._state.last_measure.nm_status
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return 'кв'
    @property
    def unique_id(self) -> str:
        """Return a unique identifier for this entity."""
        return f"mosenergosbyt_{self.name}"
    @property
    def device_state_attributes(self):
        if self._state:
            measure = self._state.last_measure
            attributes = {
                'nn_ls': self._state.nn_ls,
                'nm_provider': self._state.nm_provider,
                'nm_ls_group_full': self._state.nm_ls_group_full,
                'dt_pay': measure.dt_pay,
                'nm_status': measure.nm_status,
                'sm_pay': measure.sm_pay,
                'dt_meter_installation': measure.dt_meter_installation,
                'dt_indication': measure.dt_indication,
                'nm_description_take': measure.nm_description_take,
                'nm_take': measure.nm_take,
                'nm_t1': measure.nm_t1,
                'nm_t2': measure.nm_t2,
                'nm_t3': measure.nm_t3,
                'pr_zone_t1': measure.pr_zone_t1,
                'pr_zone_t2': measure.pr_zone_t2,
                'pr_zone_t3': measure.pr_zone_t3,
                'vl_t1': measure.vl_t1,
                'vl_t2': measure.vl_t2,
                'vl_t3': measure.vl_t3,
                'refresh_date': self.update_time,
                'nn_days': self._state.nn_days,
                'vl_debt': self._state.vl_debt,
                'vl_balance': self._state.vl_balance
            }
            return attributes
    async def async_update(self):
        self._state, self.update_time = await self.async_fetch_state()
    @property
    def should_poll(self):
        """No need to poll. Coordinator notifies entity of updates."""
        return False
    async def async_fetch_state(self):
        try:
            _LOGGER.debug('получение данных с портала по счетчикам')
            meter_list = await self.client.fetch_data()
            if not meter_list:
                return
            for item in meter_list.values():
                if item.nn_ls == self.meter_id:
                    return item, datetime.now()
        except BaseException:
            _LOGGER.exception('ошибка получения состояния счетчиков с портала')
|
[
"kkurishev@gmail.com"
] |
kkurishev@gmail.com
|
7df7704850cb5541240900662aa18de7e49573fc
|
ec9f242c13c271910cf9db0fb8202ab6f2fcdf9c
|
/Chapter_3/Chapter_3_1_1_1.py
|
78c42c8309e6f85f8ca0065f4b9ca077ec018a47
|
[
"Apache-2.0"
] |
permissive
|
flytian/python_machinelearning
|
0f32807c73e92b98b008cea1e6d8fb92702cb4fb
|
004707c3e66429f102272a7da97e532255cca293
|
refs/heads/master
| 2021-08-30T12:52:20.259662
| 2017-12-16T07:16:29
| 2017-12-16T07:16:29
| 114,345,987
| 0
| 0
|
Apache-2.0
| 2017-12-16T07:16:30
| 2017-12-15T08:23:23
|
Python
|
UTF-8
|
Python
| false
| false
| 5,622
|
py
|
# coding:utf-8
# Define a list of dicts representing several data samples (each dict is one sample).
measurements = [{'city': 'Dubai', 'temperature': 33.}, {'city': 'London', 'temperature': 12.},
                {'city': 'San Fransisco', 'temperature': 18.}]
# Import DictVectorizer from sklearn.feature_extraction.
from sklearn.feature_extraction import DictVectorizer
# Initialize the DictVectorizer feature extractor.
vec = DictVectorizer()
# Print the transformed feature matrix.
print vec.fit_transform(measurements).toarray()
# Print the meaning of each feature dimension.
print vec.get_feature_names()  # ['city=Dubai', 'city=London', 'city=San Fransisco', 'temperature']
# Import the 20 newsgroups text fetcher from sklearn.datasets.
from sklearn.datasets import fetch_20newsgroups
# Download the news samples from the internet on the fly; subset='all' fetches all of the nearly 20,000 documents into the variable news.
news = fetch_20newsgroups(subset='all')
# Import train_test_split from sklearn.cross_validation for splitting the dataset.
from sklearn.cross_validation import train_test_split
# Split news.data: 25% of the documents for testing, 75% for training.
X_train, X_test, y_train, y_test = train_test_split(news.data, news.target, test_size=0.25, random_state=33)
# Import CountVectorizer from sklearn.feature_extraction.text.
from sklearn.feature_extraction.text import CountVectorizer
# Initialize CountVectorizer with the default configuration (English stop words are not removed) and assign it to count_vec.
count_vec = CountVectorizer()
# Convert the raw training and test text into feature vectors using term counts only.
X_count_train = count_vec.fit_transform(X_train)
X_count_test = count_vec.transform(X_test)
# Import the naive Bayes classifier from sklearn.naive_bayes.
from sklearn.naive_bayes import MultinomialNB
# Initialize the classifier with the default configuration.
mnb_count = MultinomialNB()
# Fit the naive Bayes classifier on the training samples vectorized by CountVectorizer (without stop-word filtering).
mnb_count.fit(X_count_train, y_train)
# Print the model accuracy.
print 'The accuracy of classifying 20newsgroups using Naive Bayes (CountVectorizer without filtering stopwords):', mnb_count.score(
    X_count_test, y_test)
# Store the classifier's predictions in y_count_predict.
y_count_predict = mnb_count.predict(X_count_test)
# Import classification_report from sklearn.metrics.
from sklearn.metrics import classification_report
# Print a more detailed report of other classification metrics.
print classification_report(y_test, y_count_predict, target_names=news.target_names)
# Import TfidfVectorizer from sklearn.feature_extraction.text.
from sklearn.feature_extraction.text import TfidfVectorizer
# Initialize TfidfVectorizer with the default configuration (English stop words are not removed) and assign it to tfidf_vec.
tfidf_vec = TfidfVectorizer()
# Convert the raw training and test text into feature vectors using tf-idf.
X_tfidf_train = tfidf_vec.fit_transform(X_train)
X_tfidf_test = tfidf_vec.transform(X_test)
# Still using a default-configured naive Bayes classifier, evaluate the new feature representation on the same training and test data.
mnb_tfidf = MultinomialNB()
mnb_tfidf.fit(X_tfidf_train, y_train)
print 'The accuracy of classifying 20newsgroups with Naive Bayes (TfidfVectorizer without filtering stopwords):', mnb_tfidf.score(
    X_tfidf_test, y_test)
y_tfidf_predict = mnb_tfidf.predict(X_tfidf_test)
print classification_report(y_test, y_tfidf_predict, target_names=news.target_names)
# Continuing with the packages imported in Code 56 and Code 57 (same source file, or without closing the interpreter session), initialize CountVectorizer and TfidfVectorizer with stop-word filtering enabled.
count_filter_vec, tfidf_filter_vec = CountVectorizer(analyzer='word', stop_words='english'), TfidfVectorizer(
    analyzer='word', stop_words='english')
# Vectorize the training and test text with the stop-word-filtering CountVectorizer.
X_count_filter_train = count_filter_vec.fit_transform(X_train)
X_count_filter_test = count_filter_vec.transform(X_test)
# Vectorize the training and test text with the stop-word-filtering TfidfVectorizer.
X_tfidf_filter_train = tfidf_filter_vec.fit_transform(X_train)
X_tfidf_filter_test = tfidf_filter_vec.transform(X_test)
# Initialize a default naive Bayes classifier and evaluate predictions and accuracy on the CountVectorizer features.
mnb_count_filter = MultinomialNB()
mnb_count_filter.fit(X_count_filter_train, y_train)
print 'The accuracy of classifying 20newsgroups using Naive Bayes (CountVectorizer by filtering stopwords):', mnb_count_filter.score(
    X_count_filter_test, y_test)
y_count_filter_predict = mnb_count_filter.predict(X_count_filter_test)
# Initialize another default naive Bayes classifier and evaluate predictions and accuracy on the TfidfVectorizer features.
mnb_tfidf_filter = MultinomialNB()
mnb_tfidf_filter.fit(X_tfidf_filter_train, y_train)
print 'The accuracy of classifying 20newsgroups with Naive Bayes (TfidfVectorizer by filtering stopwords):', mnb_tfidf_filter.score(
    X_tfidf_filter_test, y_test)
y_tfidf_filter_predict = mnb_tfidf_filter.predict(X_tfidf_filter_test)
# Print a more detailed performance report for both models.
from sklearn.metrics import classification_report
print classification_report(y_test, y_count_filter_predict, target_names=news.target_names)
print classification_report(y_test, y_tfidf_filter_predict, target_names=news.target_names)
|
[
"flytonus@sina.cn"
] |
flytonus@sina.cn
|
86308bfcffce3e71e197ca2c8c3120a75ad06334
|
4dda601cb02b404bc0ae25f984825641ddb135fe
|
/scuole/districts/management/commands/bootstrapdistricts_v2.py
|
46af7c74d2b94e19d1157f0518a10620f2fe82f8
|
[
"MIT"
] |
permissive
|
texastribune/scuole
|
d89e49d6bf42d6476a8b2e5a4ebe6380c28e9f60
|
155444e313313ba484d98d73d94d34e9b8f57fbe
|
refs/heads/master
| 2023-06-28T02:52:40.037200
| 2023-05-22T21:51:15
| 2023-05-22T21:51:15
| 35,112,798
| 1
| 0
|
MIT
| 2023-06-12T20:04:49
| 2015-05-05T17:03:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,884
|
py
|
from csv import DictReader
from json import dumps, load
from os.path import join
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry, MultiPolygon
from django.core.management.base import BaseCommand, CommandError
from django.utils.text import slugify
from scuole.counties.models import County
from scuole.districts.models import District
from scuole.regions.models import Region
class Command(BaseCommand):
help = "Bootstraps District models using TEA data."
def add_arguments(self, parser):
parser.add_argument("year", nargs="?", type=str, default=None)
def handle(self, *args, **options):
self.year = options.get("year")
if not self.year:
raise CommandError("A year is required.")
entities_file = join(
settings.DATA_FOLDER, f"tapr/{self.year}/district/entities.csv"
)
with open(entities_file) as infile:
districts = [row for row in DictReader(infile)]
districts_geojson_file = join(
settings.DATA_FOLDER, "tapr/reference/district/shapes/districts.geojson"
)
shape_data = {}
with open(districts_geojson_file) as infile:
geo_data = load(infile)
features = geo_data.get("features")
for feature in features:
properties = feature.get("properties")
tea_id = properties.get("DISTRICT_C")
shape_data[tea_id] = feature.get("geometry")
self.shape_data = shape_data
for district in districts:
self.create_district(district)
def create_district(self, data):
district_id = str(data.get("DISTRICT")).zfill(6)
district_name = data.get("DISTNAME_CLEAN")
county_state_code = data.get("COUNTY").zfill(3)
region_id = str(data.get("REGION")).zfill(2)
self.stdout.write(f"Creating {district_name} ({district_id})")
county = County.objects.get(state_code=county_state_code)
region = Region.objects.get(region_id=region_id)
is_charter = data["DFLCHART"] == "Y"
if district_id in self.shape_data:
geometry = GEOSGeometry(dumps(self.shape_data.get(district_id)))
# checks to see if the geometry is a MultiPolygon
if geometry.geom_typeid == 3:
geometry = MultiPolygon(geometry)
else:
geometry = None
self.stderr.write(f"No shape data for {district_name}")
instance, _ = District.objects.update_or_create(
tea_id=district_id,
defaults={
"name": district_name,
"slug": slugify(district_name, allow_unicode=True),
"charter": is_charter,
"region": region,
"county": county,
"shape": geometry,
},
)
|
[
"rdmurphy@users.noreply.github.com"
] |
rdmurphy@users.noreply.github.com
|
85f779834d336ce10a4a871e05d86f674c49d738
|
6c51f665a21bde3e2c10f068e8a741a62c8ec9e2
|
/nexfil.py
|
6f916d6cf5b042a2764a67c68a19293e6c80f676
|
[
"MIT"
] |
permissive
|
ActorExpose/nexfil
|
0676bc9b7c719fa98280921359f21e63bb3fa9d5
|
7630b8ee9f7b626c2dbb0f58247afa4c70bbddd4
|
refs/heads/main
| 2023-05-27T06:53:29.801617
| 2021-05-22T21:59:43
| 2021-05-22T21:59:43
| 369,937,090
| 1
| 0
|
MIT
| 2021-05-23T01:23:10
| 2021-05-23T01:23:10
| null |
UTF-8
|
Python
| false
| false
| 13,136
|
py
|
#!/usr/bin/env python3
version = '1.0.0'
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # white
Y = '\033[33m' # yellow
import argparse
parser = argparse.ArgumentParser(description=f'nexfil - Find social media profiles on the web | v{version}')
parser.add_argument('-u', help='Specify username', type=str)
parser.add_argument('-d', help='Specify DNS Servers [Default : 1.1.1.1]', type=str, nargs='+')
parser.add_argument('-f', help='Specify a file containing username list', type=str)
parser.add_argument('-l', help='Specify multiple comma separated usernames', type=str)
parser.add_argument('-t', help='Specify timeout [Default : 20]', type=int)
parser.add_argument('-v', help='Prints version', action='store_true')
parser.set_defaults(
d=['1.1.1.1'],
t=20,
v=False
)
args = parser.parse_args()
uname = args.u
dns = args.d
ulist = args.l
fname = args.f
tout = args.t
vers = args.v
if vers == True:
print(dns, type(dns))
print(uname, type(uname))
print(version)
exit()
if uname == None and ulist == None and fname == None:
print(f'{R}[-] {C}Please provide {Y}one {C}of the following : \n\t{C}* {Y}username [-u]\n\t{C}* {Y}comma separated usernames [-l]\n\t{C}* {Y}file containing list of usernames [-f]{W}')
exit()
if uname != None:
mode = 'single'
if len(uname) > 0:
if uname.isspace():
print(f'{R}[-] {C}Username Missing!{W}')
exit()
else:
pass
else:
print(f'{R}[-] {C}Username Missing!{W}')
exit()
elif fname != None:
mode = 'file'
elif ulist != None:
mode = 'list'
tmp = ulist
if ',' not in tmp:
print(f'{R}[-] {C}Invalid Format!{W}')
exit()
else:
ulist = tmp.split(',')
else:
pass
print(f'{G}[+] {C}Importing Modules...{W}')
import socket
import asyncio
import aiohttp
import tldextract
from json import loads
from datetime import datetime
from requests import get, exceptions
from os import getenv, path, makedirs
gh_version = ''
twitter_url = ''
discord_url = ''
found = []
codes = [200, 301, 302, 403, 405, 410, 418, 500]
home = getenv('HOME')
loc_data = home + '/.local/share/nexfil/dumps/'
def fetch_meta():
global gh_version, twitter_url, discord_url
try:
rqst = get('https://raw.githubusercontent.com/thewhiteh4t/nexfil/master/metadata.json', timeout=5)
sc = rqst.status_code
if sc == 200:
metadata = rqst.text
json_data = loads(metadata)
gh_version = json_data['version']
twitter_url = json_data['twitter']
discord_url = json_data['discord']
else:
with open('metadata.json', 'r') as metadata:
json_data = loads(metadata.read())
gh_version = json_data['version']
twitter_url = json_data['twitter']
discord_url = json_data['discord']
except Exception as exc:
print(f'\n{R}[-] {C}Exception : {W}{str(exc)}')
with open('metadata.json', 'r') as metadata:
json_data = loads(metadata.read())
gh_version = json_data['version']
twitter_url = json_data['twitter']
discord_url = json_data['discord']
def banner():
banner = r'''
__ _ _____ _ _ _____ _____ _
| \ | |____ \___/ |____ | |
| \_| |____ _/ \_ | __|__ |_____'''
print(f'{G}{banner}{W}\n')
print(f'{G}[>] {C}Created By : {W}thewhiteh4t')
print(f'{G} |---> {C}Twitter : {W}{twitter_url}')
print(f'{G} |---> {C}Discord : {W}{discord_url}')
print(f'{G}[>] {C}Version : {W}{version}\n')
async def clout(url):
global found
found.append(url)
url = str(url)
ext = tldextract.extract(url)
dom = str(ext.domain)
suf = str(ext.suffix)
orig = f'{dom}.{suf}'
cl_dom = f'{Y}{dom}.{suf}{C}'
url = url.replace(orig, cl_dom)
print(f'{G}[+] {C}{url}{W}')
async def query(session, url, test, data, uname):
try:
if test == 'method':
await test_method(session, url)
elif test == 'string':
await test_string(session, url, data)
elif test == 'redirect':
await test_redirect(session, url)
elif test == 'api':
data = data.format(uname)
await test_api(session, url, data)
elif test == 'alt':
data = data.format(uname)
await test_alt(session, url, data)
else:
response = await session.head(url, allow_redirects=True)
if response.status in codes:
if test == None:
await clout(response.url)
elif test == 'url':
await test_url(response.url)
elif test == 'subdomain':
await test_sub(url, response.url)
else:
pass
elif response.status == 404 and test == 'method':
await test_method(session, url)
elif response.status != 404:
print(f'{R}[-] {Y}[{url}] {W}[{response.status}]')
else:
pass
except asyncio.exceptions.TimeoutError:
print(f'{Y}[!] Timeout :{C} {url}{W}')
except Exception as exc:
print(f'{Y}[!] Exception [query] [{url}] :{W} {str(exc)}')
async def test_method(session, url):
try:
response = await session.get(url, allow_redirects=True)
if response.status != 404:
await clout(response.url)
else:
pass
except asyncio.exceptions.TimeoutError:
print(f'{Y}[!] Timeout :{C} {url}{W}')
except Exception as exc:
print(f'{Y}[!] Exception [test_method] [{url}] :{W} {exc}')
return
async def test_url(url):
url = str(url)
proto = url.split('://')[0]
ext = tldextract.extract(url)
subd = ext.subdomain
if subd != '':
base_url = proto + '://' + subd + '.' + ext.registered_domain
else:
base_url = proto + '://' + ext.registered_domain
if url.endswith('/') == False and base_url.endswith('/') == True:
if url + '/' != base_url:
await clout(url)
else:
pass
elif url.endswith('/') == True and base_url.endswith('/') == False:
if url != base_url + '/':
await clout(url)
else:
pass
elif url != base_url:
await clout(url)
else:
pass
async def test_sub(url, resp_url):
if url == str(resp_url):
await clout(url)
else:
pass
async def test_string(session, url, data):
try:
response = await session.get(url)
if response.status == 404:
pass
elif response.status not in codes:
print(f'{R}[-] {Y}[{url}] {W}[{response.status}]')
else:
resp_body = await response.text()
if data in resp_body:
pass
else:
await clout(response.url)
except asyncio.exceptions.TimeoutError:
print(f'{Y}[!] Timeout :{C} {url}{W}')
return
except Exception as exc:
print(f'{Y}[!] Exception [test_string] [{url}] :{W} {exc}')
return
async def test_api(session, url, endpoint):
try:
response = await session.get(endpoint)
if response.status != 404:
resp_body = loads(await response.text())
if len(resp_body) != 0:
tmp_vars = ['results', 'users', 'username']
for var in tmp_vars:
try:
if resp_body.get(var) != None:
if len(resp_body[var]) != 0:
await clout(url)
return
else:
pass
else:
pass
except:
pass
else:
pass
else:
pass
except Exception as exc:
print(f'{Y}[!] Exception [test_api] [{url}] :{W} {exc}')
return
async def test_alt(session, url, alt_url):
try:
response = await session.get(alt_url, allow_redirects=False)
if response.status != 200:
pass
else:
await clout(url)
except Exception as exc:
print(f'{Y}[!] Exception [test_alt] [{url}] :{W} {str(exc)}')
return
async def test_redirect(session, url):
try:
response = await session.head(url, allow_redirects=False)
except asyncio.exceptions.TimeoutError:
print(f'{Y}[!] Timeout :{C} {url}{W}')
return
except Exception as exc:
print(f'{Y}[!] Exception [test_redirect] [{url}] :{W} {str(exc)}')
return
try:
location = response.headers['Location']
if url != location:
pass
else:
await clout(url)
except KeyError:
await clout(url)
def autosave(uname, ulist, mode, found, start_time, end_time):
if not path.exists(loc_data):
makedirs(loc_data)
else:
pass
if mode == 'single':
filename = f'{uname}_{str(int(datetime.now().timestamp()))}.txt'
username = uname
elif mode == 'list' or mode == 'file':
filename = f'session_{str(int(datetime.now().timestamp()))}.txt'
username = ulist
else:
pass
with open(loc_data + filename, 'w') as outfile:
outfile.write(f'nexfil v{version}\n')
outfile.write(f'Username : {username}\n')
outfile.write(f'Start Time : {start_time.strftime("%c")}\n')
outfile.write(f'End Time : {end_time.strftime("%c")}\n')
outfile.write(f'Total Profiles Found : {len(found)}\n\n')
outfile.write(f'URLs : \n\n')
for url in found:
outfile.write(f'{url}\n')
outfile.write(f'{"-" * 40}\n')
print(f'{G}[+] {C}Saved : {W}{loc_data + filename}')
async def main(uname):
tasks = []
print(f'\n{G}[+] {C}Target :{W} {uname}\n')
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux i686; rv:88.0) Gecko/20100101 Firefox/88.0'
}
resolver = aiohttp.AsyncResolver(nameservers=dns)
timeout = aiohttp.ClientTimeout(total=tout)
conn = aiohttp.TCPConnector(
limit=0,
family=socket.AF_INET,
ssl=False,
resolver=resolver
)
print(f'{Y}[!] Finding Profiles...{W}\n')
async with aiohttp.ClientSession(connector=conn, headers=headers, timeout=timeout) as session:
for block in urls_json:
curr_url = block['url'].format(uname)
test = block['test']
data = block['data']
task = asyncio.create_task(query(session, curr_url, test, data, uname))
tasks.append(task)
await asyncio.gather(*tasks)
def netcheck():
print(f'\n{G}[+] {C}Checking Connectivity...{W}')
try:
rqst = get('https://github.com/', timeout=5)
if rqst.status_code == 200:
pass
else:
print(f'{Y}[!] {C}Status : {W}{rqst.status_code}')
except exceptions.ConnectionError:
print(f'{R}[-] {C}Connection Error! Exiting.{W}')
exit()
def launch(uname):
loop = asyncio.new_event_loop()
loop.run_until_complete(main(uname))
loop.run_until_complete(asyncio.sleep(0))
loop.close()
try:
netcheck()
fetch_meta()
banner()
print(f'{Y}[!] Loading URLs...{W}')
with open('url_store.json', 'r') as url_store:
raw_data = url_store.read()
urls_json = loads(raw_data)
print(f'{G}[+] {W}{len(urls_json)} {C}URLs Loaded!{W}')
print(f'{G}[+] {C}Timeout : {W}{tout} secs')
print(f'{G}[+] {C}DNS Servers : {W}{dns}')
start_time = datetime.now()
if mode == 'single':
launch(uname)
elif mode == 'list':
for uname in ulist:
ulist[ulist.index(uname)] = uname.strip()
launch(uname)
elif mode == 'file':
ulist = []
try:
with open(fname, 'r') as wdlist:
tmp = wdlist.readlines()
for user in tmp:
ulist.append(user.strip())
for uname in ulist:
uname = uname.strip()
launch(uname)
except Exception as exc:
print(f'{Y}[!] Exception [file] :{W} {str(exc)}')
exit()
else:
pass
end_time = datetime.now()
delta = end_time - start_time
if mode == 'single':
print(f'\n{G}[+] {C}Lookup for {Y}{uname} {C}completed in {W}{delta}')
print(f'\n{G}[+] {Y}{len(found)} {C}Possible Profiles Found for {Y}{uname}{W}')
elif mode == 'list' or mode == 'file':
print(f'\n{G}[+] {C}Lookup for {Y}{ulist} {C}completed in {W}{delta}')
print(f'\n{G}[+] {Y}{len(found)} {C}Possible Profiles Found for {Y}{ulist}{W}')
if len(found) != 0:
autosave(uname, ulist, mode, found, start_time, end_time)
else:
pass
except KeyboardInterrupt:
print(f'{R}[-] {C}Keyboard Interrupt.{W}')
exit()
|
[
"lohityapushkar@gmail.com"
] |
lohityapushkar@gmail.com
|
e606b3c1a271a2e229c69bc0923948d3028a7129
|
41a515a85c8116ad95eedf8e6f75825eef044d09
|
/app.py
|
4ce1afbc9182266a06af5ab3d7cae746fb85c1f3
|
[] |
no_license
|
ionagamed/ds-lab-09
|
ffa3fe2dc70ec9e000ad9ecc8f65b5b6e852a726
|
219259a2647a4a11631705572d6905a15b1fee72
|
refs/heads/master
| 2020-08-30T19:16:32.501191
| 2019-10-30T07:23:53
| 2019-10-30T07:23:53
| 218,466,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
from flask import Flask, request, render_template
from pymongo import MongoClient
client_url = ",".join(
    f"mongodb-replicaset-{i}.mongodb-replicaset"
    for i in range(3)
)
client = MongoClient(client_url, 27017)
db = client.chat.messages
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def index():
    if request.method == "POST":
        doc = {
            "username": request.form["username"],
            "message": request.form["message"],
        }
        db.insert_one(doc)
    messages = reversed(list(db.find()))
    return render_template("index.html", messages=messages)
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
|
[
"ionagamed@gmail.com"
] |
ionagamed@gmail.com
|
f1e92c9e31fb60a62c8c597e1bf458d31f4fce2b
|
08954e1a6405612aa7ee432f55210ad053127ccc
|
/test/test_parser.py
|
d4476a33e04f4ead9bd31c0c0068b84c6cbb0e8b
|
[
"MIT"
] |
permissive
|
burritojustice/xyz-qgis-plugin
|
d4260fe0faa853761387aae84475ef3b737dcbc2
|
37b7d84992155fe35d9578b58c9d74a198eccb40
|
refs/heads/master
| 2022-10-14T18:41:50.251108
| 2020-06-09T15:48:18
| 2020-06-10T14:51:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,230
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (c) 2019 HERE Europe B.V.
#
# SPDX-License-Identifier: MIT
#
###############################################################################
import json
import random
import numpy as np
from test.utils import (BaseTestAsync, TestFolder, format_long_args,
len_of_struct, len_of_struct_sorted, flatten,
format_map_fields)
from qgis.core import QgsFields, QgsVectorLayer
from qgis.testing import unittest
from XYZHubConnector.xyz_qgis.layer import parser
# import unittest
# class TestParser(BaseTestAsync, unittest.TestCase):
class TestParser(BaseTestAsync):
def __init__(self,*a,**kw):
super().__init__(*a,**kw)
self.similarity_threshold=80
######## Parse xyz geojson -> QgsFeature
def test_parse_xyzjson(self):
folder = "xyzjson-small"
fnames = [
"airport-xyz.geojson",
"water-xyz.geojson"
]
for fname in fnames:
self.subtest_parse_xyzjson(folder,fname)
def subtest_parse_xyzjson(self,folder,fname):
with self.subTest(folder=folder,fname=fname):
resource = TestFolder(folder)
txt = resource.load(fname)
obj = json.loads(txt)
obj_feat = obj["features"]
fields = QgsFields()
feat = [parser.xyz_json_to_feat(ft, fields) for ft in obj_feat]
self._assert_parsed_fields(obj_feat, feat, fields)
self._assert_parsed_geom(obj_feat, feat, fields)
def _assert_parsed_fields_unorder(self, obj_feat, feat, fields):
# self._log_debug(fields.names())
# self._log_debug("debug id, json vs. QgsFeature")
# self._log_debug([o["id"] for o in obj_feat])
# self._log_debug([ft.attribute(parser.QGS_XYZ_ID) for ft in feat])
names = fields.names()
self.assertTrue(parser.QGS_XYZ_ID in names,
"%s %s" % (len(names), names))
self.assertEqual( len(obj_feat), len(feat))
def _assert_parsed_fields(self, obj_feat, feat, fields):
self._assert_parsed_fields_unorder(obj_feat, feat, fields)
def msg_fields(obj):
return (
"{sep}{0}{sep}{1}"
"{sep}fields-props {2}"
"{sep}props-fields {3}"
"{sep}json {4}"
.format(*tuple(map(
lambda x: "%s %s" % (len(x), x), [
obj_props,
fields.names(),
set(fields.names()).difference(obj_props),
set(obj_props).difference(fields.names())
])),
format_long_args(json.dumps(obj)),
sep="\n>> ")
)
for o in obj_feat:
obj_props = list(o["properties"].keys())
self.assertLessEqual( len(obj_props), fields.size(), msg_fields(o))
self.assertTrue( set(obj_props) < set(fields.names()), msg_fields(o))
# self.assertEqual( obj_props, fields.names(), msg_fields(o)) # strict assert
def _assert_parsed_geom_unorder(self, obj_feat, feat, fields, geom_str):
for ft in feat:
geom = json.loads(ft.geometry().asJson()) # limited to 13 or 14 precision (ogr.CreateGeometryFromJson)
self.assertEqual(geom["type"], geom_str)
def _assert_parsed_geom(self, obj_feat, feat, fields):
# both crs is WGS84
for o, ft in zip(obj_feat, feat):
geom = json.loads(ft.geometry().asJson()) # limited to 13 or 14 precision (ogr.CreateGeometryFromJson)
obj_geom = o["geometry"]
self.assertEqual(geom["type"], obj_geom["type"])
id_ = ft.attribute(parser.QGS_XYZ_ID)
obj_id_ = o["id"]
self.assertEqual(id_, obj_id_)
# self._log_debug(geom)
# self._log_debug(obj_geom)
# coords = obj_geom["coordinates"]
# obj_geom["coordinates"] = [round(c, 13) for c in coords]
# obj_geom["coordinates"] = [float("%.13f"%c) for c in coords]
# self.assertDictEqual(geom, obj_geom) # precision
# for c1, c2 in zip(geom["coordinates"], obj_geom["coordinates"]):
# self.assertAlmostEqual(c1, c2, places=13)
c1 = np.array(obj_geom["coordinates"])
c2 = np.array(geom["coordinates"])
if c1.shape != c2.shape:
self._log_debug(
"\nWARNING: Geometry has mismatch shape",
c1.shape, c2.shape,
"\nOriginal geom has problem. Testing parsed geom..")
self.assertEqual(c2.shape[-1], 2,
"parsed geom has wrong shape of coord")
continue
else:
self.assertLess( np.max(np.abs(c1 - c2)), 1e-13,
"parsed geometry error > 1e-13")
# @unittest.skip("large")
def test_parse_xyzjson_large(self):
folder = "xyzjson-large"
fnames = [
"cmcs-osm-dev-building-xyz.geojson",
"cmcs-osm-dev-building-xyz-30000.geojson",
]
for fname in fnames:
self.subtest_parse_xyzjson(folder,fname)
######## Parse xyz geojson -> struct of geom: [fields], [[QgsFeature]]
def test_parse_xyzjson_map(self):
folder = "xyzjson-small"
fnames = [
"mixed-xyz.geojson",
]
for fname in fnames:
self.subtest_parse_xyzjson_map(folder,fname)
mix_fnames = [
"airport-xyz.geojson",
"water-xyz.geojson",
]
self.subtest_parse_xyzjson_mix(folder,mix_fnames)
def test_parse_xyzjson_map_similarity_0(self):
s = self.similarity_threshold
self.similarity_threshold = 0
try:
folder = "xyzjson-small"
fnames = [
"mixed-xyz.geojson",
]
for fname in fnames:
with self.subTest(folder=folder,fname=fname,
similarity_threshold=self.similarity_threshold):
map_fields = self._parse_xyzjson_map_simple(folder,fname)
self._assert_map_fields_similarity_0(map_fields)
finally:
self.similarity_threshold = s
def test_parse_xyzjson_map_dupe_case(self):
folder = "xyzjson-small"
fnames = [
"airport-xyz.geojson",
"water-xyz.geojson",
]
for fname in fnames:
self.subtest_parse_xyzjson_map_dupe_case(folder,fname)
def _parse_xyzjson_map_simple(self,folder,fname):
resource = TestFolder(folder)
txt = resource.load(fname)
obj = json.loads(txt)
return self.subtest_parse_xyzjson_map_chunk(obj)
def subtest_parse_xyzjson_map_dupe_case(self,folder,fname):
with self.subTest(folder=folder,fname=fname):
import random
mix_case = lambda txt, idx: "".join([
(s.lower() if s.isupper() else s.upper())
if i == idx else s
for i, s in enumerate(txt)])
new_feat = lambda ft, props: dict(ft, properties=dict(props))
n_new_ft = 2
with self.subTest(folder=folder,fname=fname):
resource = TestFolder(folder)
txt = resource.load(fname)
obj = json.loads(txt)
features = obj["features"]
features[0]["properties"].update(fid=1) # test fid
lst_k = list()
lst_new_k = list()
props_ = dict(obj["features"][0]["properties"])
props_ = sorted(props_.keys())
debug_msg = ""
for k in props_:
lst_k.append(k)
for i in range(n_new_ft):
ft = dict(features[0])
props = dict(ft["properties"])
new_k = k
while new_k == k:
idx = random.randint(0,len(k)-1)
if k == "fid": idx = i
new_k = mix_case(k, idx)
if new_k not in lst_new_k: lst_new_k.append(new_k)
debug_msg += format_long_args("\n", "mix_case", k, new_k, props[k], idx)
props[new_k] = props.pop(k) or ""
new_ft = new_feat(ft, props)
features.append(new_ft)
map_fields = self.subtest_parse_xyzjson_map_chunk(obj,chunk_size=1)
# assert that parser handle dupe of case insensitive prop name, e.g. name vs Name
self.assertEqual(len(map_fields),1, "not single geom")
lst_fields = list(map_fields.values())[0]
for k in lst_k:
self.assertIn(k, lst_fields[0].names())
# debug
debug_msg += format_long_args("\n", lst_fields[0].names())
for k, fields in zip(lst_new_k, lst_fields[1:]):
if k.lower() in {parser.QGS_ID, parser.QGS_XYZ_ID}:
k = "{}_{}".format(k,
"".join(str(i) for i, s in enumerate(k) if s.isupper()))
debug_msg += format_long_args("\n", k in fields.names(), k, fields.names())
# self.assertEqual(len(lst_fields), len(lst_new_k) + 1)
for k, fields in zip(lst_new_k, lst_fields[1:]):
if k.lower() in {parser.QGS_ID, parser.QGS_XYZ_ID}:
k = "{}_{}".format(k,
"".join(str(i) for i, s in enumerate(k) if s.isupper()))
self.assertIn(k, fields.names(),
"len lst_fields vs. len keys: %s != %s" %
(len(lst_fields), len(lst_new_k) + 1) +
debug_msg
)
def subtest_parse_xyzjson_map(self,folder,fname):
with self.subTest(folder=folder,fname=fname):
resource = TestFolder(folder)
txt = resource.load(fname)
obj = json.loads(txt)
self.subtest_parse_xyzjson_map_shuffle(obj)
self.subtest_parse_xyzjson_map_multi_chunk(obj)
def subtest_parse_xyzjson_mix(self,folder,fnames):
if len(fnames) < 2: return
with self.subTest(folder=folder, fname="mix:"+",".join(fnames)):
resource = TestFolder(folder)
lst_obj = [
json.loads(resource.load(fname))
for fname in fnames
]
obj = lst_obj[0]
for o in lst_obj[1:]:
obj["features"].extend(o["features"])
random.seed(0.1)
random.shuffle(obj["features"])
self.subtest_parse_xyzjson_map_shuffle(obj)
self.subtest_parse_xyzjson_map_multi_chunk(obj)
def subtest_parse_xyzjson_map_multi_chunk(self, obj, lst_chunk_size=None):
if not lst_chunk_size:
p10 = 1+len(str(len(obj["features"])))
lst_chunk_size = [10**i for i in range(p10)]
with self.subTest(lst_chunk_size=lst_chunk_size):
ref_map_feat, ref_map_fields = self.do_test_parse_xyzjson_map(obj)
lst_map_fields = list()
for chunk_size in lst_chunk_size:
map_fields = self.subtest_parse_xyzjson_map_chunk(obj, chunk_size)
if map_fields is None: continue
lst_map_fields.append(map_fields)
for map_fields, chunk_size in zip(lst_map_fields, lst_chunk_size):
with self.subTest(chunk_size=chunk_size):
self._assert_len_map_fields(
map_fields, ref_map_fields)
def subtest_parse_xyzjson_map_shuffle(self, obj, n_shuffle=5, chunk_size=10):
with self.subTest(n_shuffle=n_shuffle):
o = dict(obj)
ref_map_feat, ref_map_fields = self.do_test_parse_xyzjson_map(o)
lst_map_fields = list()
random.seed(0.5)
for i in range(n_shuffle):
random.shuffle(o["features"])
map_fields = self.subtest_parse_xyzjson_map_chunk(o, chunk_size)
if map_fields is None: continue
lst_map_fields.append(map_fields)
# self._log_debug("parsed fields shuffle", len_of_struct(map_fields))
for i, map_fields in enumerate(lst_map_fields):
with self.subTest(shuffle=i):
self._assert_len_map_fields(
map_fields, ref_map_fields)
def subtest_parse_xyzjson_map_chunk(self, obj, chunk_size=100):
similarity_threshold = self.similarity_threshold
with self.subTest(chunk_size=chunk_size, similarity_threshold=similarity_threshold):
o = dict(obj)
obj_feat = obj["features"]
lst_map_feat = list()
map_fields = dict()
for i0 in range(0,len(obj_feat), chunk_size):
chunk = obj_feat[i0:i0+chunk_size]
o["features"] = chunk
map_feat, _ = parser.xyz_json_to_feature_map(o, map_fields, similarity_threshold)
self._assert_parsed_map(chunk, map_feat, map_fields)
lst_map_feat.append(map_feat)
# self._log_debug("len feat", len(chunk))
# self._log_debug("parsed feat", len_of_struct(map_feat))
# self._log_debug("parsed fields", len_of_struct(map_fields))
lst_feat = flatten([x.values() for x in lst_map_feat])
self.assertEqual(len(lst_feat), len(obj["features"]))
return map_fields
def do_test_parse_xyzjson_map(self, obj, similarity_threshold=None):
obj_feat = obj["features"]
# map_fields=dict()
if similarity_threshold is None:
similarity_threshold = self.similarity_threshold
map_feat, map_fields = parser.xyz_json_to_feature_map(obj, similarity_threshold=similarity_threshold)
self._log_debug("len feat", len(obj_feat))
self._log_debug("parsed feat", len_of_struct(map_feat))
self._log_debug("parsed fields", len_of_struct(map_fields))
self._assert_parsed_map(obj_feat, map_feat, map_fields)
return map_feat, map_fields
def _assert_len_map_fields(self, map_fields, ref, strict=False):
len_ = len_of_struct if strict else len_of_struct_sorted
self.assertEqual(
len_(map_fields), len_(ref), "\n".join([
"map_fields, ref_map_fields",
format_map_fields(map_fields),
format_map_fields(ref),
])
)
def _assert_parsed_map(self, obj_feat, map_feat, map_fields):
self._assert_len_map_feat_fields(map_feat, map_fields)
self.assertEqual(len(obj_feat),
sum(len(lst)
for lst_lst in map_feat.values()
for lst in lst_lst),
"total len of parsed feat incorrect")
# NOTE: obj_feat order does not correspond to that of map_feat
# -> use unorder assert
for geom_str in map_feat:
for feat, fields in zip(map_feat[geom_str], map_fields[geom_str]):
o = obj_feat[:len(feat)]
self._assert_parsed_fields_unorder(o, feat, fields)
self._assert_parsed_geom_unorder(o, feat, fields, geom_str)
obj_feat = obj_feat[len(feat):]
def _assert_len_map_feat_fields(self, map_feat, map_fields):
self.assertEqual(map_feat.keys(), map_fields.keys())
for geom_str in map_feat:
self.assertEqual(len(map_feat[geom_str]), len(map_fields[geom_str]),
"len mismatch: map_feat, map_fields" +
"\n %s \n %s" % (len_of_struct(map_feat), len_of_struct(map_fields))
)
def _assert_map_fields_similarity_0(self, map_fields):
fields_cnt = {k:len(lst_fields) for k, lst_fields in map_fields.items()}
ref = {k:1 for k in map_fields}
self.assertEqual(fields_cnt, ref,
"given similarity_threshold=0, " +
"map_fields should have exact 1 layer/fields per geom")
def test_parse_xyzjson_map_large(self):
folder = "xyzjson-large"
fnames = [
"cmcs-osm-dev-building-xyz.geojson",
"cmcs-osm-dev-road-xyz.geojson",
]
for fname in fnames:
self.subtest_parse_xyzjson_map(folder,fname)
######## Parse QgsFeature -> json
def test_parse_qgsfeature(self):
self.subtest_parse_qgsfeature("geojson-small","airport-qgis.geojson") # no xyz_id
def subtest_parse_qgsfeature(self,folder,fname):
# qgs layer load geojson -> qgs feature
# parse feature to xyz geojson
# compare geojson and xyzgeojson
with self.subTest(folder=folder,fname=fname):
resource = TestFolder(folder)
path = resource.fullpath(fname)
txt = resource.load(fname)
obj = json.loads(txt)
vlayer = QgsVectorLayer(path, "test", "ogr")
feat = parser.feature_to_xyz_json(list(vlayer.getFeatures()),is_new=True) # remove QGS_XYZ_ID if exist
self._log_debug(feat)
self.assertListEqual(obj["features"],feat)
self.assertEqual(len(obj["features"]),len(feat))
def test_parse_qgsfeature_large(self):
pass
if __name__ == "__main__":
# unittest.main()
tests = [
# "TestParser.test_parse_xyzjson",
"TestParser.test_parse_xyzjson_map_similarity_0",
# "TestParser.test_parse_xyzjson_map",
# "TestParser.test_parse_xyzjson_map_dupe_case",
# "TestParser.test_parse_xyzjson_large",
# "TestParser.test_parse_xyzjson_map_large",
]
# unittest.main(defaultTest = tests, failfast=True) # will not run all subtest
unittest.main(defaultTest = tests)
|
[
"16268924+minff@users.noreply.github.com"
] |
16268924+minff@users.noreply.github.com
|
050ede100f804daccbc9e5d1be042cf7b8a52937
|
32e0e3ad8bf23aa2c3672d5a803069e80c1d33e1
|
/commonsrc/Log.py
|
fb2019f580468d59829cecb3f1db2bb3795ccb03
|
[] |
no_license
|
abao0713/interfaceTest2
|
d0c5ba0718c7b4b50f6ce327b641567d00209ad0
|
854a31f3b9c6ea75e8a9d457dac7f6f21009e676
|
refs/heads/master
| 2020-03-18T09:09:39.153793
| 2019-05-22T01:07:14
| 2019-05-22T01:07:14
| 134,547,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,691
|
py
|
import os
import readConfig as readConfig
import logging
from datetime import datetime
import threading
localReadConfig = readConfig.ReadConfig()
class Log:
def __init__(self):
global logPath, resultPath, proDir
proDir = readConfig.proDir
resultPath = os.path.join(proDir, "result")
if not os.path.exists(resultPath):
os.mkdir(resultPath)
logPath = os.path.join(resultPath, str(datetime.now().strftime("%Y%m%d%H%M%S")))
if not os.path.exists(logPath):
os.mkdir(logPath)
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
# defined handler
handler = logging.FileHandler(os.path.join(logPath, "output.log"))
# defined formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
def get_logger(self):
"""
get logger
:return:
"""
return self.logger
def build_start_line(self, case_no):
"""
write start line
:return:
"""
self.logger.info("--------" + case_no + " START--------")
def build_end_line(self, case_no):
"""
write end line
:return:
"""
self.logger.info("--------" + case_no + " END--------")
def build_case_line(self, case_name, msg):
"""
write test case line
:param case_name:
:param code:
:param msg:
:return:
"""
self.logger.info(case_name+"----msg:"+msg)
def get_report_path(self):
"""
get report file path
:return:
"""
report_path = os.path.join(logPath, "report.html")
return report_path
def get_result_path(self):
"""
get test result path
:return:
"""
return logPath
def write_result(self, result):
"""
:param result:
:return:
"""
result_path = os.path.join(logPath, "report.txt")
fb = open(result_path, "wb")
try:
fb.write(result)
except FileNotFoundError as ex:
self.logger.error(str(ex))
class MyLog:
log = None
mutex = threading.Lock()
def __init__(self):
pass
@staticmethod
def get_log():
if MyLog.log is None:
MyLog.mutex.acquire()
MyLog.log = Log()
MyLog.mutex.release()
return MyLog.log
if __name__ == "__main__":
log = MyLog.get_log()
logger = log.get_logger()
logger.debug("test debug")
logger.info("test info")
|
[
"13686821736@163.com"
] |
13686821736@163.com
|
c1d95549cd754be59496169e8ee446c75643f18f
|
62dd39e19d2b839d8e01f6d2d6b0d22bc348be77
|
/test_PokerScoring.py
|
fad8fd4d48b9370a068e0d9f6f2e175b647a3093
|
[] |
no_license
|
MattMorris1996/PythonPoker
|
6c6cc64f39c138dd2c4c73e5bf793f2b0f6cca33
|
dd174f0019c618f4754d3630cd5f5bd66048d639
|
refs/heads/master
| 2020-12-04T14:46:18.741751
| 2020-09-01T23:23:42
| 2020-09-01T23:23:42
| 231,806,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,916
|
py
|
import unittest
import PokerScoring
import CardDeck
import random
class TestPokerHands(unittest.TestCase):
def setUp(self):
# suit values
diamonds = 0
hearts = 1
spade = 2
clubs = 3
# duplicates setup
self.multiples = [CardDeck.Card(diamonds, 4),CardDeck.Card(hearts, 4),CardDeck.Card(spade, 4),CardDeck.Card(clubs, 4)]
# full house setup
self.doubles = [[CardDeck.Card(diamonds, 4), CardDeck.Card(spade, 4)],[CardDeck.Card(hearts, 1), CardDeck.Card(clubs, 1)]]
self.doubles_same = [[CardDeck.Card(diamonds, 4), CardDeck.Card(spade, 4)],[CardDeck.Card(hearts, 0), CardDeck.Card(clubs, 0)]]
self.only_triples = [[CardDeck.Card(diamonds, 0), CardDeck.Card(spade, 0),CardDeck.Card(hearts, 0)]]
self.straight_test = []
# straight setup
for i in range(7):
self.straight_test.append(CardDeck.Card(clubs, i))
self.flush_test = []
# flush setup
for i in range(7):
self.flush_test.append(CardDeck.Card(hearts, random.randint(0, 13)))
# straight flush setup
self.straights = []
self.flushes = []
straight = []
flush = []
# generate straight flush
for i in range(5):
straight.append(CardDeck.Card(hearts, i))
for i in range(5):
flush.append(CardDeck.Card(hearts, i))
self.flushes.append(flush)
self.straights.append(straight)
pass
def test_duplicates(self):
dupl = PokerScoring.duplicates(self.multiples)
self.assertEqual(3, len(dupl))
def test_full_house(self):
# test doubles and triples with unique values
full_house = PokerScoring.full_house(self.only_triples, self.doubles)
self.assertEqual(2, len(full_house))
for hands in full_house:
self.assertEqual(5, len(hands))
# test doubles and triples where values aren't unique
full_house = PokerScoring.full_house(self.only_triples, self.doubles_same)
self.assertEqual(1, len(full_house))
for hands in full_house:
self.assertEqual(5, len(hands))
def test_two_pair(self):
two_pair = PokerScoring.two_pair(self.doubles)
self.assertEqual(2, len(two_pair))
def test_straights(self):
straights = PokerScoring.connectivity(self.straight_test)
self.assertEqual(3, len(straights))
for straight in straights:
self.assertEqual(5, len(straight))
def test_flushes(self):
flushes = PokerScoring.same_suit(self.flush_test)
self.assertEqual(3, len(flushes))
for flush in flushes:
self.assertEqual(5, len(flush))
def test_straight_flush(self):
straight_flushes = PokerScoring.connected_flushes(self.flushes, self.straights)
self.assertEqual(1, len(straight_flushes))
|
[
"matthew.m1996@gmail.com"
] |
matthew.m1996@gmail.com
|
ed46e132c7a54dfeeebb3287a3a61a345d35061c
|
3aab4e1db47f51ef99b94c7f26d3f3788b5cef3c
|
/mrtandam-ica-code/ensemble/src/eca_launch_mapreduce.py
|
6dae3f6fb8487f92e9a6fb4c482abc9c54f93fb2
|
[
"Apache-2.0"
] |
permissive
|
baimingze/project1
|
8f73bc5e3af151335c7ee97b583914423d927269
|
6c9cd57bf1d413645bd47785f2ca597d97f9df22
|
refs/heads/master
| 2021-01-18T14:33:48.211413
| 2015-07-15T20:56:58
| 2015-07-15T20:56:58
| 38,840,952
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,530
|
py
|
#!/opt/local/bin/python
# script for launching ensemble learning jobs in Amazon Elastic Map Reduce
# Copyright (C) 2010 Insilicos LLC All Rights Reserved
# Original Authors Jeff Howbert, Natalie Tasman, Brian Pratt
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# general idea is to launch a framework R script which sources a configurable
# script that contains the various bits of mapreduce code
#
# expected file layout when this all runs:
# <your bucket>
# <your bucket>/<path_to_trainingDataFile>
# <your bucket>/<path_to_testDataFile>
# <your bucket>/<baseNameFromConfigFile>
# <your bucket>/<baseNameFromConfigFile>/<timestamp>/ (the "job directory")
# <your bucket>/<baseNameFromConfigFile>/<timestamp>/<configFile>
# <your bucket>/<baseNameFromConfigFile>/<timestamp>/<scriptFile>
# <your bucket>/<baseNameFromConfigFile>/<timestamp>/results/<mapReduce results file(s)>
import sys
import os.path
import boto.ec2
import boto.s3
import boto.emr
from boto.emr import BootstrapAction
from boto.ec2.regioninfo import RegionInfo
from boto.emr.step import StreamingStep
from boto.emr.connection import EmrConnection
from boto.s3.connection import S3Connection
from boto.s3.bucketlistresultset import BucketListResultSet
import eca_launch_helper as eca # functions common to RMPI and MapReduce versions
from boto.s3.key import Key
import simplejson as json
from time import sleep
import datetime
eca.loadConfig("mapreduce") # get config as directed by commandline, mapreduce style
jobDir = eca.getCoreJobDir() # gets baseName, or umbrella name for multi-config batch job
jobDirS3 = eca.S3CompatibleString(jobDir)
syspath=os.path.dirname(sys.argv[0])
if (""==syspath) :
syspath = os.getcwd()
syspath = syspath.replace("\\","/") # tidy up any windowsy slashes
eca.setCoreConfig("mapReduceFrameworkScript", eca.getConfig( "mapReduceFrameworkScript",syspath+"/eca_mapreduce_framework.R"))
eca.setCoreConfig("frameworkSupportScript", eca.getConfig( "frameworkSupportScript",syspath+"/eca_common_framework.R"))
# are we running on AWS Elastic MapReduce? (could be a generic hadoop cluster, instead)
if ( eca.runAWS() ) :
aws_access_key_id = eca.getConfig( "aws_access_key_id" )
aws_secret_access_key = eca.getConfig( "aws_secret_access_key" )
aws_region = eca.getConfig( "aws_region" )
aws_placement = eca.getConfig( "aws_placement", required=False ) # sub-region
aws_region = RegionInfo(name=eca.getConfig( "aws_region" ),endpoint=eca.getConfig( "ec2_endpoint",'elasticmapreduce.amazonaws.com' ))
conn = boto.emr.EmrConnection(region=aws_region,aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
else :
conn = eca.HadoopConnection()
head_instance_type = eca.getConfig( "ec2_head_instance_type" )
client_instance_type = eca.getConfig( "ec2_client_instance_type" )
# optional: name of existing EC2 keypair for SSH to head node
ec2KeyPair = eca.getConfig( "RSAKeyName", required=False )
# prepare a list of files to be copied from S3 to where the clients can access them
if (eca.runLocal()) :
eca.setCoreConfig("sharedDir",jobDir + "/")
elif ( eca.runAWS() ) :
bucketName = eca.S3CompatibleString(eca.getConfig("s3bucketID" ),isBucketName=True) # enforce bucket naming rules
bucketURL = "s3n://"+bucketName
# directory for passing large key values in files
eca.setCoreConfig("sharedDir","/mnt/var/lib/hadoop/dfs/")
s3conn = S3Connection(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
s3bucket = s3conn.create_bucket( bucketName )
k = Key(s3bucket)
else :
bucketName = 'hdfs://%s' % eca.getHadoopDir()
bucketURL = bucketName
k = eca.HadoopConnection()
# write the framework and implementation scripts to
# per-job directory as a matter of record
frameworkScriptPath = eca.getConfig( "mapReduceFrameworkScript")
frameworkSupportScriptPath = eca.getConfig( "frameworkSupportScript")
mapReduceScriptPath = eca.getConfig( "scriptFileName" )
baseName = eca.getConfig( "baseName" )
configName = '%s.cfg.r' % baseName
if ( not eca.runLocal() ) :
frameworkScriptName = eca.S3CompatibleString(os.path.basename(frameworkScriptPath))
k.key = '%s/%s' % ( jobDirS3 , frameworkScriptName )
k.set_contents_from_filename(frameworkScriptPath)
eca.makeFileExecutable(k.key)
frameworkSupportScriptName = eca.S3CompatibleString(os.path.basename(frameworkSupportScriptPath))
k.key = '%s/%s' % ( jobDirS3 , frameworkSupportScriptName )
eca.setCoreConfig( "frameworkSupportScript", frameworkSupportScriptName) # use the version without path info
k.set_contents_from_filename(frameworkSupportScriptPath)
scriptName = eca.S3CompatibleString(os.path.basename(mapReduceScriptPath))
k.key = '%s/%s' % ( jobDirS3 , scriptName )
k.set_contents_from_filename(mapReduceScriptPath)
# now we can refer to these without a path
eca.setCoreConfig( "mapReduceFrameworkScript", frameworkScriptName )
eca.setCoreConfig( "frameworkSupportScript", frameworkSupportScriptName)
eca.setCoreConfig( "scriptFileName", scriptName)
configName = os.path.basename(configName)
configCache = eca.constructCacheFileReference( bucketName , jobDirS3 , configName )
frameworkCache = eca.constructCacheFileReference( bucketName , jobDirS3 , frameworkScriptName)
scriptCache = eca.constructCacheFileReference( bucketName , jobDirS3 , scriptName )
scriptSupportCache = eca.constructCacheFileReference( bucketName , jobDirS3 , frameworkSupportScriptName )
cachefiles = [ configCache, frameworkCache, scriptCache, scriptSupportCache ]
# create a job step to copy data from S3 to HDFS
copierInputFile = '%s/copier-input-values' % jobDirS3
copierCommands = ""
# go through the config parameters, anything named "sharedFile_*" gets uploaded
# to S3 with a gzip preference
for n in range(-1,len(eca.cfgStack)) :
for cfgKey,val in eca.selectConfig(n).iteritems():
if (cfgKey.startswith("sharedFile_")):
fullLocalPath = eca.my_abspath( val ) # convert relative path to absolute
eca.setConfig( cfgKey, fullLocalPath)
# do the upload to S3
s3FileList = [(cfgKey, n, "", True)]
eca.uploadToS3(s3FileList) # side effect: after this call config speaks of data files in terms of S3
# and set up for copying S3 files out to HDFS
hdfsPath = "hdfs:///home/hadoop/"
hdfsname = hdfsPath+os.path.basename(eca.getConfig(cfgKey))
hadoopCopyCmd = "hadoop dfs -cp "
# prepare a list of copy commands to be passed out to mappers
cmd = '%s %s%s %s\n' % ( hadoopCopyCmd, bucketURL, eca.getConfig(cfgKey), hdfsname )
if not cmd in copierCommands :
copierCommands = copierCommands + cmd
eca.setConfig(cfgKey,hdfsname)
k.key = copierInputFile
k.set_contents_from_string(copierCommands)
# are we planning a spot bid instead of demand instances?
spotBid = eca.getConfig("spotBid","")
if ("" != spotBid) :
if ('%' in spotBid) : # a percentage, eg "25%" or "25%%"
spotBid = eca.calculateSpotBidAsPercentage( spotBid, client_instance_type, 0.20 ) # about 20% more for EMR instances
launchgroup = "ECA"+eca.getConfig( "baseName" ) +"_"+eca.getConfig("jobTimeStamp")
else :
launchgroup = ""
eca.setCoreConfig("launchgroup",launchgroup)
# mapper keys are random seeds
# there is only one reducer key
# mapper input file is just a list of integers 0 through (ensembleSize-1)
mapperInputFile = '%s/mapper-input-values' % jobDirS3
mapperInputs = ""
for count in range(int(eca.getConfig( "ensembleSize" ))) :
mapperInputs = mapperInputs + str(count) + "\n"
eca.saveStringToFile(mapperInputs,mapperInputFile)
# write parameters to for the record (after removing security info)
eca.scrubAndPreserveJobConfig( '%s/%s' % ( jobDirS3 , configName ) )
# and now execute
if (eca.runLocal()) :
# execute the package installer script
packageInstallerScriptText=eca.create_R_package_loader_script(eca.getConfig("scriptFileName"))
eca.setCoreConfig("packageInstaller", '%s/%s.installpackages.r' % ( jobDirS3, configName ))
eca.saveStringToFile(packageInstallerScriptText, eca.getConfig("packageInstaller"))
cmd = "Rscript " + eca.getConfig("packageInstaller")
eca.log( "run: " + cmd )
os.system(cmd)
configName = '%s/%s' % ( jobDirS3 , configName )
for n in range(0,len(eca.cfgStack)) :
eca.selectConfig(n)
resultsFilename=eca.getConfig("resultsFilename")
subCfgName = eca.getConfig("eca_uniqueName")
mapResults=resultsFilename+"."+subCfgName+".map"
redResults=resultsFilename+"."+subCfgName+".red"
mapper = "Rscript %s mapper %s %s %s" % (frameworkScriptPath,mapReduceScriptPath,configName,subCfgName)
if (resultsFilename != "") :
mapper=mapper+" 2>"+mapResults # capture logging on stderr
reducer = "Rscript %s reducer %s %s %s" % (frameworkScriptPath,mapReduceScriptPath,configName,subCfgName)
if (resultsFilename != "") :
reducer=reducer+" >"+redResults +" 2>&1" # capture logging on stderr as well as results on stdout
cmd = "cat " + mapperInputFile + " | " + mapper + " | sort | " + reducer
eca.log("run: "+ cmd)
os.system(cmd)
wait = 1
if (resultsFilename != "") :
os.system("cat "+mapResults +" >> "+resultsFilename) # combine mapper and reducer logs
os.system("cat "+redResults +" >> "+resultsFilename) # combine mapper and reducer logs
os.system("cat "+mapResults) # display mapper logs
os.system("cat "+redResults) # display reducer logs
os.system("rm "+mapResults) # delete mapper log
os.system("rm "+redResults) # delete reducer log
else :
# bootstrap actions to customize EMR image for our purposes - no need to run on master
bootstrapText = '#!/bin/bash\n'
if ("True" == eca.getConfig("update_EMR_R_install","False")) :
# get latest R (creaky old 2.7 is default on EMR)
bootstrapText = bootstrapText + '# select a random CRAN mirror\n'
bootstrapText = bootstrapText + 'mirror=$(sudo Rscript -e "m=getCRANmirrors(all = TRUE) ; m[sample(1:dim(m)[1],1),4]" | cut -d "\\"" -f 2)\n'
bootstrapText = bootstrapText + 'echo "deb ${mirror}bin/linux/debian lenny-cran/" | sudo tee -a /etc/apt/sources.list\n'
bootstrapText = bootstrapText + '# hose out old pre-2.10 R packages\n'
bootstrapText = bootstrapText + 'rpkgs="r-base r-base-dev r-recommended"\n'
bootstrapText = bootstrapText + 'sudo apt-get remove --yes --force-yes r-cran-* r-base* $rpkgs\n'
bootstrapText = bootstrapText + '# install fresh R packages\n'
bootstrapText = bootstrapText + 'sudo apt-get update\nsudo apt-get -t lenny-cran install --yes --force-yes $rpkgs\n'
# and make sure any packages mentioned in the user script are present
bootstrapText = bootstrapText + 'cat >/tmp/installPackages.R <<"EndBlock"\n'
bootstrapText = bootstrapText + eca.create_R_package_loader_script(eca.getConfig("scriptFileName"))
bootstrapText = bootstrapText + "EndBlock\nsudo Rscript /tmp/installPackages.R\nexit $?\n"
bootstrapFile = "bootstrap.sh"
eca.debug("writing AWS EMR bootstrap script to %s" % bootstrapFile)
k.key = '%s/%s' % ( jobDirS3, bootstrapFile )
k.set_contents_from_string(bootstrapText)
bootstrapActionInstallRPackages = BootstrapAction("install R packages",'s3://elasticmapreduce/bootstrap-actions/run-if', ['instance.isMaster!=true','s3://%s/%s' % (bucketName, k.key)])
copierScript = '%s copier' % ( frameworkScriptName )
mapperScript = '%s mapper %s %s' % ( frameworkScriptName , scriptName, configName )
reducerScript = '%s reducer %s %s' % ( frameworkScriptName , scriptName, configName )
# write results here
eca.log("scripts, config and logs will be written to %s/%s" % (bucketURL,jobDirS3))
# tell Hadoop to run just one reducer task, and set mapper task count in hopes of giving reducer equal resources
nodecount = int(eca.getConfig( "numberOfClientNodes", eca.getConfig( "numberOfNodes", 0 ) )) # read old style as well as new
if (nodecount < 1) :
nodecount = 1 # 0 client nodes means something in RMPI, but not Hadoop
mapTasksPerClient = int(eca.getConfig("numberOfRTasksPerClient"))
nmappers = (nodecount*mapTasksPerClient)-1 # -1 so reducer gets equal resources
stepArgs = ['-jobconf','mapred.task.timeout=1200000','-jobconf','mapred.reduce.tasks=1','-jobconf','mapred.map.tasks=%d' % nmappers]
workstepsStack = []
for n in range(0,len(eca.cfgStack)) :
worksteps = []
if ((0==n) and eca.runAWS()) :
# specify a streaming (stdio-oriented) step to copy data files from S3
copierStep = boto.emr.StreamingStep( name = '%s-copyDataFromS3toHDFS' % baseName,
mapper = '%s' % (copierScript),
reducer = 'NONE',
cache_files = cachefiles,
input = '%s/%s' % (bucketURL, copierInputFile),
output = '%s/%s/copierStepResults' % (bucketURL, jobDir),
step_args = ['-jobconf','mapred.task.timeout=1200000']) # double the std timeout for file transfer
worksteps.extend( [copierStep] )
eca.selectConfig(n)
eca.setConfig("completed",False,noPostSaveWarn=True)
subCfgName = eca.getConfig("eca_uniqueName")
eca.setConfig("resultsDir", '%s/%s' % (jobDir,subCfgName),noPostSaveWarn=True)
# specify a streaming (stdio-oriented) step
if (baseName == subCfgName) :
stepname = baseName
else :
stepname = '%s-%s' % (baseName, subCfgName)
workstep = boto.emr.StreamingStep( name = stepname,
mapper = '%s %s' % (mapperScript, subCfgName),
reducer = '%s %s' % (reducerScript, subCfgName),
cache_files = cachefiles,
input = '%s/%s' % (bucketURL, mapperInputFile),
output = '%s/%s' % (bucketURL, eca.getConfig("resultsDir")),
step_args = stepArgs)
worksteps.extend([workstep])
workstepsStack.extend([worksteps])
# and run the job
keepalive = ("True" == eca.getConfig("keepHead","False"))
if ( keepalive ) :
failure_action = 'CANCEL_AND_WAIT'
else :
failure_action = 'TERMINATE_JOB_FLOW'
if ("" != spotBid) :
from boto.emr.instance_group import InstanceGroup # spot EMR is post-2.0 stuff - 2.1rc2 is known to work
launchGroup = eca.getConfig("launchgroup")
instanceGroups = [
InstanceGroup(1, 'MASTER', head_instance_type, 'SPOT', 'master-%s' % launchGroup, spotBid),
InstanceGroup(nodecount, 'CORE', client_instance_type, 'SPOT', 'core-%s' % launchGroup, spotBid)
]
jf_id = conn.run_jobflow(name = baseName,
log_uri='s3://%s/%s' % (bucketName, jobDir),
ec2_keyname=ec2KeyPair,
action_on_failure=failure_action,
keep_alive=keepalive,
instance_groups=instanceGroups,
enable_debugging=("False"==eca.getConfig("noDebugEMR","False")),
steps=workstepsStack[0],
bootstrap_actions=[bootstrapActionInstallRPackages])
else :
jf_id = conn.run_jobflow(name = baseName,
log_uri='s3://%s/%s' % (bucketName, jobDir),
ec2_keyname=ec2KeyPair,
action_on_failure=failure_action,
keep_alive=keepalive,
master_instance_type=head_instance_type,
slave_instance_type=client_instance_type,
enable_debugging=("False"==eca.getConfig("noDebugEMR","False")),
num_instances=(nodecount+1), # +1 for master
steps=workstepsStack[0],
bootstrap_actions=[bootstrapActionInstallRPackages])
for n in range(1,len(workstepsStack)) : # adding all multi-config steps at once can overwhelm boto
conn.add_jobflow_steps(jf_id,workstepsStack[n])
wait = 10 # much less than this and AWS gets irritated and throttles you back
lastState = ""
while True:
jf = conn.describe_jobflow(jf_id)
if (lastState != jf.state) : # state change
eca.log_no_newline("cluster status: "+jf.state)
lastState = jf.state
else :
eca.log_progress() # just put a dot
for n in range(0,len(eca.cfgStack)) :
eca.selectConfig(n)
if (not eca.getConfig("completed")) :
# grab the results
concat = ""
mask = '%s/part-' % eca.getConfig("resultsDir")
eca.debug("checking %s"%mask)
if ( eca.runAWS() ) :
for part in BucketListResultSet(s3bucket, prefix=mask) :
# all results in one string
k.key = part
concat = concat + k.get_contents_as_string()
else : # hadoop
k.key = mask+"*"
concat = k.get_contents_as_string()
if (len(concat) > 0) :
eca.log("Done. Results:")
eca.log(concat)
# write to file?
resultsFilename=eca.getConfig("resultsFilename")
if (resultsFilename != "") :
f = open(resultsFilename,"w+")
f.write(concat)
f.close()
eca.log('results also written to %s' % resultsFilename)
eca.setConfig("completed",True,noPostSaveWarn=True)
lastState = '' # just to provoke reprint of state on console
if lastState == 'COMPLETED':
break
if lastState == 'FAILED':
break
if lastState == 'TERMINATED':
break
sleep(wait)
eca.log_close()
|
[
"mingze@localhost.localdomain"
] |
mingze@localhost.localdomain
|
c5a0ff62b99e765cf2248885b7536589d03a6b90
|
13f6d3ff4764956d61ebb5ca8ad55927e2fea919
|
/session1/list1.py
|
81f398f7ba7f504c34d9d0da0abe33ab9c7f5198
|
[] |
no_license
|
Kit-Data-Science-Telecom-2021-2022/Kit-Data_Marie-Elisabeth-Campo
|
73390bf073ee68843f34ca3e354142ba6c9397bf
|
522cf35558401557aa74263d09ecc0d6ab3c55fb
|
refs/heads/main
| 2023-09-05T22:35:18.090965
| 2021-11-07T23:24:07
| 2021-11-07T23:24:07
| 409,204,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,105
|
py
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
return len([w for w in words if len(w)>=2 and w[0]==w[-1]])
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
o = list(w for w in words if w[0] != 'x')
o = sorted(o)
x = list(w for w in words if w[0] == 'x')
x = sorted(x)
return x + o
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element from each tuple.
def sort_last(tuples):
    return sorted(tuples, key=lambda t: t[-1])
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print ('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
# Calls the above functions with interesting inputs.
def main():
print ('match_ends')
test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
    print()
print ('front_x')
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
    print()
print ('sort_last')
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
|
[
"marinette@gmx.fr"
] |
marinette@gmx.fr
|
4565b9d512665043f48b45c2190f63f7c94d3f14
|
3b059132c1ef89671416fbf1d2b91b5709f24321
|
/singly_linked.py
|
b78d6f888ac15a58e2fd3ff4116db24779633ba5
|
[] |
no_license
|
Philipwallen/Data-Structures
|
0eeae94b9322d4c0a012df0871e187f4498ec86e
|
7c6b9c42aec1002a97f52b079e599ac57a36a2dc
|
refs/heads/master
| 2020-12-03T00:07:20.851363
| 2017-07-07T14:59:05
| 2017-07-07T14:59:05
| 95,990,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
class Node(object):
def __init__(self, value):
self.value = value
self.nextnode = None
'''
Here we have a singly linked list class.
'''
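# Illustrative sketch (not from the original file): chaining a few nodes
# to form the head of a singly linked list.
#   head = Node(1)
#   head.nextnode = Node(2)
#   head.nextnode.nextnode = Node(3)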
|
[
"philipwallen1@gmail.com"
] |
philipwallen1@gmail.com
|
bb45d0593e0bed3aa6c10277e34775a2e6257981
|
4a216a1d968cb3f3ed78f24def35773ed7c04459
|
/main.py
|
c17f90b508427d9ff451d5ab5497d96d8f8826fd
|
[] |
no_license
|
luizgdias/kafka_producer_topic_consumer
|
a43d15b40aed048271e37b64f24af3adf2fe47e2
|
b4484ece16443423e7e1f2dfe16b5084e98f2abf
|
refs/heads/master
| 2020-06-28T15:44:06.080711
| 2019-08-02T18:09:21
| 2019-08-02T18:09:21
| 200,271,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,230
|
py
|
# -*- Coding: UTF-8 -*-
#coding: utf-8
#########################################################
# author: Luiz Gustavo Dias
# date : 07/23/2019
#########################################################
# At First time is necessary to run in terminal:
# $ docker run -d --name zookeeper jplock/zookeeper:3.4.6
# $ docker run -d --name kafka --link zookeeper:zookeeper ches/kafka
# $ export ZK_IP=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}" zookeeper)
# $ export KAFKA_IP=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}" kafka)
# $ docker run --rm ches/kafka kafka-topics.sh --create --topic test --replication-factor 1 --partitions 1 --zookeeper $ZK_IP:2181
# Created topic "test".
#########################################################
# Description: The script lists all files in the ./Files directory into a txt file;
#              then a Kafka producer is created that reads the file and sends
#              all the file names to a Kafka consumer that uses the same
#              Kafka topic.
# docker run --rm --interactive ches/kafka kafka-console-producer.sh --broker-list 172.17.0.3:9092 --topic test
# docker run --rm ches/kafka kafka-console-consumer.sh --topic test --from-beginning --zookeeper 172.17.0.2:2181
#########################################################
from kafka import KafkaConsumer
from kafka import KafkaProducer
from json import loads
import os, sys, subprocess, shlex
import json
from json import dumps
from time import sleep
def buffering():
os.system("touch buffer-list-files.json")
buffer_list_files = open("buffer-list-files.json").readlines()
print(buffer_list_files)
buffer_list_files2 = open("buffer-list-files.json", "a")
for root, dirs, files in os.walk("./Files", topdown=False):
for name in files:
json_lista = '{"file_path":"'+os.path.join(root,name)+'", "submited":" "}\n'
if json_lista in buffer_list_files:
print("O arquivo <"+name+"> já está bo buffer!")
else:
print("O arquivo <"+name+"> não está no buffer....\nPreparando para inserir o arquivo <"+name+"> no buffer...")
#print(os.path.join(root,name))
buffer_list_files2.write('{"file_path":"'+os.path.join(root,name)+'", "submited":" "}\n')
print("Arquivo <"+name+"> inserido no buffer.")
buffer_list_files2.close()
def connection():
x = "docker start zookeeper kafka"
process = subprocess.Popen(x, stdout=subprocess.PIPE, shell=True)
process.communicate()
def sendToTopic():
# os.system('docker stop zookeeper kafka')
# os.system('docker rm zookeeper kafka')
# os.system('docker run -d --name zookeeper jplock/zookeeper:3.4.6')
# os.system('docker run -d --name kafka --link zookeeper:zookeeper ches/kafka')
# os.system('export KAFKA_IP=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}" kafka)')
# os.system('echo $KAFKA_IP')
x = "docker start zookeeper kafka"
process = subprocess.Popen(x, stdout=subprocess.PIPE, shell=True)
process.communicate()
producer = KafkaProducer(bootstrap_servers=['172.17.0.3:9092'], api_version=(0,10,1),
value_serializer=lambda x:
dumps(x).encode('utf-8'))
for e in range(10):
data = {'id': e,'x1': '1', 'y1': '1','x2': '2', 'y2': '2','page': '3', 'type': '3', 'path': '/out'}
producer.send('test', value=data)
print("Producer to topic: "+str(e))
sleep(1)
#os.system('docker stop zookeeper kafka')
def getKafkaMessages(topicName):
#os.system('docker run --rm ches/kafka kafka-console-consumer.sh --topic testTopic --from-beginning --zookeeper 172.17.0.2:2181')
# x = "docker start zookeeper kafka"
# process = subprocess.Popen('export ZK_IP=$(docker inspect --format \'{{ .NetworkSettings.IPAddress }}\' zookeeper) && echo $ZK_IP', stdout=subprocess.PIPE, shell=True)
# zookeeper_ip = process.communicate()[0]
# zookeeper_ip = (str(zookeeper_ip, 'UTF-8')).strip('\n')
# print(zookeeper_ip)
os.system('docker run --rm ches/kafka kafka-console-consumer.sh --topic image-detection-topic --from-beginning --zookeeper 192.168.1.112:2181')
# process.communicate()
#buffering()
def getKafkaMessagesV2(topic, kafka_ip):
## Collect Messages from Bus
consumer = KafkaConsumer(topic, auto_offset_reset='earliest',
bootstrap_servers=[kafka_ip],
api_version=(0, 10, 1))
consumer.subscribe([topic])
print('after consumer')
print(consumer)
for msg in consumer:
print('inside for')
print(msg[6])
#sendToTopic()
#getKafkaMessages('image-detection-topic')
getKafkaMessagesV2('image-detection-topic', '10.100.14.107:9092')
#getKafkaMessagesV2('test', '172.17.0.3:9092')
#bin/kafka-console-consumer --zookeeper localhost:2181 --topic kafkatest --from-beginning
#bin/kafka-console-consumer --zookeeper localhost:2181 /kafka --topic kafkatest --from-beginning
#kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic my-topic --from-beginning
|
[
"gusttavodiias@gmail.com"
] |
gusttavodiias@gmail.com
|
71d68c642a70a8d625599303258d762986ccd3f6
|
46537fe6906fa10ed515baf36598168ff948aeaf
|
/mq/apps.py
|
bd83a9c748c327fc22707100d3383b38124cd392
|
[] |
no_license
|
dima-kov/django-mq
|
6b0538499a6091601ada1ecb962875d5f5634104
|
08c458780173e64785d30f87536121fa9e8a29ae
|
refs/heads/master
| 2023-07-31T10:33:40.209376
| 2021-09-19T10:18:34
| 2021-09-19T10:18:34
| 408,092,675
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
from django.apps import AppConfig
class MqAppConfig(AppConfig):
    name = 'mq'  # app module path, used when this config is referenced from INSTALLED_APPS
    def ready(self):
        from mq.facade import QueuesFacade  # noqa
|
[
"dima.kovalchuk.v@gmail.com"
] |
dima.kovalchuk.v@gmail.com
|
056bd878933e3e8f3a603973ac2a820ac07bce18
|
fd43e56e22254e8a653e32ad7262c1f41c670391
|
/lcov/__init__.py
|
468b86309784894323139b90053882aff64f7019
|
[
"MIT"
] |
permissive
|
hubiao7/scons-lcov
|
0ba681537aa8b81d1e5668b4bc011c182bf47eee
|
0c88bea03c787d001691f970faf5e9b7a3fe98ba
|
refs/heads/master
| 2021-06-01T05:49:25.356588
| 2016-06-01T20:47:08
| 2016-06-01T20:47:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
import SCons
from SCons.Builder import Builder
from SCons.Script import Dir, Flatten, Mkdir
from os import path
class ToolLCovWarning(SCons.Warnings.Warning):
pass
class LCovExecutableNotFound(ToolLCovWarning):
pass
def lcov_generator(source, target, env, for_signature):
cmd = ['lcov --capture']
cmd += ['--output-file', target[0].abspath]
if 'LCOVDIR' in env:
cmd += ['--directory', str(Dir(env['LCOVDIR']))]
if 'LCOVBASEDIR' in env:
cmd += ['--base-directory', str(Dir(env['LCOVBASEDIR']))]
return ' '.join(Flatten(cmd))
_lcov_builder = Builder(generator=lcov_generator)
def generate(env):
env['LCov'] = _detect(env)
env['BUILDERS']['LCov'] = _lcov_builder
def _detect(env):
try:
return env['LCov']
except KeyError:
pass
lcov = env.WhereIs('lcov')
if lcov:
return lcov
    raise SCons.Errors.StopError(LCovExecutableNotFound,
                                 'Could not detect lcov executable')
def exists(env):
return _detect(env)
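# Illustrative usage sketch (assumed, not part of the original tool): load the
# tool into an SCons environment and declare a coverage-capture target.
#   env = Environment(tools=['default', 'lcov'], toolpath=['path/to/site_scons'])
#   env.LCov('coverage.info', [], LCOVDIR='build', LCOVBASEDIR='.')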
|
[
"rhythm.mail@gmail.com"
] |
rhythm.mail@gmail.com
|
11fffdf455b5b2de1d41093e5db837b67414fb80
|
5cd8fb2e84e5f50f39505a97e9021198700920e3
|
/src/employees/models.py
|
9e10dd1d3e77cc6a7f661f1a78ffe127e549711f
|
[] |
no_license
|
danliu277/openbag_python
|
81a597f72bfc943f8ff98e8b732fe7d6fb936999
|
aef1596709042f66a93883d67114b5b08f8f504f
|
refs/heads/master
| 2022-12-04T03:23:09.670440
| 2020-08-26T20:49:18
| 2020-08-26T20:49:18
| 287,374,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
from django.db import models
class Employee(models.Model):
name = models.CharField(max_length=120)
username = models.CharField(max_length=120)
password = models.CharField(max_length=120)
address = models.CharField(max_length=120)
email = models.CharField(max_length=120)
def __str__(self):
return self.name
|
[
"danliu277@gmail.com"
] |
danliu277@gmail.com
|
db5af6c9ccb8290c2b3765b621537f4f20a2bf9b
|
fc85f6e336d4d5624af45d58e4d2b6a7b6edafaf
|
/image_diet/tests/test_commands.py
|
2948470b1b1960159a208d6f7081a138281c1f53
|
[
"MIT"
] |
permissive
|
ArabellaTech/django-image-diet
|
2d0cf77369035c2ebfe6685e2f4ffe347507e092
|
dcf904d89f65a5123509a0718ef3758ea5674579
|
refs/heads/master
| 2020-04-05T22:54:17.661895
| 2016-07-25T07:35:21
| 2016-07-25T07:35:21
| 31,337,651
| 1
| 1
| null | 2016-07-25T07:12:47
| 2015-02-25T21:52:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
import os
from os.path import join, dirname
from shutil import copyfile
from django.test import TestCase
from image_diet.management.commands import diet_images
TEST_DIR = join(dirname(__file__), 'test_files')
class DietCommandTest(TestCase):
def setUp(self):
image_path = join(TEST_DIR, 'stockholm.jpg')
self.nested_dir = join('dir1', 'dir2', 'dir3')
self.test_root_dir = join(TEST_DIR, 'dir1')
os.makedirs(join(TEST_DIR, self.nested_dir))
self.test_image_path = join(TEST_DIR, self.nested_dir, 'stockholm.jpg')
copyfile(image_path, self.test_image_path)
def tearDown(self):
os.remove(self.test_image_path)
os.chdir(TEST_DIR)
os.removedirs(self.nested_dir)
def test_diet_images(self):
old_size = os.stat(self.test_image_path).st_size
action = diet_images.Command()
action.handle(self.test_root_dir)
new_size = os.stat(self.test_image_path).st_size
self.assertTrue(new_size < old_size)
|
[
"markos@gaivo.net"
] |
markos@gaivo.net
|
d5f4573a4e213f3cc96fd81d923189be3a18f7b8
|
e7b5561944ca0cbec321110e17618815e4ff627c
|
/backend/app/migrations/garpix_notify/0002_auto_20210720_2244.py
|
9371a081124611fbdb04a0a27df63a7bec829035
|
[] |
no_license
|
AlexandrMikhailovich/cms_test3
|
3a6ac4be10ef7ae5bda2bfdaf2ff38ad9bc5c641
|
1579f853cc2c526f0fdaab9f14baf9659c23d178
|
refs/heads/master
| 2023-06-23T01:34:19.806972
| 2021-07-21T06:08:39
| 2021-07-21T06:08:39
| 388,012,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,345
|
py
|
# Generated by Django 3.1 on 2021-07-20 19:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0012_alter_user_first_name_max_length'),
('garpix_notify', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='notifyuserlistparticipant',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='user_lists', to=settings.AUTH_USER_MODEL, verbose_name='Пользователь (получатель)'),
),
migrations.AddField(
model_name='notifyuserlistparticipant',
name='user_list',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='participants', to='garpix_notify.notifyuserlist', verbose_name='Список пользователей для рассылки'),
),
migrations.AddField(
model_name='notifyuserlist',
name='user_groups',
field=models.ManyToManyField(blank=True, to='auth.Group', verbose_name='Группы пользователей'),
),
migrations.AddField(
model_name='notifytemplate',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='templates', to='garpix_notify.notifycategory', verbose_name='Категория'),
),
migrations.AddField(
model_name='notifytemplate',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь (получатель)'),
),
migrations.AddField(
model_name='notifytemplate',
name='user_lists',
field=models.ManyToManyField(blank=True, to='garpix_notify.NotifyUserList', verbose_name='Списки пользователей, которые получат копию уведомления'),
),
migrations.AddField(
model_name='notifyerrorlog',
name='notify',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='logs', to='garpix_notify.notify', verbose_name='Notify'),
),
migrations.AddField(
model_name='notify',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notifies', to='garpix_notify.notifycategory', verbose_name='Категория'),
),
migrations.AddField(
model_name='notify',
name='files',
field=models.ManyToManyField(to='garpix_notify.NotifyFile', verbose_name='Файлы'),
),
migrations.AddField(
model_name='notify',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='notifies', to=settings.AUTH_USER_MODEL, verbose_name='Пользователь (получатель)'),
),
]
|
[
"Alexandr1990@gitlab.com"
] |
Alexandr1990@gitlab.com
|
fb7d9981d74ca20b240ec0f878f8bdfe495d0c7a
|
92fd68a8a6ac6e3946e9515cba58a626339552e8
|
/machineLearning_1/ML-Model_example1_ch.19.py
|
716f7b49d3a8ae97472ee42a00808649fb20c377
|
[] |
no_license
|
dlichtb/python_ml_deepLearning
|
9dac116e4de1278598a420a33bdf089a4706d28e
|
f9d6d03b30b9c0e721d6cd3a4833488ca8bc0cff
|
refs/heads/master
| 2020-04-25T11:06:34.326040
| 2019-02-26T20:48:13
| 2019-02-26T20:48:13
| 172,733,989
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,031
|
py
|
#!/usr/bin/env python
import sys
import scipy
import numpy
import matplotlib
import pandas# ### Used for EXPLORATORY/DESCRIPTIVE/DATA-VIZUALIZATION statistics
import sklearn
print('Python: {}'.format(sys.version))
print('scipy: {}'.format(scipy.__version__))
print('numpy: {}'.format(numpy.__version__))
print('matplotlib: {}'.format(matplotlib.__version__))
print('pandas: {}'.format(pandas.__version__))
print('sklearn: {}'.format(sklearn.__version__))
print('##############################################################################')
print('')
### 1. LOAD DATA:
#########################
# 1.1: Import Library Modules/Functions/Objects
# Load libraries
from pandas import read_csv
from pandas import set_option  # needed by the set_option(...) calls below
from pandas.plotting import scatter_matrix  # pandas.tools.plotting was removed in newer pandas
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# 1.2: Load Dataset
# Load dataset
filename = '____.csv'
names = ['', '', '', '']
dataset = read_csv(filename, names=names)
print('##############################################################################')
print('')
#################################################################################################
#################################################################################################
#################################################################################################
### 2. SUMMARIZE DATA:
##############################
# 2.1: Dimensions of the dataset
print('SHAPE(ROWS, COLUMNS):', dataset.shape)
# 2.2: Data-types of each attribute
set_option('display.max_rows', 500)
print('ATTRIBUTE DATA-TYPES:')
print(dataset.dtypes)
print('')
# 2.2: Peek at the data itself
set_option('display.width', 100)
print('HEAD(20):')
print(dataset.head(20))
print('')
# 2.3: Summarize ATTRIBUTE-DISTRIBUTION
# - Change precision to 3 places
set_option('precision', 3)
print(dataset.describe())
print('##############################################################################')
print('')
# 2.4: Breakdown of the data by the CLASS variable: Class Distribution
print(dataset.groupby(60).size())
# ##############################################################################
# ##############################################################################
# OR
# ##############################################################################
# ##############################################################################
# from pandas import read_csv
# filename = '____.csv'
# names = ['', '', '', '']
# data = read_csv(filename, names = names)
# class_counts = data.groupby('class').size()
# print(class_counts)
print('##############################################################################')
print('')
# 2.5: Statistical summary of all attributes Statistical Summary(Attribute-x) = Count, Mean, Std.Dev, Min.Value, 25th Percentile, 50th Percentile, 75th Percentile, Max.Value
print('STATISTICAL SUMMARY FOR EACH COLUMN/ATTRIBUTE:')#set_option('precision', 1)
print(dataset.describe())
print('')
# 2.6: Taking a look at the correlation between all of the numeric attributes
# CORRELATIONS
# Assess where 'LSTAT' has highest |%|-correlation to an output-variable
# set_option('precision', 2)
# print(dataset.corr(method = 'pearson'))
# ##############################################################################
# ##############################################################################
# OR
# ##############################################################################
# ##############################################################################
# PAIRWISE PEARSON CORRELATION:
# from pandas import read_csv
# from pandas import set_option
# filename = '____'
# names = ['', '', '', '', ''] # Attribute/Column Names
# data = read_csv(filename, names = names)
# set_option('display.width', 100)
# set_option('precision', 3)
# correlations = data.corr(method = 'pearson')
# print(correlations)
#print('##############################################################################')
#print('')
#################################################################################################
#################################################################################################
#################################################################################################
### 3. DATA VISUALIZATION:
##################################
# 3.1: Univariate/Unimodal Plots
# i) Attribute-based HISTOGRAMS
# HISTOGRAM PLOT
#dataset.hist()
#plt.show()
dataset.hist(sharex = False, sharey = False, xlabelsize = 1, ylabelsize = 1)
pyplot.show()
# from matplotlib import pyplot
# from pandas import read_csv
# filename = '____.csv'
# names = ['', '', '', '']
# data = read_csv(filename, names = names)
# data.hist()
# pyplot.show()
print('##############################################################################')
print('')
# ii) Density-Plots to determine Attribute-Distributions
# Attribute-based DENSITY-PLOT Distributions
dataset.plot(kind = 'density', subplots = True, layout = (8,8), sharex = False, legend = False, fontsize = 1)
pyplot.show()
# from matplotlib import pyplot
# from pandas import read_csv
# filename = '____.csv'
# names = ['', '', '', '']
# data = read_csv(filename, names = names)
# dataset.plot(kind = 'density', subplots = True, layout = (3,3), sharex = False, legend = False, fontsize = 1)
# pyplot.show()
print('##############################################################################')
print('')
# iii) BOX & WHISKER PLOTS
dataset.plot(kind='box', subplots=True, layout=(8,8), sharex=False, sharey=False, fontsize = 1)
pyplot.show()
# from matplotlib import pyplot
# from pandas import read_csv
# filename = '____.csv'
# names = ['', '', '', '']
# data = read_csv(filename, names = names)
# dataset.plot(kind='box', subplots=True, layout=(3,3), sharex=False, sharey=False, fontsize = 1)
# pyplot.show()
print('##############################################################################')
print('')
##plt.show()
# 3.4: SKEW for UNIVARIATE-DISTRIBUTIONS
# Skew/Attribute
from pandas import read_csv
filename = '___'
names = ['', '', '', '']
data = read_csv(filename, names = names)
skew = data.skew()
print(skew)
print('##############################################################################')
print('')
# 3.2: Multivariate/Multimodal Plots: - Intersections between variables
# i) SCATTER-PLOT MATRIX
# - Represents relationship between 2-variables as a 2-Dimm-dot
# - A series/sequence of scatter-plots for multiple variable-pairs = Scatter-Plot Matrix
# from matplotlib import pyplot
# from pandas import read_csv
# import numpy
# filename = '___.csv'
# names = ['', '', '', '']
# data = read_csv(filename, names = names)
# scatter_matrix(dataset)
# pyplot.show()
scatter_matrix(dataset)
pyplot.show()
#print('##############################################################################')
#print('')
#################################################################################################
#################################################################################################
#################################################################################################
### 4. EVALUATING ALGORITHMS:
#####################################
# 4.1: Isolate VALIDATION/TESTING-Set
# a) Create VALIDATION/TESTING-Set
# SLIT-OUT (Validation / Testing set)
array = dataset.values
X = array[:,0:4]
Y = array[:,4]
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size = validation_size, random_state = seed)
# 4.2: Configure TEST-HARNESS to use K(10)-FOLD CROSS-VALIDATION on ML-Models
# [a) Build ML-Models] >> [b) Build 5 ML-Models: Predicting species from Flower-Measurements/Attributes] >> [c) Select best ML-Model]
# SPOT-CHECK ML-Models/Algorithms
models = []
models.append(( ' LR ' , LogisticRegression()))
models.append(( ' LDA ' , LinearDiscriminantAnalysis()))
models.append(( ' KNN ' , KNeighborsClassifier()))
models.append(( ' CART ' , DecisionTreeClassifier()))
models.append(( ' NB ' , GaussianNB()))
models.append(( ' SVM ' , SVC()))
# evaluate each model in turn
results = []
names = []
for name, model in models:
kfold = KFold(n_splits=10, random_state=seed)
    cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
# output results to evaluate and select best ML-Model/Algorithm
print(msg)
print('##############################################################################')
print('')
# 5: COMPARE ALGORITHMS:
##########################################
# Compare Algorithms
fig = pyplot.figure()
fig.suptitle( ' Algorithm Comparison ' )
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(names)
pyplot.show()
print('##############################################################################')
print('')
# 5: MAKE PREDICTIONS:
########################################
# Make predictions on validation dataset
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
predictions = knn.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
print('##############################################################################')
print('')
|
[
"noreply@github.com"
] |
dlichtb.noreply@github.com
|
8e6db07c7045df813af567ff7094d94f80b3b8c6
|
fb6482e5d6c6f93b6d04165048e32ba47ab0f605
|
/CSCI 127/Labs/lab12.py
|
ba610d51fe0d1d80d44ec6d31c607ae79398ced5
|
[] |
no_license
|
Risauce/Pre2015Code
|
3a5a13bc38769708b151b9a790cf7ccfc0251574
|
fc0e641e7bebbaeec8202550ece880b98b48c1fc
|
refs/heads/master
| 2020-08-28T09:33:18.171859
| 2019-10-26T05:54:24
| 2019-10-26T05:54:24
| 217,662,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,931
|
py
|
import numpy as np
import matplotlib.pyplot as plt
# -----------------------------------------------------
# CSCI 127, Lab 12
# November 21, 2017
# Your Name
# -----------------------------------------------------
def read_file(name):
input_file = open(name, "r")
number_buckets = int(input_file.readline())
total_counties = int(input_file.readline())
county_populations = np.zeros([total_counties], dtype="int")
for county_number in range(total_counties):
line = input_file.readline().split(",")
county_populations[county_number] = int(line[1])
county_populations.sort()
input_file.close()
return number_buckets, county_populations
# -----------------------------------------------------
def print_summary(averages):
print("Population Grouping Summary")
print("---------------------------")
for grouping in range(len(averages)):
print("Grouping", grouping + 1, "has a population average of",
averages[grouping])
# -----------------------------------------------------
# Do not change anything above this line
# -----------------------------------------------------
def calculate_averages(number_buckets, county_populations):
    # Split the sorted populations into number_buckets equal-sized groupings and
    # return the average of each (assumes the county count divides evenly, as in
    # the provided data file).
    bucket_size = len(county_populations) // number_buckets
    averages = []
    for i in range(number_buckets):
        bucket = county_populations[i * bucket_size:(i + 1) * bucket_size]
        averages.append(np.average(bucket))
    return averages
# -----------------------------------------------------
def graph_summary(averages):
pass
# -----------------------------------------------------
number_buckets, county_populations = read_file("montana-counties.txt")
averages = calculate_averages(number_buckets, county_populations)
print_summary(averages)
graph_summary(averages)
|
[
"noreply@github.com"
] |
Risauce.noreply@github.com
|
a3999bae68200d01e0d1c7cfcc0ba9cd188bd945
|
9d8bd40159f119cea1c2c3fd86743c5bc1d9907c
|
/examples/spatial_filter.py
|
45583d7527bc32d44ff66ff560834c812c30a7f8
|
[
"BSD-3-Clause"
] |
permissive
|
mihaieduard/Diffraction-Simulations--Angular-Spectrum-Method
|
70757b315d06de40c7914184b4015e53a5f3dd1f
|
4ec7abcc1a726c5e7b65d05455cab2467fdca9cf
|
refs/heads/main
| 2023-03-23T16:46:52.097425
| 2021-03-19T18:36:12
| 2021-03-19T18:36:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
from diffractsim import MonochromaticField, nm, mm, cm
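# Demo (description inferred from the calls below): a gaussian beam with added
# spatial noise is cleaned up by a 4f-style spatial filter - lens, circular
# pinhole at the focal plane, second lens - and the resulting field is plotted.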
F = MonochromaticField(
wavelength=632.8 * nm, extent_x=25. * mm, extent_y=25. * mm, Nx=2000, Ny=2000,power = 0.1
)
F.add_gaussian_beam(0.7*mm)
F.add_spatial_noise(noise_radius = 2.2*mm, f_mean = 1/(0.2*mm), f_size = 1/(0.5*mm), A = 0.2, N= 50)
F.add_lens(f = 50*cm)
F.propagate(50*cm)
F.add_circular_slit( 0, 0, 0.28*mm)
F.propagate(50*cm)
F.add_lens(f = 50*cm)
F.propagate(30*cm)
rgb = F.get_colors()
F.plot(rgb, xlim=[-2.5,2.5], ylim=[-2.5,2.5])
|
[
"rafael.fuente.herrezuelo@gmail.com"
] |
rafael.fuente.herrezuelo@gmail.com
|
05fc046d63ad0da119f177a76e959f80d9d8f37b
|
d184d1fc998a300feee2d716d97209b9fbc78468
|
/probability.py
|
dbeb07713ae4103f2e739fabfa5eb51dd35d80c9
|
[] |
no_license
|
MickeyKen/plot_node_master_thesis
|
df196d7a037b1960c1ee95268a1ae3b1e8f24148
|
5182ea79cb8cfbc6bead60d97eda9307f7e53c10
|
refs/heads/master
| 2023-02-16T21:17:49.284973
| 2021-01-19T09:19:40
| 2021-01-19T09:19:40
| 330,574,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,316
|
py
|
#!/usr/bin/python
import matplotlib.pyplot as plt
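# Plots, for blocks of AVERAGE_NUM episodes, the average probability of success,
# collision and "past 150 steps" outcomes across ACTOR_NUM actors, parsed from a
# training log (format inferred from the parsing loop below).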
path = 'data/param_UD-v95_output.txt'
isServiceCount = True
ACTOR_NUM = 3
AVERAGE_NUM = 100
LIMIT = 5000
if __name__ == '__main__':
collision = [[] for j in range(ACTOR_NUM)]
average_collision = []
success = [[] for j in range(ACTOR_NUM)]
average_success = []
no_action = [[] for j in range(ACTOR_NUM)]
average_no_action = []
eps = []
average_eps = []
epsilons = [[] for j in range(ACTOR_NUM)]
flag = 0
count = 0
fig = plt.figure(figsize=(8.27,3.9), dpi=100)
plt.ion()
plt.xlabel('Episode')
# plt.ylabel('P')
plt.grid()
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
with open(path) as f:
for s_line in f:
eps_num = int(s_line.split(',')[0])
actor_num = int(s_line.split(',')[1])
step = int(s_line.split(',')[3])
reward = float(s_line.split(',')[5])
if step < 150 and reward < -200:
collision[actor_num].append(1.0)
success[actor_num].append(0.0)
no_action[actor_num].append(0.0)
elif step < 150 and reward > 0:
collision[actor_num].append(0.0)
success[actor_num].append(1.0)
no_action[actor_num].append(0.0)
else:
collision[actor_num].append(0.0)
success[actor_num].append(0.0)
no_action[actor_num].append(1.0)
collision_sum = 0.0
success_sum = 0.0
no_action_sum = 0.0
average_collision_sum = 0.0
average_success_sum = 0.0
average_no_action_sum = 0.0
count = 1
for index in range(min(len(v) for v in collision)):
collision_sum = 0.0
success_sum = 0.0
no_action_sum = 0.0
if index <= LIMIT:
for n in range(ACTOR_NUM):
collision_sum += collision[n][index]
success_sum += success[n][index]
no_action_sum += no_action[n][index]
average_collision_sum += collision_sum / float(ACTOR_NUM)
average_success_sum += success_sum / float(ACTOR_NUM)
average_no_action_sum += no_action_sum / float(ACTOR_NUM)
if index % AVERAGE_NUM == 0 and index > 0:
average_eps.append(count*AVERAGE_NUM)
average_collision.append(average_collision_sum / float(AVERAGE_NUM))
average_success.append(average_success_sum / float(AVERAGE_NUM))
average_no_action.append(average_no_action_sum / float(AVERAGE_NUM))
average_collision_sum = 0.0
average_success_sum = 0.0
average_no_action_sum = 0.0
count += 1
eps.append(index + 1)
plt.plot(average_eps, average_success, color='#e41a1c', label="success")
plt.plot(average_eps, average_collision, color='#00529a', label="collision")
plt.plot(average_eps, average_no_action, color='#3FBF00', label="past 150 steps")
plt.legend( loc='upper left', borderaxespad=1)
plt.draw()
fig.savefig("result_multi_probability.png")
plt.pause(0)
|
[
"mickey333ff@gmail.com"
] |
mickey333ff@gmail.com
|
145e5904cf2bc4e6e47030788b2461978b486ece
|
6318f1458f9c6cca91cb00aa415638a599d8ba26
|
/arcade/python/arcade-theCore/11_SpringOfIntegration/091_Combs.py
|
ec81b4e9bfbc202b226d08d5d49310be3d66ef37
|
[
"MIT"
] |
permissive
|
netor27/codefights-solutions
|
836016a048086cd2bc644b2c40b7686102b6f179
|
69701ab06d45902c79ec9221137f90b75969d8c8
|
refs/heads/master
| 2021-10-28T13:04:42.940059
| 2019-01-16T23:12:08
| 2019-01-16T23:12:08
| 110,753,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
'''
Miss X has only two combs in her possession, both of which are old and miss a tooth or two. She also has many purses of different length, in which she carries the combs. The only way they fit is horizontally and without overlapping. Given teeth' positions on both combs, find the minimum length of the purse she needs to take them with her.
It is guaranteed that there is at least one tooth at each end of the comb.
It is also guaranteed that the total length of two strings is smaller than 32.
Note, that the combs can not be rotated/reversed.
Example
For comb1 = "*..*" and comb2 = "*.*", the output should be
combs(comb1, comb2) = 5.
Although it is possible to place the combs like on the first picture, the best way to do this is either picture 2 or picture 3.
'''
def combs(comb1, comb2):
n1, n2 = len(comb1), len(comb2)
res = n1 + n2
m1, m2 = mask(comb1), mask(comb2)
for i in range(n1 + 1):
if (m2 << i) & m1 == 0:
temp = max(n2 + i, n1)
if temp < res:
res = temp
for i in range(n2 + 1):
if (m1 << i) & m2 == 0:
temp = max(n1 + i, n2)
if temp < res:
res = temp
return res
def mask(s):
r = 0
for c in s:
digit = 0
if c == '*':
digit = 1
r = (r << 1) + digit
return r
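# Quick check against the example in the problem statement above:
#   combs("*..*", "*.*")  ->  5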
|
[
"neto.r27@gmail.com"
] |
neto.r27@gmail.com
|
466524b45d53aaa85ccff0695a52ed8c641b06bf
|
8a0297dbf9b90f001077ba487f6d7c9263e1242b
|
/setup.py
|
0205937a6520d9311cb7270c3353578328a97bbe
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
shea256/pybitcointools
|
b2173fe2735229380384460315f00185e3310d3c
|
f7223208e5ce260f27a447fcef22a98957e938c2
|
refs/heads/master
| 2021-01-17T11:52:18.686497
| 2013-12-10T02:13:02
| 2013-12-10T02:13:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='pybitcointools',
version='1.0',
description='Python Bitcoin Tools',
author='Vitalik Buterin',
author_email='vbuterin@gmail.com',
url='http://github.com/vbuterin/pybitcointools',
packages=['pybitcointools'],
scripts=['pybtctool']
)
|
[
"vub@gmail.com"
] |
vub@gmail.com
|
9f9baa8b7fa3b27ad62e12bd7f6621fcb8b83ba6
|
9bbf1cb7b0cd2444f2830efb696640ad42a2bfd4
|
/python/__init__.py
|
611fda76681e56175d3830d41a6a4dd31dbb5d14
|
[] |
no_license
|
idaohang/KalmanAnalyzer
|
aad4dfd209c1c160a5cdd8258d5ee77a01bfc769
|
12cdfc1f8ff480c2a2b8a5ca795eb982e1936ed9
|
refs/heads/master
| 2021-01-22T08:19:11.261942
| 2014-11-17T13:25:14
| 2014-11-17T13:25:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
#Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/UserCode/KalmanAnalyzer/',1)[0])+'/cfipython/slc6_amd64_gcc472/UserCode/KalmanAnalyzer')
|
[
"e.bouvier@ipnl.in2p3.fr"
] |
e.bouvier@ipnl.in2p3.fr
|
b31250d3654352faa232e299e85343692965b7ff
|
1951c50108892a1b89777749dd951cf49a4361ae
|
/blog/__init__.py
|
696460cf82c714d5d021fca0e6d398d58b4308b0
|
[] |
no_license
|
bluewhale1207/myblog
|
3e04c7b4a3d598d52890624a361b16cc752250d9
|
e8351cf68b36dfdbd8290cffaaa0915fc182a1b7
|
refs/heads/master
| 2016-09-05T16:16:30.627316
| 2015-11-16T11:25:07
| 2015-11-16T11:25:07
| 31,940,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
# -*- coding: utf-8 -*-
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from blog.models.model_user import User
app = Flask(__name__)
app.config.from_object(os.environ['BLOG_SETTINGS'])
# 登录
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = '.user_login_required'
login_manager.login_message = u'请登录'
db = SQLAlchemy()
db.init_app(app)
@login_manager.user_loader
def load_user(username):
return User.query.filter_by(name=username).first()
from blog.views import general
from blog.views import user
app.register_blueprint(general.mod)
app.register_blueprint(user.mod)
|
[
"liushujie@papayamobile.com"
] |
liushujie@papayamobile.com
|
b2a3440874508491785688101a4108cfd7f6edcc
|
5d2ad10a424fd71cc2f12e1ca78d278362435c3b
|
/Day4/newPkg/src/myPkg/scripts/comp_sub.py
|
4617429fc49187b0c83bde193dc654ee8ec0815b
|
[] |
no_license
|
IsraaFahmy/ROS-training-
|
ec7034f55692c062ed42aa2cd9a63d9201db67e8
|
61924149a170292b9e7b049cfd704ed807c01e9a
|
refs/heads/master
| 2022-12-03T00:51:11.341733
| 2020-08-23T22:06:41
| 2020-08-23T22:06:41
| 287,059,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
#!/usr/bin/env python
import rospy
from myPkg.msg import comp
def callback(message):
rospy.loginfo("complex number recieved: %d + %d i", message.real,message.imaginary)
rospy.init_node('comp_node2', anonymous=True)
rospy.Subscriber("comp_topic", comp, callback)
rospy.spin()
|
[
"israafahmy@aucegypt.edu"
] |
israafahmy@aucegypt.edu
|
2baa1ddb9774e7deee027fc888daf5f6d3280f5e
|
a63bd0c2b9ce527a163ebc7f326316dc1d8c50b0
|
/tests/fileSizeTest.py
|
b4346abaa5df91e031f403931282400d1a85f57c
|
[] |
no_license
|
jan-polaczek/isodBot-demo
|
902c20d49ffce225736f82a696fef69e914a7c44
|
e963b1835f6706f526249f83237223557ef27f02
|
refs/heads/master
| 2020-06-04T16:49:33.686587
| 2019-06-15T18:20:45
| 2019-06-15T18:20:45
| 192,110,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,433
|
py
|
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from Registration import *
def run():
errorMessages = {}
number_of_test = 5
counter = 0
if Registration.fileSize('anonek.tst') == 1:
errorMessages['anonek'] = 'test: podano tylko login'
elif Registration.fileSize('anonek.tst') == 2:
errorMessages['anonek'] = 'test: podano login i hasło'
elif Registration.fileSize('anonek.tst') > 2:
errorMessages['anonek'] = 'test: za dużo linijek w pliku'
else:
#errorMessages['anonek'] = 'test: plik nie istnieje'
counter += 1
if Registration.fileSize('balasm.tst') == 1:
errorMessages['balasm'] = 'test: podano tylko login'
elif Registration.fileSize('balasm.tst') == 2:
errorMessages['balasm'] = 'test: podano login i hasło'
elif Registration.fileSize('balasm.tst') > 2:
#errorMessages['balasm'] = 'test: za dużo linijek w pliku'
counter += 1
else:
errorMessages['balasm'] = 'test: plik nie istnieje'
if Registration.fileSize('boguszj.tst') == 1:
errorMessages['boguszj'] = 'test: podano tylko login'
elif Registration.fileSize('boguszj.tst') == 2:
errorMessages['boguszj'] = 'test: podano login i hasło'
elif Registration.fileSize('boguszj.tst') > 2:
#errorMessages['boguszj'] = 'test: za dużo linijek w pliku'
counter += 1
else:
errorMessages['boguszj'] = 'test: plik nie istnieje'
if Registration.fileSize('polaczej.tst') == 1:
#errorMessages['polaczej'] = 'test: podano tylko login'
counter += 1
elif Registration.fileSize('polaczej.tst') == 2:
errorMessages['polaczej'] = 'test: podano login i hasło'
elif Registration.fileSize('polaczej.tst') > 2:
errorMessages['polaczej'] = 'test: za dużo linijek w pliku'
else:
errorMessages['polaczej'] = 'test: plik nie istnieje'
if Registration.fileSize('ktokolwiek.tst') == 1:
errorMessages['ktokolwiek'] = 'test: podano tylko login'
elif Registration.fileSize('ktokolwiek.tst') == 2:
errorMessages['ktokolwiek'] = 'test: podano login i hasło'
elif Registration.fileSize('ktokolwiek.tst') > 2:
errorMessages['ktokolwiek'] = 'test: za dużo linijek w pliku'
else:
#errorMessages['ktokolwiek'] = 'test: plik nie istnieje'
counter += 1
errorMessages['ilość testów'] = number_of_test
errorMessages['ilość testów zaliczonych'] = counter
return errorMessages
|
[
"jan.polaczek@interia.pl"
] |
jan.polaczek@interia.pl
|
92167807a2c7f1bf7c9b2fa2d8c101cf3984620c
|
2f1a2a175bd3b6ef646b6329169dda18127e34b2
|
/todoclass/urls.py
|
2397da1dd7a14e1820678879959d92ca02b8dd8b
|
[] |
no_license
|
Alisher007/todomain
|
13608ca796a47b69f86ca709c1fafd2b424978b4
|
cdd08a21f2bdd0b80bf2f6ae2ebc6825ed760869
|
refs/heads/master
| 2022-12-14T04:50:09.869081
| 2020-09-11T11:47:00
| 2020-09-11T11:47:00
| 294,675,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
from django.contrib import admin
from django.urls import path
from .views import TodoListView, TodoDetailView, TodoCreateView, TodoUpdateView, TodoDeleteView
app_name = 'todoclass'
urlpatterns = [
path('', TodoListView.as_view(), name='list'),
path('create/', TodoCreateView.as_view(), name='create'),
path('<int:pk>/', TodoDetailView.as_view(), name='detail'),
path('<int:pk>/update/', TodoUpdateView.as_view(), name='update'),
path('<int:pk>/delete/', TodoDeleteView.as_view(), name='delete'),
]
|
[
"alisher.khalikulov@jaresorts.com"
] |
alisher.khalikulov@jaresorts.com
|
261f0667d897b235c0624553e90745571f971418
|
43e8e14e9ffa1a85d4383334d1e9bd0a041131fb
|
/setup.py
|
5fc4d7d525f17172d05a6ce9bc7d98c975aaafbf
|
[
"BSD-3-Clause"
] |
permissive
|
Python3pkg/PandaRSS
|
a952d24762ceec0e65a44859590a6e9e49ae49fb
|
8e8727744c8a876b314879193ae01422831a76dd
|
refs/heads/master
| 2021-01-21T17:38:48.551614
| 2017-05-21T17:48:59
| 2017-05-21T17:48:59
| 91,976,509
| 0
| 0
| null | 2017-05-21T17:48:57
| 2017-05-21T17:48:57
| null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
#!/usr/bin/python
from setuptools import setup, find_packages
import pandarss
install_requires = [
'Twisted>=15.0.0',
'bottle>=0.12.7'
]
package_data={
'pandarss': [
'views/css/*',
'views/js/*',
'views/*.html'
]
}
setup(name='pandarss',
version='0.2',
author='pandaman',
author_email='pandaman1999@foxmail.com',
url='https://github.com/PandaPark/PandaRSS',
license='BSD',
description='ToughRADIUS Self-service Portal',
long_description=open('README.md').read(),
classifiers=[
'Development Status :: 6 - Mature',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Systems Administration :: Authentication/Directory',
],
packages=find_packages(),
package_data=package_data,
keywords=['radius','toughradius','self-service ','pandarss'],
zip_safe=True,
include_package_data=True,
eager_resources=['pandarss'],
install_requires=install_requires,
entry_points={
'console_scripts': [
'pandarss = pandarss.pandarss:main',
'pandarss_txrun = pandarss.pandarss:txrun',
]
}
)
|
[
"pandaman1999@foxmail.com"
] |
pandaman1999@foxmail.com
|
add2368027110c4b923645c5840c3a6f70084c32
|
a354f18367975097f0b19de816e763425e31f599
|
/lists/admin.py
|
ea9e3c8fc9bfcb1226d740be04827446a4be89a3
|
[] |
no_license
|
egibney/listapp
|
56f02c4a3311059aca0c73933241bff4d01f177a
|
f15875c304ff622985eb2dad7b8a20cc4def8b3f
|
refs/heads/master
| 2021-01-22T17:47:59.312735
| 2012-09-14T03:04:55
| 2012-09-14T03:04:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
from lists.models import List
from lists.models import Item
from django.contrib import admin
admin.site.register(List)
admin.site.register(Item)
|
[
"epgibney@gmail.com"
] |
epgibney@gmail.com
|
85e36f729f2df6e0df523fa8d40795f65a763c64
|
44576656e6be64c8a8f6823f989ecaae1ffc32c8
|
/blog/migrations/0007_auto_20200225_2025.py
|
456bf4d0c25b73c63c0ce130bb7946c4d9bdca5f
|
[] |
no_license
|
Rainysponge/mysite
|
ecbaf5d08a0b4863894e9037af82d4c7b18818a7
|
4ee8aff0c5b90a91041853cea0a14d2a3d063144
|
refs/heads/master
| 2021-01-26T01:56:21.129558
| 2020-02-26T13:37:10
| 2020-02-26T13:37:10
| 243,265,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# Generated by Django 2.1.15 on 2020-02-25 20:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0006_auto_20200212_1741'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='content',
field=models.TextField(),
),
]
|
[
"375364412@qq.com"
] |
375364412@qq.com
|
9041c058921688f8d5835b092cb95e45d74fffcf
|
042f3881f11f9fc7f7d70aa8d7822c40f21c8fd0
|
/crankycoin/__init__.py
|
5526dd401e47ec089c0926d4f6b50b5e590db7c3
|
[
"MIT"
] |
permissive
|
benthomasson/crankycoin
|
1e3801c06a1e9e217de0a171f2b6d5f6926d2446
|
37dc3048cef9b17745da0d21b0c9095a081a87a0
|
refs/heads/master
| 2021-01-01T15:21:37.481370
| 2017-07-17T09:00:04
| 2017-07-17T09:00:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
from block import *
from blockchain import *
from node import *
from wallet import *
from errors import *
|
[
"cranklin@gmail.com"
] |
cranklin@gmail.com
|
a5e6bca6ad202cb0a10c51af6c42e62ce5c65b3c
|
a113ca707799131092e5e5ad9feb71e69c3659e7
|
/Employee_project/urls.py
|
7816a4acebf308e0c7be98e0f351ad69e90bb4b0
|
[] |
no_license
|
thuytran-team/Employee_Project
|
648680e9a1fb9ab7827ae17d21b569b05e007ccc
|
6bf588b5d294da12a5cfb380b062203bfd68f9e2
|
refs/heads/master
| 2022-12-17T06:53:39.358047
| 2020-09-12T22:55:12
| 2020-09-12T22:55:12
| 295,039,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
"""Employee_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('employee/',include('employee_register.urls'))
]
|
[
"thuytran898@gmail.com"
] |
thuytran898@gmail.com
|
2d2699072eae36c651fe088d627f69f90b657d58
|
b39ec77a8f5a5779edcecf5a09c39224472fd139
|
/Clase03/contar.py
|
0b60d0a7381ff2765efd0891e4b3ce879ffe2a47
|
[] |
no_license
|
GonzaloMonteodorisio/ejercicios-python-unsam
|
76b6288491ccba8f44b819c26bed4811268e995e
|
37ba16197107717a4c582eb552175e1c981c286b
|
refs/heads/main
| 2023-07-28T07:18:10.178029
| 2021-09-15T05:42:46
| 2021-09-15T05:42:46
| 406,627,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 56
|
py
|
for n in range(10,0,-1):
print(n, end=' ')
|
[
"gonzalomonteodorisio@gmail.com"
] |
gonzalomonteodorisio@gmail.com
|
f723cbdc7b1832a6b9940919ccfcb541b77cc299
|
a6b010255c544b51edef707fa675a2f2f120c159
|
/_site/lib/ml_level.py
|
4e13a8dd97eb2bbb2361ca372f122f4491c5faa6
|
[] |
no_license
|
StonyBrookNLP/irene
|
f30d3dcdc5b336f4816c37017d6cbfd9d4eb80a5
|
54862c65f65bd4eb302344e110401d45c36af49c
|
refs/heads/master
| 2023-07-18T03:54:40.400352
| 2021-09-08T22:01:28
| 2021-09-08T22:01:28
| 371,047,659
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,211
|
py
|
import argparse
from typing import Tuple, List, Dict
import json
import copy
import pickle # may be change to dill?
from collections import defaultdict
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
from lib.tree_node import TreeNode
np.random.seed(13370)
def train_linear_regressor(features: np.array, ground_truths: np.array) -> Tuple:
"""
Scales data and trains a simple linear regressor.
"""
regressor = linear_model.LinearRegression()
scale_standardizer = StandardScaler().fit(features)
features = scale_standardizer.transform(features)
transformations = [scale_standardizer]
regressor = regressor.fit(features, ground_truths)
return regressor, transformations
def predict_linear_regressor(
regressor, transformations: List, features: np.array, ids: List[str]
) -> Dict[str, float]:
"""
Generates prediction using trained regressor on the passed features
and returns a dictionary of id to predictions.
"""
for transformation in transformations:
features = transformation.transform(features)
predicted_outputs = regressor.predict(features)
id_to_predicted_values = {
id_: pred for id_, pred in zip(list(ids), list(predicted_outputs))
}
return id_to_predicted_values
def train_ml_level_models(train_trees: List[TreeNode]) -> Tuple[Dict, Dict]:
"""
Trains ML-level regressor on the leaf nodes of training trees and outputs
trained regressor and scalars.
"""
operationwise_ml_level_instances = defaultdict(list)
for tree in train_trees:
for operation_type, ml_level_instances in tree.get_ml_level_data().items():
operationwise_ml_level_instances[operation_type].extend(ml_level_instances)
operationwise_ml_level_model = {}
operationwise_ml_level_transformations = {}
for operation_type, ml_level_instances in operationwise_ml_level_instances.items():
features = np.stack(
[np.array(instance["features"]) for instance in ml_level_instances], axis=0
)
ground_truths = np.array(
[instance["gold_energy"] for instance in ml_level_instances]
)
regressor, transformations = train_linear_regressor(
features=features, ground_truths=ground_truths
)
operationwise_ml_level_model[operation_type] = regressor
operationwise_ml_level_transformations[operation_type] = transformations
return operationwise_ml_level_model, operationwise_ml_level_transformations
def predict_ml_level_models(
operationwise_ml_level_model: Dict,
operationwise_ml_level_transformations: Dict,
predict_trees: List[TreeNode],
) -> List[TreeNode]:
"""
Runs regressor on the leaf/ml-level nodes of the predic_trees and saves
the predicted_energy field into it. Returns predicted_energy annotated trees.
"""
assert set(operationwise_ml_level_model.keys()) == set(
operationwise_ml_level_transformations.keys()
)
predict_trees = copy.deepcopy(predict_trees)
for predict_tree in predict_trees:
operationwise_ml_level_instances = predict_tree.get_ml_level_data()
for (
operation_type,
ml_level_instances,
) in operationwise_ml_level_instances.items():
if operation_type not in operationwise_ml_level_model:
raise Exception(
f"Given model isn't trained on operation_type {operation_type}"
)
regressor = operationwise_ml_level_model[operation_type]
transformations = operationwise_ml_level_transformations[operation_type]
features = np.stack(
[np.array(instance["features"]) for instance in ml_level_instances],
axis=0,
)
ids = [instance["id"] for instance in ml_level_instances]
id_to_predicted_values = predict_linear_regressor(
regressor, transformations, features, ids
)
predict_tree.update_tree_node_attributes(
"predicted_energy", id_to_predicted_values
)
return predict_trees
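# A minimal usage sketch of the two entry points above, assuming `train_trees` and
# `test_trees` are lists of TreeNode objects obtained elsewhere in the project
# (the variable names here are illustrative, not part of this module):
#
#   models, transforms = train_ml_level_models(train_trees)
#   annotated_trees = predict_ml_level_models(models, transforms, test_trees)
#   # each ML-level node in `annotated_trees` now carries a "predicted_energy" attribute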
|
[
"yklal95@gmail.com"
] |
yklal95@gmail.com
|
cbae2ad04a7ab972f74b8a1132069e8c30ab885a
|
dac5f6f1314fa1b2cc19ccc5e3f6ba35dcb04672
|
/Loop_for.py
|
2be2ca49cbe2584c3de74e2729b3d964c1b88a72
|
[] |
no_license
|
mohamedasa2019/PYTHON-ALL-CODE
|
f0942f3c37847dc301a4e873efdfa279dfa175f0
|
5ab782662c0c4489130b841cc0d953b5ef485bf5
|
refs/heads/master
| 2020-04-30T09:40:20.204921
| 2019-03-20T15:08:01
| 2019-03-20T15:08:01
| 176,754,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
def main():
    L = [1, 3, 4.5, "hi"]  # use a for loop to print each element on its own line
for item in L:
print(item)
if __name__ == '__main__': main()
|
[
"noreply@github.com"
] |
mohamedasa2019.noreply@github.com
|
3c70c973d79447bece9afe2d49c5fd583a4173dd
|
4bfe4afd1b1e11f9a03d8e3640aa297c875c076d
|
/demos/basic.py
|
9a86954581726ae9f13bad67294d6355e90d696a
|
[] |
no_license
|
pankajti/capstone
|
81cdd2187e71e8d1bf327579b574ea7cf91a7e76
|
af57a52d34dbcdd40e8e81f1d72c142263a98893
|
refs/heads/master
| 2021-03-02T09:49:51.054153
| 2020-07-09T02:28:58
| 2020-07-09T02:28:58
| 245,857,468
| 0
| 0
| null | 2020-03-22T00:54:01
| 2020-03-08T17:26:43
| null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from tensorflow.keras.layers import Dense,SimpleRNN
from tensorflow.keras import Sequential
import numpy as np
from tensorflow.keras.utils import plot_model
model =Sequential()
model.add(Dense(2))
model.add(Dense(1))
plot_model(model)
|
[
"pankaj.tiwari2@gmail.com"
] |
pankaj.tiwari2@gmail.com
|
237ae19dcd1861ce4f5ee4f9d3bcf53f20e82e1f
|
ca152095b72ce93b6ca79042084f5ef70c658576
|
/search_hparam.py
|
ae2d717cf0cdbbb6e9de860e789f8fc2287a1fcc
|
[] |
no_license
|
fl16180/SeqModel
|
72806eca1ec21b564262f8d444366a984ede7c64
|
3bba92bc23d0fef55a479f18e731c50e1feed186
|
refs/heads/master
| 2020-12-14T16:49:13.468564
| 2020-05-12T00:24:17
| 2020-05-12T00:24:17
| 234,813,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,639
|
py
|
import argparse
import numpy as np
# hide sklearn deprecation message triggered within skorch
from warnings import simplefilter
simplefilter('ignore', category=FutureWarning)
import torch
from skorch import NeuralNetClassifier
from skorch.callbacks import LRScheduler
from skorch.callbacks import EpochScoring
from sklearn.metrics import plot_roc_curve, roc_auc_score
from sklearn.metrics import plot_precision_recall_curve, average_precision_score
from constants import *
from datasets import *
import models
from utils.model_utils import *
from utils.data_utils import get_roadmap_col_order
DATA_CHOICES = ['mpra', 'mpra+scores', 'neighbor', 'neighbor+scores']
MODEL_CFG = {
'mpra': None,
'mpra+scores': None,
'neighbor': None,
'neighbor+scores': None
}
def fit_model(args):
torch.manual_seed(1000)
print(f'Fitting model for {args.data}:')
project = args.project
auc = EpochScoring(scoring='roc_auc', lower_is_better=False)
apr = EpochScoring(scoring='average_precision', lower_is_better=False)
if args.data == 'mpra':
train_df = load_train_set(project, datasets=['roadmap'])
proc = Processor(project)
train_df = proc.fit_transform(train_df, na_thresh=0.05)
proc.save(args.data)
X_train = train_df.drop(['chr', 'pos', 'Label'], axis=1) \
.values \
.astype(np.float32)
y_train = train_df['Label'].values.astype(np.int64)
net = NeuralNetClassifier(
models.MpraDense,
batch_size=256,
optimizer=torch.optim.Adam,
optimizer__weight_decay=2e-6,
lr=1e-4,
max_epochs=20,
module__n_input=1016,
module__n_units=(400, 250),
module__dropout=0.3,
callbacks=[auc, apr],
iterator_train__shuffle=True,
train_split=None
)
elif args.data == 'mpra+scores':
train_df = load_train_set(project, datasets=['roadmap', 'eigen', 'regbase'])
proc = Processor(project)
train_df = proc.fit_transform(train_df, na_thresh=0.05)
proc.save(args.data)
X_train = train_df.drop(['chr', 'pos', 'Label'], axis=1) \
.values \
.astype(np.float32)
y_train = train_df['Label'].values.astype(np.int64)
net = NeuralNetClassifier(
models.MpraDense,
batch_size=256,
optimizer=torch.optim.Adam,
optimizer__weight_decay=2e-6,
lr=1e-4,
max_epochs=20,
module__n_input=1079,
module__n_units=(400, 250),
module__dropout=0.3,
callbacks=[auc, apr],
iterator_train__shuffle=True,
train_split=None
)
elif args.data == 'neighbor':
X_train = load_train_neighbors(project).astype(np.float32)
tmp = load_train_set(project, datasets=['roadmap', 'eigen', 'regbase'],
make_new=False)
y_train = tmp['Label'].values.astype(np.int64)
assert X_train.shape[0] == y_train.shape[0]
net = NeuralNetClassifier(
models.MpraCNN,
batch_size=256,
optimizer=torch.optim.Adam,
optimizer__weight_decay=1e-4,
lr=5e-4,
max_epochs=20,
callbacks=[auc, apr],
iterator_train__shuffle=True
)
elif args.data == 'neighbor+scores':
print('\tLoading neighbors')
X_neighbor = load_train_neighbors(project).astype(np.float32)
print('\tLoading scores')
train_df = load_train_set(project, datasets=['roadmap', 'eigen', 'regbase'])
proc = Processor(project)
train_df = proc.fit_transform(train_df, na_thresh=0.05)
proc.save(args.data)
print('\tArranging data')
rm_cols = [f'{x}-E116' for x in ROADMAP_MARKERS]
# rm_cols = [x for x in get_roadmap_col_order(order='marker') if 'E116' in x]
X_score = train_df.drop(['chr', 'pos', 'Label'] + rm_cols, axis=1) \
.values \
.astype(np.float32)
y_train = train_df['Label'].values.astype(np.int64)
X_train = (X_neighbor, X_score)
net = NeuralNetClassifier(
models.MpraFullCNN,
batch_size=256,
optimizer=torch.optim.Adam,
optimizer__weight_decay=0,
lr=5e-4,
max_epochs=20,
callbacks=[auc, apr],
iterator_train__shuffle=True
)
# import sys; sys.exit()
net.fit(X_train, y_train)
class_pred = net.predict(X_train)
score_pred = net.predict_proba(X_train)
print('\tAUROC: ', roc_auc_score(y_train, score_pred[:, 1]))
print('\tAUPR: ', average_precision_score(y_train, score_pred[:, 1]))
save_model(net, project, args.data)
def evaluate_model(args):
print(f"Evaluating model for {args.data}:")
project = args.project
net = load_model(project, args.data)
X_test = load_test_neighbors(project)
X_test = X_test.astype(np.float32)
tmp = load_test_set(project, datasets=['roadmap', 'eigen', 'regbase'])
y_test = tmp['Label'].values.astype(np.int64)
# test_df = load_test_set(project, datasets=['roadmap', 'eigen', 'roadmap'])
# proc = Processor(project)
# proc.load(args.data)
# test_df = proc.transform(test_df)
# X_test = test_df.drop(['chr', 'pos', 'Label'], axis=1) \
# .values \
# .astype(np.float32)
# y_test = test_df['Label'].values.astype(np.int64)
class_pred = net.predict(X_test)
score_pred = net.predict_proba(X_test)
print('\tAUROC: ', roc_auc_score(y_test, score_pred[:, 1]))
print('\tAUPR: ', average_precision_score(y_test, score_pred[:, 1]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--project', '-p', choices=PROJ_CHOICES, required=True)
parser.add_argument('--data', '-d', default='mpra+scores', choices=DATA_CHOICES,
help='Which data/model to train on')
    parser.add_argument('--full', action='store_true', default=False,
                        help='Fit all models (overrides --data)')
parser.add_argument('--evaluate', '-e', action='store_true', default=False,
help='Evaluate model on test set after fitting')
args = parser.parse_args()
fit_model(args)
if args.evaluate:
evaluate_model(args)
|
[
"fredlu.flac@gmail.com"
] |
fredlu.flac@gmail.com
|
4faba1910def77457e265813a6749d9fcdc2c9fa
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/ec2_write_3/managed-prefix-list_create.py
|
a37a54b7d58925db27ffcd48c98d760451977f82
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,393
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_three_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/create-managed-prefix-list.html
if __name__ == '__main__':
"""
delete-managed-prefix-list : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/delete-managed-prefix-list.html
describe-managed-prefix-lists : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-managed-prefix-lists.html
modify-managed-prefix-list : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-managed-prefix-list.html
"""
parameter_display_string = """
# prefix-list-name : A name for the prefix list.
Constraints: Up to 255 characters in length. The name cannot start with com.amazonaws .
# max-entries : The maximum number of entries for the prefix list.
# address-family : The IP address type.
Valid Values: IPv4 | IPv6
"""
add_option_dict = {}
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_three_parameter("ec2", "create-managed-prefix-list", "prefix-list-name", "max-entries", "address-family", add_option_dict)
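    # For reference, the direct AWS CLI call this wrapper builds corresponds roughly to:
    #   aws ec2 create-managed-prefix-list --prefix-list-name my-list --max-entries 10 --address-family IPv4
    # (the values shown are placeholders; see the documentation URL above for the full option list)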
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
71a4c69ae3c773949b3127804ef78ee861a60fee
|
f9407b6f9454640b35753a39ac3fc57c1a105630
|
/parse_out_email_text.py
|
16017ff5240a650c9e78703bc219d8a033e475aa
|
[] |
no_license
|
saeidmoha/tools
|
5e91c7efc79fe75a1a780565233cdcd9b23c000d
|
18f20dfdade5a374c5ec2cbd71f4b661d61788db
|
refs/heads/master
| 2021-08-14T14:43:31.181434
| 2017-11-16T02:00:27
| 2017-11-16T02:00:27
| 110,909,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,703
|
py
|
#!/usr/bin/python
from nltk.stem.snowball import SnowballStemmer
import string
def parseOutText(f):
""" given an opened email file f, parse out all text below the
metadata block at the top
(in Part 2, you will also add stemming capabilities)
and return a string that contains all the words
in the email (space-separated)
example use case:
f = open("email_file_name.txt", "r")
text = parseOutText(f)
"""
f.seek(0) ### go back to beginning of file (annoying)
all_text = f.read()
### split off metadata
content = all_text.split("X-FileName:")
#print ("content[0] = ", content[0], "content[1] = ", content[1])
words = ""
stemmer = SnowballStemmer("english")
if len(content) > 1:
### remove punctuation
#text_string = content[1].translate(string.maketrans("", ""), string.punctuation)
text_string = content[1].translate(str.maketrans("", "", string.punctuation))
### project part 2: comment out the line below
#words = text_string
### split the text string into individual words, stem each word,
### and append the stemmed word to words (make sure there's a single
### space between each stemmed word)
text_string = ' '.join(text_string.split())
for word in text_string.split(" "):
stemword = stemmer.stem(word)
words += stemword + ' '
return words
def main():
ff = open("../text_learning/test_email.txt", "r")
#ff = open("../maildir/bailey-s/deleted_items/101.", "r")
text = parseOutText(ff)
print (text)
if __name__ == '__main__':
main()
|
[
"saeid@saeidm.com"
] |
saeid@saeidm.com
|
5429b29c77f91823ead2f1173cbc0e47dd660763
|
d95e6dbbcd0673b8adb81b4bd8c6bf5b8917a6c4
|
/spatial_paper_data_archive.py
|
d4feb4e672e31530d051c3031367e3ff7b3d0e81
|
[
"MIT"
] |
permissive
|
Timothy-W-Hilton/COS_Spatial_Analyses
|
ab907c811605ab6cbd406451bd1dbb386e88695c
|
dfb6f99f8c7181739c2079936dce83a4d86f5c1f
|
refs/heads/master
| 2021-01-20T20:39:00.748486
| 2016-06-17T20:56:25
| 2016-06-17T20:56:25
| 61,402,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,481
|
py
|
"""Calculate total size of COS spatial paper data files and copy the
data files to a single directory tree for archiving
"""
import os
import os.path
import shutil
from stem_pytools import NERSC_data_paths as ndp
def Get_Human_Readable(size, precision=2):
"""http://stackoverflow.com/questions/5194057/better-way-to-convert-file-sizes-in-python
"""
suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
suffixIndex = 0
while size > 1024 and suffixIndex < 4:
suffixIndex += 1 # increment the index of the suffix
size = size/1024.0 # apply the division
return "%.*f%s" % (precision, size, suffixes[suffixIndex])
def get_spatial_paper_data_total(runs):
all_data_sum = 0
for k, this_run in runs.items():
for this_file in (this_run.aqout_path, this_run.gpp_path,
this_run.gppraw_path, this_run.fcos_path):
if this_file is not None:
all_data_sum += os.path.getsize(this_file)
print "Spatial paper data total: " + Get_Human_Readable(all_data_sum)
return all_data_sum
def make_data_archive(root_dir, runs):
"""Copy all non-regridded GPP, regridded GPP, STEM AQOUT, and fCOS
netcdf files to a single directory tree for archiving.
"""
if os.path.exists(root_dir):
try:
shutil.rmtree(root_dir)
except:
print "unable to delete".format(root_dir)
try:
os.makedirs(root_dir)
except:
print "unable to create {}".format(root_dir)
for k, this_run in runs.items():
print "copying {} files".format(k)
this_run_dir = os.path.join(root_dir, k)
os.makedirs(this_run_dir)
for this_file in (this_run.aqout_path, this_run.gpp_path,
this_run.gppraw_path, this_run.fcos_path):
if this_file is not None:
print " copying {}".format(os.path.basename(this_file))
shutil.copy(this_file, this_run_dir)
        if k == 'climatological_bnd':
for this_bnd in (runs[k].top_bounds_path,
runs[k].lateral_bounds_path):
print " copying {}".format(os.path.basename(this_bnd))
shutil.copy(this_bnd, this_run_dir)
if __name__ == "__main__":
runs = ndp.get_Spatial_Paper_runs()
total = get_spatial_paper_data_total(runs)
archive_dir = os.path.join(os.getenv('SCRATCH'), 'SpatialPaperData')
make_data_archive(archive_dir, runs)
|
[
"thilton@ucmerced.edu"
] |
thilton@ucmerced.edu
|
55dc1576cdd1996d90d1c4d72010b67b9c856d33
|
a54809b430481f1b0047f046d412ffc3f0c7fe68
|
/myenv/lib/python3.6/encodings/mbcs.py
|
fef3c7ee1eaa9a11983fb638e663bd09cc620156
|
[] |
no_license
|
vineet0713/PaaS-ProberS-AWS-EB-
|
2521d4ee7e41e6d25c839cfea672d5706b4dbd85
|
1f9ca9e5d59ddcb3f94d7aaa96ca66719bc805cf
|
refs/heads/master
| 2020-04-30T05:47:55.793035
| 2019-03-20T02:54:43
| 2019-03-20T02:54:43
| 176,635,164
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
/usr/local/Cellar/python/3.6.4_3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/encodings/mbcs.py
|
[
"vineet0713@gmail.com"
] |
vineet0713@gmail.com
|
cc74a182310695445168f47a0e42b74f72ac72f7
|
6026c5fa42c561256510fd997286c21cb935690b
|
/volumeWidgets.py
|
be70dd6721a0fd5913db0939ec99b7a62eba1e57
|
[] |
no_license
|
frankbx/Volume
|
b5894e7ac13491e0c52af2ec39ebfea5a695ecf2
|
516815a1498e26b43d73f0c7f55da5fb2765b2d2
|
refs/heads/master
| 2020-04-03T22:40:43.346951
| 2017-03-13T04:37:05
| 2017-03-13T04:37:05
| 58,268,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,676
|
py
|
import pyqtgraph as pg
from PyQt4 import QtCore, QtGui
# Create a subclass of GraphicsObject.
# The only required methods are paint() and boundingRect()
# (see QGraphicsItem documentation)
class CandlestickItem(pg.GraphicsObject):
def __init__(self):
pg.GraphicsObject.__init__(self)
self.flagHasData = False
def set_data(self, data):
self.data = data # data must have fields: time, open, close, min, max
self.flagHasData = True
self.generatePicture()
self.informViewBoundsChanged()
def generatePicture(self):
# pre-computing a QPicture object allows paint() to run much more quickly,
# rather than re-drawing the shapes every time.
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen('w'))
barWidth = 1 / 3.
for (open, close, min, max, index) in self.data:
p.drawLine(QtCore.QPointF(index, min), QtCore.QPointF(index, max))
if open > close:
p.setBrush(pg.mkBrush('r'))
else:
p.setBrush(pg.mkBrush('g'))
p.drawRect(QtCore.QRectF(index - barWidth, open, barWidth * 2, close - open))
p.end()
def paint(self, p, *args):
if self.flagHasData:
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
# boundingRect _must_ indicate the entire area that will be drawn on
# or else we will get artifacts and possibly crashing.
        # (in this case, QPicture does all the work of computing the bounding rect for us)
return QtCore.QRectF(self.picture.boundingRect())
class CandleWidget(pg.PlotWidget):
def __init__(self, raw_data):
super(CandleWidget, self).__init__()
self.update(raw_data)
# self.candle_data = raw_data.loc[:, ['open', 'close', 'low', 'high']]
# r, c = self.candle_data.shape
# self.candle_data['num'] = range(1, r + 1)
# self.item = CandlestickItem()
# self.item.set_data(self.candle_data.values)
self.addItem(self.item)
def update(self, raw_data):
# raw_data.sort_index(axis=0, inplace=True)
self.candle_data = raw_data.loc[:, ['open', 'close', 'low', 'high']]
r, c = self.candle_data.shape
self.candle_data['num'] = range(1, r + 1)
self.item = CandlestickItem()
self.item.set_data(self.candle_data.values)
# app = QtGui.QApplication([])
# df = ts.get_hist_data('000681', '2015-01-01', ktype='w')
# r, c = df.shape
# print(r)
# cData = df.copy().loc[:, ['open', 'close', 'low', 'high']]
# cData['num'] = range(1, r + 1)
#
# print(cData)
# # cData = np.array(cData)
# item = CandlestickItem()
# item.set_data(cData.values)
#
# plt = pg.plot()
# plt.addItem(item)
# plt.setWindowTitle('pyqtgraph example: customGraphicsItem')
#
#
# def update():
# global item
# df = ts.get_hist_data('000681', '2015-01-01', ktype='d')
# r, c = df.shape
# print(r)
# cData = df.loc[:, ['open', 'close', 'low', 'high']]
# cData['num'] = range(1, r + 1)
# item.set_data(cData.values)
# # app.processEvents() ## force complete redraw for every plot
#
#
# timer = QtCore.QTimer()
# timer.timeout.connect(update)
# timer.start(10000)
# df = ts.get_hist_data('000681', '2015-01-01', ktype='w')
# print(enumerate(df))
# for (value) in df.head(10).values:
# print(value)
# print(type(value))
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
[
"xiang.bao.frank@gmail.com"
] |
xiang.bao.frank@gmail.com
|
46296617c7f07b0473680d71fe3811728becd60d
|
5f3902453ad41436382bb77e3e6a06dfe94deaed
|
/ask7.py
|
b2b9666cc40348812f8a3686d3d5bcd7c8b2a150
|
[] |
no_license
|
dimitrisadam/askhseis
|
23a6c45a33004c2b50837a42e10dae72479adc34
|
504f270056e2afa6e331bd501a8f476bf35dd991
|
refs/heads/master
| 2021-01-21T08:21:33.339580
| 2017-02-27T18:13:38
| 2017-02-27T18:13:38
| 83,341,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,647
|
py
|
import tweepy
from tweepy import OAuthHandler
import sys
consumer_key = ''
consumer_secret = ''
access_token = ''
access_secret = ''
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
# Takes the list of tweets (10 in total) and processes them one by one,
# removing https links (since they are not words) and words containing the
# character '\' (which indicates a photo). It also removes standalone dashes
# and double spaces. Finally, it returns the total number of words counted in the tweets.
def countWords(alltweets):
httpFlag = "http"
delimeter = "\\"
paula="-"
doublespace=""
totalWords = 0
test =" "
for i in range(len(alltweets)):
test = str(alltweets[i])
test = test.split(" ")
for j in range(len(test)):
if delimeter not in test[j] and httpFlag not in test[j] and test[j] is not paula and test[j] is not doublespace:
totalWords+=1
#print test[j]
return totalWords
firstUser = raw_input("Dwste to tweeter username tou prwtou xrhsth: \n")
secondUser = raw_input("Dwste to tweeter username tou deuterou xrhsth: \n")
#firstUserTweets = api.user_timeline(screen_name="RollingStones",count=10)
# here we read the 10 most recent tweets of the first user and store them in firsttweets
firstUserTweets = api.user_timeline(screen_name=firstUser,count=10)
firsttweets = [[tweet.text.encode('utf-8')] for tweet in firstUserTweets]
#print firsttweets
#secondUserTweets = api.user_timeline(screen_name="rogerfederer",count=10)
# here we read the 10 most recent tweets of the second user and store them in secondtweets
secondUserTweets = api.user_timeline(screen_name=secondUser,count=10)
secondtweets = [[tweet.text.encode('utf-8')] for tweet in secondUserTweets]
#print secondtweets
# Check whether the 10 tweets exist. If not, let it continue; a sys.exit(0) could also be done here
if len(firsttweets) < 10:
print '\nWARNING: O xrhsths',firstUser,'den exei kanei 10 tweets'
if len(secondtweets) < 10:
print '\nWARNING: O xrhsths',secondUser,'den exei kanei 10 tweets'
firstUserTotalWorlds = countWords(firsttweets)
secondUserTolalWorlds = countWords(secondtweets)
if firstUserTotalWorlds > secondUserTolalWorlds:
print '\nPerissoteres lexeis exei o user',firstUser,'pou exei',firstUserTotalWorlds,'lexeis.O user',secondUser,'exei',secondUserTolalWorlds,'lexeis'
else:
print '\nPerissoteres lexeis exei o user',secondUser,'pou exei',secondUserTolalWorlds,'lexeis.O user',firstUser,'exei',firstUserTotalWorlds,'lexeis'
#print 'totalwords =',countWords(firsttweets)
#print 'totalwords =',countWords(secondtweets)
|
[
"mitsoseleysina2@gmail.com"
] |
mitsoseleysina2@gmail.com
|
5c80ed9e14391ad32e4cc6fd9fcae8dce388c672
|
479518429066a4200b0c9ffbc42f22620dee1749
|
/app.py
|
5074f7904d2af983e17faf125c1a1f1f6874b9a4
|
[] |
no_license
|
nikhilkumarsingh/nitdhack
|
d2b4871c2aa3ef461c409a2f75e4f346759f1797
|
633ddf770c19fb8b0dd66479bc8e865e36181ffa
|
refs/heads/master
| 2021-01-19T21:33:27.880021
| 2017-04-18T23:43:06
| 2017-04-18T23:43:06
| 88,665,337
| 0
| 1
| null | 2018-10-03T05:33:57
| 2017-04-18T19:59:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
import flask
import requests
app = flask.Flask(__name__, static_folder='static')
app.config.from_object(__name__)
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
@app.route('/')
def home():
return flask.render_template('index.html')
def NearbySearch(lat,lng,keyword,radius=1000):
key="AIzaSyApuFoKxVMRQ2einlsA0rkx2S4WJjJIh34"
url="https://maps.googleapis.com/maps/api/place/nearbysearch/json?"
url+="location=%f,%f&" % (lat,lng)
url+="radius=%i&" % radius
url+="type=%s&" % keyword
url+="key=%s" % key
response=requests.get(url)
json_dict=response.json()
res=json_dict['results']
info_pack=[]
for x in res:
placeid = x['place_id']
url = "https://maps.googleapis.com/maps/api/place/details/json?placeid={}&key={}".format(placeid,key)
r = requests.get(url).json()['result']
info = {}
info['name'] = r['name']
info['lat'] = r['geometry']['location']['lat']
info['lng'] = r['geometry']['location']['lng']
info_pack.append(info)
return info_pack
@app.route('/query', methods = ['POST'])
def query():
if flask.request.method == 'POST':
        # lat, lng =
        lat, lng = 28, 76
        data = {'locations': NearbySearch(lat, lng, 'doctor')}
        print(flask.request.form['query'])
        return flask.jsonify(data)
if __name__ == "__main__":
app.run(debug = True, port=5003)
|
[
"nikhilksingh97@gmail.com"
] |
nikhilksingh97@gmail.com
|
20635375b97f4e276fbfab8866c1ba60fc8aff05
|
ffddf50985bd61a6bb4d7001fe838f8d5c709bf4
|
/Builders/TechnologyBUILD
|
3833e5eef716d6e008e45dbb6d8221286f31d5c2
|
[] |
no_license
|
NoahBarrett98/Lost-and-Found
|
9a7216e401aa5f3e31da637f1c20e75d681eb12d
|
0d828029c466aeda9e5aac27695d22335e574e26
|
refs/heads/master
| 2021-01-26T07:44:06.501000
| 2020-03-22T19:47:42
| 2020-03-22T19:47:42
| 243,370,004
| 1
| 1
| null | 2020-03-10T13:58:11
| 2020-02-26T21:31:06
|
Python
|
UTF-8
|
Python
| false
| false
| 752
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 8 09:27:37 2020
@author: hannahmacdonell
"""
#H: Technology(itemID, tBrand, device, serialNo)
import random
import csv
d = []
c = open("device.txt", "r")
for line in c:
d.append(line.strip().split('\n')[0])
c.close()
l = []
c = open("tBrand.txt", "r")
for line in c:
l.append(line.strip().split('\n')[0])
c.close()
with open('tech.csv', mode='w') as data_file:
data_writer = csv.writer(data_file, delimiter=',')
data_writer.writerow(['tBrand','Device','SerialNo'])
for x in range(1000):
data_writer.writerow([l[random.randint(0,len(l)-1)],d[random.randint(0,len(d)-1)],str(random.randint(4123456,9123456))])
data_file.close()
|
[
"noreply@github.com"
] |
NoahBarrett98.noreply@github.com
|
|
837341341225792eaf8191f24e39c25694df9f97
|
726d8518a8c7a38b0db6ba9d4326cec172a6dde6
|
/0909. Snakes and Ladders/Solution.py
|
29b98be3c2e7335ac5e219780c47380b948657d7
|
[] |
no_license
|
faterazer/LeetCode
|
ed01ef62edbcfba60f5e88aad401bd00a48b4489
|
d7ba416d22becfa8f2a2ae4eee04c86617cd9332
|
refs/heads/master
| 2023-08-25T19:14:03.494255
| 2023-08-25T03:34:44
| 2023-08-25T03:34:44
| 128,856,315
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
from collections import deque
from typing import List
class Solution:
def snakesAndLadders(self, board: List[List[int]]) -> int:
n = len(board)
visited = [False] * (n * n + 1)
queue = deque([1])
steps = 0
while queue:
size = len(queue)
for _ in range(size):
pos = queue.popleft()
for i in range(pos + 1, min(pos + 7, n * n + 1)):
if visited[i]:
continue
visited[i] = True
if i == n * n:
return steps + 1
                    # map the 1-based square number i to board coordinates:
                    # the board is stored bottom-up and odd rows run right-to-left (boustrophedon)
                    r, c = divmod(i - 1, n)
                    if r & 1:
                        c = n - 1 - c
                    r = n - 1 - r
if board[r][c] != -1 and board[r][c] == n * n:
return steps + 1
if board[r][c] == -1:
queue.append(i)
else:
queue.append(board[r][c])
steps += 1
return -1
|
[
"yubowen.ssr@bytedance.com"
] |
yubowen.ssr@bytedance.com
|
48201b6182773eb907fb42c0093c1f0bf47efc96
|
853c189602a667990eda858db98d163fb597caa1
|
/tfx/orchestration/experimental/core/constants.py
|
e5a5208afa70ef912a346dc02d4fe9ccce962866
|
[
"Apache-2.0"
] |
permissive
|
swap-10/tfx
|
9bef96fc592810ed2d7dfa5dd60044c9ac481e02
|
8e80ce2486b4d7b219dcff906d6930e62c5fdd45
|
refs/heads/master
| 2023-07-15T22:54:18.642120
| 2021-09-06T06:17:48
| 2021-09-06T06:17:48
| 402,296,955
| 0
| 0
|
Apache-2.0
| 2021-09-02T05:49:03
| 2021-09-02T05:09:23
| null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants shared across modules."""
EXECUTION_ERROR_MSG_KEY = '__execution_error_msg__'
IMPORTER_NODE_TYPE = 'tfx.dsl.components.common.importer.Importer'
RESOLVER_NODE_TYPE = 'tfx.dsl.components.common.resolver.Resolver'
|
[
"tensorflow-extended-nonhuman@googlegroups.com"
] |
tensorflow-extended-nonhuman@googlegroups.com
|
e253dc4bc39f59c0d01f1734c35d33edfc76853a
|
c5e7926ffa9af44e3d3fea7c854c013898b8f346
|
/scrap_tenders/scrap_tenders/items.py
|
da60390af0bf3875f87e8865f44838ec5e8df41d
|
[
"MIT"
] |
permissive
|
Salomari1987/med-tenders-egypt
|
a821cd1064a5c68cbd7318c8ade254667692b7d9
|
31b5061fe28c56d5e9a8bb4b267148848bfeaf5a
|
refs/heads/master
| 2021-01-19T18:32:36.773483
| 2017-04-19T00:36:02
| 2017-04-19T00:40:26
| 88,364,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
# from scrapy.item import Item, Field
#
#
# class StackItem(Item):
# Tender_Notice_Type = Field()
# Country = Field()
# Category = Field()
# Description = Field()
# Deadline = Field()
# Ref = Field()
from scrapy_djangoitem import DjangoItem
from tenders_django_app.models import Tender
class StackItem(DjangoItem):
django_model = Tender
|
[
"s.z.alomari.1987@gmail.com"
] |
s.z.alomari.1987@gmail.com
|
73ea759b8c4f767004d46136a4cb1eec0f7feabe
|
eb99e1d5008f90e5a54724863dacba4878fb2cea
|
/tests/test_basic.py
|
875207f11e43de580d9a19a086b9fd20315d8529
|
[] |
no_license
|
machow/hoof
|
fe529ef6573ecae35ba51704cd5c95c188c50295
|
4c9460492f283abd539ab3577982226efe15db5a
|
refs/heads/master
| 2022-05-20T03:43:33.847717
| 2020-04-25T21:47:48
| 2020-04-25T21:47:48
| 256,900,890
| 1
| 0
| null | 2020-04-25T21:38:26
| 2020-04-19T02:56:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
from hoof import Hoof, AntlrAst
class Program(AntlrAst):
_fields = ["body"]
class BinaryExpr(AntlrAst):
_fields = ["left", "right", "op"]
class RunExpr(AntlrAst):
_fields = ["op", "expr"]
_remap = ["RUN->op"]
_rules = "RunExpr"
hoof = Hoof("hoof_examples.Expr")
class AstVisitor(hoof.Visitor):
def visitParentheses(self, ctx):
# skip parentheses
return self.visit(ctx.expr())
def visitTerminal(self, ctx):
return ctx.getText()
hoof.register("Prog", Program, ["expr->body"]) # no config on node
hoof.register("BinaryExpr", BinaryExpr) # no need to remap
hoof.register(RunExpr) # rule and remap on node
hoof.bind(AstVisitor)
def test_program():
node = hoof.parse("1 + 2; 3 - 4;", "prog")
assert isinstance(node, Program)
assert len(node.body) == 2
assert isinstance(node.body[0], BinaryExpr)
def test_binary():
node = hoof.parse("1 + 2", "expr")
assert isinstance(node, BinaryExpr)
assert node.left == "1"
assert node.right == "2"
assert node.op == "+"
def test_put():
node = hoof.parse("run 2", "expr")
assert isinstance(node, RunExpr)
assert node.expr == "2"
def test_parentheses():
node = hoof.parse("(1 + 1)", "expr")
assert isinstance(node, BinaryExpr)
def test_expr_integer():
# this is a Token (INT) with no explicit shaping, so is result of visitTerminal
node = hoof.parse("1", "expr")
node == "1"
|
[
"machow@princeton.edu"
] |
machow@princeton.edu
|
e1bea179688f6a672cd83a7f2b9f861bbb702425
|
4b5c21db88a80fcca418c0c8b431d93774d9336a
|
/envfarmaciaveterinaria/Scripts/viewer.py
|
fb21137537dab877fe83180aef6958ef73bea3df
|
[] |
no_license
|
laMoradaPostrera/FarmaciaVeterinariaUnillanos
|
e9620b1b108ab53956a50e754dd7f339e237f150
|
2312ccee591c4991c3ee0627ea4815de65e7a1eb
|
refs/heads/master
| 2020-10-01T23:43:19.395012
| 2018-06-14T05:15:36
| 2018-06-14T05:15:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
#!c:\users\lenovo~1\mispro~1\unilla~1\farmac~2\envfar~1\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# an image viewer
class UI(tkinter.Label):
def __init__(self, master, im):
if im.mode == "1":
# bitmap image
self.image = ImageTk.BitmapImage(im, foreground="white")
tkinter.Label.__init__(self, master, image=self.image, bd=0,
bg="black")
else:
# photo image
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bd=0)
#
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python viewer.py imagefile")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
|
[
"diegoasencio96@gmail.com"
] |
diegoasencio96@gmail.com
|
482b54447b3f7cd5d3fb519221920951b5b68ed0
|
d9764a604c85c134ff217747d243eac8fe28e792
|
/src/demo2.py
|
e3c0801f18c91206c2e18df08c2caacf8e0007bf
|
[] |
no_license
|
afcarl/INF421-project
|
5a0130c3ba6e0c767323001048d3f191379dbc6e
|
dc6eef684f6d277b6a9bbbc227a9e20a1525e115
|
refs/heads/master
| 2020-03-19T21:21:53.465240
| 2017-08-14T13:39:52
| 2017-08-14T13:39:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
#!/usr/bin/env python3
"""
Special notes :
This implementation supports MULTIPLE shortest path.
(except for the number_of_possible_locations_with_mindist_simple function)
"""
import random
from Graph import Graph
from algo import *
from unused import *
from Dijkstra import *
from util import timeit
from reach import reach
####################
data = '/Users/louisabraham/Downloads/RoadNetworks/data/france.in'
logging = '/Users/louisabraham/Downloads/RoadNetworks/vis/points.js'
hour = 3600000
# We can control the display of chronos using timeit.activated
timeit.activated = True
####################
# graph importation
g = Graph.from_file(data)
# we chose a random starting point
v = random.choice(list(g.keys()))
#
# # Question 1.1
# print(number_of_possible_locations(g, v, 1 * hour))
#
# # the same result is computed
# print(number_of_possible_locations_with_mindist_dijkstra(
# g, v, 1 * hour, 0))
# print(number_of_possible_locations_with_mindist_dijkstra(
# g, v, 1 * hour, 0))
print(number_of_possible_locations_with_mindist_dijkstra(
g, v, 1 * hour, 2 * hour, logging=logging))
input()
g.generate_converse()
print(number_of_possible_locations_with_mindist_dijkstra(
g.converse, v, 1 * hour, 2 * hour, logging=logging))
# print(reach(g, v))
#
# # We can free memory like this
# dijkstra.clean()
|
[
"louis.abraham@yahoo.fr"
] |
louis.abraham@yahoo.fr
|
c081b2cc5c19aacf9997f2dcf145d5c6d6a94c75
|
a985c0797ed10fc7eef59c527b0490dbfeadd2af
|
/Docker Model/utils/makeprediction.py
|
c5e55cdb241d98d033c0a7d78c01784c14785250
|
[] |
no_license
|
etheleon/data-science-for-fun
|
1bd0c9f04a8c5f0e533d42816a085c8e0656092d
|
7488c3f9a3a0e36371905c71fdf7f2528e9d0e95
|
refs/heads/master
| 2021-07-23T14:21:08.860090
| 2017-10-30T02:30:43
| 2017-10-30T02:30:43
| 108,791,255
| 0
| 0
| null | 2017-10-30T02:19:55
| 2017-10-30T02:19:54
| null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
def predict(inputFeatures):
iris = datasets.load_iris()
knn = KNeighborsClassifier()
knn.fit(iris.data, iris.target)
predictInt = knn.predict(inputFeatures)
if predictInt[0] == 0:
predictString = 'setosa'
elif predictInt[0] == 1:
predictString = 'versicolor'
elif predictInt[0] == 2:
predictString = 'virginica'
else:
predictString = 'null'
return predictString
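# A minimal usage sketch: predict() expects a 2-D array-like containing four iris
# measurements (sepal length, sepal width, petal length, petal width) per sample.
if __name__ == '__main__':
    # one sample with measurements typical of Iris setosa
    print(predict([[5.1, 3.5, 1.4, 0.2]]))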
|
[
"A0134553@u.nus.edu"
] |
A0134553@u.nus.edu
|
a720e9e9dba4c9e67cd739029eb1e94d9b40b70b
|
7b532a7d7c79601a5a8dd7beaf6b06e7a23b3666
|
/Inception_model/softmax.py
|
7f2dd55bc8779ae70bfbfb3f2fe90788f2300a17
|
[] |
no_license
|
lanardo/Image_processing_server
|
e3d3151cf825ebca01a64d851642bca0e99b0646
|
957ab8d82a453049885f85f440efcfc60c0e7d7f
|
refs/heads/master
| 2021-05-14T13:13:27.358287
| 2018-07-04T19:41:06
| 2018-07-04T19:41:06
| 116,435,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,287
|
py
|
import tensorflow as tf
import csv
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", help="path to folder containing the trainning data")
parser.add_argument("--output_dir", help="path to folder containing the result coef files")
parser.add_argument("--restore", default="yes", help="restore from the checkpoint")
parser.add_argument("--rate", type=float, default=0.0001, help="rate(alpha) for trainning")
parser.add_argument("--epochs", type=int, default=200000, help="max epoches")
parser.add_argument("--strip", type=int, default=50, help="step for writing the result on loop")
a = parser.parse_args()
# a.input_dir = './model'
# a.output_dir = './model'
# a.restore = "no"
def xaver_init(n_inputs, n_outputs, uniform=True):
if uniform:
init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
return tf.random_uniform_initializer(-init_range, init_range)
else:
stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
return tf.truncated_normal_initializer(stddev=stddev)
def acc(d1, d2):
cnt = 0
for i in range(d1.__len__()):
if d1[i] == d2[i]:
cnt += 1
return float(cnt)/d1.__len__()
def sel_max(data):
ret_ind = []
for i in range(data.__len__()):
if data[i][0] == 1:
ret_ind.append(0)
else:
ret_ind.append(1)
return ret_ind
if __name__ == '__main__':
learning_rate = a.rate
in_dir = a.input_dir
out_dir = a.output_dir
epochs = a.epochs
strip = a.strip
train_data_path = os.path.join(in_dir, 'train_data.csv')
w_coef_path = os.path.join(out_dir, 'w.csv')
b_coef_path = os.path.join(out_dir, 'b.csv')
ckpt_path = os.path.join(out_dir, 'model_bin.ckpt')
labels = ['front', 'front_3_quarter', 'side', 'rear_3_quarter', 'rear', 'interior', 'tire']
directions = [
[1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1]
]
x_data = []
y_data = []
""" Loading training data from csv files """
print('[Step 1] Loading training data ...')
# for python 2x
with open(train_data_path) as fp:
csv_reader = csv.reader(fp, delimiter=',')
for row in csv_reader:
x_data.append([float(row[i]) for i in range(0, len(row)-7)])
y_data.append([float(row[i]) for i in range(len(row)-7, len(row))])
print("total features :" + str(len(x_data)))
print("length of feature :" + str(len(x_data[0])))
print("length of label :" + str(len(y_data[0])))
""" Placeholder """
print('[Step 2] Placeholder')
x = tf.placeholder('float', [None, 2048]) # len(feature) = 2048
y = tf.placeholder('float', [None, 7]) # len(Directions) = 7 : classes
W1 = tf.get_variable('W1', shape=[2048, 7], initializer=xaver_init(2048, 7))
b1 = tf.Variable(tf.zeros([7]))
activation = tf.add(tf.matmul(x, W1), b1)
t1 = tf.nn.softmax(activation)
""" Minimize error using cross entropy """
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=activation, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Gradient Descent
""" Initializing the variables """
print('[Step 3] Initializing the variables.')
# init = tf.initialize_all_variables() # python 3x
init = tf.global_variables_initializer() # python 2x
sess = tf.Session()
sess.run(init)
saver = tf.train.Saver()
print(a.restore)
if a.restore == "yes":
print('Loading the last learning Session.')
saver.restore(sess, ckpt_path)
""" Training cycle """
print('[Step 4] Training...')
for step in range(epochs):
sess.run(optimizer, feed_dict={x: x_data, y: y_data})
if step % strip == 0:
ret = sess.run(t1, feed_dict={x: x_data})
acc1 = acc(sess.run(tf.arg_max(ret, 1)), sess.run(tf.arg_max(y_data, 1))) * 100
print(' ' + str(step) + ' ' + str(sess.run(cost, feed_dict={x: x_data, y: y_data})) + ' ' + str(acc1))
saver.save(sess, ckpt_path)
print('Optimization Finished!')
|
[
"williams.lanardo@gmail.com"
] |
williams.lanardo@gmail.com
|
1186138ee1bd98ce6cc3c24b6d4b5d7158920d79
|
f81099738d3ab7d4a4773a04ed9e36e493632590
|
/angelos-portfolio/test/test_domain_update.py
|
2ccd8c81f1a7ea5f7e2d64656a9b8ccd5a5df49a
|
[
"MIT"
] |
permissive
|
kristoffer-paulsson/angelos
|
eff35753e4d7e4465d2aadac39265f206b09fcf9
|
d789f47766fe3a63a6752b92e4ea955f420dbaf9
|
refs/heads/master
| 2022-05-05T15:16:59.340527
| 2022-03-27T16:05:51
| 2022-03-27T16:05:51
| 142,691,235
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
#
# Copyright (c) 2018-2020 by Kristoffer Paulsson <kristoffer.paulsson@talenten.se>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
"""Security tests putting the policies to the test."""
from unittest import TestCase
from angelos.common.policy import evaluate
from angelos.lib.policy.types import PersonData
from angelos.portfolio.domain.create import CreateDomain
from angelos.portfolio.domain.update import UpdateDomain
from angelos.portfolio.entity.create import CreatePersonEntity
from test.fixture.generate import Generate
class TestUpdateDomain(TestCase):
def test_perform(self):
data = PersonData(**Generate.person_data()[0])
portfolio = CreatePersonEntity().perform(data)
CreateDomain().perform(portfolio)
self.assertIsNotNone(portfolio.domain)
with evaluate("Domain:Update") as report:
domain = UpdateDomain().perform(portfolio)
self.assertIs(domain, portfolio.domain)
self.assertTrue(report)
|
[
"kristoffer.paulsson@talenten.se"
] |
kristoffer.paulsson@talenten.se
|
d6b4abc7fbe0628b62ce4ae5c4de91acedb25971
|
962feeffee41625ff841f6590f97bb09cef9be4c
|
/torch_glow/tests/nodes/avgpool3d_test.py
|
93e26349ac4e677a2d89d2388568725436963f2f
|
[
"Apache-2.0"
] |
permissive
|
SushantDaga/glow
|
8c4c3fbc58c3ae760bdd8e1df2e8c05a72ff07bc
|
aab22c3e0421dadd29950c2ebfa88b86027cecf5
|
refs/heads/master
| 2022-11-03T08:39:33.958233
| 2020-06-19T17:03:14
| 2020-06-19T17:05:42
| 273,568,864
| 2
| 0
|
Apache-2.0
| 2020-06-19T19:12:31
| 2020-06-19T19:12:30
| null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests.utils import jitVsGlow
import unittest
class TestAvgPool3d(unittest.TestCase):
def test_avg_pool3d_basic(self):
"""Basic test of the PyTorch avg_pool3d Node on Glow."""
def test_f(inputs):
return F.avg_pool3d(inputs, 3)
inputs = torch.randn(1, 4, 5, 5, 5)
jitVsGlow(test_f, inputs, expected_fused_ops={"aten::avg_pool3d"})
def test_avg_pool3d_with_args(self):
"""Test of the PyTorch avg_pool3d Node with arguments on Glow."""
def test_f(inputs):
return F.avg_pool3d(inputs, padding=2, kernel_size=(4, 7, 7))
inputs = torch.randn(1, 4, 10, 10, 10)
jitVsGlow(test_f, inputs, expected_fused_ops={"aten::avg_pool3d"})
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
5f1d4ae6e02e4e33bd1e5716d22ee7da2b0c0cbd
|
f774ccfe88871fbe37b52487253108144c07f968
|
/exer95.py
|
d4a749738c38516b4e2e8805d75fdd3495a48c8e
|
[] |
no_license
|
capy-larit/exercicios_python
|
4bcfdc8985983dc69c63f315931c200c4c9f1100
|
c92b8ff31e2eb0c87f2dfdad9d97149db6f1181e
|
refs/heads/master
| 2023-03-23T05:29:07.409948
| 2020-09-08T22:42:48
| 2020-09-08T22:42:48
| 245,882,895
| 0
| 0
| null | 2021-03-15T21:43:15
| 2020-03-08T20:28:15
|
Python
|
UTF-8
|
Python
| false
| false
| 675
|
py
|
"""
Write a program using a dict (dictionary) that reads input data from the user. The
user should enter a person's data, such as name, age, and the city where they live.
After that, print the data as in the example below:
nome: João
idade: 20
cidade: São Paulo
"""
def chamar_menu():
    nome = input('Digite seu nome: ')
    idade = int(input('Digite sua idade: '))
    cidade = input('Digite sua cidade: ')
    pessoas[nome] = [idade, cidade]
pessoas = {}  # use a regular dict without shadowing the built-in name `dict`
try:
    chamar_menu()
except ValueError:
    print('A idade deve ser um número inteiro.')
    chamar_menu()
for chave, item in pessoas.items():
    print(f'Nome: {chave}\nIdade: {item[0]}\nCidade: {item[1]}')
|
[
"larissa.laritt@icloud.com"
] |
larissa.laritt@icloud.com
|
8b95e2ada92485e2e3e8915583d7b6c7899d04f7
|
5022b48f311ba4710e1851855552b9546a3142c5
|
/unittest/case_test.py
|
3b355326b97f14c7a95801f1b8d7f47cb5b04d82
|
[] |
no_license
|
18786262315/python_lx
|
a7a15a294312b8382c3d1fd97a8d0ede38f1c5a5
|
a870d49cc4ca6efd1b54c2b89dfbf5e3d911a568
|
refs/heads/master
| 2020-03-21T12:37:30.748759
| 2020-03-18T09:31:31
| 2020-03-18T09:31:31
| 138,563,274
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,228
|
py
|
'''
unittest conditional assertions
tester: cc
This file is for translation/reference only; it does not cover concrete usage.
'''
Skiptest()  # Raise this exception inside a test to skip that test.
_ShouldStop()  # Stop the test run.
_UnexpectedSuccess()  # The test was expected to fail, but it did not fail.
Skip()  # Skip a test unconditionally.
skipIf(condition, reason)  # Skip the test when the condition is true.
skipUnless(condition, reason)  # Skip the test when the condition is false.
expectedFailure(test_item)  # Mark the test as expected to fail; if it does fail when run, it is not counted as a failed case.
_is_subtype(expected, basetype)  # Check whether the type matches what is expected.
addTypeEqualityFunc(typeobj, function)  # Register a type-specific check function for a custom class.
addCleanup(function, *args, **kwargs)  # Add a cleanup function to run after tearDown() for each test case; the added functions run in last-in-first-out (LIFO) order. If setUp() fails, tearDown() is not run, and neither are the functions added via addCleanup().
setUp()  # Run before each test case; any exception (other than unittest.SkipTest and AssertionError) counts as an error rather than a failure and aborts the current test case.
tearDown()  # Once setUp() has run, tearDown() runs regardless of whether the test case succeeded. If tearDown() raises an exception (other than unittest.SkipTest and AssertionError), an extra error is counted.
setUpClass(cls) and tearDownClass(cls)  # Run before/after the test cases of a class; they must be declared with the classmethod decorator.
countTestCases()  # Return the number of test cases; for a TestCase instance this is always 1.
defaultTestResult()  # If no result argument is passed to run(), return a TestResult object holding this case's results.
shortDescription()  # Return the test case's description, i.e. the function's docstring, or None if there is none. Useful for describing the test in result output.
id()  # Return the test case's identifier, usually of the form module.class.function. Useful in test result output.
subTest(msg=_subtest_msg_sentinel, **params)  # Return a context manager that runs the enclosed code block as a subtest identified by the optional message and keyword parameters. A failure in a subtest marks the test case as failed, but execution resumes after the enclosing block, allowing further test code to run.
run(result=None)  # Run a test case, collecting the results into result; the results are not returned to the caller. If result is None, the results go into the object returned by the defaultTestResult() method mentioned above.
doCleanups()  # Unconditionally force the functions added via addCleanup() to run; useful when setUp() fails but cleanup is still needed, or when the cleanup functions should run before tearDown().
debug()  # Unlike run(), which stores results in a result object, debug() runs the test case and reports exceptions to the caller.
fail(msg=None)  # Unconditionally declare a test case failed; msg is the failure message.
assertEqual(set1, set2, msg=None)  # Check that two values are equal.
assertFalse(expr, msg=None)  # Check that the expression is false.
assertTrue(expr, msg=None)  # Check that the expression is true.
assertAlmostEqual and assertNotAlmostEqual(first, second, places=None, msg=None, delta=None)  # Check whether two values are (not) approximately equal; places is the number of decimal places of precision.
assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)  # Equality assertion for ordered sequences such as tuples and lists.
assertListEqual(list1, list2, msg=None)  # List-specific equality assertion.
assertTupleEqual(tuple1, tuple2, msg=None)  # Tuple-specific equality assertion.
assertSetEqual(set1, set2, msg=None)  # Set-specific equality assertion.
assertIn and assertNotIn(member, container, msg=None)  # Check whether a is (not) present in b.
assertIs and assertIsNot(expr1, expr2, msg=None)  # Check whether a is (not) b.
assertDictEqual(d1, d2, msg=None)  # Check that two dictionaries are equal.
assertDictContainsSubset(subset, dictionary, msg=None)  # Check that the dictionary is a superset of the subset.
assertCountEqual(first, second, msg=None)  # Check that two unordered sequences contain the same elements.
assertMultiLineEqual(first, second, msg=None)  # Assert that two multi-line strings are equal.
assertLess(a, b, msg=None)  # Assert a < b.
assertLessEqual(a, b, msg=None)  # Assert a <= b.
assertGreater(a, b, msg=None)  # Assert a > b.
assertGreaterEqual(a, b, msg=None)  # Assert a >= b.
assertIsNone and assertIsNotNone(obj, msg=None)  # Check whether obj is (not) None.
assertIsInstance(a, b) and assertNotIsInstance(a, b)  # Like assertTrue; the type b may be a single type or a tuple of types.
assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)  # Assert that the message of the raised exception matches the regular expression.
assertWarnsRegex(expected_warning, expected_regex, *args, **kwargs)  # Assert that the message of the triggered warning matches the regexp. Basically like assertWarns(), but only a warning whose message matches the regular expression counts as a successful match.
assertRegex and assertNotRegex(text, expected_regex, msg=None)  # Check whether the text matches the regular expression.
shortDescription()  # Return the test case's description, i.e. the function's docstring, or None if there is none. Useful for describing the test in result output.
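# A small, self-contained example exercising a few of the assertions listed above
# (the reference lines above are documentation-style and are not runnable as-is):
#
#   import unittest
#
#   class DemoCase(unittest.TestCase):
#       def test_basic_assertions(self):
#           self.assertEqual(1 + 1, 2)
#           self.assertIn('a', 'abc')
#           self.assertAlmostEqual(0.1 + 0.2, 0.3, places=7)
#
#       @unittest.skipIf(True, "demonstrates conditional skipping")
#       def test_skipped(self):
#           self.fail("never runs")
#
#   if __name__ == '__main__':
#       unittest.main()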
|
[
"843092012@qq.com"
] |
843092012@qq.com
|
d43e3d2d2a4cade3e15bd4256deff2b47f891672
|
2e4c9dafb6fc03d48df9f80d506474b87438056d
|
/fibers.py
|
55b6a4bd996a1723437447420a402201d3353313
|
[] |
no_license
|
ericwang915/pypbc
|
fd35d2d91d2f50c6b0353abc84b3dcd72261006f
|
22c97949c549867103e667d998e8be2cfb1911a6
|
refs/heads/master
| 2020-06-11T00:01:54.795875
| 2009-09-23T09:00:41
| 2009-09-23T09:00:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,479
|
py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Basic functions to read and write TrackVis .trk files and to play
with fibers.
Copyright (c) 2009 Emanuele Olivetti <emanuele_AT_relativita.com>
This library is free software; you can redistribute it and/or modify
it either under the terms of the GNU General Public License version 3
as published by the Free Software Foundation.
"""
import numpy as N
import sys
# Definition of trackvis header structure.
# See http://www.trackvis.org/docs/?subsect=fileformat
# See http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
trk_header_structure = [['id_string', 1, 'S6'],
['dim', 3, '<h'],
['voxel_size', 3, '<f4'],
['origin', 3, '<f4'],
['n_scalars', 1, '<h'],
['scalar_name', 10, 'S20'],
['n_properties', 1, '<h'],
['property_name', 10, 'S20'],
['reserved', 1, 'S508'],
['voxel_order', 1, 'S4'],
['pad2', 1, 'S4'],
['image_orientation_patient', 6, '<f4'],
['pad1', 1, 'S2'],
['invert_x', 1, 'S1'],
['invert_y', 1, 'S1'],
['invert_z', 1, 'S1'],
['swap_xy', 1, 'S1'],
['swap_yz', 1, 'S1'],
['swap_zx', 1, 'S1'],
['n_count', 1, '<i4'],
['version', 1, '<i4'],
['hdr_size', 1, '<i4'],
]
def read_header(f):
""" Read and parse .trk file header structure.
"""
header = {}
for field_name, count, dtype in trk_header_structure:
header[field_name] = N.fromfile(f, dtype=dtype, count=count)
pass
assert(f.tell()==1000) # header is always 1000 bytes.
return header
def write_header(f, header):
"""Write .trk header to file.
"""
for field_name, count, dtype in trk_header_structure:
# Note that ".astype(dtype)" is just to be sure or correct types:
header[field_name].astype(dtype).tofile(f)
pass
assert(f.tell()==1000) # header is always 1000 bytes.
return
def print_header(header):
"""Print relevant info of .trk header.
"""
print "Header:"
relevant_fields = ['dim', 'voxel_size', 'origin', 'n_count' ]
for field in relevant_fields:
print '\t',field, ':', header[field]
pass
return
def progress_meter(position, total, message, steps=10):
"""Simple progress meter.
"""
if position%(int(total/steps))==0:
print message, str(1+int(100.0*position/total))+'%'
sys.stdout.flush()
pass
return
def read_fibers(f, header):
"""Read fibers from .trk file and fill a list.
"""
fiber = []
# structure of each entry of the list:
# [[X1,Y1,Z1,SCALAR1...],...,[Xn,Yn,Zn,SCALARn...]], [PROPERTIES]
# Note that in PBC2009 trckvis files there are no scalars or
# properties, which means that the actual structure of the fiber
# list is simply:
# fiber_id : [[X1,Y1,Z1],...,[Xn,Yn,Zn]], []
n_scalars = header['n_scalars'][0]
n_fibers = header['n_count'][0]
for fiber_id in range(n_fibers):
num_points = N.fromfile(f, dtype='<i4', count=1)[0]
xyz_scalar = N.fromfile(f, dtype='<f4', count=num_points*(3+n_scalars)).reshape(num_points, 3+n_scalars)
properties = N.fromfile(f, dtype='<f4', count=header['n_properties'][0])
fiber.append([xyz_scalar, properties])
progress_meter(fiber_id, n_fibers, 'Reading fibers...')
pass
return fiber
def write_fibers(f, fiber, header):
"""Write fibers to file in .trk format. Assumption: header has
already been written.
"""
n_scalars = header['n_scalars'][0]
n_fibers = header['n_count'][0]
for fiber_id in range(n_fibers):
num_points = N.array((fiber[fiber_id][0]).shape[0], dtype='<i4')
num_points.tofile(f)
xyz_scalar = N.array(fiber[fiber_id][0], dtype='<f4')
xyz_scalar.tofile(f)
properties = N.array(fiber[fiber_id][1], dtype='<f4')
properties.tofile(f)
progress_meter(fiber_id, n_fibers, 'Writing fibers...')
pass
return
def mm2voxel(xyz, header):
"""Converts coordinates from mm to voxel.
"""
return N.floor(xyz/header['voxel_size']).astype('i')
def voxel2mm(Vxyz, header):
"""Converts coordinates from voxel to mm.
"""
return (Vxyz+0.5)*header['voxel_size']
def build_voxel_fibers_dict(fiber, header):
"""Build a dictionary that given a voxel returns all fibers (IDs)
crossing it.
"""
voxel2fibers = {}
n_fibers = len(fiber)
for fiber_id in range(n_fibers):
xyz = fiber[fiber_id][0]
ijk = mm2voxel(xyz, header)
for i in range(xyz.shape[0]):
try:
voxel2fibers[tuple(ijk[i,:])].append(fiber_id)
except KeyError:
voxel2fibers[tuple(ijk[i,:])] = [fiber_id]
pass
pass
progress_meter(fiber_id, n_fibers, 'Mapping voxels to fibers...')
pass
n_voxels = len(voxel2fibers.keys())
# Now transform each list of IDs in an array of IDs:
for n, ijk in enumerate(voxel2fibers.keys()):
voxel2fibers[ijk] = N.array(voxel2fibers[ijk])
progress_meter(n, n_voxels, 'Converting lists to arrays...')
pass
return voxel2fibers
if __name__=="__main__":
print "This simple program reads a TrackVis .trk file, parse it, build"
print "structures to represent fibers as Python list of arrays"
print "and then saves structures in TrackVis .trk file format."
print "The resulting file is expected to be identical to the original."
print "As a further step a dictionary, mapping voxel to fibers, is built"
print "and some examples using it are shown."
# filename = "dsi.trk"
# filename = "dti.trk"
filename = "hardiO10.trk"
print
print "file:", filename
f = open(filename)
header = read_header(f)
print_header(header)
fiber = read_fibers(f, header)
f.close()
print
fiber_id = 1000
print "Example: fiber_id=",fiber_id
print fiber[fiber_id]
print "Convert points from mm to voxel coordinates:"
Vxyz = mm2voxel(fiber[fiber_id][0], header)
print Vxyz
print "Convert back and check whether differences are less than grid size...",
assert(((voxel2mm(Vxyz, header)-fiber[fiber_id][0])<header['voxel_size']).all())
print "OK."
print
filename2 = filename+"_COPY.trk"
print "Saving to:", filename2
f = open(filename2,'w')
write_header(f, header)
write_fibers(f, fiber, header)
f.close()
print
print "Building voxel2fibers dictionary:"
voxel2fibers = build_voxel_fibers_dict(fiber, header)
voxel = tuple(header['dim'] / 2)
print "Example: fibers crossing voxel", voxel
try:
print voxel2fibers[voxel]
except KeyError:
print []
print "There are no fibers crossing this voxel."
pass
print
x = header['dim'][0] / 2
print "Example: counting fibers crossing plane x =", x
counter = 0
for y in range(header['dim'][1]):
for z in range(header['dim'][2]):
try:
counter += voxel2fibers[(x,y,z)].size
except KeyError:
pass
pass
pass
print "Number of fibers:", counter
print
fiber_id = 2000
print "Which fibers cross (the voxels of) fiber[fiber_id=",fiber_id,"] ?"
xyz = fiber[fiber_id][0]
ijk = mm2voxel(xyz, header)
fiber_id_list = N.unique(N.hstack([voxel2fibers[i,j,k] for i,j,k in ijk]))
print fiber_id_list
print fiber_id_list.size, "fibers."
print
print "Saving .trk file with just the previous list of fibers."
filename3 = filename+'_cross_fiber_id_'+str(fiber_id)+'.trk'
print "Saving to:", filename3
import copy
fiber2 = [fiber[fiber_id] for fiber_id in fiber_id_list]
header2 = copy.deepcopy(header)
header2['n_count'] = N.array([fiber_id_list.size])
f = open(filename3, 'w')
write_header(f, header2)
write_fibers(f, fiber2, header2)
f.close()
|
[
"emanuele@relativita.com"
] |
emanuele@relativita.com
|
820ed298b2d0d51b64a647c759fec6a4a95c79e1
|
0c4b33d04cf7fb73b3752b03af89eeaf76b8a0d2
|
/第14章-网络编程/client.py
|
93a57207689113ca5cbd684fb77a81dba69d2db4
|
[] |
no_license
|
kingflyfly/python_study
|
3b3ab427d23174b61b8f14c223059cfa9f303219
|
8a63a7c11b408bbc11a2b636517beaa424b37725
|
refs/heads/master
| 2020-06-11T01:39:52.655730
| 2020-03-24T16:09:39
| 2020-03-24T16:09:39
| 193,817,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
import socket
import sys
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 9992
# connect to the service, specifying the host and port
s.connect((host, port))
# receive at most 1024 bytes of data
msg = s.recv(1024)
s.close()
print (msg.decode('utf-8'))
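# A matching minimal server sketch for local testing (hypothetical, not part of this file):
#
#   import socket
#   srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   srv.bind((socket.gethostname(), 9992))
#   srv.listen(1)
#   conn, addr = srv.accept()
#   conn.send('hello'.encode('utf-8'))
#   conn.close()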
|
[
"542001608@qq.com"
] |
542001608@qq.com
|
3e4c84c039144eaf018a0dbe4dfa92d68101bbe8
|
f745231568d2f15c75a82638ffa4fd86c5b682ea
|
/assignment_4/shapes.py
|
2856f2733a4a28e8f7ad12c5634c596f054b3aef
|
[
"WTFPL"
] |
permissive
|
gauravjuvekar/ppl
|
c53dccd274e93207f543afc8ded787cff9319085
|
fc5592623fa294c18a6e24444b9e06e2a08b2f6c
|
refs/heads/master
| 2016-09-12T21:31:07.960658
| 2016-04-26T07:16:00
| 2016-04-26T07:16:00
| 57,103,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
#!/usr/bin/env python3
import math
class Shape(object):
def __init__(self, turtle=None):
self.turtle = turtle
class Polygon(Shape):
def __init__(self, points, turtle=None):
Shape.__init__(self, turtle)
self.points = points
def draw(self, turtle=None):
if turtle is None:
turtle = self.turtle
turtle.penup()
pos = turtle.pos()
relative = lambda x, y: (pos[0] + x, pos[1] + y)
turtle.goto(relative(*(self.points[-1])))
turtle.pendown()
for point in self.points:
turtle.goto(relative(*point))
turtle.penup()
turtle.goto(pos)
turtle.pendown()
def transform(self, matrix):
if not (len(matrix) == 2 and
(len(matrix[0]) == len(matrix[1]) == 2)):
raise ValueError("Transformation matrix must be order 2 square")
apply = lambda point, matrix: (
(point[0] * matrix[0][0]) + (point[1] * matrix[0][1]),
(point[0] * matrix[1][0]) + (point[1] * matrix[1][1]))
self.points = [apply(point, matrix) for point in self.points]
class RegularPolygon(Polygon):
def __init__(self, sides, radius, turtle=None):
step_angle = 360 / sides
points = []
angle = 0
while angle < 360:
points.append((
radius * math.cos(math.radians(angle)),
radius * math.sin(math.radians(angle))))
angle += step_angle
Polygon.__init__(self, points, turtle)
class Ellipse(RegularPolygon):
def __init__(self, rad_x, rad_y, turtle=None):
sides = max((rad_x, rad_y))
RegularPolygon.__init__(self, sides, min((rad_x, rad_y)), turtle)
if rad_x < rad_y:
self.transform(((1, 0), (0, rad_y / rad_x)))
else:
self.transform(((rad_x / rad_y, 0), (0, 1)))
|
[
"gauravjuvekar@gmail.com"
] |
gauravjuvekar@gmail.com
|
980bdcafecbd81a687de64b1aa498e359f541eb6
|
6a90c88cd3898a0936f83c7d2a8f713943d440db
|
/POSTagging/rnn_easy.py
|
245cb834e3e35727bcafae2471703f73190745f7
|
[
"Apache-2.0"
] |
permissive
|
LindgeW/POS-Tagging
|
3be4bc5da30444b22722a15e3e39350231d42c76
|
358570047e8ad8403bcab4a1e9e3b082b9bea5fc
|
refs/heads/master
| 2022-02-17T23:21:58.504742
| 2019-07-25T09:11:03
| 2019-07-25T09:11:03
| 186,325,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,426
|
py
|
import torch
import torch.nn as nn
'''
LSTMCell
Inputs: input, (h_0, c_0)
input (seq_len, batch, input_size): tensor containing the features of the input sequence; may also be a packed variable
h_0 (batch, hidden_size): tensor holding the initial hidden state for each element in the batch
c_0 (batch, hidden_size): tensor holding the initial cell state for each element in the batch
Outputs: h_1, c_1
h_1 (batch, hidden_size): hidden state at the next time step.
c_1 (batch, hidden_size): cell state at the next time step.
LSTM
Inputs: input, (h_0, c_0)
input (seq_len, batch, input_size): tensor containing the features of the input sequence; may also be a packed variable, see torch.nn.utils.rnn.pack_padded_sequence(input, lengths, batch_first=False)
h_0 (num_layers * num_directions, batch, hidden_size): tensor holding the initial hidden state for each element in the batch
c_0 (num_layers * num_directions, batch, hidden_size): tensor holding the initial cell state for each element in the batch
Outputs: output, (h_n, c_n)
output (seq_len, batch, hidden_size * num_directions): tensor holding the output of the last RNN layer. If the input is a torch.nn.utils.rnn.PackedSequence, the output is also a torch.nn.utils.rnn.PackedSequence.
h_n (num_layers * num_directions, batch, hidden_size): tensor holding the hidden state of the last time step.
c_n (num_layers * num_directions, batch, hidden_size): tensor holding the cell state of the last time step.
'''
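# A minimal shape check for the built-in module (illustrative only; sizes are arbitrary
# and assume PyTorch's nn.LSTM API):
#   rnn = nn.LSTM(input_size=20, hidden_size=50, num_layers=2, bidirectional=True)
#   x = torch.randn(6, 3, 20)               # (seq_len, batch, input_size)
#   out, (hn, cn) = rnn(x)                  # out: (6, 3, 100), hn/cn: (4, 3, 50)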
class RNNEncoder(nn.Module):
def __init__(self, input_size=0, hidden_size=0, num_layers=1, batch_first=False, bidirectional=False, dropout=0.0, rnn_type='lstm'):
super(RNNEncoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.batch_first = batch_first
self.bidirectional = bidirectional
self.dropout = dropout
self.num_directions = 2 if self.bidirectional else 1
self._rnn_types = ['RNN', 'LSTM', 'GRU']
self.rnn_type = rnn_type.upper()
assert self.rnn_type in self._rnn_types
# Look up the corresponding cell constructor on torch.nn
self._rnn_cell = getattr(nn, self.rnn_type+'Cell') # getattr fetches an attribute or method of an object
# ModuleList is a subclass of Module; when used inside a Module it is automatically recognised as a sub-module.
# When an nn.ModuleList is added as a member of an nn.Module (i.e. when we add modules to our network),
# the parameters of every nn.Module inside the nn.ModuleList are registered as parameters of our network as well.
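# e.g. rnn_type='lstm' resolves to nn.LSTMCell; one cell is created per layer (and per direction)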
self.fw_cells, self.bw_cells = nn.ModuleList(), nn.ModuleList()
for layer_i in range(self.num_layers):
layer_input_size = self.input_size if layer_i == 0 else self.num_directions * self.hidden_size
self.fw_cells.append(self._rnn_cell(input_size=layer_input_size, hidden_size=self.hidden_size))
if self.bidirectional:
self.bw_cells.append(self._rnn_cell(input_size=layer_input_size, hidden_size=self.hidden_size))
# self.cell = nn.LSTMCell(
# input_size=self.input_size, # dimension of the input features
# hidden_size=self.hidden_size # dimension of the hidden state
# )
def init_hidden(self, batch_size=1, retain=True, device=torch.device('cpu')):
if retain: # whether to guarantee the hidden state is initialised the same way on every iteration
torch.manual_seed(3357)
# hidden = torch.randn(batch_size, self.hidden_size, device=device)
hidden = torch.zeros(batch_size, self.hidden_size, device=device)
if self.rnn_type == 'LSTM':
hidden = (hidden, hidden)
return hidden
def _forward_mask(self, cell, inputs, lens, init_hidden, drop_mask=None):
out_fw = []
seq_len = inputs.size(0)
hx_fw = init_hidden
assert torch.is_tensor(lens)
for xi in range(seq_len):
# print('data in device: ', inputs.device, hx_fw.device)
# print('cell: ', next(cell.parameters()).is_cuda)
hidden = cell(input=inputs[xi], hx=hx_fw)
if self.rnn_type == 'LSTM':
h_next, c_next = hidden
mask = (xi < lens).float().unsqueeze(1).expand_as(h_next)
# mask = torch.tensor((xi < lens), dtype=torch.float, device=inputs.device).unsqueeze(1).expand_as(h_next)
h_next = h_next * mask + init_hidden[0] * (1 - mask)
c_next = c_next * mask + init_hidden[1] * (1 - mask)
out_fw.append(h_next)
if drop_mask is not None: # apply dropout to the recurrent layer
h_next = h_next * drop_mask
hx_next = (h_next, c_next)
else:
h_next = hidden
mask = (xi < lens).float().unsqueeze(1).expand_as(h_next)
# mask = torch.tensor((xi < lens), dtype=torch.float, device=inputs.device).unsqueeze(1).expand_as(h_next)
h_next = h_next * mask + init_hidden * (1 - mask)
out_fw.append(h_next)
if drop_mask is not None: # apply dropout to the recurrent layer
h_next = h_next * drop_mask
hx_next = h_next
hx_fw = hx_next
out_fw = torch.stack(tuple(out_fw), dim=0)
return out_fw, hx_fw
def _backward_mask(self, cell, inputs, lens, init_hidden, drop_mask=None):
out_bw = []
seq_len = inputs.size(0)
hx_bw = init_hidden
assert torch.is_tensor(lens)
for xi in reversed(range(seq_len)):
hidden = cell(input=inputs[xi], hx=hx_bw)
if self.rnn_type == 'LSTM':
h_next, c_next = hidden
mask = (xi < lens).float().unsqueeze(1).expand_as(h_next)
# mask = torch.tensor((xi < lens), dtype=torch.float, device=inputs.device).unsqueeze(1).expand_as(h_next)
h_next = h_next * mask + init_hidden[0] * (1 - mask)
c_next = c_next * mask + init_hidden[1] * (1 - mask)
out_bw.append(h_next)
if drop_mask is not None: # apply dropout to the recurrent layer
h_next = h_next * drop_mask
hx_next = (h_next, c_next)
else:
h_next = hidden
mask = (xi < lens).float().unsqueeze(1).expand_as(h_next)
# mask = torch.tensor((xi < lens), dtype=torch.float, device=inputs.device).unsqueeze(1).expand_as(h_next)
h_next = h_next * mask + init_hidden * (1 - mask)
out_bw.append(h_next)
if drop_mask is not None: # apply dropout to the recurrent layer
h_next = h_next * drop_mask
hx_next = h_next
hx_bw = hx_next
out_bw.reverse()
out_bw = torch.stack(tuple(out_bw), dim=0)
return out_bw, hx_bw
def forward(self, inputs, seq_lens, init_hidden=None):
if self.batch_first:
inputs = inputs.transpose(0, 1)
batch_size = inputs.size(1)
if init_hidden is None:
init_hidden = self.init_hidden(batch_size, device=inputs.device)
# init_hidden = inputs.data.new(batch_size, self.hidden_size).zero_()
# if self.rnn_type == 'LSTM':
# init_hidden = (init_hidden, init_hidden)
hx = init_hidden
hn, cn = [], []
for layer in range(self.num_layers):
input_drop_mask, hidden_drop_mask = None, None
seq_len, batch_size, input_size = inputs.size()
if self.training:
# print('use dropout...')
if layer != 0:
input_drop_mask = torch.zeros(batch_size, input_size, device=inputs.device).fill_(1 - self.dropout)
# create a tensor with the same dtype as inputs on the same device
# input_drop_mask = inputs.data.new(batch_size, input_size).fill_(1 - self.dropout)
input_drop_mask = torch.bernoulli(input_drop_mask)
input_drop_mask = torch.div(input_drop_mask, (1 - self.dropout))
input_drop_mask = input_drop_mask.unsqueeze(-1).expand((-1, -1, seq_len)).permute((2, 0, 1))
inputs = inputs * input_drop_mask
hidden_drop_mask = torch.zeros(batch_size, self.hidden_size, device=inputs.device).fill_(1 - self.dropout)
# hidden_drop_mask = inputs.data.new(batch_size, self.hidden_size).fill_(1 - self.dropout)
hidden_drop_mask = torch.bernoulli(hidden_drop_mask) # outputs 1 with probability p (the input value) and 0 with probability (1 - p)
hidden_drop_mask = torch.div(hidden_drop_mask, (1 - self.dropout)) # keep the expected value consistent between training and inference
# print('data is in cuda: ', inputs.device, mask.device, hx.device, hidden_drop_mask.device)
out_fw, (hn_f, cn_f) = self._forward_mask(cell=self.fw_cells[layer], inputs=inputs, lens=seq_lens, init_hidden=hx, drop_mask=hidden_drop_mask)
# print(out_fw.shape, hn_f.shape, cn_f.shape)
out_bw, hn_b, cn_b = None, None, None
if self.bidirectional:
out_bw, (hn_b, cn_b) = self._backward_mask(cell=self.bw_cells[layer], inputs=inputs, lens=seq_lens, init_hidden=hx, drop_mask=hidden_drop_mask)
# print(out_bw.shape, hn_b.shape, cn_b.shape)
hn.append(torch.cat((hn_f, hn_b), dim=1) if self.bidirectional else hn_f)
cn.append(torch.cat((cn_f, cn_b), dim=1) if self.bidirectional else cn_f)
inputs = torch.cat((out_fw, out_bw), dim=2) if self.bidirectional else out_fw
# print('input shape:', inputs.shape) # (6, 3, 10)
hn = torch.stack(tuple(hn), dim=0)
cn = torch.stack(tuple(cn), dim=0)
output = inputs.transpose(0, 1) if self.batch_first else inputs
return output, (hn, cn)
# default inputs: [seq_len, batch_size, input_size]
# batch_first: [batch_size, seq_len, input_size]
# def forward(self, inputs, init_hidden=None):
# assert torch.is_tensor(inputs) and inputs.dim() == 3
#
# if self.batch_first:
# inputs = inputs.permute(1, 0, 2)
#
# batch_size = inputs.size(1)
# if init_hidden is None:
# init_hidden = self.init_hidden(batch_size)
#
# hx = init_hidden
#
# hn, cn = [], []
# for layer in range(self.num_layers):
# input_drop_mask, hidden_drop_mask = None, None
# seq_len, batch_size, input_size = inputs.size()
# if self.training:
# print('use dropout...')
# if layer != 0:
# input_drop_mask = torch.empty(batch_size, input_size).fill_(1 - self.dropout)
# input_drop_mask = torch.bernoulli(input_drop_mask)
# input_drop_mask = torch.div(input_drop_mask, (1 - self.dropout))
# input_drop_mask = input_drop_mask.unsqueeze(-1).expand((-1, -1, seq_len)).permute((2, 0, 1))
# inputs = inputs * input_drop_mask
#
# hidden_drop_mask = torch.empty(batch_size, self.hidden_size).fill_(1 - self.dropout)
# hidden_drop_mask = torch.bernoulli(hidden_drop_mask) # outputs 1 with probability p (the input value) and 0 with probability (1 - p)
# hidden_drop_mask = torch.div(hidden_drop_mask, (1 - self.dropout)) # keep the expected value consistent between training and inference
#
# out_fw, (hn_f, cn_f) = RNNEncoder._forward(cell=self.fw_cells[layer], inputs=inputs, init_hidden=hx, drop_mask=hidden_drop_mask)
# # print(out_fw.shape, hn_f.shape, cn_f.shape)
#
# out_bw, hn_b, cn_b = None, None, None
# if self.bidirectional:
# out_bw, (hn_b, cn_b) = RNNEncoder._backward(cell=self.bw_cells[layer], inputs=inputs, init_hidden=hx, drop_mask=hidden_drop_mask)
# # print(out_bw.shape, hn_b.shape, cn_b.shape)
#
# hn.append(torch.cat((hn_f, hn_b), dim=1) if self.bidirectional else hn_f)
# cn.append(torch.cat((cn_f, cn_b), dim=1) if self.bidirectional else cn_f)
#
# inputs = torch.cat((out_fw, out_bw), dim=2) if self.bidirectional else out_fw
# # print('input shape:', inputs.shape) # (6, 3, 10)
#
# hn = torch.stack(tuple(hn), dim=0)
# cn = torch.stack(tuple(cn), dim=0)
#
# output = inputs.permute((1, 0, 2)) if self.batch_first else inputs
#
# return output, (hn, cn)
if __name__ == '__main__':
# [batch_size, seq_len, input_size]
inputs = torch.rand(3, 6, 20)
mask = torch.zeros(3, 6)
mask[0, :3] = torch.ones(3)
mask[1, :2] = torch.ones(2)
lstm = RNNEncoder(input_size=20, hidden_size=100, num_layers=3, batch_first=True, bidirectional=True, dropout=0.2)
# h0, c0 = torch.randn(3, 10), torch.randn(3, 10)
# out, (hn, cn) = lstm(inputs, (h0, c0))
out, (hn, cn) = lstm(inputs, mask)
print(out.shape) # [6, 3, 20]
print(hn.shape, cn.shape) # [2, 3, 20] [2, 3, 20]
|
[
"ncu151wlz@qq.com"
] |
ncu151wlz@qq.com
|
f8a4758b89fce2ae01dfdac0c57540060c9a0e3f
|
1f416c5f06c6ccf14e0f9778e52a2e556e6888b7
|
/Config/ssd_config.py
|
73fd796b7d333e67b702743a2931a59f96af67bd
|
[] |
no_license
|
LLLibra/yolo-v3
|
0a8961b3e7c8d099174c72685176b82c3e627f59
|
6dedf28f5b9d07cb609dc1c91119c328d02b6e17
|
refs/heads/master
| 2020-09-24T17:54:22.993738
| 2019-12-28T08:04:11
| 2019-12-28T08:04:11
| 225,812,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
# -*- coding:UTF-8 -*-
from utils.ssd_loss import *
from Model.SSD import build_SSD
extras = {
'300': [[1024,256,512],[512,128,256],[256,128,256],[256,128,256]],
'512': [],
}
mbox = {
'300': [4, 6, 6, 6, 4, 4], # number of boxes per location in each final feature map
'512': [],
}
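# Sanity check for the '300' setting (illustrative): with feature maps [38,19,10,5,3,1]
# and per-cell box counts [4,6,6,6,4,4], the total number of prior boxes is
# 38*38*4 + 19*19*6 + 10*10*6 + 5*5*6 + 3*3*4 + 1*1*4 = 8732.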
##SSD 300 config
voc = {
'num_classes': 21,
'feature_maps':[38,19,10,5,3,1],
'min_dim':300,
'img_size':300,
'xywh':False,
'steps':[8,16,32,64,100,300],
'min_sizes':[30,60,111,162,216,264],
'max_sizes':[60,111,162,213,264,315],
'aspect_ratio':[[2],[2,3],[2,3],[2,3],[2],[2]],
'variance':[0.1,0.2],
'clip':True,
'name':'VOC',
}
coco = {
'num_classes': 201,
'lr_steps': (280000, 360000, 400000),
'max_iter': 400000,
'feature_maps': [38, 19, 10, 5, 3, 1],
'min_dim': 300,
'img_size':300,
'steps': [8, 16, 32, 64, 100, 300],
'min_sizes': [21, 45, 99, 153, 207, 261],
'max_sizes': [45, 99, 153, 207, 261, 315],
'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
'variance': [0.1, 0.2],
'clip': True,
'name': 'COCO',
}
|
[
"245407754@qq.com"
] |
245407754@qq.com
|
4120b422aab2e14830f4047978f9995ac06fa5c4
|
430722ea44c3704706f506554bb3ce64a7ee6596
|
/tests/image/test_backbones.py
|
60369275557cfe450f81d029b1bc533270f6f016
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
ethanwharris/lightning-flash
|
462907c7e7dbbbf0840917ae72a67b5ddbdc7138
|
48bdfd86639aa4aad493d264cd8a6eeeb50a394f
|
refs/heads/master
| 2023-06-07T10:06:06.672478
| 2021-07-12T11:55:15
| 2021-07-12T11:55:15
| 385,288,776
| 0
| 0
|
Apache-2.0
| 2021-07-12T15:09:39
| 2021-07-12T15:09:39
| null |
UTF-8
|
Python
| false
| false
| 2,080
|
py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.error
import pytest
from pytorch_lightning.utilities import _TORCHVISION_AVAILABLE
from flash.core.utilities.imports import _BOLTS_AVAILABLE, _TIMM_AVAILABLE
from flash.image.backbones import catch_url_error, IMAGE_CLASSIFIER_BACKBONES
@pytest.mark.parametrize(["backbone", "expected_num_features"], [
pytest.param("resnet34", 512, marks=pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason="No torchvision")),
pytest.param("mobilenetv2_100", 1280, marks=pytest.mark.skipif(not _TIMM_AVAILABLE, reason="No timm")),
pytest.param("simclr-imagenet", 2048, marks=pytest.mark.skipif(not _BOLTS_AVAILABLE, reason="No bolts")),
pytest.param("swav-imagenet", 2048, marks=pytest.mark.skipif(not _BOLTS_AVAILABLE, reason="No bolts")),
pytest.param("mobilenet_v2", 1280, marks=pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason="No torchvision")),
])
def test_image_classifier_backbones_registry(backbone, expected_num_features):
backbone_fn = IMAGE_CLASSIFIER_BACKBONES.get(backbone)
backbone_model, num_features = backbone_fn(pretrained=False)
assert backbone_model
assert num_features == expected_num_features
def test_pretrained_backbones_catch_url_error():
def raise_error_if_pretrained(pretrained=False):
if pretrained:
raise urllib.error.URLError('Test error')
with pytest.warns(UserWarning, match="Failed to download pretrained weights"):
catch_url_error(raise_error_if_pretrained)(pretrained=True)
|
[
"noreply@github.com"
] |
ethanwharris.noreply@github.com
|
7a8949c381732e07d4f492876a8541503b3d5db0
|
8f7b755a7d21332ae26a9d2e59dc0da00da8affb
|
/euler-081.py
|
ab42d76c52e73ce1d22a8a8550abf38c61c371fb
|
[] |
no_license
|
msbelal/Project-Euler
|
95204d1ea455f45a49e9ce517d427db80fe15e36
|
1eda6b8a1786f0613023193d3dcde3090edaac9a
|
refs/heads/master
| 2020-04-12T12:07:41.921989
| 2012-04-01T15:41:12
| 2012-04-01T15:41:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
from __future__ import with_statement
with open ("matrix-81.txt") as f:
lines = [ line.strip().split(",") for line in f.readlines() ]
q = {}
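# Dynamic programming over anti-diagonals: q[x, y] holds the minimal path sum
# from the top-left cell to (x, y), moving only right or down.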
for i in xrange(159) :
for j in xrange(0,i+1) :
x, y = j, i - j
if (0 <= x < 80) and (0 <= y < 80) :
if x == 0 and y == 0:
q[x,y] = 0
elif x == 0 :
q[x,y] = q[x,y-1]
elif y == 0 :
q[x,y] = q[x-1,y]
else :
q[x,y] = min(q[x-1,y], q[x, y-1])
q[x,y] += int(lines[x][y])
print q[79,79]
|
[
"hughdbrown@.(none)"
] |
hughdbrown@.(none)
|
136b1182e8e9b3bb6006d82097af6a64457a1413
|
817965ef6ee70672eabedbbafe336ca07d6443ff
|
/0x0B-python-input_output/8-load_from_json_file.py
|
34f8ae593948ca8fc24e3410cf357a351c626b5f
|
[] |
no_license
|
julianfrancor/holbertonschool-higher_level_programming
|
f021086eb2a86b366c391452b13581c87587a3a8
|
bd2a291c725ba09d88e9a629d0b22cf4ed7122e7
|
refs/heads/master
| 2022-12-23T05:27:27.942300
| 2020-09-24T21:22:56
| 2020-09-24T21:22:56
| 257,935,813
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
#!/usr/bin/python3
"""
function that creates an Object from a “JSON file”
"""
import json
def load_from_json_file(filename):
"""
Args
filename: JSON file from which the object
is going to be read
json.dumps() method can convert a Python object into a JSON string.
json.dump() method can be used to write a Python object to a file directly as JSON;
it can write to an open file.
json.loads() expects to get its text from a string object.
json.load() expects to get the text from a file;
it can read from an open file and convert it.
"""
with open(filename, mode="r", encoding="UTF8") as file:
return json.load(file)
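# Example usage (hypothetical file name, shown for illustration only):
#   obj = load_from_json_file("my_obj.json")
#   print(type(obj))   # e.g. <class 'dict'> for a JSON object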
|
[
"julianfrancor@gmail.com"
] |
julianfrancor@gmail.com
|
9af53ed594299e5bca7f79a0631bb772ce8737c6
|
f2f96ef63c721dbc985dae99f294aa49e7c5fe48
|
/Server/database/__init__.py
|
5825c14821c76e90269bc986588ee40a40b21363
|
[
"MIT"
] |
permissive
|
Ricky-Hao/IMPK-Server
|
6e44e7ea81563908dfad3ea6347b2ca0da6cbb0c
|
786e24269e7cc506a82ae8aa0fa0d1df8c478f51
|
refs/heads/master
| 2018-09-25T05:17:24.551553
| 2018-06-07T05:42:20
| 2018-06-07T05:42:20
| 124,077,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 62
|
py
|
from Server.database.database import Database
db = Database()
|
[
"a471558277@gmail.com"
] |
a471558277@gmail.com
|
e9f935855c936f7be736e9cada0f8dfb9d5cbf2c
|
6f444f025f27a10dd7b1bf61083ea2832ffcb196
|
/backend/location/api/v1/serializers.py
|
f4a37f977e26a6abd08e6dffcee6108c10dadd98
|
[] |
no_license
|
crowdbotics-apps/ledger-wallet-29295
|
2fe0eee9e06cb1f5c8e514ad650df8276aac789b
|
d96542a71685ce6d335882c10cf840355c8252f7
|
refs/heads/master
| 2023-06-24T00:46:30.889717
| 2021-07-30T20:37:03
| 2021-07-30T20:37:03
| 391,182,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
from rest_framework import serializers
from location.models import TaskLocation, CustomerLocation, TaskerLocation, MapLocation
class CustomerLocationSerializer(serializers.ModelSerializer):
class Meta:
model = CustomerLocation
fields = "__all__"
class MapLocationSerializer(serializers.ModelSerializer):
class Meta:
model = MapLocation
fields = "__all__"
class TaskerLocationSerializer(serializers.ModelSerializer):
class Meta:
model = TaskerLocation
fields = "__all__"
class TaskLocationSerializer(serializers.ModelSerializer):
class Meta:
model = TaskLocation
fields = "__all__"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
81f726744a38d25f6099ad36107663ac8a5d3212
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/stdlib-big-2805.py
|
b2c7ae07ef65cab60cc16a7073cc6a18c9d869b1
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,992
|
py
|
# ChocoPy library functions
def int_to_str(x: int) -> str:
digits:[str] = None
result:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
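# e.g. int_to_str(-305) returns "-305": the sign is written first, then the digits recursively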
def int_to_str2(x: int, x2: int) -> str:
digits:[str] = None
digits2:[str] = None
result:str = ""
result2:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str3(x: int, x2: int, x3: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str4(x: int, x2: int, x3: int, x4: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str5(x: int, x2: int, x3: int, x4: int, x5: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
digits5:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
result5:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def str_to_int(x: str) -> int:
result:int = 0
digit:int = 0
char:str = ""
sign:int = 1
first_char:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int2(x: str, x2: str) -> int:
result:int = 0
result2:int = 0
digit:int = 0
digit2:int = 0
char:str = ""
char2:str = ""
sign:int = 1
sign2:int = 1
first_char:bool = True
first_char2:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int3(x: str, x2: str, x3: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
char:str = ""
char2:str = ""
char3:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int4(x: str, x2: str, x3: str, x4: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int5(x: str, x2: str, x3: str, x4: str, x5: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
result5:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
digit5:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
char5:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
sign5:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
first_char5:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
# Input parameters
c:int = 42
c2:int = 42
c3:int = 42
c4:int = 42
c5:int = 42
n:int = 10
n2:int = 10
n3:int = 10
n4:int = 10
n5:int = 10
# Run [-nc, nc] with step size c
s:str = ""
s2:str = ""
s3:str = ""
s4:str = ""
s5:str = ""
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
i = -n * c
# Crunch
while i <= n * c:
s = int_to_str(i)
print(s)
i = str_to_int(s) + c
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
4fe2cf1c3b72558c9e48a68238cf6abf7425b930
|
073e5c1775886ec42ed741378e682534e79bb856
|
/kdb/MTS_patch.py
|
3ff2e1324c47467096b8afed52a839baa28a898d
|
[] |
no_license
|
tt9024/huan
|
97edd01e280651720a7556ff75dd64cc91184a04
|
48dcc7ef0ea40902e33bc67faf0298736a3ebe6b
|
refs/heads/master
| 2023-07-26T12:30:53.116852
| 2023-07-11T02:30:14
| 2023-07-11T02:30:14
| 134,997,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,463
|
py
|
import numpy as np
import repo_dbar as repo
import l1
import os
import glob
import dill
# patch vbs of various barseconds
def patch_vbs(dbar, day, utc, vbs, barsec):
bar, col, bs = dbar.load_day(day)
if bar is None or len(bar)==0:
print('problem getting bars from repo on ', day)
return
# make sure it's a multiple
bs_mul = barsec//bs
if bs_mul*bs != barsec:
print('barsec ', barsec, ' is not a multiple of repo barsec ', bs, ' on ', day)
return
utc_bs = dbar._make_daily_utc(day, barsec)
nbar = len(utc)
ix = np.clip(np.searchsorted(utc, utc_bs),0,nbar-1)
ixz = np.nonzero(utc[ix] == utc_bs)[0]
if len(ixz) == 0:
print('nothing found in repo on ', day)
return
# reuse the existing if not provided, but aggregated at barsec
#vbs_bs = np.zeros(len(utc_bs))
vbs_bs = np.sum(bar[:,repo.vbsc].reshape((len(utc_bs),bs_mul)),axis=1)
vbs_bs[ixz] = vbs[ix][ixz]
# calculate the weight to be vol within the barsec
vbs0 = bar[:,repo.volc].reshape((len(utc_bs),bs_mul))
vbs0 = (vbs0.T/np.sum(vbs0,axis=1)).T
vbs0[np.isinf(vbs0)] = 1.0/bs_mul
vbs0[np.isnan(vbs0)] = 1.0/bs_mul
vbs_bs0 = (vbs0.T*vbs_bs).T.reshape((len(utc_bs)*bs_mul,1))
# write this day back
dbar.overwrite([vbs_bs0], [day], [[repo.vbsc]], bs)
print('!!DONE ', day)
def update_array(dbar, vbs_array, barsec):
"""
vbs_array shape [nndays, 2], of utc and vbs
"""
nndays, nc = vbs_array.shape
assert nc == 2, 'vbs_array expected shape 2 (utc,vbs)'
utc=vbs_array[:,0]
vbs=vbs_array[:,1]
assert utc[1]-utc[0] == barsec, 'barsec mismatch! ' + str((utc[1]-utc[0],barsec))
start_day = l1.trd_day(vbs_array[0,0])
end_day = l1.trd_day(vbs_array[-1,0])
tdi = l1.TradingDayIterator(start_day)
day = tdi.yyyymmdd()
while day != end_day:
patch_vbs(dbar, day, utc, vbs, barsec)
tdi.next()
day = tdi.yyyymmdd()
def update_array_path(array_path='/home/bfu/kisco/kr/vbs/2021_1125_2022_0114', barsec=15, repo_path = '/home/bfu/kisco/kr/repo'):
os.system('gunzip ' + os.path.join(array_path,'*.npy.gz'))
fn = glob.glob(os.path.join(array_path, '*.npy'))
for f in fn:
print('processing ', f)
# expect file name as CL.npy
symbol = f.split('/')[-1].split('.')[0]
vsarr = np.load(open(f,'rb'))
dbar = repo.RepoDailyBar(symbol, repo_path=repo_path)
update_array(dbar, vsarr, barsec)
def update_dict(dict_file, barsec, repo_path='/home/bfu/kisco/kr/repo', symbol_list=None):
"""dict: {symbol : { 'utc': shape [ndays,2], 'vbs': shape [ndays, n] } }
where utc has each day's first/last utc
the barsec is given for verification purpose: barsec = (utc1-utc0)/n
"""
d = dill.load(open(dict_file, 'rb'))
for symbol in d.keys():
if symbol_list is not None:
if symbol not in symbol_list:
continue
utc=d[symbol]['utc']
vbs=d[symbol]['vbs']
ndays, nc = utc.shape
assert nc==2, 'utc shape not 2 for ' + symbol
print('got ',ndays,' for ', symbol)
dbar = repo.RepoDailyBar(symbol, repo_path=repo_path)
for u, v in zip(utc, vbs):
(u0,u1)=u
day = l1.trd_day(u0)
# LCO could have utc up until 18:00
# turn it on when fixed in mts_repo
#assert day == l1.trd_day(u1), 'not same trade day for %s on %d: %f-%f'%(symbol, day, u0, u1)
utc0 = np.arange(u0,u1+barsec,barsec).astype(int)
n = len(v)
assert len(utc0)==n, 'vbs shape mismatch with utc for %s on %s: %d-%d'%(symbol, day, (u1-u0)//barsec,n)
print('process %s on %s'%(symbol, day))
patch_vbs(dbar, day, utc0, v, barsec)
def update_dict_all():
# a scripted update, modify as needed
# the 2 _N1 from 20220223 to 20220415 with barsec=5
path = '/home/bfu/kisco/kr/vbs/update_0415'
dict_files = ['0223_0302_5s.dill', '0303_0415_5s.dill']
barsec=5
repo_path = '/home/bfu/kisco/kr/repo'
for df in dict_files:
update_dict(os.path.join(path, df), barsec, repo_path=repo_path)
# the _N2 from 20211125 to 20220415 with barsec=30
dict_files = ['20211125_2022_0415_N2_30s.dill']
barsec=30
repo_path = '/home/bfu/kisco/kr/repo_nc'
for df in dict_files:
update_dict(os.path.join(path, df), barsec, repo_path=repo_path)
|
[
"joy@joy.com"
] |
joy@joy.com
|
ace0c793df344ee3d16d8b97ce61547ac0670a0d
|
7accb98587c694db57507468525261458e707138
|
/fabfile.py
|
f12579758e7aff044a31e3975c7fa50ea643997a
|
[] |
no_license
|
kotechkice/kicekriea
|
47f6ce4b9fa162b3dafe8dda45c640876a3e4aeb
|
6457e97aeea13f768488287abc4a8afcf40f8131
|
refs/heads/master
| 2021-01-01T03:46:39.199835
| 2016-04-26T05:57:16
| 2016-04-26T05:57:16
| 57,111,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 22,058
|
py
|
from __future__ import print_function, unicode_literals, with_statement
from future.builtins import input, open
import os
import re
import sys
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from contextlib import contextmanager
from posixpath import join
from os.path import basename, dirname
from fabric.api import env, cd, prefix, sudo as _sudo, run as _run, hide, task
#from fabric.api import settings
from fabric.api import puts
from fabric.contrib.files import exists, upload_template
from fabric.colors import yellow, green, blue, red
from fabric.utils import warn
import pdb
###############
# Fab Command #
###############
#fab command
#fab install
################
# Config setup #
################
conf = {}
if sys.argv[0].split(os.sep)[-1] in ("fab", "fab-script.py"):
# Ensure we import settings from the current dir
try:
#conf = __import__("settings", globals(), locals(), [], 0).FABRIC
#conf = __import__("project.settings", globals(), locals(), [], 0).FABRIC
from project import settings
conf = settings.FABRIC
try:
conf["HOSTS"][0]
except (KeyError, ValueError):
raise ImportError
except (ImportError, AttributeError):
print("Aborting, no hosts defined.")
exit()
env.db_pass = conf.get("DB_PASS", None)
env.db_root_pass = env.db_pass
#env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [""])
env.proj_name = conf.get("PROJECT_NAME", os.getcwd().split(os.sep)[-1])
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s" % env.user)
env.venv_path = "%s/%s" % (env.venv_home, env.proj_name)
env.proj_dirname = "project"
env.proj_path = "%s/%s" % (env.venv_path, env.proj_dirname)
env.manage = "%s/bin/python %s/project/manage.py" % ((env.venv_path,) * 2)
env.domains = conf.get("DOMAINS", [conf.get("LIVE_HOSTNAME", env.hosts[0])])
env.domains_nginx = " ".join(env.domains)
env.domains_python = ", ".join(["'%s'" % s for s in env.domains])
env.ssl_disabled = "#" if len(env.domains) > 1 else ""
env.repo_url = conf.get("REPO_URL", "")
env.git = env.repo_url.startswith("git") or env.repo_url.endswith(".git")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
env.django_user = conf.get("DJANGO_USER", "duser")
env.django_user_group = env.django_user
env.django_project_settings = "settings"
env.gunicorn_workers = 2
env.gunicorn_logfile = '%(venv_path)s/logs/projects/%(proj_name)s_gunicorn.log' % env
#env.rungunicorn_script = '%(venv_path)s/scripts/rungunicorn_%(proj_name)s.sh' % env
env.rungunicorn_script = '%(venv_path)s/bin/gunicorn_start' % env
env.gunicorn_worker_class = "eventlet"
env.gunicorn_loglevel = "info"
env.gunicorn_port = conf.get("GUNICORN_PORT", 8000)
env.supervisor_program_name = env.proj_name
env.supervisorctl = '/usr/bin/supervisorctl'
env.supervisor_autostart = 'true'
env.supervisor_autorestart = 'true'
env.supervisor_redirect_stderr = 'true'
env.supervisor_stdout_logfile = '%(venv_path)s/logs/projects/supervisord_%(proj_name)s.log' % env
#env.supervisord_conf_file = '%(venv_path)s/configs/supervisord/%(proj_name)s.conf' % env
env.supervisord_conf_file = '/etc/supervisor/conf.d/%(proj_name)s.conf' % env
##################
# Template setup #
##################
# Each template gets uploaded at deploy time, only if their
# contents has changed, in which case, the reload command is
# also run.
templates = {
"nginx": {
"local_path": "deploy/nginx.conf",
"remote_path": "/etc/nginx/sites-enabled/%(proj_name)s.conf",
},
"supervisor": {
"local_path": "deploy/supervisord.conf",
"remote_path": env.supervisord_conf_file,
},
"cron": {
"local_path": "deploy/crontab",
"remote_path": "/etc/cron.d/%(proj_name)s",
"owner": "root",
"mode": "600",
},
"gunicorn": {
"local_path": "deploy/gunicorn_start",
"remote_path": "%(venv_path)s/bin/gunicorn_start",
},
"settings": {
"local_path": "deploy/local_settings",
"remote_path": "%(proj_path)s/project/local_settings.py",
},
"mysql": {
"local_path": "deploy/mysql.cnf",
"remote_path": "/etc/mysql/my.cnf",
}
}
######################################
# Context for virtualenv and project #
######################################
@contextmanager
def virtualenv():
"""
Runs commands within the project's virtualenv.
"""
with cd(env.venv_path):
with prefix("source %s/bin/activate" % env.venv_path):
yield
@contextmanager
def project():
"""
Runs commands within the project's directory.
"""
with virtualenv():
with cd(env.proj_dirname):
yield
@contextmanager
def update_changed_requirements():
"""
Checks for changes in the requirements file across an update,
and gets new requirements if changes have occurred.
"""
reqs_path = join(env.proj_path, env.reqs_path)
get_reqs = lambda: run("cat %s" % reqs_path, show=False)
old_reqs = get_reqs() if env.reqs_path else ""
yield
if old_reqs:
new_reqs = get_reqs()
if old_reqs == new_reqs:
# Unpinned requirements should always be checked.
for req in new_reqs.split("\n"):
if req.startswith("-e"):
if "@" not in req:
# Editable requirement without pinned commit.
break
elif req.strip() and not req.startswith("#"):
if not set(">=<") & set(req):
# PyPI requirement without version.
break
else:
# All requirements are pinned.
return
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
###########################################
# Utils and wrappers for various commands #
###########################################
def _print(output):
print()
print(output)
print()
def print_command(command):
_print(blue("$ ", bold=True) +
yellow(command, bold=True) +
red(" ->", bold=True))
@task
def run(command, show=True):
"""
Runs a shell comand on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _run(command)
@task
def sudo(command, show=True):
"""
Runs a command as sudo.
"""
if show:
print_command(command)
with hide("running"):
return _sudo(command)
def log_call(func):
@wraps(func)
def logged(*args, **kawrgs):
header = "-" * len(func.__name__)
_print(green("\n".join([header, func.__name__, header]), bold=True))
return func(*args, **kawrgs)
return logged
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected
def upload_template_and_reload(name):
"""
Uploads a template only if it has changed, and if so, reload a
related service.
"""
template = get_templates()[name]
local_path = template["local_path"]
if not os.path.exists(local_path):
project_root = os.path.dirname(os.path.abspath(__file__))
local_path = os.path.join(project_root, local_path)
remote_path = template["remote_path"]
reload_command = template.get("reload_command")
owner = template.get("owner")
mode = template.get("mode")
remote_data = ""
if exists(remote_path):
with hide("stdout"):
remote_data = sudo("cat %s" % remote_path, show=False)
with open(local_path, "r") as f:
local_data = f.read()
# Escape all non-string-formatting-placeholder occurrences of '%':
local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
if "%(db_pass)s" in local_data:
env.db_pass = db_pass()
local_data %= env
clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
if clean(remote_data) == clean(local_data):
return
upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
if owner:
sudo("chown %s %s" % (owner, remote_path))
if mode:
sudo("chmod %s %s" % (mode, remote_path))
if reload_command:
sudo(reload_command)
def db_pass():
"""
Prompts for the database password if unknown.
"""
if not env.db_pass:
env.db_pass = getpass("Enter the database password: ")
return env.db_pass
@task
def apt(packages):
"""
Installs one or more system packages via apt.
"""
return sudo("apt-get install -y -q " + packages)
@task
def pip(packages):
"""
Installs one or more Python packages within the virtual environment.
"""
with virtualenv():
return sudo("pip install %s" % packages)
def postgres(command):
"""
Runs the given command as the postgres user.
"""
show = not command.startswith("psql")
return run("sudo -u root sudo -u postgres %s" % command, show=show)
@task
def psql(sql, show=True):
"""
Runs SQL against the project's database.
"""
out = postgres('psql -c "%s"' % sql)
if show:
print_command(sql)
return out
@task
def backup(filename):
"""
Backs up the database.
"""
return postgres("pg_dump -Fc %s > %s" % (env.proj_name, filename))
@task
def restore(filename):
"""
Restores the database.
"""
return postgres("pg_restore -c -d %s %s" % (env.proj_name, filename))
@task
def python(code, show=True):
"""
Runs Python code in the project's virtual environment, with Django loaded.
"""
#pdb.set_trace()
setup = "import os; os.environ[\'DJANGO_SETTINGS_MODULE\']=\'settings\';"
full_code = 'python -c "%s%s"' % (setup, code.replace("`", "\\\`"))
with project():
result = run(full_code, show=False)
if show:
print_command(code)
return result
def static():
"""
Returns the live STATIC_ROOT directory.
"""
return python("from django.conf import settings;"
"print settings.STATIC_ROOT", show=False).split("\n")[-1]
@task
def manage(command):
"""
Runs a Django management command.
"""
return run("%s %s" % (env.manage, command))
#########################
# Install and configure #
#########################
@task
@log_call
def all():
"""
Installs everything required on a new system and deploy.
From the base software, up to the deployed project.
"""
install()
create_virtualenv()
create_SSH()
create_git()
#create_DB()
set_SSL()
create_django_user()
set_password_django_user()
upload_rungunicorn_script()
upload_supervisord_conf()
create_nginx()
set_project()
@task
@log_call
def install():
"""
Installs the base system and Python requirements for the entire server.
"""
#locale = "LC_ALL=%s" % env.locale
#with hide("stdout"):
# if locale not in sudo("cat /etc/default/locale"):
# sudo("update-locale %s" % locale)
# run("exit")
sudo("apt-get update -y -q")
apt("nginx libjpeg-dev python-dev python-setuptools git-core "
"libpq-dev memcached supervisor")
#apt("mysql-server mysql-client")
apt("openssh-server libev-dev python-all-dev build-essential")
apt("debconf-utils")
sudo("easy_install pip")
#sudo("pip install virtualenv mercurial")
apt("python-virtualenv virtualenvwrapper")
#sudo("apt-get install -y python-virtualenv virtualenvwrapper")
@task
@log_call
def create_virtualenv():
"""
Create a new virtual environment & git.
"""
#pdb.set_trace()
if not exists(env.venv_home):
run("mkdir %s" % env.venv_home)
with cd(env.venv_home):
if exists(env.proj_name):
prompt = input("\nVirtualenv exists: %s"
"\nWould you like to replace it? (yes/no) "
% env.proj_name)
if prompt.lower() != "yes":
print("\nAborting!")
return False
remove()
run("export WORKON_HOME=$HOME/.virtualenvs")
run("export PIP_VIRTUALENV_BASE=$WORKON_HOME")
run("source /usr/share/virtualenvwrapper/virtualenvwrapper.sh && mkvirtualenv %s"% env.proj_name)
@task
@log_call
def create_SSH():
"""
Create a new ssh key.
"""
#pdb.set_trace()
ssh_path = "/home/%s/.ssh" % env.user
if not exists(ssh_path):
run("mkdir %s" % env.ssh_path)
pub_path = ssh_path+"/id_rsa.pub"
with cd(ssh_path):
if not exists(pub_path):
run('ssh-keygen -t rsa')
run("cat %s"% pub_path)
input("\nSet SSH & Press Enter!")
@task
@log_call
def create_git():
"""
Create a new git.
"""
if not exists(env.venv_path):
print("\nVirtual env path isn't exists!")
return False
run("git clone %s %s" % (env.repo_url, env.proj_path))
def mysql_execute(sql, user, password):
""" Executes passed sql command using mysql shell. """
#user = user or env.conf.DB_USER
from fabric.api import prompt
sql = sql.replace('"', r'\"')
#if password == None:
# password = prompt('Please enter MySQL root password:')
return run('echo "%s" | mysql --user="%s" --password="%s"' % (sql, user , password))
@task
@log_call
def create_DB():
"""
Create DB and DB user.
"""
from fabric.api import settings, prompt
with settings(hide('warnings', 'stderr'), warn_only=True):
result = sudo('dpkg-query --show mysql-server')
if result.failed is False:
warn('MySQL is already installed')
else:
#sudo('echo "mysql-server-5.0 mysql-server/root_password password %s" | debconf-set-selections' % env.db_root_pass)
#sudo('echo "mysql-server-5.0 mysql-server/root_password_again password %s" | debconf-set-selections' % env.db_root_pass)
run('echo "mysql-server-5.0 mysql-server/root_password password %s" | sudo debconf-set-selections' % env.db_root_pass)
run('echo "mysql-server-5.0 mysql-server/root_password_again password %s" | sudo debconf-set-selections' % env.db_root_pass)
apt('mysql-server mysql-client')
upload_template_and_reload("mysql")
sql = 'CREATE DATABASE %(proj_name)s DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci' % env
mysql_execute(sql, 'root', env.db_root_pass)
sql = """CREATE USER '%(proj_name)s'@'%%' IDENTIFIED BY '%(db_pass)s';""" % env
#sql = """CREATE USER '%(proj_name)s'@'localhost' IDENTIFIED BY '%(db_pass)s';""" % env
mysql_execute(sql, 'root', env.db_root_pass)
sql = """GRANT ALL ON %(proj_name)s.* TO '%(proj_name)s'@'%%'; FLUSH PRIVILEGES;""" % env
#sql = """GRANT ALL ON %(proj_name)s.* TO '%(proj_name)s'@'localhost'; FLUSH PRIVILEGES;""" % env
mysql_execute(sql, 'root', env.db_root_pass)
sudo('service mysql restart')
@task
@log_call
def remove_DB():
"""
Remove DB and DB user.
"""
sql = 'DROP DATABASE %(proj_name)s' % env
mysql_execute(sql, 'root', env.db_root_pass)
sql = """DROP USER '%(proj_name)s';""" % env
mysql_execute(sql, 'root', env.db_root_pass)
sudo("service mysql stop")
sudo("apt-get remove -y --purge mysql-server mysql-client")
#sudo("netstat -tap | grep mysql")
sudo("apt-get remove -y --purge mysql-server*")
sudo("apt-get remove -y --purge mysql-client*")
@task
@log_call
def set_SSL():
"""
# Set up SSL certificate.
"""
if not env.ssl_disabled:
conf_path = "/etc/nginx/conf"
if not exists(conf_path):
sudo("mkdir %s" % conf_path)
with cd(conf_path):
crt_file = env.proj_name + ".crt"
key_file = env.proj_name + ".key"
if not exists(crt_file) and not exists(key_file):
try:
crt_local, = glob(join("deploy", "*.crt"))
key_local, = glob(join("deploy", "*.key"))
except ValueError:
parts = (crt_file, key_file, env.domains[0])
sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
"-subj '/CN=%s' -days 3650" % parts)
else:
upload_template(crt_local, crt_file, use_sudo=True)
upload_template(key_local, key_file, use_sudo=True)
@task
@log_call
def migrate():
"""
migrate.
"""
manage('migrate')
@task
@log_call
def set_project():
"""
Set up project.
"""
upload_template_and_reload("settings")
with project():
if env.reqs_path:
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
apt('libmysqlclient-dev')
pip("fabric django python-social-auth "
"gunicorn django-hosts mysql-python django-crontab pytz django-dbbackup")
manage('migrate')
manage('createsuperuser')
@task
@log_call
def create_django_user():
"""
create django user
"""
sudo('groupadd --system %(django_user)s' % env)
sudo('useradd --system --gid %(django_user)s --home %(venv_path)s %(django_user)s' % env)
sudo('chown -R %(django_user)s:%(django_user)s %(venv_path)s' % env)
sudo('chmod -R g+w %(venv_path)s' % env)
sudo('usermod -a -G %(django_user)s %(user)s' % env)
@task
@log_call
def set_password_django_user():
"""
set password django user
"""
sudo('passwd %(django_user)s' % env)
@task
@log_call
def upload_rungunicorn_script():
"""
upload rungunicorn conf
"""
sudo('mkdir -p %s' % dirname(env.gunicorn_logfile))
sudo('chown %s %s' % (env.django_user, dirname(env.gunicorn_logfile)))
sudo('chmod -R 775 %s' % dirname(env.gunicorn_logfile))
sudo('touch %s' % env.gunicorn_logfile)
sudo('chown %s %s' % (env.django_user, env.gunicorn_logfile))
sudo('mkdir -p %s' % dirname(env.rungunicorn_script))
upload_template_and_reload("gunicorn")
sudo('chmod u+x %s' % env.rungunicorn_script)
sudo('chown -R %(django_user)s:%(django_user)s %(rungunicorn_script)s' % env)
@task
@log_call
def upload_supervisord_conf():
''' upload supervisor conf '''
sudo('mkdir -p %s' % dirname(env.supervisor_stdout_logfile))
sudo('chown %s %s' % (env.django_user, dirname(env.supervisor_stdout_logfile)))
sudo('chmod -R 775 %s' % dirname(env.supervisor_stdout_logfile))
sudo('touch %s' % env.supervisor_stdout_logfile)
sudo('chown %s %s' % (env.django_user, env.supervisor_stdout_logfile))
sudo('mkdir -p %s' % dirname(env.supervisord_conf_file))
upload_template_and_reload("supervisor")
sudo('%(supervisorctl)s reread' % env)
sudo('%(supervisorctl)s update' % env)
@task
@log_call
def create_nginx():
'''
create nginx
'''
upload_template_and_reload("nginx")
sudo('unlink /etc/nginx/sites-enabled/default')
sudo("service nginx restart")
@task
@log_call
def restart():
"""
Restart gunicorn worker processes for the project.
"""
pid_path = "%s/gunicorn.pid" % env.proj_path
if exists(pid_path):
#sudo("kill -HUP `cat %s`" % pid_path)
#$sudo("kill -HUP $(cat %s)" % pid_path)
run("cat %s" % pid_path)
prompt = input("\npid number(upper number) : ")
sudo("kill -HUP %s" % prompt)
else:
start_args = (env.proj_name, env.proj_name)
sudo("supervisorctl start %s:gunicorn_%s" % start_args)
##########
# Deploy #
##########
@task
@log_call
def pull_git():
"""
run git pull
"""
with cd(env.proj_path):
run("git pull")
@task
@log_call
def collectstatic():
"""
collect static for mangae django
"""
manage('collectstatic')
@task
@log_call
def restart_supervisor():
"""
restart supervisor
"""
sudo("supervisorctl restart %(proj_name)s" % env)
@task
@log_call
def upload_local_settings():
"""
upload_local_settings
"""
upload_template_and_reload("settings")
@task
@log_call
def upload_nginx():
'''
create nginx
'''
upload_template_and_reload("nginx")
sudo("service nginx restart")
@task
@log_call
def deploy():
"""
Deploy latest version of the project.
Check out the latest version of the project from version
control, install new requirements, sync and migrate the database,
collect any new static assets, and restart gunicorn's work
processes for the project.
"""
for name in get_templates():
upload_template_and_reload(name)
with project():
#backup("last.db")
#static_dir = static()
#if exists(static_dir):
# run("tar -cf last.tar %s" % static_dir)
git = env.git
last_commit = "git rev-parse HEAD"
run("%s > last.commit" % last_commit)
with update_changed_requirements():
run("git pull origin master -f")
#manage("collectstatic -v 0 --noinput")
#manage("syncdb --noinput")
#manage("migrate --noinput")
restart()
return True
@task
@log_call
def remove():
"""
Blow away the current project.
"""
if exists(env.venv_path):
sudo("rm -rf %s" % env.venv_path)
#for template in get_templates().values():
# remote_path = template["remote_path"]
# if exists(remote_path):
# sudo("rm %s" % remote_path)
#psql("DROP DATABASE IF EXISTS %s;" % env.proj_name)
#psql("DROP USER IF EXISTS %s;" % env.proj_name)
|
[
"wogud86@gmail.com"
] |
wogud86@gmail.com
|
73d6c871a33247c5a769ff502a2741d904f94c16
|
8813753442439c5408db80ed07a97f54ee90a115
|
/check_memavail.py
|
3a7da71db15642d156598096f164d05ee7b87032
|
[
"Unlicense"
] |
permissive
|
Rattkener/Check_MemAvail
|
7a0e801e01ca9aa4677a9e9646b36c30881902da
|
e963636d7421533d0d0019c98805bfd810262af3
|
refs/heads/master
| 2021-06-20T14:16:11.478011
| 2021-05-06T20:37:21
| 2021-05-06T20:37:21
| 206,613,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,867
|
py
|
#!/usr/bin/env python
#import#
import paramiko
import argparse
#end import#
parser = argparse.ArgumentParser(
description='Remote Memory check for Linux servers. Intended for use on OpsView/Nagios monitoring systems.',
usage = '%(prog)s -n [--hostname] HOSTNAME -w [--warning] warning%% -c [--critical] critical%% -m [--metric] {commit,consumed,swap,hybrid} -v [--verbose] -s [--swap] swap_limit%%',
)
### define arguments to be used. secondary metric will be the only non-required metric for now given the progression of the script.
parser.add_argument("-n","--hostname", type=str, required=True, help='hostname which check should run against. Assumes passwordless access')
parser.add_argument("-w","--warning", type=int, required=False, default=85, help='Warning alert threshold in percent, defaults to 85')
parser.add_argument("-c","--critical", type=int, required=False, default=95, help='Critical alert thresehold in percent, defaults to 95')
parser.add_argument("-m","--metric", type=str, required=True, choices=('commit','consumed','swap','hybrid'), help='Select alert metric. If Hybrid you should supply \'-s\' otherwise default is 85%%')
parser.add_argument("-v","--verbose", action='store_true', help='Display more memory stats used in determining alert status.')
parser.add_argument("-s","--swap", type=int, required=False, default=85, help='Value that is only used in Hybrid mode. Percentage of swap used to trigger hybrid alert defaults to 85')
### define argument catchall for future use
args = parser.parse_args()
### Ensure that Critical is greater than Warning
if args.warning > args.critical:
parser.error("Warning threshold is higher than Critical threshold!")
### predefine metrics array
a = {}
####Paramiko SSH & SFTP link to target host####
tgt_client = paramiko.SSHClient() # create paramiko client
#tgt_client.load_system_host_keys() # load system host keys to allow recognition of known hosts
tgt_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # allow paramiko to add hosts. This opens the script up to man in the middle attacks but may be necessary for our enviornment.
tgt_client.connect(args.hostname, username='root') # open connection to host to allow SFTP link
tgt_sftp = tgt_client.open_sftp() # define SFTP method
meminfo = tgt_sftp.open('/proc/meminfo') # method for grabbing mem info
low_Watermark = int(tgt_sftp.open('/proc/sys/vm/min_free_kbytes').readline().strip()) # grab absolute minimum amount of memory system can run on
try:
for entry in map( lambda x: x.strip().split( 'kB' )[0].strip(), meminfo.readlines()):
a[ entry.split( ':' )[0].strip() ] = int( entry.split( ':' )[1].split( 'kB' )[0].strip() )
finally:
#close files we're working with. Don't trust garbage collectors
meminfo.close()
tgt_client.close()
### define metrics that aren't available on all systems ###
if 'MemAvailable' in a: #define what "memory available" looks like. Older OS's do not calculate this in /proc/meminfo
memAvail = a['MemAvailable'] # But if they do why not use it?
else:
memAvail = a['MemFree'] - low_Watermark + (a['Cached'] - min(a['Cached'] / 2, low_Watermark)) #and if they don't then we'll make our own. https://github.com/torvalds/linux/blob/master/mm/page_alloc.c#L5089
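# Illustrative only: with MemFree=200000, low_Watermark=67584 and Cached=800000 (all kB),
# memAvail = 200000 - 67584 + (800000 - min(400000, 67584)) = 864832 kB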
### set testing metrics ###
total = a['MemTotal'] # Set memory total
commit = a['Committed_AS'] # Define the current system committed memory. This is NOT memory in use, just committed
pressure = ((commit * 100.0) / total)
ptotal_used = (100.0 - (memAvail * 100.0 / total) )
pswap = (100.0 - (a['SwapFree'] * 100.0 / a['SwapTotal']))
### High verbosity output ###
if args.verbose:
print("Memory Available: " + str(memAvail) + " kb")
print("Lower Watermark: " + str(low_Watermark) + " kb")
print("Total Memory: " + str(total) + " kb")
print("Total Commit: " + str(commit) + " kb")
print("Total Memory Used: %.2f%%" % ptotal_used)
print("Swap Used: %.2f%%" % pswap)
### Alert logic based on primary metric. Start with highest check first
if args.metric == "commit":
if pressure >= args.critical:
print('CRITICAL - Commit: {0:.2f}'.format(pressure,))
exit(2)
elif pressure >= args.warning:
print('WARNING - Commit: {0:.2f}'.format(pressure,))
exit(1)
else:
print('OK - Commit: {0:.2f}'.format(pressure,))
exit(0)
elif args.metric == "consumed":
if ptotal_used >= args.critical:
print("CRITICAL - UsedMemory: {0:.2f}".format( ptotal_used, ) )
exit(2)
elif ptotal_used >= args.warning:
print("WARNING - UsedMemory: {0:.2f}".format( ptotal_used, ) )
exit(1)
else:
print("OK - UsedMemory: {0:.2f}".format( ptotal_used, ) )
exit(0)
elif args.metric == "swap":
if pswap >= args.critical:
print("CRITICAL - SwapUsed: {0:.2f}".format( pswap, ) )
exit(2)
elif pswap >= args.warning:
print("WARNING - SwapUsed: {0:.2f}".format( pswap, ) )
exit(1)
else:
print("OK - SwapUsed: {0:.2f}".format( pswap, ) )
exit(0)
elif args.metric == "hybrid":
if ptotal_used >= args.critical:
if pswap >= args.swap:
print("CRITICAL - UsedMemory: {0:.2f} -- UsedSwap: {1:.2f}".format( ptotal_used, pswap ) )
exit(2)
elif ptotal_used >= args.warning:
if pswap >= args.swap:
print("WARNING - UsedMemory: {0:.2f} -- UsedSwap: {1:.2f}".format( ptotal_used, pswap ) )
exit(1)
else:
print("OK - UsedMemory: {0:.2f} -- UsedSwap: {1:.2f}".format( ptotal_used, pswap ) )
exit(0)
|
[
"29561978+ProficientPanda@users.noreply.github.com"
] |
29561978+ProficientPanda@users.noreply.github.com
|
59c2ab248f18fdadf951a4ecbbc12b55c6db470a
|
8e050e70e597102ccfebe9dce91cf804ae73260b
|
/cdd.py
|
4b16530762ea2f40ef1414f12a4fa8da9fbb5d2a
|
[] |
no_license
|
AngeloMendes/LogDel12
|
aac23176f9bb3357f38443692285d735009b8f20
|
266c99f3185242ac8e4b6e04d1ba9d4f50ed0634
|
refs/heads/master
| 2021-03-24T23:14:16.468273
| 2020-03-15T23:49:38
| 2020-03-15T23:49:38
| 247,571,030
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,951
|
py
|
# This code evaluates how many clusters exist and groups the distributors
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
def get_name(names, lat):
for key in names['latitude']:
if names['latitude'][key] == lat:
return names['client_name'][key]
def elbow_curve():
K_clusters = range(1, 10)
kmeans = [KMeans(n_clusters=i) for i in K_clusters]
Y_axis = df[['latitude']]
X_axis = df[['longitude']]
score = [kmeans[i].fit(Y_axis).score(Y_axis) for i in range(len(kmeans))]
# Visualize
plt.plot(K_clusters, score)
plt.xlabel('Numero de Grupos')
plt.ylabel('Score')
plt.title('Elbow Curve')
plt.show()
def cluster(df):
names = df[['client_name', 'latitude']].to_dict()
df = df.drop(['client_name', 'date'], axis=1)
kmeans = KMeans(n_clusters=5, init='k-means++')
kmeans.fit(df[df.columns[0:6]])
df['cluster_label'] = kmeans.fit_predict(df[df.columns[0:6]])
centers = kmeans.cluster_centers_
labels = kmeans.predict(df[df.columns[0:6]])
# print centers
# print labels
length = len(df)
df.plot.scatter(x='latitude', y='longitude', c=labels, s=100, cmap='viridis')
center_x = []
center_y = []
for i in centers:
center_x.append(i[4])
for i in centers:
center_y.append(i[5])
# print(center_x)
# print(center_y)
plt.scatter(center_x, center_y, c='black', s=200, alpha=0.5)
# plt.scatter(centers[5:6, 0], centers[5:6, 1], c='black', s=200, alpha=0.5)
for i in range(0, length):
plt.annotate(get_name(names, df['latitude'][i]), (df['latitude'][i], df['longitude'][i]),
horizontalalignment='right', fontsize=13, verticalalignment='bottom')
plt.title("Grupos de Bares Moema -SP")
plt.show()
if __name__ == '__main__':
df = pd.read_csv('dist.csv')
elbow_curve()
cluster(df)
|
[
"contato.angelomendes@gmail.com"
] |
contato.angelomendes@gmail.com
|
d1a7559941c43363cdb920c3cdf95dfd113e5caa
|
4ba0b403637e7aa3e18c9bafae32034e3c394fe4
|
/python/PyProfiler/profiler6/test.py
|
bfa05d76fd1e3862c546d51554804efb9d66d939
|
[] |
no_license
|
ASMlover/study
|
3767868ddae63ac996e91b73700d40595dd1450f
|
1331c8861fcefbef2813a2bdd1ee09c1f1ee46d6
|
refs/heads/master
| 2023-09-06T06:45:45.596981
| 2023-09-01T08:19:49
| 2023-09-01T08:19:49
| 7,519,677
| 23
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,620
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2023 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../")
import py_profiler as pprof
from common import test_common as _tc
def test() -> None:
pprof.start_stats()
_tc.TestEntry().run()
pprof.print_stats()
if __name__ == "__main__":
test()
|
[
"ASMlover@126.com"
] |
ASMlover@126.com
|
0572ff0cec28243d6b72452f3f61deda3e6df64b
|
5be2bbf713c09e4f03f29a1c2fd071f3a8e90b5f
|
/src/main/local.py
|
684bfa9db184d454a15afb02d248836c50bdb193
|
[
"MIT"
] |
permissive
|
amrutadharmapurikar/hmr2.0
|
47a4c5ebfb64ce6349ad4e6446d84a033c8f0e05
|
a566fe424618f4cbdefe6441f8b91c9efeaa1219
|
refs/heads/master
| 2023-06-26T08:06:10.593071
| 2021-07-12T00:58:49
| 2021-07-12T00:58:49
| 382,423,981
| 0
| 0
|
MIT
| 2021-07-02T17:54:05
| 2021-07-02T17:54:04
| null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
import os
from datetime import datetime
from main.config import Config
from main.model import Model
class LocalConfig(Config):
ROOT_DATA_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
LOG_DIR = os.path.join(ROOT_DATA_DIR, 'logs', datetime.now().strftime("%d%m%Y-%H%M%S"))
DATA_DIR = os.path.join(ROOT_DATA_DIR, 'src', 'tests', 'files')
SMPL_DATA_DIR = os.path.join(ROOT_DATA_DIR, 'src', 'tests', 'files')
SMPL_MODEL_PATH = os.path.join(ROOT_DATA_DIR, 'models', 'neutral_smpl_coco_regressor.pkl')
SMPL_MEAN_THETA_PATH = os.path.join(ROOT_DATA_DIR, 'models', 'neutral_smpl_mean_params.h5')
CUSTOM_REGRESSOR_PATH = os.path.join(ROOT_DATA_DIR, 'src', 'tests', 'files', 'regressors')
CUSTOM_REGRESSOR_IDX = {
0: 'regressor_test.npy',
}
DATASETS = ['dataset']
SMPL_DATASETS = ['smpl']
BATCH_SIZE = 2
JOINT_TYPE = 'cocoplus'
NUM_KP2D = 19
NUM_KP3D = 14
def __init__(self):
super(LocalConfig, self).__init__()
self.SEED = 1
self.NUM_TRAINING_SAMPLES = 1
self.NUM_TRAIN_SMPL_SAMPLES = 4
self.NUM_VALIDATION_SAMPLES = 1
self.NUM_TEST_SAMPLES = 1
if __name__ == '__main__':
LocalConfig()
model = Model()
model.train()
|
[
"alessandro.russo@allianz.de"
] |
alessandro.russo@allianz.de
|
8904819a1aed374abfd0b4aa31c6e9d42770301e
|
be8190250b78ced1dbc94ae8c9774299621c3905
|
/boxtree/pyfmmlib_integration.py
|
812eae4aa4d044bb5f198e8d8b8674066014057b
|
[
"MIT"
] |
permissive
|
Dracogenius17/boxtree
|
45b03f67df5c9faaffccec25d480dde787cd15ac
|
313159f001c5d8c4bbe68de65ed1077a954ce2f9
|
refs/heads/master
| 2021-05-02T02:36:42.958288
| 2017-12-13T16:44:23
| 2017-12-13T16:44:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,929
|
py
|
from __future__ import division
"""Integration between boxtree and pyfmmlib."""
__copyright__ = "Copyright (C) 2013 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
from pytools import memoize_method
import logging
logger = logging.getLogger(__name__)
__doc__ = """Integrates :mod:`boxtree` with
`pyfmmlib <http://pypi.python.org/pypi/pyfmmlib>`_.
"""
class FMMLibExpansionWrangler(object):
"""Implements the :class:`boxtree.fmm.ExpansionWranglerInterface`
by using pyfmmlib.
"""
# {{{ constructor
def __init__(self, tree, helmholtz_k, fmm_level_to_nterms=None, ifgrad=False,
dipole_vec=None, dipoles_already_reordered=False, nterms=None):
"""
:arg fmm_level_to_nterms: a callable that, upon being passed the tree
and the tree level as an integer, returns the value of *nterms* for the
multipole and local expansions on that level.
"""
if nterms is not None and fmm_level_to_nterms is not None:
raise TypeError("may specify either fmm_level_to_nterms or nterms, "
"but not both")
if nterms is not None:
from warnings import warn
warn("Passing nterms is deprecated. Pass fmm_level_to_nterms instead.",
DeprecationWarning, stacklevel=2)
def fmm_level_to_nterms(tree, level):
return nterms
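        # Illustrative only: a typical fmm_level_to_nterms callable could be
        #     fmm_level_to_nterms=lambda tree, level: 10 + level
        # i.e. anything mapping (tree, level) to an integer expansion order.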
self.tree = tree
if helmholtz_k == 0:
self.eqn_letter = "l"
self.kernel_kwargs = {}
self.rscale_factor = 1
else:
self.eqn_letter = "h"
self.kernel_kwargs = {"zk": helmholtz_k}
self.rscale_factor = abs(helmholtz_k)
self.level_nterms = np.array([
fmm_level_to_nterms(tree, lev) for lev in range(tree.nlevels)
], dtype=np.int32)
if helmholtz_k:
logger.info("expansion orders by level used in Helmholtz FMM: %s",
self.level_nterms)
self.dtype = np.complex128
self.ifgrad = ifgrad
self.dim = tree.dimensions
if dipole_vec is not None:
assert dipole_vec.shape == (self.dim, self.tree.nsources)
if not dipoles_already_reordered:
dipole_vec = self.reorder_sources(dipole_vec)
self.dipole_vec = dipole_vec.copy(order="F")
self.dp_suffix = "_dp"
else:
self.dipole_vec = None
self.dp_suffix = ""
# }}}
def level_to_rscale(self, level):
result = self.tree.root_extent * 2 ** -level * self.rscale_factor
if abs(result) > 1:
result = 1
return result
@memoize_method
def projection_quad_extra_kwargs(self, level=None, nterms=None):
if level is None and nterms is None:
raise TypeError("must pass exactly one of level or nterms")
if level is not None and nterms is not None:
raise TypeError("must pass exactly one of level or nterms")
if level is not None:
nterms = self.level_nterms[level]
common_extra_kwargs = {}
if self.dim == 3 and self.eqn_letter == "h":
nquad = max(6, int(2.5*nterms))
from pyfmmlib import legewhts
xnodes, weights = legewhts(nquad, ifwhts=1)
common_extra_kwargs = {
"xnodes": xnodes,
"wts": weights,
}
return common_extra_kwargs
# {{{ overridable target lists for the benefit of the QBX FMM
def box_target_starts(self):
return self.tree.box_target_starts
def box_target_counts_nonchild(self):
return self.tree.box_target_counts_nonchild
def targets(self):
return self.tree.targets
# }}}
# {{{ routine getters
def get_routine(self, name, suffix=""):
import pyfmmlib
return getattr(pyfmmlib, "%s%s%s" % (
self.eqn_letter,
name % self.dim,
suffix))
def get_vec_routine(self, name):
return self.get_routine(name, "_vec")
def get_translation_routine(self, name, vec_suffix="_vec"):
suffix = ""
if self.dim == 3:
suffix = "quadu"
suffix += vec_suffix
rout = self.get_routine(name, suffix)
if self.dim == 2:
def wrapper(*args, **kwargs):
# not used
kwargs.pop("level_for_projection", None)
return rout(*args, **kwargs)
else:
def wrapper(*args, **kwargs):
kwargs.pop("level_for_projection", None)
nterms2 = kwargs["nterms2"]
kwargs.update(self.projection_quad_extra_kwargs(nterms=nterms2))
val, ier = rout(*args, **kwargs)
if (ier != 0).any():
raise RuntimeError("%s failed with nonzero ier" % name)
return val
            # Doesn't work in Py2
# from functools import update_wrapper
# update_wrapper(wrapper, rout)
return wrapper
def get_direct_eval_routine(self):
if self.dim == 2:
rout = self.get_vec_routine("potgrad%ddall" + self.dp_suffix)
def wrapper(*args, **kwargs):
kwargs["ifgrad"] = self.ifgrad
kwargs["ifhess"] = False
pot, grad, hess = rout(*args, **kwargs)
if not self.ifgrad:
grad = 0
return pot, grad
            # Doesn't work in Py2
# from functools import update_wrapper
# update_wrapper(wrapper, rout)
return wrapper
elif self.dim == 3:
rout = self.get_vec_routine("potfld%ddall" + self.dp_suffix)
def wrapper(*args, **kwargs):
kwargs["iffld"] = self.ifgrad
pot, fld = rout(*args, **kwargs)
if self.ifgrad:
grad = -fld
else:
grad = 0
return pot, grad
            # Doesn't work in Py2
# from functools import update_wrapper
# update_wrapper(wrapper, rout)
return wrapper
else:
raise ValueError("unsupported dimensionality")
def get_expn_eval_routine(self, expn_kind):
name = "%%dd%seval" % expn_kind
rout = self.get_routine(name, "_vec")
if self.dim == 2:
def wrapper(*args, **kwargs):
kwargs["ifgrad"] = self.ifgrad
kwargs["ifhess"] = False
pot, grad, hess = rout(*args, **kwargs)
if not self.ifgrad:
grad = 0
return pot, grad
            # Doesn't work in Py2
# from functools import update_wrapper
# update_wrapper(wrapper, rout)
return wrapper
elif self.dim == 3:
def wrapper(*args, **kwargs):
kwargs["iffld"] = self.ifgrad
pot, fld, ier = rout(*args, **kwargs)
if (ier != 0).any():
raise RuntimeError("%s failed with nonzero ier" % name)
if self.ifgrad:
grad = -fld
else:
grad = 0
return pot, grad
            # Doesn't work in Py2
# from functools import update_wrapper
# update_wrapper(wrapper, rout)
return wrapper
else:
raise ValueError("unsupported dimensionality")
# }}}
# {{{ data vector utilities
def expansion_shape(self, nterms):
if self.dim == 2 and self.eqn_letter == "l":
return (nterms+1,)
elif self.dim == 2 and self.eqn_letter == "h":
return (2*nterms+1,)
elif self.dim == 3:
# This is the transpose of the Fortran format, to
# minimize mismatch between C and Fortran orders.
return (2*nterms+1, nterms+1,)
else:
raise ValueError("unsupported dimensionality")
def _expansions_level_starts(self, order_to_size):
result = [0]
for lev in range(self.tree.nlevels):
lev_nboxes = (
self.tree.level_start_box_nrs[lev+1]
- self.tree.level_start_box_nrs[lev])
expn_size = order_to_size(self.level_nterms[lev])
result.append(
result[-1]
+ expn_size * lev_nboxes)
return result
@memoize_method
def multipole_expansions_level_starts(self):
from pytools import product
return self._expansions_level_starts(
lambda nterms: product(self.expansion_shape(nterms)))
@memoize_method
def local_expansions_level_starts(self):
from pytools import product
return self._expansions_level_starts(
lambda nterms: product(self.expansion_shape(nterms)))
def multipole_expansions_view(self, mpole_exps, level):
box_start, box_stop = self.tree.level_start_box_nrs[level:level+2]
expn_start, expn_stop = \
self.multipole_expansions_level_starts()[level:level+2]
return (box_start,
mpole_exps[expn_start:expn_stop].reshape(
box_stop-box_start,
*self.expansion_shape(self.level_nterms[level])))
def local_expansions_view(self, local_exps, level):
box_start, box_stop = self.tree.level_start_box_nrs[level:level+2]
expn_start, expn_stop = \
self.local_expansions_level_starts()[level:level+2]
return (box_start,
local_exps[expn_start:expn_stop].reshape(
box_stop-box_start,
*self.expansion_shape(self.level_nterms[level])))
def multipole_expansion_zeros(self):
return np.zeros(
self.multipole_expansions_level_starts()[-1],
dtype=self.dtype)
def local_expansion_zeros(self):
return np.zeros(
self.local_expansions_level_starts()[-1],
dtype=self.dtype)
def output_zeros(self):
if self.ifgrad:
from pytools import make_obj_array
return make_obj_array([
np.zeros(self.tree.ntargets, self.dtype)
for i in range(1 + self.dim)])
else:
return np.zeros(self.tree.ntargets, self.dtype)
def add_potgrad_onto_output(self, output, output_slice, pot, grad):
if self.ifgrad:
output[0, output_slice] += pot
output[1:, output_slice] += grad
else:
output[output_slice] += pot
# }}}
# {{{ source/target particle wrangling
def _get_source_slice(self, ibox):
pstart = self.tree.box_source_starts[ibox]
return slice(
pstart, pstart + self.tree.box_source_counts_nonchild[ibox])
def _get_target_slice(self, ibox):
pstart = self.box_target_starts()[ibox]
return slice(
pstart, pstart + self.box_target_counts_nonchild()[ibox])
@memoize_method
def _get_single_sources_array(self):
return np.array([
self.tree.sources[idim]
for idim in range(self.dim)
], order="F")
def _get_sources(self, pslice):
return self._get_single_sources_array()[:, pslice]
@memoize_method
def _get_single_targets_array(self):
return np.array([
self.targets()[idim]
for idim in range(self.dim)
], order="F")
def _get_targets(self, pslice):
return self._get_single_targets_array()[:, pslice]
# }}}
def reorder_sources(self, source_array):
return source_array[..., self.tree.user_source_ids]
def reorder_potentials(self, potentials):
return potentials[self.tree.sorted_target_ids]
def get_source_kwargs(self, src_weights, pslice):
if self.dipole_vec is None:
return {
"charge": src_weights[pslice],
}
else:
if self.eqn_letter == "l" and self.dim == 2:
return {
"dipstr": -src_weights[pslice] * (
self.dipole_vec[0, pslice]
+ 1j * self.dipole_vec[1, pslice])
}
else:
return {
"dipstr": src_weights[pslice],
"dipvec": self.dipole_vec[:, pslice],
}
def form_multipoles(self, level_start_source_box_nrs, source_boxes, src_weights):
formmp = self.get_routine("%ddformmp" + self.dp_suffix)
mpoles = self.multipole_expansion_zeros()
for lev in range(self.tree.nlevels):
start, stop = level_start_source_box_nrs[lev:lev+2]
if start == stop:
continue
level_start_ibox, mpoles_view = self.multipole_expansions_view(
mpoles, lev)
rscale = self.level_to_rscale(lev)
for src_ibox in source_boxes[start:stop]:
pslice = self._get_source_slice(src_ibox)
if pslice.stop - pslice.start == 0:
continue
kwargs = {}
kwargs.update(self.kernel_kwargs)
kwargs.update(self.get_source_kwargs(src_weights, pslice))
ier, mpole = formmp(
rscale=rscale,
source=self._get_sources(pslice),
center=self.tree.box_centers[:, src_ibox],
nterms=self.level_nterms[lev],
**kwargs)
if ier:
raise RuntimeError("formmp failed")
mpoles_view[src_ibox-level_start_ibox] = mpole.T
return mpoles
def coarsen_multipoles(self, level_start_source_parent_box_nrs,
source_parent_boxes, mpoles):
tree = self.tree
mpmp = self.get_translation_routine("%ddmpmp")
# nlevels-1 is the last valid level index
# nlevels-2 is the last valid level that could have children
#
# 3 is the last relevant source_level.
# 2 is the last relevant target_level.
# (because no level 1 box will be well-separated from another)
for source_level in range(tree.nlevels-1, 2, -1):
target_level = source_level - 1
start, stop = level_start_source_parent_box_nrs[
target_level:target_level+2]
source_level_start_ibox, source_mpoles_view = \
self.multipole_expansions_view(mpoles, source_level)
target_level_start_ibox, target_mpoles_view = \
self.multipole_expansions_view(mpoles, target_level)
source_rscale = self.level_to_rscale(source_level)
target_rscale = self.level_to_rscale(target_level)
for ibox in source_parent_boxes[start:stop]:
parent_center = tree.box_centers[:, ibox]
for child in tree.box_child_ids[:, ibox]:
if child:
child_center = tree.box_centers[:, child]
kwargs = {}
if self.dim == 3 and self.eqn_letter == "h":
kwargs["radius"] = tree.root_extent * 2**(-target_level)
kwargs.update(self.kernel_kwargs)
new_mp = mpmp(
rscale1=source_rscale,
center1=child_center,
expn1=source_mpoles_view[
child - source_level_start_ibox].T,
rscale2=target_rscale,
center2=parent_center,
nterms2=self.level_nterms[target_level],
**kwargs)
target_mpoles_view[
ibox - target_level_start_ibox] += new_mp[..., 0].T
def eval_direct(self, target_boxes, neighbor_sources_starts,
neighbor_sources_lists, src_weights):
output = self.output_zeros()
ev = self.get_direct_eval_routine()
for itgt_box, tgt_ibox in enumerate(target_boxes):
tgt_pslice = self._get_target_slice(tgt_ibox)
if tgt_pslice.stop - tgt_pslice.start == 0:
continue
#tgt_result = np.zeros(tgt_pslice.stop - tgt_pslice.start, self.dtype)
tgt_pot_result = 0
tgt_grad_result = 0
start, end = neighbor_sources_starts[itgt_box:itgt_box+2]
for src_ibox in neighbor_sources_lists[start:end]:
src_pslice = self._get_source_slice(src_ibox)
if src_pslice.stop - src_pslice.start == 0:
continue
kwargs = {}
kwargs.update(self.kernel_kwargs)
kwargs.update(self.get_source_kwargs(src_weights, src_pslice))
tmp_pot, tmp_grad = ev(
sources=self._get_sources(src_pslice),
targets=self._get_targets(tgt_pslice),
**kwargs)
tgt_pot_result += tmp_pot
tgt_grad_result += tmp_grad
self.add_potgrad_onto_output(
output, tgt_pslice, tgt_pot_result, tgt_grad_result)
return output
def multipole_to_local(self,
level_start_target_or_target_parent_box_nrs,
target_or_target_parent_boxes,
starts, lists, mpole_exps):
tree = self.tree
local_exps = self.local_expansion_zeros()
mploc = self.get_translation_routine("%ddmploc", vec_suffix="_imany")
for lev in range(self.tree.nlevels):
lstart, lstop = level_start_target_or_target_parent_box_nrs[lev:lev+2]
if lstart == lstop:
continue
starts_on_lvl = starts[lstart:lstop+1]
source_level_start_ibox, source_mpoles_view = \
self.multipole_expansions_view(mpole_exps, lev)
target_level_start_ibox, target_local_exps_view = \
self.local_expansions_view(local_exps, lev)
ntgt_boxes = lstop-lstart
itgt_box_vec = np.arange(ntgt_boxes)
tgt_ibox_vec = target_or_target_parent_boxes[lstart:lstop]
nsrc_boxes_per_tgt_box = (
starts[lstart + itgt_box_vec+1] - starts[lstart + itgt_box_vec])
nsrc_boxes = np.sum(nsrc_boxes_per_tgt_box)
src_boxes_starts = np.empty(ntgt_boxes+1, dtype=np.int32)
src_boxes_starts[0] = 0
src_boxes_starts[1:] = np.cumsum(nsrc_boxes_per_tgt_box)
rscale = self.level_to_rscale(lev)
rscale1 = np.ones(nsrc_boxes) * rscale
rscale1_offsets = np.arange(nsrc_boxes)
kwargs = {}
if self.dim == 3 and self.eqn_letter == "h":
kwargs["radius"] = (
tree.root_extent * 2**(-lev)
* np.ones(ntgt_boxes))
rscale2 = np.ones(ntgt_boxes, np.float64) * rscale
# These get max'd/added onto: pass initialized versions.
if self.dim == 3:
ier = np.zeros(ntgt_boxes, dtype=np.int32)
kwargs["ier"] = ier
expn2 = np.zeros(
(ntgt_boxes,) + self.expansion_shape(self.level_nterms[lev]),
dtype=self.dtype)
kwargs.update(self.kernel_kwargs)
expn2 = mploc(
rscale1=rscale1,
rscale1_offsets=rscale1_offsets,
rscale1_starts=src_boxes_starts,
center1=tree.box_centers,
center1_offsets=lists,
center1_starts=starts_on_lvl,
expn1=source_mpoles_view.T,
expn1_offsets=lists - source_level_start_ibox,
expn1_starts=starts_on_lvl,
rscale2=rscale2,
# FIXME: wrong layout, will copy
center2=tree.box_centers[:, tgt_ibox_vec],
expn2=expn2.T,
nterms2=self.level_nterms[lev],
**kwargs).T
target_local_exps_view[tgt_ibox_vec - target_level_start_ibox] += expn2
return local_exps
def eval_multipoles(self, level_start_target_box_nrs, target_boxes,
sep_smaller_nonsiblings_by_level, mpole_exps):
output = self.output_zeros()
mpeval = self.get_expn_eval_routine("mp")
for isrc_level, ssn in enumerate(sep_smaller_nonsiblings_by_level):
source_level_start_ibox, source_mpoles_view = \
self.multipole_expansions_view(mpole_exps, isrc_level)
rscale = self.level_to_rscale(isrc_level)
for itgt_box, tgt_ibox in enumerate(target_boxes):
tgt_pslice = self._get_target_slice(tgt_ibox)
if tgt_pslice.stop - tgt_pslice.start == 0:
continue
tgt_pot = 0
tgt_grad = 0
start, end = ssn.starts[itgt_box:itgt_box+2]
for src_ibox in ssn.lists[start:end]:
tmp_pot, tmp_grad = mpeval(
rscale=rscale,
center=self.tree.box_centers[:, src_ibox],
expn=source_mpoles_view[
src_ibox - source_level_start_ibox].T,
ztarg=self._get_targets(tgt_pslice),
**self.kernel_kwargs)
tgt_pot = tgt_pot + tmp_pot
tgt_grad = tgt_grad + tmp_grad
self.add_potgrad_onto_output(
output, tgt_pslice, tgt_pot, tgt_grad)
return output
def form_locals(self,
level_start_target_or_target_parent_box_nrs,
target_or_target_parent_boxes, starts, lists, src_weights):
local_exps = self.local_expansion_zeros()
formta = self.get_routine("%ddformta" + self.dp_suffix)
for lev in range(self.tree.nlevels):
lev_start, lev_stop = \
level_start_target_or_target_parent_box_nrs[lev:lev+2]
if lev_start == lev_stop:
continue
target_level_start_ibox, target_local_exps_view = \
self.local_expansions_view(local_exps, lev)
rscale = self.level_to_rscale(lev)
for itgt_box, tgt_ibox in enumerate(
target_or_target_parent_boxes[lev_start:lev_stop]):
start, end = starts[lev_start+itgt_box:lev_start+itgt_box+2]
contrib = 0
for src_ibox in lists[start:end]:
src_pslice = self._get_source_slice(src_ibox)
tgt_center = self.tree.box_centers[:, tgt_ibox]
if src_pslice.stop - src_pslice.start == 0:
continue
kwargs = {}
kwargs.update(self.kernel_kwargs)
kwargs.update(self.get_source_kwargs(src_weights, src_pslice))
ier, mpole = formta(
rscale=rscale,
source=self._get_sources(src_pslice),
center=tgt_center,
nterms=self.level_nterms[lev],
**kwargs)
if ier:
raise RuntimeError("formta failed")
contrib = contrib + mpole.T
target_local_exps_view[tgt_ibox-target_level_start_ibox] = contrib
return local_exps
def refine_locals(self, level_start_target_or_target_parent_box_nrs,
target_or_target_parent_boxes, local_exps):
locloc = self.get_translation_routine("%ddlocloc")
for target_lev in range(1, self.tree.nlevels):
start, stop = level_start_target_or_target_parent_box_nrs[
target_lev:target_lev+2]
source_lev = target_lev - 1
source_level_start_ibox, source_local_exps_view = \
self.local_expansions_view(local_exps, source_lev)
target_level_start_ibox, target_local_exps_view = \
self.local_expansions_view(local_exps, target_lev)
source_rscale = self.level_to_rscale(source_lev)
target_rscale = self.level_to_rscale(target_lev)
for tgt_ibox in target_or_target_parent_boxes[start:stop]:
tgt_center = self.tree.box_centers[:, tgt_ibox]
src_ibox = self.tree.box_parent_ids[tgt_ibox]
src_center = self.tree.box_centers[:, src_ibox]
kwargs = {}
if self.dim == 3 and self.eqn_letter == "h":
kwargs["radius"] = self.tree.root_extent * 2**(-target_lev)
kwargs.update(self.kernel_kwargs)
tmp_loc_exp = locloc(
rscale1=source_rscale,
center1=src_center,
expn1=source_local_exps_view[
src_ibox - source_level_start_ibox].T,
rscale2=target_rscale,
center2=tgt_center,
nterms2=self.level_nterms[target_lev],
**kwargs)[..., 0]
target_local_exps_view[
tgt_ibox - target_level_start_ibox] += tmp_loc_exp.T
return local_exps
def eval_locals(self, level_start_target_box_nrs, target_boxes, local_exps):
output = self.output_zeros()
taeval = self.get_expn_eval_routine("ta")
for lev in range(self.tree.nlevels):
start, stop = level_start_target_box_nrs[lev:lev+2]
if start == stop:
continue
source_level_start_ibox, source_local_exps_view = \
self.local_expansions_view(local_exps, lev)
rscale = self.level_to_rscale(lev)
for tgt_ibox in target_boxes[start:stop]:
tgt_pslice = self._get_target_slice(tgt_ibox)
if tgt_pslice.stop - tgt_pslice.start == 0:
continue
tmp_pot, tmp_grad = taeval(
rscale=rscale,
center=self.tree.box_centers[:, tgt_ibox],
expn=source_local_exps_view[
tgt_ibox - source_level_start_ibox].T,
ztarg=self._get_targets(tgt_pslice),
**self.kernel_kwargs)
self.add_potgrad_onto_output(
output, tgt_pslice, tmp_pot, tmp_grad)
return output
def finalize_potentials(self, potential):
if self.eqn_letter == "l" and self.dim == 2:
scale_factor = -1/(2*np.pi)
elif self.eqn_letter == "h" and self.dim == 2:
scale_factor = 1
elif self.eqn_letter in ["l", "h"] and self.dim == 3:
scale_factor = 1/(4*np.pi)
else:
raise NotImplementedError(
"scale factor for pyfmmlib %s for %d dimensions" % (
self.eqn_letter,
self.dim))
if self.eqn_letter == "l" and self.dim == 2:
potential = potential.real
return potential * scale_factor
# vim: foldmethod=marker
|
[
"inform@tiker.net"
] |
inform@tiker.net
|
5258a863174e0b8e1845a0504b06107374ae09f5
|
357eaed9e37bc97357261098e06a1219cfba73ff
|
/fpeg_helion/wsgi.py
|
9cc2d70e23adb4bb095d5e1310b1c62a795b0075
|
[
"MIT"
] |
permissive
|
andrewschreiber/fpeg
|
7acc6d39b7eb3fb6e662a5ac205f38f5372a3680
|
16fc3afb795040aea2e41216d6a9f88cedff2ba4
|
refs/heads/master
| 2021-07-12T09:19:31.461056
| 2020-06-30T21:43:17
| 2020-06-30T21:43:17
| 30,467,410
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
import bottle
from bottle import route, request, post, template
import logging
import json
import os
logging.basicConfig()
log = logging.getLogger("fpeg")
log.setLevel(logging.DEBUG)
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')
@route('/')
def home():
bottle.TEMPLATE_PATH.insert(0, './views')
return bottle.template('home', sent=False, body=None)
@post('/compress')
def compress():
data = request.files.get("upload")
if data and data.file:
raw = data.file.read()
filename = data.filename
log.debug("uploaded {} ({} bytes).".format(filename, len(raw)))
else:
log.error("upload failed")
@route('/static/:filename')
def serve_static(filename):
log.debug("serving static assets")
return bottle.static_file(filename, root=STATIC_ROOT)
application = bottle.app()
application.catchall = False
bottle.run(application, host='0.0.0.0', port=os.getenv('PORT', 8080))
|
[
"Andrew Stocker"
] |
Andrew Stocker
|
94d547688e8c427036b8536f3210d9fa20b16541
|
792d26133b5504fef31ab56138db28a2c7f666db
|
/LINETCR/Api/Talk.py
|
be91375f5bef0da83ce54dbdd2d4ae6fbc4df023
|
[] |
no_license
|
GieVh4/aisya
|
333f18f7806ca99d242213ef41248335ac111c4c
|
6f14e06fa7c9df13d4830a435a11c1751b230038
|
refs/heads/master
| 2020-03-07T10:17:14.854975
| 2018-04-24T07:46:47
| 2018-04-24T07:46:47
| 127,427,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,662
|
py
|
# -*- coding: utf-8 -*-
import os, sys
path = os.path.join(os.path.dirname(__file__), '../lib/')
sys.path.insert(0, path)
import requests, rsa
from thrift.transport import THttpClient
from thrift.protocol import TCompactProtocol
from curve import LineService
from curve.ttypes import *
class Talk:
client = None
auth_query_path = "/api/v4/TalkService.do";
http_query_path = "/S4";
wait_for_mobile_path = "/Q";
host = "gd2.line.naver.jp";
port = 443;
UA = "Line/2018.07421.2455.Tanduri/760.1.6 WIN10/18.2.1"
LA = "CHROMEOS 8.3.2 HELLO-WORLD 12.1.1"
authToken = None
cert = None
def __init__(self):
self.transport = THttpClient.THttpClient('https://gd2.line.naver.jp:443'+self.auth_query_path)
self.transport.setCustomHeaders({
"User-Agent" : self.UA,
"X-Line-Application" : self.LA,
})
self.transport.open()
self.protocol = TCompactProtocol.TCompactProtocol(self.transport);
self.client = LineService.Client(self.protocol)
def login(self, mail, passwd, cert=None, callback=None):
self.transport.path = self.auth_query_path
rsakey = self.client.getRSAKeyInfo(IdentityProvider.LINE)
crypt = self.__crypt(mail, passwd, rsakey)
result = self.client.loginWithIdentityCredentialForCertificate(
IdentityProvider.LINE,
rsakey.keynm,
crypt,
True,
'127.0.0.1',
'http://dg.b9dm.com/KoenoKatachi.mp4',
cert
)
if result.type == 3:
callback(result.pinCode)
header = {"X-Line-Access": result.verifier}
r = requests.get(url="https://" + self.host + self.wait_for_mobile_path, headers=header)
result = self.client.loginWithVerifierForCerificate(r.json()["result"]["verifier"])
self.transport.setCustomHeaders({
"X-Line-Application" : self.LA,
"User-Agent" : self.UA,
"X-Line-Access" : result.authToken
})
self.authToken = result.authToken
self.cert = result.certificate
self.transport.path = self.http_query_path
elif result.type == 1:
self.authToken = result.authToken
self.cert = result.certificate
self.transport.setCustomHeaders({
"X-Line-Application" : self.LA,
"User-Agent" : self.UA,
"X-Line-Access" : result.authToken
})
self.transport.path = self.http_query_path
def TokenLogin(self, authToken):
self.transport.setCustomHeaders({
"X-Line-Application" : self.LA,
"User-Agent" : self.UA,
"X-Line-Access" : authToken,
})
self.authToken = authToken
self.transport.path = self.http_query_path
def qrLogin(self, callback):
self.transport.path = self.auth_query_path
qr = self.client.getAuthQrcode(True, "Bot")
callback("Copy Kode QR nya Plak\nJangan Lama2\nBatas 1 menit:\n line://au/q/" + qr.verifier)
r = requests.get("https://" + self.host + self.wait_for_mobile_path, headers={
"X-Line-Application": self.LA,
"X-Line-Access": qr.verifier,
})
vr = r.json()["result"]["verifier"]
lr = self.client.loginWithVerifierForCerificate(vr)
self.transport.setCustomHeaders({
"X-Line-Application" : self.LA,
"User-Agent" : self.UA,
"X-Line-Access": lr.authToken
})
self.authToken = lr.authToken
self.cert = lr.certificate
self.transport.path = self.http_query_path
def __crypt(self, mail, passwd, RSA):
message = (chr(len(RSA.sessionKey)) + RSA.sessionKey +
chr(len(mail)) + mail +
chr(len(passwd)) + passwd).encode('utf-8')
pub_key = rsa.PublicKey(int(RSA.nvalue, 16), int(RSA.evalue, 16))
crypto = rsa.encrypt(message, pub_key).encode('hex')
return crypto
|
[
"noreply@github.com"
] |
GieVh4.noreply@github.com
|
0fd5b6297580acd6887f5d68daa551292c7e1c7a
|
ab3e6cee73c76e1bda1ac8d4e9cb82286de757fe
|
/190605_Day7_Class_Model_CRUD/django/crud_review/boards/migrations/0001_initial.py
|
998b0a6ce4811fcda7f8db94df926fc4d27a1226
|
[] |
no_license
|
arara90/TIL_django
|
cc961535feba95e55d531c90a5d274cb5ec5f02e
|
5aa5fcb839dceb0abc9c5b09fdcb5a478dca34f4
|
refs/heads/master
| 2020-05-27T20:15:46.663200
| 2020-03-21T06:13:40
| 2020-03-21T06:13:40
| 188,775,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
# Generated by Django 2.2.1 on 2019-06-10 05:49
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Board',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=15)),
('content', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"arara90@hotmail.com"
] |
arara90@hotmail.com
|
722bf8448ff08e49ce1034f948b5d66e67fbe025
|
9eeddfe1707dfd5a899fab157432b77e4a4892b5
|
/code/get_embeddings.py
|
eeab87a1e8ff1b27af73502273d48d2a0e725ac9
|
[] |
no_license
|
ksenia007/humor_recognition
|
f523870945480c8ba4a83a7cabb49e40da4a3073
|
2f4077ace36f1e961a30f358eb73ed21ded1ff6f
|
refs/heads/master
| 2023-02-21T01:36:31.688257
| 2021-01-22T00:35:57
| 2021-01-22T00:35:57
| 261,538,901
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
from dataset import *
from train import *
from models import *
import torch.optim as optim
import pickle
import uuid
import warnings
from helper_functions import *
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning)
dataset_function = BasicWeighted
folder_data = 'data/training_datasets/'
datafile_opt = ['humicroedit', 'puns','oneliners', 'short']
base_file = 'output/embeddings/'
bert_model = BertModel.from_pretrained('bert-base-uncased')
bert_model = bert_model.eval()
bert_model = bert_model.cuda()
for idata, dataf in enumerate(datafile_opt):
train_set = dataset_function(filename = folder_data+dataf+'_train.csv', maxlen = 30, weight=1)
print('Work with', dataf)
results = np.zeros((len(train_set), 768))
for i in range(len(train_set)):
tokens = train_set[i][0].unsqueeze(0).cuda()
attn_mask = train_set[i][1].unsqueeze(0).cuda()
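        # bert_model returns (sequence_output, pooled_output) here; cls_head below is the
        # pooled [CLS] representation, which is 768-dimensional for bert-base-uncased.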
_, cls_head = bert_model(tokens, attention_mask = attn_mask)
results[i, :] = cls_head.cpu().detach()
filename = base_file+dataf+'_embeddings.npy'
np.save(filename, results)
|
[
"26440954+ksenia007@users.noreply.github.com"
] |
26440954+ksenia007@users.noreply.github.com
|
18bd370f71f589cf2bcef712de9b7795ea1f4538
|
d6a182d1ab766f47ccdfbb7862bf4cd4c1d5cf48
|
/delete.py
|
025abad5ef6777be447639a89ed1c3ee6a504fbe
|
[] |
no_license
|
mlnsvbd/CRUD_SqLite_Python
|
e7db43bf154776b92b27f5489e563f3caf968b25
|
18f88ecb036017a92ac308f6aac3df3294e5192f
|
refs/heads/master
| 2021-05-28T14:16:35.306800
| 2015-01-25T22:21:12
| 2015-01-25T22:21:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
import sqlite3 as lite
con = lite.connect('text.db')
cur = con.cursor()
sql = "DELETE FROM users WHERE id = '1'"
try:
cur.execute(sql)
con.commit()
print("Delete ok!!!")
except Exception as e:
print(e.args)
finally:
con.close()
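# Note (illustrative only): a parameterized query avoids hand-built SQL strings, e.g.
#   cur.execute("DELETE FROM users WHERE id = ?", ('1',))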
|
[
"welser.m.r@gmail.com"
] |
welser.m.r@gmail.com
|
b08a51aeb6644672aa2d6a3f7fcc2d9b19c3f3a1
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/insights/v20210401/data_collection_rule_association.py
|
e5cc8f03d180c23ad08149bb40a76e212462e4f5
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 12,201
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = ['DataCollectionRuleAssociationArgs', 'DataCollectionRuleAssociation']
@pulumi.input_type
class DataCollectionRuleAssociationArgs:
def __init__(__self__, *,
resource_uri: pulumi.Input[str],
association_name: Optional[pulumi.Input[str]] = None,
data_collection_endpoint_id: Optional[pulumi.Input[str]] = None,
data_collection_rule_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DataCollectionRuleAssociation resource.
:param pulumi.Input[str] resource_uri: The identifier of the resource.
:param pulumi.Input[str] association_name: The name of the association. The name is case insensitive.
:param pulumi.Input[str] data_collection_endpoint_id: The resource ID of the data collection endpoint that is to be associated.
:param pulumi.Input[str] data_collection_rule_id: The resource ID of the data collection rule that is to be associated.
:param pulumi.Input[str] description: Description of the association.
"""
pulumi.set(__self__, "resource_uri", resource_uri)
if association_name is not None:
pulumi.set(__self__, "association_name", association_name)
if data_collection_endpoint_id is not None:
pulumi.set(__self__, "data_collection_endpoint_id", data_collection_endpoint_id)
if data_collection_rule_id is not None:
pulumi.set(__self__, "data_collection_rule_id", data_collection_rule_id)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> pulumi.Input[str]:
"""
The identifier of the resource.
"""
return pulumi.get(self, "resource_uri")
@resource_uri.setter
def resource_uri(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_uri", value)
@property
@pulumi.getter(name="associationName")
def association_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the association. The name is case insensitive.
"""
return pulumi.get(self, "association_name")
@association_name.setter
def association_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "association_name", value)
@property
@pulumi.getter(name="dataCollectionEndpointId")
def data_collection_endpoint_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource ID of the data collection endpoint that is to be associated.
"""
return pulumi.get(self, "data_collection_endpoint_id")
@data_collection_endpoint_id.setter
def data_collection_endpoint_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_collection_endpoint_id", value)
@property
@pulumi.getter(name="dataCollectionRuleId")
def data_collection_rule_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource ID of the data collection rule that is to be associated.
"""
return pulumi.get(self, "data_collection_rule_id")
@data_collection_rule_id.setter
def data_collection_rule_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_collection_rule_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the association.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
class DataCollectionRuleAssociation(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
association_name: Optional[pulumi.Input[str]] = None,
data_collection_endpoint_id: Optional[pulumi.Input[str]] = None,
data_collection_rule_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Definition of generic ARM proxy resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] association_name: The name of the association. The name is case insensitive.
:param pulumi.Input[str] data_collection_endpoint_id: The resource ID of the data collection endpoint that is to be associated.
:param pulumi.Input[str] data_collection_rule_id: The resource ID of the data collection rule that is to be associated.
:param pulumi.Input[str] description: Description of the association.
:param pulumi.Input[str] resource_uri: The identifier of the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DataCollectionRuleAssociationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Definition of generic ARM proxy resource.
:param str resource_name: The name of the resource.
:param DataCollectionRuleAssociationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DataCollectionRuleAssociationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
association_name: Optional[pulumi.Input[str]] = None,
data_collection_endpoint_id: Optional[pulumi.Input[str]] = None,
data_collection_rule_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DataCollectionRuleAssociationArgs.__new__(DataCollectionRuleAssociationArgs)
__props__.__dict__["association_name"] = association_name
__props__.__dict__["data_collection_endpoint_id"] = data_collection_endpoint_id
__props__.__dict__["data_collection_rule_id"] = data_collection_rule_id
__props__.__dict__["description"] = description
if resource_uri is None and not opts.urn:
raise TypeError("Missing required property 'resource_uri'")
__props__.__dict__["resource_uri"] = resource_uri
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights/v20210401:DataCollectionRuleAssociation"), pulumi.Alias(type_="azure-native:insights:DataCollectionRuleAssociation"), pulumi.Alias(type_="azure-nextgen:insights:DataCollectionRuleAssociation"), pulumi.Alias(type_="azure-native:insights/v20191101preview:DataCollectionRuleAssociation"), pulumi.Alias(type_="azure-nextgen:insights/v20191101preview:DataCollectionRuleAssociation")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataCollectionRuleAssociation, __self__).__init__(
'azure-native:insights/v20210401:DataCollectionRuleAssociation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DataCollectionRuleAssociation':
"""
Get an existing DataCollectionRuleAssociation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DataCollectionRuleAssociationArgs.__new__(DataCollectionRuleAssociationArgs)
__props__.__dict__["data_collection_endpoint_id"] = None
__props__.__dict__["data_collection_rule_id"] = None
__props__.__dict__["description"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return DataCollectionRuleAssociation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dataCollectionEndpointId")
def data_collection_endpoint_id(self) -> pulumi.Output[Optional[str]]:
"""
The resource ID of the data collection endpoint that is to be associated.
"""
return pulumi.get(self, "data_collection_endpoint_id")
@property
@pulumi.getter(name="dataCollectionRuleId")
def data_collection_rule_id(self) -> pulumi.Output[Optional[str]]:
"""
The resource ID of the data collection rule that is to be associated.
"""
return pulumi.get(self, "data_collection_rule_id")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the association.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Resource entity tag (ETag).
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The resource provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.DataCollectionRuleAssociationProxyOnlyResourceResponseSystemData']:
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
bc0b91140f22fc81bcbba5bcd8f3452133cf725e
|
207f0427e0ffb10941db14d8de08ccbeac83dac1
|
/gmail.py
|
45dc9d762624648a1e30049e1f655efb972a3d08
|
[] |
no_license
|
appollo88/py
|
0d9182b64928bcda6be0a3a36906b6144371acd7
|
1644d3f45a9b948a76f2a08df046db05d2f329a3
|
refs/heads/master
| 2021-01-20T14:39:24.128069
| 2017-02-22T05:46:33
| 2017-02-22T05:46:33
| 82,765,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
"""import smtplib
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login("liuxun931@gmail.com", "lx061511")
msg = "YOUR MESSAGE!"
server.sendmail("liuxun931@gmail.com", "liuxun931@163.com", msg)
server.quit()
"""
# Send mail using the smtplib module
import smtplib
TO = 'liuxun931@163.com'
SUBJECT = 'TEST MAIL'
TEXT = 'Here is a message from python.'
# Gmail Sign In
gmail_sender = 'liuxun931@gmail.com'
gmail_passwd = 'lx061511'
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login(gmail_sender, gmail_passwd)
BODY = '\r\n'.join(['To: %s' % TO,
'From: %s' % gmail_sender,
'Subject: %s' % SUBJECT,
'', TEXT])
try:
server.sendmail(gmail_sender, [TO], BODY)
print ('email sent')
except:
print ('error sending mail')
server.quit()
|
[
"noreply@github.com"
] |
appollo88.noreply@github.com
|
b9691e61dfe1e73f0cfed348461860d2ce4d6495
|
16ecabb5d9010c7fa4aebb8ab852f7c6a19193db
|
/src/0809.py
|
0ba2428a1bbf7638358e2412cd9b40399abf0b68
|
[] |
no_license
|
LeeSM0518/OpenCV-python
|
74ff0d899d291a35f9cd82d2ef37835a0c5ccdf2
|
46c234879f5d48876ca0888bdede8bfb347b7c30
|
refs/heads/master
| 2020-04-30T19:35:33.201278
| 2020-02-25T14:35:20
| 2020-02-25T14:35:20
| 177,043,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
# 0809.py
import cv2
import numpy as np
#1
src = cv2.imread('./data/momentTest.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
ret, bImage = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
#2
##M = cv2.moments(bImage)
M = cv2.moments(bImage, True)
for key, value in M.items():
print('{}={}'.format(key, value))
#3
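# Centroid from the raw image moments: cx = m10/m00, cy = m01/m00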
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
dst = src.copy()
cv2.circle(dst, (cx, cy), 5, (0,0,255), 2)
cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"nalsm98@naver.com"
] |
nalsm98@naver.com
|
096c2a0a7401aae836823744ed882e946775d8c3
|
74309d28c3c966ab46fe1d7bd7c6d6ca9e7009d4
|
/setup.py
|
86192f497fb7f45cf50128f2fc1870d69363a8a8
|
[
"MIT"
] |
permissive
|
seporaitis/graphqlpy
|
c476b4632c3d117a95663ee88d1710a4999f22e7
|
c16623a00a851a785eaef7b27a72c35d49b0c4a4
|
refs/heads/master
| 2023-01-05T06:52:14.647528
| 2017-09-07T20:56:48
| 2017-09-07T20:56:48
| 102,777,202
| 1
| 0
|
MIT
| 2022-12-26T19:45:27
| 2017-09-07T19:25:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,497
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from setuptools import find_packages, setup
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
version = get_version('graphqlpy')
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = []
test_requirements = []
setup(
name='graphqlpy',
version=version,
    description="A humble attempt at a library generating GraphQL queries programmatically.",
long_description=readme + '\n\n' + history,
author="Julius Seporaitis",
author_email='julius@seporaitis.net',
url='https://github.com/seporaitis/graphqlpy',
packages=find_packages(exclude=['tests', 'tests.*']),
package_dir={
'graphqlpy': 'graphqlpy',
},
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords='graphql',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements
)
|
[
"julius@seporaitis.net"
] |
julius@seporaitis.net
|
8d63e564dff2869969a823b0cef0bf2bc6eef4ef
|
064a954c8dd7d50720aa8fa748d24e8495b8f7d9
|
/OpenCv/字符投影.py
|
0258d496027be7b77d2b2ad6e748db532e8445a9
|
[] |
no_license
|
xianyichi/keras
|
73169c248dde73f0e49e19f117b21080d1b3ba14
|
7ca5ab7e0ef1291b97b985e5ec9c78785e2ff3ec
|
refs/heads/master
| 2021-06-10T23:02:02.354669
| 2021-05-20T12:59:41
| 2021-05-20T12:59:41
| 182,005,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,386
|
py
|
import cv2
import numpy
img = cv2.imread ('/Users/apple/PycharmProjects/keras/image/data/images/0_00h_0.png', cv2.IMREAD_COLOR)  # load as BGR; cv2.COLOR_BGR2GRAY is a cvtColor code, not an imread flag
height, width = img.shape [ :2 ]
# print height, width
# resized = cv2.resize(img, (2*width,2*height), interpolation=cv2.INTER_CUBIC)
gray = cv2.cvtColor (img, cv2.COLOR_BGR2GRAY)
(_, thresh) = cv2.threshold (gray, 140, 255, cv2.THRESH_BINARY)
# Grow the text strokes into solid blocks
kernel = cv2.getStructuringElement (cv2.MORPH_RECT, (2, 2))  # morphology: define a rectangular structuring element
closed = cv2.erode (thresh, None, iterations=7)
# cv2.imshow('erode',closed)
height, width = closed.shape [ :2 ]
# print height, width
z = [ 0 ] * height
v = [ 0 ] * width
hfg = [ [ 0 for col in range (2) ] for row in range (height) ]
lfg = [ [ 0 for col in range (2) ] for row in range (width) ]
box = [ 0, 0, 0, 0 ]
# Horizontal projection
a = 0
emptyImage1 = numpy.zeros ((height, width, 3), numpy.uint8)
for y in range (0, height):
for x in range (0, width):
cp = closed [ y, x ]
# if np.any(closed[y,x]):
if cp == 0:
a = a + 1
else:
continue
z [ y ] = a
# print z[y]
a = 0
# Select the row split points based on the horizontal projection values
inline = 1
start = 0
j = 0
for i in range (0, height):
    if inline == 1 and z [ i ] >= 150:  # entering a text region from a blank region
        start = i  # record the starting row split point
        # print i
        inline = 0
    elif (i - start > 3) and z [ i ] < 150 and inline == 0:  # entering a blank region from a text region
        inline = 1
        hfg [ j ] [ 0 ] = start - 2  # save the row split position
hfg [ j ] [ 1 ] = i + 2
j = j + 1
# Vertical projection and segmentation for each row
a = 0
for p in range (0, j):
for x in range (0, width):
for y in range (hfg [ p ] [ 0 ], hfg [ p ] [ 1 ]):
cp1 = closed [ y, x ]
if cp1 == 0:
a = a + 1
else:
continue
        v [ x ] = a  # save the pixel count for each column
a = 0
# print width
    # Vertical split points
incol = 1
start1 = 0
j1 = 0
z1 = hfg [ p ] [ 0 ]
z2 = hfg [ p ] [ 1 ]
for i1 in range (0, width):
        if incol == 1 and v [ i1 ] >= 20:  # entering a text region from a blank region
            start1 = i1  # record the starting column split point
            incol = 0
        elif (i1 - start1 > 3) and v [ i1 ] < 20 and incol == 0:  # entering a blank region from a text region
            incol = 1
            lfg [ j1 ] [ 0 ] = start1 - 2  # save the column split position
lfg [ j1 ] [ 1 ] = i1 + 2
l1 = start1 - 2
l2 = i1 + 2
j1 = j1 + 1
cv2.rectangle (img, (l1, z1), (l2, z2), (255, 0, 0), 2)
cv2.imshow ('result', img)
cv2.waitKey (0)
|
[
"1369362296@qq.com"
] |
1369362296@qq.com
|
1e342c9a885841dca5ceca8cad3414989c843045
|
abd2a91cb26dd7ca8d3fca6f9c4f5ef9dea2f066
|
/logReg.py
|
95a8eee77371997300560c19e27f423c142fc9fc
|
[] |
no_license
|
Saniewski/multiclass-perceptron
|
dd0018ce7cde93bec978c24e920853e19e16d938
|
36a475dc4c2f5142b5205259a69ee403248d6eea
|
refs/heads/master
| 2022-04-15T07:13:44.429956
| 2020-04-08T20:20:12
| 2020-04-08T20:20:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,638
|
py
|
import numpy as np
import matplotlib.pylab as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from scipy.special import expit
from plotka import plot_decision_regions
class LogisticRegressionGD(object):
def __init__(self, learningRate=0.05, epochs=100, random_state=1):
self.lr = learningRate
self.epochs = epochs
self.random_state = random_state
def fit(self, X, y):
rgen = np.random.RandomState(self.random_state)
self.weights = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
self.bias = rgen.normal(loc=0.0, scale=0.01)
self.costs = []
for i in range(self.epochs):
net_input = self.net_input(X)
output = expit(net_input)
errors = (y - output)
self.weights += self.lr * X.T.dot(errors)
self.bias += self.lr * errors.sum()
cost = (-y.dot(np.log(output)) - ((1 - y).dot(np.log(1 - output))))
self.costs.append(cost)
return self
def net_input(self, X):
return np.dot(X, self.weights) + self.bias
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, 0)
class Multiclass(object):
def __init__(self, reg1, reg2):
self.reg1 = reg1
self.reg2 = reg2
def predict(self, X):
result = []
for data in X:
if self.reg1.predict(data) == 1:
result.append(0)
elif self.reg2.predict(data) == 1:
result.append(1)
else:
result.append(2)
return np.array(result)
def main():
r8 = float(input('Learning rate: '))
epochs = int(input('Epochs: '))
iris = datasets.load_iris()
X = iris.data[:, [1, 3]]
y = iris.target
y1 = y.copy()
y2 = y.copy()
y3 = y.copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1, stratify=y)
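    # One-vs-rest remapping below: y1 becomes a binary target for class 0 and y3 for class 2;
    # class 1 is covered implicitly by the "else" branch in Multiclass (y2 is created but never used).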
y1[(y1 != 0)] = -3
y1[y1 == 0] = 1
y1[y1 == -3] = 0
y3[(y3 != 2)] = -3
y3[y3 == 2] = 1
y3[y3 == -3] = 0
reg1 = LogisticRegressionGD(r8, epochs, 1)
reg1.fit(X, y1)
reg3 = LogisticRegressionGD(r8, epochs, 1)
reg3.fit(X, y3)
multi = Multiclass(reg1, reg3)
print(multi.predict(X_test))
print(reg1.predict(X_test))
plot_decision_regions(X=X_test, y=y_test, classifier=multi)
plt.xlabel(r'$x_1$')
plt.ylabel(r'$x_2$')
plt.legend(loc='upper left')
plt.show()
if __name__ == '__main__':
main()
|
[
"pawel.san16@gmail.com"
] |
pawel.san16@gmail.com
|
5d8cd2c7638647e1cdd05a42eaf90febc0a95726
|
5ebe757ed6a2a339525c349922a3218b9d2b3f94
|
/lstm-language-model/preprocess.py
|
3930b2bf16a8a4194f5abff4da1756b269b70a3c
|
[] |
no_license
|
esun0087/self_parser
|
aa3ef6103c470c5f85627fe59e6d82239bcd63d6
|
cae1f45be1c954839980334e16d343bfae27dbe6
|
refs/heads/master
| 2020-03-21T10:27:18.247597
| 2018-08-07T08:26:29
| 2018-08-07T08:26:29
| 138,451,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,516
|
py
|
import torch
import argparse
import data
def preprocess(opt):
print('Begin preprocessing')
train_dataset = data.DataSet(opt.train_data, display_freq=opt.display_freq)
train_dataset.max_dict = opt.dict_size
train_dataset.build_dict()
print('Save training data')
torch.save(train_dataset, opt.train_data + '.prep.train.pt')
val_dataset = data.DataSet(opt.val_data, display_freq=opt.display_freq)
val_dataset.change_dict(train_dataset.dictionary)
print('Save validation data')
torch.save(val_dataset, opt.val_data + '.prep.val.pt')
print('Preprocessing done')
if __name__ == '__main__':
parser = argparse.ArgumentParser('Preprocessing')
parser.add_argument('--train_data', type=str, default='data/penn/train.txt',
help='Training data path')
parser.add_argument('--val_data', type=str, default='data/penn/valid.txt',
help='Validation data path')
    parser.add_argument('--dict_size', type=int, default=50000,
                        help='Reduce the dictionary if it exceeds this size')
    parser.add_argument('--display_freq', type=int, default=100000,
                        help='Display progress every this many sentences; 0 for no display')
    parser.add_argument('--max_len', type=int, default=100,
                        help='Maximum length of a sentence')
    parser.add_argument('--trunc_len', type=int, default=100,
                        help='Truncate sentences that are longer than the maximum length')
opt = parser.parse_args()
preprocess(opt)
|
[
"a1a2a3a4a5"
] |
a1a2a3a4a5
|
1c6a094af068444ca3d28073d89315729267ff26
|
e57613c79e9a7a014ae67c00ccaf7c8014011954
|
/lab3/Ast.py
|
fbe23583ed7d7db393eef7caeaf51eec4008e320
|
[] |
no_license
|
szymon-rogus/CompilersLabs
|
cfebbab381e8ded24a122b03baba23c1a011b60b
|
d0f878bdaf8cf584ff28cd2449e2fe2dd4aa6c90
|
refs/heads/master
| 2021-04-02T15:28:58.725704
| 2020-06-10T09:01:59
| 2020-06-10T09:01:59
| 248,289,803
| 0
| 0
| null | 2020-04-30T11:44:18
| 2020-03-18T16:51:10
|
Python
|
UTF-8
|
Python
| false
| false
| 6,214
|
py
|
class Node(object):
def __init__(self, type, children=None, leaf=None):
self.type = type
self.leaf = leaf
if children:
self.children = children
else:
self.children = []
class BinaryExpression(Node):
def __init__(self, left, operator, right):
super().__init__(self.__class__, [left, right], operator)
self.left = left
self.operator = operator
self.right = right
def __repr__(self):
return '{} {} {}'.format(self.left, self.operator, self.right)
class UnaryExpression(Node):
def __init__(self, operator, operand, left=True):
super().__init__(self.__class__, [operand], operator)
self.operator = operator
self.operand = operand
self.left = left
def __repr__(self):
order = [self.operator, self.operand] if self.left else [self.operand, self.operator]
return '{}{}'.format(order[0], order[1])
class Negation(UnaryExpression):
def __init__(self, operand):
super().__init__('-', operand)
class Transposition(UnaryExpression):
def __init__(self, operand):
super().__init__('\'', operand, False)
class Assignment(BinaryExpression):
pass
class Function(Node):
def __init__(self, name, argument):
super().__init__(self.__class__, [argument], name)
self.name = name
self.argument = argument
def __repr__(self):
return "{}({})".format(self.name, self.argument)
class Variable(Node):
def __init__(self, name):
super().__init__(self.__class__, [], name)
self.name = name
def __repr__(self):
return '{}'.format(self.name)
class If(Node):
def __init__(self, condition, expression, else_expression=None):
super().__init__(self.__class__, [condition, expression, else_expression], ["IF", "THEN", "ELSE"])
self.condition = condition
self.expression = expression
self.else_expression = else_expression
        if else_expression is None:
self.children = self.children[:-1]
self.leaf = self.leaf[:-1]
def __repr__(self):
representation = 'IF {} THEN {}'.format(self.condition, self.expression)
result = representation + ' ELSE {}'.format(self.else_expression) \
if self.else_expression else representation
return result
class While(Node):
def __init__(self, condition, body):
super().__init__(self.__class__, [condition, body], "WHILE")
self.condition = condition
self.body = body
def __repr__(self):
return 'WHILE {} DO {}'.format(self.condition, self.body)
class Range(Node):
def __init__(self, start, end, step=1):
super().__init__(self.__class__, [start, end, step], "RANGE")
if step == 1: self.children = self.children[:-1]
self.start = start
self.end = end
self.step = step
def __repr__(self):
return '{}:{}:{}'.format(self.start, self.end, self.step)
class For(Node):
def __init__(self, id, range, body):
super().__init__(self.__class__, [id, range, body], "FOR")
self.id = id
self.range = range
self.body = body
def __repr__(self):
return 'FOR {} IN {} DO {}'.format(self.id, self.range, self.body)
class Break(Node):
def __init__(self):
super().__init__(self.__class__, [], "BREAK")
def __repr__(self):
return 'BREAK'
class Continue(Node):
def __init__(self):
super().__init__(self.__class__, [], "CONTINUE")
def __repr__(self):
return 'CONTINUE'
class Return(Node):
def __init__(self, result):
super().__init__(self.__class__, [result], "RETURN")
self.result = result
def __repr__(self):
return 'RETURN( {} )'.format(self.result)
class Print(Node):
def __init__(self, expression):
super().__init__(self.__class__, [expression], "PRINT")
self.expression = expression
def __repr__(self):
return 'PRINT( {} )'.format(self.expression)
class VariableAttribute(Node):
def __init__(self, variable, key):
super().__init__(self.__class__, [variable, key], "REF")
self.variable = variable
self.key = key
def __repr__(self):
return '{}[{}]'.format(self.variable, self.key)
class Error(Node):
pass
class CodeBlock(Node):
def __init__(self, instruction):
super().__init__(self.__class__, [instruction])
self.instructions = self.children
def __repr__(self):
return "{\n" + "\n".join(map(str, self.instructions)) + "\n}"
class Program(Node):
def __init__(self, program):
super().__init__(self.__class__, [program])
self.program = program
def __repr__(self):
return str(self.program)
class Instruction(Node):
def __init__(self, line):
super().__init__(self.__class__, [line])
self.line = line
def __repr__(self):
return str(self.line)
class Matrix(Node):
def __init__(self, rows):
super().__init__(self.__class__, [rows], "MATRIX")
self.dims = len(rows), len(rows[0])
self.rows = rows
def __repr__(self):
return str(self.rows)
class Value(Node):
def __init__(self, val):
super().__init__(self.__class__, [], val)
self.val = val
def __repr__(self):
return "{}({})".format(type(self.val).__name__, self.val)
class Rows(Node):
def __init__(self, sequence):
super().__init__(self.__class__, [sequence])
self.row_list = self.children
def __repr__(self):
return "[" + ", ".join(map(str, self.row_list)) + "]"
def __len__(self):
return len(self.row_list)
def __getitem__(self, item):
return self.row_list[item]
class Sequence(Node):
def __init__(self, expression):
super().__init__(self.__class__, [expression], "SEQ")
self.expressions = self.children
def __repr__(self):
return "[" + ", ".join(map(str, self.expressions)) + "]"
def __len__(self):
return len(self.expressions)
def __getitem__(self, item):
return self.expressions[item]
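# A small usage sketch (illustrative only, not part of the original parser): it
# builds the expression "a = 2 + 3" from the node classes above and relies
# solely on their __repr__ methods.
if __name__ == '__main__':
    expr = Assignment(Variable('a'), '=', BinaryExpression(Value(2), '+', Value(3)))
    print(expr)  # prints: a = int(2) + int(3)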
|
[
"benroszko@gmail.com"
] |
benroszko@gmail.com
|
829fb8cdd606f109189879a5e3ad261af91f8278
|
ca5bac9deca017e02b8af87ffaaa91d1eb6c6d07
|
/Si_Nd/example_code/plot_2D_Seasonal.py
|
a194819969a902c3d8ba3f4bf7a50b45dd6fcae3
|
[] |
no_license
|
ndoyesiny/metrics_workshop
|
36dcc0b444a8ab3b8a0f897c81ada142a5ba6ad1
|
b74f062c27243eb0705eab367167d1fb9eaf0cd8
|
refs/heads/master
| 2020-06-14T10:29:58.282850
| 2017-03-30T11:20:19
| 2017-03-30T11:20:19
| 75,197,976
| 0
| 0
| null | 2016-11-30T15:04:27
| 2016-11-30T15:04:26
| null |
UTF-8
|
Python
| false
| false
| 1,738
|
py
|
'''
plot_2D_Seasonal.py
This function makes seasonal 2D plots.
Author: Siny NDOYE, December 2016
'''
import os
import iris
import iris.quickplot as qplt
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.basemap as bm
#import pdb
#def plot_Func(cube2plot,outpath,mnth,nlevc):
def plot_Func_SAT(cube2plot,figpath,mnth,nlevc,xstart,xend,ystart,yend,title_name):
# pdb.set_trace()
# print cube2plot.collapsed(['time', 'latitude','longitude'],iris.analysis.MIN), nlevc
#levels = np.linspace(iris.analysis.MIN(cube2plot),iris.analysis.MAX(cube2plot) , nlevc)
plt.clf()
    # levels = np.linspace(282, 302, nlevc)   # alternative contour range in Kelvin (unused)
    levels = np.linspace(8, 32, nlevc)
qplt.contourf(cube2plot, levels = levels, extend = 'max')
m = bm.Basemap(projection='cyl', llcrnrlat=ystart, urcrnrlat=yend, llcrnrlon=xstart, urcrnrlon=xend, resolution='c') # coarse resolution for grid
#m = bm.Basemap(projection='cyl', llcrnrlat=8.0, urcrnrlat=16.0, llcrnrlon=-20.0, urcrnrlon=20.0, resolution='c') # coarse resolution for grid
m.drawcoastlines(linewidth=2)
m.drawcountries(linewidth=1)
plt.title(title_name)
if not os.path.exists(figpath):
os.makedirs(figpath)
if mnth == 0:
plt.savefig(figpath +'Seasonal_average_DJF.png' )
plt.show()
if mnth == 1:
plt.savefig(figpath +'Seasonal_average_MAM.png' )
plt.show()
if mnth == 2:
plt.savefig(figpath +'Seasonal_average_JJA.png' )
plt.show()
if mnth == 3:
plt.savefig(figpath +'Seasonal_average_SON.png' )
plt.show()
#if __name__== '__main__':
# plot_Func(cube2plot,outpath,mnth,nlevc)
#plot_Func(cube2plot,outpath,mnth,nlevc,xstart,xend,ystart,yend)
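# A usage sketch: the file name below is hypothetical, so it stays commented out;
# substitute a real NetCDF/PP file and variable before running.
# cube = iris.load_cube('surface_air_temperature.nc')          # hypothetical input
# seasonal_mean = cube.collapsed('time', iris.analysis.MEAN)
# plot_Func_SAT(seasonal_mean, './figures/', 2, 13,
#               -20.0, 20.0, 8.0, 16.0, 'JJA mean SAT')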
|
[
"siny@lodyn416.locean-ipsl.upmc.fr"
] |
siny@lodyn416.locean-ipsl.upmc.fr
|
a2c60c899f14d1dd9b97de4c9161123df14940e5
|
753a569a2ce6466d236220d0ba8c61c39656cb87
|
/BP_gradient_descent/gradient_descent.py
|
6b569c8056faf552630e34d6a8c8f3d7eef9b218
|
[] |
no_license
|
RabbitTea/AI_DS-Learning
|
e26c5fa453bf5434ddbefbc323a94c74faaa282e
|
66db4e6079c1210447776b3324b30b6667af2172
|
refs/heads/master
| 2020-04-05T18:00:27.943196
| 2018-11-21T09:45:17
| 2018-11-21T09:45:17
| 157,084,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,524
|
py
|
# Implementation of the gradient descent algorithm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xlrd
#Some helper functions for plotting and drawing lines
def plot_points(X, y):  # scatter-plot the points of the two classes, coloured by label
admitted = X[np.argwhere(y==1)]
rejected = X[np.argwhere(y==0)]
plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'blue', edgecolor = 'k')
plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'red', edgecolor = 'k')
def display(m, b, color='g--'):  # draw the current separating line
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
x = np.arange(-10, 10, 0.1)
plt.plot(x, m*x+b, color)
# Activation (sigmoid) function
def sigmoid(x):  # activation function, using the sigmoid
    return 1 / (1 + np.exp(-x))
# Compute sigmoid(w1*x1 + w2*x2 + b), i.e. the predicted output value
def output_formula(features, weights, bias):
    return sigmoid(np.dot(features, weights) + bias)  # np.dot works for a single sample (1*2) or the full n*2 feature matrix against the 2*1 weights
def error_formula(y, output):  # cross-entropy error, computed for each y_i
    return - y*np.log(output) - (1 - y) * np.log(1-output)
def update_weights(x, y, weights, bias, learnrate):  # one gradient descent step for the weights and bias
output = output_formula(x, weights, bias)
d_error = -(y - output)
weights -= learnrate * d_error * x
bias -= learnrate * d_error
return weights, bias
# Training function: fits the decision boundary
def train(features, targets, epochs, learnrate, graph_lines=False):
errors = []
n_records, n_features = features.shape#n_records=100,n_features=2
last_loss = None
    weights = np.random.normal(scale=1 / n_features ** .5, size=n_features)  # random initial weights, shape (2,)
    bias = 0
    display(-weights[0] / weights[1], -bias / weights[1])  # draw the current decision boundary
    for e in range(epochs):  # loop over the epochs
del_w = np.zeros(weights.shape)
        for x, y in zip(features, targets):  # zip pairs each sample in X with its label y
            output = output_formula(x, weights, bias)  # predicted value y_i; x is 1*2, weights is 2*1
            error = error_formula(y, output)  # error of this single y_i
weights, bias = update_weights(x, y, weights, bias, learnrate)
print(weights,bias)
        print(e)  # note: each epoch runs through all 100 samples and updates the weights 100 times, starting from the previous epoch's result
        # Printing out the log-loss error on the training set
        out = output_formula(features, weights, bias)  # predictions after this epoch; features is n*2, weights 2*1, out has length n
        loss = np.mean(error_formula(targets, out))  # arithmetic mean of the per-sample errors
errors.append(loss)
if e % (epochs / 10) == 0:
print("\n========== Epoch", e, "==========")
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
predictions = out > 0.5
accuracy = np.mean(predictions == targets)
print("Accuracy: ", accuracy)
        if graph_lines:  # and e % (epochs / 100) == 0
            display(-weights[0] / weights[1], -bias / weights[1])  # draw the boundary found so far
# Plotting the solution boundary
plt.title("Solution boundary")
    display(-weights[0] / weights[1], -bias / weights[1], 'black')  # draw the final decision boundary
# Plotting the data
plot_points(features, targets)
plt.show()
# Plotting the error
plt.title("Error Plot")
plt.xlabel('Number of epochs')
plt.ylabel('Error')
plt.plot(errors)
plt.show()
if __name__ == '__main__':
np.random.seed(44)
epochs = 100
learnrate = 0.01
data = xlrd.open_workbook('F:\工程实践\工作安排\work3_BPGradientDescent\data.xls')
X = []
    table = data.sheets()[0]  # open the first sheet
X1 = table.col_values(0)
X2 = table.col_values(1)
X.append(X1)
X.append(X2)
    X = np.array(X).T  # reshape X into a 100*2 matrix
    Y = np.array(table.col_values(2))  # third column: the label of each data point
plot_points(X,Y)
plt.show()
train(X, Y, epochs, learnrate, True)
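    # If the Excel file above is unavailable, the same run can be sketched on
    # synthetic data (two Gaussian blobs standing in for the two classes);
    # uncomment to try it:
    # X = np.vstack([np.random.normal(0.3, 0.1, (50, 2)),
    #                np.random.normal(0.7, 0.1, (50, 2))])
    # Y = np.array([0] * 50 + [1] * 50)
    # train(X, Y, epochs, learnrate, True)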
|
[
"354496262@qq.com"
] |
354496262@qq.com
|
d5a4535689e5faed501055cb510fae7e65574690
|
f4e7b66391205df44ea15e3bd9e93e4439393df0
|
/inputcheck.py
|
8037747f04d28cb4eb79fef72fd11160dbda0821
|
[] |
no_license
|
thakurakhil/chemical-NER
|
a2fcf93ad3bfaec95e3e6af42e75fe044354284d
|
9b47ab96f178e0e665688e4bcaf677f44db2919b
|
refs/heads/master
| 2021-08-08T20:36:15.448621
| 2017-11-11T04:01:12
| 2017-11-11T04:01:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
import csv
with open('./inputtext/ninefilefeatures.txt', 'r') as f:
    reader = csv.reader(f, delimiter='\t')
    for row in reader:
        if len(row) != 8:
            break
        else:
            for i in row:
                if i == '':
                    print(row)
                    break
|
[
"singhakhil33@gmail.com"
] |
singhakhil33@gmail.com
|
4edfcf8e234bf582b8a3e06752421dff27a5d562
|
679b923d6ba62d00ab5ad8aef3f82f42df71a58c
|
/Server_Kapfumvuti_Patel.py
|
3a730631bb548c6f480757df43a85d6b5b03bea9
|
[] |
no_license
|
GurenMarkV/Go-Back-N-Protocol
|
957086dbca5e4c60ed18ff2ee418016cb102e8f6
|
949c3db7bd38cc9e09a847853bc45531517a3620
|
refs/heads/master
| 2020-03-18T22:30:12.789811
| 2018-05-29T20:21:56
| 2018-05-29T20:21:56
| 135,348,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,023
|
py
|
# Project 1: Implementation of Go-Back-N Protocol
# Group Member: Daksh Patel ID: 104 030 031
# Group Member: Nyasha Kapfumvuti ID: 104 121 166
# Date: Mar 30th, 2018
import socket
import numpy
import time
import json
from random import randint
acked = [] # acknowledged packets
unAcked = [] # unacknowledged packets
ticker = 0 # intended 0.2 loss rate; note the randint(0, 5) == 5 check below actually drops about 1 in 6 packets
lostItem = 5 # every 5th item gets placed in unacked
returnVals = [] # array of values to be returned as acks/unacks
timer = time.localtime
packets = []
packet = ''
server_address = ('localhost', 10000)
serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
serverSocket.bind(server_address)
serverSocket.listen(1)
print('The server is ready to receive')
while True:
print('waiting for a connection')
connection, client_address = serverSocket.accept()
try:
print('client connected:', client_address)
while True:
data = connection.recv(1024) # data arrives as a string. Need to convert this back to an array
newPack = int(data)
if(randint(0,5) == 5):
print('packet was lost/corrupted')
connection.sendto(str(newPack).encode(), server_address)
else:
if newPack not in acked:
acked.append(newPack)
                    print('received sequence # ', str(newPack), ' successfully. Sending ack')
connection.sendto(str(newPack).encode(), server_address)
print('sent')
ticker += 1 # loss rate leads to every nth item getting lost
if data:
# send acknowledgement
# connection.sendto(str(newPack).encode(), server_address)
print('')
else:
break
finally:
connection.close()
print(acked)
|
[
"noreply@github.com"
] |
GurenMarkV.noreply@github.com
|
4e98ab90157e2164c540617da24de059870e5e34
|
3071ce441681abbfea11c9cc5a5ba853aecff2d2
|
/game_over.py
|
56bb93d1293913866d464c7cc38a5f883a36e269
|
[] |
no_license
|
xodapi/python_learning
|
d75ffc7c8312f52be3c5123fd003537943d75fe7
|
afd7ff56b8ccdfea42ccb3dc52ef25dfd44d3d68
|
refs/heads/master
| 2016-09-11T04:58:55.524656
| 2015-04-21T10:51:28
| 2015-04-21T10:51:28
| 28,742,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20
|
py
|
print('Game over')
|
[
"faneropi@gmail.com"
] |
faneropi@gmail.com
|
339289d6118565d385d545357077d0aeb36d8cc1
|
2a2def196a68319147631a4af93095d1a03de754
|
/MuZero/game/gym_wrappers.py
|
62ee3e3e4cc0c785f3b6090d3fd5fecc49ca4076
|
[] |
no_license
|
colindbrown/columbia-deep-learning-project
|
8b7d2dc791046426ff6030ec52d1c9dddc99de3c
|
9046552bd631270838b0e49a2b8c9c524d40f1ed
|
refs/heads/master
| 2023-05-25T14:39:55.978535
| 2020-04-29T20:16:59
| 2020-04-29T20:16:59
| 248,585,231
| 2
| 2
| null | 2022-06-22T01:52:03
| 2020-03-19T19:13:58
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 631
|
py
|
import gym
import numpy as np
class ScalingObservationWrapper(gym.ObservationWrapper):
"""
    Wrapper that applies min-max scaling to observations.
"""
def __init__(self, env, low=None, high=None):
super().__init__(env)
assert isinstance(env.observation_space, gym.spaces.Box)
low = np.array(self.observation_space.low if low is None else low)
high = np.array(self.observation_space.high if high is None else high)
self.mean = (high + low) / 2
self.max = high - self.mean
def observation(self, observation):
return (observation - self.mean) / self.max
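# A minimal usage sketch (assumes the classic-control envs and the pre-0.26 gym
# API where reset() returns only the observation; "CartPole-v1" and the bounds
# below are illustrative choices, not necessarily what the project uses).
if __name__ == '__main__':
    env = ScalingObservationWrapper(gym.make("CartPole-v1"),
                                    low=[-2.4, -2.0, -0.21, -2.0],
                                    high=[2.4, 2.0, 0.21, 2.0])
    obs = env.reset()
    print(obs)  # each component rescaled to roughly [-1, 1]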
|
[
"jayantsubramanian2020@Jayants-MacBook-Air.local"
] |
jayantsubramanian2020@Jayants-MacBook-Air.local
|
e0c97f958b39a77c224ebe75cd5b1fe26876f2f1
|
0c265021768e72b91b40d77e0c7d78fcf0e70935
|
/Recursion/Module1/SumOfNnumbers.py
|
6ea101cae243b483a2db6144bc28d7b927e62a97
|
[] |
no_license
|
pawarvishal/cninjads_python_problems
|
0b49fb987cb3b8571ff0fe2e6f617174d36fc7d6
|
380fea5e9e507087dbb5743a30770cae2d9bc0ae
|
refs/heads/master
| 2020-12-12T12:33:34.759314
| 2020-02-02T06:24:53
| 2020-02-02T06:24:53
| 234,127,793
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
# Calculate sum of n numbers
def sum_n(n):
if n == 0:
return 0
small_output = sum_n(n-1)
output = small_output + n
return output
num = int(input())
print(sum_n(num))
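# Quick sanity check against the closed form n*(n+1)//2 (illustrative only;
# the recursive version above is the intended solution).
assert sum_n(10) == 10 * 11 // 2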
|
[
"openstack.vishal@gmail.com"
] |
openstack.vishal@gmail.com
|
a60cce92b01defcbf4760f93cdbc9f636e0e3cef
|
1503bb33834c463657977765e821620f189a4685
|
/p007.py
|
79f340ac96bd0e0708ffbde2fc2002a0b35e7944
|
[] |
no_license
|
JackPound/Euler-Problems
|
94a2ff36d92cc28c4a23586847698d33710f24b0
|
fac5975d4fa323b3f992daedc12aec1246dbdb82
|
refs/heads/master
| 2020-03-22T20:53:26.655150
| 2018-07-12T22:51:57
| 2018-07-12T22:51:57
| 140,639,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
# What is the 10 001st prime number?
def is_prime(number_to_check):
prime = True
for x in range (2, number_to_check):
if number_to_check % x == 0:
prime = False
break
return prime
def prime_position(at_position):
prime_list = []
count = 2
while len(prime_list) < at_position:
if is_prime(count):
prime_list.append(count)
count += 1
else:
count += 1
print(prime_list[-1])
prime_position(10001)
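# A faster trial-division sketch (divisors only need to be checked up to
# sqrt(n)); kept separate so the original solution above stays untouched.
def is_prime_fast(n):
    if n < 2:
        return False
    for x in range(2, int(n ** 0.5) + 1):
        if n % x == 0:
            return False
    return True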
|
[
"jackpound@live.com"
] |
jackpound@live.com
|
5cd5169112b0e7cc5061b202aed603c35d5262cf
|
5ebdbc630bfdfc582a41d7e353e517604ab336ab
|
/Exec/SCIENCE/code_comp/analysis/plot_generator.py
|
db7190719ae0bfc4dc05d7b4d477646547dcf717
|
[
"BSD-3-Clause"
] |
permissive
|
pgrete/MAESTROeX
|
661fd437caa1508dbc910772ba4d6ed8b551176a
|
1d7e89365379eea57680f738f271c93d7f28e513
|
refs/heads/master
| 2020-08-23T17:24:46.488221
| 2019-11-01T21:31:27
| 2019-11-01T21:31:27
| 216,671,997
| 0
| 0
|
BSD-3-Clause
| 2019-10-21T21:52:22
| 2019-10-21T21:52:22
| null |
UTF-8
|
Python
| false
| false
| 7,139
|
py
|
#!/usr/bin/env python3
import yt
from yt.units import amu, cm
import os
import sys
import glob
import argparse
import numpy as np
import string
from collections import namedtuple
from functools import reduce
def parse_args():
# Argument information
description = """Generates plots of datasets using a specified yt plot function. Works with any slice or projection
plot, as well as ParticlePlot."""
datasets_help = "A list of datasets to be loaded by yt. Will be sorted by plot number by default."
func_help = "The plotting function to use. SlicePlot by default."
out_help = "The desired output directory for the image files."
var_help = "The variable to plot. Set to 'Temp' by default."
bounds_help = "The bounds for the colorbar."
cmap_help = "The colormap for the variable to plot."
log_help = "If provided, sets the plot to a logarithmic scale."
linthresh_help = "If provided, sets the linear threshold for a symlog plot"
time_help = "If provided, adds a timestamp to each plot with the given precision."
ext_help = "The extension of the file format to save to. PNG by default."
sort_help = """A floating point number specifying the digits to sort file names by. Digits preceding the decimal point
give the starting index, digits following the decimal point give the number of characters. Make negative for
descending order."""
xlim_help = "The x-axis limits."
ylim_help = "The y-axis limits."
zlim_help = "The z-axis limits."
normal_help = "The normal direction"
# Construct parser and parse
parser = argparse.ArgumentParser(description=description)
parser.add_argument('datasets', nargs='*', help=datasets_help)
parser.add_argument('-f', '--func', default='SlicePlot', help=func_help)
parser.add_argument('-o', '--out', default='', help=out_help)
parser.add_argument('-v', '--var', default='Temp', help=var_help)
parser.add_argument('-b', '--bounds', nargs=2, type=float, metavar=('LOWER', 'UPPER'), help=bounds_help)
parser.add_argument('-c', '--cmap', metavar=('NAME',), help=cmap_help)
parser.add_argument('--log', action='store_true', help=log_help)
parser.add_argument('--linthresh', type=float, help=linthresh_help)
parser.add_argument('-t', '--time', type=int, metavar=('PRECISION',), help=time_help)
parser.add_argument('-e', '--ext', type=lambda s: s.lower(), default='png', help=ext_help)
parser.add_argument('-s', '--sort', type=float, default=0.0, help=sort_help)
parser.add_argument('-x', '--xlim', nargs=2, type=float, metavar=('UPPER', 'LOWER'), help=xlim_help)
parser.add_argument('-y', '--ylim', nargs=2, type=float, metavar=('UPPER', 'LOWER'), help=ylim_help)
parser.add_argument('-z', '--zlim', nargs=2, type=float, metavar=('UPPER', 'LOWER'), help=zlim_help)
parser.add_argument('-n', '--normal', default='z', help=normal_help)
return parser.parse_args(sys.argv[1:])
def plot_generator(args):
coloropts = ['field_color', 'cmap', 'display_threshold', 'cbar']
ColorOpt = namedtuple('ColorOpt', field_names=coloropts)
optdict = dict(field_color=None, display_threshold=None, cmap=None, cbar=False)
color_opt = ColorOpt(**optdict)
# Make output directory
if not args.out:
args.out = os.getcwd()
if not os.path.exists(args.out):
os.makedirs(args.out)
# Grab files from working directory if none were specified
ts = args.datasets
if not ts:
ts = glob.glob('plt*')
# Exit if nothing could be loaded
if len(ts) < 1:
sys.exit("No files were available to be loaded.")
# Sort and load files
desc = args.sort < 0
start = abs(int(args.sort))
nchars = int(str(args.sort).split('.')[1])
if nchars == 0:
key = lambda fname: fname[start:]
else:
key = lambda fname: fname[start:start + nchars]
ts.sort(key=key, reverse=desc)
tf = lambda file: yt.load(file.rstrip('/'))
ts = list(map(tf, ts))
print("Successfully loaded the following files: {}\n".format(ts))
# Generate plots
func = getattr(yt, args.func)
field = args.var
def get_width(ds, xlim=None, ylim=None, zlim=None):
""" Get the width of the plot. """
if xlim is None:
xlim = ds.domain_left_edge[0], ds.domain_right_edge[0]
else:
xlim = xlim[0] * cm, xlim[1] * cm
if ylim is None:
ylim = ds.domain_left_edge[1], ds.domain_right_edge[1]
else:
ylim = ylim[0] * cm, ylim[1] * cm
xwidth = (xlim[1] - xlim[0]).in_cgs()
ywidth = (ylim[1] - ylim[0]).in_cgs()
if ds.domain_dimensions[2] == 1:
zwidth = 0.0
else:
if zlim is None:
zlim = ds.domain_left_edge[2], ds.domain_right_edge[2]
else:
zlim = zlim[0] * cm, zlim[1] * cm
zwidth = (zlim[1] - zlim[0]).in_cgs()
return xwidth, ywidth, zwidth
def get_center(ds, xlim=None, ylim=None, zlim=None):
""" Get the coordinates of the center of the plot. """
if xlim is None:
xlim = ds.domain_left_edge[0], ds.domain_right_edge[0]
else:
xlim = xlim[0] * cm, xlim[1] * cm
if ylim is None:
ylim = ds.domain_left_edge[1], ds.domain_right_edge[1]
else:
ylim = ylim[0] * cm, ylim[1] * cm
xctr = 0.5 * (xlim[0] + xlim[1])
yctr = 0.5 * (ylim[0] + ylim[1])
if ds.domain_dimensions[2] == 1:
zctr = 0.0
else:
if zlim is None:
zlim = ds.domain_left_edge[2], ds.domain_right_edge[2]
else:
zlim = zlim[0] * cm, zlim[1] * cm
zctr = 0.5 * (zlim[0] + zlim[1])
return xctr, yctr, zctr
print("Generating...")
# Loop and generate
for ds in ts:
settings = {}
settings['center'] = get_center(ds, args.xlim, args.ylim)
settings['width'] = get_width(ds, args.xlim, args.ylim)
settings['normal'] = args.normal
plot = func(ds, fields=field, **settings)
if args.cmap:
plot.set_cmap(field=field, cmap=args.cmap)
if args.linthresh:
plot.set_log(field, args.log, linthresh=args.linthresh)
else:
plot.set_log(field, args.log)
# print(args.bounds)
# sys.exit()
if args.bounds is not None:
plot.set_zlim(field, *args.bounds)
if args.time:
time_format = f't = {{time:.{args.time}f}}{{units}}'
plot.annotate_timestamp(corner='upper_left', time_format=time_format,
time_unit='s', draw_inset_box=True, inset_box_args={'alpha': 0.0})
suffix = args.func.replace('Plot', '').lower()
plot.save(os.path.join(args.out, f'{ds}_{field.translate(str.maketrans("","", string.punctuation))}_{suffix}.{args.ext}'))
print()
print("Task completed.")
if __name__ == "__main__":
args = parse_args()
plot_generator(args)
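# Example invocation (hypothetical plotfile names, shown for reference only):
#   python plot_generator.py plt00000 plt00100 -v Temp --log -t 3 -o ./frames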
|
[
"aliceharpole@gmail.com"
] |
aliceharpole@gmail.com
|
2489e16d137ef37dbe2ade7e983532c88dcf0e31
|
50c0726bb32c8b0229d4b354f95e4bf654faa374
|
/Proyect-Lovelace/Molecular_Mass_Calculator.py
|
231e21b22291e47128b97b9e2b0759d8985a4e4e
|
[] |
no_license
|
JorgeAvilaG/programming-problems
|
291dec9e90aebf3a7643082a5605b495d288253e
|
1be1d845589bb430106c023ba5b10ae3a3517a1d
|
refs/heads/master
| 2020-04-18T23:41:35.703093
| 2019-01-27T16:11:50
| 2019-01-27T16:11:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,034
|
py
|
# This problem asks for a function that calculates the molecular mass of different compounds.
# This version passes the Proyect Lovelace tests, but it will not work with complex formulas
# or special nomenclature, e.g. '(NCH3)3PO2' or 'Ph3N'.
import csv
import re
#A dictionary is used to load the atomic mass for each element.
elements = {}
with open('periodic_table.csv') as csvfile:
periodic_table_reader = csv.reader(csvfile, delimiter=',')
for row in periodic_table_reader:
elements[row[0]] = float(row[1])
# The function uses a regex to identify each element and the number of times it appears.
def molecular_mass(chemical_formula):
mass = 0
    atoms = re.findall(r'[A-Z][a-z]?\d*', chemical_formula)
for atom in atoms:
        element = re.search(r'[A-Z][a-z]?', atom)[0]
        number = re.search(r'\d+', atom)
        if number:
            number = int(number[0])
else:
number = 1
mass += elements[element]*number
return mass
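# A small usage sketch (assumes periodic_table.csv provides masses for H, O,
# Na and Cl; the expected values in the comments are approximate).
if __name__ == '__main__':
    print(molecular_mass('H2O'))   # ~18.02
    print(molecular_mass('NaCl'))  # ~58.44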
|
[
"noreply@github.com"
] |
JorgeAvilaG.noreply@github.com
|