blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
56f86dc6c0a259ca285bd88a0154b64f93efce6c | ae8d8bcf32c44807dd2754a6452fb58b5e5e1b20 | /app/main/__init__.py | c935b683268f5f9972b9bd8fc6c3cf941bdd315e | [] | no_license | pa564/FlaskProject_HelloWeb | 1faed891c839756215e6dfc15db9fbddd76e1c8e | e915fee7a5332c6f932bae1725bd90e8b9317c42 | refs/heads/master | 2021-01-01T16:55:04.002692 | 2017-07-28T14:49:41 | 2017-07-28T14:49:41 | 97,953,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | from flask import Blueprint
main = Blueprint('main', __name__)
'''通过实例化一个 Blueprint 类对象可以创建蓝本。这个构造函数有两个必须>指定的参数:蓝本的名字和蓝本所在的包或模块。和程序一样,大多数情况下第>二个参数使用 Python 的__name__ 变量即可。'''
from . import views, errors
'''程序的路由保存在包里的 app/main/views.py 模块中,而错误处理程序保存在 app/main/errors.py 模块中。导入这两个模块就能把路由和错误处理程序与蓝本关联起来。注意,这些模块在 app/main/__init__.py 脚本的末尾导入,这是为了避免循环导入依赖,因为在views.py 和 errors.py 中还要导入蓝本 main 。'''
| [
"554155355@qq.com"
] | 554155355@qq.com |
bfa4d9252d274dec3e0ce43f769c32937b556bd2 | c646ad2dfab80f7183076dde82a82e6e1a6222d2 | /athenatools/migrations/0020_auto_20190310_1740.py | be477bcef3efdd5ba21adaa8f83cd8f2ee03bde9 | [
"MIT"
] | permissive | taojy123/AthenaTools | b7f5a799dca60237fb69f312f5a913964ae00097 | 612b113c5c9aeb0e6612242540fa05b7f0ac02c5 | refs/heads/master | 2023-07-21T06:32:15.638271 | 2023-07-19T09:42:01 | 2023-07-19T09:42:01 | 141,523,525 | 9 | 2 | MIT | 2023-06-30T22:19:13 | 2018-07-19T04:14:20 | Python | UTF-8 | Python | false | false | 662 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-03-10 09:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Purchase.storage into a CharField with a fixed set of choices."""
    # Must run after migration 0019 of the athenatools app.
    dependencies = [
        ('athenatools', '0019_auto_20190310_1737'),
    ]
    operations = [
        migrations.AlterField(
            model_name='purchase',
            name='storage',
            # Choices/default are unicode escapes for the Chinese labels
            # "room temperature" / "frozen" / "refrigerated"; verbose_name is
            # the UTF-8-encoded bytes for "storage method".
            field=models.CharField(choices=[('\u5ba4\u6e29', '\u5ba4\u6e29'), ('\u51b7\u51bb', '\u51b7\u51bb'), ('\u51b7\u85cf', '\u51b7\u85cf')], default='\u5ba4\u6e29', max_length=255, verbose_name=b'\xe8\xb4\xae\xe8\x97\x8f\xe6\x96\xb9\xe5\xbc\x8f'),
        ),
    ]
| [
"taojy123@163.com"
] | taojy123@163.com |
6049517f851c3bd08be8b1b02ae13dad86e973bf | 45b0104bdf4dfb4b49328787c225d3fd360c306e | /messages.py | 85cd6d9f2cb5182bcb2b00e3f5c62be123d8d9ee | [] | no_license | anishdhandore/Discord-Bot | da60161de653186e1f9c16f59a67049c49c4f003 | aac8ad7d64c61b0f3d3be1d33432defaebd27c2a | refs/heads/main | 2023-06-27T19:15:03.333582 | 2021-08-01T18:46:02 | 2021-08-01T18:46:02 | 391,691,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | import random
# Keyword lists used to classify incoming chat messages by intent/mood.
greetings = ["hi", "hello", "hey"]
sad = ["unhappy", "sad", "dejected", "lonely", "heartbroken", "hopeless", "grieved", "lost", "disgusted",
       "troubled", "resigned"]
happy = ["happy", "cheerful", "ecstatic", "elated", "joyous", "pleased", "overjoyed", "delighted"]
health = ["how are you", "how is your health"]
activities = ["what's up", "what are you doing", "what are you upto", "wassup", "waddup"]
# Canned replies the bot picks from when asked what it is doing.
activity_answers = ["talking to you, sir!", "nm, you say", "bots don't have much to do"]
facts = ["Two stars"]
# print(random.choice(health))
"anish.dhandore@gmail.com"
] | anish.dhandore@gmail.com |
f2b7baa30891dd9758879e169749092388203448 | 7cf793abff5c938022f296b8919c80e09b4c8c41 | /titanic/model.py | b2851b56e60408c49b66d5425b57052b7df96589 | [] | no_license | thelostpeace/kaggle | ac4571ace5b75ac155e3b3693987c19f11f5bc50 | cec4561302755054d71ab78a0354ad42dd732819 | refs/heads/master | 2022-12-09T17:08:16.326650 | 2020-09-15T07:40:57 | 2020-09-15T07:40:57 | 286,690,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,718 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.base import TransformerMixin
import glob
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.utils.multiclass import type_of_target
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVC
from xgboost import XGBRegressor
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
data = pd.read_csv('data/train.csv')
label_sex = LabelEncoder()
data.Sex = label_sex.fit_transform(data.Sex)
label_embarked = LabelEncoder()
embarked = data[data.Embarked.isnull() == False]
data.loc[embarked.index, 'Embarked'] = label_embarked.fit_transform(embarked.Embarked)
def map_cabin(val):
def remove_char(s, d):
if isinstance(val, str):
for c in d:
s = s.replace(c, '')
return ''.join(set(s))
else:
return val
return remove_char(val, " 0123456789")
data.Cabin = data.Cabin.map(map_cabin)
label_cabin = LabelEncoder()
cabin = data[data.Cabin.isnull() == False]
data.loc[cabin.index, 'Cabin'] = label_cabin.fit_transform(cabin.Cabin)
#fill missing embarked
embarked_train = data[data.Embarked.isnull() == False][['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']]
embarked_x = embarked_train.drop(columns=['Embarked'])
embarked_y = embarked_train['Embarked'].astype('int64')
params = {
'n_estimators': range(100, 1000, 100),
'max_depth': [2,3,4,5,6],
'gamma': np.arange(0, 5, 0.5),
'min_child_weight': range(1, 6, 1),
'subsample': np.arange(0.6, 1, 0.1),
'colsample_bytree': np.arange(0.1, 1, 0.1)
}
xgb = XGBClassifier(learning_rate=0.02, objective='multi:softmax', n_jobs=6, num_class=3)
model_embarked = RandomizedSearchCV(xgb, param_distributions=params, n_iter=10, n_jobs=6, cv=StratifiedKFold(shuffle=True), verbose=3, random_state=1992)
model_embarked.fit(embarked_x, embarked_y)
embarked_fill = data[data.Embarked.isnull() == True][['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare']]
data.loc[embarked_fill.index, 'Embarked'] = model_embarked.predict(embarked_fill)
# fill missing Cabin
cabin_train = data[data.Cabin.isnull() == False][['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Cabin']]
cabin_x = cabin_train.drop(columns=['Cabin'])
cabin_y = cabin_train['Cabin'].astype('int64')
xgb = XGBClassifier(learning_rate=0.02, objective='multi:softmax', n_jobs=6, num_class=9)
model_cabin = RandomizedSearchCV(xgb, param_distributions=params, n_iter=10, n_jobs=6, cv=StratifiedKFold(shuffle=True), verbose=3, random_state=1992)
model_cabin.fit(cabin_x, cabin_y)
cabin_fill = data[data.Cabin.isnull() == True][['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']]
data.loc[cabin_fill.index, 'Cabin'] = model_cabin.predict(cabin_fill)
# fill missing fare
fare_train = data[data.Fare != 0.][['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']]
fare_x = fare_train.drop(columns=['Fare'])
fare_y = fare_train['Fare']
xgb = XGBRegressor(learning_rate=0.02, objective='reg:squarederror', n_jobs=6)
model_fare = RandomizedSearchCV(xgb, param_distributions=params, n_iter=10, n_jobs=6, cv=KFold(shuffle=True), verbose=3, random_state=1992)
model_fare.fit(fare_x, fare_y)
fare_fill = data[data.Fare == 0.][['Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked']]
data.loc[fare_fill.index, 'Fare'] = model_fare.predict(fare_fill)
# fill missing age
age_train = data[data.Age.isnull() == False][['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Age']]
age_x = age_train.drop(columns=['Age'])
age_y = age_train['Age']
xgb = XGBRegressor(learning_rate=0.02, objective='reg:squarederror', n_jobs=6)
model_age = RandomizedSearchCV(xgb, param_distributions=params, n_iter=10, n_jobs=6, cv=KFold(shuffle=True), verbose=3, random_state=1992)
model_age.fit(age_x, age_y)
age_fill = data[data.Age.isnull() == True][['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']]
data.loc[age_fill.index, 'Age'] = model_age.predict(age_fill)
# now we predict Survived
train_x = data.drop(columns=['PassengerId', 'Survived', 'Name', 'Ticket'])
train_y = data['Survived']
xgb = XGBClassifier(learning_rate=0.02, objective='binary:logistic', n_jobs=6)
model = RandomizedSearchCV(xgb, param_distributions=params, n_iter=10, n_jobs=6, cv=StratifiedKFold(shuffle=True), verbose=3, random_state=1992)
model.fit(train_x, train_y)
data = pd.read_csv('data/test.csv')
test_x = data.drop(columns=['PassengerId', 'Name', 'Ticket'])
test_x.Sex = label_sex.transform(test_x.Sex)
test_x.Embarked = label_embarked.transform(test_x.Embarked)
fare_fill = test_x[test_x.Fare == 0.][['Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked']]
test_x.loc[fare_fill.index, 'Fare'] = model_fare.predict(fare_fill)
test_x.Cabin = test_x.Cabin.map(map_cabin)
cabin = test_x[test_x.Cabin.isnull() == False]
test_x.loc[cabin.index, 'Cabin'] = label_cabin.transform(cabin.Cabin)
cabin_fill = test_x[test_x.Cabin.isnull() == True][['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']]
test_x.loc[cabin_fill.index, 'Cabin'] = model_cabin.predict(cabin_fill)
age_fill = test_x[test_x.Age.isnull() == True][['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']]
test_x.loc[age_fill.index, 'Age'] = model_age.predict(age_fill)
data['Survived'] = model.predict(test_x)
data.to_csv('result.csv', columns=['PassengerId', 'Survived'], index=False)
| [
"charlesliu.cn.bj@gmail.com"
] | charlesliu.cn.bj@gmail.com |
e7d82df61207595abcb4af0910991cfd3ad13011 | 6445a05c5562a01ded6eafd31cdc27c781592732 | /src/nira/common/utility.py | 3d263001fdc57a288574587faae5dcc2bc4bfb89 | [] | no_license | yaksea/nira | 54d51831f95681697517f1a8571c726212d5b9dd | 76f155224efc947f4616ad34c9dc4cbea4d6828b | refs/heads/master | 2016-08-03T16:14:08.409992 | 2014-05-27T02:45:49 | 2014-05-27T02:45:49 | 20,204,599 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,595 | py | #encoding=utf-8
'''
Created on 2012-2-9
@author: Administrator
'''
import socket
import time
import traceback
import os
import string
import urlparse
import re
import json
import datetime
import inspect
#方法一
def getLocalIP():
    """Print this host's IP address(es) (Python 2: uses print statements).

    Method 1: resolve the local hostname; gethostbyname may return
    127.0.0.1 depending on /etc/hosts.  Method 2: resolve the FQDN.
    """
    localIP = socket.gethostbyname(socket.gethostname())# IP resolved from the hostname
    print "local ip:%s " % localIP
    ipList = socket.gethostbyname_ex(socket.gethostname())
    # NOTE(review): gethostbyname_ex returns (name, aliases, addresses);
    # this iterates that 3-tuple, not the address list - confirm intent.
    for i in ipList:
        if i != localIP:
            print "external IP:%s" % i
    # Method 2: look up via the fully-qualified domain name.
    myname = socket.getfqdn(socket.gethostname())
    print socket.gethostbyname(myname)
#上面的方法在Linux下也可以使用,除此之外,Linux下还可以用下面的方法得到本机IP地址。
#
#Uses the Linux SIOCGIFADDR ioctl to find the IP address associated with a network interface, given the name of that interface, e.g. “eth0”. The address is returned as a string containing a dotted quad.
#import fcntl
#import struct
#
#def get_ip_address(ifname):
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# return socket.inet_ntoa(fcntl.ioctl(
# s.fileno(),
# 0x8915, # SIOCGIFADDR
# struct.pack('256s', ifname[:15])
# )[20:24])
def subDict(somedict, somekeys, default=None):
    """Return a new dict restricted to somekeys; missing keys map to default."""
    return {key: somedict.get(key, default) for key in somekeys}
#def sub_dict_remove(somedict, somekeys, default=None):
# return dict([ (k, somedict.pop(k, default)) for k in somekeys ])
DATETIME_FORMAT = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d', '%m-%d')
# Unix timestamp -> formatted local-time string.
def getFormattedTime(floatTime, type=0): # type: 0=datetime to seconds, 1=to minutes, 2=date, 3=month-day
    """Format a Unix timestamp with DATETIME_FORMAT[type] (module-level tuple), in local time."""
    return time.strftime(DATETIME_FORMAT[type], time.localtime(floatTime))
# Formatted date string -> Unix timestamp.
def getTimeFromStr(dateStr, type=0): # type: 0=datetime to seconds, 1=to minutes, 2=date
    """Parse dateStr with DATETIME_FORMAT[type] (module-level tuple); return a local-time Unix timestamp."""
    d = datetime.datetime.strptime(dateStr, DATETIME_FORMAT[type])
    return time.mktime(d.timetuple())
def getDateCode():
    """Current local date as a 'YYYYmmdd' string."""
    # strftime without an explicit time argument formats the current local time.
    return time.strftime('%Y%m%d')
def getMinuteCode():
    """Current local time as a 'YYYYmmddHHMM' string."""
    return time.strftime('%Y%m%d%H%M')
def getSecondCode():
    """Current local time as a 'YYYYmmddHHMMSS' string."""
    return time.strftime('%Y%m%d%H%M%S')
def addDays(baseDatetime, days):
    """Return the Unix timestamp of baseDatetime shifted by `days` days.

    baseDatetime may be a date/datetime object or a numeric Unix
    timestamp.  Uses isinstance rather than the old `type(x) in (int,
    float)` test, so numeric subclasses are accepted as well.
    """
    if isinstance(baseDatetime, (int, float)):
        baseDatetime = datetime.date.fromtimestamp(baseDatetime)
    return time.mktime((baseDatetime + datetime.timedelta(days=days)).timetuple())
def unionListKeepOrder(list1, list2):
    """In-place ordered union: append the items of list2 missing from list1.

    Keeps list1's order and - unlike the previous `set(list2)-set(list1)`
    version, whose appended items came out in arbitrary set iteration
    order - also preserves the relative order of the appended list2 items.
    Mutates and returns list1.
    """
    seen = set(list1)
    for item in list2:
        if item not in seen:
            list1.append(item)
            seen.add(item)
    return list1
import uuid
def getVersion():
    """Millisecond-resolution Unix timestamp, used as a monotonic version number."""
    millis = time.time() * 1000
    return int(millis)
def getUUID():
    """Return a time-based UUID1 as a 32-char lowercase hex string (no dashes)."""
    # UUID.hex is exactly str(uuid) with the dashes removed.
    return uuid.uuid1().hex
def tryParse(value, type, defaultValue=None):
    """Convert value with the given constructor; return defaultValue on failure.

    Only conversion failures (ValueError/TypeError) are swallowed; the
    previous bare `except:` also hid unrelated errors such as
    KeyboardInterrupt and SystemExit.
    """
    try:
        return type(value)
    except (ValueError, TypeError):
        return defaultValue
def parseUrlParams(url):
    """Parse the query string of url into a dict.

    Single-valued parameters are flattened to their scalar string;
    parameters appearing multiple times keep their list of values.
    """
    query = urlparse.urlparse(url).query
    raw = urlparse.parse_qs(query)
    return {name: values[0] if len(values) == 1 else values
            for name, values in raw.items()}
comment_re = re.compile(
'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
def parse_json(filename):
    """Parse a JSON file that may contain C-style comments.

    Comments of the form `// ...` or `/* ... */` are stripped first
    (using the module-level `comment_re` pattern), then the remainder is
    parsed with the json module.
    """
    with open(filename) as f:
        content = ''.join(f.readlines())
    # Repeatedly delete the first comment match until none remain.
    match = comment_re.search(content)
    while match:
        # Cut the matched comment out of the content.
        content = content[:match.start()] + content[match.end():]
        match = comment_re.search(content)
    # Parse the now comment-free JSON text.
    return json.loads(content)
#transTable = string.maketrans('','')
#
#def stringPickout(str1, str2):
# return str1.translate(transTable, str2)
not_letters_or_digits = u'!"#$%^&\'()*+,./:;<=>?@[\]^`{|}~'
translate_table = dict((ord(char), u'') for char in not_letters_or_digits)
def pickout_non_alphanumerics(rawStr):
    """Delete the punctuation characters listed in the module-level
    translate_table from rawStr (expects a unicode string)."""
    return rawStr.translate(translate_table)
def parseVersion(versionStr):
    """Convert a dotted version string into one comparable integer.

    Each component after the first is zero-padded to four digits, so
    '1.3.0' -> int('1' + '0003' + '0000') = 100030000.  A dotless string
    is converted directly.  Returns 0 (after printing the traceback) for
    malformed input; previously only the final int() was guarded, so
    inputs like '1.x.0' raised out of the per-component int() call.
    """
    try:
        if versionStr.find('.') > 0:
            verArr = versionStr.split('.')
            verTemp = verArr[0]
            for ver in verArr[1:]:
                verTemp += '%04d' % int(ver)
        else:
            verTemp = versionStr
        return int(verTemp)
    except (ValueError, TypeError):
        traceback.print_exc()
        return 0
def versionCompare(version1, version2):  # -1: less, 0: equal, 1: greater
    """Three-way comparison of two version strings via parseVersion."""
    parsed1 = parseVersion(version1)
    parsed2 = parseVersion(version2)
    if parsed1 > parsed2:
        return 1
    if parsed1 < parsed2:
        return -1
    return 0
def urlJoin(url1, url2):
    """Join two URL fragments with exactly one '/' between them."""
    left = url1.strip('/')
    right = url2.strip('/')
    return left + '/' + right
def getObjFromFile(path):
    # path is relative to the nira package root, e.g. "data/demo/anniversary.json".
    # Delegates to parse_json, which strips //- and /*-style comments first.
    return parse_json('%s/../%s'%(os.path.dirname(__file__), path))
class EmptyClass(object):
    """Attribute bag: toJson() collects public, non-method attributes."""
    def __str__(self):
        return str(self.toJson())
    def toJson(self):
        """Return a dict of every public attribute that is not a bound method."""
        public_names = (name for name in dir(self) if not name.startswith('_'))
        return {name: getattr(self, name)
                for name in public_names
                if not inspect.ismethod(getattr(self, name))}
if __name__ == '__main__':
    # Manual smoke test (Python 2 print statement); the historical
    # commented-out experiments that accumulated here have been removed.
    print getFormattedTime(1357627402)
    pass
| [
"yaksea@gmail.com"
] | yaksea@gmail.com |
90d927ab56ec4de577635936ff751a48e36362a3 | 92a945c252d524e114c0be2f11c0c3dd0472c6dc | /Data Visualization/testing.py | 27e687d06aa0e4f592f1bba3edd71ba95b29a64a | [] | no_license | manojl711/demo | 5e061ceee3e1d4ddbfc89a3f4c06e4dc0fee91dd | 3dcf9b82fd938e2692e99ccbe13632c0c31cdc83 | refs/heads/master | 2020-08-08T18:52:33.476029 | 2019-10-09T11:41:09 | 2019-10-09T11:41:09 | 213,893,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | x=5 #todo write full function
print (x)
#todo read full oops
| [
"noreply@github.com"
] | manojl711.noreply@github.com |
5824796494ad3a28bb8813faf3037c2c1b03f5c8 | 0f56217c7ee34f79cc484fa04ab0688ffe9d044a | /src/main/python/secure_all/storage/json_store.py | 947967b87df10250ff7f3237f0cd2957de708420 | [] | no_license | 100429115/G82.T18.FP | 5379307cedbe0c0810a6806930751b96dad76bdd | e3afad4febffdf2fa0ec06f9ded9d982cd83e199 | refs/heads/main | 2023-05-04T01:37:56.309936 | 2021-05-21T15:43:52 | 2021-05-21T15:43:52 | 368,489,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | """Generic class for JSON storage"""
import json
from secure_all.exception.access_management_exception import AccessManagementException
class JsonStore():
    """Generic store persisted in a JSON file.

    Subclasses configure _FILE_PATH (the backing file) and _ID_FIELD
    (the dictionary key used by find_item to identify records).
    """
    _FILE_PATH = ""  # path of the backing JSON file (overridden by subclasses)
    _ID_FIELD = ""  # record key used as the lookup identifier (overridden by subclasses)
    def __init__(self):
        # In-memory list of record dicts, kept in sync with the file.
        self._data_list = []
        self.load_store()
    def empty_store(self):
        """Discard every record and persist the now-empty store."""
        self._data_list = []
        self.save_store()
    def load_store(self):
        """Load _data_list from the JSON file.

        A missing file is not an error: the store simply starts empty.
        Malformed JSON is reported as an AccessManagementException.
        """
        try:
            with open(self._FILE_PATH, "r", encoding="utf-8", newline="") as file:
                self._data_list = json.load(file)
        except FileNotFoundError as ex:
            # First use: no backing file yet, start with an empty list.
            self._data_list = []
        except json.JSONDecodeError as ex:
            raise AccessManagementException("JSON Decode Error - Wrong JSON Format") from ex
    def add_item(self, item):
        """Append item.__dict__ to the store and save it.

        Reloads the file first so earlier external additions are kept.
        This generic class performs no validation - specific stores should.
        """
        self.load_store()
        self._data_list.append(item.__dict__)
        self.save_store()
    def add_item2(self, item):
        """Append a plain, already-serialisable dict to the store and save it."""
        # pylint: disable=import-outside-toplevel,cyclic-import
        self.load_store()
        self._data_list.append(item)
        self.save_store()
    def find_item(self, key):
        """Return the first record whose _ID_FIELD equals key, or None."""
        self.load_store()
        for item in self._data_list:
            if item[self._ID_FIELD] == key:
                return item
        return None
    def save_store(self):
        """Write _data_list to _FILE_PATH as pretty-printed JSON.

        Raises AccessManagementException when the path cannot be opened.
        """
        try:
            with open(self._FILE_PATH, "w", encoding="utf-8", newline="") as file:
                json.dump(self._data_list, file, indent=2)
        except FileNotFoundError as ex:
            raise AccessManagementException("Wrong file or file path") from ex
| [
"100429115@alumnos.uc3m.es"
] | 100429115@alumnos.uc3m.es |
25c179044cf74b0b9882aff57aff4f752a9b648e | 2ed516312ade74f8a66b8ede065615daae54625c | /twitter_tweet.py | 835710a8a63dcd45b2c5b54e836417eab9b5669c | [] | no_license | takashaaark/ai_blog | 88cf28e4d6a837b6a5e3ef640325a16447e3d165 | 4e08ea807d338b1cc32445de1ef769a370a2e3fa | refs/heads/master | 2021-01-15T11:40:09.158824 | 2017-08-08T00:44:41 | 2017-08-08T00:44:41 | 99,633,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,652 | py | import json
from requests_oauthlib import OAuth1Session
from twitter_settings import *
from datetime import datetime
def send_secure_request(png_address):
    """Upload the PNG at png_address to Twitter, then tweet it as a
    mention to the hard-coded @takashaaark account.

    Exits the whole process on any non-200 API response.  Credentials
    come from twitter_settings (star import at module level).
    """
    url_media = "https://upload.twitter.com/1.1/media/upload.json"
    url_text = "https://api.twitter.com/1.1/statuses/update.json"
    # Start an OAuth1 session with the app/user credentials.
    twitter = OAuth1Session(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    # Upload the image.  NOTE(review): this file handle is never closed.
    files = {"media": open(png_address, 'rb')}
    req_media = twitter.post(url_media, files=files)
    # Check the upload response.
    # NOTE(review): print("...: %s", req_media.text) passes two arguments
    # to print instead of %-formatting - probably meant "..." % req_media.text.
    if req_media.status_code != 200:
        print("画像アップデート失敗: %s", req_media.text)
        exit()
    # Extract the media id from the upload response.
    media_id = json.loads(req_media.text)['media_id']
    print("Media ID: %d" % media_id)
    # Post the tweet text together with the uploaded media id.
    target = 'takashaaark'
    message = '画像認証です。よろしく頼みます'
    text = '@' + target + ' ' + message
    params = {'status': text, "media_ids": [media_id]}
    req_media = twitter.post(url_text, params=params)
    # Check the status-update response.
    if req_media.status_code != 200:
        print("テキストアップデート失敗: %s", req_media.text)
        exit()
    print("送信完了:", text)
def tweet_sentence(message):
    """Tweet `message` with a UTC timestamp appended on a new line.

    Exits the process on a non-200 response.  Credentials come from
    twitter_settings (star import at module level).
    """
    # NOTE(review): datetime.utcnow() - the appended stamp is UTC, not local;
    # confirm that is intended.
    tweet_time = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    url_text = "https://api.twitter.com/1.1/statuses/update.json"
    # Start an OAuth1 session with the app/user credentials.
    twitter = OAuth1Session(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    text = message + '\n' + tweet_time
    # Post the tweet text.
    params = {'status': text}
    req_media = twitter.post(url_text, params=params)
    # Check the response.
    if req_media.status_code != 200:
        print("テキストアップデート失敗: %s", req_media.text)
        exit()
    print("TWEET:", message)
def p_tweet_sentence(message):
    """
    Tweet `message` on the public account (uses the P_* credentials
    from twitter_settings).  Exits the process on a non-200 response.
    """
    url_text = "https://api.twitter.com/1.1/statuses/update.json"
    # Start an OAuth1 session with the public-account credentials.
    twitter = OAuth1Session(P_CONSUMER_KEY, P_CONSUMER_SECRET, P_ACCESS_TOKEN, P_ACCESS_TOKEN_SECRET)
    text = message
    # Post the tweet text.
    params = {'status': text}
    req_media = twitter.post(url_text, params=params)
    # Check the response.
    if req_media.status_code != 200:
        print("テキストアップデート失敗: %s", req_media.text)
        exit()
    print("TWEET:", message)
if __name__ == "__main__":
pass
| [
"T@T-no-MacBook-Pro.local"
] | T@T-no-MacBook-Pro.local |
c9cfa6125a47739e3b072a7f43bd89eec1d1bdaa | 78c60592fcdf250277777c035d9d86f647ed0723 | /pandas/konelpy-wordcloud/konelpy5movie.py | 52b47e089871d9006b28f6d8069f5d18cbc2cdb5 | [] | no_license | pyh3887/Python-Pandas | ba30428e45e0409fbea9babba0c95f65bfd18359 | 9bb998ecfc86f0894fc73ab77f7faf3d4fe174a7 | refs/heads/master | 2022-09-19T09:43:36.905666 | 2020-06-04T02:04:20 | 2020-06-04T02:04:20 | 267,988,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,500 | py | from bs4 import BeautifulSoup
import requests
from konlpy.tag import Okt
from collections import Counter
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
okt = Okt()
def movie_scrap(url):
    """Scrape the first 10 pages of Naver movie reviews for `url`.

    Returns all review texts concatenated into one string, with layout
    characters, separators and movie-title words stripped out.
    """
    result = []
    for p in range(10):
        r = requests.get(url + '&page=' + str(p))
        # The page is EUC-KR (ms949); tell BeautifulSoup how to decode it.
        soup = BeautifulSoup(r.content,'lxml',from_encoding='ms949')
        #print(soup)
        title = soup.find_all('td',{'class':'title'})
        #print(title)
        sub_result = []
        for i in range(len(title)) :
            sub_result.append(title[i].text
                            .replace('\r','')
                            .replace('\n','')
                            .replace('\t','')
                            .replace('\신고','')
                            .replace('-','')
                            .replace('...','') # strip unneeded tokens
                            .replace('?','')
                            .replace('여곡성','')
                            .replace('고양이의 보은','')
                            .replace('날씨의 아이','')
                            .replace('알 포인트','')
                            .replace('코코','')
                            .replace('영화','')
                            )
        result = result + sub_result
    return(''.join(result))
yeogoksung = movie_scrap('https://movie.naver.com/movie/point/af/list.nhn?st=mcode&sword=171750&target=after')
rpoin = movie_scrap('https://movie.naver.com/movie/point/af/list.nhn?st=mcode&sword=37261&target=after')
nalci = movie_scrap('https://movie.naver.com/movie/point/af/list.nhn?st=mcode&sword=181114&target=after')
coco = movie_scrap('https://movie.naver.com/movie/point/af/list.nhn?st=mcode&sword=151728&target=after')
catmovie = movie_scrap('https://movie.naver.com/movie/point/af/list.nhn?st=mcode&sword=37073&target=after')
movies = [yeogoksung,rpoin,nalci,catmovie,coco]
print(movies)
words_basket = []
for mov in movies:
words = okt.pos(mov)
for word in words:
if(word[1] in ['Noun','Adjective'] and len(word[0])>= 2): # 명사 또는 형용사인 자료
words_basket.append(word[0])
#print(words_basket)
#print(Counter(words_basket).most_common(50)) #참고로 빈도수 높은 단어 확인
movies = [m.replace('ㅋㅋㅋㅋ',"") for m in movies] # 해당단어 잘라내기
movies = [m.replace('이런',"") for m in movies] # 해당단어 잘라내기
movies = [m.replace('있었고',"") for m in movies] # 해당단어 잘라내기
print(movies,len(movies))
print('------------------------')
def word_separate(movies):
    """POS-tag each review blob with the module-level Okt tagger and keep
    nouns/adjectives of length >= 2.

    Returns one space-joined token string per input document.
    """
    result = []
    for mov in movies:
        words = okt.pos(mov)
        one_result = []
        for word in words:
            # Keep only nouns and adjectives with at least two characters.
            if(word[1] in ['Noun','Adjective'] and len(word[0]) >= 2):
                one_result.append(word[0])
        result.append(' '.join(one_result))
    return result
word_list = word_separate(movies)
print(word_list)
print('----------------------------------------')
# 토큰 생성 후 벡터화
# 1 : CountVectorizer
count = CountVectorizer(min_df= 2)
print(count)
cou_dtm = count.fit_transform(word_list).toarray()
print(cou_dtm)
cou_dtm_df = pd.DataFrame(cou_dtm, columns= count.get_feature_names() , index= ['yeogoksung','rpoin','nalci','catmovie','coco'])
print(cou_dtm_df) # 단어별 빈도 수
print('^^^' * 20)
# 2 : CountVectorizer()
idf_maker = TfidfVectorizer(min_df = 2 )
tfidf_dtm = idf_maker.fit_transform(word_list).toarray()
tfidf_dtm_df = pd.DataFrame(tfidf_dtm, columns = count.get_feature_names(), index = ['yeogoksung','rpoin','nalci','catmovie','coco'])
print(tfidf_dtm_df) #단어들의 중요도를 알 수 있는 가중치로 출력
# 코사인 유사도를 이용해 단어의 유사성 출력
def cosin_func(doc1, doc2):
    """Return the cosine similarity of two equal-length numeric vectors."""
    numerator = sum(doc1 * doc2)
    denominator = (sum(doc1 ** 2) * sum(doc2 ** 2)) ** 0.5
    return numerator / denominator
res = np.zeros((5,5))
print(res)
print(res)
for i in range(5):
for j in range(5):
res[i,j] = cosin_func(tfidf_dtm_df.iloc[i], tfidf_dtm_df.iloc[j].values)
df = pd.DataFrame(res, index= ['yeogoksung','rpoin','nalci','catmovie','coco'] , columns = ['yeogoksung','rpoin','nalci','catmovie','coco'])
print(df)
| [
"bnb0409@gmail.com"
] | bnb0409@gmail.com |
636c4f2ea08562f9046de8d6850b359042accba1 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/2079.py | dabae5ddf4c853871e4c704366eb7ae6a0c75929 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | def answer(n):
n = [int(d) for d in str(n)]
lenn = len(n)
if (lenn == 1):
return n[0]
else:
for i in range(lenn-1, 0, -1):
last = n[i]
second_last = n[i-1]
if(last < second_last):
n[i] = 9
n[i-1] = int(n[i-1]) - 1
for j in range(i, lenn-1):
if(n[j] > n[j+1]):
n[j+1] = n[j]
return (int(''.join(map(str, n))))
testCase = int(input())
for etc in range(testCase):
print ("Case #" + str(etc + 1) + ": " + str(answer(int(input()))))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
5c8a1a9ea3fea4a00301238ee65e03d6d06d56f8 | 226cae75c568aed8b5a13890349fb9c6bf34d36a | /Scripts/FinalPrediction.py | 147f98b360e365d359f7b6dface2cd4b9523c79c | [] | no_license | shivamlohia/VehicleAssistant | 037f3ddbb1623e649326404f57b024500f5bea12 | 4da8944492e1689e6887c2f0596288268c349e83 | refs/heads/master | 2023-04-18T15:29:46.257225 | 2020-06-17T10:08:33 | 2020-06-17T10:08:33 | 362,826,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,262 | py | import pandas as pd
import numpy as np
import os
import joblib
import sys
originalData = pd.read_csv("D:/Rishu/VehicleAssistant/Scripts/Dataset/train-data.csv")
trainedData = pd.read_csv("D:/Rishu/VehicleAssistant/Scripts/Dataset/trained.csv")
trainedData.drop(trainedData.columns[0], axis=1, inplace=True)
trainedData.drop('Price',axis=1,inplace=True)
carList = str(sys.argv[1]).split(',')
carName = carList[0] + " " + carList[1]
intYear = int(carList[4], 10)
carList[4] = intYear
intKm = int(carList[5], 10)
carList[5] = intKm
for i in range(originalData.shape[0]):
if originalData["Name"][i].upper() == carName.upper():
seat = originalData['Seats'][i]
mlg = originalData['Mileage'][i]
eng = originalData['Engine'][i]
power = originalData['Power'][i]
fuel = originalData['Fuel_Type'][i]
owner = originalData['Owner_Type'][i]
trans = originalData['Transmission'][i]
data_list=[]
data_list.append(carList[5])
data_list.append(seat)
data_list.append(mlg)
data_list.append(eng)
data_list.append(power)
data_list.append(2020-carList[4])
for i in range(6,11):
if trainedData.columns[i] == 'Fuel_Type_'+ fuel:
data_list.append(1)
else:
data_list.append(0)
for i in range(11,13):
if trainedData.columns[i] == 'Transmission_'+ trans:
data_list.append(1)
else:
data_list.append(0)
for i in range(13,17):
if trainedData.columns[i] == 'Owner_Type_'+ owner:
data_list.append(1)
else:
data_list.append(0)
for i in range(17, len(trainedData.columns)):
if trainedData.columns[i] == 'brand_name_' + carList[0]:
data_list.append(1)
else:
data_list.append(0)
data_list[2]=data_list[2].split(' ')[0]
data_list[3]=data_list[3].split(' ')[0]
data_list[4]=data_list[4].split(' ')[0]
data_list[2]=float(data_list[2])
data_list[3]=float(data_list[3])
data_list[4]=float(data_list[4])
for i in range(len(data_list)):
data_list[i] = (data_list[i] - trainedData.iloc[:,i].mean())/trainedData.iloc[:,i].std()
x_frame = pd.DataFrame([data_list])
model = joblib.load('D:/Rishu/VehicleAssistant/Scripts/prediction.sav')
result = (model.predict(x_frame))[0][0]
price = np.expm1(result)
print("%.2f" % price, flush=True)
| [
"utkarshaanand123@gmail.com"
] | utkarshaanand123@gmail.com |
7f3cc3272047c3aac34731acfe8dd2e7dd57dba2 | 965453203f1858c986203d39ab18a5e33446b4c4 | /G02-project-1-final/lvq_digits.py | 34f7db572346c4f0d10072de7cf5f2446ae203a5 | [] | no_license | thoatran/MachineLearning | 6a1253287a8391478bd6bda2d998e0ee0500e221 | cb4f0fd93a020c78646c127a5cf274c975c8f5c8 | refs/heads/main | 2023-02-13T19:47:01.711976 | 2021-01-06T17:00:39 | 2021-01-06T17:00:39 | 327,376,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,613 | py | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, confusion_matrix
import pandas as pd
import seaborn as sns
from time import time
print('LVQ classifier, Digits dataset')
# np.random.seed(7)
# Xy_train_pd = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/optdigits/optdigits.tra',header=None)
Xy_train_pd = pd.read_csv('/Users/linhht20/Google Drive/ITC05F Machine Learning/optdigits.tra',header=None)
Xy_train = np.array(Xy_train_pd)
X_train = np.delete(Xy_train,-1,1)
y_train = np.copy(Xy_train[...,-1])
# print('Training set:',X_train.shape[0])
# Xy_test_pd = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/optdigits/optdigits.tes',header=None)
Xy_test_pd = pd.read_csv('/Users/linhht20/Google Drive/ITC05F Machine Learning/optdigits.tes',header=None)
Xy_test = np.array(Xy_test_pd)
X_test = np.delete(Xy_test,-1,1)
y_test = np.copy(Xy_test[...,-1])
x_min = np.min(X_test)
x_max = np.max(X_test)
# print('Test set:',X_test.shape[0])
print('Train size:', X_train.shape[0], ', test size:', X_test.shape[0])
print('Label:',np.unique(y_test))
# locate the best matching prototype
def get_best_matching_prototype(x, prototype_set):
    """Return the prototype ([features..., label]) closest to x.

    Distance is the Euclidean norm over the feature part (everything but
    the trailing label).  Returns the prototype object itself - not a
    copy - so the caller can update it in place.  Replaces the old
    build-list-and-sort (O(n log n) plus allocation) with a single O(n)
    min(); ties resolve to the first minimal element in both versions.
    """
    return min(prototype_set, key=lambda xyp: np.linalg.norm(x - xyp[:-1]))
# create a random prototype vector
def create_random_prototype(data_train,idx):
    """Build one prototype: features copied from a random training row,
    plus a class label assigned round-robin from the prototype index.
    """
    n_records = len(data_train)
    n_features = len(data_train[0])
    # Feature part: the first n_features-1 columns of a random record.
    xy_prototype = [data_train[np.random.randint(n_records)][i] for i in range(n_features-1)]
    # Label: idx modulo the number of classes.  NOTE(review): the class
    # count is taken from the module-level y_test - confirm this is intended.
    xy_prototype += [idx%len(np.unique(y_test))]
    return xy_prototype
# training a set of prototype vectors
# training a set of prototype vectors
def train_prototypes(data_train, n_prototypes, lrate_init, n_epochs):
    """Train an LVQ-style codebook of prototypes on *data_train*.

    Each training row is features followed by a trailing class label.
    Returns ``(prototype_set, err_vec)`` where err_vec holds, per epoch, the
    sum of squared distances between samples and their best-matching units
    (scaled by 1/16 -- presumably normalizing the 0..16 pixel range; confirm).
    Relies on module-level ``x_min``/``x_max`` for clamping and on
    ``create_random_prototype`` / ``get_best_matching_prototype``.
    """
    prototype_set = [create_random_prototype(data_train,i) for i in range(n_prototypes)]
    err_vec = []
    for epoch in range(n_epochs):
        # Learning rate decays linearly from lrate_init toward 0.
        lrate = lrate_init * (1.0 - epoch/float(n_epochs))
        sum_err = 0.0
        for xy_train in data_train:
            # bmu aliases an element of prototype_set, so the slice/index
            # assignments below update the stored prototype in place.
            bmu = get_best_matching_prototype(xy_train[:-1], prototype_set) # bmu is a view of the prototype_set
            err = xy_train[:-1] - bmu[:-1]
            sum_err += np.linalg.norm(1/16*err)**2
            # if bmu[-1] == xy_train[-1]:
            #     bmu[:-1] += lrate*err
            # else:
            #     bmu[:-1] -= lrate*err
            if bmu[-1] == xy_train[-1]:
                # Same class: attract the prototype toward the sample.
                bmu[:-1] += lrate*err
            else:
                # bmu[:-1] -= lrate*err
                # Different class: repel, clamping each feature to the
                # global [x_min, x_max] range (only this branch clamps).
                for i in range(len(bmu[:-1])):
                    tmp = bmu[i] - lrate*err[i]
                    if tmp < x_min:
                        bmu[i] = x_min
                    elif tmp > x_max:
                        bmu[i] = x_max
                    else:
                        bmu[i] = tmp
        print('>epoch=%d, lrate=%.6f, err=%.3f'%(epoch,lrate,sum_err))
        err_vec += [sum_err]
    return (prototype_set,err_vec)
# main():
n_run = 1
lrate_init = 0.3
n_epochs = 100
n_prototypes = 100 # 10 clusters
accuracy_vec = np.zeros(n_run)
ttrain_vec = np.zeros(n_run)
ttest_vec = np.zeros(n_run)
for i_run in range(n_run):
# w = evaluate(lrate_init,n_epochs,n_prototypes)
# lvq training
ts_train = time()
# Xy_prototype,err_vec = np.array(train_prototypes(Xy_train, n_prototypes, lrate_init, n_epochs),dtype=object)
tmp1,tmp2 = train_prototypes(Xy_train, n_prototypes, lrate_init, n_epochs)
Xy_prototype = np.array(tmp1)
err_vec = np.array(tmp2)
ttrain_vec[i_run] = time() - ts_train
# print(Xy_prototype[0])
# print(np.unique(Xy_prototype[...,-1]))
# predict outputs
predict_list = list()
ts_test = time()
for x_test in X_test:
# lvq algo
predict_list.append(get_best_matching_prototype(x_test,Xy_prototype)[-1])
ttest_vec[i_run] = time()-ts_test
y_predict = np.array(predict_list)
# evaluate the prediction
e = 0
for i in range(len(y_predict)):
if y_predict[i] != y_test[i]:
e += 1
accuracy_vec[i_run] = 100*(1-e/len(y_test))
print('irun =', i_run, ', accuracy =', np.round(accuracy_vec[i_run],2), 'err =',np.round(err_vec[-1],2))
print('accuracy:',np.round(accuracy_vec[:10],2))
# print('train time:',np.round(ttrain_vec,5))
# print('test time:',np.round(ttest_vec,5))
print('average accuracy: %.2f%%'%(np.average(accuracy_vec)))
print('average train time: %.5fs'%(np.average(ttrain_vec)))
print('average test time: %.5fs'%(np.average(ttest_vec)))
accuracy_avg = np.average(accuracy_vec)
rtime_avg = np.average(ttest_vec)
# confution matrix
cm = confusion_matrix(y_test,y_predict)
cm_df = pd.DataFrame(cm,
index=['0', '1', '2','3','4','5','6','7','8','9'],
columns=['0', '1', '2','3','4','5','6','7','8','9'])
plt.figure(figsize=(7, 6))
sns.heatmap(cm_df, annot=True, fmt='g')
plt.title("LVQ Classifier, Digits dataset \nAvg. accuracy: {:.2f}%, Avg. running time: {:.3f} (s)".format(accuracy_avg, rtime_avg))
# plt.title('1NN Classifier, Iris dataset \nAccuracy: %.2f%, Running time: %.2f (s)'%(accuracy_avg, rtime_avg))
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
# error figure
plt.figure(figsize=(7, 6))
plt.plot(err_vec)
plt.xlabel('epoch')
plt.ylabel('error')
plt.grid()
plt.show() | [
"thoatran@Thoas-MacBook-Pro.local"
] | thoatran@Thoas-MacBook-Pro.local |
767116e335ce8d255d33fed6b21bfe818c1d9742 | 4717b8009fe3d42eace211092a1b7f12dc2f826b | /devel/lib/python2.7/dist-packages/aquacore/srv/_IsCalibrated.py | 3a2f0c8be82a4ba3b8ea7c9e48fdec368a09ad0d | [] | no_license | Shabirmean/Assignment1 | d8cc6e65d30072340568d2948118a26d3bca06d1 | ba789432de49b8a4ff7fe543984b22d63d13f9d6 | refs/heads/master | 2021-09-07T19:50:21.130729 | 2018-02-28T04:25:58 | 2018-02-28T04:25:58 | 108,614,820 | 0 | 0 | null | 2017-10-28T02:54:22 | 2017-10-28T02:54:22 | null | UTF-8 | Python | false | false | 6,719 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from aquacore/IsCalibratedRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class IsCalibratedRequest(genpy.Message):
  # Generated by genpy ("Do not edit"): empty service request -- it has no
  # message fields, so all (de)serialization methods are effectively no-ops.
  _md5sum = "d41d8cd98f00b204e9800998ecf8427e"
  _type = "aquacore/IsCalibratedRequest"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """"""
  __slots__ = []
  _slot_types = []
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.
    The available fields are:
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(IsCalibratedRequest, self).__init__(*args, **kwds)
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # no fields to pack
      pass
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      # no fields to unpack
      end = 0
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      pass
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Shared packer for the generic uint32 length prefix used by genpy.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from aquacore/IsCalibratedResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class IsCalibratedResponse(genpy.Message):
  # Generated by genpy ("Do not edit"): response with a single boolean
  # field 'value', wire-encoded as one byte ("<B").
  _md5sum = "e431d687bf4b2c65fbd94b12ae0cb5d9"
  _type = "aquacore/IsCalibratedResponse"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """bool value
"""
  __slots__ = ['value']
  _slot_types = ['bool']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       value
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(IsCalibratedResponse, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.value is None:
        self.value = False
    else:
      self.value = False
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      buff.write(_get_struct_B().pack(self.value))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      end += 1
      (self.value,) = _get_struct_B().unpack(str[start:end])
      # normalize the unpacked byte back to a Python bool
      self.value = bool(self.value)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      buff.write(_get_struct_B().pack(self.value))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 1
      (self.value,) = _get_struct_B().unpack(str[start:end])
      self.value = bool(self.value)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Struct packers shared by the (de)serialization methods above; _struct_B is
# built lazily on first use.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
_struct_B = None
def _get_struct_B():
    global _struct_B
    if _struct_B is None:
        _struct_B = struct.Struct("<B")
    return _struct_B
class IsCalibrated(object):
  # Generated service descriptor binding the request/response message
  # classes; presumably consumed by the ROS service machinery -- do not edit.
  _type          = 'aquacore/IsCalibrated'
  _md5sum = 'e431d687bf4b2c65fbd94b12ae0cb5d9'
  _request_class  = IsCalibratedRequest
  _response_class = IsCalibratedResponse
| [
"shabir_tck09@hotmail.com"
] | shabir_tck09@hotmail.com |
0f2cf8d26e748018f6253715fe1c3ddd1f58f25b | cfdf920dd001a15f05cc8ec7fdec1ddc3d591b0e | /run.py | 10a70653045ea097e9d5039202654c8fcd3acee1 | [] | no_license | dean/app-template | ee0fc58791c685869a0020dca03e80c9d508004e | 49ccf9ef4f9e490e43ca6a7768dee20964deb6a8 | refs/heads/master | 2020-05-17T00:35:12.168919 | 2014-01-06T05:26:15 | 2014-01-06T05:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | import sys
import argparse
sys.dont_write_bytecode = True
from appname import app
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--ip", help="listen to this IP address",
default="0.0.0.0")
parser.add_argument("-p", "--port", help="listen to this port",
default="80", type=int)
parser.add_argument("-d", "--debug", help="turn debugging on",
default="--debug")
args = parser.parse_args()
app.run(args.ip, args.port, debug=True)
| [
"deanjohnson222@gmail.com"
] | deanjohnson222@gmail.com |
2f21e8ac91cc0ed1a3de6696a9a4c676c9582aec | f9bd85ed8e592ea05f12b613cecdebf274245520 | /netpyntest_lib/api.py | 5f822756952d354e4bbe671b87e6842c57d86f83 | [
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | aespinosaalvarez/NetPyntest | 8d3fd2dd2623afa41dbbc183d48cee8c7e15e1b2 | 7189647ecfa584d4cef9763f753c2a74bf941c08 | refs/heads/master | 2020-05-29T09:33:00.692025 | 2016-08-08T21:11:57 | 2016-08-08T21:11:57 | 59,188,862 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,351 | py | # -*- coding: utf-8 -*-
"""
This file contains API calls and Data
"""
import six
from sys import path
from termcolor import colored
from os import geteuid
from os import path
from .data import *
__version__ = "1.0.0"
__all__ = ["run_console", "run", "GlobalParameters"]
# --------------------------------------------------------------------------
#
# Command line options
#
# --------------------------------------------------------------------------
def run_console(config):
    """
    Validate the parsed command-line options and dispatch to :func:`run`.

    :param config: GlobalParameters option instance
    :type config: `GlobalParameters`
    :raises: TypeError if *config* is not a GlobalParameters instance
    """
    if not isinstance(config, GlobalParameters):
        raise TypeError("Expected GlobalParameters, got '%s' instead" % type(config))
    #six.print_(colored("[*]", "blue"), "Starting NetPyntest execution")
    run(config)
    #six.print_(colored("[*]", "blue"), "Done!")
# ----------------------------------------------------------------------
#
# API call
#
# ----------------------------------------------------------------------
def run(config):
    """
    Main dispatcher: validate *config*, require root, ensure the control
    file exists, then launch the selected attack/action.

    Supported attacks: ``mac_flooding`` (start/stop/generate_pcap),
    ``port_stealing`` (start/stop) and ``snmp``
    (sniff/get/set/dictionary_attack/dos).  Option values are argparse-style
    single-element lists, hence the ``[0]`` indexing throughout.

    :param config: GlobalParameters option instance
    :type config: `GlobalParameters`
    :raises: TypeError
    """
    if not isinstance(config, GlobalParameters):
        raise TypeError("Expected GlobalParameters, got '%s' instead" % type(config))
    # --------------------------------------------------------------------------
    # CHECK ROOT USER
    # --------------------------------------------------------------------------
    # geteuid() is non-zero for non-root users.
    if geteuid():
        six.print_(colored("[!] ERROR - Please run NetPyntest as root.", "red"))
        exit()
    # --------------------------------------------------------------------------
    # CHECK CONFIG FILE
    # --------------------------------------------------------------------------
    # The control file tracks the PIDs of background attacks between runs.
    if not path.isfile("control_file"):
        six.print_("Creating config_file")
        control_file = open("control_file", "w")
        data = {'mac_flooding_pid': 0, 'port_stealing_pid': 0}
        control_file.write(str(data))
        control_file.close()
    # --------------------------------------------------------------------------
    # SELECT & LAUNCH ATTACK
    # --------------------------------------------------------------------------
    attack = config.attack[0]
    action = config.action[0]
    if config.interface != None:
        iface = config.interface[0]
        #TODO valid interface and introduce interface in calls
    else:
        iface = "eth0"
    ################ MAC FLOODING ##############
    if attack == "mac_flooding":
        from .libs.plugins.mac_flooding import start
        from .libs.plugins.mac_flooding import stop
        from .libs.plugins.mac_flooding import generate_pcap
        if action == "start":#TODO This is not working for Python 2
            from sys import version_info
            if version_info[0] >=3:
                if config.file != None:
                    file = config.file[0]
                    if path.isfile(file):
                        six.print_("[*] Starting MAC Flooding with file '{}'...".format(file))
                        from scapy.error import Scapy_Exception
                        try:
                            start(file, iface)
                        except Scapy_Exception:
                            six.print_(colored("[!] ERROR - File '{}' is not a valid PCAP file".format(file), "red"))
                    else:
                        six.print_(colored("[!] ERROR - File '{}' doesn't exist.".format(file), "red"))
                else:
                    six.print_(colored("[!] ERROR - You must specify a PCAP file. You can generate one with 'sudo python netpyntest.py mac_flooding generate_pcap'", "red"))
            else:
                six.print_(colored("[!] ERROR - Sorry, currently this feature is only supported in Python 3 or higher", "red"))
        elif action == "stop":
            stop()
        elif action == "generate_pcap":
            # config.size is optional; fall back to 10000 packets.
            if config.size == None:
                six.print_("[*] Generating PCAP file with default size of 10000 packets")
                generate_pcap(10000)
            else:
                size = config.size[0]
                six.print_("[*] Generating PCAP file with size of {} packets".format(size))
                generate_pcap(size)
            six.print_(colored("[*] PCAP file generated", "green"))
        else:
            six.print_(colored("[!] ERROR - Action {} doesn't exist for MAC Flooding attack".format(action), "red"))
    ################ PORT STEALING ##############
    elif attack == "port_stealing":
        if action == "start":
            if config.target != None:
                target = config.target[0]
                if validate_ip(target):
                    if config.output != None:
                        output = config.output[0]
                        from .libs.plugins.port_stealing import start
                        six.print_("[*] Starting Port Stealing...")
                        start(target, output, iface)
                    else:
                        six.print_(colored("[!] ERROR - No output file specified (-o)", "red"))
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address (-t)", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t)", "red"))
        elif action == "stop":
            from .libs.plugins.port_stealing import stop
            six.print_("[*] Stopping Port Stealing...")
            stop()
        else:
            six.print_(colored("[!] ERROR - Action {} doesn't exist for Port Stealing attack".format(action), "red"))
    ################ SNMP ##############
    elif attack == "snmp":
        if action == "sniff":
            from .libs.plugins.snmp import sniff_snmp
            six.print_("[*] Starting SNMP sniffing...")
            sniff_snmp(iface)
        elif action == "get":
            # Default read community is "public".
            if config.com != None:
                com = config.com[0]
            else:
                com = "public"
            if config.target != None:
                target = config.target[0]
                if validate_ip(target):
                    if config.oid != None:
                        oid = config.oid[0]
                        from .libs.plugins.snmp import snmp_get
                        six.print_("[*] Performing SNMP GET request against host {} and OID {}...".format(target, oid))
                        snmp_get(target, oid, iface, com)
                    else:
                        six.print_(colored("[!] ERROR - No OID specified (-oid)", "red"))
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address.", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t)", "red"))
        elif action =="set":
            # Default write community is "private".
            if config.com != None:
                com = config.com[0]
            else:
                com = "private"
            if config.target != None:
                target = config.target[0]
                if validate_ip(target):
                    if config.oid != None:
                        oid = config.oid[0]
                        if config.value != None:
                            val = config.value[0]
                            from .libs.plugins.snmp import snmp_set
                            six.print_("[*] Performing SNMP SET request against host {}. Trying to set value {} in object {}...".format(target, val, oid))
                            snmp_set(target, oid, iface, com, val)
                        else:
                            six.print_(colored("[!] ERROR - No value specified (-v)", "red"))
                    else:
                        six.print_(colored("[!] ERROR - No OID specified (-oid)", "red"))
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address (-t)", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t)", "red"))
        elif action == "dictionary_attack":
            if config.target != None:
                target = config.target[0]
                if validate_ip(target):
                    if config.dict != None:
                        # NOTE(review): local name 'dict' shadows the builtin.
                        dict = config.dict[0]
                        if path.isfile(dict):
                            from .libs.plugins.snmp import dictionary_attack
                            six.print_("[*] Starting SNMP dictionary attack...")
                            dictionary_attack(dict, target, iface)
                        else:
                            six.print_(colored("[!] ERROR - File '{}' doesn't exist.".format(dict), "red"))
                    else:
                        six.print_(colored("[!] ERROR - You must specify a dictionary file (-d)", "red"))
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address (-t)", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t, --target)", "red"))
        elif action == "dos":
            if config.com != None:
                com = config.com[0]
            else:
                com = "private"
            if config.target != None:
                target = config.target[0]
                if validate_ip(target):
                    from .libs.plugins.snmp import snmp_DoS
                    six.print_("[*] Starting DoS attack to host {} with RW community {}...".format(target, com))
                    snmp_DoS(target, iface, com)
                else:
                    six.print_(colored("[!] ERROR - IP isn't valid. Enter valid IPv4 address (-t, --target)", "red"))
            else:
                six.print_(colored("[!] ERROR - You must specify a target (-t)", "red"))
        else:
            six.print_(colored("[!] ERROR - Action {} doesn't exist for SNMP".format(action), "red"))
def validate_ip(s):
    """Return True if *s* is a dotted-quad IPv4 address (four 0-255 octets)."""
    octets = s.split('.')
    if len(octets) != 4:
        return False
    return all(o.isdigit() and 0 <= int(o) <= 255 for o in octets)
| [
"noreply@github.com"
] | aespinosaalvarez.noreply@github.com |
5b0ae5bb0969903a0924612161f4a14169f658b2 | 3b20cafef71bf6b32fb461b64ff806593a69100b | /Part 8 - Deep Learning/Section 40 - Convolutional Neural Networks (CNN)/Python/convolutional_neural_network.py | 0f9177bc0c0a380118f7665146e81bdbc851c32e | [] | no_license | GraydonHall42/Machine-Learning-in-Python | 88124f632e1f912c2e0c5182ee6ec9857b3ff46c | a672ed5a145719aa9b219aef18bc7ab31bf89dd7 | refs/heads/main | 2023-07-16T19:52:37.914628 | 2021-08-29T00:00:06 | 2021-08-29T00:00:06 | 400,905,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,222 | py | # Convolutional Neural Network
# Importing the libraries
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
tf.__version__
# Part 1 - Data Preprocessing
training_set_path = r'C:\Users\grayd\Downloads\Section+40+-+Convolutional+Neural+Networks+(CNN)\Section 40 - Convolutional Neural Networks (CNN)\dataset\training_set'
test_set_path = r'C:\Users\grayd\Downloads\Section+40+-+Convolutional+Neural+Networks+(CNN)\Section 40 - Convolutional Neural Networks (CNN)\dataset\test_set'
# create train_datagen to augment our images
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
# apply train_datagen object to our dataset.
training_set = train_datagen.flow_from_directory(training_set_path,
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
# Preprocessing the Test set
test_datagen = ImageDataGenerator(rescale = 1./255)
test_set = test_datagen.flow_from_directory('dataset/test_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
# Part 2 - Building the CNN
# Initialising the CNN
cnn = tf.keras.models.Sequential()
# Step 1 - Convolution
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3]))
# Step 2 - Pooling
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
# Adding a second convolutional layer
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'))
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
# Step 3 - Flattening
cnn.add(tf.keras.layers.Flatten())
# Step 4 - Full Connection
cnn.add(tf.keras.layers.Dense(units=128, activation='relu'))
# Step 5 - Output Layer
cnn.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))
# Part 3 - Training the CNN
# Compiling the CNN
cnn.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Training the CNN on the Training set and evaluating it on the Test set
cnn.fit(x = training_set, validation_data = test_set, epochs = 25)
# Part 4 - Making a single prediction
import numpy as np
from keras.preprocessing import image
single_img_path = r'C:\Users\grayd\Downloads\Section+40+-+Convolutional+Neural+Networks+(CNN)\Section 40 - Convolutional Neural Networks (CNN)\dataset\single_prediction'
# create image object, and resize to 64x64
# kept in downloads to keep out of onedrive!
test_image = image.load_img(single_img_path+'/cat_or_dog_2.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image) # convert image to 2D array
test_image = np.expand_dims(test_image, axis = 0) # have to add a 3rd dimension for the batch
result = cnn.predict(test_image) # make prediction for test image
training_set.class_indices
if result[0][0] == 1:
prediction = 'dog'
else:
prediction = 'cat'
print(prediction) | [
"gwhall@ualberta.ca"
] | gwhall@ualberta.ca |
ab9af6723c6116f3b71ccc60655438c85d7cc063 | ca3ff0bc4f7e9e8fcf677afa3a1a18dd2129a5d4 | /daily_problems/problem_201_to_300/223.py | 4dc6c1336c613927f995bedaac4769db5534fc33 | [
"MIT"
] | permissive | rrwt/daily-coding-challenge | d9b23a82a1a3c4824b8f1aeacf6584afc5189ce7 | 4dcd59eaff021be0b9b1aba1dda73248c81454b7 | refs/heads/master | 2022-05-29T04:32:44.406196 | 2022-05-25T01:12:01 | 2022-05-25T01:12:01 | 181,972,357 | 1 | 0 | MIT | 2021-04-20T19:58:43 | 2019-04-17T21:41:25 | Python | UTF-8 | Python | false | false | 1,077 | py | """
Typically, an implementation of in-order traversal of a binary tree
has O(h) space complexity, where h is the height of the tree.
Write a program to compute the in-order traversal of a binary tree using O(1) space.
"""
from daily_problems.binary_tree_node import Node
def inorder(node: Node) -> None:
"""
Time Complexity: O(n)
Space Complexity: O(1)
"""
while node:
if node.left:
runner = node.left
while runner.right and runner.right != node:
runner = runner.right
if runner.right == node:
runner.right = None
print(node.data)
node = node.right
else:
runner.right = node
node = node.left
else:
print(node.data)
node = node.right
if __name__ == "__main__":
root = Node(1)
root.left = Node(2)
root.left.left = Node(4)
root.left.right = Node(5)
root.right = Node(3)
root.right.left = Node(6)
root.right.right = Node(7)
inorder(root)
| [
"rohitrawat2000@gmail.com"
] | rohitrawat2000@gmail.com |
dba4b938431ce989cda1759e375370a014a0df06 | df454d2320604599ff4d3a8b433d9d66311ea800 | /Crs2_python_data_structures/ex8_05_file_string_parsing.py | 23cdf90b58fd4318bd3af089af694e41135c9131 | [] | no_license | aclaudio123/python-for-everybody | 84f71bc4cdc56cff4adace98414a5f570ef3ac72 | 92865da71872735076d897ed64961961baf32d88 | refs/heads/master | 2020-04-17T16:40:07.117512 | 2019-01-22T21:45:04 | 2019-01-22T21:45:04 | 166,750,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | #
# Title: File processing 4: string parsing
# Author: Claudio Asangong
#
# 8.5 Open the file mbox-short.txt and read it line by line. When you find a
# line that starts with 'From ' like the following line:
# From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008
# You will parse the From line using split() and print out the second word in
# the line (i.e. the entire address of the person who sent the message).
# Then print out a count at the end.
# Hint: make sure not to include the lines that start with 'From:'.
# You can download the sample data at http://www.py4e.com/code3/mbox-short.txt
#
# Concepts: file processing, string parsing, error handling
# Prompt for a mailbox file, print the sender address of every 'From ' line,
# then report how many such lines were found.
fname = input("Enter file name: ")
try:
    fhandler = open(fname)
except Exception:
    # Fix: the original 'except Exception e:' is a SyntaxError in Python 3;
    # the correct form is 'except Exception as e:' (the binding was unused,
    # so it is dropped here).
    print("File not found", fname)
    quit()
count = 0
for line in fhandler:
    # 'From ' (with trailing space) matches envelope lines only,
    # deliberately skipping 'From:' header lines.
    if line.startswith('From '):
        llist = line.split()
        print(llist[1])  # second word is the sender's email address
        count = count + 1
print("There were", count, "lines in the file with From as the first word")
| [
"aclaudio123@gmail.com"
] | aclaudio123@gmail.com |
510d289f293406782423dd2e47660241143a7eb2 | bbf17b65d1e7e963ce9b3c16951292842193a0cf | /fuzzinator/call/platform_info_decorator.py | a7216ab5180dd9ea52d3af459ae370154466d427 | [
"BSD-3-Clause"
] | permissive | tkeri/fuzzinator | 5bbb0a23ebc419f93e8221618074c4a53be22630 | e18b3db876c644cc06b57fb7815aa4f04bbd1775 | refs/heads/master | 2023-05-24T06:48:19.027415 | 2019-12-08T17:21:06 | 2019-12-08T17:21:06 | 152,223,017 | 1 | 0 | BSD-3-Clause | 2018-10-09T09:19:16 | 2018-10-09T09:19:16 | null | UTF-8 | Python | false | false | 1,230 | py | # Copyright (c) 2016-2019 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import platform
from . import CallableDecorator
class PlatformInfoDecorator(CallableDecorator):
"""
Decorator for SUT calls to extend issues with ``'platform'`` and ``'node'``
properties.
The new ``'platform'`` issue property will contain the result of Python's
:func:`platform.platform` and the ``'node'`` property will contain the
result of :func:`platform.node`.
**Example configuration snippet:**
.. code-block:: ini
[sut.foo]
#call=...
call.decorate(0)=fuzzinator.call.PlatformInfoDecorator
"""
def decorator(self, **kwargs):
def wrapper(fn):
def filter(*args, **kwargs):
issue = fn(*args, **kwargs)
if not issue:
return issue
issue['platform'] = platform.platform()
issue['node'] = platform.node()
return issue
return filter
return wrapper
| [
"reni@inf.u-szeged.hu"
] | reni@inf.u-szeged.hu |
4541d67b4ce5fdecbe1247766b337ab311ad34ec | 2b1b030afd5f7f2b5d7c694b0dd62e5277751562 | /Scripts/Sample.py | 21439ac152e33303a2a172b141152b5f1d8ee44f | [] | no_license | alexjorenby/IterativeFeatureExtraction | be9a85e9a98be2677885d6c56db3b9e7e87551d0 | adb2cf79b6c48a89e01b9c2731e14dbc15807399 | refs/heads/master | 2020-05-16T21:44:56.833527 | 2019-04-24T22:38:56 | 2019-04-24T22:38:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,691 | py | import pandas as pd
import numpy as np
import uuid
import os
import FormatDF as FDF
import Helpers as H
def get_sample(source_location, features, target, sample_size, custom_queries=[], threshold=0.5, even=True, feature_config='../../feature_config', save_seed=False, seed_directory='../../seeds'):
    """Load a CSV, clean it, and draw a sample of *sample_size* rows.

    Pipeline: read CSV -> normalize column names (FDF) -> apply the caller's
    pandas query strings -> drop rows with "nan" in configured features ->
    fill configured null features -> drop target outliers.  With even=True
    the sample is balanced around *threshold* on the target column; otherwise
    it is drawn uniformly via H.random_sample.  Optionally persists the
    shuffled sample under a fresh UUID-named seed folder.

    Returns (sample_df, seed_folder) where seed_folder is '' unless saved.

    NOTE(review): custom_queries=[] is a mutable default argument (shared
    across calls if ever mutated); the even branch uses the module-local
    random_sample while the else branch calls H.random_sample -- confirm
    that is intentional.
    """
    df = pd.read_csv(source_location)
    df = FDF.format_column_names(df)
    for q in custom_queries:
        df = df.query(q)
    df = nan_feature_filter(df, features, feature_config)
    df = replace_null_features(df, feature_config)
    df = clean_outliers(df, target)
    if even:
        # Draw 90% of the data, split it at the threshold, shuffle each side
        # and take half the requested sample from each.
        sample_df_all = random_sample(df, int(len(df) * 0.9))
        df_n = sample_df_all.query(str(target) + ' > ' + str(threshold))
        df_n = df_n.sample(frac=1)
        df_p = sample_df_all.query(str(target) + ' <= ' + str(threshold))
        df_p = df_p.sample(frac=1)
        sample_df = pd.concat([df_p.head(int(sample_size/2)), df_n.head(int(sample_size/2))], sort=False)
    else:
        sample_df = H.random_sample(df, sample_size)
    sample_df = sample_df.sample(frac=1)
    seed_folder = ''
    if save_seed and len(seed_directory) > 1:
        seed_id = str(uuid.uuid4().hex)
        seed_folder = seed_directory + '/' + seed_id
        os.mkdir(seed_folder)
        sample_df.to_csv(seed_folder + '/sample.csv', sep=',', encoding='utf-8')
    return sample_df, seed_folder
def clean_outliers(df, target):
    """Drop rows whose *target* value lies outside mean +/- 2*std.

    :param df: pandas DataFrame; *target* must be a query-safe column name.
    :param target: name of the numeric column to filter on.
    :return: the filtered DataFrame.

    Fix: the original called df.query(...) without using its return value
    (DataFrame.query is not in-place by default), so no rows were ever
    removed and the unfiltered frame was returned.
    """
    m = np.mean(df[target])
    s = np.std(df[target])
    return df.query(target + ' > ' + str(m - 2 * s) + ' and ' + target + ' <= ' + str(m + 2 * s))
def nan_feature_filter(df, features, feature_config):
    """Drop rows where any selected NaN-sensitive feature equals the string "nan".

    Reads the feature list from ``<feature_config>/NAN`` and filters only
    those entries that appear in *features*.

    Fixes: the original always filtered on the last configured feature even
    when it was not in *features*, and raised IndexError when the config
    file was empty; with no relevant features the frame is now returned
    unchanged.
    """
    nan_features = H.list_from_file(feature_config + '/NAN')
    relevant = [f for f in nan_features if f in features]
    if relevant:
        acc = ' and '.join(str(f) + ' != "nan"' for f in relevant)
        df = df.query(acc)
    return df
def replace_null_features(df, feature_config):
    """Fill NaNs in the features listed in <feature_config>/NULL, in place.

    float64 columns get 0.0; every other column gets the string "0" and is
    then cast entirely to str.  Returns the (mutated) DataFrame.
    """
    null_features = H.list_from_file(feature_config + '/NULL')
    for f in null_features:
        col_type = df.dtypes[df.columns.get_loc(f)]
        if col_type == "float64":
            df[f].fillna(0.0, inplace=True)
        else:
            df[f].fillna("0", inplace=True)
            df[f] = df[f].astype(str)
    return df
def random_sample(df, sample_size):
    """Draw *sample_size* rows without replacement and prepend a
    ``sample_key`` column numbered 0..n-1."""
    drawn = df.sample(n=sample_size, replace=False)
    out = pd.DataFrame(data=None, columns=['sample_key'] + np.array(df.columns).tolist())
    out[drawn.columns] = drawn[drawn.columns]
    out[['sample_key']] = np.arange(len(out)).reshape(len(out), 1)
    return out
| [
"alexjorenby@gmail.com"
] | alexjorenby@gmail.com |
53042686daf4e0e9234c01f7c30825091085fb82 | 1892a473b7eed6aaa712bc2959a1aca48beec284 | /forks/baselines/baselines/a2c/a2c.py | ccf3c974c9a1f5842ed949c55ad3d14cde79b5c5 | [
"MIT"
] | permissive | AndrewPaulChester/sage-code | d3753bc894f21ce057c1a273e54926e368529e2b | 9fe676bfbcbc6f642eca29b30a1027fba2a426a0 | refs/heads/main | 2023-05-05T19:08:21.655463 | 2021-05-27T05:21:54 | 2021-05-27T05:21:54 | 371,245,286 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,563 | py | import time
import functools
import tensorflow as tf
from forks.baselines.baselines import logger
from forks.baselines.baselines.common import set_global_seeds, explained_variance
from forks.baselines.baselines.common import tf_util
from forks.baselines.baselines.common.policies import build_policy
from forks.baselines.baselines.a2c.utils import Scheduler, find_trainable_variables
from forks.baselines.baselines.a2c.runner import Runner
from forks.baselines.baselines.ppo2.ppo2 import safemean
from collections import deque
from tensorflow import losses
class Model(object):

    """
    Holder for the two A2C policy instances that share one set of weights.

        __init__:
        - Creates the step_model (batch of nenvs, 1 step: used for sampling)
        - Creates the train_model (batch of nenvs*nsteps: used for updates)

        train():
        - Make the training part (feedforward and retropropagation of gradients)

        save/load():
        - Save load the model
    """
    def __init__(self, policy, env, nsteps,
            ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,
            alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear'):

        sess = tf_util.get_session()
        nenvs = env.num_envs
        nbatch = nenvs*nsteps

        # Both models are built in the same variable scope with AUTO_REUSE so
        # they share weights; only the batch shape differs.
        with tf.variable_scope('a2c_model', reuse=tf.AUTO_REUSE):
            # step_model is used for sampling
            step_model = policy(nenvs, 1, sess)

            # train_model is used to train our network
            train_model = policy(nbatch, nsteps, sess)

        # Placeholders: taken actions, advantages, discounted returns, lr.
        A = tf.placeholder(train_model.action.dtype, train_model.action.shape)
        ADV = tf.placeholder(tf.float32, [nbatch])
        R = tf.placeholder(tf.float32, [nbatch])
        LR = tf.placeholder(tf.float32, [])

        # Calculate the loss
        # Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss

        # Policy loss
        neglogpac = train_model.pd.neglogp(A)
        # L = A(s,a) * -logpi(a|s)
        pg_loss = tf.reduce_mean(ADV * neglogpac)

        # Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.
        entropy = tf.reduce_mean(train_model.pd.entropy())

        # Value loss
        vf_loss = losses.mean_squared_error(tf.squeeze(train_model.vf), R)

        loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef

        # Update parameters using loss
        # 1. Get the model parameters
        params = find_trainable_variables("a2c_model")

        # 2. Calculate the gradients
        grads = tf.gradients(loss, params)
        if max_grad_norm is not None:
            # Clip the gradients (normalize)
            grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
        grads = list(zip(grads, params))
        # zip aggregate each gradient with parameters associated
        # For instance zip(ABCD, xyza) => Ax, By, Cz, Da

        # 3. Make op for one policy and value update step of A2C
        trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon)

        _train = trainer.apply_gradients(grads)

        # NOTE: rebinds the ``lr`` float into a Scheduler object; the closure
        # below relies on this rebinding.
        lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)

        def train(obs, states, rewards, masks, actions, values):
            """Run one A2C update on a batch; returns (pg_loss, vf_loss, entropy)."""
            # Here we calculate advantage A(s,a) = R + yV(s') - V(s)
            # rewards = R + yV(s')
            advs = rewards - values
            # Advance the lr schedule once per batch element; ``cur_lr``
            # deliberately leaks out of the loop as the last value.
            for step in range(len(obs)):
                cur_lr = lr.value()

            td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, LR:cur_lr}
            if states is not None:
                # Recurrent policies also need states and episode-done masks.
                td_map[train_model.S] = states
                td_map[train_model.M] = masks

            policy_loss, value_loss, policy_entropy, _ = sess.run(
                [pg_loss, vf_loss, entropy, _train],
                td_map
            )

            return policy_loss, value_loss, policy_entropy

        self.train = train
        self.train_model = train_model
        self.step_model = step_model
        self.step = step_model.step
        self.value = step_model.value
        self.initial_state = step_model.initial_state
        self.save = functools.partial(tf_util.save_variables, sess=sess)
        self.load = functools.partial(tf_util.load_variables, sess=sess)
        tf.global_variables_initializer().run(session=sess)
def learn(
    network,
    env,
    seed=None,
    nsteps=5,
    total_timesteps=int(80e6),
    vf_coef=0.5,
    ent_coef=0.01,
    max_grad_norm=0.5,
    lr=7e-4,
    lrschedule='linear',
    epsilon=1e-5,
    alpha=0.99,
    gamma=0.99,
    log_interval=100,
    load_path=None,
    **network_kwargs):

    '''
    Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm.

    Parameters:
    -----------

    network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
        specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
        tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
        neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
        See baselines.common/policies.py/lstm for more details on using recurrent nets in policies

    env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py)

    seed: seed to make random number sequence in the alorightm reproducible. By default is None which means seed from system noise generator (not reproducible)

    nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
        nenv is number of environment copies simulated in parallel)

    total_timesteps: int, total number of timesteps to train on (default: 80M)

    vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5)

    ent_coef: float, coeffictiant in front of the policy entropy in the total loss function (default: 0.01)

    max_gradient_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5)

    lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)

    lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and
        returns fraction of the learning rate (specified as lr) as output

    epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)

    alpha: float, RMSProp decay parameter (default: 0.99)

    gamma: float, reward discounting parameter (default: 0.99)

    log_interval: int, specifies how frequently the logs are printed out (default: 100)

    load_path: optional path of saved variables to restore before training (default: None)

    **network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
        For instance, 'mlp' network architecture has arguments num_hidden and num_layers.

    Returns the trained Model instance.
    '''

    set_global_seeds(seed)

    # Get the nb of env
    nenvs = env.num_envs
    policy = build_policy(env, network, **network_kwargs)

    # Instantiate the model object (that creates step_model and train_model)
    model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
        max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule)
    if load_path is not None:
        model.load(load_path)

    # Instantiate the runner object
    runner = Runner(env, model, nsteps=nsteps, gamma=gamma)
    # Rolling buffer of the last 100 episode-info dicts for mean reward/length.
    epinfobuf = deque(maxlen=100)

    # Calculate the batch_size
    nbatch = nenvs*nsteps

    # Start total timer
    tstart = time.time()

    for update in range(1, total_timesteps//nbatch+1):
        # Get mini batch of experiences
        obs, states, rewards, masks, actions, values, epinfos = runner.run()
        epinfobuf.extend(epinfos)

        policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
        nseconds = time.time()-tstart

        # Calculate the fps (frame per second)
        fps = int((update*nbatch)/nseconds)
        if update % log_interval == 0 or update == 1:
            # Calculates if value function is a good predicator of the returns (ev > 1)
            # or if it's just worse than predicting nothing (ev =< 0)
            ev = explained_variance(values, rewards)
            logger.record_tabular("nupdates", update)
            logger.record_tabular("total_timesteps", update*nbatch)
            logger.record_tabular("fps", fps)
            logger.record_tabular("policy_entropy", float(policy_entropy))
            logger.record_tabular("value_loss", float(value_loss))
            logger.record_tabular("explained_variance", float(ev))
            logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf]))
            logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf]))
            logger.dump_tabular()
    return model
| [
"48459485+AndrewPaulChester@users.noreply.github.com"
] | 48459485+AndrewPaulChester@users.noreply.github.com |
df7066c7097c363e0a5cfd24e56d80c089835999 | 2294cd66c22212d37bfb35d34e8edf34aae7b7a5 | /utils.py | 99b29716f4dd9afcc76b5bf8bf97a517a0274447 | [] | no_license | greenlet/research | 2dfe2007db1d76f6a77464175d8f405db93edf6b | 4ef1a11137a8d9b6fa203bee866615ecfb942eb4 | refs/heads/master | 2020-09-26T22:49:13.865178 | 2020-02-07T13:52:24 | 2020-02-07T14:06:23 | 226,360,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,310 | py | import cv2
import time
from datetime import datetime
import os
import math
from PIL import Image
import numpy as np
import shutil
import pprint
PP = pprint.PrettyPrinter(indent=4)
class FPS_Counter:
    """Counts calls to :meth:`next` and prints the frame rate once per window.

    Every ``lapse_sec`` seconds the accumulated frame count is converted to
    frames-per-second, printed (prefixed with *name* if given), and the
    window is reset.
    """

    def __init__(self, name=None, lapse_sec=5):
        # Prefix printed before "FPS:…"; empty when no name was supplied.
        self._tag = '{} '.format(name) if name else ''
        self._lapse_sec = lapse_sec
        self._start = time.time()
        self._frames = 0

    def next(self):
        """Register one frame; emit an FPS line when the window has elapsed."""
        self._frames += 1
        now = time.time()
        elapsed = now - self._start
        if elapsed < self._lapse_sec:
            return
        rate = self._frames / elapsed
        # Reset the measurement window before reporting.
        self._start = now
        self._frames = 0
        print('{}FPS:{:.1f}'.format(self._tag, rate))
def cur_datetime():
    """Return the current local time formatted as ``YYYYMMDD_HHMMSS``."""
    return '{:%Y%m%d_%H%M%S}'.format(datetime.now())
def save_frame(img):
    """Write *img* (an HxWxC image array) as a timestamped JPEG.

    The file is named ``frame_<YYYYMMDD_HHMMSS>_<W>x<H>.jpg`` and saved in
    the ``../screens`` directory relative to this module (or to the current
    working directory when ``__file__`` is unavailable, e.g. in a REPL).
    """
    width, height = img.shape[1], img.shape[0]
    dt_str = cur_datetime()
    file_name = 'frame_{}_{:.0f}x{:.0f}.jpg'.format(dt_str, width, height)
    print('Saving frame: {}'.format(file_name))
    # BUG FIX: the original tested ``__file__ in dir()``, i.e. whether the
    # *value* of __file__ appears among local variable *names* — always
    # False, so the module-relative path was never used.  Test membership of
    # the name in globals() instead, matching abs_path() in this module.
    if '__file__' in globals():
        path = os.path.split(__file__)[0]
    else:
        path = ''
    file_path = os.path.join(path, '..', 'screens', file_name)
    print(file_path)
    cv2.imwrite(file_path, img)
def capture(cam_num=0):
    """Generator yielding mirrored frames from OpenCV camera *cam_num*.

    Runs until the capture device closes or the user presses ESC/q/Q in the
    OpenCV window; pressing '1' (key code 49) saves the current frame via
    save_frame().  Prints capture resolution once and FPS periodically.
    """
    print('OpenCV {}'.format(cv2.__version__))
    cap = cv2.VideoCapture(cam_num)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = cap.get(cv2.CAP_PROP_FPS)
    print('OpenCV capture resolution: {:.0f}x{:.0f} {}'.format(
        width, height, fps))
    fpsc = FPS_Counter()
    while cap.isOpened():
        ret, img = cap.read()
        # Horizontal flip so the preview behaves like a mirror.
        img = cv2.flip(img, 1)
        yield img
        fpsc.next()
        # waitKey also services the OpenCV GUI event loop.
        key = cv2.waitKey(1)
        if key >= 0:
            print('key pressed: {}'.format(key))
            if key == 27 or key == ord('q') or key == ord('Q'):
                break
            elif key == 49:
                save_frame(img)
    cap.release()
def make_dir(*subpaths):
    """Create the directory (and parents) for the joined *subpaths*.

    Existing directories are left untouched.  Returns the absolute path.
    """
    target = abs_path(*subpaths)
    os.makedirs(target, exist_ok=True)
    return target
def abs_path(*subpaths):
    """Resolve *subpaths* to an absolute path.

    Already-absolute joins are returned as-is; relative paths are anchored at
    this module's directory when ``__file__`` exists, otherwise resolved via
    ``os.path.abspath`` (note: that fallback only accepts a single subpath).
    """
    joined = os.path.join(*subpaths)
    if os.path.isabs(joined):
        return joined
    if '__file__' in globals():
        base = os.path.split(__file__)[:-1]
        return os.path.join(*(base + subpaths))
    return os.path.abspath(*subpaths)
def list_files(*subpaths, paths_only=False):
    """List regular files directly inside the directory at *subpaths*.

    Returns ``(full_path, file_name)`` tuples by default, or just the full
    paths when *paths_only* is true.  Subdirectories are skipped.
    """
    base = abs_path(*subpaths)
    found = []
    for name in os.listdir(base):
        full = os.path.join(base, name)
        if not os.path.isfile(full):
            continue
        found.append(full if paths_only else (full, name))
    return found
def clear_dir(*subpaths):
    """Delete every file and subdirectory under *subpaths*, keeping the root."""
    root_dir = abs_path(*subpaths)
    for top, dirnames, filenames in os.walk(root_dir):
        for name in filenames:
            os.unlink(os.path.join(top, name))
        for name in dirnames:
            shutil.rmtree(os.path.join(top, name))
def clear_or_make_dir(*subpaths):
    """Ensure an *empty* directory exists at *subpaths*.

    An existing directory is emptied; a missing one is created.
    """
    target = abs_path(*subpaths)
    if not os.path.exists(target):
        make_dir(target)
    else:
        clear_dir(target)
def copy_files(from_path, to_path, clear_first=True, move=False, rename_cb=None):
    """Copy (or move) every file in *from_path* into *to_path*.

    Either path may be a string or a tuple/list of path components.  When the
    destination exists and *clear_first* is true it is emptied first;
    otherwise it is created.  *rename_cb*, if given, maps each source file
    name to its destination name.  When *move* is true files are moved
    instead of copied.
    """
    from_path = abs_path(from_path) if type(from_path) == str else abs_path(*from_path)
    to_path = abs_path(to_path) if type(to_path) == str else abs_path(*to_path)
    if os.path.exists(to_path):
        if clear_first:
            clear_dir(to_path)
    else:
        make_dir(to_path)
    # BUG FIX: the original called list_files(..., with_names=True), but
    # list_files() has no such keyword, so this function always raised
    # TypeError.  The default return already yields (path, name) tuples.
    for from_file_path, from_file_name in list_files(from_path):
        to_file_name = rename_cb(from_file_name) if rename_cb else from_file_name
        to_file_path = os.path.join(to_path, to_file_name)
        # Honor the previously-ignored ``move`` flag.
        if move:
            shutil.move(from_file_path, to_file_path)
        else:
            shutil.copy(from_file_path, to_file_path)
def copy_file(from_path, to_path):
    """Copy *from_path* to *to_path*, replacing any existing destination.

    A no-op when source and destination are the same file on disk.
    """
    dest_exists = os.path.exists(to_path)
    if dest_exists and os.path.samefile(from_path, to_path):
        return
    if dest_exists:
        os.remove(to_path)
    shutil.copy(from_path, to_path)
def get_square(image, out_size=None):
    """Center-crop a PIL-style image to a square, then downscale it.

    The crop side is the smaller of width/height.  *out_size* caps the final
    side length; falsy or oversized values fall back to the crop side.
    Returns the (in-place thumbnailed) cropped image.
    """
    width, height = image.size
    side = min(width, height)
    if not out_size or out_size > side:
        out_size = side
    left = (width - side) // 2
    top = (height - side) // 2
    image = image.crop((left, top, left + side, top + side))
    # thumbnail() resizes in place, preserving aspect ratio.
    image.thumbnail((out_size, out_size))
    return image
def fit_image(images, size, mask_rect=None, crop=False):
    """Crop/resize one image (or a list of same-size images) to *size*.

    *size* is either an int (square) or a (w, h) pair.  The image is first
    center-cropped to the target aspect ratio, then resized.  When
    *mask_rect* = (x1, y1, x2, y2) is given, the crop window is centered on
    that rectangle instead; if the rectangle does not fit in the window,
    returns None unless *crop* is true (in which case the rectangle itself
    is shrunk).  Returns a list iff a list/tuple was passed in.
    """
    if type(images) != tuple and type(images) != list:
        ret_as_list = False
        images = [images]
    else:
        ret_as_list = True
        images = images
    if type(size) == int:
        w_dst, h_dst = size, size
    else:
        w_dst, h_dst = size
    # All images are assumed to share the first image's dimensions.
    w_src, h_src = images[0].size
    # Cross-products compare aspect ratios without float division.
    w_dst_h_src = w_dst * h_src
    w_src_h_dst = w_src * h_dst
    if w_dst_h_src == w_src_h_dst:
        # Same aspect ratio: a plain resize (if needed) suffices.
        if w_dst != w_src:
            for i in range(len(images)):
                images[i] = images[i].resize((w_dst, h_dst), Image.BILINEAR)
        return images if ret_as_list else images[0]
    r_dst = w_dst / h_dst
    # Largest source-space window with the destination aspect ratio.
    if w_dst_h_src > w_src_h_dst:
        w_dst_fit = w_src
        h_dst_fit = math.ceil(w_src / r_dst)
    else:
        w_dst_fit = math.ceil(h_src * r_dst)
        h_dst_fit = h_src
    if mask_rect:
        x1, y1, x2, y2 = mask_rect
        w, h = x2 - x1, y2 - y1
        if w_dst_fit < w or h_dst_fit < h:
            # The region of interest does not fit in the crop window.
            if not crop:
                return None
            # Shrink the rectangle symmetrically to the window size.
            if w_dst_fit < w:
                x1 += (w - w_dst_fit) // 2
                x2 = x1 + w_dst_fit
                w = w_dst_fit
            if h_dst_fit < h:
                y1 += (h - h_dst_fit) // 2
                y2 = y1 + h_dst_fit
                h = h_dst_fit
        # Center the crop window on the (possibly shrunk) rectangle,
        # clamped to the top-left image border.
        x1_crop = max(x1 - (w_dst_fit - w) // 2, 0)
        y1_crop = max(y1 - (h_dst_fit - h) // 2, 0)
    else:
        # No mask: center the crop window in the image.
        x1_crop = (w_src - w_dst_fit) // 2
        y1_crop = (h_src - h_dst_fit) // 2
    x2_crop = x1_crop + w_dst_fit
    y2_crop = y1_crop + h_dst_fit
    rect_crop = x1_crop, y1_crop, x2_crop, y2_crop
    for i in range(len(images)):
        images[i] = images[i].crop(rect_crop)
        if w_dst != w_dst_fit:
            images[i] = images[i].resize((w_dst, h_dst), Image.BILINEAR)
    return images if ret_as_list else images[0]
def tile_images(images, size=None, margin=None):
    """Arrange *images* in a grid on a white canvas and return the mosaic.

    When *size* is falsy the canvas size is derived from the first image's
    dimensions and the image count (capped at 1600x1200).  *margin* is the
    gap in pixels between cells.

    NOTE(review): if a *size* is passed but *margin* is left None, the
    ``width - margin`` arithmetic below will raise TypeError — presumably
    callers always pass both; confirm.
    """
    n = len(images)
    if not size:
        w, h = images[0].size
        if not margin:
            margin = max(w // 10, 10)
        # Choose column count so the grid roughly matches the cell aspect.
        n_hor = math.ceil(math.sqrt(n * w / h))
        n_ver = math.ceil(n / n_hor)
        size = ((w + margin) * n_hor + margin, (h + margin) * n_ver + margin)
        if (size[0] > 1600 or size[1] > 1200):
            size = (1600, 1200)
    width, height = size
    # Recompute the grid for the final canvas size.
    n_hor = math.ceil(math.sqrt(n * width / height))
    n_ver = math.ceil(n / n_hor)
    wc = (width - margin) // n_hor
    hc = (height - margin) // n_ver
    w = wc - margin
    h = hc - margin
    res = Image.new('RGB', (width, height), color='white')
    x_ind, x_offset, y_offset = 0, margin, margin
    for i, image in enumerate(images, 1):
        image = image.resize((w, h), Image.BILINEAR)
        res.paste(image, (x_offset, y_offset))
        # Advance across the row; wrap to the next row at the last column.
        x_ind = (x_ind + 1) % n_hor
        if x_ind == 0:
            x_offset = margin
            y_offset += hc
        else:
            x_offset += wc
        x_offset = math.ceil(x_offset)
        y_offset = math.ceil(y_offset)
    return res
def img_white_balance(img, white_ratio):
    """Rescale each channel of *img* (H x W x C array) in place and return it.

    For every channel the value is multiplied by the spread between the
    ``100 - white_ratio`` and ``white_ratio`` percentiles, divided by 255.

    NOTE(review): this formula scales but never subtracts the channel
    minimum, which is unusual for white balancing — confirm intent.
    """
    n_channels = img.shape[2]
    for ch in range(n_channels):
        hi = np.percentile(img[:, :, ch], 100 - white_ratio)
        lo = np.percentile(img[:, :, ch], white_ratio)
        img[:, :, ch] = (hi - lo) * (img[:, :, ch] / 255.0)
    return img
def find_mask_rect(mask_img):
    """Return the tight bounding box of the truthy pixels in a 2-D mask.

    Returns ``(x_min, y_min, x_max, y_max)`` in pixel coordinates (all four
    bounds inclusive), or ``None`` when the mask has no set pixels.

    Replaces the original pure-Python per-pixel double loop (O(w*h) Python
    iterations) with a single vectorized numpy pass — identical results.
    """
    ys, xs = np.nonzero(mask_img)
    if xs.size == 0:
        return None
    # Cast to plain ints to match the original return types.
    return (int(xs.min()), int(ys.min()), int(xs.max()), int(ys.max()))
def pprint(obj):
    """Pretty-print *obj* via the module-level ``PP`` printer.

    NOTE: intentionally shadows the stdlib ``pprint`` module name within
    this module's namespace.
    """
    PP.pprint(obj)
def prefixed(*parts):
    """Return a path-joiner anchored at the directory built from *parts*.

    The returned callable joins its own arguments under that fixed base,
    e.g. ``p = prefixed('/data', 'raw'); p('f.csv') -> '/data/raw/f.csv'``.
    """
    base = os.path.join(*parts)

    def join_under(*args):
        return os.path.join(base, *args)

    return join_under
| [
"mikhail.burakov@braingarden.ai"
] | mikhail.burakov@braingarden.ai |
9490176895b2ae659e3f07614b9df6cb3227b13d | 9e1853b14df342ced541cfa03de21a29b90928d6 | /src/prefect/tasks/monte_carlo/client.py | fa51f35a3fd1f1b15b7a832eb92e0bdd06a4d9ea | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | Fraznist/prefect | 0f2200c7b337440ffab90ab0c8b323055d651ab6 | 96183e95ad069c927799a6a272bc3d9a9cce3197 | refs/heads/master | 2023-04-14T08:34:52.007193 | 2022-03-16T22:10:17 | 2022-03-16T22:10:17 | 251,570,909 | 2 | 0 | Apache-2.0 | 2023-04-08T08:56:51 | 2020-03-31T10:29:40 | Python | UTF-8 | Python | false | false | 8,033 | py | import pendulum
import requests
from typing import Any, Dict, List
from prefect.utilities.logging import get_logger
from prefect.utilities.graphql import format_graphql_request_error
from prefect.exceptions import ClientError
class MonteCarloClient:
    """Minimal GraphQL client for the Monte Carlo data-observability API.

    Authenticates every request with the API key id/token pair via the
    ``x-mcd-id`` / ``x-mcd-token`` headers.
    """

    def __init__(
        self,
        api_key_id: str,
        api_token: str,
    ) -> None:
        self.api_key_id = api_key_id
        self.api_token = api_token
        self.logger = get_logger()
        self._api_url = "https://api.getmontecarlo.com/graphql"

    def _send_graphql_request(
        self, query: str, variables: dict = None
    ) -> Dict[str, Any]:
        """POST a GraphQL *query* with *variables*; return the parsed JSON.

        Raises ClientError with a formatted GraphQL message on HTTP 400;
        re-raises other HTTP errors unchanged.
        """
        response = requests.post(
            url=self._api_url,
            json=dict(query=query, variables=variables),
            headers={
                "x-mcd-id": self.api_key_id,
                "x-mcd-token": self.api_token,
                "Content-Type": "application/json",
            },
        )
        # Check if request returned a successful status
        try:
            response.raise_for_status()
        except requests.HTTPError as exc:
            if response.status_code == 400:
                # Create a custom-formatted err message for graphql errors which always
                # return a 400 status code and have "query" in the parameter dict
                try:
                    graphql_msg = format_graphql_request_error(response)
                except Exception:
                    # Fallback to a general message
                    graphql_msg = (
                        "This is likely caused by a poorly formatted GraphQL query or "
                        "mutation but the response could not be parsed for more details"
                    )
                raise ClientError(f"{exc}\n{graphql_msg}") from exc
            # Server-side and non-graphql errors will be raised without modification
            raise
        response = response.json()
        self.logger.debug(
            "Response: %s for request %s with variables %s", response, query, variables
        )
        return response

    def get_resources(self) -> List[Dict[str, Any]]:
        """Return all Monte Carlo resources visible to this API key."""
        response = self._send_graphql_request(
            query="""
            query {
              getResources {
                name
                type
                id
                uuid
                isDefault
                isUserProvided
              }
            }
            """
        )
        return response["data"]["getResources"]

    def create_or_update_tags_for_mcon(
        self, key: str, value: str, mcon: str
    ) -> Dict[str, Any]:
        """Upsert a key/value property (tag) on the object identified by *mcon*.

        Returns the id of the created/updated object property.
        """
        response = self._send_graphql_request(
            query="""
            mutation($mcon_id: String!, $key: String!, $value: String!) {
              createOrUpdateObjectProperty(mconId: $mcon_id,
                propertyName: $key, propertyValue: $value) {
                objectProperty {
                  id
                }
              }
            }
            """,
            variables=dict(mcon_id=mcon, key=key, value=value),
        )
        return response["data"]["createOrUpdateObjectProperty"]["objectProperty"]["id"]

    def create_or_update_lineage_node(
        self,
        node_name: str,
        object_id: str,
        object_type: str,
        resource_name: str,
    ):
        """Upsert a lineage node; returns its MCON identifier."""
        response = self._send_graphql_request(
            query="""
            mutation($node_name: String!, $object_id: String!, $object_type: String!,
                $resource_name: String! ) {
              createOrUpdateLineageNode(
                name: $node_name,
                objectId: $object_id,
                objectType: $object_type,
                resourceName: $resource_name,
              ){
                node{
                  nodeId
                  mcon
                }
              }
            }
            """,
            variables=dict(
                node_name=node_name,
                object_id=object_id,
                object_type=object_type,
                resource_name=resource_name,
            ),
        )
        return response["data"]["createOrUpdateLineageNode"]["node"]["mcon"]

    def create_or_update_lineage_node_with_multiple_tags(
        self,
        node_name: str,
        object_id: str,
        object_type: str,
        resource_name: str,
        tags: List[Dict[str, str]],
    ) -> str:
        """Upsert a lineage node together with its property tags.

        *tags* is a list of ObjectPropertyInput dicts.  Returns the node MCON.
        """
        response = self._send_graphql_request(
            query="""
            mutation($node_name: String!, $object_id: String!, $object_type: String!,
                $resource_name: String!, $tags: [ObjectPropertyInput]
            ) {
              createOrUpdateLineageNode(
                name: $node_name,
                objectId: $object_id,
                objectType: $object_type,
                resourceName: $resource_name,
                properties: $tags
              ){
                node{
                  mcon
                }
              }
            }
            """,
            variables=dict(
                node_name=node_name,
                object_id=object_id,
                object_type=object_type,
                resource_name=resource_name,
                tags=tags,
            ),
        )
        return response["data"]["createOrUpdateLineageNode"]["node"]["mcon"]

    def create_or_update_resource(self, resource_name: str, resource_type: str):
        """Upsert a (non-default) resource; returns its UUID."""
        response = self._send_graphql_request(
            query="""
            mutation($resource_name: String!, $resource_type: String!) {
              createOrUpdateResource(
                isDefault: false,
                name: $resource_name,
                type: $resource_type,
              ) {
                resource {
                  uuid
                }
              }
            }
            """,
            variables=dict(resource_name=resource_name, resource_type=resource_type),
        )
        return response["data"]["createOrUpdateResource"]["resource"]["uuid"]

    def create_or_update_lineage_edge(
        self, source: dict, destination: dict, expire_at: str = None
    ):
        """Upsert a lineage edge between *source* and *destination* nodes.

        Each endpoint dict needs ``object_id``, ``object_type`` and
        ``resource_name`` keys.  *expire_at* defaults to 24h from now
        (ISO-8601).  Returns the edge id.
        """
        if expire_at is None:
            expire_at = pendulum.now().add(days=1).isoformat()
        response = self._send_graphql_request(
            query="""
            mutation($destination_object_id: String!,
                $destination_object_type: String!,
                $destination_resource_name: String!,
                $source_object_id: String!, $source_object_type: String!,
                $source_resource_name: String!, $expire_at: DateTime) {
              createOrUpdateLineageEdge(
                destination: {
                  objectId: $destination_object_id
                  objectType: $destination_object_type
                  resourceName: $destination_resource_name
                }
                source: {
                  objectId: $source_object_id
                  objectType: $source_object_type
                  resourceName: $source_resource_name
                }
                expireAt: $expire_at
              ){
                edge{
                  edgeId
                }
              }
            }
            """,
            variables=dict(
                destination_object_id=destination["object_id"],
                destination_object_type=destination["object_type"],
                destination_resource_name=destination["resource_name"],
                source_object_id=source["object_id"],
                source_object_type=source["object_type"],
                source_resource_name=source["resource_name"],
                expire_at=expire_at,
            ),
        )
        return response["data"]["createOrUpdateLineageEdge"]["edge"]["edgeId"]
| [
"noreply@github.com"
] | Fraznist.noreply@github.com |
91f40cdb27689ed23d26bf5c7eedd39430af985b | 407e5a2ff25d9e769ca2707a2d67b72372fd284e | /lang/nl.py | 0ce3c5822360ad23eb62ded1dda66a678b85bac8 | [
"MIT"
] | permissive | 2099365072/AMM | 076a7c7ae955c7b663359c26f5c4be4271a46600 | e09c03245c3298acbce02dd3c288147572598722 | refs/heads/master | 2020-03-23T11:29:39.175696 | 2018-03-21T22:11:32 | 2018-03-21T22:11:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | """
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Audiophiles Music Manager Build 20180119 VER0.0.0PREALPHA *
* (C)2017 Mattijs Snepvangers pegasus.ict@gmail.com *
* /lang/nl.py Dutch language file VER0.0.0PREALPHA *
* License: MIT Please keep my name in the credits *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
"""
# Dutch UI string table, keyed by message identifier.
# NOTE(review): ``lang`` is never defined in this file — presumably a dict
# created by the code that loads this language module; confirm against the
# loader before importing this file standalone.
lang['init'] = "Even geduld aub, initialiseren."
lang['yes'] = "ja"
lang['no'] = "nee"
lang['wait'] = "Even geduld aub..."
lang['trydb'] = "Proberen te verbinden met de database..."
lang['create'] = "aanmaken"
lang['select'] = "selecteren"
lang['ok'] = "ok"
lang['cancel'] = "annuleren"
#lang['']
| [
"pegasus.ict@gmail.com"
] | pegasus.ict@gmail.com |
610e5269ddf735a2c71b8377c209889b1a33125f | ac759792ec27bd354864d7ed4b6acdc2bed0f41d | /intro/adjacentElementsProduct.py | 04a0d3e1df7a0bbfa93c929bc201789e578b3870 | [] | no_license | Junist96/codefights | 1dd8ec27ac73c8ab10c859822700b5df9ac27192 | e30cde9ab0b1c474d5f629220fe382dca36690c7 | refs/heads/master | 2020-03-22T11:02:16.115613 | 2018-01-30T02:11:31 | 2018-01-30T02:11:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | def adjacentElementsProduct(inputArray):
ia = inputArray
return max([ia[i] * ia[i+1] for i in range(len(ia)-1)])
| [
"rev.hyeok@gmail.com"
] | rev.hyeok@gmail.com |
083a55ab9b3726ba95cfc1f8990e1edfd802881a | b969d8975d04aae145aba754f52c88ff126eaabe | /mysite/uploads/migrations/0001_initial.py | 4d897b33085494883111829afc0577f42a202376 | [] | no_license | Florent-Vanhollebeke/exercice_martin | 2c937bfa61fe47534536ad29cd1c8f5faf655af8 | 796ca2ee37a9dc021c2ca972e5cc3a0c014f2905 | refs/heads/main | 2023-03-19T12:12:12.613657 | 2021-03-04T12:46:26 | 2021-03-04T12:46:26 | 343,694,522 | 0 | 0 | null | 2021-03-04T12:46:27 | 2021-03-02T08:11:28 | Python | UTF-8 | Python | false | false | 1,611 | py | # Generated by Django 3.1.7 on 2021-03-03 16:35
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial migration for the uploads app.

    Creates ``Person`` (uuid + first/last name) and ``File`` (titled upload
    belonging to a Person, cascade-deleted with it).  Auto-generated by
    Django; do not edit the operations by hand — create a new migration
    instead.
    """

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Person",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # Stable external identifier, assigned once at creation.
                ("uuid", models.UUIDField(default=uuid.uuid4, editable=False)),
                ("first_name", models.CharField(max_length=200)),
                ("last_name", models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name="File",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("title", models.CharField(max_length=50, null=True)),
                # Uploaded files land under MEDIA_ROOT/document/.
                ("content", models.FileField(upload_to="document/")),
                (
                    "person",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="uploads.person"
                    ),
                ),
            ],
        ),
    ]
| [
"florent.vanhollebeke@gmail.com"
] | florent.vanhollebeke@gmail.com |
d9cb5d3c7cf83ca0f9e171e92e251dd5ad3a0635 | a8830481cdb33ff7d0945c95036f586b5cb0894b | /maketemplates/plot_single_SESN.py | 0413c2e793f8d607295ebd13adb509e4dac75f8f | [] | no_license | fedhere/GPSNtempl | b6f2c4824a32387d6f767335448e44637f362878 | cdcad219f0f3c03d3d9856f64c0bd104535c021d | refs/heads/main | 2023-07-12T01:41:32.367338 | 2023-06-26T00:50:09 | 2023-06-26T00:50:09 | 87,359,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,306 | py | import os
import pylab as pl
import matplotlib.pyplot as plt
import sys
import pandas as pd
import numpy as np
from matplotlib.ticker import (MultipleLocator)
# s = json.load( open(str(os.getenv ('PUI2015'))+"/fbb_matplotlibrc.json") )
# pl.rcParams.update(s)
cmd_folder = os.path.realpath(os.getenv("SESNCFAlib"))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import snclasses as snstuff
import matplotlib as mpl
mpl.use('agg')
# Global plotting defaults: square 10x10 figures, Times New Roman serif.
pl.rcParams['figure.figsize']=(10,10)
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']

# Photometric bands to plot ('p' suffix = primed SDSS-style filters;
# m2/w1/w2 are Swift UVOT bands).
bands = ['U','B','V', 'g', 'R', 'I', 'rp','ip','up','J','H','K','m2','w1','w2']

# Matplotlib color per band, used for the light-curve markers below.
color_bands = {'U':'k','up':'k','B':'#0066cc','V':'#47b56c','R':'#b20000','I':'m',
               'rp':'#b20000','ip':'m', 'g': '#317e4b',
               'J':'#4F088A','H':'#FFB700','K':'#A4A4A4',
               'm2':'#708090', 'w2':'#a9b2bc', 'w1':'#434d56'}

# Command-line toggles (overridden in __main__ below).
plot_vmax = False
allbands = False

# Default SN sample: every SN listed in the SESN essentials catalog.
allsne = pd.read_csv(os.getenv("SESNCFAlib") +
                     "/SESNessentials.csv")['SNname'].values
if __name__ == '__main__':
    # Per-SN, per-band light-curve plotter.  CLI arguments:
    #   name=SN1,SN2,...  restrict the SN sample
    #   band=B1,B2,...    restrict the band list
    #   vmax              mark V-band maximum
    #   allbands          plot every band
    if len(sys.argv) > 1:
        for arg in sys.argv:
            if arg.startswith('name='):
                allsne = arg.split('=')[1].split(',')
            elif arg.startswith('band='):
                bands = arg.split('=')[1].split(',')
            elif arg.startswith('vmax'):
                plot_vmax = True
            elif arg.startswith('allbands'):
                allbands = True

    for sn in allsne:
        for b in bands:
            # Map primed-band labels to the single-letter keys used by
            # the photometry dictionaries.
            bb = b
            if b == 'ip':
                bb = 'i'
            if b == 'up':
                bb = 'u'
            if b == 'rp':
                bb = 'r'

            try:
                thissn = snstuff.mysn(sn, addlit=True)
            except AttributeError:
                continue
            # NOTE(review): these "bad sn" checks only print — there is no
            # ``continue``, so processing proceeds anyway; confirm intent.
            if len(thissn.optfiles) + len(thissn.fnir) == 0:
                print("bad sn")

            # read metadata for SN
            thissn.readinfofileall(verbose=False, earliest=False, loose=True)
            thissn.printsn()

            # check SN is ok and load data
            if thissn.Vmax is None or thissn.Vmax == 0 or np.isnan(thissn.Vmax):
                print("bad sn")
            print(" starting loading ")
            # print (os.environ['SESNPATH'] + "/finalphot/*" + \
            #    thissn.snnameshort.upper() + ".*[cf]")
            # print (os.environ['SESNPATH'] + "/finalphot/*" + \
            #    thissn.snnameshort.lower() + ".*[cf]")
            # print( glob.glob(os.environ['SESNPATH'] + "/finalphot/*" + \
            #    thissn.snnameshort.upper() + ".*[cf]") + \
            #    glob.glob(os.environ['SESNPATH'] + "/finalphot/*" + \
            #    thissn.snnameshort.lower() + ".*[cf]") )

            lc, flux, dflux, snname = thissn.loadsn2(verbose=False)

            thissn.setphot()
            thissn.getphot()
            thissn.setphase()
            thissn.sortlc()
            # thissn.printsn()

            # check that it is k
            if np.array([n for n in thissn.filters.values()]).sum() == 0:
                print("bad sn")

            if len(thissn.photometry[bb]['mjd']) == 0:
                print('No photometry for '+ sn+ ' in band '+b)
                continue

            xmin = thissn.photometry[bb]['mjd'].min()

            # Normalize MJD vs JD bookkeeping: Vmax may be stored as MJD or
            # full JD; shift x to a common "JD - 2455000.5" axis and x2 to
            # phase (days from Vmax).
            if xmin - thissn.Vmax < -1000:
                x = thissn.photometry[bb]['mjd'] - 55000.5#- thissn.Vmax + 2400000.5
                x2 = thissn.photometry[bb]['mjd'] - thissn.Vmax + 2400000.5
                # vmax = thissn.Vmax - 55000.5
            elif xmin - thissn.Vmax > 1000:
                x = thissn.photometry[bb]['mjd'] - 2455000.5 #- thissn.Vmax - 2400000.5
                x2 = thissn.photometry[bb]['mjd'] - thissn.Vmax - 2400000.5
                # vmax = thissn.Vmax - 2455000.5
            else:
                x = thissn.photometry[bb]['mjd'] #- thissn.Vmax
                x2 = thissn.photometry[bb]['mjd'] - thissn.Vmax
                # vmax = thissn.Vmax
            if thissn.Vmax > 2400000:
                vmax = thissn.Vmax - 2455000.5
            else:
                vmax = thissn.Vmax - 55000.5
            dvmax = thissn.dVmax
            y = thissn.photometry[bb]['mag']
            # x2 = thissn.photometry[bb]['mjd'] -
            # y = y.min() - y
            yerr = thissn.photometry[bb]['dmag']

            fig = plt.figure(figsize=(14, 14))
            ax = plt.gca()
            # Secondary x-axis on top showing phase relative to Vmax.
            ax2 = ax.twiny()

            # plt.errorbar(x, y, yerr = yerr, color = color_bands[b],fmt = '.', ls = '-', linewidth = 1)
            plt.errorbar(x, y, yerr=yerr, color=color_bands[b], fmt='^', linewidth=1, markersize=20, label=bb)

            ax.yaxis.get_ticklocs(minor=True)
            ax.minorticks_on()
            # Magnitudes: brighter is up.
            ax.invert_yaxis()
            ax.tick_params(axis="both", direction="in", which="major", right=True, top=True, size=10, labelsize=40,
                           width=2)
            ax2.tick_params(axis="both", direction="in", which="major", right=True, top=True, size=10, labelsize=40,
                            width=2)
            ax.tick_params(axis="both", direction="in", which="minor", right=True, left=True,
                           bottom=True, top=True, size=6, width=1)
            ax2.tick_params(axis="both", direction="in", which="minor", right=True, left=True,
                            bottom=True, top=True, size=6, width=1)
            ax.xaxis.set_minor_locator(MultipleLocator(5))

            ax2.set_xticks([vmax-10, vmax, vmax+10, vmax+20, vmax+30, vmax+40])
            # ax2.set_xbound(ax.get_xbound())
            ax2.set_xticklabels([-10, 0, 10, 20, 30, 40])
            ax2.set_xlabel('Phase (days)', size=50)
            plt.legend(loc='upper right', ncol=2, prop={'size': 35})
            # Vertical marker at V-band maximum.
            plt.axvline(vmax, color='grey', linewidth=5)
            # ax.axvline(vmax - dvmax, color='grey')
            # ax.axvline(vmax + dvmax, color='grey')
            ax.set_xlabel('JD - 2455000.5 (days)', size=50)
            ax.set_ylabel('Magnitude', size=50)
            plt.savefig("outputs/Plot_lc_%s_%s.png" % (sn, b))
"somayeh.khakpash@gmail.com"
] | somayeh.khakpash@gmail.com |
4bbfc08eeae4172cd81dd50a0ed35d50ab7a920f | e344705eb6b8bae2e06cd658f540219f8b6e7973 | /web-gaode/model/models.py | 4312acd8233615fac60c0dcab66ba88a3016712e | [] | no_license | dustw/my-scrapy | 023211f1d32b2883ef58249ec528ec15658db86d | 7263680cfa16e2615187b745c97fbc3cacf3780a | refs/heads/master | 2020-04-11T21:13:45.622353 | 2017-08-23T02:49:31 | 2017-08-23T02:49:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | import pymysql
class Models(object):
    """Thin wrapper around a local MySQL connection.

    ``create_table`` clones the ``template`` table under a new name unless a
    table of that name already exists.

    NOTE(review): the table name is interpolated straight into SQL — only
    pass trusted identifiers.
    """

    def __init__(self, database, user, password):
        self.database = database
        self.user = user
        self.password = password
        self.conn = pymysql.connect(host='localhost', port=3306, user=self.user, passwd=self.password, db=self.database, charset='utf8')

    def create_table(self, table_name):
        """Create *table_name* like ``template`` if absent.

        Returns ``{'status': 0, 'msg': 'success'}`` on creation, or
        ``{'status': 1, 'msg': 'exists'}`` when the table already exists.
        """
        result = {'status': 0, 'msg': ""}
        cursor = self.conn.cursor()
        cursor.execute("select table_name from information_schema.tables")
        # information_schema lists tables across all schemas.
        for row in cursor.fetchall():
            if table_name in row:
                result['status'] = 1
                result['msg'] = 'exists'
        if result['status'] == 0:
            cursor.execute("create table %s like template" % table_name)
            result['msg'] = "success"
        self.conn.commit()
        return result

    def close_db(self):
        """Close the underlying MySQL connection."""
        self.conn.close()
class MDPModels(object):
    """MySQL helper for the MDP database.

    NOTE(review): byte-for-byte duplicate of ``Models`` above — presumably
    kept separate for per-database configuration; consider consolidating.
    """

    def __init__(self, database, user, password):
        self.database = database
        self.user = user
        self.password = password
        self.conn = pymysql.connect(host='localhost', port=3306, user=self.user, passwd=self.password, db=self.database, charset='utf8')

    def create_table(self, table_name):
        """Create *table_name* like ``template`` if absent.

        Returns ``{'status': 0, 'msg': 'success'}`` on creation, or
        ``{'status': 1, 'msg': 'exists'}`` when the table already exists.
        """
        result = {'status': 0, 'msg': ""}
        cursor = self.conn.cursor()
        cursor.execute("select table_name from information_schema.tables")
        for row in cursor.fetchall():
            if table_name in row:
                result['status'] = 1
                result['msg'] = 'exists'
        if result['status'] == 0:
            # Table name is interpolated directly — trusted input only.
            cursor.execute("create table %s like template" % table_name)
            result['msg'] = "success"
        self.conn.commit()
        return result

    def close_db(self):
        """Close the underlying MySQL connection."""
        self.conn.close()
"azraelkuan@gmail.com"
] | azraelkuan@gmail.com |
1b7d73f05488c156b0a5cde7e0c9deb9525698b6 | 3d99c01787ce8b88ac366bedc6c5ef9b50bdfe08 | /occurances of W and C in 1line.py | 42fb4458bf00f79a9e3750b9f1b67f62eae1fd6b | [] | no_license | Mu-Wahba/Python-Exercises | 7128f4d0f1833c0304a8701d0ef6e5b194c368a6 | a8c1197406ac25f835f14c40b1f8a827737ae3e1 | refs/heads/master | 2021-09-01T04:17:08.010766 | 2017-12-24T18:21:53 | 2017-12-24T18:21:53 | 115,280,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | def word(line,w)
print({w:line.count(w) for w in line}
| [
"noreply@github.com"
] | Mu-Wahba.noreply@github.com |
86ea046eff9a54bf6301b03c7bb461a5ec0e0706 | c245e7baf6342159537f37caf78a1af05894d8d6 | /Sıralama.py | 2654a1d0d91cf8c20c3ff59cfb686527faff9182 | [
"MIT"
] | permissive | Barisbozbas/KelimeAlgilamaV1 | c7ee69c660e8c27965412d1a7ac935a6f596dec4 | 8f9094f9b059d154be47e7e4e7588746c5dbc2cf | refs/heads/main | 2023-02-01T05:01:30.374331 | 2020-12-20T13:50:39 | 2020-12-20T13:50:39 | 323,078,725 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | import numpy as np
import pandas as pd
import cv2
import pytesseract
from PIL import ImageGrab
import time
from numpy import savetxt
pytesseract.pytesseract.tesseract_cmd = r'D:\Tesseract-OCR\tesseract.exe'
img = cv2.imread('bir.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
boxes= pytesseract.image_to_data(img)
# print(boxes)
#b listeliyerek b sütün boxes.splitlines yaparak ise liste içerisinde ki satırları parçalıyoruz a da sayaç işlevinde
for a,b in enumerate(boxes.splitlines()):
# print(b)
if a!=0:
b = b.split()
if len(b)==12:
x,y,w,h = int(b[6]),int(b[7]),int(b[8]),int(b[9])
cv2.putText(img,b[11],(x,y-5),cv2.FONT_HERSHEY_SIMPLEX,1,(50,50,255),2)
c=print(b[11])
c=cv2.rectangle(img, (x,y), (x+w, y+h), (50, 50, 255), 2)
cv2.imshow('img', img)
cv2.waitKey(0)
# boxes=pytesseract.image_to_data(img)
#Harf harf tespit
# for x,b in enumerate(boxes.splitlines()):
# if x!=0:
# b=b.split()
# print(b)
# if len(b)==12:
# x, y, w, h = int(b[6]), int(b[7]), int(b[8]), int(b[9])
# # hImg, wImg,_ = img.shape
# # boxes = pytesseract.image_to_boxes(img)
# # for b in boxes.splitlines():
# # print(b)
# # b = b.split(' ')
# # print(b)
# # x, y, w, h = int(b[1]), int(b[2]), int(b[3]), int(b[4])
# # cv2.rectangle(img, (x,hImg- y), (w,hImg- h), (50, 50, 255), 1)
# # cv2.putText(img,b[0],(x,hImg- y+25),cv2.FONT_HERSHEY_SIMPLEX,1,(50,50,255),2)
| [
"b.bosbas@gmail.com"
] | b.bosbas@gmail.com |
df8deb825c797a1ca96645c2dd6a746572243100 | 584b44d855235f20a66eab6d7680c4d686e9b8fc | /mas_framework/__init__.py | ec89d41a586df25d6a0521f6cea4ea1f4889d472 | [] | no_license | dpathania1/DMASF | a06bf4f9ead584b35e8fc719aefeec4c67a03aa3 | 2e4f74584cea5bdc115436b5ee12b17d600d75ff | refs/heads/master | 2021-01-21T00:17:13.801241 | 2015-07-22T01:28:35 | 2015-07-22T01:38:43 | 39,477,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | import api
import db
from constants import *
from defagenthandler import *
from defworldhandler import *
| [
"deepika.pathania@iamplus.com"
] | deepika.pathania@iamplus.com |
784008aa176130df12041add5e7389e7f0ecf13d | 3cb6c930f29008a0e57bab24cb9b1e9f08f98ef2 | /production/modules/stdlib/python_source/ref_code/dapps_web_config_PIA_template.py | 762ba99448e2912f5a73e3be13b446ad19a36de7 | [] | no_license | email2dba/peoplesoft_config_puppet | 0b8caa62b2f9bd267f3e6e0d2ad10077577db382 | 1b74727e6eb722a39bd0addedfa60fca2879c481 | refs/heads/main | 2023-06-11T19:08:56.292943 | 2021-06-28T22:42:49 | 2021-06-28T22:42:49 | 381,177,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,152 | py |
import os
#Please set eviroment before running this
#. /psoft/pia/pia92/ndv/webserv/NDV_WEB11_1/bin/setEnv.sh
#/apps/mwhome/weblogic/weblogic1213_dapps-web11/wlserver/common/bin/wlst.sh ./dapps_web_config_PIA_template.py
#
##connect('websystem','dapps$2020','t3://dapps-web11.dev.amsd.company.com:14081')
connect('_ADMIN_ID_','_ADMIN_PWD_','_ADMIN_URL_')
edit()
startEdit()
cd('/')
cd ('/Servers/PIA')
set('ListenAddress','_LISTENADDR_')
set('ListenPortEnabled' , 'False')
save()
cd('/Servers/PIA')
##cmo.setCustomIdentityKeyStore("piaconfig/keystore/pskey")
##cmo.setCustomTrustKeyStore("piaconfig/keystore/pskey")
set('CustomIdentityKeyStorePassPhrase', '_IDN_KEYSTORE_PWD_')
set('CustomTrustKeyStorePassPhrase', '_TRST_KEYSTORE_PWD_')
cmo.setKeyStores('CustomIdentityAndCustomTrust')
#cmo.setCustomIdentityKeyStoreType('JKS')
#cmo.setCustomTrustKeyStoreType('JKS')
save()
cd('/Servers/PIA/SSL/PIA')
cmo.setServerPrivateKeyAlias('_PRVT_KEY_ALIAS_')
set('ServerPrivateKeyPassPhrase', '_PRVT_KEY_PWD_')
save()
cd('/')
cd ('/AppDeployments/peoplesoft')
set('Targets',jarray.array([ObjectName('com.bea:Name=PIA,Type=Server'), ObjectName('com.bea:Name=WebLogicAdmin,Type=Server'), ObjectName('com.bea:Name=PSEMHUB,Type=Server'), ObjectName('com.bea:Name=RPS,Type=Server')], ObjectName))
save()
cd('/')
cd ('/AppDeployments/peoplesoft/SubDeployments')
set('Targets',jarray.array([ObjectName('com.bea:Name=PIA,Type=Server'), ObjectName('com.bea:Name=WebLogicAdmin,Type=Server'), ObjectName('com.bea:Name=PSEMHUB,Type=Server'), ObjectName('com.bea:Name=RPS,Type=Server')], ObjectName))
save()
cd('/')
cd ('/AppDeployments/peoplesoft/SubDeployments/PSEMHUB')
set('Targets',jarray.array([ObjectName('com.bea:Name=PIA,Type=Server'), ObjectName('com.bea:Name=WebLogicAdmin,Type=Server'), ObjectName('com.bea:Name=PSEMHUB,Type=Server'), ObjectName('com.bea:Name=RPS,Type=Server')], ObjectName))
save()
cd('/')
cd ('/AppDeployments/peoplesoft/SubDeployments/PSIGW')
set('Targets',jarray.array([ObjectName('com.bea:Name=PIA,Type=Server'), ObjectName('com.bea:Name=WebLogicAdmin,Type=Server'), ObjectName('com.bea:Name=PSEMHUB,Type=Server'), ObjectName('com.bea:Name=RPS,Type=Server')], ObjectName))
save()
cd('/')
cd ('/AppDeployments/peoplesoft/SubDeployments/PSINTERLINKS')
set('Targets',jarray.array([ObjectName('com.bea:Name=PIA,Type=Server'), ObjectName('com.bea:Name=WebLogicAdmin,Type=Server'), ObjectName('com.bea:Name=PSEMHUB,Type=Server'), ObjectName('com.bea:Name=RPS,Type=Server')], ObjectName))
save()
cd('/')
cd ('/AppDeployments/peoplesoft/SubDeployments/pspc')
set('Targets',jarray.array([ObjectName('com.bea:Name=PIA,Type=Server'), ObjectName('com.bea:Name=WebLogicAdmin,Type=Server'), ObjectName('com.bea:Name=PSEMHUB,Type=Server'), ObjectName('com.bea:Name=RPS,Type=Server')], ObjectName))
save()
cd('/')
cd ('/AppDeployments/peoplesoft/SubDeployments/')
set('Targets',jarray.array([ObjectName('com.bea:Name=PIA,Type=Server'), ObjectName('com.bea:Name=WebLogicAdmin,Type=Server'), ObjectName('com.bea:Name=PSEMHUB,Type=Server'), ObjectName('com.bea:Name=RPS,Type=Server')], ObjectName))
save()
activate()
exit()
| [
"email2dba@gmail.com"
] | email2dba@gmail.com |
2703f34cf65fa4f9917950dce1bd9579586c9655 | 525422435c4bcd14003e9669e66801d8ae5220b8 | /playground/playground.py | d92d0fc9ac7971d3833c8cfc6df2916b9e666e05 | [] | no_license | CMOW5/python101 | 6833f8b28cab3b17b0d8481cb4c3ef79416c4f8f | 6260e2a2b135809b684828b1faf355f54f96ae3e | refs/heads/master | 2020-04-14T13:45:23.591718 | 2019-01-04T20:02:08 | 2019-01-04T20:02:08 | 163,877,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | class MyClass:
"""A simple example class"""
i = 12345
def f(self):
return 'hello world'
print(MyClass.i)
MyClass.i = 'changed'
b = MyClass()
print(MyClass.i)
print(b.i)
MyClass.counter = 2
print(MyClass.counter) | [
"tec.cmos@gmail.com"
] | tec.cmos@gmail.com |
18fe578dc4619ac221feb6fd2d7db9595a61fcda | 5a99ef908f10d3796db6182d2e7bff5b16686eff | /whichday/whichday_v4.0.py | f15713105e5add064d03afe9b763d0fccff32add | [] | no_license | zhihui2015/learnPython | 33b395411375f02a335962d15994820657f53ff6 | 4f99a7c9b7e7921e9d7eff8233a98d333eb32216 | refs/heads/master | 2020-06-22T23:06:28.638965 | 2019-07-23T12:21:49 | 2019-07-23T12:21:49 | 198,425,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | """
作者:郑智慧
版本:4.0
日期:2019/7/13
功能:输入日期判断第几天-元组tuple使用
2.0功能:用列表list实现
3.0功能:用集合set实现
4.0功能:用字典dict实现
"""
import datetime as dt
def is_leap_year(year):
    """
    Determine whether a year is a leap year (Gregorian rules).
    :param year: calendar year as an int
    :return: True when the year is a leap year, False otherwise
    """
    # Divisible by 400, or divisible by 4 but not by 100.
    return year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)
def main():
    """
    Entry point: read a yyyy/mm/dd date from stdin and print which day of
    the year it is.
    :return: None
    """
    raw_date = input('请输入日期(yyyy/mm/dd): ')
    parsed = dt.datetime.strptime(raw_date, '%Y/%m/%d')
    print(parsed)
    # Days in each month of a common (non-leap) year, January .. December.
    month_lengths = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    day_of_year = parsed.day + sum(month_lengths[:parsed.month - 1])
    if parsed.month > 2 and is_leap_year(parsed.year):
        day_of_year += 1  # account for February 29th
    print('这是{}年的第{}天。'.format(parsed.year, day_of_year))
if __name__ == "__main__":
main()
| [
"zhihui.zheng@qq.com"
] | zhihui.zheng@qq.com |
5e128497813679539492963b6c4de706d0910067 | 5c223403f463a3441b73357e52fb5c58e0b1ec63 | /two_sum_python.py | 5c94002fba10db1431896aa05409a52c40d3c9e5 | [] | no_license | JasonWayne/leetcode | d99b5d6dc992eda52046d4f5eb681094e804d936 | 06074ee9fb42421e55bf03ee317eff987c549af4 | refs/heads/master | 2016-08-06T14:34:45.065009 | 2015-10-15T09:30:56 | 2015-10-15T09:30:56 | 22,906,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | class Solution:
# @return a tuple, (index1, index2)
def twoSum(self, num, target):
dic = {}
for i in range(len(num)):
try:
index1 = dic[target - num[i]]
index2 = i
if index1 < index2:
return (index1+1, index2+1)
else:
return (index2+1, index1+1)
except:
dic[num[i]] = i
def test():
    """Smoke test: find two entries of a sample list that sum to zero."""
    solver = Solution()
    answer = solver.twoSum([-3, 4, 3, 34, 3, 43], 0)
    print(answer)
| [
"wuwenjie0102@gmail.com"
] | wuwenjie0102@gmail.com |
85f5ed913181b6267d99589a640341b8e5440341 | 90962368ccbfd007ff81a45f3a20909fe84f0bd6 | /disciplines.py | 5357e13d7947965b2ccb58e4d64e02a766b979c2 | [] | no_license | JessBrunker/KniziaDecathlon | 0d7b031ebc5aa308167910a442466021a449667c | c615092c80ce206660c587a804c4926c8733572b | refs/heads/master | 2020-03-31T20:11:48.796544 | 2018-10-11T04:14:34 | 2018-10-11T04:14:34 | 152,529,553 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,537 | py | from player import Player
from discipline_descriptions import print_description
from scoring import print_scores, updateTotals
import decathlon
import os
def validateYesOrNo(text):
    '''Keep prompting until the answer is exactly 'y' or 'n'; return it.'''
    while text not in ('y', 'n'):
        text = input('Please enter either y or n: ')
    return text
def heights(players, discipline, min_height, max_height):
    '''Players roll to reach an increasing value, starting at min_height,
    and maxing out at max_height'''
    # Interactive event: the bar rises by 2 each round; a player who reports
    # a failed attempt is out for the rest of the event and keeps the last
    # height they mastered.
    # NOTE(review): the loop variable ``id`` shadows the builtin; unchanged here.
    print_description(discipline)
    stopped_count = 0 # number of players who have faulted
    scores = [{'failed': False, 'height': 0} for player in players]
    # loops through all heights
    for height in range(min_height, max_height+1, 2):
        print('Current height: {}'.format(height))
        # loops through players
        for id, player in enumerate(players):
            # player already failed -- skip them for the rest of the event
            if scores[id]['failed']:
                continue
            try_it = input('{} - attempt? y/n: '.format(player.name))
            try_it = validateYesOrNo(try_it)
            if try_it == 'y':
                success = input('Did you succeed? y/n: ')
                success = validateYesOrNo(success)
                if success == 'n':
                    scores[id]['failed'] = True
                    stopped_count += 1
                else:
                    scores[id]['height'] = height
        print()
        # all players failed -- no point raising the bar further
        if stopped_count == len(players):
            break
        # for showing the currently mastered heights
        os.system('clear')
        print_scores(players)
        print_description(discipline)
        print('Mastered heights:')
        for id, player in enumerate(players):
            if scores[id]['failed']: # only shows if player failed
                still_going = ' - FAILED'
            else:
                still_going = ''
            print('{}: {}{}'.format(
                player.name, scores[id]['height'], still_going))
        print('\n')
    # update scores in players dict
    for id, player in enumerate(players):
        player.scores[discipline] = scores[id]['height']
    os.system('clear')
    updateTotals(players)
def one_attempt(players, discipline, min_score, max_score):
    '''Each player gets a single attempt at the discipline; valid scores
    satisfy min_score <= score <= max_score.'''
    print_description(discipline)
    for athlete in players:
        raw = input('Score for {}: '.format(athlete.name))
        athlete.scores[discipline] = decathlon.validateInput(
            raw, minimum=min_score, maximum=max_score)
    os.system('clear')
    updateTotals(players)
def three_attempts(players, discipline, min_score, max_score):
    '''Every player gets three attempts; the best one is recorded.
    Valid scores satisfy min_score <= score <= max_score.'''
    print_description(discipline)
    for athlete in players:
        print('\n{} attempts'.format(athlete.name))
        best = 0
        for attempt_number in range(1, 4):
            entered = input('Attempt {}: '.format(attempt_number))
            entered = decathlon.validateInput(entered, minimum=min_score,
                                              maximum=max_score, invalid=True)
            if entered > best:
                best = entered
        athlete.scores[discipline] = best
        print()
    os.system('clear')
    updateTotals(players)
| [
"sloththing5@gmail.com"
] | sloththing5@gmail.com |
48ca4154ce4ec5d4d39946ed6370ae73ff6eb5cf | 7f4ca59b41d7ba124645de95fa47cd5b99f3c450 | /LearningBasic/BasicModel/mnist_with_summaries.py | b5d3e55ca3e053b0d4f7bccd980580aff13396cf | [] | no_license | awp4211/TensorFlowLearning | 7281b6dd4fbeff6f590501c63648acc1790cc4fe | e60c3fa2137f6b11ff5de35803c8c73bcfb0ef70 | refs/heads/master | 2020-04-10T15:56:06.396097 | 2017-03-19T14:35:49 | 2017-03-19T14:35:49 | 67,996,834 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,548 | py | # -*- coding: utf-8 -*-
"""
A simple MNIST classifier which displays summaries in TensorBoard
This is an unimpressive MNIST model,but it is a good example of using
tf.name_scope to make graph legible in the TensorBoard graph explorer,and of
naming summary tags so that they are grouped meaningfully in TensorBoard
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
'for unit testing.')
flags.DEFINE_integer('max_steps', 1000, 'Number of steps to run trainer.')
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_float('dropout', 0.9, 'Keep probability for training dropout.')
flags.DEFINE_string('data_dir', 'MNIST_data', 'Directory for storing data')
flags.DEFINE_string('summaries_dir', 'LOG', 'Summaries directory')
def train():
    """Build, train and evaluate a two-layer MNIST classifier while logging
    TensorBoard summaries (scalars, histograms, input images, run metadata).

    Reads hyper-parameters from the module-level FLAGS.
    """
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir,
                                      one_hot=True,
                                      fake_data=FLAGS.fake_data)
    sess = tf.InteractiveSession()
    with tf.name_scope('input'):
        x = tf.placeholder("float", [None, 784], name='x-input')
        y_ = tf.placeholder("float", [None, 10], name="y-input")
    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
        # NOTE(review): 'imput' is a typo in the summary tag; kept unchanged so
        # existing TensorBoard dashboards keep working.
        tf.image_summary('imput', image_shaped_input, 10)
    # We can't initialize these variables to 0 (the network would get stuck).
    def weight_variable(shape):
        """Weight variable initialized with small truncated-normal noise."""
        initial = tf.truncated_normal(shape=shape, stddev=0.1)
        return tf.Variable(initial)
    def bias_variable(shape):
        """Bias variable initialized to 0.1.

        BUG FIX: the original returned the bare tf.constant, so biases were
        never tf.Variables and could not be trained; wrap in tf.Variable as
        in the official TensorFlow tutorial.
        """
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)
    def variable_summaries(var, name):
        """Attach mean/stddev/min/max/histogram summaries to a tensor."""
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.scalar_summary('mean/' + name, mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.scalar_summary('sttdev/' + name, stddev)
            tf.scalar_summary('max/' + name, tf.reduce_max(var))
            tf.scalar_summary('min/' + name, tf.reduce_min(var))
            tf.histogram_summary(name, var)
    def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
        """Fully-connected layer act(W*x + b) with per-layer summaries."""
        # Adding a name scope ensures logical grouping of the layers in the graph
        with tf.name_scope(layer_name):
            # This Variable will hold the state of the weights for the layer
            with tf.name_scope('weights'):
                weights = weight_variable([input_dim, output_dim])
                variable_summaries(weights, layer_name + '/weights')
            with tf.name_scope('biases'):
                biases = bias_variable([output_dim])
                variable_summaries(biases, layer_name + '/biases')
            with tf.name_scope('Wx_plus_b'):
                preactivate = tf.matmul(input_tensor, weights) + biases
                tf.histogram_summary(layer_name + '/pre_activations', preactivate)
            activations = act(preactivate, 'activation')
            tf.histogram_summary(layer_name + '/activations', activations)
            return activations
    hidden1 = nn_layer(x, 784, 500, 'layer1')
    with tf.name_scope('dropout'):
        keep_prob = tf.placeholder(tf.float32)
        tf.scalar_summary('dropout_keep_probability', keep_prob)
        dropped = tf.nn.dropout(hidden1, keep_prob=keep_prob)
    y = nn_layer(dropped, 500, 10, 'layer2', act=tf.nn.softmax)
    with tf.name_scope('cross_entropy'):
        # NOTE(review): taking log(softmax) explicitly is numerically
        # unstable; tf's fused softmax_cross_entropy_with_logits is preferred
        # in new code.  Kept as-is to preserve the tutorial structure.
        diff = y_ * tf.log(y)
        with tf.name_scope('total'):
            cross_entropy = -tf.reduce_mean(diff)
        tf.scalar_summary('cross entropy', cross_entropy)
    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(cross_entropy)
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.scalar_summary('accuracy', accuracy)
    # Merge all the summaries and write them out to LOG
    merged = tf.merge_all_summaries()
    train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)
    test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test')
    tf.initialize_all_variables().run()
    # Train the model and also write summaries.
    # Every 10th step, measure test-set accuracy and write test summaries;
    # all other steps run train_step on training data and add training summaries.
    def feed_dict(train):
        """Feed a training batch (with dropout) or the full test set."""
        if train or FLAGS.fake_data:
            xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
            k = FLAGS.dropout
        else:
            xs, ys = mnist.test.images, mnist.test.labels
            k = 1.0
        return {x: xs, y_: ys, keep_prob: k}
    for i in range(FLAGS.max_steps):
        if i % 10 == 0:
            # Record summaries and test-set accuracy
            summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))
            test_writer.add_summary(summary, i)
            print('Accuracy at step %s:%s' % (i, acc))
        else:
            # Record train set summaries and train
            if i % 100 == 99:
                # Every 100th step additionally capture a full execution trace.
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                summary, _ = sess.run([merged, train_step],
                                      feed_dict=feed_dict(True),
                                      options=run_options,
                                      run_metadata=run_metadata)
                train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
                train_writer.add_summary(summary, i)
                print('Adding run metadata for', i)
            else:
                # Record a summary
                summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))
                train_writer.add_summary(summary, i)
    train_writer.close()
    test_writer.close()
def main(_):
    # Start each run with a fresh summaries directory so TensorBoard does not
    # mix events from previous runs, then kick off training.  The unused
    # parameter receives argv from tf.app.run().
    if tf.gfile.Exists(FLAGS.summaries_dir):
        tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
    tf.gfile.MakeDirs(FLAGS.summaries_dir)
    train()
if __name__ == '__main__':
tf.app.run() | [
"1097028825@qq.com"
] | 1097028825@qq.com |
340c175b92734cc7c9e5016aa448f704506f356e | 820492d60b65fefc322f7ee902ed90ac22393160 | /test.py | 995b66e059e4105147c7d0d7a1f0ffe15a87f4b1 | [] | no_license | ChristianGold/TechTree | e3d570f863b71a746b08dce75394a8b4dd7bb6b2 | f021409a161f38ca929589c250ccf32c8b102667 | refs/heads/master | 2020-05-21T00:26:23.403820 | 2014-10-25T18:55:00 | 2014-10-25T18:55:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py | __author__ = 'Christian'
from PySide import QtGui, QtCore
import sys
class Main(QtGui.QMainWindow):
    """Demo window: a push button that keeps appending TestButton rows
    into a scrollable form layout underneath it."""
    def __init__(self, parent = None):
        super(Main, self).__init__(parent)
        # main button: clicking it appends a new TestButton row below
        self.addButton = QtGui.QPushButton('button to add other widgets')
        self.addButton.clicked.connect(self.addWidget)
        # scroll area widget contents - layout
        self.scrollLayout = QtGui.QFormLayout()
        # scroll area widget contents
        self.scrollWidget = QtGui.QWidget()
        self.scrollWidget.setLayout(self.scrollLayout)
        # scroll area (resizable, so the inner widget grows with new rows)
        self.scrollArea = QtGui.QScrollArea()
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setWidget(self.scrollWidget)
        # main layout: the add-button above the scroll area
        self.mainLayout = QtGui.QVBoxLayout()
        # add all main widgets to the main vLayout
        self.mainLayout.addWidget(self.addButton)
        self.mainLayout.addWidget(self.scrollArea)
        # central widget
        self.centralWidget = QtGui.QWidget()
        self.centralWidget.setLayout(self.mainLayout)
        # set central widget
        self.setCentralWidget(self.centralWidget)
    def addWidget(self):
        # Slot for addButton.clicked: append one TestButton row to the form.
        self.scrollLayout.addRow(TestButton())
class TestButton(QtGui.QPushButton):
    """Self-removing button: clicking it schedules its own deletion."""
    def __init__( self, parent=None):
        super(TestButton, self).__init__(parent)
        self.setText("I am in Test widget")
        # deleteLater is safe to invoke from within the clicked signal.
        self.clicked.connect(self.deleteLater)
# Script entry point: create the Qt application, show the demo window and
# hand control to the Qt event loop.
app = QtGui.QApplication(sys.argv)
myWidget = Main()
myWidget.show()
app.exec_()
"christian.gold@stud.h-da.de"
] | christian.gold@stud.h-da.de |
b15770ca426b1ffea780f265f913236bc36b3d1a | d5ea1c0dffaf6a4ae0ebfc2b02801e8d25627fc5 | /PerfectCRM-master/beeflow/urls.py | e21322a864003d2df3c1250ece9ee84cfe2000a7 | [] | no_license | dba-base/python-homework | c0681fbb5846879378a5717a0b1f694e135df0e6 | 0f507783503de5cd9202e7d6bf63fc5ae3c26272 | refs/heads/master | 2020-04-05T13:40:35.312320 | 2018-09-29T09:44:26 | 2018-09-29T09:44:26 | 94,948,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 483 | py |
from django.conf.urls import url,include
from beeflow import views
urlpatterns = [
url(r'^my_application/$', views.my_application,name='my_application'),
url(r'^flow_detail/(\d+)/$', views.flow_detail,name='flow_detail'),
url(r'^my_approvals/$', views.my_approvals,name='my_approvals'),
url(r'^my_approval_records/$', views.my_approval_records,name='my_approval_records'),
url(r'^flow_examination/(\d+)/$', views.flow_examination,name='flow_examination'),
]
| [
"haoxiaoyu424@outlook.com"
] | haoxiaoyu424@outlook.com |
9c5a0d0aae13b2405e6c1a0eae1cf25e63ec0318 | d2189145e7be2c836017bea0d09a473bf1bc5a63 | /Reposiciones/PerezAyalaYocoyaniEhecatzin/Quinta/primos.py | e85dae701e63e19bd1b500914226c948c5d3fa98 | [] | no_license | emilianoNM/Tecnicas3 | 12d10ce8d78803c8d2cd6a721786a68f7ee2809d | 6ad7f0427ab9e23643a28ac16889bca8791421d0 | refs/heads/master | 2020-03-25T18:06:34.126165 | 2018-11-24T04:42:14 | 2018-11-24T04:42:14 | 144,013,045 | 3 | 5 | null | 2018-09-14T10:47:26 | 2018-08-08T12:49:57 | Python | UTF-8 | Python | false | false | 440 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 16 16:19:34 2018
@author: yocoy
"""
def main():
    """Print every prime below 1000, space-separated (sieve of Eratosthenes).

    Fixes the original, which also printed 0 and 1: indices 0 and 1 were
    never cleared in the sieve, yet neither number is prime.
    """
    limit = 1000
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False  # by definition, 0 and 1 are not prime
    for candidate in range(2, limit):
        if is_prime[candidate]:
            # Start at candidate**2: smaller multiples were already crossed
            # out by smaller primes.
            for multiple in range(candidate * candidate, limit, candidate):
                is_prime[multiple] = False
    for number in range(limit):
        if is_prime[number]:
            print(str(number)+' ', end='')
main() | [
"yocoyaniperez1@gmail.com"
] | yocoyaniperez1@gmail.com |
197f3b6ba2b6fab2b3ea295c720dea1a3f368f2a | 05e39916e4399e7e5df589d04968b18e2d6469f9 | /47.py | 7b3edf73118f36203456f73ce85eb63e2e1050cb | [] | no_license | shilpa2020/python | a74b92c865a288dd9354c11feb3ca93d4eeca5c0 | 2caaf41b9bd396be1a5d92456019ce75df7172ac | refs/heads/master | 2020-06-07T14:24:10.866281 | 2019-08-03T06:19:42 | 2019-08-03T06:19:42 | 193,040,866 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | p=int(input())
q=list(map(int,input().split()))
q.sort()
print(q[0],q[p-1])
| [
"noreply@github.com"
] | shilpa2020.noreply@github.com |
e4a12e794fc84462053c583a6a68668683bb7797 | b665981fd47afa19ebc45c3c3734658229117ab7 | /REMApp/Views/property_category_views/property_category_views.py | 73fc4d167499132934ecd67ec97c3ed09211853e | [] | no_license | mubaskid/RealEstateManagement | bf9f30378f780ace045d16acc18b939dce696ef5 | 113afeb1c44e8723db94330d55124e1e3f07037c | refs/heads/master | 2023-08-22T21:18:54.352315 | 2021-09-27T10:30:32 | 2021-09-27T10:30:32 | 397,660,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,479 | py | from django.http import Http404, HttpRequest, JsonResponse
from django.shortcuts import render, redirect
from REMApp.Services.ServiceFactory import REMApp_service_container
from REMApp.dto.PropertyCategoryDto import CreatePropertyCategoryDto, UpdatePropertyCategoryDto, \
ListPropertyCategoryDto, PropertyCategoryDetailsDto
from REMApp.models import Property_category
from django.contrib.auth.decorators import login_required
def create_property_category(request):
context = {
}
__create_if_post_method(context, request)
if request.method == "POST" and context["saved"]:
return redirect("home_property_category")
return render(request, "", context)
def edit_property_category(request, property_category_id):
property_category_details_dto = __get_property_details_dto_or_raise_404(property_category_id)
context = {
"title": f"Edit property {property_category_details_dto.description}",
"property": property_category_details_dto,
}
new_property_category_details_dto = __edit_if_post_method(context, property_category_id, request)
if new_property_category_details_dto is not None:
context["property"] = new_property_category_details_dto
return render(request, "", context)
def list_property(request, property_category_id):
property_category = __get_property_details_dto_or_raise_404(property_category_id)
context = {
"title": f"Property{property_category.description}",
"property_category": property_category
}
return render(request, "", context)
def delete_property(request, property_category_id: int):
try:
Property_category().delete(property_category_id, request)
return redirect("home_property")
except Exception:
raise Http404("Property does not exist")
def view_property(request, property_category_id: int):
properties = __get_property_details_dto_or_raise_404(property_category_id)
context = {
"title": f"PropertyCategory{properties.property_category_id}",
"properties": properties
}
return render(request, "", context)
@login_required(login_url='login')
def home_property_category(request):
properties = REMApp_service_container.property_type_management_service().list()
context = {
"title": "PropertyCategory",
"properties": properties
}
return render(request, "", context)
def get_property_category_for_select(request):
    """Return all property categories as JSON for populating a select list.

    Bug fix: the original called ``JsonResponse(context, request)``, but
    JsonResponse's second positional parameter is ``encoder`` -- passing the
    HttpRequest there fails at serialization time.  The request must not be
    passed to JsonResponse at all.
    """
    property_category = REMApp_service_container.property_type_management_service().get_all_for_select_list()
    context = {
        "property_category": property_category
    }
    return JsonResponse(context)
def __create_if_post_method(context, request):
if request.method == "POST":
try:
property_category = __get_create_property_dto_from_request(request)
REMApp_service_container.property_type_management_service(request).create(property_category)
context["saved"] = True
except Exception as a:
print(a)
context["saved"] = False
def __edit_if_post_method(context, property_category_id: int, request: HttpRequest) -> PropertyCategoryDetailsDto:
if request.method == "POST":
try:
properties = __get_edit_property_category_dto_from_request(property_category_id, request)
REMApp_service_container.property_type_management_service().update(property_category_id, properties)
context["saved"] = True
return __get_property_details_dto_or_raise_404(property_category_id)
except Exception as p:
print(p)
context["saved"] = False
def __get_create_property_dto_from_request(request: HttpRequest) -> CreatePropertyCategoryDto:
create_property_category_dto = CreatePropertyCategoryDto()
create_property_category_dto.name = request.POST["name_name"]
create_property_category_dto.property_category_id = request.POST["property_category_id"]
create_property_category_dto.description = request.POST["description"]
__set_property_category_attributes_from_request(create_property_category_dto, request)
return create_property_category_dto
def __get_edit_property_category_dto_from_request(property_category_id: int, request: HttpRequest) -> \
UpdatePropertyCategoryDto:
update_property_category_dto = UpdatePropertyCategoryDto()
update_property_category_dto.id = property_category_id
__set_property_category_attributes_from_request(update_property_category_dto, request)
return update_property_category_dto
def __set_property_category_attributes_from_request(update_property_category_dto, request):
update_property_category_dto.name = request.POST["name"]
update_property_category_dto.property_category_id = request.POST["property_category_id"]
def __get_property_details_dto_or_raise_404(property_category_id) -> PropertyCategoryDetailsDto:
try:
properties = REMApp_service_container.property_type_management_service().get(id=property_category_id)
except Property_category.DoesNotExist:
raise Http404("The requested admin does not exist")
return properties
def __get_list_property_dto_or_raise_404(property_id) -> ListPropertyCategoryDto:
try:
properties = REMApp_service_container.property_management_service().get(id=property_id)
except Property_category.DoesNotExist:
raise Http404("List of Properties not found")
return properties
| [
"77334849+mubaskid@users.noreply.github.com"
] | 77334849+mubaskid@users.noreply.github.com |
1142c1932395ab312d55919edaaefe701f155b08 | 26a37bbf6013bd8b46b2e57f5c99d8018b5f7e8b | /paper-experiments/natural-language-processing/train_lm.py | c6f5a480dd86ac455cdb6ac116c0ea5384619940 | [
"MIT"
] | permissive | udaykamal20/dfa-scales-to-modern-deep-learning | 835721c11a04e69fc36e352717e837e82e6a94cc | 336a2116fd4be50daac17b4ac0913ce82ded0665 | refs/heads/master | 2023-03-26T01:36:32.220543 | 2020-11-02T09:49:14 | 2020-11-02T09:52:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,513 | py | """
Trains a Transformer langugage model.
Author: François Boniface
"""
import argparse
import math
import numpy as np
import os
import pickle
from radam import RAdam
import torch
import torch.nn as nn
import torch.optim as optim
import torchtext
import time
import youtokentome as yttm
import yaml
import transformer
import utils
from tinydfa.alignment import AlignmentMeasurement
# ********** CONFIG **********
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_id", help="which GPU to use", type=int, default=0)
parser.add_argument("--dfa", type=str, default='none', choices=['none', 'simple', 'full'])
parser.add_argument("--no_training", help="not actually use DFA", action='store_true')
parser.add_argument("--dfa_after_vocab", help="place DFA after projection to vocab size (before by default)", action='store_true')
parser.add_argument("--dfa_embed", help="place DFA after the input embedding layer", action='store_true')
parser.add_argument("--alignment", action='store_true')
parser.add_argument("--max_epochs", type=int, default=100)
parser.add_argument("--patience", help=" number of consecutive epochs without improvement before early stopping", type=int, default=5)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--chunk_length", type=int, default=128)
parser.add_argument("--dmodel", help="model dimension", type=int, default=512)
parser.add_argument("--dff", help="dim_feedforward", type=int, default=2048)
parser.add_argument("--nlayers", help="number of encoder layers", type=int, default=6)
parser.add_argument("--nheads", help="number of attention heads", type=int, default=8)
parser.add_argument("--dropout", help="dropout probability", type=float, default=0.1)
parser.add_argument("--attention", type=str, default='standard', choices=['standard', 'fixed', 'dense', 'random'])
parser.add_argument("--nolayernorm", action='store_true')
parser.add_argument("--tie_embeddings", action='store_true')
parser.add_argument("--optim", type=str, default='noam', choices=['noam', 'adam', '1cycle', 'radam', 'schedule'])
parser.add_argument("--max_lr", help="max learning rate (after warmup)", type=float, default=None)
parser.add_argument("--beta1", type=float, default=0.9)
parser.add_argument("--beta2", type=float, default=0.999)
parser.add_argument("--init_lr", type=float, default=1e-7)
parser.add_argument("--warmup", help="number of warmup steps of the optimizer (increasing lr)", type=int, default=4000)
parser.add_argument("--schedule_patience", type=int, default=1)
parser.add_argument("--schedule_factor", type=int, default=0.2)
parser.add_argument("--dataset", type=str, default='wikitext103', choices=['wikitext2', 'wikitext103'])
parser.add_argument("--bpe_path", type=str, default='bpe_models/wikitext-2.bpe.32000')
parser.add_argument("--savedir", help="relative path of saving directory", type=str, default='experiments')
args = parser.parse_args()
print(args)
if args.attention == 'fixed' and args.nheads != 4:
print("WARNING: if fixed attention heads are used, their number is fixed to 4. The nheads argument will be ignored.")
args.nheads = 4
# ********** CREATE DIRECTORY AND SAVE CONFIG **********
dfa_suffix = '_dfa' if args.dfa != 'none' else ''
exp_name = f'LM_{args.dataset}' + dfa_suffix
exp_number = utils.count_same_name(args.savedir, exp_name) + 1
exp_dir = os.path.join(args.savedir, f'{exp_name}_{exp_number}')
print(f'Will save to {exp_dir}')
if not os.path.exists(exp_dir):
os.mkdir(exp_dir)
losses_save_path = os.path.join(exp_dir, 'losses.npy')
with open(os.path.join(exp_dir, 'config.yml'), 'w') as f:
yaml.dump(args.__dict__, f)
print('Configuration file written')
# ************** CREATE DATASET, MODEL AND OPTIMIZER******************
bpe = yttm.BPE(model=args.bpe_path)
TEXT = torchtext.data.Field(tokenize=lambda x: utils.bpe_tokenize(x, bpe), lower=True)
train_txt, val_txt, test_txt = utils.get_datasets(args.dataset).splits(TEXT)
print('Dataset fetched')
TEXT.build_vocab(train_txt)
vocab_size = len(TEXT.vocab.stoi)
print(f"Unique tokens in vocabulary: {len(TEXT.vocab)}")
device = torch.device(f"cuda:{args.gpu_id}" if torch.cuda.is_available() else "cpu")
train_data = utils.batchify(train_txt, TEXT, args.batch_size, device)
val_data = utils.batchify(val_txt, TEXT, args.batch_size, device)
layernorm = not args.nolayernorm
model = transformer.LMTransformer(vocab_size, args.dmodel, args.nheads,
args.dff, args.nlayers, args.dropout,
tie_embeddings=args.tie_embeddings,
dfa=args.dfa, no_training=args.no_training,
dfa_after_vocab=args.dfa_after_vocab,
dfa_embed=args.dfa_embed,
attn=args.attention,
layernorm=layernorm)
print(f"The model has {utils.count_parameters(model)} trainable parameters")
model.to(device)
criterion = nn.CrossEntropyLoss()
scheduler = None
betas = (args.beta1, args.beta2)
if args.optim == 'noam':
base_optim = optim.Adam(model.parameters(), lr=args.init_lr, betas=betas, eps=1e-9)
optimizer = utils.NoamOpt(args.dmodel, 1, args.warmup, base_optim)
elif args.optim == 'adam':
optimizer = optim.Adam(model.parameters(), lr=args.init_lr, betas=betas, eps=1e-9)
elif args.optim == '1cycle':
optimizer = optim.Adam(model.parameters(), lr=args.init_lr, betas=betas, eps=1e-9)
scheduler = optim.lr_scheduler.OneCycleLR(optimizer,
max_lr=args.max_lr,
steps_per_epoch=len(train_data),
epochs=args.max_epochs)
elif args.optim == 'radam':
optimizer = RAdam(model.parameters(), lr=args.init_lr, betas=betas, eps=1e-9)
elif args.optim == 'schedule':
optimizer = optim.Adam(model.parameters(), lr=args.init_lr, betas=betas, eps=1e-9)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
factor=args.schedule_factor,
patience=args.schedule_patience)
# **************** TRAINING ******************
print('Training starts...')
alignment = None
if args.alignment:
alignment = AlignmentMeasurement(model, torch.device(f"cuda:{args.gpu_id+1}"))
alignments = []
train_losses, val_losses, durations = [], [], []
best_val_loss = float("inf")
epochs_wo_improvement = 0
model_save_path = None
steps = 0
# ---- Training loop: one epoch per iteration, with best-model checkpointing,
# per-epoch stats dumps, and patience-based early stopping. ----
for epoch in range(1, args.max_epochs + 1):
    epoch_start_time = time.time()
    if alignment:
        # Also measure gradient alignment (AlignmentMeasurement) this epoch.
        train_loss, align_dic = utils.run_epoch(model, train_data, criterion, optimizer, vocab_size, args.chunk_length, alignment)
        alignments.append(align_dic)
    else:
        train_loss = utils.run_epoch(model, train_data, criterion, optimizer, vocab_size, args.chunk_length)
    train_losses.append(train_loss)
    steps += len(train_data)
    val_loss = utils.evaluate(model, val_data, criterion, vocab_size, args.chunk_length)
    val_losses.append(val_loss)
    if scheduler:
        # NOTE(review): step(val_loss) is correct for ReduceLROnPlateau
        # ('schedule'), but OneCycleLR ('1cycle') takes no metric and is meant
        # to be stepped per batch -- confirm the '1cycle' path is intended.
        scheduler.step(val_loss)
    epoch_duration = time.time() - epoch_start_time
    durations.append(epoch_duration)
    print('-' * 89)
    print('| End of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | valid perplexity {:8.2f}'
          .format(epoch, epoch_duration, val_loss, math.exp(val_loss)))
    print('-' * 89)
    # Keep only the single best (lowest validation loss) checkpoint on disk.
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        # delete previous checkpoint
        if model_save_path is not None:
            os.remove(model_save_path)
        # save current state
        model_save_path = os.path.join(exp_dir, f'model_{epoch}.pt')
        torch.save({
            'model': model.state_dict(),
            'optim': optimizer.state_dict(),
            'epoch': epoch,
            'step': steps
        }, model_save_path)
        epochs_wo_improvement = 0
    else:
        epochs_wo_improvement += 1
    # Persist running statistics every epoch so a crash loses nothing.
    stats = {
        'train_losses': train_losses,
        'valid_losses': val_losses,
        'mean_epoch_duration': np.mean(durations)
    }
    with open(os.path.join(exp_dir, f'stats.pkl'), 'wb') as f:
        pickle.dump(stats, f)
    if alignment:
        with open(os.path.join(exp_dir, f'alignments.pkl'), 'wb') as f:
            pickle.dump(alignments, f)
    if epochs_wo_improvement == args.patience:
        print('Early stopping')
        break
| [
"julien@lighton.io"
] | julien@lighton.io |
f461bad6951e11a80cf5a167b05601eb0a0bc5fa | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02921/s749768491.py | f0ebc17a8297dea1a1ed4f76ccf22e230f971998 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | #template
from collections import Counter
def inputlist():
    """Read one line from stdin and return its whitespace-separated integers."""
    return list(map(int, input().split()))
#template
S = input()
T = input()
# Count the positions (0, 1, 2) where S and T hold the same character.
ans = sum(1 for i in range(3) if S[i] == T[i])
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2ec8dda061a4b6e196fca838539550992ab5f3b7 | 23261d97f11c502f882a003cf9d65e7de2aa860e | /src/data/data_wire100.py | 62893df19570105de8355248eab42455eb000a10 | [] | no_license | shohruh-abduakhatov-portfolio/mivs-py | 4a7161066dcb3551d866af8238db85672ce95ee8 | 4419deb48db8a71555481c2fff09c553440e8702 | refs/heads/master | 2023-01-11T18:02:30.802698 | 2020-11-16T17:41:52 | 2020-11-16T17:41:52 | 312,632,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | data = {'products': [15185, 11936, 22497], 'drug_qty_req': [1000, 1000, 1000], 'input_capital': 2000000000,
'cash': 1000000000, 'wire': 1000000000, 'suppliers': [119, 133], 'cash_cost': [], 'wire_25': [], 'wire_50': [],
'wire_100': [[9490, 8122], [2000000000, 2000000000], [2000000000, 2000000000]],
'min_sum_per_supplier': [300000, 300000], 'discount_threshold': [2000000000, 2000000000],
'discount_percent': [0, 0], 'supp_overhead': [0, 0]}
| [
"shokhrukh.abduahadov@gmail.com"
] | shokhrukh.abduahadov@gmail.com |
b9968374dda80a11b7dbba60386eefbcd3b123a6 | 8c77a82a87efc70acdd399546e2e1c1edccac61b | /past_edition/tt.py | e910d714028a0a0d456a1e353c90f013f0496a2e | [] | no_license | RobinROAR/EzCondor | ec6d75cfd3c673f8a7cdf78b6b287a0ea553b4f5 | c5fa12605e91f35aaf86981130719a93be53682f | refs/heads/master | 2021-01-20T12:42:24.733556 | 2017-09-12T19:05:36 | 2017-09-12T19:05:36 | 90,397,721 | 4 | 2 | null | 2017-05-19T16:27:44 | 2017-05-05T16:51:18 | Python | UTF-8 | Python | false | false | 1,105 | py | #!/usr/bin/env python
# -*-coding:utf-8 -*-
#Robin
#04.21 2017
import random
# Toy graph as an adjacency list: node -> list of neighbours it can spread to.
# Note some edges appear in one direction only (e.g. C lists B, but B does not
# list C), so the spread is directional along these lists.
network = {'A':['B','D','E'],'B':['D','A','E'],'C':['B','F'],'D':['A','B'],'E':['A','F'],'F':['C','E']}
def diffusion(network, source, p):
    """Simulate an independent-cascade-style spread over *network* from *source*.

    BFS from the source; each newly reached neighbour gets exactly one
    activation attempt that succeeds with probability *p*.  A node that fails
    its coin flip is still marked as seen, so it cannot be activated later via
    another path.  Returns the number of activated nodes (source included).
    """
    seen = {node: False for node in network}
    active = {node: False for node in network}
    seen[source] = True
    active[source] = True
    frontier = [source]
    while frontier:
        current = frontier.pop(0)
        if not active[current]:
            continue
        for nxt in network[current]:
            if seen[nxt]:
                continue
            seen[nxt] = True  # one chance only, even if the flip below fails
            if random.random() <= p:
                active[nxt] = True
                frontier.append(nxt)
    return sum(1 for flag in active.values() if flag)
print diffusion(network,'A',0.15) | [
"zrb915@live.com"
] | zrb915@live.com |
f4301df7d5f207572af701f67fa7761e53da7866 | 639194c45587d5fc539da4d385c1e4cbfe93d371 | /Class work 20.02/Task1.1.py | 1beae1cb669c22e8218b79e23f90cfc8f479abe7 | [] | no_license | ZHasaral/DataAnalys | 5cd26c5b99e27da5fbb440f002d9ce61569258ae | 0afb80301dc1efde2c74c231ba90221a1023b1bd | refs/heads/master | 2022-11-03T11:44:25.929030 | 2018-05-01T13:01:17 | 2018-05-01T13:01:17 | 121,307,393 | 0 | 1 | null | 2022-10-10T01:55:11 | 2018-02-12T21:59:38 | Python | UTF-8 | Python | false | false | 28 | py | p = 1/10+1/10+1/10
print(p)
| [
"zhasaralture@gmail.com"
] | zhasaralture@gmail.com |
c4e7412ad2c5f533b556a899ea8e91074f97f06d | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/eve/client/script/ui/station/captainsquarters/__init__.py | 072ece094445c140829bf808523c4b6fe2d64411 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 149 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\station\captainsquarters\__init__.py
pass
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
2d69f6761c50550f08a79d32a1bdeba6e0b5edc5 | 345b93a69cc545712a9cfc96197f7ac896e91a45 | /blog/pelican-to-hugo.py | 5d0433f80a309a44f1805e45c69fc7f4bc7143f0 | [
"MIT"
] | permissive | rushiagr/rushiagr.github.com | 9afaf057bb90a10f9485ecc69535c28ccd5e0749 | a214ed788971a5acb4d14fb77662ef2ce185189a | refs/heads/master | 2020-05-22T01:41:09.072119 | 2019-12-02T13:52:01 | 2019-12-02T13:52:01 | 7,132,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py | import os
# NOTE: assumes 'Category: ' is the last values in heading of a pelican
# markdown
# One-off converter: rewrites every Pelican *.markdown post in markdown_dir
# into a Hugo-style *.md file with a TOML (+++) front-matter block.
markdown_dir = '/home/r/src/blog/npf/content/blog/markdowns/'
files = os.listdir(markdown_dir)
for f in files:
    if f.endswith('.markdown'):
        with open(markdown_dir+f, 'r') as in_f:
            # files shouldn't have more than one dot
            outfile_name = f.split('.')[-2] + '.md'
            with open(markdown_dir+outfile_name, 'w') as out_f:
                out_f.write('+++\n')
                lines = in_f.readlines()
                for line in lines:
                    if line.startswith('Title: '):
                        _, title = line.split(':', 1)
                        title = title.strip()
                        out_f.write('title = "'+title+'"\n')
                    elif line.startswith('Date: '):
                        _, date = line.split(':', 1)
                        date = date.strip()
                        # Pelican dates have no time component; pin midnight UTC.
                        out_f.write('date = "'+date+'T00:00:00-00:00"\n')
                    # Pelican-only metadata with no Hugo equivalent: drop it.
                    elif line.startswith('Author: '):
                        continue
                    elif line.startswith('Summary: '):
                        continue
                    elif line.startswith('Slug: '):
                        continue
                    elif line.startswith('Tags: '):
                        _, tags = line.split(':', 1)
                        tags = tags.strip().split(',')
                        tags = ['"'+tag.strip()+'"' for tag in tags]
                        tags = ', '.join(tags)
                        out_f.write('tags = ['+tags+']\n')
                    elif line.startswith('Category: '):
                        # 'Category:' ends the header (see NOTE above), so the
                        # front matter is closed here; all later lines are body.
                        out_f.write('type = "post"\n')
                        out_f.write('+++\n')
                        pass
                    else:
                        out_f.write(line)
| [
"rushi.agr@gmail.com"
] | rushi.agr@gmail.com |
47783a022d396e58a0d895f3b946bc69f9186aac | 1150571dd2924fb2d1da55a1568401df58a0ae11 | /main.py | 2023c89c9306b67ae12de860fa093976cb18bda1 | [] | no_license | Paulware/piPair | 69ddc2353bc9d08ea8a369de1da7b8cc96445b45 | 64bc6a65552c3c22873c3ce227dc4365f16c21c2 | refs/heads/master | 2023-08-30T12:16:01.574791 | 2023-08-18T20:40:11 | 2023-08-18T20:40:11 | 251,141,929 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,170 | py | import pygame
import subprocess
import os
import socket
import select
import math
import time
import glob
import random
import sys
import datetime
import mtgScreens
import inputOutput
import utilityScreens
import cardDatabase
from pygame.locals import *
# Require CPython >= 3.7 before doing anything else.
if (sys.version_info.major < 3) or ((sys.version_info.major <= 3) and (sys.version_info.minor < 7)):
    print ( "You must run this program with python 3.7 or greater" )
    # NOTE(review): `assert` is stripped when run with -O, which would let
    # execution continue on an unsupported interpreter -- consider sys.exit().
    assert sys.version_info >= (3,7)
print (str(sys.version_info))
# Include game files
import chat
import checkers
import tictactoe
import chess
import mtg
import diplomacy
import panzerleader
exec (chat.CHAT)
exec (checkers.CHECKERS)
exec (tictactoe.TICTACTOE)
exec (chess.CHESS)
exec (mtg.MTG)
exec (diplomacy.DIPLOMACY)
exec (panzerleader.PANZERLEADER)
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
GREEN = ( 0, 255, 0)
BRIGHTBLUE = ( 0, 50, 255)
BROWN = (174, 94, 0)
RED = (255, 0, 0)
DARKRED = (139, 26, 26)
DARKGREY = (128, 128, 128)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
DARKGREEN = (0, 100, 0)
DARKBLUE = (75, 0, 130)
LIGHTBLUE = (64, 244, 208)
TEXTBGCOLOR1 = BRIGHTBLUE
TEXTBGCOLOR2 = GREEN
GRIDLINECOLOR = BLACK
TEXTCOLOR = WHITE
HINTCOLOR = BROWN
tcpSocket = None
tcpConnection = None
allDecks = {}
gameList = ['Chat', 'Tic Tac Toe', 'Checkers', 'Chess', 'MTG', 'Diplomacy', 'PanzerLeader']
iAmHost = False
joining = ''
DISPLAYWIDTH=800
DISPLAYHEIGHT=600
UDPPORT = 3333
configFilename = 'mainConfig.txt'
rightClick = False
move = None
udpCounter = 0
pollReady = False
# Sleep so that the desktop display can initialize itself
#time.sleep(15)
myIpAddress = socket.gethostbyname(socket.gethostname())
print ("pygame.init")
pygame.init()
print ("get the clock")
MAINCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((DISPLAYWIDTH, DISPLAYHEIGHT),HWSURFACE|DOUBLEBUF|RESIZABLE)
utilScreen = utilityScreens.utilityScreens (DISPLAYSURF)
myIO = inputOutput.inputOutput(utilScreen)
FONT = pygame.font.Font('freesansbold.ttf', 16)
BIGFONT = pygame.font.Font('freesansbold.ttf', 32)
#pygame.display.toggle_fullscreen()
pygame.display.set_caption('Flippy')
def readConfigData():
    """Load the host/client role from the config file, creating a default
    'client' config on first run.

    Side effects: sets the module globals iAmHost and myIO.games according to
    the first line of the config file ('host' or 'client').

    Raises:
        AssertionError: if the config file cannot be read.
    """
    global iAmHost
    global myIO
    if not os.path.exists (configFilename):
        # First run: default to client mode
        f = open (configFilename, 'w' )
        f.write ( 'client\n' )
        f.close()
    # Read configuration data
    line = ''  # pre-bind so the error message below never hits an unbound name
    try:
        f = open ( configFilename, 'r')
        line = f.readline().strip().lower()
        f.close()
        print ("Read line: " + line)
        if line == 'host':
            print ( 'You are host' )
            iAmHost = True
            myIO.games = gameList
        elif line == 'client':
            print ( 'You are client')
            iAmHost = False
            myIO.games = []
        # NOTE(review): any other value leaves iAmHost/myIO.games untouched --
        # confirm that is intended for a hand-edited config file.
    except Exception as ex:
        # BUG FIX: was `assert False, ...` followed by an unreachable
        # `iAmHost = None`; the assert is stripped under -O (silently hiding
        # the failure) and the old message could reference `line` before
        # assignment. Raise the same exception type explicitly instead.
        raise AssertionError('error while reading config data, line:[' + line + '] exception: ' + str(ex))
'''
Utilities
'''
def rotate (image, angle):
    """Return *image* rotated by *angle* degrees (counter-clockwise).

    The original implementation also computed the rotated bounding box and
    pivot translation, but none of those values were used -- only the rotated
    surface is returned -- so that dead code has been removed.  blitRotate()
    keeps the full placement math for callers that need it.
    """
    return pygame.transform.rotate(image, angle)
def blitRotate(image, pos, angle):
    """Rotate *image* by *angle* degrees and blit it onto DISPLAYSURF,
    pivoting about the image centre (presumably *pos* is where that centre
    should land on screen -- confirm against callers).

    pygame.transform.rotate returns a larger surface (the axis-aligned
    bounding box of the rotated image), so the upper-left blit origin must be
    derived from the rotated bounding box and the pivot translation.
    """
    global DISPLAYSURF
    surf = DISPLAYSURF
    # calculate the axis aligned bounding box of the rotated image
    w, h = image.get_size()
    originPos = (w//2,h//2)
    box = [pygame.math.Vector2(p) for p in [(0, 0), (w, 0), (w, -h), (0, -h)]]
    box_rotate = [p.rotate(angle) for p in box]
    min_box = (min(box_rotate, key=lambda p: p[0])[0], min(box_rotate, key=lambda p: p[1])[1])
    max_box = (max(box_rotate, key=lambda p: p[0])[0], max(box_rotate, key=lambda p: p[1])[1])
    # calculate the translation of the pivot
    pivot = pygame.math.Vector2(originPos[0], -originPos[1])
    pivot_rotate = pivot.rotate(angle)
    pivot_move = pivot_rotate - pivot
    # calculate the upper left origin of the rotated image
    x = int(pos[0] - originPos[0] + min_box[0] - pivot_move[0])
    y = int(pos[1] - originPos[1] - max_box[1] + pivot_move[1])
    # TODO: change x,y to ints  -- NOTE(review): already done via int() above; stale TODO
    # print ( "origin = (" + str(x) + "," + str(y) + ")" )
    origin = (x,y)
    # get a rotated image
    rotated_image = pygame.transform.rotate(image, angle)
    # rotate and blit the image
    surf.blit(rotated_image, origin)
    # draw rectangle around the image
    # pygame.draw.rect (surf, (255, 0, 0), (*origin, *rotated_image.get_size()),2)
lastStatus = ''
def drawStatus (message):
    """Draw *message* on the one-line status bar at the bottom of the window.

    Skips the redraw entirely when the message is unchanged since the last
    call, and caches it in the module-level lastStatus afterwards.
    """
    global lastStatus
    if message != lastStatus:
        print (message)
        # print ( 'Show status: ' + message )
        height = DISPLAYHEIGHT - 23
        pygame.draw.line(DISPLAYSURF, RED, (0, height), (DISPLAYWIDTH, height)) #status line
        showLine (message, 1, height+4) # Show status message
        pygame.display.update()
        lastStatus = message
def showLastStatus ():
    """Redraw the status bar with the most recently shown message."""
    # reading lastStatus does not require a `global` declaration
    drawStatus (lastStatus)
def showStatus (status):
    """Record *status* in the module-level statusMessage and echo it to stdout.

    NOTE(review): statusMessage is not read anywhere in this file; the
    on-screen status bar is drawn by drawStatus() instead -- confirm which of
    the two callers should be using.
    """
    global statusMessage
    statusMessage = status
    print ( 'showStatus(' + statusMessage + ')' )
lastPrintMessage = ''
nextPrintTime = 0
def myPrint (message):
    """Print *message*, rate-limited to at most one line per second.

    A message arriving within 1s of the previous successful print is
    suppressed (the "spam filter").  NOTE(review): lastPrintMessage is
    declared global but never assigned or read here -- it appears dead.
    """
    global lastPrintMessage
    global nextPrintTime
    if time.time() <= nextPrintTime:
        print ( 'Spam filter, clearing message: [' + message + ']')
        message = ''
    if message != '':
        print ( message )
        nextPrintTime = time.time() + 1
def showCh (ch,x,y):
surface = FONT.render(str(ch), True, TEXTCOLOR, TEXTBGCOLOR2)
rect = surface.get_rect()
rect.topleft = (x,y)
DISPLAYSURF.blit(surface, rect)
pygame.display.update()
def chOffset (ch):
    """Return the horizontal advance in pixels used when drawing *ch*.

    The table holds per-character widths (presumably hand-measured for the
    16pt freesansbold font used by showCh -- values taken unchanged from the
    original); any character not listed advances by the default 10px.
    """
    offsets = { '.':4, ':':4, ',':4, '-':4, ' ':4, '(':4, ')':4, '[':5, ']':5, '\'':4, '/':4, '=':9, \
                'A':11, 'I':4, 'W':14, 'O':12, 'M':13, \
                'a':9, 'b':9, 'c':9, 'e':9, 'f':6, 'i':4, 'j':4, 'k':9, 'l':4, 'm':14, 'r':6, 's':9, 't':5, 'x':9, 'v':9, 'w':12, 'y':9, 'z':8, \
                '0':9, '1':9, '2':9, '3':9, '4':9, '5':9, '6':9, '7':9, '8':9, '9':9 \
              }
    # dict.get with a default avoids the double lookup of `in` + indexing
    return offsets.get(ch, 10)
def showLine ( line, x,y ):
height = DISPLAYHEIGHT - 23
pygame.draw.rect(DISPLAYSURF, BLACK, (0,height+2,DISPLAYWIDTH,height+2+25))
pygame.display.update()
for ch in line:
showCh (ch, x, y)
x = x + chOffset (ch)
def getInput (x,y):
line = ''
quit = False
while not quit:
typeInput,data,addr = myIO.getKeyOrUdp()
if typeInput == 'key':
if data == chr(13):
quit = True
elif data == chr(273): # Up
line = data
quit = True
elif data == chr(274): # Down
line = data
quit = True
elif data == chr(275): # Right
line = data
quit = True
elif data == chr(276): # Left
line = data
quit = True
else:
if data == chr(8):
print ( "backspace detected")
if len(line) > 0:
lastCh = line[len(line)-1]
x = x - chOffset (lastCh) #Todo need to get lastCh from
showCh (' ', x, y)
showCh (' ', x+4, y)
showCh (' ', x+8, y)
line = line[:len(line)-1]
else:
line = line + data
ch = data
showCh (ch, x, y)
x = x + chOffset(ch)
elif typeInput == pygame.MOUSEBUTTONUP:
line = data
quit = True
elif typeInput == pygame.MOUSEBUTTONDOWN:
line = data
quit = True
elif typeInput == pygame.MOUSEMOTION:
line = data
quit = True
elif typeInput == 'udp':
line = data
quit = True
elif typeInput == 'tcp':
line = data
print ( 'got some tcp data yo: ' + data)
quit = True
if typeInput != pygame.MOUSEMOTION:
print ( "getInput: " + str(line))
return (typeInput,line,addr)
def updateWpaSupplicant (ssid, password, path='/etc/wpa_supplicant/wpa_supplicant.conf'):
    """Set the wifi credentials in wpa_supplicant.conf.

    If a network= block already exists, its ssid=/psk= lines are replaced in
    place; otherwise a new block is appended.  *path* defaults to the system
    file but is parameterized (backward-compatibly) so the function can be
    exercised against a temp file.  Errors are printed and swallowed
    (best effort), matching the original behaviour.
    """
    try:
        # BUG FIX: this file was previously opened with mode 'w' before
        # readlines(), which truncated the existing config and then raised
        # io.UnsupportedOperation -- it must be opened for reading.
        f = open ( path, 'r')
        lines = f.readlines()
        f.close()
        found = False
        for line in lines:
            if line.find ( 'network=') > -1:
                found = True
                break
        if found:
            # Rewrite the file, substituting the credential lines.
            f = open ( path, 'w')
            for line in lines:
                if line.lower().find ( 'ssid=') > -1:
                    f.write ( '    ssid=\"' + ssid + '\"\n')
                elif line.lower().find ( 'psk=') > -1:
                    f.write ( '    psk=\"' + password + '\"\n')
                else:
                    f.write (line)
            f.close()
        else:
            # No network block yet: append a fresh one.
            f = open ( path, 'a')
            f.write ( 'network={\n')
            f.write ( '    ssid=\"' + ssid + '\"\n')
            f.write ( '    psk=\"' + password + '\"\n')
            f.write ( '}\n' )
            f.close()
    except Exception as ex:
        print ("Could not modify wpa_supplicate because: " + str(ex) )
def createLabel (msg, x, y):
    """Render *msg* with the shared FONT and return (surface, rect) with the
    rect's top-left corner placed at (x, y).  Does not blit anything."""
    surface = FONT.render(msg, True, TEXTCOLOR, TEXTBGCOLOR2)
    rect = surface.get_rect()
    rect.topleft = (x,y)
    return ((surface,rect))
def showLabel (msg, x, y):
    """Render *msg* via createLabel() and blit it onto the main surface at (x, y)."""
    (surface, rect) = createLabel (msg, x, y)
    DISPLAYSURF.blit(surface, rect)
def showLabels (labels, locations):
sprites = []
i = 0
for label in labels:
x = locations[i][0]
y = locations[i][1]
(surface, rect) = createLabel (label, x, y)
sprites.append (DISPLAYSURF.blit(surface, rect))
i = i + 1
return sprites
def actionsToIcons (actions):
    """Map action names to icon file paths plus left-to-right screen positions.

    Icons are laid out on a single row starting at (50, 10), spaced 110px
    apart.  Returns (filenames, locations) as parallel lists.
    """
    filenames = ['images/' + name + '.jpg' for name in actions]
    locations = [(50 + 110 * index, 10) for index in range(len(actions))]
    return (filenames, locations)
def showImages (filenames,locations):
images = []
try:
for filename in filenames:
images.append ( pygame.image.load (filename) )
except Exception as ex:
if str(ex).find ('Couldn\'t open') > -1:
print ( '\n***ERR\nDoes this file exist?: ' + filename + '\n')
else:
print ( '\n***ERR\nCould not load: ' + filename + ' because: ' + str(ex) + '\n')
# Sprites contain rectangular information
sprites = []
try:
i = 0
for image in images:
sprites.append (DISPLAYSURF.blit (image, locations[i]) )
i = i + 1
pygame.display.update()
except Exception as ex:
print ( 'main.showImages, could not place sprite on surface because: ' + str(ex))
print ( 'filenames: ' + str(filenames) + ' locations: ' + str(locations) )
return (images,sprites)
def getSpriteClick (eventType, pos, sprites):
found = -1
try:
if sprites != None:
if eventType == pygame.MOUSEBUTTONDOWN:
#print ( 'click: ' + str(pos) + ' in sprites: ' + str(sprites) + '?')
count = 0
for sprite in sprites:
if sprite.collidepoint(pos):
found = count
#print ( "Yes! in sprite: " + str(count))
break
count = count + 1
except Exception as ex:
print ( 'Could not getSpriteClick because: ' + str(ex) + 'sprites: ' + str(sprites))
#if found == -1:
# print ( 'No!')
return found
def scanForSsids ():
ssids = []
print ( "Show wlan ssids" )
try:
os.system ( 'iw dev wlan0 scan | grep SSID > it.log')
f = open ( 'it.log', 'r')
lines = f.readlines()
f.close()
for line in lines:
data = line.split ( 'SSID:' )
ssid = data[1].strip()
if ssid != '':
if ssid.find ( '\\x00') == -1:
ssids.append(ssid)
except Exception as ex: # This might be a windows machine
ssids = ['RichardsWiFi', 'Logan\'s Wifi', 'NETGEAR14', 'Net751', 'Fake']
print ("Got exception: " + str(ex))
print (str(ssids))
return ssids
def showList(ssids):
# print ('showList' + str(ssids) )
i = 0
y = 75
locations = []
for ssid in ssids:
x = 150
locations.append ( (x,y))
y = y + 35
labels = showLabels (ssids, locations)
(ssidSurf, ssidRect) = createLabel ('Click on SSID to join (password=\'ABCD1234\')', 50, 20)
pygame.display.update()
return labels
def joinSSID (ssid):
    """Log the SSID being joined.

    NOTE(review): this only prints -- the actual join happens via the
    wpa_supplicant rewrite + reboot performed by the caller (joinPage).
    """
    print ("joinSSID")
    print ( "Join this ssid yo (reboot may be necessary):" + ssid )
lastMessage = ""
udpCount = 0
udpMessages = []
acks = []
messageStartTime = time.time()
def readLines (filename, match):
    """Read *filename* and report whether any line contains *match*.

    Returns (lines, found): the file's lines (empty list on any error) and a
    boolean.  Errors are printed and swallowed so callers can treat a missing
    file as "no lines, not found" (best effort, matching original behaviour).
    """
    found = False
    lines = []
    try:
        # `with` guarantees the handle is closed even if readlines() raises
        with open (filename, 'r') as f:
            lines = f.readlines()
        found = any(match in line for line in lines)
    except Exception as ex:
        print ( 'Could not readLines because: ' + str(ex))
    return (lines,found)
def modifyDhcpcd():
filename = '/etc/dhcpcd.conf'
print ( 'modify /etc/dhcpcd.conf' )
(lines,found) = readLines ( filename, 'interface wlan0')
if found:
print ( "Not modifying /etc/dhcpcd.conf because interface wlan0 already exists" )
else:
try:
f = open ( filename, 'w')
for line in lines:
f.write ( line )
f.write ( 'interface wlan0\n' )
f.write ( ' static ip_address=192.168.4.1/24\n' )
f.write ( ' nohook wpa_supplicant\n' )
f.close()
except Exception as ex:
print ( 'Could not update ' + filename + ' because: ' + str(ex))
def modifyDnsmasq():
    """Append the wlan0 DHCP range to /etc/dnsmasq.conf, idempotently.

    Mirrors modifyDhcpcd(): if the interface stanza is already present the
    file is left untouched; otherwise the existing content is rewritten with
    the wlan0 dhcp-range appended.  Errors are printed, not raised.
    """
    filename = '/etc/dnsmasq.conf'
    print ( 'Modify /etc/dnsmasq.conf' )
    (lines,found) = readLines ( filename, 'interface=wlan0')
    if found:
        # BUG FIX: this message previously named /etc/dhcpcd.conf, and the
        # rewrite below still ran (no `else`), appending duplicate lines on
        # every call.
        print ( "Not modifying /etc/dnsmasq.conf because \'interface=wlan0\' already exists")
    else:
        try:
            f = open ( filename, 'w')
            for line in lines:
                f.write ( line )
            f.write ( 'interface=wlan0\n' )
            f.write ( ' dhcp-range=192.168.4.2,192.168.4.20,255.255.255.0,24h\n' )
            f.close()
        except Exception as ex:
            print ( 'Could not modify ' + filename + ' because: ' + str (ex) )
def modifyHostapd(ssid, password='ABCD1234'):
filename = '/etc/hostapd/hostapd.conf'
print ( 'Modify /etc/hostapd/hostapd.conf' )
try:
f = open ( filename, 'w')
f.write ( 'interface=wlan0\n' )
f.write ( 'driver=nl80211\n' )
f.write ( 'ssid=' + ssid + '\n' )
f.write ( 'hw_mode=g\n' )
f.write ( 'channel=7\n' )
f.write ( 'wmm_enabled=0\n' )
f.write ( 'macaddr_acl=0\n' )
f.write ( 'auth_algs=1\n' )
f.write ( 'ignore_broadcast_ssid=0\n' )
f.write ( 'wpa=2\n' )
f.write ( 'wpa_passphrase=' + password + '\n' )
f.write ( 'wpa_key_mgmt=WPA-PSK\n' )
f.write ( 'wpa_pairwise=TKIP\n' )
f.write ( 'rsn_pairwise=CCMP\n' )
f.close()
except Exception as ex:
print ( 'Could not modify ' + filename + ' because: ' + str (ex) )
def extractImage (sheetFilename,x1,y1,x2,y2,finalWidth,finalHeight):
    """Cut the rectangle (x1, y1)-(x2, y2) out of a sprite sheet and scale it
    to finalWidth x finalHeight, preserving per-pixel alpha.

    NOTE(review): pygame's blit `area` argument is (left, top, width, height),
    but (x1, y1, x2, y2) is passed here.  Because the copy is clipped to the
    width x height destination surface, the result is still the intended
    region for non-negative x1/y1 -- worth confirming and cleaning up.
    """
    sheet = pygame.image.load(sheetFilename)
    width = x2 - x1
    height = y2 - y1
    image = pygame.Surface((width, height), pygame.SRCALPHA)
    image = image.convert_alpha()
    image.blit(sheet, (0, 0), (x1,y1,x2,y2))
    image = pygame.transform.scale(image, (finalWidth, finalHeight))
    return image
def commLogWrite (message):
    """Append *message* (UTF-8 encoded) to the binary communications log.

    NOTE(review): relies on a module-level `commLog` binary file handle that
    is not opened anywhere in this file -- confirm it is created before use.
    """
    commLog.write ( str.encode (message) )
'''
Pages
'''
def hostPage (showOnly=False):
global iAmHost
global games
global gameList
global myIO
pygame.display.set_caption('You are now host, click below to change SSID')
f = open ( configFilename, 'w')
f.write ( 'host\n' )
f.close()
iAmHost = True
myIO.games = gameList
DISPLAYSURF.fill((BLACK))
(images,sprites) = showImages (['images/ok.jpg'], [(400,400)] )
(surface, rect) = createLabel ('Enter the name of your host ssid', 50, 20)
DISPLAYSURF.blit(surface, rect)
(surface, rect) = createLabel ('SSID:', 250, 55)
DISPLAYSURF.blit(surface, rect)
pygame.display.update()
quit = False
while not quit:
(eventType,data,addr) = getInput (300,55)
if eventType == 'key':
print ( 'Got an ssid: [' + data + ']' )
if data != '':
pygame.display.set_caption('Hosting SSID: ' + data)
print ( 'ssid: [' + data + ']')
modifyDhcpcd()
modifyDnsmasq()
modifyHostapd(data)
quit = True
sprite = getSpriteClick (eventType, data, sprites )
if sprite != -1: # Quit is the only other option
mainPage ()
quit = True
# Show the list the SSIDS and join an ssid when it is selected
# Note: reboot may be necessary
def joinPage(showOnly=False):
global iAmHost
global myIO
f = open ( configFilename, 'w')
f.write ( 'client\n' )
f.close()
pygame.display.set_caption('You are client, click below to join an SSID')
f = open ( configFilename, 'w')
f.write ( 'client\n' )
f.close()
iAmHost = False
myIO.games = []
DISPLAYSURF.fill((BLACK))
(images,sprites) = showImages (['images/quit.jpg', 'images/join.jpg'], [(400,400), (200,200)] )
(ssidSurf, ssidRect) = createLabel ('Press Join to show SSIDs', 50, 20)
DISPLAYSURF.blit(ssidSurf, ssidRect)
pygame.display.update()
quit = False
ssids = scanForSsids()
labels = showList(ssids)
quit = False
while not quit and not showOnly:
(eventType,data,addr) = getInput (100,100)
# Check if an ssid is clicked on
sprite = getSpriteClick (eventType, data, labels )
if sprite != -1:
print ("Selected label: " + str(sprite))
quit = True
# All passwords are the same (ABCD1234)
updateWpaSupplicant (ssids[sprite], 'ABCD1234')
os.system ( 'reboot') # reboot the pi4
joinSSID (ssids[sprite])
mainPage ()
sprite = getSpriteClick (eventType, data, sprites )
if sprite != -1:
print ("Selected command: " + str(sprite))
mainPage ()
quit = True
# Show the list the games and play a game when it is selected
def gamePage(showOnly=False):
global games
global iAmHost
global myIO
quit = False
showTimeout = 0
count = 0
print ( 'gamePage, myIO.games: ' + str(myIO.games) + ' iAmHost: ' + str(iAmHost))
while not quit and not showOnly:
eventType,data,addr = myIO.getKeyOrUdp() # This call sets myIO.games
# Update the list of games once a second
if time.time() > showTimeout:
count = count + 1
DISPLAYSURF.fill((BLACK))
labels = showList(myIO.games)
if len(myIO.games) > 0:
pygame.display.set_caption('Please select a game')
else:
pygame.display.set_caption('Waiting for opponent to choose game to host')
showTimeout = time.time() + 1
if iAmHost:
showLabel ('Select a game to host', 50, 20)
myIO.games = gameList
else:
if myIO.games == []:
showLabel ('Waiting on host to choose a game', 50, 20 )
else:
showLabel ('Select a game to join', 50, 20)
(images,sprites) = showImages (['images/quit.jpg'], [(400,400)] )
pygame.display.update()
# Check if a game is clicked on
sprite = getSpriteClick (eventType, data, labels )
if sprite != -1:
game = myIO.games[sprite]
if iAmHost:
myIO.games = [ game ]
myIO.udpBroadcast ( 'exec:self.games=' + str(myIO.games))
print ("Selected game: " + str(sprite))
game = game.replace ( ' ', '' ).lower()
exec (game + 'Page()' ) # Show the game page
quit = True
mainPage ()
sprite = getSpriteClick (eventType, data, sprites )
if sprite != -1: # Quit is the only other option
print ("Selected command: " + str(sprite))
mainPage ()
quit = True
def mainPage(showOnly = True):
pygame.display.set_caption('Host Join or Play')
locations = [ (400,400), (300,100), (100,100), (500,100)]
height = DISPLAYHEIGHT - 50
DISPLAYSURF.fill((BLACK))
showStatus ( "All Operations Check")
(images,sprites) = showImages ( ['images/quit.jpg', 'images/host.jpg', 'images/join.jpg', 'images/game.jpg'], locations)
pygame.display.update()
quit = False
while not quit and not showOnly:
(eventType, data, addr) = getInput (100,100)
sprite = getSpriteClick (eventType, data, sprites )
if sprite != -1:
if sprite == 0:
quit = True
break
elif sprite == 1:
hostPage()
mainPage()
elif sprite == 2:
joinPage()
mainPage()
elif sprite == 3:
gamePage()
mainPage()
readConfigData()
mainPage(False)
| [
"Paulware@hotmail.com"
] | Paulware@hotmail.com |
6caf19ccec6e404b463f8c52b40ea8a0ff60958c | 044902b0b1f646e070cf02669c1ae98d71725ed8 | /encode.py | d9bfe4df9f1428fe2ba7c22df4425e9948a8a1e9 | [] | no_license | Andyblack-J/dissertation | 2629929aff140fe83f1e5008810129e89ff3b276 | 5a0074083e94699549246c809acce2770d1b27f8 | refs/heads/master | 2022-02-19T03:06:32.303641 | 2019-08-22T15:56:39 | 2019-08-22T15:56:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | import pandas as pd
import csv
feature_cols = []
class_cols = []
total_data = []
names = ['protocol', 'range(m)', 'power_src', 'weight(g)', 'processing_power(ghz)', 'device_type'] # column headings
file = pd.read_csv('dataset.csv', names=names)
non_enc = pd.DataFrame(file, columns=['range(m)', 'weight(g)', 'processing_power(ghz)', 'device_type']) # columns that do not require encoding
enc_cols = pd.DataFrame(file, columns=['protocol', 'power_src']) # columns that require Dummy encoding
new_cols = pd.get_dummies(enc_cols, columns=enc_cols) # encode 'protocol' and 'power_src' columns
for col in new_cols:
new_cols[col] = new_cols[col].astype(object) # change the type of newly encoded columns to object
concat_datasets = pd.concat([non_enc, new_cols], axis=1) # concatenate the encoded columns with non-encoded columns
cols = list(concat_datasets.columns.values)
cols.pop(cols.index('device_type')) # pop the column 'type' from the list of columns
new_dataset = concat_datasets[cols+['device_type']] # append 'type' to end of list
| [
"noreply@github.com"
] | Andyblack-J.noreply@github.com |
f6b1e76b27f1e64c31d6e67f6fa842fa15e5f14a | c6c450d750bcc559882c6f211f952b411505d6d8 | /apps/notifications/api/__init__.py | 1d8d28c94a98ee4df71b145b32f12948388c8d2a | [] | no_license | ESCL/pjtracker | e26ea09136f35f5c85ea8d68a63fd94fab2629da | 4dcf0e6a37e8753ae9d69d663c0c280fcca0a26c | refs/heads/develop | 2021-09-26T09:18:54.158051 | 2021-09-10T23:16:07 | 2021-09-10T23:16:07 | 52,280,177 | 1 | 0 | null | 2021-09-10T23:16:08 | 2016-02-22T14:45:55 | Python | UTF-8 | Python | false | false | 20 | py | __author__ = 'kako'
| [
"claudio.melendrez@gmail.com"
] | claudio.melendrez@gmail.com |
da00c3afef5f3dca44d8629a6444474c612c849e | d055327573defefc33cdce5e522c5dfa5e0678fe | /Code/seungah/process_emergency.py | 0e8c68451a9ef4b21a0e03e240df2ab68dcb17fb | [] | no_license | aaung310/multicampus-final | 0d7e4c3cbe120dde06eaad4bb137014210deaff4 | 5f1555f0632f8e23b9a9bb94868315ec93989d73 | refs/heads/master | 2023-08-01T02:05:57.689480 | 2021-09-09T04:47:11 | 2021-09-09T04:47:11 | 399,992,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
spark = SparkSession \
.builder \
.appName("emergencyMsg") \
.getOrCreate()
MsgSchema = spark.read.format('json').load('/home/lab11/emergency/20210903105648EmergencyMsg20210903.json').schema
MsgDf = spark.readStream.schema(MsgSchema).json('/home/lab11/emergency/*.json')
df_msg = MsgDf.select(explode(MsgDf.DisasterMsg.row).alias("emergency")).select('emergency.*')
df_msg.coalesce(1).writeStream.format('json') \
.option("checkpointLocation", "/home/lab11/emergency_check") \
.option("path", "/home/lab11/emergency_msg") \
.trigger(processingTime='7200 seconds') \
.start().awaitTermination() | [
"osacnlc@gmail.com"
] | osacnlc@gmail.com |
ffc7523a927d5edc83e1ee9dfa70f3b0b0fa6141 | 9f82983f5f119635931a0233ec86aa223f5f57ec | /base/forms.py | 3c6b4c9513823cde1a962ba30aa02ff0513ae688 | [] | no_license | Arox/d_and_d | d49532cd0b0a824aea3f4767200fa9463d2ae6a0 | d707c5cdb557f23f12c99ac8f1b7bd7c86e2a935 | refs/heads/master | 2020-06-02T00:34:12.648579 | 2015-03-21T10:21:01 | 2015-03-21T10:21:01 | 32,378,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | # -*- coding: utf-8 -*-
from django import forms
class BaseParametersForm(forms.Form):
m_name = forms.CharField(max_length=30)
m_descriptions = forms.CharField(widget=forms.Textarea)
def clean(self):
v_cleaned_data = super(BaseParametersForm, self).clean()
v_name = v_cleaned_data.get('name')
v_descriptions = v_cleaned_data.get(descriptions)
if len(v_name) < 1:
raise forms.ValidationError('Name is empty')
if len(v_descriptions) < 1:
raise forms.ValidationError('Description is empty')
return v_cleaned_data | [
"mailofarox@gmail.com"
] | mailofarox@gmail.com |
86f69a7f557474425cd8757000ce208fc7eb132e | d37bb79552c434254ddd233a5c6988087b6bb25a | /venvbill/Scripts/pviews-script.py | f67f6bc881dccb74e93f9189881a8ad332f3862d | [] | no_license | stevegleds/100daysbilltracker | 60d3c1bb72bd461e93cfda50255c40a801a40826 | d956c28418aafa23f8e3335681b286cdcf8421f2 | refs/heads/master | 2022-11-30T16:07:51.241935 | 2019-09-05T19:31:45 | 2019-09-05T19:31:45 | 205,573,137 | 1 | 1 | null | 2022-11-20T00:17:12 | 2019-08-31T17:05:03 | Python | UTF-8 | Python | false | false | 494 | py | #!s:\pythoncode\myprojects\learning\100daysofweb-with-python-course\days\089-092-deployment\your-turn\billtracker\venvbill\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pyramid','console_scripts','pviews'
__requires__ = 'pyramid'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pyramid', 'console_scripts', 'pviews')()
)
| [
"stevegleds@gmail.com"
] | stevegleds@gmail.com |
470f942cb97c5e14e703fd2ff95468074831ecbb | 8c6075f2c2b3e602acfb13698d99ecc7c9bb5831 | /newspaper_project/articles/migrations/0003_alter_comment_author.py | 9592a748628ca91a7acf3451691a8cdc3e20671e | [] | no_license | Skylahaustine/newspaper_app | 88b9ce77df3c408dad9140815379336c683d75c6 | 82548c73bb2fabae3151c414cdb170d3df07c6b3 | refs/heads/master | 2023-04-03T12:53:24.852456 | 2021-04-14T14:51:01 | 2021-04-14T14:51:01 | 357,113,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | # Generated by Django 3.2 on 2021-04-14 13:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('articles', '0002_comment'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL),
),
]
| [
"austineskylah9195@gmail.com"
] | austineskylah9195@gmail.com |
6aa4f1e92cf40a2a54d246b9838b42141c55a991 | 17236fd20fdf4473e04eccdecddacb7ee312b8a6 | /cookies/migrations/0002_jobapplicants.py | 1b47981fa7d293f5ad34e31c4e83a5d3b851bb83 | [] | no_license | sachin-badhwar/DjangoWithDocker | 568362196e7ff06e7d76b60e6a50f1d40272ea24 | 8779aceed2425359a8e9f8d1d728905e13d3704e | refs/heads/master | 2022-12-08T00:17:11.722195 | 2020-08-16T04:20:29 | 2020-08-16T04:20:29 | 287,671,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | # Generated by Django 3.0.8 on 2020-08-09 14:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cookies', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='JobApplicants',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150)),
('email', models.EmailField(max_length=150)),
('image', models.ImageField(blank=True, null=True, upload_to='')),
('resume', models.FileField(blank=True, null=True, upload_to='')),
],
),
]
| [
"budhwar58@gmail.com"
] | budhwar58@gmail.com |
11d5b28248cb64bca115295c66161c98cdfe4418 | f7c394164568ee5c8dbf963a488ec284408de59e | /po/testcase/test_index.py | 19b426d0048a3b35d2597939b5d87c8feeb69cfd | [] | no_license | saberpan1/Hogwarts16-web-wx | 156a0d46f91eeb4b291a561d4cc8316f9bffb4f1 | 6838b13dc4d98650a6fcad61afc104a490ecb1e4 | refs/heads/main | 2023-02-02T21:58:41.291985 | 2020-12-21T06:20:37 | 2020-12-21T06:20:37 | 323,246,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | from po.page.index_page import IndexPage
class TestIndex:
def setup_class(self):
self.index_page = IndexPage()
def test_login(self):
self.index_page.goto_login().login_scan()
def test_register(self):
self.index_page.goto_register().register()
| [
"saberpan1@163.com"
] | saberpan1@163.com |
276e3fd63ef0480cf6523e2c30879f5db3f42839 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /PORMain/pirates/world/DistributedIsland.py | 8e40f2092539eaf12ccaaf262930526eef68b234 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 52,439 | py | from panda3d.core import AlphaTestAttrib, CollideMask, CollisionInvSphere, CollisionNode, FadeLODNode, Filename, Fog, LODNode, Light, NodePath, PandaNode, RenderAttrib, TextNode, Texture, TextureStage, VBase4, Vec3, Vec4
import random
import re
import imp
from direct.actor import *
from direct.distributed import DistributedCartesianGrid
from direct.task import Task
from direct.showbase.PythonUtil import report
from direct.interval.IntervalGlobal import *
from direct.gui.OnscreenText import OnscreenText
from direct.gui.DirectGui import DGG
from otp.nametag.Nametag import Nametag
from otp.nametag.NametagGroup import NametagGroup
from otp.otpbase import OTPGlobals
from otp.otpbase import OTPRender
from pirates.ai import HolidayGlobals
from pirates.audio import SoundGlobals
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.effects.LanternGlow import LanternGlow
from pirates.effects.BlackSmoke import BlackSmoke
from pirates.effects.VolcanoEffect import VolcanoEffect
from pirates.effects.FeastFire import FeastFire
from pirates.effects import FireworkGlobals
from pirates.effects.FireworkShow import FireworkShow
from pirates.world import ZoneLOD
from pirates.world import WorldGlobals
from pirates.world import DistributedGameArea
from pirates.world.LocationConstants import LocationIds
from pirates.distributed import DistributedInteractive
from pirates.piratesgui import PiratesGuiGlobals, RadarGui
from pirates.seapatch.Water import IslandWaterParameters
from pirates.swamp.Swamp import Swamp
from pirates.seapatch.SeaPatch import SeaPatch
from pirates.seapatch.Reflection import Reflection
from pirates.piratesbase import TODGlobals
from pirates.pvp import PVPGlobals
from pirates.map.Minimap import IslandMap
from pirates.map.Mappable import MappableGrid
from direct.gui import DirectGuiGlobals
from pirates.battle.Teamable import Teamable
class DistributedIsland(DistributedGameArea.DistributedGameArea, DistributedCartesianGrid.DistributedCartesianGrid, ZoneLOD.ZoneLOD, Teamable, MappableGrid):
SiegeIcon = None
notify = directNotify.newCategory('DistributedIsland')
def __init__(self, cr):
DistributedGameArea.DistributedGameArea.__init__(self, cr)
DistributedCartesianGrid.DistributedCartesianGrid.__init__(self, cr)
Teamable.__init__(self)
MappableGrid.__init__(self)
self.islandShoreWave = None
self.islandObjectsLoaded = False
self.animControls = None
self.sphereRadii = [
1000,
2000,
3000,
100000]
self.sphereCenter = [
0,
0]
ZoneLOD.ZoneLOD.__init__(self, self.uniqueName)
self.parentWorld = None
self.gridSphere = None
self.nameText = None
self.geom = None
self.dockingLOD = None
self.dockingLodFog = None
self.dockingChar = None
self.playerBarrierNP = None
self.islandLowLod = None
self.islandLowLodFog = None
self.fogTransitionIval = None
self.gold = 0
self.islandTunnel = []
self.hasTunnelsOnRadar = False
self.name = 'Island Name'
self.nametag = None
self.nametag3d = None
self.volcanoEffect = None
self.feastFireEnabled = False
self.feastFireEffect = None
self.fireworkShowEnabled = False
self.fireworkShowLegal = False
self.fireworkShowType = 0
self.fireworkShow = None
self.islandMapModelPath = None
self.mapName = None
self.objsCached = False
self.oceanVisEnabled = base.config.GetBool('ocean-visibility', False)
self.flatShipsOnIsland = base.config.GetBool('flat-ships-on-island', True)
self.locationSphereName = ''
self.SiegeIcons = []
if not DistributedIsland.SiegeIcon:
logos = loader.loadModel('models/textureCards/sailLogo')
if logos:
DistributedIsland.SiegeIcon = [
logos.find('**/logo_french_flag'),
logos.find('**/logo_spanish_flag')]
def announceGenerate(self):
DistributedGameArea.DistributedGameArea.announceGenerate(self)
DistributedCartesianGrid.DistributedCartesianGrid.announceGenerate(self)
self.accept('docked', self.resetZoneLODs)
self.accept('toggleIslandNametag', self.setNameVisible)
self.loadDockingLOD()
self.loadIslandLowLod()
detailLevel = base.options.terrain_detail_level
sailingLOD = FadeLODNode('sailingLOD')
sailingLOD.setFadeTime(2)
if detailLevel == 0:
sailingLOD.addSwitch(5000, 0)
sailingLOD.addSwitch(100000, 5000)
elif detailLevel == 1:
sailingLOD.addSwitch(10000, 0)
sailingLOD.addSwitch(100000, 10000)
elif detailLevel == 2:
sailingLOD.addSwitch(20000, 0)
sailingLOD.addSwitch(100000, 20000)
self.sailingLOD = self.attachNewNode(sailingLOD)
if self.dockingLOD:
self.dockingLOD.reparentTo(self.sailingLOD)
self.islandLowLod.reparentTo(self.sailingLOD)
else:
self.islandLowLod.reparentTo(self.sailingLOD)
self.islandLowLod.copyTo(self.sailingLOD)
self.loadWaterRing()
gridSphereName = self.uniqueName('GridSphere')
self.gridSphereEnterEvent = 'enter' + gridSphereName
self.gridSphereExitEvent = 'exit' + gridSphereName
self.setLodCollideMask(self.getLodCollideMask() | PiratesGlobals.ShipCollideBitmask)
self.setZoneRadii(self.sphereRadii, self.sphereCenter)
islandLOD = FadeLODNode('islandLOD')
islandLOD.addSwitch(10000, 0)
islandLOD.addSwitch(20000, 10000)
islandLOD.setFadeTime(0.5)
lodnp = NodePath(islandLOD)
lodnp.reparentTo(self.builder.areaGeometry)
lodnp.showThrough(OTPRender.ReflectionCameraBitmask)
self.geomLOD = lodnp
self.highDetail = lodnp.attachNewNode('highDetail')
self.lowDetail = lodnp.attachNewNode('lowDetail')
self.parentWorld.islands[self.doId] = self
#self.initializeNametag3d()
#self.setName(self.name)
self.addActive()
self.understandable = 1
self.setPlayerType(NametagGroup.CCNormal)
self.placeOnMap()
self.accept('timeOfDayChange', self.timeOfDayChanged)
def disable(self):
self.turnOff()
self.unloadIslandLowLod()
self.unloadDockingLOD()
self.sailingLOD.detachNode()
self.sailingLOD = None
self.unloadWaterRing()
self.removeFromMap()
self.ignore('docked')
self.ignore('toggleIslandNametag')
self.ignore('timeOfDayChange')
self.stopCustomEffects()
if self.fogTransitionIval:
self.fogTransitionIval.pause()
self.fogTransitionIval = None
ZoneLOD.ZoneLOD.cleanup(self)
DistributedGameArea.DistributedGameArea.disable(self)
DistributedCartesianGrid.DistributedCartesianGrid.disable(self)
self.deleteZoneCollisions()
try:
self.parentWorld.islands.pop(self.doId, None)
except:
pass
self.parentWorld = None
self.removeActive()
self.deleteNametag3d()
def delete(self):
DistributedGameArea.DistributedGameArea.delete(self)
DistributedCartesianGrid.DistributedCartesianGrid.delete(self)
ZoneLOD.ZoneLOD.delete(self)
self.unloadPlayerBarrier()
self.remove_node()
while len(self.SiegeIcons):
icon = self.SiegeIcons.pop()
icon.remove_node()
icon = None
def turnOff(self, cache = False):
self.stopCustomEffects()
if not cache:
self.setZoneLevelOuter()
localAvatar.clearInterestNamed(None, [
'IslandLocal'])
DistributedGameArea.DistributedGameArea.turnOff(self)
DistributedCartesianGrid.DistributedCartesianGrid.turnOff(self)
ZoneLOD.ZoneLOD.turnOff(self)
def turnOn(self, av = None):
self.startCustomEffects()
if base.shipsVisibleFromIsland:
self.parentWorld.worldGrid.startProcessVisibility(localAvatar)
if av:
self.setZoneLevel(0)
self.addObjectToGrid(av)
self.loadConnectors()
localAvatar.setInterest(self.doId, PiratesGlobals.IslandLocalZone, [
'IslandLocal'])
DistributedGameArea.DistributedGameArea.turnOn(self)
DistributedCartesianGrid.DistributedCartesianGrid.turnOn(self, av)
ZoneLOD.ZoneLOD.turnOn(self)
def isGridParent(self):
return 1
def addObjectToGrid(self, av):
DistributedCartesianGrid.DistributedCartesianGrid.addObjectToGrid(self, av)
if av.isLocal():
self.updateAvReturnLocation(av)
self.startProcessVisibility(av)
def setLocation(self, parentId, zoneId):
DistributedGameArea.DistributedGameArea.setLocation(self, parentId, zoneId)
world = self.cr.doId2do.get(parentId)
if parentId not in (0, self.cr.getGameDoId()):
pass
if world:
self.reparentTo(world)
self.parentWorld = world
def setZoneSphereSize(self, rad0, rad1, rad2):
self.sphereRadii = [
rad0,
rad1,
rad2,
100000]
def getZoneSphereSize(self):
return self.sphereRadii
def setZoneSphereCenter(self, x, y):
self.sphereCenter = [
x,
y]
def getZoneSphereCenter(self):
return self.sphereCenter
def getMusicName(self):
islandName = self.getName()
musicName = self.MusicNames.get(islandName, self.MusicDefault)
return musicName
def loadZoneLevel(self, level):
if level == 0:
self.islandObjectsLoaded = True
self.hideSailingLOD()
base.loadingScreen.beginStep('island terrain')
self.retrieveIslandTerrain()
base.loadingScreen.endStep('island terrain')
self.builder.loadObjects()
base.loadingScreen.beginStep('rest', 1, 8)
base.loadingScreen.tick()
self.loadConnectors()
self.listenForLocationSphere()
base.loadingScreen.tick()
self.startCustomEffects(island = True)
base.loadingScreen.tick()
self.water = SeaPatch(render, Reflection.getGlobalReflection(), todMgr = base.cr.timeOfDayManager)
base.loadingScreen.tick()
self.water.loadSeaPatchFile('out.spf')
base.loadingScreen.tick()
self.water.updateWater(0)
base.loadingScreen.tick()
messenger.send('toggleIslandNametag', [
0])
if self.isDockable():
self.setupMinimap()
if self.minimap and localAvatar.getMinimapObject():
self.minimap.addObject(localAvatar.getMinimapObject())
localAvatar.guiMgr.setMinimap(self.minimap)
localAvatar.setInterest(self.doId, PiratesGlobals.IslandLocalZone, [
'IslandLocal'])
if base.config.GetBool('island-prepare-scene', 1) and base.win.getGsg():
render.prepareScene(base.win.getGsg())
self.initBlockers(self)
base.loadingScreen.tick()
self.builder.checkForHolidayObjects()
base.loadingScreen.tick()
self.handleEnterGameArea()
base.loadingScreen.tick()
base.loadingScreen.endStep('rest')
elif level == 1:
localAvatar.setInterest(self.doId, PiratesGlobals.IslandShipDeployerZone, [
'ShipDeployer'])
messenger.send('toggleIslandNametag', [
1])
if not self.undockable:
localAvatar.setPort(self.doId)
else:
localAvatar.guiMgr.createWarning(PLocalizer.HeavyFogWarning, PiratesGuiGlobals.TextFG6, duration = 6.0)
elif level == 2:
if self.waterRing:
self.setIslandWaterParameters(True)
self.addToOceanSeapatch()
elif level == 3:
self.allEnabled = False
self.showName()
elif level == 4:
pass
base.loadingScreen.tick()
self.updateCustomEffects(level)
def unloadZoneLevel(self, level):
if level == 0:
self.islandObjectsLoaded = False
self.handleExitGameArea()
self.unloadConnectors()
self.cleanupIslandData()
self.unloadIslandShoreWave()
self.stopListenForLocationSphere()
base.localAvatar.guiMgr.clearMinimap(self.minimap)
self.destroyMinimap()
base.musicMgr.requestCurMusicFadeOut(removeFromPlaylist = True)
self.showSailingLOD()
localAvatar.clearInterestNamed(None, [
'IslandLocal'])
elif level == 1:
localAvatar.clearInterestNamed(None, [
'ShipDeployer'])
localAvatar.clearPort(self.doId)
elif level == 2:
self.showName()
self.removeFromOceanSeapatch()
elif level == 3:
self.hideName()
elif level == 4:
pass
self.updateCustomEffects(level + 1)
def handleChildArrive(self, child, zoneId):
DistributedGameArea.DistributedGameArea.handleChildArrive(self, child, zoneId)
base.loadingScreen.tick()
if child.isLocal():
self.childArrived(self.doId, self.getParentObj())
messenger.send('docked')
self.accept('ship_vis_change', self.shipVisibilityChanged)
base.loadingScreen.tick()
if not base.cr.config.GetBool('remove-island-barriers', 0):
self.setupPlayerBarrier()
if not base.shipsVisibleFromIsland:
self.parentWorld.worldGrid.stopProcessVisibility()
else:
self.parentWorld.worldGrid.startProcessVisibility(localAvatar)
base.hideShipNametags = True
base.loadingScreen.tick()
messenger.send('hide-ship-nametags')
base.loadingScreen.tick()
if base.shipsVisibleFromIsland == 1:
base.showShipFlats = True
messenger.send('far-ships')
else:
base.showShipFlats = False
messenger.send('normal-ships')
self.setZoneLevel(0)
self.turnOn(localAvatar)
def handleChildLeave(self, child, zoneId):
if child.isLocal():
self.childLeft(self.doId, self.getParentObj())
self.ignore('ship_vis_change')
self.unloadPlayerBarrier()
messenger.send('normal-ships')
base.showShipFlats = False
base.hideShipNametags = False
messenger.send('show-ship-nametags')
self.turnOff()
DistributedGameArea.DistributedGameArea.handleChildLeave(self, child, zoneId)
def handleEnterGameArea(self, collEntry = None):
if self.uniqueId == LocationIds.KINGSHEAD_ISLAND:
self.accept(PiratesGlobals.EVENT_SPHERE_SNEAK + PiratesGlobals.SPHERE_ENTER_SUFFIX, self._handleSneakIntoKingshead)
DistributedGameArea.DistributedGameArea.handleEnterGameArea(self, collEntry)
def handleExitGameArea(self, collEntry = None):
if self.uniqueId == LocationIds.KINGSHEAD_ISLAND:
self.ignore(PiratesGlobals.EVENT_SPHERE_SNEAK + PiratesGlobals.SPHERE_ENTER_SUFFIX)
DistributedGameArea.DistributedGameArea.handleExitGameArea(self, collEntry)
def _handleSneakIntoKingshead(self, msgName, avId):
if avId == localAvatar.doId:
localAvatar.motionFSM.off()
self.sendUpdate('requestEntryToIsland')
if self.uniqueId == LocationIds.KINGSHEAD_ISLAND:
localAvatar.guiMgr.messageStack.addTextMessage(PLocalizer.EnterKingsheadMessage)
def setupPlayerBarrier(self):
if not self.playerBarrierNP:
playerBarrier = CollisionInvSphere(self.zoneCenter[0], self.zoneCenter[1], 0, self.zoneRadii[0] * 0.95)
playerBarrier.setTangible(1)
cName = self.uniqueName('PlayerBarrier')
cSphereNode = CollisionNode(cName)
cSphereNode.setIntoCollideMask(OTPGlobals.WallBitmask | OTPGlobals.GhostBitmask)
cSphereNode.addSolid(playerBarrier)
self.playerBarrierNP = self.attachNewNode(cSphereNode)
self.accept('enter' + self.uniqueName('PlayerBarrier'), self.enteredPlayerBarrier)
self.accept('islandPlayerBarrier', self.setPlayerBarrier)
self.setPlayerBarrier(1)
def enteredPlayerBarrier(self, *args):
localAvatar.guiMgr.createWarning(PLocalizer.IslandPlayerBarrierWarning, PiratesGuiGlobals.TextFG6)
def unloadPlayerBarrier(self):
self.ignore('enter' + self.uniqueName('PlayerBarrier'))
self.ignore('islandPlayerBarrier')
if self.playerBarrierNP:
self.playerBarrierNP.remove_node()
self.playerBarrierNP = None
def setPlayerBarrier(self, isOn):
if self.playerBarrierNP:
if isOn:
self.playerBarrierNP.unstash()
else:
self.playerBarrierNP.stash()
def addIslandToOcean(self):
if self.parentWorld.worldGrid:
self.parentWorld.worldGrid.addIslandGrid(self)
else:
self.notify.error('worldGrid is none for %s %s' % (self.parentWorld, self))
def removeIslandFromOcean(self):
if self.parentWorld:
self.parentWorld.worldGrid.removeIslandGrid(self)
def setLinks(self, links):
DistributedGameArea.DistributedGameArea.setLinks(self, links)
if self.lastZoneLevel == 0:
self.loadConnectors()
def setModelPath(self, modelPath):
self.modelPath = modelPath
def loadIslandLowLod(self):
flatName = self.modelPath.split('_zero')[0]
if not self.islandLowLod:
self.islandLowLod = loader.loadModel('%s_low' % flatName, okMissing = False)
self.islandLowLod.flattenStrong()
self.islandLowLod.hide(OTPRender.MainCameraBitmask)
self.islandLowLod.showThrough(OTPRender.EnviroCameraBitmask)
self.islandLowLodFog = self.islandLowLod.find('**/fog')
if self.islandLowLodFog:
self.islandLowLodFog.setLightOff()
self.islandLowLodFog.setDepthWrite(0)
todMgr = base.cr.timeOfDayManager
if todMgr:
self.islandLowLodFog.setColorScale(TODGlobals.getTodEnvSetting(todMgr.currentState, todMgr.environment, 'FogColor') / 3.0 + Vec4(0, 0, 0, 1))
def unloadIslandLowLod(self):
if self.islandLowLod:
self.islandLowLod.remove_node()
self.islandLowLod = None
def loadIslandMapModel(self):
if not self.islandMapModelPath:
mapModelName = self.modelPath.split('_zero')
self.islandMapModelPath = mapModelName[0] + '_worldmap'
def placeOnMap(self):
self.loadIslandMapModel()
if not (self.mapName) and self.islandMapModelPath:
mapPage = localAvatar.guiMgr.mapPage
self.mapName = mapPage.addIsland(self.name, self.uniqueId, self.islandMapModelPath, self.getPos(), self.getH())
def removeFromMap(self):
if self.mapName:
mapPage = localAvatar.guiMgr.mapPage
mapPage.removeIsland(self.mapName)
self.mapName = None
def loadIslandShoreWave(self, parent):
base.loadingScreen.tick()
if self.islandShoreWave:
return None
lowend = ''
if base.options.getTerrainDetailSetting() == 0:
lowend = '_lowend'
islandBaseName = self.modelPath.split('_zero')[0]
base.loadingScreen.tick()
waveModel = loader.loadModel(islandBaseName + lowend + '_wave_none', okMissing = True)
if lowend != '' and not waveModel:
lowend = ''
waveModel = loader.loadModel(islandBaseName + lowend + '_wave_none', okMissing = True)
if waveModel:
waveModel.setBin('water', 10)
self.islandShoreWave = Actor.Actor(waveModel)
self.islandShoreWave.loadAnims({
'idle': islandBaseName + lowend + '_wave_idle' })
self.islandShoreWave.reparentTo(parent)
self.islandShoreWave.loop('idle')
self.islandShoreWave.setBin('water', 10)
meshes = self.islandShoreWave.findAllMatches('**/mesh_tide1')
if not meshes.isEmpty():
mesh = meshes[0]
joints = self.islandShoreWave.findAllMatches('**/uvj_WakeWhiteTide1')
if joints.getNumPaths():
mesh.setTexProjector(mesh.findTextureStage('default'), joints[0], parent)
meshes = self.islandShoreWave.findAllMatches('**/mesh_tide2')
if not meshes.isEmpty():
mesh = meshes[0]
joints = self.islandShoreWave.findAllMatches('**/uvj_WakeWhiteTide2')
if joints.getNumPaths():
mesh.setTexProjector(mesh.findTextureStage('default'), joints[0], parent)
lavaCombo = self.islandShoreWave.findAllMatches('**/lava_combo_*')
if lavaCombo.getNumPaths():
lavaComboRoot = self.islandShoreWave.find('**/+Character').attachNewNode('lavaCombo')
lavaComboRoot.setDepthWrite(1, 100)
lavaCombo.reparentTo(lavaComboRoot)
joint = self.islandShoreWave.find('**/uvj_LavaCombo1')
lavaComboRoot.setTexProjector(lavaComboRoot.findTextureStage('default'), joint, parent)
lavaHot = self.islandShoreWave.findAllMatches('**/lava_hot_*')
if lavaHot.getNumPaths():
lavaHotRoot = self.islandShoreWave.find('**/+Character').attachNewNode('lavaHot')
lavaHotRoot.setDepthWrite(1, 100)
lavaHot.reparentTo(lavaHotRoot)
joint = self.islandShoreWave.find('**/uvj_LavaHot1')
lavaHotRoot.setTexProjector(lavaHotRoot.findTextureStage('default'), joint, parent)
lavaCool = self.islandShoreWave.findAllMatches('**/lava_cool_*')
if lavaCool.getNumPaths():
lavaCoolRoot = self.islandShoreWave.find('**/+Character').attachNewNode('lavaCool')
lavaCoolRoot.setDepthWrite(1, 100)
lavaCool.reparentTo(lavaCoolRoot)
joint = self.islandShoreWave.find('**/uvj_LavaCool1')
lavaCoolRoot.setTexProjector(lavaCoolRoot.findTextureStage('default'), joint, parent)
self.islandShoreWave.setPlayRate(0.800000, 'idle')
OTPRender.renderReflection(False, self.islandShoreWave, 'p_island_shore', None)
alpha_test_attrib = AlphaTestAttrib.make(RenderAttrib.MAlways, 0)
self.islandShoreWave.setAttrib(alpha_test_attrib, 100)
self.islandShoreWave.setTwoSided(1, 100)
self.islandShoreWave.setDepthWrite(0, 100)
def unloadIslandShoreWave(self):
if self.islandShoreWave:
self.islandShoreWave.delete()
self.islandShoreWave = None
def foo(self):
collNodes = self.geom.findAllMatches('**/+CollisionNode')
for collNode in collNodes:
curMask = collNode.node().getIntoCollideMask()
if curMask.hasBitsInCommon(OTPGlobals.FloorBitmask):
self.setupCannonballLandColl(collNode, PiratesGlobals.TargetBitmask | curMask, 0)
continue
def loadDockingLOD(self):
islandBaseName = self.modelPath.split('_zero')[0]
if self.dockingLOD:
self.dockingLOD.detachNode()
self.dockingLOD = loader.loadModel(islandBaseName + '_dock_lod', okMissing = True)
if self.dockingLOD:
self.dockingLOD.hide(OTPRender.MainCameraBitmask)
self.dockingLOD.showThrough(OTPRender.EnviroCameraBitmask)
self.dockingLOD.findAllMatches('**/water_*').detach()
self.dockingLOD.flattenStrong()
self.dockingLodFog = self.dockingLOD.find('**/fog')
if self.dockingLodFog:
self.dockingLodFog.setLightOff()
self.dockingLodFog.setDepthWrite(0)
todMgr = base.cr.timeOfDayManager
if todMgr:
self.dockingLodFog.setColorScale(TODGlobals.getTodEnvSetting(todMgr.currentState, todMgr.environment, 'FogColor') / 3.0 + Vec4(0, 0, 0, 1))
def unloadDockingLOD(self):
if self.dockingLOD:
self.dockingLOD.remove_node()
self.dockingLOD = None
def showSailingLOD(self):
self.sailingLOD.show()
def hideSailingLOD(self):
self.sailingLOD.hide()
def loadTerrain(self):
islandBaseName = self.modelPath.split('_zero')[0]
self.geom = self.loadWholeModel(islandBaseName)
self.geom.findAllMatches('**/water_*').detach()
def loadWholeModel(self, name):
lowend = ''
if base.options.getTerrainDetailSetting() == 0:
lowend = '_lowend'
zeroModel = loader.loadModel(name + lowend + '_zero', okMissing = True)
if not zeroModel:
zeroModel = loader.loadModel(name + lowend, okMissing = True)
if lowend != '' and not zeroModel:
zeroModel = loader.loadModel(name + '_zero', okMissing = True)
if not zeroModel:
zeroModel = loader.loadModel(name)
geom = zeroModel
collNode = geom.find('**/cannoncol*')
if collNode != collNode.notFound():
collNode.node().setIntoCollideMask(collNode.node().getIntoCollideMask() | PiratesGlobals.TargetBitmask | OTPGlobals.CameraBitmask)
collNode.setTag('objType', str(PiratesGlobals.COLL_BLOCKER))
return geom
def addToOceanSeapatch(self):
if self.parentWorld and self.parentWorld.getWater():
self.parentWorld.getWater().patch.addFlatWell(self.uniqueName('flatWell'), self, self.zoneCenter[0], self.zoneCenter[1], self.zoneRadii[0], self.zoneRadii[0] + 100)
def removeFromOceanSeapatch(self):
if self.parentWorld.getWater():
self.parentWorld.getWater().patch.removeFlatWell(self.uniqueName('flatWell'))
def loadIslandStuff(self):
self.largeObjects = self.geom.findAllMatches('**/*bldg*')
for b in self.largeObjects:
b.wrtReparentTo(self.largeObjectsHigh)
wallGeom = b.find('**/wall*_n_window*')
roofGeom = b.find('**/roof')
for c in [
wallGeom,
roofGeom]:
self.setupCannonballBldgColl(c, PiratesGlobals.TargetBitmask)
details = [
self.geom.find('**/barrels'),
self.geom.find('**/crates'),
self.geom.find('**/canopys'),
self.geom.find('**/bushes')]
for detail in details:
if not detail.isEmpty():
detail.wrtReparentTo(self.smallObjectsHigh)
detail.flattenLight()
continue
self.smallObjects = details
del details
details = [
self.geom.find('**/palmtrees'),
self.geom.find('**/pier')]
for detail in details:
if not detail.isEmpty():
detail.wrtReparentTo(self.medObjectsHigh)
detail.flattenLight()
continue
self.mediumObjects = details
def setName(self, name):
self.name = name
if not self.nametag:
self.createNametag(self.name)
else:
self.nametag.setName(name)
self.nametag.setDisplayName(' ')
if self.nameText:
self.nameText['text'] = name
siegeTeam = self.getSiegeTeam()
if siegeTeam and self.SiegeIcon:
color = VBase4(PVPGlobals.getSiegeColor(siegeTeam))
color.setW(0.7)
icon = self.SiegeIcon[siegeTeam - 1].copyTo(NodePath('siegeIcons'))
icon.reparentTo(self.nameText)
self.SiegeIcons.append(icon)
icon.setZ(1.5)
icon.setScale(0.75)
else:
color = Vec4(0.6, 0.6, 1, 0.4)
self.nameText['fg'] = color
def setDisplayName(self, str):
self.nametag.setDisplayName(str)
def getName(self):
return self.name
def getNameVisible(self):
return self._DistributedIsland__nameVisible
def setNameVisible(self, bool):
self._DistributedIsland__nameVisible = bool
if bool:
self.showName()
if not bool:
self.hideName()
def hideName(self):
self.nametag.getNametag3d().setContents(Nametag.CSpeech | Nametag.CThought)
def showName(self):
if self._DistributedIsland__nameVisible:
self.nametag.getNametag3d().setContents(Nametag.CName | Nametag.CSpeech | Nametag.CThought)
def hideNametag2d(self):
self.nametag2dContents = 0
self.nametag.getNametag2d().setContents(self.nametag2dContents & self.nametag2dDist)
def showNametag2d(self):
self.nametag2dContents = self.nametag2dNormalContents
self.nametag2dContents = Nametag.CSpeech
self.nametag.getNametag2d().setContents(self.nametag2dContents & self.nametag2dDist)
def hideNametag3d(self):
self.nametag.getNametag3d().setContents(0)
def showNametag3d(self):
if self._DistributedIsland__nameVisible:
self.nametag.getNametag3d().setContents(Nametag.CName | Nametag.CSpeech | Nametag.CThought)
else:
self.nametag.getNametag3d().setContents(0)
def setPickable(self, flag):
self.nametag.setActive(flag)
def clickedNametag(self):
if self.nametag.isActive():
messenger.send('clickedNametag', [
self])
def initializeNametag3d(self):
self.deleteNametag3d()
self.nametag.setFont(PiratesGlobals.getPirateFont())
nametagNode = self.nametag.getNametag3d().upcastToPandaNode()
self.nametag3d.attachNewNode(nametagNode)
self.nametag3d.setFogOff()
self.nametag3d.setLightOff()
self.nametag3d.setColorScaleOff(100)
self.nametag3d.setDepthWrite(0)
self.iconNodePath = self.nametag.getNameIcon()
if self.iconNodePath.isEmpty():
self.notify.warning('empty iconNodePath in initializeNametag3d')
return 0
if not self.nameText:
self.nameText = OnscreenText(fg = Vec4(1, 1, 1, 1), bg = Vec4(0, 0, 0, 0), scale = 1.1, align = TextNode.ACenter, mayChange = 1, font = PiratesGlobals.getPirateBoldOutlineFont())
self.nameText.setDepthWrite(0)
self.nameText.reparentTo(self.iconNodePath)
self.nameText.setColorScaleOff(100)
self.nameText.setLightOff()
self.nameText.setFogOff()
def deleteNametag3d(self):
children = self.nametag3d.getChildren()
for i in xrange(children.getNumPaths()):
children[i].remove_node()
def addActive(self):
if base.wantNametags:
self.nametag.manage(base.marginManager)
self.accept(self.nametag.getUniqueId(), self.clickedNametag)
def removeActive(self):
if base.wantNametags and self.nametag:
self.nametag.unmanage(base.marginManager)
self.ignore(self.nametag.getUniqueId())
def createNametag(self, name):
self._DistributedIsland__nameVisible = 1
self.nametag = NametagGroup()
self.nametag.setAvatar(self)
self.nametag.setFont(PiratesGlobals.getPirateFont())
self.nametag2dContents = Nametag.CName
self.nametag2dDist = Nametag.CName
self.nametag2dNormalContents = Nametag.CName
self.nametag3d = self.attachNewNode('nametag3d')
self.nametag3d.setTag('cam', 'nametag')
#self.nametag.setName(name)
self.nametag.setNameWordwrap(PiratesGlobals.NAMETAG_WORDWRAP)
OTPRender.renderReflection(False, self.nametag3d, 'p_island_nametag', None)
self.nametag3d.setPos(0, 0, WorldGlobals.getNametagHeight(self.name))
self.setNametagScale(WorldGlobals.getNametagScale(self.name))
self.nametag3d.setFogOff()
self.setPickable(0)
self.nametag.setColorCode(1)
def getNametagScale(self):
return self.nametagScale
def setNametagScale(self, scale):
self.nametagScale = scale
self.nametag3d.setScale(scale)
def setPlayerType(self, playerType):
self.playerType = playerType
self.nametag.setColorCode(self.playerType)
def setIslandWaterParameters(self, use_alpha_map):
if self.islandWaterParameters:
if self.parentWorld:
self.islandWaterParameters.setIslandWaterParameters(self.parentWorld.getWater(), use_alpha_map)
def setX(self, *args, **kwargs):
DistributedGameArea.DistributedGameArea.setX(self, *args, **kwargs)
mapPage = base.localAvatar.guiMgr.mapPage
mapPage.updateIsland(self.mapName, worldPos = self.getPos())
def setY(self, *args, **kwargs):
DistributedGameArea.DistributedGameArea.setY(self, *args, **kwargs)
mapPage = base.localAvatar.guiMgr.mapPage
mapPage.updateIsland(self.mapName, worldPos = self.getPos())
def setH(self, *args, **kwargs):
DistributedGameArea.DistributedGameArea.setH(self, *args, **kwargs)
mapPage = base.localAvatar.guiMgr.mapPage
mapPage.updateIsland(self.mapName, rotation = self.getH())
def getTeam(self):
return PiratesGlobals.ISLAND_TEAM
def updateAvReturnLocation(self, av):
av.d_requestReturnLocation(self.doId)
def updateAvIsland(self, av):
av.d_requestCurrentIsland(self.doId)
def startFloatables(self):
world = base.cr.getActiveWorld()
if world:
water = world.getWater()
if water:
for (uid, obj) in self.floatables.iteritems():
water.addFloatable(uid, obj, mass = 5)
def stopFloatables(self):
world = base.cr.getActiveWorld()
if world:
water = world.getWater()
if water:
for uid in self.floatables:
water.removeFloatable(uid)
def setOceanVisEnabled(self, enabled):
self.oceanVisEnabled = enabled
if self.lastZoneLevel == 0:
if not self.oceanVisEnabled:
self.parentWorld.worldGrid.stopProcessVisibility()
else:
self.parentWorld.worldGrid.startProcessVisibility(localAvatar)
def setFlatShips(self, value):
self.flatShipsOnIsland = value
if self.lastZoneLevel == 0:
if self.flatShipsOnIsland:
messenger.send('far-ships')
base.showShipFlats = True
else:
messenger.send('normal-ships')
base.showShipFlats = False
def listenForLocationSphere(self):
self.locationSphereName = 'locSphere-%s' % self.uniqueId
msgName = PiratesGlobals.LOCATION_SPHERE
self.accept('enter' + self.locationSphereName, self.cr.getActiveWorld().enteredSphere, extraArgs = [
[
msgName]])
self.accept('exit' + self.locationSphereName, self.cr.getActiveWorld().exitedSphere, extraArgs = [
[
msgName]])
def stopListenForLocationSphere(self):
if self.locationSphereName:
self.ignore('enter' + self.locationSphereName)
self.ignore('exit' + self.locationSphereName)
def buildDockingLOD(self):
dockingCache = self.getDockingCache()
self.loadDockingLOD()
for obj in self.dockingLOD.findAllMatches('**/=ignore-lighting'):
obj.setLightOff(1000)
dockingCache.setData(self.dockingLOD.node(), 0)
base.bamCache.store(dockingCache)
def retrieveDockingLOD(self):
dockingCache = self.getDockingCache()
if dockingCache.hasData() and base.config.GetBool('want-disk-cache', 0):
data = dockingCache.getData()
newData = data.copySubgraph()
self.dockingLOD = NodePath(newData)
else:
self.buildDockingLOD()
islandBaseName = self.modelPath.split('_zero')[0]
dockingChar = loader.loadModel(islandBaseName + '_dock_lod_none', okMissing = True)
if dockingChar:
self.dockingChar = Actor.Actor(dockingChar)
self.dockingChar.loadAnims({
'idle': islandBaseName + '_dock_lod_idle' })
self.dockingChar.reparentTo(self.dockingLOD)
joint = self.dockingChar.find('**/uvj_LavaCombo1')
self.dockingChar.loop('idle')
self.dockingChar.setTexProjector(self.dockingChar.findTextureStage('default'), joint, self.dockingLOD)
self.dockingLOD.reparentTo(self)
self.dockingLOD.hide(OTPRender.MainCameraBitmask)
self.dockingLOD.showThrough(OTPRender.EnviroCameraBitmask)
def buildIslandTerrain(self):
islandGeomCache = self.getIslandCache()
self.loadTerrain()
flat = self.geom.find('**/island_flat_lod')
if not flat.isEmpty():
flat.remove_node()
for obj in self.geom.findAllMatches('**/=ignore-lighting'):
obj.setLightOff(1000)
islandGeomCache.setData(self.geom.node(), 0)
base.bamCache.store(islandGeomCache)
def retrieveIslandTerrain(self):
islandGeomCache = self.getIslandCache()
if islandGeomCache.hasData() and base.config.GetBool('want-disk-cache', 0):
data = islandGeomCache.getData()
newData = data.copySubgraph()
self.geom = NodePath(newData)
else:
self.buildIslandTerrain()
self.geom.reparentTo(self)
self.geom.hide(OTPRender.MainCameraBitmask)
self.geom.showThrough(OTPRender.EnviroCameraBitmask)
self.hideMapNodes()
self.loadIslandShoreWave(self.geom)
def cleanupIslandData(self):
self.builder.cleanupData()
self.cleanupTerrain()
def cleanupTerrain(self):
self.geom.remove_node()
self.geom = None
def cleanupDockingLOD(self):
if self.dockingChar:
self.dockingChar.cleanup()
self.dockingChar = None
self.dockingLOD.remove_node()
self.dockingLOD = None
def getCoreCache(self):
return base.bamCache.lookup(Filename('/%s_%s_core_%s_%s.bam' % (self.name, self.uniqueId, base.cr.getServerVersion(), base.gridDetail)), 'bam')
def getGridCache(self):
return base.bamCache.lookup(Filename('/%s_%s_grid_%s.bam' % (self.name, self.uniqueId, base.gridDetail)), 'bam')
def getAnimCache(self):
return base.bamCache.lookup(Filename('/%s_%s_anims_%s.bam' % (self.name, self.uniqueId, base.gridDetail)), 'bam')
def getLargeObjectsCache(self):
return base.bamCache.lookup(Filename('/%s_%s_large_%s.bam' % (self.name, self.uniqueId, base.gridDetail)), 'bam')
def getIslandCache(self):
return base.bamCache.lookup(Filename('/%s_%s_island_%s_%s.bam' % (self.name, self.uniqueId, base.cr.getServerVersion(), base.gridDetail)), 'bam')
def getDockingCache(self):
return base.bamCache.lookup(Filename('/%s_%s_island_docking_%s_%s.bam' % (self.name, self.uniqueId, base.cr.getServerVersion(), base.gridDetail)), 'bam')
def getSiegeTeam(self):
return base.cr.distributedDistrict.worldCreator.getPvpIslandTeam(self.uniqueId)
def isInInvasion(self):
return False
def getArmorScale(self):
return 1.0
def setUndockable(self, undockable):
self.undockable = undockable
def isDockable(self):
return not (self.undockable)
def shipVisibilityChanged(self, value):
if value == 0:
self.parentWorld.worldGrid.stopProcessVisibility()
elif value == 1:
self.parentWorld.worldGrid.startProcessVisibility(localAvatar)
base.showShipFlats = True
messenger.send('far-ships')
elif value == 2:
self.parentWorld.worldGrid.startProcessVisibility(localAvatar)
base.showShipFlats = False
messenger.send('normal-ships')
def setupMinimap(self):
if not (self.minimap) and not self.getMapNode().isEmpty():
self.minimap = IslandMap(self)
def destroyMinimap(self):
if self.minimap:
self.minimap.destroy()
self.minimap = None
def getGridParameters(self):
return (self.cellWidth, self.viewingRadius)
def getMapName(self):
return 'map-' + self.getName()
if __dev__:
def setZoneLevel(self, *args, **kw):
ZoneLOD.ZoneLOD.setZoneLevel(self, *args, **kw)
def getIslandTransform(self):
return (self.getX(), self.getY(), self.getZ(), self.getH())
def setIslandTransform(self, x, y, z, h):
self.setXYZH(x, y, z, h)
def startCustomEffects(self, interior = False, island = False):
DistributedGameArea.DistributedGameArea.startCustomEffects(self, interior = False, loadIslandMusic = island)
if self.uniqueId == LocationIds.DEL_FUEGO_ISLAND:
self.startVolcanoEffects()
if self.uniqueId == LocationIds.TORTUGA_ISLAND:
if not (self.feastFireEffect) and self.getFeastFireEnabled():
self.startFeastEffects()
self.updateCustomEffects(self.lastZoneLevel)
self.builder.resumeSFX()
def updateCustomEffects(self, level):
if self.uniqueId == LocationIds.DEL_FUEGO_ISLAND:
self.startVolcanoEffects()
if self.uniqueId == LocationIds.TORTUGA_ISLAND:
if not (self.feastFireEffect) and self.getFeastFireEnabled():
self.startFeastEffects()
if level == 0:
if self.feastFireEffect:
self.feastFireEffect.startMainEffects()
self.feastFireEffect.stopFarEffects()
if level == 1 or level == 2:
if self.feastFireEffect:
self.feastFireEffect.stopMainEffects()
self.feastFireEffect.startFarEffects()
if level == 3:
if self.feastFireEffect:
self.feastFireEffect.stopMainEffects()
self.feastFireEffect.startFarEffects()
if self.fireworkShowEnabled:
if level in [
0,
1,
2]:
self.fireworkShowLegal = True
self.fireWorksStartTime = 0.0
if base.cr.timeOfDayManager and not base.cr.timeOfDayManager.checkTimeOfDayToggle('fireWorksShow'):
base.cr.timeOfDayManager.addTimeOfDayToggle('fireWorksShow', self.fireWorksStartTime, self.fireWorksStartTime + 2.0, startMethod = self.beginDailyFireworkShow, endMethod = self.destroyFireworkShow)
else:
self.fireWorksStartTime = None
self.fireworkShowLegal = False
self.destroyFireworkShow()
if base.cr.timeOfDayManager:
base.cr.timeOfDayManager.removeTimeOfDayToggle('fireWorksShow')
def stopCustomEffects(self):
DistributedGameArea.DistributedGameArea.stopCustomEffects(self)
if base.cr.timeOfDayManager:
base.cr.timeOfDayManager.removeTimeOfDayToggle('fireWorksShow')
self.destroyFireworkShow()
if self.volcanoEffect:
self.volcanoEffect.destroy()
self.volcanoEffect = None
if self.feastFireEffect:
self.feastFireEffect.stopMainEffects()
self.feastFireEffect.stopFarEffects()
if self.fireworkShow:
self.destroyFireworkShow()
if self.builder:
self.builder.pauseSFX()
def startVolcanoEffects(self):
if not self.volcanoEffect:
self.volcanoEffect = VolcanoEffect()
self.volcanoEffect.reparentTo(self)
self.volcanoEffect.setPos(Vec3(-286, 180, 865))
self.volcanoEffect.enable()
def makeLavaErupt(self):
if self.lastZoneLevel in [
0,
1,
2]:
if not self.volcanoEffect:
self.startVolcanoEffects()
self.volcanoEffect.startLavaEruption()
def startLavaFlow(self):
self.stopLavaFlow()
lavaGeom = self.geom.find('**/lava')
if not lavaGeom.isEmpty():
lavaGeom.setLightOff()
if base.main_rtt:
lavaGeom.setFogOff()
lavaGeom.showThrough(OTPRender.GlowCameraBitmask)
tex = None
if not lavaGeom.findTextureStage('VertexColor'):
ts = TextureStage('VertexColor')
ts.setSort(30)
tex = lavaGeom.findTexture('*')
if tex:
lavaGeom.setTexture(ts, tex)
tsSet = lavaGeom.findAllTextureStages()
tsSet = [ tsSet[x] for x in xrange(tsSet.getNumTextureStages()) ]
tsSet.sort(key = lambda x: x.getSort())
if not tsSet:
return None
TS = TextureStage
tsSet[0].setCombineRgb(TS.CMReplace, TS.CSTexture, TS.COSrcColor)
tsSet[1].setCombineRgb(TS.CMAdd, TS.CSTexture, TS.COSrcColor, TS.CSPrevious, TS.COSrcColor)
tsSet[2].setCombineRgb(TS.CMInterpolate, TS.CSTexture, TS.COSrcColor, TS.CSPrevious, TS.COSrcColor, TS.CSPrimaryColor, TS.COSrcAlpha)
lavaSpeed = {
0: 0.04,
1: 0.02,
2: 0.01 }
if tex:
tsSet[3].setCombineRgb(TS.CMModulate, TS.CSPrevious, TS.COSrcColor, TS.CSPrimaryColor, TS.COSrcColor)
tsSet[3].setCombineAlpha(TS.CMReplace, TS.CSConstant, TS.COSrcAlpha)
tsSet[3].setColor(Vec4(1))
lavaSpeed[3] = 0.0
def flowLava(task):
dt = globalClock.getDt()
for key in lavaSpeed.keys():
offset = lavaGeom.getTexOffset(tsSet[key])[0]
offset -= lavaSpeed[key] * dt
offset %= 1.0
lavaGeom.setTexOffset(tsSet[key], offset, 0)
return Task.cont
taskMgr.add(flowLava, self.uniqueName('flowLava'))
def stopLavaFlow(self):
return None
if self.geom and not self.geom.isEmpty():
lavaGeom = self.geom.find('**/lava_red*')
if lavaGeom and not lavaGeom.isEmpty():
lavaGeom.clearLight()
lavaGeom.clearFog()
taskMgr.remove(self.uniqueName('flowLava'))
def setFeastFireEnabled(self, value):
if self.feastFireEnabled == value:
return None
self.feastFireEnabled = value
if self.feastFireEnabled:
self.startFeastEffects()
self.updateCustomEffects(self.lastZoneLevel)
else:
self.stopFeastEffects()
def getFeastFireEnabled(self):
return self.feastFireEnabled
def startFeastEffects(self):
if not (self.feastFireEffect) and self.getFeastFireEnabled():
self.feastFireEffect = FeastFire()
self.feastFireEffect.setCustomSettings()
self.feastFireEffect.reparentTo(self)
self.feastFireEffect.setPos(278, -166, 4.5)
def stopFeastEffects(self):
if self.feastFireEffect:
self.feastFireEffect.stopLoop()
self.feastFireEffect = None
def setFireworkShowEnabled(self, isEnabled, showType):
self.fireworkShowEnabled = isEnabled
self.fireworkShowType = showType
if self.fireworkShowEnabled:
self.createFireworkShow()
self.updateCustomEffects(self.lastZoneLevel)
else:
self.destroyFireworkShow()
def getFireworkShowEnabled(self):
return self.fireworkShowEnabled
def createFireworkShow(self):
if not self.fireworkShow:
self.fireworkShow = FireworkShow(self.fireworkShowType)
def destroyFireworkShow(self):
if self.fireworkShow:
self.fireworkShow.cleanupShow()
self.fireworkShow = None
def tryToBeginFireworkShow(self):
if self.fireworkShowLegal and base.cr.timeOfDayManager:
timeUntilShow = base.cr.timeOfDayManager.getTimeUntil(PiratesGlobals.TOD_STARS)
if timeUntilShow <= 0:
self.beginFireworkShow(timeStamp = -1 * timeUntilShow)
else:
self.destroyFireworkShow()
def beginFireworkShow(self, task = None, timeStamp = 0.0):
self.createFireworkShow()
if self.fireworkShow and not self.fireworkShow.isPlaying():
self.fireworkShow.begin(timeStamp)
self.fireworkShow.reparentTo(self)
self.fireworkShow.setPos(render, FireworkGlobals.getShowPosition(self.uniqueId))
self.fireworkShow.setHpr(render, FireworkGlobals.getShowOrientation(self.uniqueId))
def beginDailyFireworkShow(self, task = None):
self.createFireworkShow()
if self.fireworkShow and not self.fireworkShow.isPlaying():
currentTime = base.cr.timeOfDayManager.getCurrentIngameTime()
startTimeDiff = currentTime - self.fireWorksStartTime
startTimeDifSeconds = base.cr.timeOfDayManager.gameHoursToRealSeconds(startTimeDiff)
duration = self.fireworkShow.getDuration()
if startTimeDifSeconds < duration:
self.fireworkShow.begin(startTimeDiff)
self.fireworkShow.reparentTo(self)
self.fireworkShow.setPos(render, FireworkGlobals.getShowPosition(self.uniqueId))
self.fireworkShow.setHpr(render, FireworkGlobals.getShowOrientation(self.uniqueId))
def ensureLoaded(self):
self.setZoneLevel(0)
DistributedGameArea.DistributedGameArea.ensureLoaded(self)
def resetZoneLODs(self):
if localAvatar.parentId != self.doId:
self.setZoneLevel(3)
def loadWaterRing(self):
islandBaseName = self.modelPath.split('_zero')[0]
self.waterRing = loader.loadModel(islandBaseName + '_ocean', okMissing = True)
if self.waterRing:
self.waterRing.hide(OTPRender.MainCameraBitmask)
self.waterRing.show(OTPRender.EnviroCameraBitmask)
self.waterRing.reparentTo(self)
self.initializeIslandWaterParameters(self.waterRing)
else:
self.setIslandWaterParameters(False)
def unloadWaterRing(self):
self.setIslandWaterParameters(False)
if self.waterRing:
self.waterRing.detachNode()
self.waterRing = None
def setFogColor(self, fogColor):
if self.dockingLodFog:
self.dockingLodFog.setColorScale(fogColor)
if self.islandLowLodFog:
self.islandLowLodFog.setColorScale(fogColor)
def timeOfDayChanged(self, stateId = None, stateDuration = 0.0, elapsedTime = 0.0, transitionTime = 0.0):
if self.dockingLodFog:
todMgr = base.cr.timeOfDayManager
transitionTime = todMgr.cycleDuration * TODGlobals.getStateTransitionTime(todMgr.cycleType, todMgr.currentState)
fromFogColor = TODGlobals.getTodEnvSetting(todMgr.lastState, todMgr.environment, 'FogColor') / 2.5 + Vec4(0, 0, 0, 1)
toFogColor = TODGlobals.getTodEnvSetting(todMgr.currentState, todMgr.environment, 'FogColor') / 2.5 + Vec4(0, 0, 0, 1)
if self.fogTransitionIval:
self.fogTransitionIval.pause()
self.fogTransitionIval = None
self.fogTransitionIval = LerpFunctionInterval(self.setFogColor, duration = transitionTime, toData = toFogColor, fromData = fromFogColor)
self.fogTransitionIval.start(elapsedTime)
| [
"brandoncarden12345@gmail.com"
] | brandoncarden12345@gmail.com |
728679f4bb098e01726803ae4d67a6e61f04268f | 8e4c54ae58606c54cd18e6d2bc65b5ab9825ca21 | /3.14.py | d951398ef3d670a12568c3227f2fb12783cddba3 | [] | no_license | Nurken-01/Nurken | a1ecd7d12f0e5fcd7149eebda8e583b28a345036 | 80a396848055d4b0e11db16ff608fb4563ea00e3 | refs/heads/master | 2021-05-17T01:31:48.096494 | 2020-05-02T16:32:16 | 2020-05-02T16:32:16 | 250,557,797 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | print(input().count(' ') + 1)
| [
"noreply@github.com"
] | Nurken-01.noreply@github.com |
a6ff3611a8dfbf9bf3c146b426e98458facf28af | 328ebfdbcef076ce0e930715f9bd786d7498185b | /lang/python/Complete-Python-Developer-in-2021-Zero-to-Mastery/1st pass/11-modules/newjokes/venv/lib/python3.9/site-packages/pip/_internal/operations/build/wheel.py | a220c178e108251b9d3e129b4a904cefc781fdcf | [] | no_license | pnowak2/learnjs | b618e6f9563b3e86be0b1a21d647698e289daec0 | f8842b4e9e5d2eae6fb4e0d663b6699d74c90e9c | refs/heads/master | 2023-08-30T05:04:00.227920 | 2023-08-18T10:58:24 | 2023-08-18T10:58:24 | 41,912,571 | 3 | 0 | null | 2023-03-31T06:58:40 | 2015-09-04T11:36:13 | Python | UTF-8 | Python | false | false | 2,986 | py | <<<<<<< HEAD
import logging
import os
from pip._internal.utils.subprocess import runner_with_spinner_message
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional
from pip._vendor.pep517.wrappers import Pep517HookCaller
logger = logging.getLogger(__name__)
def build_wheel_pep517(
name, # type: str
backend, # type: Pep517HookCaller
metadata_directory, # type: str
build_options, # type: List[str]
tempd, # type: str
):
# type: (...) -> Optional[str]
"""Build one InstallRequirement using the PEP 517 build process.
Returns path to wheel if successfully built. Otherwise, returns None.
"""
assert metadata_directory is not None
if build_options:
# PEP 517 does not support --build-options
logger.error('Cannot build wheel for %s using PEP 517 when '
'--build-option is present', name)
return None
try:
logger.debug('Destination directory: %s', tempd)
runner = runner_with_spinner_message(
f'Building wheel for {name} (PEP 517)'
)
with backend.subprocess_runner(runner):
wheel_name = backend.build_wheel(
tempd,
metadata_directory=metadata_directory,
)
except Exception:
logger.error('Failed building wheel for %s', name)
return None
return os.path.join(tempd, wheel_name)
=======
import logging
import os
from pip._internal.utils.subprocess import runner_with_spinner_message
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional
from pip._vendor.pep517.wrappers import Pep517HookCaller
logger = logging.getLogger(__name__)
def build_wheel_pep517(
name, # type: str
backend, # type: Pep517HookCaller
metadata_directory, # type: str
build_options, # type: List[str]
tempd, # type: str
):
# type: (...) -> Optional[str]
"""Build one InstallRequirement using the PEP 517 build process.
Returns path to wheel if successfully built. Otherwise, returns None.
"""
assert metadata_directory is not None
if build_options:
# PEP 517 does not support --build-options
logger.error('Cannot build wheel for %s using PEP 517 when '
'--build-option is present', name)
return None
try:
logger.debug('Destination directory: %s', tempd)
runner = runner_with_spinner_message(
f'Building wheel for {name} (PEP 517)'
)
with backend.subprocess_runner(runner):
wheel_name = backend.build_wheel(
tempd,
metadata_directory=metadata_directory,
)
except Exception:
logger.error('Failed building wheel for %s', name)
return None
return os.path.join(tempd, wheel_name)
>>>>>>> 09ca5278bea3c4aca18b55f7b3bde8928f648bf3
| [
"p.nowak2@gmail.com"
] | p.nowak2@gmail.com |
d6c4d600acd854d883ab2cfbac30c3717bdb938e | 82989230972673eb0b1065b2f1830e9aee9c92e8 | /main.py | 6b2ae19b4322d2fc0a058f4ca315fc7fc61bd2b7 | [] | no_license | liz6688/AttentionImageClass | b798acfd2b4d404d142e72f4799d489cc8e0903d | c8a55412f9794e4b891f26e1955b043c01926d31 | refs/heads/master | 2022-01-07T12:42:10.059157 | 2018-08-06T02:45:09 | 2018-08-06T02:45:09 | 437,567,083 | 1 | 0 | null | 2021-12-12T14:25:48 | 2021-12-12T14:25:46 | null | UTF-8 | Python | false | false | 15,616 | py | import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.backends.cudnn as cudnn
import torchvision
from torchvision import transforms as transforms
import numpy as np
import visdom
import torch.nn.functional as F
import argparse
import os
from models.RMA_module import RMA_module
from models.loss import loss_function
from utils import get_target_transform as target_trans
# data visualization: plots are pushed to a running Visdom server under this env
vis = visdom.Visdom(env='baseline(no priori)')
# GPU setting: pin this process to GPU 2 unless the caller already chose one
os.environ.setdefault("CUDA_VISIBLE_DEVICES", "2")
# ==================================================================
# Constants
# ==================================================================
EPOCH = 45  # number of times for each run-through
BATCH_SIZE = 16  # number of images for each epoch
LEARNING_RATE = 1e-5  # default learning rate (Adam)
WEIGHT_DECAY = 0  # default weight decay (L2 penalty)
N = 512  # size of input images (512 or 640)
MOMENTUM = (0.9, 0.999)  # (beta1, beta2) momentum terms in Adam optimization
TOPK = 3  # top k highest-ranked labels
GPU_IN_USE = torch.cuda.is_available()  # whether using GPU
DIR_TRAIN_IMAGES = '../dataset/train2017/'
DIR_TEST_IMAGES = '../dataset/val2017/'
PATH_TRAIN_ANNFILE = '../dataset/annotations/instances_train2017.json'
PATH_TEST_ANNFILE = '../dataset/annotations/instances_val2017.json'
PATH_MODEL_PARAMS = './params/params_no_priori.pkl'
NUM_CATEGORIES = 80  # number of COCO object categories
LOSS_OUTPUT_INTERVAL = 100  # batches between loss prints / Visdom plot updates
# ==================================================================
# Global Variables
# ==================================================================
# one iteration means one mini-batch finishs a forward-backward process;
# kept as 1-element tensors because they are passed directly as Visdom X values
current_training_iteration = torch.tensor([1])
current_test_iteration = torch.tensor([1])
loss_graph_window = 'loss graph'
test_f1_graph_window = 'test OF1 and CF1 graph'
evaluation_window = 'six evaluation metrics'
#category_id_window = 'category ids of prediction and ground-truth'
of1 = 0.  # overall F1 (micro-averaged); NOTE(review): shadowed by locals in train()/test()
cf1 = 0.  # per-category F1 (macro-averaged); NOTE(review): shadowed by locals in train()/test()
# Parser Initialization
# ==================================================================
parser = argparse.ArgumentParser(description='Pytorch Implementation of ICCV2017_AttentionImageClass')
parser.add_argument('--lr', default=LEARNING_RATE, type=float, help='learning rate')
parser.add_argument('--epoch', default=EPOCH, type=int, help='number of epochs')
parser.add_argument('--trainBatchSize', default=BATCH_SIZE, type=int, help='training batch size')
parser.add_argument('--testBatchSize', default=BATCH_SIZE, type=int, help='testing batch size')
parser.add_argument('--weightDecay', default=WEIGHT_DECAY, type=float, help='weight decay')
parser.add_argument('--pathModelParams', default=PATH_MODEL_PARAMS, type=str, help='path of model parameters')
parser.add_argument('--saveModel', default=True, type=bool, help='save model parameters')
parser.add_argument('--loadModel', default=False, type=bool, help='load model parameters')
args = parser.parse_args()
# ==================================================================
# Prepare Dataset(training & test)
# ==================================================================
print('***** Prepare Data ******')
# transforms of training dataset
# normalization uses the standard ImageNet channel statistics expected by
# the pretrained VGG-16 backbone
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
train_transforms = transforms.Compose([
    transforms.RandomHorizontalFlip(p=0.5),  # default value is 0.5
    transforms.Resize((N, N)),
    transforms.ToTensor(),
    normalize
])
# transforms of test dataset (no augmentation, only resize + normalize)
test_transforms = transforms.Compose([
    transforms.Resize((N, N)),
    transforms.ToTensor(),
    normalize
])
# COCO detection annotations are converted to multi-label targets by
# get_target_transform (imported as target_trans from utils)
train_dataset = torchvision.datasets.CocoDetection(root=DIR_TRAIN_IMAGES, annFile=PATH_TRAIN_ANNFILE,
                                                   transform=train_transforms, target_transform=target_trans)
test_dataset = torchvision.datasets.CocoDetection(root=DIR_TEST_IMAGES, annFile=PATH_TEST_ANNFILE,
                                                  transform=test_transforms, target_transform=target_trans)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=args.trainBatchSize, shuffle=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=args.testBatchSize, shuffle=False, num_workers=2)
print('Data Preparation : Finished')
# ==================================================================
# Prepare Model
# ==================================================================
print('\n***** Prepare Model *****')
# ImageNet-pretrained VGG-16; its convolutional feature extractor is frozen
# so only the RMA module below is trained
vgg16 = torchvision.models.vgg16(pretrained=True)
for param in vgg16.features.parameters():
    param.requires_grad=False
extract_features = vgg16.features
# RMA module (models/RMA_module) consumes the backbone feature maps and
# produces category scores plus an attention matrix M
RMA = RMA_module(lstm_input_size=14, lstm_hidden_size=4096, zk_size=4096)
if args.loadModel:
    RMA.load_state_dict(torch.load(args.pathModelParams))
if GPU_IN_USE:
    print('CUDA_VISIBLE_DEVICES:', os.environ['CUDA_VISIBLE_DEVICES'])
    print('cuda: move all model parameters and buffers to the GPU')
    extract_features.cuda()
    RMA.cuda()
    cudnn.benchmark = True
# Adam optimization over the trainable (RMA) parameters only
optimizer = optim.Adam(RMA.parameters(), lr=args.lr, weight_decay=args.weightDecay, betas=MOMENTUM)
# scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[75, 150], gamma=0.5) # lr decay
print('Model Preparation : Finished')
# Train
# ================================================================================
# data: [torch.cuda.FloatTensor of size [batch_size, 3, N, N] N=512/640]
# target: [torch.cuda.FloatTensor of size [batch_size, num_categories]]
# output: [torch.cuda.FloatTensor of size [batch_size, num_categories]]
# prediction: [
# [torch.cuda.FloatTensor of size [batch_size, TOPK] (TOPK)],
# [torch.cuda.LongTensor of size [batch_size, TOPK] (index of TOPK)]
# ]
# ================================================================================
def train():
    """Run one training epoch over ``train_loader``.

    Returns:
        (c_p, c_r, cf1, o_p, o_r, of1): per-category (macro) and overall
        (micro) precision, recall and F1 computed from the top-10
        predictions accumulated over the whole epoch.
    """
    print('train:')
    RMA.train()  # set the module in training mode
    train_loss = 0.  # running sum of batch losses up to current batch
    global current_training_iteration
    # per-category accumulators; the 1e-6 offset avoids division by zero
    # in the precision computation below
    sum_prediction_label = torch.zeros(1, NUM_CATEGORIES) + 1e-6
    sum_correct_prediction_label = torch.zeros(1, NUM_CATEGORIES)
    sum_ground_truth_label = torch.zeros(1, NUM_CATEGORIES)

    for batch_num, (data, target) in enumerate(train_loader):
        if target.sum() == 0:
            continue
        # Drop samples that carry no ground-truth label.  The surviving row
        # indices are computed ONCE and applied to both tensors: the
        # original code filtered `target` first and then re-derived indices
        # from the already-filtered target, which paired images with the
        # wrong label rows whenever a row was dropped.
        keep_rows = torch.nonzero(target.sum(dim=1)).view(-1)
        data = data.index_select(0, keep_rows)
        target = target.index_select(0, keep_rows)

        if GPU_IN_USE:
            data, target = data.cuda(), target.cuda()

        # -----forward-----
        optimizer.zero_grad()
        f_I = extract_features(data)
        output, M = RMA(f_I)
        # ---end forward---

        # ---calculate loss and backward---
        loss = loss_function(output, target, M, add_constraint=True)
        loss.backward()
        optimizer.step()
        # ----------end backward-----------

        # .item() detaches the scalar so the running sum does not keep the
        # computation graph of every past batch alive
        train_loss += loss.item()

        # top-10 softmax scores/indices; keep only predictions with
        # confidence >= 0.1 (eq + gt in the original is equivalent to ge)
        prediction = torch.topk(F.softmax(output, dim=1), 10, dim=1)
        confident_mask = prediction[0].ge(0.1)
        # indices are shifted by one so filtered-out slots map to row 0
        # (all zeros) of the extended identity matrix below
        prediction_index = torch.mul(prediction[1] + 1, confident_mask.type(torch.cuda.LongTensor))
        extend_eye_mat = torch.cat((torch.zeros(1, NUM_CATEGORIES), torch.eye(NUM_CATEGORIES)), 0)
        # multi-hot prediction vector per sample built from the kept indices
        prediction_label = extend_eye_mat[prediction_index.view(-1)].view(-1, 10, NUM_CATEGORIES).sum(dim=1)
        correct_prediction_label = (target.cpu().byte() & prediction_label.byte()).type(torch.FloatTensor)

        # accumulate per-category counts for the epoch-level metrics
        sum_prediction_label += prediction_label.sum(dim=0)
        sum_correct_prediction_label += correct_prediction_label.sum(dim=0)
        sum_ground_truth_label += target.cpu().sum(dim=0)

        if batch_num % LOSS_OUTPUT_INTERVAL == 0:
            # visualization: draw the train loss graph
            vis.line(
                X=current_training_iteration,
                Y=torch.tensor([train_loss]) / (batch_num + 1),
                win=loss_graph_window,
                name='train loss',
                update=None if current_training_iteration == 1 else 'append',
                opts=dict(xlabel='iteration', ylabel='loss', showlegend=True)
            )
            print('loss %.3f (batch %d)' % (train_loss / (batch_num + 1), batch_num + 1))
            current_training_iteration += LOSS_OUTPUT_INTERVAL

    # evaluation metrics: overall (micro) and per-category (macro) P/R/F1
    o_p = torch.div(sum_correct_prediction_label.sum(), sum_prediction_label.sum())
    o_r = torch.div(sum_correct_prediction_label.sum(), sum_ground_truth_label.sum())
    of1 = torch.div(2 * o_p * o_r, o_p + o_r)
    c_p = (torch.div(sum_correct_prediction_label, sum_prediction_label)).sum() / NUM_CATEGORIES
    c_r = (torch.div(sum_correct_prediction_label, sum_ground_truth_label)).sum() / NUM_CATEGORIES
    cf1 = torch.div(2 * c_p * c_r, c_p + c_r)
    return c_p, c_r, cf1, o_p, o_r, of1
# Test
# ================================================================================
# data: [torch.cuda.FloatTensor of size [batch_size, 3, N, N] N=512/640]
# target: [torch.cuda.FloatTensor of size [batch_size, num_categories]]
# output: [torch.cuda.FloatTensor of size [batch_size, num_categories]]
# prediction: [
# [torch.cuda.FloatTensor of size [batch_size, TOPK] (TOPK)],
# [torch.cuda.LongTensor of size [batch_size, TOPK] (index of TOPK)]
# ]
# ================================================================================
def test():
    """Run one evaluation pass over ``test_loader`` (no gradient updates).

    Returns:
        (c_p, c_r, cf1, o_p, o_r, of1): per-category (macro) and overall
        (micro) precision, recall and F1 computed from the top-10
        predictions accumulated over the whole test set.
    """
    print('test:')
    RMA.eval()  # set the module in evaluation mode
    test_loss = 0.  # running sum of test losses up to current batch
    global current_test_iteration
    # per-category accumulators; the 1e-6 offset avoids division by zero
    # in the precision computation below
    sum_prediction_label = torch.zeros(1, NUM_CATEGORIES) + 1e-6
    sum_correct_prediction_label = torch.zeros(1, NUM_CATEGORIES)
    sum_ground_truth_label = torch.zeros(1, NUM_CATEGORIES)

    for batch_num, (data, target) in enumerate(test_loader):
        if target.sum() == 0:
            continue
        # Drop samples without any ground-truth label, computing the
        # surviving row indices ONCE so data and target stay aligned (the
        # original re-derived indices from the already-filtered target,
        # mismatching images and labels whenever a row was dropped).
        keep_rows = torch.nonzero(target.sum(dim=1)).view(-1)
        data = data.index_select(0, keep_rows)
        target = target.index_select(0, keep_rows)

        if GPU_IN_USE:
            data, target = data.cuda(), target.cuda()  # set up GPU Tensor

        f_I = extract_features(data)
        output, M = RMA(f_I)
        loss = loss_function(output, target, M, add_constraint=True)
        # .item() detaches the scalar so past batches' graphs are released
        test_loss += loss.item()

        # top-10 softmax scores/indices; keep only predictions with
        # confidence >= 0.1 (eq + gt in the original is equivalent to ge)
        prediction = torch.topk(F.softmax(output, dim=1), 10, dim=1)
        confident_mask = prediction[0].ge(0.1)
        # indices shifted by one: filtered-out slots map to the all-zero
        # row 0 of the extended identity matrix below
        prediction_index = torch.mul(prediction[1] + 1, confident_mask.type(torch.cuda.LongTensor))
        extend_eye_mat = torch.cat((torch.zeros(1, NUM_CATEGORIES), torch.eye(NUM_CATEGORIES)), 0)
        prediction_label = extend_eye_mat[prediction_index.view(-1)].view(-1, 10, NUM_CATEGORIES).sum(dim=1)
        correct_prediction_label = (target.cpu().byte() & prediction_label.byte()).type(torch.FloatTensor)

        # accumulate per-category counts for the epoch-level metrics
        sum_prediction_label += prediction_label.sum(dim=0)
        sum_correct_prediction_label += correct_prediction_label.sum(dim=0)
        sum_ground_truth_label += target.cpu().sum(dim=0)

        if batch_num % LOSS_OUTPUT_INTERVAL == 0:
            # visualization: draw the test loss graph
            vis.line(
                X=current_test_iteration,
                Y=torch.tensor([test_loss]) / (batch_num + 1),
                win=loss_graph_window,
                name='test loss',
                update='insert' if current_test_iteration == 1 else 'append',
                opts=dict(showlegend=True),
            )
            print('loss %.3f (batch %d)' % (test_loss / (batch_num + 1), batch_num + 1))
            current_test_iteration += LOSS_OUTPUT_INTERVAL

    # evaluation metrics: overall (micro) and per-category (macro) P/R/F1
    o_p = torch.div(sum_correct_prediction_label.sum(), sum_prediction_label.sum())
    o_r = torch.div(sum_correct_prediction_label.sum(), sum_ground_truth_label.sum())
    of1 = torch.div(2 * o_p * o_r, o_p + o_r)
    c_p = (torch.div(sum_correct_prediction_label, sum_prediction_label)).sum() / NUM_CATEGORIES
    c_r = (torch.div(sum_correct_prediction_label, sum_ground_truth_label)).sum() / NUM_CATEGORIES
    cf1 = torch.div(2 * c_p * c_r, c_p + c_r)
    return c_p, c_r, cf1, o_p, o_r, of1
# ==================================================================
# Save Model
# ==================================================================
def save():
torch.save(RMA.state_dict(), args.pathModelParams)
print('Checkpoint saved to {}'.format(args.pathModelParams))
# ==================================================================
# Main Loop
# ==================================================================
for current_epoch in range(1, args.epoch + 1):
print('\n===> epoch: %d/%d' % (current_epoch, args.epoch))
train_cp, train_cr, train_cf1, train_op, train_or, train_of1 = train()
with torch.no_grad():
test_cp, test_cr, test_cf1, test_op, test_or, test_of1 = test()
evaluation_metrics = '''
<pre>
===> epoch: %d/%d<br/>
-------------------------------------------------------------
| CP | CR | CF1 | OP | OR | OF1 |
-------------------------------------------------------------
| %.3f | %.3f | %.3f | %.3f | %.3f | %.3f |
-------------------------------------------------------------
</pre>
''' % (current_epoch, args.epoch, test_cp, test_cr, test_cf1, test_op, test_or, test_of1)
# visualization
vis.line(
X=torch.tensor([current_epoch]),
Y=torch.tensor([test_cf1]),
name='test_CF1',
win=test_f1_graph_window,
update=None if current_epoch == 1 else 'append',
opts=dict(xlabel='epoch', ylabel='F1', showlegend=True, title='Evaluation of Test (CF1 / OF1)')
)
vis.line(
X=torch.tensor([current_epoch]),
Y=torch.tensor([test_of1]),
name='test_OF1',
win=test_f1_graph_window,
update='insert' if current_epoch == 1 else 'append',
opts=dict(showlegend=True)
)
vis.text(
evaluation_metrics,
win=evaluation_window,
append=False if current_epoch == 1 else True
)
if test_of1 > of1 and test_cf1 > cf1:
if args.saveModel:
save()
of1 = test_of1
cf1 = test_cf1
if current_epoch == args.epoch:
print('===> BEST PERFORMANCE (OF1/CF1): %.3f / %.3f' % (of1, cf1))
| [
"874244887@qq.com"
] | 874244887@qq.com |
c484785c956ddd198702fec0b53edaac9087fd8b | 34a3581cf39bf853f99407ae4c990103be0216b9 | /simpleSubHacker.py | 54dc234e1322af259a02b1fb57fbb2dce7fa3e32 | [] | no_license | mrevilg/CodeCrack | 287d619b3a8c1e10c9d8fea8f298aa070803c214 | 3470e5a31a1fc4cef15fc78115aefa47e8f94d4c | refs/heads/master | 2020-08-22T17:15:33.192890 | 2020-03-30T23:54:43 | 2020-03-30T23:54:43 | 216,445,349 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,896 | py |
import os, re, copy, pyperclip, simpleSubCipher, wordPatterns, makeWordPatterns
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main():
message = 'Sy l nlx sr pyyacao l ylwj eiswi upar lulsxrj isr sxrjsxwjr, ia esmm rwctjsxsza sj wmpramh, lxo txmarr jia aqsoaxwa sr pqaceiamnsxu, ia esmm caytra jp famsaqa sj. Sy, px jia pjiac ilxo, ia sr pyyacao rpnajisxu eiswi lyypcor l calrpx ypc lwjsxu sx lwwpcolxwa jp isr sxrjsxwjr, ia esmm lwwabj sj aqax px jia rmsuijarj aqsoaxwa. Jia pcsusx py nhjir sr agbmlsxao sx jisr elh. -Facjclxo Ctrramm'
# Determine the possible valid ciphertext translations.
print('Hacking...')
letterMapping = hackSimpleSub(message)
# Display the results to the user.
print('Mapping:')
print(letterMapping)
print()
print('Original ciphertext:')
print(message)
print()
print('Copying hacked message to clipboard:')
hackedMessage = decryptWithCipherletterMapping(message, letterMapping)
pyperclip.copy(hackedMessage)
print(hackedMessage)
def getBlankCipherLetterMapping():
return {'A': [], 'B': [], 'C': [], 'D': [], 'E': [], 'F': [], 'G': [], 'H': [], 'I': [], 'J': [], 'K': [], 'L': [], 'M': [], 'N': [], 'O': [], 'P': [], 'Q': [], 'R': [], 'S': [], 'T': [], 'U': [], 'V': [], 'W': [], 'X': [], 'Y': [], 'Z': []}
def addLettersToMapping(letterMapping, cipherWord, candidate):
for i in range(len(cipherWord)):
if candidate[i] not in letterMapping[cipherWord[i]]:
letterMapping[cipherWord[i]].append(candidate[i])
def intersectMappings(mapA, MapB):
intersectedMapping = getBlankCipherLetterMapping()
for letter in LETTERS:
if mapA[letter] == []:
intersectedMapping[letter] = copy.deepcopy(mapB[letter])
elif mapB[letter] == []:
intersectedMapping[letter] = copy.deepcopy(mapA[letter])
else:
for mappedLetter in mapA[letter]:
if mappedLetter in mapB[letter]:
intersectedMapping[letter].append(mappedLetter)
return intersectedMapping
def removeSolvedLettersFromMapping(letterMapping):
loopAgain = True
while loopAgain:
loopAgain = False
solvedLetters = []
for cipherLetter in LETTERS:
if len(letterMapping[cipherLetter]) == 1:
solvedLetters.append(letterMapping[cipherLetter][0])
for cipherLetter in LETTERS:
for s in solvedLetters:
if len(letterMapping[cipherLetter]) != 1 and s in letterMapping[cipherLetter]:
letterMapping[cipherLetter].remove(s)
if len(letterMapping[cipherLetter]) == 1:
loopAgain = True
return letterMapping
def hackSimpleSub(message):
intersectedMap = getBlankCipherLetterMapping()
cipherwordList = nonLettersOrSpacePattern.sub('',message.upper()).split()
for cipherword in cipherwordList:
candidateMap = getBlankCipherLetterMapping()
wordPattern = makeWordPatterns.getWordPattern(cipherword)
if wordPattern not in wordPatterns.allPatterns:
continue
for candidate in wordPatterns.allPatterns[wordPattern]:
addLettersToMapping(candidateMap, cipherWord, candidate)
intersectedMap = intersectMappings(intersectedMap, candidateMap)
return removeSolvedLettersFromMapping(intersectedMap)
def decryptWithCipherletterMapping(ciphertext, letterMapping):
key = ['x'] * len(LETTERS)
for cipherletter in LETTERS:
if len(letterMapping[cipherletter]) == 1:
keyIndex = LETTERS.find(letterMapping[cipherletter][0])
key[keyIndex] = cipherletter
else:
ciphertext = ciphertext.replace(cipherletter.lower(), '_')
ciphertext = ciphertext.replace(cipherletter.upper(), '_')
key = ''.join(key)
if __name__ == '__main__':
main() | [
"mrevilg@gmail.com"
] | mrevilg@gmail.com |
9d729f5128f7c62143279c5e9a55cea878b11fed | 39ec999720f91cb37122bf36ee4c405f96655ac4 | /crawler/crawler.py | 17c45f188e1ca8bf924876ec13a6b0129f91629e | [
"MIT"
] | permissive | KevinIRIS/NewsHub | baaaa6b4a142fe3ab836fd08dc3bf2f66d156d0d | b15c5f7feb8af8223b39f7d87f8093141b10aba7 | refs/heads/master | 2021-09-01T23:06:47.450793 | 2017-12-29T03:34:14 | 2017-12-29T03:34:14 | 115,287,441 | 1 | 0 | MIT | 2017-12-29T03:34:15 | 2017-12-24T21:14:59 | Python | UTF-8 | Python | false | false | 339 | py | import tweepy
from OAuth import OAuth
import sys
if __name__ == "__main__":
if len(sys.argv) != 5:
print("invalid parameters")
exit(-1)
ck = sys.argv[1]
cs = sys.argv[2]
at = sys.argv[3]
ats = sys.argv[4]
auth = OAuth(ck,cs,at,ats)
api = auth.get_api()
user = api.get_user('KevinMoveFast') | [
"kz899@nyu.edu"
] | kz899@nyu.edu |
26b355ece52fe0a86f90c4b2f0830f13c9b9f2a4 | ce1291271199e992cd97169d73a5a902fdf208b7 | /predictors/python_predictor/python_predictor/gens/google/monitoring/v3/group_service_pb2.py | d8d4f9b2151264f62157bae1af941d055e7f6b88 | [
"ISC",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jayantco/scratch | 5d03a8218bc54c7b9ce0ced04c50a42c9abfd77a | 34c565ddce7e64ecf835f17080a21c8526154501 | refs/heads/master | 2022-03-30T09:50:06.210213 | 2019-09-26T15:50:32 | 2019-09-26T15:50:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 40,639 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/monitoring/v3/group_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import monitored_resource_pb2 as google_dot_api_dot_monitored__resource__pb2
from google.monitoring.v3 import common_pb2 as google_dot_monitoring_dot_v3_dot_common__pb2
from google.monitoring.v3 import group_pb2 as google_dot_monitoring_dot_v3_dot_group__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/monitoring/v3/group_service.proto',
package='google.monitoring.v3',
syntax='proto3',
serialized_pb=_b('\n(google/monitoring/v3/group_service.proto\x12\x14google.monitoring.v3\x1a\x1cgoogle/api/annotations.proto\x1a#google/api/monitored_resource.proto\x1a!google/monitoring/v3/common.proto\x1a google/monitoring/v3/group.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xad\x01\n\x11ListGroupsRequest\x12\x0c\n\x04name\x18\x07 \x01(\t\x12\x1b\n\x11\x63hildren_of_group\x18\x02 \x01(\tH\x00\x12\x1c\n\x12\x61ncestors_of_group\x18\x03 \x01(\tH\x00\x12\x1e\n\x14\x64\x65scendants_of_group\x18\x04 \x01(\tH\x00\x12\x11\n\tpage_size\x18\x05 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\tB\x08\n\x06\x66ilter\"Y\n\x12ListGroupsResponse\x12*\n\x05group\x18\x01 \x03(\x0b\x32\x1b.google.monitoring.v3.Group\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"\x1f\n\x0fGetGroupRequest\x12\x0c\n\x04name\x18\x03 \x01(\t\"e\n\x12\x43reateGroupRequest\x12\x0c\n\x04name\x18\x04 \x01(\t\x12*\n\x05group\x18\x02 \x01(\x0b\x32\x1b.google.monitoring.v3.Group\x12\x15\n\rvalidate_only\x18\x03 \x01(\x08\"W\n\x12UpdateGroupRequest\x12*\n\x05group\x18\x02 \x01(\x0b\x32\x1b.google.monitoring.v3.Group\x12\x15\n\rvalidate_only\x18\x03 \x01(\x08\"\"\n\x12\x44\x65leteGroupRequest\x12\x0c\n\x04name\x18\x03 \x01(\t\"\x94\x01\n\x17ListGroupMembersRequest\x12\x0c\n\x04name\x18\x07 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x05 \x01(\t\x12\x34\n\x08interval\x18\x06 \x01(\x0b\x32\".google.monitoring.v3.TimeInterval\"w\n\x18ListGroupMembersResponse\x12.\n\x07members\x18\x01 \x03(\x0b\x32\x1d.google.api.MonitoredResource\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x12\n\ntotal_size\x18\x03 \x01(\x05\x32\xbb\x06\n\x0cGroupService\x12\x85\x01\n\nListGroups\x12\'.google.monitoring.v3.ListGroupsRequest\x1a(.google.monitoring.v3.ListGroupsResponse\"$\x82\xd3\xe4\x93\x02\x1e\x12\x1c/v3/{name=projects/*}/groups\x12v\n\x08GetGroup\x12%.google.monitoring.v3.GetGroupRequest\x1a\x1b.google.monitoring.v3.Group\"&\x82\xd3\xe4\x93\x02 
\x12\x1e/v3/{name=projects/*/groups/*}\x12\x81\x01\n\x0b\x43reateGroup\x12(.google.monitoring.v3.CreateGroupRequest\x1a\x1b.google.monitoring.v3.Group\"+\x82\xd3\xe4\x93\x02%\"\x1c/v3/{name=projects/*}/groups:\x05group\x12\x89\x01\n\x0bUpdateGroup\x12(.google.monitoring.v3.UpdateGroupRequest\x1a\x1b.google.monitoring.v3.Group\"3\x82\xd3\xe4\x93\x02-\x1a$/v3/{group.name=projects/*/groups/*}:\x05group\x12w\n\x0b\x44\x65leteGroup\x12(.google.monitoring.v3.DeleteGroupRequest\x1a\x16.google.protobuf.Empty\"&\x82\xd3\xe4\x93\x02 *\x1e/v3/{name=projects/*/groups/*}\x12\xa1\x01\n\x10ListGroupMembers\x12-.google.monitoring.v3.ListGroupMembersRequest\x1a..google.monitoring.v3.ListGroupMembersResponse\".\x82\xd3\xe4\x93\x02(\x12&/v3/{name=projects/*/groups/*}/membersB\x8c\x01\n\x18\x63om.google.monitoring.v3B\x11GroupServiceProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3b\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_monitored__resource__pb2.DESCRIPTOR,google_dot_monitoring_dot_v3_dot_common__pb2.DESCRIPTOR,google_dot_monitoring_dot_v3_dot_group__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_LISTGROUPSREQUEST = _descriptor.Descriptor(
name='ListGroupsRequest',
full_name='google.monitoring.v3.ListGroupsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.monitoring.v3.ListGroupsRequest.name', index=0,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='children_of_group', full_name='google.monitoring.v3.ListGroupsRequest.children_of_group', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ancestors_of_group', full_name='google.monitoring.v3.ListGroupsRequest.ancestors_of_group', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='descendants_of_group', full_name='google.monitoring.v3.ListGroupsRequest.descendants_of_group', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_size', full_name='google.monitoring.v3.ListGroupsRequest.page_size', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_token', full_name='google.monitoring.v3.ListGroupsRequest.page_token', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='filter', full_name='google.monitoring.v3.ListGroupsRequest.filter',
index=0, containing_type=None, fields=[]),
],
serialized_start=232,
serialized_end=405,
)
_LISTGROUPSRESPONSE = _descriptor.Descriptor(
name='ListGroupsResponse',
full_name='google.monitoring.v3.ListGroupsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='group', full_name='google.monitoring.v3.ListGroupsResponse.group', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='google.monitoring.v3.ListGroupsResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=407,
serialized_end=496,
)
_GETGROUPREQUEST = _descriptor.Descriptor(
name='GetGroupRequest',
full_name='google.monitoring.v3.GetGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.monitoring.v3.GetGroupRequest.name', index=0,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=498,
serialized_end=529,
)
_CREATEGROUPREQUEST = _descriptor.Descriptor(
name='CreateGroupRequest',
full_name='google.monitoring.v3.CreateGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.monitoring.v3.CreateGroupRequest.name', index=0,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='group', full_name='google.monitoring.v3.CreateGroupRequest.group', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='validate_only', full_name='google.monitoring.v3.CreateGroupRequest.validate_only', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=531,
serialized_end=632,
)
_UPDATEGROUPREQUEST = _descriptor.Descriptor(
name='UpdateGroupRequest',
full_name='google.monitoring.v3.UpdateGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='group', full_name='google.monitoring.v3.UpdateGroupRequest.group', index=0,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='validate_only', full_name='google.monitoring.v3.UpdateGroupRequest.validate_only', index=1,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=634,
serialized_end=721,
)
_DELETEGROUPREQUEST = _descriptor.Descriptor(
name='DeleteGroupRequest',
full_name='google.monitoring.v3.DeleteGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.monitoring.v3.DeleteGroupRequest.name', index=0,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=723,
serialized_end=757,
)
_LISTGROUPMEMBERSREQUEST = _descriptor.Descriptor(
name='ListGroupMembersRequest',
full_name='google.monitoring.v3.ListGroupMembersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.monitoring.v3.ListGroupMembersRequest.name', index=0,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_size', full_name='google.monitoring.v3.ListGroupMembersRequest.page_size', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_token', full_name='google.monitoring.v3.ListGroupMembersRequest.page_token', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='filter', full_name='google.monitoring.v3.ListGroupMembersRequest.filter', index=3,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interval', full_name='google.monitoring.v3.ListGroupMembersRequest.interval', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=760,
serialized_end=908,
)
_LISTGROUPMEMBERSRESPONSE = _descriptor.Descriptor(
name='ListGroupMembersResponse',
full_name='google.monitoring.v3.ListGroupMembersResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='members', full_name='google.monitoring.v3.ListGroupMembersResponse.members', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='google.monitoring.v3.ListGroupMembersResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='total_size', full_name='google.monitoring.v3.ListGroupMembersResponse.total_size', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=910,
serialized_end=1029,
)
_LISTGROUPSREQUEST.oneofs_by_name['filter'].fields.append(
_LISTGROUPSREQUEST.fields_by_name['children_of_group'])
_LISTGROUPSREQUEST.fields_by_name['children_of_group'].containing_oneof = _LISTGROUPSREQUEST.oneofs_by_name['filter']
_LISTGROUPSREQUEST.oneofs_by_name['filter'].fields.append(
_LISTGROUPSREQUEST.fields_by_name['ancestors_of_group'])
_LISTGROUPSREQUEST.fields_by_name['ancestors_of_group'].containing_oneof = _LISTGROUPSREQUEST.oneofs_by_name['filter']
_LISTGROUPSREQUEST.oneofs_by_name['filter'].fields.append(
_LISTGROUPSREQUEST.fields_by_name['descendants_of_group'])
_LISTGROUPSREQUEST.fields_by_name['descendants_of_group'].containing_oneof = _LISTGROUPSREQUEST.oneofs_by_name['filter']
_LISTGROUPSRESPONSE.fields_by_name['group'].message_type = google_dot_monitoring_dot_v3_dot_group__pb2._GROUP
_CREATEGROUPREQUEST.fields_by_name['group'].message_type = google_dot_monitoring_dot_v3_dot_group__pb2._GROUP
_UPDATEGROUPREQUEST.fields_by_name['group'].message_type = google_dot_monitoring_dot_v3_dot_group__pb2._GROUP
_LISTGROUPMEMBERSREQUEST.fields_by_name['interval'].message_type = google_dot_monitoring_dot_v3_dot_common__pb2._TIMEINTERVAL
_LISTGROUPMEMBERSRESPONSE.fields_by_name['members'].message_type = google_dot_api_dot_monitored__resource__pb2._MONITOREDRESOURCE
DESCRIPTOR.message_types_by_name['ListGroupsRequest'] = _LISTGROUPSREQUEST
DESCRIPTOR.message_types_by_name['ListGroupsResponse'] = _LISTGROUPSRESPONSE
DESCRIPTOR.message_types_by_name['GetGroupRequest'] = _GETGROUPREQUEST
DESCRIPTOR.message_types_by_name['CreateGroupRequest'] = _CREATEGROUPREQUEST
DESCRIPTOR.message_types_by_name['UpdateGroupRequest'] = _UPDATEGROUPREQUEST
DESCRIPTOR.message_types_by_name['DeleteGroupRequest'] = _DELETEGROUPREQUEST
DESCRIPTOR.message_types_by_name['ListGroupMembersRequest'] = _LISTGROUPMEMBERSREQUEST
DESCRIPTOR.message_types_by_name['ListGroupMembersResponse'] = _LISTGROUPMEMBERSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListGroupsRequest = _reflection.GeneratedProtocolMessageType('ListGroupsRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTGROUPSREQUEST,
__module__ = 'google.monitoring.v3.group_service_pb2'
# @@protoc_insertion_point(class_scope:google.monitoring.v3.ListGroupsRequest)
))
_sym_db.RegisterMessage(ListGroupsRequest)
ListGroupsResponse = _reflection.GeneratedProtocolMessageType('ListGroupsResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTGROUPSRESPONSE,
__module__ = 'google.monitoring.v3.group_service_pb2'
# @@protoc_insertion_point(class_scope:google.monitoring.v3.ListGroupsResponse)
))
_sym_db.RegisterMessage(ListGroupsResponse)
GetGroupRequest = _reflection.GeneratedProtocolMessageType('GetGroupRequest', (_message.Message,), dict(
DESCRIPTOR = _GETGROUPREQUEST,
__module__ = 'google.monitoring.v3.group_service_pb2'
# @@protoc_insertion_point(class_scope:google.monitoring.v3.GetGroupRequest)
))
_sym_db.RegisterMessage(GetGroupRequest)
CreateGroupRequest = _reflection.GeneratedProtocolMessageType('CreateGroupRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATEGROUPREQUEST,
__module__ = 'google.monitoring.v3.group_service_pb2'
# @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateGroupRequest)
))
_sym_db.RegisterMessage(CreateGroupRequest)
UpdateGroupRequest = _reflection.GeneratedProtocolMessageType('UpdateGroupRequest', (_message.Message,), dict(
DESCRIPTOR = _UPDATEGROUPREQUEST,
__module__ = 'google.monitoring.v3.group_service_pb2'
# @@protoc_insertion_point(class_scope:google.monitoring.v3.UpdateGroupRequest)
))
_sym_db.RegisterMessage(UpdateGroupRequest)
DeleteGroupRequest = _reflection.GeneratedProtocolMessageType('DeleteGroupRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETEGROUPREQUEST,
__module__ = 'google.monitoring.v3.group_service_pb2'
# @@protoc_insertion_point(class_scope:google.monitoring.v3.DeleteGroupRequest)
))
_sym_db.RegisterMessage(DeleteGroupRequest)
ListGroupMembersRequest = _reflection.GeneratedProtocolMessageType('ListGroupMembersRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTGROUPMEMBERSREQUEST,
__module__ = 'google.monitoring.v3.group_service_pb2'
# @@protoc_insertion_point(class_scope:google.monitoring.v3.ListGroupMembersRequest)
))
_sym_db.RegisterMessage(ListGroupMembersRequest)
ListGroupMembersResponse = _reflection.GeneratedProtocolMessageType('ListGroupMembersResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTGROUPMEMBERSRESPONSE,
__module__ = 'google.monitoring.v3.group_service_pb2'
# @@protoc_insertion_point(class_scope:google.monitoring.v3.ListGroupMembersResponse)
))
_sym_db.RegisterMessage(ListGroupMembersResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.google.monitoring.v3B\021GroupServiceProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3'))
_GROUPSERVICE = _descriptor.ServiceDescriptor(
name='GroupService',
full_name='google.monitoring.v3.GroupService',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=1032,
serialized_end=1859,
methods=[
_descriptor.MethodDescriptor(
name='ListGroups',
full_name='google.monitoring.v3.GroupService.ListGroups',
index=0,
containing_service=None,
input_type=_LISTGROUPSREQUEST,
output_type=_LISTGROUPSRESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\036\022\034/v3/{name=projects/*}/groups')),
),
_descriptor.MethodDescriptor(
name='GetGroup',
full_name='google.monitoring.v3.GroupService.GetGroup',
index=1,
containing_service=None,
input_type=_GETGROUPREQUEST,
output_type=google_dot_monitoring_dot_v3_dot_group__pb2._GROUP,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \022\036/v3/{name=projects/*/groups/*}')),
),
_descriptor.MethodDescriptor(
name='CreateGroup',
full_name='google.monitoring.v3.GroupService.CreateGroup',
index=2,
containing_service=None,
input_type=_CREATEGROUPREQUEST,
output_type=google_dot_monitoring_dot_v3_dot_group__pb2._GROUP,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002%\"\034/v3/{name=projects/*}/groups:\005group')),
),
_descriptor.MethodDescriptor(
name='UpdateGroup',
full_name='google.monitoring.v3.GroupService.UpdateGroup',
index=3,
containing_service=None,
input_type=_UPDATEGROUPREQUEST,
output_type=google_dot_monitoring_dot_v3_dot_group__pb2._GROUP,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002-\032$/v3/{group.name=projects/*/groups/*}:\005group')),
),
_descriptor.MethodDescriptor(
name='DeleteGroup',
full_name='google.monitoring.v3.GroupService.DeleteGroup',
index=4,
containing_service=None,
input_type=_DELETEGROUPREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 *\036/v3/{name=projects/*/groups/*}')),
),
_descriptor.MethodDescriptor(
name='ListGroupMembers',
full_name='google.monitoring.v3.GroupService.ListGroupMembers',
index=5,
containing_service=None,
input_type=_LISTGROUPMEMBERSREQUEST,
output_type=_LISTGROUPMEMBERSRESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002(\022&/v3/{name=projects/*/groups/*}/members')),
),
])
_sym_db.RegisterServiceDescriptor(_GROUPSERVICE)
DESCRIPTOR.services_by_name['GroupService'] = _GROUPSERVICE
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class GroupServiceStub(object):
"""The Group API lets you inspect and manage your
[groups](google.monitoring.v3.Group).
A group is a named filter that is used to identify
a collection of monitored resources. Groups are typically used to
mirror the physical and/or logical topology of the environment.
Because group membership is computed dynamically, monitored
resources that are started in the future are automatically placed
in matching groups. By using a group to name monitored resources in,
for example, an alert policy, the target of that alert policy is
updated automatically as monitored resources are added and removed
from the infrastructure.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListGroups = channel.unary_unary(
'/google.monitoring.v3.GroupService/ListGroups',
request_serializer=ListGroupsRequest.SerializeToString,
response_deserializer=ListGroupsResponse.FromString,
)
self.GetGroup = channel.unary_unary(
'/google.monitoring.v3.GroupService/GetGroup',
request_serializer=GetGroupRequest.SerializeToString,
response_deserializer=google_dot_monitoring_dot_v3_dot_group__pb2.Group.FromString,
)
self.CreateGroup = channel.unary_unary(
'/google.monitoring.v3.GroupService/CreateGroup',
request_serializer=CreateGroupRequest.SerializeToString,
response_deserializer=google_dot_monitoring_dot_v3_dot_group__pb2.Group.FromString,
)
self.UpdateGroup = channel.unary_unary(
'/google.monitoring.v3.GroupService/UpdateGroup',
request_serializer=UpdateGroupRequest.SerializeToString,
response_deserializer=google_dot_monitoring_dot_v3_dot_group__pb2.Group.FromString,
)
self.DeleteGroup = channel.unary_unary(
'/google.monitoring.v3.GroupService/DeleteGroup',
request_serializer=DeleteGroupRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ListGroupMembers = channel.unary_unary(
'/google.monitoring.v3.GroupService/ListGroupMembers',
request_serializer=ListGroupMembersRequest.SerializeToString,
response_deserializer=ListGroupMembersResponse.FromString,
)
class GroupServiceServicer(object):
"""The Group API lets you inspect and manage your
[groups](google.monitoring.v3.Group).
A group is a named filter that is used to identify
a collection of monitored resources. Groups are typically used to
mirror the physical and/or logical topology of the environment.
Because group membership is computed dynamically, monitored
resources that are started in the future are automatically placed
in matching groups. By using a group to name monitored resources in,
for example, an alert policy, the target of that alert policy is
updated automatically as monitored resources are added and removed
from the infrastructure.
"""
def ListGroups(self, request, context):
"""Lists the existing groups.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetGroup(self, request, context):
"""Gets a single group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateGroup(self, request, context):
"""Creates a new group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateGroup(self, request, context):
"""Updates an existing group.
You can change any group attributes except `name`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteGroup(self, request, context):
"""Deletes an existing group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListGroupMembers(self, request, context):
"""Lists the monitored resources that are members of a group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GroupServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'ListGroups': grpc.unary_unary_rpc_method_handler(
servicer.ListGroups,
request_deserializer=ListGroupsRequest.FromString,
response_serializer=ListGroupsResponse.SerializeToString,
),
'GetGroup': grpc.unary_unary_rpc_method_handler(
servicer.GetGroup,
request_deserializer=GetGroupRequest.FromString,
response_serializer=google_dot_monitoring_dot_v3_dot_group__pb2.Group.SerializeToString,
),
'CreateGroup': grpc.unary_unary_rpc_method_handler(
servicer.CreateGroup,
request_deserializer=CreateGroupRequest.FromString,
response_serializer=google_dot_monitoring_dot_v3_dot_group__pb2.Group.SerializeToString,
),
'UpdateGroup': grpc.unary_unary_rpc_method_handler(
servicer.UpdateGroup,
request_deserializer=UpdateGroupRequest.FromString,
response_serializer=google_dot_monitoring_dot_v3_dot_group__pb2.Group.SerializeToString,
),
'DeleteGroup': grpc.unary_unary_rpc_method_handler(
servicer.DeleteGroup,
request_deserializer=DeleteGroupRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'ListGroupMembers': grpc.unary_unary_rpc_method_handler(
servicer.ListGroupMembers,
request_deserializer=ListGroupMembersRequest.FromString,
response_serializer=ListGroupMembersResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.monitoring.v3.GroupService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaGroupServiceServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""The Group API lets you inspect and manage your
[groups](google.monitoring.v3.Group).
A group is a named filter that is used to identify
a collection of monitored resources. Groups are typically used to
mirror the physical and/or logical topology of the environment.
Because group membership is computed dynamically, monitored
resources that are started in the future are automatically placed
in matching groups. By using a group to name monitored resources in,
for example, an alert policy, the target of that alert policy is
updated automatically as monitored resources are added and removed
from the infrastructure.
"""
def ListGroups(self, request, context):
"""Lists the existing groups.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def GetGroup(self, request, context):
"""Gets a single group.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def CreateGroup(self, request, context):
"""Creates a new group.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def UpdateGroup(self, request, context):
"""Updates an existing group.
You can change any group attributes except `name`.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def DeleteGroup(self, request, context):
"""Deletes an existing group.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def ListGroupMembers(self, request, context):
"""Lists the monitored resources that are members of a group.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaGroupServiceStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""The Group API lets you inspect and manage your
[groups](google.monitoring.v3.Group).
A group is a named filter that is used to identify
a collection of monitored resources. Groups are typically used to
mirror the physical and/or logical topology of the environment.
Because group membership is computed dynamically, monitored
resources that are started in the future are automatically placed
in matching groups. By using a group to name monitored resources in,
for example, an alert policy, the target of that alert policy is
updated automatically as monitored resources are added and removed
from the infrastructure.
"""
def ListGroups(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Lists the existing groups.
"""
raise NotImplementedError()
ListGroups.future = None
def GetGroup(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Gets a single group.
"""
raise NotImplementedError()
GetGroup.future = None
def CreateGroup(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Creates a new group.
"""
raise NotImplementedError()
CreateGroup.future = None
def UpdateGroup(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Updates an existing group.
You can change any group attributes except `name`.
"""
raise NotImplementedError()
UpdateGroup.future = None
def DeleteGroup(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Deletes an existing group.
"""
raise NotImplementedError()
DeleteGroup.future = None
def ListGroupMembers(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Lists the monitored resources that are members of a group.
"""
raise NotImplementedError()
ListGroupMembers.future = None
def beta_create_GroupService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('google.monitoring.v3.GroupService', 'CreateGroup'): CreateGroupRequest.FromString,
('google.monitoring.v3.GroupService', 'DeleteGroup'): DeleteGroupRequest.FromString,
('google.monitoring.v3.GroupService', 'GetGroup'): GetGroupRequest.FromString,
('google.monitoring.v3.GroupService', 'ListGroupMembers'): ListGroupMembersRequest.FromString,
('google.monitoring.v3.GroupService', 'ListGroups'): ListGroupsRequest.FromString,
('google.monitoring.v3.GroupService', 'UpdateGroup'): UpdateGroupRequest.FromString,
}
response_serializers = {
('google.monitoring.v3.GroupService', 'CreateGroup'): google_dot_monitoring_dot_v3_dot_group__pb2.Group.SerializeToString,
('google.monitoring.v3.GroupService', 'DeleteGroup'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('google.monitoring.v3.GroupService', 'GetGroup'): google_dot_monitoring_dot_v3_dot_group__pb2.Group.SerializeToString,
('google.monitoring.v3.GroupService', 'ListGroupMembers'): ListGroupMembersResponse.SerializeToString,
('google.monitoring.v3.GroupService', 'ListGroups'): ListGroupsResponse.SerializeToString,
('google.monitoring.v3.GroupService', 'UpdateGroup'): google_dot_monitoring_dot_v3_dot_group__pb2.Group.SerializeToString,
}
method_implementations = {
('google.monitoring.v3.GroupService', 'CreateGroup'): face_utilities.unary_unary_inline(servicer.CreateGroup),
('google.monitoring.v3.GroupService', 'DeleteGroup'): face_utilities.unary_unary_inline(servicer.DeleteGroup),
('google.monitoring.v3.GroupService', 'GetGroup'): face_utilities.unary_unary_inline(servicer.GetGroup),
('google.monitoring.v3.GroupService', 'ListGroupMembers'): face_utilities.unary_unary_inline(servicer.ListGroupMembers),
('google.monitoring.v3.GroupService', 'ListGroups'): face_utilities.unary_unary_inline(servicer.ListGroups),
('google.monitoring.v3.GroupService', 'UpdateGroup'): face_utilities.unary_unary_inline(servicer.UpdateGroup),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_GroupService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('google.monitoring.v3.GroupService', 'CreateGroup'): CreateGroupRequest.SerializeToString,
('google.monitoring.v3.GroupService', 'DeleteGroup'): DeleteGroupRequest.SerializeToString,
('google.monitoring.v3.GroupService', 'GetGroup'): GetGroupRequest.SerializeToString,
('google.monitoring.v3.GroupService', 'ListGroupMembers'): ListGroupMembersRequest.SerializeToString,
('google.monitoring.v3.GroupService', 'ListGroups'): ListGroupsRequest.SerializeToString,
('google.monitoring.v3.GroupService', 'UpdateGroup'): UpdateGroupRequest.SerializeToString,
}
response_deserializers = {
('google.monitoring.v3.GroupService', 'CreateGroup'): google_dot_monitoring_dot_v3_dot_group__pb2.Group.FromString,
('google.monitoring.v3.GroupService', 'DeleteGroup'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
('google.monitoring.v3.GroupService', 'GetGroup'): google_dot_monitoring_dot_v3_dot_group__pb2.Group.FromString,
('google.monitoring.v3.GroupService', 'ListGroupMembers'): ListGroupMembersResponse.FromString,
('google.monitoring.v3.GroupService', 'ListGroups'): ListGroupsResponse.FromString,
('google.monitoring.v3.GroupService', 'UpdateGroup'): google_dot_monitoring_dot_v3_dot_group__pb2.Group.FromString,
}
cardinalities = {
'CreateGroup': cardinality.Cardinality.UNARY_UNARY,
'DeleteGroup': cardinality.Cardinality.UNARY_UNARY,
'GetGroup': cardinality.Cardinality.UNARY_UNARY,
'ListGroupMembers': cardinality.Cardinality.UNARY_UNARY,
'ListGroups': cardinality.Cardinality.UNARY_UNARY,
'UpdateGroup': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.monitoring.v3.GroupService', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| [
"abduld@wolfram.com"
] | abduld@wolfram.com |
a6b562ca3b529e764016b310b0c353d282227f87 | 6c86af7af7be5bbb2f4506e844c765653642db34 | /hackerrank_Set _symmetric_difference.py | e44878d98234ad261b646ba73cdca3ba1674cb50 | [] | no_license | Tarun-Sharma9168/Python-Programming | 83e64ac48cef3959adac24ea45b3985816499cbc | 73c8b38ba68deda74a22be49d0e9113448b7163f | refs/heads/master | 2020-08-11T03:53:39.571654 | 2020-04-29T20:49:10 | 2020-04-29T20:49:10 | 214,485,665 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py |
_, a = input(), set(input().split())
_, b = input(), set(input().split())
print(len(a.symmetric_difference(b)))
| [
"noreply@github.com"
] | Tarun-Sharma9168.noreply@github.com |
1a8d47a3a1f44665557418479cfd884da4e336be | 9366d7c14a6e1f2d10451fea8bbef82d3f3256c5 | /Warehouse2.py | e9dd02407ba12f6308659a34caa8707ce48406a9 | [] | no_license | hackeziah/PythonSample | f8746ef0e41bd45395911b710da34d0a922931fb | 43a584e01aedd8a473bc6f9515f24db064d288b5 | refs/heads/master | 2021-05-05T22:49:04.716676 | 2018-01-04T16:12:46 | 2018-01-04T16:12:46 | 116,280,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | from tkinter import *
win = Tk()
win.title("Ware House")
toolbar = Frame(win, bg = "cyan")
insertButt=Button(toolbar,text="Insert Image", command=doNothing)
insertButt.pack(side = LEFT,padx=3,pady=3)
printButt=Button(toolbar,text="Print", command=doNothing)
printButt.pack(side = LEFT,padx=3,pady=3)
toolbar.pack(side=TOP,fill=X)
status =Label(win,text="test testing...", bd=2, relief=SUNKEN,anchor=W)
status.pack(side = BOTTOM,fill=X)
win.mainloop()
| [
"hackevz@github.com"
] | hackevz@github.com |
131bc8166f96adf6c9f8444372b078308807317c | dfae3deb16a014bffeb0078a613422526491b568 | /engine/Point.py | afb8cd96b3f964be476b693b4c4db26fe2be53ae | [] | no_license | Vinniekun/microcosmos | d841c38511748e719df121fc86b68d1e3a506877 | fb7613c5d5d2400b99235e4ed2a208f6beeda454 | refs/heads/master | 2021-01-11T11:45:36.166151 | 2017-01-23T18:44:08 | 2017-01-23T18:44:08 | 79,834,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | class Point:
def __init__(self, x=0, y=0):
if type(x) is list:
self.x = x[0]
self.y = x[1]
else:
self.x = x
self.y = y
def xy(self):
return [self.x, self.y]
def ixy(self):
return [int(self.x), int(self.y)]
def translate(self, vec):
self.x += vec.x
self.y += vec.y
def rotate(self, alfa, root=None):
if root is None:
root = Point()
x, y = self.x - root.x, self.y - root.y
self.x = x * cos(alfa) - y * sin(alfa) + root.x
self.y = x * sin(alfa) + y * cos(alfa) + root.y
def scale(self, rate):
if rate.__class__ != Point:
rate = Point(rate, rate)
self.x *= rate.x
self.y *= rate.y
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y)
def __sub__(self, other):
return Point(self.x - other.x, self.y - other.y)
def __str__(self):
return '<Point (' + str(self.x) + ', ' + str(self.y) + ')>'
| [
"vdreifke@inf.ufsm.br"
] | vdreifke@inf.ufsm.br |
c133ae8e271089e53678d102066f2a1a3ba4b21f | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/17tli.py | 9627b9db58866eb838a6a4663f0d4b47a99da5b4 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 43 | py | ii = [('SadlMLP.py', 1), ('WilkJMC.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
2daba501ddc7a0a6d078eccd72c72700818c8318 | 491f876526c5eced1d9d71fd9137e10202fcbf3e | /codes/heatMap.py | 4fdb4009275ca85072197a35a575a2dfc2aea2c3 | [] | no_license | ailvtu/GraduationDesign | d21c6c20310124988d9b7c46d13982637360ea29 | a5d40d735f12af860d591c1b47cf61dfaaa8dc6b | refs/heads/master | 2020-06-14T22:18:19.520165 | 2017-01-09T02:37:45 | 2017-01-09T02:37:45 | 75,404,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | from matplotlib import mpl
import matplotlib.pyplot as plt
import numpy as np
import os
from readFiles import readFile
from filters import LPF,HPF,meanF,standard
magneticDataPath = '/home/dash/Pictures/sensorData/4lines'
def reverse(FileName):
listRe = []
for dirs in os.listdir(magneticDataPath +FileName):
print dirs
CF01x,CF01y,CF01z,CF01xyz = readFile(magneticDataPath +FileName,dirs);
listRe.append(CF01xyz.reverse())
return listRe,
def readData(FileName):
Datalist=[]
MinLen = []
for dirs in os.listdir(magneticDataPath +FileName):
print dirs
CF01x,CF01y,CF01z,CF01xyz = readFile(magneticDataPath +FileName,dirs);
Datalist.append(CF01xyz);MinLen.append(len(CF01xyz))
Datalist.append(CF01xyz);MinLen.append(len(CF01xyz))
minL = min(MinLen)
return minL,Datalist
minLL,CFlist = readData('/FI')
print minLL
hpTest =[]
mCFlist=[]
hpfCFlist=[]
standList = []
for Mdata in CFlist:
mCFlist.append(meanF(Mdata,5))
for mCF0 in mCFlist:
hpfCFlist.append(HPF(mCF0[:minLL/5]))
#standList = standard(mCFlist)
for data in mCFlist:
standList.append(standard(data))
hpTest = np.array(hpfCFlist)
data=np.clip(hpTest,-3,3)
fig = plt.figure()
ax = fig.add_subplot(111)
im = ax.imshow(data)
plt.colorbar(im,ticks=[-2,0,2])
plt.show() | [
"2442844656@qq.com"
] | 2442844656@qq.com |
c3ec5a34f644a33dc74bd22698c196e0352b4494 | 042c8678d6a7c280f672b107fc7e1c686406a746 | /models/__init__.py | fec373ee2ecd3ac596b7e22a5cfaeb046669a6be | [] | no_license | AhmedOmi/AirBnB_clone | d09d3ad511b7dd499cea4e168cacba893678e8c9 | 92bc2d2b01bcd282811f2d2dcbc0f7a42bfadd4a | refs/heads/master | 2021-01-03T21:11:42.166687 | 2020-03-08T11:58:49 | 2020-03-08T11:58:49 | 241,307,868 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | """ import FileStorage from models.engine"""
from models.engine.file_storage import FileStorage
storage = FileStorage()
storage.reload()
| [
"ahmedomarmiledi@gmail.com"
] | ahmedomarmiledi@gmail.com |
f88595be8e081d9770afe97186b1d01d3c80d317 | b013d963a0cb1f6d9d9e09d3264c787796c8eb07 | /crawler/fabfile.py | 96dc5028a4951610febc7a464ccef462eee3bd49 | [] | no_license | mnorkin/miniature-nemesis | 61e67c95f0c9ecfb35ecf747b7a1194c8013c49e | 32dcaac66086a0019362eb2e93285485faed1a49 | refs/heads/master | 2021-03-24T12:08:51.448310 | 2013-08-27T22:30:04 | 2013-08-27T22:30:04 | 7,631,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 4,515 | py | from fabric.api import *
from fabric.contrib import files
from fabric.operations import *
import datetime
env.project_name = 'crawler'
def environment():
env.user = 'agurkas'
env.hosts = ['185.5.55.178']
env.deploy_user = 'agurkas'
env.version = datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S")
env.release = env.version
env.code_root = '/home/%s/crawler' % env.user
env.activate = 'source %s/bin/activate' % env.code_root
env.code_root_parent = '/home/%s' % env.user
env.whole_path = '%s/releases/%s/%s' % (
env.code_root, env.release, env.project_name)
env.code_path_symlinked = '%s/releases/current/%s' % (
env.code_root, env.project_name)
def clean():
"""
Cleaning precompiled files
"""
local('rm *.pyc')
def virtualenv(command):
"""
Virtualenv `sub shell`
"""
with cd(env.code_root):
run(env.activate + '; ' + command)
def reset_permissions():
"""
Resetting the permissions of the trivial paths
"""
sudo('chown %s -R %s' % (env.deploy_user, env.code_root_parent))
sudo('chgrp %s -R %s' % (env.deploy_user, env.code_root_parent))
def setup():
"""
Full setup of the system
"""
require('hosts', provided_by=[environment])
require('code_root')
run('mkdir -p %s' % (env.code_root))
virtualenv('mkdir releases; mkdir shared; mkdir packages')
reset_permissions()
deploy()
def deploy():
"""
Deployment of the app
"""
require('hosts', provided_by=[environment])
require('whole_path', provided_by=[environment])
require('code_root')
upload_tar_from_git(env.whole_path)
install_requirements()
symlink_current_release()
restart()
def update():
"""
Small, tiny update of the system
"""
require('hosts', provided_by=[environment])
require('whole_path', provided_by=[environment])
require('code_root')
upload_tar_from_git(env.whole_path)
install_requirements()
symlink_current_release()
restart()
def upload_tar_from_git(path):
"""
Making an archive and upload it to the host
"""
require('release', provided_by=[environment])
require('whole_path', provided_by=[environment])
local('git archive --format=tar slave | gzip > %s.tar.gz' % env.release)
run('mkdir -p %s' % path)
put('%s.tar.gz' % env.release, '/tmp', mode=0755)
run('mv /tmp/%s.tar.gz %s/packages/' % (env.release, env.code_root))
run('cd %s && tar zxf ../../../packages/%s.tar.gz' % (
env.whole_path, env.release))
local('rm %s.tar.gz' % env.release)
reset_permissions()
def install_requirements():
"""
Installation of the requirements of the application
"""
require('release', provided_by=[environment])
require('whole_path', provided_by=[environment])
sudo('cd %s; virtualenv .;source ./bin/activate;\
export PATH=/usr/bin:"$PATH";\
pip install -r %s/requirements.txt' % (env.code_root, env.whole_path))
# virtualenv('export PATH=/usr/bin:$PATH')
# virtualenv('pip install -r %s/requirements.txt' % env.whole_path)
reset_permissions()
def symlink_current_release():
"""
Linking the current release
"""
require('release', provided_by=[environment])
symlink_path = '%s/releases/current' % env.code_root
if not files.exists(symlink_path):
with cd(env.code_root):
run('ln -s %s/ releases/current' % env.release)
else:
with cd(env.code_root):
run('ln -nsf %s/ releases/current' % env.release)
with cd(env.code_root):
run('chown %s -R releases/current' % env.deploy_user)
run('chgrp %s -R releases/current' % env.deploy_user)
with cd(env.code_root + '/releases/current'):
run('chmod +x %s/deamon.py' % env.project_name)
# Set the appropriate permissions to launch the daemon
def restart():
"""
Restarting web server
"""
stop()
start()
def stop():
"""
Stopping the web crawler deamon
"""
deamon_root = "%s/releases/current/%s/deamon.py" % (
env.code_root, env.project_name)
if files.exists(deamon_root):
sudo(
'%s/releases/current/%s/deamon.py stop; sleep 2' %
(env.code_root, env.project_name))
def start():
"""
Starting the web crawler deamon
"""
project_path = '%s/releases/current/%s' % (env.code_root, env.project_name)
virtualenv('%s/deamon.py start; sleep 2' % project_path)
| [
"m.norkin@gmail.com"
] | m.norkin@gmail.com |
a5c10b70e7bf6efb6002838d418b47764db12cae | b1571f4ee376d789b8094777fd81c4fb47a89cf1 | /AtCoder/本番/ABC/until-ABC153/ABC151.py | 569ed4667f5f02b0b2122f2653cb7c84f6b8b831 | [] | no_license | hiroyaonoe/Competitive-programming | e49e43f8853602ba73e658cab423bd91ebbe9286 | 2949e10eec3a38498bedb57ea41a2491916bab1c | refs/heads/master | 2021-06-23T21:56:33.232931 | 2021-05-30T15:27:31 | 2021-05-30T15:27:31 | 225,863,783 | 2 | 0 | null | 2020-06-14T17:54:28 | 2019-12-04T12:37:24 | Python | UTF-8 | Python | false | false | 1,871 | py | # n=input()
# # print(chr(ord(n)+1))
# n,k,m=map(int,input().split())
# a=[]*(n-1)
# a=map(int,input().split())
# ans=m*n-sum(a)
# if ans<0:ans=0
# if ans>k:ans=-1
# print(ans)
# n,m=map(int,input().split())
# p=[[] for i in range(m)]
# s=[[] for i in range(m)]
# k=[True for i in range(n)]
# ac=0
# wa=[0 for i in range(n)]
#
# for i in range(m):
# p[i],s[i]=input().split()
#
# p=list(map(int,p))
#
# for i in range(m):
# if k[p[i]-1]:
# if s[i]=="AC":
# k[p[i]-1]=False
# ac+=1
# else:
# wa[p[i]-1]+=1
#
# for i in range(n):
# if k[i]:
# wa[i]=0
#
# print(ac,sum(wa))
# import sys,copy
# sys.setrecursionlimit(100000000)
# h,w = map(int,input().split())
# ss=[[[] for i in range(w)] for j in range(h)]
# s=[[[] for i in range(w)] for j in range(h)]
# for i in range(h):
# s[i]=list(input())
#
# dxdy=[[-1,0],[0,-1],[1,0],[0,1]]
#
# def search(x,y,qx,qy,cnt):
# global aans
# for dx,dy in dxdy:
# if (0<=x+dx<=w-1)&(0<=y+dy<=h-1):
# if (qx!=x+dx)|(qy!=y+dy):
# if s[y+dy][x+dx]==".":
# cnt+=1
# if cnt<=aans:
# if (x+dx == gx) & (y+dy == gy):
# aans = min(aans, cnt)
# cnt-=1
# else:
# search(x+dx,y+dy,x,y,cnt)
#
# ans=0
# for x in range(w):
# for y in range(h):
# for gx in range(w):
# for gy in range(h):
# aans=1000
# if (x!=gx)|(y!=gy):
# if (s[y][x]==".")&(s[gy][gx]=="."):
# search(x,y,0,0,0)
# ans=max(aans,ans)
# print(ans)
# n,m=map(int,input().split())
# a=map(int,input().split())
# a.sort()
# dis=[a[i+1]-a[i] for i in range(n-1)]
| [
"onoehiroya@gmail.com"
] | onoehiroya@gmail.com |
dc5b0b711378264e280bf256d70e0ea2a48d8e55 | c25e8f2fb5fcd7560f50cf77f49353816ceeffab | /Python for Everybody/Course_2_Python Data Structures/Assignment_7_2.py | 2c4996fa160ea310306688d0f272bfee276810a4 | [] | no_license | koteshrv/Coursera | c7d75928d095142d2f39013708741ea324dd6464 | 5f37ce922b1f76abcd8582f7dd1c7b674162dd64 | refs/heads/master | 2023-02-09T06:52:31.975940 | 2021-01-05T18:10:23 | 2021-01-05T18:10:23 | 294,464,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | # 7.2 Write a program that prompts for a file name, then opens that file and reads through the file, looking for lines of the form:
# X-DSPAM-Confidence: 0.8475
# Count these lines and extract the floating point values from each of the lines and compute the average of those values and produce an output as shown below. Do not use the sum() function or a variable named sum in your solution.
# You can download the sample data at http://www.py4e.com/code3/mbox-short.txt
# Use the file name mbox-short.txt as the file name
fname = input("Enter file name: ")
fh = open(fname)
s = 0
count = 0
for line in fh:
if not line.startswith("X-DSPAM-Confidence:") : continue
zero = line.find('0')
value = line[zero : ]
s = s + float(value)
count = count + 1
avg = float(s / count)
print("Average spam confidence:", avg)
| [
"kkotesh100@gmail.com"
] | kkotesh100@gmail.com |
6de650c167d612b2ef89c70f56608188692dcc84 | a1f4cef5c1a1f3cc89c1c6e9095c203f41e33cd1 | /Control_Statements/file1.py | a26ec5230d01e952015131404d05cf1004ade558 | [] | no_license | simrangrover5/KIITPYTHONBATCH2021 | a10eb217a691c6fda878d3b7840e9c8fa6c59731 | aacd1fb5ffdf46b1b57778d5194f1820b0fb1733 | refs/heads/master | 2023-07-08T21:46:00.576144 | 2021-08-14T17:40:21 | 2021-08-14T17:40:21 | 386,839,065 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | """This file is used to find the numbers divisible by other numbers"""
start = int(input("\n Enter any number : "))
end = int(input("\n Enter second number : "))
n1 = int(input("\n Enter number 1 : "))
n2 = int(input("\n Enter number 2 : "))
op = input("\n Enter operation and/or : ").strip().lower()
if op == "and":
while start<=end:
if start%n1==0 and start%n2==0:
print(start)
start += 1
elif op == "or":
while start<=end:
if start%n1==0 or start%n2==0:
print(start)
start += 1
else:
print("\n INCORRECT OPTION")
| [
"simrangrover5@gmail.com"
] | simrangrover5@gmail.com |
405c0786cb6c72d12ebcea3af73a60d1be141a66 | 02952ddf96e7960a3faef74485f4ffc12bcf2973 | /pySDC/implementations/controller_classes/allinclusive_multigrid_nonMPI.py | 843ff8d36fcd546348fe3ee655544a4165fa6118 | [
"BSD-2-Clause"
] | permissive | danielru/pySDC | 5decca37e1ecea643fe21dac0f978e3fdaa24ac6 | 558b2b4db3aeb97e6a87e41cd4958a8a948af37a | refs/heads/master | 2020-12-25T10:58:57.215298 | 2017-03-21T06:45:59 | 2017-03-21T06:45:59 | 31,062,846 | 0 | 0 | null | 2015-02-20T11:52:33 | 2015-02-20T11:52:33 | null | UTF-8 | Python | false | false | 15,706 | py | import itertools
import copy as cp
import numpy as np
from pySDC.core.Controller import controller
from pySDC.core import Step as stepclass
from pySDC.core.Errors import ControllerError, CommunicationError
class allinclusive_multigrid_nonMPI(controller):
"""
PFASST controller, running serialized version of PFASST in blocks (MG-style)
"""
def __init__(self, num_procs, controller_params, description):
"""
Initialization routine for PFASST controller
Args:
num_procs: number of parallel time steps (still serial, though), can be 1
controller_params: parameter set for the controller and the steps
description: all the parameters to set up the rest (levels, problems, transfer, ...)
"""
# call parent's initialization routine
super(allinclusive_multigrid_nonMPI, self).__init__(controller_params)
self.MS = []
# simply append step after step and generate the hierarchies
for p in range(num_procs):
self.MS.append(stepclass.step(description))
if self.params.dump_setup:
self.dump_setup(step=self.MS[0], controller_params=controller_params, description=description)
assert not (len(self.MS) > 1 and len(self.MS[0].levels) == 1), "ERROR: multigrid cannot do MSSDC"
if num_procs > 1 and len(self.MS[0].levels) > 1:
for S in self.MS:
for L in S.levels:
assert L.sweep.coll.right_is_node, "For PFASST to work, we assume uend^k = u_M^k"
def run(self, u0, t0, Tend):
"""
Main driver for running the serial version of SDC, MSSDC, MLSDC and PFASST (virtual parallelism)
Args:
u0: initial values
t0: starting time
Tend: ending time
Returns:
end values on the finest level
stats object containing statistics for each step, each level and each iteration
"""
# some initializations and reset of statistics
uend = None
num_procs = len(self.MS)
self.hooks.reset_stats()
# initial ordering of the steps: 0,1,...,Np-1
slots = [p for p in range(num_procs)]
# initialize time variables of each step
time = [t0 + sum(self.MS[j].dt for j in range(p)) for p in slots]
# determine which steps are still active (time < Tend)
active = [time[p] < Tend - 10 * np.finfo(float).eps for p in slots]
# compress slots according to active steps, i.e. remove all steps which have times above Tend
active_slots = list(itertools.compress(slots, active))
# initialize block of steps with u0
self.restart_block(active_slots, time, u0)
# call pre-run hook
for S in self.MS:
self.hooks.pre_run(step=S, level_number=0)
# main loop: as long as at least one step is still active (time < Tend), do something
while any(active):
MS_active = []
for p in active_slots:
MS_active.append(self.MS[p])
while not all([MS_active[p].status.done for p in range(len(MS_active))]):
MS_active = self.pfasst(MS_active)
for p in range(len(MS_active)):
self.MS[active_slots[p]] = MS_active[p]
# uend is uend of the last active step in the list
uend = self.MS[active_slots[-1]].levels[0].uend
for p in active_slots:
time[p] += num_procs * self.MS[p].dt
# determine new set of active steps and compress slots accordingly
active = [time[p] < Tend - 10 * np.finfo(float).eps for p in slots]
active_slots = list(itertools.compress(slots, active))
# restart active steps (reset all values and pass uend to u0)
self.restart_block(active_slots, time, uend)
# call post-run hook
for S in self.MS:
self.hooks.post_run(step=S, level_number=0)
return uend, self.hooks.return_stats()
def restart_block(self, active_slots, time, u0):
    """
    Reset/restart a block of (active) steps.

    Args:
        active_slots: list of active step indices
        time: list of new times, indexed by slot
        u0: initial value to distribute across the steps
    """
    n_active = len(active_slots)

    for j, p in enumerate(active_slots):
        S = self.MS[p]
        # remember the slot number for diagnostics
        S.status.slot = p
        # link to the previous step; j - 1 == -1 wraps the first step
        # around to the last one, as in the original ring ordering
        S.prev = self.MS[active_slots[j - 1]]
        S.reset_step()
        # first/last flags within the active block
        S.status.first = j == 0
        S.status.last = j == n_active - 1
        # initialize the step with u0 and reset its bookkeeping
        S.init_step(u0)
        S.status.done = False
        S.status.iter = 1
        S.status.stage = 'SPREAD'
        for lvl in S.levels:
            lvl.tag = None

    # set the new times on every level of every active step
    for p in active_slots:
        for lvl in self.MS[p].levels:
            lvl.status.time = time[p]
@staticmethod
def recv(target, source, tag=None):
    """
    One-sided receive: copy uend of the source level into u[0] of the target.

    Args:
        target: level which will receive the values
        source: level which initiated the send
        tag: identifier to check if this message is really for me

    Raises:
        CommunicationError: if the source's tag does not match
    """
    if tag is not None:
        if source.tag != tag:
            raise CommunicationError('source and target tag are not the same, got %s and %s' % (source.tag, tag))
    # deep copy of the source's uend becomes the new u0 at the target
    target.u[0] = target.prob.dtype_u(source.uend)
    # the rhs has to be re-evaluated at the left interval boundary
    target.f[0] = target.prob.eval_f(target.u[0], target.time)
@staticmethod
def send(source, tag):
    """
    One-sided send: make the level's end point available and stamp the tag.

    Args:
        source: level which has the new values
        tag: identifier for this message
    """
    # "sending" here just means computing uend ("one-sided communication")
    source.sweep.compute_end_point()
    new_tag = cp.deepcopy(tag)
    source.tag = new_tag
def predictor(self, MS):
    """
    Predictor phase, extracted from the stepwise implementation (will be
    also used by matrix sweepers): burn-in sweeps on the coarsest level.

    Args:
        MS: all active steps

    Returns:
        all active steps
    """
    num_steps = len(MS)

    # restrict the spread initial guess down to the coarsest level
    for S in MS:
        for l in range(1, len(S.levels)):
            S.transfer(source=S.levels[l - 1], target=S.levels[l])

    for q in range(num_steps):
        # sweep and send on the trailing steps: [0..], [1..], ..., [last]
        for S in MS[q:]:
            coarse = S.levels[-1]
            coarse.sweep.update_nodes()
            self.logger.debug('Process %2i provides data on level %2i with tag %s -- PREDICT'
                              % (S.status.slot, len(S.levels) - 1, 0))
            self.send(coarse, tag=(len(S.levels), 0, S.status.slot))
        # all but the first of those receive from their predecessor
        for S in MS[q + 1:]:
            self.logger.debug('Process %2i receives from %2i on level %2i with tag %s -- PREDICT' %
                              (S.status.slot, S.prev.status.slot, len(S.levels) - 1, 0))
            self.recv(S.levels[-1], S.prev.levels[-1], tag=(len(S.levels), 0, S.prev.status.slot))

    # interpolate the coarse-level result back up to the finest level
    for S in MS:
        for l in range(len(S.levels) - 1, 0, -1):
            S.transfer(source=S.levels[l], target=S.levels[l - 1])

    return MS
def pfasst(self, MS):
    """
    Main function including the stages of SDC, MLSDC and PFASST (the "controller").

    Dispatches on the common stage of all active steps and advances the
    whole block by one stage of the state machine. For the workflow of this
    controller, check out one of our PFASST talks.

    Args:
        MS: all active steps

    Returns:
        all active steps

    Raises:
        ControllerError: if the steps are not all in the same stage, or if
            the stage is unknown
    """
    # all steps have to be in the same stage, otherwise abort.
    # BUGFIX: compare the stages for equality; the previous check
    # (all(S.status.stage for S in MS)) only tested truthiness and would
    # accept blocks whose steps had diverged into different stages.
    if all(S.status.stage == MS[0].status.stage for S in MS):
        stage = MS[0].status.stage
    else:
        raise ControllerError('not all stages are equal')

    self.logger.debug(stage)

    if stage == 'SPREAD':
        # (potentially) serial spreading phase
        for S in MS:
            # first stage: spread values
            self.hooks.pre_step(step=S, level_number=0)
            # call predictor from sweeper
            S.levels[0].sweep.predict()
            # update stage
            if len(S.levels) > 1 and self.params.predict:  # MLSDC or PFASST with predict
                S.status.stage = 'PREDICT'
            else:
                self.hooks.pre_iteration(step=S, level_number=0)
                S.status.stage = 'IT_FINE'
        return MS

    elif stage == 'PREDICT':
        # call predictor (serial)
        MS = self.predictor(MS)
        for S in MS:
            # update stage
            self.hooks.pre_iteration(step=S, level_number=0)
            S.status.stage = 'IT_FINE'
        return MS

    elif stage == 'IT_FINE':
        # do fine sweep for all steps (virtually parallel)
        for S in MS:
            # standard sweep workflow: update nodes, compute residual, log progress
            self.hooks.pre_sweep(step=S, level_number=0)
            for k in range(S.levels[0].params.nsweeps):
                S.levels[0].sweep.update_nodes()
            # residual is needed once after the sweeps for convergence checks
            S.levels[0].sweep.compute_residual()
            self.hooks.post_sweep(step=S, level_number=0)
            # update stage
            S.status.stage = 'IT_CHECK'
        return MS

    elif stage == 'IT_CHECK':
        # check whether to stop iterating (parallel)
        for S in MS:
            self.hooks.post_iteration(step=S, level_number=0)
            S.status.done = self.check_convergence(S)

        # if not everyone is ready yet, keep doing stuff
        if not all(S.status.done for S in MS):
            for S in MS:
                S.status.done = False
                # increment iteration count here (and only here)
                S.status.iter += 1
                self.hooks.pre_iteration(step=S, level_number=0)
                # multi-level or single-level?
                if len(S.levels) > 1:  # MLSDC or PFASST
                    S.status.stage = 'IT_UP'
                else:  # SDC
                    S.status.stage = 'IT_FINE'
        else:
            # if everyone is ready, end
            for S in MS:
                S.levels[0].sweep.compute_end_point()
                self.hooks.post_step(step=S, level_number=0)
                S.status.stage = 'DONE'
        return MS

    elif stage == 'IT_UP':
        # go up the hierarchy from finest to coarsest level (parallel)
        for S in MS:
            S.transfer(source=S.levels[0], target=S.levels[1])
            # sweep and send on middle levels (not on finest, not on coarsest, though)
            for l in range(1, len(S.levels) - 1):
                self.hooks.pre_sweep(step=S, level_number=l)
                for k in range(S.levels[l].params.nsweeps):
                    S.levels[l].sweep.update_nodes()
                S.levels[l].sweep.compute_residual()
                self.hooks.post_sweep(step=S, level_number=l)
                # transfer further up the hierarchy
                S.transfer(source=S.levels[l], target=S.levels[l + 1])
            # update stage
            S.status.stage = 'IT_COARSE'
        return MS

    elif stage == 'IT_COARSE':
        # sweeps on coarsest level (serial/blocking)
        for S in MS:
            # receive from previous step (if not first)
            if not S.status.first:
                self.logger.debug('Process %2i receives from %2i on level %2i with tag %s' %
                                  (S.status.slot, S.prev.status.slot, len(S.levels) - 1, S.status.iter))
                self.recv(S.levels[-1], S.prev.levels[-1], tag=(len(S.levels), S.status.iter, S.prev.status.slot))
            # do the sweep
            self.hooks.pre_sweep(step=S, level_number=len(S.levels) - 1)
            S.levels[-1].sweep.update_nodes()
            S.levels[-1].sweep.compute_residual()
            self.hooks.post_sweep(step=S, level_number=len(S.levels) - 1)
            # send to successor step
            if not S.status.last:
                self.logger.debug('Process %2i provides data on level %2i with tag %s'
                                  % (S.status.slot, len(S.levels) - 1, S.status.iter))
                self.send(S.levels[-1], tag=(len(S.levels), S.status.iter, S.status.slot))
            # update stage
            if len(S.levels) > 1:  # MLSDC or PFASST
                S.status.stage = 'IT_DOWN'
            else:  # MSSDC
                S.status.stage = 'IT_CHECK'
        return MS

    elif stage == 'IT_DOWN':
        # prolong corrections down to finest level (parallel)
        for S in MS:
            # receive and sweep on middle levels (except for coarsest level)
            for l in range(len(S.levels) - 1, 0, -1):
                # prolong values
                S.transfer(source=S.levels[l], target=S.levels[l - 1])
                # send updated values forward
                if self.params.fine_comm and not S.status.last:
                    self.logger.debug('Process %2i provides data on level %2i with tag %s'
                                      % (S.status.slot, l - 1, S.status.iter))
                    self.send(S.levels[l - 1], tag=(l - 1, S.status.iter, S.status.slot))
                # receive values
                if self.params.fine_comm and not S.status.first:
                    self.logger.debug('Process %2i receives from %2i on level %2i with tag %s' %
                                      (S.status.slot, S.prev.status.slot, l - 1, S.status.iter))
                    self.recv(S.levels[l - 1], S.prev.levels[l - 1], tag=(l - 1, S.status.iter, S.prev.status.slot))
                # on middle levels: do sweep as usual
                if l - 1 > 0:
                    self.hooks.pre_sweep(step=S, level_number=l - 1)
                    for k in range(S.levels[l - 1].params.nsweeps):
                        S.levels[l - 1].sweep.update_nodes()
                    S.levels[l - 1].sweep.compute_residual()
                    self.hooks.post_sweep(step=S, level_number=l - 1)
            # update stage
            S.status.stage = 'IT_FINE'
        return MS

    else:
        raise ControllerError('Unknown stage, got %s' % stage)
| [
"r.speck@fz-juelich.de"
] | r.speck@fz-juelich.de |
9da580aa7400dd35965456da6425fef026b647ea | 0cd6e891818d10db121f15006d43fc13d5e58b54 | /predict.py | 743a092267776e3916ceba21e79d529aa2040524 | [] | no_license | danny-ell77/FireSage | 82f8915b7a87c1e8b18ccfc38f415391a50e2e11 | cffc4cecbbe3802928fb1d94ab5e18846354501f | refs/heads/main | 2023-07-06T18:42:51.661764 | 2021-07-24T05:17:42 | 2021-07-24T05:17:42 | 384,995,566 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,951 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START aiplatform_predict_custom_trained_model_sample]
import os
from typing import Dict
import base64
from tensorflow import convert_to_tensor
from tensorflow import float32
from tensorflow.keras.preprocessing import image
from numpy import expand_dims
from PIL import Image
from google.cloud import aiplatform
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "bubbly-mantis-311315-d7ae76acdf25.json"
def predict_custom_trained_model_sample(
    project: str,
    endpoint_id: str,
    instance_dict: Dict,
    location: str = "us-east1",
    api_endpoint: str = "us-east1-aiplatform.googleapis.com",
):
    """
    Run a single online prediction against a custom-trained Vertex AI model
    and print the result.

    Args:
        project: GCP project hosting the endpoint.
        endpoint_id: id of the deployed endpoint.
        instance_dict: one prediction instance, conforming to the deployed
            model's prediction input schema.
        location: GCP region of the endpoint.
        api_endpoint: regional API endpoint to talk to.
    """
    # The AI Platform services require regional API endpoints. The client
    # only needs to be created once and can be reused for many requests.
    client = aiplatform.gapic.PredictionServiceClient(
        client_options={"api_endpoint": api_endpoint}
    )

    instances = [instance_dict]
    parameters = json_format.ParseDict({}, Value())
    endpoint = client.endpoint_path(
        project=project, location=location, endpoint=endpoint_id
    )

    response = client.predict(
        endpoint=endpoint, instances=instances, parameters=parameters
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    # The predictions are a google.protobuf.Value representation of the
    # model's output; convert each one to a plain dict for display.
    for prediction in response.predictions:
        print(" prediction:", dict(prediction))
# --- example invocation ---
# Raw bytes are only needed for the (commented-out) base64 payload variant below.
with open("Images/fire/71x71.jpg", 'rb') as img_bytes:
    filo = img_bytes.read()

img = image.load_img("Images/fire/71x71.jpg")
# BUGFIX: the original rebound the name `image` to the ndarray here, which
# shadowed (and destroyed) the imported `image` module for the rest of the
# script; use a distinct variable name instead.
img_array = image.img_to_array(img)
print(img_array.shape)
# img_array = expand_dims(img_array, axis=0)

predict_custom_trained_model_sample(
    project="846552341928",
    endpoint_id="8106620066755248128",
    location="us-east1",
    instance_dict={"xception_input": [img_array.tolist(), ]}
)
# [END aiplatform_predict_custom_trained_model_sample]
# {"b4": base64.b64encode(filo).decode('utf-8')},
| [
"danielolahskybrow@gmail.com"
] | danielolahskybrow@gmail.com |
64cbcfa67802f09b6d5d8566cc34bfac757dd430 | 36af72aabf6f39ce465e94a5a2f4bf13025f8ce8 | /keras_retinanet/utils/anchors.py | 62cf44654aae68a88ab23721fc066a57f5cafcf7 | [
"Apache-2.0"
] | permissive | prodriguezsahagun/retinanet_customanchors | 144a72340dc8bedd7319f5b4dcc4612999a91d2b | be8d87a6fd05554e5d4fe4419aa01c1d3cba876d | refs/heads/master | 2020-06-27T09:29:58.977171 | 2019-08-10T19:44:35 | 2019-08-10T19:44:35 | 199,912,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,549 | py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import keras
from ..utils.compute_overlap import compute_overlap
class AnchorParameters:
    """
    Container for the parameters that define how anchors are generated.

    Args
        sizes   : list of anchor sizes, one per feature level.
        strides : list of strides, one per feature level.
        ratios  : array of aspect ratios used per feature-map location.
        scales  : array of scales used per feature-map location.
    """

    def __init__(self, sizes, strides, ratios, scales):
        self.sizes = sizes
        self.strides = strides
        self.ratios = ratios
        self.scales = scales

    def num_anchors(self):
        """Number of anchors per feature-map location (ratios x scales)."""
        return len(self.scales) * len(self.ratios)
"""
The default anchor parameters.
"""
AnchorParameters.default = AnchorParameters(
sizes = [32, 64, 128, 256, 512],
strides = [8, 16, 32, 64, 128],
ratios = np.array([0.08, 0.19, 0.21, 0.24, 0.25, 0.28, 0.3, 0.31, 0.32, 0.32, 0.34, 0.34, 0.37, 0.37, 0.37, 0.39, 0.4, 0.4, 0.41, 0.41, 0.43, 0.43, 0.44, 0.46, 0.49, 0.49, 0.51, 0.51, 0.52, 0.53, 0.53, 0.53, 0.53, 0.58, 0.58, 0.63, 0.64, 0.65, 0.65, 0.65, 0.66, 0.66, 0.68, 0.68, 0.72, 0.77, 0.79, 0.8, 0.83, 0.83, 0.84, 0.84, 0.84, 0.85, 0.85, 0.85, 0.93, 0.94, 0.95, 0.96, 0.99, 1.0, 1.01, 1.04, 1.04, 1.08, 1.14, 1.2, 1.22, 1.27, 1.31, 1.33, 1.35, 1.39, 1.39, 1.4, 1.53, 1.53, 1.67, 1.82, 1.85, 1.86, 1.93, 1.94, 2.0, 2.06, 2.11, 2.24, 2.85, 3.0, 3.08, 3.09, 3.51, 3.57, 4.22, 4.53, 4.7, 5.07, 5.32, 5.75, 7.0, 7.99, 11.08, 11.2, 23.53], keras.backend.floatx()),
scales = np.array([2 ** 0, 2 ** (1.0 / 4.0), 2 ** (2.0 / 4.0), 2 ** (3.0 / 4.0)], keras.backend.floatx()),
)
def anchor_targets_bbox(
    anchors,
    image_group,
    annotations_group,
    num_classes,
    negative_overlap=0.4,
    positive_overlap=0.5
):
    """
    Generate anchor targets for bbox detection.

    Args
        anchors: np.array of anchors of shape (N, 4) for (x1, y1, x2, y2).
        image_group: list of BGR images.
        annotations_group: list of annotation dicts, each containing 'bboxes'
            (np.array of shape (M, 4)) and 'labels' (np.array of shape (M,)).
        num_classes: number of classes to predict.
        negative_overlap: IoU overlap below which anchors are negative.
        positive_overlap: IoU overlap above which anchors are positive.

    Returns
        regression_batch: (batch_size, N, 4 + 1) array; columns 0-3 hold the
            regression targets for (x1, y1, x2, y2), the last column holds the
            anchor state (-1 for ignore, 0 for bg, 1 for fg).
        labels_batch: (batch_size, N, num_classes + 1) array; the last column
            holds the anchor state (-1 for ignore, 0 for bg, 1 for fg).
    """
    assert(len(image_group) == len(annotations_group)), "The length of the images and annotations need to be equal."
    assert(len(annotations_group) > 0), "No data received to compute anchor targets for."
    for annotations in annotations_group:
        assert('bboxes' in annotations), "Annotations should contain bboxes."
        assert('labels' in annotations), "Annotations should contain labels."

    batch_size = len(image_group)
    num_anchors = anchors.shape[0]
    regression_batch = np.zeros((batch_size, num_anchors, 4 + 1), dtype=keras.backend.floatx())
    labels_batch = np.zeros((batch_size, num_anchors, num_classes + 1), dtype=keras.backend.floatx())

    # compute labels and regression targets, one image at a time
    for batch_index, (img, annotations) in enumerate(zip(image_group, annotations_group)):
        gt_boxes = annotations['bboxes']
        if gt_boxes.shape[0]:
            # per-anchor index of the gt annotation with the greatest overlap
            pos_idx, ignore_idx, best_gt_idx = compute_gt_annotations(
                anchors, gt_boxes, negative_overlap, positive_overlap)

            # mark the anchor states in the last column
            labels_batch[batch_index, ignore_idx, -1] = -1
            labels_batch[batch_index, pos_idx, -1] = 1
            regression_batch[batch_index, ignore_idx, -1] = -1
            regression_batch[batch_index, pos_idx, -1] = 1

            # one-hot class targets for the positive anchors
            labels_batch[batch_index, pos_idx, annotations['labels'][best_gt_idx[pos_idx]].astype(int)] = 1
            # regression targets towards each anchor's best-matching gt box
            regression_batch[batch_index, :, :-1] = bbox_transform(anchors, gt_boxes[best_gt_idx, :])

        # ignore anchors whose center lies outside the (possibly padded) image
        if img.shape:
            centers = np.vstack([(anchors[:, 0] + anchors[:, 2]) / 2, (anchors[:, 1] + anchors[:, 3]) / 2]).T
            outside = np.logical_or(centers[:, 0] >= img.shape[1], centers[:, 1] >= img.shape[0])
            labels_batch[batch_index, outside, -1] = -1
            regression_batch[batch_index, outside, -1] = -1

    return regression_batch, labels_batch
def compute_gt_annotations(
    anchors,
    annotations,
    negative_overlap=0.4,
    positive_overlap=0.5
):
    """
    For every anchor, find the ground-truth box with the greatest IoU and
    classify the anchor as positive or ignored.

    Args
        anchors: np.array of anchors of shape (N, 4) for (x1, y1, x2, y2).
        annotations: np.array of gt boxes of shape (M, 4+).
        negative_overlap: anchors with max IoU below this are negative.
        positive_overlap: anchors with max IoU at or above this are positive.

    Returns
        positive_indices: boolean mask of positive anchors.
        ignore_indices: boolean mask of ignored ("don't care") anchors.
        argmax_overlaps_inds: per-anchor index of the best gt annotation.
    """
    overlaps = compute_overlap(anchors.astype(np.float64), annotations.astype(np.float64))
    argmax_overlaps_inds = overlaps.argmax(axis=1)
    max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps_inds]

    positive_indices = max_overlaps >= positive_overlap
    # "don't care": above the negative threshold, but not positive either
    ignore_indices = (max_overlaps > negative_overlap) & ~positive_indices

    return positive_indices, ignore_indices, argmax_overlaps_inds
def layer_shapes(image_shape, model):
    """
    Compute the output shape of every layer of *model* for a given input
    image shape.

    Args
        image_shape: shape of the input image.
        model: the model used to trace how the image shape is transformed.

    Returns
        dict mapping layer names to their (batch-first) output shapes.
    """
    shape = {model.layers[0].name: (None,) + image_shape}

    for layer in model.layers[1:]:
        for node in layer._inbound_nodes:
            input_shapes = [shape[inbound.name] for inbound in node.inbound_layers]
            if not input_shapes:
                continue
            # single-input layers take a plain shape, multi-input a list
            shape[layer.name] = layer.compute_output_shape(
                input_shapes[0] if len(input_shapes) == 1 else input_shapes)

    return shape
def make_shapes_callback(model):
    """Build a callback that asks *model* for the shapes of the pyramid levels."""
    def get_shapes(image_shape, pyramid_levels):
        all_shapes = layer_shapes(image_shape, model)
        return [all_shapes["P{}".format(level)][1:3] for level in pyramid_levels]
    return get_shapes
def guess_shapes(image_shape, pyramid_levels):
    """
    Guess the feature-map shape at each pyramid level by ceil-dividing the
    image shape by the level's stride (2 ** level).

    Args
        image_shape: shape of the input image (rows, cols, ...).
        pyramid_levels: list of pyramid levels in use.

    Returns
        list of (rows, cols) np.arrays, one per pyramid level.
    """
    size = np.array(image_shape[:2])
    # (size + stride - 1) // stride is an integer ceiling division
    return [(size + 2 ** level - 1) // (2 ** level) for level in pyramid_levels]
def anchors_for_shape(
    image_shape,
    pyramid_levels=None,
    anchor_params=None,
    shapes_callback=None,
):
    """
    Generate anchors for a given image shape.

    Args
        image_shape: shape of the image.
        pyramid_levels: list of ints representing which pyramids to use
            (defaults to [3, 4, 5, 6, 7]).
        anchor_params: AnchorParameters instance; defaults to
            AnchorParameters.default.
        shapes_callback: function returning the feature-map shape at each
            pyramid level (defaults to guess_shapes).

    Returns
        np.array of shape (N, 4) containing the (x1, y1, x2, y2) anchors.
    """
    if pyramid_levels is None:
        pyramid_levels = [3, 4, 5, 6, 7]
    if anchor_params is None:
        anchor_params = AnchorParameters.default
    if shapes_callback is None:
        shapes_callback = guess_shapes
    image_shapes = shapes_callback(image_shape, pyramid_levels)

    # compute anchors over all pyramid levels
    all_anchors = np.zeros((0, 4))
    for idx, p in enumerate(pyramid_levels):
        # BUGFIX: use the ratios from anchor_params instead of a second
        # hard-coded copy of the default ratio list, so that custom
        # AnchorParameters are actually honoured (the default is unchanged,
        # since AnchorParameters.default.ratios holds the same values).
        anchors = generate_anchors(
            base_size=anchor_params.sizes[idx],
            ratios=anchor_params.ratios,
            scales=anchor_params.scales
        )
        shifted_anchors = shift(image_shapes[idx], anchor_params.strides[idx], anchors)
        all_anchors = np.append(all_anchors, shifted_anchors, axis=0)

    return all_anchors
def shift(shape, stride, anchors):
    """
    Replicate the reference anchors at every position of a feature map.

    Args
        shape  : (rows, cols) of the feature map to shift the anchors over.
        stride : distance in input pixels between neighbouring positions.
        anchors: (A, 4) reference anchors applied at each position.

    Returns
        (rows * cols * A, 4) array of shifted anchors.
    """
    # cell centers, half a stride from the top-left corner of the image
    shift_x = (np.arange(0, shape[1]) + 0.5) * stride
    shift_y = (np.arange(0, shape[0]) + 0.5) * stride
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)

    shifts = np.vstack((
        shift_x.ravel(), shift_y.ravel(),
        shift_x.ravel(), shift_y.ravel()
    )).transpose()

    # broadcast (1, A, 4) anchors against (K, 1, 4) shifts to get (K, A, 4),
    # then flatten to the final (K * A, 4) layout
    num_anchors = anchors.shape[0]
    num_positions = shifts.shape[0]
    shifted = anchors.reshape((1, num_anchors, 4)) + shifts.reshape((1, num_positions, 4)).transpose((1, 0, 2))
    return shifted.reshape((num_positions * num_anchors, 4))
def generate_anchors(base_size=16, ratios=None, scales=None):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales w.r.t. a reference window centred at the origin.

    Args
        base_size: edge length of the square reference window.
        ratios: aspect ratios (height / width); defaults to
            AnchorParameters.default.ratios.
        scales: scales; defaults to AnchorParameters.default.scales.

    Returns
        (len(ratios) * len(scales), 4) array of (x1, y1, x2, y2) anchors.
    """
    # BUGFIX: take the default from AnchorParameters.default (as is already
    # done for scales) instead of a second hard-coded copy of the ratio
    # list, removing the duplication; the values are identical.
    if ratios is None:
        ratios = AnchorParameters.default.ratios
    if scales is None:
        scales = AnchorParameters.default.scales

    num_anchors = len(ratios) * len(scales)
    anchors = np.zeros((num_anchors, 4))

    # start from square windows of size base_size * scale
    anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T

    # reshape each window to the requested aspect ratio, preserving its area
    areas = anchors[:, 2] * anchors[:, 3]
    anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))
    anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales))

    # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)
    anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
    anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T

    return anchors
def bbox_transform(anchors, gt_boxes, mean=None, std=None):
    """
    Compute normalised bounding-box regression targets from anchors to their
    matched ground-truth boxes.

    Args
        anchors: (N, 4) anchor boxes (x1, y1, x2, y2).
        gt_boxes: (N, 4) matching ground-truth boxes.
        mean: per-target normalisation mean (defaults to zeros).
        std: per-target normalisation std (defaults to 0.2 everywhere).

    Returns
        (N, 4) array of normalised regression targets.

    Raises
        ValueError: if mean or std is not an ndarray, list or tuple.
    """
    mean = np.array([0, 0, 0, 0]) if mean is None else mean
    std = np.array([0.2, 0.2, 0.2, 0.2]) if std is None else std

    if isinstance(mean, (list, tuple)):
        mean = np.array(mean)
    elif not isinstance(mean, np.ndarray):
        raise ValueError('Expected mean to be a np.ndarray, list or tuple. Received: {}'.format(type(mean)))
    if isinstance(std, (list, tuple)):
        std = np.array(std)
    elif not isinstance(std, np.ndarray):
        raise ValueError('Expected std to be a np.ndarray, list or tuple. Received: {}'.format(type(std)))

    widths = anchors[:, 2] - anchors[:, 0]
    heights = anchors[:, 3] - anchors[:, 1]

    # per-corner offsets, normalised by the anchor extent
    dx1 = (gt_boxes[:, 0] - anchors[:, 0]) / widths
    dy1 = (gt_boxes[:, 1] - anchors[:, 1]) / heights
    dx2 = (gt_boxes[:, 2] - anchors[:, 2]) / widths
    dy2 = (gt_boxes[:, 3] - anchors[:, 3]) / heights

    targets = np.stack((dx1, dy1, dx2, dy2)).T
    return (targets - mean) / std
| [
"pabloalesanco6@gmail.com"
] | pabloalesanco6@gmail.com |
279b3a868bd3c4b9d9f4ae2327e04e35998bbb7a | 728c4d2c66238caff6370e3cedb73274bd679462 | /src/__init__.py | 3711b71cbcc61b152edb0920f71be5cb062bba79 | [] | no_license | Gautamaggrawal/Flask_microservice | 4945f446cd65eee15e697a5879ff44a98294c08c | cc389b3e70d464ee094c51ae3cf1e5b4f4ca4909 | refs/heads/master | 2021-06-28T07:55:47.750525 | 2019-09-04T17:55:15 | 2019-09-04T17:55:15 | 206,238,413 | 0 | 0 | null | 2021-03-20T01:38:44 | 2019-09-04T05:27:42 | Python | UTF-8 | Python | false | false | 1,010 | py | from flask import Flask
from .auth import auth as auth_blueprint
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from .models import db,User
def create_app():
    """Application factory: build and configure the Flask app."""
    app = Flask(__name__, template_folder='templates')
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
    app.config['SECRET_KEY'] = '9OLWxND4o83j4K4iuopO'

    # wire up flask-login
    login_manager = LoginManager()
    login_manager.login_view = 'auth.login'
    login_manager.init_app(app)

    # push an application context so the db setup below can run
    app.app_context().push()
    db.init_app(app)
    db.create_all()

    @login_manager.user_loader
    def load_user(user_id):
        # user_id is simply the primary key of the user table
        return User.query.get(int(user_id))

    app.register_blueprint(auth_blueprint)
    # blueprint for non-auth parts of app
    # from .app import main as main_blueprint
    # app.register_blueprint(main_blueprint)
    return app
| [
"gautamaggrawalsd@yahoo.in"
] | gautamaggrawalsd@yahoo.in |
4c6a4eb50735e1f46c827493a8a0b6f754b3e9a1 | ec2f2820ee32da7887ce0170839a26754d06a7f6 | /tasks/migrations/0006_auto_20170402_1152.py | c433a40aee0f476f43601201449d96f1be975383 | [] | no_license | maniche04/djIntranet | ab3b8e4be6e68bdebdca8795108f7387018d9c79 | 9fcddf99b549d956a110466c21f1150c56c9ca56 | refs/heads/master | 2021-01-18T20:51:55.609532 | 2017-04-02T15:43:09 | 2017-04-02T15:43:09 | 86,996,506 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-02 07:52
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: adds custom "view" permissions to the
    # Project and Task models (used for read-only access control).

    dependencies = [
        ('tasks', '0005_auto_20170115_0937'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='project',
            options={'permissions': (('view_project', 'View Project'),)},
        ),
        migrations.AlterModelOptions(
            name='task',
            options={'permissions': (('view_task', 'View Task'),)},
        ),
    ]
| [
"manish.ilam@gmail.com"
] | manish.ilam@gmail.com |
69f4293f1665d54b9434876b8d3686a0f325da57 | b9c05b40730f9e129222d22cdd845b14adb15b71 | /go_land.py | d31c21000a95512fd15db6511c6cfb392b99b957 | [
"MIT"
] | permissive | LijoDXL/OceanographyWithPython | cffcad5e7028ccc7f03088250e5208f3026f9fd0 | 294d150e50a938492311876d1cdfccac8be298e7 | refs/heads/master | 2020-08-23T12:18:20.315667 | 2020-08-15T20:44:28 | 2020-08-15T20:44:28 | 216,614,887 | 4 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | # don't be overwhelmed by this function
# It just adds coastlines and a nicely
# formatted latitude,longitude labelling
import cartopy as cr
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import cartopy.feature as cfeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
def fig_beauty(ax, xlim=None, ylim=None, ocean=False):
    """
    Apply coastlines and nicely formatted latitude/longitude labelling
    to a cartopy map axis.

    Parameters
    ----------
    ax : matplotlib axes
        axis of the plotted figure
    xlim : list, optional
        lower and upper lon values to set the plot extent
    ylim : list, optional
        lower and upper lat values to set the plot extent
    ocean : bool, optional
        whether to shade the ocean as well
    """
    if xlim is not None:
        ax.set_xlim(xlim[0], xlim[1])
    if ylim is not None:
        ax.set_ylim(ylim[0], ylim[1])

    # land is always drawn; ocean only on request
    ax.add_feature(cfeature.LAND)
    if ocean:
        ax.add_feature(cfeature.OCEAN)
    ax.add_feature(cfeature.COASTLINE)

    # drop the default y labelling; the gridliner below takes over
    ax.set_ylabel('')
    ax.set_yticklabels('')

    gridliner = ax.gridlines(color='black', linestyle='--', alpha=0.15, linewidth=2)
    gridliner.xlabels_bottom = True
    gridliner.ylabels_left = True
    gridliner.xformatter = LONGITUDE_FORMATTER
    gridliner.yformatter = LATITUDE_FORMATTER
"lijoabrahamjoseph91@gmail.com"
] | lijoabrahamjoseph91@gmail.com |
f5600a8c8c393063f0c45e66414654ad9520152b | 23a4214363636cab398aa9633218c4b2f6733545 | /content/pkg/deps/python/dassana/common/cache.py | 3522a89f43c46462ff648604c0393daa36aa5879 | [
"Apache-2.0"
] | permissive | Ctfbuster/dassana | c724c28ed9495d7a8cffef745ad613df7e573eb2 | ecfd7f53b8b0ea5152e071486bd0b8e549a708a2 | refs/heads/main | 2023-08-25T23:43:05.172523 | 2021-10-19T13:10:51 | 2021-10-19T13:10:51 | 410,211,650 | 0 | 0 | Apache-2.0 | 2021-09-25T07:55:53 | 2021-09-25T07:55:53 | null | UTF-8 | Python | false | false | 4,656 | py | from hashlib import sha256
from json import dumps
from aws_lambda_powertools.utilities.typing import LambdaContext
from cachetools import TTLCache, cached, LRUCache
class _HashedTuple(tuple):
"""A tuple that ensures that hash() will be called no more than once
per element, since cache decorators will hash the key multiple
times on a cache miss. See also _HashedSeq in the standard
library functools implementation.
"""
__hashvalue = None
def __hash__(self, hash=tuple.__hash__):
hashvalue = self.__hashvalue
if hashvalue is None:
self.__hashvalue = hashvalue = hash(self)
return hashvalue
def __add__(self, other, add=tuple.__add__):
return _HashedTuple(add(self, other))
def __radd__(self, other, add=tuple.__add__):
return _HashedTuple(add(other, self))
def __getstate__(self):
return {}
# sentinel used to seed/separate the keyword part of a cache key
_kwmark = (_HashedTuple,)
def generate_hash(func, *args, **kwargs) -> hex:
    """
    Dassana hash function used for caching AWS clients, keyed on the keyword
    arguments (typically service and region, plus credentials).

    For same-account client fetching, hashing is done purely on service and
    region. For cross-account fetching, DassanaEngine injects STS credentials
    as custom env on the LambdaContext passed as ``context``; those
    credentials are merged into the kwargs (and the context itself dropped)
    so they become part of the key. Dict-valued kwargs are canonicalised via
    a sorted JSON dump, so equal dicts hash identically regardless of key
    order.

    Args:
        func: not involved in the hashing; present only because this function
            is wired in as the key function of make_cached_call.
        args: positional arguments (ignored).
        kwargs: keyword arguments that make up the cache key.

    Returns:
        sha256 hex digest identifying the keyword arguments.
    """
    ctx = kwargs.get('context')
    if issubclass(type(ctx), LambdaContext):
        # replace the context with just the STS credentials it carries
        creds_keys = ['aws_access_key_id', 'aws_secret_access_key', 'aws_session_token']
        env_items = kwargs.pop('context').client_context.env.items()
        kwargs = {**kwargs, **{k: v for k, v in env_items if k in creds_keys}}

    # canonicalise nested dicts so their key order cannot change the hash
    for key in list(kwargs):
        value = kwargs[key]
        if issubclass(type(value), dict):
            kwargs[key] = dumps(value, sort_keys=True, default=str).encode('utf-8')

    # fold the sorted kwargs into one tuple (seeded by _kwmark) and digest it
    return sha256(hex(sum(sorted(kwargs.items()), _kwmark).__hash__()).encode()).hexdigest()
def configure_ttl_cache(maxsize=1024, ttl=60, hash_op=generate_hash):
    """
    Create a memoizing wrapper backed by a time-to-live cache.

    The returned callable has the shape ``wrapper(func, **kwargs)``: it invokes
    ``func(**kwargs)`` and caches the result under the key produced by
    ``hash_op`` for at most ``ttl`` seconds. Built as a higher-order function
    so caching can be deployed on arbitrary calls throughout Dassana Actions.

    :param maxsize: maximum number of entries the TTL cache may hold
    :param ttl: time to live (in seconds) of cached entries
    :param hash_op: key function applied to the same arguments the wrapper receives
    :return: the caching wrapper function
    """
    ttl_store = TTLCache(maxsize=maxsize, ttl=ttl)

    @cached(ttl_store, key=hash_op)
    def cached_invoke(func, *args, **kwargs):
        return func(**kwargs)

    return cached_invoke
def configure_lru_cache(maxsize=1024, hash_op=generate_hash):
    """
    Create a memoizing wrapper backed by a least-recently-used cache.

    The returned callable has the shape ``wrapper(func, **kwargs)``: it invokes
    ``func(**kwargs)`` and caches the result under the key produced by
    ``hash_op``, evicting least-recently-used entries past ``maxsize``.

    :param maxsize: maximum number of entries the LRU cache may hold
    :param hash_op: key function applied to the same arguments the wrapper receives
    :return: the caching wrapper function
    """
    lru_store = LRUCache(maxsize=maxsize)

    @cached(lru_store, key=hash_op)
    def cached_invoke(func, *args, **kwargs):
        return func(**kwargs)

    return cached_invoke
| [
"noreply@github.com"
] | Ctfbuster.noreply@github.com |
bbd443101974a2682be5ddc0c8899a35e0286840 | 071416b05026dfc5e21e32c7f1846ab8475acf97 | /flowcontrols/maxof2.py | f29a9834bf67aff1ed422d09ef4178dbd8ee970e | [] | no_license | Sreerag07/bankproject | a7acf65b45c9e5c3ccace7ff3d755c33cf8a4fb0 | bb28e7c92cbfa1c1810d20eb4767a479eee5f015 | refs/heads/master | 2023-04-20T01:15:47.496059 | 2021-05-19T08:03:54 | 2021-05-19T08:03:54 | 368,788,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | num1=int(input("Enter first number"))
num2=int(input("Enter second number"))
if(num1<num2):
print(num2,"is greater than",num1)
else:
print(num1,"greater than",num2) | [
"sgpakkam@gmail.com"
] | sgpakkam@gmail.com |
38120b81104c04a2c47dbae220284a34e22896ab | d60b41462ab5dd83f3e95d740cacc2d1e5051232 | /examples/src/dbnd_examples/orchestration/dbnd_spark/word_count.py | 664d7555b5f8b4fba8877f105cc1cbb5ac69cbe0 | [
"Apache-2.0"
] | permissive | hyunjay/dbnd | 0581ca493fc2a3fa1cab74d0bb8fa620b1669f85 | ab5a8ebf5984e73d0c7129a6898fed98a239b90b | refs/heads/master | 2023-07-05T10:02:07.112720 | 2021-08-05T13:53:50 | 2021-08-05T13:53:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,967 | py | import pyspark.sql as spark
from databand import output, parameter, pipeline
from databand.tasks import PipelineTask, PySparkTask, SparkTask
from dbnd import log_dataframe, log_metric
from dbnd_examples.dbnd_spark import spark_folder, spark_script
from dbnd_spark.spark import PySparkInlineTask, spark_task
from dbnd_spark.spark_config import SparkConfig
from targets import Target
from targets.target_config import FileFormat
# Fully-qualified JVM entry points for the example Spark word-count jobs.
COUNT_WITH_HTML_MAIN_CLASS = "ai.databand.examples.WordCountWithHtml"
WORD_COUNT_MAIN_CLASS = "ai.databand.examples.WordCount"
def _mvn_target_file(*path):
    """Return a path under the Maven build output directory (spark-jvm/target)."""
    return spark_folder("spark-jvm/target", *path)
class WordCountTask(SparkTask):
    """Runs the JVM word-count Spark job over ``text``, writing ``counters``."""

    text = parameter.data  # input text file
    counters = output  # location for the word-count results

    main_class = WORD_COUNT_MAIN_CLASS
    defaults = {SparkConfig.driver_memory: "2G"}

    def application_args(self):
        # Positional CLI arguments for the Spark application: <input> <output>.
        return [self.text, self.counters]
class WordCountPySparkTask(PySparkTask):
    """Runs the PySpark word-count script over ``text``, writing ``counters``."""

    text = parameter.data  # input text file
    counters = output  # location for the word-count results

    python_script = spark_script("word_count.py")

    def application_args(self):
        # Positional CLI arguments for the script: <input> <output>.
        return [self.text, self.counters]
class WordCountPipeline(PipelineTask):
    """Runs both the JVM and the PySpark word-count tasks over the same input."""

    text = parameter.data  # shared input text file

    with_spark = output  # result of the JVM implementation
    with_pyspark = output  # result of the PySpark implementation

    def band(self):
        # Wire both implementations into the pipeline outputs.
        self.with_spark = WordCountTask(text=self.text)
        self.with_pyspark = WordCountPySparkTask(text=self.text)
@pipeline
def word_count_new_cluster():
    """Run WordCountTask on a newly created GCP Dataproc cluster."""
    wc = WordCountTask()

    # Imported lazily so dbnd_gcp is only required when this pipeline is used.
    from dbnd_gcp.dataproc.dataproc import DataProcCtrl

    # Create the cluster first, then run the word-count task on it.
    create = DataProcCtrl(wc).create_engine()
    wc.set_upstream(create)
@spark_task(result=output[spark.DataFrame])
def word_count_inline(text=parameter.csv[spark.DataFrame], counters=output.txt.data):
    # type: (spark.DataFrame, Target) -> spark.DataFrame
    """
    Count word occurrences in the first column of ``text``.

    Writes the raw (word, count) RDD to ``counters``, logs the counts
    DataFrame and a sample metric, and returns the DataFrame.
    """
    from operator import add

    from dbnd_spark.spark import get_spark_session

    lines = text.rdd.map(lambda r: r[0])
    counts = (
        lines.flatMap(lambda x: x.split(" ")).map(lambda x: (x, 1)).reduceByKey(add)
    )
    counts.saveAsTextFile(str(counters))

    # Renamed from `output`, which shadowed the module-level `databand.output`
    # import used in the decorator and signature defaults.
    collected = counts.collect()
    for (word, count) in collected:
        print("%s: %i" % (word, count))

    counts_df = get_spark_session().createDataFrame(counts)
    log_dataframe("counts_df", counts_df)
    log_metric("test", 1)
    return counts_df
class WordCountSparkInline(PySparkInlineTask):
    """Inline PySpark task counting word occurrences in ``text``."""

    text = parameter.csv[spark.DataFrame]  # input DataFrame (first column holds text)
    counters = output.txt.data  # raw RDD output location
    counters_auto_save = output[spark.DataFrame]  # auto-saved counts DataFrame

    def run(self):
        from operator import add

        from dbnd_spark.spark import get_spark_session

        lines = self.text.rdd.map(lambda r: r[0])
        counts = (
            lines.flatMap(lambda x: x.split(" ")).map(lambda x: (x, 1)).reduceByKey(add)
        )
        counts.saveAsTextFile(str(self.counters))

        # Renamed from `output`, which shadowed the module-level
        # `databand.output` import used in the class attribute declarations.
        collected = counts.collect()
        for (word, count) in collected:
            print("%s: %i" % (word, count))

        self.counters_auto_save = get_spark_session().createDataFrame(counts)
| [
"viktor.danyliuk@databand.ai"
] | viktor.danyliuk@databand.ai |
599e2fc5c6b112eed4b7744efae3c7f0da0e6138 | 45f6a4dfc837998565d4e4e4cde258a27fdbd424 | /learn_tu_you/wx_superboss/trunk/hall37-newfish/src/newfish/player/friend_player.py | 8703d88e47c7657705c6f268ca176b7b5e80748f | [] | no_license | isoundy000/learn_python | c220966c42187335c5342269cafc6811ac04bab3 | fa1591863985a418fd361eb6dac36d1301bc1231 | refs/heads/master | 2022-12-29T10:27:37.857107 | 2020-10-16T03:52:44 | 2020-10-16T03:52:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | # -*- coding=utf-8 -*-
"""
Created by lichen on 2018/1/12.
"""
import random
from freetime.util import log as ftlog
from newfish.player.normal_player import FishNormalPlayer
class FishFriendPlayer(FishNormalPlayer):
    """Player variant for friend rooms; fans table events out to all seated players."""

    def triggerCatchFishEvent(self, event):
        """Override of the parent class method: handle a fish-catch event."""
        self.achieveSystem and self.achieveSystem.triggerCatchFishEvent(event)
        self.activitySystem and self.activitySystem.dealCatchFish(event)
        coinAddition = 0
        if 0 < event.gainChip < self.catchBonus:  # coin bonus applied to the catch
            coinAddition = event.gainChip
            self.catchBonus -= coinAddition
        if ftlog.is_debug():
            ftlog.debug("triggerCatchFishEvent", event.userId, self.catchBonus, event.gainChip, coinAddition)
        # Forward the catch to every seated player's task system.
        for player in self.table.players:
            if player and player.taskSystemUser:
                player.taskSystemUser.dealCatchEvent(event, coinAddition)

    def triggerComboEvent(self, event):
        """
        Handle a combo (multi-catch) event by forwarding it to every seated
        player's task system.
        """
        for player in self.table.players:
            if player and player.taskSystemUser:
                player.taskSystemUser.dealComboEvent(event)

    def triggerUseSkillEvent(self, event):
        """Handle a skill-use event."""
        self.activitySystem and self.activitySystem.useSkill(event.skillId)
        # Forward the skill usage to every seated player's task system.
        for player in self.table.players:
            if player and player.taskSystemUser:
                player.taskSystemUser.dealUserSkillEvent(event)
"1737785826@qq.com"
] | 1737785826@qq.com |
b178449f746233506378a65ca7fb558a662b9136 | 2b091b847d90ed95d3b14e494596c94be3aa99f5 | /app.py | 5d37eebc1b312bdcf6c752cf2abce94706061d2a | [] | no_license | shun-uscpa/image6 | 37e5f14d546efe29a323d4dd2a14ec4fb45ce6de | 29668a2f0ce20d938348930bfbf183477436adc9 | refs/heads/main | 2023-06-14T01:08:18.038809 | 2021-07-11T12:00:47 | 2021-07-11T12:00:47 | 384,937,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | import plotly.express as px
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from skimage import data
import json
# Sample image (scikit-image's bundled cat photo) displayed in the figure.
img = data.chelsea()
fig = px.imshow(img)
# Start in closed-path drawing mode so the user can immediately draw shapes.
fig.update_layout(dragmode="drawclosedpath")
config = {
    # Extra mode-bar buttons enabling shape drawing/erasing on the figure.
    "modeBarButtonsToAdd": [
        "drawline",
        "drawopenpath",
        "drawclosedpath",
        "drawcircle",
        "drawrect",
        "eraseshape",
    ]
}

# Build App
app = dash.Dash(__name__)
server = app.server  # exposed for WSGI servers (e.g. gunicorn)
app.layout = html.Div(
    [
        html.H4("Draw a shape, then modify it"),
        dcc.Graph(id="fig-image", figure=fig, config=config),
        dcc.Markdown("Characteristics of shapes"),
        html.Pre(id="annotations-pre"),
    ]
)
@app.callback(
    Output("annotations-pre", "children"),
    Input("fig-image", "relayoutData"),
    prevent_initial_call=True,
)
def on_new_annotation(relayout_data):
    """Render the first shape-related relayoutData entry as pretty JSON.

    Fixed: the original concatenated ``key + ': ' + value``, which raised
    TypeError when the value was not a string (the "shapes" key holds a
    list of dicts). The value is now serialized with json.dumps instead.
    """
    for key in relayout_data:
        if "shapes" in key:
            return "%s: %s" % (key, json.dumps(relayout_data[key], indent=2))
    return dash.no_update
# Run the Dash development server when executed directly.
if __name__ == "__main__":
    app.run_server(debug=True)
"noreply@github.com"
] | shun-uscpa.noreply@github.com |
bbb2ee76bc0fc41253293dc9058ef1aeacb5d875 | 0a6e09e89c7a84848c923858f64dcb0b40bbcf3e | /Introduction/Ex1.py | cbc9e6cac46a57e4bc188c37b886edf1f4d0ce9a | [] | no_license | etu32270/Python | 1a472130ae5f69a6f4ed28fc6b98e0f7473b8e60 | dd64bd4880b7019124b27efd9a355fde56b8ec47 | refs/heads/master | 2020-03-31T05:36:40.061845 | 2018-10-18T10:05:08 | 2018-10-18T10:05:08 | 151,952,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | def onetoten():
a = 0
for a in range(10):
print(a)
a += a
onetoten()
def square():
a = 0
for a in range(20):
print(a ** 2)
a += a
square()
def cube():
a = 0
for a in range(20):
print(a ** 3)
a += a
cube() | [
"etu32270@henallux.be"
] | etu32270@henallux.be |
f7b455d25d636ce14e2c9d514ff1bbcd1e1d609e | 67ee1ba812828adb742d0f7e278696693653b3bc | /myvenv/Lib/site-packages/django/db/models/fields/related.py | 59a6220b2f0835292b583d3975e4f2ffbdd64529 | [] | no_license | Nivial/my-first-blog | 909c92fbfee31e677eeeaeb8bdca789386173f76 | aee65c5c4b6006bc61741735808eb8b608596e61 | refs/heads/master | 2021-01-10T15:06:14.590661 | 2016-02-06T07:04:50 | 2016-02-06T07:04:50 | 51,192,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114,515 | py | from __future__ import unicode_literals
import warnings
from operator import attrgetter
from django import forms
from django.apps import apps
from django.core import checks, exceptions
from django.core.exceptions import FieldDoesNotExist
from django.db import connection, connections, router, transaction
from django.db.backends import utils
from django.db.models import Q, signals
from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL
from django.db.models.fields import (
BLANK_CHOICE_DASH, AutoField, Field, IntegerField, PositiveIntegerField,
PositiveSmallIntegerField,
)
from django.db.models.lookups import IsNull
from django.db.models.query import QuerySet
from django.db.models.query_utils import PathInfo
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property, curry
from django.utils.translation import ugettext_lazy as _
# Placeholder string used to declare a ForeignKey/M2M pointing back at the
# model that defines it, e.g. ``models.ForeignKey('self')``.
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
def add_lazy_relation(cls, field, relation, operation):
    """
    Adds a lookup on ``cls`` when a related field is defined using a string,
    i.e.::

        class MyModel(Model):
            fk = ForeignKey("AnotherModel")

    This string can be:

        * RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
          relation.

        * The name of a model (i.e "AnotherModel") to indicate another model in
          the same app.

        * An app-label and model name (i.e. "someapp.AnotherModel") to indicate
          another model in a different app.

    If the other model hasn't yet been loaded -- almost a given if you're using
    lazy relationships -- then the relation won't be set up until the
    class_prepared signal fires at the end of model initialization.

    operation is the work that must be performed once the relation can be resolved.
    """
    # Check for recursive relations
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        app_label = cls._meta.app_label
        model_name = cls.__name__
    else:
        # Look for an "app.Model" relation
        if isinstance(relation, six.string_types):
            try:
                app_label, model_name = relation.split(".")
            except ValueError:
                # If we can't split, assume a model in current app
                app_label = cls._meta.app_label
                model_name = relation
        else:
            # it's actually a model class
            app_label = relation._meta.app_label
            model_name = relation._meta.object_name

    # Try to look up the related model, and if it's already loaded resolve the
    # string right away. If get_registered_model raises a LookupError, it means
    # that the related model isn't loaded yet, so we need to pend the relation
    # until the class is prepared.
    try:
        model = cls._meta.apps.get_registered_model(app_label, model_name)
    except LookupError:
        # Queue (cls, field, operation) until do_pending_lookups resolves it.
        key = (app_label, model_name)
        value = (cls, field, operation)
        cls._meta.apps._pending_lookups.setdefault(key, []).append(value)
    else:
        operation(field, model, cls)
def do_pending_lookups(sender, **kwargs):
    """
    Handle any pending relations to the sending model. Sent from class_prepared.
    """
    key = (sender._meta.app_label, sender.__name__)
    for cls, field, operation in sender._meta.apps._pending_lookups.pop(key, []):
        operation(field, sender, cls)


# Resolve queued string-based relations as soon as each model class is ready.
signals.class_prepared.connect(do_pending_lookups)
class RelatedField(Field):
    """Base class for all relational fields (FK, O2O, M2M)."""

    # Field flags
    one_to_many = False
    one_to_one = False
    many_to_many = False
    many_to_one = False

    @cached_property
    def related_model(self):
        # Can't cache this property until all the models are loaded.
        apps.check_models_ready()
        return self.rel.to

    def check(self, **kwargs):
        """Run system checks for this field (relation validity + name clashes)."""
        errors = super(RelatedField, self).check(**kwargs)
        errors.extend(self._check_related_name_is_valid())
        errors.extend(self._check_relation_model_exists())
        errors.extend(self._check_referencing_to_swapped_model())
        errors.extend(self._check_clashes())
        return errors

    def _check_related_name_is_valid(self):
        """related_name must be a valid Python identifier or end with '+' (E306)."""
        import re
        import keyword
        related_name = self.rel.related_name
        is_valid_id = (related_name and re.match('^[a-zA-Z_][a-zA-Z0-9_]*$', related_name)
                       and not keyword.iskeyword(related_name))
        if related_name and not (is_valid_id or related_name.endswith('+')):
            return [
                checks.Error(
                    "The name '%s' is invalid related_name for field %s.%s" %
                    (self.rel.related_name, self.model._meta.object_name,
                     self.name),
                    hint="Related name must be a valid Python identifier or end with a '+'",
                    obj=self,
                    id='fields.E306',
                )
            ]
        return []

    def _check_relation_model_exists(self):
        """The target model must be installed and concrete (E300)."""
        rel_is_missing = self.rel.to not in apps.get_models()
        rel_is_string = isinstance(self.rel.to, six.string_types)
        model_name = self.rel.to if rel_is_string else self.rel.to._meta.object_name
        if rel_is_missing and (rel_is_string or not self.rel.to._meta.swapped):
            return [
                checks.Error(
                    ("Field defines a relation with model '%s', which "
                     "is either not installed, or is abstract.") % model_name,
                    hint=None,
                    obj=self,
                    id='fields.E300',
                )
            ]
        return []

    def _check_referencing_to_swapped_model(self):
        """The target model must not have been swapped out via settings (E301)."""
        if (self.rel.to not in apps.get_models() and
                not isinstance(self.rel.to, six.string_types) and
                self.rel.to._meta.swapped):
            model = "%s.%s" % (
                self.rel.to._meta.app_label,
                self.rel.to._meta.object_name
            )
            return [
                checks.Error(
                    ("Field defines a relation with the model '%s', "
                     "which has been swapped out.") % model,
                    hint="Update the relation to point at 'settings.%s'." % self.rel.to._meta.swappable,
                    obj=self,
                    id='fields.E301',
                )
            ]
        return []

    def _check_clashes(self):
        """ Check accessor and reverse query name clashes. """
        from django.db.models.base import ModelBase

        errors = []
        opts = self.model._meta

        # `f.rel.to` may be a string instead of a model. Skip if model name is
        # not resolved.
        if not isinstance(self.rel.to, ModelBase):
            return []

        # If the field doesn't install backward relation on the target model (so
        # `is_hidden` returns True), then there are no clashes to check and we
        # can skip these fields.
        if self.rel.is_hidden():
            return []

        try:
            self.rel
        except AttributeError:
            return []

        # Consider that we are checking field `Model.foreign` and the models
        # are:
        #
        #     class Target(models.Model):
        #         model = models.IntegerField()
        #         model_set = models.IntegerField()
        #
        #     class Model(models.Model):
        #         foreign = models.ForeignKey(Target)
        #         m2m = models.ManyToManyField(Target)

        rel_opts = self.rel.to._meta
        # rel_opts.object_name == "Target"
        rel_name = self.rel.get_accessor_name()  # i. e. "model_set"
        rel_query_name = self.related_query_name()  # i. e. "model"
        field_name = "%s.%s" % (opts.object_name,
                                self.name)  # i. e. "Model.field"

        # Check clashes between accessor or reverse query name of `field`
        # and any other field name -- i.e. accessor for Model.foreign is
        # model_set and it clashes with Target.model_set.
        potential_clashes = rel_opts.fields + rel_opts.many_to_many
        for clash_field in potential_clashes:
            clash_name = "%s.%s" % (rel_opts.object_name,
                                    clash_field.name)  # i. e. "Target.model_set"
            if clash_field.name == rel_name:
                errors.append(
                    checks.Error(
                        "Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name),
                        hint=("Rename field '%s', or add/change a related_name "
                              "argument to the definition for field '%s'.") % (clash_name, field_name),
                        obj=self,
                        id='fields.E302',
                    )
                )

            if clash_field.name == rel_query_name:
                errors.append(
                    checks.Error(
                        "Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name),
                        hint=("Rename field '%s', or add/change a related_name "
                              "argument to the definition for field '%s'.") % (clash_name, field_name),
                        obj=self,
                        id='fields.E303',
                    )
                )

        # Check clashes between accessors/reverse query names of `field` and
        # any other field accessor -- i. e. Model.foreign accessor clashes with
        # Model.m2m accessor.
        potential_clashes = (r for r in rel_opts.related_objects if r.field is not self)
        for clash_field in potential_clashes:
            clash_name = "%s.%s" % (  # i. e. "Model.m2m"
                clash_field.related_model._meta.object_name,
                clash_field.field.name)
            if clash_field.get_accessor_name() == rel_name:
                errors.append(
                    checks.Error(
                        "Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name),
                        hint=("Add or change a related_name argument "
                              "to the definition for '%s' or '%s'.") % (field_name, clash_name),
                        obj=self,
                        id='fields.E304',
                    )
                )

            if clash_field.get_accessor_name() == rel_query_name:
                errors.append(
                    checks.Error(
                        "Reverse query name for '%s' clashes with reverse query name for '%s'."
                        % (field_name, clash_name),
                        hint=("Add or change a related_name argument "
                              "to the definition for '%s' or '%s'.") % (field_name, clash_name),
                        obj=self,
                        id='fields.E305',
                    )
                )

        return errors

    def db_type(self, connection):
        '''By default related field will not have a column
        as it relates columns to another table'''
        return None

    def contribute_to_class(self, cls, name, virtual_only=False):
        """Attach the field to ``cls`` and resolve (possibly lazy) relations."""
        sup = super(RelatedField, self)

        # Store the opts for related_query_name()
        self.opts = cls._meta

        if hasattr(sup, 'contribute_to_class'):
            sup.contribute_to_class(cls, name, virtual_only=virtual_only)

        if not cls._meta.abstract and self.rel.related_name:
            # Interpolate %(class)s / %(app_label)s placeholders in related_name.
            related_name = force_text(self.rel.related_name) % {
                'class': cls.__name__.lower(),
                'app_label': cls._meta.app_label.lower()
            }
            self.rel.related_name = related_name

        other = self.rel.to
        if isinstance(other, six.string_types) or other._meta.pk is None:
            def resolve_related_class(field, model, cls):
                field.rel.to = model
                field.do_related_class(model, cls)
            add_lazy_relation(cls, self, other, resolve_related_class)
        else:
            self.do_related_class(other, cls)

    @property
    def swappable_setting(self):
        """
        Gets the setting that this is powered from for swapping, or None
        if it's not swapped in / marked with swappable=False.
        """
        if self.swappable:
            # Work out string form of "to"
            if isinstance(self.rel.to, six.string_types):
                to_string = self.rel.to
            else:
                to_string = "%s.%s" % (
                    self.rel.to._meta.app_label,
                    self.rel.to._meta.object_name,
                )
            # See if anything swapped/swappable matches
            for model in apps.get_models(include_swapped=True):
                if model._meta.swapped:
                    if model._meta.swapped == to_string:
                        return model._meta.swappable
                if ("%s.%s" % (model._meta.app_label, model._meta.object_name)) == to_string and model._meta.swappable:
                    return model._meta.swappable
        return None

    def set_attributes_from_rel(self):
        """Derive name/verbose_name from the target model when not set explicitly."""
        self.name = self.name or (self.rel.to._meta.model_name + '_' + self.rel.to._meta.pk.name)
        if self.verbose_name is None:
            self.verbose_name = self.rel.to._meta.verbose_name
        self.rel.set_field_name()

    @property
    def related(self):
        # Deprecated alias for self.rel (removed in Django 2.0).
        warnings.warn(
            "Usage of field.related has been deprecated. Use field.rel instead.",
            RemovedInDjango20Warning, 2)
        return self.rel

    def do_related_class(self, other, cls):
        """Finish field setup once the target model class is available."""
        self.set_attributes_from_rel()
        if not cls._meta.abstract:
            self.contribute_to_related_class(other, self.rel)

    def get_limit_choices_to(self):
        """Returns 'limit_choices_to' for this model field.

        If it is a callable, it will be invoked and the result will be
        returned.
        """
        if callable(self.rel.limit_choices_to):
            return self.rel.limit_choices_to()
        return self.rel.limit_choices_to

    def formfield(self, **kwargs):
        """Passes ``limit_choices_to`` to field being constructed.

        Only passes it if there is a type that supports related fields.
        This is a similar strategy used to pass the ``queryset`` to the field
        being constructed.
        """
        defaults = {}
        if hasattr(self.rel, 'get_related_field'):
            # If this is a callable, do not invoke it here. Just pass
            # it in the defaults for when the form class will later be
            # instantiated.
            limit_choices_to = self.rel.limit_choices_to
            defaults.update({
                'limit_choices_to': limit_choices_to,
            })
        defaults.update(kwargs)
        return super(RelatedField, self).formfield(**defaults)

    def related_query_name(self):
        # This method defines the name that can be used to identify this
        # related object in a table-spanning query. It uses the lower-cased
        # object_name by default, but this can be overridden with the
        # "related_name" option.
        return self.rel.related_query_name or self.rel.related_name or self.opts.model_name
class SingleRelatedObjectDescriptor(object):
    """Accessor for the single related object of a reverse one-to-one relation."""

    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class pointed to by a related field.
    # In the example "place.restaurant", the restaurant attribute is a
    # SingleRelatedObjectDescriptor instance.
    def __init__(self, related):
        self.related = related
        self.cache_name = related.get_cache_name()

    @cached_property
    def RelatedObjectDoesNotExist(self):
        # The exception isn't created at initialization time for the sake of
        # consistency with `ReverseSingleRelatedObjectDescriptor`.
        return type(
            str('RelatedObjectDoesNotExist'),
            (self.related.related_model.DoesNotExist, AttributeError),
            {}
        )

    def is_cached(self, instance):
        # True if the related object is already cached on ``instance``.
        return hasattr(instance, self.cache_name)

    def get_queryset(self, **hints):
        manager = self.related.related_model._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if not getattr(manager, 'use_for_related_fields', False):
            manager = self.related.related_model._base_manager
        return manager.db_manager(hints=hints).all()

    def get_prefetch_queryset(self, instances, queryset=None):
        """Build the queryset and key functions used by prefetch_related()."""
        if queryset is None:
            queryset = self.get_queryset()
        queryset._add_hints(instance=instances[0])

        rel_obj_attr = attrgetter(self.related.field.attname)
        instance_attr = lambda obj: obj._get_pk_val()
        instances_dict = {instance_attr(inst): inst for inst in instances}
        query = {'%s__in' % self.related.field.name: instances}
        queryset = queryset.filter(**query)

        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        rel_obj_cache_name = self.related.field.get_cache_name()
        for rel_obj in queryset:
            instance = instances_dict[rel_obj_attr(rel_obj)]
            setattr(rel_obj, rel_obj_cache_name, instance)
        return queryset, rel_obj_attr, instance_attr, True, self.cache_name

    def __get__(self, instance, instance_type=None):
        """Fetch (and cache) the related object; raise if it does not exist."""
        if instance is None:
            return self
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            related_pk = instance._get_pk_val()
            if related_pk is None:
                rel_obj = None
            else:
                params = {}
                for lh_field, rh_field in self.related.field.related_fields:
                    params['%s__%s' % (self.related.field.name, rh_field.name)] = getattr(instance, rh_field.attname)
                try:
                    rel_obj = self.get_queryset(instance=instance).get(**params)
                except self.related.related_model.DoesNotExist:
                    rel_obj = None
                else:
                    # Seed the reverse cache on the fetched object as well.
                    setattr(rel_obj, self.related.field.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None:
            raise self.RelatedObjectDoesNotExist(
                "%s has no %s." % (
                    instance.__class__.__name__,
                    self.related.get_accessor_name()
                )
            )
        else:
            return rel_obj

    def __set__(self, instance, value):
        """Assign the related object, validating type, nullability and routing."""
        # The similarity of the code below to the code in
        # ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.

        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.related.field.null is False:
            raise ValueError(
                'Cannot assign None: "%s.%s" does not allow null values.' % (
                    instance._meta.object_name,
                    self.related.get_accessor_name(),
                )
            )
        elif value is not None and not isinstance(value, self.related.related_model):
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.related.get_accessor_name(),
                    self.related.related_model._meta.object_name,
                )
            )
        elif value is not None:
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

        related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
        if not self.related.field.allow_unsaved_instance_assignment and None in related_pk:
            raise ValueError(
                'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
                (value, instance._meta.object_name)
            )

        # Set the value of the related field to the value of the related object's related field
        for index, field in enumerate(self.related.field.local_related_fields):
            setattr(value, field.attname, related_pk[index])

        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
    """Accessor for the single related object on the side that defines the FK."""

    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class that defines the related field.
    # In the example "choice.poll", the poll attribute is a
    # ReverseSingleRelatedObjectDescriptor instance.
    def __init__(self, field_with_rel):
        self.field = field_with_rel
        self.cache_name = self.field.get_cache_name()

    @cached_property
    def RelatedObjectDoesNotExist(self):
        # The exception can't be created at initialization time since the
        # related model might not be resolved yet; `rel.to` might still be
        # a string model reference.
        return type(
            str('RelatedObjectDoesNotExist'),
            (self.field.rel.to.DoesNotExist, AttributeError),
            {}
        )

    def is_cached(self, instance):
        # True if the related object is already cached on ``instance``.
        return hasattr(instance, self.cache_name)

    def get_queryset(self, **hints):
        manager = self.field.rel.to._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if not getattr(manager, 'use_for_related_fields', False):
            manager = self.field.rel.to._base_manager
        return manager.db_manager(hints=hints).all()

    def get_prefetch_queryset(self, instances, queryset=None):
        """Build the queryset and key functions used by prefetch_related()."""
        if queryset is None:
            queryset = self.get_queryset()
        queryset._add_hints(instance=instances[0])

        rel_obj_attr = self.field.get_foreign_related_value
        instance_attr = self.field.get_local_related_value
        instances_dict = {instance_attr(inst): inst for inst in instances}
        related_field = self.field.foreign_related_fields[0]

        # FIXME: This will need to be revisited when we introduce support for
        # composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager in hidden
        # (related_name ends with a '+'). Refs #21410.
        # The check for len(...) == 1 is a special case that allows the query
        # to be join-less and smaller. Refs #21760.
        if self.field.rel.is_hidden() or len(self.field.foreign_related_fields) == 1:
            query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
        else:
            query = {'%s__in' % self.field.related_query_name(): instances}
        queryset = queryset.filter(**query)

        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        if not self.field.rel.multiple:
            rel_obj_cache_name = self.field.rel.get_cache_name()
            for rel_obj in queryset:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, rel_obj_cache_name, instance)
        return queryset, rel_obj_attr, instance_attr, True, self.cache_name

    def __get__(self, instance, instance_type=None):
        """Fetch (and cache) the related object; raise if missing and not null."""
        if instance is None:
            return self
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            val = self.field.get_local_related_value(instance)
            if None in val:
                rel_obj = None
            else:
                params = {
                    rh_field.attname: getattr(instance, lh_field.attname)
                    for lh_field, rh_field in self.field.related_fields}
                qs = self.get_queryset(instance=instance)
                extra_filter = self.field.get_extra_descriptor_filter(instance)
                if isinstance(extra_filter, dict):
                    params.update(extra_filter)
                    qs = qs.filter(**params)
                else:
                    qs = qs.filter(extra_filter, **params)
                # Assuming the database enforces foreign keys, this won't fail.
                rel_obj = qs.get()
                if not self.field.rel.multiple:
                    setattr(rel_obj, self.field.rel.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None and not self.field.null:
            raise self.RelatedObjectDoesNotExist(
                "%s has no %s." % (self.field.model.__name__, self.field.name)
            )
        else:
            return rel_obj

    def __set__(self, instance, value):
        """Assign the related object, validating type, nullability and routing."""
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.field.null is False:
            raise ValueError(
                'Cannot assign None: "%s.%s" does not allow null values.' %
                (instance._meta.object_name, self.field.name)
            )
        elif value is not None and not isinstance(value, self.field.rel.to):
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.field.name,
                    self.field.rel.to._meta.object_name,
                )
            )
        elif value is not None:
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

        # If we're setting the value of a OneToOneField to None, we need to clear
        # out the cache on any old related object. Otherwise, deleting the
        # previously-related object will also cause this object to be deleted,
        # which is wrong.
        if value is None:
            # Look up the previously-related object, which may still be available
            # since we've not yet cleared out the related field.
            # Use the cache directly, instead of the accessor; if we haven't
            # populated the cache, then we don't care - we're only accessing
            # the object to invalidate the accessor cache, so there's no
            # need to populate the cache just to expire it again.
            related = getattr(instance, self.cache_name, None)

            # If we've got an old related object, we need to clear out its
            # cache. This cache also might not exist if the related object
            # hasn't been accessed yet.
            if related is not None:
                setattr(related, self.field.rel.get_cache_name(), None)

            for lh_field, rh_field in self.field.related_fields:
                setattr(instance, lh_field.attname, None)

        # Set the values of the related field.
        else:
            for lh_field, rh_field in self.field.related_fields:
                pk = value._get_pk_val()
                if not self.field.allow_unsaved_instance_assignment and pk is None:
                    raise ValueError(
                        'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
                        (value, self.field.rel.to._meta.object_name)
                    )
                setattr(instance, lh_field.attname, getattr(value, rh_field.attname))

        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        if value is not None and not self.field.rel.multiple:
            setattr(value, self.field.rel.get_cache_name(), instance)
def create_foreign_related_manager(superclass, rel_field, rel_model):
    """Build a manager class for the reverse side of a ForeignKey.

    ``superclass`` is the related model's default manager class,
    ``rel_field`` is the ForeignKey on ``rel_model`` that points back at the
    instance the manager is bound to (e.g. the manager behind
    ``poll.choice_set``).
    """
    class RelatedManager(superclass):
        def __init__(self, instance):
            # `instance` is the object on the "one" side of the relation.
            super(RelatedManager, self).__init__()
            self.instance = instance
            self.core_filters = {rel_field.name: instance}
            self.model = rel_model
        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_foreign_related_manager(manager.__class__, rel_field, rel_model)
            return manager_class(self.instance)
        do_not_call_in_templates = True
        def get_queryset(self):
            # Serve the prefetch_related() cache when it is populated.
            try:
                return self.instance._prefetched_objects_cache[rel_field.related_query_name()]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.model, instance=self.instance)
                empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
                qs = super(RelatedManager, self).get_queryset()
                qs._add_hints(instance=self.instance)
                if self._db:
                    qs = qs.using(self._db)
                qs = qs.filter(**self.core_filters)
                # A join against NULL (or '' on backends that store '' as
                # NULL) can never match; short-circuit to an empty queryset.
                for field in rel_field.foreign_related_fields:
                    val = getattr(self.instance, field.attname)
                    if val is None or (val == '' and empty_strings_as_null):
                        return qs.none()
                qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}}
                return qs
        def get_prefetch_queryset(self, instances, queryset=None):
            # Fetch the related objects for several instances in one query
            # (used by prefetch_related()).
            if queryset is None:
                queryset = super(RelatedManager, self).get_queryset()
            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)
            rel_obj_attr = rel_field.get_local_related_value
            instance_attr = rel_field.get_foreign_related_value
            instances_dict = {instance_attr(inst): inst for inst in instances}
            query = {'%s__in' % rel_field.name: instances}
            queryset = queryset.filter(**query)
            # Since we just bypassed this class' get_queryset(), we must manage
            # the reverse relation manually.
            for rel_obj in queryset:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, rel_field.name, instance)
            cache_name = rel_field.related_query_name()
            return queryset, rel_obj_attr, instance_attr, False, cache_name
        def add(self, *objs):
            # Point each object's FK at `instance` and save it, all in one
            # atomic block.
            objs = list(objs)
            db = router.db_for_write(self.model, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                for obj in objs:
                    if not isinstance(obj, self.model):
                        raise TypeError("'%s' instance expected, got %r" %
                                        (self.model._meta.object_name, obj))
                    setattr(obj, rel_field.name, self.instance)
                    obj.save()
        add.alters_data = True
        def create(self, **kwargs):
            # Create a related object with its FK preset to `instance`.
            kwargs[rel_field.name] = self.instance
            db = router.db_for_write(self.model, instance=self.instance)
            return super(RelatedManager, self.db_manager(db)).create(**kwargs)
        create.alters_data = True
        def get_or_create(self, **kwargs):
            # get_or_create() scoped to this relation (FK filled in).
            kwargs[rel_field.name] = self.instance
            db = router.db_for_write(self.model, instance=self.instance)
            return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
        get_or_create.alters_data = True
        def update_or_create(self, **kwargs):
            # update_or_create() scoped to this relation (FK filled in).
            kwargs[rel_field.name] = self.instance
            db = router.db_for_write(self.model, instance=self.instance)
            return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
        update_or_create.alters_data = True
        # remove() and clear() are only provided if the ForeignKey can have a value of null.
        if rel_field.null:
            def remove(self, *objs, **kwargs):
                # Detach `objs` from `instance` by setting their FK to NULL.
                # bulk=True (default) uses a single UPDATE.
                if not objs:
                    return
                bulk = kwargs.pop('bulk', True)
                val = rel_field.get_foreign_related_value(self.instance)
                old_ids = set()
                for obj in objs:
                    # Is obj actually part of this descriptor set?
                    if rel_field.get_local_related_value(obj) == val:
                        old_ids.add(obj.pk)
                    else:
                        raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, self.instance))
                self._clear(self.filter(pk__in=old_ids), bulk)
            remove.alters_data = True
            def clear(self, **kwargs):
                # Detach every related object from `instance`.
                bulk = kwargs.pop('bulk', True)
                self._clear(self, bulk)
            clear.alters_data = True
            def _clear(self, queryset, bulk):
                # bulk=True issues one UPDATE; bulk=False saves each object
                # individually so per-object save() logic still runs.
                db = router.db_for_write(self.model, instance=self.instance)
                queryset = queryset.using(db)
                if bulk:
                    # `QuerySet.update()` is intrinsically atomic.
                    queryset.update(**{rel_field.name: None})
                else:
                    with transaction.atomic(using=db, savepoint=False):
                        for obj in queryset:
                            setattr(obj, rel_field.name, None)
                            obj.save(update_fields=[rel_field.name])
            _clear.alters_data = True
    return RelatedManager
class ForeignRelatedObjectsDescriptor(object):
    """
    Accessor placed on a model class for the reverse side of a ForeignKey,
    exposing the related-object manager as an attribute. In the example
    ``poll.choice_set``, ``choice_set`` is one of these descriptors.
    """
    def __init__(self, related):
        # `related` is the RelatedObject describing this reverse relation.
        self.related = related

    @cached_property
    def related_manager_cls(self):
        # Build, once per descriptor, a manager class derived from the
        # related model's default manager.
        return create_foreign_related_manager(
            self.related.related_model._default_manager.__class__,
            self.related.field,
            self.related.related_model,
        )

    def __get__(self, instance, instance_type=None):
        # Class-level access hands back the descriptor itself.
        return self if instance is None else self.related_manager_cls(instance)

    def __set__(self, instance, value):
        # Evaluate `value` up front in case it's a queryset that
        # `manager.clear()` below would otherwise mutate. Refs #19816.
        objs = tuple(value)
        manager = self.__get__(instance)
        using = router.db_for_write(manager.model, instance=manager.instance)
        with transaction.atomic(using=using, savepoint=False):
            # A nullable FK lets us empty the set first; otherwise the named
            # objects are simply moved into the set.
            if self.related.field.null:
                manager.clear()
            manager.add(*objs)
def create_many_related_manager(superclass, rel):
    """Creates a manager that subclasses 'superclass' (which is a Manager)
    and adds behavior for many-to-many related objects."""
    class ManyRelatedManager(superclass):
        def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None,
                     source_field_name=None, target_field_name=None, reverse=False,
                     through=None, prefetch_cache_name=None):
            # `source_field_name`/`target_field_name` are the FK names on the
            # `through` model pointing at `instance`'s model and at `model`
            # respectively; `reverse` is True when this manager serves the
            # reverse accessor of the m2m field.
            super(ManyRelatedManager, self).__init__()
            self.model = model
            self.query_field_name = query_field_name
            source_field = through._meta.get_field(source_field_name)
            source_related_fields = source_field.related_fields
            self.core_filters = {}
            for lh_field, rh_field in source_related_fields:
                self.core_filters['%s__%s' % (query_field_name, rh_field.name)] = getattr(instance, rh_field.attname)
            self.instance = instance
            self.symmetrical = symmetrical
            self.source_field = source_field
            self.target_field = through._meta.get_field(target_field_name)
            self.source_field_name = source_field_name
            self.target_field_name = target_field_name
            self.reverse = reverse
            self.through = through
            self.prefetch_cache_name = prefetch_cache_name
            self.related_val = source_field.get_foreign_related_value(instance)
            if None in self.related_val:
                raise ValueError('"%r" needs to have a value for field "%s" before '
                                 'this many-to-many relationship can be used.' %
                                 (instance, source_field_name))
            # Even if this relation is not to pk, we require still pk value.
            # The wish is that the instance has been already saved to DB,
            # although having a pk value isn't a guarantee of that.
            if instance.pk is None:
                raise ValueError("%r instance needs to have a primary key value before "
                                 "a many-to-many relationship can be used." %
                                 instance.__class__.__name__)
        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_many_related_manager(manager.__class__, rel)
            return manager_class(
                model=self.model,
                query_field_name=self.query_field_name,
                instance=self.instance,
                symmetrical=self.symmetrical,
                source_field_name=self.source_field_name,
                target_field_name=self.target_field_name,
                reverse=self.reverse,
                through=self.through,
                prefetch_cache_name=self.prefetch_cache_name,
            )
        do_not_call_in_templates = True
        def _build_remove_filters(self, removed_vals):
            # Build the filter selecting the `through` rows to delete; for a
            # symmetrical self-relation the mirrored rows are included too.
            filters = Q(**{self.source_field_name: self.related_val})
            # No need to add a subquery condition if removed_vals is a QuerySet without
            # filters.
            removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
                                    removed_vals._has_filters())
            if removed_vals_filters:
                filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
            if self.symmetrical:
                symmetrical_filters = Q(**{self.target_field_name: self.related_val})
                if removed_vals_filters:
                    symmetrical_filters &= Q(
                        **{'%s__in' % self.source_field_name: removed_vals})
                filters |= symmetrical_filters
            return filters
        def get_queryset(self):
            # Serve the prefetch_related() cache when it is populated.
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                qs = super(ManyRelatedManager, self).get_queryset()
                qs._add_hints(instance=self.instance)
                if self._db:
                    qs = qs.using(self._db)
                return qs._next_is_sticky().filter(**self.core_filters)
        def get_prefetch_queryset(self, instances, queryset=None):
            # Fetch the related objects for several instances in one query
            # (used by prefetch_related()).
            if queryset is None:
                queryset = super(ManyRelatedManager, self).get_queryset()
            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)
            query = {'%s__in' % self.query_field_name: instances}
            queryset = queryset._next_is_sticky().filter(**query)
            # M2M: need to annotate the query in order to get the primary model
            # that the secondary model was actually related to. We know that
            # there will already be a join on the join table, so we can just add
            # the select.
            # For non-autocreated 'through' models, can't assume we are
            # dealing with PK values.
            fk = self.through._meta.get_field(self.source_field_name)
            join_table = self.through._meta.db_table
            connection = connections[queryset.db]
            qn = connection.ops.quote_name
            queryset = queryset.extra(select={
                '_prefetch_related_val_%s' % f.attname:
                '%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
            return (
                queryset,
                lambda result: tuple(
                    getattr(result, '_prefetch_related_val_%s' % f.attname)
                    for f in fk.local_related_fields
                ),
                lambda inst: tuple(getattr(inst, f.attname) for f in fk.foreign_related_fields),
                False,
                self.prefetch_cache_name,
            )
        def add(self, *objs):
            # Link `objs` to `instance` by inserting rows in the through
            # table; only allowed for auto-created through models.
            if not rel.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError(
                    "Cannot use add() on a ManyToManyField which specifies an "
                    "intermediary model. Use %s.%s's Manager instead." %
                    (opts.app_label, opts.object_name)
                )
            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                self._add_items(self.source_field_name, self.target_field_name, *objs)
                # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
                if self.symmetrical:
                    self._add_items(self.target_field_name, self.source_field_name, *objs)
        add.alters_data = True
        def remove(self, *objs):
            # Unlink `objs` from `instance`; only allowed for auto-created
            # through models.
            if not rel.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError(
                    "Cannot use remove() on a ManyToManyField which specifies "
                    "an intermediary model. Use %s.%s's Manager instead." %
                    (opts.app_label, opts.object_name)
                )
            self._remove_items(self.source_field_name, self.target_field_name, *objs)
        remove.alters_data = True
        def clear(self):
            # Delete every through-table row for this relation, sending the
            # pre_clear/post_clear m2m_changed signals around the delete.
            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                signals.m2m_changed.send(sender=self.through, action="pre_clear",
                                         instance=self.instance, reverse=self.reverse,
                                         model=self.model, pk_set=None, using=db)
                filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
                self.through._default_manager.using(db).filter(filters).delete()
                signals.m2m_changed.send(sender=self.through, action="post_clear",
                                         instance=self.instance, reverse=self.reverse,
                                         model=self.model, pk_set=None, using=db)
        clear.alters_data = True
        def create(self, **kwargs):
            # Create a new target object and immediately link it.
            # This check needs to be done here, since we can't later remove this
            # from the method lookup table, as we do with add and remove.
            if not self.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError(
                    "Cannot use create() on a ManyToManyField which specifies "
                    "an intermediary model. Use %s.%s's Manager instead." %
                    (opts.app_label, opts.object_name)
                )
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
            self.add(new_obj)
            return new_obj
        create.alters_data = True
        def get_or_create(self, **kwargs):
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            # We only need to add() if created because if we got an object back
            # from get() then the relationship already exists.
            if created:
                self.add(obj)
            return obj, created
        get_or_create.alters_data = True
        def update_or_create(self, **kwargs):
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
            # We only need to add() if created because if we got an object back
            # from get() then the relationship already exists.
            if created:
                self.add(obj)
            return obj, created
        update_or_create.alters_data = True
        def _add_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK fieldname in join table for the source object
            # target_field_name: the PK fieldname in join table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.
            # If there aren't any objects, there is nothing to do.
            from django.db.models import Model
            if objs:
                new_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        if not router.allow_relation(obj, self.instance):
                            raise ValueError(
                                'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
                                (obj, self.instance._state.db, obj._state.db)
                            )
                        fk_val = self.through._meta.get_field(
                            target_field_name).get_foreign_related_value(obj)[0]
                        if fk_val is None:
                            raise ValueError(
                                'Cannot add "%r": the value for field "%s" is None' %
                                (obj, target_field_name)
                            )
                        new_ids.add(fk_val)
                    elif isinstance(obj, Model):
                        # A model instance of the wrong type.
                        raise TypeError(
                            "'%s' instance expected, got %r" %
                            (self.model._meta.object_name, obj)
                        )
                    else:
                        # Assume a raw primary-key value.
                        new_ids.add(obj)
                db = router.db_for_write(self.through, instance=self.instance)
                # Drop ids already present in the join table so bulk_create
                # below only inserts genuinely new rows.
                vals = (self.through._default_manager.using(db)
                        .values_list(target_field_name, flat=True)
                        .filter(**{
                            source_field_name: self.related_val[0],
                            '%s__in' % target_field_name: new_ids,
                        }))
                new_ids = new_ids - set(vals)
                with transaction.atomic(using=db, savepoint=False):
                    if self.reverse or source_field_name == self.source_field_name:
                        # Don't send the signal when we are inserting the
                        # duplicate data row for symmetrical reverse entries.
                        signals.m2m_changed.send(sender=self.through, action='pre_add',
                                                 instance=self.instance, reverse=self.reverse,
                                                 model=self.model, pk_set=new_ids, using=db)
                    # Add the ones that aren't there already
                    self.through._default_manager.using(db).bulk_create([
                        self.through(**{
                            '%s_id' % source_field_name: self.related_val[0],
                            '%s_id' % target_field_name: obj_id,
                        })
                        for obj_id in new_ids
                    ])
                    if self.reverse or source_field_name == self.source_field_name:
                        # Don't send the signal when we are inserting the
                        # duplicate data row for symmetrical reverse entries.
                        signals.m2m_changed.send(sender=self.through, action='post_add',
                                                 instance=self.instance, reverse=self.reverse,
                                                 model=self.model, pk_set=new_ids, using=db)
        def _remove_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK colname in join table for the source object
            # target_field_name: the PK colname in join table for the target object
            # *objs - objects to remove
            if not objs:
                return
            # Check that all the objects are of the right type
            old_ids = set()
            for obj in objs:
                if isinstance(obj, self.model):
                    fk_val = self.target_field.get_foreign_related_value(obj)[0]
                    old_ids.add(fk_val)
                else:
                    # Assume a raw primary-key value.
                    old_ids.add(obj)
            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                # Send a signal to the other end if need be.
                signals.m2m_changed.send(sender=self.through, action="pre_remove",
                                         instance=self.instance, reverse=self.reverse,
                                         model=self.model, pk_set=old_ids, using=db)
                target_model_qs = super(ManyRelatedManager, self).get_queryset()
                if target_model_qs._has_filters():
                    old_vals = target_model_qs.using(db).filter(**{
                        '%s__in' % self.target_field.related_field.attname: old_ids})
                else:
                    old_vals = old_ids
                filters = self._build_remove_filters(old_vals)
                self.through._default_manager.using(db).filter(filters).delete()
                signals.m2m_changed.send(sender=self.through, action="post_remove",
                                         instance=self.instance, reverse=self.reverse,
                                         model=self.model, pk_set=old_ids, using=db)
    return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
    """
    Accessor for the reverse side of a ManyToManyField: placed on the model
    that is pointed *at* by the m2m field (rather than the one declaring it).
    In the example ``publication.article_set``, ``article_set`` is one of
    these descriptors.
    """
    def __init__(self, related):
        # `related` is the RelatedObject describing this reverse m2m relation.
        self.related = related

    @cached_property
    def related_manager_cls(self):
        # Built lazily, once, from the related model's default manager class.
        return create_many_related_manager(
            self.related.related_model._default_manager.__class__,
            self.related.field.rel
        )

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        field = self.related.field
        return self.related_manager_cls(
            model=self.related.related_model,
            query_field_name=field.name,
            prefetch_cache_name=field.related_query_name(),
            instance=instance,
            symmetrical=False,
            source_field_name=field.m2m_reverse_field_name(),
            target_field_name=field.m2m_field_name(),
            reverse=True,
            through=field.rel.through,
        )

    def __set__(self, instance, value):
        through = self.related.field.rel.through
        if not through._meta.auto_created:
            opts = through._meta
            raise AttributeError(
                "Cannot set values on a ManyToManyField which specifies an "
                "intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name)
            )
        # Force evaluation of `value` in case it's a queryset whose
        # value could be affected by `manager.clear()`. Refs #19816.
        objs = tuple(value)
        manager = self.__get__(instance)
        using = router.db_for_write(manager.through, instance=manager.instance)
        with transaction.atomic(using=using, savepoint=False):
            manager.clear()
            manager.add(*objs)
class ReverseManyRelatedObjectsDescriptor(object):
    """
    Accessor for the forward side of a ManyToManyField: placed on the model
    that declares the field. In the example ``article.publications``,
    ``publications`` is one of these descriptors.
    """
    def __init__(self, m2m_field):
        self.field = m2m_field

    @property
    def through(self):
        # through is provided so that you have easy access to the through
        # model (Book.authors.through) for inlines, etc. This is done as
        # a property to ensure that the fully resolved value is returned.
        return self.field.rel.through

    @cached_property
    def related_manager_cls(self):
        # Built lazily, once, from the target model's default manager class.
        return create_many_related_manager(
            self.field.rel.to._default_manager.__class__,
            self.field.rel
        )

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        field = self.field
        return self.related_manager_cls(
            model=field.rel.to,
            query_field_name=field.related_query_name(),
            prefetch_cache_name=field.name,
            instance=instance,
            symmetrical=field.rel.symmetrical,
            source_field_name=field.m2m_field_name(),
            target_field_name=field.m2m_reverse_field_name(),
            reverse=False,
            through=field.rel.through,
        )

    def __set__(self, instance, value):
        through = self.field.rel.through
        if not through._meta.auto_created:
            opts = through._meta
            raise AttributeError(
                "Cannot set values on a ManyToManyField which specifies an "
                "intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name)
            )
        # Force evaluation of `value` in case it's a queryset whose
        # value could be affected by `manager.clear()`. Refs #19816.
        objs = tuple(value)
        manager = self.__get__(instance)
        using = router.db_for_write(manager.through, instance=manager.instance)
        with transaction.atomic(using=using, savepoint=False):
            manager.clear()
            manager.add(*objs)
class ForeignObjectRel(object):
    """
    Describes the *reverse* side of a ForeignObject relation, exposing
    enough of the Field API (flags, name, model) to participate in
    Meta/field machinery.
    """
    # Field flags
    auto_created = True
    concrete = False
    editable = False
    is_relation = True
    def __init__(self, field, to, related_name=None, limit_choices_to=None,
                 parent_link=False, on_delete=None, related_query_name=None):
        # `field` is the forward relation field; `to` its target model.
        self.field = field
        self.to = to
        self.related_name = related_name
        self.related_query_name = related_query_name
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        self.multiple = True
        self.parent_link = parent_link
        self.on_delete = on_delete
        self.symmetrical = False
    # Some of the following cached_properties can't be initialized in
    # __init__ as the field doesn't have its model yet. Calling these methods
    # before field.contribute_to_class() has been called will result in
    # AttributeError
    @cached_property
    def model(self):
        return self.to
    @cached_property
    def hidden(self):
        return self.is_hidden()
    @cached_property
    def name(self):
        return self.field.related_query_name()
    @cached_property
    def related_model(self):
        if not self.field.model:
            raise AttributeError(
                "This property can't be accessed before self.field.contribute_to_class has been called.")
        return self.field.model
    @cached_property
    def many_to_many(self):
        return self.field.many_to_many
    # NOTE: many_to_one/one_to_many below are intentionally swapped relative
    # to the field's own flags — this object describes the relation as seen
    # from the other side, where the cardinality is mirrored.
    @cached_property
    def many_to_one(self):
        return self.field.one_to_many
    @cached_property
    def one_to_many(self):
        return self.field.many_to_one
    @cached_property
    def one_to_one(self):
        return self.field.one_to_one
    def __repr__(self):
        return '<%s: %s.%s>' % (
            type(self).__name__,
            self.related_model._meta.app_label,
            self.related_model._meta.model_name,
        )
    def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH,
                    limit_to_currently_related=False):
        """
        Returns choices with a default blank choices included, for use as
        SelectField choices for this field.
        Analog of django.db.models.fields.Field.get_choices(), provided
        initially for utilization by RelatedFieldListFilter.
        """
        first_choice = blank_choice if include_blank else []
        queryset = self.related_model._default_manager.all()
        if limit_to_currently_related:
            queryset = queryset.complex_filter(
                {'%s__isnull' % self.related_model._meta.model_name: False}
            )
        lst = [(x._get_pk_val(), smart_text(x)) for x in queryset]
        return first_choice + lst
    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        # Defer to the actual field definition for db prep
        return self.field.get_db_prep_lookup(lookup_type, value, connection=connection, prepared=prepared)
    def is_hidden(self):
        "Should the related object be hidden?"
        # A related_name ending in '+' suppresses the reverse accessor.
        return self.related_name is not None and self.related_name[-1] == '+'
    def get_joining_columns(self):
        # Joining columns seen from this (reverse) side are the field's
        # joining columns with the pairs flipped.
        return self.field.get_reverse_joining_columns()
    def get_extra_restriction(self, where_class, alias, related_alias):
        # The aliases are swapped because this is the reverse direction.
        return self.field.get_extra_restriction(where_class, related_alias, alias)
    def set_field_name(self):
        """
        Sets the related field's name, this is not available until later stages
        of app loading, so set_field_name is called from
        set_attributes_from_rel()
        """
        # By default foreign object doesn't relate to any remote field (for
        # example custom multicolumn joins currently have no remote field).
        self.field_name = None
    def get_accessor_name(self, model=None):
        # This method encapsulates the logic that decides what name to give an
        # accessor descriptor that retrieves related many-to-one or
        # many-to-many objects. It uses the lower-cased object_name + "_set",
        # but this can be overridden with the "related_name" option.
        # Due to backwards compatibility ModelForms need to be able to provide
        # an alternate model. See BaseInlineFormSet.get_default_prefix().
        opts = model._meta if model else self.related_model._meta
        model = model or self.related_model
        if self.multiple:
            # If this is a symmetrical m2m relation on self, there is no reverse accessor.
            if self.symmetrical and model == self.to:
                return None
        if self.related_name:
            return self.related_name
        if opts.default_related_name:
            return opts.default_related_name % {
                'model_name': opts.model_name.lower(),
                'app_label': opts.app_label.lower(),
            }
        return opts.model_name + ('_set' if self.multiple else '')
    def get_cache_name(self):
        # Name of the attribute used to cache the related object on instances.
        return "_%s_cache" % self.get_accessor_name()
    def get_path_info(self):
        # Reverse relations traverse the field's path backwards.
        return self.field.get_reverse_path_info()
class ManyToOneRel(ForeignObjectRel):
    """
    ForeignObjectRel subclass describing the reverse side of a ForeignKey,
    which targets a single named field (``field_name``) on the 'to' model.
    """
    def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
                 parent_link=False, on_delete=None, related_query_name=None):
        super(ManyToOneRel, self).__init__(
            field, to, related_name=related_name, limit_choices_to=limit_choices_to,
            parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name)
        self.field_name = field_name

    def get_related_field(self):
        """
        Returns the Field in the 'to' object to which this relationship is
        tied.
        """
        target = self.to._meta.get_field(self.field_name)
        if target.concrete:
            return target
        raise FieldDoesNotExist("No related field named '%s'" %
                                self.field_name)

    def set_field_name(self):
        # Default to the target model's primary key when no explicit field
        # name was supplied; called late in app loading (see base class).
        if not self.field_name:
            self.field_name = self.to._meta.pk.name
class OneToOneRel(ManyToOneRel):
    """ManyToOneRel variant used by OneToOneField: the relation is single-valued."""
    def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
                 parent_link=False, on_delete=None, related_query_name=None):
        super(OneToOneRel, self).__init__(
            field, to, field_name,
            related_name=related_name, limit_choices_to=limit_choices_to,
            parent_link=parent_link, on_delete=on_delete,
            related_query_name=related_query_name)
        # Unlike a plain ForeignKey, at most one related object exists.
        self.multiple = False
class ManyToManyRel(ForeignObjectRel):
    """
    ForeignObjectRel subclass describing a many-to-many relation, carrying
    the through model and m2m-specific options.
    """
    def __init__(self, field, to, related_name=None, limit_choices_to=None,
                 symmetrical=True, through=None, through_fields=None,
                 db_constraint=True, related_query_name=None):
        # Validate mutually-incompatible option combinations up front.
        if through and not db_constraint:
            raise ValueError("Can't supply a through model and db_constraint=False")
        if through_fields and not through:
            raise ValueError("Cannot specify through_fields without a through model")
        super(ManyToManyRel, self).__init__(
            field, to, related_name=related_name,
            limit_choices_to=limit_choices_to, related_query_name=related_query_name)
        self.symmetrical = symmetrical
        self.multiple = True
        self.through = through
        self.through_fields = through_fields
        self.db_constraint = db_constraint

    def is_hidden(self):
        "Should the related object be hidden?"
        name = self.related_name
        return name is not None and name[-1] == '+'

    def get_related_field(self):
        """
        Returns the field in the 'to' object to which this relationship is tied.
        Provided for symmetry with ManyToOneRel.
        """
        opts = self.through._meta
        if self.through_fields:
            source = opts.get_field(self.through_fields[0])
        else:
            # Find the FK on the through model pointing back at 'to'.
            for source in opts.fields:
                candidate_rel = getattr(source, 'rel', None)
                if candidate_rel and candidate_rel.to == self.to:
                    break
        return source.foreign_related_fields[0]
class ForeignObject(RelatedField):
    """Field describing a relation to another model over one or more
    (from_field, to_field) pairs."""
    # Field flags
    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False
    # If True, callers may assign related objects that have not been saved yet.
    allow_unsaved_instance_assignment = False
    # Whether the targeted field(s) must carry a uniqueness guarantee
    # (enforced by _check_unique_target()).
    requires_unique_target = True
    # Descriptor class installed on the related model for the reverse accessor.
    related_accessor_class = ForeignRelatedObjectsDescriptor
    def __init__(self, to, from_fields, to_fields, swappable=True, **kwargs):
        """
        ``from_fields`` and ``to_fields`` are parallel lists naming the local
        and remote fields the relation joins on; ``to`` is the target model
        (or a string reference to it).
        """
        self.from_fields = from_fields
        self.to_fields = to_fields
        self.swappable = swappable
        # Build the reverse-relation descriptor unless the caller supplied one.
        if 'rel' not in kwargs:
            kwargs['rel'] = ForeignObjectRel(
                self, to,
                related_name=kwargs.pop('related_name', None),
                related_query_name=kwargs.pop('related_query_name', None),
                limit_choices_to=kwargs.pop('limit_choices_to', None),
                parent_link=kwargs.pop('parent_link', False),
                on_delete=kwargs.pop('on_delete', CASCADE),
            )
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        super(ForeignObject, self).__init__(**kwargs)
def check(self, **kwargs):
errors = super(ForeignObject, self).check(**kwargs)
errors.extend(self._check_unique_target())
return errors
    def _check_unique_target(self):
        """
        System check (fields.E310/E311): the fields targeted by this relation
        must be unique so that reverse lookups are well defined.
        """
        rel_is_string = isinstance(self.rel.to, six.string_types)
        if rel_is_string or not self.requires_unique_target:
            return []
        # Skip if the related fields can't be resolved — a missing target
        # field is reported by a different check.
        try:
            self.foreign_related_fields
        except FieldDoesNotExist:
            return []
        try:
            self.rel
        except AttributeError:
            return []
        has_unique_field = any(rel_field.unique
                               for rel_field in self.foreign_related_fields)
        if not has_unique_field and len(self.foreign_related_fields) > 1:
            # Multi-column target with no unique member: E310.
            field_combination = ', '.join("'%s'" % rel_field.name
                                          for rel_field in self.foreign_related_fields)
            model_name = self.rel.to.__name__
            return [
                checks.Error(
                    "None of the fields %s on model '%s' have a unique=True constraint."
                    % (field_combination, model_name),
                    hint=None,
                    obj=self,
                    id='fields.E310',
                )
            ]
        elif not has_unique_field:
            # Single non-unique target field: E311.
            field_name = self.foreign_related_fields[0].name
            model_name = self.rel.to.__name__
            return [
                checks.Error(
                    ("'%s.%s' must set unique=True "
                     "because it is referenced by a foreign key.") % (model_name, field_name),
                    hint=None,
                    obj=self,
                    id='fields.E311',
                )
            ]
        else:
            return []
    def deconstruct(self):
        """
        Return (name, path, args, kwargs) sufficient to recreate the field,
        extending the base deconstruction with the relation-specific options.
        """
        name, path, args, kwargs = super(ForeignObject, self).deconstruct()
        kwargs['from_fields'] = self.from_fields
        kwargs['to_fields'] = self.to_fields
        # Only serialize relation options that differ from their defaults.
        if self.rel.related_name is not None:
            kwargs['related_name'] = self.rel.related_name
        if self.rel.related_query_name is not None:
            kwargs['related_query_name'] = self.rel.related_query_name
        if self.rel.on_delete != CASCADE:
            kwargs['on_delete'] = self.rel.on_delete
        if self.rel.parent_link:
            kwargs['parent_link'] = self.rel.parent_link
        # Work out string form of "to"
        if isinstance(self.rel.to, six.string_types):
            kwargs['to'] = self.rel.to
        else:
            kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
        # If swappable is True, then see if we're actually pointing to the target
        # of a swap.
        swappable_setting = self.swappable_setting
        if swappable_setting is not None:
            # If it's already a settings reference, error
            if hasattr(kwargs['to'], "setting_name"):
                if kwargs['to'].setting_name != swappable_setting:
                    raise ValueError(
                        "Cannot deconstruct a ForeignKey pointing to a model "
                        "that is swapped in place of more than one model (%s and %s)"
                        % (kwargs['to'].setting_name, swappable_setting)
                    )
            # Set it
            from django.db.migrations.writer import SettingsReference
            kwargs['to'] = SettingsReference(
                kwargs['to'],
                swappable_setting,
            )
        return name, path, args, kwargs
def resolve_related_fields(self):
if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
raise ValueError('Foreign Object from and to fields must be the same non-zero length')
if isinstance(self.rel.to, six.string_types):
raise ValueError('Related model %r cannot be resolved' % self.rel.to)
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (self if from_field_name == 'self'
else self.opts.get_field(from_field_name))
to_field = (self.rel.to._meta.pk if to_field_name is None
else self.rel.to._meta.get_field(to_field_name))
related_fields.append((from_field, to_field))
return related_fields
@property
def related_fields(self):
if not hasattr(self, '_related_fields'):
self._related_fields = self.resolve_related_fields()
return self._related_fields
@property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@property
def foreign_related_fields(self):
return tuple(rhs_field for lhs_field, rhs_field in self.related_fields)
    def get_local_related_value(self, instance):
        # Tuple of `instance`'s values for this model's own relation fields.
        return self.get_instance_value_for_fields(instance, self.local_related_fields)
    def get_foreign_related_value(self, instance):
        # Tuple of `instance`'s values for the remote-side relation fields.
        return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
    @staticmethod
    def get_instance_value_for_fields(instance, fields):
        """Return a tuple of `instance`'s values for each field in `fields`."""
        ret = []
        opts = instance._meta
        for field in fields:
            # Gotcha: in some cases (like fixture loading) a model can have
            # different values in parent_ptr_id and parent's id. So, use
            # instance.pk (that is, parent_ptr_id) when asked for instance.id.
            if field.primary_key:
                possible_parent_link = opts.get_ancestor_link(field.model)
                if (not possible_parent_link or
                        possible_parent_link.primary_key or
                        possible_parent_link.model._meta.abstract):
                    ret.append(instance.pk)
                    continue
            ret.append(getattr(instance, field.attname))
        return tuple(ret)
def get_attname_column(self):
attname, column = super(ForeignObject, self).get_attname_column()
return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source)
def get_reverse_joining_columns(self):
return self.get_joining_columns(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
"""
Returns an extra filter condition for related object fetching when
user does 'instance.fieldname', that is the extra filter is used in
the descriptor of the field.
The filter should be either a dict usable in .filter(**kwargs) call or
a Q-object. The condition will be ANDed together with the relation's
joining columns.
A parallel method is get_extra_restriction() which is used in
JOIN and subquery conditions.
"""
return {}
def get_extra_restriction(self, where_class, alias, related_alias):
"""
Returns a pair condition used for joining and subquery pushdown. The
condition is something that responds to as_sql(compiler, connection)
method.
Note that currently referring both the 'alias' and 'related_alias'
will not work in some conditions, like subquery pushdown.
A parallel method is get_extra_descriptor_filter() which is used in
instance.fieldname related object fetching.
"""
return None
def get_path_info(self):
"""
Get path from this field to the related model.
"""
opts = self.rel.to._meta
from_opts = self.model._meta
return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.rel.to._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
return pathinfos
    def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookups,
                              raw_value):
        """
        Build a WHERE-tree constraint for a lookup on this relation.

        `targets`/`sources` are the remote/local field lists of the relation,
        `lookups` must contain exactly one lookup name, and `raw_value` is the
        value (or queryset) being compared against. Raises FieldError for
        nested lookups and TypeError for unsupported lookup types.
        """
        from django.db.models.sql.where import SubqueryConstraint, AND, OR
        root_constraint = constraint_class()
        assert len(targets) == len(sources)
        if len(lookups) > 1:
            raise exceptions.FieldError('Relation fields do not support nested lookups')
        lookup_type = lookups[0]
        def get_normalized_value(value):
            # Normalize `value` to a tuple of per-column raw values; a model
            # instance is unpacked into its related attribute values.
            from django.db.models import Model
            if isinstance(value, Model):
                value_list = []
                for source in sources:
                    # Account for one-to-one relations when sent a different model
                    while not isinstance(value, source.model) and source.rel:
                        source = source.rel.to._meta.get_field(source.rel.field_name)
                    value_list.append(getattr(value, source.attname))
                return tuple(value_list)
            elif not isinstance(value, tuple):
                return (value,)
            return value
        is_multicolumn = len(self.related_fields) > 1
        # Queryset / compiled-SQL values become a subquery constraint.
        if (hasattr(raw_value, '_as_sql') or
                hasattr(raw_value, 'get_compiler')):
            root_constraint.add(SubqueryConstraint(alias, [target.column for target in targets],
                                                   [source.name for source in sources], raw_value),
                                AND)
        elif lookup_type == 'isnull':
            root_constraint.add(IsNull(targets[0].get_col(alias, sources[0]), raw_value), AND)
        # exact works per-column; comparisons only for single-column relations.
        elif (lookup_type == 'exact' or (lookup_type in ['gt', 'lt', 'gte', 'lte']
                                         and not is_multicolumn)):
            value = get_normalized_value(raw_value)
            for target, source, val in zip(targets, sources, value):
                lookup_class = target.get_lookup(lookup_type)
                root_constraint.add(
                    lookup_class(target.get_col(alias, source), val), AND)
        # Single-column range/in: one lookup over the flattened values.
        elif lookup_type in ['range', 'in'] and not is_multicolumn:
            values = [get_normalized_value(value) for value in raw_value]
            value = [val[0] for val in values]
            lookup_class = targets[0].get_lookup(lookup_type)
            root_constraint.add(lookup_class(targets[0].get_col(alias, sources[0]), value), AND)
        # Multicolumn 'in': OR together per-value ANDed exact matches.
        elif lookup_type == 'in':
            values = [get_normalized_value(value) for value in raw_value]
            for value in values:
                value_constraint = constraint_class()
                for source, target, val in zip(sources, targets, value):
                    lookup_class = target.get_lookup('exact')
                    lookup = lookup_class(target.get_col(alias, source), val)
                    value_constraint.add(lookup, AND)
                root_constraint.add(value_constraint, OR)
        else:
            raise TypeError('Related Field got invalid lookup: %s' % lookup_type)
        return root_constraint
@property
def attnames(self):
return tuple(field.attname for field in self.local_related_fields)
def get_defaults(self):
return tuple(field.get_default() for field in self.local_related_fields)
    def contribute_to_class(self, cls, name, virtual_only=False):
        # Standard Field setup, then install the forward descriptor so that
        # `instance.<name>` resolves to the related object.
        super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
        setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
    def contribute_to_related_class(self, cls, related):
        # Called on the *related* model's class; installs the reverse accessor.
        # Internal FK's - i.e., those with a related name ending with '+' -
        # and swapped models don't get a related descriptor.
        if not self.rel.is_hidden() and not related.related_model._meta.swapped:
            setattr(cls, related.get_accessor_name(), self.related_accessor_class(related))
        # While 'limit_choices_to' might be a callable, simply pass
        # it along for later - this is too early because it's still
        # model load time.
        if self.rel.limit_choices_to:
            cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
class ForeignKey(ForeignObject):
    """
    A many-to-one relation: each instance of this model points at (at most)
    one instance of the related model, identified by ``to_field`` (the remote
    model's primary key by default).
    """
    # Field flags
    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('%(model)s instance with %(field)s %(value)r does not exist.')
    }
    description = _("Foreign Key (type determined by related field)")
    def __init__(self, to, to_field=None, rel_class=ManyToOneRel,
                 db_constraint=True, **kwargs):
        # `to` may be a model class, a "app.Model" string, or 'self'.
        try:
            to._meta.model_name
        except AttributeError:  # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), (
                "%s(%r) is invalid. First parameter to ForeignKey must be "
                "either a model, a model name, or the string %r" % (
                    self.__class__.__name__, to,
                    RECURSIVE_RELATIONSHIP_CONSTANT,
                )
            )
        else:
            # For backwards compatibility purposes, we need to *try* and set
            # the to_field during FK construction. It won't be guaranteed to
            # be correct until contribute_to_class is called. Refs #12190.
            to_field = to_field or (to._meta.pk and to._meta.pk.name)
        # FK columns are indexed by default.
        if 'db_index' not in kwargs:
            kwargs['db_index'] = True
        self.db_constraint = db_constraint
        kwargs['rel'] = rel_class(
            self, to, to_field,
            related_name=kwargs.pop('related_name', None),
            related_query_name=kwargs.pop('related_query_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            parent_link=kwargs.pop('parent_link', False),
            on_delete=kwargs.pop('on_delete', CASCADE),
        )
        # A ForeignKey is a single-column ForeignObject from 'self' to to_field.
        super(ForeignKey, self).__init__(to, ['self'], [to_field], **kwargs)
    def check(self, **kwargs):
        # System-check entry point; aggregates FK-specific checks.
        errors = super(ForeignKey, self).check(**kwargs)
        errors.extend(self._check_on_delete())
        errors.extend(self._check_unique())
        return errors
    def _check_on_delete(self):
        # on_delete=SET_NULL requires null=True; SET_DEFAULT requires a default.
        on_delete = getattr(self.rel, 'on_delete', None)
        if on_delete == SET_NULL and not self.null:
            return [
                checks.Error(
                    'Field specifies on_delete=SET_NULL, but cannot be null.',
                    hint='Set null=True argument on the field, or change the on_delete rule.',
                    obj=self,
                    id='fields.E320',
                )
            ]
        elif on_delete == SET_DEFAULT and not self.has_default():
            return [
                checks.Error(
                    'Field specifies on_delete=SET_DEFAULT, but has no default value.',
                    hint='Set a default value, or change the on_delete rule.',
                    obj=self,
                    id='fields.E321',
                )
            ]
        else:
            return []
    def _check_unique(self, **kwargs):
        # unique=True on a FK is legal but OneToOneField expresses it better.
        return [
            checks.Warning(
                'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
                hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
                obj=self,
                id='fields.W342',
            )
        ] if self.unique else []
    def deconstruct(self):
        """Return the (name, path, args, kwargs) needed to recreate this field in a migration."""
        name, path, args, kwargs = super(ForeignKey, self).deconstruct()
        # from/to_fields are implied by the FK form ('self' -> to_field).
        del kwargs['to_fields']
        del kwargs['from_fields']
        # Handle the simpler arguments
        if self.db_index:
            del kwargs['db_index']
        else:
            kwargs['db_index'] = False
        if self.db_constraint is not True:
            kwargs['db_constraint'] = self.db_constraint
        # Rel needs more work.
        to_meta = getattr(self.rel.to, "_meta", None)
        # Only serialize to_field when it differs from the remote pk.
        if self.rel.field_name and (not to_meta or (to_meta.pk and self.rel.field_name != to_meta.pk.name)):
            kwargs['to_field'] = self.rel.field_name
        return name, path, args, kwargs
    @property
    def related_field(self):
        # The single remote field this FK points at.
        return self.foreign_related_fields[0]
    def get_reverse_path_info(self):
        """
        Get path from the related model to this field's model.
        """
        opts = self.model._meta
        from_opts = self.rel.to._meta
        pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
        return pathinfos
    def validate(self, value, model_instance):
        """Validate that `value` identifies an existing related row (unless parent link/None)."""
        if self.rel.parent_link:
            return
        super(ForeignKey, self).validate(value, model_instance)
        if value is None:
            return
        using = router.db_for_read(model_instance.__class__, instance=model_instance)
        qs = self.rel.to._default_manager.using(using).filter(
            **{self.rel.field_name: value}
        )
        qs = qs.complex_filter(self.get_limit_choices_to())
        if not qs.exists():
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={
                    'model': self.rel.to._meta.verbose_name, 'pk': value,
                    'field': self.rel.field_name, 'value': value,
                },  # 'pk' is included for backwards compatibility
            )
    def get_attname(self):
        # The raw database value lives on '<name>_id'.
        return '%s_id' % self.name
    def get_attname_column(self):
        attname = self.get_attname()
        column = self.db_column or attname
        return attname, column
    def get_default(self):
        "Here we check if the default value is an object and return the to_field if so."
        field_default = super(ForeignKey, self).get_default()
        if isinstance(field_default, self.rel.to):
            return getattr(field_default, self.related_field.attname)
        return field_default
    def get_db_prep_save(self, value, connection):
        # Empty string becomes NULL unless the remote field allows empty strings.
        if value is None or (value == '' and
                             (not self.related_field.empty_strings_allowed or
                              connection.features.interprets_empty_strings_as_nulls)):
            return None
        else:
            return self.related_field.get_db_prep_save(value, connection=connection)
    def value_to_string(self, obj):
        if not obj:
            # In required many-to-one fields with only one available choice,
            # select that one available choice. Note: For SelectFields
            # we have to check that the length of choices is *2*, not 1,
            # because SelectFields always have an initial "blank" value.
            if not self.blank and self.choices:
                choice_list = self.get_choices_default()
                if len(choice_list) == 2:
                    return smart_text(choice_list[1][0])
        return super(ForeignKey, self).value_to_string(obj)
    def contribute_to_related_class(self, cls, related):
        super(ForeignKey, self).contribute_to_related_class(cls, related)
        # Late-bind the remote field name to the related model's pk.
        if self.rel.field_name is None:
            self.rel.field_name = cls._meta.pk.name
    def formfield(self, **kwargs):
        """Return a ModelChoiceField over the related model's default manager."""
        db = kwargs.pop('using', None)
        if isinstance(self.rel.to, six.string_types):
            raise ValueError("Cannot create form field for %r yet, because "
                             "its related model %r has not been loaded yet" %
                             (self.name, self.rel.to))
        defaults = {
            'form_class': forms.ModelChoiceField,
            'queryset': self.rel.to._default_manager.using(db),
            'to_field_name': self.rel.field_name,
        }
        defaults.update(kwargs)
        return super(ForeignKey, self).formfield(**defaults)
    def db_type(self, connection):
        # The database column type of a ForeignKey is the column type
        # of the field to which it points. An exception is if the ForeignKey
        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
        # in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields however, the only
        # thing we can do is making AutoField an IntegerField.
        rel_field = self.related_field
        if (isinstance(rel_field, AutoField) or
                (not connection.features.related_fields_match_type and
                isinstance(rel_field, (PositiveIntegerField,
                                       PositiveSmallIntegerField)))):
            return IntegerField().db_type(connection=connection)
        return rel_field.db_type(connection=connection)
    def db_parameters(self, connection):
        return {"type": self.db_type(connection), "check": []}
    def convert_empty_strings(self, value, expression, connection, context):
        # DB converter used on backends that store '' as NULL (e.g. Oracle).
        if (not value) and isinstance(value, six.string_types):
            return None
        return value
    def get_db_converters(self, connection):
        converters = super(ForeignKey, self).get_db_converters(connection)
        if connection.features.interprets_empty_strings_as_nulls:
            converters += [self.convert_empty_strings]
        return converters
    def get_col(self, alias, output_field=None):
        # Queries against the FK column use the *remote* field's output type.
        return super(ForeignKey, self).get_col(alias, output_field or self.related_field)
class OneToOneField(ForeignKey):
    """
    A ForeignKey that always enforces ``unique=True``.

    Because at most one related object can exist, the reverse side of the
    relation resolves to a single object rather than a list.
    """
    # Field flags
    many_to_many = False
    many_to_one = False
    one_to_many = False
    one_to_one = True

    related_accessor_class = SingleRelatedObjectDescriptor
    description = _("One-to-one relationship")

    def __init__(self, to, to_field=None, **kwargs):
        # Force the unique constraint that distinguishes this from ForeignKey.
        kwargs['unique'] = True
        super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(OneToOneField, self).deconstruct()
        # 'unique' is implied by the field type; don't serialize it.
        kwargs.pop('unique', None)
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        # A parent link is managed entirely by the model-inheritance
        # machinery and never appears on forms.
        if self.rel.parent_link:
            return None
        return super(OneToOneField, self).formfield(**kwargs)

    def save_form_data(self, instance, data):
        # Model instances go through the descriptor (self.name); raw pk
        # values are assigned straight to the '<name>_id' attribute.
        target = self.name if isinstance(data, self.rel.to) else self.attname
        setattr(instance, target, data)

    def _check_unique(self, **kwargs):
        # ForeignKey warns about unique=True; here it is the whole point.
        return []
def create_many_to_many_intermediary_model(field, klass):
    """
    Build the automatic "through" model for a ManyToManyField declared on
    `klass` without an explicit intermediary: a model with two ForeignKeys
    and a unique_together constraint over the pair.
    """
    from django.db import models
    managed = True
    if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
        # Remote model given as a lazy string reference: fix up the managed
        # flag once the real model class becomes available.
        to_model = field.rel.to
        to = to_model.split('.')[-1]
        def set_managed(field, model, cls):
            field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
        add_lazy_relation(klass, field, to_model, set_managed)
    elif isinstance(field.rel.to, six.string_types):
        # Self-referential relation given as the 'self' string.
        to = klass._meta.object_name
        to_model = klass
        managed = klass._meta.managed
    else:
        to = field.rel.to._meta.object_name
        to_model = field.rel.to
        managed = klass._meta.managed or to_model._meta.managed
    name = '%s_%s' % (klass._meta.object_name, field.name)
    if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
        # Self-referential m2m: disambiguate the two FK accessor names.
        from_ = 'from_%s' % to.lower()
        to = 'to_%s' % to.lower()
    else:
        from_ = klass._meta.model_name
        to = to.lower()
    meta = type(str('Meta'), (object,), {
        'db_table': field._get_m2m_db_table(klass._meta),
        'managed': managed,
        'auto_created': klass,
        'app_label': klass._meta.app_label,
        'db_tablespace': klass._meta.db_tablespace,
        'unique_together': (from_, to),
        'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
        'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
        'apps': field.model._meta.apps,
    })
    # Construct and return the new class.
    return type(str(name), (models.Model,), {
        'Meta': meta,
        '__module__': klass.__module__,
        from_: models.ForeignKey(
            klass,
            related_name='%s+' % name,
            db_tablespace=field.db_tablespace,
            db_constraint=field.rel.db_constraint,
        ),
        to: models.ForeignKey(
            to_model,
            related_name='%s+' % name,
            db_tablespace=field.db_tablespace,
            db_constraint=field.rel.db_constraint,
        )
    })
class ManyToManyField(RelatedField):
    """
    A many-to-many relation, stored in a separate intermediary ("through")
    table that is either auto-created or supplied via the ``through`` option.
    """
    # Field flags
    many_to_many = True
    many_to_one = False
    one_to_many = False
    one_to_one = False
    description = _("Many-to-many relationship")
    def __init__(self, to, db_constraint=True, swappable=True, **kwargs):
        # `to` may be a model class, a "app.Model" string, or 'self'.
        try:
            to._meta
        except AttributeError:  # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), (
                "%s(%r) is invalid. First parameter to ManyToManyField must be "
                "either a model, a model name, or the string %r" %
                (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
            )
            # Class names must be ASCII in Python 2.x, so we forcibly coerce it
            # here to break early if there's a problem.
            to = str(to)
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = ManyToManyRel(
            self, to,
            related_name=kwargs.pop('related_name', None),
            related_query_name=kwargs.pop('related_query_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
            through=kwargs.pop('through', None),
            through_fields=kwargs.pop('through_fields', None),
            db_constraint=db_constraint,
        )
        self.swappable = swappable
        # db_table only makes sense for the auto-created through table.
        self.db_table = kwargs.pop('db_table', None)
        if kwargs['rel'].through is not None:
            assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
        super(ManyToManyField, self).__init__(**kwargs)
    def check(self, **kwargs):
        # System-check entry point; aggregates m2m-specific checks.
        errors = super(ManyToManyField, self).check(**kwargs)
        errors.extend(self._check_unique(**kwargs))
        errors.extend(self._check_relationship_model(**kwargs))
        errors.extend(self._check_ignored_options(**kwargs))
        return errors
    def _check_unique(self, **kwargs):
        # unique has no meaning for a relation stored in a separate table.
        if self.unique:
            return [
                checks.Error(
                    'ManyToManyFields cannot be unique.',
                    hint=None,
                    obj=self,
                    id='fields.E330',
                )
            ]
        return []
    def _check_ignored_options(self, **kwargs):
        # Options accepted by Field but silently ignored on m2m fields.
        warnings = []
        if self.null:
            warnings.append(
                checks.Warning(
                    'null has no effect on ManyToManyField.',
                    hint=None,
                    obj=self,
                    id='fields.W340',
                )
            )
        if len(self._validators) > 0:
            warnings.append(
                checks.Warning(
                    'ManyToManyField does not support validators.',
                    hint=None,
                    obj=self,
                    id='fields.W341',
                )
            )
        return warnings
    def _check_relationship_model(self, from_model=None, **kwargs):
        """Validate the through model and, if given, the through_fields option."""
        if hasattr(self.rel.through, '_meta'):
            qualified_model_name = "%s.%s" % (
                self.rel.through._meta.app_label, self.rel.through.__name__)
        else:
            qualified_model_name = self.rel.through
        errors = []
        if self.rel.through not in apps.get_models(include_auto_created=True):
            # The relationship model is not installed.
            errors.append(
                checks.Error(
                    ("Field specifies a many-to-many relation through model "
                     "'%s', which has not been installed.") %
                    qualified_model_name,
                    hint=None,
                    obj=self,
                    id='fields.E331',
                )
            )
        else:
            assert from_model is not None, \
                "ManyToManyField with intermediate " \
                "tables cannot be checked if you don't pass the model " \
                "where the field is attached to."
            # Set some useful local variables
            to_model = self.rel.to
            from_model_name = from_model._meta.object_name
            if isinstance(to_model, six.string_types):
                to_model_name = to_model
            else:
                to_model_name = to_model._meta.object_name
            relationship_model_name = self.rel.through._meta.object_name
            self_referential = from_model == to_model
            # Check symmetrical attribute.
            if (self_referential and self.rel.symmetrical and
                    not self.rel.through._meta.auto_created):
                errors.append(
                    checks.Error(
                        'Many-to-many fields with intermediate tables must not be symmetrical.',
                        hint=None,
                        obj=self,
                        id='fields.E332',
                    )
                )
            # Count foreign keys in intermediate model
            if self_referential:
                seen_self = sum(from_model == getattr(field.rel, 'to', None)
                    for field in self.rel.through._meta.fields)
                if seen_self > 2 and not self.rel.through_fields:
                    errors.append(
                        checks.Error(
                            ("The model is used as an intermediate model by "
                             "'%s', but it has more than two foreign keys "
                             "to '%s', which is ambiguous. You must specify "
                             "which two foreign keys Django should use via the "
                             "through_fields keyword argument.") % (self, from_model_name),
                            hint=("Use through_fields to specify which two "
                                  "foreign keys Django should use."),
                            obj=self.rel.through,
                            id='fields.E333',
                        )
                    )
            else:
                # Count foreign keys in relationship model
                seen_from = sum(from_model == getattr(field.rel, 'to', None)
                    for field in self.rel.through._meta.fields)
                seen_to = sum(to_model == getattr(field.rel, 'to', None)
                    for field in self.rel.through._meta.fields)
                if seen_from > 1 and not self.rel.through_fields:
                    errors.append(
                        checks.Error(
                            ("The model is used as an intermediate model by "
                             "'%s', but it has more than one foreign key "
                             "from '%s', which is ambiguous. You must specify "
                             "which foreign key Django should use via the "
                             "through_fields keyword argument.") % (self, from_model_name),
                            hint=('If you want to create a recursive relationship, '
                                  'use ForeignKey("self", symmetrical=False, '
                                  'through="%s").') % relationship_model_name,
                            obj=self,
                            id='fields.E334',
                        )
                    )
                if seen_to > 1 and not self.rel.through_fields:
                    errors.append(
                        checks.Error(
                            ("The model is used as an intermediate model by "
                             "'%s', but it has more than one foreign key "
                             "to '%s', which is ambiguous. You must specify "
                             "which foreign key Django should use via the "
                             "through_fields keyword argument.") % (self, to_model_name),
                            hint=('If you want to create a recursive '
                                  'relationship, use ForeignKey("self", '
                                  'symmetrical=False, through="%s").') % relationship_model_name,
                            obj=self,
                            id='fields.E335',
                        )
                    )
                if seen_from == 0 or seen_to == 0:
                    errors.append(
                        checks.Error(
                            ("The model is used as an intermediate model by "
                             "'%s', but it does not have a foreign key to '%s' or '%s'.") % (
                                self, from_model_name, to_model_name
                            ),
                            hint=None,
                            obj=self.rel.through,
                            id='fields.E336',
                        )
                    )
        # Validate `through_fields`
        if self.rel.through_fields is not None:
            # Validate that we're given an iterable of at least two items
            # and that none of them is "falsy"
            if not (len(self.rel.through_fields) >= 2 and
                    self.rel.through_fields[0] and self.rel.through_fields[1]):
                errors.append(
                    checks.Error(
                        ("Field specifies 'through_fields' but does not "
                         "provide the names of the two link fields that should be "
                         "used for the relation through model "
                         "'%s'.") % qualified_model_name,
                        hint=("Make sure you specify 'through_fields' as "
                              "through_fields=('field1', 'field2')"),
                        obj=self,
                        id='fields.E337',
                    )
                )
            # Validate the given through fields -- they should be actual
            # fields on the through model, and also be foreign keys to the
            # expected models
            else:
                assert from_model is not None, \
                    "ManyToManyField with intermediate " \
                    "tables cannot be checked if you don't pass the model " \
                    "where the field is attached to."
                source, through, target = from_model, self.rel.through, self.rel.to
                source_field_name, target_field_name = self.rel.through_fields[:2]
                for field_name, related_model in ((source_field_name, source),
                                                  (target_field_name, target)):
                    possible_field_names = []
                    for f in through._meta.fields:
                        if hasattr(f, 'rel') and getattr(f.rel, 'to', None) == related_model:
                            possible_field_names.append(f.name)
                    if possible_field_names:
                        hint = ("Did you mean one of the following foreign "
                                "keys to '%s': %s?") % (related_model._meta.object_name,
                                                        ', '.join(possible_field_names))
                    else:
                        hint = None
                    try:
                        field = through._meta.get_field(field_name)
                    except FieldDoesNotExist:
                        errors.append(
                            checks.Error(
                                ("The intermediary model '%s' has no field '%s'.") % (
                                    qualified_model_name, field_name),
                                hint=hint,
                                obj=self,
                                id='fields.E338',
                            )
                        )
                    else:
                        if not (hasattr(field, 'rel') and
                                getattr(field.rel, 'to', None) == related_model):
                            errors.append(
                                checks.Error(
                                    "'%s.%s' is not a foreign key to '%s'." % (
                                        through._meta.object_name, field_name,
                                        related_model._meta.object_name),
                                    hint=hint,
                                    obj=self,
                                    id='fields.E339',
                                )
                            )
        return errors
    def deconstruct(self):
        """Return the (name, path, args, kwargs) needed to recreate this field in a migration."""
        name, path, args, kwargs = super(ManyToManyField, self).deconstruct()
        # Handle the simpler arguments
        if self.db_table is not None:
            kwargs['db_table'] = self.db_table
        if self.rel.db_constraint is not True:
            kwargs['db_constraint'] = self.rel.db_constraint
        if self.rel.related_name is not None:
            kwargs['related_name'] = self.rel.related_name
        if self.rel.related_query_name is not None:
            kwargs['related_query_name'] = self.rel.related_query_name
        # Rel needs more work.
        if isinstance(self.rel.to, six.string_types):
            kwargs['to'] = self.rel.to
        else:
            kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
        if getattr(self.rel, 'through', None) is not None:
            if isinstance(self.rel.through, six.string_types):
                kwargs['through'] = self.rel.through
            elif not self.rel.through._meta.auto_created:
                kwargs['through'] = "%s.%s" % (self.rel.through._meta.app_label, self.rel.through._meta.object_name)
        # If swappable is True, then see if we're actually pointing to the target
        # of a swap.
        swappable_setting = self.swappable_setting
        if swappable_setting is not None:
            # If it's already a settings reference, error
            if hasattr(kwargs['to'], "setting_name"):
                if kwargs['to'].setting_name != swappable_setting:
                    raise ValueError(
                        "Cannot deconstruct a ManyToManyField pointing to a "
                        "model that is swapped in place of more than one model "
                        "(%s and %s)" % (kwargs['to'].setting_name, swappable_setting)
                    )
            # Set it
            from django.db.migrations.writer import SettingsReference
            kwargs['to'] = SettingsReference(
                kwargs['to'],
                swappable_setting,
            )
        return name, path, args, kwargs
    def _get_path_info(self, direct=False):
        """
        Called by both direct and indirect m2m traversal.
        """
        # An m2m hop is two joins: through the intermediary model's two FKs.
        pathinfos = []
        int_model = self.rel.through
        linkfield1 = int_model._meta.get_field(self.m2m_field_name())
        linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name())
        if direct:
            join1infos = linkfield1.get_reverse_path_info()
            join2infos = linkfield2.get_path_info()
        else:
            join1infos = linkfield2.get_reverse_path_info()
            join2infos = linkfield1.get_path_info()
        pathinfos.extend(join1infos)
        pathinfos.extend(join2infos)
        return pathinfos
    def get_path_info(self):
        return self._get_path_info(direct=True)
    def get_reverse_path_info(self):
        return self._get_path_info(direct=False)
    def get_choices_default(self):
        return Field.get_choices(self, include_blank=False)
    def _get_m2m_db_table(self, opts):
        "Function that can be curried to provide the m2m table name for this relation"
        if self.rel.through is not None:
            return self.rel.through._meta.db_table
        elif self.db_table:
            return self.db_table
        else:
            # Auto-generated table name, truncated to the backend's limit.
            return utils.truncate_name('%s_%s' % (opts.db_table, self.name),
                                       connection.ops.max_name_length())
    def _get_m2m_attr(self, related, attr):
        "Function that can be curried to provide the source accessor or DB column name for the m2m table"
        cache_attr = '_m2m_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        if self.rel.through_fields is not None:
            link_field_name = self.rel.through_fields[0]
        else:
            link_field_name = None
        for f in self.rel.through._meta.fields:
            if (f.is_relation and f.rel.to == related.related_model and
                    (link_field_name is None or link_field_name == f.name)):
                setattr(self, cache_attr, getattr(f, attr))
                return getattr(self, cache_attr)
    def _get_m2m_reverse_attr(self, related, attr):
        "Function that can be curried to provide the related accessor or DB column name for the m2m table"
        cache_attr = '_m2m_reverse_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        found = False
        if self.rel.through_fields is not None:
            link_field_name = self.rel.through_fields[1]
        else:
            link_field_name = None
        for f in self.rel.through._meta.fields:
            # NOTE f.rel.to != f.related_model
            if f.is_relation and f.rel.to == related.model:
                if link_field_name is None and related.related_model == related.model:
                    # If this is an m2m-intermediate to self,
                    # the first foreign key you find will be
                    # the source column. Keep searching for
                    # the second foreign key.
                    if found:
                        setattr(self, cache_attr, getattr(f, attr))
                        break
                    else:
                        found = True
                elif link_field_name is None or link_field_name == f.name:
                    setattr(self, cache_attr, getattr(f, attr))
                    break
        return getattr(self, cache_attr)
    def value_to_string(self, obj):
        data = ''
        if obj:
            qs = getattr(obj, self.name).all()
            data = [instance._get_pk_val() for instance in qs]
        else:
            # In required many-to-many fields with only one available choice,
            # select that one available choice.
            if not self.blank:
                choices_list = self.get_choices_default()
                if len(choices_list) == 1:
                    data = [choices_list[0][0]]
        return smart_text(data)
    def contribute_to_class(self, cls, name, **kwargs):
        # To support multiple relations to self, it's useful to have a non-None
        # related name on symmetrical relations for internal reasons. The
        # concept doesn't make a lot of sense externally ("you want me to
        # specify *what* on my non-reversible relation?!"), so we set it up
        # automatically. The funky name reduces the chance of an accidental
        # clash.
        if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
            self.rel.related_name = "%s_rel_+" % name
        super(ManyToManyField, self).contribute_to_class(cls, name, **kwargs)
        # The intermediate m2m model is not auto created if:
        # 1) There is a manually specified intermediate, or
        # 2) The class owning the m2m field is abstract.
        # 3) The class owning the m2m field has been swapped out.
        if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped:
            self.rel.through = create_many_to_many_intermediary_model(self, cls)
        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
        # Set up the accessor for the m2m table name for the relation
        self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
        # Populate some necessary rel arguments so that cross-app relations
        # work correctly.
        if isinstance(self.rel.through, six.string_types):
            def resolve_through_model(field, model, cls):
                field.rel.through = model
            add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
    def contribute_to_related_class(self, cls, related):
        # Internal M2Ms (i.e., those with a related name ending with '+')
        # and swapped models don't get a related descriptor.
        if not self.rel.is_hidden() and not related.related_model._meta.swapped:
            setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
        # Set up the accessors for the column names on the m2m table
        self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
        self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
        self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
        self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
        get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
        self.m2m_target_field_name = lambda: get_m2m_rel().field_name
        get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
        self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
    def set_attributes_from_rel(self):
        # An m2m field does not take its name/verbose_name from the relation.
        pass
    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname).all()
    def save_form_data(self, instance, data):
        setattr(instance, self.attname, data)
    def formfield(self, **kwargs):
        """Return a ModelMultipleChoiceField over the related model's default manager."""
        db = kwargs.pop('using', None)
        defaults = {
            'form_class': forms.ModelMultipleChoiceField,
            'queryset': self.rel.to._default_manager.using(db),
        }
        defaults.update(kwargs)
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if defaults.get('initial') is not None:
            initial = defaults['initial']
            if callable(initial):
                initial = initial()
            defaults['initial'] = [i._get_pk_val() for i in initial]
        return super(ManyToManyField, self).formfield(**defaults)
    def db_type(self, connection):
        # A ManyToManyField is not represented by a single column,
        # so return None.
        return None
    def db_parameters(self, connection):
        return {"type": None, "check": None}
| [
"darkgobal@mail.ru"
] | darkgobal@mail.ru |
9cf5bb94518552a7aa21ac5f78ca47356e811428 | 918f0fdef0e9224aa1a0596479178618290055ec | /configs/_base_/models/pgd.py | 5bde52efb454224fc795fe03a6e255f8f7245ff0 | [
"Apache-2.0"
] | permissive | Tsinghua-MARS-Lab/futr3d | b7eb3a0c9d92a58759c9c43e96bfd024a2e3de96 | 9130d71e487bad47f5dbcffd696fe9e4a838104f | refs/heads/main | 2023-07-24T15:40:00.121665 | 2023-07-06T05:50:45 | 2023-07-06T05:50:45 | 499,766,918 | 188 | 27 | MIT | 2022-06-19T10:42:03 | 2022-06-04T08:21:17 | Python | UTF-8 | Python | false | false | 1,842 | py | _base_ = './fcos3d.py'
# model settings
model = dict(
bbox_head=dict(
_delete_=True,
type='PGDHead',
num_classes=10,
in_channels=256,
stacked_convs=2,
feat_channels=256,
use_direction_classifier=True,
diff_rad_by_sin=True,
pred_attrs=True,
pred_velo=True,
pred_bbox2d=True,
pred_keypoints=False,
dir_offset=0.7854, # pi/4
strides=[8, 16, 32, 64, 128],
group_reg_dims=(2, 1, 3, 1, 2), # offset, depth, size, rot, velo
cls_branch=(256, ),
reg_branch=(
(256, ), # offset
(256, ), # depth
(256, ), # size
(256, ), # rot
() # velo
),
dir_branch=(256, ),
attr_branch=(256, ),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
loss_dir=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_attr=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
norm_on_bbox=True,
centerness_on_reg=True,
center_sampling=True,
conv_bias=True,
dcn_on_last_conv=True,
use_depth_classifier=True,
depth_branch=(256, ),
depth_range=(0, 50),
depth_unit=10,
division='uniform',
depth_bins=6,
bbox_coder=dict(type='PGDBBoxCoder', code_size=9)),
test_cfg=dict(nms_pre=1000, nms_thr=0.8, score_thr=0.01, max_per_img=200))
| [
"noreply@github.com"
] | Tsinghua-MARS-Lab.noreply@github.com |
b3b79d2540f0851da6b4cce21473eaf994bc8ac8 | 2062e0e40b47769f029df2f1fc586dede856b0ea | /python/postprocessing/datasets/data2016_v5.py | 7f58b27231fa93908319bd66b0f951f272b2adf9 | [] | no_license | cericeci/nanoAOD-tools | c92eb625e3462064f2c5cc62a3cb501377d41743 | 2fcd2d4d8cbc3ad29b2433ce6a5efc84adf33a25 | refs/heads/master | 2020-04-03T15:46:00.647505 | 2019-11-11T15:51:01 | 2019-11-11T15:51:01 | 155,376,369 | 0 | 1 | null | 2019-10-28T19:31:46 | 2018-10-30T11:47:38 | Python | UTF-8 | Python | false | false | 4,354 | py | from PhysicsTools.NanoAODTools.postprocessing.datasets.componentContainer import ComponentContainer
SingleMuon = [
ComponentContainer('SingleMuon_Run2016B', '/SingleMuon/Run2016B_ver2-Nano1June2019_ver2-v1/NANOAOD'),
ComponentContainer('SingleMuon_Run2016C', '/SingleMuon/Run2016C-Nano1June2019-v1/NANOAOD'),
ComponentContainer('SingleMuon_Run2016D', '/SingleMuon/Run2016D-Nano1June2019-v1/NANOAOD'),
ComponentContainer('SingleMuon_Run2016E', '/SingleMuon/Run2016E-Nano1June2019-v1/NANOAOD'),
ComponentContainer('SingleMuon_Run2016F', '/SingleMuon/Run2016F-Nano1June2019-v1/NANOAOD'),
ComponentContainer('SingleMuon_Run2016G', '/SingleMuon/Run2016G-Nano1June2019-v1/NANOAOD'),
ComponentContainer('SingleMuon_Run2016H', '/SingleMuon/Run2016H-Nano1June2019-v1/NANOAOD'),
]
SingleElectron = [
ComponentContainer('SingleElectron_Run2016B', '/SingleElectron/Run2016B_ver2-Nano1June2019_ver2-v1/NANOAOD'),
ComponentContainer('SingleElectron_Run2016C', '/SingleElectron/Run2016C-Nano1June2019-v1/NANOAOD'),
ComponentContainer('SingleElectron_Run2016D', '/SingleElectron/Run2016D-Nano1June2019-v1/NANOAOD'),
ComponentContainer('SingleElectron_Run2016E', '/SingleElectron/Run2016E-Nano1June2019-v1/NANOAOD'),
ComponentContainer('SingleElectron_Run2016F', '/SingleElectron/Run2016F-Nano1June2019-v1/NANOAOD'),
ComponentContainer('SingleElectron_Run2016G', '/SingleElectron/Run2016G-Nano1June2019-v1/NANOAOD'),
ComponentContainer('SingleElectron_Run2016H', '/SingleElectron/Run2016H-Nano1June2019-v1/NANOAOD'),
]
MuonEG = [
ComponentContainer('MuonEG_Run2016B', '/MuonEG/Run2016B_ver2-Nano1June2019_ver2-v1/NANOAOD'),
ComponentContainer('MuonEG_Run2016C', '/MuonEG/Run2016C-Nano1June2019-v1/NANOAOD'),
ComponentContainer('MuonEG_Run2016D', '/MuonEG/Run2016D-Nano1June2019-v1/NANOAOD'),
ComponentContainer('MuonEG_Run2016E', '/MuonEG/Run2016E-Nano1June2019-v3/NANOAOD'),
ComponentContainer('MuonEG_Run2016F', '/MuonEG/Run2016F-Nano1June2019-v1/NANOAOD'),
ComponentContainer('MuonEG_Run2016G', '/MuonEG/Run2016G-Nano1June2019-v1/NANOAOD'),
ComponentContainer('MuonEG_Run2016H', '/MuonEG/Run2016H-Nano1June2019-v1/NANOAOD'),
]
DoubleMuon = [
#ComponentContainer('DoubleMuon_Run2016B', '/DoubleMuon/Run2016B_ver2-Nano1June2019_ver2-v1/NANOAOD'),
#ComponentContainer('DoubleMuon_Run2016C', '/DoubleMuon/Run2016C-Nano1June2019-v1/NANOAOD'),
#ComponentContainer('DoubleMuon_Run2016D', '/DoubleMuon/Run2016D-Nano1June2019-v1/NANOAOD'),
#ComponentContainer('DoubleMuon_Run2016E', '/DoubleMuon/Run2016E-Nano1June2019-v1/NANOAOD'),
ComponentContainer('DoubleMuon_Run2016F', '/DoubleMuon/Run2016F-Nano1June2019-v1/NANOAOD'),
ComponentContainer('DoubleMuon_Run2016G', '/DoubleMuon/Run2016G-Nano1June2019-v1/NANOAOD'),
#ComponentContainer('DoubleMuon_Run2016H', '/DoubleMuon/Run2016H-Nano1June2019-v1/NANOAOD'),
]
DoubleEG = [
#ComponentContainer('DoubleEG_Run2016B', '/DoubleEG/Run2016B_ver2-Nano1June2019_ver2-v1/NANOAOD'),
#ComponentContainer('DoubleEG_Run2016C', '/DoubleEG/Run2016C-Nano1June2019-v1/NANOAOD'),
#ComponentContainer('DoubleEG_Run2016D', '/DoubleEG/Run2016D-Nano1June2019-v1/NANOAOD'),
ComponentContainer('DoubleEG_Run2016E', '/DoubleEG/Run2016E-Nano1June2019-v1/NANOAOD'),
ComponentContainer('DoubleEG_Run2016F', '/DoubleEG/Run2016F-Nano1June2019-v1/NANOAOD'),
ComponentContainer('DoubleEG_Run2016G', '/DoubleEG/Run2016G-Nano1June2019-v1/NANOAOD'),
ComponentContainer('DoubleEG_Run2016H', '/DoubleEG/Run2016H-Nano1June2019-v1/NANOAOD'),
]
MET = [
ComponentContainer('MET_Run2016B', '/MET/Run2016B_ver2-Nano1June2019_ver2-v1/NANOAOD'),
ComponentContainer('MET_Run2016C', '/MET/Run2016C-Nano1June2019-v1/NANOAOD'),
ComponentContainer('MET_Run2016D', '/MET/Run2016D-Nano1June2019-v1/NANOAOD'),
ComponentContainer('MET_Run2016E', '/MET/Run2016E-Nano1June2019-v1/NANOAOD'),
ComponentContainer('MET_Run2016F', '/MET/Run2016F-Nano1June2019-v1/NANOAOD'),
ComponentContainer('MET_Run2016G', '/MET/Run2016G-Nano1June2019-v1/NANOAOD'),
ComponentContainer('MET_Run2016H', '/MET/Run2016H-Nano1June2019-v3/NANOAOD'),
]
samples = DoubleMuon + DoubleEG #SingleMuon +SingleElectron+MuonEG+DoubleMuon+DoubleEG+MET
for sample in samples:
sample.options['isData'] = True
| [
"cericeci@cern.ch"
] | cericeci@cern.ch |
509b2052e93dc02e9e8f97eb7cc10fb9b0b181a0 | 7b55cfc4ffa7678e4c7b8f2312831ebbd549e54f | /proj1/tests/other-tests/oskis-angels_tests/regexperts_tests/correct/func_nest_defs.py | fcbe6d5a9f1217b313e3ce3e869fba529175c3e8 | [] | no_license | czchen1/cs164-projects | 0d330efef85421e611a436b165428ba0ddfb3512 | a04cafbcaafd32e518227dacf89a6d7837bf9f57 | refs/heads/master | 2020-03-27T04:03:31.727524 | 2018-08-23T21:43:46 | 2018-08-23T21:43:46 | 145,909,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | def f():
x = 4
def g():
def g1(x, y, z):
x = 4; y = 3; z = 2
return 5
g()
def h():
def i():
print "hi"
def z(): print "one line"; print "one line"
| [
"czchen@mit.edu"
] | czchen@mit.edu |
5d6ac8fe13dff9c90f787e5970edeaa51cc5ef95 | 09b3f97fe44e0fcbad1fcffdd598559e64dedf3e | /train_on_batch.py | a1a5bf07aaf0856fb323e6cbcc3d3314be7adf20 | [] | no_license | CAHLR/DKT_pre | 7f647f4a6d47ab4dcd7b518b781f7e00680dbfbc | d32da7300b3181bd846b746456e3cb88da22e14a | refs/heads/master | 2021-06-20T14:30:46.414465 | 2017-07-13T07:11:49 | 2017-07-13T07:11:49 | 92,901,808 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,950 | py | # coding: utf-8
import numpy as np
import csv
import utils
from keras.models import Model
from dataAssist import DataAssistMatrix
from keras.layers import Input, Dense, Dropout, Masking
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import merge
from keras.layers import Dot
from keras import backend as K
from theano import tensor as T
from theano import config
from theano import printing
from keras.layers import Lambda
import theano
import numpy as np
import utils
import my_callbacks
import pdb
from DKT import *
from keras.preprocessing import sequence
import pdb
import my_callbacks
import pickle
from dataAssist import DataAssistMatrix, student
import random
import sys
data = DataAssistMatrix()
data.build()
batch_size = 16
input_dim_order = int(data.max_questionID + 1)
input_dim = 2 * input_dim_order
epoch = 10
hidden_layer_size = 512
validation_slpit = 0.2 #extract validation data from training data
validation_data = [] # sample validation data based on validation_split on every epoch
train_data = []
for student in data.trainData:
if random.uniform(0,1)<validation_slpit:
validation_data.append(student)
else: train_data.append(student)
print('The total size of raw data is: ', sys.getsizeof(data.trainData))
data.trainData = [] # To save memory.
DKTmodel = DKTnet(input_dim, input_dim_order, hidden_layer_size, batch_size, epoch)
DKTmodel.build_train_on_batch()
sum_acc = [] # using for earlystopping
sum_rmse = []# using for earlystopping
for epo in range(epoch):
'''Initializing'''
x_train = []
y_train = []
y_train_order = []
num_student = 0 # num of TRAINING student in each epoch
print ('Now starts the ',epo+1,'th epoch')
'''Training part starts from now'''
random.shuffle(train_data)
print('Training data is shuffled')
for student in train_data:
num_student += 1
# print (num_student)
if num_student % batch_size == 0:
if num_student % (batch_size*10) == 0:
print ("Training when num student is",num_student)
x_train = np.array(x_train)
y_train = np.array(y_train)
y_train_order = np.array(y_train_order)
x_train = x_train[:,:-1,:]
y_train = y_train[:,1:,:]
y_train_order = y_train_order[:,1:,:]
DKTmodel.train_on_batch(x_train, y_train, y_train_order)
x_train = []
y_train = []
y_train_order = []
x_single_train = np.zeros([input_dim, data.longest])
y_single_train = np.zeros([1, data.longest])
y_single_train_order = np.zeros([input_dim_order, data.longest])
for i in range(student.n_answers):
if student.correct[i] == 1.: # if correct
x_single_train[student.ID[i]*2-1, i] = 1.
elif student.correct[i] == 0.: # if wrong
x_single_train[student.ID[i]*2, i] = 1.
else:
print (student.correct[i])
print ("wrong length with student's n_answers or correct")
y_single_train[0, i] = student.correct[i]
y_single_train_order[student.ID[i], i] = 1.
for i in range(data.longest-student.n_answers):
x_single_train[:,student.n_answers + i] = -1
y_single_train[:,student.n_answers + i] = -1
#notice that the padding value of order is still zero.
y_single_train_order[:,student.n_answers + i] = 0
x_single_train = np.transpose(x_single_train)
y_single_train = np.transpose(y_single_train)
y_single_train_order = np.transpose(y_single_train_order)
x_train.append(x_single_train)
y_train.append(y_single_train)
y_train_order.append(y_single_train_order)
print ("train num students", num_student)
print ("validation num students", len(validation_data))
'''Validation part starts from now'''
x_val = []
y_val = []
y_val_order = []
num_val = 0
y_pred_total = []
y_true_total = []
rmse = []
acc = []
callback = TestCallback()
for student in validation_data:
num_val += 1
if num_val % batch_size == 0:
if num_val % (batch_size*10) == 0:
print ("Predicting when num student is",num_val)
x_val = np.array(x_val)
y_val = np.array(y_val)
y_val_order = np.array(y_val_order)
x_val = x_val[:,:-1,:]
y_val = y_val[:,1:,:]
y_val_order = y_val_order[:,1:,:]
# DKTmodel = DKTnet(input_dim, input_dim_order, hidden_layer_size, batch_size, epoch,
# x_val, y_val, y_val_order)
# DKTmodel.train_on_batch()
y_pred = DKTmodel.predict(x_val,y_val_order)
y_pred.flatten()
y_val.flatten()
# y_val is y_true
tmp_rmse, tmp_acc = callback.rmse_masking_on_batch(y_val, y_pred, y_val_order)
rmse += (tmp_rmse)
acc += (tmp_acc)
# y_pred_total = y_pred_total + list(y_pred)
# y_true_total = y_true_total + list(y_val)
x_val = []
y_val = []
y_val_order = []
x_single_val = np.zeros([input_dim, data.longest])
y_single_val = np.zeros([1, data.longest])
y_single_val_order = np.zeros([input_dim_order, data.longest])
for i in range(student.n_answers):
if student.correct[i] == 1.: # if correct
x_single_val[student.ID[i]*2-1, i] = 1.
elif student.correct[i] == 0.: # if wrong
x_single_val[student.ID[i]*2, i] = 1.
else:
print (student.correct[i])
print ("wrong length with student's n_answers or correct")
y_single_val[0, i] = student.correct[i]
y_single_val_order[student.ID[i], i] = 1.
for i in range(data.longest-student.n_answers):
x_single_val[:,student.n_answers + i] = -1
y_single_val[:,student.n_answers + i] = -1
#notice that the padding value of order is still zero.
y_single_val_order[:,student.n_answers + i] = 0
x_single_val = np.transpose(x_single_val)
y_single_val = np.transpose(y_single_val)
y_single_val_order = np.transpose(y_single_val_order)
x_val.append(x_single_val)
y_val.append(y_single_val)
y_val_order.append(y_single_val_order)
avg_rmse, avg_acc = sum(rmse)/float(len(rmse)), sum(acc)/float(len(acc))
print('\nTesting avg_rmse: {}\n'.format(avg_rmse))
print('\nTesting avg_acc: {}\n'.format(avg_acc))
sum_acc.append(avg_acc)
sum_rmse.append(avg_rmse)
if len(sum_acc)>=3 and sum_acc[-1]<sum_acc[-2] and sum_acc[-2]<sum_acc[-3]: # patience is 2
print ('sum_acc:',sum_acc)
print ('sum_rmse:', sum_rmse)
pdb.set_trace()
| [
"hcr14@mails.tsinghua.edu.cn"
] | hcr14@mails.tsinghua.edu.cn |
ec634087baf06ea0641e82efaf57a58e65246093 | 98dec9457017be2a4b1f7ac7f72963c06382737b | /what_do_you_see.py | ca11b7e2c94e5b33fc5cb31deed8b90e227cfa9d | [] | no_license | leweryan/data_science | 09aae0cd47ecf4930da937e864c8bb7c9982ddf4 | 686bc1087ad6f605c1f609ab93692ec1deba70f6 | refs/heads/master | 2021-01-25T01:21:37.464564 | 2017-06-20T16:58:06 | 2017-06-20T16:58:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,498 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
def main():
csv_eur_usd = pd.read_csv('EURUSD_15m_BID_01.01.2010-31.12.2016.csv')
eur_usd = pd.DataFrame(csv_eur_usd)
"""
Data looks like:
Time Open High Low Close Volume
0 2010-01-01 00:00 1.43283 1.43293 1.43224 1.43293 608600007.1
"""
# CALCULATE relevant metrics and columns to be used later
eur_usd['range'] = eur_usd['High'] - eur_usd['Low']
eur_usd['Time'] = pd.to_datetime(eur_usd['Time'])
eur_usd['Day'] = eur_usd['Time'].dt.weekday_name
average = eur_usd['range'].mean()
standard_deviation = eur_usd['range'].std()
average_volume = eur_usd['Volume'].mean()
standard_deviation_volume = eur_usd['Volume'].std()
detailed_output = False
print(
"-------\n"
"PREMISE\n"
"-------\n"
"As larger change in price provides greater potential profits for "
"trades in Forex, we want to find the largest candles (candle size "
"represents price fluctations within a given time window (in this "
"case, 15 minutes).")
print(
"\n"
"\n"
"-------------------------------------------------------\n"
"PLOT 1: Price Ranges (with Standard Deviation and Mean)\n"
"-------------------------------------------------------")
plt.figure(figsize=(14, 7))
plt.subplot(1, 2, 1)
plt.axvline(x=average, color='blue', label='Average')
plt.axvline(
x=0 if (average-standard_deviation) < 0
else (average-standard_deviation),
linestyle='dashed', color='green', label='Average - 1SD')
plt.axvline(
x=average+standard_deviation, linestyle='dashed',
color='green', label='Average + 1SD')
count_above_SD = len(
eur_usd[eur_usd['range']>(average+standard_deviation)])
total_count = eur_usd['range'].count()
if detailed_output:
print(
"Of all candles:\n"
" {} of {} of all candles are greater than "
"1 SD above average (~{}%)".format(
count_above_SD, total_count,
round((100.0*count_above_SD)/total_count)))
plt.hist(eur_usd['range'], bins=500)
plt.xlim([-.0001, .0045])
plt.title('EUR/USD Candle Range (High - Low) Distribution')
plt.ylabel('# Candles')
plt.xlabel('Range of Candle (High-Low)')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(eur_usd['Time'], eur_usd['range'], '*', markersize=0.5, label='')
plt.axhline(y=average, color='blue', label='Average')
plt.axhline(
y=average+standard_deviation, linestyle='dashed', color='green',
label='Average + 1 SD')
plt.ylim([-0.0001, 0.008])
plt.title('EUR/USD Candle Range (High - Low) By Date')
plt.ylabel('Candle Range (High-Low)')
plt.xlabel('Date and Time')
plt.xticks(rotation='vertical')
plt.legend()
plt.show()
print(
"...so\n"
"Let's get an overview of candle sizes.\n"
"\n"
"Plotting the candle size distribution, we can see that many candles "
"are very small, so we want to filter out those candles (specifically "
"if they have no price change, likely denoting the market is closed), "
"and recalculate the mean and standard deviation to avoid noise.\n"
"\n"
"Plotting (above average) candles by time, shows peaks in "
"concentrated clusters, suggesting candles with higher range occur in "
"groups by time, but not obviously predictably, as they are "
"irregularly distributed.")
# Recalculate after filter out candles with no price change
eur_usd = eur_usd[eur_usd['range'] > 0]
average = eur_usd['range'].mean()
standard_deviation = eur_usd['range'].std()
average_volume = eur_usd['Volume'].mean()
standard_deviation_volume = eur_usd['Volume'].std()
print(
"\n"
"\n"
"-------------------------------\n"
"PLOT 2: Volume vs. Candle Range\n"
"-------------------------------")
plt.figure(figsize=(14, 7))
plt.plot(
eur_usd['Volume'], eur_usd['range'], '*', markersize=0.2, alpha=0.4,
color='C0', label='All Candles')
plt.axhline(y=average, color='blue', label='Average')
plt.axhline(
y=average+standard_deviation, linestyle='dashed', color='green',
label='Average + 1SD')
plt.axhline(
y=average-standard_deviation, linestyle='dashed', color='green',
label='Average - 1SD')
first_peak = 1.06 * (10 ** 9)
plt.axvline(
x=first_peak, color='pink', linestyle='dotted', linewidth = 1.5,
label='1st Peak')
second_peak = 3.4 * (10 ** 9)
plt.axvline(
x=second_peak, color='red', linestyle='dotted', linewidth = 1.5,
label='2nd Peak')
volume_drop_off = 6.0 * (10 ** 9)
plt.axvline(
x=volume_drop_off, color='orange', linestyle='dotted', linewidth = 1.5,
label='Drop Off')
plt.ylim([0, .008])
plt.xlim([0, 1.5*(10**10)])
plt.title('EUR/USD Volume Vs. Candle Range')
plt.ylabel('Price Change')
plt.xlabel('Volume')
plt.legend()
plt.show()
print(
"...so\n"
"Perhaps certain volumes will have larger candles?\n"
"\n"
"It seems that there are two approximate peaks with a high "
"concentration of large candles located at certain volumes. Perhaps "
"more interestingly, the bottom of the distribution has a general "
"upward slope.\n"
"\n"
"Let's consider candles about midway through this concentrated body's "
"upward slope, which we can arbitrarily pick as the second peak. It "
"seems that there are less large candles after volume {}, and only a "
"sparse distribution of candles under the average, so let's add an "
"additional constraint to consider candles below this volume.".format(
volume_drop_off))
print(
"\n"
"\n"
"---------------------------------------------------------\n"
"PLOT 3: Candle Range Distribution (All vs. Larger Volume)\n"
"---------------------------------------------------------")
plt.figure(figsize=(14, 7))
plt.axvline(x=average, color='blue', label='Average')
plt.axvline(x=0 if (average-standard_deviation) < 0
else (average-standard_deviation),
linestyle='dashed', color='green', label='Average - 1SD')
plt.axvline(x=average+standard_deviation, linestyle='dashed',
color='green', label='Average + 1SD')
plt.hist(
eur_usd['range'], bins=500, color='C0', normed=True, alpha=0.6,
label='All Candles')
with_large_volume = eur_usd[eur_usd['Volume']>second_peak]
with_large_volume = with_large_volume[with_large_volume['Volume']<volume_drop_off]
plt.hist(
with_large_volume['range'], bins=500, color='orange', normed=True,
alpha=0.6, label='Candles With Higher Volume')
plt.axvline(x=with_large_volume['range'].mean(), color='red',
label='Mean (For Larger Volume)')
large_volume_count = len(with_large_volume)
percent_with_volume = round((100.0*large_volume_count)/total_count)
if detailed_output:
print(
"For candles with volume greater than {}:\n"
" {} of all {} candles are in this set. (~{}%)".
format(second_peak, large_volume_count, total_count,
percent_with_volume))
percent_greater_than_average = round(
(100.0*len(with_large_volume[with_large_volume['range']>average]))
/large_volume_count)
percent_greater_than_standard_deviation = round(
(100.0*len(with_large_volume[with_large_volume['range']
>(average+standard_deviation)]))/large_volume_count)
if detailed_output:
print(
" {}% of set is larger than original average\n"
" {}% of set is larger than orignal average + 1 SD"
.format(percent_greater_than_average,
percent_greater_than_standard_deviation))
plt.xlim([-.0001, .0084])
plt.ylim([0, 1000])
plt.title("EUR/USD Candle Range Distribution (All vs. Large Volume)")
plt.ylabel('# Candles (Normalized)')
plt.xlabel('Range of Candle (High-Low)')
plt.xticks(rotation='vertical')
plt.legend()
plt.show()
"""
For candles with volume greater than 3400000000.0:
32530 of all 245444 candles are in this set. (~13.0%)
68% of set is larger than original average
32% of set is larger than orignal average + 1 SD
"""
print(
"...so\n"
"Looking at candles with volumes past our second peak in candle size:"
"\n"
"We see that {}% of candles are larger than the average, as opposed "
"to ~50% unfiltered. The trade off is that there are less total "
"candles ({}% in this set) and inherently less total candles above "
"average size in this set.".format(
percent_greater_than_average, percent_with_volume))
print(
"\n"
"\n"
"------------------------------------------------------\n"
"PLOT 4: Price Fluctuation by Day of Week\n"
"------------------------------------------------------\n")
plt.figure(figsize=(14, 7))
plt.subplot(1, 3, 1)
day_conversion = {
'Monday':0,
'Tuesday':1,
'Wednesday': 2,
'Thursday': 3,
'Friday': 4,
'Saturday': 5,
'Sunday': 6
}
total_count_day = pd.DataFrame(eur_usd.groupby('Day').count()['range'])
for index in total_count_day.index:
total_count_day.loc[index, 'Day_Number'] = day_conversion[index]
plt.bar(
total_count_day['Day_Number'], total_count_day['range'], color='blue')
plt.xticks(range(8), ('Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun'))
plt.title("EUR/USD Total Candle Count\nBy Day of Week")
plt.ylabel('# Candles')
plt.xlabel('Day of Week')
plt.subplot(1, 3, 2)
count_day = eur_usd[eur_usd['range'] > average]
count_day = pd.DataFrame(count_day.groupby('Day').count()['range'])
for index in count_day.index:
count_day.loc[index, 'Day_Number'] = day_conversion[index]
plt.bar(count_day['Day_Number'], count_day['range'], color='blue')
plt.xticks(range(8), ('Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun'))
plt.title("EUR/USD Above Average Candle\nCount By Day of Week")
plt.ylabel('# Candles')
plt.xlabel('Day of Week')
plt.subplot(1, 3, 3)
sum_day = eur_usd[eur_usd['range'] > average]
sum_day = pd.DataFrame(sum_day.groupby('Day').sum()['range'])
for index in sum_day.index:
sum_day.loc[index, 'Day_Number'] = day_conversion[index]
plt.bar(sum_day['Day_Number'], sum_day['range'], color='green')
plt.xticks(range(8), ('Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun'))
plt.title("EUR/USD Cumulative Candle Size\n(Above Average) By Day of Week")
plt.ylabel('Candle Size Total')
plt.xlabel('Day of Week')
plt.tight_layout()
plt.show()
print(
"...so\n"
"Let's see if any days are especially better for finding large "
"candles.\n"
"We can see that Tuesday through Friday are the best days to find "
"above average candles, as these days have the most total candles, "
"most candles above average size, and the largest cumulative candle "
"range. Of these four days, none is especially outstanding.")
print(
"\n"
"\n"
"-----------------------------------\n"
"PLOT 5: Price Level vs. Price Range\n"
"-----------------------------------")
plt.figure(figsize=(14, 7))
plt.plot(
eur_usd['Open'], eur_usd['range'], '*', color='C0', markersize=0.2,
alpha=.1, label='')
plt.plot(
eur_usd['High'], eur_usd['range'], '*', color='C0', markersize=0.2,
alpha=.1, label='')
plt.plot(
eur_usd['Low'], eur_usd['range'], '*', color='C0', markersize=0.2,
alpha=.1, label='')
plt.plot(
eur_usd['Close'], eur_usd['range'], '*', color='C0', markersize=0.2,
alpha=.1, label='')
plt.axhline(y=average, color='blue', label='Average')
plt.axhline(
y=average+standard_deviation, linestyle='dashed', color='green',
label='Average + 1SD')
price_threshold = 1.394
plt.axvline(
x=price_threshold, linestyle='dashed', color='red',
label='Price Threshold')
plt.ylim(-0.0001, .006)
plt.title("Price Level vs. Price Range")
plt.xlabel("Price Level")
plt.ylabel("Price Range")
plt.legend()
plt.show()
print(
"...so\n"
"Let's see if any price levels are especially better for finding "
"large candles.\n"
"With darker columns and sparse gaps, it seems that candles, in "
"general, occur more frequently at certain price levels, but large "
"candles do not relatively become more common than smaller candles "
"at any particular price level, except for perhaps slightly past "
"price of {}. The trade off is that candles occur much less "
"frequently past this price level.".format(price_threshold))
print(
"\n"
"\n"
"---------------------------------------------------------------\n"
"PLOT 6: Candle Range Distribution (All vs. Higher Price Levels)\n"
"---------------------------------------------------------------")
plt.figure(figsize=(14, 7))
plt.axvline(x=average, color='blue', label='Average')
plt.axvline(
x=0 if (average-standard_deviation) < 0
else (average-standard_deviation),
linestyle='dashed', color='green', label='Average - 1SD')
plt.axvline(
x=average+standard_deviation, linestyle='dashed', color='green',
label='Average + 1SD')
plt.hist(
eur_usd['range'], bins=500, color='C0', normed=True, alpha=0.6,
label='All Candles')
with_high_price = eur_usd[eur_usd['High']>price_threshold]
plt.hist(
with_high_price['range'], bins=500, color='orange', normed=True,
alpha=0.6, label='Candles With Higher Price')
plt.axvline(x=with_high_price['range'].mean(), color='red',
label='Average for Higher Price')
high_price_count = len(with_high_price)
percent_with_price = round((100.0*high_price_count)/total_count)
if detailed_output:
print(
"For candles with price high greater than price {}:\n"
" {} of all {} candles are in this set. (~{}%)".
format(
price_threshold, high_price_count, total_count,
percent_with_price))
percent_greater_than_average = round(
(100.0*len(with_high_price[with_high_price['range']>average]))
/high_price_count)
percent_greater_than_standard_deviation = round(
(100.0*len(with_high_price[with_high_price['range']
>(average+standard_deviation)]))/high_price_count)
if detailed_output:
print(
" {}% of set is larger than original average\n"
" {}% of set is larger than orignal average + 1 SD"
.format(percent_greater_than_average,
percent_greater_than_standard_deviation))
plt.xlim([-.0001, .005])
plt.title("EUR/USD Candle Range Distribution (All vs. High Price)")
plt.ylabel('# Candles (Normalized)')
plt.xlabel('Range of Candle (High-Low)')
plt.xticks(rotation='vertical')
plt.legend()
plt.show()
"""
For candles with price high greater than price 1.394:
16133 of all 245444 candles are in this set. (~7.0%)
63% of set is larger than original average
24% of set is larger than orignal average + 1 SD
"""
print(
"...so\n"
"Looking at candles with prices past our price threshold, {}:\n"
"{}% (as opposed to 50% without filtering) of candles are above the "
"average range, so there is a slight increase in average candle size "
"when considering this subset. The trade off, is that we only have "
"{}% of all candles, and so we inherently have less large candles."
.format(
price_threshold, percent_greater_than_average, percent_with_price))
if __name__ == '__main__':
main() | [
"leweryan@ucla.edu"
] | leweryan@ucla.edu |
14946cf49c21ab2682957c8a0a6f6f81dad0a42f | db415a9fef09c6edd7852d34da9203d295d488db | /apps/lead_app/apps.py | 16aa5187af41dfe92bc3f14942760e189edbd1ad | [] | no_license | cleibow/lead_generator | 3016ee0347c6eb6a742fca1b2ee70e071f56dab8 | 8a5edcd6e2f82f718f7b37c7473512105e268a40 | refs/heads/master | 2021-01-24T03:26:09.755244 | 2018-02-26T00:09:18 | 2018-02-26T00:09:18 | 122,890,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class LeadAppConfig(AppConfig):
name = 'lead_app'
| [
"cleibow@tulane.edu"
] | cleibow@tulane.edu |
f2a17579595b39ecde9b8618e55ad4f799fb1645 | e396ac87856ae8991c10e50aa7dca49979749d6e | /lib/libServer.py | 74b89ea0bb8404c8617ce443697513e0ea7eafbc | [] | no_license | rosig/Infracom-Project | b3f6f5899b9b2b673f06f765bd48bfb7717a5494 | 0bc0da9c7cb32c2a984d8ed2a8da75b56d0f91df | refs/heads/master | 2020-04-04T14:42:47.288146 | 2018-12-18T17:50:38 | 2018-12-18T17:50:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | from lib.constants import *
from socket import *
class TCPServerSocket:
def __init__(self):
self.socket = socket(AF_INET, SOCK_STREAM)
self.connectedSockets = []
self.socket.bind(('', CLI_REP_PORT))
self.socket.listen(1)
def acceptConnection(self):
connSocket = self.socket.accept()[0]
self.connectedSockets.append(connSocket)
print("Connected")
return self.connectedSockets.index(connSocket)
def recvMessage(self, ind):
sock = self.connectedSockets[ind]
msg = sock.recv(BUFFER_SIZE)
return msg.decode('utf-8')
def sendMessage(self, msg, ind):
self.connectedSockets[ind].send(msg.encode('utf-8'))
def closeConnection(self, ind):
self.connectedSockets[ind].close()
def close(self):
self.socket.close()
| [
"38705588+rosinaldoguedes@users.noreply.github.com"
] | 38705588+rosinaldoguedes@users.noreply.github.com |
406729e6eccfd065f28c740e5baefd7cfbba114d | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnungath.py | e19f8d0632c95a744bf3da271d720a8dcfd1418d | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 44 | py | ii = [('RoscTTI2.py', 1), ('IrviWVD.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
853e4dfd424ea2e85d872450425c1f99af5e2679 | f21b9a08cae008fd09e14ded25fdd0b3b651da4d | /tests/web/test_async_web_client.py | f7302dcdfe980bdae4fa2bc3afb82caaa9e1660b | [
"MIT"
] | permissive | nrajasekar-quotient/python-slackclient | 46ec4adf030994090123558742199077bfd0b595 | 042bb05edbada181e3bc29b12c5d95ffb75588a0 | refs/heads/main | 2022-11-24T09:46:05.924063 | 2020-08-03T04:40:47 | 2020-08-03T04:40:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,121 | py | import re
import unittest
import aiohttp
import slack.errors as err
from slack import AsyncWebClient
from tests.helpers import async_test
from tests.web.mock_web_api_server import setup_mock_web_api_server, cleanup_mock_web_api_server
class TestAsyncWebClient(unittest.TestCase):
def setUp(self):
setup_mock_web_api_server(self)
self.client = AsyncWebClient(
token="xoxp-1234",
base_url="http://localhost:8888",
)
    def tearDown(self):
        # Stop the local mock Slack API server started in setUp.
        cleanup_mock_web_api_server(self)
pattern_for_language = re.compile("python/(\\S+)", re.IGNORECASE)
pattern_for_package_identifier = re.compile("slackclient/(\\S+)")
@async_test
async def test_api_calls_return_a_future(self):
self.client.token = "xoxb-api_test"
resp = await self.client.api_test()
self.assertEqual(200, resp.status_code)
self.assertTrue(resp["ok"])
@async_test
async def test_requests_can_be_paginated(self):
self.client.token = "xoxb-users_list_pagination"
users = []
async for page in await self.client.users_list(limit=2):
users = users + page["members"]
self.assertTrue(len(users) == 4)
@async_test
async def test_request_pagination_stops_when_next_cursor_is_missing(self):
self.client.token = "xoxb-users_list_pagination_1"
users = []
async for page in await self.client.users_list(limit=2):
users = users + page["members"]
self.assertTrue(len(users) == 2)
@async_test
async def test_json_can_only_be_sent_with_post_requests(self):
with self.assertRaises(err.SlackRequestError):
await self.client.api_call("fake.method", http_verb="GET", json={})
@async_test
async def test_slack_api_error_is_raised_on_unsuccessful_responses(self):
self.client.token = "xoxb-api_test_false"
with self.assertRaises(err.SlackApiError):
await self.client.api_test()
self.client.token = "xoxb-500"
with self.assertRaises(err.SlackApiError):
await self.client.api_test()
@async_test
async def test_slack_api_rate_limiting_exception_returns_retry_after(self):
self.client.token = "xoxb-rate_limited"
try:
await self.client.api_test()
except err.SlackApiError as slack_api_error:
self.assertFalse(slack_api_error.response["ok"])
self.assertEqual(429, slack_api_error.response.status_code)
self.assertEqual(30, int(slack_api_error.response.headers["Retry-After"]))
@async_test
async def test_the_api_call_files_argument_creates_the_expected_data(self):
self.client.token = "xoxb-users_setPhoto"
resp = await self.client.users_setPhoto(image="tests/data/slack_logo.png")
self.assertEqual(200, resp.status_code)
@async_test
async def test_issue_560_bool_in_params_sync(self):
self.client.token = "xoxb-conversations_list"
await self.client.conversations_list(exclude_archived=1) # ok
await self.client.conversations_list(exclude_archived="true") # ok
await self.client.conversations_list(exclude_archived=True) # ok
@async_test
async def test_issue_690_oauth_v2_access_async(self):
self.client.token = ""
resp = await self.client.oauth_v2_access(
client_id="111.222",
client_secret="secret",
code="codeeeeeeeeee",
)
self.assertIsNone(resp["error"])
with self.assertRaises(err.SlackApiError):
await self.client.oauth_v2_access(
client_id="999.999",
client_secret="secret",
code="codeeeeeeeeee",
)
@async_test
async def test_issue_690_oauth_access_async(self):
self.client.token = ""
resp = await self.client.oauth_access(client_id="111.222", client_secret="secret", code="codeeeeeeeeee")
self.assertIsNone(resp["error"])
with self.assertRaises(err.SlackApiError):
await self.client.oauth_access(client_id="999.999", client_secret="secret", code="codeeeeeeeeee")
@async_test
async def test_token_param_async(self):
with self.assertRaises(err.SlackApiError):
await self.client.users_list()
resp = await self.client.users_list(token="xoxb-users_list_pagination")
self.assertIsNone(resp["error"])
with self.assertRaises(err.SlackApiError):
await self.client.users_list()
@async_test
async def test_timeout_issue_712_async(self):
with self.assertRaises(Exception):
await self.client.users_list(token="xoxb-timeout")
@async_test
async def test_html_response_body_issue_718_async(self):
try:
await self.client.users_list(token="xoxb-html_response")
self.fail("SlackApiError expected here")
except err.SlackApiError as e:
self.assertTrue(
str(e).startswith("Failed to parse the response body: Expecting value: line 1 column 1 (char 0)"), e)
| [
"noreply@github.com"
] | nrajasekar-quotient.noreply@github.com |
5d13bd0311f09d27712d869ce713dbf2512b4c31 | 557125555de5aa6cb3501468b89e4bc8e2e6f94c | /plot_corner.py | bfadbf611b05541936007ca6f24cef85da090a54 | [
"MIT"
] | permissive | jacob975/deep_learning | 3cb5a9844549fbc503db2682db9946a2fed5b18e | b62853b72ebdac84a79ce1be792d421938e26079 | refs/heads/master | 2022-10-30T00:59:57.472905 | 2022-10-08T06:25:21 | 2022-10-08T06:25:21 | 116,223,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,996 | py | #!/usr/bin/python3
'''
Abstract:
This is a program for plotting the corner plot of given band index.
Usage:
plot_corner.py [band index code] [sed table] [cls table]
e.g. $plot_corner.py 678 sed_table.txt cls_table.txt
A corner plot of band 6, 7, and 8 will be returned.
Output:
1. The figure of corner diagram.
Editor:
Jacob975
##################################
# Python3 #
# This code is made in python3 #
##################################
20200805
##################1##################
update log
2020085 version alpha 1
1. The code works.
'''
import time
import numpy as np
from sys import argv
from matplotlib import pyplot as plt
from convert_lib import set_SCAO, mJy_to_mag_noerr
def load_flux_color(band_index, sed_table):
    """Convert a band-index code into magnitudes or a magnitude color.

    Parameters
    ----------
    band_index : str
        Either ``'fN'`` (magnitude of band N, 1-based index into the
        module-level ``bands`` sequence) or ``'cNM'`` (color between
        bands N and M, 1-based).
    sed_table : numpy.ndarray
        2-D array whose columns are the band fluxes, in the order of the
        module-level ``bands`` list (presumably in mJy, matching
        ``mJy_to_mag_noerr`` — TODO confirm against convert_lib).

    Returns
    -------
    tuple
        ``(values, axis_label)`` — the magnitude (or color) array and a
        human-readable axis label such as ``"J (mag)"`` or
        ``"J - H (mag)"``.

    Raises
    ------
    ValueError
        If *band_index* starts with neither ``'f'`` nor ``'c'``.
        (The original silently fell through and later crashed with an
        UnboundLocalError on ``out_name``.)
    """
    # For Flux: a single band converted straight to magnitudes.
    if band_index[0] == 'f':
        seq1 = int(band_index[1]) - 1
        flux1 = sed_table[:, seq1]
        mag1 = mJy_to_mag_noerr(
            SCAO_system[bands[seq1]][2],
            flux1
        )
        outp = mag1
        out_name = "{0} (mag)".format(SCAO_system[bands[seq1]][0])
    # For Color: the difference between the two bands' magnitudes.
    elif band_index[0] == 'c':
        seq1 = int(band_index[1]) - 1
        seq2 = int(band_index[2]) - 1
        flux1 = sed_table[:, seq1]
        flux2 = sed_table[:, seq2]
        mag1 = mJy_to_mag_noerr(
            SCAO_system[bands[seq1]][2],
            flux1
        )
        mag2 = mJy_to_mag_noerr(
            SCAO_system[bands[seq2]][2],
            flux2
        )
        outp = mag1 - mag2
        out_name = "{0} - {1} (mag)".format(
            SCAO_system[bands[seq1]][0],
            SCAO_system[bands[seq2]][0]
        )
    else:
        # Fail fast with a clear message instead of an UnboundLocalError.
        raise ValueError(
            "Unknown band index code: {0} (expected 'f...' or 'c...')".format(band_index)
        )
    return outp, out_name
def adjust_ax(inp_axes, row_i, col_i):
    """Apply the shared panel styling to axes[row_i, col_i].

    Turns the grid on and points the major and minor ticks of both axes
    inward, so the corner-plot panels look uniform.
    """
    panel = inp_axes[row_i, col_i]
    panel.grid(True)
    # Same settings for the x- and y-axis: all ticks drawn inward.
    for which_axis in ('x', 'y'):
        panel.tick_params(
            axis=which_axis,
            which='both',
            direction='in'
        )
#--------------------------------------------
# Main code
if __name__ == "__main__":
    VERBOSE = 0
    # Measure time
    start_time = time.time()
    #-----------------------------------
    # Load argv
    if len(argv) != 4:
        print ("The number of arguments is wrong.")
        print ("Usage: plot_corner.py [band index code] [sed table] [cls table]")
        # Fixed: the example previously named the wrong script (plot_ccdiag.py).
        print ("Example: plot_corner.py 678 sed_table.txt cls_table.txt")
        exit()
    band_index_code = argv[1]
    sed_table_name = argv[2]
    cls_table_name = argv[3]
    #-----------------------------------
    # Initialize the band system (2MASS + Spitzer band order is assumed
    # to match the sed table's columns).
    SCAO_system = set_SCAO()
    bands = [
        'J',
        'H',
        'K',
        'IR1',
        'IR2',
        'IR3',
        'IR4',
        'MP1'
    ]
    #-----------------------------------
    # Load data: one row per source; cls labels 0=star, 1=galaxy, 2=YSO.
    print("Load data")
    sed_table = np.loadtxt(sed_table_name)
    cls_table = np.loadtxt(cls_table_name, dtype = int)
    index_star = np.where(cls_table == 0)[0]
    index_gala = np.where(cls_table == 1)[0]
    index_ysos = np.where(cls_table == 2)[0]
    # Convert each requested band (one digit per character of the code)
    # to magnitudes, keeping the axis labels alongside.
    num_index = len(band_index_code)
    data_index_list = []
    data_list = []
    data_name_list = []
    for c in band_index_code:
        tmp_index = 'f{0}'.format(c)
        tmp_data, tmp_name = load_flux_color(tmp_index, sed_table)
        data_index_list.append(tmp_index)
        data_list.append(tmp_data)
        data_name_list.append(tmp_name)
    #-----------------------------------
    # Plot the corner diagram: histograms on the diagonal, mag-mag
    # scatter plots in the lower triangle, nothing in the upper triangle.
    print("Plot the diagram")
    fig, axes = plt.subplots(
        num_index, num_index,
        figsize = (10,10),
    )
    # Adjust the panel style
    fig.suptitle('Corner plot')
    plt.subplots_adjust(wspace=0, hspace=0)
    # NOTE(review): the 'normed' kwarg below was removed in matplotlib
    # >= 3.1 in favor of 'density'; kept as-is for the pinned environment.
    for i in range(num_index):
        for j in range(num_index):
            # Diagonal: per-class magnitude histograms (blue=star,
            # green=galaxy, red=YSO), x-axis inverted so bright is right.
            if i == j:
                axes[i,j].invert_xaxis()
                axes[i,j].hist(
                    data_list[i][index_star],
                    50,
                    normed = 1,
                    facecolor = "b",
                    edgecolor = 'None',
                    alpha = 0.3,
                    zorder = 100,
                )
                axes[i,j].hist(
                    data_list[i][index_gala],
                    50,
                    normed = 1,
                    facecolor = "g",
                    edgecolor = 'None',
                    alpha = 0.3,
                    zorder = 100,
                )
                axes[i,j].hist(
                    data_list[i][index_ysos],
                    50,
                    normed = 1,
                    facecolor = "r",
                    edgecolor = 'None',
                    alpha = 0.3,
                    zorder = 100,
                )
            # Lower triangle: mag-mag scatter with both axes inverted.
            elif i > j:
                axes[i,j].invert_xaxis()
                axes[i,j].invert_yaxis()
                adjust_ax(axes, i, j)
                axes[i,j].scatter(
                    data_list[j][index_star],
                    data_list[i][index_star],
                    color = 'b',
                    s = 1,
                )
                axes[i,j].scatter(
                    data_list[j][index_gala],
                    data_list[i][index_gala],
                    color = 'g',
                    s = 1,
                )
                axes[i,j].scatter(
                    data_list[j][index_ysos],
                    data_list[i][index_ysos],
                    color = 'r',
                    s = 1,
                )
            # Upper triangle: hidden. (A histogram used to be drawn here
            # before the panel was hidden — pure dead work, now removed.)
            elif i < j:
                axes[i,j].set_visible(False)
    # Show axis labels only on the outer edge of the grid; elsewhere hide
    # the tick labels by drawing them with the 'None' color.
    for i in range(num_index):
        for j in range(num_index):
            if i == len(band_index_code)-1:
                axes[i,j].set_xlabel(
                    data_name_list[j],
                )
            else:
                axes[i,j].tick_params(axis='x', colors='None')
            if j == 0:
                axes[i,j].set_ylabel(
                    data_name_list[i],
                )
            else:
                axes[i,j].tick_params(axis='y', colors='None')
    plt.savefig(
        "corner_f{0}.png".format(
            band_index_code,
        ),
        dpi = 200)
    plt.close()
    #-----------------------------------
    # Measure time
    elapsed_time = time.time() - start_time
    print("Exiting Main Program, spending ", elapsed_time, "seconds.")
| [
"z123a123s123@gmail.com"
] | z123a123s123@gmail.com |
2ad593494aec7f24b4010959bfb2b2136835505d | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_815.py | e5a12e2fb75af53cc32166eb31cf3dec7bb5caf1 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | # Selecting related objects in django
# Fetch the claiming user, then the users they have claimed games against
# by traversing the GameClaim relation in reverse.
# NOTE(review): assumes Django models User and GameClaim where GameClaim
# has 'opponent' and 'me' foreign keys — confirm against the actual models.
claimer = User.objects.get(name='test')
claimed_opponents = User.objects.filter(gameclaim_opponent__me__user=claimer)
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
074eb0578e10e7cd4fd282894986c2e1459b8715 | 6dd20bbc9fc371403eedc1d81aadbf9beafcdb57 | /lecciones/23/excepciones_2.py | 9d3adee3e4b0e4da286a01d36338973cf2f87efc | [
"MIT"
] | permissive | ImAlexisSaez/curso-python-desde-0 | 0b491a32fa946da7b14c220b383be0ae43ad68e8 | c4a84dae0804adefe4ee6024b411d8ed288da759 | refs/heads/master | 2020-05-18T11:51:12.365319 | 2019-06-18T08:54:08 | 2019-06-18T08:54:08 | 184,390,607 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | def evalua_edad(edad):
if edad < 0:
raise TypeError("No se permiten edades negativas.")
if edad < 20:
return "Eres muy joven."
elif edad < 40:
return "Eres joven."
elif edad < 65:
return "Eres maduro."
elif edad < 100:
return "Cuídate."
# Demo calls: print the bracket message for two valid ages.
print(evalua_edad(18))
print(evalua_edad(70))
# This call passes a negative age, so evalua_edad raises and the script
# terminates with a traceback before printing anything here.
print(evalua_edad(-15))
| [
"cucoalexis@hotmail.com"
] | cucoalexis@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.