blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
044fd67886bf5e38dd991c48b38f1dc4f3bfd6a5
|
885569925c4c564b18121c17a85e03419ffbc308
|
/app.py
|
73c9aa7f38dc5fa7c33045e29a73bf8bfa579657
|
[] |
no_license
|
lluidesia/facial-keypoint-detection
|
711cc9d7a7dd49f74a57779553a27fccc36731f8
|
d2c042ca3532c646e7d7bc9557907f235e5ab072
|
refs/heads/master
| 2020-09-05T11:24:40.404776
| 2019-11-06T21:36:11
| 2019-11-06T21:36:11
| 220,089,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 942
|
py
|
import io
import os

from flask import Flask, render_template, send_file, request, redirect, url_for
from PIL import Image

app = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__))
app.config.update(
    UPLOADED_PATH=os.path.join(basedir, 'uploads'),
)
# Ensure the upload target exists; f.save() fails if the directory is missing.
os.makedirs(app.config['UPLOADED_PATH'], exist_ok=True)


@app.route('/', methods=['GET'])
def index():
    """Serve the upload form."""
    return render_template('index.html')


@app.route('/upload', methods=['POST'])
def upload():
    """Store the posted file and redirect to its rendered view.

    Fixes: a POST without a 'file' part no longer raises AttributeError
    (request.files.get returns None), and the client-supplied filename is
    reduced to its basename so it cannot escape the uploads directory.
    """
    f = request.files.get('file')
    if f is None or not f.filename:
        return redirect(url_for('index'))
    filename = os.path.basename(f.filename)  # defuse '../' path traversal
    f.save(os.path.join(app.config['UPLOADED_PATH'], filename))
    return redirect(url_for('go_to_image', file_name=filename))


@app.route('/go_to_image', methods=['GET'])
def go_to_image():
    """Re-encode the stored upload as PNG and stream it back to the client."""
    file_name = os.path.basename(request.args.get('file_name', ''))
    file_object = io.BytesIO()
    img = Image.open(os.path.join(app.config['UPLOADED_PATH'], file_name))
    img.save(file_object, 'PNG')
    file_object.seek(0)
    # MIME types are lowercase by convention ('image/PNG' -> 'image/png').
    return send_file(file_object, mimetype='image/png')


if __name__ == '__main__':
    app.run(debug=True)
|
[
"liudaprysiazhna@gmail.com"
] |
liudaprysiazhna@gmail.com
|
f6a756d8901c7c8cdf61ca05ec0781a2c12777a4
|
427a148400c529d9bce48933605ded8aa0fbf015
|
/Buble_Sort.py
|
2f503758366bba48c6c13388ad92fc2a14618bea
|
[] |
no_license
|
selimbd91/General_Python
|
9491838472c28d267f2a026497b5360878b3d22e
|
0154a86c205a7ddef127e43a8cefe1b66016bcd1
|
refs/heads/master
| 2020-12-21T11:35:34.009225
| 2020-07-21T22:41:49
| 2020-07-21T22:41:49
| 236,419,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 946
|
py
|
import random
class Sorting:
    """Hold a list of random integers and sort it in place."""

    def __init__(self, n):
        # Populate with n random values in [1, 20] and show the raw order.
        self.lists = [random.randint(1, 20) for _ in range(n)]
        print(self.lists)

    def buble_sort(self):
        """Exchange sort: repeatedly pull smaller values toward the front."""
        size = len(self.lists)
        for left in range(size):
            for right in range(left + 1, size):
                if self.lists[left] > self.lists[right]:
                    self.lists[left], self.lists[right] = (
                        self.lists[right], self.lists[left])
        print(self.lists)

    def selection_sort(self):
        """Selection sort: swap the minimum of the unsorted tail into place."""
        size = len(self.lists)
        for pos in range(size):
            smallest = pos
            for candidate in range(pos, size):
                if self.lists[smallest] > self.lists[candidate]:
                    smallest = candidate
            self.lists[pos], self.lists[smallest] = (
                self.lists[smallest], self.lists[pos])
        print(self.lists)
# Demo: build a 10-element random list and sort it (bubble sort is left
# commented out so only one algorithm runs per execution).
obj = Sorting(10)
#obj.buble_sort()
obj.selection_sort()
|
[
"noreply@github.com"
] |
noreply@github.com
|
ab32585b9e7c9dd55c0620ab746825b726ad0590
|
885291f5c66f242bb84effc27c400b8a1a5e5284
|
/diary/management/commands/backup_diary.py
|
b27fb50578bc733591e384472544174b780f0074
|
[] |
no_license
|
maataro/django_app
|
175e6aa153606cae7af79d98e4c43ea16ed0fe08
|
e3f75ea44856a4aa800f997e5aa8d6d90dac46f3
|
refs/heads/master
| 2023-01-05T06:46:55.516279
| 2020-10-05T17:13:41
| 2020-10-05T17:13:41
| 298,172,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
import csv
import datetime
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from ...models import Diary
class Command(BaseCommand):
    """Dump every Diary row to a dated CSV and prune old backup files."""
    help = "Backup Diary data"

    def handle(self, *args, **options):
        # Date stamp (YYYYMMDD) embedded in the backup file name; names
        # therefore sort chronologically.
        date = datetime.date.today().strftime("%Y%m%d")
        # Relative path of the backup file.
        file_path = settings.BACKUP_PATH + 'diary_' + date + '.csv'
        # Create the backup directory if it does not exist yet.
        os.makedirs(settings.BACKUP_PATH, exist_ok=True)
        # newline='' is required by the csv module to avoid blank rows on
        # Windows; utf-8 keeps non-ASCII diary text intact.
        with open(file_path, 'w', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)
            # Header: one column per model field (includes the pk).
            fields = [field.name for field in Diary._meta.fields]
            writer.writerow(fields)
            # BUGFIX: rows previously hard-coded 8 attributes while the
            # header listed every model field, so header and data columns
            # were misaligned. Emit exactly one value per header column.
            for diary in Diary.objects.all():
                writer.writerow([str(getattr(diary, name)) for name in fields])
        # Keep at most NUM_SAVED_BACKUP files: delete the oldest one.
        files = os.listdir(settings.BACKUP_PATH)
        if len(files) >= settings.NUM_SAVED_BACKUP:
            files.sort()
            os.remove(settings.BACKUP_PATH + files[0])
|
[
"masahiro.infinite77@gmail.com"
] |
masahiro.infinite77@gmail.com
|
d8329d3ce6551bc43f12339119f7cc1a1dc10936
|
93465443f6cb0bfe98c46efa9ad61383fc183470
|
/demo/HelloWord.py
|
6547b68d76cbf8be0825bebd881bba37727a3a7f
|
[] |
no_license
|
zhangli1229/gy-1906A
|
3b1352d82a715d83a8fbc15aeb1ae8fb510739ed
|
54aeb5a3788afce9ecb67fcb84faa86a635c74d0
|
refs/heads/master
| 2020-06-22T20:41:08.829994
| 2019-07-23T09:45:22
| 2019-07-23T09:45:22
| 198,394,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
# Bind a string literal and report its runtime type.
a = "fjgkfgj"
kind = type(a)
print(kind)
|
[
"1208269415@qq.com"
] |
1208269415@qq.com
|
cef9a68afdddd61d9d2c7d5510d7a38174bc8f1c
|
4b68243d9db908945ee500174a8a12be27d150f9
|
/pogoprotos/networking/requests/messages/update_fitness_metrics_message_pb2.py
|
522382d168f4fe3adab53afbb40fe730c7070bd9
|
[] |
no_license
|
ykram/pogoprotos-py
|
7285c86498f57dcbbec8e6c947597e82b2518d80
|
a045b0140740625d9a19ded53ece385a16c4ad4a
|
refs/heads/master
| 2020-04-20T10:19:51.628964
| 2019-02-02T02:58:03
| 2019-02-02T02:58:03
| 168,787,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 2,937
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/update_fitness_metrics_message.proto
# NOTE(review): generated module — change the .proto and regenerate rather
# than editing this file by hand.

import sys
# On Python 2 bytes literals pass through; on Python 3 they are re-encoded
# to latin1 so the serialized descriptor below round-trips byte-for-byte.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

from pogoprotos.data.fitness import fitness_sample_pb2 as pogoprotos_dot_data_dot_fitness_dot_fitness__sample__pb2

# File-level descriptor; serialized_pb is the compiled .proto blob.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='pogoprotos/networking/requests/messages/update_fitness_metrics_message.proto',
  package='pogoprotos.networking.requests.messages',
  syntax='proto3',
  serialized_pb=_b('\nLpogoprotos/networking/requests/messages/update_fitness_metrics_message.proto\x12\'pogoprotos.networking.requests.messages\x1a,pogoprotos/data/fitness/fitness_sample.proto\"^\n\x1bUpdateFitnessMetricsMessage\x12?\n\x0f\x66itness_samples\x18\x01 \x03(\x0b\x32&.pogoprotos.data.fitness.FitnessSampleb\x06proto3')
  ,
  dependencies=[pogoprotos_dot_data_dot_fitness_dot_fitness__sample__pb2.DESCRIPTOR,])

# Message descriptor for UpdateFitnessMetricsMessage (one repeated
# FitnessSample field, tag 1).
_UPDATEFITNESSMETRICSMESSAGE = _descriptor.Descriptor(
  name='UpdateFitnessMetricsMessage',
  full_name='pogoprotos.networking.requests.messages.UpdateFitnessMetricsMessage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='fitness_samples', full_name='pogoprotos.networking.requests.messages.UpdateFitnessMetricsMessage.fitness_samples', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=167,
  serialized_end=261,
)

# Wire up cross-file references and register with the symbol database.
_UPDATEFITNESSMETRICSMESSAGE.fields_by_name['fitness_samples'].message_type = pogoprotos_dot_data_dot_fitness_dot_fitness__sample__pb2._FITNESSSAMPLE
DESCRIPTOR.message_types_by_name['UpdateFitnessMetricsMessage'] = _UPDATEFITNESSMETRICSMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class generated from the descriptor above.
UpdateFitnessMetricsMessage = _reflection.GeneratedProtocolMessageType('UpdateFitnessMetricsMessage', (_message.Message,), dict(
  DESCRIPTOR = _UPDATEFITNESSMETRICSMESSAGE,
  __module__ = 'pogoprotos.networking.requests.messages.update_fitness_metrics_message_pb2'
  # @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.UpdateFitnessMetricsMessage)
  ))
_sym_db.RegisterMessage(UpdateFitnessMetricsMessage)

# @@protoc_insertion_point(module_scope)
|
[
"mark@noffle.net"
] |
mark@noffle.net
|
3acd601e6d39cf8b48f57ba59897836edd48fc79
|
59812860bc22356059bc5bf59a784c8535978b25
|
/utils.py
|
26243da58bf66dabbe19372cb62d5a0fae473788
|
[] |
no_license
|
Folifolo/backprop
|
049c1f07b839e0f939903da601c11a31938a8cd5
|
afe938aac37cf3e86778a33e17469dbf74a7961e
|
refs/heads/master
| 2020-08-18T01:18:54.810518
| 2019-11-10T10:37:05
| 2019-11-10T10:37:05
| 215,731,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
import numpy as np
def relu(X):
    """Rectified linear unit: zero out every non-positive entry of X."""
    positive_mask = X > 0
    return X * positive_mask
def reluD(X):
    """Derivative of relu: boolean mask of strictly positive entries."""
    return 0 < X
#=====================
def calculate_E(label, prediction, size):
    """Mean cross-entropy of one-hot `label` against `prediction` columns.

    `prediction` is transposed so its samples line up with label rows.
    """
    log_probs = np.log(prediction.T)
    total = np.sum(label * log_probs)
    return -total / size
def calculate_acc(label, prediction):
    """Fraction of samples whose predicted class (argmax over prediction
    columns) matches the one-hot label's class (argmax over label rows)."""
    predicted_classes = np.argmax(prediction, axis=0)
    true_classes = np.argmax(label, axis=1)
    return np.mean(predicted_classes == true_classes)
|
[
"Folifolo@yandex.ru"
] |
Folifolo@yandex.ru
|
441d362c54f38d41048090be65997b9096bd1c3e
|
567c75c7801a475c26b81f94bd7b91986933d99b
|
/a3/sdp/raw/media.py
|
eee9efeaad77fae0b6d188487ef07d4bc33269fb
|
[] |
no_license
|
rcslabs/a3-media-controller
|
443c79bf0c341c45eeb7734d058de052f8e5d54f
|
4457fe10a2d432d0e57cc2b3a914d4e4556b9695
|
refs/heads/master
| 2021-02-13T08:00:42.996741
| 2020-03-03T15:54:37
| 2020-03-03T15:54:37
| 244,678,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,146
|
py
|
#!/usr/bin/env python
"""
sdp.raw.Media object
"""
import attribute
from entity import MediaDescription, MediaDescriptionProto, ConnectionData
from a3.media import MediaType
class Media(object):
    """One SDP media section: the m= line plus its optional i=/c=/b=/k=/a=
    sub-lines (see the to_str_list serialization order below)."""

    def __init__(self, media_type=MediaType.AUDIO, media_description=None):
        # Exact-type asserts (not isinstance): the codebase expects the
        # concrete entity classes, so subclasses are deliberately rejected.
        assert type(media_type) is MediaType
        assert media_description is None or type(media_description) is MediaDescription
        if media_description:
            self.__media_description = media_description
        else:
            # Default m= line: requested media type, port 0, RTP/AVP, no formats.
            self.__media_description = MediaDescription(media_type,
                                                        0,
                                                        MediaDescriptionProto.RTP_AVP,
                                                        [])
        self.__media_title = None        # i= media title (no public setter here)
        self.__connection_data = None    # c= connection information
        self.__bandwidths = []           # b= lines (no public mutator here)
        self.__encryption_key = None     # k= encryption key (no public setter here)
        self.__attributes = attribute.AttributeCollection()  # a= lines

    @property
    def media_type(self):
        # Delegated to the media description rather than stored separately.
        return self.__media_description.media_type

    @property
    def media_description(self):
        """
        :rtype : MediaDescription
        """
        return self.__media_description

    @media_description.setter
    def media_description(self, media_description):
        assert type(media_description) is MediaDescription
        self.__media_description = media_description

    @property
    def media_title(self):
        return self.__media_title

    @property
    def connection_data(self):
        return self.__connection_data

    @connection_data.setter
    def connection_data(self, connection_data):
        assert connection_data is None or type(connection_data) is ConnectionData
        self.__connection_data = connection_data

    @property
    def attributes(self):
        return self.__attributes

    @attributes.setter
    def attributes(self, attributes):
        assert type(attributes) is attribute.AttributeCollection
        self.__attributes = attributes

    def add_attribute(self, str_name, value=None):
        """Append a new a= attribute; returns the collection's append result."""
        return self.__attributes.append(attribute.Attribute(str_name, value))

    def remove_attribute(self, attribute):
        # NOTE: the parameter shadows the module-level `attribute` import,
        # which is harmless here since the module is not used in this method.
        return self.__attributes.remove(attribute)

    def to_str_list(self):
        """Serialize this section to a list of SDP lines in RFC 4566 order."""
        lines = []
        lines.append("m=" + str(self.__media_description))  # m= (media name and transport address)
        if self.__media_title: lines.append("i=" + str(self.__media_title))  # i=* (media title)
        if self.__connection_data: lines.append("c=" + str(self.__connection_data))  # c=* (connection information)
        for b in self.__bandwidths: lines.append("b=" + str(b))  # b=* (zero or more bandwidth information lines)
        if self.__encryption_key: lines.append("k=" + str(self.__encryption_key))  # k=* (encryption key)
        lines += self.__attributes.to_str_list()  # a=* (zero or more media attribute lines)
        return lines

    def __str__(self):
        # SDP lines are CRLF-terminated, including the last one.
        return "\r\n".join(self.to_str_list()) + "\r\n"
|
[
"yury.krikun@44e08e39-4b91-0410-a4d4-833ecb1b66d7"
] |
yury.krikun@44e08e39-4b91-0410-a4d4-833ecb1b66d7
|
24067e7967bb71a2a6e31c1d0ec61bb2845bfd63
|
46d68965d76de48d1cee28f6218c9de60526eb83
|
/scheduler/migrations/0005_auto__add_field_schedule_paused_at.py
|
e23cc371afc2a2e13ed4a981792f081a6767e89c
|
[] |
no_license
|
bradmenezes/reminderapp
|
e6b99dbb0be6df10100223f567376599317fafc6
|
7b1b57f61fd1df4bc9d6a8d8afb2b28a04d24935
|
refs/heads/master
| 2020-05-04T21:11:25.182515
| 2014-08-23T23:30:42
| 2014-08-23T23:30:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,170
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the nullable Schedule.paused_at field.

    NOTE(review): migrations are frozen once applied — comments only,
    no code changes.
    """

    def forwards(self, orm):
        # Adding field 'Schedule.paused_at'
        db.add_column(u'scheduler_schedule', 'paused_at',
                      self.gf('django.db.models.fields.DateTimeField')(default=None, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Schedule.paused_at'
        db.delete_column(u'scheduler_schedule', 'paused_at')

    # Frozen ORM snapshot South uses to materialize models at migration time.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'scheduler.schedule': {
            'Meta': {'object_name': 'Schedule'},
            'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'day_of_week': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'}),
            'frequency': ('django.db.models.fields.CharField', [], {'default': "'ONE_OFF'", 'max_length': '10'}),
            'hour': ('django.db.models.fields.IntegerField', [], {'default': '12'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '150', 'blank': 'True'}),
            'minute': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'paused_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'Custom'", 'max_length': '15'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        }
    }

    complete_apps = ['scheduler']
|
[
"bradmenezes10@gmail.com"
] |
bradmenezes10@gmail.com
|
c234418751a55af246126450f10af864edf22721
|
e7aa98a1d9dfb60a38f192c2168734255376197d
|
/soup_test.py
|
9327a002d1411d39016e6664d4366c53d1da14e5
|
[] |
no_license
|
mksvdmtr/python_csv_learning
|
e22cc97d662bf204fa46fbc325d76e08999aba92
|
c54739635526c0286c8d1c0ed19f11d8c3b8d7a3
|
refs/heads/master
| 2023-05-31T14:25:34.270688
| 2020-04-29T09:14:07
| 2020-04-29T09:14:07
| 259,878,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
from bs4 import BeautifulSoup
import requests
import csv

resp = requests.get("http://quotes.toscrape.com/")
# Fail loudly on an HTTP error instead of silently scraping an error page.
resp.raise_for_status()
html_data = BeautifulSoup(resp.text, 'html.parser')
quotes = html_data.find_all(class_='quote')

# newline='' is required by the csv module (avoids blank rows on Windows);
# utf-8 keeps curly quotes and non-ASCII author names intact.
with open("quotes.csv", "w", newline='', encoding='utf-8') as file:
    field_names = ['Author', 'Quote', 'Tegs']
    writer = csv.DictWriter(file, field_names, delimiter=";")
    writer.writeheader()
    for q in quotes:
        writer.writerow({
            'Author': q.find(class_='author').get_text(),
            'Quote': q.find(class_='text').get_text(),
            'Tegs': q.find(class_='keywords')['content'],
        })
|
[
"mksvdmtr@yandex.ru"
] |
mksvdmtr@yandex.ru
|
092ccc2f819176198bb2b988a4e323d43bc51b42
|
20c6da49ec4028eba8249be8df6919fc4a920319
|
/model_style_transfer/project/data_loader/ade_dataset.py
|
45fb0584d1fd8c9c642df0f7b5670af4df434b7e
|
[] |
no_license
|
SavaStevanovic/NetMLDemonstrator
|
01819c73186666989b427429ae6a8422ab9bf164
|
c1325b1e8706c019178be51455d670cee56e0819
|
refs/heads/master
| 2023-08-08T18:39:07.084442
| 2023-03-26T08:45:03
| 2023-03-26T08:45:03
| 252,441,080
| 0
| 0
| null | 2023-06-18T09:48:24
| 2020-04-02T11:52:20
|
Python
|
UTF-8
|
Python
| false
| false
| 658
|
py
|
from torch.utils.data import Dataset
from PIL import Image
import glob
import os
class ADEChallengeData2016(Dataset):
    """Image dataset discovered via annotation *.png files under
    /Data/segmentation/<folder_path>/annotations/<mode>/."""

    def __init__(self, mode, folder_path):
        super(ADEChallengeData2016, self).__init__()
        pattern = os.path.join(
            '/Data/segmentation', folder_path, 'annotations', mode, '*.png')
        annotation_files = glob.glob(pattern)
        # Pair each annotation with its source image path
        # (.png -> .jpg, annotations/ -> images/).
        self.data = [
            (ann.replace('.png', '.jpg').replace('annotations', 'images'), ann)
            for ann in annotation_files
        ]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # The annotation path is carried in self.data but, as in the
        # original, only the image is opened and returned.
        img_path, segm_path = self.data[index]
        data = Image.open(img_path, mode='r')
        return data
|
[
"sava.stevanovic@pmf.edu.rs"
] |
sava.stevanovic@pmf.edu.rs
|
c0789528e506bd6d2e3b0093a98b9dc59f8a4f48
|
2c39c8b1ad251db6c616c14460db7a431c7550da
|
/09_sql.py
|
ee88e94738afa3006b6a58d9cd2f131c0f40de53
|
[] |
no_license
|
tendaitt/sql-with-python
|
876ca2d1cb24d120835c87fef77ba8252ca4d137
|
3b499e952f704eb77e8b02f50e7c7ecce791a052
|
refs/heads/master
| 2020-08-29T13:14:22.518835
| 2019-10-31T13:00:17
| 2019-10-31T13:00:17
| 218,042,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
# JOINing data from multiple tables
import sqlite3

with sqlite3.connect("new.db") as connection:
    cursor = connection.cursor()
    cursor.execute("""SELECT DISTINCT population.city, population.population,
    regions.region FROM population, regions WHERE
    population.city = regions.city ORDER by population.city ASC""")
    # Print one labelled block per joined row, blank-line separated.
    for city, population, region in cursor.fetchall():
        print(f"City: {city}")
        print(f"Population: {population}")
        print(f"Region: {region}")
        print("")
|
[
"970497+tendaitt@users.noreply.github.com"
] |
970497+tendaitt@users.noreply.github.com
|
af5b1cb49d6bdd5100e5a537254e5e9d02155253
|
399466b75d2427be9ef5efdb4fe6ed21af438872
|
/project_4/handlers/json_trans.py
|
566e9c36271909500c7e7b0e4b7ff5b3dd2f2670
|
[] |
no_license
|
MessiLeo919/Flowpp
|
a3a20a1fdad54d9f73916ad4c7cc99e096ad808f
|
187d8a09e15e7ab2628976ecd73e06339e421f92
|
refs/heads/master
| 2020-03-21T14:48:56.801827
| 2018-06-29T03:20:28
| 2018-06-29T03:20:28
| 138,367,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,143
|
py
|
import json
import os
import time
def json_transfer(file):
    """Load a meta JSON file and regroup its entries into six lists.

    Returns List_date_1 + List_date_2 + List_date_3, i.e. three
    [group_names, grouped_dicts] pairs for the "action", "service" and
    "application" categories respectively. If the file is missing,
    returns six empty lists with the same overall shape.
    """
    try:
        with open(file,"r") as f:
            load_dict = json.load(f) # parse the JSON file into a dict
            # print(type(load_dict)) # parse into a dict
            print("加载文件完成...")
            print('----------------')
            List_date=[]
            List_date_1 = []
            List_date_2 = []
            List_date_3 = []
            for list_class in load_dict.keys():
                # print(list_class)
                # List_class.append(list_class) # collect the four top-level categories
                if list_class in ["action","service"]:
                    app_L_1 = []
                    dict_All_1 = []
                    for i in load_dict[list_class]: # i is each entry dict under this category
                        # Group key: prefix of name_cn before the first "_".
                        a=i['name_cn'].split("_")[0]
                        if a not in app_L_1:
                            app_L_1.append(a)
                            dict_name={a:[i['name_cn']]}
                            dict_All_1.append(dict_name)
                        else:
                            # dict_All_1 stays index-aligned with app_L_1.
                            dict_All_1[app_L_1.index(a)][a].append(i['name_cn'])
                    if list_class =="action":
                        List_date_1.append(app_L_1)
                        List_date_1.append(dict_All_1)
                    else:
                        List_date_2.append(app_L_1)
                        List_date_2.append(dict_All_1)
                elif list_class =="application":
                    app_L_2 = []
                    dict_All_2 = []
                    for i in load_dict[list_class]: # i is each entry dict under this category
                        # print(i)
                        # Group key: the entry's domain.
                        b = i['domain']
                        # print(b)
                        if b not in app_L_2:
                            app_L_2.append(b)
                            dict_name = {b: [i['name_cn']]}
                            dict_All_2.append(dict_name)
                        else:
                            dict_All_2[app_L_2.index(b)][b].append(i['name_cn'])
                    List_date_3.append(app_L_2)
                    List_date_3.append(dict_All_2)
            List_date=List_date_1+List_date_2+List_date_3
            # print("List_date_1:length",len(List_date_1),"-----------------\n")
            # print(List_date_1)
            # print("List_date_2:length",len(List_date_2),"-----------------\n")
            # print(List_date_2)
            # print("List_date_3:length",len(List_date_3),"-----------------\n")
            # print(List_date_3)
            print(len(List_date),"-----------------")
            return List_date
    except FileNotFoundError:
        # Same 6-slot shape as the success path, but empty.
        return [[],[],[],[],[],[]]
def json_modified(file,List_results):
    """Rewrite the meta JSON with a "selection" flag on every entry.

    Entries under "action"/"service"/"application" get selection=True when
    their name_cn is in List_results, False otherwise. The result is written
    to handlers/download/[Finished]meta.json (path is hard-coded).
    """
    print("转换开始...")
    with open(file,"r") as f:
        load_dict = json.load(f) # parse the JSON file into a dict
        print("type-load_dict\n")
        print(type(load_dict))
        print('----------------')
        with open("handlers/download/[Finished]meta.json", "w", encoding='utf-8') as fp:
            print("新建...")
            for list_class in load_dict.keys():
                # print(list_class)
                # List_class.append(list_class) # collect the four top-level categories
                if list_class in ["action","service","application"]:
                    for i in load_dict[list_class]: # i is each entry dict under this category
                        if i['name_cn'] in List_results:
                            i["selection"] = True
                        else:
                            i["selection"] = False
                        # i.update({"selection": false})
            # ensure_ascii=False keeps the Chinese text human-readable.
            json.dump(load_dict, fp, indent=4,ensure_ascii=False)
    print("转换完成")
def deletefile():
    """Delete every file in ./handlers/upload (relative to the CWD).

    BUGFIX: the ``for file in files`` loop was commented out, so
    ``os.remove(file)`` referenced an undefined name and raised NameError;
    it also passed a bare filename instead of the full path. The size
    filter mentioned in the original docstring ("delete files smaller
    than minSize, in K") was never implemented; actual behavior — remove
    all files — is kept.
    """
    upload_dir = os.path.join(os.getcwd(), 'handlers', 'upload')
    files = os.listdir(upload_dir)  # list the directory's entries
    for file in files:
        os.remove(os.path.join(upload_dir, file))  # delete the file
        print(file + " deleted")
# time.sleep(30)
# Run the upload-directory cleanup immediately when this module executes.
deletefile()
|
[
"690995749@qq.com"
] |
690995749@qq.com
|
ac071ec7c195c0c7838f31cdd9f41fe37a46ad9c
|
a44a9279258ace54be0ea6d410e6ddb5a2d72bcb
|
/project-addons/custom_reports/models/product.py
|
719faf154fd24aa8c981b08a03877ad3b5b456aa
|
[] |
no_license
|
athlontado/PXGO_00064_2014_PHA
|
346f33185a07c2e1766a7cc79cd300252d9b2480
|
3086baba490e47a5dcc7942c7c5fee9fc047ddcd
|
refs/heads/master
| 2020-04-06T03:56:15.828784
| 2016-04-18T12:24:53
| 2016-04-18T12:24:53
| 59,216,028
| 0
| 0
| null | 2016-05-19T14:50:54
| 2016-05-19T14:50:54
| null |
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Pharmadus. All Rights Reserved
# $Óscar Salvador <oscar.salvador@pharmadus.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models
class ProductCategory(models.Model):
    # Extend the existing product.category model (OpenERP inheritance).
    _inherit = 'product.category'

    # Marks this category as a parent grouping used for commissions;
    # unchecked by default.
    commissions_parent_category = fields.Boolean('Commissions parent category',
                                                 default=False)
|
[
"oscar.salvador@pharmadus.com"
] |
oscar.salvador@pharmadus.com
|
66e5e2cd1dd250b00922b3b3211b1c0c1c510d35
|
53565e19de1d345552f5f469f4e4ea311a421bb8
|
/app/artist/models/artist.py
|
de30a6078bcfde1cf589a711184a2c568c8bfd52
|
[] |
no_license
|
standbyme227/fc-melon
|
18e17aa8b85906a62e1631e54a70ff85d72ea435
|
8f0f4d40021f75a025e91fa6aebea143bccb6ce3
|
refs/heads/master
| 2021-05-03T18:59:13.495171
| 2018-03-20T02:32:02
| 2018-03-20T02:32:02
| 120,418,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,632
|
py
|
from django.conf import settings
from django.db import models
from django.forms import model_to_dict
from django.http import JsonResponse, HttpResponse
from .artist_youtube import ArtistYouTube
from .managers import ArtistManager
__all__ = (
'Artist',
)
class Artist(models.Model):
    """A music artist scraped from Melon, with user likes and YouTube videos."""
    BLOOD_TYPE_A = 'a'
    BLOOD_TYPE_B = 'b'
    BLOOD_TYPE_O = 'o'
    BLOOD_TYPE_AB = 'c'
    BLOOD_TYPE_OTHER = 'x'
    CHOICES_BLOOD_TYPE = (
        (BLOOD_TYPE_A, 'A형'),
        (BLOOD_TYPE_B, 'B형'),
        (BLOOD_TYPE_O, 'O형'),
        (BLOOD_TYPE_AB, 'AB형'),
        (BLOOD_TYPE_OTHER, '기타'),
    )
    melon_id = models.CharField('멜론 Artist ID', max_length=20, blank=True, null=True, unique=True)
    image = models.ImageField('프로필 이미지', upload_to='artist', blank=True)
    # upload_to is a path relative to the media folder
    name = models.CharField('이름', max_length=50, )
    real_name = models.CharField('본명', max_length=30, blank=True, default='')
    nationality = models.CharField('국적', max_length=50, blank=True, )
    birth_date = models.DateField(max_length=50, blank=True, null=True, )
    constellation = models.CharField('별자리', max_length=30, blank=True, null=True)
    blood_type = models.CharField('혈액형', max_length=50, blank=True, choices=CHOICES_BLOOD_TYPE)
    # choices must be supplied for the selection options above to apply
    intro = models.TextField('소개', blank=True)
    # likes = models.IntegerField(default=0)
    like_users = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        through='ArtistLike',
        related_name='like_artists',
        blank=True,
    )
    youtube_videos = models.ManyToManyField(
        ArtistYouTube,
        related_name='artists',
        blank=True,
    )
    objects = ArtistManager()

    def __str__(self):
        return self.name

    def toggle_like_user(self, user):
        """Toggle the given user's like on this artist; returns True when a
        like was created, False when an existing like was removed."""
        # Get the ArtistLike for (this artist, user), or create it if missing
        like, like_created = self.like_user_info_list.get_or_create(user=user)
        # If it already existed (nothing new was created)
        if not like_created:
            # remove the Like
            like.delete()
        # Return whether a like was created
        return like_created
        # if self.like_users.filter(user=user).exists():
        #     self.like_users.filter(user).delete()
        # else:
        #     self.like_users.create(user=user)

        # # QuerySet of ArtistLike rows for this artist and the given user
        # query = ArtistLike.objects.filter(artist=self, user=user)
        # # When the QuerySet is non-empty
        # if query.exists():
        #     query.delete()
        #     return False
        # # When the QuerySet is empty
        # else:
        #     ArtistLike.objects.create(artist=self, user=user)
        #     return True

    def to_json(self):
        """Return a JSON-serializable dict of this artist's fields."""
        from django.db.models.fields.files import FieldFile
        from django.contrib.auth import get_user_model
        user_class = get_user_model()
        ret = model_to_dict(self)
        # model_to_dict returns a dict; walk its items and replace any
        # value that would break JSON serialization with a serializable
        # equivalent, assigning the converted value back in place.

        def convert_value(value):
            if isinstance(value, FieldFile):
                return value.url if value else None
            elif isinstance(value, user_class):
                return value.pk
            elif isinstance(value, ArtistYouTube):
                return value.pk
            return value

        def convert_obj(obj):
            """
            Convert an object — or the objects nested inside a container —
            that cannot be serialized into a serializable form.
            :param obj:
            :return: the object passed through convert_value()
            """
            if isinstance(obj, list):
                # For a list, convert each item in place by index
                for index, item in enumerate(obj):
                    obj[index] = convert_obj(item)
            elif isinstance(obj, dict):
                # For a dict, convert each value in place by key
                for key, value in obj.items():
                    obj[key] = convert_obj(value)
            # Not a list or dict: return the converted object itself
            return convert_value(obj)

        convert_obj(ret)
        return ret
|
[
"standbyme227@gmail.com"
] |
standbyme227@gmail.com
|
915530cd328d21c79b8adaeb11cafbd1d868abb7
|
d79aa2d186ed54ef786094bb2608eae911b4527c
|
/backend/bayfieldopen/bayfieldopen/auth/migrations/0001_initial.py
|
0f71f7f216a6879502cc03f641ca582050e57286
|
[] |
no_license
|
JordanRClark/BayfieldOpen
|
23b6a4cb80a34964d90f8d9d32ac0b01f2e2d4ea
|
1bdefdaaad5372f92913afafd10c0242f5fb514e
|
refs/heads/main
| 2023-03-07T07:00:19.776300
| 2021-02-15T05:52:13
| 2021-02-15T05:52:13
| 338,867,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,873
|
py
|
# Generated by Django 3.1.6 on 2021-02-14 23:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the custom User model for this app.

    NOTE(review): generated by Django 3.1.6 — comments only, no code changes.
    """

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('first_name', models.CharField(blank=True, max_length=256)),
                ('middle_name', models.CharField(blank=True, max_length=256)),
                ('last_name', models.CharField(blank=True, max_length=256)),
                ('email', models.EmailField(max_length=254, null=True, unique=True)),
                ('handicap', models.DecimalField(blank=True, decimal_places=2, max_digits=2, null=True)),
                ('is_superuser', models.BooleanField(default=False)),
                ('is_staff', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
[
"jordan@Jordans-MacBook-Pro.local"
] |
jordan@Jordans-MacBook-Pro.local
|
63acff57f79dae33fea912cb9cec6449292df6f5
|
b3717c1c9eb195c0d6205a653f97cb719118c166
|
/blog/models.py
|
744ae9bd29bad66ab8edd786bab55a1069e098a9
|
[] |
no_license
|
Harithmech/my-first-blog
|
c551323a295da0682e56067af900731e1af43fd6
|
bccf0a258ba1e361fa8c5ec5f00c5cc9b9498bbe
|
refs/heads/master
| 2023-02-28T16:01:50.138599
| 2021-02-11T11:45:21
| 2021-02-11T11:45:21
| 266,046,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
from django.db import models
# Create your models here.
from django.conf import settings
from django.utils import timezone
class Post(models.Model):
    """A blog post written by a site user; drafts have no published_date."""
    # Deleting the author also deletes their posts (CASCADE).
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Timezone-aware creation time; defaults to "now" at save time.
    created_date = models.DateTimeField(default=timezone.now)
    # Stays NULL until publish() is called, so drafts can be filtered out.
    published_date = models.DateTimeField(blank=True, null=True)
    def publish(self):
        """Mark the post as published now and persist the change."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        # Human-readable representation (admin lists, shell, templates).
        return self.title
|
[
"harithmech@gmail.com"
] |
harithmech@gmail.com
|
80d457fe0e0df539d494873fa3d8e41ce774ae0b
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_1/palemale/a.py
|
f78d73ef5adea50522114802f390513ce3e2cfff
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 810
|
py
|
# Code Jam solver (appears to be 2016 Qualification problem A — confirm).
# NOTE(review): Python 2 source (xrange, print statement); will not run on py3.
import os, sys
with open(sys.argv[1], 'r') as infile:
    N = int(infile.readline().strip())
    for x in xrange(1, N+1):
        T = infile.readline().strip()
        # Digits seen so far across T and its multiples.
        cases = set(list(T))
        intT = int(T)
        current = intT
        count = 2
        stablecount = 0
        # Take successive multiples of T until every digit 0-9 has appeared.
        while len(cases) < 10:
            current = count*intT
            count += 1
            cur_num = len(cases)
            cases.update(list(str(current)))
            if cur_num == len(cases):
                stablecount += 1
            else:
                stablecount = 0
            # Heuristic bail-out: 100 consecutive multiples with no new digit
            # means we will never see all ten (e.g. T = 0).
            if stablecount > 100:
                current = 'INSOMNIA'
                break
        if isinstance(current, int):
            current = str(current)
        print "Case #%s: %s" % (x, current)
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
fb22d74665a1f8e43b4181a24e234e13bccf6c86
|
9ecd34e90be231dd368abd1e2b7543f45eca2ad2
|
/node_modules/socket.io/node_modules/socket.io-client/node_modules/ws/build/config.gypi
|
d920cef5c455ffef4ea25c9e9fb44568c26761e5
|
[
"MIT"
] |
permissive
|
pruthvikar/timer
|
a9339ed0e0e532da366e5433bba33a50453be6cc
|
d59e65cff1861d93d54d9cea31114e29ce43fc81
|
refs/heads/master
| 2020-06-08T04:01:51.862856
| 2013-04-29T01:23:02
| 2013-04-29T01:23:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,854
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 42,
"host_arch": "x64",
"node_install_npm": "true",
"node_install_waf": "true",
"node_prefix": "out/dist-osx/usr/local",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"target_arch": "x64",
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/Users/pruthvikarreddy/.node-gyp/0.8.19",
"copy_dev_lib": "true",
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"userignorefile": "/Users/pruthvikarreddy/.npmignore",
"cache_max": "null",
"init_author_email": "",
"sign_git_tag": "",
"ignore": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"coverage": "",
"json": "",
"pre": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/pruthvikarreddy/.npm-init.js",
"userconfig": "/Users/pruthvikarreddy/.npmrc",
"npaturl": "http://npat.npmjs.org/",
"node_version": "v0.8.19",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "null",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"yes": "",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/pruthvikarreddy/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "node/v0.8.19 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "0.0.0",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/s3/14_2l8xd5sg5xmz0lg1c9j880000gn/T/",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"Pruthvikar@gmail.com"
] |
Pruthvikar@gmail.com
|
2b2a54641d5f56d801a5a0f1798713935087ef28
|
09e5cfe06e437989a2ccf2aeecb9c73eb998a36c
|
/modules/cctbx_project/simtbx/run_tests.py
|
5c3244e65192c78f2e1b57410133b5e40024a0a5
|
[
"BSD-3-Clause",
"BSD-3-Clause-LBNL"
] |
permissive
|
jorgediazjr/dials-dev20191018
|
b81b19653624cee39207b7cefb8dfcb2e99b79eb
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
refs/heads/master
| 2020-08-21T02:48:54.719532
| 2020-01-25T01:41:37
| 2020-01-25T01:41:37
| 216,089,955
| 0
| 1
|
BSD-3-Clause
| 2020-01-25T01:41:39
| 2019-10-18T19:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 468
|
py
|
from __future__ import absolute_import, division, print_function
from libtbx import test_utils
import libtbx.load_env
# Scripts executed by libtbx's test driver; "$D" is presumably expanded by
# test_utils to the simtbx dist directory — confirm against libtbx docs.
tst_list = (
    "$D/nanoBragg/tst_nanoBragg_minimal.py",
    "$D/nanoBragg/tst_nanoBragg_mosaic.py",
    "$D/nanoBragg/tst_gaussian_mosaicity.py",
)
def run():
    """Run every script in ``tst_list`` through libtbx's test runner."""
    build_dir = libtbx.env.under_build("simtbx")
    dist_dir = libtbx.env.dist_path("simtbx")
    test_utils.run_tests(build_dir, dist_dir, tst_list)
if (__name__ == "__main__"):
    run()
|
[
"jorge7soccer@gmail.com"
] |
jorge7soccer@gmail.com
|
aa41fbd83ac1923d6fda08de4cc8f3ebd55904e0
|
90390ddcc21d2f2c0dd5ee3c0e7a3d8d61be9638
|
/wsgi/app/forms.py
|
4141cbb7183fc430344eb1bf806ca44a244d8598
|
[
"MIT"
] |
permissive
|
pjamesjoyce/lcoptview_legacy
|
b27926e31c16f1fca07c6294e66d706fcb600682
|
e0ebeb155d6f62d8619d33cf48db98bab8b7a4cd
|
refs/heads/master
| 2021-07-16T11:38:58.451239
| 2017-09-26T10:43:50
| 2017-09-26T10:43:50
| 107,691,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
from flask_wtf import FlaskForm
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
    """Sign-in form: an identifier (username or email) plus a password."""
    # NOTE(review): wtforms' TextField is deprecated in favour of StringField
    # (and removed in wtforms 3.x) — verify the pinned wtforms version.
    login_data = TextField('username or email', validators=[DataRequired()])
    password = PasswordField('password', validators=[DataRequired()])
class RegistrationForm(FlaskForm):
    """New-account form. All fields are required.

    NOTE(review): nothing here enforces password == password_repeat or a
    valid email format — presumably checked in the view; confirm.
    """
    username = TextField('username', validators=[DataRequired()])
    email = TextField('email', validators=[DataRequired()])
    password = PasswordField('password', validators=[DataRequired()])
    password_repeat = PasswordField('repeat password', validators=[DataRequired()])
|
[
"pjamesjoyce@gmail.com"
] |
pjamesjoyce@gmail.com
|
3823340ea644b2feec0858721dad3a7c2d67d330
|
1b597dd7630f9a3023faf557e383b0fae703e72b
|
/test_autogalaxy/unit/aggregator/test_aggregator.py
|
40b7acd97191da8084e06012b80ef34395849c57
|
[
"MIT"
] |
permissive
|
knut0815/PyAutoGalaxy
|
96e9dfc558182169c41e19d3297cdf46b42d5f77
|
cc2bc0db5080a278ba7519f94d2a8b2468141e2d
|
refs/heads/master
| 2023-03-05T00:59:51.594715
| 2021-02-09T18:21:30
| 2021-02-09T18:21:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,428
|
py
|
from os import path
import pytest
import autofit as af
import autogalaxy as ag
from autogalaxy.mock import mock
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(name="path")
def make_path():
    """Fixture: absolute path of this test module's ``files`` data directory."""
    return path.join("{}".format(path.dirname(path.realpath(__file__))), "files")
@pytest.fixture(name="samples")
def make_samples():
    """Fixture: mock samples whose max-log-likelihood instance is a
    two-galaxy plane (redshifts 0.5 and 1.0)."""
    galaxy_0 = ag.Galaxy(redshift=0.5, light=ag.lp.EllipticalSersic(centre=(0.0, 1.0)))
    galaxy_1 = ag.Galaxy(redshift=1.0, light=ag.lp.EllipticalSersic())
    plane = ag.Plane(galaxies=[galaxy_0, galaxy_1])
    return mock.MockSamples(max_log_likelihood_instance=plane)
def test__dataset_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
    """Run an imaging phase, then load its saved dataset via the aggregator."""
    phase_imaging_7x7 = ag.PhaseImaging(
        galaxies=dict(
            galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
            source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
        ),
        search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
    )
    imaging_7x7.positions = ag.Grid2DIrregular([[1.0, 1.0], [2.0, 2.0]])
    phase_imaging_7x7.run(
        dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
    )
    agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
    dataset = list(agg.values("dataset"))
    # NOTE(review): no assertion — this only smoke-tests that loading works.
    print(dataset)
def test__plane_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
    """Planes reloaded through the aggregator keep the fitted galaxy values."""
    phase_imaging_7x7 = ag.PhaseImaging(
        galaxies=dict(
            galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
            source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
        ),
        search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
    )
    phase_imaging_7x7.run(
        dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
    )
    agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
    plane_gen = ag.agg.Plane(aggregator=agg)
    # Values must match the plane built by the ``samples`` fixture.
    for plane in plane_gen:
        assert plane.galaxies[0].redshift == 0.5
        assert plane.galaxies[0].light.centre == (0.0, 1.0)
        assert plane.galaxies[1].redshift == 1.0
def test__masked_imaging_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
    """Masked-imaging objects reloaded via the aggregator keep the phase's
    grid settings (iterate grids, fractional accuracy, sub steps)."""
    phase_imaging_7x7 = ag.PhaseImaging(
        galaxies=dict(
            galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
            source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
        ),
        settings=ag.SettingsPhaseImaging(
            settings_masked_imaging=ag.SettingsMaskedImaging(
                grid_class=ag.Grid2DIterate,
                grid_inversion_class=ag.Grid2DIterate,
                fractional_accuracy=0.5,
                sub_steps=[2],
            )
        ),
        search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
    )
    phase_imaging_7x7.run(
        dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
    )
    agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
    masked_imaging_gen = ag.agg.MaskedImaging(aggregator=agg)
    for masked_imaging in masked_imaging_gen:
        assert (masked_imaging.imaging.image == imaging_7x7.image).all()
        assert isinstance(masked_imaging.grid, ag.Grid2DIterate)
        assert isinstance(masked_imaging.grid_inversion, ag.Grid2DIterate)
        assert masked_imaging.grid.sub_steps == [2]
        assert masked_imaging.grid.fractional_accuracy == 0.5
def test__fit_imaging_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
    """Imaging fits reloaded via the aggregator wrap the original image data."""
    phase_imaging_7x7 = ag.PhaseImaging(
        galaxies=dict(
            galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
            source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
        ),
        search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
    )
    phase_imaging_7x7.run(
        dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
    )
    agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
    fit_imaging_gen = ag.agg.FitImaging(aggregator=agg)
    for fit_imaging in fit_imaging_gen:
        assert (fit_imaging.masked_imaging.imaging.image == imaging_7x7.image).all()
def test__masked_interferometer_generator_from_aggregator(
    interferometer_7, visibilities_mask_7, mask_7x7, samples
):
    """Masked-interferometer objects reloaded via the aggregator keep the
    phase's grid settings and transformer class."""
    phase_interferometer_7x7 = ag.PhaseInterferometer(
        galaxies=dict(
            galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
            source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
        ),
        settings=ag.SettingsPhaseInterferometer(
            settings_masked_interferometer=ag.SettingsMaskedInterferometer(
                grid_class=ag.Grid2DIterate,
                grid_inversion_class=ag.Grid2DIterate,
                fractional_accuracy=0.5,
                sub_steps=[2],
                transformer_class=ag.TransformerDFT,
            )
        ),
        search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
        real_space_mask=mask_7x7,
    )
    phase_interferometer_7x7.run(
        dataset=interferometer_7,
        mask=visibilities_mask_7,
        results=mock.MockResults(samples=samples),
    )
    agg = af.Aggregator(directory=phase_interferometer_7x7.paths.output_path)
    masked_interferometer_gen = ag.agg.MaskedInterferometer(aggregator=agg)
    for masked_interferometer in masked_interferometer_gen:
        assert (
            masked_interferometer.interferometer.visibilities
            == interferometer_7.visibilities
        ).all()
        assert (masked_interferometer.real_space_mask == mask_7x7).all()
        assert isinstance(masked_interferometer.grid, ag.Grid2DIterate)
        assert isinstance(masked_interferometer.grid_inversion, ag.Grid2DIterate)
        assert masked_interferometer.grid.sub_steps == [2]
        assert masked_interferometer.grid.fractional_accuracy == 0.5
        assert isinstance(masked_interferometer.transformer, ag.TransformerDFT)
def test__fit_interferometer_generator_from_aggregator(
    interferometer_7, visibilities_mask_7, mask_7x7, samples
):
    """Interferometer fits reloaded via the aggregator wrap the original
    visibilities and real-space mask."""
    phase_interferometer_7x7 = ag.PhaseInterferometer(
        galaxies=dict(
            galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
            source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
        ),
        search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
        real_space_mask=mask_7x7,
    )
    phase_interferometer_7x7.run(
        dataset=interferometer_7,
        mask=visibilities_mask_7,
        results=mock.MockResults(samples=samples),
    )
    agg = af.Aggregator(directory=phase_interferometer_7x7.paths.output_path)
    fit_interferometer_gen = ag.agg.FitInterferometer(aggregator=agg)
    for fit_interferometer in fit_interferometer_gen:
        assert (
            fit_interferometer.masked_interferometer.interferometer.visibilities
            == interferometer_7.visibilities
        ).all()
        assert (
            fit_interferometer.masked_interferometer.real_space_mask == mask_7x7
        ).all()
|
[
"james.w.nightingale@durham.ac.uk"
] |
james.w.nightingale@durham.ac.uk
|
e262beb5a4a594f5ccd34d82a63f92f441f87e62
|
786550172250f9f9b14bd923151efc4759349c61
|
/dlkit/relationship/license.py
|
6abeebe8829ffb7bb0c4f68c4f65d28aaaa9cab3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mitsei/dlkit-doc
|
6d88c1686fa9395047b43028b41cd4315c5bacc4
|
b320d57e91bfb32fd88e1fce01d3ddb5935aa9dd
|
refs/heads/master
| 2021-01-14T12:47:42.203154
| 2015-05-11T02:47:03
| 2015-05-11T02:47:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
# -*- coding: utf-8 -*-
"""Relationship Open Service Interface Definitions
relationship version 3.0.0
Copyright (c) 2010 Ingenescus. All Rights Reserved.
This Work is being provided by the copyright holder(s) subject to the
following license. By obtaining, using and/or copying this Work, you
agree that you have read, understand, and will comply with the following
terms and conditions.
Permission to use, copy and distribute unmodified versions of this Work,
for any purpose, without fee or royalty is hereby granted, provided that
you include the above copyright notices and the terms of this license on
ALL copies of the Work or portions thereof.
You may modify or create Derivatives of this Work only for your internal
purposes. You shall not distribute or transfer any such Derivative of
this Work to any location or to any third party. For the purposes of
this license, "Derivative" shall mean any derivative of the Work as
defined in the United States Copyright Act of 1976, such as a
translation or modification.
This Work and the information contained herein is provided on an "AS IS"
basis WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS IN THE WORK.
The export of software employing encryption technology may require a
specific license from the United States Government. It is the
responsibility of any person or organization contemplating export to
obtain such a license before exporting this Work.
"""
|
[
"birdland@Jeff-PM13.local"
] |
birdland@Jeff-PM13.local
|
24fb58232485d43c8a84ef050f7eb2521e36f3dc
|
811ecc8def16dc1bdb8f5a5c2c9e1d01b3f9c319
|
/chat_bot/handlers/simple_qa.py
|
54c7f4b080136b0e97702dec899b7b12a668befc
|
[] |
no_license
|
jiaojianglong/MyBot
|
7dd00959d6cf54d728b213b77084ce375b731ad3
|
9781b183cf168832b3c962d420e7f0a63287c4db
|
refs/heads/master
| 2020-06-13T22:34:15.829604
| 2020-03-29T01:31:24
| 2020-03-29T01:31:24
| 194,809,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/2/22
# @Author : JiaoJianglong
from handlers.base.basehandler import QABaseHandler
from models.es.rg_search_question import RGSearchQuestion
from models.es.rg_question_answer import RGQuestionAnswer
class SimpleQAHandler(QABaseHandler):
    """Single-turn QA endpoint: runs the posted ``content`` through the
    question-retrieval processor pipeline and returns its result."""
    # ES-backed models, shared by all requests handled by this class.
    question_model = RGSearchQuestion()
    answer_model = RGQuestionAnswer()
    @QABaseHandler.decorator.threadpool_decorator
    def post(self, *args, **kwargs):
        # NOTE(review): this initial value is overwritten below and never used.
        result = self.init_parameter()
        content = self.get_argument("content")
        parameter = {"content":content}
        process_list = [
            self.process.QuestionSearchProcessor,  # question retrieval
        ]
        # Build the processor chain and feed the request parameters through it.
        process_instance = self.process_flow(process_list)
        result_parameter = process_instance.handle(parameter)
        result = result_parameter.get("result")
        return result
|
[
"447151999@qq.com"
] |
447151999@qq.com
|
5ed6d98894bbff63047d401e8e20a8797425bf11
|
3ff5361ce05978b675483092ad33a14d7ed52c6f
|
/pipeline/bootstrap_pk.py
|
f4556387a6f7dd2c25b344d825adb017b8c0d11d
|
[] |
no_license
|
bayu-wilson/lyb_pk2
|
9091010eeb1f9fd8325aad70cd259d3cb37d547b
|
33520f5db248886be24950af0da23a20cbacdb95
|
refs/heads/master
| 2022-11-27T20:40:13.137323
| 2020-07-02T03:24:59
| 2020-07-02T03:24:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,492
|
py
|
#!/usr/bin/env python
# Bootstrap-resampling pipeline for Lyman-alpha / Lyman-beta mean-flux and
# power-spectrum measurements. All run settings come from the project-level
# ``inis`` module; binning constants come from ``options``.
import numpy as np
import inis
import pandas as pd
import options as opt
from QuasarSpectrum import QuasarSpectrum
from scipy.interpolate import griddata
tag = inis.tag
cat_name = inis.cat_name
rescale_flux = inis.rescale_flux
# Load the quasar catalogue once; nqso = number of sightlines available.
QuasarSpectrum.load_cat(cat_name)
nqso = QuasarSpectrum.nqso
print("Loading Data")
qso_list = []
for i in range(nqso):
q = QuasarSpectrum.load_qso_data(i,tag=tag,rescale_flux=rescale_flux)
if "noB" in tag:
q.get_new_forest(rescale_flux=rescale_flux,wrange =
(opt.lyb_min,opt.lyb_max,opt.lyb_rest, opt.xs_beta))
q.get_new_forest(rescale_flux=rescale_flux,wrange =
(opt.ovi_min,opt.ovi_max,opt.ovi_rest_d1, opt.xs_ovi))
q.get_new_forest(rescale_flux=rescale_flux,wrange =
(opt.ovi_min,opt.ovi_max,opt.ovi_rest_d2, opt.xs_ovi))
qso_list.append(q)
print("Done!\n")
# BOOTSTRAP SPECIFIC
np.random.seed(1)
nz_arr = QuasarSpectrum.all_redshifts,QuasarSpectrum.all_names
M = inis.M # M Samples
mf_msrmnts = ['mf_a', 'nvar_a','dloglambda_a', 'npow_a',
'mf_tot', 'nvar_tot','dloglambda_tot', 'npow_tot','z','mf_b',
'var_atot','npow_atot']
n_mf_msrmnts = len(mf_msrmnts)
pk_msrmnts = ['k','Paa','Ptot','Pab','Qab','Pbb','num','z']
n_pk_msrmnts = len(pk_msrmnts)
mf_bootstrap = np.zeros((M,opt.zbinlen,n_mf_msrmnts))
pk_bootstrap = np.zeros((M,opt.kbinlen*opt.zbinlen, n_pk_msrmnts))
# Paa_table = []
# Pbb_table = []
# Pab_table = []
# Qab_table = []
for m in range(M):
opt.updt(M, m)
qmask = np.floor(np.random.rand(nqso)*nqso).astype(int)
qso_arr = np.array(qso_list)[qmask]
zbin_msr_matrix = np.zeros((opt.zbinlen,n_mf_msrmnts))
# flux_pdf = [[] for tmp in range(opt.zbinlen)]
# pdf_bins = np.arange(-0.025,1.05,.05)
for zidx in range(opt.zbinlen):
msrmnt_in_zbin = np.zeros(n_mf_msrmnts)
count_in_zbin = np.zeros(n_mf_msrmnts)
#opt.updt(opt.zbinlen, zidx)
zbin_msrmnt = [[] for idx in range(n_mf_msrmnts)]
for i in range(nqso):
#i=85
#zidx = 6
zpix_a = qso_arr[i].get_zpix(opt.lya_rest)
name = qso_arr[i].name
mask = qso_arr[i].get_zmask(forest=(opt.lya_min,opt.lya_max,opt.lya_rest),
zpix=zpix_a,zidx=zidx,zedges=opt.zbin_edges,name=name)
#FLUX PDF
#flux_pdf[zidx].append(np.histogram(qso_arr[i].flux[mask],bins=pdf_bins)[0])#/np.nansum(mask))
zpix_b = qso_arr[i].get_zpix(opt.lyb_rest) # Here is where I want to change optical depth of lyb pixels
mask_b = qso_arr[i].get_zmask(forest=(opt.lyb_min,opt.lyb_max,opt.lyb_rest),
zpix=zpix_b,zidx=zidx,zedges=opt.zbin_edges,name=name)
za = zpix_a[mask]#qso_arr[i].wavelength[mask]/opt.lya_rest-1
ztot = zpix_b[mask_b]#qso_arr[i].wavelength[mask_b]/opt.lyb_rest-1
try:
new_af_mask = (za>np.min(ztot))&(za<np.max(ztot))
new_bf_mask = (ztot>np.min(za))&(ztot<np.max(za))
ferra = qso_arr[i].err_flux[mask][new_af_mask]
ferrtot = qso_arr[i].err_flux[mask_b][new_bf_mask]
# Interpolating to the smaller one
if len(ferrtot)<=len(ferra):
ferra = griddata(za[new_af_mask],ferra,ztot[new_bf_mask],method='linear')
ferra = ferra[np.isfinite(ferra)]
else:
ferrtot = griddata(ztot[new_bf_mask],ferrtot,za[new_af_mask],method='linear')
ferrtot = ferrtot[np.isfinite(ferrtot)]
#print(np.nansum(ferra*ferrtot))
msrmnt_in_zbin[10]+= np.sum(ferra*ferrtot)*0 #CHANGED 5/3/19 after meeting with Matt
# var lya-tot 10
count_in_zbin[10] += len(ferra) # len lya_tot 10
except:
pass
msrmnt_in_zbin[0]+= np.sum(qso_arr[i].flux[mask]) # mf lya 0
count_in_zbin[0] += np.sum(mask) # len lya 0
msrmnt_in_zbin[1]+= np.sum(qso_arr[i].err_flux[mask]**2) # var lya 1
count_in_zbin[1] += np.sum(mask) # len lya 1
msrmnt_in_zbin[2]+= np.sum(qso_arr[i].dloglambda[mask]) # dloglam lya 2
count_in_zbin[2] += np.sum(mask) # len lya 2
msrmnt_in_zbin[4]+= np.sum(qso_arr[i].flux[mask_b]) # mf tot 4
count_in_zbin[4] += np.sum(mask_b) # len tot 4
msrmnt_in_zbin[5]+= np.sum(qso_arr[i].err_flux[mask_b]**2) # var tot 5
count_in_zbin[5] += np.sum(mask_b) # len tot 5
msrmnt_in_zbin[6]+= np.sum(qso_arr[i].dloglambda[mask_b]) # dloglam tot 6
count_in_zbin[6] += np.sum(mask_b) # len tot 6
zbin_msr_matrix[zidx] = msrmnt_in_zbin/count_in_zbin
#print(count_in_zbin[0])
#print(msrmnt_in_zbin[0])
#opt.updt(opt.zbinlen, opt.zbinlen)
#print("Done!\n")
# npow alpha 3
zbin_msr_matrix.T[3] = list(QuasarSpectrum.get_npow(mf=zbin_msr_matrix.T[0],
nvar=zbin_msr_matrix.T[1],
dloglambda=zbin_msr_matrix.T[2]))
# npow total 7
zbin_msr_matrix.T[7] = list(QuasarSpectrum.get_npow(mf=zbin_msr_matrix.T[4],
nvar=zbin_msr_matrix.T[5],
dloglambda=zbin_msr_matrix.T[6]))
# zbins 8
zbin_msr_matrix.T[8] = opt.zbin_centers
# npow lya-tot 11
zbin_msr_matrix.T[11] = ((zbin_msr_matrix.T[4]*zbin_msr_matrix.T[0])**(-1)*
zbin_msr_matrix.T[10] * np.pi / (opt.kmax-opt.kmin))
#print('1',zbin_msr_matrix.T[10])
mf_output_df = pd.DataFrame(zbin_msr_matrix)
mf_output_df.columns = mf_msrmnts
zab_centers = opt.find_za(opt.zbin_centers) #converting lyb zbins to the equivalent, lower, lya zbins
len_zab = len(zab_centers)
#Gives corresponding lya bin for each lyb bin. organized by increasing z.
bin_zab=np.ones(len_zab)*np.nan
for i in range(len_zab):
for j in range(len_zab):
if (zab_centers[i]>opt.zbin_edges[j])&(zab_centers[i]<opt.zbin_edges[j+1]):
bin_zab[i] = (opt.zbin_centers[j])
mf_lyb = np.ones(len_zab)*np.nan #nan until proven otherwise
for i in range(len_zab):
if bin_zab[i] in mf_output_df.z.values:
za_idx = mf_output_df.z == bin_zab[i]
ztot_idx = i
mf_lyb[i] = mf_output_df.mf_tot[ztot_idx]/mf_output_df.mf_a[za_idx]
mf_output_df['mf_b'] = mf_lyb
mf_bootstrap[m] = mf_output_df
# POWER SPECTRUM
znk_matrix = np.zeros((opt.zbinlen,n_pk_msrmnts,opt.kbinlen)) # 7 zbins,6 measurements, 20 kbins
#print("Pk")
for zidx in range(opt.zbinlen):
#opt.updt(opt.zbinlen, zidx)
msrmnt_in_kbin = np.zeros((n_pk_msrmnts,opt.kbinlen))
count_in_kbin = np.zeros((n_pk_msrmnts,opt.kbinlen))
msrmnt_in_kbin[0] = opt.kbin_centers
count_in_kbin[0] = np.ones_like(opt.kbin_centers)
count_in_kbin[6] = np.ones_like(opt.kbin_centers)
msrmnt_in_kbin[7] = np.ones_like(opt.kbin_centers) * opt.zbin_centers[zidx]
count_in_kbin[7] = np.ones_like(opt.kbin_centers)
for qidx in range(nqso):
# LYA FOREST: P ALPHA ALPHA
zpix_a = qso_arr[qidx].get_zpix(opt.lya_rest)
zmask_a = qso_arr[qidx].get_zmask(forest=(opt.lya_min,opt.lya_max,opt.lya_rest),
zpix=zpix_a,zidx=zidx,zedges=opt.zbin_edges,name=name)
if np.sum(zmask_a)>opt.min_pix:
kpix,pk = qso_arr[qidx].get_autopower(mf_output_df.mf_a[zidx],zmask_a)
for kidx in range(opt.kbinlen):
npow = mf_output_df.npow_a.values[zidx]
kmask = qso_arr[qidx].get_kmask(kpix=kpix,kidx=kidx,kedges=opt.kbin_edges)
pk_sub = qso_arr[qidx].get_pk_subsets(kpix=kpix,pk=pk,zmask=zmask_a,kmask=kmask,
corr_tag=tag,npow=npow)
#znk_matrix[zidx][1,kidx] += np.sum(pk_sub)
#znk_matrix[zidx][6,kidx] += len(pk_sub)
msrmnt_in_kbin[1,kidx] += np.sum(pk_sub) #Paa
#msrmnt_in_kbin[6,kidx] += len(pk_sub)
count_in_kbin[1,kidx] += len(pk_sub) #num is Paa
# LYB FOREST: P TOTAL TOTAL
zpix_tot = qso_arr[qidx].get_zpix(opt.lyb_rest)
zmask_tot = qso_arr[qidx].get_zmask(forest=(opt.lyb_min,opt.lyb_max,opt.lyb_rest),
zpix=zpix_tot,zidx=zidx,zedges=opt.zbin_edges,name=name)
if (np.sum(zmask_tot)>opt.min_pix):
kpix,pk = qso_arr[qidx].get_autopower(mf_output_df.mf_tot[zidx],zmask_tot)
for kidx in range(opt.kbinlen):
npow = mf_output_df.npow_tot.values[zidx]
kmask = qso_arr[qidx].get_kmask(kpix=kpix,kidx=kidx,kedges=opt.kbin_edges)
pk_sub = qso_arr[qidx].get_pk_subsets(kpix=kpix,pk=pk,zmask=zmask_tot,kmask=kmask,
corr_tag=tag,npow=npow)
msrmnt_in_kbin[2,kidx] += np.nansum(pk_sub)
count_in_kbin[2,kidx] += len(pk_sub)
#Cross power
if (np.sum(zmask_a)>opt.min_pix)&(np.sum(zmask_tot)>opt.min_pix):
kpix,pab,qab,dlam,res = qso_arr[qidx].cross_pk_fft(mask_lya=zmask_a,mask_lyb=zmask_tot,
mf_lya=mf_output_df.mf_a[zidx],
mf_lyb=mf_output_df.mf_tot[zidx])
npow = mf_output_df.npow_atot.values[zidx]
for kidx in range(opt.kbinlen):
kmask = qso_arr[qidx].get_kmask(kpix=kpix,kidx=kidx,kedges=opt.kbin_edges)
pab_sub,qab_sub = qso_arr[qidx].get_xpk_subsets(kpix,pab,qab,dlam,res,tag,npow,kmask)
#msrmnt_in_kbin[2,kidx] += np.nansum(pk_sub)
#count_in_kbin[2,kidx] += len(pk_sub)
# msrmnt_in_kbin[3,kidx] += np.nansum(pab_sub) #remove!
#print(np.nansum(pab_sub))
msrmnt_in_kbin[3,kidx] += np.nansum(pab_sub)
count_in_kbin[3,kidx] += len(pab_sub)
msrmnt_in_kbin[4,kidx] += np.sum(qab_sub) #remove!
count_in_kbin[4,kidx] += len(qab_sub)
msrmnt_in_kbin[6,kidx] += len(pab_sub)
znk_matrix[zidx] = msrmnt_in_kbin/count_in_kbin
#opt.updt(opt.zbinlen, opt.zbinlen)
#print("Done!\n")
# Finding Lyman beta power
for i in range(len_zab):
if bin_zab[i] in opt.zbin_centers:
za_idx = np.where(opt.zbin_centers == bin_zab[i])[0][0]
znk_matrix[i][5] = znk_matrix[i][2]-znk_matrix[za_idx][1]
# Making 3d pk matrix into 2d pk data frame
x = pd.DataFrame(znk_matrix[0].T,columns=pk_msrmnts)
for i in range(1,opt.zbinlen):
x = x.append(pd.DataFrame(znk_matrix[i].T,columns=pk_msrmnts))
#print(np.shape(x))
pk_bootstrap[m] = x
mf_bootstrap_2d = np.concatenate(mf_bootstrap.T)
pk_bootstrap_2d = np.concatenate(pk_bootstrap.T)
#np.reshape(mf_bootstrap.T,(M,opt.zbinlen*n_mf_msrmnts))
if inis.save_boot_mf:
np.savetxt(inis.save_boot_mf_path,mf_bootstrap_2d)#).to_csv(inis.save_boot_mf_path, index=False)
if inis.save_boot_pk:
np.savetxt(inis.save_boot_pk_path,pk_bootstrap_2d)
# print(pk_bootstrap[m])
opt.updt(M, M)
# PLOTTING ROUTINE
# import matplotlib.pyplot as plt
# qwer = mf_bootstrap.T[0]
# # Add a colorbar
# fig,ax = plt.subplots(1, 2,gridspec_kw={'width_ratios': [1,1]})
# fig.set_size_inches(12,8)
#
# im = ax[0].imshow(np.corrcoef(qwer), cmap = plt.cm.jet)
# fig.colorbar(im, ax=ax[0])
# # set the color limits - not necessary here, but good to know how.
# im.set_clim(0.0, 1.0)
# #plt.show()
#
# x = [np.var(qwer.T[:i].T) for i in range(1,M)]
# ax[1].plot(x)
# med = np.median(x)
# ax[1].set_ylim(med - med*0.01,med + med*0.01)
# fig.tight_layout()
# fig.savefig('../plot/figures/test.pdf')
# plt.clf()
# plt.show()
# print()
# qwer = np.reshape(np.concatenate(mf_bootstrap.T,axis=1)[0],(M,n_mf_msrmnts))
# print("HERE")
# print(qwer)
# print(qwer)
# print(np.concatenate(mf_bootstrap.T,axis=1)[0])
# qwer = np.reshape(np.concatenate(mf_bootstrap.T,axis=1)[0],(M,n_mf_msrmnts))
# # print(qwer)
# print(np.corrcoef(qwer))
# import matplotlib.pyplot as plt
# plt.imshow(np.corrcoef(qwer))
# plt.show()
# print()
# print(np.nanmean(mf_bootstrap.T,axis=2))
# print(np.nanvar(mf_bootstrap.T,axis=2))
# print(np.corrcoef(np.reshape(np.concatenate(mf_bootstrap,axis=1)[0],(M,n_mf_msrmnts))))
# print(np.mean(mf_bootstrap.T[1],axis=1))/
# for i in range(n_mf_msrmnts):
# #print(mf_bootstrap[0].T[i])
# print(np.nanmean(mf_bootstrap[0].T[i]))
# print(np.nanvar(mf_bootstrap[0].T[i]))
#
# print(np.nanmean(mf_bootstrap[0].T,axis=1))
# print('0')
# print(mf_bootstrap[0].T)
# print("1")
# print(mf_bootstrap[0].T[0])
# print("2")
# print(np.nanmean(mf_bootstrap[0].T[0]))
# print("3")
# print(np.nanmean(mf_bootstrap[0],axis=1))
# print("4")
|
[
"bwilson3853@gmail.com"
] |
bwilson3853@gmail.com
|
0bf9f14a7d8f3b313cb14ebe38a4ae36709d9164
|
92237641f61e9b35ff6af6294153a75074757bec
|
/Algorithm/programmers/lv2/lv2_짝지어 제거하기.py
|
dc49c17ce25e718214f85eb4831fb672b343a239
|
[] |
no_license
|
taepd/study
|
8ded115765c4f804813e255d9272b727bf41ec80
|
846d3f2a5a4100225b750f00f992a640e9287d9c
|
refs/heads/master
| 2023-03-08T13:56:57.366577
| 2022-05-08T15:24:35
| 2022-05-08T15:24:35
| 245,838,600
| 0
| 1
| null | 2023-03-05T23:54:41
| 2020-03-08T15:25:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 278
|
py
|
def solution(s):
    """Return 1 if ``s`` can be fully erased by repeatedly removing
    adjacent equal pairs, otherwise 0.

    A stack holds the characters that have not yet been cancelled; a
    character cancels the stack top when they match.
    """
    leftovers = []
    for ch in s:
        if leftovers and leftovers[-1] == ch:
            # Adjacent pair found: cancel it.
            leftovers.pop()
        else:
            leftovers.append(ch)
    # Empty stack means every character was paired off.
    return 0 if leftovers else 1
|
[
"taepd1@gmail.com"
] |
taepd1@gmail.com
|
854209911af300efb0ca7e652dd682ae077fa116
|
475110b2e65643eb6f1dcdaa5bd4ee8b74f7900d
|
/backbones/densenet.py
|
3c278afca1f1509b21b1fcd34b8fd4ccdafe8ee3
|
[] |
no_license
|
lgzbryant/classification_project
|
47308d10142c9d871fa8c9a9d45d4f26f5509dce
|
93856ee9bd11d82d6c0ff7785a3ab435631f039a
|
refs/heads/master
| 2020-06-17T07:26:09.304920
| 2019-07-08T16:04:17
| 2019-07-08T16:04:17
| 195,845,254
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,834
|
py
|
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
from .utils import load_state_dict_from_url
__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
model_urls = {
'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}
def _bn_function_factory(norm, relu, conv):
def bn_function(*inputs):
concated_features = torch.cat(inputs, 1)
bottleneck_output = conv(relu(norm(concated_features)))
return bottleneck_output
return bn_function
class _DenseLayer(nn.Sequential):
    """One DenseNet layer: BN-ReLU-Conv1x1 bottleneck followed by
    BN-ReLU-Conv3x3, optionally recomputed via checkpointing to save memory."""
    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, memory_efficient=False):
        super(_DenseLayer, self).__init__()
        # 1x1 bottleneck reduces the concatenated input to
        # bn_size * growth_rate channels before the 3x3 conv.
        self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
        self.add_module('relu1', nn.ReLU(inplace=True)),
        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
                                           growth_rate, kernel_size=1, stride=1,
                                           bias=False)),
        # 3x3 conv emits exactly growth_rate new feature maps.
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
        self.add_module('relu2', nn.ReLU(inplace=True)),
        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
                                           kernel_size=3, stride=1, padding=1,
                                           bias=False)),
        self.drop_rate = drop_rate
        self.memory_efficient = memory_efficient
    def forward(self, *prev_features):
        bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1)
        # Checkpointing trades compute for memory; only useful when gradients
        # actually flow through the inputs.
        if self.memory_efficient and any(prev_feature.requires_grad for prev_feature in prev_features):
            bottleneck_output = cp.checkpoint(bn_function, *prev_features)
        else:
            bottleneck_output = bn_function(*prev_features)
        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
        if self.drop_rate > 0:
            new_features = F.dropout(new_features, p=self.drop_rate,
                                     training=self.training)
        return new_features
class _DenseBlock(nn.Module):
    """A stack of ``_DenseLayer``s with dense connectivity: layer ``i``
    receives the block input plus the outputs of layers ``0..i-1``."""
    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=False):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            # Each layer's input width grows by growth_rate per earlier layer.
            layer = _DenseLayer(
                num_input_features + i * growth_rate,
                growth_rate=growth_rate,
                bn_size=bn_size,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient,
            )
            self.add_module('denselayer%d' % (i + 1), layer)
    def forward(self, init_features):
        # Pass every earlier feature map to each layer as separate args so
        # the layer (or its checkpoint) can concatenate them itself.
        features = [init_features]
        for name, layer in self.named_children():
            new_features = layer(*features)
            features.append(new_features)
        return torch.cat(features, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
        memory_efficient (bool) - set to True to use checkpointing. Much more memory efficient,
          but slower. Default: *False*

    NOTE(review): the final classifier is commented out in this copy, so
    ``forward`` returns the globally pooled feature vector (an embedding),
    not class logits; ``num_classes`` is currently unused.
    """
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, memory_efficient=False):
        super(DenseNet, self).__init__()
        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2,
                                padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient
            )
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            # A transition (channel-halving + downsample) follows every dense
            # block except the last one.
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2
        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
        # Linear layer
        # self.classifier = nn.Linear(num_features, num_classes)
        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                # Vestigial: with the classifier commented out above there are
                # no Linear modules left to initialize.
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        features = self.features(x)
        out = F.relu(features, inplace=True)
        # Global average pool to a (batch, num_features) embedding.
        out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)
        # out = self.classifier(out)
        return out
def _load_state_dict(model, model_url, progress):
    """Download a pretrained checkpoint and load it, renaming legacy keys.

    NOTE(review): relies on a module-level ``import re`` that is not visible
    in this chunk — confirm it exists at the top of the file.
    """
    # '.'s are no longer allowed in module names, but previous _DenseLayer
    # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
    # They are also in the checkpoints in model_urls. This pattern is used
    # to find such keys.
    pattern = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    state_dict = load_state_dict_from_url(model_url, progress=progress)
    # Rewrite e.g. '...denselayer1.norm.1.weight' -> '...denselayer1.norm1.weight'
    # by dropping the dot between the module name and its index.
    for key in list(state_dict.keys()):
        res = pattern.match(key)
        if res:
            new_key = res.group(1) + res.group(2)
            state_dict[new_key] = state_dict[key]
            del state_dict[key]
    model.load_state_dict(state_dict)
def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress,
              **kwargs):
    """Build a DenseNet with the given configuration.

    When ``pretrained`` is true, ImageNet weights for ``arch`` are fetched
    from ``model_urls`` and loaded before the model is returned.
    """
    net = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
    if not pretrained:
        return net
    _load_state_dict(net, model_urls[arch], progress)
    return net
def densenet121(pretrained=False, progress=True, **kwargs):
    """Densenet-121 (growth rate 32, blocks 6-12-24-16, 64 initial features),
    from `"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        pretrained (bool): if True, load weights pre-trained on ImageNet.
        progress (bool): if True, display a download progress bar on stderr.
    """
    return _densenet('densenet121', growth_rate=32, block_config=(6, 12, 24, 16),
                     num_init_features=64, pretrained=pretrained,
                     progress=progress, **kwargs)
def densenet161(pretrained=False, progress=True, **kwargs):
    """Densenet-161 (growth rate 48, blocks 6-12-36-24, 96 initial features),
    from `"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        pretrained (bool): if True, load weights pre-trained on ImageNet.
        progress (bool): if True, display a download progress bar on stderr.
    """
    return _densenet('densenet161', growth_rate=48, block_config=(6, 12, 36, 24),
                     num_init_features=96, pretrained=pretrained,
                     progress=progress, **kwargs)
def densenet169(pretrained=False, progress=True, **kwargs):
    """Densenet-169 (growth rate 32, blocks 6-12-32-32, 64 initial features),
    from `"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        pretrained (bool): if True, load weights pre-trained on ImageNet.
        progress (bool): if True, display a download progress bar on stderr.
    """
    return _densenet('densenet169', growth_rate=32, block_config=(6, 12, 32, 32),
                     num_init_features=64, pretrained=pretrained,
                     progress=progress, **kwargs)
def densenet201(pretrained=False, progress=True, **kwargs):
    """Densenet-201 (growth rate 32, blocks 6-12-48-32, 64 initial features),
    from `"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        pretrained (bool): if True, load weights pre-trained on ImageNet.
        progress (bool): if True, display a download progress bar on stderr.
    """
    return _densenet('densenet201', growth_rate=32, block_config=(6, 12, 48, 32),
                     num_init_features=64, pretrained=pretrained,
                     progress=progress, **kwargs)
|
[
"noreply@github.com"
] |
noreply@github.com
|
cc4c287a1f334475b6b5e06eaabaf792d66e3b32
|
2018321e9f9a3edc1d92cf986df1826138fe6b50
|
/pythonProject/button.py
|
d283c5fb028e73dba1f8517a1b8edb3a15eb47e9
|
[] |
no_license
|
thanyi/alien-invasion
|
7a8ede01e65d8a58accd686bb7219ba58f7e5e0d
|
de96c3123a74c3009eca43edced6693d97b3c8f0
|
refs/heads/master
| 2023-02-11T10:20:33.110836
| 2021-01-02T11:24:44
| 2021-01-02T11:24:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
import pygame.font
import pygame
class Button():
    """Green on-screen button centered on the pygame screen, with a text label."""
    def __init__(self, msg, screen):
        self.screen = screen
        self.screen_rect = self.screen.get_rect()
        # Button size (width, height) in pixels.
        self.width , self.height = 150,50
        # Background color, text color and font.
        self.button_color = (0,255,0)
        self.text_color = (255,255,255)
        self.font = pygame.font.SysFont(None,36)
        # Button rectangle, centered on the screen.
        self.rect = pygame.Rect(0,0,self.width,self.height)
        self.rect.centerx = self.screen_rect.centerx
        self.rect.centery = self.screen_rect.centery
        # NOTE(review): re_rect duplicates rect and is never used below —
        # confirm whether it can be removed.
        self.re_rect = pygame.Rect(0, 0, self.width, self.height)
        self.re_rect.centerx = self.screen_rect.centerx
        self.re_rect.centery = self.screen_rect.centery
        self.prep_msg(msg)
    def prep_msg(self,msg):
        '''Render the message text into an image centered on the button.'''
        self.msg_image =self.font.render(msg, True, self.text_color,self.button_color)
        self.msg_image_rect = self.msg_image.get_rect()
        self.msg_image_rect.center = self.rect.center
    def draw_button(self):
        # Fill the button rectangle, then blit the label on top.
        self.screen.fill(self.button_color,self.rect)
        self.screen.blit(self.msg_image,self.msg_image_rect)
    def draw_replay_button(self):
        # NOTE(review): this mutates self.rect to draw the button 200px lower;
        # subsequent draw_button calls will use the moved rect — confirm intended.
        self.rect.centerx = self.msg_image_rect.x+20
        self.rect.centery = self.msg_image_rect.y+210
        self.screen.fill(self.button_color,self.rect)
        self.screen.blit(self.msg_image,(self.msg_image_rect.x,self.msg_image_rect.y+200))
|
[
"yiceyuan@126.com"
] |
yiceyuan@126.com
|
66329e2ef7a59a9819fbce4fe1a7b235e7eb2f9b
|
e90fdf3c083b43ebe5ead840190b81fd923e2798
|
/Employee.py
|
122dbf7023239dcd9345a05620e0749701118341
|
[] |
no_license
|
michaelgagliardi/2019_2020_projects
|
c643ba68aff6bf634a07c0e8863303eebe6defac
|
217d32eaaea9a0f3a9bb5ff069f4e21492ec3b64
|
refs/heads/main
| 2023-04-03T03:20:19.293052
| 2021-04-05T21:01:36
| 2021-04-05T21:01:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
######### to complete
class Employee:
    """Plain record holding an employee's basic attributes."""
    def __init__(self,ID="none",name="none",age=0,exp=0,job="none",is_from_MA="none"):
        # Store each constructor argument under the same-named attribute.
        attrs = (("ID", ID), ("name", name), ("age", age),
                 ("exp", exp), ("job", job), ("is_from_MA", is_from_MA))
        for attr_name, value in attrs:
            setattr(self, attr_name, value)
|
[
"noreply@github.com"
] |
noreply@github.com
|
cfd05d15243758d9c263176fe03fefdad10205cd
|
e7287d286d127311118d2689bd97b513b95ae046
|
/ext/exmpSorted.py
|
d5da2e587885bca0af4ae73ba5e6eae3ed0a88ce
|
[] |
no_license
|
Reihan-amn/DiseaseClassificationProlem_Kaggle
|
3da32457d45165d2cb6549054b72203604e12481
|
9f1407e33a910a37af3eced2c5ddebdf4d3f7530
|
refs/heads/master
| 2021-05-11T00:49:05.955718
| 2018-01-21T07:14:18
| 2018-01-21T07:14:18
| 118,311,674
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
# Load the death-records CSV into a matrix via the project helper and report
# its type and shape; the remaining preprocessing steps are kept commented out.
from ext import exmpDataFact
Bclass = exmpDataFact.getDataMatrixFromCSV("./DeathRecords.csv")
print('type: ', type(Bclass),' shape: ',Bclass.shape)
''''first row deleted '''
# columns_name = np.array(data2DMat[0])
# features_label = np.delete(columns_name, [0,24], axis = None)
# data2DMat = np.delete(data2DMat, (0), axis=0)
#
#
# '''class labelizing'''
# clss = data2DMat[:, 24]
# print("class lenght:" ,len(clss))
# clssa = exmpDataFact.classDiscreteToInteger(clss)
# classes = clssa[:,None]
|
[
"reihan@Reihanehs-MacBook-Pro.local"
] |
reihan@Reihanehs-MacBook-Pro.local
|
7ffc3b6f85cff22c0f89b8e8b676438dd3d048fe
|
1488889766d328b856b8910d7f1a50b52b754774
|
/firstsite/settings.py
|
ae74f052934497765e6907a52736af417b6336d6
|
[] |
no_license
|
vipul-rathod/firstsite
|
1790cb3f67fb5b690c9cc24161ddb9b13d458718
|
84c09a3c66b58ff92b8b12d83c49c48d0d9bd82c
|
refs/heads/master
| 2021-05-18T22:53:15.355755
| 2020-03-31T20:47:12
| 2020-03-31T20:47:12
| 251,464,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,126
|
py
|
"""
Django settings for firstsite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't+q$0yid9(a5jz74d#iiq*7yvdzdh@p^g$sb(q^cv-(0%ul9=w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'firstsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'firstsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"vipsravs787@gmail.com"
] |
vipsravs787@gmail.com
|
69b384952afa18b41fb769869d637c21f4a61bbb
|
2075052d028ed31a30bdb9acb0a2022c2634f52b
|
/chat/consumers.py
|
761dd8369a35c0e33e7d8ef65e1ce163904ade18
|
[] |
no_license
|
igoo-Y/live_chat_app
|
b67704caa2e5944b131a4299716e501b555985b5
|
d65c87a35d3f3a120da35290addb798e412dad72
|
refs/heads/main
| 2023-06-30T13:21:49.860265
| 2021-08-03T09:11:29
| 2021-08-03T09:11:29
| 392,256,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
import json
from channels.generic.websocket import AsyncWebsocketConsumer
class ChatConsumer(AsyncWebsocketConsumer):
    """WebSocket consumer that relays chat messages within a named room.

    The room name comes from the URL route; every consumer in the same room
    joins the channel-layer group "chat_<room_name>" and receives each
    message broadcast to that group.
    """
    async def connect(self):
        self.room_name = self.scope["url_route"]["kwargs"]["room_name"]
        self.room_group_name = "chat_%s" % self.room_name
        # Join room group
        await self.channel_layer.group_add(self.room_group_name, self.channel_name)
        await self.accept()
    async def disconnect(self, close_code):
        # Leave room group
        await self.channel_layer.group_discard(self.room_group_name, self.channel_name)
    # Receive message from WebSocket
    async def receive(self, text_data):
        # Expects a JSON payload with a "message" key from the client.
        text_data_json = json.loads(text_data)
        message = text_data_json["message"]
        # Send message to room group; "type" selects the chat_message handler.
        await self.channel_layer.group_send(
            self.room_group_name, {"type": "chat_message", "message": message}
        )
    # Receive message from room group
    async def chat_message(self, event):
        message = event["message"]
        # Send message to WebSocket
        await self.send(text_data=json.dumps({"message": message}))
|
[
"79055280+igoo-Y@users.noreply.github.com"
] |
79055280+igoo-Y@users.noreply.github.com
|
542448cbdd184491b07c34dbda1e18c8b39b173d
|
8cdd03812d2cfad8fd9f9d5c83b38a59f9c9c8e8
|
/Code/Backend/Test/stepper2.py
|
7c6561f3f043ca1ceefb8becef5888d374680e6f
|
[] |
no_license
|
MaenhoudtTom/project1
|
46d018ee1a9abd72f90c1f2124ad0a00bb0c46fa
|
ad050accf62aa629bef1d77922dc2207dd78df01
|
refs/heads/master
| 2022-11-07T23:14:31.559237
| 2020-06-24T07:07:09
| 2020-06-24T07:07:09
| 271,761,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
import RPi.GPIO as GPIO
import time
# Drive a 4-wire stepper motor through a half-step sequence on BCM pins.
GPIO.setmode(GPIO.BCM)
control_pins = [12, 16, 20, 21]
for pin in control_pins:
    GPIO.setup(pin, GPIO.OUT)
    GPIO.output(pin, 0)
# Half-step excitation sequence: 8 coil patterns per electrical cycle.
halfstep_seq = [
    [1,0,0,0],
    [1,1,0,0],
    [0,1,0,0],
    [0,1,1,0],
    [0,0,1,0],
    [0,0,1,1],
    [0,0,0,1],
    [1,0,0,1]
]
try:
    # 64 cycles x 8 half-steps = 512 half-steps, ~1 ms per step.
    for i in range(64):
        for halfstep in range(8):
            for pin in range(4):
                GPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])
            time.sleep(0.001)
finally:
    # Fix: cleanup previously ran only on normal completion; a Ctrl-C or
    # error mid-run left the pins configured and driven. Always release them.
    GPIO.cleanup()
|
[
"55881682+MaenhoudtTom@users.noreply.github.com"
] |
55881682+MaenhoudtTom@users.noreply.github.com
|
766305b90df2cd606aef9971c1c2f21fb0412565
|
fc557979eb04d242f892303dde54b47616a2f74b
|
/blogProject/website/models.py
|
1af510c90ecfbd423549a1ee5b579993d5366434
|
[] |
no_license
|
supr-pr/blogProject
|
99ad3ee03b004c56bc44551847bf3e123ab6a7cf
|
b77775f05f8b0929f854794ca2a0c68f089bc7b8
|
refs/heads/master
| 2021-01-02T09:19:02.479888
| 2017-08-21T10:11:23
| 2017-08-21T10:11:23
| 99,191,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,830
|
py
|
from django.db import models
from django.forms import ModelForm
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
# from comments.models import Comment
# @property
# def commens(self):
# qs = Comment.objects.filter_by_instance(instance)
# return qs
class Category(models.Model):
    """Blog post category; ``category`` holds the display name."""
    category = models.CharField(max_length=100, default='Blog')
    def __str__ (self):
        return self.category
class Author(models.Model):
    """Post/comment author; ``author`` holds the display name."""
    author = models.CharField(max_length=100, default='Anonymous')
    def __str__(self):
        return self.author
# class Cmnt(models.Model):
# title = models.CharField(max_length=100)
# date = models.DateTimeField()
# user = models.ForeignKey(Author)
# user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1)
# content_type = models.ForeignKey(ContenType, on_delete= models.CASCADE)
# object_id = models.PositiveIntegerField()
# comment_object = GenericForeignKey('content_type', 'object_id')
# content = models.TextField()
# timestamp = models.DateTimeField(auto_now_add=True)
# objects = CommentManager()
# # def get_fields_and_values(self):
# # return [(field, field.value_to_string(self)) for field in Post._meta.fields]
# def __str__ (self):
# return str(self.user.username)
# # return self.author
# class CommentManager(models.Manager):
# def filter_by_instance(self,instance):
# content_type = ContenType.objects.get_for_model(instance, __class__)
# obj_id = instance.obj_id
# # comments = Comment.objects.filter(content_type=content_type, obj_id= obj_id)
# qs = super (CommentManager, self).filter(content_type=content_type, object_id=obj_id)
# # comments = Comment.objects.filter(content_type=content_type, object_id=obj_id)
# return qs
class Post(models.Model):
    """A blog post with a title, body, publication date, author and category.

    NOTE(review): ForeignKey without on_delete is pre-Django-2.0 syntax;
    confirm the target Django version before upgrading.
    """
    title = models.CharField(max_length=100)
    body = models.TextField()
    date = models.DateTimeField()
    author = models.ForeignKey(Author)
    category = models.ForeignKey(Category)
    # cmnt = models.ForeignKey(Cmnt)
    # def get_fields_and_values(self):
    #     return [(field, field.value_to_string(self)) for field in Post._meta.fields]
    def __str__ (self):
        return self.title
class Comnt(models.Model):
    """A comment attached to a Post.

    NOTE(review): three overlapping timestamps are defined — ``date``
    (creation), ``updated`` and ``my_date_field`` (both last-modified);
    confirm whether all three are needed.
    """
    content = models.TextField(default='')
    # date = models.DateTimeField(auto_now=True, auto_now_add=False)
    date = models.DateTimeField(auto_now_add=True)
    author = models.ForeignKey(Author)
    comnt_on = models.ForeignKey(Post)
    updated = models.DateTimeField(auto_now=True)
    my_date_field = models.DateTimeField(auto_now=True, auto_now_add=False)
    def __str__ (self):
        return self.content
    @property
    def title(self):
        # Alias so templates expecting a ``title`` attribute can render comments.
        return self.content
class ComntForm(ModelForm):
    """ModelForm exposing the user-editable fields of Comnt
    (content, author and the post being commented on)."""
    class Meta:
        model = Comnt
        fields = [
            'content',
            'author',
            # 'email2'
            'comnt_on'
        ]
|
[
"supr.pr@gmail.com"
] |
supr.pr@gmail.com
|
a8271bda23a6157bb2fe3136fe4736b8542b2437
|
1bc350d7a026763ace5d007958e093cf2eeada93
|
/cos_distance.py
|
66dcb2fa6cd48fde2a06f125d88c7a26f2718323
|
[] |
no_license
|
yuridadt/recommender_systems_lab
|
2d6abb32e4f7cdd46816fbfcac97903a3b02a9f8
|
15025a1949748a65ef4eb1836dbdd643aebec805
|
refs/heads/master
| 2020-12-09T17:40:23.484989
| 2020-01-16T20:34:15
| 2020-01-16T20:34:15
| 233,372,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,198
|
py
|
#Рассчитываем косинусное расстояние для кр
#https://habr.com/ru/post/150399/
import csv
import math
#обходим tuple unpacking in python 3
def star(f):
    """Adapt *f* so it can be called with a single tuple of positional
    arguments — a workaround for Python 3 removing tuple-parameter
    unpacking in lambdas (used for sort keys below)."""
    def unpacked(args):
        return f(*args)
    return unpacked
def ReadFile(filename="cos.csv"):
    """Read a user,product,rate CSV file into a nested dict.

    Args:
        filename: path to a CSV whose rows are ``user,product,rate``.

    Returns:
        dict: mentions[user][product] = rate (float).
    """
    mentions = dict()
    # Fix: the original opened the file without a context manager, leaking
    # the handle if float() raised on a malformed row; 'with' always closes.
    with open(filename, newline='') as f:
        for line in csv.reader(f):
            user = line[0]
            product = line[1]
            rate = float(line[2])
            # setdefault creates the per-user dict on first mention.
            mentions.setdefault(user, dict())[product] = rate
    return mentions
def distCosine (vecA, vecB):
    """Cosine similarity between two sparse vectors stored as
    {dimension: value} dicts; only dimensions present in both contribute
    to the dot product."""
    def dot(u, v):
        return sum(u[dim] * v[dim] for dim in u if dim in v)
    # cos = <A,B> / (|A| * |B|)
    return dot(vecA, vecB) / math.sqrt(dot(vecA, vecA)) / math.sqrt(dot(vecB, vecB))
def makeRecommendation (userID, userRates, nBestUsers, nBestProducts):
    """User-based collaborative filtering recommendation.

    Finds the nBestUsers most cosine-similar users to userID, then scores
    products those users rated (and userID has not) by similarity-weighted
    rating, normalized by the total similarity mass. Prints intermediate
    results and returns the top nBestProducts as (product, score) pairs.
    """
    matches = [(u, distCosine(userRates[userID], userRates[u])) for u in userRates if u != userID]
    # Sort by (similarity, user) descending; star() unpacks the pair for the key.
    bestMatches = sorted(matches, key=star(lambda x,y: (y ,x)), reverse=True)[:nBestUsers]
    print ("Most correlated with '%s' users:" % userID)
    for line in bestMatches:
        print (" UserID: %6s Coeff: %6.4f" % (line[0], line[1]))
    sim = dict()
    # Total similarity mass of the selected neighbours, used for normalization.
    sim_all = sum([x[1] for x in bestMatches])
    # Rebind bestMatches as a user -> similarity dict, dropping non-positive scores.
    bestMatches = dict([x for x in bestMatches if x[1] > 0.0])
    for relatedUser in bestMatches:
        for product in userRates[relatedUser]:
            # Only recommend products the target user has not rated yet.
            if not product in userRates[userID]:
                if not product in sim:
                    sim[product] = 0.0
                sim[product] += userRates[relatedUser][product] * bestMatches[relatedUser]
    for product in sim:
        sim[product] /= sim_all
    bestProducts = sorted(sim.items(), key=star(lambda x,y: (y,x)), reverse=True)[:nBestProducts]
    print ("Most correlated products:")
    for prodInfo in bestProducts:
        print (" ProductID: %6s CorrelationCoeff: %6.4f" % (prodInfo[0], prodInfo[1]))
    return [(x[0], x[1]) for x in bestProducts]
if __name__ == '__main__':
    # Demo: recommend top-5 products for user 'ivan' from cos.csv,
    # using the 5 most similar users.
    rec = makeRecommendation('ivan', ReadFile(), 5, 5)
    print ('...end of calculations...')
|
[
"48838615+yuridadt@users.noreply.github.com"
] |
48838615+yuridadt@users.noreply.github.com
|
67a41c24e4933a1aeb0fd35e31eb01bda1c58d33
|
d2b0a5d0702162f32fa277103a2962a9b34b3850
|
/demo/demo_special_purpose_publication_occupancy.py
|
9fd9fcc768d9047027291e00faf328dd7c0f13c4
|
[] |
no_license
|
ruoxijia/pad
|
e0835b5ce39de1ce8c0a85d8387cebfbe0248c79
|
7a8c4710c82c2f45454458dd992a88ea8fde82b1
|
refs/heads/master
| 2021-10-10T17:28:59.560879
| 2017-11-02T06:28:21
| 2017-11-02T06:28:21
| 109,215,905
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,685
|
py
|
from helper import Utilities, PerformanceEvaluation
import pandas as pd
from metric_learning import Subsampling, MetricLearning
from user_feedback import Similarity
# Fix: scipy.misc.comb was deprecated in SciPy 1.0 and removed in 1.3;
# the function lives in scipy.special.
from scipy.special import comb
"""
In the demo, we will showcase an example of special purpose publication.
The data user wants the published database to maximally retain the information about lunch time.
"""
# Initialization of some useful classes
util = Utilities()
pe = PerformanceEvaluation()
mel = MetricLearning()
# step 1: get the database to be published
day_profile = pd.read_pickle('../dataset/dataframe_all_binary.pkl')
# Subsample every 4th profile and every 60th time column to keep the demo fast.
day_profile = day_profile.iloc[0::4,0::60]
rep_mode = 'mean'
anonymity_level = 2 # desired anonymity level
# step 2: data user specifies his/her interest. In the example, the data user is interested in preserving the
# information of a segment of entire time series. In this case, he/she would also need to specify the starting and
# ending time of the time series segment of interest.
interest = 'segment'
window = [11,15] # window specifies the starting and ending time of the period that the data user is interested in
# step 3: pre-sanitize the database
sanitized_profile_baseline = util.sanitize_data(day_profile, distance_metric='euclidean',
                                                anonymity_level=anonymity_level,rep_mode = rep_mode)
loss_generic_metric = pe.get_information_loss(data_gt=day_profile,
                                              data_sanitized=sanitized_profile_baseline.round(),
                                              window=window)
print("information loss with generic metric %s" % loss_generic_metric)
df_subsampled_from = sanitized_profile_baseline.drop_duplicates().sample(frac=1)
# Upper bound on the number of distinct pairs: C(n, 2).
subsample_size_max = int(comb(len(df_subsampled_from),2))
print('total number of pairs is %s' % subsample_size_max)
# step 4: sample a subset of pre-sanitized database and form the data points into pairs
subsample_size = int(round(subsample_size_max))
sp = Subsampling(data=df_subsampled_from)
data_pair = sp.uniform_sampling(subsample_size=subsample_size)
# User receives the data pairs and label the similarity
sim = Similarity(data=data_pair)
sim.extract_interested_attribute(interest=interest, window=window)
similarity_label, class_label = sim.label_via_silhouette_analysis(range_n_clusters=range(2,8))
# step 5: PAD learns a distance metric that represents the interest of the user from the labeled data pairs
# lam_vec is a set of candidate lambda's for weighting the l1-norm penalty in the metric learning optimization problem.
# The lambda that achieves lowest testing error will be selected for generating the distance metric
dist_metric = mel.learn_with_simialrity_label_regularization(data=data_pair,
                                                             label=similarity_label,
                                                             lam_vec=[0,0.1,1,10],
                                                             train_portion=0.8)
# step 6: the original database is privatized using the learned metric
sanitized_profile = util.sanitize_data(day_profile, distance_metric="mahalanobis",
                                       anonymity_level=anonymity_level, rep_mode=rep_mode, VI=dist_metric)
# (optionally for evaluation purpose) Evaluating the information loss of the sanitized database
loss_learned_metric = pe.get_information_loss(data_gt=day_profile,
                                              data_sanitized=sanitized_profile.round(),
                                              window=window)
print("sampled size %s" % subsample_size)
print("information loss with learned metric %s" % loss_learned_metric)
|
[
"ruoxijia@berkeley.edu"
] |
ruoxijia@berkeley.edu
|
b708ef0ba29cc97092ba45507823ff4dd82a5350
|
97062249c6eb04069c6fb01e71d06bc334c828e1
|
/desktop/core/ext-py/Django-1.6.10/tests/decorators/tests.py
|
05016be231c1703dbabc6a7a8f688f91e33ceaf2
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
Albertsss/hue
|
1c8b31c64cc420a029f5b5b80712fb3d0c6cbd6e
|
454d320dd09b6f7946f3cc05bc97c3e2ca6cd485
|
refs/heads/master
| 2021-07-08T17:21:13.237871
| 2018-05-30T06:03:21
| 2018-05-30T06:03:21
| 135,386,450
| 0
| 1
|
Apache-2.0
| 2020-07-25T13:36:58
| 2018-05-30T04:06:18
|
Python
|
UTF-8
|
Python
| false
| false
| 8,243
|
py
|
from functools import wraps
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required, permission_required, user_passes_test
from django.http import HttpResponse, HttpRequest, HttpResponseNotAllowed
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.utils.decorators import method_decorator
from django.utils.functional import allow_lazy, lazy, memoize
from django.utils.unittest import TestCase
from django.views.decorators.cache import cache_page, never_cache, cache_control
from django.views.decorators.clickjacking import xframe_options_deny, xframe_options_sameorigin, xframe_options_exempt
from django.views.decorators.http import require_http_methods, require_GET, require_POST, require_safe, condition
from django.views.decorators.vary import vary_on_headers, vary_on_cookie
# Reference view whose __name__, __doc__ and __dict__ the test suite checks
# after stacking every decorator on it. The docstring text itself is asserted
# by test_attributes, so it must stay exactly "Expected __doc__".
def fully_decorated(request):
    """Expected __doc__"""
    return HttpResponse('<html><body>dummy</body></html>')
fully_decorated.anything = "Expected __dict__"
def compose(*functions):
    """Right-to-left function composition:
    compose(f, g)(*args, **kwargs) == f(g(*args, **kwargs)).
    """
    def composed(*args, **kwargs):
        # The last function receives the original arguments; every earlier
        # one is applied to the previous result, innermost first.
        chain = list(reversed(functions))
        value = chain[0](*args, **kwargs)
        for fn in chain[1:]:
            value = fn(value)
        return value
    return composed
# Stack (nearly) every decorator Django ships onto one view; test_attributes
# then verifies that __name__/__doc__/__dict__ survive the whole stack.
full_decorator = compose(
    # django.views.decorators.http
    require_http_methods(["GET"]),
    require_GET,
    require_POST,
    require_safe,
    condition(lambda r: None, lambda r: None),
    # django.views.decorators.vary
    vary_on_headers('Accept-language'),
    vary_on_cookie,
    # django.views.decorators.cache
    cache_page(60*15),
    cache_control(private=True),
    never_cache,
    # django.contrib.auth.decorators
    # Apply user_passes_test twice to check #9474
    user_passes_test(lambda u:True),
    login_required,
    permission_required('change_world'),
    # django.contrib.admin.views.decorators
    staff_member_required,
    # django.utils.functional
    lambda f: memoize(f, {}, 1),
    allow_lazy,
    lazy,
)
fully_decorated = full_decorator(fully_decorated)
class DecoratorsTest(TestCase):
    """Tests for Django's view decorators: attribute preservation,
    repeated user_passes_test application, cache_page call styles and
    require_safe method filtering."""
    def test_attributes(self):
        """
        Tests that django decorators set certain attributes of the wrapped
        function.
        """
        self.assertEqual(fully_decorated.__name__, 'fully_decorated')
        self.assertEqual(fully_decorated.__doc__, 'Expected __doc__')
        self.assertEqual(fully_decorated.__dict__['anything'], 'Expected __dict__')
    def test_user_passes_test_composition(self):
        """
        Test that the user_passes_test decorator can be applied multiple times
        (#9474).
        """
        def test1(user):
            user.decorators_applied.append('test1')
            return True
        def test2(user):
            user.decorators_applied.append('test2')
            return True
        def callback(request):
            return request.user.decorators_applied
        callback = user_passes_test(test1)(callback)
        callback = user_passes_test(test2)(callback)
        class DummyUser(object): pass
        class DummyRequest(object): pass
        request = DummyRequest()
        request.user = DummyUser()
        request.user.decorators_applied = []
        response = callback(request)
        # The outermost decorator (test2) runs first.
        self.assertEqual(response, ['test2', 'test1'])
    def test_cache_page_new_style(self):
        """
        Test that we can call cache_page the new way
        """
        def my_view(request):
            return "response"
        my_view_cached = cache_page(123)(my_view)
        self.assertEqual(my_view_cached(HttpRequest()), "response")
        my_view_cached2 = cache_page(123, key_prefix="test")(my_view)
        self.assertEqual(my_view_cached2(HttpRequest()), "response")
    def test_require_safe_accepts_only_safe_methods(self):
        """
        Test for the require_safe decorator.
        A view returns either a response or an exception.
        Refs #15637.
        """
        def my_view(request):
            return HttpResponse("OK")
        my_safe_view = require_safe(my_view)
        request = HttpRequest()
        # GET and HEAD are "safe"; everything else must be rejected.
        request.method = 'GET'
        self.assertIsInstance(my_safe_view(request), HttpResponse)
        request.method = 'HEAD'
        self.assertIsInstance(my_safe_view(request), HttpResponse)
        request.method = 'POST'
        self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
        request.method = 'PUT'
        self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
        request.method = 'DELETE'
        self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
# For testing method_decorator, a decorator that assumes a single argument.
# We will get type arguments if there is a mismatch in the number of arguments.
def simple_dec(func):
    """Decorator for single-argument callables: prepends "test:" to the
    argument before invoking the wrapped function."""
    @wraps(func)
    def wrapper(arg):
        return func("test:" + arg)
    return wrapper
simple_dec_m = method_decorator(simple_dec)
# For testing method_decorator, two decorators that add an attribute to the function
def myattr_dec(func):
    """Pass-through decorator that marks the wrapper with ``myattr = True``."""
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    wrapper.myattr = True
    return wraps(func)(wrapper)
myattr_dec_m = method_decorator(myattr_dec)
def myattr2_dec(func):
    """Pass-through decorator that marks the wrapper with ``myattr2 = True``."""
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    wrapper.myattr2 = True
    return wraps(func)(wrapper)
myattr2_dec_m = method_decorator(myattr2_dec)
class MethodDecoratorTests(TestCase):
    """
    Tests for method_decorator
    """
    def test_preserve_signature(self):
        # method_decorator must adapt a function decorator so the implicit
        # ``self`` argument is handled correctly.
        class Test(object):
            @simple_dec_m
            def say(self, arg):
                return arg
        self.assertEqual("test:hello", Test().say("hello"))
    def test_preserve_attributes(self):
        # Sanity check myattr_dec and myattr2_dec
        @myattr_dec
        @myattr2_dec
        def func():
            pass
        self.assertEqual(getattr(func, 'myattr', False), True)
        self.assertEqual(getattr(func, 'myattr2', False), True)
        # Now check method_decorator
        class Test(object):
            @myattr_dec_m
            @myattr2_dec_m
            def method(self):
                "A method"
                pass
        # Attributes set by either decorator must survive on both the bound
        # and the unbound method, along with __doc__ and __name__.
        self.assertEqual(getattr(Test().method, 'myattr', False), True)
        self.assertEqual(getattr(Test().method, 'myattr2', False), True)
        self.assertEqual(getattr(Test.method, 'myattr', False), True)
        self.assertEqual(getattr(Test.method, 'myattr2', False), True)
        self.assertEqual(Test.method.__doc__, 'A method')
        self.assertEqual(Test.method.__name__, 'method')
class XFrameOptionsDecoratorsTests(TestCase):
    """
    Tests for the X-Frame-Options decorators.
    """
    def test_deny_decorator(self):
        """
        Ensures @xframe_options_deny properly sets the X-Frame-Options header.
        """
        @xframe_options_deny
        def a_view(request):
            return HttpResponse()
        r = a_view(HttpRequest())
        self.assertEqual(r['X-Frame-Options'], 'DENY')
    def test_sameorigin_decorator(self):
        """
        Ensures @xframe_options_sameorigin properly sets the X-Frame-Options
        header.
        """
        @xframe_options_sameorigin
        def a_view(request):
            return HttpResponse()
        r = a_view(HttpRequest())
        self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
    def test_exempt_decorator(self):
        """
        Ensures @xframe_options_exempt properly instructs the
        XFrameOptionsMiddleware to NOT set the header.
        """
        @xframe_options_exempt
        def a_view(request):
            return HttpResponse()
        req = HttpRequest()
        resp = a_view(req)
        self.assertEqual(resp.get('X-Frame-Options', None), None)
        self.assertTrue(resp.xframe_options_exempt)
        # Since the real purpose of the exempt decorator is to suppress
        # the middleware's functionality, let's make sure it actually works...
        r = XFrameOptionsMiddleware().process_response(req, resp)
        self.assertEqual(r.get('X-Frame-Options', None), None)
|
[
"540227148@qq.com"
] |
540227148@qq.com
|
c7538c4194854b3c7982b74de054fe7f78f8728e
|
1d70ee049c5198b75567e0000c45ef879f6d39be
|
/JobMatchPonos/server/modules/cv/cv_api.py
|
25e0c70dc1df46469890788a794b2b58526bf6f8
|
[] |
no_license
|
alexawl/Job-Match-Ponos-Back
|
95f28185f71c38733973bc6d730947455c2e6c93
|
c48b4bfddfbf2f4f5aa95409fd2c6ee4f469d9dd
|
refs/heads/master
| 2022-09-30T15:07:19.875016
| 2020-02-01T06:50:34
| 2020-02-01T06:50:34
| 237,575,072
| 0
| 0
| null | 2021-05-06T20:07:41
| 2020-02-01T06:49:35
|
Python
|
UTF-8
|
Python
| false
| false
| 463
|
py
|
from flask_restful import Resource
from flask import request, session
from mongoengine import NotUniqueError
import json
from mongoengine import connect
from jobmatcher.server.authentication.authentication import require_authentication
from jobmatcher.server.authentication.web_token import generate_access_token
from jobmatcher.server.modules.cv import cv_schemas
from jobmatcher.server.utils import utils as u
from jobmatcher.server.modules.cv.CV import CV
|
[
"alexawl@bellsouth.net"
] |
alexawl@bellsouth.net
|
282283fc9217210a557858c3ef384120074f0fe2
|
5424af93e136131ec74efdb307181d3f0c173a02
|
/test/test2.py
|
7139c731fc4268a201b8ee4a5f6f45cc90c43d76
|
[] |
no_license
|
koy1619/python_test
|
8e70021a73d53d9192f6ace2ec346e5e3a92919f
|
c2315ccb2e66a111ed66b28bb19c7abafaa5c8b7
|
refs/heads/master
| 2021-01-15T21:50:20.460932
| 2014-06-23T07:37:31
| 2014-06-23T07:37:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 852
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# Exercise 5-5, "making change": read an amount below one US dollar and
# express it with the fewest coins.  Available coins: 1, 5, 10 and 25
# cents; one dollar is 100 cents.  E.g. $0.76 must come out as three
# 25-cent coins plus one 1-cent coin -- answers such as 76 x 1c, or
# 2 x 25c + 2 x 10c + 1 x 5c + 1 x 1c, use more coins and do not qualify.
money = raw_input('输入任意小于1 美元的金额:')
print money,'美元换算结果'
money = float(money)
money *= 100
# NOTE(review): int() truncates toward zero, so inputs like 0.29 become
# 28 cents due to float representation -- consider int(round(money)).
money = int(money)
# greedy change-making: take as many of the largest coin as possible,
# then repeat with the remainder for each smaller denomination
cent25 = money / 25
money %= 25
cent10 = money / 10
money %= 10
cent5 = money / 5
money %= 5
cent1 = money
# print only the denominations actually used
if cent25 :
    print '25美分*',cent25
if cent10 :
    print '10美分*',cent10
if cent5 :
    print '5美分*',cent5
if cent1 :
    print '1美分*',cent1
|
[
"maxiaolei007@sina.com"
] |
maxiaolei007@sina.com
|
fc08528884d6626eaf34cd5a97c19cbd3a353798
|
24a9f5f1b8d8b550b9b70f5837e7de94ca76692c
|
/Lab07.py
|
2ba74a2602492c526e417c46f3f8c003551fb5f7
|
[] |
no_license
|
syfiawoo/python-graphics-lab
|
2548dfe45f9da50fa5f2578bf9489261c89b7681
|
edd1fec368f28219aca2ebf9f709caaca53270e3
|
refs/heads/master
| 2021-01-20T19:00:26.232748
| 2012-07-03T10:17:08
| 2012-07-03T10:17:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
from graphics import *
#add any functions or classes you might define here
# create a window with width = 700 and height = 500
win = GraphWin('Program Name', 700, 500)
# add your code below this point
# mainloop() blocks here, keeping the window open until it is closed
win.mainloop()
|
[
"moi@fiawoo-PC.(none)"
] |
moi@fiawoo-PC.(none)
|
d5aadd00bb09ce66112973792bc22e130386a94a
|
b21c237d6cbd8dcea90fa18154cc3b9e11508129
|
/main.py
|
c6e6b8969d727e1b239b8a924268cb8d729c773f
|
[] |
no_license
|
topoko123/fastapi-get-post-patch-put-del-basic
|
4ad72de27b18e3deb3a5e107b826742057d30a13
|
fe8165c714fc0e48bdd4c2b4dd5b5b74d8e1aab8
|
refs/heads/main
| 2023-05-08T16:32:00.387261
| 2021-05-30T11:26:34
| 2021-05-30T11:26:34
| 372,191,718
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,518
|
py
|
from fastapi import FastAPI
from pydantic import BaseModel #Import BaseModel for receive
from typing import List, Optional
import uvicorn
class Item(BaseModel):
    # All fields optional (default None) so the same model serves both full
    # and partial (PATCH) update payloads.
    name : Optional[str] = None
    description: Optional[str] = None
    price : Optional[float] = None
class Address(BaseModel): #Collections of user address
    # Postal-address components; all required strings.
    house_no : str
    village_no : str
    alley : str
    lane : str
    road : str
    sub_district : str
    district : str
    province : str
    postal_code : str
class UserBase(BaseModel):
    # Basic user record; age/email are optional, addresses are required.
    firstname : str
    lastname : str
    age : Optional[int] = None
    email : Optional[str] = None
    address : List[Address] #List[Address] <-- From class address(BaseModel)
class ListAll(BaseModel):
    # Envelope wrapping a list of users, used as both request and response model.
    list_user : List[UserBase] #List[UserBase] <-- From class UserBase(BaseModel)
app = FastAPI()
# In-memory store keyed by string id; contents are lost on restart.
items = { # Fake_db
    '1': {
        'name': 'mouse',
        'description': 'This is a Mouse',
        'price': 590,
    },
    '2': {
        'name': 'keyboard',
        'description': 'This is a Keyboard :)',
        'price': 3490
    }
}
@app.get('/items/{item_id}' )
async def read_item(item_id: str):
    """Return the whole store for the sentinel id '0', otherwise one item.

    An unknown id raises KeyError (surfaces as an HTTP 500), exactly as
    the original lookup did.
    """
    return items if item_id == '0' else items[item_id]
@app.get('/fullname/{firstname}/{lastname}')
async def read_fullname(firstname: str, lastname: str):
    """Echo the two path segments back as a (firstname, lastname) pair."""
    return firstname, lastname
@app.put('/items/{item_id}')
async def update_item(item_id: str, item: Item): #Item is from class Item(BaseModel)
    """Merge the request body into the stored item (PUT)."""
    # NOTE(review): item.dict() includes unset fields as None, so a partial
    # body overwrites missing fields with None; an unknown item_id raises
    # KeyError (HTTP 500) -- confirm both are intended.
    items[item_id].update(**item.dict())
    msg = 'update success'
    return msg, item
@app.patch('/items/patch/{item_id}')
async def update_item_patch(item_id: str, item: Item): #Item is from class Item(BaseModel)
    """Partially update an item (PATCH): only fields present in the request
    body overwrite the stored values; unknown item_id raises KeyError."""
    stored_item_data = items[item_id]
    stored_item_model = Item(**stored_item_data)
    # exclude_unset=True keeps fields the client did not send out of the merge
    update_data = item.dict(exclude_unset=True)
    # renamed from `update_item`, which shadowed the PUT handler of that name
    patched_item = stored_item_model.copy(update=update_data)
    items[item_id] = dict(patched_item)
    return items[item_id]
@app.delete('/items/delete/{item_id}')
async def delete_item(item_id: str):
    """Remove the item with the given id from the in-memory store.

    Raises KeyError (HTTP 500) when the id does not exist, matching the
    original pop() behavior.
    """
    del items[item_id]
    return 'delete success'
#------------------------------------------------------------------#
@app.post('/user', response_model=ListAll)
async def user(request: ListAll): #ListAll is from class ListAll(BaseModel)
    """Validate and echo back a batch of users (round-trip through ListAll)."""
    return request
if __name__ == '__main__':
    # Serve on all interfaces, port 80; debug=True is a development setting.
    uvicorn.run(app, host="0.0.0.0", port=80, debug=True)
|
[
"m131.mdsoft@gmail.com"
] |
m131.mdsoft@gmail.com
|
239859618e261ad2335982654069188d008796f4
|
34a4a61dfc837bd0e55f985869c32baef2b9b867
|
/Shelter.back/domain/enums.py
|
27561a99064728b3fa4fa6991b66a3793c826c2f
|
[] |
no_license
|
BlueInt32/shelter
|
5934984b6a256659ec4a20f80926488c5687083c
|
fc8c238a501c608ae38e2dd01724c7b91ff77a1a
|
refs/heads/master
| 2023-01-13T00:31:24.370302
| 2020-05-26T05:22:12
| 2020-05-26T05:22:12
| 225,805,189
| 0
| 0
| null | 2023-01-05T03:45:58
| 2019-12-04T07:24:39
|
Vue
|
UTF-8
|
Python
| false
| false
| 83
|
py
|
from enum import Enum
class PersistanceType(Enum):
    """Discriminates create vs. update persistence operations."""
    # Bug fix: the original `CREATE = 1,` had a trailing comma, which made
    # the member's value the tuple (1,) while UPDATE was the plain int 2.
    # Values are now consistently ints; member identity is unchanged.
    CREATE = 1
    UPDATE = 2
|
[
"simon.budin@gmail.com"
] |
simon.budin@gmail.com
|
0e029895d75465efd99006fba963cce56d4204ed
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/pandas-dev_pandas/pandas-master/pandas/tests/test_nanops.py
|
937c20d009b6bfb2143c62b9aa96a110e0d6c71f
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 43,023
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from functools import partial
import warnings
import numpy as np
from pandas import Series, isnull
from pandas.types.common import is_integer_dtype
import pandas.core.nanops as nanops
import pandas.util.testing as tm
# remember the global bottleneck flag so tearDown can restore it after tests
use_bn = nanops._USE_BOTTLENECK
class TestnanopsDataFrame(tm.TestCase):
    """Exhaustive checks of pandas.core.nanops reductions and comparisons
    against their numpy/scipy counterparts, across many dtype / NaN / inf
    combinations and across 3-D, 2-D and 1-D inputs."""
    def setUp(self):
        # Build 3-D sample arrays of every dtype, NaN/inf-contaminated
        # variants, and 2-D / 1-D slices of each, used by all tests below.
        # Bottleneck is disabled so the pure nanops code paths are exercised.
        np.random.seed(11235)
        nanops._USE_BOTTLENECK = False
        self.arr_shape = (11, 7, 5)
        self.arr_float = np.random.randn(*self.arr_shape)
        self.arr_float1 = np.random.randn(*self.arr_shape)
        self.arr_complex = self.arr_float + self.arr_float1 * 1j
        self.arr_int = np.random.randint(-10, 10, self.arr_shape)
        self.arr_bool = np.random.randint(0, 2, self.arr_shape) == 0
        self.arr_str = np.abs(self.arr_float).astype('S')
        self.arr_utf = np.abs(self.arr_float).astype('U')
        self.arr_date = np.random.randint(0, 20000,
                                          self.arr_shape).astype('M8[ns]')
        self.arr_tdelta = np.random.randint(0, 20000,
                                            self.arr_shape).astype('m8[ns]')
        self.arr_nan = np.tile(np.nan, self.arr_shape)
        self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
        self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
        self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
        self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])
        self.arr_inf = self.arr_float * np.inf
        self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])
        self.arr_float1_inf = np.vstack([self.arr_float1, self.arr_inf])
        self.arr_inf_float1 = np.vstack([self.arr_inf, self.arr_float1])
        self.arr_inf_inf = np.vstack([self.arr_inf, self.arr_inf])
        self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
        self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan,
                                            self.arr_inf])
        self.arr_nan_float1_inf = np.vstack([self.arr_float, self.arr_inf,
                                             self.arr_nan])
        self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan,
                                          self.arr_inf])
        self.arr_obj = np.vstack([self.arr_float.astype(
            'O'), self.arr_int.astype('O'), self.arr_bool.astype(
                'O'), self.arr_complex.astype('O'), self.arr_str.astype(
                    'O'), self.arr_utf.astype('O'), self.arr_date.astype('O'),
            self.arr_tdelta.astype('O')])
        with np.errstate(invalid='ignore'):
            self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j
            self.arr_complex_nan = np.vstack([self.arr_complex,
                                              self.arr_nan_nanj])
            self.arr_nan_infj = self.arr_inf * 1j
            self.arr_complex_nan_infj = np.vstack([self.arr_complex,
                                                   self.arr_nan_infj])
        # 2-D slices of the 3-D fixtures
        self.arr_float_2d = self.arr_float[:, :, 0]
        self.arr_float1_2d = self.arr_float1[:, :, 0]
        self.arr_complex_2d = self.arr_complex[:, :, 0]
        self.arr_int_2d = self.arr_int[:, :, 0]
        self.arr_bool_2d = self.arr_bool[:, :, 0]
        self.arr_str_2d = self.arr_str[:, :, 0]
        self.arr_utf_2d = self.arr_utf[:, :, 0]
        self.arr_date_2d = self.arr_date[:, :, 0]
        self.arr_tdelta_2d = self.arr_tdelta[:, :, 0]
        self.arr_nan_2d = self.arr_nan[:, :, 0]
        self.arr_float_nan_2d = self.arr_float_nan[:, :, 0]
        self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0]
        self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0]
        self.arr_nan_nan_2d = self.arr_nan_nan[:, :, 0]
        self.arr_nan_nanj_2d = self.arr_nan_nanj[:, :, 0]
        self.arr_complex_nan_2d = self.arr_complex_nan[:, :, 0]
        self.arr_inf_2d = self.arr_inf[:, :, 0]
        self.arr_float_inf_2d = self.arr_float_inf[:, :, 0]
        self.arr_nan_inf_2d = self.arr_nan_inf[:, :, 0]
        self.arr_float_nan_inf_2d = self.arr_float_nan_inf[:, :, 0]
        self.arr_nan_nan_inf_2d = self.arr_nan_nan_inf[:, :, 0]
        # 1-D slices of the 3-D fixtures
        self.arr_float_1d = self.arr_float[:, 0, 0]
        self.arr_float1_1d = self.arr_float1[:, 0, 0]
        self.arr_complex_1d = self.arr_complex[:, 0, 0]
        self.arr_int_1d = self.arr_int[:, 0, 0]
        self.arr_bool_1d = self.arr_bool[:, 0, 0]
        self.arr_str_1d = self.arr_str[:, 0, 0]
        self.arr_utf_1d = self.arr_utf[:, 0, 0]
        self.arr_date_1d = self.arr_date[:, 0, 0]
        self.arr_tdelta_1d = self.arr_tdelta[:, 0, 0]
        self.arr_nan_1d = self.arr_nan[:, 0, 0]
        self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0]
        self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0]
        self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0]
        self.arr_nan_nan_1d = self.arr_nan_nan[:, 0, 0]
        self.arr_nan_nanj_1d = self.arr_nan_nanj[:, 0, 0]
        self.arr_complex_nan_1d = self.arr_complex_nan[:, 0, 0]
        self.arr_inf_1d = self.arr_inf.ravel()
        self.arr_float_inf_1d = self.arr_float_inf[:, 0, 0]
        self.arr_nan_inf_1d = self.arr_nan_inf[:, 0, 0]
        self.arr_float_nan_inf_1d = self.arr_float_nan_inf[:, 0, 0]
        self.arr_nan_nan_inf_1d = self.arr_nan_nan_inf[:, 0, 0]
    def tearDown(self):
        # restore the module-level bottleneck flag saved at import time
        nanops._USE_BOTTLENECK = use_bn
    def check_results(self, targ, res, axis, check_dtype=True):
        """Assert that a nanops result `res` matches the target `targ`,
        tolerating timedelta coercion and complex/object rounding noise."""
        res = getattr(res, 'asm8', res)
        res = getattr(res, 'values', res)
        # timedeltas are a beast here
        def _coerce_tds(targ, res):
            if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
                if len(targ) == 1:
                    targ = targ[0].item()
                    res = res.item()
                else:
                    targ = targ.view('i8')
            return targ, res
        try:
            if axis != 0 and hasattr(
                    targ, 'shape') and targ.ndim and targ.shape != res.shape:
                res = np.split(res, [targ.shape[0]], axis=0)[0]
        except:
            targ, res = _coerce_tds(targ, res)
        try:
            tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
        except:
            # handle timedelta dtypes
            if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
                targ, res = _coerce_tds(targ, res)
                tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
                return
            # There are sometimes rounding errors with
            # complex and object dtypes.
            # If it isn't one of those, re-raise the error.
            if not hasattr(res, 'dtype') or res.dtype.kind not in ['c', 'O']:
                raise
            # convert object dtypes to something that can be split into
            # real and imaginary parts
            if res.dtype.kind == 'O':
                if targ.dtype.kind != 'O':
                    res = res.astype(targ.dtype)
                else:
                    try:
                        res = res.astype('c16')
                    except:
                        res = res.astype('f8')
                    try:
                        targ = targ.astype('c16')
                    except:
                        targ = targ.astype('f8')
            # there should never be a case where numpy returns an object
            # but nanops doesn't, so make that an exception
            elif targ.dtype.kind == 'O':
                raise
            tm.assert_almost_equal(targ.real, res.real,
                                   check_dtype=check_dtype)
            tm.assert_almost_equal(targ.imag, res.imag,
                                   check_dtype=check_dtype)
    def check_fun_data(self, testfunc, targfunc, testarval, targarval,
                       targarnanval, check_dtype=True, **kwargs):
        """Run testfunc vs targfunc over every axis and skipna setting,
        then recurse on lower-dimensional slices of the same data."""
        for axis in list(range(targarval.ndim)) + [None]:
            for skipna in [False, True]:
                targartempval = targarval if skipna else targarnanval
                try:
                    targ = targfunc(targartempval, axis=axis, **kwargs)
                    res = testfunc(testarval, axis=axis, skipna=skipna,
                                   **kwargs)
                    self.check_results(targ, res, axis,
                                       check_dtype=check_dtype)
                    if skipna:
                        res = testfunc(testarval, axis=axis, **kwargs)
                        self.check_results(targ, res, axis,
                                           check_dtype=check_dtype)
                    if axis is None:
                        res = testfunc(testarval, skipna=skipna, **kwargs)
                        self.check_results(targ, res, axis,
                                           check_dtype=check_dtype)
                    if skipna and axis is None:
                        res = testfunc(testarval, **kwargs)
                        self.check_results(targ, res, axis,
                                           check_dtype=check_dtype)
                except BaseException as exc:
                    # annotate failures with the parameters that triggered them
                    exc.args += ('axis: %s of %s' % (axis, testarval.ndim - 1),
                                 'skipna: %s' % skipna, 'kwargs: %s' % kwargs)
                    raise
        if testarval.ndim <= 1:
            return
        try:
            testarval2 = np.take(testarval, 0, axis=-1)
            targarval2 = np.take(targarval, 0, axis=-1)
            targarnanval2 = np.take(targarnanval, 0, axis=-1)
        except ValueError:
            return
        self.check_fun_data(testfunc, targfunc, testarval2, targarval2,
                            targarnanval2, check_dtype=check_dtype, **kwargs)
    def check_fun(self, testfunc, targfunc, testar, targar=None,
                  targarnan=None, **kwargs):
        """Resolve fixture names (strings) to arrays and delegate to
        check_fun_data; failures are annotated with the fixture names."""
        if targar is None:
            targar = testar
        if targarnan is None:
            targarnan = testar
        testarval = getattr(self, testar)
        targarval = getattr(self, targar)
        targarnanval = getattr(self, targarnan)
        try:
            self.check_fun_data(testfunc, targfunc, testarval, targarval,
                                targarnanval, **kwargs)
        except BaseException as exc:
            exc.args += ('testar: %s' % testar, 'targar: %s' % targar,
                         'targarnan: %s' % targarnan)
            raise
    def check_funs(self, testfunc, targfunc, allow_complex=True,
                   allow_all_nan=True, allow_str=True, allow_date=True,
                   allow_tdelta=True, allow_obj=True, **kwargs):
        """Exercise a nanops function against its target across every dtype
        family enabled by the allow_* flags, including an object-dtype mix."""
        self.check_fun(testfunc, targfunc, 'arr_float', **kwargs)
        self.check_fun(testfunc, targfunc, 'arr_float_nan', 'arr_float',
                       **kwargs)
        self.check_fun(testfunc, targfunc, 'arr_int', **kwargs)
        self.check_fun(testfunc, targfunc, 'arr_bool', **kwargs)
        objs = [self.arr_float.astype('O'), self.arr_int.astype('O'),
                self.arr_bool.astype('O')]
        if allow_all_nan:
            self.check_fun(testfunc, targfunc, 'arr_nan', **kwargs)
        if allow_complex:
            self.check_fun(testfunc, targfunc, 'arr_complex', **kwargs)
            self.check_fun(testfunc, targfunc, 'arr_complex_nan',
                           'arr_complex', **kwargs)
            if allow_all_nan:
                self.check_fun(testfunc, targfunc, 'arr_nan_nanj', **kwargs)
            objs += [self.arr_complex.astype('O')]
        if allow_str:
            self.check_fun(testfunc, targfunc, 'arr_str', **kwargs)
            self.check_fun(testfunc, targfunc, 'arr_utf', **kwargs)
            objs += [self.arr_str.astype('O'), self.arr_utf.astype('O')]
        if allow_date:
            try:
                targfunc(self.arr_date)
            except TypeError:
                pass
            else:
                self.check_fun(testfunc, targfunc, 'arr_date', **kwargs)
                objs += [self.arr_date.astype('O')]
        if allow_tdelta:
            try:
                targfunc(self.arr_tdelta)
            except TypeError:
                pass
            else:
                self.check_fun(testfunc, targfunc, 'arr_tdelta', **kwargs)
                objs += [self.arr_tdelta.astype('O')]
        if allow_obj:
            self.arr_obj = np.vstack(objs)
            # some nanops handle object dtypes better than their numpy
            # counterparts, so the numpy functions need to be given something
            # else
            if allow_obj == 'convert':
                targfunc = partial(self._badobj_wrap, func=targfunc,
                                   allow_complex=allow_complex)
            self.check_fun(testfunc, targfunc, 'arr_obj', **kwargs)
    def check_funs_ddof(self,
                        testfunc,
                        targfunc,
                        allow_complex=True,
                        allow_all_nan=True,
                        allow_str=True,
                        allow_date=False,
                        allow_tdelta=False,
                        allow_obj=True, ):
        """Like check_funs, but repeated for ddof 0, 1 and 2 (variance-style
        functions); failures are annotated with the offending ddof."""
        for ddof in range(3):
            try:
                self.check_funs(testfunc, targfunc, allow_complex,
                                allow_all_nan, allow_str, allow_date,
                                allow_tdelta, allow_obj, ddof=ddof)
            except BaseException as exc:
                exc.args += ('ddof %s' % ddof, )
                raise
    def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
        """Coerce object arrays to complex/float before calling a numpy
        target that cannot handle object dtype directly."""
        if value.dtype.kind == 'O':
            if allow_complex:
                value = value.astype('c16')
            else:
                value = value.astype('f8')
        return func(value, **kwargs)
    def test_nanany(self):
        self.check_funs(nanops.nanany, np.any, allow_all_nan=False,
                        allow_str=False, allow_date=False, allow_tdelta=False)
    def test_nanall(self):
        self.check_funs(nanops.nanall, np.all, allow_all_nan=False,
                        allow_str=False, allow_date=False, allow_tdelta=False)
    def test_nansum(self):
        self.check_funs(nanops.nansum, np.sum, allow_str=False,
                        allow_date=False, allow_tdelta=True, check_dtype=False)
    def test_nanmean(self):
        self.check_funs(nanops.nanmean, np.mean, allow_complex=False,
                        allow_obj=False, allow_str=False, allow_date=False,
                        allow_tdelta=True)
    def test_nanmean_overflow(self):
        # GH 10155
        # In the previous implementation mean can overflow for int dtypes, it
        # is now consistent with numpy
        # numpy < 1.9.0 is not computing this correctly
        from distutils.version import LooseVersion
        if LooseVersion(np.__version__) >= '1.9.0':
            for a in [2 ** 55, -2 ** 55, 20150515061816532]:
                s = Series(a, index=range(500), dtype=np.int64)
                result = s.mean()
                np_result = s.values.mean()
                self.assertEqual(result, a)
                self.assertEqual(result, np_result)
                self.assertTrue(result.dtype == np.float64)
    def test_returned_dtype(self):
        # mean/std/var/skew/kurt on integer input must upcast to float64;
        # min/max must keep the input dtype
        dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64]
        if hasattr(np, 'float128'):
            dtypes.append(np.float128)
        for dtype in dtypes:
            s = Series(range(10), dtype=dtype)
            group_a = ['mean', 'std', 'var', 'skew', 'kurt']
            group_b = ['min', 'max']
            for method in group_a + group_b:
                result = getattr(s, method)()
                if is_integer_dtype(dtype) and method in group_a:
                    self.assertTrue(
                        result.dtype == np.float64,
                        "return dtype expected from %s is np.float64, "
                        "got %s instead" % (method, result.dtype))
                else:
                    self.assertTrue(
                        result.dtype == dtype,
                        "return dtype expected from %s is %s, "
                        "got %s instead" % (method, dtype, result.dtype))
    def test_nanmedian(self):
        with warnings.catch_warnings(record=True):
            self.check_funs(nanops.nanmedian, np.median, allow_complex=False,
                            allow_str=False, allow_date=False,
                            allow_tdelta=True, allow_obj='convert')
    def test_nanvar(self):
        self.check_funs_ddof(nanops.nanvar, np.var, allow_complex=False,
                             allow_str=False, allow_date=False,
                             allow_tdelta=True, allow_obj='convert')
    def test_nanstd(self):
        self.check_funs_ddof(nanops.nanstd, np.std, allow_complex=False,
                             allow_str=False, allow_date=False,
                             allow_tdelta=True, allow_obj='convert')
    def test_nansem(self):
        tm.skip_if_no_package('scipy.stats')
        tm._skip_if_scipy_0_17()
        from scipy.stats import sem
        self.check_funs_ddof(nanops.nansem, sem, allow_complex=False,
                             allow_str=False, allow_date=False,
                             allow_tdelta=True, allow_obj='convert')
    def _minmax_wrap(self, value, axis=None, func=None):
        """Apply a numpy min/max target, promoting 0-d timedelta results
        to 1-d so they compare cleanly."""
        res = func(value, axis)
        if res.dtype.kind == 'm':
            res = np.atleast_1d(res)
        return res
    def test_nanmin(self):
        func = partial(self._minmax_wrap, func=np.min)
        self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False)
    def test_nanmax(self):
        func = partial(self._minmax_wrap, func=np.max)
        self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False)
    def _argminmax_wrap(self, value, axis=None, func=None):
        """Apply a numpy argmin/argmax target, substituting -1 where the
        reduced slice is all-NaN (nanops' sentinel convention)."""
        res = func(value, axis)
        nans = np.min(value, axis)
        nullnan = isnull(nans)
        if res.ndim:
            res[nullnan] = -1
        elif (hasattr(nullnan, 'all') and nullnan.all() or
              not hasattr(nullnan, 'all') and nullnan):
            res = -1
        return res
    def test_nanargmax(self):
        func = partial(self._argminmax_wrap, func=np.argmax)
        self.check_funs(nanops.nanargmax, func, allow_str=False,
                        allow_obj=False, allow_date=True, allow_tdelta=True)
    def test_nanargmin(self):
        func = partial(self._argminmax_wrap, func=np.argmin)
        if tm.sys.version_info[0:2] == (2, 6):
            self.check_funs(nanops.nanargmin, func, allow_date=True,
                            allow_tdelta=True, allow_str=False,
                            allow_obj=False)
        else:
            self.check_funs(nanops.nanargmin, func, allow_str=False,
                            allow_obj=False)
    def _skew_kurt_wrap(self, values, axis=None, func=None):
        """Apply a scipy skew/kurtosis target, forcing float input and
        zeroing results where all elements along the axis are equal."""
        if not isinstance(values.dtype.type, np.floating):
            values = values.astype('f8')
        result = func(values, axis=axis, bias=False)
        # fix for handling cases where all elements in an axis are the same
        if isinstance(result, np.ndarray):
            result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0
            return result
        elif np.max(values) == np.min(values):
            return 0.
        return result
    def test_nanskew(self):
        tm.skip_if_no_package('scipy.stats')
        tm._skip_if_scipy_0_17()
        from scipy.stats import skew
        func = partial(self._skew_kurt_wrap, func=skew)
        self.check_funs(nanops.nanskew, func, allow_complex=False,
                        allow_str=False, allow_date=False, allow_tdelta=False)
    def test_nankurt(self):
        tm.skip_if_no_package('scipy.stats')
        tm._skip_if_scipy_0_17()
        from scipy.stats import kurtosis
        func1 = partial(kurtosis, fisher=True)
        func = partial(self._skew_kurt_wrap, func=func1)
        self.check_funs(nanops.nankurt, func, allow_complex=False,
                        allow_str=False, allow_date=False, allow_tdelta=False)
    def test_nanprod(self):
        self.check_funs(nanops.nanprod, np.prod, allow_str=False,
                        allow_date=False, allow_tdelta=False)
    def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
        """Check a 2-D corr/cov function: clean inputs -> targ0, NaN-padded
        inputs -> targ1, and NaN when min_periods cannot be satisfied."""
        res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs)
        res01 = checkfun(self.arr_float_2d, self.arr_float1_2d,
                         min_periods=len(self.arr_float_2d) - 1, **kwargs)
        tm.assert_almost_equal(targ0, res00)
        tm.assert_almost_equal(targ0, res01)
        res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
                         **kwargs)
        res11 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
                         min_periods=len(self.arr_float_2d) - 1, **kwargs)
        tm.assert_almost_equal(targ1, res10)
        tm.assert_almost_equal(targ1, res11)
        targ2 = np.nan
        res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs)
        res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs)
        res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs)
        res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
                         **kwargs)
        res24 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
                         min_periods=len(self.arr_float_2d) - 1, **kwargs)
        res25 = checkfun(self.arr_float_2d, self.arr_float1_2d,
                         min_periods=len(self.arr_float_2d) + 1, **kwargs)
        tm.assert_almost_equal(targ2, res20)
        tm.assert_almost_equal(targ2, res21)
        tm.assert_almost_equal(targ2, res22)
        tm.assert_almost_equal(targ2, res23)
        tm.assert_almost_equal(targ2, res24)
        tm.assert_almost_equal(targ2, res25)
    def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):
        """1-D analogue of check_nancorr_nancov_2d."""
        res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs)
        res01 = checkfun(self.arr_float_1d, self.arr_float1_1d,
                         min_periods=len(self.arr_float_1d) - 1, **kwargs)
        tm.assert_almost_equal(targ0, res00)
        tm.assert_almost_equal(targ0, res01)
        res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d,
                         **kwargs)
        res11 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d,
                         min_periods=len(self.arr_float_1d) - 1, **kwargs)
        tm.assert_almost_equal(targ1, res10)
        tm.assert_almost_equal(targ1, res11)
        targ2 = np.nan
        res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs)
        res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs)
        res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs)
        res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d,
                         **kwargs)
        res24 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d,
                         min_periods=len(self.arr_float_1d) - 1, **kwargs)
        res25 = checkfun(self.arr_float_1d, self.arr_float1_1d,
                         min_periods=len(self.arr_float_1d) + 1, **kwargs)
        tm.assert_almost_equal(targ2, res20)
        tm.assert_almost_equal(targ2, res21)
        tm.assert_almost_equal(targ2, res22)
        tm.assert_almost_equal(targ2, res23)
        tm.assert_almost_equal(targ2, res24)
        tm.assert_almost_equal(targ2, res25)
    def test_nancorr(self):
        targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
        targ1 = np.corrcoef(self.arr_float_2d.flat,
                            self.arr_float1_2d.flat)[0, 1]
        self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)
        targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
        targ1 = np.corrcoef(self.arr_float_1d.flat,
                            self.arr_float1_1d.flat)[0, 1]
        self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
                                     method='pearson')
    def test_nancorr_pearson(self):
        targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
        targ1 = np.corrcoef(self.arr_float_2d.flat,
                            self.arr_float1_2d.flat)[0, 1]
        self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
                                     method='pearson')
        targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
        targ1 = np.corrcoef(self.arr_float_1d.flat,
                            self.arr_float1_1d.flat)[0, 1]
        self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
                                     method='pearson')
    def test_nancorr_kendall(self):
        tm.skip_if_no_package('scipy.stats')
        from scipy.stats import kendalltau
        targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
        targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
        self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
                                     method='kendall')
        targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
        targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
        self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
                                     method='kendall')
    def test_nancorr_spearman(self):
        tm.skip_if_no_package('scipy.stats')
        from scipy.stats import spearmanr
        targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
        targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
        self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
                                     method='spearman')
        targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
        targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
        self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
                                     method='spearman')
    def test_nancov(self):
        targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]
        targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
        self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)
        targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]
        targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
        self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)
    def check_nancomp(self, checkfun, targ0):
        """Check a nan-aware comparison op at 3-D, then progressively lower
        dimensions, verifying NaN propagation in the padded variants."""
        arr_float = self.arr_float
        arr_float1 = self.arr_float1
        arr_nan = self.arr_nan
        arr_nan_nan = self.arr_nan_nan
        arr_float_nan = self.arr_float_nan
        arr_float1_nan = self.arr_float1_nan
        arr_nan_float1 = self.arr_nan_float1
        while targ0.ndim:
            try:
                res0 = checkfun(arr_float, arr_float1)
                tm.assert_almost_equal(targ0, res0)
                if targ0.ndim > 1:
                    targ1 = np.vstack([targ0, arr_nan])
                else:
                    targ1 = np.hstack([targ0, arr_nan])
                res1 = checkfun(arr_float_nan, arr_float1_nan)
                tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
                targ2 = arr_nan_nan
                res2 = checkfun(arr_float_nan, arr_nan_float1)
                tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
            except Exception as exc:
                exc.args += ('ndim: %s' % arr_float.ndim, )
                raise
            try:
                arr_float = np.take(arr_float, 0, axis=-1)
                arr_float1 = np.take(arr_float1, 0, axis=-1)
                arr_nan = np.take(arr_nan, 0, axis=-1)
                arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
                arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
                arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
                arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
                targ0 = np.take(targ0, 0, axis=-1)
            except ValueError:
                break
    def test_nangt(self):
        targ0 = self.arr_float > self.arr_float1
        self.check_nancomp(nanops.nangt, targ0)
    def test_nange(self):
        targ0 = self.arr_float >= self.arr_float1
        self.check_nancomp(nanops.nange, targ0)
    def test_nanlt(self):
        targ0 = self.arr_float < self.arr_float1
        self.check_nancomp(nanops.nanlt, targ0)
    def test_nanle(self):
        targ0 = self.arr_float <= self.arr_float1
        self.check_nancomp(nanops.nanle, targ0)
    def test_naneq(self):
        targ0 = self.arr_float == self.arr_float1
        self.check_nancomp(nanops.naneq, targ0)
    def test_nanne(self):
        targ0 = self.arr_float != self.arr_float1
        self.check_nancomp(nanops.nanne, targ0)
    def check_bool(self, func, value, correct, *args, **kwargs):
        """Assert func's truthiness verdict on `value` at every dimension,
        slicing off the last axis each iteration until 0-d/scalar."""
        while getattr(value, 'ndim', True):
            try:
                res0 = func(value, *args, **kwargs)
                if correct:
                    self.assertTrue(res0)
                else:
                    self.assertFalse(res0)
            except BaseException as exc:
                exc.args += ('dim: %s' % getattr(value, 'ndim', value), )
                raise
            if not hasattr(value, 'ndim'):
                break
            try:
                value = np.take(value, 0, axis=-1)
            except ValueError:
                break
    def test__has_infs(self):
        pairs = [('arr_complex', False), ('arr_int', False),
                 ('arr_bool', False), ('arr_str', False), ('arr_utf', False),
                 ('arr_complex', False), ('arr_complex_nan', False),
                 ('arr_nan_nanj', False), ('arr_nan_infj', True),
                 ('arr_complex_nan_infj', True)]
        pairs_float = [('arr_float', False), ('arr_nan', False),
                       ('arr_float_nan', False), ('arr_nan_nan', False),
                       ('arr_float_inf', True), ('arr_inf', True),
                       ('arr_nan_inf', True), ('arr_float_nan_inf', True),
                       ('arr_nan_nan_inf', True)]
        for arr, correct in pairs:
            val = getattr(self, arr)
            try:
                self.check_bool(nanops._has_infs, val, correct)
            except BaseException as exc:
                exc.args += (arr, )
                raise
        # float fixtures are additionally checked at reduced precision
        for arr, correct in pairs_float:
            val = getattr(self, arr)
            try:
                self.check_bool(nanops._has_infs, val, correct)
                self.check_bool(nanops._has_infs, val.astype('f4'), correct)
                self.check_bool(nanops._has_infs, val.astype('f2'), correct)
            except BaseException as exc:
                exc.args += (arr, )
                raise
    def test__isfinite(self):
        pairs = [('arr_complex', False), ('arr_int', False),
                 ('arr_bool', False), ('arr_str', False), ('arr_utf', False),
                 ('arr_complex', False), ('arr_complex_nan', True),
                 ('arr_nan_nanj', True), ('arr_nan_infj', True),
                 ('arr_complex_nan_infj', True)]
        pairs_float = [('arr_float', False), ('arr_nan', True),
                       ('arr_float_nan', True), ('arr_nan_nan', True),
                       ('arr_float_inf', True), ('arr_inf', True),
                       ('arr_nan_inf', True), ('arr_float_nan_inf', True),
                       ('arr_nan_nan_inf', True)]
        func1 = lambda x: np.any(nanops._isfinite(x).ravel())
        # TODO: unused?
        # func2 = lambda x: np.any(nanops._isfinite(x).values.ravel())
        for arr, correct in pairs:
            val = getattr(self, arr)
            try:
                self.check_bool(func1, val, correct)
            except BaseException as exc:
                exc.args += (arr, )
                raise
        for arr, correct in pairs_float:
            val = getattr(self, arr)
            try:
                self.check_bool(func1, val, correct)
                self.check_bool(func1, val.astype('f4'), correct)
                self.check_bool(func1, val.astype('f2'), correct)
            except BaseException as exc:
                exc.args += (arr, )
                raise
    def test__bn_ok_dtype(self):
        # bottleneck may be used for numeric/string dtypes, but never for
        # datetime, timedelta or object arrays
        self.assertTrue(nanops._bn_ok_dtype(self.arr_float.dtype, 'test'))
        self.assertTrue(nanops._bn_ok_dtype(self.arr_complex.dtype, 'test'))
        self.assertTrue(nanops._bn_ok_dtype(self.arr_int.dtype, 'test'))
        self.assertTrue(nanops._bn_ok_dtype(self.arr_bool.dtype, 'test'))
        self.assertTrue(nanops._bn_ok_dtype(self.arr_str.dtype, 'test'))
        self.assertTrue(nanops._bn_ok_dtype(self.arr_utf.dtype, 'test'))
        self.assertFalse(nanops._bn_ok_dtype(self.arr_date.dtype, 'test'))
        self.assertFalse(nanops._bn_ok_dtype(self.arr_tdelta.dtype, 'test'))
        self.assertFalse(nanops._bn_ok_dtype(self.arr_obj.dtype, 'test'))
class TestEnsureNumeric(tm.TestCase):
    """Tests for nanops._ensure_numeric: scalars and ndarrays must coerce
    to numeric values; non-convertible inputs must raise."""
    def test_numeric_values(self):
        # Test integer
        self.assertEqual(nanops._ensure_numeric(1), 1, 'Failed for int')
        # Test float
        self.assertEqual(nanops._ensure_numeric(1.1), 1.1, 'Failed for float')
        # Test complex
        self.assertEqual(nanops._ensure_numeric(1 + 2j), 1 + 2j,
                         'Failed for complex')
    def test_ndarray(self):
        # Test numeric ndarray
        values = np.array([1, 2, 3])
        self.assertTrue(np.allclose(nanops._ensure_numeric(values), values),
                        'Failed for numeric ndarray')
        # Test object ndarray
        o_values = values.astype(object)
        self.assertTrue(np.allclose(nanops._ensure_numeric(o_values), values),
                        'Failed for object ndarray')
        # Test convertible string ndarray
        s_values = np.array(['1', '2', '3'], dtype=object)
        self.assertTrue(np.allclose(nanops._ensure_numeric(s_values), values),
                        'Failed for convertible string ndarray')
        # Test non-convertible string ndarray
        s_values = np.array(['foo', 'bar', 'baz'], dtype=object)
        self.assertRaises(ValueError, lambda: nanops._ensure_numeric(s_values))
    def test_convertable_values(self):
        # numeric-looking strings must coerce to int/float/complex
        self.assertTrue(np.allclose(nanops._ensure_numeric('1'), 1.0),
                        'Failed for convertible integer string')
        self.assertTrue(np.allclose(nanops._ensure_numeric('1.1'), 1.1),
                        'Failed for convertible float string')
        self.assertTrue(np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j),
                        'Failed for convertible complex string')
    def test_non_convertable_values(self):
        # non-numeric strings and containers must raise TypeError
        self.assertRaises(TypeError, lambda: nanops._ensure_numeric('foo'))
        self.assertRaises(TypeError, lambda: nanops._ensure_numeric({}))
        self.assertRaises(TypeError, lambda: nanops._ensure_numeric([]))
class TestNanvarFixedValues(tm.TestCase):
    """Fixed-value and stability tests for nanops.nanvar/nanstd.

    xref GH10242.
    """

    def setUp(self):
        # Samples from a normal distribution with known variance.
        self.variance = variance = 3.0
        self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)

    def test_nanvar_all_finite(self):
        samples = self.samples
        actual_variance = nanops.nanvar(samples)
        tm.assert_almost_equal(actual_variance, self.variance,
                               check_less_precise=2)

    def test_nanvar_nans(self):
        # Interleave NaNs with the real samples.
        samples = np.nan * np.ones(2 * self.samples.shape[0])
        samples[::2] = self.samples

        actual_variance = nanops.nanvar(samples, skipna=True)
        tm.assert_almost_equal(actual_variance, self.variance,
                               check_less_precise=2)

        actual_variance = nanops.nanvar(samples, skipna=False)
        tm.assert_almost_equal(actual_variance, np.nan, check_less_precise=2)

    def test_nanstd_nans(self):
        samples = np.nan * np.ones(2 * self.samples.shape[0])
        samples[::2] = self.samples

        actual_std = nanops.nanstd(samples, skipna=True)
        tm.assert_almost_equal(actual_std, self.variance ** 0.5,
                               check_less_precise=2)

        # BUG FIX: this previously called nanops.nanvar, so the
        # skipna=False branch of nanstd was never exercised.
        actual_std = nanops.nanstd(samples, skipna=False)
        tm.assert_almost_equal(actual_std, np.nan,
                               check_less_precise=2)

    def test_nanvar_axis(self):
        # Stack a normal and a uniform sample; variances per row must match
        # the known values (uniform on [0, 1] has variance 1/12).
        samples_norm = self.samples
        samples_unif = self.prng.uniform(size=samples_norm.shape[0])
        samples = np.vstack([samples_norm, samples_unif])

        actual_variance = nanops.nanvar(samples, axis=1)
        tm.assert_almost_equal(actual_variance, np.array(
            [self.variance, 1.0 / 12]), check_less_precise=2)

    def test_nanvar_ddof(self):
        n = 5
        samples = self.prng.uniform(size=(10000, n + 1))
        samples[:, -1] = np.nan  # Force use of our own algorithm.

        variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()
        variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()
        variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()

        # The unbiased estimate.
        var = 1.0 / 12
        tm.assert_almost_equal(variance_1, var,
                               check_less_precise=2)
        # The underestimated variance.
        tm.assert_almost_equal(variance_0, (n - 1.0) / n * var,
                               check_less_precise=2)
        # The overestimated variance.
        tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var,
                               check_less_precise=2)

    def test_ground_truth(self):
        # Test against values that were precomputed with Numpy.
        samples = np.empty((4, 4))
        samples[:3, :3] = np.array([[0.97303362, 0.21869576, 0.55560287
                                     ], [0.72980153, 0.03109364, 0.99155171],
                                    [0.09317602, 0.60078248, 0.15871292]])
        samples[3] = samples[:, 3] = np.nan

        # Actual variances along axis=0, 1 for ddof=0, 1, 2
        variance = np.array([[[0.13762259, 0.05619224, 0.11568816
                               ], [0.20643388, 0.08428837, 0.17353224],
                              [0.41286776, 0.16857673, 0.34706449]],
                             [[0.09519783, 0.16435395, 0.05082054
                               ], [0.14279674, 0.24653093, 0.07623082],
                              [0.28559348, 0.49306186, 0.15246163]]])

        # Test nanvar.
        for axis in range(2):
            for ddof in range(3):
                var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)
                tm.assert_almost_equal(var[:3], variance[axis, ddof])
                self.assertTrue(np.isnan(var[3]))

        # Test nanstd.
        for axis in range(2):
            for ddof in range(3):
                std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)
                tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5)
                self.assertTrue(np.isnan(std[3]))

    def test_nanstd_roundoff(self):
        # Regression test for GH 10242 (test data taken from GH 10489). Ensure
        # that variance is stable.
        data = Series(766897346 * np.ones(10))
        for ddof in range(3):
            result = data.std(ddof=ddof)
            self.assertEqual(result, 0.0)

    @property
    def prng(self):
        # Fresh fixed-seed RNG on every access for reproducible draws.
        return np.random.RandomState(1234)
class TestNanskewFixedValues(tm.TestCase):
    """Fixed-value checks for nanops.nanskew (xref GH 11974)."""

    def setUp(self):
        # Reference skewness computed with scipy.stats.skew.
        self.samples = np.sin(np.linspace(0, 1, 200))
        self.actual_skew = -0.1875895205961754

    def test_constant_series(self):
        # A constant series must report zero skew (xref GH 11974).
        for const in (3075.2, 3075.3, 3075.5):
            self.assertEqual(nanops.nanskew(const * np.ones(300)), 0.0)

    def test_all_finite(self):
        # Left-tailed beta draws skew negative; right-tailed skew positive.
        left_tailed = self.prng.beta(0.3, 0.1, size=100)
        self.assertLess(nanops.nanskew(left_tailed), 0)
        right_tailed = self.prng.beta(0.1, 0.3, size=100)
        self.assertGreater(nanops.nanskew(right_tailed), 0)

    def test_ground_truth(self):
        self.assertAlmostEqual(nanops.nanskew(self.samples),
                               self.actual_skew)

    def test_axis(self):
        stacked = np.vstack([self.samples,
                             np.nan * np.ones(len(self.samples))])
        tm.assert_almost_equal(nanops.nanskew(stacked, axis=1),
                               np.array([self.actual_skew, np.nan]))

    def test_nans(self):
        padded = np.hstack([self.samples, np.nan])
        self.assertTrue(np.isnan(nanops.nanskew(padded, skipna=False)))

    def test_nans_skipna(self):
        padded = np.hstack([self.samples, np.nan])
        tm.assert_almost_equal(nanops.nanskew(padded, skipna=True),
                               self.actual_skew)

    @property
    def prng(self):
        # Fresh fixed-seed RNG on every access for reproducible draws.
        return np.random.RandomState(1234)
class TestNankurtFixedValues(tm.TestCase):
    """Fixed-value checks for nanops.nankurt (xref GH 11974)."""

    def setUp(self):
        # Reference kurtosis computed with scipy.stats.kurtosis.
        self.samples = np.sin(np.linspace(0, 1, 200))
        self.actual_kurt = -1.2058303433799713

    def test_constant_series(self):
        # A constant series must report zero kurtosis (xref GH 11974).
        for const in (3075.2, 3075.3, 3075.5):
            self.assertEqual(nanops.nankurt(const * np.ones(300)), 0.0)

    def test_all_finite(self):
        # Left-tailed beta draws give negative kurtosis, right-tailed positive.
        left_tailed = self.prng.beta(0.3, 0.1, size=100)
        self.assertLess(nanops.nankurt(left_tailed), 0)
        right_tailed = self.prng.beta(0.1, 0.3, size=100)
        self.assertGreater(nanops.nankurt(right_tailed), 0)

    def test_ground_truth(self):
        self.assertAlmostEqual(nanops.nankurt(self.samples),
                               self.actual_kurt)

    def test_axis(self):
        stacked = np.vstack([self.samples,
                             np.nan * np.ones(len(self.samples))])
        tm.assert_almost_equal(nanops.nankurt(stacked, axis=1),
                               np.array([self.actual_kurt, np.nan]))

    def test_nans(self):
        padded = np.hstack([self.samples, np.nan])
        self.assertTrue(np.isnan(nanops.nankurt(padded, skipna=False)))

    def test_nans_skipna(self):
        padded = np.hstack([self.samples, np.nan])
        tm.assert_almost_equal(nanops.nankurt(padded, skipna=True),
                               self.actual_kurt)

    @property
    def prng(self):
        # Fresh fixed-seed RNG on every access for reproducible draws.
        return np.random.RandomState(1234)
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
a13c30cb58afafc6423d27afc1fc0999a91c663c
|
54fdc31183763afafe5c07de6cb4ed4167fbf2f2
|
/projet_logiciel/main_fin_p3c1.py
|
6bef301691d6719391cf4b517283db923e506f13
|
[] |
no_license
|
Patrick-Wampe/Debugging-avec-Python
|
e089130352c02ae78d04fe4ee4f8286d5eabce9f
|
fd9d36b45e063b25a694e0974f6051c26ae2ee94
|
refs/heads/main
| 2023-05-14T14:18:03.832620
| 2021-06-10T10:35:20
| 2021-06-10T10:35:20
| 374,808,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,179
|
py
|
# -*- coding: utf8 -*-
from random import randint
import datetime

# Greeting banner: compute the player's age from a hard-coded birth year.
print("Bienvenue dans le jeu Pierre Feuille Ciseaux")
nom = "debogeur"
annee_actuelle = datetime.datetime.now().strftime("%Y")
annee_de_naissance = 2000
# NOTE(review): the sentence appears to be missing the word "ans" after the
# computed age -- confirm the intended wording before changing the string.
print(f"Bonjour {nom}, nous sommes en {annee_actuelle}, du coup tu as {int(annee_actuelle) - annee_de_naissance}.")
class pierre_feuille_ciseaux:
    """Rock-paper-scissors game state bound to Tkinter score/image widgets.

    Choices are encoded as 1 = pierre (rock), 2 = feuille (paper),
    3 = ciseaux (scissors).
    """

    def __init__(self, nouveau_score_joueur, nouveau_score_ia, label_joueur,
                 label_ia):
        # BUG FIX: was misspelled ``_init_`` so it never ran as a
        # constructor and 4-argument construction raised TypeError.
        self.score_joueur = 0
        self.score_intelligence_artificielle = 0
        self.nouveau_score_joueur = nouveau_score_joueur
        self.nouveau_score_ia = nouveau_score_ia
        self.label_joueur = label_joueur
        self.label_ia = label_ia

    def maj_scores(self, choix_ia, choix_joueur):
        """Update the scores for one round; ties leave both unchanged."""
        if choix_ia == 1 and choix_joueur == 2:
            self.score_joueur += 1
        elif choix_ia == 2 and choix_joueur == 1:
            self.score_intelligence_artificielle += 1
        elif choix_ia == 1 and choix_joueur == 3:
            self.score_intelligence_artificielle += 1
        elif choix_ia == 3 and choix_joueur == 1:
            self.score_joueur += 1
        elif choix_ia == 3 and choix_joueur == 2:
            self.score_intelligence_artificielle += 1
        elif choix_ia == 2 and choix_joueur == 3:
            self.score_joueur += 1

    def jouer(self, choix_joueur):
        """Play one round: draw the AI's move, show it and refresh scores."""
        choix_ia = randint(1, 3)
        if choix_ia == 1:
            self.label_ia.configure(image=pierre)
        elif choix_ia == 2:
            self.label_ia.configure(image=feuille)
        else:
            self.label_ia.configure(image=ciseaux)
        self.maj_scores(choix_ia, choix_joueur)
        self.nouveau_score_joueur.configure(text=str(self.score_joueur))
        self.nouveau_score_ia.configure(
            text=str(self.score_intelligence_artificielle))

    def jouer_pierre(self):
        # BUG FIX: ``self`` was missing, so this raised TypeError when
        # invoked as a bound method (e.g. from a Button command).
        self.jouer(1)
        self.label_joueur.configure(image=pierre)

    def jouer_feuille(self):
        self.jouer(2)
        self.label_joueur.configure(image=feuille)

    def jouer_ciseaux(self):
        self.jouer(3)
        self.label_joueur.configure(image=ciseaux)

    def rejouer(self):
        """Reset both scores and the displayed images."""
        self.score_joueur = 0
        self.score_intelligence_artificielle = 0
        self.nouveau_score_joueur.configure(text=str(self.score_joueur))
        self.nouveau_score_ia.configure(
            text=str(self.score_intelligence_artificielle))
        self.label_joueur.configure(image=zero)
        self.label_ia.configure(image=zero)
# ---------------------------------------------------------------------------
# GUI wiring. NOTE(review): this file comes from a debugging exercise; the
# changes below fix the concrete defects while keeping the explanatory
# try/except hints: Tk() must exist before any PhotoImage, Label/Button/E/W
# must be imported, `zero` must be created before the Labels that use it,
# Button calls must not be chained (configure()/grid() return None), and the
# bouton_recommence / bouton_recommencer name mismatch is resolved.
# ---------------------------------------------------------------------------
from tkinter import PhotoImage, Tk, Label, Button, E, W

# The root window must be created before any PhotoImage.
fenetre = Tk()
fenetre.title("Pierre Feuille Ciseaux")

try:
    versus = PhotoImage(file='vs.gif')
    pierre = PhotoImage(file='pierre.gif')
    feuille = PhotoImage(file='feuille.gif')
    ciseaux = PhotoImage(file='ciseaux.gif')
except RuntimeError:
    print("""Il y a un erreur au niveau de la fonction PhotoImage()
D'après l'erreur tu as importé les images avant de créer la fenêtre.""")

# `zero` is referenced by the Labels below, so it must be created first.
try:
    zero = PhotoImage(file='zero.jpg')
except Exception:
    # NOTE(review): tkinter's PhotoImage cannot decode JPEG -- a GIF/PNG
    # asset is expected here; confirm which placeholder image is intended.
    print("Une exception _tkinter.TclError a été levée.")

try:
    texte1 = Label(fenetre, text="Vous", font=("Arial", "20", "bold"))
    texte1.grid(row=0, column=0)
    texte2 = Label(fenetre, text="Intelligence artificielle",
                   font=("Arial", 20, "bold"))
    texte2.grid(row=0, column=2)
    texte3 = Label(fenetre,
                   text="Pour jouer, cliquez sur une des icônes ci-dessous.",
                   font=("Arial", 20, "bold"))
    texte3.grid(row=3, columnspan=3, pady=5)
    nouveau_score_joueur = Label(fenetre, text="0", font=("Arial", 20, "bold"))
    nouveau_score_joueur.grid(row=1, column=0)
    nouveau_score_ia = Label(fenetre, text="0", font=("Arial", 20, "bold"))
    nouveau_score_ia.grid(row=1, column=2)
    label_joueur = Label(fenetre, image=zero)
    label_joueur.grid(row=2, column=0)
    label_vs = Label(fenetre, image=versus)
    label_vs.grid(row=2, column=1)
    label_ia = Label(fenetre, image=zero)
    label_ia.grid(row=2, column=2)
except NameError:
    print("La fonction Label() n'a pas été importée.")

try:
    jeu = pierre_feuille_ciseaux(nouveau_score_joueur, nouveau_score_ia,
                                 label_joueur, label_ia)
except NameError:
    print("Un des paramètres de la classe pose problème.")

try:
    # BUG FIX: configure()/grid() return None, so calls must not be chained.
    bouton_pierre = Button(fenetre, command=jeu.jouer_pierre)
    bouton_pierre.configure(image=pierre)
    bouton_pierre.grid(row=4, column=0)
    bouton_feuille = Button(fenetre, command=jeu.jouer_feuille)
    bouton_feuille.configure(image=feuille)
    bouton_feuille.grid(row=4, column=1)
    bouton_ciseaux = Button(fenetre, command=jeu.jouer_ciseaux)
    bouton_ciseaux.configure(image=ciseaux)
    bouton_ciseaux.grid(row=4, column=2)
    bouton_recommence = Button(fenetre, text='Rejouer', command=jeu.rejouer,
                               font=("Courier", 20, "bold"))
    # BUG FIX: was `bouton_recommencer.grid(...)` (undefined name).
    bouton_recommence.grid(row=5, column=0, pady=10, sticky=E)
    bouton_quitter = Button(fenetre, text='Quitter', command=quit,
                            font=("Courier", 20, "bold"))
    bouton_quitter.grid(row=5, column=2, pady=10, sticky=W)
except NameError:
    print("La fonction Button() n'a été importée.")

fenetre.mainloop()
|
[
"noreply@github.com"
] |
noreply@github.com
|
9ca2e633e67d0d45a394bc37f48e58a9aa50e852
|
72fc93362a930d2ef8c951c6c53f9862abf60b03
|
/captain_console/user/migrations/0002_auto_20200509_1717.py
|
6e4cd570b49e6b936f684c1885309a9879b319d7
|
[] |
no_license
|
valdisbaerings/VN_2
|
434a08ad1cac0ba2fb45dc0d5707cc62cc117771
|
52ab14980e6e2c39d9007f3e9cefe276d5aebbb6
|
refs/heads/master
| 2022-06-20T04:51:58.070386
| 2020-05-14T16:40:50
| 2020-05-14T16:40:50
| 262,390,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
# Generated by Django 3.0.6 on 2020-05-09 17:17

from django.db import migrations, models


class Migration(migrations.Migration):
    """Set a default avatar URL on Profile.profile_image."""

    dependencies = [
        ('user', '0001_initial'),
    ]

    operations = [
        # Re-declare profile_image with a default icons8 avatar URL and a
        # generous max_length for long image URLs.
        migrations.AlterField(
            model_name='profile',
            name='profile_image',
            field=models.CharField(default='https://img.icons8.com/pastel-glyph/512/000000/user-male--v1.png', max_length=9999),
        ),
    ]
|
[
"saralindsveins@gmail.com"
] |
saralindsveins@gmail.com
|
f617f398edfe0e8e5b141797e5bcd020e521010e
|
925c667f928d8a08a22cbb6c0bffbd5264a4db85
|
/primerProyectoDjango/firtPro/firtPro/asgi.py
|
e210908218fc3e14599a75b10486c36247c84939
|
[] |
no_license
|
jesusManuelJuarez/jesusManuelJuarez
|
62dd5770791e8c08942fe753a1cead687db3ab7a
|
c74ed3734ccd2a4e4ee8c5672cccd2102b54d5af
|
refs/heads/master
| 2022-04-11T12:23:59.928223
| 2020-04-02T22:32:43
| 2020-04-02T22:32:43
| 250,353,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
"""
ASGI config for firtPro project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'firtPro.settings')
application = get_asgi_application()
|
[
"noreply@github.com"
] |
noreply@github.com
|
a11962ae95b28d1923e23d0a5c514d53c454524e
|
7889f7f0532db6a7f81e6f8630e399c90438b2b9
|
/3.7.1/_downloads/a54f19823bde998a456571636498aa98/auto_subplots_adjust.py
|
bd6326b8291f4b1a16db182e1f642d2279a8f0b0
|
[] |
no_license
|
matplotlib/matplotlib.github.com
|
ef5d23a5bf77cb5af675f1a8273d641e410b2560
|
2a60d39490941a524e5385670d488c86083a032c
|
refs/heads/main
| 2023-08-16T18:46:58.934777
| 2023-08-10T05:07:57
| 2023-08-10T05:08:30
| 1,385,150
| 25
| 59
| null | 2023-08-30T15:59:50
| 2011-02-19T03:27:35
| null |
UTF-8
|
Python
| false
| false
| 3,366
|
py
|
"""
===============================================
Programmatically controlling subplot adjustment
===============================================
.. note::
This example is primarily intended to show some advanced concepts in
Matplotlib.
If you are only looking for having enough space for your labels, it is
almost always simpler and good enough to either set the subplot parameters
manually using `.Figure.subplots_adjust`, or use one of the automatic
layout mechanisms
(:doc:`/tutorials/intermediate/constrainedlayout_guide` or
:doc:`/tutorials/intermediate/tight_layout_guide`).
This example describes a user-defined way to read out Artist sizes and
set the subplot parameters accordingly. Its main purpose is to illustrate
some advanced concepts like reading out text positions, working with
bounding boxes and transforms and using
:ref:`events <event-handling-tutorial>`. But it can also serve as a starting
point if you want to automate the layouting and need more flexibility than
tight layout and constrained layout.
Below, we collect the bounding boxes of all y-labels and move the left border
of the subplot to the right so that it leaves enough room for the union of all
the bounding boxes.
There's one catch with calculating text bounding boxes:
Querying the text bounding boxes (`.Text.get_window_extent`) needs a
renderer (`.RendererBase` instance), to calculate the text size. This renderer
is only available after the figure has been drawn (`.Figure.draw`).
A solution to this is putting the adjustment logic in a draw callback.
This function is executed after the figure has been drawn. It can now check
if the subplot leaves enough room for the text. If not, the subplot parameters
are updated and second draw is triggered.
.. redirect-from:: /gallery/pyplots/auto_subplots_adjust
"""
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
fig, ax = plt.subplots()
ax.plot(range(10))
ax.set_yticks([2, 5, 7], labels=['really, really, really', 'long', 'labels'])
def on_draw(event):
    """Draw callback: widen the left margin to fit the y-tick labels.

    Collects every y-tick label's bounding box, converts them to relative
    figure coordinates, and if their union is wider than the current left
    margin, adjusts the subplot and triggers a second draw.
    """
    to_fig = fig.transFigure.inverted()
    # Pixel-space label extents, transformed into relative figure coords.
    bboxes = [label.get_window_extent().transformed(to_fig)
              for label in ax.get_yticklabels()]
    # The bbox that bounds all the label bboxes.
    union = mtransforms.Bbox.union(bboxes)
    if fig.subplotpars.left < union.width:
        # Move the subplot left edge to the right, padding a little.
        fig.subplots_adjust(left=1.1 * union.width)
        fig.canvas.draw()
fig.canvas.mpl_connect('draw_event', on_draw)
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.artist.Artist.get_window_extent`
# - `matplotlib.transforms.Bbox`
# - `matplotlib.transforms.BboxBase.transformed`
# - `matplotlib.transforms.BboxBase.union`
# - `matplotlib.transforms.Transform.inverted`
# - `matplotlib.figure.Figure.subplots_adjust`
# - `matplotlib.figure.SubplotParams`
# - `matplotlib.backend_bases.FigureCanvasBase.mpl_connect`
|
[
"quantum.analyst@gmail.com"
] |
quantum.analyst@gmail.com
|
12148e25092c6f6329984c046651dba85edfb209
|
3c2e75d3563053dd186dcff324fd84eba561f2a7
|
/python/onos/rsm/__init__.py
|
b94966701cbd5b870c5f829c3de02c8bd040a16c
|
[
"Apache-2.0"
] |
permissive
|
stjordanis/onos-api
|
00c2434090b9f51d7eacf00f082abd7f2146c1fc
|
13fca9dc160a23bc9d89e4ef33ee2da9b2a8ee48
|
refs/heads/master
| 2023-09-02T11:07:58.824154
| 2021-11-01T17:40:27
| 2021-11-01T17:40:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 10,002
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: onos/rsm/rsm.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import Dict, List, Optional
import betterproto
from betterproto.grpc.grpclib_server import ServiceBase
import grpclib
# Enumerations mirrored from onos/rsm/rsm.proto (generated code).
class SliceType(betterproto.Enum):
    SLICE_TYPE_DL_SLICE = 0
    SLICE_TYPE_UL_SLICE = 1


class SchedulerType(betterproto.Enum):
    SCHEDULER_TYPE_ROUND_ROBIN = 0
    SCHEDULER_TYPE_PROPORTIONALLY_FAIR = 1
    SCHEDULER_TYPE_QOS_BASED = 2


class UeIdType(betterproto.Enum):
    UE_ID_TYPE_CU_UE_F1_AP_ID = 0
    UE_ID_TYPE_DU_UE_F1_AP_ID = 1
    UE_ID_TYPE_RAN_UE_NGAP_ID = 2
    UE_ID_TYPE_AMF_UE_NGAP_ID = 3
    UE_ID_TYPE_ENB_UE_S1_AP_ID = 4


# Protobuf message wrappers; the integer arguments are the .proto field
# numbers (generated code -- do not hand-edit field numbers).
@dataclass(eq=False, repr=False)
class SliceItem(betterproto.Message):
    e2_node_id: str = betterproto.string_field(1)
    slice_ids: List[str] = betterproto.string_field(2)


@dataclass(eq=False, repr=False)
class Ack(betterproto.Message):
    # Generic acknowledgement; `cause` explains failure when success is False.
    success: bool = betterproto.bool_field(1)
    cause: str = betterproto.string_field(2)


@dataclass(eq=False, repr=False)
class CreateSliceRequest(betterproto.Message):
    e2_node_id: str = betterproto.string_field(1)
    slice_id: str = betterproto.string_field(2)
    scheduler_type: "SchedulerType" = betterproto.enum_field(3)
    weight: str = betterproto.string_field(4)
    slice_type: "SliceType" = betterproto.enum_field(5)


@dataclass(eq=False, repr=False)
class CreateSliceResponse(betterproto.Message):
    ack: "Ack" = betterproto.message_field(1)


# Same shape as CreateSliceRequest; kept separate to match the .proto.
@dataclass(eq=False, repr=False)
class UpdateSliceRequest(betterproto.Message):
    e2_node_id: str = betterproto.string_field(1)
    slice_id: str = betterproto.string_field(2)
    scheduler_type: "SchedulerType" = betterproto.enum_field(3)
    weight: str = betterproto.string_field(4)
    slice_type: "SliceType" = betterproto.enum_field(5)


@dataclass(eq=False, repr=False)
class UpdateSliceResponse(betterproto.Message):
    ack: "Ack" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class DeleteSliceRequest(betterproto.Message):
    e2_node_id: str = betterproto.string_field(1)
    slice_id: str = betterproto.string_field(2)
    slice_type: "SliceType" = betterproto.enum_field(3)


@dataclass(eq=False, repr=False)
class DeleteSliceResponse(betterproto.Message):
    ack: "Ack" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class SliceAssocItem(betterproto.Message):
    ue_slice_assoc_id: str = betterproto.string_field(1)
    e2_node_id: str = betterproto.string_field(2)
    ue_id: List["UeIdType"] = betterproto.enum_field(3)
    slice_id: str = betterproto.string_field(4)


@dataclass(eq=False, repr=False)
class UeId(betterproto.Message):
    ue_id: str = betterproto.string_field(1)
    type: "UeIdType" = betterproto.enum_field(2)


@dataclass(eq=False, repr=False)
class SetUeSliceAssociationRequest(betterproto.Message):
    e2_node_id: str = betterproto.string_field(1)
    ue_id: List["UeId"] = betterproto.message_field(2)
    dl_slice_id: str = betterproto.string_field(3)
    ul_slice_id: str = betterproto.string_field(4)
    drb_id: str = betterproto.string_field(5)


@dataclass(eq=False, repr=False)
class SetUeSliceAssociationResponse(betterproto.Message):
    ack: "Ack" = betterproto.message_field(1)
    assigned_ue_slice_assoc_id: str = betterproto.string_field(2)
class RsmStub(betterproto.ServiceStub):
    """Generated async client stub for the onos.rsm.Rsm gRPC service.

    Each method builds the corresponding request message from keyword
    arguments and performs a unary-unary RPC.
    """

    async def create_slice(
        self,
        *,
        e2_node_id: str = "",
        slice_id: str = "",
        scheduler_type: "SchedulerType" = None,
        weight: str = "",
        slice_type: "SliceType" = None,
    ) -> "CreateSliceResponse":
        """Unary call to /onos.rsm.Rsm/CreateSlice."""
        request = CreateSliceRequest()
        request.e2_node_id = e2_node_id
        request.slice_id = slice_id
        request.scheduler_type = scheduler_type
        request.weight = weight
        request.slice_type = slice_type

        return await self._unary_unary(
            "/onos.rsm.Rsm/CreateSlice", request, CreateSliceResponse
        )

    async def update_slice(
        self,
        *,
        e2_node_id: str = "",
        slice_id: str = "",
        scheduler_type: "SchedulerType" = None,
        weight: str = "",
        slice_type: "SliceType" = None,
    ) -> "UpdateSliceResponse":
        """Unary call to /onos.rsm.Rsm/UpdateSlice."""
        request = UpdateSliceRequest()
        request.e2_node_id = e2_node_id
        request.slice_id = slice_id
        request.scheduler_type = scheduler_type
        request.weight = weight
        request.slice_type = slice_type

        return await self._unary_unary(
            "/onos.rsm.Rsm/UpdateSlice", request, UpdateSliceResponse
        )

    async def delete_slice(
        self,
        *,
        e2_node_id: str = "",
        slice_id: str = "",
        slice_type: "SliceType" = None,
    ) -> "DeleteSliceResponse":
        """Unary call to /onos.rsm.Rsm/DeleteSlice."""
        request = DeleteSliceRequest()
        request.e2_node_id = e2_node_id
        request.slice_id = slice_id
        request.slice_type = slice_type

        return await self._unary_unary(
            "/onos.rsm.Rsm/DeleteSlice", request, DeleteSliceResponse
        )

    async def set_ue_slice_association(
        self,
        *,
        e2_node_id: str = "",
        ue_id: Optional[List["UeId"]] = None,
        dl_slice_id: str = "",
        ul_slice_id: str = "",
        drb_id: str = "",
    ) -> "SetUeSliceAssociationResponse":
        """Unary call to /onos.rsm.Rsm/SetUeSliceAssociation."""
        ue_id = ue_id or []

        request = SetUeSliceAssociationRequest()
        request.e2_node_id = e2_node_id
        if ue_id is not None:
            request.ue_id = ue_id
        request.dl_slice_id = dl_slice_id
        request.ul_slice_id = ul_slice_id
        request.drb_id = drb_id

        return await self._unary_unary(
            "/onos.rsm.Rsm/SetUeSliceAssociation",
            request,
            SetUeSliceAssociationResponse,
        )
class RsmBase(ServiceBase):
    """Generated server base for onos.rsm.Rsm.

    Subclasses override the public async handlers; the private ``__rpc_*``
    wrappers adapt grpclib streams to those handlers, and ``__mapping__``
    registers the route table with grpclib.
    """

    async def create_slice(
        self,
        e2_node_id: str,
        slice_id: str,
        scheduler_type: "SchedulerType",
        weight: str,
        slice_type: "SliceType",
    ) -> "CreateSliceResponse":
        # Override in a subclass; default replies UNIMPLEMENTED.
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def update_slice(
        self,
        e2_node_id: str,
        slice_id: str,
        scheduler_type: "SchedulerType",
        weight: str,
        slice_type: "SliceType",
    ) -> "UpdateSliceResponse":
        # Override in a subclass; default replies UNIMPLEMENTED.
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def delete_slice(
        self, e2_node_id: str, slice_id: str, slice_type: "SliceType"
    ) -> "DeleteSliceResponse":
        # Override in a subclass; default replies UNIMPLEMENTED.
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    async def set_ue_slice_association(
        self,
        e2_node_id: str,
        ue_id: Optional[List["UeId"]],
        dl_slice_id: str,
        ul_slice_id: str,
        drb_id: str,
    ) -> "SetUeSliceAssociationResponse":
        # Override in a subclass; default replies UNIMPLEMENTED.
        raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)

    # Each __rpc_* wrapper unpacks the incoming request message into
    # keyword arguments, dispatches to the overridable handler above and
    # sends the handler's reply back on the stream.
    async def __rpc_create_slice(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()

        request_kwargs = {
            "e2_node_id": request.e2_node_id,
            "slice_id": request.slice_id,
            "scheduler_type": request.scheduler_type,
            "weight": request.weight,
            "slice_type": request.slice_type,
        }

        response = await self.create_slice(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_update_slice(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()

        request_kwargs = {
            "e2_node_id": request.e2_node_id,
            "slice_id": request.slice_id,
            "scheduler_type": request.scheduler_type,
            "weight": request.weight,
            "slice_type": request.slice_type,
        }

        response = await self.update_slice(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_delete_slice(self, stream: grpclib.server.Stream) -> None:
        request = await stream.recv_message()

        request_kwargs = {
            "e2_node_id": request.e2_node_id,
            "slice_id": request.slice_id,
            "slice_type": request.slice_type,
        }

        response = await self.delete_slice(**request_kwargs)
        await stream.send_message(response)

    async def __rpc_set_ue_slice_association(
        self, stream: grpclib.server.Stream
    ) -> None:
        request = await stream.recv_message()

        request_kwargs = {
            "e2_node_id": request.e2_node_id,
            "ue_id": request.ue_id,
            "dl_slice_id": request.dl_slice_id,
            "ul_slice_id": request.ul_slice_id,
            "drb_id": request.drb_id,
        }

        response = await self.set_ue_slice_association(**request_kwargs)
        await stream.send_message(response)

    def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
        # Route table consumed by grpclib's server machinery.
        return {
            "/onos.rsm.Rsm/CreateSlice": grpclib.const.Handler(
                self.__rpc_create_slice,
                grpclib.const.Cardinality.UNARY_UNARY,
                CreateSliceRequest,
                CreateSliceResponse,
            ),
            "/onos.rsm.Rsm/UpdateSlice": grpclib.const.Handler(
                self.__rpc_update_slice,
                grpclib.const.Cardinality.UNARY_UNARY,
                UpdateSliceRequest,
                UpdateSliceResponse,
            ),
            "/onos.rsm.Rsm/DeleteSlice": grpclib.const.Handler(
                self.__rpc_delete_slice,
                grpclib.const.Cardinality.UNARY_UNARY,
                DeleteSliceRequest,
                DeleteSliceResponse,
            ),
            "/onos.rsm.Rsm/SetUeSliceAssociation": grpclib.const.Handler(
                self.__rpc_set_ue_slice_association,
                grpclib.const.Cardinality.UNARY_UNARY,
                SetUeSliceAssociationRequest,
                SetUeSliceAssociationResponse,
            ),
        }
|
[
"noreply@github.com"
] |
noreply@github.com
|
db0098a5052813ec73014ee3786d77074ce4c3cf
|
a0727e9880bd4cbd575b1ead8404fac2a076ae7e
|
/src/tests/api/test_rol.py
|
1c6f99376508b971e3ca4eab9191f0341534da56
|
[
"MIT"
] |
permissive
|
Haythem122/fastapi-docker
|
57620631e18310df8aa2e8a27909249ffc52bf22
|
90b84161599fa23225649aef8a0532a571d350fe
|
refs/heads/master
| 2023-08-12T10:58:43.601587
| 2021-09-20T23:11:47
| 2021-09-20T23:11:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,980
|
py
|
def test_create_role(client, super_user_token):
    """POST /api/v1/role creates a role and returns it with its new id."""
    data = {
        'name': 'new_role'
    }
    response = client.post(
        '/api/v1/role', json=data, headers={
            'token': super_user_token
        }
    )
    assert response.status_code == 201, response.text
    response_json = response.json()
    # Two roles pre-exist (see test_get_roles), so the new one gets id 3.
    expected_data = {'name': 'new_role', 'id': 3}
    assert response_json == expected_data


def test_get_role(client, super_user_token):
    """GET /api/v1/role/<id> returns the matching role."""
    response = client.get(
        '/api/v1/role/1', headers={
            'token': super_user_token
        }
    )
    assert response.status_code == 200, response.text
    response_json = response.json()
    expected_data = {'name': 'ADMINISTRATOR', 'id': 1}
    assert response_json == expected_data


def test_get_roles(client, super_user_token):
    """GET /api/v1/roles returns a single paginated page with both roles."""
    response = client.get(
        '/api/v1/roles?page=1', headers={
            'token': super_user_token
        }
    )
    assert response.status_code == 200, response.text
    response_json = response.json()
    expected_data = {
        'previous_page': None, 'next_page': None, 'total': 2,
        'pages': 1, 'data': [
            {'name': 'ADMINISTRATOR', 'id': 1},
            {'name': 'BASIC', 'id': 2}
        ]
    }
    assert response_json == expected_data


def test_update_role(client, super_user_token):
    """PUT /api/v1/role renames a role and echoes the updated record."""
    data = {'name': 'new_name', 'id': 2}
    response = client.put(
        '/api/v1/role', json=data,
        headers={
            'token': super_user_token
        }
    )
    assert response.status_code == 200, response.text
    response_json = response.json()
    assert response_json == data


def test_delete_role(client, super_user_token):
    """DELETE /api/v1/role/<id> removes the role and returns it."""
    # NOTE(review): expects name 'BASIC' although test_update_role renames
    # id 2 -- presumably each test runs against a fresh database; confirm
    # the fixture scope.
    response = client.delete(
        '/api/v1/role/2',
        headers={
            'token': super_user_token
        }
    )
    assert response.status_code == 200, response.text
    response_json = response.json()
    expected_data = {'name': 'BASIC', 'id': 2}
    assert response_json == expected_data
|
[
"jeremysilvasilva@gmail.com"
] |
jeremysilvasilva@gmail.com
|
c4281a41c161ba65c8915083ae81b981745630ca
|
9775ab319e5c1f2270a132b0244f0847db42589b
|
/nilai/migrations/0008_auto_20210117_1010.py
|
d2abe2075f8f24b61525b7b5c136dcc1bf54b97d
|
[] |
no_license
|
nabaman/SPK-SAW
|
9aa8dfaf1bf5162bae1dc5c97e2b3e033a08294b
|
5c0b8d491f23939615aa968cd52f081072fe2230
|
refs/heads/master
| 2023-02-18T17:38:21.028901
| 2021-01-22T15:37:06
| 2021-01-22T15:37:06
| 331,987,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
# Generated by Django 3.1.5 on 2021-01-17 10:10

from django.db import migrations, models


class Migration(migrations.Migration):
    """Replace Data_Krips.kriteria with a many-to-many from Data_Kriteria."""

    dependencies = [
        ('nilai', '0007_auto_20210117_1004'),
    ]

    operations = [
        # Drop the old forward relation on Data_Krips ...
        migrations.RemoveField(
            model_name='data_krips',
            name='kriteria',
        ),
        # ... and add the relation from the Data_Kriteria side instead.
        migrations.AddField(
            model_name='data_kriteria',
            name='krips',
            field=models.ManyToManyField(to='nilai.Data_Krips'),
        ),
    ]
|
[
"naba.alvian@gmail.com"
] |
naba.alvian@gmail.com
|
5db6fa1f99c5b7ac65079c7fd585ce8c7915f235
|
817085c4009e48db05e4a30815fdd92ee27513f9
|
/venv/Scripts/pip-script.py
|
87639661c7805bc7bbf60fa04e4b53e33d5922f8
|
[] |
no_license
|
bluesnie/novel
|
7e3a2f403def8fe3e1d9c8c1ba4e2a80344c39e0
|
c11076ca61c619a2b7c1423d742d3f4c63dc1fed
|
refs/heads/master
| 2020-04-24T02:07:07.516575
| 2019-02-20T07:44:19
| 2019-02-20T07:44:19
| 171,486,867
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
#!C:\Users\lenovo\PycharmProjects\novel\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# Auto-generated setuptools wrapper that launches the `pip` console script.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so pip sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
|
[
"l"
] |
l
|
ba15778cb67c98978cb7fc27b5541dcb2ceeba8a
|
3e0e9508b3b8d5609392fd64c57491d278c73c89
|
/venv/bin/gunicorn
|
c5ff15177c6bba9a6f255f72a472181d98d2c757
|
[] |
no_license
|
raghav96/DatabaseWebApp
|
9d59c9866e6977aac0ead06d242e01c5724211d0
|
a81ba383393c61877ad40acc44cda30ef2d7b19e
|
refs/heads/master
| 2021-01-02T08:36:38.272371
| 2017-08-01T18:21:47
| 2017-08-01T18:21:47
| 99,030,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
#!/Users/raghav/flask-app/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper that launches the gunicorn server.
import re
import sys

from gunicorn.app.wsgiapp import run

if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so gunicorn sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
|
[
"raghav96@github.com"
] |
raghav96@github.com
|
|
2fc6d94c3b2b11ba86ebe114e83a451f636f25c9
|
18df22a18f34b4e3066e797f024fb2e475408847
|
/praca_z_danymi.py
|
527424c0273538f106cccc209a5f923f8c55814d
|
[] |
no_license
|
Karolinak246/Programing-in-Python
|
8841d10495d661ad3708a1ba07cfe92c1c93a87d
|
47093309b5cee0424552cb5fee0272cf78758e8c
|
refs/heads/main
| 2023-01-31T20:09:50.361568
| 2020-12-15T23:02:25
| 2020-12-15T23:02:25
| 321,491,646
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,998
|
py
|
################################
# 7.12.2020
# Kopczak Karolina
# Programowanie w jezyku Python
# Zadania: PRACA Z DANYMI
################################
#!/usr/bin/env python3
path = "/home/pi/Documents/Python"
from xml.dom import minidom
import xml.dom.minidom
import os
import numpy as np
import pandas as pd
import csv
### TASK 1 ###
def xml():
    """Rename the root tag of XML/somexml.xml and write the result.

    Reads ``XML/somexml.xml``, renames its first ``CATALOG`` element to
    ``change``, saves the document as ``XML/some_changed_xml.xml`` and
    prints the root tag name before and after the change.
    """
    file = minidom.parse("XML/somexml.xml")
    tag = file.getElementsByTagName("CATALOG")[0]
    print("Tag name before changing: ", file.firstChild.tagName)
    tag.tagName = "change"
    # Fix: the original passed a bare open(...) handle to writexml and never
    # closed it, leaking the file descriptor; a context manager closes it.
    with open('XML/some_changed_xml.xml', 'w') as out:
        file.writexml(out)
    file2 = minidom.parse("XML/some_changed_xml.xml")
    print("Tag name after changing: ", file2.firstChild.tagName)
xml()
### TASK 2 ###
def csvjson():
    """Interactively maintain a movie-rental record in CSV/file.csv.

    If the file exists, shows it and optionally drops the last record;
    then optionally appends a new record entered on stdin. Side effects:
    reads/writes ``CSV/file.csv`` and prompts the user via ``input``.
    """
    if os.path.isfile("CSV/file.csv")==True:
        try:
            data = pd.read_csv("CSV/file.csv")
            print(data)
            deleterecord = input ("Would you like to delete last record? Y/N ")
            if deleterecord == "y" or deleterecord == "Y":
                data.drop(data.tail(1).index, inplace = True)
                print (data)
                # NOTE(review): file is read with the default comma separator
                # but written back tab-separated — confirm intended format.
                data.to_csv("CSV/file.csv", sep ="\t")
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and genuine bugs; Exception is broad enough for pandas read errors.
        except Exception:
            print("The file is empty, data will be added.")
            data = pd.DataFrame()
    else:
        print("There is no file at such name. file.csv will be created.")
        # Fix: `data` was never defined on this branch, so the later
        # `data.empty` check raised NameError on first run.
        data = pd.DataFrame()
    newrecord = input("Would you like to add new record? Y/N ")
    if newrecord == "y" or newrecord == "Y":
        recA = input("Enter the title of the movie: ")
        recB = input("Enter the code of CD: ")
        recC = input("Enter Client's name: ")
        recD = input("Enter Clinet's surname: ")
        recE = input("Enter phone number: ")
        recF = input("Enter amount of days (how long will the film be on loan?): ")
        recG = input("Enter the price: ")
        df = pd.DataFrame({ "Title":[recA],
                            "Code":[recB],
                            "Name":[recC],
                            "Surname":[recD],
                            "Phone_number":[recE],
                            "Days":[recF],
                            "Price":[recG]})
        if data.empty == True:
            # First record: write with a header row.
            df.to_csv("CSV/file.csv", sep = "\t", header = True)
        else:
            # Append without duplicating the header.
            df.to_csv("CSV/file.csv", sep = "\t", mode = "a", header = False)
csvjson()
|
[
"noreply@github.com"
] |
noreply@github.com
|
b28bbc203b60e128307f6f9d8d309793f3dc1e1a
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/yXZhG7zq6dWhWhirt_24.py
|
1b4a59876a63f5dfdb4b9e7de1d41c308c735314
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
def is_prime(n):
    """Return True if integer *n* is prime, False otherwise.

    Handles all integers: anything below 2 (including 0 and negatives)
    is not prime.
    """
    from math import isqrt
    # Fix: the original only rejected n == 1, so is_prime(0) returned True
    # and negative n crashed (n ** 0.5 yields a complex number).
    if n < 2:
        return False
    # isqrt is exact for arbitrarily large ints, unlike round(n ** 0.5),
    # which can misround for very large n due to float precision.
    for i in range(2, isqrt(n) + 1):
        if n % i == 0:
            return False
    return True


def filter_primes(num):
    """Return the primes from iterable *num*, preserving order."""
    return [n for n in num if is_prime(n)]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
bbda84923f2c455dc60051aa1e126bf4dd187233
|
4a88ec266b64521fcaef88d92cb2b57776d3192b
|
/powerUsageNotification/powerUsageNotification.py
|
132c11551e75d59adb52856ce265d156f20d6af7
|
[
"MIT"
] |
permissive
|
johntdyer/appdaemon-scripts
|
4e5ea345d27d54d8133be212e5f7af57b8dfd57f
|
ce7e32a919be5a835d0bdf95e6650ff34b699220
|
refs/heads/master
| 2020-03-31T18:01:49.517418
| 2018-10-07T14:06:38
| 2018-10-07T14:06:38
| 152,443,705
| 1
| 0
| null | 2018-10-10T15:10:00
| 2018-10-10T15:09:59
| null |
UTF-8
|
Python
| false
| false
| 4,100
|
py
|
import appdaemon.plugins.hass.hassapi as hass
import globals
#
# App which notifies you when a power usage sensor indicated a device is on/off
#
#
# Args:
#
# app_switch: on/off switch for this app. example: input_boolean.turn_fan_on_when_hot
# sensor: power sensor. example: sensor.dishwasher_power_usage
# notify_name: Who to notify. example: group_notifications
# delay: seconds to wait until a the device is considered "off". example: 60
# threshold: amount of "usage" which indicated the device is on. example: 2
# alternative_name: Name to use in notification. example: Waschmaschine
#
# Release Notes
#
# Version 1.3:
# use Notify App
#
# Version 1.2:
# message now directly in own yaml instead of message module
#
# Version 1.1:
# Added app_switch
#
# Version 1.0:
# Initial Version
class PowerUsageNotification(hass.Hass):
    """AppDaemon app: notify when a power-usage sensor indicates a device
    turned on, and (after a debounce delay) when it turned off.

    State machine: `triggered` is True while the device is considered "on";
    `isWaitingHandle` holds the pending off-notification timer, or None.
    """

    def initialize(self):
        """Read app arguments and subscribe to the power sensor."""
        self.timer_handle_list = []
        self.listen_event_handle_list = []
        self.listen_state_handle_list = []
        self.app_switch = globals.get_arg(self.args,"app_switch")
        self.sensor = globals.get_arg(self.args,"sensor")
        self.alternative_name = globals.get_arg(self.args,"alternative_name")
        self.notify_name = globals.get_arg(self.args,"notify_name")
        self.delay = globals.get_arg(self.args,"delay")
        self.threshold = globals.get_arg(self.args,"threshold")
        self.message = globals.get_arg(self.args,"message_DE")
        self.message_off = globals.get_arg(self.args,"message_off_DE")
        self.triggered = False
        self.isWaitingHandle = None
        self.notifier = self.get_app('Notifier')
        # Subscribe to sensors
        self.listen_state_handle_list.append(self.listen_state(self.state_change, self.sensor))

    def state_change(self, entity, attribute, old, new, kwargs):
        """React to sensor updates; only active while app_switch is on."""
        if self.get_state(self.app_switch) == "on":
            # Initial: power usage goes up
            if ( new != None and new != "" and not self.triggered and float(new) > self.threshold ):
                self.triggered = True
                self.log("Power Usage is: {}".format(float(new)))
                self.log("Setting triggered to: {}".format(self.triggered))
                self.notifier.notify(self.notify_name, self.message.format(self.alternative_name))
            # Power usage goes down below threshold
            elif ( new != None and new != "" and self.triggered and self.isWaitingHandle == None and float(new) <= self.threshold):
                self.log("Waiting: {} seconds to notify.".format(self.delay))
                self.isWaitingHandle = self.run_in(self.notify_device_off,self.delay)
                self.log("Setting isWaitingHandle to: {}".format(self.isWaitingHandle))
                self.timer_handle_list.append(self.isWaitingHandle)
            # Power usage goes up before delay
            elif( new != None and new != "" and self.triggered and self.isWaitingHandle != None and float(new) > self.threshold):
                self.log("Cancelling timer")
                self.cancel_timer(self.isWaitingHandle)
                self.isWaitingHandle = None
                self.log("Setting isWaitingHandle to: {}".format(self.isWaitingHandle))

    def notify_device_off(self, kwargs):
        """Notify User that device is off. This may get cancelled if it turns on again in the meantime"""
        self.triggered = False
        self.log("Setting triggered to: {}".format(self.triggered))
        self.isWaitingHandle = None
        self.log("Setting isWaitingHandle to: {}".format(self.isWaitingHandle))
        self.log("Notifying user")
        self.notifier.notify(self.notify_name, self.message_off.format(self.alternative_name))

    def terminate(self):
        """Cancel every registered timer and listener on app shutdown."""
        for timer_handle in self.timer_handle_list:
            self.cancel_timer(timer_handle)
        for listen_event_handle in self.listen_event_handle_list:
            self.cancel_listen_event(listen_event_handle)
        for listen_state_handle in self.listen_state_handle_list:
            self.cancel_listen_state(listen_state_handle)
|
[
"k.eifinger@googlemail.com"
] |
k.eifinger@googlemail.com
|
4eba43e78eb66a2ee052c5643ef10dcfafa48917
|
f299be6bd4ce2d34b3ddf625ecc44a9d99ce9bb7
|
/etc/docker/dev/Other-certs/test-create-rules-pau.py
|
2c74ee483cf7fed6431c058b6f05049b0c511100
|
[
"Apache-2.0"
] |
permissive
|
pic-es/rucio
|
8a4688e26dd45062ff45ddc29dffa96ea2d7562f
|
f91ae9e79be62890d975a333b7976c89f7fed735
|
refs/heads/master
| 2022-12-30T13:34:01.663270
| 2020-09-04T13:31:24
| 2020-09-04T13:31:24
| 281,892,842
| 1
| 0
|
Apache-2.0
| 2020-07-23T08:16:19
| 2020-07-23T08:16:18
| null |
UTF-8
|
Python
| false
| false
| 31,015
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import absolute_import, division, print_function
__author__ = "Agustin Bruzzese"
__copyright__ = "Copyright (C) 2020 Agustin Bruzzese"
__revision__ = "$Id$"
__version__ = "0.2"
import sys
sys.path.append("/usr/lib64/python3.6/site-packages/")
import gfal2
import io
import json
import linecache
import logging
import numpy as np
import os
import os.path
import random
import re
import time
import uuid
import zipfile
import string
import pathlib
import time
import pytz
from urllib.parse import urlunsplit
import graphyte, socket
from dateutil import parser
from datetime import (
datetime,
tzinfo,
timedelta,
timezone,
)
from gfal2 import (
Gfal2Context,
GError,
)
from io import StringIO
# Set Rucio virtual environment configuration
os.environ['RUCIO_HOME']=os.path.expanduser('~/Rucio-v2/rucio')
from rucio.rse import rsemanager as rsemgr
from rucio.client.client import Client
from rucio.client.didclient import DIDClient
from rucio.client.replicaclient import ReplicaClient
import rucio.rse.rsemanager as rsemgr
from rucio.client import RuleClient
from rucio.common.exception import (AccountNotFound, Duplicate, RucioException, DuplicateRule, InvalidObject, DataIdentifierAlreadyExists, FileAlreadyExists, RucioException,
AccessDenied, InsufficientAccountLimit, RuleNotFound, AccessDenied, InvalidRSEExpression,
InvalidReplicationRule, RucioException, DataIdentifierNotFound, InsufficientTargetRSEs,
ReplicationRuleCreationTemporaryFailed, InvalidRuleWeight, StagingAreaRuleRequiresLifetime)
from rucio.common.utils import adler32, detect_client_location, execute, generate_uuid, md5, send_trace, GLOBALLY_SUPPORTED_CHECKSUMS
gfal2.set_verbose(gfal2.verbose_level.debug)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Import Magic naming
from lfn2pfn import *
# In[2]:
class Rucio :
    """Thin wrapper around the Rucio clients used by this replication script.

    Bundles DID/replica/rule clients plus a gfal2 context for one scope,
    one origin RSE and a list of destination RSEs.

    NOTE(review): indentation in this block was reconstructed from a
    flattened source dump — verify nesting against the original file.
    """

    def __init__(self, myscope, orgRse, destRse, account='bruzzese', working_folder=None):
        self.myscope = myscope
        self.orgRse = orgRse
        self.destRse = destRse
        self.working_folder = working_folder
        self.gfal = Gfal2Context()
        self.didc = DIDClient()
        self.repc = ReplicaClient()
        self.rulesClient = RuleClient()
        # Configuration
        self.account = account
        # account=account
        self.client = Client(account=self.account)

    # Get list of all RSEs
    def rses(self) :
        """Return the names of every RSE known to the server."""
        rses_lists = list()
        for single_rse in list(self.client.list_rses()) :
            rses_lists.append(single_rse['rse'])
        return(rses_lists)

    def usage(self,s_rse) :
        """Return the first local account-usage record for RSE *s_rse*."""
        return(list(self.client.get_local_account_usage(account=self.account,rse=s_rse))[0])

    def rules(self) :
        """Return all replication rules owned by this account."""
        return(list(self.client.list_account_rules(account=self.account)))

    def myfunc(self):
        """Print the current account/scope/RSE configuration."""
        print("Hello your setting are account=%s, scope=%s, origin RSE =%s and destination RSE =%s" %(self.account, self.myscope, self.orgRse, self.destRse))

    def file_exists(self, pfn) :
        """Return True if *pfn* can be stat'ed via gfal2, else False."""
        try :
            self.gfal.stat(pfn).st_size
            return(True)
        except :
            return(False)

    def get_rse_url(self):
        """
        Return the base path of the rucio url
        """
        rse_settings = rsemgr.get_rse_info(self.orgRse)
        protocol = rse_settings['protocols'][0]
        schema = protocol['scheme']
        prefix = protocol['prefix']
        port = protocol['port']
        rucioserver = protocol['hostname']
        rse_url = list()
        if None not in (schema,str(rucioserver+':'+str(port)),prefix):
            rse_url.extend([schema,rucioserver+':'+str(port),prefix,'',''])
            if self.working_folder != None :
                # Check if our test folder exists
                path = os.path.join(urlunsplit(rse_url), self.working_folder)
                self.gfal.mkdir_rec(path, 775)
                return(path)
            else :
                return(urlunsplit(rse_url))
        else :
            return('Wrong url parameters')

    def check_replica(self, lfn, dest_rse=None):
        """
        Check if a replica of the given file at the site already exists.
        """
        # Returns the replica path when found, otherwise False.
        if lfn :
            replicas = list(
                self.client.list_replicas([{
                    'scope': self.myscope,
                    'name': lfn
                }], rse_expression=dest_rse))
            if replicas:
                for replica in replicas:
                    if isinstance(replica,dict) :
                        if dest_rse in replica['rses']:
                            path = replica['rses'][dest_rse][0]
                            return(path)
        return(False)

    ############################
    ## Create Metadata for DIDs
    ############################
    def getFileMetaData(self, p_file, origenrse=None):
        """
        Get the size and checksum for every file in the run from defined path
        """
        '''
        generate the registration of the file in a RSE :
        :param rse: the RSE name.
        :param scope: The scope of the file.
        :param name: The name of the file.
        :param bytes: The size in bytes.
        :param adler32: adler32 checksum.
        :param pfn: PFN of the file for non deterministic RSE
        :param dsn: is the dataset name.
        '''
        name = os.path.basename(p_file)
        name = name.replace('/','')
        replica = {
            'scope': self.myscope,
            'name': name.replace('+','_'),
            'adler32': self.gfal.checksum(p_file, 'adler32'),
            'bytes': self.gfal.stat(p_file).st_size,
            'pfn': p_file,
            "meta": {"guid": str(generate_uuid())}
        }
        Data = dict();
        Data['replica'] = replica
        Data['scope'] = self.myscope
        return(Data)

    ############################
    ## Create Groups of DIDs
    ############################
    def createDataset(self, new_dataset) :
        """Create a dataset in this scope; True on success, False if it exists."""
        logger.debug("| - - Checking if a provided dataset exists: %s for a scope %s" % (new_dataset, self.myscope))
        try:
            self.client.add_dataset(scope=self.myscope, name=new_dataset)
            return(True)
        except DataIdentifierAlreadyExists:
            return(False)
        except Duplicate as error:
            return generate_http_error_flask(409, 'Duplicate', error.args[0])
        except AccountNotFound as error:
            return generate_http_error_flask(404, 'AccountNotFound', error.args[0])
        except RucioException as error:
            exc_type, exc_obj, tb = sys.exc_info()
            logger.debug(exc_obj)

    def createcontainer(self, name_container):
        '''
        registration of the dataset into a container :
        :param name_container: the container's name
        :param info_dataset : contains,
        the scope: The scope of the file.
        the name: The dataset name.
        '''
        logger.debug("| - - - registering container %s" % name_container)
        try:
            self.client.add_container(scope=self.myscope, name=name_container)
        except DataIdentifierAlreadyExists:
            logger.debug("| - - - Container %s already exists" % name_container)
        except Duplicate as error:
            return generate_http_error_flask(409, 'Duplicate', error.args[0])
        except AccountNotFound as error:
            return generate_http_error_flask(404, 'AccountNotFound', error.args[0])
        except RucioException as error:
            exc_type, exc_obj, tb = sys.exc_info()
            logger.debug(exc_obj)

    ############################
    ## General funciotn for registering a did into a GROUP of DID (CONTAINER/DATASET)
    ############################
    def registerIntoGroup(self,n_file, new_dataset):
        """
        Attaching a DID to a GROUP
        """
        type_1 = self.client.get_did(scope=self.myscope, name=new_dataset)
        type_2 = self.client.get_did(scope=self.myscope, name=n_file)
        try:
            self.client.attach_dids(scope=self.myscope, name=new_dataset, dids=[{'scope':self.myscope, 'name':n_file}])
        except RucioException:
            logger.debug("| - - - %s already attached to %s" %(type_2['type'],type_1['type']))

    ############################
    ## MAGIC functions
    ############################
    def create_groups(self, organization) :
        """Build the dataset/container hierarchy for one file and attach each
        level to its parent (replica -> dataset_1 -> container_1..3)."""
        # 2.1) Create the dataset and containers for the file
        self.createDataset(organization['dataset_1'])
        # 2.1.1) Attach the dataset and containers for the file
        self.registerIntoGroup(organization['replica'], organization['dataset_1'])
        # 2.2) Create the dataset and containers for the file
        self.createcontainer(organization['container_1'])
        # 2.2.1) Attach the dataset and containers for the file
        self.registerIntoGroup(organization['dataset_1'], organization['container_1'])
        # 2.3) Create the dataset and containers for the file
        self.createcontainer(organization['container_2'])
        # 2.3.1) Attach the dataset and containers for the file
        self.registerIntoGroup(organization['container_1'], organization['container_2'])
        # 2.4) Create the dataset and containers for the file
        self.createcontainer(organization['container_3'])
        # 2.4.1) Attach the dataset and containers for the file
        self.registerIntoGroup(organization['container_2'], organization['container_3'])

    ############################
    ## Create Rule for DIDs
    ############################
    def addReplicaRule(self, destRSE, group):
        """
        Create a replication rule for one dataset at a destination RSE
        """
        type_1 = self.client.get_did(scope=self.myscope, name=group)
        logger.debug("| - - - Creating replica rule for %s %s at rse: %s" % (type_1['type'], group, destRSE))
        if destRSE:
            try:
                rule = self.rulesClient.add_replication_rule([{"scope":self.myscope,"name":group}],copies=1, rse_expression=destRSE, grouping='ALL', account=self.account, purge_replicas=True)
                logger.debug("| - - - - Rule succesfully replicated at %s" % destRSE)
                logger.debug("| - - - - - The %s has the following id %s" % (rule, destRSE))
                return(rule[0])
            except DuplicateRule:
                # Rule already exists: find and report it instead of failing.
                exc_type, exc_obj, tb = sys.exc_info()
                rules = list(self.client.list_account_rules(account=self.account))
                if rules :
                    for rule in rules :
                        if rule['rse_expression'] == destRSE and rule['scope'] == self.myscope and rule['name'] == group:
                            logger.debug('| - - - - Rule already exists %s which contains the following DID %s:%s %s' % (rule['id'],self.myscope, group, str(exc_obj)))
            except ReplicationRuleCreationTemporaryFailed:
                exc_type, exc_obj, tb = sys.exc_info()
                rules = list(self.client.list_account_rules(account=self.account))
                if rules :
                    for rule in rules :
                        if rule['rse_expression'] == destRSE and rule['scope'] == self.myscope and rule['name'] == group:
                            print('| - - - - Rule already exists %s which contains the following DID %s:%s %s' % (rule['id'],self.myscope, group, str(exc_obj)))

    ############################
    ## Create Rules for not registered DIDs
    ############################
    def outdated_register_replica(self, filemds, dest_RSE, org_RSE):
        """
        Register file replica.
        """
        # Uses a short-lived "carrier" dataset so the transfer can be driven
        # by paired parent/child rules that clean themselves up.
        carrier_dataset = 'outdated_replication_dataset' + '-' + str(uuid.uuid4())
        creation = self.createDataset(carrier_dataset)
        # Make sure your dataset is ephemeral
        self.client.set_metadata(scope=self.myscope, name=carrier_dataset, key='lifetime', value=86400) # 86400 in seconds = 1 day
        # Create a completly new create the RULE:
        for filemd in filemds :
            outdated = filemd['replica']['name']
            self.registerIntoGroup(outdated, carrier_dataset)
        # Add dummy dataset for replicating at Destination RSE
        rule_child = self.addReplicaRule(dest_RSE, group=carrier_dataset)
        # Add dummy dataset for replicating Origin RSE
        rule_parent = self.addReplicaRule(org_RSE, group=carrier_dataset)
        # Create a relation rule between origin and destiny RSE, so that the source data can be deleted
        rule = self.client.update_replication_rule(rule_id=rule_parent, options={'lifetime': 10, 'child_rule_id':rule_child, 'purge_replicas':True})
        logger.debug('| - - - - Creating relationship between parent %s and child %s : %s' % (rule_parent, rule_child, rule))
        # Create a relation rule between the destinity rule RSE with itself, to delete the dummy rule, whiles keeping the destiny files
        rule = self.client.update_replication_rule(rule_id=rule_child, options={'lifetime': 10, 'child_rule_id':rule_child})
        logger.debug('| - - - - Creating relationship between parent %s and child %s : %s' % (rule_parent, rule_child, rule))

    ############################
    ## Create Dictionary for Grafana
    ############################
    def stats_rules(self, rules) :
        '''
        Gather general information about
        total number of rules, and stats.
        '''
        # 'Rules' excludes the ephemeral carrier datasets; 'AllRules' counts
        # everything; 'Grouping' tallies rules by DID type.
        RUCIO = dict()
        if rules :
            for rule in rules :
                if 'outdated_replication_dataset' not in rule['name'] :
                    if 'Rules' not in RUCIO :
                        RUCIO['Rules'] = {
                            'total_stuck' : 0,
                            'total_replicating' : 0,
                            'total_ok' : 0,
                            'total_rules': 0
                        }
                        RUCIO['Rules']['total_rules'] += 1
                        if rule['state'] == 'REPLICATING' :
                            RUCIO['Rules']['total_replicating'] += 1
                        elif rule['state'] == 'STUCK' :
                            RUCIO['Rules']['total_stuck'] += 1
                        elif rule['state'] == 'OK' :
                            RUCIO['Rules']['total_ok'] += 1
                    else :
                        RUCIO['Rules']['total_rules'] += 1
                        if rule['state'] == 'REPLICATING' :
                            RUCIO['Rules']['total_replicating'] += 1
                        elif rule['state'] == 'STUCK' :
                            RUCIO['Rules']['total_stuck'] += 1
                        elif rule['state'] == 'OK' :
                            RUCIO['Rules']['total_ok'] += 1
                if 'AllRules' not in RUCIO :
                    RUCIO['AllRules'] = {
                        'total_stuck' : 0,
                        'total_replicating' : 0,
                        'total_ok' : 0,
                        'total_rules': 0
                    }
                    RUCIO['AllRules']['total_rules'] += 1
                    if rule['state'] == 'REPLICATING' :
                        RUCIO['AllRules']['total_replicating'] += 1
                    elif rule['state'] == 'STUCK' :
                        RUCIO['AllRules']['total_stuck'] += 1
                    elif rule['state'] == 'OK' :
                        RUCIO['AllRules']['total_ok'] += 1
                else :
                    RUCIO['AllRules']['total_rules'] += 1
                    if rule['state'] == 'REPLICATING' :
                        RUCIO['AllRules']['total_replicating'] += 1
                    elif rule['state'] == 'STUCK' :
                        RUCIO['AllRules']['total_stuck'] += 1
                    elif rule['state'] == 'OK' :
                        RUCIO['AllRules']['total_ok'] += 1
                ##################
                if 'Grouping' not in RUCIO :
                    RUCIO['Grouping'] = {
                        'file' : 0,
                        'dataset' : 0,
                        'container' : 0
                    }
                    if rule['did_type'] == 'CONTAINER' :
                        RUCIO['Grouping']['container'] += 1
                    elif rule['did_type'] == 'DATASET' :
                        RUCIO['Grouping']['dataset'] += 1
                    elif rule['did_type'] == 'FILE' :
                        RUCIO['Grouping']['file'] += 1
                else :
                    if rule['did_type'] == 'CONTAINER' :
                        RUCIO['Grouping']['container'] += 1
                    elif rule['did_type'] == 'DATASET' :
                        RUCIO['Grouping']['dataset'] += 1
                    elif rule['did_type'] == 'FILE' :
                        RUCIO['Grouping']['file'] += 1
        return(RUCIO)

    def stats_replica_rules(self, rules) :
        '''
        Gather specific information about
        state and number of replicas.
        '''
        REPLICAS = dict()
        REPLICAS['RSE'] = {}
        if rules :
            # Creates a key for all the RSEs that we have replicas
            for rule in rules :
                # if the RSE is not in the dictionary
                #print(rule['rse_expression'], REPLICAS['RSE'])
                if rule['rse_expression'] not in REPLICAS['RSE'] :
                    #print(REPLICAS)
                    REPLICAS['RSE'][rule['rse_expression']] = {
                        'total_replica_stuck' : rule['locks_stuck_cnt'],
                        'total_replica_replicating' : rule['locks_replicating_cnt'],
                        'total_replica_ok' : rule['locks_ok_cnt']
                    }
                # else if it is, update replica numbers
                else :
                    REPLICAS['RSE'][rule['rse_expression']]['total_replica_stuck'] += rule['locks_stuck_cnt']
                    REPLICAS['RSE'][rule['rse_expression']]['total_replica_replicating'] += rule['locks_replicating_cnt']
                    REPLICAS['RSE'][rule['rse_expression']]['total_replica_ok'] += rule['locks_ok_cnt']
        return(REPLICAS)

    def stats_usage_rules(self, all_rses) :
        """Aggregate bytes used by this account on each RSE in *all_rses*."""
        STORAGE = dict()
        STORAGE['USAGE'] = {}
        for x_rse in all_rses :
            rses = self.usage(x_rse)
            if rses['bytes'] != 0 :
                if rses['rse'] not in STORAGE['USAGE'] :
                    STORAGE['USAGE'][rses['rse']] = {
                        'total_bytes_used' : rses['bytes']
                    }
                # else if it is, update replica numbers
                else :
                    STORAGE['USAGE'][rses['rse']]['total_bytes_used'] += rses['bytes']
        return(STORAGE)
# In[3]:
class Look_for_Files :
    """Enumerate files under a gfal2-accessible URL, optionally recursively."""

    def __init__(self) :
        self.gfal = Gfal2Context()

    def check_directory(self, path):
        """Return True if *path* can be listed as a directory, else False."""
        try :
            self.gfal.listdir(str(path))
            return True
        # gfal2 raises on non-directories and unreachable paths alike.
        except Exception :
            return False

    def scrap_through_files(self, path) :
        """Return only the files directly inside *path* (non-recursive)."""
        # Fix: the original read `self.path`, an attribute that is never set
        # (the constructor takes no path), so every call raised
        # AttributeError. Use the `path` parameter as clearly intended.
        all_files = []
        for entry in self.gfal.listdir(str(path)) :
            fullPath = os.path.join(path, entry)
            if not self.check_directory(fullPath) :
                all_files.append(fullPath)
        return all_files

    def scrap_through_dir(self, path) :
        """Return every file under *path*, descending into subdirectories."""
        logger.debug("*-Listin files from url : %s" % path)
        all_files = []
        # Itinerate over all the entries
        for entry in self.gfal.listdir(str(path)) :
            fullPath = os.path.join(path, entry)
            # If entry is a directory then get the list of files in
            if self.check_directory(fullPath) :
                logger.debug('|--- ' + fullPath + ' its a directory ')
                all_files = all_files + self.scrap_through_dir(fullPath)
            else :
                logger.debug('|--- '+ fullPath + ' its a file')
                all_files.append(fullPath)
        return all_files
# In[4]:
############################
# Check existence of json File
############################
def json_write(data, filename='Rucio-bkp.json'):
    """Dump *data* to *filename* as pretty-printed UTF-8 JSON."""
    with io.open(filename, 'w') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)

def json_check(json_file_name='Rucio-bkp.json') :
    """Return True if *json_file_name* exists, is non-empty and readable.

    Side effect: an existing zero-byte file is deleted (treated as absent).
    """
    _log = logging.getLogger(__name__)
    # checks if file exists
    if not os.path.isfile(json_file_name) :
        _log.debug("Either file is missing or is not readable, creating file...")
        return(False)
    elif os.stat(json_file_name).st_size == 0 :
        # Stale empty file: remove it so the caller regenerates it.
        os.remove(json_file_name)
        return(False)
    elif os.path.isfile(json_file_name) and os.access(json_file_name, os.R_OK) :
        _log.debug("File exists in JSON and is readable")
        return(True)
    # Fix: the original fell off the end (returning None) for an existing,
    # non-empty but unreadable file; make the failure explicit.
    return(False)
# In[5]:
def register_rucio() :
    """Scan the origin RSE, register new files in Rucio and create rules.

    Uses the module-level `r1` (Rucio wrapper) and `groups` (from lfn2pfn).
    Returns a dict keyed by file name describing properties, grouping and
    per-RSE replication state, suitable for json_write().

    NOTE(review): indentation reconstructed from a flattened dump; if the
    origin listing is empty, `result_dict` is never assigned and the final
    return raises NameError — confirm against the original file.
    """
    # Look for files in the orgRse
    l1 = Look_for_Files()
    listOfFiles = l1.scrap_through_dir(r1.get_rse_url())
    if listOfFiles :
        # Create a dictionary with the properties for writing a json
        result_dict = dict();
        for dest in r1.destRse :
            # Create an array for those files that has not been replicated
            n_unreplicated = []
            for n in range(0,len(listOfFiles)):
            # for n in range(0,20):
                name = str(listOfFiles[n])
                logger.debug('| - ' + str(n) + ' - ' + str(len(listOfFiles)) + ' name : ' + name)
                # Break down the file path
                f_name = base=os.path.basename(name)
                # Check if file is already is registered at a particular destination RSE
                check = r1.check_replica(lfn=f_name.replace('+','_'), dest_rse=dest)
                # If it is registered, skip add replica
                if check != False : ## needs to be changed to False
                    logger.debug('| - - The FILE %s already have a replica at RSE %s : %s' % (f_name, dest, check))
                # Else, if the files has no replica at destination RSE
                else :
                    # 1) Get the file metadata
                    metaData = r1.getFileMetaData(name, r1.orgRse)
                    r1.client.add_replicas(rse=r1.orgRse, files=[metaData['replica']])
                    # 2) Look for create and attach groups
                    # look at script lfn2pfn.py
                    group = groups(name)
                    # functions : groups and create_groups
                    r1.create_groups(group)
                    # 3) Add information to Json file :
                    temp_dict = dict()
                    temp_dict[f_name] = {}
                    temp_dict[f_name]['Properties'] = {**metaData['replica'], **{'updated': datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')}}
                    temp_dict[f_name]['Organization'] = group
                    temp_dict[f_name]['Replicated'] = {dest : {**{'state': 'REPLICATING'}, **{'registered': datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')}}}
                    # 4) Contruct a dictionary
                    if f_name in result_dict :
                        result_dict[f_name]['Replicated'].update(temp_dict[f_name]['Replicated'])
                    # if its is the first entry, add the RSE where it was found :
                    elif f_name not in result_dict :
                        origin = { r1.orgRse : {
                            'path': name,
                            'registered': datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ'),
                            'state': 'ALIVE',
                        }}
                        temp_dict[f_name]['Replicated'].update(origin)
                        result_dict[f_name] = temp_dict[f_name]
                    # 5) Create the Main Replication Rule at Destination RSE
                    main_rule = r1.addReplicaRule(dest, group['container_3'])
                    logger.debug("| - - - - Getting parameters for rse %s" % dest)
                    # 6 ) Create the json array
                    # Finally, add them to a general list
                    n_unreplicated.append(metaData)
            logger.debug('Your are going to replicate %s files' % str(len(n_unreplicated)))
            print('Your are going to replicate %s files' % str(len(n_unreplicated)))
            ## Now, create Dummy rules between the ORIGIN and DESTINATION RSEs
            if len(n_unreplicated) > 0 :
                r1.outdated_register_replica(n_unreplicated, dest, r1.orgRse)
    # Finally return the information of the replicas as a dictionary
    return(result_dict)
# In[6]:
def stateCheck(json_file='Rucio-bkp.json'):
    """Refresh the per-RSE state recorded in the bookkeeping JSON.

    For each file entry: mark origin replicas whose physical file vanished
    as DEAD, and promote REPLICATING destinations whose replica now exists
    to ALIVE. Returns the updated dict (does not write it back).

    NOTE(review): indentation reconstructed from a flattened dump — the
    try/except around file_exists in particular should be verified.
    """
    with open(json_file) as f :
        data_keys = json.load(f)
    for file in data_keys :
        for ele in data_keys[file].values():
            if isinstance(ele,dict):
                for key, value in ele.items():
                    # Only keys that are RSE names carry replica state.
                    if key in r1.rses() :
                        if 'path' in value:
                            if value['state'] == 'ALIVE' :
                                # Check for deleted files
                                try :
                                    existence = r1.file_exists(value['path'])
                                # If gfal fails, it means that the file still exists
                                except :
                                    print('failed')
                                    dead_state = dict()
                                    dead_state = {'state': 'DEAD',
                                                  'deleted': datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
                                                  }
                                    data_keys[file]['Replicated'][key].update(dead_state)
                        elif 'state' in value :
                            # Check completed transference files
                            if value['state'] == 'REPLICATING' :
                                #check = check_replica(DEFAULT_SCOPE, file.strip('+').replace('+','_'), dest_rse=info[0])
                                check = r1.check_replica(lfn=file.replace('+','_'), dest_rse=key)
                                #if there's no replica at destiny RSE
                                if check != False :
                                    replication_state = dict()
                                    replication_state = {'path': check,
                                                         'copied': datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ'),
                                                         'state': 'ALIVE'}
                                    # Update the dictionary with the file properties
                                    data_keys[file]['Replicated'][key].update(replication_state)
    return(data_keys)
# In[7]:
class Grafana :
    """Flatten metric dictionaries and push them to the PIC graphite hosts."""

    def __init__(self) :
        # Derive the metric prefix from the collectd graphite-writer config:
        # take the first line containing "Prefix", second whitespace token,
        # with surrounding double quotes stripped.
        config_lines = open('/etc/collectd.d/write_graphite-config.conf', 'r').readlines()
        prefix_line = [line for line in config_lines if "Prefix" in line][0]
        self.gr_prefix = prefix_line.strip().split()[1].strip('"')

    ## Prepare data for plots replicas
    def prepare_grafana(self, dictionary, string='RUCIO.') :
        """Recursively flatten *dictionary* into (metric_path, value) pairs.

        Integer leaves become entries; nested dicts extend the dotted path.
        """
        flattened = []
        for name, value in dictionary.items() :
            if isinstance(value, int):
                flattened.append((str(string + name), value))
            elif isinstance(value, dict):
                flattened.extend(self.prepare_grafana(value, str(string + name + '.')))
        return flattened

    def send_to_graf(self, dictionary, myport=2013, myprotocol='udp') :
        """Send every flattened metric to both graphite servers."""
        sender_prefix = self.gr_prefix + socket.gethostname().replace(".","_")
        for metric in self.prepare_grafana(dictionary):
            if (metric[0], metric[1]) is not None :
                #print(metric[0].lower(), metric[1])
                for host in ('graphite01.pic.es', 'graphite02.pic.es'):
                    graphyte.Sender(host, port=myport, protocol=myprotocol, prefix=sender_prefix).send(metric[0].lower(), metric[1])
# In[8]:
if __name__ == '__main__':
    # Entry point: register new files, reconcile recorded state against the
    # previous run's JSON, and persist the merged bookkeeping dict.
    # NOTE(review): indentation reconstructed from a flattened dump —
    # json_write is placed at the top level of this guard (written on every
    # run); confirm against the original file.
    # Initialize Rucio class and functions
    r1 = Rucio(myscope='test-pau', orgRse='XRD2-NON-DET',
               destRse=['XRD1-DET'],
               account='pau', working_folder='Server-test')
    r1.myfunc()
    # It creates the main rule for replication at Destinatio RSE (see rses_catch)
    replication_dict = register_rucio()
    if json_check() == True :
        check_dict = stateCheck()
        # if both results resulted ok
        if isinstance(replication_dict,dict) & isinstance(check_dict,dict):
            replication_dict.update(check_dict)
        elif not check_dict :
            replication_dict = replication_dict
        elif not replication_dict:
            replication_dict = check_dict
    # creates a resulting dictionary with the files found with their respective
    # RSEs where they have been replicated
    json_write(replication_dict)
    '''# Load grafana module
    g1 = Grafana()
    # 1) Plot general state of rules
    g1.send_to_graf(r1.stats_rules(r1.rules()))
    # 2) Plot state of replicas per RSE
    g1.send_to_graf(r1.stats_replica_rules(r1.rules()))
    # 3) Plot RSE usage
    g1.send_to_graf(r1.stats_usage_rules(r1.rses()))'''
|
[
"bruzzese.agustin@gmail.com"
] |
bruzzese.agustin@gmail.com
|
a842ae5ed2fa9404270a2b872f3c9f04a42ac434
|
2652fd6261631794535589427a384693365a585e
|
/trunk/workspace/Squish/src/TestScript/UI/suite_UI_51/tst_UI_51_Pref_BufferAutoView/test.py
|
e1a9d9fe2212331ae4697f3a3269cdded8842a9c
|
[] |
no_license
|
ptqatester1/ptqa
|
88c652380167f64a953bfd7a65041e7d8ac48c90
|
5b5997ea459e9aac17db8da2041e2af331927104
|
refs/heads/master
| 2021-01-21T19:06:49.275364
| 2017-06-19T03:15:00
| 2017-06-19T03:15:00
| 92,115,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,735
|
py
|
from API.Utility.Util import Util
from API.Utility import UtilConst
from API.MenuBar.Options.Options import Options
from API.MenuBar.Options.OptionsConst import OptionsConst
from API.MenuBar.Options.Preferences.Miscellaneous.MiscellaneousConst import MiscellaneousConst
from API.SimulationPanel.EventList.EventListConst import EventListConst
from API.SimulationPanel.EventListFilters.EventListFilters import EventListFilters
from API.SimulationPanel.PlayControls.PlayControlsConst import PlayControlsConst
from API.MenuBar.Options.Preferences.PreferencesConst import PreferencesConst
util = Util()
options = Options()
eventListFilters = EventListFilters()
def main():
    """Squish entry point: open the test topology, toggle the auto-view
    preference, verify the buffer dialog is suppressed, then restore it."""
    util.init()
    util.open("UI13.pkt", UtilConst.UI_TEST )
    util.speedUpConvergence()
    editOptionsSetting()
    checkpoint1()
    resetOptionsSetting()

def editOptionsSetting():
    # Preferences > Miscellaneous: enable "auto view previous events".
    options.selectOptionsItem(OptionsConst.PREFERENCES)
    util.clickTab(PreferencesConst.TAB_BAR, PreferencesConst.MISCELLANEOUS)
    util.clickButton(MiscellaneousConst.AUTO_VIEW_PREVIOUS_EVENTS)
    util.close(OptionsConst.OPTIONS_DIALOG)

def checkpoint1():
    # Capture several events; with auto-view enabled the "buffer full"
    # dialog must NOT appear.
    util.clickOnSimulation()
    util.clickButton(EventListConst.RESET_SIMULATION)
    for i in range(0, 8):
        util.clickButton(PlayControlsConst.CAPTURE_FORWARD)
        snooze(10)
    # NOTE(review): this check is placed after the capture loop — confirm
    # against the original indentation (flattened in this dump).
    if (object.exists(PlayControlsConst.BUFFER_FULL_DIALOG_LABEL)):
        test.fail("Buffer window found")
    else:
        test.passes("Buffer window not found")

def resetOptionsSetting():
    # Restore the default "prompt" behaviour so later suites are unaffected.
    options.selectOptionsItem(OptionsConst.PREFERENCES)
    util.clickTab(PreferencesConst.TAB_BAR, PreferencesConst.MISCELLANEOUS)
    util.clickButton(MiscellaneousConst.PROMPT)
    util.close(OptionsConst.OPTIONS_DIALOG)
|
[
"ptqatester1@gmail.com"
] |
ptqatester1@gmail.com
|
ae0aef6a83feebd6e73b83650c28cc5c4c536818
|
ce3cbf8ed094808408634f6e9257bb575f65e654
|
/principal/forms.py
|
9f7048e6ed1cb70adc34136e3bc8e162d7d7a137
|
[] |
no_license
|
iosamuel/meetup
|
165e6e4bf83c843507c45cb1ad78709d787c711a
|
af7a746dee0344ca41eae3b64571a7fa3ca6a39c
|
refs/heads/master
| 2016-09-06T08:52:28.898449
| 2013-06-01T15:56:29
| 2013-06-01T15:56:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
from django import forms
from .models import *
# ModelForm exposing every field of the Eventos model.
# NOTE(review): no explicit ``fields``/``exclude`` -- Django >= 1.6 raises
# ImproperlyConfigured for this; confirm the Django version targeted (repo
# dates to 2013, i.e. pre-1.6).
class EventosForm(forms.ModelForm):
    class Meta:
        model = Eventos
        # widgets = {
        #     'nombre': forms.InputText(attrs={'class':'inputB inputA'})
        # }
        # <input class="inputA inputB">
# ModelForm for Usuarios; the 'eventos' relation is managed outside the form.
class UsuariosForm(forms.ModelForm):
    class Meta:
        model = Usuarios
        exclude = ('eventos',)
|
[
"samuelb1311@gmail.com"
] |
samuelb1311@gmail.com
|
ca4715ede55c40f5896201b4076cd2db446a2b25
|
33f87a7b8bf317894bff46d1f57dce5aea363b77
|
/Unit_3/Prog_10.py
|
a827d6fbf1eaabadf8c7d56ec8cfd2b16f0d7647
|
[] |
no_license
|
beulah444/Dr.AIT_Python_Course_2015
|
dd7f31659591225393bb81a4918d9a7df09e5f27
|
9fe4dd1fe0cbc093795901f8f272afe25adccef4
|
refs/heads/master
| 2021-01-22T17:28:51.383517
| 2016-06-10T19:37:57
| 2016-06-10T19:37:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
__author__ = 'Dr.S.Gowrishankar'


class ListSum:
    """Hold a sequence of numbers and print their sum on demand."""

    def __init__(self, listArg):
        # The sequence whose total ComputeSum() prints.
        # (The original defined __init__ twice; the identical duplicate
        # silently shadowed the first and has been removed.)
        self.listArg = listArg

    def ComputeSum(self):
        """Print the sum of the wrapped sequence."""
        # print() as a function: the original used the Python-2-only
        # statement form, which is a SyntaxError on Python 3.
        print(sum(self.listArg))


# The list can be passed directly -- no intermediate variable needed.
ls = ListSum([1, 2, 3, 4, 5])
ls.ComputeSum()
|
[
"learndatasciencewithr@gmail.com"
] |
learndatasciencewithr@gmail.com
|
b155ef0630a0290fc9d2d59511401268ad789fe4
|
ec3ec2e292da450b095c8a4963a8aeb222d141bf
|
/model/DAO/ClienteDAO.py
|
4349684129a4f5fa88ae857c388fd1c13621fc68
|
[] |
no_license
|
ojuliomiguel/Gerenciador-de-Fiados
|
b3aa97d8b65e595445f2598fc9e1bae1df289430
|
da239a8de12d30bf5cd284322086fdffcfc9f851
|
refs/heads/master
| 2021-10-28T00:38:53.399247
| 2019-04-20T17:02:04
| 2019-04-20T17:02:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,480
|
py
|
import sqlite3
import os
from DataBase.ConexaoSQL import ConexaoSQL


class ClienteDAO:
    """Data-access helpers for the ``clientes`` table.

    NOTE: methods are declared without ``self`` and are intended to be
    called as plain functions, e.g. ``ClienteDAO.buscarCliente(nome)``.
    All queries now use DB-API ``?`` placeholders instead of ``str.format``
    so that user-supplied names cannot inject SQL.
    """

    def buscarCliente(nome):
        """Return the stored name exactly matching *nome*.

        Prints a notice and returns None when no row matches.
        """
        try:
            con = ConexaoSQL.conexaoBd()
            cur = con.cursor()
            query = """
            SELECT id, nome
            FROM clientes
            WHERE nome = ?
            """
            cur.execute(query, (nome,))
            data = cur.fetchone()
            return data[1]
        except TypeError:
            # fetchone() returned None (no match), so data[1] raised.
            print('Retorno da funcao buscarCliente() vazio')

    def cadastrarCliente(cliente):
        """Insert *cliente* (object exposing ``.Nome``) into the table."""
        con = ConexaoSQL.conexaoBd()
        cur = con.cursor()
        query = """
        INSERT INTO clientes (nome)
        VALUES (?)
        """
        cur.execute(query, (cliente.Nome,))
        con.commit()
        print('Dados cadastrados com sucesso')

    def excluirCliente(nome):
        """Delete every row whose ``nome`` equals *nome*."""
        con = ConexaoSQL.conexaoBd()
        cur = con.cursor()
        query = """
        DELETE
        FROM clientes
        WHERE nome = ?
        """
        cur.execute(query, (nome,))
        con.commit()

    def listarClientes():
        """Return all (id, nome) rows; prints a notice on a None result."""
        try:
            con = ConexaoSQL.conexaoBd()
            cur = con.cursor()
            query = """
            SELECT id, nome
            FROM clientes
            """
            cur.execute(query)
            data = cur.fetchall()
            return data
        except TypeError:
            # Fixed: the original message named buscarCliente() here.
            print('Retorno da funcao listarClientes() vazio')
|
[
"juliomiguelsouzacosta@gmail.com"
] |
juliomiguelsouzacosta@gmail.com
|
de440b2a397df3a824c6c85058fa980aa28a0340
|
52083499c5661e9034758135bf1ee2e625a9d2fb
|
/src/surround_view_sim/build/catkin_generated/generate_cached_setup.py
|
ca755387a7ec711615f8b35eb5609d860ad867bb
|
[] |
no_license
|
pennluo/SurroundView_Simulation
|
12d186545a5b53a32c47e8cfb1f1bfd3cc1f946f
|
ae7d98caf9c89325bcf776f119e9fd655cac8ebb
|
refs/heads/master
| 2022-09-30T12:05:24.610092
| 2019-04-14T15:59:55
| 2019-04-14T15:59:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,368
|
py
|
# -*- coding: utf-8 -*-
# NOTE(review): this file looks auto-generated by catkin's build step
# (setup caching); prefer regenerating the build over hand-editing it.
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/opt/ros/kinetic".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script
# Render the devel-space environment into a list of shell-script lines.
code = generate_environment_script('/home/reuben/catkin_ws/src/Surround_View_Sim/src/surround_view_sim/build/devel/env.sh')
output_filename = '/home/reuben/catkin_ws/src/Surround_View_Sim/src/surround_view_sim/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))
# Make the generated shell script executable for its owner.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"luopengchn@gmail.com"
] |
luopengchn@gmail.com
|
f2542cb91db283b5aae1bcebc74918ad123999bd
|
a78aff707dbaf053c78ead5f4631fc3ba0f0611b
|
/Code/data.py
|
eb210da3c8715ef8d4061df88a1b36dd87efea14
|
[] |
no_license
|
GitteVW/Multi-Instance-Experiments
|
b6ac3eed60df3ee8938dc3f96696c0924f3d98bb
|
47de57f56c4b407765d9355dd97a4f27d2f0ba07
|
refs/heads/master
| 2020-05-16T21:27:36.589189
| 2015-01-26T21:16:29
| 2015-01-26T21:16:29
| 28,873,499
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,689
|
py
|
'''
Datasets and algorithms
'''
labels_sival=['ajaxorange','apple','banana','bluescrunge','dirtyworkgloves','juliespot','checkeredscarf','wd40can','candlewithholder','glazedwoodpot','cokecan','smileyfacedoll','dataminingbook','rapbook','feltflowerrug','translucentbowl','greenteabox','cardboardbox','dirtyrunningshoe','largespoon','goldmedal','spritecan','stripednotebook','fabricsoftenerbox','woodrollingpin']
labels_text=['alt_atheism','comp_graphics','comp_os_ms-windows_misc','comp_sys_ibm_pc_hardware','comp_sys_mac_hardware','comp_windows_x','misc_forsale','rec_autos','rec_motorcycles',\
'rec_sport_baseball','rec_sport_hockey','sci_crypt','sci_electronics','sci_med','sci_space','soc_religion_christian','talk_politics_guns','talk_politics_mideast','talk_politics_misc','talk_religion_misc']
labels_uci=['adult_12', 'adult_13', 'adult_23', 'adult_14', 'adult_24', 'adult_15', 'adult_25', 'adult_110', 'adult_210','diabetes_12', 'diabetes_13', 'diabetes_23','spam_12', 'spam_13', 'spam_23', 'spam_14', 'spam_24', 'spam_15', 'spam_25', 'spam_110', 'spam_210','tictactoe_12', 'tictactoe_13', 'tictactoe_23', 'transfusion_12', 'transfusion_13', 'transfusion_23']
datasets={'uci':labels_uci,'text':labels_text,'sival':labels_sival}
milearners=['MILR','AdaBoostM1','MILRC','MIRI','MIDD','MDD','MIOptimalBall','TLD','MIWrapper','CitationKNN','MISMO','SimpleMI','MIEMDD','MISVM']
silearners=['AdaBoostM1','RBFNetwork', 'J48','SMO','IBk','Logistic']
learnerTuples=[('SimpleMI','J48'),('AdaBoostM1','AdaBoostM1'),('MIWrapper','J48'),('CitationKNN','IBk'),('MILR','Logistic'),('MISMO','SMO')]
def createFeatureList(label,datasetType):
    """
    Create a list with each item being the description of one single-instance
    feature (arff format) for a given label and dataset type (SIVAL, text, UCI).

    Raises ValueError for an unknown datasetType and KeyError for an unknown
    UCI corpus prefix (the original silently fell through these cases and
    crashed with UnboundLocalError at the return statement instead).
    """
    def createFeature(i):
        return '\t@attribute f'+str(i)+': numeric\n'
    # Number of single-instance features for each dataset family.
    if datasetType == 'sival':
        numFeatures = 30
    elif datasetType == 'text':
        numFeatures = 200
    elif datasetType == 'uci':
        # UCI feature counts vary per corpus; keyed on the label prefix.
        uciFeatureCounts = {
            'transfusion': 4,
            'tictactoe': 9,
            'spam': 57,
            'adult': 14,
            'diabetes': 8,  # pima indians
        }
        numFeatures = uciFeatureCounts[label.split('_')[0]]
    else:
        raise ValueError('unknown dataset type: %s' % datasetType)
    return list(map(createFeature, range(numFeatures)))
|
[
"gitte.vanwinckelen@gmail.com"
] |
gitte.vanwinckelen@gmail.com
|
a643d38e90646191463eca1bc229387c66c1a11f
|
65e0c11d690b32c832b943fb43a4206739ddf733
|
/bsdradius/trunk/bsdradius/configDefaults.py
|
244f2e4ef1d3488677ee5ad1c6d9c71ef18e43ac
|
[
"BSD-3-Clause"
] |
permissive
|
Cloudxtreme/bsdradius
|
b5100062ed75c3201d179e190fd89770d8934aee
|
69dba67e27215dce49875e94a7eedbbdf77bc784
|
refs/heads/master
| 2021-05-28T16:50:14.711056
| 2015-04-30T11:54:17
| 2015-04-30T11:54:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,442
|
py
|
## BSDRadius is released under BSD license.
## Copyright (c) 2006, DATA TECH LABS
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
## * Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
## * Neither the name of the DATA TECH LABS nor the names of its contributors
## may be used to endorse or promote products derived from this software without
## specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
## ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Define configuration defaults here
"""
# HeadURL $HeadURL: file:///Z:/backup/svn/bsdradius/trunk/bsdradius/configDefaults.py $
# Author: $Author: valts $
# File version: $Revision: 278 $
# Last changes: $Date: 2006-11-26 15:45:52 +0200 (Sv, 26 Nov 2006) $
# Installation prefix; every default path below is derived from it.
prefix = '/usr/local'
# define default values
# format: {'section' : {'option' : value}}
# NOTE(review): %(name)s values are presumably expanded via ConfigParser-style
# interpolation against sibling options in the same section -- confirm in the
# config loader.
defaultOptions = {
    'PATHS' : {
        'prefix' : prefix,
        'conf_dir' : '%(prefix)s/etc/bsdradius',
        'run_dir' : '%(prefix)s/var/run',
        'log_dir' : '%(prefix)s/var/log/bsdradius',
        'user_module_dir' : '%(conf_dir)s/user_modules',
        'dictionary_dir' : '%(prefix)s/share/bsdradius/dictionaries',
        'dictionary_file' : '%(dictionary_dir)s/dictionary',
        'server_log_file' : '%(log_dir)s/bsdradiusd.log',
        'pid_file' : '%(run_dir)s/bsdradiusd.pid',
        'clients_file' : '%(conf_dir)s/clients.conf',
        'modules_file' : '%(conf_dir)s/modules.conf',
        'user_modules_file' : '%(conf_dir)s/user_modules.conf',
        'config_file' : '%(conf_dir)s/bsdradiusd.conf'
    },
    'SERVER' : {
        'home' : '',
        'user' : '',
        'group' : '',
        # Standard RADIUS ports: 1812 authentication, 1813 accounting.
        'auth_port' : '1812',
        'acct_port' : '1813',
        'number_of_threads' : '10',
        'foreground' : 'no',
        'no_threads' : 'no',
        'log_to_screen': 'no',
        'log_to_file' : 'no',
        'debug_mode' : 'no',
        'log_client' : '',
        'fast_accounting': 'no',
    },
    'DATABASE' : {
        'enable' : 'no',
        'type' : 'postgresql',
        'host' : 'localhost',
        'user' : 'bsdradius',
        'pass' : '',
        'name' : 'bsdradius',
        'refresh_rate' : '60',
        'clients_query' : 'select address, name, secret from radiusClients',
    },
    'AUTHORIZATION' : {
        'packet_timeout' : '5',
        'auth_queue_maxlength' : '300',
        'modules' : '',
    },
    'ACCOUNTING' : {
        'acct_queue_maxlength' : '300',
        'modules' : '',
    },
}
# Define option types.
# It is really neccessary to define only other types
# than string because Config parser converts everything
# to string by default.
# Format: {'section' : {'option' : 'type'}}
defaultTypes = {
    'SERVER' : {
        'auth_port' : 'int',
        'acct_port' : 'int',
        'number_of_threads' : 'int',
        'foreground' : 'bool',
        'no_threads' : 'bool',
        'log_to_screen': 'bool',
        'log_to_file': 'bool',
        'debug_mode' : 'bool',
        'fast_accounting': 'bool',
    },
    'DATABASE' : {
        'enable' : 'bool',
        'refresh_rate' : 'int',
    },
    'AUTHORIZATION' : {
        'packet_timeout' : 'int',
        'auth_queue_maxlength' : 'int',
    },
    'ACCOUNTING' : {
        'acct_queue_maxlength' : 'int',
    },
}
# configuration defaults for one BSD Radius module
# (empty string means "not configured"; 'enable' defaults modules to on)
moduleConfigDefaults = {
    'enable': 'yes',
    'configfile': '',
    'startup_module': '',
    'startup_function': '',
    'authorization_module': '',
    'authorization_function': '',
    'authentication_module': '',
    'authentication_function': '',
    'accounting_module': '',
    'accounting_function': '',
    'shutdown_module': '',
    'shutdown_function': '',
    'pythonpath' : '',
}
|
[
"valdiic@72071c86-a5be-11dd-a5cd-697bfd0a0cef"
] |
valdiic@72071c86-a5be-11dd-a5cd-697bfd0a0cef
|
97381c7e465346939150233934c4f52ea147926a
|
1c6c127b7f22ad7c19b2cc0f265973711d6e3e01
|
/bus/migrations/0002_auto_20200227_1737.py
|
917f3db6cdb94721f19e9baa1456db73cb3805ac
|
[] |
no_license
|
ramsheedrd/BusRoute
|
1e0e7f94653c5fe6c6318dfe8079ed6b8f1b4c51
|
bd61d37d16171777a28797195ff387b3fee57cde
|
refs/heads/master
| 2022-04-16T09:44:07.774609
| 2020-04-05T12:31:21
| 2020-04-05T12:31:21
| 253,231,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
# Generated by Django 2.0.3 on 2020-02-27 12:07
# Auto-generated migration: creates DistrictModel and links PlacesModel to
# it.  Avoid hand-editing applied migrations; add a new migration instead.
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Must run after the app's initial schema migration.
    dependencies = [
        ('bus', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='DistrictModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('modified_date', models.DateTimeField(auto_now=True)),
                ('district', models.CharField(max_length=50)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='placesmodel',
            name='district',
            # CASCADE: deleting a district also deletes its places rows.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bus.DistrictModel'),
            preserve_default=False,
        ),
    ]
|
[
"ramsheedkc10@gmail.com"
] |
ramsheedkc10@gmail.com
|
02247885ad972b4756997bffe5c07f2ebd394d4a
|
908b6ee862375003eac9af6c8ea5b32533be4709
|
/collective/phantasy/atphantasy/content/phantasyschema.py
|
1842cf48d3c9871472abac28e8375078dccb4886
|
[] |
no_license
|
collective/collective.phantasy
|
c7199995d98be183eb163f5b73c3e89147f2e8a6
|
86cb66b0905f6bb284cfc03201dd5bbfa8f9010e
|
refs/heads/master
| 2023-08-19T13:18:13.507960
| 2015-01-19T15:32:07
| 2015-01-19T15:32:07
| 29,517,296
| 0
| 0
| null | 2015-01-20T07:24:00
| 2015-01-20T07:24:00
| null |
UTF-8
|
Python
| false
| false
| 37,998
|
py
|
from Products.Archetypes.public import *
from collective.phantasy.config import I18N_DOMAIN
from Products.SmartColorWidget.Widget import SmartColorWidget
from Products.ATContentTypes.configuration import zconf
from Products.Archetypes.atapi import AnnotationStorage
from Products.validation.config import validation
from Products.validation.validators.SupplValidators import MaxSizeValidator
from Products.validation import V_REQUIRED
validation.register(MaxSizeValidator('checkImageMaxSize',
maxsize=zconf.ATImage.max_file_size))
from collective.phantasy import phantasyMessageFactory as _
# Optional-dependency probes.  The original used bare ``except:`` clauses,
# which would also swallow unrelated failures (SyntaxError in the package,
# KeyboardInterrupt, ...); narrowed to ImportError, the condition actually
# being probed for.
try:
    from iw.fss.FileSystemStorage import FileSystemStorage
    HAS_FSS = True
except ImportError:
    # iw.fss not installed; the screenshot field keeps its default storage.
    HAS_FSS = False
try:
    from Products.FCKeditor.FckWidget import FckWidget
    HAS_FCKWIDGET = True
except ImportError:
    # FCKeditor not installed; viewlet fields keep the default RichWidget.
    HAS_FCKWIDGET = False
# Schematas whose fields keep their normal visibility; fields in any other
# schemata are hidden by finalizePhantasySchema() below.
PRESERVED_SCHEMATAS = ['default', 'images', 'dimensions', 'colors', 'fonts', 'borders', 'plone-overloads', 'viewlets', 'dynamic-viewlets']
# FCKeditor toolbar layout (JavaScript array literal consumed verbatim by
# FckWidget's fck_custom_toolbar option).
CUSTOM_TOOL_BAR = """[
['Source','Preview','-','Templates'],
['Cut','Copy','Paste','PasteText','RemoveFormat'],
['Bold','Italic','Underline','StrikeThrough','-','Subscript','Superscript'],
['OrderedList','UnorderedList','-','Outdent','Indent'],
['Link','Unlink','Anchor','Image','imgmapPopup','Flash'],
['Style','FontFormat'],
['FitWindow']
]"""
def finalizePhantasySchema(schema):
    """Finalize *schema* in place and return a fresh copy.

    - restricts 'id' validation and hides 'description'
    - stores the screenshot on the filesystem when iw.fss is installed
    - hides every field whose schemata is not in PRESERVED_SCHEMATAS
    - swaps 'viewlets' fields to FckWidget when FCKeditor is installed
    """
    # Id must be valid and make description invisible
    schema['id'].validators = ('isValidId',)
    schema['description'].widget.visible = {'view':'invisible', 'edit':'invisible'}
    # FSS Storage for skin screenshot if iw.fss is available
    if HAS_FSS :
        schema['screenshot'].storage = FileSystemStorage()
    for fieldName in schema.keys() :
        if schema[fieldName].schemata not in PRESERVED_SCHEMATAS :
            # hide ATCTFolder metadata fields unuseful for skins
            schema[fieldName].widget.visible = {'view':'invisible', 'edit':'invisible'}
        # FCKWidget for viewlet fields if FCK is available
        if HAS_FCKWIDGET and schema[fieldName].schemata == 'viewlets' :
            schema[fieldName].widget = FckWidget (
                description = schema[fieldName].widget.description,
                label = schema[fieldName].widget.label,
                rows=12,
                width = '100%',
                height ='150px',
                fck_toolbar = 'Custom',
                fck_custom_toolbar = CUSTOM_TOOL_BAR,
                file_portal_type = 'PhantasySkinFile',
                image_portal_type = 'PhantasySkinImage',
                browse_images_portal_types = ['PhantasySkinImage', 'Image'],
                fck_force_other_path_method = 'get_phantasy_relative_path',
                fck_force_other_root_method = 'get_phantasy_relative_path',
                # force no paragraphs in viewlets
                keyboard_entermode = 'div',
                allow_link_byuid = False,
                start_expanded = True,
                allow_file_upload = False)
            # Map each known viewlet field to the Plone element id it styles.
            # NOTE(review): css_id is only bound for these three fields; a new
            # 'viewlets'-schemata field would reuse the previous iteration's
            # value or raise NameError -- confirm this invariant holds.
            if fieldName == 'logoViewlet' :
                css_id = 'portal-logo'
            elif fieldName == 'footerViewlet' :
                css_id = 'portal-footer'
            elif fieldName == 'colophonViewlet' :
                css_id = 'portal-colophon'
            schema[fieldName].widget.fck_area_css_id = css_id
            schema[fieldName].widget.fck_area_css_class = ''
    # Make a copy to reinitialize all layers
    new_schema = schema.copy()
    return new_schema
# in skin schema fields with same name as standard plone base_properties must always be required
PhantasyFieldsSchema = Schema((
StringField(
'cssfile',
schemata ='default',
widget=StringWidget(
description = _(u'description_css_file', u"""Enter a stylesheet file name, don't forget to upload the file in this skin.
This css will be applied at the end (after all properties). Use './myimage.jpg' in this css
to reference an image called 'myimage.jpg' from this skin."""),
label = _(u'label_css_file', u'Css File Name'),
),
),
ImageField(
'screenshot',
required=False,
primary=False,
languageIndependent=True,
storage = AnnotationStorage(migrate=True),
swallowResizeExceptions = zconf.swallowImageResizeExceptions.enable,
pil_quality = zconf.pil_config.quality,
pil_resize_algo = zconf.pil_config.resize_algo,
max_size = zconf.ATImage.max_image_dimension,
sizes= {'large' : (768, 768),
'preview' : (400, 400),
'mini' : (200, 200),
'thumb' : (128, 128),
'tile' : (64, 64),
'icon' : (32, 32),
'listing' : (16, 16),
},
validators = (('checkImageMaxSize', V_REQUIRED)),
widget = ImageWidget(
description = _(u'description_phantasy_screenshot',
default=u'Upload a screen Shot for this skin, used to help users to select a skin'),
label= _(u'label_phantasy_screenshot', default=u'Screen Shot'),
show_content_type = False,
preview_scale = 'mini',
),
),
# fields for viewlets overrides
TextField('logoViewlet',
schemata ='viewlets',
required=False,
searchable=False,
validators = ('isTidyHtmlWithCleanup',),
allowable_content_types = ('text/html',),
default_content_type = 'text/html',
default_output_type = 'text/x-html-safe',
widget = RichWidget(
description = _(u'description_logo_viewlet', u"""Override the logo viewlet,
you can add images or links with rich editor"""),
label = _(u'label_logo_viewlet', u'Logo Viewlet'),
rows = 25,
allow_file_upload = False),
),
TextField('footerViewlet',
schemata ='viewlets',
required=False,
searchable=False,
validators = ('isTidyHtmlWithCleanup',),
allowable_content_types = ('text/html',),
default_content_type = 'text/html',
default_output_type = 'text/x-html-safe',
widget = RichWidget(
description = _(u'description_footer_viewlet', u"""Override the footer viewlet,
you can add images or links with rich editor"""),
label = _(u'label_footer_viewlet', u'Footer Viewlet'),
rows = 25,
allow_file_upload = False),
),
TextField('colophonViewlet',
schemata ='viewlets',
required=False,
searchable=False,
validators = ('isTidyHtmlWithCleanup',),
allowable_content_types = ('text/html',),
default_content_type = 'text/html',
default_output_type = 'text/x-html-safe',
widget = RichWidget(
description = _(u'description_colophon_viewlet', u"""Override the colophon viewlet,
you can add images or links with rich editor"""),
label = _(u'label_colophon_viewlet', u'Colophon Viewlet'),
i18n_domain = I18N_DOMAIN,
rows = 25,
allow_file_upload = False),
),
BooleanField(
'displaySearchBoxViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_searchbox_viewlet',
u"""Do you want to display the searchbox viewlet with live search in header ?"""),
label = _(u'label_display_searchbox_viewlet', u'Display Searchbox ?'),
),
),
BooleanField(
'displayBreadCrumbsViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_breadcrumbs_viewlet',
u"""Do you want to display the breadcrumbs viewlet in top of content ?"""),
label = _(u'label_display_breadcrumbs_viewlet', u'Display Bread Crumbs ?'),
),
),
BooleanField(
'displayGlobalSectionsViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_globalsections_viewlet',
u"""Do you want to display the global sections viewlet (horizontal navigation at top) ?"""),
label = _(u'label_display_globalsections_viewlet', u'Display Global Sections ?'),
),
),
BooleanField(
'displayPersonalBarViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_personalbar_viewlet',
u"""Do you want to display the personal bar viewlet (links : login, preferences ...) ?"""),
label = _(u'label_display_personalbar_viewlet', u'Display Personal Bar ?'),
),
),
BooleanField(
'displaySiteActionsViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_siteactions_viewlet',
u"""Do you want to display the site actions viewlet (links : site map, contact ...) ?"""),
label = _(u'label_display_siteactions_viewlet', u'Display Site Actions ?'),
),
),
BooleanField(
'displayDocumentActionsViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_documentactions_viewlet',
u"""Do you want to display the document actions viewlet (link: print, send this page ...) ?"""),
label = _(u'label_display_documentactions_viewlet', u'Display Document Actions ?'),
),
),
BooleanField(
'displayDocumentBylineViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_documentbyline_viewlet',
u"""Do you want to display the document by line viewlet for each content (author, date and keywords) ?"""),
label = _(u'label_display_documentbyline_viewlet', u'Display Document By Line ?'),
),
),
# fields for images
# logoName property is no more used in standard plone css
# so we make it invisible
StringField(
'logoName',
schemata ='images',
required=1,
widget=StringWidget(
label='Logo Name',
visible = {'view':'invisible', 'edit':'invisible'},
description = "Choose the logo file name, upload the image in the skin to overload it",
i18n_domain = I18N_DOMAIN,
),
),
StringField(
'backgroundImageName',
schemata ='images',
widget=StringWidget(
description = _(u'description_background_image_name', u"""Enter the background image name for the page, upload the image in this skin"""),
label = _(u'label_background_image_name', u'Background Image Name'),
),
),
StringField(
'backgroundImagePosition',
schemata ='images',
default="top left",
vocabulary = [("top left", _(u"Top Left")),
("top right", _(u"Top Right")),
("top center", _(u"Top Center")),
("center left", _(u"Center Left")),
("center right", _(u"Center Right")),
("center center", _(u"Center Center")),
("bottom left", _(u"Bottom Left")),
("bottom right", _(u"Bottom Right")),
("bottom center", _(u"Bottom Center"))],
widget=SelectionWidget(
description = _(u'description_background_image_position', u"""Choose the background image position for the page"""),
label = _(u'label_background_image_position', u'Background Image Position'),
format='select',
),
),
StringField(
'backgroundImageRepeat',
schemata ='images',
default="no-repeat",
vocabulary = [("no-repeat", "No repeat"),
("repeat-x", "Horizontal Repeat"),
("repeat-y", "Vertical Repeat"),
("repeat", "mosaic repeat")],
widget=SelectionWidget(
description = _(u'description_background_image_repeat', u"""Choose the background image repeat for the page"""),
label = _(u'label_background_image_repeat', u'Background Image Repeat'),
format='select',
),
),
StringField(
'portalBackgroundImageName',
schemata ='images',
widget=StringWidget(
description = _(u'description_portal_background_image_name', u"""Enter the background image name for the portal, upload the image in this skin"""),
label = _(u'label_portal_background_image_name', u'Portal Background Image Name'),
),
),
StringField(
'contentBackgroundImageName',
schemata ='images',
widget=StringWidget(
description = _(u'description_content_background_image_name', u"""Choose the background image name for the content, upload the image in this skin"""),
label = _(u'label_contentl_background_image_name', u'Content Background Image Name'),
),
),
StringField(
'headerBackgroundImageName',
schemata ='images',
widget=StringWidget(
description = _(u'description_header_background_image_name', u"""Choose the background image name for the header, upload the image in this skin"""),
label = _(u'label_header_background_image_name', u'Header Background Image Name'),
),
),
# this property is never used is standard plone css
# so we make it invisible
StringField(
'portalMinWidth',
schemata ='dimensions',
widget=StringWidget(
label='Portal min width',
visible = {'view':'invisible', 'edit':'invisible'},
description = "Choose the portal min width in px em or %",
),
),
StringField(
'portalWidth',
schemata ='dimensions',
default = '100%',
widget=StringWidget(
description = _(u'description_portal_width', u"""Choose the portal min width in px em or %"""),
label = _(u'label_portal_width', u'Portal width'),
),
),
StringField(
'portalHorizontalPosition',
schemata ='dimensions',
default="",
vocabulary = [("0", _(u"undefined")),
("0 auto 0 auto", _(u"centered")),
("0 auto 0 0", _(u"on left")),
("0 0 0 auto", _(u"on right"))],
widget=SelectionWidget(
description = _(u'description_portal_horizontal_position', u"""Choose the position for portal"""),
label = _(u'label_portal_horizontal_position', u'Portal Horizontal Position'),
format='select',
),
),
StringField(
'columnOneWidth',
schemata ='dimensions',
required=1,
widget=StringWidget(
description = _(u'description_column_one_width', u"""Choose the column one width in px em or %"""),
label = _(u'label_column_one_width', u'Column One width'),
),
),
StringField(
'columnTwoWidth',
schemata ='dimensions',
required=1,
widget=StringWidget(
description = _(u'description_column_two_width', u"""Choose the column two width in px em or %"""),
label = _(u'label_column_two_width', u'Column Two width'),
),
),
StringField(
'fontFamily',
schemata ='fonts',
required=1,
widget=StringWidget(
description = _(u'description_font_family',
u"""Choose the font family"""),
label = _(u'label_font_family', u'Font Family'),
),
),
StringField(
'fontMainSize',
schemata ='fonts',
required=0,
widget=StringWidget(
description = _(u'description_font_main_size',
u"Choose the main font size in % (better) em px pt "
u"or using a keyword (xx-small, small, ...)"),
label = _(u'label_font_main_size', u'Font Main Size'),
),
),
StringField(
'fontSmallSize',
schemata ='fonts',
required=1,
widget=StringWidget(
description = _(u'description_font_small_size',
u"Choose the small font size in % (better) em px pt "
u"or using a keyword (xx-small, small, ...)"""),
label = _(u'label_font_small_size', u'Font Small Size'),
),
),
StringField(
'headingFontFamily',
schemata ='fonts',
required=1,
widget=StringWidget(
description = _(u'description_heading_font_family',
u"""Choose the font family for titles"""),
label = _(u'label_heading_font_family', u'Heading Font Family'),
),
),
StringField(
'textTransform',
schemata ='fonts',
required=1,
vocabulary = [("none", _(u"none")),
("uppercase", _(u"uppercase")),
("lowercase", _(u"lowercase")),
("capitalize", _(u"capitalize"))],
widget=SelectionWidget(
description = _(u'description_text_transform',
u"""Choose the text transformation for tabs and some headings"""),
label = _(u'label_text_transform', u'Text Transform'),
format='select',
),
),
StringField(
'fontColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_font_color',
u"""Choose the font color"""),
label = _(u'label_font_color', u'Font Color'),
),
),
StringField(
'backgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_background_color',
u"""Choose the background color of the page"""),
label = _(u'label_background_color', u'Background Color'),
),
),
StringField(
'discreetColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_discreet_color',
u"""Choose the discreet color (can be used in content) """),
label = _(u'label_discreet_color', u'Discreet Color'),
),
),
StringField(
'portalBackgroundColor',
schemata ='colors',
default="transparent",
widget=SmartColorWidget(
description = _(u'description_portal_background_color',
u"""Choose the portal background color"""),
label = _(u'label_portal_background_color', u'Portal Background Color'),
),
),
StringField(
'contentBackgroundColor',
schemata ='colors',
default="transparent",
widget=SmartColorWidget(
description = _(u'description_content_background_color',
u"""Choose background color for content part of the page"""),
label = _(u'label_content_background_color', u'Content Background Color'),
),
),
StringField(
'personaltoolsBackgroundColor',
schemata ='colors',
default="#E3E3E3",
widget=SmartColorWidget(
description = _(u'description_personaltools_background_color',
u"""Choose background color for personal tools - language choice and user menu"""),
label = _(u'label_personaltools_background_color',
u"Personal tools Background Color"),
),
),
StringField(
'personaltoolsFontColor',
schemata ='colors',
default="#205C90",
widget=SmartColorWidget(
description = _(u'description_personaltools_font_color',
u"""Choose font color for personal tools - language choice and user menu"""),
label = _(u'label_personaltools_font_color',
u"Personal tools Font Color"),
),
),
StringField(
'headerBackgroundColor',
schemata ='colors',
default="transparent",
widget=SmartColorWidget(
description = _(u'description_header_background_color',
u"""Choose background color for the header"""),
label = _(u'label_header_background_color', u"Header Background Color"),
),
),
StringField(
'globalNavBackgroundColor',
schemata ='colors',
default="#dee7ec",
widget=SmartColorWidget(
description = _(u'description_global_nav_background_color',
u"""Choose the background color of global navigation"""),
label = _(u'label_global_nav_background_color', u'Global navigation Background Color'),
),
),
StringField(
'globalNavLinkColor',
schemata ='colors',
default="#205c90",
widget=SmartColorWidget(
description = _(u'description_global_nav_font_color',
u"""Choose the color of font and selected element background in global navigation"""),
label = _(u'label_global_nav_font_color', u'Global navigation Font Color'),
),
),
StringField(
'inputFontColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_input_font_color',
u"""Choose the input fields font color"""),
label = _(u'label_input_font_color', u'Input Font Color'),
),
),
StringField(
'linkColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_link_color',
u"""Choose the color for links"""),
label = _(u'label_link_color', u'Link Color'),
),
),
StringField(
'linkVisitedColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_link_visited_color',
u"""Choose the color for visited links"""),
label = _(u'label_link_visited_color', u'Link Visited Color'),
),
),
StringField(
'linkActiveColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_link_active_color',
u"""Choose the color for active links"""),
label = _(u'label_link_active_color', u'Link Active/Hover Color'),
),
),
StringField(
'notifyBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_notify_background_color',
u"""Choose the notify background color (for portal messages)"""),
label = _(u'label_notify_background_color', u'Notify Background Color'),
),
),
StringField(
'notifyBorderColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_notify_border_color',
u"""Choose the notify border color"""),
label = _(u'label_notify_border_color', u'Notify Border Color'),
),
),
StringField(
'helpBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_help_background_color',
u"""Choose the bg color for help in forms"""),
label = _(u'label_help_background_color', u'Help Background Color'),
),
),
StringField(
'oddRowBackgroundColor',
schemata ='colors',
required=1,
default="#EEEEEE",
widget=SmartColorWidget(
description = _(u'description_odd_row_background_color',
u"""Choose the bg color for odd rows (tables, portlets)"""),
label = _(u'label__odd_row_background_color', u'Odd Row Background Color'),
),
),
StringField(
'evenRowBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_even_row_background_color',
u"""Choose the bg color for even rows (tables, portlets)"""),
label = _(u'label__even_row_background_color', u'Even Row Background Color'),
),
),
StringField(
'globalBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_global_background_color',
u"""Choose the global background color (used in tabs and portlets headers)"""),
label = _(u'label_global_background_color', u'Global Background Color'),
),
),
StringField(
'globalFontColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_global_font_color',
u"""Choose the global font color"""),
label = _(u'label_global_font_color', u'Global Font Color'),
),
),
StringField(
'globalBorderColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_global_border_color',
u"""Choose the color for global borders"""),
label = _(u'label_global_border_color', u'Global Border Color'),
),
),
StringField(
'contentViewBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_content_views_background_color',
u"""Choose the background color for content views tabs"""),
label = _(u'label_content_views_background_color', u'Content View Background Color'),
),
),
StringField(
'contentViewBorderColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_content_views_border_color',
u"""Choose the border color for content views tabs"""),
label = _(u'label_content_views_border_color', u'Content View Border Color'),
),
),
StringField(
'contentViewFontColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_content_views_font_color',
u"""Choose the font color for content views tabs"""),
label = _(u'label_content_views_font_color', u'Content View Font Color'),
),
),
StringField(
'listingHeadersFontColor',
schemata ='colors',
required=1,
default="#666666",
widget=SmartColorWidget(
description = _(u'description_listing_headers_font_color',
u"""Choose the font color for the text of listing headers"""),
label = _(u'label_listing_headers_font_color', u'Listing Headers Font Color'),
),
),
StringField(
'portletHeadersFontColor',
schemata ='colors',
required=1,
default="#000000",
widget=SmartColorWidget(
description = _(u'description_portlet_headers_font_color',
u"""Choose the font color for the text of portlet headers"""),
label = _(u'label_portlet_headers_font_color', u'Portlet Headers Font Color'),
),
),
StringField(
'borderStyle',
schemata ='borders',
required=1,
vocabulary = [("none", "no border"),
("hidden", "hidden when none is impossible (tables)"),
("solid", "solid"),
("dotted", "dotted"),
("dashed", "dashed"),
("groove","3D groove"),
("double", "double borders"),
("inset", "3D inset"),
("outset","3D outset"),
("ridge","3D ridge")],
widget=SelectionWidget(
description = _(u'description_border_style',
u"""Choose the global border style"""),
label = _(u'label_border_style', u'Border Style'),
format='select',
),
),
StringField(
'borderStyleAnnotations',
schemata ='borders',
required=1,
vocabulary = [("none", "no border"),
("hidden", "hidden when none is impossible (tables)"),
("solid", "solid"),
("dotted", "dotted"),
("dashed", "dashed"),
("groove","3D groove"),
("double", "double borders"),
("inset", "3D inset"),
("outset","3D outset"),
("ridge","3D ridge")],
widget=SelectionWidget(
description = _(u'description_border_style_annotations',
u"""Choose the border style for annotations """),
label = _(u'label_border_style_annotations', u'Border Style for Annotations'),
format='select',
),
),
StringField(
'borderWidth',
schemata ='borders',
required=1,
widget=StringWidget(
description = _(u'description_border_width',
u"""Choose the border width in px"""),
label = _(u'label_border_width', u'Border Width'),
),
),
BooleanField(
'overloadBody',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_body',
u"""Do you want to overload the body style ?"""),
label = _(u'label_overload_body', u'Overload Body Style'),
),
),
BooleanField(
'overloadHTMLTags',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_html_tags',
u"""Do you want to overload content styles (classic html tags) ?"""),
label = _(u'label_overload_html_tags', u'Overload HTML Tags Styles'),
),
),
BooleanField(
'overloadContent',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_content',
u"""Do you want to overload standard plone styles used for content ?"""),
label = _(u'label_overload_content', u'Overload Various Content Styles'),
),
),
BooleanField(
'overloadSiteActions',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_site_actions',
u"""Do you want to overload site actions styles ?"""),
label = _(u'label_overload_site_actions', u'Overload Site Actions Styles'),
),
),
BooleanField(
'overloadSearchBox',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_search_box',
u"""Do you want to overload search box styles ?"""),
label = _(u'label_overload_search_box', u'Overload Search Box Styles'),
),
),
BooleanField(
'overloadGlobalSections',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_global_sections',
u"""Do you want to overload global sections buttons styles ?"""),
label = _(u'label_overload_global_sections', u'Overload Global Sections Styles'),
),
),
BooleanField(
'overloadPersonalTools',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_personal_tools',
u"""Do you want to overload personal tools buttons styles (login, preferences ...) ?"""),
label = _(u'label_overload_personal_tools', u'Overload Personals Tools Styles'),
),
),
BooleanField(
'overloadBreadcrumbs',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_breadcrumbs',
u"""Do you want to overload breadcrumbs styles ?"""),
label = _(u'label_overload_breadcrumbs', u'Overload Breadcrumbs Styles'),
),
),
BooleanField(
'overloadFooter',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_footer',
u"""Do you want to overload footer styles ?"""),
label = _(u'label_overload_footer', u'Overload Footer Styles'),
),
),
BooleanField(
'overloadSiteMap',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_site_map',
u"""Do you want to overload site map styles ?"""),
label = _(u'label_overload_site_map', u'Overload Site Map Styles'),
),
),
BooleanField(
'overloadColumns',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_columns',
u"""Do you want to overload columns styles ?"""),
label = _(u'label_overload_columns', u'Overload Columns Styles'),
),
),
BooleanField(
'overloadForms',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_forms',
u"""Do you want to overload forms styles ?"""),
label = _(u'label_overload_forms', u'Overload Forms Styles'),
),
),
BooleanField(
'overloadPortlets',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_portlets',
u"""Do you want to overload portlets styles ?"""),
label = _(u'label_overload_portlets', u'Overload Portlets Styles'),
),
),
BooleanField(
'overloadCalendar',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_calendar',
u"""Do you want to overload calendar styles ?"""),
label = _(u'label_overload_calendar', u'Overload Calendar Styles'),
),
),
BooleanField(
'overloadNavtree',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_navtree',
u"""Do you want to overload navigation tree styles (impact sitemap + navtree portlet) ?"""),
label = _(u'label_overload_navtree', u'Overload Navigation Tree Styles'),
),
),
BooleanField(
'overloadAuthoring',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_authoring',
u"""Do you want to overload authoring styles (content views, actions etc ...) ?"""),
label = _(u'label_overload_authoring', u'Overload Authoring Styles'),
),
),
), marshall=RFC822Marshaller())
|
[
"thomas.desvenain@gmail.com"
] |
thomas.desvenain@gmail.com
|
d0089bd15b2c1ffac1e167de02e3ee215da07c7b
|
74698be74d244ebbabcb0b3cf17ebed26adfa37c
|
/orbit/utils/epoch_helper.py
|
6eb110768887e95055c34f7fc3857f08a6b9c276
|
[
"Apache-2.0"
] |
permissive
|
lfads/models
|
aa75616fee2476641aa98ca1cbdce7e5d27a9aff
|
fd700f0cb2e104544c445d9fbf3991d8388ff18a
|
refs/heads/master
| 2021-01-25T13:50:55.423010
| 2021-01-05T18:27:01
| 2021-01-05T18:27:01
| 123,619,512
| 16
| 9
|
Apache-2.0
| 2021-01-05T18:27:02
| 2018-03-02T19:07:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,136
|
py
|
# Copyright 2020 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a utility class for training in epochs."""
import tensorflow as tf
class EpochHelper:
  """Bookkeeping helper that tracks epoch boundaries in custom training loops."""
  def __init__(self, epoch_steps: int, global_step: tf.Variable):
    """Initializes the `EpochHelper` instance.

    Args:
      epoch_steps: Number of steps that make up one epoch.
      global_step: A `tf.Variable` holding the current global step.
    """
    self._steps_per_epoch = epoch_steps
    self._step_var = global_step
    # Lazily initialized by `epoch_begin`.
    self._epoch = None
    self._epoch_first_step = None
    self._inside_epoch = False
  def epoch_begin(self):
    """Returns whether a new epoch should begin."""
    if self._inside_epoch:
      return False
    step = self._step_var.numpy()
    self._epoch_first_step = step
    self._epoch = step // self._steps_per_epoch
    self._inside_epoch = True
    return True
  def epoch_end(self):
    """Returns whether the current epoch should end."""
    if not self._inside_epoch:
      raise ValueError("`epoch_end` can only be called inside an epoch.")
    epoch_now = self._step_var.numpy() // self._steps_per_epoch
    if epoch_now <= self._epoch:
      return False
    self._inside_epoch = False
    return True
  @property
  def batch_index(self):
    """Index of the next batch within the current epoch."""
    return self._step_var.numpy() - self._epoch_first_step
  @property
  def current_epoch(self):
    """Zero-based index of the epoch currently in progress."""
    return self._epoch
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
5bf1f054acecf712806195e5754388d8d6ab6e98
|
2cabf2ef7640f6203ce6c95ee638793c806c2354
|
/venv/Scripts/pip3-script.py
|
ed3e0c46993f2f30d9fd3af434f3e4ea7ba7c22b
|
[] |
no_license
|
nunodores/suziapp
|
4288347e4436981704fa80ae1416a2a486919fe7
|
43e33369ed64e0fe8eec6e94cc29809fa75dcb65
|
refs/heads/master
| 2021-07-20T08:34:26.340345
| 2018-12-01T22:28:58
| 2018-12-01T22:28:58
| 159,992,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
#!"C:\Users\Nuno Dorres\PycharmProjects\Test\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper; do not edit by hand.
# `__requires__` tells pkg_resources which distribution must be resolvable
# before the entry point is loaded.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalize argv[0]: drop the Windows '-script.py'/'.exe' suffix so pip
    # reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        # Resolve the 'pip3' console entry point and run it; its return value
        # becomes the process exit code.
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
|
[
"41326@etu.he2b.be"
] |
41326@etu.he2b.be
|
98fc62623666acf20345eb356467064870a96c88
|
a7265d9cff8f2a06257faa4155655524ff6047c0
|
/utils/findAll_keyNouns.py
|
6fb229482e25c2d901662b491f039080ed01b2d8
|
[] |
no_license
|
szr712/pinyin2hanziTest
|
f285afa7c12e548063f279d523303f43cf81687d
|
2bd4d197d514b85e43ad525be07a8fc6bef3ad23
|
refs/heads/master
| 2023-07-17T18:52:54.519509
| 2021-08-25T13:03:44
| 2021-08-25T13:03:44
| 392,251,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
def findAll_keyNouns(pinyin, tones, subpinyin, subtones):
    """Find every position where a keyword's pinyin occurs in a pinyin string.

    A position counts as a match only when both the pinyin substring and the
    corresponding tone slice are equal to the keyword's pinyin and tones.

    Args:
        pinyin (str): The pinyin string to search in.
        tones (list): Tones aligned with each position of `pinyin`.
        subpinyin (str): The keyword's pinyin to search for.
        subtones (list): Tones aligned with each position of `subpinyin`.

    Returns:
        list: Start indices of all matches, in ascending order.
    """
    index = []
    width = len(subpinyin)
    start = 0
    while True:
        # str.find scans at C speed, skipping positions where the pinyin
        # itself cannot match; we then verify the tone slice.
        pos = pinyin.find(subpinyin, start)
        if pos == -1:
            break
        if tones[pos:pos + width] == subtones:
            index.append(pos)
        # Advance by one so overlapping occurrences are still found.
        start = pos + 1
    return index
if __name__ == "__main__":
    # Demo: locate the pinyin of the keyword "公式" inside a sentence's pinyin.
    import sys
    sys.path.append("..")
    from Convertor import Convertor
    words = "【知识点】『互斥事件的概率加法公式互斥事件的概率加法公式』"
    convertor = Convertor()
    # NOTE(review): convert() appears to return (pinyin objects, normalized
    # sentences) — confirm against Convertor's actual contract.
    tar, sen = convertor.convert(words)
    sub, _ = convertor.convert("公式")
    print(sen)
    index = findAll_keyNouns(
        tar[0].pinyin, tar[0].yindiao, sub[0].pinyin, sub[0].yindiao)
    print(index)
    # Show the two characters starting at the first match position.
    print(sen[0][index[0]:index[0]+2])
|
[
"zirui990712@163.com"
] |
zirui990712@163.com
|
509c23e3bf72658ffd093ae405cf9de4958fb78f
|
102d09ef1d6effe166ad703ba4472c45dfb03263
|
/py/Maximum_Depth_of_Binary_Tree.py
|
199982277744c0985b39cfc2326fc115a739fec4
|
[] |
no_license
|
bitcsdby/Codes-for-leetcode
|
5693100d4b66de65d7f135bbdd81b32650aed7d0
|
9e24e621cfb9e7fd46f9f02dfc40a18a702d4990
|
refs/heads/master
| 2016-09-05T08:43:31.656437
| 2014-08-02T15:14:53
| 2014-08-02T15:14:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# Definition for a binary tree node
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
class Solution:
    # @param root, a tree node
    # @return an integer
    def maxDepth(self, root):
        """Return the maximum depth (number of nodes on the longest
        root-to-leaf path) of the binary tree rooted at `root`.

        An empty tree has depth 0.
        """
        # `is None` is the idiomatic identity check; `== None` invokes
        # __eq__ and can misbehave on custom node types.
        if root is None:
            return 0
        # Depth is one (this node) plus the deeper of the two subtrees.
        return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))
|
[
"bitcsdby@gmail.com"
] |
bitcsdby@gmail.com
|
7df058d88c766a9b978227fca727c0f1587d9861
|
2e1322f72f730fdb019c25eb533424bfb411c7dc
|
/backend/garpix_page/contexts/default.py
|
635931bdc671efeb46a46e470ae8ab5dc4d06058
|
[
"MIT"
] |
permissive
|
tempuku/garpix_page
|
750d3ef78e1698d93564ae510a9514dfb815853f
|
d24fa3d8c7b0b4134e66795965596f3cdb61c8db
|
refs/heads/master
| 2023-03-17T03:33:07.528207
| 2021-03-11T15:15:13
| 2021-03-11T15:15:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53
|
py
|
def context(request, *args, **kwargs):
    """Default page context: contributes no extra template variables."""
    extra = dict()
    return extra
|
[
"crusat@yandex.ru"
] |
crusat@yandex.ru
|
c575267f57e1c42a783b5c2275174b4427573cab
|
9db916f28e718d4684db7e67a05573365a182e0b
|
/base/migrations/0002_freeresponseanswer_answer.py
|
c4be8ee654e41e8dfcf550bb36d6976aa5ae5b09
|
[] |
no_license
|
shobashiva/questionnaire_app
|
243fee34950141315a9eb05ea4c26b3b677660ed
|
bd6c1c51215215c531944e99b29363ea87e98b62
|
refs/heads/master
| 2021-07-11T03:37:19.980030
| 2017-10-16T13:02:45
| 2017-10-16T13:02:45
| 107,127,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the optional free-text ``answer`` field to ``FreeResponseAnswer``."""
    # Must run after the app's initial schema migration.
    dependencies = [
        ('base', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='freeresponseanswer',
            name='answer',
            # null=True/blank=True: the answer is optional at both the
            # database and the form-validation level.
            field=models.TextField(null=True, blank=True),
        ),
    ]
|
[
"shoba@apaxsoftware.com"
] |
shoba@apaxsoftware.com
|
9d3339fbb1dea2a21cc7e3817513b96bac97ad71
|
b03121e88c7c7b4738d6ab873cf030e7db140e32
|
/utils/fact_memoization.py
|
47630368952b26b22dc1746de79f0c20b8aab043
|
[] |
no_license
|
priestd09/project_euler
|
0889a2038ee4ff17008169abea73f8a3bc74f713
|
d6a04fbe42947ef0f9d9e26077c2b9b99069f4d1
|
refs/heads/master
| 2021-01-18T19:42:00.135047
| 2014-05-25T09:39:57
| 2014-05-25T09:39:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
#!/usr/bin/env python
'''
Created on 17 feb. 2012
@author: Julien Lengrand-Lambert
@contact: julien@lengrand.fr
'''
import timeit
from memoize import memoize
from Memoized import Memoized
def fact1(value):
    """Return value! computed by plain (uncached) recursion."""
    return 1 if value == 0 else value * fact1(value - 1)
@memoize
def fact2(value):
    """
    Returns value!, using memoization
    """
    # The @memoize function decorator caches each recursive call's result,
    # so later evaluations reuse previously computed factorials.
    if value == 0:
        return 1
    return value * fact2(value - 1)
@Memoized
def fact3(value):
    """
    Returns value!, using memoization
    """
    # Same as fact2 but with the class-based Memoized decorator; recursive
    # calls hit its cache as well.
    if value == 0:
        return 1
    return value * fact3(value - 1)
if __name__ == '__main__':
    # Benchmark the three variants three times each. The memoized versions
    # keep their caches between iterations, so repeated runs get faster.
    for i in range(3):
        t1 = timeit.Timer("fact1(150)", "from __main__ import fact1")
        t2 = timeit.Timer("fact2(150)", "from __main__ import fact2")
        t3 = timeit.Timer("fact3(150)", "from __main__ import fact3")
        print t1.timeit(1), t2.timeit(1), t3.timeit(1)  # Python 2 print statement
|
[
"julien@lengrand.fr"
] |
julien@lengrand.fr
|
b6d15f2f18092727dbf5772e4baf476f305ac60b
|
5487607b21cf32d2a7664a8d28b5fd68ad434739
|
/tysdgx_landinspector.py
|
8415b2e51eb903369edd96af6ffddc5be39afd3d
|
[] |
no_license
|
Fr0z3nKnights/landcloud
|
081cfb29f8e2f1b3fb484f37108a86c1855bcb1c
|
f65b413bd616bc4b9ca641aefc5a1db0cee40cb5
|
refs/heads/master
| 2021-04-05T00:03:04.774268
| 2020-03-23T06:55:19
| 2020-03-23T06:55:19
| 248,504,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,168
|
py
|
"""
@author:Fr0z3n
@contact:websec@yeah.net
@datetime:2020/3/16
@desc:To get json data from url(https://landcloud.org.cn),and distribute personal task.
"""
from openpyxl import Workbook
from openpyxl import load_workbook
import requests
import os
import datetime
import re
# Silence urllib3's InsecureRequestWarning for verify=False requests;
# requires reinstalling requests==2.7.0 (version).
from requests.packages import urllib3
urllib3.disable_warnings()
class Tysdgx(object):
    """Client for landcloud.org.cn's "tysdgx" parcel-inspection workflow.

    Logs in (with a manually transcribed captcha), downloads the full parcel
    ("jctb") listing page by page into a local Excel workbook, and distributes
    selected parcels as mobile inspection tasks in batches of 50.
    """
    def __init__(self, pagecnt=52, assignfile="./tysdgx_data/assign.txt"):
        # 52 pages in total, 100 records per page.
        self.pagecnt = pagecnt
        self.vlist_url = "https://jg.landcloud.org.cn:8090/webapi/api/vlist"
        self.assign_url = "https://jg.landcloud.org.cn:8090/webapi/api//TaskDistribute"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0",
            "Authorization": "",# token cannot be null; filled in by rtv_data() after login
            "Origin": "https://jg.landcloud.org.cn:8090",
            "Referer": "https://jg.landcloud.org.cn:8090/main/list/tysdgx/1/0",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/json;charset=utf-8",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
        }
        self.cookies = {"482b4337498e47ae9e76f0e2271caf89": "WyIzODA1NTY1NTEwIl0"}
        # POST body for the paged listing endpoint; pageIndex is advanced per page.
        self.data = {
            "ywlx": "tysdgx",
            "xzqdm": "410922",
            "level": 1,
            "userxzqdm": "410922",
            "pageSize": 100,
            "pageIndex": 1,
            "order": "",
            "isUseCacheCount": True
        }
        # Per-parcel payload template appended to taskdata['tbbsms'].
        self.tbbsm_li = {
            "tbbsm": "",
            "xmbh": "",
            "xzqdm": "410922"
        }
        # Task-distribution payload; sbbsms lists the mobile device/user ids.
        self.taskdata ={
            "tbbsms": [],
            "xzqdm": "410922",
            "ywlx": "TYSDGX",
            "sbbsms": [614864, 615084, 526084, 550264, 555304, 555284, 555264, 555504, 555604, 556624, 555324, 556264]
        }
        # Login form; verifyCode is filled from user input in landlogin().
        self.userdata = {
            "username": "",
            "password": "",
            "verifyCode": ""
        }
        # Expected total number of parcel records on the server side.
        self.maxjctb = 5161
        # True when merging into an existing workbook instead of creating one.
        self.update_flag = True
        self.assign_name = assignfile
        self.datapath = "./tysdgx_data/TYSDGX_ALL_JCTB.xlsx"
        self.auth = ''
    # Log in to the portal and obtain the cross-domain token.
    def landlogin(self):
        """Interactive login: fetch the captcha, ask the operator to type it,
        submit the credentials, and return the cross-domain API token.

        Returns the token string on success, False otherwise.
        """
        if not os.path.exists("./tysdgx_data"):
            os.mkdir("./tysdgx_data")
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0"
        }
        host_url = 'https://landcloud.org.cn'
        se = requests.session()
        # Fetch the index page so the session picks up its cookies.
        index = se.get(url=host_url + "/index", headers=headers, verify=False)
        print("[ %s ]++网络状态:" % self.hms_now(), index.status_code)
        if index.status_code == 200:
            # , index.url, index.headers)
            # print(response.content)
            # dic = self.cookiejar2dic(se.cookies)
            # print(dic)
            # Fetch the login page.
            getlogin = se.get(url=host_url + "/login", cookies=se.cookies, verify=False)
            print("[ %s ]++获取登录页面状态:getlogin status:" % self.hms_now(), getlogin.status_code)
            patten = "<img id=\"verifycodeImage\"\s+src=\"(.*?)\"/>"
            # Extract the captcha image URL from the login page markup.
            match = re.findall(pattern=patten, string=getlogin.text)
            print("[ %s ]++匹配到验证码链接:" % self.hms_now(), match[0])
            veryimg_url = host_url + match[0]
            dic2 = self.cookiejar2dic(se.cookies)
            # print(veryimg_url)
            print("[ %s ]++登录页面获得cookies:" % self.hms_now(), dic2)
            # Download the captcha image.
            get_verify_img = se.get(url=veryimg_url, cookies=se.cookies, headers=headers, verify=False)
            print("[ %s ]++获取验证码链接图片:" % self.hms_now(), get_verify_img.status_code)
            # print(get_verify_img.text)
            dic3 = self.cookiejar2dic(se.cookies)
            vfpath = "./tysdgx_data/%s.png" % (datetime.datetime.now().strftime("%y%m%d-%H%M%S") + "_" +dic3['timestamp'])
            # Write the captcha image to a local file so the operator can read it.
            with open(vfpath, "wb") as fp:
                fp.write(get_verify_img.content)
            print("[ %s ]++更新页面获得cookies:" % self.hms_now(), dic3)
            # Ask the operator to transcribe the captcha, then submit the login.
            verifycode = input("[ %s ]++Please input the code you have identified:" % self.hms_now())
            verifycode = verifycode.strip()
            self.userdata['verifyCode'] = verifycode
            if os.path.exists(vfpath): # delete the generated .png file
                os.remove(vfpath)
                print("[ %s ]++文件路径:%s" % (self.hms_now(), vfpath), "删除成功!")
            signin_url = host_url + "/login/login.action"
            getsignin = se.post(url=signin_url, cookies=se.cookies, params=self.userdata, headers=headers, verify=False)
            # print(getsignin.url)
            if getsignin.status_code == 200 and getsignin.json()['status'] == 'OK':
                print("[ %s ]++登录页面返回信息:" % self.hms_now(), getsignin.json())
                # Request the cross-domain token (the Authorization header value).
                auth_url = host_url + "/third/proxy/getListDetailPageUrl?ywlx=TYSDGX&type=1&xzqdm=410922"
                get_auth = se.get(url=auth_url, cookies=se.cookies, headers=headers, verify=False)
                if get_auth.status_code == 200 and get_auth.json()['status'] == 'OK':
                    self.auth = re.findall(r"token=(.*?)$", get_auth.json()['data'])
                    print("[ %s ]++获取跨域token:" % self.hms_now(), self.auth)
                    return self.auth[0]
            else:
                return False
    # cookiejar object to dictionary
    def cookiejar2dic(self, cookies):
        """Convert a requests CookieJar into a plain dict."""
        return requests.utils.dict_from_cookiejar(cookies)
    # retrieve data from server
    def rtv_data(self):
        """Download all parcel records page by page and merge them into the
        local Excel workbook at self.datapath, creating it if absent.
        """
        # Run the login flow first; the returned token authorizes the API calls.
        self.headers['Authorization'] = "bearer " + self.landlogin()
        print("[ %s ]++开始获取来自网络的数据并更新:" % self.hms_now())
        filepath = self.datapath
        jcbh2tbbsm = {}
        # Check whether a local workbook already exists.
        if os.path.exists(filepath):
            wb = load_workbook(filepath)
            ws = wb.active
            # Decide whether the workbook needs updating at all.
            if self.maxjctb+1 <= ws.max_row:
                print("[ %s ]++Excel中最大行数为%s,不需要更新数据!" % (self.hms_now(), ws.max_row))
                wb.close()
                return
            else:
                self.update_flag = True
                a = 1
                for row in ws.iter_rows():
                    if a == 1:
                        a += 1
                        continue
                    else:
                        # After loading, row cells are 0-indexed; row[i].value holds the cell value.
                        # print("+第%s行" % a, row[1].value, row[2].value)
                        jcbh2tbbsm[row[2].value] = row[1].value
                        a += 1
                # print(jcbh2tbbsm)
                print("[ %s ]网络数据大于本地数据,开始更新数据到本地:" % self.hms_now())
        else:
            self.update_flag = False
            print("[ %s ]本地不存在excel数据,开始创建数据并更新,请注意检查数据页数!" % self.hms_now())
            wb = Workbook()
            # Activate the default worksheet.
            ws = wb.active
            # worksheet rename
            ws.title = "TYSDGX_all_jctb" # + datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            # insert into worksheet by one row.
            # Column order of the Excel sheet.
            ws.append(["bsm", "tbbsm", "jcbh", "xzb", "yzb", "tblx", "wyzt"])
        i = 1
        for j in range(1, self.pagecnt+1):
            self.data['pageIndex'] = j
            r = requests.post(url=self.vlist_url, headers=self.headers,
                              json=self.data, cookies=self.cookies, verify=False)
            # print(r.cookies)
            # check if the back status is 200, and retrieve the json data.
            if r.status_code == 200:
                jdata = r.json()
                # count = jdata['data']['allcount'] # int,the count number of all the records.
                record = jdata['data']['records'] # list,and each one in list was a dict.
                for li in record:
                    tbbsm = li['tbbsm'] # parcel (tuban) identifier
                    jcbh = li['jcbh'] # inspection number
                    tblx = li['tblx']
                    bsm = li['bsm']
                    xzb = li['xzb']
                    yzb = li['yzb']
                    wyzt = li['wyzt']
                    if self.update_flag:
                        # Skip records already present in the local workbook.
                        if jcbh in jcbh2tbbsm.keys():
                            i += 1
                            continue
                        else:
                            ws.append([bsm, tbbsm, jcbh, xzb, yzb, tblx, wyzt])
                            print("[ %s ]+添加第%s行" % (self.hms_now(), i), bsm, tbbsm, jcbh, xzb, yzb, tblx, wyzt)
                            i += 1
                    else:
                        ws.append([bsm, tbbsm, jcbh, xzb, yzb, tblx, wyzt])
                        print("[ %s ]+第%s行" % (self.hms_now(), i), bsm, tbbsm, jcbh, xzb, yzb, tblx, wyzt)
                        i += 1
            else:
                print("[ %s ]+请求数据状态码:%s,退出!" % (self.hms_now(), r.status_code))
        wb.save(filepath)
        wb.close()
        print("[ %s ]+文件保存成功!位于%s." % (self.hms_now(), filepath))
    # get nowtime in H:M:S
    def hms_now(self):
        """Return the current wall-clock time formatted as HH:MM:SS."""
        return datetime.datetime.now().strftime("%H:%M:%S")
    # get jctb from .txt
    def data_matching(self):
        """Read the parcel ids to assign from self.assign_name and match them
        against the local workbook.

        Returns a dict {jcbh: tbbsm} when every requested parcel is found,
        False otherwise (unmatched ids are dumped to a notfound*.txt file).
        """
        print("[ %s ]++开始处理分发任务:" % self.hms_now())
        print("[ %s ]++检查任务分发文件是否存在重复图斑?" % self.hms_now())
        assignfilepath = self.assign_name
        all_tb = {}
        toassign = {} # parcels to be assigned (jcbh -> tbbsm)
        notfound = [] # parcels that could not be matched
        # Check the assignment file exists.
        if os.path.exists(assignfilepath):
            with open(assignfilepath, "r") as fp:
                assign_tb = fp.readlines()
                # print(assign_tb)
                for tb in assign_tb:
                    tb = tb.strip()
        else:
            print("[ %s ]++不存在相关分配任务文件!" % self.hms_now())
            return False
        # Check for (and deduplicate) repeated parevel ids in the assignment file.
        if len(set(assign_tb)) == len(assign_tb):
            print("[ %s ]++检查结果:任务分发文件图斑无重复项." % self.hms_now())
        else:
            print("[ %s ]**********检查结果:分发任务文件图斑存在重复项,请检查!" % self.hms_now())
            assign_tb = set(assign_tb)
            print("[ %s ]++经处理后图斑列表为:!" % (self.hms_now()), assign_tb)
        # Check the local workbook exists.
        if os.path.exists(self.datapath):
            wb = load_workbook(self.datapath)
            ws = wb.active
            b = 1
            for row in ws.iter_rows():
                if b == 1:
                    b += 1
                    continue
                else:
                    all_tb[row[2].value] = row[1].value
                    b += 1
            wb.close()
            print("[ %s ]++读取本地数据库长度:%s." % (self.hms_now(), len(all_tb)))
            # Match each parcel id read from the txt against the local data.
            for tb in assign_tb:
                tb = tb.strip()
                if tb in all_tb.keys():
                    toassign[tb] = all_tb[tb]
                else:
                    print("[ %s ]+图斑%s未找到!" % (self.hms_now(), tb))
                    notfound.append(tb)
            # Handle the match result.
            if len(assign_tb) == len(toassign):
                # print("[ %s ]++匹配到图斑字典长度:" % self.hms_now(, toassign)
                print("[ %s ]++匹配到全部要分发图斑共%s个,开始分发任务:" % (self.hms_now(), len(toassign)))
            else:
                print("[ %s ]**********未全部匹配到图斑数!" % self.hms_now())
                nffilepath = "./tysdgx_data/notfound" + datetime.datetime.now().strftime("%y%m%d_%H%M%S") + ".txt"
                with open(nffilepath, "w") as fp:
                    fp.writelines(notfound)
                print("[ %s ]**********未匹配图斑文件位于:%s!" % (self.hms_now(), nffilepath))
                return False
        else:
            print("[ %s ]**********不存在本地数据库文件%s,请检查!" % (self.hms_now(), self.datapath))
            return False
        # toassign holds the parcels to assign: key is jcbh, value is tbbsm.
        return toassign
    # Dispatch the matched parcels as tasks to the mobile side.
    def begin_assignment(self):
        """Distribute the matched parcels to the server in batches of 50,
        sending the final partial batch last.
        """
        # Match data first.
        assigndata = self.data_matching()
        c = 1
        flag = 1
        if assigndata:
            # remain = (number of full 50-item batches, size of the final partial batch)
            remain = divmod(len(assigndata), 50)
            for tb in assigndata.keys():
                self.tbbsm_li['xmbh'] = tb
                self.tbbsm_li['tbbsm'] = assigndata[tb]
                self.taskdata['tbbsms'].append(self.tbbsm_li)
                if (remain[0] == 0 and c == remain[1]) or (remain[0] < flag and divmod(c, 50)[1] == remain[1]):
                    # print(self.taskdata)
                    # Post the task-distribution request for the final batch.
                    res = requests.post(url=self.assign_url, json=self.taskdata, cookies=self.cookies, headers=self.headers, verify=False)
                    jdata = res.json()
                    print("[ {0} ]++第{1}波网络返回数据:{2},code:{3},error:{4},message:{5}!".format(self.hms_now(), flag, jdata['data'],jdata['code'],jdata['error'],jdata['message']))
                    return
                elif remain[0] >= flag and divmod(c, 50)[1] == 0:
                    # Post the task-distribution request for a full 50-item batch.
                    res = requests.post(url=self.assign_url, json=self.taskdata, cookies=self.cookies, headers=self.headers, verify=False)
                    jdata = res.json()
                    self.taskdata['tbbsms'] = []
                    print("[ {0} ]++第{1}波50个网络返回数据:{2},code:{3},error:{4},message:{5}!".format(self.hms_now(), flag, jdata['data'],jdata['code'],jdata['error'],jdata['message']))
                    flag += 1
                    continue
                c += 1
        else:
            print("[ %s ]**********匹配分发任务出现问题,请检查后重新运行!" % self.hms_now())
if __name__ == '__main__':
    # Entry point: refresh the local parcel workbook, then distribute tasks.
    tysd = Tysdgx()
    tysd.rtv_data()
    tysd.begin_assignment()
|
[
"webhack007@outlook.com"
] |
webhack007@outlook.com
|
d3341770a701c50c59469677b0108cffaf063137
|
b6f892264a425278e84a01d12abb0e0082bddb1d
|
/orders/views.py
|
ad43e233d08f7352ea3703b89514611153bd17ae
|
[] |
no_license
|
omar-bendary/Bookstore
|
5059e264acc16389930cd640773171819fc8853c
|
714630213bd29fd82ba9b4cf89089ca346c705be
|
refs/heads/master
| 2022-12-31T18:14:49.860836
| 2020-10-25T23:39:17
| 2020-10-25T23:39:17
| 294,221,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
import stripe
from django.views.generic.base import TemplateView
from django.conf import settings
from django.shortcuts import render
# Module-level side effect: configure the Stripe client with the TEST secret
# key as soon as this views module is imported.
stripe.api_key = settings.STRIPE_TEST_SECRET_KEY
class OrdersPageView(TemplateView):
    """Renders the purchase page with the Stripe publishable key in context."""
    template_name = 'orders/purchase.html'
    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        # Expose the TEST publishable key to the template for Stripe Checkout.
        ctx['stripe_key'] = settings.STRIPE_TEST_PUBLISHABLE_KEY
        return ctx
def charge(request):
    """Charge the customer $39.00 for all books, then show the confirmation page.

    Only a POST (carrying the ``stripeToken`` form field) triggers the Stripe
    charge; any other method just renders the confirmation template.
    """
    if request.method == 'POST':
        # Renamed from `charge` — the original shadowed this view function.
        # Amount is in cents: 3900 == $39.00.
        stripe_charge = stripe.Charge.create(
            amount=3900,
            currency='usd',
            description='Purchase all books',
            source=request.POST['stripeToken']
        )
    return render(request, 'orders/charge.html')
|
[
"omarbendary@Omars-MacBook-Pro.local"
] |
omarbendary@Omars-MacBook-Pro.local
|
0aa81db5fca950f4936b5d5082166f4d30a34ab1
|
81a9529137c7361189965684764470f8f2d92f38
|
/yowsup/layers/protocol_chatstate/protocolentities/test_chatstate.py
|
c7b7c48b966da8e87344b5d15e81017b1b246adb
|
[
"MIT"
] |
permissive
|
pasinit/yowsup
|
670cd34e0d4e189394da97120ad2e76e201715fd
|
894007650bf3d75ef7af4a0e57e84dc7cccc4dfe
|
refs/heads/master
| 2021-06-01T17:50:29.866854
| 2014-12-20T11:29:30
| 2014-12-20T11:29:30
| 28,185,000
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
from yowsup.layers.protocol_chatstate.protocolentities.chatstate import ChatstateProtocolEntity
from yowsup.structs import ProtocolTreeNode
from yowsup.structs.protocolentity import ProtocolEntityTest
class ChatstateProtocolEntityTest(ProtocolEntityTest):
    """Entity test fixture for the <chatstate> node with a composing child."""
    def setUp(self):
        self.ProtocolEntity = ChatstateProtocolEntity
        node = ProtocolTreeNode("chatstate")
        node.addChild(ProtocolTreeNode('composing'))
        self.node = node
|
[
"g@almeida.io"
] |
g@almeida.io
|
b3a11d9e0e3f539e355dc7095199b687e163277f
|
4a71f8ee21d03c3308245fad1f81525934bd9a31
|
/nlu_dir/online/utils/configs.py
|
f37bcb3ce4a1e823d29eb57bfd0f1bc155f1eb7d
|
[] |
no_license
|
nwaiting/nlu_context
|
fe11f13fbf7165cdbc4914a2b92304cbc52eb8e4
|
da63efa0523d487a3ec59acc5ebfebfb8c1ea84f
|
refs/heads/master
| 2020-04-08T11:06:54.460129
| 2018-11-28T02:08:11
| 2018-11-28T02:08:11
| 159,293,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
from online.utils.nameutil import FieldClassifyNames as dcname,ContextNames as cname
from common.pathutil import PathUtil
# Single shared path resolver, built once at import time.
_pathutil_class = PathUtil()

# Maps field-classifier name -> context name -> list of dictionary file paths.
# Keys come from FieldClassifyNames (product/company) and ContextNames
# (domain/property); the paths are attributes of the PathUtil instance.
domain2entity2paths_set = {
    dcname.product.value: {
        cname.domain.value: [
            _pathutil_class.domain_filepath,
        ],
        cname.property.value: [
            _pathutil_class.property_filepath,
        ],
    },
    dcname.company.value: {
        cname.domain.value: [
            _pathutil_class.company_domain_filepath,
            _pathutil_class.fufilled_company_domain_filepath,
        ],
        cname.property.value: [
            _pathutil_class.company_property_filepath,
        ],
    },
}

# Deployment environment switch; flip to 'online' for production.
_environ = 'local'
# _environ='online'
|
[
"zhengyuyu3@jd.com"
] |
zhengyuyu3@jd.com
|
81e7392eb7df7d48b1169c1670899bdb02ff1c0b
|
c35a85e98774d10684fd1ef3a27328d0f005b260
|
/restapi/tasks.py
|
6a6cd1e392142d1b0180592001ed1fb212a05a9d
|
[] |
no_license
|
unichainplatform/unichain-monitor-web
|
ba110ec2bf7d71b8df952a1ec866ab4a411b295b
|
97a0f01698102f3b88de60e1dcfb5a7c7731e2f3
|
refs/heads/master
| 2023-08-04T04:01:25.853165
| 2020-03-03T09:58:21
| 2020-03-03T09:58:21
| 240,002,198
| 1
| 0
| null | 2023-07-23T05:26:05
| 2020-02-12T12:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 208
|
py
|
from celery import shared_task
from fabric.api import execute
from restapi.builder import start
from time import sleep
@shared_task
def build():
    """Celery task: run the fabric `start` builder (from restapi.builder)."""
    execute(start)
@shared_task
def sleepp():
    """Celery task: sleep 20 seconds — presumably a worker smoke/latency test; confirm."""
    sleep(20)
|
[
"zequnfeng@gmail.com"
] |
zequnfeng@gmail.com
|
71e6161a0479a61818760f02db11ace705c0070d
|
cdf9cad13361d7cb2306dd5ed1adfad227c5a5aa
|
/services_infrastructure/data-provider/app/extensions/logger.py
|
c1bbc11ce335878a3909fcbb1eaf849fdf209be6
|
[] |
no_license
|
MDUYN/reference-architecture-consent-management-in-data-spaces
|
ab32ec810ba429918f9125d66f323dc4ff5bdad0
|
6d8f0fd733c88954f7ce9694c2574882d39eb48c
|
refs/heads/main
| 2023-01-21T12:32:16.519723
| 2020-12-02T16:47:32
| 2020-12-02T16:47:32
| 315,774,537
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
import logging.config
def setup_logging():
    """Configure application-wide logging via ``logging.config.dictConfig``.

    Installs one stdout console handler. The root logger is capped at
    WARNING while the ``app`` logger emits INFO and above; neither
    propagates to ancestor loggers.
    """
    console_handler = {
        'level': 'INFO',
        'formatter': 'standard',
        'class': 'logging.StreamHandler',
        'stream': 'ext://sys.stdout',  # default would be stderr
    }
    config = {
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': {
            'standard': {
                'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
            },
        },
        'handlers': {
            'console': console_handler,
        },
        'loggers': {
            # Root logger: warnings and errors only.
            '': {
                'handlers': ['console'],
                'level': 'WARNING',
                'propagate': False
            },
            # Application logger: informational output.
            'app': {
                'handlers': ['console'],
                'level': 'INFO',
                'propagate': False
            },
        }
    }
    logging.config.dictConfig(config)
|
[
"marcvanduyn1@gmail.com"
] |
marcvanduyn1@gmail.com
|
3a8f47db15b6d38c675be76ddcac8ebc077745ca
|
d2ad678fd648f516f3eaa4ee8d13825d3e07983d
|
/src/lib/Server.py
|
edcc7d53d75b5a62dfbcf0424aaa04936fd657da
|
[] |
no_license
|
marcusagm/python-socketserver
|
4affcda01e88b6f298486335f810e2624e25a139
|
d4e6559716cead5c3f64acdb64f261d9fe8c32e0
|
refs/heads/master
| 2022-11-24T20:51:27.213763
| 2020-08-05T18:57:28
| 2020-08-05T18:57:28
| 285,364,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,044
|
py
|
import time
import queue
import errno
import socket
import select
import threading
class Server:
    """UDP relay server: rebroadcasts each datagram to every other known client.

    Clients are registered on first contact and expired after `timeout`
    seconds without a '::ping' keep-alive. Log strings are Portuguese.
    """
    def __init__(self, address = '127.0.0.1', port = 3000, bufferSize = 8192, timeout = 300):
        self.address = address
        self.port = int(port)
        self.bufferSize = bufferSize
        self.timeout = timeout
        # Known client addresses and their last-ping times (keyed by str(addr)).
        self.clients = []
        self.lastPings = {}
        self.receivedPackets = queue.Queue()
        self.lastTimeoutCheck = time.time()
        self.timeoutTimer = None
        self.context = None
        self.listener = None
        self.isListening = True
        # Set when the serve loop has fully exited (see shutdown()).
        self.isShutDown = threading.Event()
    def start(self):
        """Bind the UDP socket and serve until stop()/shutdown() is called."""
        try:
            self.context = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.context.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.context.bind((self.address, self.port))
            # self.context.setblocking(False)
            # self.context.settimeout(self.timeout)
            self.log('Servidor iniciado!')
            self.initTimeoutChecker()
            self.isShutDown.clear()
            while self.isListening == True:
                # select() blocks until the socket is readable (or closed by stop()).
                selecteds = select.select([self.context], [], [])
                if self.context in selecteds[0]:
                    self.processRequest()
        except (KeyboardInterrupt, SystemExit):
            pass
        except Exception as error:
            print(error)
        finally:
            self.stop()
            self.isShutDown.set()
    def shutdown(self):
        """Stop the server and block until the serve loop has exited."""
        self.stop()
        self.isShutDown.wait()
    def stop(self):
        # Closing the socket also unblocks the select() in start().
        self.isListening = False
        self.context.close()
    def processRequest(self):
        # Handle each readable event on a fresh thread.
        listener = threading.Thread(target=self.receiveData)
        listener.start()
    def receiveData(self):
        data, client = self.context.recvfrom(self.bufferSize)
        # NOTE(review): `data` is bytes, so `data != ''` is always True in
        # Python 3 — confirm whether an empty-datagram check was intended.
        if data != '':
            self.registerClient(client)
            self.parseData(data, client)
    def parseData(self, data, client):
        """Dispatch a datagram: answer pings, queue and relay everything else."""
        response = data.decode('utf-8')
        if response != '':
            self.log('Endereço:', str(client), 'Data:', response)
            if response == '::ping':
                self.registerPing(client)
            else:
                self.receivedPackets.put((data,client))
                self.sendToAll()
    def sendToAll(self):
        # Relay every queued packet to all clients except its sender.
        while not self.receivedPackets.empty():
            data, client = self.receivedPackets.get()
            for clientAddr in self.clients:
                if clientAddr != client:
                    self.context.sendto( data, clientAddr)
    def registerClient(self, client):
        """Track a client address the first time it is seen."""
        if client not in self.clients:
            self.clients.append(client)
            self.lastPings[str(client)] = time.time()
            self.log('Endereço:', str(client), '- Conectou.')
    def removeClient(self, client):
        """Forget a client address and its ping record."""
        if client in self.clients:
            self.clients.remove(client)
            del self.lastPings[str(client)]
            self.log('Endereço:', str(client), '- Conexão finalizada.')
    def initTimeoutChecker(self):
        # Only one timeout sweep is scheduled at any time.
        if self.timeoutTimer == None:
            self.timeoutTimer = threading.Timer(self.timeout, self.checkForTimeouts)
            self.timeoutTimer.start()
    def checkForTimeouts(self):
        """Drop clients whose last ping is older than `timeout`, then reschedule."""
        now = time.time()
        self.timeoutTimer = None
        if now - self.lastTimeoutCheck > self.timeout:
            self.lastTimeoutCheck = time.time()
            for client, pingTime in list(self.lastPings.items()):
                if now - pingTime > self.timeout:
                    self.log('Endereço:', client, '- Timeout')
                    # Keys are str(addr); eval() turns them back into tuples.
                    self.removeClient(eval(client))
        self.initTimeoutChecker()
    def registerPing(self, client):
        """Answer a '::ping' with '::pong' and refresh the client's timestamp."""
        self.context.sendto('::pong'.encode('utf-8'), client)
        self.lastPings[str(client)] = time.time()
    def now(self):
        # Human-readable current time for log lines.
        return time.ctime(time.time())
    def log(self, *message):
        print( self.now(), '-', ' '.join(message))
|
[
"marcusagmaia@gmail.com"
] |
marcusagmaia@gmail.com
|
ec453b7eab4fe6e2560531b8c90b49e805794a0c
|
f886045c7c8a3d457cf12f6a6df345ccc23ea2e5
|
/çalışankayıtokuma.py
|
934aeee9fc3b477d08c281d969731c34ba66fa9d
|
[] |
no_license
|
alpkaantanis/alp
|
3041e9917dfe4536ce1f56497c01fcf605944d4b
|
137e3501da0a5faaa59d5009ef4aa1c7b152fc01
|
refs/heads/master
| 2023-04-22T08:27:04.521559
| 2021-05-17T19:17:03
| 2021-05-17T19:17:03
| 368,295,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
def main():
    """Print name/id/department records read from 'calisanlar.txt'.

    The file stores one employee per three lines: name (isim), id (sicil)
    and department (bolum). Reads until EOF (readline() returns '').
    """
    # `with` guarantees the file is closed even if a read fails.
    with open('calisanlar.txt', 'r') as calisan_dosyasi:
        isim = calisan_dosyasi.readline()
        while isim != '':
            sicil = calisan_dosyasi.readline()
            bolum = calisan_dosyasi.readline()
            print('isim:', isim.rstrip('\n'))
            print('sicil:', sicil.rstrip('\n'))
            print('bolum:', bolum.rstrip('\n'))
            print()
            isim = calisan_dosyasi.readline()
main()
|
[
"alpkaantanis@gmail.com"
] |
alpkaantanis@gmail.com
|
95f90ed48904d30f93ba5e9307af06e67327876f
|
0d39012599f8bf1a43af0cdc9fc6ec0127f69e89
|
/mini_url/urls.py
|
98e42ab572d830fdb785098bbd55c71fc9688a12
|
[] |
no_license
|
Seifeddine-Selmi/django-tuto
|
0a815fef93367247d508361bf7e8047a9eb5a1c2
|
3054cc5e4ebeddedb9e80eb89d0fc36827ba9f2e
|
refs/heads/master
| 2021-01-14T08:35:48.288865
| 2017-02-21T17:35:57
| 2017-02-21T17:35:57
| 81,965,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 787
|
py
|
#-*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
urlpatterns = [
    # An empty string matches the site root
    #url(r'^$', views.url_list, name='url_list'),
    #url(r'^new_url$', views.new_url, name='url_create'),
    # (?P<code>\w{6}) captures exactly 6 alphanumeric characters.
    url(r'^(?P<code>\w{6})/$', views.url_redirection, name='url_redirection'),
    ### Views generic CRUD ###
    url(r'^$', views.ListUrl.as_view(), name='url_list'),
    url(r'^url$', views.URLCreate.as_view(), name='url_create'),
    #url(r'^edit/(?P<pk>\d+)$', views.URLUpdate.as_view(), name='url_update'),
    url(r'^edit/(?P<code>\w{6})$', views.URLUpdate.as_view(), name='url_update'),
    url(r'^delete/(?P<code>\w{6})$', views.URLDelete.as_view(), name='url_delete')
]
|
[
"selmi.seifeddine19@gmail.com"
] |
selmi.seifeddine19@gmail.com
|
d42a3b73ca399156ab41efeaf04eb9a3c00c8126
|
c2dc0e59980bb9141bc2459cc37cfeacc0d9bb75
|
/lumicks/pylake/detail/widefield.py
|
3d0eacab79adf0090e29ee7c4f35ce26afbbfaf5
|
[
"Apache-2.0"
] |
permissive
|
spangeni/pylake
|
fc5c69548aaa2f037f2f636dc6a5fb2ea0989132
|
7764928020264571aa7a0d8c5c9c0cb5d225377f
|
refs/heads/master
| 2023-06-16T04:57:40.324040
| 2021-07-05T16:51:54
| 2021-07-05T18:48:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,705
|
py
|
import numpy as np
import re
import json
import cv2
import tifffile
import warnings
from copy import copy
class TiffFrame:
    """Thin wrapper around a TIFF frame stack. For camera videos timestamps are stored in the DateTime tag in
    the format start:end.
    Parameters
    ----------
    page : tifffile.tifffile.TiffPage
        Tiff page recorded from a camera in Bluelake.
    align : bool
        Whether color channels should be aligned when `data` is accessed.
    """
    def __init__(self, page, align):
        self._src = page
        self._description = ImageDescription(page)
        self._align = align
    def _align_image(self):
        """Reconstruct image using alignment matrices from Bluelake; return aligned image as a NumPy array."""
        if not self._description:
            warnings.warn("File does not contain metadata. Only raw data is available")
            return self.raw_data
        try:
            # Red and blue channels are warped; green (channel 1) is the reference.
            align_mats = [self._description.alignment_matrix(color) for color in ("red", "blue")]
        except KeyError:
            warnings.warn("File does not contain alignment matrices. Only raw data is available")
            return self.raw_data
        img = self.raw_data
        rows, cols, _ = img.shape
        for mat, channel in zip(align_mats, (0, 2)):
            img[:, :, channel] = cv2.warpAffine(
                img[:, :, channel],
                mat,
                (cols, rows),
                flags=(cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP),
                borderMode=cv2.BORDER_CONSTANT,
                borderValue=0,
            )
        return img
    @property
    def data(self):
        # Aligned RGB data when requested and the page is RGB; raw data otherwise.
        return self._align_image() if (self.is_rgb and self._align) else self._src.asarray()
    @property
    def raw_data(self):
        """Page data as a NumPy array, without any channel alignment."""
        return self._src.asarray()
    @property
    def bit_depth(self):
        """Bits per sample; for RGB pages, the depth of a single channel."""
        bit_depth = self._src.tags["BitsPerSample"].value
        if self.is_rgb: # (int r, int g, int b)
            return bit_depth[0]
        else: # int
            return bit_depth
    @property
    def is_rgb(self):
        # Three samples per pixel means an RGB page.
        return self._src.tags["SamplesPerPixel"].value == 3
    def _get_plot_data(self, channel="rgb", vmax=None):
        """return data as a numpy array, appropriate for use by `imshow`
        if data is grayscale or channel in ('red', 'green', 'blue')
        return data as is
        if channel is 'rgb', converted to float in range [0,1] and correct for optional vmax argument:
        None : normalize data to max signal of all channels
        float : normalize data to vmax value
        """
        if not self.is_rgb:
            return self.data
        if channel.lower() == "rgb":
            # Scale by the sensor's full-scale value, then normalize.
            data = (self.data / (2 ** self.bit_depth - 1)).astype(float)
            if vmax is None:
                return data / data.max()
            else:
                return data / vmax
        else:
            try:
                return self.data[:, :, ("red", "green", "blue").index(channel.lower())]
            except ValueError:
                raise ValueError(f"'{channel}' is not a recognized channel")
    @property
    def start(self):
        """Integer start timestamp parsed from the 'start:end' DateTime tag; None if absent."""
        timestamp_string = re.search(r"^(\d+):\d+$", self._src.tags["DateTime"].value)
        return np.int64(timestamp_string.group(1)) if timestamp_string else None
    @property
    def stop(self):
        """Integer stop timestamp parsed from the 'start:end' DateTime tag; None if absent."""
        timestamp_string = re.search(r"^\d+:(\d+)$", self._src.tags["DateTime"].value)
        return np.int64(timestamp_string.group(1)) if timestamp_string else None
class TiffStack:
    """TIFF images exported from Bluelake.

    Parameters
    ----------
    tiff_file : tifffile.TiffFile
        TIFF file recorded from a camera in Bluelake.
    align : bool
        Whether frames should be color-aligned when read.
    """
    def __init__(self, tiff_file, align):
        self._src = tiff_file
        self._align = align

    def get_frame(self, frame):
        """Return frame number `frame` wrapped in a TiffFrame."""
        page = self._src.pages[frame]
        return TiffFrame(page, align=self._align)

    @staticmethod
    def from_file(image_file, align):
        """Open `image_file` with tifffile and wrap it in a TiffStack."""
        return TiffStack(tifffile.TiffFile(image_file), align=align)

    @property
    def num_frames(self):
        """Number of pages (frames) in the TIFF file."""
        pages = self._src.pages
        return len(pages)
class ImageDescription:
    """Parsed JSON metadata from a TIFF page's ImageDescription tag.

    Evaluates as falsy when the description is not valid JSON.
    """
    def __init__(self, src):
        try:
            self.json = json.loads(src.description)
        except json.decoder.JSONDecodeError:
            # No JSON metadata: leave instance empty/falsy.
            self.json = {}
            self._cmap = {}
            return
        self._cmap = {"red": 0, "green": 1, "blue": 2}
        # update format if necessary: rewrite legacy per-color alignment keys
        # to the channel-indexed format used by the rest of this class.
        if "Alignment red channel" in self.json:
            for color, j in self._cmap.items():
                self.json[f"Channel {j} alignment"] = self.json.pop(f"Alignment {color} channel")
                self.json[f"Channel {j} detection wavelength (nm)"] = "N/A"
    def __bool__(self):
        return bool(self.json)
    @property
    def alignment_roi(self):
        # (x, y, width, height) of the region used to compute alignment.
        return np.array(self.json["Alignment region of interest (x, y, width, height)"])
    @property
    def roi(self):
        # (x, y, width, height) of the exported region.
        return np.array(self.json["Region of interest (x, y, width, height)"])
    @property
    def offsets(self):
        # (x, y) offset of the alignment ROI relative to the export ROI.
        return self.alignment_roi[:2] - self.roi[:2]
    def _raw_alignment_matrix(self, color):
        # 2x3 affine matrix exactly as stored in the metadata.
        return np.array(self.json[f"Channel {self._cmap[color]} alignment"]).reshape((2, 3))
    def alignment_matrix(self, color):
        """Return the 2x3 alignment matrix for `color`, corrected for ROI offsets."""
        def correct_alignment_offset(alignment, x_offset, y_offset):
            # translate the origin of the image so that it matches that of the original transform
            translation = np.eye(3)
            translation[0, -1] = -x_offset
            translation[1, -1] = -y_offset
            # apply the original transform to the translated image.
            # it only needs to be resized from a 2x3 to a 3x3 matrix
            original = np.vstack((alignment, [0, 0, 1]))
            # translate the image back to the original origin.
            # takes into account both the offset and the scaling performed by the first step
            back_translation = np.eye(3)
            back_translation[0, -1] = original[0, 0] * x_offset
            back_translation[1, -1] = original[1, 1] * y_offset
            # concatenate the transforms by multiplying their matrices and ignore the unused 3rd row
            return np.dot(back_translation, np.dot(original, translation))[:2, :]
        align_mat = self._raw_alignment_matrix(color)
        x_offset, y_offset = self.offsets
        if x_offset == 0 and y_offset == 0:
            return align_mat
        else:
            return correct_alignment_offset(align_mat, x_offset, y_offset)
    @property
    def for_export(self):
        """JSON string for re-export, with alignment keys renamed to 'Applied ...'."""
        out = copy(self.json)
        if self:
            for j in range(3):
                out[f"Applied channel {j} alignment"] = out.pop(f"Channel {j} alignment")
        return json.dumps(out, indent=4)
|
[
"61475504+rpauszek@users.noreply.github.com"
] |
61475504+rpauszek@users.noreply.github.com
|
e7a491c97fbec62daeb6c02fbf47d0ade5a13322
|
9fb90dc8be1b88b3904d863880b35041724cca9b
|
/src/contrib/multiply.py
|
9dd28fdcdda22d67a8f009e52922d83804726d55
|
[
"Apache-2.0"
] |
permissive
|
pombredanne/hub-1
|
3bf6ea03b11b5c61185ba06b5a58eb203a3d95f3
|
ba52c85c1cd2974bd4199bf51ce0678c5eff44e1
|
refs/heads/master
| 2020-12-14T06:17:03.443350
| 2013-04-10T20:40:35
| 2013-04-10T20:40:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 100
|
py
|
#!/usr/bin/env python
from api import task
@task
def multiply(arg1, arg2):
    """Return the product of the two task arguments."""
    product = arg1 * arg2
    return product
|
[
"kris@automationlogic.com"
] |
kris@automationlogic.com
|
2965132d2280007b2cb37deb496ec32835affabc
|
e510582d3c00442f80d2872d369e16032c7b629f
|
/5-19(Reward).py
|
239dc412ef824052d486754549f01e08426d1b78
|
[] |
no_license
|
JangJur/Python-Practice
|
044885b5c50d4fe9a4a7b2c05326bef2faa4bba2
|
3adda0f4a6bc6f730fbb89baa22ad3d8f706dde3
|
refs/heads/master
| 2020-04-26T12:38:26.640557
| 2019-03-07T07:11:58
| 2019-03-07T07:11:58
| 173,434,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# Initial conditions: principal in 1988, claimed value in 2016, 12%/year.
p_year = 1988
r_year = 2016
b_money = 50000000
a_money = 1100000000
rate = 0.12
# Compound once per elapsed year. Same order of float operations as a
# year-by-year multiplication, so the printed difference is unchanged.
for _ in range(r_year - p_year):
    b_money *= (1 + rate)
if b_money > a_money:
    print("%d원 차이로 동일 아저씨의 말씀이 맞습니다." % (b_money - a_money))
elif b_money < a_money:
    print("%d원 차이로 미란 아주머니의 말씀이 맞습니다." % (a_money - b_money))
else:
    print("두분 다 말씀이 맞습니다.")
|
[
"wkdwndud753@naver.com"
] |
wkdwndud753@naver.com
|
58596698f6381143d4fb47cfddea1dde38e4dc79
|
d0f28dccc15804d4426bdb79c964fbc69206ec21
|
/utils/pandas_test.py
|
72e02ae50ce149d0dcce7a1e69b42df9f99ac909
|
[] |
no_license
|
INKWWW/python_project
|
f4170b435a2d08fa931c1ebff4b45bfa172181a6
|
0e4647d09ec4a7a5ea4d14863c2628402b3830d4
|
refs/heads/master
| 2020-04-02T08:29:54.970819
| 2019-04-13T07:52:54
| 2019-04-13T07:52:54
| 154,247,147
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,366
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pandas practice'''
import pdb
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
# s = pd.Series(list('abca'))
# print(s)
# print(pd.get_dummies(s))
# pdb.set_trace()
# Build a labeled 4x4 frame of 0..15 for the selection/encoding experiments below.
df = pd.DataFrame(np.arange(16).reshape((4,4)),index=['a','b','c','d'],columns=['one','two','three','four'])
print(df)
print('-----------------')
# print(df.loc[:, 'one':'three'])
### Selecting columns
# print(df.four) # equivalent to: print(df.loc[:,['three', 'four']]) and to: print(df['four'])
# print(df.loc[:,['three', 'four']])
# print(df.loc[:,'four'])
# print(df['four'])
### Selecting a single value
# print(df.loc['a','one'])
### Testing one-hot encoding
# add a new column
df['color'] = ['red', 'green', 'yellow', 'blue']
df['cate'] = ['1', '2', '3' ,'4']
print(df)
# df['color'] = LabelEncoder().fit_transform(df['color'])
# print(df)
# # print(type(df_label))
# df['color'] = OneHotEncoder().fit_transform(df['color'].values.reshape(-1,1)).toarray()
# print(df['color'])
# print(df)
print(df.iloc[:, 2].dtype)
# df_dummy = pd.get_dummies(df, columns=['color'])
df_dummy = pd.get_dummies(df)
print(df_dummy)
df_dummy['new'] = 1
print(df_dummy)
# print(df_dummy[0:2]) # take the first and second rows
# print(df_dummy[:2]) # take the first and second rows
# print(df_dummy.iloc[:,0:2])
|
[
"whmink@foxmail.com"
] |
whmink@foxmail.com
|
ad9a3b50ae05c454484d9697933ee5e00f730b4a
|
5dd7c4ec44b76180040badc67849ad44f81690f9
|
/unittests/test_stockitem.py
|
751eb41a7c209613f1a6e803ac526f15a85a3c77
|
[] |
no_license
|
myluco/Phoenix
|
68f9abe15a673fe56da6ef4375849ba6a642622d
|
2de746beda35b8b5db547658cae1c65cfe164039
|
refs/heads/master
| 2021-01-18T15:59:05.001240
| 2016-12-04T00:08:36
| 2016-12-04T00:08:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
import unittest
from unittests import wtc
import wx
#---------------------------------------------------------------------------
class stockitem_Tests(wtc.WidgetTestCase):
    """Placeholder suite for wx stock-item APIs; currently always fails."""
    # TODO: Remove this test and add real ones.
    def test_stockitem1(self):
        # Intentional failure so the missing coverage shows up in test runs.
        self.fail("Unit tests for stockitem not implemented yet.")
#---------------------------------------------------------------------------
if __name__ == '__main__':
    unittest.main()
|
[
"robin@alldunn.com"
] |
robin@alldunn.com
|
00c9949db590246f66d2bb3310ffbfe39a1fee79
|
9b24eb3a15e9acd4aaf7af00d88488f5a056438f
|
/backend/home/api/v1/viewsets.py
|
c7c28c17f806e899fca335a7c524c6cb75b776a2
|
[] |
no_license
|
crowdbotics-apps/dashboard-app-18025
|
b8fb28008d42371c7d74102b78ae380725b3221a
|
202f33b00e14f65adfc9dbf84f748ad5cc051652
|
refs/heads/master
| 2022-11-15T12:16:12.733390
| 2020-06-15T17:24:52
| 2020-06-15T17:24:52
| 271,619,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,485
|
py
|
from rest_framework import viewsets
from rest_framework import authentication
from .serializers import (
AddressSerializer,
CustomTextSerializer,
HomePageSerializer,
XYSerializer,
)
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import IsAdminUser
from rest_framework.viewsets import ModelViewSet, ViewSet
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from home.api.v1.serializers import (
SignupSerializer,
CustomTextSerializer,
HomePageSerializer,
UserSerializer,
)
from home.models import Address, CustomText, HomePage, XY
class SignupViewSet(ModelViewSet):
    """User registration endpoint; POST-only (no list/retrieve/update)."""
    serializer_class = SignupSerializer
    http_method_names = ["post"]
class LoginViewSet(ViewSet):
    """Based on rest_framework.authtoken.views.ObtainAuthToken"""
    serializer_class = AuthTokenSerializer
    def create(self, request):
        """Validate posted credentials; return the auth token and serialized user.

        The token is created on first login (get_or_create); invalid
        credentials raise a validation error (400).
        """
        serializer = self.serializer_class(
            data=request.data, context={"request": request}
        )
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data["user"]
        token, created = Token.objects.get_or_create(user=user)
        user_serializer = UserSerializer(user)
        return Response({"token": token.key, "user": user_serializer.data})
class CustomTextViewSet(ModelViewSet):
    """Admin-only read/update access to CustomText records (no create/delete)."""
    serializer_class = CustomTextSerializer
    queryset = CustomText.objects.all()
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    permission_classes = [IsAdminUser]
    http_method_names = ["get", "put", "patch"]
class HomePageViewSet(ModelViewSet):
    """Admin-only read/update access to HomePage records (no create/delete)."""
    serializer_class = HomePageSerializer
    queryset = HomePage.objects.all()
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    permission_classes = [IsAdminUser]
    http_method_names = ["get", "put", "patch"]
class XYViewSet(viewsets.ModelViewSet):
    """Full CRUD for XY records; session or token authentication."""
    serializer_class = XYSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = XY.objects.all()
class AddressViewSet(viewsets.ModelViewSet):
    """Full CRUD for Address records; session or token authentication."""
    serializer_class = AddressSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Address.objects.all()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
fbf27bd442478db50ae76d53ff9eeda876dab945
|
35150c23e611588b506851d7bb33049682e667f2
|
/hotelapp/dao.py
|
0fce97739b963a380a00e047225a25e2bcc68375
|
[] |
no_license
|
hienhuynhxuan/HotelManager_KO
|
4fd9dda2dc21a6d7254884b215af02c12af3d2c2
|
f0f362be1ae91462f987a86f5dff4c39f401dd80
|
refs/heads/main
| 2023-01-22T04:47:07.132558
| 2020-11-19T02:48:01
| 2020-11-19T02:48:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
from sqlalchemy import extract
from hotelapp.models import *
def read_room_info(name=None, kind_of_room_id=None, status=None, amount=None):
    """Return rooms matching the given filters as a list.

    The original mixed lazy ``filter`` objects (single-use iterators) with
    lists; callers that iterated twice would silently see nothing the second
    time. All branches now build lists, so the result is always re-iterable.
    `amount` is accepted for interface compatibility but not used here.
    """
    rooms = Room.query.all()
    # NOTE(review): this compares the room-kind *name* against the
    # stringified id — confirm that KindOfRoom.name really stores the id.
    kind = str(kind_of_room_id)
    if name:
        rooms = [r for r in rooms if r.name == name]
    if kind_of_room_id:
        rooms = [r for r in rooms if r.KindOfRoom.name == kind]
    if status:
        rooms = [r for r in rooms if r.status.value == status]
    return rooms
|
[
"hien.hx133@gmail.com"
] |
hien.hx133@gmail.com
|
b804ac79cb9db91690da307eee3873b42d5a9662
|
f4830205e1ee5d2adfa1ed3be62deb89413ca34f
|
/datastructures/tuples/tuple-sort.py
|
a499ff8c341a1162819eef519abd7de6e72b8065
|
[] |
no_license
|
sarathcakurathi/pyscripts
|
362e53a943fa9ff92bac096dea232fc27eed09d1
|
93f8680fdb5f5ad9b688dd58fb5263132df770f7
|
refs/heads/master
| 2020-07-12T00:25:29.978478
| 2019-08-27T13:12:43
| 2019-08-27T13:12:43
| 204,674,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
#!/bin/python
# Order a list of tuples by each tuple's second element.
input_list = [(5, 6), (4, 2, 3), (1, 4)]
input_list = sorted(input_list, key=lambda pair: pair[1])
print(input_list)
|
[
"sarath.c.akurathi@gmail.com"
] |
sarath.c.akurathi@gmail.com
|
95e2a602cdea202da5cba6e81d040adac387cb68
|
ea3048858939a8162f82a1d0b0ec43171530ea8d
|
/apps/search/models.py
|
62ec89db3a73a35a853d885c234a3453ffbb6a68
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
kknet/NewsBlur
|
d229b12c39f7ca3eab1e28922171f87ea37b8df1
|
fa78b434f980d2814dd05fedb70d9e87259ee998
|
refs/heads/master
| 2021-01-17T22:36:29.651729
| 2016-09-20T20:05:25
| 2016-09-20T20:05:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,270
|
py
|
import re
import time
import datetime
import pymongo
import pyes
import redis
import celery
import mongoengine as mongo
from django.conf import settings
from django.contrib.auth.models import User
from apps.search.tasks import IndexSubscriptionsForSearch
from apps.search.tasks import IndexSubscriptionsChunkForSearch
from apps.search.tasks import IndexFeedsForSearch
from utils import log as logging
from utils.feed_functions import chunks
class MUserSearch(mongo.Document):
    '''Search index state of a user's subscriptions.

    Tracks whether a user's subscribed feeds have been indexed for search,
    so indexing can be kicked off lazily on the user's first search.
    (Python 2 module: note the old except/print syntax in remove_all.)
    '''
    user_id = mongo.IntField(unique=True)
    last_search_date = mongo.DateTimeField()
    subscriptions_indexed = mongo.BooleanField()
    subscriptions_indexing = mongo.BooleanField()
    meta = {
        'collection': 'user_search',
        'indexes': ['user_id'],
        'index_drop_dups': True,
        'allow_inheritance': False,
    }
    @classmethod
    def get_user(cls, user_id, create=True):
        """Fetch (or lazily create, if `create`) the search state for user_id."""
        try:
            # Read from the primary to avoid stale replica state.
            user_search = cls.objects.read_preference(pymongo.ReadPreference.PRIMARY)\
                .get(user_id=user_id)
        except cls.DoesNotExist:
            if create:
                user_search = cls.objects.create(user_id=user_id)
            else:
                user_search = None
        return user_search
    def touch_search_date(self):
        """Record that the user searched now; trigger first-time indexing."""
        if not self.subscriptions_indexed and not self.subscriptions_indexing:
            self.schedule_index_subscriptions_for_search()
            self.subscriptions_indexing = True
        self.last_search_date = datetime.datetime.now()
        self.save()
    def schedule_index_subscriptions_for_search(self):
        # Queue the full-subscription indexing job on the tasker queue.
        IndexSubscriptionsForSearch.apply_async(kwargs=dict(user_id=self.user_id),
                                                queue='search_indexer_tasker')
    # Should be run as a background task
    def index_subscriptions_for_search(self):
        """Index all of the user's subscribed feeds, fanning out in chunks of 6."""
        from apps.rss_feeds.models import Feed
        from apps.reader.models import UserSubscription
        SearchStory.create_elasticsearch_mapping()
        start = time.time()
        user = User.objects.get(pk=self.user_id)
        r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
        # Progress notifications are published on the user's Redis channel.
        r.publish(user.username, 'search_index_complete:start')
        subscriptions = UserSubscription.objects.filter(user=user).only('feed')
        total = subscriptions.count()
        feed_ids = []
        for sub in subscriptions:
            try:
                feed_ids.append(sub.feed.pk)
            except Feed.DoesNotExist:
                # Subscription points at a deleted feed; skip it.
                continue
        feed_id_chunks = [c for c in chunks(feed_ids, 6)]
        logging.user(user, "~FCIndexing ~SB%s feeds~SN in %s chunks..." %
                     (total, len(feed_id_chunks)))
        tasks = [IndexSubscriptionsChunkForSearch().s(feed_ids=feed_id_chunk,
                                                      user_id=self.user_id
                                                      ).set(queue='search_indexer')
                 for feed_id_chunk in feed_id_chunks]
        # Fan out the chunks as a celery group and wait for all of them.
        group = celery.group(*tasks)
        res = group.apply_async(queue='search_indexer')
        res.join_native()
        duration = time.time() - start
        logging.user(user, "~FCIndexed ~SB%s feeds~SN in ~FM~SB%s~FC~SN sec." %
                     (total, round(duration, 2)))
        r.publish(user.username, 'search_index_complete:done')
        self.subscriptions_indexed = True
        self.subscriptions_indexing = False
        self.save()
    def index_subscriptions_chunk_for_search(self, feed_ids):
        """Index one chunk of feeds, then publish completion over Redis."""
        from apps.rss_feeds.models import Feed
        r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
        user = User.objects.get(pk=self.user_id)
        logging.user(user, "~FCIndexing %s feeds..." % len(feed_ids))
        for feed_id in feed_ids:
            feed = Feed.get_by_id(feed_id)
            if not feed: continue
            feed.index_stories_for_search()
        r.publish(user.username, 'search_index_complete:feeds:%s' %
                  ','.join([str(f) for f in feed_ids]))
    @classmethod
    def schedule_index_feeds_for_search(cls, feed_ids, user_id):
        """Queue indexing of specific feeds, but only for users who have searched."""
        user_search = cls.get_user(user_id, create=False)
        if (not user_search or
            not user_search.subscriptions_indexed or
            user_search.subscriptions_indexing):
            # User hasn't searched before.
            return
        if not isinstance(feed_ids, list):
            feed_ids = [feed_ids]
        IndexFeedsForSearch.apply_async(kwargs=dict(feed_ids=feed_ids, user_id=user_id),
                                        queue='search_indexer')
    @classmethod
    def index_feeds_for_search(cls, feed_ids, user_id):
        """Synchronously index the given feeds on behalf of user_id."""
        from apps.rss_feeds.models import Feed
        user = User.objects.get(pk=user_id)
        logging.user(user, "~SB~FCIndexing %s~FC by request..." % feed_ids)
        for feed_id in feed_ids:
            feed = Feed.get_by_id(feed_id)
            if not feed: continue
            feed.index_stories_for_search()
    @classmethod
    def remove_all(cls, drop_index=False):
        """Remove every user's search state; optionally drop the story index."""
        # You only need to drop the index if there is data you want to clear.
        # A new search server won't need this, as there isn't anything to drop.
        if drop_index:
            logging.info(" ---> ~FRRemoving stories search index...")
            SearchStory.drop()
        user_searches = cls.objects.all()
        logging.info(" ---> ~SN~FRRemoving ~SB%s~SN user searches..." % user_searches.count())
        for user_search in user_searches:
            try:
                user_search.remove()
            # Python 2 except/print syntax below; this module predates Py3.
            except Exception, e:
                print " ****> Error on search removal: %s" % e
    def remove(self):
        """Un-mark all of this user's feeds as search-indexed, then delete self."""
        from apps.rss_feeds.models import Feed
        from apps.reader.models import UserSubscription
        user = User.objects.get(pk=self.user_id)
        subscriptions = UserSubscription.objects.filter(user=self.user_id)
        total = subscriptions.count()
        removed = 0
        for sub in subscriptions:
            try:
                feed = sub.feed
            except Feed.DoesNotExist:
                continue
            if not feed.search_indexed:
                continue
            feed.search_indexed = False
            feed.save()
            removed += 1
        logging.user(user, "~FCRemoved ~SB%s/%s feed's search indexes~SN for ~SB~FB%s~FC~SN." %
                     (removed, total, user.username))
        self.delete()
class SearchStory:
    """Elasticsearch interface (pyes client) for indexing and querying stories."""
    ES = pyes.ES(settings.ELASTICSEARCH_STORY_HOSTS)
    name = "stories"
    @classmethod
    def index_name(cls):
        return "%s-index" % cls.name
    @classmethod
    def type_name(cls):
        return "%s-type" % cls.name
    @classmethod
    def create_elasticsearch_mapping(cls, delete=False):
        """Create the stories index and its field mapping (optionally from scratch)."""
        if delete:
            cls.ES.indices.delete_index_if_exists("%s-index" % cls.name)
        cls.ES.indices.create_index_if_missing("%s-index" % cls.name)
        # Relevance boosts: title (3.0) > tags (2.0) > content/author (1.0).
        mapping = {
            'title': {
                'boost': 3.0,
                'index': 'analyzed',
                'store': 'no',
                'type': 'string',
                'analyzer': 'standard',
            },
            'content': {
                'boost': 1.0,
                'index': 'analyzed',
                'store': 'no',
                'type': 'string',
                'analyzer': 'simple',
            },
            'tags': {
                'boost': 2.0,
                'index': 'analyzed',
                'store': 'no',
                'type': 'string',
                'analyzer': 'standard',
            },
            'author': {
                'boost': 1.0,
                'index': 'analyzed',
                'store': 'no',
                'type': 'string',
                'analyzer': 'simple',
            },
            'feed_id': {
                'store': 'no',
                'type': 'integer'
            },
            'date': {
                'store': 'no',
                'type': 'date',
            }
        }
        cls.ES.indices.put_mapping("%s-type" % cls.name, {
            'properties': mapping,
            '_source': {'enabled': False},
        }, ["%s-index" % cls.name])
    @classmethod
    def index(cls, story_hash, story_title, story_content, story_tags, story_author, story_feed_id,
              story_date):
        """Index (or reindex) a single story document keyed by story_hash."""
        doc = {
            "content" : story_content,
            "title" : story_title,
            "tags" : ', '.join(story_tags),
            "author" : story_author,
            "feed_id" : story_feed_id,
            "date" : story_date,
        }
        try:
            cls.ES.index(doc, "%s-index" % cls.name, "%s-type" % cls.name, story_hash)
        except pyes.exceptions.NoServerAvailable:
            logging.debug(" ***> ~FRNo search server available.")
    @classmethod
    def remove(cls, story_hash):
        """Delete a story document; best-effort when the server is down."""
        try:
            cls.ES.delete("%s-index" % cls.name, "%s-type" % cls.name, story_hash)
        except pyes.exceptions.NoServerAvailable:
            logging.debug(" ***> ~FRNo search server available.")
    @classmethod
    def drop(cls):
        # Remove the whole stories index.
        cls.ES.indices.delete_index_if_exists("%s-index" % cls.name)
    @classmethod
    def query(cls, feed_ids, query, order, offset, limit):
        """Search stories across feed_ids; return a list of matching story ids."""
        cls.create_elasticsearch_mapping()
        cls.ES.indices.refresh()
        query = re.sub(r'([^\s\w_\-])+', ' ', query) # Strip non-alphanumeric
        sort = "date:desc" if order == "newest" else "date:asc"
        string_q = pyes.query.QueryStringQuery(query, default_operator="AND")
        # Cap the feed filter at 1000 terms.
        feed_q = pyes.query.TermsQuery('feed_id', feed_ids[:1000])
        q = pyes.query.BoolQuery(must=[string_q, feed_q])
        try:
            results = cls.ES.search(q, indices=cls.index_name(), doc_types=[cls.type_name()],
                                    partial_fields={}, sort=sort, start=offset, size=limit)
        except pyes.exceptions.NoServerAvailable:
            logging.debug(" ***> ~FRNo search server available.")
            return []
        logging.info(" ---> ~FG~SNSearch ~FCstories~FG for: ~SB%s~SN (across %s feed%s)" %
                     (query, len(feed_ids), 's' if len(feed_ids) != 1 else ''))
        try:
            result_ids = [r.get_id() for r in results]
        # NOTE(review): `except pyes.InvalidQuery(), e:` instantiates the
        # exception class; Python 2 syntax should be
        # `except pyes.InvalidQuery, e:` — confirm and fix.
        except pyes.InvalidQuery(), e:
            logging.info(" ---> ~FRInvalid search query \"%s\": %s" % (query, e))
            return []
        return result_ids
class SearchFeed:
    """Elasticsearch-backed search index for feeds (title/address/link).

    Uses a lazily-created pyes client and an edge-ngram analyzer so that
    partial (prefix) matches work for autocomplete-style feed search.
    """

    _es_client = None
    name = "feeds"

    @classmethod
    def ES(cls):
        """Return the shared pyes client, creating it (and the index) on first use."""
        if cls._es_client is None:
            cls._es_client = pyes.ES(settings.ELASTICSEARCH_FEED_HOSTS)
            if not cls._es_client.indices.exists_index(cls.index_name()):
                cls.create_elasticsearch_mapping()
        return cls._es_client

    @classmethod
    def index_name(cls):
        """Name of the elasticsearch index holding feed documents."""
        return "%s-index" % cls.name

    @classmethod
    def type_name(cls):
        """Name of the document type within the feed index."""
        return "%s-type" % cls.name

    @classmethod
    def create_elasticsearch_mapping(cls, delete=False):
        """Create the feed index (if missing) and install its mapping.

        Parameters:
            delete -- when True, drop any existing index first.
        """
        if delete:
            cls.ES().indices.delete_index_if_exists(cls.index_name())
        # BUG FIX: this local used to be named `settings`, shadowing the
        # module-level django `settings` import for the rest of the method.
        index_settings = {
            "index": {
                "analysis": {
                    "analyzer": {
                        "edgengram_analyzer": {
                            "filter": ["edgengram"],
                            "tokenizer": "lowercase",
                            "type": "custom"
                        },
                    },
                    "filter": {
                        "edgengram": {
                            "max_gram": "15",
                            "min_gram": "1",
                            "type": "edgeNGram"
                        },
                    }
                }
            }
        }
        cls.ES().indices.create_index_if_missing(cls.index_name(), index_settings)
        mapping = {
            "address": {
                "analyzer": "edgengram_analyzer",
                "store": False,
                "term_vector": "with_positions_offsets",
                "type": "string"
            },
            "feed_id": {
                "store": True,
                "type": "string"
            },
            "num_subscribers": {
                "index": "analyzed",
                "store": True,
                "type": "long"
            },
            "title": {
                "analyzer": "edgengram_analyzer",
                "store": False,
                "term_vector": "with_positions_offsets",
                "type": "string"
            },
            "link": {
                "analyzer": "edgengram_analyzer",
                "store": False,
                "term_vector": "with_positions_offsets",
                "type": "string"
            }
        }
        cls.ES().indices.put_mapping(cls.type_name(), {
            'properties': mapping,
        }, [cls.index_name()])
        cls.ES().indices.flush()

    @classmethod
    def index(cls, feed_id, title, address, link, num_subscribers):
        """Add (or overwrite) one feed document, keyed by ``feed_id``.

        Logs and returns quietly if no search server is reachable.
        """
        doc = {
            "feed_id": feed_id,
            "title": title,
            "address": address,
            "link": link,
            "num_subscribers": num_subscribers,
        }
        try:
            cls.ES().index(doc, cls.index_name(), cls.type_name(), feed_id)
        except pyes.exceptions.NoServerAvailable:
            logging.debug(" ***> ~FRNo search server available.")

    @classmethod
    def query(cls, text, max_subscribers=5):
        """Search feeds by address/link/title, most-subscribed first.

        Returns at most ``max_subscribers`` results (forced to 1 in DEBUG),
        or an empty list when the search server is unavailable.
        """
        try:
            cls.ES().default_indices = cls.index_name()
            cls.ES().indices.refresh()
        except pyes.exceptions.NoServerAvailable:
            logging.debug(" ***> ~FRNo search server available.")
            return []
        if settings.DEBUG:
            max_subscribers = 1

        logging.info("~FGSearch ~FCfeeds~FG: ~SB%s" % text)
        q = pyes.query.BoolQuery()
        q.add_should(pyes.query.MatchQuery('address', text, analyzer="simple", cutoff_frequency=0.0005, minimum_should_match="75%"))
        q.add_should(pyes.query.MatchQuery('link', text, analyzer="simple", cutoff_frequency=0.0005, minimum_should_match="75%"))
        q.add_should(pyes.query.MatchQuery('title', text, analyzer="simple", cutoff_frequency=0.0005, minimum_should_match="75%"))
        q = pyes.Search(q, min_score=1)
        results = cls.ES().search(query=q, size=max_subscribers, doc_types=[cls.type_name()], sort="num_subscribers:desc")
        return results

    @classmethod
    def export_csv(cls):
        """Dump all feeds with >= 20 subscribers to feeds.csv in the CWD."""
        import djqscsv

        qs = Feed.objects.filter(num_subscribers__gte=20).values('id', 'feed_title', 'feed_address', 'feed_link', 'num_subscribers')
        csv_content = djqscsv.render_to_csv_response(qs).content
        # BUG FIX: use a context manager (and a non-shadowing name) so the
        # file handle is closed even if write() raises.
        with open('feeds.csv', 'w+') as f:
            f.write(csv_content)
|
[
"samuel@ofbrooklyn.com"
] |
samuel@ofbrooklyn.com
|
ad937e5bdb44e8c8d3bb32af90bd346163310b48
|
b99ce2b1bc8ac32976cf1762c8df7bc74365e403
|
/models/networks.py
|
c816a0edf200d1544f3b93f4aa93110c3d40a629
|
[
"Apache-2.0"
] |
permissive
|
KoryakovDmitry/TGRNet
|
8a7b2ad34cc2cc481f90bd9f9b0745368cdcc705
|
afef2835a8f3ff0d2f6573dda025e3115e0d3400
|
refs/heads/main
| 2023-08-04T02:29:29.361763
| 2021-10-05T15:03:12
| 2021-10-05T15:03:12
| 413,845,598
| 0
| 0
|
Apache-2.0
| 2021-10-05T14:11:32
| 2021-10-05T14:11:31
| null |
UTF-8
|
Python
| false
| false
| 29,516
|
py
|
import torch
import torch.nn as nn
from torch_geometric.nn import GCNConv
from torch_geometric.data import Data as GraphData
from torch_geometric.data import Batch as GraphBatch
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
from torchvision import models
from torchvision import ops
from torchvision.ops import boxes as box_ops
import numpy as np
import cv2, os
from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN
from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
from torchvision.ops import MultiScaleRoIAlign
from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.transform import GeneralizedRCNNTransform
from torchvision.models._utils import IntermediateLayerGetter
from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork
from torch.jit.annotations import Tuple, List, Dict, Optional
from collections import OrderedDict
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
    """A no-op module: forwards its input unchanged.

    Used as a stand-in wherever an optional layer (e.g. normalization)
    is disabled.
    """

    def forward(self, x):
        # Deliberate pass-through.
        return x
def get_norm_layer(norm_type='instance'):
    """Return a factory for the requested normalization layer.

    Parameters:
        norm_type (str) -- one of: batch | instance | none

    BatchNorm uses learnable affine parameters and tracks running
    statistics; InstanceNorm uses neither. 'none' yields a factory that
    produces a pass-through ``Identity`` module.

    Raises:
        NotImplementedError -- for any other ``norm_type``.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    if norm_type == 'none':
        return lambda x: Identity()
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
def get_scheduler(optimizer, opt):
    """Return a learning-rate scheduler for ``optimizer``.

    Parameters:
        optimizer -- the optimizer whose learning rate is scheduled
        opt (option class) -- experiment flags; ``opt.lr_policy`` selects
                              one of: linear | step | plateau | cosine

    'linear' keeps the initial rate for the first ``opt.niter`` epochs and
    decays it linearly to zero over the next ``opt.niter_decay`` epochs.
    The other policies wrap the stock PyTorch schedulers; see
    https://pytorch.org/docs/stable/optim.html for details.
    """
    policy = opt.lr_policy
    if policy == 'linear':
        def lambda_rule(epoch):
            # Fraction of the initial learning rate to keep at this epoch.
            return 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
        return lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    if policy == 'step':
        return lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    if policy == 'plateau':
        return lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, threshold=0.01, patience=5)
    if policy == 'cosine':
        return lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    # NOTE: kept from the original — the error is *returned*, not raised.
    return NotImplementedError('learning rate policy [%s] is not implemented', policy)
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize the weights of every Conv/Linear/BatchNorm2d layer in ``net``.

    Parameters:
        net (network)   -- network to be initialized (modified in place)
        init_type (str) -- normal | xavier | kaiming | orthogonal
        init_gain (float) -- scale for the normal, xavier and orthogonal schemes

    Raises:
        NotImplementedError -- for an unrecognized ``init_type``.
    """
    def initialize(module):
        cls_name = module.__class__.__name__
        if hasattr(module, 'weight') and ('Conv' in cls_name or 'Linear' in cls_name):
            if init_type == 'normal':
                init.normal_(module.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(module.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(module.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(module.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(module, 'bias') and module.bias is not None:
                init.constant_(module.bias.data, 0.0)
        elif 'BatchNorm2d' in cls_name and module.affine:
            # BatchNorm weight is a vector, not a matrix; only a normal
            # distribution around 1.0 makes sense here.
            init.normal_(module.weight.data, 1.0, init_gain)
            init.constant_(module.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(initialize)
def init_net(net, use_distributed, gpu_id, no_init=False, init_type='normal', init_gain=0.02):
    """Register the network on its device and initialize its weights.

    Parameters:
        net (network)      -- the network to prepare
        use_distributed    -- when True, move to CUDA and wrap with
                              DistributedDataParallel on ``gpu_id``
        gpu_id (int)       -- device id used for DDP
        no_init (bool)     -- skip weight initialization (e.g. pretrained nets)
        init_type (str)    -- normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scale for normal/xavier/orthogonal schemes

    Returns the (possibly wrapped) network.
    """
    if use_distributed:
        assert torch.cuda.is_available()
        net.to(torch.device('cuda'))
        net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[gpu_id])
    if not no_init:
        init_weights(net, init_type, init_gain=init_gain)
    return net
def define_ResNet50(gpu_ids=[]):
    """Build an ImageNet-pretrained ResNet-50 wrapped in the project's
    ``ResNet50`` module, optionally spread over the given GPUs.

    NOTE(review): the mutable default ``gpu_ids=[]`` is never mutated here,
    so it is harmless, but callers should prefer passing an explicit list.
    """
    backbone = models.resnet50(pretrained=True)
    net = ResNet50(backbone)
    if gpu_ids:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)
    return net
def resnet_fpn_backbone(backbone_name, pretrained, use_distributed, gpu_id, norm_layer=ops.misc.FrozenBatchNorm2d, trainable_layers=5):
    """Build a ResNet + FPN backbone whose pyramid levels are fused by
    ``FeatureFusionForFPN``.

    Parameters:
        backbone_name (str)    -- torchvision resnet variant, e.g. 'resnet50'
        pretrained (bool)      -- load ImageNet weights
        use_distributed (bool) -- wrap with DistributedDataParallel (see init_net)
        gpu_id (int)           -- device id used when distributed
        norm_layer             -- normalization layer class for the resnet
                                  (default: FrozenBatchNorm2d)
        trainable_layers (int) -- number of resnet stages, counted from the
                                  top ('layer4' first), left unfrozen (0..5)

    Returns the device-registered fusion network.
    """
    # BUG FIX: the norm_layer argument used to be ignored — FrozenBatchNorm2d
    # was hard-coded in the resnet constructor. Honor it; the default keeps
    # the previous behavior.
    backbone = models.resnet.__dict__[backbone_name](
        pretrained=pretrained,
        norm_layer=norm_layer)
    # select layers that wont be frozen
    assert 0 <= trainable_layers <= 5
    layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]
    # freeze every parameter outside the trainable layers
    for name, parameter in backbone.named_parameters():
        if all([not name.startswith(layer) for layer in layers_to_train]):
            parameter.requires_grad_(False)
    return_layers = {'layer1': '0', 'layer2': '1', 'layer3': '2', 'layer4': '3'}
    in_channels_stage2 = backbone.inplanes // 8
    in_channels_list = [
        in_channels_stage2,
        in_channels_stage2 * 2,
        in_channels_stage2 * 4,
        in_channels_stage2 * 8,
    ]
    out_channels = 256
    net = BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels)
    net = FeatureFusionForFPN(net)

    # Initialize only the fusion layers; the (possibly pretrained) backbone
    # keeps its weights — hence no_init=True in init_net below.
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            init.normal_(m.weight.data, 0.0, 0.02)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:  # BatchNorm weight is a vector; use a normal distribution
            init.normal_(m.weight.data, 1.0, 0.02)
            init.constant_(m.bias.data, 0.0)
    for submodule in net.children():
        if submodule.__class__.__name__ != "BackboneWithFPN":
            submodule.apply(init_func)
    return init_net(net, use_distributed, gpu_id, no_init=True)
def cell_seg_head(use_distributed, gpu_id):
    """Construct and initialize the cell bounding-box segmentation head."""
    return init_net(Cell_Bbox_Seg(), use_distributed, gpu_id)
def cell_loc_head(rows_classes, cols_classes, img_h, img_w, alpha, device, use_distributed, gpu_id):
    """Construct and initialize the cell logical-location prediction head."""
    head = Cell_Lloc_Pre(rows_classes, cols_classes, img_h, img_w, alpha, device)
    return init_net(head, use_distributed, gpu_id)
##############################################################################
# Classes
##############################################################################
class OrdinalRegressionLoss(nn.Module):
    """Ordinal-regression loss with a focal weighting term.

    An integer target in [0, num_class-1] is expanded into num_class-1
    binary "is the value >= k" decisions; a focal cross-entropy is applied
    over the two-way scores predicted for each decision, and the mean over
    all decisions is returned.
    """

    def __init__(self, num_class, gamma=None):
        """num_class: number of ordinal classes; gamma: focal exponent."""
        super(OrdinalRegressionLoss, self).__init__()
        self.num_class = num_class
        self.gamma = torch.as_tensor(gamma, dtype=torch.float32)

    def _create_ordinal_label(self, gt):
        """Expand integer targets ``gt`` into two-way ordinal labels.

        Returns (ord_label, gamma_i): ord_label has shape
        gt.shape + [num_class-1, 2] with channel 0 set where the target
        exceeds the threshold index; gamma_i broadcasts the focal exponent
        to the same shape.
        """
        gamma_i = torch.ones(list(gt.shape) + [self.num_class - 1]) * self.gamma
        gamma_i = gamma_i.to(gt.device)
        gamma_i = torch.stack([gamma_i, gamma_i], -1)
        ord_c0 = torch.ones(list(gt.shape) + [self.num_class - 1]).to(gt.device)
        # One threshold per ordinal decision: 0, 1, ..., num_class-2.
        thresholds = torch.zeros(list(gt.shape) + [self.num_class - 1]) + torch.linspace(0, self.num_class - 2, self.num_class - 1, requires_grad=False)
        thresholds = thresholds.contiguous().long().to(gt.device)
        above = (thresholds >= gt.unsqueeze(len(gt.shape)))
        ord_c0[above] = 0
        ord_c1 = 1 - ord_c0
        ord_label = torch.stack([ord_c0, ord_c1], -1)
        return ord_label.long(), gamma_i

    def __call__(self, prediction, target):
        """Return the mean focal ordinal loss for ``prediction`` vs ``target``."""
        ord_label, gamma_i = self._create_ordinal_label(target)
        score = F.softmax(prediction, dim=-1)
        log_score = F.log_softmax(prediction, dim=-1)
        # Focal cross-entropy: down-weight confident, already-correct decisions.
        focal_ce = -ord_label * torch.pow((1 - score), gamma_i) * log_score
        focal_ce = focal_ce.view(-1, 2, (self.num_class - 1) * 2)
        per_item = torch.sum(focal_ce, dim=-1)
        return per_item.mean()
class BackboneWithFPN(nn.Module):
    """Attach a FeaturePyramidNetwork to selected backbone layers.

    Adapted from torchvision's backbone_utils, but without the extra
    ``LastLevelMaxPool`` block on the FPN.
    """

    def __init__(self, backbone, return_layers, in_channels_list, out_channels):
        super(BackboneWithFPN, self).__init__()
        # IntermediateLayerGetter re-exposes the requested backbone layers
        # as an OrderedDict of feature maps keyed by `return_layers` values.
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
        self.fpn = FeaturePyramidNetwork(
            in_channels_list=in_channels_list,
            out_channels=out_channels,
        )
        self.out_channels = out_channels

    def forward(self, x):
        # Backbone features first, then merge them through the FPN.
        return self.fpn(self.body(x))
class FeatureFusionForFPN(nn.Module):
    """Fuse the four FPN pyramid levels into one dense feature map.

    The four 256-channel FPN outputs are merged top-down (layer4 -> layer1)
    with 3x3 smoothing convolutions, the coarser results are upsampled to
    the finest level, all four maps are concatenated (4 * 256 = 1024
    channels), and the result is resized to the input image resolution.
    """
    def __init__(self, backbone):
        super(FeatureFusionForFPN, self).__init__()
        # Backbone returning an OrderedDict with keys '0'..'3' of
        # 256-channel feature maps at decreasing resolution.
        self.fpn_backbone = backbone
        # Per-level activations. BatchNorm is deliberately commented out.
        self.layer1_bn_relu = nn.Sequential(
            #nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        self.layer2_bn_relu = nn.Sequential(
            #nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        self.layer3_bn_relu = nn.Sequential(
            #nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        self.layer4_bn_relu = nn.Sequential(
            #nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        # 3x3 smoothing convs applied after each top-down merge step.
        self.smooth1 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        self.smooth2 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        self.smooth3 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
    def _upsample(self, x, y, scale=1):
        """Bilinearly resize ``x`` to ``y``'s spatial size (divided by ``scale``)."""
        _, _, H, W = y.size()
        #return F.upsample(x, size=(H // scale, W // scale), mode='bilinear')
        return nn.functional.interpolate(x, size=(H // scale, W // scale), mode='bilinear', align_corners=False)
    def _upsample_add(self, x, y):
        """Resize ``x`` to ``y``'s spatial size and add them element-wise."""
        _, _, H, W = y.size()
        #return F.upsample(x, size=(H, W), mode='bilinear') + y
        return nn.functional.interpolate(x, size=(H, W), mode='bilinear', align_corners=False) + y
    def forward(self, x):
        fpn_outputs = self.fpn_backbone(x)
        #print(fpn_outputs['0'].shape,fpn_outputs['1'].shape,fpn_outputs['2'].shape)
        # the output of a group of fpn feature:
        # [('0', torch.Size([1, 256, 128, 128])),
        #  ('1', torch.Size([1, 256, 64, 64])),
        #  ('2', torch.Size([1, 256, 32, 32])),
        #  ('3', torch.Size([1, 256, 16, 16]))]
        layer1 = self.layer1_bn_relu(fpn_outputs['0'])
        layer2 = self.layer2_bn_relu(fpn_outputs['1'])
        layer3 = self.layer3_bn_relu(fpn_outputs['2'])
        layer4 = self.layer4_bn_relu(fpn_outputs['3'])
        # Top-down pathway: merge coarse features into the next finer level.
        fusion4_3 = self.smooth1(self._upsample_add(layer4, layer3))
        fusion4_2 = self.smooth2(self._upsample_add(fusion4_3, layer2))
        fusion4_1 = self.smooth3(self._upsample_add(fusion4_2, layer1))
        # Bring every map to the finest (fusion4_1) resolution before concat.
        fusion4_2 = self._upsample(fusion4_2, fusion4_1)
        fusion4_3 = self._upsample(fusion4_3, fusion4_1)
        layer4 = self._upsample(layer4, fusion4_1)
        #fusion4_3 = self._upsample(fusion4_3, fusion4_2)
        #layer4 = self._upsample(layer4, fusion4_2)
        inter_feat = torch.cat((fusion4_1, fusion4_2, fusion4_3, layer4), 1) # [N, 1024, H, W]
        inter_feat = self._upsample(inter_feat, x) # [N, 1024, x_h, x_w]
        #inter_feat = torch.cat((fusion4_2, fusion4_3, layer4), 1) # [N, 1024, H, W]
        #inter_feat = self._upsample(inter_feat, x) # [N, 1024, x_h, x_w]
        return inter_feat
class Cell_Bbox_Seg(nn.Module):
    """Segmentation head that predicts a per-pixel class map over the table
    image and derives cell bounding boxes from it.

    A 2-D prediction is fused (1x1 conv) with row- and column-pooled 1-D
    predictions expanded back to 2-D; cell boxes are then extracted from
    the argmax mask via OpenCV contours.
    """
    def __init__(self, in_channels = 1024, num_classes=3):
        super(Cell_Bbox_Seg, self).__init__()
        # Reduce the fused FPN features to a 256-channel decode map.
        self.decode_out = nn.Sequential(
            nn.Conv2d(in_channels, 256, kernel_size=3, stride=1, padding=1),
            #nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        # Row branch: vertical (3x1) convs over the width-averaged map.
        self.row_out = nn.Sequential(
            nn.Conv2d(256, 64, kernel_size=(3,1), stride=1, padding=(1,0)),
            #nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=(3,1), stride=1, padding=(1,0)),
            #nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, num_classes, kernel_size=1, stride=1),
            #nn.BatchNorm2d(num_classes),
            nn.LeakyReLU(inplace=True)
        )
        # Column branch: horizontal (1x3) convs over the height-averaged map.
        self.col_out = nn.Sequential(
            nn.Conv2d(256, 64, kernel_size=(1,3), stride=1, padding=(0,1)),
            #nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=(1,3), stride=1, padding=(0,1)),
            #nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, num_classes, kernel_size=1, stride=1),
            #nn.BatchNorm2d(num_classes),
            nn.LeakyReLU(inplace=True)
        )
        # Full 2-D segmentation branch.
        self.twodim_out = nn.Sequential(
            nn.Conv2d(256, 64, kernel_size=3, stride=1, padding=1),
            #nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            #nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, num_classes, kernel_size=1, stride=1),
            #nn.BatchNorm2d(num_classes),
            nn.LeakyReLU(inplace=True)
        )
        # 1x1 conv merging the 2-D, row and column predictions.
        self.fusion = nn.Sequential(
            nn.Conv2d(num_classes*3, num_classes, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(inplace=True)
        )
    def postprocess(self, row_pred, col_pred, seg_pred, table_names=None):
        """Turn segmentation logits into per-image lists of cell boxes.

        Takes the argmax class map, keeps class-1 pixels as a binary mask
        and returns, per image, a float tensor of boxes padded by 2px
        around each external contour found by OpenCV.  ``row_pred`` and
        ``col_pred`` are unused by the active code path (kept for the
        commented-out alternative).
        """
        #pred_mat = torch.argmax(row_pred,dim=1) * torch.argmax(col_pred,dim=1)
        pred_mat = torch.argmax(seg_pred,dim=1)
        pred_mat = pred_mat.data.cpu().int().numpy()
        # Clamp any stray labels above 2 down to class 2.
        pred_mat[np.where(pred_mat>2)] = 2
        pred_mask = np.where(pred_mat == 1, 255, 0).astype('uint8')
        #self.vis_seg(pred_mask, table_names, '/data/xuewenyuan/dev/tablerec/results/delet_vis')
        N, H, W = pred_mask.shape
        batch_bboxes = []
        for ind in range(N):
            contours = cv2.findContours(pred_mask[ind].copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
            # 2px margin around every contour's extremes.
            bboxes = [[ct[:,:,0].min()-2, ct[:,:,1].min()-2, ct[:,:,0].max()+2, ct[:,:,1].max()+2] for ct in contours]
            bboxes = torch.as_tensor(bboxes).to(torch.float32)
            batch_bboxes.append(bboxes)
        return batch_bboxes
    def vis_seg(self, label_mat, table_names, vis_path):
        """Debug helper: write the class map of each table as a color PNG
        (class 0 -> blue, 1 -> green, 2 -> red in BGR order) to ``vis_path``."""
        if not os.path.exists(vis_path):
            os.makedirs(vis_path)
        batch_size = len(table_names)
        for ind in range(batch_size):
            vis_mat = np.zeros((label_mat[ind].shape[0],label_mat[ind].shape[1],3),dtype=np.int32)
            vis_mat[np.where(label_mat[ind] == 0)] = np.array([255,0,0],dtype=np.int32)
            vis_mat[np.where(label_mat[ind] == 1)] = np.array([0,255,0],dtype=np.int32)
            vis_mat[np.where(label_mat[ind] == 2)] = np.array([0,0,255],dtype=np.int32)
            cv2.imwrite(os.path.join(vis_path,table_names[ind]+'_pred.png'), vis_mat.astype('uint8'))
    def forward(self, input):
        """Return (row_pred, col_pred, fused seg_pred, detected cell boxes)."""
        decode_feat = self.decode_out(input)
        #decode_feat = nn.functional.interpolate(decode_feat, size=(src_img_shape[2], src_img_shape[3]), mode='bilinear', align_corners=False)
        seg_pred = self.twodim_out(decode_feat)
        # Pool across width (rows) / height (cols), predict, then expand back.
        row_pred = self.row_out(torch.mean(decode_feat, 3, True))
        col_pred = self.col_out(torch.mean(decode_feat, 2, True))
        row_expand = torch.repeat_interleave(row_pred, input.shape[3], dim = 3)
        col_expand = torch.repeat_interleave(col_pred, input.shape[2], dim = 2)
        seg_pred = self.fusion(torch.cat((seg_pred,row_expand,col_expand),1))
        #det_bboxes = self.postprocess(row_pred, col_pred, None)
        det_bboxes = self.postprocess(None, None, seg_pred)
        return row_pred, col_pred, seg_pred, det_bboxes
class Cell_Lloc_Pre(nn.Module):
    """Predict the logical location (start/end row and column indices) of
    table cells.

    Cell appearance (RoI-aligned CNN features) and geometry (embedded box
    coordinates) are fused and propagated through two graph convolutions —
    one weighted by row affinity, one by column affinity — before ordinal
    classification heads score the row/column index pairs for each cell.
    """
    def __init__(self, rows_classes, cols_classes, img_h, img_w, alpha, device,
                 in_channels = 1024, cnn_emb_feat = 512, box_emb_feat = 256, gcn_out_feat = 512,
                 cell_iou_thresh = 0.5, min_cells_percent = 1.0):
        super(Cell_Lloc_Pre, self).__init__()
        # IoU above which a predicted box replaces its matched GT box during training.
        self.cell_iou_thresh = cell_iou_thresh
        # Fraction of (remaining) GT cells randomly kept for training.
        self.min_cells_percent = min_cells_percent
        self.img_h = img_h
        self.img_w = img_w
        self.device = device
        self.rows_classes = rows_classes
        self.cols_classes = cols_classes
        # Temperature controlling how fast row/col edge affinity decays.
        self.alpha = alpha
        self.decode_out = nn.Sequential(
            nn.Conv2d(in_channels, 256, kernel_size=3, stride=1, padding=1),
            #nn.BatchNorm2d(256,affine=False),
            nn.ReLU(inplace=True)
        )
        # Embeds the flattened 2x2 RoI-aligned features (256*2*2).
        self.cnn_emb = nn.Sequential(
            nn.Linear(256*2*2, cnn_emb_feat),
            #nn.BatchNorm1d(cnn_emb_feat,affine=False),
            nn.ReLU(inplace=True)
        )
        # Embeds the 4-d relative box geometry (cx, cy, w, h).
        self.box_emb = nn.Sequential(
            nn.Linear(4, box_emb_feat),
            #nn.BatchNorm1d(box_emb_feat,affine=False),
            nn.ReLU(inplace=True)
        )
        self.gconv_row = GCNConv(cnn_emb_feat+box_emb_feat, gcn_out_feat)
        self.gconv_col = GCNConv(cnn_emb_feat+box_emb_feat, gcn_out_feat)
        # 2 ordinal targets (start/end) x (classes-1) decisions x 2-way scores.
        self.row_cls = nn.Sequential(
            nn.Linear(gcn_out_feat, 2*(rows_classes-1)*2),
            #nn.BatchNorm1d(2*(rows_classes-1)*2,affine=False),
            nn.LeakyReLU(inplace=True)
        )
        self.col_cls = nn.Sequential(
            nn.Linear(gcn_out_feat, 2*(cols_classes-1)*2),
            #nn.BatchNorm1d(2*(cols_classes-1)*2,affine=False),
            nn.LeakyReLU(inplace=True)
        )
    def get_box_feat(self, cell_boxes):
        """Stack all images' boxes and return (cx, cy, w, h) normalized by
        the configured image width/height."""
        # roi_bboxes: List(Tensor(x1,y1,x2,y2))
        # image_shapes: [N,C,H,W]
        boxes = torch.cat(cell_boxes, dim=0)
        box_w = boxes[:,2]-boxes[:,0]
        box_h = boxes[:,3]-boxes[:,1]
        ctr_x = (boxes[:,2]+boxes[:,0])/2
        ctr_y = (boxes[:,3]+boxes[:,1])/2
        #rel_x = torch.log(ctr_x/self.img_w)
        #rel_y = torch.log(ctr_y/self.img_h)
        #rel_w = torch.log(box_w/self.img_w)
        #rel_h = torch.log(box_h/self.img_h)
        rel_x = ctr_x/self.img_w
        rel_y = ctr_y/self.img_h
        rel_w = box_w/self.img_w
        rel_h = box_h/self.img_h
        boxes_feat = torch.stack((rel_x,rel_y,rel_w,rel_h),dim=1)
        return boxes_feat
    def edge_weight(self, edge_ind, cell_boxes, im_scale, pdl, pdt):
        """Compute per-edge row/column affinities between cell pairs.

        Boxes are first mapped back to original-image coordinates (undoing
        the left/top padding ``pdl``/``pdt`` and the resize ``im_scale``);
        affinity is a Gaussian of the vertical (row) / horizontal (col)
        center distance scaled by ``alpha`` over the table extent.
        """
        assert cell_boxes.size(1) == 4
        assert edge_ind.size(1) == 2
        org_box = (cell_boxes - torch.stack((pdl,pdt)*2))/im_scale
        centr_x1 = (org_box[edge_ind[:,0],0] + org_box[edge_ind[:,0],2]) / 2
        centr_y1 = (org_box[edge_ind[:,0],1] + org_box[edge_ind[:,0],3]) / 2
        centr_x2 = (org_box[edge_ind[:,1],0] + org_box[edge_ind[:,1],2]) / 2
        centr_y2 = (org_box[edge_ind[:,1],1] + org_box[edge_ind[:,1],3]) / 2
        tb_w = org_box[:,[0,2]].max()
        tb_h = org_box[:,[1,3]].max()
        row_attr = torch.exp(-(torch.square((centr_y1-centr_y2)*self.alpha/tb_h)))
        col_attr = torch.exp(-(torch.square((centr_x1-centr_x2)*self.alpha/tb_w)))
        return row_attr, col_attr
    def build_graph(self, cell_boxes, im_scales, pdls, pdts):
        """Build one fully-connected (minus self-loops) graph per image,
        annotated with row/col edge affinities, and batch them together."""
        #device = roi_bboxes[0].device
        num_images = len(cell_boxes)
        graphs = []
        for img_id in range(num_images):
            edge_ind = []
            num_nodes = cell_boxes[img_id].shape[0]
            # Fully-connected directed graph without self-loops.
            for n1 in range(num_nodes):
                for n2 in range(num_nodes):
                    if n1 == n2: continue
                    edge_ind.append([n1,n2])
            edge_ind = torch.as_tensor(edge_ind, dtype=torch.int64)
            #print(edge_ind.t())
            #edge_attr = self.edge_weight(edge_ind,cell_boxes[img_id], im_scales[img_id], pdls[img_id], pdts[img_id])
            row_attr, col_attr = self.edge_weight(edge_ind,cell_boxes[img_id], im_scales[img_id], pdls[img_id], pdts[img_id])
            #row_attr, col_attr, row_edge, col_edge = self.edge_weight(edge_ind,cell_boxes[img_id], im_scales[img_id], pdls[img_id], pdts[img_id])
            tb_graph = GraphData(edge_index=edge_ind.t(), num_nodes = num_nodes)
            tb_graph.row_attr = row_attr
            tb_graph.col_attr = col_attr
            #tb_graph.row_edge = row_edge.t()
            #tb_graph.col_edge = col_edge.t()
            graphs.append(tb_graph)
        graphs = GraphBatch.from_data_list(graphs).to(self.device)
        #print('graph')
        #print(graphs.edge_index, graphs.edge_attr)
        return graphs
    def filter_box(self, pred_boxes, gt_boxes):
        """Mix predicted and ground-truth boxes for training.

        GT boxes whose best prediction exceeds ``cell_iou_thresh`` are
        replaced by that prediction; a random subset of the remaining GT
        boxes (sized by ``min_cells_percent``) is kept.  Returns the
        per-image box lists and the flat indices (into the concatenated GT
        ordering) of the boxes that were kept.
        """
        batch_size = len(gt_boxes)
        train_boxes = []
        train_inds = []
        count = 0
        for b_ind in range(batch_size):
            if pred_boxes[b_ind].size(0) != 0:
                match_quality_matrix = box_ops.box_iou(pred_boxes[b_ind], gt_boxes[b_ind])
                # find best pred candidate for each gt
                matched_val, matched_ind = match_quality_matrix.max(dim=0)
                rm_gts = torch.where(matched_val>self.cell_iou_thresh)[0]
            else:
                rm_gts = torch.Tensor([])
            res_ind = torch.as_tensor([ i for i in range(gt_boxes[b_ind].size(0)) if (i not in rm_gts)], dtype=torch.int32)
            #res_gt_boxes = gt_boxes[b_ind][res_ind]
            num_preserved = ((torch.rand((1,))+self.min_cells_percent*10)/10*gt_boxes[b_ind].shape[0]).to(torch.int32) # [0.9 ~ 1)
            num_preserved = max(num_preserved - rm_gts.shape[0], 0)
            preserved_ind = torch.randperm(len(res_ind))[:num_preserved]
            #pred_ind = matches[rm_gts]
            #select_boxes = torch.cat((res_gt_boxes[preserved_ind],pred_boxes[b_ind][pred_ind]), dim=0)
            #train_boxes.append(select_boxes)
            boxes = []
            for box_i in range(gt_boxes[b_ind].size(0)):
                if box_i in res_ind[preserved_ind]:
                    boxes.append(gt_boxes[b_ind][box_i])
                    train_inds.append(count+box_i)
                elif box_i in rm_gts:
                    pred_ind = matched_ind[box_i]
                    boxes.append(pred_boxes[b_ind][pred_ind])
                    train_inds.append(count+box_i)
            train_boxes.append(torch.stack(boxes,dim=0))
            count += gt_boxes[b_ind].size(0)
        return train_boxes, train_inds
    def forward(self, input, pred_cell_boxes, im_scales, pdls, pdts, gt_cell_boxes=None):
        """Score the ordinal row/column locations for every cell box.

        Returns (cls_row_score, cls_col_score, train_inds) where the score
        tensors have shape [num_cells, 2, classes-1, 2] and ``train_inds``
        is non-None only when both predicted and GT boxes were supplied.
        """
        train_inds = None
        # Choose which boxes feed the graph: mixed (training), GT-only or pred-only.
        if (gt_cell_boxes is not None) and (pred_cell_boxes is not None):
            cell_boxes, train_inds = self.filter_box(pred_cell_boxes, gt_cell_boxes)
        elif (pred_cell_boxes is None) and (gt_cell_boxes is not None):
            cell_boxes = []
            for img_boxes in gt_cell_boxes:
                num_node = img_boxes.size(0)
                # Pad with dummy boxes so every graph has at least 2 nodes.
                if num_node < 2:
                    cell_boxes.append(torch.cat((img_boxes, torch.as_tensor([[0,0,0,0]]*(2-num_node)).to(torch.float32).to(img_boxes.device)),0))
                else:
                    cell_boxes.append(img_boxes)
        elif (gt_cell_boxes is None) and (pred_cell_boxes is not None):
            cell_boxes = []
            for img_boxes in pred_cell_boxes:
                num_node = img_boxes.size(0)
                if num_node < 2:
                    cell_boxes.append(torch.cat((img_boxes, torch.as_tensor([[0,0,0,0]]*(2-num_node)).to(torch.float32).to(img_boxes.device)),0))
                else:
                    cell_boxes.append(img_boxes)
        box_feat = self.get_box_feat(cell_boxes)
        box_feat = self.box_emb(box_feat).to(self.device)
        decode_feat = self.decode_out(input)
        bbox_count = [i.shape[0] for i in cell_boxes]
        cnn_feat = ops.roi_align(decode_feat, cell_boxes, 2) #[num_node, 256, 2, 2]
        cnn_feat = self.cnn_emb(cnn_feat.view(cnn_feat.size(0), -1))
        graphs = self.build_graph(cell_boxes, im_scales, pdls, pdts)
        fusion_feat = torch.cat([box_feat, cnn_feat], dim=1)
        # Row- and column-specific message passing over the same topology.
        row_feat = self.gconv_row(fusion_feat, graphs.edge_index, graphs.row_attr)
        row_feat = F.relu(row_feat)
        col_feat = self.gconv_col(fusion_feat, graphs.edge_index, graphs.col_attr)
        col_feat = F.relu(col_feat)
        cls_row_score = self.row_cls(row_feat)
        cls_col_score = self.col_cls(col_feat)
        #cls_row_score = torch.reshape(cls_row_score, (cls_row_score.size(0), self.rows_classes, 2))
        #cls_col_score = torch.reshape(cls_col_score, (cls_col_score.size(0), self.cols_classes, 2))
        cls_row_score = torch.reshape(cls_row_score, (cls_row_score.size(0), 2, self.rows_classes-1, 2))
        cls_col_score = torch.reshape(cls_col_score, (cls_col_score.size(0), 2, self.cols_classes-1, 2))
        return cls_row_score, cls_col_score, train_inds
|
[
"15120452@bjtu.edu"
] |
15120452@bjtu.edu
|
a1d4abecf6810db269fd34712f27f857a5b34edc
|
7f57aa11f807c1d440b46c94bb4b493a1540ce55
|
/SIIC/usuarios/urls.py
|
16968d1a51ba2bd1444fb173db896c6bd0f0f306
|
[] |
no_license
|
Aletrip-dev/OPE2
|
0a498a266bd78d16a0568686004023d72e57c506
|
9ed21a69c30d6c376a6a0c05ff0586f2a747aacf
|
refs/heads/master
| 2023-04-20T10:44:58.204839
| 2021-05-15T20:44:42
| 2021-05-15T20:44:42
| 343,137,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
from django.urls import path
# Django's built-in authentication views.
# (imported under an alias so they do not clash with this app's own views)
from django.contrib.auth import views as auth_views
from .views import UsuarioCreate, PerfilUpdate, alterar_senha

# URL routes for user authentication and profile management.
urlpatterns = [
    path('login/', auth_views.LoginView.as_view(
        template_name='usuarios/login.html'
    ), name='login'),
    path('logout/', auth_views.LogoutView.as_view(), name='logout'),
    path('registrar/', UsuarioCreate.as_view(), name='registrar'),
    path('atualizar-dados/', PerfilUpdate.as_view(), name='atualizar-dados'),
    path('alterar-senha/', alterar_senha, name='alterar-senha'),
]
|
[
"aletrip@msn.com"
] |
aletrip@msn.com
|
e1c496b08d51cf84a89f1bf972424ec6ed19e4fc
|
371ec3b00088f54ef703899048c308ff81e7b78b
|
/python基础-06/08-数据读取.py
|
dbcbfe52ce9a33bd2e44bebd13cb963de1b41121
|
[] |
no_license
|
zhangninggit/pythonjichu
|
d1fd428688d05e1f34f565e0bfd530d33f65e7cb
|
954c935dfcdc8674d488cfa599934bf2e2be79c2
|
refs/heads/master
| 2020-04-04T05:19:36.417438
| 2018-11-02T05:16:43
| 2018-11-02T05:16:43
| 155,733,621
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65
|
py
|
# Read and print the entire contents of "xxx.txt".
# BUG FIX: use a context manager so the file handle is closed even if
# read() raises, instead of the bare open()/close() pair.
with open("xxx.txt", "r") as f:
    co = f.read()
print(co)
|
[
"zhangning2017@163.com"
] |
zhangning2017@163.com
|
1d0f92dbe95dbd0d77497c536f85a8ee3cac119f
|
7504e3c8400986b8d72227a9ae6d084a2eb9de09
|
/acs/cli.py
|
a8ed40bfa7fe927c66f9bba4e722cfba7b0100ea
|
[
"Apache-2.0"
] |
permissive
|
keikhara/acs-cli
|
848b288e009591650e726fb7784e3acff5d0fc49
|
b0d53679f7642c655bfb84ea3e7918b16af7dc25
|
refs/heads/master
| 2020-12-11T07:18:57.236131
| 2016-07-26T05:59:26
| 2016-07-26T05:59:26
| 65,325,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,368
|
py
|
"""
acs
Usage:
acs [--config-file=<file>] [--version] [--help] <command> [<args>...]
Options:
-h --help Show this help.
Commands:
service Create and manage Azure Container Service
docker Send docker commands to the cluster
afs Add the Azure Files Docker volume driver to each agent
oms Add or configure Operational Management Suite monitoring
See `acs <command> --help` for information on a specific command.
Help:
For help using this tool please open an issue on the GitHub repository:
https://github.com/rgardler/acs-scripts
"""
from . import __version__ as VERSION
from acs.commands.base import Config
from docopt import docopt
from inspect import getmembers, isclass
import os.path
import sys
def main():
    """Main CLI entrypoint.

    Parses the top-level arguments with docopt, loads the config file,
    then instantiates and runs the command class whose name contains
    ``<command>`` (case-insensitive) from the matching commands submodule.

    Raises:
        Exception -- when no class in the submodule matches the command name.
    """
    from . import commands
    args = docopt(__doc__, version=VERSION, options_first=True)
    config = Config(args['--config-file'])
    command_name = args["<command>"]
    argv = args['<args>']
    module = getattr(commands, command_name)
    # BUG FIX: the member list used to be assigned to `commands`, shadowing
    # the commands package imported above; use a distinct name.
    command_classes = getmembers(module, isclass)
    command = None
    # NOTE: deliberately no break — if several classes match, the last
    # match wins, preserving the original behavior.
    for _, command_class in command_classes:
        if command_name.lower() in command_class.__name__.lower():
            command = command_class(config, argv)
    if command is None:
        raise Exception("Unrecognized command: " + command_name)
    command.run()
|
[
"ross@gardler.org"
] |
ross@gardler.org
|
0f14427239d695935ab94dbda667716278b75906
|
4b3e7fba33d7f93b229d676df9e2c52307e884a1
|
/velruse/views/facebook.py
|
94e927f03518a57de014eca52e63d1dee2722b54
|
[
"MIT"
] |
permissive
|
stoiczek/velruse
|
5b5e82e098850a0dc99d1d04f201051178f58769
|
79f219cfd75423cf16b2c327abec303d0edea547
|
refs/heads/master
| 2020-12-25T16:13:48.048229
| 2011-11-09T11:49:12
| 2011-11-09T11:49:12
| 2,372,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,074
|
py
|
"""Facebook Authentication Views"""
import uuid
from urlparse import parse_qs
from pyramid.httpexceptions import HTTPFound
from simplejson import loads
import requests
from velruse.exceptions import AuthenticationComplete
from velruse.exceptions import AuthenticationDenied
from velruse.exceptions import CSRFError
from velruse.exceptions import ThirdPartyFailure
from velruse.parsers import extract_fb_data
from velruse.utils import flat_url
def includeme(config):
    """Register the Facebook login and process routes with their views."""
    routes = (
        ("facebook_login", "/facebook/login", facebook_login),
        ("facebook_process", "/facebook/process", facebook_process),
    )
    for route_name, pattern, view in routes:
        config.add_route(route_name, pattern)
        config.add_view(view, route_name=route_name)
def facebook_login(request):
    """Start the Facebook OAuth flow by redirecting to the dialog URL.

    Stores a random `state` token in the session so the callback view can
    detect CSRF, then sends the browser to Facebook's OAuth dialog.
    """
    settings = request.registry.settings
    # Scope comes from deployment config, falling back to the POSTed value.
    scope = settings.get('velruse.facebook.scope',
                         request.POST.get('scope', ''))
    state = uuid.uuid4().hex
    request.session['state'] = state
    auth_url = flat_url('https://www.facebook.com/dialog/oauth/', scope=scope,
                        client_id=settings['velruse.facebook.app_id'],
                        redirect_uri=request.route_url('facebook_process'),
                        state=state)
    return HTTPFound(location=auth_url)
def facebook_process(request):
    """Process the Facebook OAuth redirect (callback leg of the flow).

    Validates the CSRF ``state`` token, exchanges the ``code`` query
    parameter for an access token, fetches the user's profile from the
    Graph API, and signals success by *raising* AuthenticationComplete.

    Raises:
        CSRFError: request state does not match the session state.
        AuthenticationDenied: Facebook returned no authorization code.
        ThirdPartyFailure: a Graph API call returned a non-200 status.
        AuthenticationComplete: on success (carries profile/credentials).
    """
    # The state token was placed in the session by facebook_login().
    if request.GET.get('state') != request.session.get('state'):
        raise CSRFError("CSRF Validation check failed. Request state %s is "
                        "not the same as session state %s" % (
                            request.GET.get('state'), request.session.get('state')
                        ))
    config = request.registry.settings
    code = request.GET.get('code')
    if not code:
        # User denied access (or Facebook otherwise withheld the code).
        reason = request.GET.get('error_reason', 'No reason provided.')
        raise AuthenticationDenied(reason)
    # Now retrieve the access token with the code
    access_url = flat_url('https://graph.facebook.com/oauth/access_token',
                          client_id=config['velruse.facebook.app_id'],
                          client_secret=config['velruse.facebook.app_secret'],
                          redirect_uri=request.route_url('facebook_process'),
                          code=code)
    r = requests.get(access_url)
    if r.status_code != 200:
        raise ThirdPartyFailure("Status %s: %s" % (r.status_code, r.content))
    # The token endpoint answers with a querystring-encoded body.
    access_token = parse_qs(r.content)['access_token'][0]
    # Retrieve profile data from the Graph API using the fresh token.
    graph_url = flat_url('https://graph.facebook.com/me',
                         access_token=access_token)
    r = requests.get(graph_url)
    if r.status_code != 200:
        raise ThirdPartyFailure("Status %s: %s" % (r.status_code, r.content))
    fb_profile = loads(r.content)
    profile = extract_fb_data(fb_profile)
    # Create and raise our AuthenticationComplete exception with the
    # appropriate data to be passed (velruse uses exceptions for flow control).
    complete = AuthenticationComplete()
    complete.profile = profile
    complete.credentials = { 'oauthAccessToken': access_token }
    raise complete
|
[
"ben@groovie.org"
] |
ben@groovie.org
|
6c10278bce7d441831f59503418233abcba5dee8
|
17c14b758959cdceec0dce8f783346fdeee8e111
|
/chap05_nlp/automl/train.py
|
bca8b1fd41ce03b243523430bdc8d09621f7daa4
|
[] |
no_license
|
yurimkoo/tensormsa_jupyter
|
b0a340119339936d347d12fbd88fb017599a0029
|
0e75784114ec6dc8ee7eff8094aef9cf37131a5c
|
refs/heads/master
| 2021-07-18T12:22:31.396433
| 2017-10-25T01:42:24
| 2017-10-25T01:42:24
| 109,469,220
| 1
| 0
| null | 2017-11-04T05:20:15
| 2017-11-04T05:20:15
| null |
UTF-8
|
Python
| false
| false
| 3,650
|
py
|
"""
Utility used by the Network class to actually train.
Based on:
https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
"""
from keras.datasets import mnist, cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
# Helper: Early stopping.
early_stopper = EarlyStopping(patience=5)
def get_cifar10():
    """Load CIFAR-10, flatten the images, and one-hot encode the labels.

    Returns:
        tuple: (nb_classes, batch_size, input_shape,
                x_train, x_test, y_train, y_test)
    """
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)

    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    # Flatten each 32x32x3 image into a 3072-wide float vector in [0, 1].
    x_train = x_train.reshape(50000, 3072).astype('float32') / 255
    x_test = x_test.reshape(10000, 3072).astype('float32') / 255

    # One-hot encode the integer class labels.
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def get_mnist():
    """Load MNIST, flatten the images, and one-hot encode the labels.

    Returns:
        tuple: (nb_classes, batch_size, input_shape,
                x_train, x_test, y_train, y_test)
    """
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Flatten each 28x28 image into a 784-wide float vector in [0, 1].
    x_train = x_train.reshape(60000, 784).astype('float32') / 255
    x_test = x_test.reshape(10000, 784).astype('float32') / 255

    # One-hot encode the integer class labels.
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def compile_model(network, nb_classes, input_shape):
    """Build and compile a dense network from a parameter dictionary.

    Args:
        network (dict): architecture parameters — 'nb_layers',
            'nb_neurons', 'activation' and 'optimizer'.
        nb_classes (int): number of output classes (softmax width).
        input_shape (tuple): shape of one flattened input sample.

    Returns:
        A compiled Keras Sequential model.
    """
    model = Sequential()
    for layer_index in range(network['nb_layers']):
        dense_kwargs = {'activation': network['activation']}
        # Only the first layer declares the input shape.
        if layer_index == 0:
            dense_kwargs['input_shape'] = input_shape
        model.add(Dense(network['nb_neurons'], **dense_kwargs))
        model.add(Dropout(0.2))  # hard-coded dropout rate

    # Output layer.
    model.add(Dense(nb_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=network['optimizer'],
                  metrics=['accuracy'])
    return model
def train_and_score(network, dataset):
    """Train the model described by `network` and return its test accuracy.

    Args:
        network (dict): the parameters of the network
        dataset (str): dataset to use for training/evaluating —
            'cifar10' or 'mnist'.

    Returns:
        float: accuracy on the held-out test set.

    Raises:
        ValueError: if `dataset` is not a recognized dataset name.
    """
    if dataset == 'cifar10':
        nb_classes, batch_size, input_shape, x_train, \
            x_test, y_train, y_test = get_cifar10()
    elif dataset == 'mnist':
        nb_classes, batch_size, input_shape, x_train, \
            x_test, y_train, y_test = get_mnist()
    else:
        # Previously an unknown name fell through and crashed later with
        # a NameError on the unbound variables; fail fast instead.
        raise ValueError("Unknown dataset: %r (expected 'cifar10' or 'mnist')"
                         % (dataset,))

    model = compile_model(network, nb_classes, input_shape)

    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=10000,  # using early stopping, so no real limit
              verbose=0,
              validation_data=(x_test, y_test),
              callbacks=[early_stopper])

    score = model.evaluate(x_test, y_test, verbose=0)
    return score[1]  # 1 is accuracy. 0 is loss.
|
[
"tmddno1@naver.com"
] |
tmddno1@naver.com
|
2534efd7cf1a472d4c24db7e37fb628ef53a3a0f
|
9adda6cef38c05c0d6bc4f5d0be25e75500f3406
|
/ques 2 sol.py
|
00f2329450eb86ff204e44c7f8653fbee1abdcff
|
[] |
no_license
|
GLAU-TND/python-programming-assignment4-upadhyay8844
|
09255dd1ef340f7af3ee57e4eee3c671c010d5c4
|
bc5c31d40f03cceebb2c842bdd933e0e73a998a1
|
refs/heads/master
| 2021-05-19T05:26:14.857261
| 2020-04-01T11:43:27
| 2020-04-01T11:43:27
| 251,547,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
def is_dict(var):
    """Return True if `var` is a dict (including dict subclasses).

    The original compared ``str(type(var))`` against the literal
    "<class 'dict'>", which is fragile and rejected dict subclasses such
    as OrderedDict; ``isinstance`` is the idiomatic, more general check.
    """
    return isinstance(var, dict)
def flatten_helper(d, flat_d, path):
    """Recursively copy the leaves of nested dict `d` into `flat_d`.

    Keys written to `flat_d` are the dot-joined key paths to each leaf;
    `path` is the prefix accumulated so far ("" at the top level).
    """
    # Leaf: anything that is not a plain dict gets stored under its path.
    if str(type(d)) != "<class 'dict'>":
        flat_d[path] = d
        return
    for key in d:
        child_path = "{}.{}".format(path, key) if path else key
        flatten_helper(d[key], flat_d, child_path)
def flatten(d):
    """Return a flat dict mapping dot-joined key paths to the leaves of `d`."""
    result = {}
    flatten_helper(d, result, "")
    return result
|
[
"noreply@github.com"
] |
noreply@github.com
|
76795df80dc41e3af67b42a73904e5675010ac9a
|
ccb8b706ca940c86ce96a212f01797586db90158
|
/run
|
67464cba3cc4dd0c42fe1537dee80d4a70807432
|
[] |
no_license
|
ChrisWaites/theorem-prover
|
35e4733309bb2d5fc814a7634a991868712d7940
|
ae7f3fd474c81531dd1d77ebbae2c7ff353e029b
|
refs/heads/master
| 2021-09-10T09:09:50.263916
| 2018-03-23T09:35:26
| 2018-03-23T09:35:26
| 50,534,791
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 980
|
#!/usr/bin/env python
import argparse
from theoremprover.theorem_search import find_theorem, peanos_axioms
from theoremprover.expression import parse
def main(args):
    """Parse the axioms and target theorem, search for a derivation, and
    print it as an arrow-separated chain of expressions."""
    axioms = {parse(axiom) for axiom in args.axioms}
    theorem = parse(args.theorem)
    steps = find_theorem(theorem, axioms)
    # First step on its own line, then "->" before each subsequent step.
    print(steps[0])
    for step in steps[1:]:
        print("->")
        print(step)
def parse_args():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        description='Given a set of axioms attempts to prove or disprove a given theorem using propositional logic and number theory.')
    parser.add_argument(
        '-a', '--axioms', type=str, nargs='+',
        default=set(map(str, peanos_axioms)),
        help="axioms of formal system [default: peano's axioms]")
    parser.add_argument(
        '-t', '--theorem', type=str,
        default="~(Ea((0)=((a)+(1))))",
        help='theorem to be proved or disproved [default: ~(Ea((0)=((a)+(1))))]')
    return parser.parse_args()
# Script entry point: prove or disprove the theorem given on the command line.
if __name__ == "__main__":
    main(parse_args())
|
[
"cwaites3@gatech.edu"
] |
cwaites3@gatech.edu
|
|
a07369e5917c7c67c2f793f80ed4d1023bafb477
|
0b12a3aab3b06a9ff007eaf6daaca0a696be84c1
|
/phase-0/classify.py
|
cdecf02a2e4252e324b6225b05f950a25d64ba8c
|
[] |
no_license
|
fariszahrah/crypto-twitter
|
1cfdaa7db8f14d9a5280d68280690c7aa70162c3
|
2eef22a5e1d71d89d0f1e5f7d344cbb7c929b91d
|
refs/heads/master
| 2021-01-08T23:45:00.693390
| 2020-02-21T16:27:36
| 2020-02-21T16:27:36
| 242,178,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,389
|
py
|
from sklearn.ensemble import RandomForestClassifier
import sklearn as sk
import re
import nltk
from sklearn.feature_extraction.text import *
import pandas as pd
import numpy as np
import pickle
from collections import Counter
'''
the train dataset was created from the following lines of code:
****
Note please dont run this or it will
override the tweets i manually evaluated
****
sample = tweets.sample(frac=1/20,random_state=3)
sample.to_excel('train_tweets.xlsx')
in excel I added a column for target, and that is what I use as the training dataset below
'''
def download_data():
    """Load the labelled training tweets and the unlabelled test tweets.

    Returns:
        tuple: (train DataFrame of tweet objects, target Series of manual
        labels, test DataFrame of tweets to classify). Only the 'text'
        column of the DataFrames is used downstream.
    """
    # Manually-labelled sample exported from Excel.
    train = pd.read_excel('./train_tweets.xlsx')
    # The classifications entered by hand in the 'Target' column.
    target = train['Target']
    # Tweets to classify.
    test = pd.read_pickle('./main_user_tweets.pkl')
    return train, target, test
def predict(train, target, test):
    """Train a TF-IDF + random-forest classifier on `train`, classify `test`.

    Args:
        train (pd.DataFrame): tweets with a 'text' column, labelled by `target`.
        target (pd.Series): manual class labels for each row of `train`.
        test (pd.DataFrame): tweets with a 'text' column to classify.

    Returns:
        np.ndarray: predicted class label for each row of `test`.
    """
    train_texts = [t for t in train['text'].tolist()]
    test_texts = [t for t in test['text'].tolist()]
    # Fit the vectorizer over the combined corpus so train and test share
    # one feature space.
    all_texts = train_texts + test_texts

    tfidf_vectorizer = TfidfVectorizer(min_df=2)
    X_tfidf = tfidf_vectorizer.fit_transform(all_texts)
    # Split the feature matrix back into its train/test halves.  The
    # original hard-coded 415 here, which silently misaligns features and
    # labels if the training set ever changes size; derive it instead.
    n_train = len(train_texts)
    train_df = pd.DataFrame(X_tfidf.todense()).iloc[:n_train]
    test_df = pd.DataFrame(X_tfidf.todense()).iloc[n_train:]

    RF = RandomForestClassifier(n_estimators=100, max_depth=40,
                                random_state=0).fit(train_df, target)
    predictions = RF.predict(test_df)
    return predictions
def print_pred(predictions):
    """Print a per-class summary of the predicted tweet categories.

    Class codes: 0 = trading, 1 = technology, 2 = non-subject.
    """
    counts = Counter(predictions)
    print('Number of non-subject tweets: {0}'.format(counts[2]))
    print('Number of Technology focussed tweets: {0}'.format(counts[1]))
    print('Number of Trading focussed tweets: {0}'.format(counts[0]))
def print_examples(predictions,test):
    """Print one example tweet for each predicted class (printing only).

    NOTE(review): ``test.iloc[415+i]`` offsets into `test` by 415 even
    though `predictions` appears to cover the test rows alone — confirm
    against the caller that this offset (and the ``i > 22`` skip below)
    actually match the data layout before relying on the printed examples.
    """
    # One flag per class so only the first example of each is printed.
    n=False
    tech=False
    trade=False
    for i,v in enumerate(list(predictions)):
        # Skip the first 23 predictions before looking for examples.
        if i > 22:
            if predictions[i] == 0 and trade==False:
                print('\nTweet classified as a trading tweet:\n', test.iloc[415+i]['text'])
                trade = True
            elif predictions[i] == 1 and tech==False:
                print('\nTweet classified as a technology tweet:\n', test.iloc[415+i]['text'])
                tech = True
            elif predictions[i] == 2 and n==False:
                print('\nTweet classified as a Non subject tweet:\n', test.iloc[415+i]['text'])
                n = True
def main():
    """Load the tweet data, classify the test set, and report the results."""
    train, target, test = download_data()
    preds = predict(train, target, test)
    print_pred(preds)
    print_examples(preds, test)
# Script entry point: run the full load -> classify -> report pipeline.
if __name__ == "__main__":
    main()
|
[
"fariszahrah@Fariss-MBP.attlocal.net"
] |
fariszahrah@Fariss-MBP.attlocal.net
|
81e3da142f2a706ac4fbe041c6cc6057c44a4027
|
9be1ab6f7cc9e1e8474b7c76ef89284b54782c46
|
/chapter17_errors/8_custom_exception.py
|
8441acb47c175019e2ace90a11dcdee1fd945443
|
[] |
no_license
|
Nateque123/python_tutorials
|
8d9842d46570e6cecd7aa5419b9f77bc4468d391
|
83743acf4862155c5837c154d0422f74d0629043
|
refs/heads/master
| 2022-11-20T11:39:02.565456
| 2020-07-24T11:08:34
| 2020-07-24T11:08:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
# how to create custom exception
# NOTE(review): the name reads "NameToShort" ("NameTooShort" was likely
# intended), but renaming would break existing except clauses.
class NameToShortError(ValueError):
    """Raised when a supplied name is shorter than the minimum length."""
    pass
def check(name):
    """Greet `name`, or raise NameToShortError if it has fewer than 8 chars."""
    if len(name) >= 8:
        print(f'Hello {name}')
    else:
        raise NameToShortError('you enter short name...')
# check() prints the greeting itself and returns None, so wrapping the call
# in print() used to emit a stray "None" line; call it directly instead.
name1 = input('Enter your name: ')
check(name1)
|
[
"noreply@github.com"
] |
noreply@github.com
|
612f3220df184b463f51fd5d95a6580cca79748d
|
4ad3624c676defcf75a3235cc014534806dde50b
|
/baranovperictyrant.py
|
f60ee7cfb02dc67a198b3934d2a99733a1b8f959
|
[] |
no_license
|
gbaranov99/BaranovPericCS407Program1
|
5ca2b0eeb3b290d0142eb8e4b9a4da0acf7a4bf1
|
ab29da5887f5f1ff1fda4b2e3dd4bdb3209e7b88
|
refs/heads/master
| 2023-02-27T20:47:03.196996
| 2021-02-13T01:07:38
| 2021-02-13T01:07:38
| 338,470,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,414
|
py
|
#!/usr/bin/python
# This is a dummy peer that just illustrates the available information your peers
# have available. The setup script will copy it to create the versions you edit
import random
import logging
from messages import Upload, Request
from util import even_split
from peer import Peer
class BaranovPericTyrant(Peer):
    """Skeleton BitTorrent-style peer for the course simulator.

    NOTE(review): despite the "Tyrant" name, the reference (dummy)
    strategy is still in place — requests() asks for random needed pieces
    and uploads() unchokes a single random requester.
    """
    def post_init(self):
        # One-time setup hook run by the framework after construction.
        print("post_init(): %s here!" % self.id)
        ##################################################################################
        # Declare any variables here that you want to be able to access in future rounds #
        ##################################################################################
        # This commented-out code is an example of a python dictionary,
        # which is a convenient way to store a value indexed by a particular "key"
        #self.dummy_state = dict()
        #self.dummy_state["cake"] = "lie"
    def requests(self, peers, history):
        """
        peers: available info about the peers (who has what pieces)
        history: what's happened so far as far as this peer can see
        returns: a list of Request() objects
        This will be called after update_pieces() with the most recent state.
        """
        # A piece is still needed while it has fewer downloaded blocks
        # than a complete piece.
        needed = lambda i: self.pieces[i] < self.conf.blocks_per_piece
        needed_pieces = list(filter(needed, list(range(len(self.pieces)))))
        np_set = set(needed_pieces)  # sets support fast intersection ops.
        logging.debug("%s here: still need pieces %s" % (
            self.id, needed_pieces))
        # This code shows you what you have access to in peers and history.
        # You won't need it in your final solution, but may want to uncomment it
        # and see what it does to help you get started.
        """
        logging.debug("%s still here. Here are some peers:" % self.id)
        for p in peers:
            logging.debug("id: %s, available pieces: %s" % (p.id, p.available_pieces))
        logging.debug("And look, I have my entire history available too:")
        logging.debug("look at the AgentHistory class in history.py for details")
        logging.debug(str(history))
        """
        requests = []   # We'll put all the things we want here
        # Symmetry breaking is good...
        random.shuffle(needed_pieces)
        # count frequencies of all pieces that the other peers have
        # this will be useful for implementing rarest first
        ###########################################################
        # you'll need to write the code to compute these yourself #
        ###########################################################
        frequencies = {}  # TODO: piece_id -> count across peers (rarest-first)
        # Python syntax to perform a sort using a user defined sort key
        # This exact sort is probably not a useful sort, but other sorts might be useful
        # peers.sort(key=lambda p: p.id)
        # request all available pieces from all peers!
        # (up to self.max_requests from each)
        #############################################################################
        # This code asks for pieces at random, you need to adapt it to rarest first #
        #############################################################################
        for peer in peers:
            av_set = set(peer.available_pieces)
            isect = av_set.intersection(np_set)
            n = min(self.max_requests, len(isect))
            # More symmetry breaking -- ask for random pieces.
            # You could try fancier piece-requesting strategies
            # to avoid getting the same thing from multiple peers at a time.
            # NOTE(review): random.sample() on a *set* is deprecated since
            # Python 3.9 and raises TypeError on 3.11+; convert `isect` to a
            # sequence first if the simulator moves to a newer Python.
            for piece_id in random.sample(isect, int(n)):
                # aha! The peer has this piece! Request it.
                # which part of the piece do we need next?
                # (must get the next-needed blocks in order)
                #
                # If you loop over the piece_ids you want to request above
                # you don't need to change the rest of this code
                start_block = self.pieces[piece_id]
                r = Request(self.id, peer.id, piece_id, start_block)
                requests.append(r)
        return requests
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds
        returns: list of Upload objects.
        In each round, this will be called after requests().
        """
        ##############################################################################
        # The code and suggestions here will get you started for the standard client #
        # You'll need to change things for the other clients                         #
        ##############################################################################
        round = history.current_round()
        logging.debug("%s again. It's round %d." % (
            self.id, round))
        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.
        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            chosen = []
            bws = []
        else:
            logging.debug("Still here: uploading to a random peer")
            ########################################################################
            # The dummy client picks a single peer at random to unchoke.           #
            # You should decide a set of peers to unchoke according to the protocol #
            ########################################################################
            request = random.choice(requests)
            chosen = [request.requester_id]
            # Now that we have chosen who to unchoke, the standard client evenly shares
            # its bandwidth among them
            bws = even_split(self.up_bw, len(chosen))
        # create actual uploads out of the list of peer ids and bandwidths
        # You don't need to change this
        uploads = [Upload(self.id, peer_id, bw)
                   for (peer_id, bw) in zip(chosen, bws)]
        return uploads
|
[
"gbaranov99@gmail.com"
] |
gbaranov99@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.