blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
759560479a1e4b13d2472efb035ca54585168fde | c57da088ba85fe9a1d73707797fffb237268717a | /dashboard/apps/hub/migrations/0028_auto__del_field_election_office__del_field_election_district__del_uniq.py | e08654c61dab12540d0b3fc74e12d2e2c68416c6 | [
"MIT"
] | permissive | GPHemsley/dashboard | f8cf4163597e8a49dae347a9581448f23b459e0e | 892d68566ac6fafe88b7546d8509619eb8fd6676 | refs/heads/master | 2020-03-27T23:04:20.433348 | 2018-09-04T10:47:26 | 2018-09-04T10:47:26 | 147,287,666 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,375 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: drop Election.office / Election.district.

    The unique constraint referencing those columns must be removed
    before the columns themselves can be dropped.
    """
    # Removing unique constraint on 'Election', fields ['end_date', 'office', 'state', 'race_type', 'organization', 'district', 'special']
    db.delete_unique('hub_election', ['end_date', 'office_id', 'state_id', 'race_type', 'organization_id', 'district', 'special'])

    # Deleting field 'Election.office'
    db.delete_column('hub_election', 'office_id')

    # Deleting field 'Election.district'
    db.delete_column('hub_election', 'district')
def backwards(self, orm):
    """Revert the migration: re-add Election.office and Election.district,
    then restore the unique constraint that references them.
    """
    # Adding field 'Election.office'
    db.add_column('hub_election', 'office',
                  self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hub.Office'], null=True, blank=True),
                  keep_default=False)

    # Adding field 'Election.district'
    db.add_column('hub_election', 'district',
                  self.gf('django.db.models.fields.CharField')(default='', max_length=5, blank=True, db_index=True),
                  keep_default=False)

    # Adding unique constraint on 'Election', fields ['end_date', 'office', 'state', 'race_type', 'organization', 'district', 'special']
    db.create_unique('hub_election', ['end_date', 'office_id', 'state_id', 'race_type', 'organization_id', 'district', 'special'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'hub.contact': {
'Meta': {'ordering': "['last_name']", 'object_name': 'Contact'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'mobile': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hub.Organization']"}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'})
},
'hub.dataformat': {
'Meta': {'ordering': "['name']", 'object_name': 'DataFormat'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'})
},
'hub.election': {
'Meta': {'ordering': "['state', '-end_date', 'race_type']", 'object_name': 'Election'},
'absentee_and_provisional': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'cong_dist_level': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'county_level': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'direct_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'blank': 'True'}),
'formats': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['hub.DataFormat']", 'symmetrical': 'False'}),
'gov': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'house': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level_note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'needs_review': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hub.Organization']", 'null': 'True'}),
'portal_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'precinct_level': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'prez': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'primary_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'db_index': 'True', 'blank': 'True'}),
'race_type': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'result_type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'senate': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'special': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hub.State']"}),
'state_leg': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'state_leg_level': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'state_level': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'state_officers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'hub.log': {
'Meta': {'ordering': "['-date']", 'object_name': 'Log'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hub.Contact']", 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'follow_up': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'formal_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gdoc_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hub.Organization']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hub.State']"}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'hub.office': {
'Meta': {'ordering': "['name']", 'object_name': 'Office'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'})
},
'hub.organization': {
'Meta': {'ordering': "['name']", 'object_name': 'Organization'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fec_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'gov_agency': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gov_level': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'hub.state': {
'Meta': {'ordering': "['name']", 'object_name': 'State'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'postal': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'})
},
'hub.volunteer': {
'Meta': {'ordering': "['last_name']", 'object_name': 'Volunteer'},
'affil': ('django.db.models.fields.CharField', [], {'max_length': '254', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'mobile': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'blank': 'True'}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['hub.VolunteerRole']", 'symmetrical': 'False'}),
'skype': ('django.db.models.fields.CharField', [], {'max_length': '254', 'blank': 'True'}),
'states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['hub.State']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '254', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '254', 'blank': 'True'})
},
'hub.volunteerlog': {
'Meta': {'object_name': 'VolunteerLog'},
'date': ('django.db.models.fields.DateField', [], {}),
'follow_up': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'gdoc_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'volunteer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hub.Volunteer']"})
},
'hub.volunteerrole': {
'Meta': {'object_name': 'VolunteerRole'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '30', 'primary_key': 'True'})
}
}
complete_apps = ['hub'] | [
"zstumgoren@gmail.com"
] | zstumgoren@gmail.com |
54013c36a6e35a180dd0e6b285b2cdbc207b7a6f | 385ed58325dd0cc75bdb9fd3e61c5e005f7a4f28 | /source/tuyoo/src/poker/entity/game/rooms/arena_match_ctrl/matchtest.py | 3b8daba5fcd923b40d85e49d509432c699b9ddc2 | [] | no_license | csirui/hall37 | 17dfa4e4f1f8bf719d0c11ac7738fa4c14fd06db | 5c4eb4b2bf57bbbee4731470c830d8d81915d603 | refs/heads/master | 2021-09-04T03:55:12.460035 | 2018-01-15T15:12:30 | 2018-01-15T15:12:30 | 117,560,615 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 16,493 | py | # -*- coding:utf-8 -*-
'''
Created on 2015年11月12日
@author: zhaojiangang
'''
import functools
import json
import random
import stackless
import freetime.util.log as ftlog
from freetime.core.reactor import mainloop, exitmainloop
from freetime.core.tasklet import FTTasklet
from freetime.core.timer import FTTimer
from poker.entity.dao import onlinedata
from poker.entity.game.rooms.arena_match_ctrl.interfaces import \
MatchTableController, MatchRankRewardsSender, \
MatchPlayerNotifier, UserInfoLoader
from poker.entity.game.rooms.arena_match_ctrl.match import MatchConf, Match, \
MatchTableManager
class SigninFee(object):
    """Log-only stand-in for the signin-fee handler used by the match test."""

    def collectFees(self, inst, userId, fees):
        """Collect the signin fee from a user.

        The real implementation raises SigninFeeNotEnoughException when the
        user cannot afford the fee; this stub only records the request.
        """
        ftlog.info('SigninFee.collectFees matchId=', inst.matchId,
                   'userId=', userId, 'fees=', fees)

    def returnFees(self, inst, userId, fees):
        """Refund a previously collected signin fee (log-only stub)."""
        ftlog.info('SigninFee.returnFees matchId=', inst.matchId,
                   'userId=', userId, 'fees=', fees)
class MatchTableControllerTest(MatchTableController):
    # Test table controller: instead of playing a real game it finishes each
    # table after a random 1-5 second delay with randomly chosen winners.
    def __init__(self, match):
        self.match = match

    def startTable(self, table):
        '''
        Start the table; schedule _winlose to settle it after 1-5 seconds.
        '''
        ftlog.info('MatchTableControllerTest.startTable matchId=', self.match.matchId,
                   'tableId=', table.tableId)
        timeout = random.randint(1, 5)
        FTTimer(timeout, self._winlose, table)

    def clearTable(self, table):
        '''
        Clear the table (log-only in this test controller).
        '''
        ftlog.info('MatchTableControllerTest.clear matchId=', self.match.matchId,
                   'tableId=', table.tableId)

    def _winlose(self):
        # The table is passed through the tasklet's argument list by FTTimer.
        table = FTTasklet.getCurrentFTTasklet().run_argl[0]
        playerList = table.getPlayerList()
        # Draw two winner indexes; if they collide there is a single winner.
        win0 = random.randint(0, len(playerList) - 1)
        win1 = random.randint(0, len(playerList) - 1)
        deltaScore = 100
        winScore = 0
        loseScore = 0
        winPlayerCount = 1 if win0 == win1 else 2
        if winPlayerCount == 1:
            # One winner takes double, both losers pay single.
            winScore = int(deltaScore * 2)
            loseScore = -deltaScore
        else:
            # Two winners take single each, the loser pays double.
            winScore = deltaScore
            loseScore = int(-deltaScore * 2)
        for i, player in enumerate(playerList):
            isWin = i in (win0, win1)
            playerDeltaScore = winScore if isWin else loseScore
            playerDeltaScore = int(playerDeltaScore)
            self.match.winlose(table.tableId, table.ccrc, player.seat.seatId, player.userId, playerDeltaScore, isWin)
class MatchRankRewardsSenderTest(MatchRankRewardsSender):
    """Test double that logs rank rewards instead of delivering them."""

    def sendRankRewards(self, player, rankRewards):
        """Log the rewards that would be granted to *player*.

        Bug fix: the original logged ``len(self._playerMap)`` and tagged the
        message ``MatchPlayerNotifierTest`` — but ``_playerMap`` is defined on
        MatchPlayerNotifierTest, not on this class, so every call raised
        AttributeError. The stray field is dropped and the tag corrected.
        """
        ftlog.info('MatchRankRewardsSenderTest.sendRankRewards userId=', player.userId,
                   'rankRewards=', rankRewards)
class MatchPlayerNotifierTest(MatchPlayerNotifier):
    # Test notifier that records each player's final rank and shuts the
    # reactor down once the whole match has been settled.
    def __init__(self):
        # map<userId, rank> of players whose match is over
        self._playerMap = {}

    def notifyMatchStart(self, player):
        # Log-only: the match has started for this player.
        ftlog.info('MatchPlayerNotifierTest.notifyMatchStart userId=', player.userId)

    def notifyMatchRise(self, player):
        '''
        Notify the user that they are waiting for promotion to the next stage.
        '''
        ftlog.info('MatchPlayerNotifierTest.notifyMatchWait userId=', player.userId)

    def notifyMatchOver(self, player, reason, rankRewards):
        '''
        Notify the user that the match is over for them; record the rank.
        '''
        self._playerMap[player.userId] = player.rank
        ftlog.info('MatchPlayerNotifierTest.notifyMatchOver userId=', player.userId,
                   'reason=', reason,
                   'rankRewards=', rankRewards,
                   'totalOver=', len(self._playerMap),
                   'allPlayer=', len(player.matchInst._playerMap))
        # Stop the test reactor once all 120 entrants have finished, or the
        # match has shrunk below 4 players (presumably less than one full
        # 3-seat table plus one -- TODO confirm against match rules).
        if len(self._playerMap) == 120 or len(player.matchInst._playerMap) < 4:
            exitmainloop()
class UserInfoLoaderTest(UserInfoLoader):
    """Test loader that fabricates user attributes locally."""

    def loadUserAttrs(self, userId, attrs):
        """Return a fake value for each requested attribute name.

        Attribute names that are not recognized are echoed back unchanged.
        """
        values = []
        for name in attrs:
            if name == 'name':
                values.append('user%d' % (userId))
            elif name == 'sessionClientId':
                values.append('Android_3.7_test')
            elif name == 'snsId':
                values.append('360:%s' % (userId))
            else:
                values.append(name)
        return values

    def getSessionClientId(self, userId):
        """Return a fixed fake sessionClientId for any user."""
        return 'Android_3.7_test'
match_conf = {
"matchId": 6060,
"stages": [
{
"cardCount": 1,
"totalUserCount": 120,
"riseUserCount": 90,
"scoreInit": 1000,
"scoreIntoRate": 1,
"rankLine": [
[3200, 1],
[1600, 3],
[800, 7],
[400, 19],
[200, 39],
[100, 55],
[-100, 60],
[-200, 66],
[-400, 82],
[-800, 102],
[-1600, 114],
[-3200, 118],
[-51200, 120]
]
},
{
"cardCount": 1,
"totalUserCount": 90,
"riseUserCount": 66,
"scoreInit": 0,
"scoreIntoRate": 0.75,
"rankLine": [
[4400, 1],
[3125, 2],
[2500, 3],
[2000, 4],
[1900, 5],
[1675, 6],
[1525, 7],
[1400, 8],
[1300, 10],
[1100, 11],
[1000, 13],
[950, 15],
[800, 17],
[725, 20],
[700, 21],
[650, 24],
[550, 26],
[500, 28],
[475, 32],
[400, 33],
[350, 36],
[325, 38],
[275, 39],
[250, 40],
[200, 44],
[125, 47],
[100, 48],
[50, 52],
[25, 54],
[-50, 55],
[-100, 58],
[-125, 62],
[-200, 63],
[-250, 65],
[-275, 68],
[-325, 69],
[-350, 70],
[-400, 72],
[-475, 73],
[-500, 74],
[-550, 77],
[-650, 79],
[-700, 81],
[-725, 82],
[-950, 83],
[-1100, 85],
[-1300, 86],
[-1450, 87],
[-1750, 88],
[-3050, 89],
[-51500, 90]
]
},
{
"cardCount": 1,
"totalUserCount": 66,
"riseUserCount": 48,
"scoreInit": 0,
"scoreIntoRate": 0.75,
"rankLine": [
[4400, 1],
[3275, 2],
[2650, 3],
[2225, 4],
[1975, 5],
[1844, 6],
[1675, 7],
[1550, 8],
[1450, 9],
[1344, 10],
[1250, 11],
[1175, 12],
[1150, 13],
[1063, 14],
[1000, 15],
[950, 16],
[925, 17],
[888, 18],
[850, 19],
[800, 20],
[775, 21],
[725, 22],
[700, 23],
[663, 24],
[625, 25],
[606, 26],
[575, 27],
[550, 28],
[500, 29],
[475, 30],
[438, 31],
[419, 32],
[388, 33],
[350, 34],
[325, 35],
[313, 36],
[288, 37],
[250, 38],
[213, 39],
[200, 40],
[163, 41],
[125, 42],
[88, 44],
[44, 45],
[13, 46],
[-25, 47],
[-50, 48],
[-100, 49],
[-125, 50],
[-163, 51],
[-213, 52],
[-250, 53],
[-275, 54],
[-325, 55],
[-363, 56],
[-425, 57],
[-475, 58],
[-500, 59],
[-594, 60],
[-725, 61],
[-850, 62],
[-1000, 63],
[-1413, 64],
[-2375, 65],
[-51388, 66]
]
},
{
"cardCount": 1,
"totalUserCount": 48,
"riseUserCount": 36,
"scoreInit": 0,
"scoreIntoRate": 0.75,
"rankLine": [
[4300, 1],
[3238, 2],
[2575, 3],
[2225, 4],
[2000, 5],
[1825, 6],
[1681, 7],
[1555, 8],
[1438, 9],
[1338, 10],
[1259, 11],
[1175, 12],
[1109, 13],
[1056, 14],
[1000, 15],
[944, 16],
[894, 17],
[850, 18],
[800, 19],
[756, 20],
[709, 21],
[663, 22],
[625, 23],
[578, 24],
[533, 25],
[494, 26],
[463, 27],
[419, 28],
[381, 29],
[341, 30],
[294, 31],
[256, 32],
[209, 33],
[163, 34],
[116, 35],
[63, 36],
[13, 37],
[-44, 38],
[-106, 39],
[-156, 40],
[-219, 41],
[-283, 42],
[-372, 43],
[-494, 44],
[-669, 45],
[-916, 46],
[-1506, 47],
[-51266, 48]
]
},
{
"cardCount": 1,
"totalUserCount": 36,
"riseUserCount": 24,
"scoreInit": 0,
"scoreIntoRate": 0.75,
"rankLine": [
[4077, 1],
[3027, 2],
[2458, 3],
[2146, 4],
[1920, 5],
[1745, 6],
[1594, 7],
[1463, 8],
[1352, 9],
[1255, 10],
[1173, 11],
[1100, 12],
[1030, 13],
[965, 14],
[901, 15],
[838, 16],
[777, 17],
[719, 18],
[660, 19],
[604, 20],
[549, 21],
[494, 22],
[439, 23],
[381, 24],
[320, 25],
[256, 26],
[186, 27],
[114, 28],
[38, 29],
[-41, 30],
[-128, 31],
[-230, 32],
[-377, 33],
[-608, 34],
[-1212, 35],
[-51090, 36]
]
},
{
"cardCount": 1,
"totalUserCount": 36,
"riseUserCount": 24,
"scoreInit": 0,
"scoreIntoRate": 0.75,
"rankLine": [
[4077, 1],
[3027, 2],
[2458, 3],
[2146, 4],
[1920, 5],
[1745, 6],
[1594, 7],
[1463, 8],
[1352, 9],
[1255, 10],
[1173, 11],
[1100, 12],
[1030, 13],
[965, 14],
[901, 15],
[838, 16],
[777, 17],
[719, 18],
[660, 19],
[604, 20],
[549, 21],
[494, 22],
[439, 23],
[381, 24],
[320, 25],
[256, 26],
[186, 27],
[114, 28],
[38, 29],
[-41, 30],
[-128, 31],
[-230, 32],
[-377, 33],
[-608, 34],
[-1212, 35],
[-51090, 36]
]
},
{
"cardCount": 1,
"totalUserCount": 24,
"riseUserCount": 12,
"scoreInit": 0,
"scoreIntoRate": 0.75,
"rankLine": [
[3791, 1],
[2748, 2],
[2287, 3],
[1994, 4],
[1757, 5],
[1578, 6],
[1434, 7],
[1312, 8],
[1207, 9],
[1110, 10],
[1014, 11],
[923, 12],
[837, 13],
[755, 14],
[672, 15],
[584, 16],
[492, 17],
[389, 18],
[280, 19],
[164, 20],
[28, 21],
[-170, 22],
[-596, 23],
[-50992, 24]
]
},
{
"cardCount": 1,
"totalUserCount": 12,
"riseUserCount": 6,
"scoreInit": 0,
"scoreIntoRate": 0.75,
"rankLine": [
[3312, 1],
[2453, 2],
[1994, 3],
[1706, 4],
[1488, 5],
[1298, 6],
[1123, 7],
[949, 8],
[750, 9],
[522, 10],
[178, 11],
[-50503, 12]
]
},
{
"cardCount": 1,
"totalUserCount": 6,
"riseUserCount": 3,
"scoreInit": 0,
"scoreIntoRate": 0.75,
"rankLine": [
[2839, 1],
[2044, 2],
[1601, 3],
[1229, 4],
[789, 5],
[-50148, 6]
]
},
{
"cardCount": 1,
"totalUserCount": 3,
"riseUserCount": 1,
"scoreInit": 0,
"scoreIntoRate": 0.75,
"rankLine": [
[2318, 1],
[1436, 2],
[-40651, 3]
]
}
],
"baseScore": 100,
"tableSeatCount": 3,
"minSigninCount": 3,
"maxSigninCount": 1000,
"maxPlayerCount": 120,
"processSigninIntervalSeconds": 5,
"processSigninCountPerTime": 100,
"rankRewardsList": [
]
}
def testRankLine():
    # Smoke test: the sample match_conf must decode without raising.
    matchConf = MatchConf()
    matchConf.decodeFromDict(match_conf)
def testOnlineLoc(*args, **kwargs):
    # No-op stand-in for the onlinedata.* functions patched in __main__.
    pass
def testMatch(match):
    # Sign 120 users (ids 1..120) into the match to fill every stage.
    for i in xrange(120):
        match.signin(i + 1)
if __name__ == '__main__':
    # Build a Match wired with the test doubles above and drive it through
    # the freetime/stackless reactor until MatchPlayerNotifierTest calls
    # exitmainloop().
    matchConf = MatchConf()
    matchConf.decodeFromDict(match_conf)
    matchConf.gameId = 6
    matchConf.roomId = 60011001
    matchConf.tableId = matchConf.roomId * 10000  # special tableId marking players queued in the room
    matchConf.seatId = 1
    # Monkey-patch online-location bookkeeping so no real storage is touched.
    onlinedata.addOnlineLoc = testOnlineLoc
    onlinedata.removeOnlineLoc = testOnlineLoc
    onlinedata.getOnlineLocSeatId = testOnlineLoc
    onlinedata.getOnlineLocList = testOnlineLoc
    match = Match(matchConf)
    match.tableController = MatchTableControllerTest(match)
    match.tableManager = MatchTableManager(6, 3)
    match.tableManager.addTables(60011001, 0, 40)
    match.playerNotifier = MatchPlayerNotifierTest()
    match.rankRewardsSender = MatchRankRewardsSenderTest()
    match.userInfoLoader = UserInfoLoaderTest()
    match.start()
    # Sign the 120 users in two seconds after startup, then run the reactor.
    FTTimer(2, functools.partial(testMatch, match))
    stackless.tasklet(mainloop)()
    stackless.run()
    print 'main end'
    print match.playerNotifier._playerMap
    print json.dumps(match.playerNotifier._playerMap)
| [
"cg@ibenxi.com"
] | cg@ibenxi.com |
42978d300d56543c398e6469c90adbfa4d47ff5a | f4b8c90c1349c8740c1805f7b6b0e15eb5db7f41 | /test/test_concern_note_item.py | 3ae83d0718e0ff47eb10f831512cec5acdaef178 | [] | no_license | CalPolyResDev/StarRezAPI | 012fb8351159f96a81352d6c7bfa36cd2d7df13c | b184e1863c37ff4fcf7a05509ad8ea8ba825b367 | refs/heads/master | 2021-01-25T10:29:37.966602 | 2018-03-15T01:01:35 | 2018-03-15T01:01:35 | 123,355,501 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | # coding: utf-8
"""
StarRez API
This is a way to connect with the StarRez API. We are not the developers of the StarRez API, we are just an organization that uses it and wanted a better way to connect to it. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: resdev@calpoly.edu
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import starrez_client
from starrez_client.models.concern_note_item import ConcernNoteItem # noqa: E501
from starrez_client.rest import ApiException
class TestConcernNoteItem(unittest.TestCase):
    """Unit test stubs for the ConcernNoteItem model."""

    def setUp(self):
        # No fixtures are required for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testConcernNoteItem(self):
        """Test ConcernNoteItem"""
        # FIXME: construct object with mandatory attributes with example values
        # model = starrez_client.models.concern_note_item.ConcernNoteItem()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"fedorareis@gmail.com"
] | fedorareis@gmail.com |
da347efc1930fc80f8c88dd1a13954deab7eb7d5 | 24a1da610a57d9558a7261ed9ca92b20d8689634 | /June/47Day/Minimize_Maximum_Pair_Sum_in_Array_1184ms_28.1mb.py | c6b4f7b84036880c38b3df894c4b1ef2e971f389 | [] | no_license | JadeKim042386/LeetCode | b5b70a8100a19d705150f276ee8e0dc11c5038b2 | 77234a14dc97bd0e023842cd57698b37d1460511 | refs/heads/master | 2023-06-09T20:16:36.352246 | 2021-07-06T09:25:15 | 2021-07-06T09:25:15 | 349,680,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | class Solution:
def minPairSum(self, nums: List[int]) -> int:
arr = sorted(nums)
left, right = 0, len(nums) - 1
answer = 0
while left < right:
answer = max(answer, arr[left] + arr[right])
left += 1
right -= 1
return answer | [
"jk042386@gmail.com"
] | jk042386@gmail.com |
81573f4ec7c06d15ffa891286bf87762b14b1195 | 3af363719a8084b855077acd0bf6a0efc9e6762b | /backend/config/urls.py | 3ded0e7452ab67729732e3f886522a3f4633f919 | [] | no_license | salmanAndroidDev/social-network | 09dc4372985bb8b4219a239e39339a2aaccd4320 | 74ca8f1f9668c91c15e7acaa19dad3ed29e139ab | refs/heads/main | 2023-08-15T19:14:25.675409 | 2021-10-22T06:20:17 | 2021-10-22T06:20:17 | 419,827,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
# Top-level URL routes: admin site plus the two app-specific URLConfs.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('account/', include('accounts.urls')),
    path('social-hub/', include('social_hub.urls')),
]

# Serve user-uploaded media through Django itself only during development;
# in production the web server is expected to serve MEDIA_ROOT.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
| [
"salmanAndB@outlook.com"
] | salmanAndB@outlook.com |
13571023a12659a5938638520f75131cd1938acb | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/KCB_YCHF/KCB_YCHF_MM/OMS_SHOffer/YCHF_KCBYCHF_OMS_SHOffer_022.py | 51389f6b199d9455f00d1d325ba682212d48fed0 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,541 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from ARmainservice import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
from env_restart import *
class YCHF_KCBYCHF_OMS_SHOffer_022(xtp_test_case):
    # Per the title string below: restart the Shanghai offer feed first, then
    # restart OMS, and verify a best-5-or-cancel buy order on an SH STAR-market
    # stock (688000) ends up fully cancelled.
    def setUp(self):
        # Environment preparation is currently disabled; the case runs against
        # whatever state the previous case left behind.
        #sql_transfer = SqlData_Transfer()
        #sql_transfer.transfer_fund_asset('YCHF_KCBYCHF_OMS_SHOffer_022')
        #clear_data_and_restart_all()
        #Api.trade.Logout()
        #Api.trade.Login()
        pass

    #
    def test_YCHF_KCBYCHF_OMS_SHOffer_022(self):
        title = '先重启上海报盘再重启OMS(沪A最优五档即成转撤单全撤买入)'
        # Expected results for this case.
        # Possible order states: initial, unfilled, partially filled, filled,
        # partial-cancel reported, partially cancelled, cancel pending,
        # cancelled, invalid, cancel-invalid, internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '已撤',
            'errorID': 0,
            'errorMSG': queryOrderErrorMsg(0),
            '是否生成报单': '是',
            '是否是撤废': '否',
            # '是否是新股申购': '',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters ------------------------------------------
        # Args: ticker, market, security type, security status, trade status,
        # side (B = buy, S = sell), expected state, Api.
        stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the case fails immediately.
        if stkparm['返回结果'] is False:
            rs = {
                '报单测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            print(stkparm['错误原因'])
            self.assertEqual(rs['报单测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':1,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
                'price': stkparm['涨停价'],
                'quantity': 300,
                'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['报单测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            ## Restoring available funds is currently disabled.
            #sql_transfer = SqlData_Transfer()
            #sql_transfer.transfer_fund_asset('YW_KCB_BAK_000')
            #oms_restart()
            self.assertEqual(rs['报单测试结果'], True)  # 211
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
10d13a48f0545019c2703815d3a0d486f92e71e1 | d7e0b198c216fc877ec94c4279d837bfbc6bccfc | /tree/Yu/501.py | 65ba4a06cf23c9e665c8fc8565b553cb9c9cde2e | [
"MIT"
] | permissive | choiking/LeetCode | dcdb467e25ad6455156a9e2620dd98fabdf9c28b | 08a7ad6af2449e4268fce86823cbf667bbed2ae8 | refs/heads/master | 2021-07-11T15:46:01.841530 | 2017-10-12T23:34:45 | 2017-10-12T23:34:45 | 107,908,853 | 1 | 0 | null | 2017-10-22T22:48:30 | 2017-10-22T22:48:30 | null | UTF-8 | Python | false | false | 1,059 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Yu Zhou
# 501. Find Mode in Binary Search Tree
# ****************
# Descrption:
# Given a binary search tree (BST) with duplicates, find all the mode(s)
# (the most frequently occurred element) in the given BST.
# ****************
class Solution(object):
    def findMode(self, root):
        """Return all mode(s) (most frequently occurring values) of a BST
        that may contain duplicates.

        Values are returned in the order they are first visited (pre-order).

        :type root: TreeNode (nodes expose .val, .left, .right)
        :rtype: List[int]
        """
        # Edge case: an empty tree has no modes.
        if not root:
            return []

        # Frequency table; a plain dict is used instead of the original
        # name `hash`, which shadowed the builtin of the same name.
        counts = {}

        def tally(node):
            # Pre-order traversal accumulating value frequencies.
            if not node:
                return
            counts[node.val] = counts.get(node.val, 0) + 1
            tally(node.left)
            tally(node.right)

        tally(root)
        max_count = max(counts.values())
        # dicts preserve insertion order, so this matches the original
        # iteration order over the frequency table.
        return [val for val, cnt in counts.items() if cnt == max_count]
| [
"junior147147@yahoo.com"
] | junior147147@yahoo.com |
e26abadd67fc101afc766003b46c485bed810476 | 7c241ed033efebdcd4f58fa5ae83db95f21a5614 | /backend/psychhubb_19465/settings.py | 5ac4c92d4b5e68d24447a144592a4aea70a3e8ac | [] | no_license | crowdbotics-apps/psychhubb-19465 | 97ac3ce690f8759e877e8f407fb2fd03d47b5ab3 | d89db26d2a246a85e7f75c4a2bb7fb3a6533d5e4 | refs/heads/master | 2022-12-09T10:06:46.302480 | 2020-08-09T11:47:12 | 2020-08-09T11:47:12 | 286,226,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,901 | py | """
Django settings for psychhubb_19465 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ

env = environ.Env()

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")

ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1

# Trust the reverse proxy's X-Forwarded-Proto header when deciding
# whether a request was made over HTTPS.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]

LOCAL_APPS = [
    'home',
    'users.apps.UsersConfig',
]

THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'rest_auth.registration',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    'django_extensions',
    'drf_yasg',
    # start fcm_django push notifications
    'fcm_django',
    # end fcm_django push notifications
]

INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'psychhubb_19465.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'psychhubb_19465.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite fallback for local development; overridden by DATABASE_URL below.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# WhiteNoise serves static files directly from the app process.
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']

AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)

STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# allauth / users
# Email is the login identifier; usernames are not used.
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)

REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}

# Custom user model
AUTH_USER_MODEL = "users.User"

# Outgoing mail via SendGrid SMTP; credentials come from the environment.
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True

# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
    "FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications

# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}

if DEBUG:
    # output email to console instead of sending
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
b9b586b21db02c671897a5893716ba96fcfe3712 | 5c280ec1f6f4160c53435d35fec4b46cff1c1485 | /web/forms.py | 2ef70d408d133ed056c297b3482e40568fe7d688 | [
"Apache-2.0"
] | permissive | cheekybastard/namebot | f255f34a10fc5d0ac7e504a432f669190fcbf846 | ea17b31e28461b5e54409f549cb9e1315ab8072a | refs/heads/master | 2020-07-10T08:16:57.038301 | 2014-01-04T00:14:54 | 2014-01-04T00:14:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | from wtforms import Form, BooleanField, TextField, PasswordField, validators
class NameGeneratorForm(Form):
    """WTForms form collecting five free-text seed words for name generation.

    Every field must be between 4 and 25 characters long.
    """
    # NOTE(review): TextField is the legacy wtforms name (StringField in
    # wtforms >= 3); kept to match the module's import line.
    field1 = TextField('Field 1', [validators.Length(min=4, max=25)])
    field2 = TextField('Field 2', [validators.Length(min=4, max=25)])
    field3 = TextField('Field 3', [validators.Length(min=4, max=25)])
    field4 = TextField('Field 4', [validators.Length(min=4, max=25)])
    field5 = TextField('Field 5', [validators.Length(min=4, max=25)])
| [
"dxdstudio@gmail.com"
] | dxdstudio@gmail.com |
a3a539ffb8cc47ba73a9ebaafc40f8dafc87b37c | 9e1ca5fe3f86b28d945ea3c06ea7715fc0946f1f | /add_derived_columns.py | 52aa033f3d2241e85302d7c13e92e4d8a141220c | [] | no_license | colinmorris/crawl-coroner | e2962447c697cffebcaa0bc4b8530b1ba30b16b0 | 026f9941ed5ae6df108cd1223c1e65e2b0b18f08 | refs/heads/master | 2021-01-11T18:42:00.951101 | 2017-02-11T18:07:33 | 2017-02-11T18:07:33 | 79,603,714 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | import pandas as pd
STORE_FNAME = 'morgue.h5'
# This is probably expensive. But then it only really needs to be run once.
if __name__ == '__main__':
store = pd.HDFStore(STORE_FNAME)
# other derived columns of interest:
# - player experience/skill
games = store['games']
# Original species: doing any kind of naive analysis across species tends to
# result in weird outcomes for draconians, because the population of baby
# draconians is very different from the population of coloured draconians
# (who must have made it to at least level 7). This column is the same
# as species, except that all draconian species are merged into one.
species = games['species'].cat.categories
drac_species = set(sp for sp in species if 'draconian' in sp)
def get_original_species(sp):
return 'draconian' if sp in drac_species else sp
games['orig_species'] = games['species'].map(get_original_species)
# "legit-ness". There are some 'junk' games that will tend to just pollute
# the results of a lot of typical analyses, namely:
# - games played by bots
# - games quit at level 1
# There's an argument to be made for making the latter condition more extreme
# and just excluding all quit games.
# Though actually, maybe that's a bad argument. Because I think some
# players will quit when they get into a clearly hopeless situation
# as some kind of face-saving gesture.
# Excluding level 1 quits was based on the empirical observation that
# there are a shit-ton of quits at level 1 (which probably don't
# meaningfully reflect how the game would have gone if the player
# had actually tried to win). Would be good to sniff around the
# data some time to see if there are any patterns in these lvl
# 1 quitters.
games['legit'] = (~games['bot'] &
~( (games['level'] == 1) & (games['howdied'] == 'quit'))
)
store.put('games', games, format='table', data_columns=['legit'])
| [
"colin.morris2@gmail.com"
] | colin.morris2@gmail.com |
ed076510c5dccaf3e164c2bf539cacf6638734a0 | a4454d895d7b045c7aa9df499523203d04ecb69d | /venv/bin/django-admin.py | 7614153c4dd740b5adb0efee63e5b8a6340b4f92 | [] | no_license | amjedsaleel/Institute-Management | b94a7a06ddb5052572de9734f9b28fbdf8872b02 | 787b0c65bb99ecd24196ff23a41c20181968fe4b | refs/heads/main | 2023-03-01T09:32:05.406385 | 2021-02-13T11:22:45 | 2021-02-13T11:22:45 | 321,311,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | #!/home/amjed/PycharmProjects/institute_management/venv/bin/python
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"amjedsaleel@gmail.com"
] | amjedsaleel@gmail.com |
e7c2b5bb9470f929d344f0160bccbe9ebd10c750 | c2ee51902020596e08aacd4462ab44715432c8f8 | /pyapprox/tests/test_variables.py | 90292c09b8ebf22f21ed3bef0eca25b1833cd5cc | [
"MIT"
] | permissive | ConnectedSystems/pyapprox | bb1462aa8ee54258ee559d734f7bffb744e09c78 | 4f405654c707cba83d211f327c0f0fdbc95efa29 | refs/heads/master | 2021-09-13T09:49:59.048327 | 2021-08-29T03:38:43 | 2021-08-29T03:38:43 | 252,080,343 | 0 | 0 | MIT | 2020-04-01T05:26:29 | 2020-04-01T05:26:29 | null | UTF-8 | Python | false | false | 13,206 | py | import unittest
import numpy as np
from scipy import stats
from warnings import warn
from pyapprox.variables import get_distribution_info, \
define_iid_random_variables, IndependentMultivariateRandomVariable, \
float_rv_discrete, variables_equivalent, get_pdf
from pyapprox.utilities import lists_of_arrays_equal
class TestVariables(unittest.TestCase):
    """Unit tests for the helpers in pyapprox.variables."""

    def test_get_distribution_params(self):
        # get_distribution_info should split a frozen scipy variable into
        # (name, scale-params, shape-params) regardless of whether the
        # arguments were supplied positionally or by keyword.
        name, scales, shapes = get_distribution_info(
            stats.beta(a=1, b=2, loc=0, scale=1))
        assert name == 'beta'
        assert shapes == {'a': 1, 'b': 2}
        assert scales == {'loc': 0, 'scale': 1}

        rv = stats.beta(a=1, b=2, loc=3, scale=4)
        pdf = get_pdf(rv)
        xx = rv.rvs(100)
        assert np.allclose(pdf(xx), rv.pdf(xx))

        name, scales, shapes = get_distribution_info(
            stats.beta(1, 2, loc=0, scale=1))
        assert name == 'beta'
        assert shapes == {'a': 1, 'b': 2}
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(
            stats.beta(1, 2, 0, scale=1))
        assert name == 'beta'
        assert shapes == {'a': 1, 'b': 2}
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(stats.beta(1, 2, 0, 1))
        assert name == 'beta'
        assert shapes == {'a': 1, 'b': 2}
        assert scales == {'loc': 0, 'scale': 1}

        # norm has no shape parameters, only loc/scale.
        name, scales, shapes = get_distribution_info(stats.norm(0, 1))
        assert name == 'norm'
        assert shapes == dict()
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(stats.norm(0, scale=1))
        assert name == 'norm'
        assert shapes == dict()
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(stats.norm(
            loc=0, scale=1))
        assert name == 'norm'
        assert shapes == dict()
        assert scales == {'loc': 0, 'scale': 1}

        # gamma: one shape parameter 'a'; defaults loc=0, scale=1 must be
        # filled in even when not passed explicitly.
        name, scales, shapes = get_distribution_info(
            stats.gamma(a=1, loc=0, scale=1))
        assert name == 'gamma'
        assert shapes == {'a': 1}
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(
            stats.gamma(1, loc=0, scale=1))
        assert name == 'gamma'
        assert shapes == {'a': 1}
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(
            stats.gamma(1, 0, scale=1))
        assert name == 'gamma'
        assert shapes == {'a': 1}
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(stats.gamma(1, 0, 1))
        assert name == 'gamma'
        assert shapes == {'a': 1}
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(stats.gamma(1))
        assert name == 'gamma'
        assert shapes == {'a': 1}
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(stats.gamma(1, loc=0))
        assert name == 'gamma'
        assert shapes == {'a': 1}
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(stats.gamma(1, scale=1))
        assert name == 'gamma'
        assert shapes == {'a': 1}
        assert scales == {'loc': 0, 'scale': 1}

        # discrete variable: binom has two shape parameters n and p.
        name, scales, shapes = get_distribution_info(
            stats.binom(n=1, p=1, loc=0))
        assert name == 'binom'
        assert shapes == {'n': 1, 'p': 1}
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(
            stats.binom(1, p=1, loc=0))
        assert name == 'binom'
        assert shapes == {'n': 1, 'p': 1}
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(stats.binom(1, 1, loc=0))
        assert name == 'binom'
        assert shapes == {'n': 1, 'p': 1}
        assert scales == {'loc': 0, 'scale': 1}
        name, scales, shapes = get_distribution_info(stats.binom(1, 1, 0))
        assert name == 'binom'
        assert shapes == {'n': 1, 'p': 1}
        assert scales == {'loc': 0, 'scale': 1}

    def test_get_pdf(self):
        # get_pdf must reproduce scipy's own pdf for (nearly) every
        # continuous distribution shipped with scipy.stats.
        rv = stats.beta(a=1, b=2, loc=3, scale=4)
        pdf = get_pdf(rv)
        xx = rv.rvs(100)
        assert np.allclose(pdf(xx), rv.pdf(xx))

        scipy_continuous_var_names = [
            n for n in stats._continuous_distns._distn_names]

        # The distributions covered by this test, paired element-wise with
        # the shape parameters in continuous_var_shapes below.
        # NOTE(review): frechet_r/frechet_l and trapz were removed/renamed in
        # newer scipy releases -- this list pins an older scipy version.
        continuous_var_names = [
            "ksone", "kstwobign", "norm", "alpha", "anglit", "arcsine", "beta",
            "betaprime", "bradford", "burr", "burr12", "fisk", "cauchy", "chi",
            "chi2", "cosine", "dgamma", "dweibull", "expon", "exponnorm",
            "exponweib", "exponpow", "fatiguelife", "foldcauchy", "f",
            "foldnorm", "weibull_min", "weibull_max", "frechet_r", "frechet_l",
            "genlogistic", "genpareto", "genexpon", "genextreme", "gamma",
            "erlang", "gengamma", "genhalflogistic", "gompertz", "gumbel_r",
            "gumbel_l", "halfcauchy", "halflogistic", "halfnorm", "hypsecant",
            "gausshyper", "invgamma", "invgauss", "norminvgauss", "invweibull",
            "johnsonsb", "johnsonsu", "laplace", "levy", "levy_l",
            "levy_stable", "logistic", "loggamma", "loglaplace", "lognorm",
            "gilbrat", "maxwell", "mielke", "kappa4", "kappa3", "moyal",
            "nakagami", "ncx2", "ncf", "t", "nct", "pareto", "lomax",
            "pearson3", "powerlaw", "powerlognorm", "powernorm", "rdist",
            "rayleigh", "reciprocal", "rice", "recipinvgauss", "semicircular",
            "skewnorm", "trapz", "triang", "truncexpon", "truncnorm",
            "tukeylambda", "uniform", "vonmises", "vonmises_line", "wald",
            "wrapcauchy", "gennorm", "halfgennorm", "crystalball", "argus"]
        continuous_var_shapes = [
            {"n": int(1e3)}, {}, {}, {"a": 1}, {}, {}, {"a": 2, "b": 3},
            {"a": 2, "b": 3}, {"c": 2}, {"c": 2, "d": 1},
            {"c": 2, "d": 1}, {"c": 3}, {}, {"df": 10}, {"df": 10},
            {}, {"a": 3}, {"c": 3}, {}, {"K": 2}, {"a": 2, "c": 3}, {"b": 3},
            {"c": 3}, {"c": 3}, {"dfn": 1, "dfd": 1}, {"c": 1}, {"c": 1},
            {"c": 1}, {"c": 1}, {"c": 1}, {"c": 1}, {"c": 1},
            {"a": 2, "b": 3, "c": 1}, {"c": 1}, {"a": 2}, {"a": 2},
            {"a": 2, "c": 1}, {"c": 1}, {"c": 1}, {}, {}, {}, {}, {}, {},
            {"a": 2, "b": 3, "c": 1, "z": 1}, {"a": 1}, {"mu": 1},
            {"a": 2, "b": 1}, {"c": 1}, {"a": 2, "b": 1}, {"a": 2, "b": 1},
            {}, {}, {}, {"alpha": 1, "beta": 1}, {}, {"c": 1}, {"c": 1},
            {"s": 1}, {}, {}, {"k": 1, "s": 1}, {"h": 1, "k": 1}, {"a": 1}, {},
            {"nu": 1}, {"df": 10, "nc": 1}, {"dfn": 10, "dfd": 10, "nc": 1},
            {"df": 10}, {"df": 10, "nc": 1}, {"b": 2}, {"c": 2}, {"skew": 2},
            {"a": 1}, {"c": 2, "s": 1}, {"c": 2}, {"c": 2}, {},
            {"a": 2, "b": 3}, {"b": 2}, {"mu": 2}, {}, {"a": 1},
            {"c": 0, "d": 1}, {"c": 1}, {"b": 2}, {"a": 2, "b": 3}, {"lam": 2},
            {}, {"kappa": 2}, {"kappa": 2}, {}, {"c": 0.5}, {"beta": 2},
            {"beta": 2}, {"beta": 2, "m": 2}, {"chi": 1}]

        # Warn (rather than fail) for scipy distributions not in the list.
        for name in scipy_continuous_var_names:
            if name not in continuous_var_names:
                warn(f"variable {name} is not tested", UserWarning)

        # ncf's pdf is knowingly unsupported; drop it and its shapes entry.
        unsupported_continuous_var_names = ["ncf"]
        for name in unsupported_continuous_var_names:
            ii = continuous_var_names.index(name)
            del continuous_var_names[ii]
            del continuous_var_shapes[ii]

        for name, shapes in zip(
                continuous_var_names, continuous_var_shapes):
            # levy_l has support (-inf, loc], so shift it the other way.
            if name == "levy_l":
                loc = -2
            else:
                loc = 2
            print(name, shapes)
            var = getattr(stats, name)(**shapes, loc=loc, scale=3)
            pdf = get_pdf(var)
            xx = var.rvs(100)
            assert np.allclose(pdf(xx), var.pdf(xx))

    def test_define_iid_random_variables(self):
        """
        Construct an independent and identically distributed (iid)
        multivariate random variable from the tensor-product of
        the same one-dimensional variable.
        """
        var = stats.norm(loc=2, scale=3)
        num_vars = 2
        iid_variable = define_iid_random_variables(var, num_vars)
        # All dimensions share a single unique variable.
        assert len(iid_variable.unique_variables) == 1
        assert np.allclose(
            iid_variable.unique_variable_indices, np.arange(num_vars))

    def test_define_mixed_tensor_product_random_variable_I(self):
        """
        Construct a multivariate random variable from the tensor-product of
        different one-dimensional variables assuming that for a given variable
        type the distribution parameters ARE the same.
        """
        univariate_variables = [
            stats.uniform(-1, 2), stats.beta(1, 1, -1, 2), stats.norm(0, 1), stats.uniform(-1, 2),
            stats.uniform(-1, 2), stats.beta(1, 1, -1, 2)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)

        # Identically parameterized variables are deduplicated: three unique
        # variables, with index lists recording which dimensions share each.
        assert len(variable.unique_variables) == 3
        assert lists_of_arrays_equal(variable.unique_variable_indices,
                                     [[0, 3, 4], [1, 5], [2]])

    def test_define_mixed_tensor_product_random_variable_II(self):
        """
        Construct a multivariate random variable from the tensor-product of
        different one-dimensional variables assuming that for a given variable
        type the distribution parameters ARE NOT the same.
        """
        univariate_variables = [
            stats.uniform(-1, 2), stats.beta(1, 1, -1, 2),
            stats.norm(-1, 2), stats.uniform(), stats.uniform(-1, 2),
            stats.beta(2, 1, -2, 3)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)

        # Only the two identical uniform(-1, 2) entries collapse together.
        assert len(variable.unique_variables) == 5
        assert lists_of_arrays_equal(variable.unique_variable_indices,
                                     [[0, 4], [1], [2], [3], [5]])

    def test_float_discrete_variable(self):
        # Discrete variable with non-integer mass locations (geometric grid).
        nmasses1 = 10
        mass_locations1 = np.geomspace(1.0, 32.0, num=nmasses1)
        masses1 = np.ones(nmasses1, dtype=float)/nmasses1
        var1 = float_rv_discrete(
            name='var1', values=(mass_locations1, masses1))()

        # Raw moments must match the explicit weighted sums.
        for power in [1, 2, 3]:
            assert np.allclose(
                var1.moment(power), (mass_locations1**power).dot(masses1))

        # Monte-Carlo sanity check of the first moment.
        np.random.seed(1)
        num_samples = int(1e6)
        samples = var1.rvs(size=(1, num_samples))
        assert np.allclose(samples.mean(), var1.moment(1), atol=1e-2)

        # import matplotlib.pyplot as plt
        # xx = np.linspace(0,33,301)
        # plt.plot(mass_locations1,np.cumsum(masses1),'rs')
        # plt.plot(xx,var1.cdf(xx),'-'); plt.show()
        assert np.allclose(np.cumsum(masses1), var1.cdf(mass_locations1))

        # import matplotlib.pyplot as plt
        # yy = np.linspace(0,1,51)
        # plt.plot(mass_locations1,np.cumsum(masses1),'rs')
        # plt.plot(var1.ppf(yy),yy,'-o',ms=2); plt.show()
        # ppf must invert cdf exactly on the mass locations ...
        xx = mass_locations1
        assert np.allclose(xx, var1.ppf(var1.cdf(xx)))

        # ... and round values between atoms back to the atom below.
        xx = mass_locations1
        assert np.allclose(xx, var1.ppf(var1.cdf(xx+1e-1)))

    def test_get_statistics(self):
        univariate_variables = [
            stats.uniform(2, 4), stats.beta(1, 1, -1, 2), stats.norm(0, 1)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        mean = variable.get_statistics('mean')
        assert np.allclose(mean.squeeze(), [4, 0, 0])

        # alpha=1 yields the full support interval of each variable.
        intervals = variable.get_statistics('interval', alpha=1)
        assert np.allclose(intervals, np.array(
            [[2, 6], [-1, 1], [-np.inf, np.inf]]))

    def test_float_rv_discrete_pdf(self):
        nmasses1 = 10
        mass_locations1 = np.geomspace(1.0, 32.0, num=nmasses1)
        masses1 = np.ones(nmasses1, dtype=float)/nmasses1
        var1 = float_rv_discrete(
            name='var1', values=(mass_locations1, masses1))()

        # Replace a few query points with -1 (not a mass location): the pdf
        # must be zero there and unchanged everywhere else.
        xk = var1.dist.xk.copy()
        II = np.random.permutation(xk.shape[0])[:3]
        xk[II] = -1
        pdf_vals = var1.pdf(xk)
        assert np.allclose(pdf_vals[II], np.zeros_like(II, dtype=float))
        assert np.allclose(
            np.delete(pdf_vals, II), np.delete(var1.dist.pk, II))

    def test_variables_equivalent(self):
        # Same atoms, different probabilities -> not equivalent.
        nmasses = 10
        xk = np.array(range(nmasses), dtype='float')
        pk = np.ones(nmasses)/nmasses

        xk2 = np.array(range(nmasses), dtype='float')
        # pk2 = np.ones(nmasses)/(nmasses)
        pk2 = np.geomspace(1.0, 512.0, num=nmasses)
        pk2 /= pk2.sum()

        var1 = float_rv_discrete(
            name='float_rv_discrete', values=(xk, pk))()
        var2 = float_rv_discrete(
            name='float_rv_discrete', values=(xk2, pk2))()
        # NOTE(review): `== False` would normally be written `is False` or
        # `assert not ...`; kept as-is to avoid changing behavior here.
        assert variables_equivalent(var1, var2) == False
if __name__ == "__main__":
    # Build and run the suite explicitly (verbosity=2 prints each test name).
    variables_test_suite = unittest.TestLoader().loadTestsFromTestCase(
        TestVariables)
    unittest.TextTestRunner(verbosity=2).run(variables_test_suite)
| [
"29109026+jdjakem@users.noreply.github.com"
] | 29109026+jdjakem@users.noreply.github.com |
a4adcc05a06ecdd2f1d812c69b56ef2e896127a2 | 78d3d78ebded691dd6a92f357c7cc75004ff2184 | /weak_localization/L15_W0p012_highres/socconductance.py | 4f04c6efd310b95559b213cd2e45e8b022be66e3 | [] | no_license | rafaelha/paper_zbcp | 0b5bb9500d997ab99cea9959998e3651be75483b | db2096eb0cb2d7bb801b4e513320adc9cef7a0d9 | refs/heads/master | 2023-02-25T01:41:24.728767 | 2019-09-06T17:24:16 | 2019-09-06T17:24:16 | 199,933,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,744 | py | import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import kwant as kw
import numpy as np
import tinyarray
from numpy import kron
import time
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh
from numpy.random import rand
import pickle
import datetime as dt
import sys
import os
# 2x2 identity and Pauli matrices. The three alias families s*/t*/sig*
# are used in kr(...) for the three tensor factors of the 8-band model
# (presumably particle-hole (sig), orbital (t) and spin (s) -- the gap
# term kr(sigx, t0, s0)*delta indicates sig acts in Nambu space).
s0 = tinyarray.array([[1, 0], [0, 1]]); t0 = s0; sig0 = s0;
sx = tinyarray.array([[0, 1], [1, 0]]); tx = sx; sigx = sx;
sy = tinyarray.array([[0, -1j], [1j, 0]]); ty = sy; sigy = sy;
sz = tinyarray.array([[1, 0], [0, -1]]); tz = sz; sigz = sz;
def kr(a, b, c):
    """Kronecker product of three matrices, grouped as a (x) (b (x) c)."""
    inner = kron(b, c)
    return kron(a, inner)
ax = 20 #lattice constant Angstrom
ax2 = ax**2 # material params Cano et al. (Bernevig) PRB 95 161306 (2017)
ay = 20; az = 20; ay2 = ay**2; az2 = az**2;
# k.p band-structure parameters (energies in eV, lengths in Angstrom)
C0 = -0.0145
C1 = 10.59
C2 = 11.5
A = 0.889
M0 = 0.0205
M1 = -18.77
M2 = -13.5
B1 = 0
# Magnetic field
Bx = 0.035
ggap = 0.035
g = 1
mu_local =-28e-3 # local chemical potential to push magnetic ggap to fermi level
# 4x4 Zeeman/exchange coupling block built from Bx, the gap ggap and the
# g-factor; only selected off-diagonal entries are nonzero (diagonal terms
# are kept commented out below).
jx = np.zeros((4,4))
#jx[0,0] = Bx**2 / gap
jx[0,3] = - Bx**3 / ggap**2
jx[1,2] = -0.5 * g * Bx
jx[2,1] = -0.5 * g * Bx
jx[3,0] = -Bx**3 / ggap**2
#jx[3,3] = Bx **2 / ggap
#######################################
############### parameters ############
#######################################
delta = 0.0001 #0.00018*0.5 # SC order parameter (real)
phi = np.pi
mu = (C0 - C1 * M0 / M1) #- 7e-3# tune chem pot to Dirac nodes
# system size (in lattice sites) and transport direction flags
Lx = 15
Ly = 800
Lz = 15
xdir = 0 #set direction of transport to one
ydir = 1
zdir = 0
# spin-orbit-like on-site coupling strengths (see onsite() below)
alpha=0.01
beta=0.01
def onsite():
    """On-site 8x8 Hamiltonian term without the superconducting gap."""
    # Diagonal kinetic/chemical-potential contribution.
    kinetic = kr(sigz, t0, s0) * (C0 + 2*C1/az2 + 2*C2/ax2 + 2*C2/ay2 - mu)
    # Band-inversion "mass" term.
    mass = kr(sigz, tz, sz) * (M0 + 2*M1/az2 + 2*M2/ax2 + 2*M2/ay2)
    # Off-diagonal alpha/beta couplings.
    coupling = kr(sigz, tx, sz) * alpha + kr(sigz, ty, sz) * beta
    return kinetic + mass + coupling
def gap(): # onsite energy with gap
    # Uniform s-wave pairing term of strength delta.
    return kr(sigx, t0, s0) * delta
def gap_t(x, y, z): # onsite energy with gap
    # Pairing only on the top layer (x == 0); zero elsewhere.
    d = 0
    if x == 0:
        d = delta
    return kr(sigx, t0, s0) * d
def gap_b(x, y, z): # onsite energy with gap
    # Pairing only on the bottom layer (x == Lx-1); zero elsewhere.
    d = 0
    if x == Lx-1:
        d = delta
    return kr(sigx, t0, s0) * d
def disorder(): # onsite energy with disorder in uniform interval [-W/2, W/2]
    return (rand() - 1/2) * W * kr(sigz, t0, s0)
def disorder_t(x, y, z): # onsite energy with disorder in uniform interval [-W/2, W/2]
    # Disorder of strength W only on the top layer (x == 0).
    d = 0
    if x == 0:
        d = W
    return (rand() - 1/2) * d * kr(sigz, t0, s0)
def disorder_b(x, y, z): # onsite energy with disorder in uniform interval [-W/2, W/2]
    # Disorder of strength W only on the bottom layer (x == Lx-1).
    d = 0
    if x == Lx-1:
        d = W
    return (rand() - 1/2) * d * kr(sigz, t0, s0)
def mag(x, y, z): # Zeeman field
    # Zeeman/exchange block jx plus the local chemical-potential shift.
    return kron(sig0, jx) + kr(sigz, t0, s0) * mu_local
def mag_t(x, y, z): # Zeeman field
    # Zeeman term only on the top layer (x == 0).
    d = 0
    if x == 0:
        d = 1
    return ( kron(sig0, jx) + kr(sigz, t0, s0) * mu_local ) * d
def mag_b(x, y, z): # Zeeman field
    # Zeeman term only on the bottom layer (x == Lx-1).
    d = 0
    if x == Lx-1:
        d = 1
    return ( kron(sig0, jx) + kr(sigz, t0, s0) * mu_local ) * d
def DOS(sys, k=100, range=(-1.5*50e-6,1.5*50e-6), bins=1000, fignum=2): # plot the lowest eigenvalues in a hist-plot
    """Histogram the k eigenvalues closest to E=0 (a crude density of states).

    Parameters
    ----------
    sys : finalized kwant system; its sparse Hamiltonian is diagonalized.
    k : number of eigenvalues to compute around sigma=0 (shift-invert).
    range : (lo, hi) energy window of the histogram.
            NOTE: the parameter shadows the builtin range (kept for
            backward compatibility with existing callers).
    bins : number of histogram bins.
    fignum : matplotlib figure number to draw into.

    Returns the array of computed eigenvalues.
    """
    H = sp.csc_matrix(sys.hamiltonian_submatrix(sparse=True))
    # shift-invert around 0 picks the eigenvalues nearest the Fermi level
    ev, _ = eigsh(H, k=k, sigma=0)
    plt.ion()
    #plt.figure()
    #plt.plot(ev,'.')
    # Bug fix: fignum was previously ignored (plt.figure() opened a new
    # anonymous window every call); passing it reuses the same figure.
    plt.figure(fignum)
    plt.clf()
    plt.hist(ev, range=range, bins=bins)
    plt.xlim(range)
    plt.xlabel('Energy')
    plt.ylabel('Number of states')
    return ev
def build_sys():
    """Assemble the kwant system: an Lx x Ly x Lz scattering region with a
    normal lead (lead0, with an electron/hole conservation law) and a lead
    carrying the pairing term gap() (lead1). Transport direction is set by
    the global xdir/ydir/zdir flags; periodic boundary conditions along z.

    Returns (finalized system, lead0 builder, lead1 builder).
    """
    lat = kw.lattice.general([(ax,0,0), (0,ay,0), (0,0,az)], norbs=8)
    sys = kw.Builder()
    sym_left = kw.TranslationalSymmetry((-1*ax*xdir, -1*ay*ydir, -1*az*zdir))
    sym_right = kw.TranslationalSymmetry((1*ax*xdir, 1*ay*ydir, 1*az*zdir))
    # The conservation law block-diagonalizes lead0 into electron/hole
    # sectors, enabling the (0,0)/(0,1) submatrix indexing used in sim().
    lead0 = kw.Builder(sym_left, conservation_law=-kr(sigz,t0,s0))
    lead1 = kw.Builder(sym_right)
    ################# onsite #############################################
    #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # Normal lead: clean on-site term on one unit-cell cross-section.
    for i in np.arange(Lx * (1-xdir) + xdir):
        for j in np.arange(Ly * (1-ydir) + ydir):
            for k in np.arange(Lz * (1-zdir) + zdir):
                lead0[lat(i, j, k)] = onsite() #+ mag_b(i, j, k)
    #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # on site potential
    # Scattering region: clean on-site term plus random disorder.
    for i in np.arange(Lx):
        for j in np.arange(Ly):
            for k in np.arange(Lz):
                sys[(lat(i, j, k) )] = onsite() + disorder()
    #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # Second lead: on-site term plus the superconducting pairing gap().
    for i in np.arange(Lx * (1-xdir) + xdir):
        for j in np.arange(Ly * (1-ydir) + ydir):
            for k in np.arange(Lz * (1-zdir) + zdir):
                lead1[lat(i, j, k)] = onsite() + gap()
    ################# hopping ############################################
    ######################################################################
    #hBx = 1j * B1 / (ax2 * az) * kr(sigz, tx, s0)
    #hBy = -1j * B1 / (ay2 * az) * kr(sigz, tx, s0)
    # Nearest-neighbour hopping matrices along x, y and z.
    hx = kr(sigz, t0, s0) * (-C2)/ax2\
        + kr(sigz, tz, sz) * (-M2)/ax2\
        + kr(sigz, tz, sx) * A * (-1j)/(2 * ax)
    hy = kr(sigz, t0, s0) * (-C2)/ay2\
        + kr(sigz, tz, sz) * (-M2)/ay2\
        + kr(sigz, tz, sy) * A * (-1j)/(2 * ay)
    hz = kr(sigz, t0, s0) * (-C1)/az2\
        + kr(sigz, tz, sz) * (-M1)/az2
    # - (hBx + hBy)
    sys[kw.builder.HoppingKind((1, 0, 0), lat, lat)] = hx
    sys[kw.builder.HoppingKind((0, 1, 0), lat, lat)] = hy
    sys[kw.builder.HoppingKind((0, 0, 1), lat, lat)] = hz
    # Dead code below (B1-dependent diagonal hoppings), disabled since B1=0.
    """
    sys[kw.builder.HoppingKind((1, 0, 1), lat, lat)] = hBx
    sys[kw.builder.HoppingKind((-1, 0, 1), lat, lat)] = hBx
    sys[kw.builder.HoppingKind((0, 1, 1), lat, lat)] = hBy
    sys[kw.builder.HoppingKind((0, -1, 1), lat, lat)] = hBy
    """
    # PBC along z
    for i in np.arange(Lx):
        for j in np.arange(Ly):
            sys[lat(i, j, 0), lat(i, j, Lz-1)] = hz
            """
            if i < Lx-1:
                sys[lat(i+1, j, 0), lat(i, j, Lz-1)] = hBx
            if i > 0:
                sys[lat(i-1, j, 0), lat(i, j, Lz-1)] = hBx
            if j < Ly-1:
                sys[lat(i, j+1, 0), lat(i, j, Lz-1)] = hBy
            if j > 0:
                sys[lat(i, j-1, 0), lat(i, j, Lz-1)] = hBy
            """
    ############### lead0 ##############
    lead0[kw.builder.HoppingKind((1, 0, 0), lat, lat)] = hx
    lead0[kw.builder.HoppingKind((0, 1, 0), lat, lat)] = hy
    lead0[kw.builder.HoppingKind((0, 0, 1), lat, lat)] = hz
    """
    lead0[kw.builder.HoppingKind((1, 0, 1), lat, lat)] = hBx
    lead0[kw.builder.HoppingKind((-1, 0, 1), lat, lat)] = hBx
    lead0[kw.builder.HoppingKind((0, 1, 1), lat, lat)] = hBy
    lead0[kw.builder.HoppingKind((0, -1, 1), lat, lat)] = hBy
    """
    # PBC along z
    for i in np.arange(Lx):
        lead0[lat(i, 0, 0), lat(i, 0, Lz-1)] = hz
        """
        if i < Lx-1:
            lead0[lat(i+1, 0, 0), lat(i, 0, Lz-1)] = hBx
        if i > 0:
            lead0[lat(i-1, 0, 0), lat(i, 0, Lz-1)] = hBx
        lead0[lat(i, 1, 0), lat(i, 0, Lz-1)] = hBy
        lead0[lat(i, -1, 0), lat(i, 0, Lz-1)] = hBy
        """
    ############### lead1 ##############
    lead1[kw.builder.HoppingKind((1, 0, 0), lat, lat)] = hx
    lead1[kw.builder.HoppingKind((0, 1, 0), lat, lat)] = hy
    lead1[kw.builder.HoppingKind((0, 0, 1), lat, lat)] = hz
    """
    lead1[kw.builder.HoppingKind((1, 0, 1), lat, lat)] = hBx
    lead1[kw.builder.HoppingKind((-1, 0, 1), lat, lat)] = hBx
    lead1[kw.builder.HoppingKind((0, 1, 1), lat, lat)] = hBy
    lead1[kw.builder.HoppingKind((0, -1, 1), lat, lat)] = hBy
    """
    # PBC along z
    for i in np.arange(Lx):
        lead1[lat(i, 0, 0), lat(i, 0, Lz-1)] = hz
        """
        if i < Lx-1:
            lead1[lat(i+1, 0, 0), lat(i, 0, Lz-1)] = hBx
        if i > 0:
            lead1[lat(i-1, 0, 0), lat(i, 0, Lz-1)] = hBx
        lead1[lat(i, 1, 0), lat(i, 0, Lz-1)] = hBy
        lead1[lat(i, -1, 0), lat(i, 0, Lz-1)] = hBy
        """
    sys.attach_lead(lead0)
    sys.attach_lead(lead1)
    #kw.plot(sys)
    sys = sys.finalized()
    return sys, lead0, lead1
def sim(sys, range, plot=False):
    """Compute lead-0 scattering quantities at each energy in `range`.

    Parameters
    ----------
    sys : finalized kwant system (from build_sys()).
    range : iterable of energies at which to evaluate the S-matrix.
            NOTE: shadows the builtin range (kept for existing callers).
    plot : if True, also draw the conductance curves via plotG().

    Returns (energies, N, Ree, Reh, G) with G = N - Ree + Reh
    (BTK-style conductance: modes minus normal plus Andreev reflection).
    """
    n = len(range)
    energies = range  # np.linspace(range[0], range[1], n)
    N = np.zeros(n)
    Ree = np.zeros(n)
    Reh = np.zeros(n)
    #G2 = np.zeros(n)
    for i in np.arange(n):
        smatrix = kw.smatrix(sys, energies[i])
        # number of propagating electron modes in lead 0
        N[i] = smatrix.submatrix((0,0), (0,0)).shape[0]
        Ree[i] = smatrix.transmission((0,0), (0,0))  # normal reflection
        Reh[i] = smatrix.transmission((0,1), (0,0))  # Andreev reflection
        #G2[i] = smatrix.transmission((1,0), (0,0))
        print(str(i) + '/' + str(n-1))
    G = N - Ree + Reh
    if plot:
        # Bug fix: plotG takes five arguments (en, N, Ree, Reh, G); the
        # conductance argument was previously missing, raising TypeError
        # whenever plot=True.
        plotG(energies, N, Ree, Reh, G)
    return energies, N, Ree, Reh, G
def plotG(en, N, Ree, Reh, G):
    # Plot conductance and its components; the data are mirrored to
    # negative bias via np.flip before plotting.
    # NOTE(review): the title reads the module-level globals Lx, Ly, Lz,
    # delta, B1, W and duration -- these must all be defined before calling.
    plt.ion()
    plt.figure()
    plt.plot(np.block([-np.flip(en,0), en]), np.block([np.flip(G,0), G]), label='G')
    plt.plot(np.block([-np.flip(en,0), en]), np.block([np.flip(N,0), N]), '-.', label='N')
    plt.plot(np.block([-np.flip(en,0), en]), np.block([np.flip(Reh,0), Reh]), '-.', label='Reh')
    plt.plot(np.block([-np.flip(en,0), en]), np.block([np.flip(Ree,0), Ree]), '-.', label='Ree')
    plt.legend()
    plt.xlabel('Bias in $\mu$eV')
    plt.ylabel('Conductance G')
    plt.title('$L_x=$'+str(Lx)+', $L_y=$'+str(Ly)+', $L_z=$'+str(Lz)+', $\Delta=$'+str(np.round(delta*1e3,1))+'meV, B1='+str(B1)+', W='+str(W*1e3)+'meV, t='+str(np.round(duration))+'s' )
    plt.tight_layout()
def plotLead(lead0, xlim=(0,np.pi/2), ylim=(-20,20), res=100, rand=True):
    # Plot the band structure of a lead (energies in meV vs k*a).
    # NOTE(review): the `rand` parameter shadows numpy.random.rand imported
    # at module level; when True a small random offset is added per band,
    # presumably to visually separate degenerate bands -- confirm intent.
    plt.ion()
    plt.figure(figsize=(4,4))
    lead0 = lead0.finalized()
    bands = kw.physics.Bands(lead0)
    kz = np.linspace(xlim[0], xlim[1], res)
    energies = np.array([bands(k)*1000 for k in kz])  # eV -> meV
    n = energies.shape[1]
    if rand:
        energies += np.repeat(np.random.rand(n).reshape(n,1), res, axis=1).T * 1e-1
    plt.plot(kz, energies, linewidth=0.5)
    plt.ylim(ylim)
    plt.xlim((np.min(kz), np.max(kz)))
    # Axis label follows the globally configured transport direction.
    if xdir != 0:
        plt.xlabel("$k_x a$")
    elif ydir != 0:
        plt.xlabel("$k_y a$")
    elif zdir != 0:
        plt.xlabel("$k_z a$")
    plt.ylabel("Energy in meV")
    plt.title('$L_x=$'+str(Lx)+', $L_y=$'+str(Ly)+', $L_z=$'+str(Lz)+', B='+str(Bx*1e3))
    plt.tight_layout()
    # Saved to the next free numbered file (next() is the module's helper).
    plt.savefig(next("C:/Users/Rafael/Desktop/MJ/transport/FT/figs/lead%s.pdf"))
    return energies
def save(filename, duration, en, N, Ree, Reh, G):
    """Append one simulation run (parameters and results) to *filename*.

    Values are pickled sequentially in a fixed order; a reader must
    unpickle them in the same order.  Geometry and field parameters
    (Lx, Ly, Lz, delta, W, mu, Bx, ggap, g, mu_local, B1, seed) come
    from module globals.  A context manager guarantees the file is
    closed even if pickling fails (the original leaked the handle).
    """
    with open(filename, 'ab') as f1:
        for value in (Lx, Ly, Lz, delta, W, duration,
                      mu - (C0 - C1 * M0 / M1),  # mu relative to cone crossing
                      en, N, Ree, Reh, G,
                      Bx, ggap, g, mu_local, B1, seed):
            pickle.dump(value, f1)
def sweep_disorder(max=50e-3, steps=10, energy=0.03e-3):
    """Sweep the disorder strength W from 0 to *max* in *steps* steps,
    running one single-energy simulation per step, then save the results.

    ``max`` shadows the builtin; the name is kept for backward
    compatibility with keyword callers.
    """
    ww = np.linspace(0e-3, max, steps)
    N = np.zeros(steps)
    Ree = np.zeros(steps)
    Reh = np.zeros(steps)
    G = np.zeros(steps)  # fix: was redundantly initialized twice
    global W  # write changes of W to global variable
    for i in np.arange(len(ww)):
        W = ww[i]
        sys, lead0, lead1 = build_sys()
        en, N[i], Ree[i], Reh[i], G[i] = sim(sys, range=(energy,))
        print(i)
    filename = 'disorder/' + str(dt.datetime.now()).replace(':', '_').replace('.', '_').replace(' ', '_')
    save(filename, en[0], ww, N, Ree, Reh, G)
def loop(rg):
    """Run 100 independent simulation passes over the energy grid *rg*,
    appending every run to one timestamp-named pickle file."""
    stamp = str(dt.datetime.now())
    for forbidden in (':', '.', ' '):
        stamp = stamp.replace(forbidden, '_')
    filename = stamp + '.pickle'
    for _ in range(100):
        start(filename, rg)
def next(name):
    """Return the first filename ``name % i`` (i = 0, 1, 2, ...) that does
    not yet exist on disk.  (Shadows the builtin ``next``; kept for
    compatibility with existing callers.)"""
    index = 0
    candidate = name % index
    while os.path.exists(candidate):
        index += 1
        candidate = name % index
    return candidate
def nextrun(name):
    """Starting at index 10 and stepping by 5, find the first i for which
    ``name % i`` does not exist; return the pair (i, name % i)."""
    index = 10
    candidate = name % index
    while os.path.exists(candidate):
        index += 5
        candidate = name % index
    return index, candidate
def start(filename, rg):
    """Build the system, run the transport simulation over the energy grid
    *rg*, and append the timed results to *filename*."""
    t0 = time.time()
    system, lead0, lead1 = build_sys()
    energies, N, Ree, Reh, G = sim(system, range=rg, plot=False)
    elapsed = time.time() - t0
    save(filename, elapsed, energies, N, Ree, Reh, G)
if __name__ == '__main__':
    W = 0.012
    # The disorder seed comes from the command line so that many
    # independent runs can be queued with different seeds.
    seed = int(sys.argv[1])
    np.random.seed(seed)
    # Non-uniform energy grid: denser sampling between 0 and 100 ueV.
    rg = np.sort(np.block([np.linspace(-200e-6,-100e-6,15)[:-1], np.linspace(0,100e-6,35), np.linspace(100e-6,200e-6,15)[1:]]))
    loop(rg)
# The block below is dead code kept as reference (single-run variant).
"""
start_time = time.time()
sys, lead0, lead1 = build_sys()
en, N, Ree, Reh, G = sim(sys, range=rg, plot=False)
duration = time.time()-start_time
plotG(en*1e6, N, Ree, Reh, G)
plt.savefig(next("C:/Users/Rafael/Desktop/Dirac_NS_junction/_notes/fig_soc/cond%s.pdf"))
#plt.savefig('Lz_%s.pdf' % Lz)
#plt.close()
#save(filename, duration, en, N, Ree, Reh, G)
"""
| [
"rafaelhaenel@phas.ubc.ca"
] | rafaelhaenel@phas.ubc.ca |
6efa223635fb295398bf192b057721cde7c8084f | 364ebf23f0a6fba0c9e9fd5dfce6f657dc9bad32 | /estelcon_web/urls.py | 5aade7198cc85801026bde553332e1d4470e5fad | [] | no_license | hirunatan/estelcon_web | bed55f8b82ca268d314c761923b50a4bf9857876 | 13e69c90fd6fa2f4b1efab259d94788f3b6ce00e | refs/heads/master | 2021-01-20T11:30:33.367564 | 2018-11-06T20:07:49 | 2018-11-06T20:07:49 | 23,476,370 | 1 | 2 | null | 2015-10-02T20:50:35 | 2014-08-29T20:08:42 | HTML | UTF-8 | Python | false | false | 1,113 | py | from __future__ import absolute_import, print_function, unicode_literals
from cms.sitemaps import CMSSitemap
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.static import serve
admin.autodiscover()
from user_profiles import urls as user_profiles_urls
from activities import urls as activities_urls
# The sitemap endpoint is language-independent, so it stays outside
# i18n_patterns.
urlpatterns = [
    url(r'^sitemap\.xml$', sitemap,
        {'sitemaps': {'cmspages': CMSSitemap}}),
]
# All remaining URLs are language-prefixed.  django-cms must come last:
# its catch-all pattern would otherwise shadow the other includes.
urlpatterns += i18n_patterns(
    url(r'^admin/', include(admin.site.urls)),  # NOQA
    url(r'^', include(user_profiles_urls)),
    url(r'^', include(activities_urls)),
    url(r'^', include('cms.urls')),
)
# This is only needed when using runserver (serves media + static in DEBUG).
if settings.DEBUG:
    urlpatterns = [
        url(r'^media/(?P<path>.*)$', serve,
            {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
    ] + staticfiles_urlpatterns() + urlpatterns
| [
"andres.moya@kaleidos.net"
] | andres.moya@kaleidos.net |
169ad0e7002fd35871d0ea40d975ce59b2119dab | 18a6b272d4c55b24d9c179ae1e58959674e53afe | /tf_rl/examples/NerveNet/environments/asset_generator.py | 2f0bdc000944dee0fe98136097ccebe41b778caf | [
"MIT"
] | permissive | Rowing0914/TF2_RL | 6cce916f409b3d4ef2a5a40a0611908f20d08b2c | c1b7f9b376cbecf01deb17f76f8e761035ed336a | refs/heads/master | 2022-12-10T09:58:57.456415 | 2021-05-23T02:43:21 | 2021-05-23T02:43:21 | 233,476,950 | 9 | 1 | MIT | 2022-12-08T07:02:42 | 2020-01-12T23:53:48 | Python | UTF-8 | Python | false | false | 1,939 | py | # -----------------------------------------------------------------------------
# @brief:
# generate the xml files for each different sub-tasks of one master task
# @author:
# Tingwu Wang, Aug. 30th, 2017
# -----------------------------------------------------------------------------
import argparse
import environments.init_path as init_path
import os
import num2words
import environments.centipede_generator as centipede_generator
import environments.snake_generator as snake_generator
import environments.reacher_generator as reacher_generator
# For each master task, the sub-task sizes (number of legs / segments /
# joints) to generate one XML variant for.
TASK_DICT = {
    'Centipede': [3, 5, 7] + [4, 6, 8, 10, 12, 14] + [20, 30, 40, 50],
    # 'CpCentipede': [3, 5, 7] + [4, 6, 8, 10, 12, 14], # this doesn't exist anymore
    'Reacher': [0, 1, 2, 3, 4, 5, 6, 7],
    'Snake': [3, 4, 5, 6, 7, 8, 9] + [10, 20, 40],
}
# Generated XML assets are written under environments/assets.
OUTPUT_BASE_DIR = os.path.join(init_path.get_abs_base_dir(),
                               'environments', 'assets')
def save_xml_files(model_names, xml_number, xml_contents):
    """Write *xml_contents* to <model_names><Number>.xml in OUTPUT_BASE_DIR.

    The numeric suffix is spelled out in words with a leading capital,
    e.g. ('Centipede', 3, ...) -> 'CentipedeThree.xml'.
    """
    # get the xml path ready
    number_str = num2words.num2words(xml_number)
    xml_names = model_names + number_str[0].upper() + number_str[1:] + '.xml'
    xml_file_path = os.path.join(OUTPUT_BASE_DIR, xml_names)
    # Context manager closes the file even if the write fails
    # (the original leaked the handle on error).
    with open(xml_file_path, 'w') as f:
        f.write(xml_contents)
# Map from environment name to the function that produces its XML body.
GENERATOR_DICT = {
    'Centipede': centipede_generator.generate_centipede,
    'Snake': snake_generator.generate_snake,
    'Reacher': reacher_generator.generate_reacher
}
if __name__ == '__main__':
    # parse the parameters
    parser = argparse.ArgumentParser(description='xml_asset_generator.')
    parser.add_argument("--env_name", type=str, default='Centipede')
    args = parser.parse_args()
    # generator the environment xmls:
    # one XML file per size variant listed in TASK_DICT for this env.
    for i_leg_num in TASK_DICT[args.env_name]:
        xml_contents = GENERATOR_DICT[args.env_name](i_leg_num)
        save_xml_files(args.env_name, i_leg_num, xml_contents)
| [
"kosakaboat@gmail.com"
] | kosakaboat@gmail.com |
e13a436074fd79fa40edf5d9d28341b1d5a8ff27 | 7d4964998493d333900c4be9d78ecbf8fd334127 | /devportfolio/wsgi.py | a9a8c5f341c35b93937e664b961decb5be4d2122 | [
"MIT"
] | permissive | nickmwangemi/devport | 56fd7556895469127d2b86cdf8a9b1e7d4346ed0 | 25b6c0ac97ada74cadf99ebcbae922d1aa0174fe | refs/heads/main | 2023-04-17T07:57:25.644870 | 2021-04-30T15:55:52 | 2021-04-30T15:55:52 | 362,522,911 | 0 | 1 | null | 2021-04-30T07:59:27 | 2021-04-28T15:41:19 | Python | UTF-8 | Python | false | false | 405 | py | """
WSGI config for devportfolio project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the development settings; deployments may override
# DJANGO_SETTINGS_MODULE in the process environment before this runs.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devportfolio.settings.dev")
application = get_wsgi_application()
| [
"nickmwangemi@gmail.com"
] | nickmwangemi@gmail.com |
9a1ec3173aa9c9f1d1931ee671f3d6db686a96b7 | 5c9d96d8184f50afddc0704ab4d4eb8ebe8d132b | /REGAN_kitchen_companion.py | aa881f09b877d8df5b625fc1d4359c657e0bbb4b | [] | no_license | Tanyaregan/practices | 2248da1007577341a6b8c29d17b3369b04115cdd | d981941ce877753b87575542b3adf55879fb66f2 | refs/heads/master | 2021-05-23T05:30:40.405654 | 2018-05-08T20:23:31 | 2018-05-08T20:23:31 | 95,040,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,563 | py | def kitchen_companion():
"""Displays and edits a list of ingredients."""
import random
print 'Welcome to Kitchen Companion!'
ingredient_list = []
ingredients = open('ingredients.txt', 'r+')
for i in ingredients:
ingredient_list.append(i.rstrip())
while True:
print ''
print '*** INGREDIENTS LIST MAIN MENU ***'
print ''
print 'Would you like to:'
print ''
print '(L) List your ingredients in alphabetical order'
print '(A) Add an ingredient'
print '(R) Remove an ingredient'
print '(S) Search for a specific ingredient'
print '(X) Pick a random ingredient'
print '(Q) to quit.'
choice = raw_input('>>> ')
if choice == 'L':
print ''
print 'Your ingredients:'
for item in sorted(ingredient_list):
print item
continue
elif choice == 'A':
print ''
print 'What ingredient would you like to add?: '
addition = raw_input('>>> ')
if addition not in ingredient_list:
ingredient_list.append(addition)
print addition, 'has been added to the list.'
continue
else:
print addition, 'is already on the list.'
continue
elif choice == 'R':
print ''
print 'What ingredient would you like to delete?: '
deletion = raw_input('>>> ')
if deletion not in ingredient_list:
print deletion, 'is not on the list'
continue
else:
ingredient_list.remove(deletion)
print deletion, 'removed from list'
continue
elif choice == 'S':
print ''
print 'What item would you like to search for?: '
search = raw_input('>>> ')
for item in ingredient_list:
if item.startswith(search):
print item
continue
elif choice == 'X':
print ''
print 'Here is a random item, you silly Balloonicorn:'
rand = random.choice(ingredient_list)
print rand
elif choice == 'Q':
print ''
print 'Thanks for using Kitchen Companion!'
break
else:
print 'That entry is not in the list of choices, please try again.'
continue
ingredients.close()
kitchen_companion()
| [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
6f45903d3d8e38b6362d3e59d9804f18ee8c267c | 30150c7f6ed7a10ac50eee3f40101bc3165ebf9e | /src/minigame/DistributedTravelGame.py | 3134c91bb5e1a710933a3c4d3d34ad0e63f93dca | [] | no_license | toontown-restoration-project/toontown | c2ad0d552cb9d5d3232ae6941e28f00c11ca3aa8 | 9bef6d9f823b2c12a176b33518eaa51ddbe3fd2f | refs/heads/master | 2022-12-23T19:46:16.697036 | 2020-10-02T20:17:09 | 2020-10-02T20:17:09 | 300,672,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59,249 | py | """DistributedMinigameTemplate module: contains the DistributedMinigameTemplate class"""
from toontown.toonbase.ToontownModules import *
from toontown.toonbase.ToonBaseGlobal import *
from toontown.toonbase.ToontownGlobals import GlobalDialogColor
from .DistributedMinigame import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownTimer
from . import TravelGameGlobals
import math
from toontown.toonbase.ToontownModules import rad2Deg
from toontown.toontowngui import TTDialog
from direct.interval.IntervalGlobal import *
from . import VoteResultsPanel
from . import VoteResultsTrolleyPanel
# For each minigame, what's the corresponding icon
# (model node names inside the trolley-sign GUI model).
IconDict = {
    ToontownGlobals.RaceGameId : 'mg_trolley_sign_race',
    ToontownGlobals.CannonGameId : 'mg_trolley_sign_cannon',
    ToontownGlobals.TagGameId : 'mg_trolley_sign_tag',
    ToontownGlobals.PatternGameId : 'mg_trolley_sign_minnie',
    ToontownGlobals.RingGameId : 'mg_trolley_sign_ring',
    ToontownGlobals.MazeGameId : 'mg_trolley_sign_maze',
    ToontownGlobals.TugOfWarGameId : 'mg_trolley_sign_tugawar',
    ToontownGlobals.CatchGameId : 'mg_trolley_sign_catch',
    ToontownGlobals.DivingGameId : 'mg_trolley_sign_dive',
    ToontownGlobals.TargetGameId : 'mg_trolley_sign_umbrella',
    ToontownGlobals.PairingGameId : 'mg_trolley_sign_card',
    ToontownGlobals.VineGameId : 'mg_trolley_sign_vine',
    ToontownGlobals.IceGameId : 'mg_trolley_sign_ice',
    ToontownGlobals.PhotoGameId : 'mg_trolley_sign_photo',
    ToontownGlobals.TwoDGameId: 'mg_trolley_sign_2d',
    ToontownGlobals.CogThiefGameId: 'mg_trolley_sign_theif',
}
# For each minigame, what is it called (localized display title).
# Note the travel game itself is included, unlike IconDict.
MinigameNameDict ={
    ToontownGlobals.RaceGameId : TTLocalizer.RaceGameTitle,
    ToontownGlobals.CannonGameId : TTLocalizer.CannonGameTitle,
    ToontownGlobals.TagGameId : TTLocalizer.TagGameTitle,
    ToontownGlobals.PatternGameId : TTLocalizer.PatternGameTitle,
    ToontownGlobals.RingGameId : TTLocalizer.RingGameTitle,
    ToontownGlobals.MazeGameId : TTLocalizer.MazeGameTitle,
    ToontownGlobals.TugOfWarGameId : TTLocalizer.TugOfWarGameTitle,
    ToontownGlobals.CatchGameId : TTLocalizer.CatchGameTitle,
    ToontownGlobals.DivingGameId : TTLocalizer.DivingGameTitle,
    ToontownGlobals.TargetGameId : TTLocalizer.TargetGameTitle,
    ToontownGlobals.PairingGameId : TTLocalizer.PairingGameTitle,
    ToontownGlobals.VineGameId : TTLocalizer.VineGameTitle,
    ToontownGlobals.TravelGameId : TTLocalizer.TravelGameTitle,
    ToontownGlobals.IceGameId : TTLocalizer.IceGameTitle,
    ToontownGlobals.PhotoGameId : TTLocalizer.PhotoGameTitle,
    ToontownGlobals.TwoDGameId: TTLocalizer.TwoDGameTitle,
    ToontownGlobals.CogThiefGameId: TTLocalizer.CogThiefGameTitle,
}
# create a label from a number
def makeLabel(itemName, itemNum, *extraArgs):
    """Build a DirectLabel for one scroll-list vote entry.

    Negative numbers mean "up" and render blue; zero renders black;
    positive ("down") renders red.  The label always shows the magnitude.
    """
    count = int(itemName)
    if count < 0:
        textColor = Vec4(0, 0, 1, 1)
    elif count > 0:
        textColor = Vec4(1, 0, 0, 1)
    else:
        textColor = Vec4(0, 0, 0, 1)
    return DirectLabel(text=str(abs(count)),
                       text_fg=textColor,
                       relief=DGG.RIDGE,
                       frameSize=(-1.2, 1.2, -0.225, 0.8),
                       scale=1.0)
def map3dToAspect2d(node, point):
    """Project *point* (a Point3 relative to *node*) through the current
    camera lens into the aspect2d scene graph.

    Returns the corresponding Point3 in aspect2d, or None when the point
    is not onscreen (outside the lens frustum).
    """
    # 3-d point in camera space -> 2-d lens coordinates.
    cameraSpacePoint = base.cam.getRelativePoint(node, point)
    lensPoint = Point2()
    if not base.camLens.project(cameraSpacePoint, lensPoint):
        return None
    # render2d coordinates, then into aspect2d.
    render2dPoint = Point3(lensPoint[0], 0, lensPoint[1])
    return aspect2d.getRelativePoint(render2d, render2dPoint)
# invert a dictionary, turn the keys into values and vice versa
def invertTable(table):
    """Return a new dict mapping each value of *table* back to one of its
    keys.  When several keys share a value, the first key encountered (in
    the table's iteration order) wins."""
    inverted = {}
    for key, value in table.items():
        # setdefault keeps the first key seen for a duplicated value.
        inverted.setdefault(value, key)
    return inverted
class DistributedTravelGame(DistributedMinigame):
    """Client side of the trolley 'travel game': toons spend votes to steer
    the trolley across a branching board of switches toward minigame and
    bonus destinations."""
    notify = directNotify.newCategory("DistributedTravelGame")
    # define constants that you won't want to tweak here
    idToNames = MinigameNameDict          # minigame id -> localized title
    TrolleyMoveDuration = 3               # seconds per switch-to-switch hop
    UseTrolleyResultsPanel = True         # show votes on the in-world panel
    FlyCameraUp = True
    FocusOnTrolleyWhileMovingUp = False
def __init__(self, cr):
    """Set up the travel-game FSM, vote bookkeeping, and audio assets."""
    DistributedMinigame.__init__(self, cr)
    self.gameFSM = ClassicFSM.ClassicFSM('DistributedTravelGame',
        [
            State.State('off',
                        self.enterOff,
                        self.exitOff,
                        ['inputChoice']),
            State.State('inputChoice',
                        self.enterInputChoice,
                        self.exitInputChoice,
                        ['waitServerChoices',
                         'displayVotes',
                         'cleanup']),
            State.State('waitServerChoices',
                        self.enterWaitServerChoices,
                        self.exitWaitServerChoices,
                        ['displayVotes',
                         'cleanup']),
            State.State('displayVotes',
                        self.enterDisplayVotes,
                        self.exitDisplayVotes,
                        ['moveTrolley',
                         'cleanup']),
            State.State('moveTrolley',
                        self.enterMoveTrolley,
                        self.exitMoveTrolley,
                        ['inputChoice', 'winMovie',
                         'cleanup']),
            State.State('winMovie',
                        self.enterWinMovie,
                        self.exitWinMovie,
                        ['cleanup']),
            State.State('cleanup',
                        self.enterCleanup,
                        self.exitCleanup,
                        []),
        ],
        # Initial State
        'off',
        # Final State
        'cleanup',
        )
    # it's important for the final state to do cleanup;
    # on disconnect, the ClassicFSM will be forced into the
    # final state. All states (except 'off') should
    # be prepared to transition to 'cleanup' at any time.
    # Add our game ClassicFSM to the framework ClassicFSM
    self.addChildGameFSM(self.gameFSM)
    # avId -> remaining votes for that player.
    self.currentVotes = {}
    # (x, y, z, h, p, r) of the fixed overhead camera.
    self.cameraTopView = (100, -20, 280, 0, -89, 0)
    # These are None to indicate we have not yet established a
    # timer; they are filled in as we enter the inputChoice state
    # and as the AI reports a start time, respectively. When both
    # are filled in, the timer will be displayed.
    self.timer = None
    self.timerStartTime = None
    # we always start at node 0
    self.currentSwitch = 0
    self.destSwitch = 0
    self.minigameLabels = []
    self.minigameIcons = []
    self.bonusLabels = []
    self.trolleyAwaySfx = base.loadSfx("phase_4/audio/sfx/SZ_trolley_away.mp3")
    self.trolleyBellSfx = base.loadSfx("phase_4/audio/sfx/SZ_trolley_bell.mp3")
    self.turntableRotateSfx = base.loadSfx("phase_4/audio/sfx/MG_sfx_travel_game_turntble_rotate_2.mp3")
    self.wonGameSfx = base.loadSfx("phase_4/audio/sfx/MG_sfx_travel_game_bonus.mp3")
    self.lostGameSfx = base.loadSfx("phase_4/audio/sfx/MG_sfx_travel_game_no_bonus_2.mp3")
    self.noWinnerSfx = base.loadSfx("phase_4/audio/sfx/MG_sfx_travel_game_no_bonus.mp3")
    self.boardIndex = 0  # which board layout are we using
    self.avNames = []  # names of the players, useful if one disconnects after casting a vote
    self.disconnectedAvIds = []  # keep track which players have disconnected
def getTitle(self):
    """Return the localized display title of this minigame."""
    return TTLocalizer.TravelGameTitle
def getInstructions(self):
    """Return the localized instruction text shown in the rules panel."""
    return TTLocalizer.TravelGameInstructions
def getMaxDuration(self):
    """Upper bound on the game's duration in seconds; 0 means unbounded."""
    # how many seconds can this minigame possibly last (within reason)?
    # this is for debugging only
    return 0
def load(self):
    """
    load travel game assets, not necessarily showing them onscreen
    """
    self.notify.debug("load")
    DistributedMinigame.load(self)
    # load resources and create objects here
    self.sky = loader.loadModel("phase_3.5/models/props/TT_sky")
    self.gameBoard = loader.loadModel("phase_4/models/minigames/toon_cannon_gameground")
    self.gameBoard.setPosHpr(100,0,0,0,0,0)
    self.gameBoard.setScale(1.0)
    # Pull just the trolley car out of the station model.
    station = loader.loadModel('phase_4/models/modules/trolley_station_TT.bam')
    self.trolleyCar = station.find('**/trolley_car')
    self.trolleyCar.reparentTo(hidden)
    self.trolleyCarOrigPos = self.trolleyCar.getPos()
    self.trolleyCarOrigHpr = self.trolleyCar.getHpr()
    self.trolleyCar.setPosHpr(0,0,0,0,0,0)
    self.trolleyCar.setScale(1.0)
    # Start one track increment before the root switch (the "coming in" leg).
    self.trolleyCar.setX(self.trolleyCar.getX() - TravelGameGlobals.xInc)
    station.removeNode()
    # Variables used to animate trolley parts
    # Key
    self.keys = self.trolleyCar.findAllMatches('**/key')
    self.numKeys = self.keys.getNumPaths()
    self.keyInit = []
    self.keyRef = []
    for i in range(self.numKeys):
        key = self.keys[i]
        key.setTwoSided(1)
        ref = self.trolleyCar.attachNewNode('key' + repr(i) + 'ref')
        ref.iPosHpr(key)
        self.keyRef.append(ref)
        self.keyInit.append(key.getTransform())
    # Front wheels
    self.frontWheels = self.trolleyCar.findAllMatches('**/front_wheels')
    self.numFrontWheels = self.frontWheels.getNumPaths()
    self.frontWheelInit = []
    self.frontWheelRef = []
    for i in range(self.numFrontWheels):
        wheel = self.frontWheels[i]
        ref = self.trolleyCar.attachNewNode('frontWheel' + repr(i) + 'ref')
        ref.iPosHpr(wheel)
        self.frontWheelRef.append(ref)
        self.frontWheelInit.append(wheel.getTransform())
    # Back wheels
    self.backWheels = self.trolleyCar.findAllMatches('**/back_wheels')
    self.numBackWheels = self.backWheels.getNumPaths()
    self.backWheelInit = []
    self.backWheelRef = []
    for i in range(self.numBackWheels):
        wheel = self.backWheels[i]
        ref = self.trolleyCar.attachNewNode('backWheel' + repr(i) + 'ref')
        ref.iPosHpr(wheel)
        self.backWheelRef.append(ref)
        self.backWheelInit.append(wheel.getTransform())
    # NOTE(review): result is unused here -- confirm this Func is needed.
    trolleyAnimationReset = Func(self.resetAnimation)
    self.trainSwitches = {}
    self.trainTracks = {}
    self.tunnels = {}  # switch to tunnel dict
    self.extraTrainTracks = []  # the tracks that connects to the root and the leaves
    turnTable = loader.loadModel("phase_4/models/minigames/trolley_game_turntable")
    # Measure one straight track segment so we know how far apart to lay them.
    minPoint = Point3(0,0,0)
    maxPoint = Point3(0,0,0)
    turnTable.calcTightBounds(minPoint, maxPoint)
    self.fullLength = maxPoint[0]
    # Place one turntable per board switch, and a track per link.
    for key in list(TravelGameGlobals.BoardLayouts[self.boardIndex].keys()):
        info = TravelGameGlobals.BoardLayouts[self.boardIndex][key]
        switchModel = turnTable.find('**/turntable1').copyTo(render)
        switchModel.setPos(* info['pos'])
        switchModel.reparentTo(hidden)
        self.trainSwitches[key] = switchModel
        # load the links
        zAdj = 0
        for otherSwitch in info['links']:
            info2 = TravelGameGlobals.BoardLayouts[self.boardIndex][otherSwitch]
            x1,y1,z1 = info['pos']
            x2,y2,z2 = info2['pos']
            linkKey = (key, otherSwitch)
            trainTrack = self.loadTrainTrack(x1,y1,x2,y2, zAdj)
            trainTrack.reparentTo(hidden)
            self.trainTracks[linkKey] = trainTrack
            # Slight z offset per link avoids z-fighting of stacked tracks.
            zAdj += 0.005
    # lay an extra track for the trolley coming in
    rootInfo = TravelGameGlobals.BoardLayouts[self.boardIndex][0]
    rootX, rootY, rootZ = rootInfo['pos']
    startX = rootX - TravelGameGlobals.xInc
    trainTrack = self.loadTrainTrack(startX,rootY, rootX, rootY)
    self.extraTrainTracks.append(trainTrack)
    # lay more extra tracks for the trolley going out
    tunnelX = None
    for key in list(TravelGameGlobals.BoardLayouts[self.boardIndex].keys()):
        if self.isLeaf(key):
            info = TravelGameGlobals.BoardLayouts[self.boardIndex][key]
            switchX, switchY, switchZ = info['pos']
            endX = switchX + TravelGameGlobals.xInc
            trainTrack = self.loadTrainTrack( switchX, switchY, endX, switchY)
            self.extraTrainTracks.append(trainTrack)
            # load a tunnel at the end
            tempModel = loader.loadModel("phase_4/models/minigames/trolley_game_turntable")
            tunnel = tempModel.find('**/tunnel1')
            tunnel.reparentTo(render)
            tempModel.removeNode()
            # Compute the tunnel X once from the first exit track's bounds.
            if not tunnelX:
                minTrackPoint = Point3(0,0,0)
                maxTrackPoint = Point3(0,0,0)
                trainTrack.calcTightBounds(minTrackPoint, maxTrackPoint)
                tunnelX = maxTrackPoint[0]
            tunnel.setPos(tunnelX, switchY, 0)
            tunnel.wrtReparentTo(trainTrack)
            self.tunnels[key] = tunnel
    turnTable.removeNode()
    self.loadGui()
    self.introMovie = self.getIntroMovie()
    self.music = base.loadMusic("phase_4/audio/bgm/MG_Travel.ogg")
    self.flashWinningBeansTrack = None
def loadTrainTrack(self, x1, y1, x2, y2, zAdj = 0):
    """Build a straight run of track from (x1, y1) to (x2, y2).

    Copies of the straight-track segment are laid end to end along +X
    until the distance is covered, then the whole run is rotated into
    place about its (x1, y1) origin.  zAdj lifts the run slightly so
    overlapping tracks don't z-fight.  Returns the parent NodePath.

    Fix: removed the dead locals partIndex/newX/newY from the original.
    """
    turnTable = loader.loadModel("phase_4/models/minigames/trolley_game_turntable")
    trainPart = turnTable.find('**/track_a2')
    trackHeight = 0.03
    trainTrack = render.attachNewNode('trainTrack%d%d%d%d' % (x1, y1, x2, y2))
    trainTrack.setPos(x1, y1, trackHeight)
    xDiff = abs(x2 - x1)
    yDiff = abs(y2 - y1)
    # Heading of the run in the XY plane (degrees).
    angleInRadians = math.atan((float(y2) - y1) / (x2 - x1))
    angle = rad2Deg(angleInRadians)
    # repeat our straight track as necessary to cover the distance
    desiredLength = math.sqrt((xDiff * xDiff) + (yDiff * yDiff))
    lengthToGo = desiredLength
    lengthCovered = 0
    while lengthToGo > self.fullLength / 2.0:
        onePart = trainPart.copyTo(trainTrack)
        onePart.setX(lengthCovered)
        lengthToGo -= self.fullLength
        lengthCovered += self.fullLength
    trainTrack.setH(angle)
    trainTrack.setPos(x1, y1, trackHeight + zAdj)
    turnTable.removeNode()
    return trainTrack
def loadGui(self):
    """
    load gui and other 2d parts of the game:
    the remaining-votes readout, the vote-choice frame with its scrolled
    number list and Vote! button, and the "waiting" label.
    """
    scoreText = [ str(self.currentVotes[self.localAvId]),]
    self.gui = DirectFrame()
    # Top-left readout of the local toon's remaining votes.
    self.remainingVotesFrame = DirectFrame(
        parent = self.gui,
        relief = None,
        geom = DGG.getDefaultDialogGeom(),
        geom_color = GlobalDialogColor,
        geom_scale = (7, 1, 1),
        pos = (-0.9, 0, 0.8),
        scale = 0.1,
        text = TTLocalizer.TravelGameRemainingVotes,
        text_align = TextNode.ALeft,
        text_scale = TTLocalizer.DTGRemainingVotesFrameTextScale,
        text_pos = (-3.4,-0.1, 0.0)
        )
    self.localVotesRemaining = DirectLabel(
        parent = self.remainingVotesFrame,
        relief = None,
        text = scoreText,
        text_fg = VBase4(0, 0.5, 0, 1),
        text_align = TextNode.ARight,
        text_scale = 0.7,
        pos = (3.2, 0, -0.15),
        )
    guiModel = loader.loadModel("phase_3.5/models/gui/friendslist_gui")
    # Bottom bar where the player composes "Use N votes Up/Down".
    self.choiceFrame = DirectFrame(
        parent = self.gui,
        relief = None,
        pos = (-0.55,0,-0.85),
        image = DGG.getDefaultDialogGeom(),
        image_scale = (1.4, 1, 0.225),
        image_color = GlobalDialogColor,
        )
    self.useLabel = DirectLabel(text = TTLocalizer.TravelGameUse,
        parent = self.choiceFrame,
        pos = (-0.59,0,-0.01),
        text_scale = TTLocalizer.DTGUseLabelTextScale,
        relief = None,
        )
    self.votesPeriodLabel = DirectLabel(text = TTLocalizer.TravelGameVotesWithPeriod,
        parent = self.choiceFrame,
        pos = (-0.21,0,-0.01),
        text_scale = TTLocalizer.DTGVotesPeriodLabelTextScale,
        relief = None,
        text_align = TextNode.ALeft,
        )
    self.votesToGoLabel = DirectLabel(text = TTLocalizer.TravelGameVotesToGo,
        parent = self.choiceFrame,
        pos = (-0.21,0,-0.01),
        text_scale = TTLocalizer.DTGVotesToGoLabelTextScale,
        relief = None,
        text_align = TextNode.ALeft,
        )
    self.upLabel = DirectLabel(text = TTLocalizer.TravelGameUp,
        parent = self.choiceFrame,
        pos = (0.31,0,-0.01),
        text_scale = TTLocalizer.DTGUpLabelTextScale,
        text_fg = Vec4(0,0,1,1),
        relief = None,
        text_align = TextNode.ALeft,
        )
    self.downLabel = DirectLabel(text = TTLocalizer.TravelGameDown,
        parent = self.choiceFrame,
        pos = (0.31,0,-0.01),
        text_scale = TTLocalizer.DTGDownLabelTextScale,
        text_fg = Vec4(1,0,0,1),
        relief = None,
        text_align = TextNode.ALeft,
        )
    # Scrolled list of vote counts; items are built by makeLabel above.
    self.scrollList = DirectScrolledList(
        parent = self.choiceFrame,
        relief = None,
        pos = (-0.36,0,-0.02),
        # inc and dec are DirectButtons
        incButton_image = (guiModel.find("**/FndsLst_ScrollUp"),
                           guiModel.find("**/FndsLst_ScrollDN"),
                           guiModel.find("**/FndsLst_ScrollUp_Rllvr"),
                           guiModel.find("**/FndsLst_ScrollUp"),
                           ),
        incButton_relief = None,
        incButton_pos = (0.0, 0.0, -0.04),
        # Make the disabled button darker
        incButton_image3_color = Vec4(0.6, 0.6, 0.6, 0.6),
        incButton_scale = (1.0, 1.0, -1.0),
        decButton_image = (guiModel.find("**/FndsLst_ScrollUp"),
                           guiModel.find("**/FndsLst_ScrollDN"),
                           guiModel.find("**/FndsLst_ScrollUp_Rllvr"),
                           guiModel.find("**/FndsLst_ScrollUp"),
                           ),
        decButton_relief = None,
        decButton_pos = (0.0, 0.0, 0.095),
        # Make the disabled button darker
        decButton_image3_color = Vec4(0.6, 0.6, 0.6, 0.6),
        # itemFrame is a DirectFrame
        itemFrame_pos = (0.0, 0.0, 0.0),
        itemFrame_relief = DGG.GROOVE,
        # each item is a button with text on it
        numItemsVisible = 1,
        itemMakeFunction = makeLabel,
        items = [],
        scrollSpeed = 3.0,
        itemFrame_scale = 0.1,
        command = self.scrollChoiceChanged,
        )
    self.putChoicesInScrollList()
    # Init buttons
    buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
    okImageList = (buttons.find('**/ChtBx_OKBtn_UP'),
                   buttons.find('**/ChtBx_OKBtn_DN'),
                   buttons.find('**/ChtBx_OKBtn_Rllvr'))
    self.voteButton = DirectButton(
        parent = self.choiceFrame,
        relief = None,
        image = okImageList,
        image_scale = 3.0,
        pos = (0.85, 0, 0.0),
        text = TTLocalizer.TravelGameVoteWithExclamation,
        text_scale = TTLocalizer.DTGVoteBtnTextScale,
        text_pos = (0,0),
        command = self.handleInputChoice,
        )
    self.waitingChoicesLabel = DirectLabel(
        text = TTLocalizer.TravelGameWaitingChoices,
        text_fg = VBase4(1,1,1,1),
        relief = None,
        pos = (-0.2, 0, -0.85),
        scale = 0.075)
    self.waitingChoicesLabel.hide()
    self.gui.hide()
def unload(self):
    """
    unloads the assets, should correspond to load()
    """
    self.notify.debug("unload")
    DistributedMinigame.unload(self)
    self.introMovie.finish()
    del self.introMovie
    self.gameBoard.removeNode()
    del self.gameBoard
    self.sky.removeNode()
    del self.sky
    self.trolleyCar.removeNode()
    del self.trolleyCar
    for key in list(self.trainSwitches.keys()):
        self.trainSwitches[key].removeNode()
        del self.trainSwitches[key]
    self.trainSwitches = {}
    for key in list(self.tunnels.keys()):
        self.tunnels[key].removeNode()
        del self.tunnels[key]
    self.tunnels = {}
    for key in list(self.trainTracks.keys()):
        self.trainTracks[key].removeNode()
        del self.trainTracks[key]
    self.trainTracks = {}
    for trainTrack in self.extraTrainTracks:
        trainTrack.removeNode()
        # NOTE(review): this only unbinds the loop variable, not the list
        # entry; harmless since the list is cleared just below.
        del trainTrack
    self.extraTrainTracks = []
    self.gui.removeNode()
    del self.gui
    self.waitingChoicesLabel.destroy()
    del self.waitingChoicesLabel
    if self.flashWinningBeansTrack:
        self.flashWinningBeansTrack.finish()
    del self.flashWinningBeansTrack
    for label in self.minigameLabels:
        label.destroy()
        del label
    self.minigameLabels = []
    for icon in self.minigameIcons:
        icon.destroy()
        icon.removeNode()
    self.minigameIcons = []
    if hasattr(self, 'mg_icons'):
        del self.mg_icons
    for label in self.bonusLabels:
        label.destroy()
        del label
    self.bonusLabels = []
    self.scrollList.destroy()
    del self.scrollList
    self.voteButton.destroy()
    del self.voteButton
    # unload resources and delete objects from load() here
    # remove our game ClassicFSM from the framework ClassicFSM
    self.removeChildGameFSM(self.gameFSM)
    del self.gameFSM
    del self.music
def moveCameraToTop(self):
    """Snap the camera to the fixed overhead view of the game board."""
    camera.reparentTo(render)
    x, y, z, h, p, r = self.cameraTopView
    camera.setPosHpr(x, y, z, h, p, r)
def moveCameraToTrolley(self):
    """
    snaps camera to facing the trolley (side view, parented so it
    follows the trolley as it moves)
    """
    camera.reparentTo(self.trolleyCar)
    camera.setPos(-25, 0, 7.5)
    camera.setHpr(-90,0,0)
def onstage(self):
    """Bring the game on screen: parent scenery to render, start music,
    and play the intro movie."""
    # start up the minigame; parent things to render, start playing
    # music...
    # at this point we cannot yet show the remote players' toons
    self.notify.debug("onstage")
    # make sure we can see chat text
    NametagGlobals.setOnscreenChatForced(1)
    DistributedMinigame.onstage(self)
    self.gameBoard.reparentTo(render)
    self.sky.reparentTo(render)
    self.moveCameraToTop()
    self.trolleyCar.reparentTo(render)
    for key in list(self.trainSwitches.keys()):
        self.trainSwitches[key].reparentTo(render)
    for key in list(self.trainTracks.keys()):
        self.trainTracks[key].reparentTo(render)
    for trainTrack in self.extraTrainTracks:
        trainTrack.reparentTo(render)
    base.transitions.irisIn(0.4)
    # set the background color to match the gameboard
    base.setBackgroundColor(0.1875, 0.7929, 0)
    # Start music
    base.playMusic(self.music, looping = 1, volume = 0.9)
    # play the intro movie
    self.introMovie.start()
def offstage(self):
    """Take the game off screen: hide scenery, stop music; inverse of
    onstage()."""
    self.notify.debug("offstage")
    # stop the minigame; parent things to hidden, stop the
    # music...
    NametagGlobals.setOnscreenChatForced(0)
    base.setBackgroundColor(ToontownGlobals.DefaultBackgroundColor)
    # make sure the intro movie is finished
    self.introMovie.finish()
    self.gameBoard.hide()
    self.sky.hide()
    self.trolleyCar.hide()
    self.gui.hide()
    self.hideMinigamesAndBonuses()
    for key in list(self.trainSwitches.keys()):
        self.trainSwitches[key].hide()
    for key in list(self.trainTracks.keys()):
        self.trainTracks[key].hide()
    for trainTrack in self.extraTrainTracks:
        trainTrack.hide()
    # the base class parents the toons to hidden, so consider
    # calling it last
    DistributedMinigame.offstage(self)
    # show the laff meter
    if base.localAvatar.laffMeter:
        base.localAvatar.laffMeter.start()
    # Stop music
    self.music.stop()
def setGameReady(self):
    """All avatars have joined: seat each toon in the trolley and record
    their names (used later if a player disconnects mid-vote)."""
    if not self.hasLocalToon: return
    self.notify.debug("setGameReady")
    if DistributedMinigame.setGameReady(self):
        return
    # all of the remote toons have joined the game;
    # it's safe to show them now.
    # Make the avatars all sit in the trolley
    for index in range(self.numPlayers):
        avId = self.avIdList[index]
        name = ''
        # Find the actual avatar in the cr
        avatar = self.getAvatar(avId)
        if avatar:
            # Position the avatar in lane i, place 0 (the starting place)
            avatar.reparentTo(self.trolleyCar)
            # Neutral animation cycle
            avatar.animFSM.request('Sit')
            avatar.setPosHpr(-4,-4.5 + (index*3),2.8,90,0,0)
            name = avatar.getName()
        self.avNames.append(name)
    # put trolley car in the right position
    self.trolleyCar.setH(90)
def setGameStart(self, timestamp):
    """Everyone has read the rules; move the game FSM into inputChoice."""
    if not self.hasLocalToon: return
    self.notify.debug("setGameStart")
    # base class will cause gameFSM to enter initial state
    DistributedMinigame.setGameStart(self, timestamp)
    # make sure the intro movie is finished
    self.introMovie.finish()
    # all players have finished reading the rules,
    # and are ready to start playing.
    # transition to the appropriate state
    self.gameFSM.request("inputChoice")
# these are enter and exit functions for the game's
# fsm (finite state machine)
def enterOff(self):
    """FSM state 'off': idle; nothing to set up."""
    self.notify.debug("enterOff")
def exitOff(self):
    # Nothing to tear down for the idle state.
    pass
def enterInputChoice(self):
    """FSM state: show the voting GUI so the local toon can allocate
    votes and pick a direction before the timer runs out."""
    self.notify.debug("enterInputChoice")
    # make sure we can read the chat
    NametagGlobals.setOnscreenChatForced(1)
    self.timer = ToontownTimer.ToontownTimer()
    self.timer.hide()
    # Only start counting down once the AI has reported a start time.
    if self.timerStartTime != None:
        self.startTimer()
    # hide the laff meter
    if base.localAvatar.laffMeter:
        base.localAvatar.laffMeter.stop()
    # show the gui now
    self.gui.show()
    self.showMinigamesAndBonuses()
def exitInputChoice(self):
    """Tear down the timer and hide the voting GUI when leaving the
    inputChoice state."""
    NametagGlobals.setOnscreenChatForced(0)
    if self.timer != None:
        self.timer.destroy()
        self.timer = None
        self.timerStartTime = None
    self.gui.hide()
def enterWaitServerChoices(self):
    """ we've voted and are waiting for other people to finish voting """
    self.notify.debug("enterWaitServerChoices")
    self.waitingChoicesLabel.show()
    self.gui.hide()
def exitWaitServerChoices(self):
    # Hide the "waiting for other players" message.
    self.waitingChoicesLabel.hide()
def enterDisplayVotes(self, votes, directions, directionToGo, directionReason):
"""
Display how people voted to the player, with a fancy show
"""
assert self.notify.debugStateCall()
if self.UseTrolleyResultsPanel:
self.moveCameraToTrolley()
self.hideMinigamesAndBonuses()
else:
self.moveCameraToTop()
self.resultVotes = votes
self.resultDirections = directions
self.directionToGo = directionToGo
self.directionReason = directionReason
self.resultsStr = ''
assert len(votes) == len(directions)
directionTotals = [0] * TravelGameGlobals.MaxDirections
for index in range(len(votes)):
if index < len(self.avNames):
avId = self.avIdList[index]
dir = directions[index]
numVotes = votes[index]
directionTotals[dir] += numVotes
curStr = TTLocalizer.TravelGameOneToonVote % {
'name': self.avNames[index],
'numVotes' : numVotes,
'dir' : TTLocalizer.TravelGameDirections[dir]
}
# if a player has disconnected and has zero votes, skip him
if not ( numVotes == 0 and avId in self.disconnectedAvIds):
self.resultsStr += curStr
directionStr = TTLocalizer.TravelGameTotals
for index in range(len(directionTotals)):
directionStr += ' ' + TTLocalizer.TravelGameDirections[index] + ':'
directionStr += str(directionTotals[index])
directionStr += '\n'
self.resultsStr += directionStr
reasonStr = ''
if directionReason == TravelGameGlobals.ReasonVote:
if directionToGo == 0:
losingDirection = 1
else:
losingDirection = 0
diffVotes = directionTotals[directionToGo] - \
directionTotals[losingDirection]
reasonStr = ''
if diffVotes > 1:
reasonStr = TTLocalizer.TravelGameReasonVotesPlural % {
'dir': TTLocalizer.TravelGameDirections[directionToGo],
'numVotes' : diffVotes,}
else:
reasonStr = TTLocalizer.TravelGameReasonVotesSingular % {
'dir': TTLocalizer.TravelGameDirections[directionToGo],
'numVotes' : diffVotes,}
elif directionReason == TravelGameGlobals.ReasonRandom :
reasonStr = TTLocalizer.TravelGameReasonRandom % {
'dir': TTLocalizer.TravelGameDirections[directionToGo],
'numVotes' : directionTotals[directionToGo],}
elif directionReason == TravelGameGlobals.ReasonPlaceDecider:
reasonStr = TravelGameReasonPlace % {
'name' : "TODO NAME",
'dir' : TTLocalizer.TravelGameDirections[directionToGo],}
self.resultsStr += reasonStr
self.dialog = TTDialog.TTDialog(
text = self.resultsStr,
command = self.__cleanupDialog,
style = TTDialog.NoButtons,
pos = (0,0,1)
)
self.dialog.hide()
if self.UseTrolleyResultsPanel:
self.votesPanel = VoteResultsTrolleyPanel.VoteResultsTrolleyPanel(
len(self.avIdList),
self.avIdList,
votes,
directions,
self.avNames,
self.disconnectedAvIds,
directionToGo,
directionReason,
directionTotals,
)
else:
self.votesPanel = VoteResultsPanel.VoteResultsPanel(
len(self.avIdList),
self.avIdList,
votes,
directions,
self.avNames,
self.disconnectedAvIds,
directionToGo,
directionReason,
directionTotals,
)
self.votesPanel.startMovie()
numPlayers = len(self.avIdList)
if TravelGameGlobals.SpoofFour:
numPlayers =4
delay = TravelGameGlobals.DisplayVotesTimePerPlayer * (numPlayers +1)
taskMgr.doMethodLater(delay,
self.displayVotesTimeoutTask,
self.taskName("displayVotes-timeout"))
# calculate the new switch we're going to
curSwitch = TravelGameGlobals.BoardLayouts[self.boardIndex][self.currentSwitch]
self.destSwitch = curSwitch['links'][directionToGo]
self.updateCurrentVotes()
def exitDisplayVotes(self):
assert self.notify.debugStateCall()
taskMgr.remove(self.taskName("displayVotes-timeout"))
self.__cleanupDialog(0)
if not self.UseTrolleyResultsPanel:
self.showMinigamesAndBonuses()
self.votesPanel.destroy()
pass
    def enterMoveTrolley(self):
        """
        We've finished displaying the votes, now show the trolley moving

        Builds one interval combining: the trolley motion (turn onto the
        track, slide to the destination switch while the keys/wheels spin,
        turn back), the bell/rolling sound track, and optionally a camera
        move to the top-down view.  When it finishes it requests 'winMovie'
        if the destination is a leaf switch, otherwise 'inputChoice'.
        """
        self.notify.debug("enterMoveTrolley")
        camera.wrtReparentTo(render)
        # How many revolutions of the wheel?
        keyAngle = round(self.TrolleyMoveDuration) * 360
        dist = Vec3(self.trainSwitches[self.destSwitch].getPos()- \
            self.trainSwitches[self.currentSwitch].getPos()).length()
        # Wheel rotation scales with distance travelled; 0.95 is presumably
        # the wheel radius in world units -- TODO confirm.
        wheelAngle = dist/(2.0 * math.pi * 0.95) * 360
        trolleyAnimateInterval = LerpFunctionInterval(
            self.animateTrolley,
            duration = self.TrolleyMoveDuration,
            blendType = "easeInOut",
            extraArgs = [keyAngle, wheelAngle],
            name = "TrolleyAnimate")
        moveTrolley = Sequence()
        moveTrolley.append(Func(self.resetAnimation))
        newPos = self.trainSwitches[self.destSwitch].getPos()
        linkKey = (self.currentSwitch, self.destSwitch)
        origHeading = self.trainTracks[linkKey].getH()
        heading = origHeading + 90
        # Rotate the trolley and both switches to line up with the track.
        firstTurn = Parallel()
        firstTurn.append( LerpHprInterval(self.trolleyCar, 1, Vec3(heading,0,0)))
        firstTurn.append( LerpHprInterval(self.trainSwitches[self.currentSwitch], 1, Vec3(origHeading,0,0)))
        firstTurn.append( LerpHprInterval(self.trainSwitches[self.destSwitch], 1, Vec3(origHeading,0,0)))
        moveTrolley.append( firstTurn)
        # Slide to the destination while wheels/keys animate in lockstep.
        moveTrolley.append( Parallel(
            LerpPosInterval(self.trolleyCar, self.TrolleyMoveDuration, newPos, blendType='easeInOut'),
            trolleyAnimateInterval
            ))
        # Turn everything back to the resting orientation.
        secondTurn = Parallel()
        secondTurn.append( LerpHprInterval(self.trolleyCar, 1, Vec3(90,0,0)))
        secondTurn.append( LerpHprInterval(self.trainSwitches[self.currentSwitch], 1, Vec3(0,0,0)))
        secondTurn.append( LerpHprInterval(self.trainSwitches[self.destSwitch], 1, Vec3(0,0,0)))
        moveTrolley.append( secondTurn)
        soundTrack = Sequence()
        trolleyExitBellInterval = Parallel(
            SoundInterval(self.trolleyBellSfx, duration=1),
            SoundInterval(self.turntableRotateSfx, duration=1, volume = 0.5)
            )
        trolleyExitAwayInterval = SoundInterval(self.trolleyAwaySfx, duration=3)
        soundTrack.append(trolleyExitBellInterval)
        soundTrack.append(trolleyExitAwayInterval)
        soundTrack.append(trolleyExitBellInterval)
        self.moveTrolleyIval = Parallel(moveTrolley, soundTrack)
        duration = self.moveTrolleyIval.getDuration()
        # Per-frame lerp callback: keep the camera aimed a bit above the
        # moving trolley car.
        def focusOnTrolley(t, self = self):
            pos = self.trolleyCar.getPos()
            pos.setZ(pos.getZ() + 7.5)
            camera.lookAt(pos)
            self.lastFocusHpr = camera.getHpr()
        setRightHprTime = 0
        if self.FlyCameraUp:
            setRightHprTime = 1.0
        camIval1 = Parallel()
        camIval1.append(LerpFunc(focusOnTrolley,duration - setRightHprTime,
                                 name='focusOnTrolley'))
        finalPos = Vec3(self.cameraTopView[0],self.cameraTopView[1],self.cameraTopView[2])
        finalHpr = Vec3(self.cameraTopView[3],self.cameraTopView[4],self.cameraTopView[5])
        if self.FlyCameraUp:
            if self.FocusOnTrolleyWhileMovingUp:
                # Track the trolley while rising, then snap heading at the end.
                camIval1.append(LerpPosInterval(camera,duration - setRightHprTime,
                                                finalPos,
                                                name='cameraMove'))
                camIval2 = Sequence(LerpHprInterval(camera, setRightHprTime,
                                                    finalHpr,
                                                    name='cameraHpr'))
            else:
                camIval2 = Sequence(LerpPosHprInterval(camera, setRightHprTime,
                                                       finalPos,
                                                       finalHpr,
                                                       blendType='easeIn',
                                                       name='cameraHpr'))
            camIval = Sequence( camIval1, camIval2)
        else:
            camIval = Sequence( camIval1)
        if self.UseTrolleyResultsPanel:
            self.moveTrolleyIval.append(camIval)
        # make sure the fsm request happens after all ivals
        temp = self.moveTrolleyIval
        self.moveTrolleyIval = Sequence(temp)
        if self.isLeaf(self.destSwitch):
            self.moveTrolleyIval.append( Func ( self.gameFSM.request, 'winMovie'))
        else:
            self.moveTrolleyIval.append( Func ( self.gameFSM.request, 'inputChoice'))
        self.moveTrolleyIval.start()
    def exitMoveTrolley(self):
        """gameFSM state: commit the move (destination becomes current) and
        force the move interval and camera to their final states."""
        self.notify.debug("exitMoveTrolley")
        self.currentSwitch = self.destSwitch
        self.moveTrolleyIval.finish()
        self.moveCameraToTop()
        self.showMinigamesAndBonuses()
        pass
    def enterWinMovie(self):
        """gameFSM state: the trolley reached a leaf; show the outcome.

        Announces which minigame comes next (or the gag shop when only one
        toon remains), reports who -- if anyone -- reached their bonus goal,
        plays the matching win/lose stinger, drives the trolley off screen
        and schedules gameOverCallback.
        """
        resultStr = TTLocalizer.TravelGamePlaying % {
            'game' : self.idToNames[self.switchToMinigameDict[self.currentSwitch]]}
        # if we have only 1 player left, tell them we're going to the gag shop instead.
        numToons = 0
        for avId in self.avIdList:
            if avId not in self.disconnectedAvIds:
                numToons += 1
        if numToons <= 1:
            resultStr = TTLocalizer.TravelGameGoingBackToShop
        reachedGoalStr = None
        # NOTE(review): localAvatarWon / localAvatarLost are never read.
        localAvatarWon = False
        localAvatarLost = False
        noWinner = True
        for avId in list(self.avIdBonuses.keys()):
            name = ''
            avatar = self.getAvatar(avId)
            if avatar:
                name = avatar.getName()
            # if we don't have a name, let's not display an empty name
            if self.avIdBonuses[avId][0] == self.currentSwitch:
                # This player's bonus goal is the switch we stopped at.
                noWinner = False
                reachedGoalStr = TTLocalizer.TravelGameGotBonus % {
                    'name' : name,
                    'numBeans' : self.avIdBonuses[avId][1]
                    }
                if avId == base.localAvatar.doId:
                    # ReverseWin flips the goal semantics (reaching it is bad),
                    # so the stingers are swapped in that mode.
                    if not TravelGameGlobals.ReverseWin:
                        self.wonGameSfx.play()
                        # Flash the local toon's bonus label green.
                        bonusLabel = self.switchToBonusLabelDict[self.currentSwitch]
                        self.flashWinningBeansTrack = Sequence(
                            LerpColorScaleInterval(bonusLabel, 0.75, Vec4(0.5,1,0.5,1)),
                            LerpColorScaleInterval(bonusLabel, 0.75, Vec4(1,1,1,1)),
                            )
                        self.flashWinningBeansTrack.loop()
                    else:
                        self.lostGameSfx.play()
                else:
                    if not TravelGameGlobals.ReverseWin:
                        self.lostGameSfx.play()
                    else:
                        self.wonGameSfx.play()
        if noWinner:
            self.noWinnerSfx.play()
            resultStr += '\n\n'
            resultStr += TTLocalizer.TravelGameNoOneGotBonus
        if reachedGoalStr:
            resultStr += '\n\n'
            resultStr += reachedGoalStr
        self.winDialog = TTDialog.TTDialog(
            text = resultStr,
            command = self.__cleanupWinDialog,
            style = TTDialog.NoButtons)
        # create a sequence of the trolley moving out
        info = TravelGameGlobals.BoardLayouts[self.boardIndex][self.currentSwitch]
        leafX, leafY, leafZ = info['pos']
        endX = leafX + TravelGameGlobals.xInc
        heading = 90
        moveTrolley = Sequence()
        moveTrolley.append( LerpHprInterval(self.trolleyCar, 1, Vec3(heading,0,0)))
        moveTrolley.append( LerpPosInterval(self.trolleyCar, 3, Vec3(endX + 20, leafY, 0)))
        soundTrack = Sequence()
        trolleyExitBellInterval = SoundInterval(self.trolleyBellSfx, duration=1)
        trolleyExitAwayInterval = SoundInterval(self.trolleyAwaySfx, duration=3)
        soundTrack.append(trolleyExitBellInterval)
        soundTrack.append(trolleyExitAwayInterval)
        soundTrack.append(trolleyExitBellInterval)
        self.moveTrolleyIval = Parallel(moveTrolley, soundTrack)
        self.moveTrolleyIval.start()
        # Give the outro time to play before ending the game.
        delay = 8
        taskMgr.doMethodLater(delay,
                              self.gameOverCallback,
                              self.taskName("playMovie"))
    def exitWinMovie(self):
        """gameFSM state: cancel the game-over task and finish the outro."""
        taskMgr.remove(self.taskName("playMovie"))
        self.moveTrolleyIval.finish()
        pass
    def enterCleanup(self):
        """gameFSM state: final state before the game object is deleted."""
        self.notify.debug("enterCleanup")
    def exitCleanup(self):
        """gameFSM state: leaving cleanup; nothing to do."""
        pass
def setStartingVotes(self, startingVotesArray):
"""
server telling us how many starting votes each avatar have
"""
if not len(startingVotesArray) == len(self.avIdList):
self.notify.error('length does not match, startingVotes=%s, avIdList=%s' %
(startingVotesArray, self.avIdList))
return
for index in range(len( self.avIdList)):
avId = self.avIdList[index]
self.startingVotes[avId] = startingVotesArray[index]
if avId not in self.currentVotes:
self.currentVotes[avId] = startingVotesArray[index]
self.notify.debug('starting votes = %s' % self.startingVotes)
    def startTimer(self):
        """startTimer(self)
        Starts the timer display running during the inputChoice state,
        once we have received the timerStartTime from the AI.

        The countdown is shortened by the time already elapsed since the
        AI's reported start, so all clients stay in sync.
        """
        now = globalClock.getFrameTime()
        elapsed = now - self.timerStartTime
        self.timer.setPos(1.16, 0, -0.83)
        self.timer.setTime(TravelGameGlobals.InputTimeout)
        # Only count down the remaining time; fire handleChoiceTimeout at 0.
        self.timer.countdown(TravelGameGlobals.InputTimeout - elapsed,
                             self.handleChoiceTimeout)
        self.timer.show()
    def setTimerStartTime(self, timestamp):
        """setTimeStartTime(self, int16 timestamp)
        This message is sent from the AI to indicate the point at
        which the timer starts (or started) counting. It's used to
        synchronize the timer display with the actual countdown on the
        AI.
        """
        if not self.hasLocalToon: return
        self.timerStartTime = globalClockDelta.networkToLocalTime(timestamp)
        # The timer widget may not exist yet (this update can arrive before
        # enterInputChoice creates it); start it now if it does.
        if self.timer != None:
            self.startTimer()
def handleChoiceTimeout(self):
""" If we timeout locally, send a 0,0 for our choice """
self.sendUpdate("setAvatarChoice", [0,0])
self.gameFSM.request("waitServerChoices")
pass
def putChoicesInScrollList(self):
"""
put the available votes in the scrollList
"""
available = self.currentVotes[self.localAvId]
if len(self.scrollList['items']) > 0:
self.scrollList.removeAllItems()
self.indexToVotes = {}
index = 0
# we need to put then in reverse order first, for the up direction
for vote in range(available)[::-1]:
self.scrollList.addItem(str( -(vote + 1)))
self.indexToVotes[index] = vote+1
index += 1
# don't forget zero
self.scrollList.addItem(str(0))
self.indexToVotes[index] = 0
self.zeroVoteIndex = index
index +=1
for vote in range(available):
self.scrollList.addItem(str(vote + 1))
self.indexToVotes[index] = vote+1
index += 1
self.scrollList.scrollTo(self.zeroVoteIndex)
def getAbsVoteChoice(self):
"""
get an absolute number on his vote choice
"""
available = self.currentVotes[self.localAvId]
retval = 0
if hasattr(self,'scrollList'):
selectedIndex = self.scrollList.getSelectedIndex()
if selectedIndex in self.indexToVotes:
retval = self.indexToVotes[selectedIndex]
return retval
def getAbsDirectionChoice(self):
"""
get an absolute number on his direction choice
if we add more directions, this will need to change
"""
selectedIndex = self.scrollList.getSelectedIndex()
if selectedIndex < self.zeroVoteIndex:
# we are voting to going up
retval = 0
elif selectedIndex == self.zeroVoteIndex:
# not using votes
retval = 0
else:
# we are voting to go down
retval = 1
return retval
def makeTextMatchChoice(self):
"""
show the proper text based on the number of votes, and the direction
"""
self.votesPeriodLabel.hide()
self.votesToGoLabel.hide()
self.upLabel.hide()
self.downLabel.hide()
if not hasattr(self,'scrollList') or not hasattr(self,"zeroVoteIndex"):
return
selectedIndex = self.scrollList.getSelectedIndex()
if selectedIndex < self.zeroVoteIndex:
# we are voting to going up
self.votesToGoLabel.show()
self.upLabel.show()
elif selectedIndex == self.zeroVoteIndex:
# not using votes
self.votesPeriodLabel.show()
else:
# we are voting to go down
self.votesToGoLabel.show()
self.downLabel.show()
def scrollChoiceChanged(self):
choiceVotes = self.getAbsVoteChoice()
if choiceVotes == 1:
self.votesToGoLabel['text'] = TTLocalizer.TravelGameVoteToGo
else:
self.votesToGoLabel['text'] = TTLocalizer.TravelGameVotesToGo
available = self.currentVotes[self.localAvId]
self.localVotesRemaining['text'] = str( available - choiceVotes)
self.makeTextMatchChoice()
    def setAvatarChose(self, avId):
        """Distributed update: avId has locked in their vote choice."""
        if not self.hasLocalToon: return
        # The server is telling the client that this
        # avatar has finished choosing his number
        self.notify.debug("setAvatarChose: avatar: " + str(avId) + " choose a number")
        # TODO: represent this graphically
    def handleInputChoice(self):
        """GUI callback: submit the local player's vote count and direction
        to the AI and move to the waiting state."""
        # The number we choose will be checked on the server to prevent hacking
        numVotes = self.getAbsVoteChoice()
        direction = self.getAbsDirectionChoice()
        self.sendUpdate("setAvatarChoice", [numVotes, direction])
        self.gameFSM.request("waitServerChoices")
    def setServerChoices(self, votes, directions, directionToGo, directionReason):
        """Distributed update: every player's (votes, direction), the
        direction the AI resolved, and the reason; starts the vote display."""
        if not self.hasLocalToon: return
        assert self.notify.debugStateCall()
        # The server sends this when all avatars have choosen their votes
        self.notify.debug('requesting displayVotes, curState=%s' % self.gameFSM.getCurrentState().getName())
        self.gameFSM.request("displayVotes", [votes, directions, directionToGo, directionReason])
def __cleanupDialog(self, value):
"""
cleanup the votes dialog
"""
if (self.dialog):
self.dialog.cleanup()
self.dialog = None
    def displayVotesTimeoutTask(self, task):
        """doLater task: the vote display has run long enough; advance the
        FSM to the trolley move."""
        self.notify.debug("Done waiting for display votes")
        self.gameFSM.request('moveTrolley')
        return Task.done
def updateCurrentVotes(self):
"""
update the current votes, subtract what they used
"""
for index in range(len(self.resultVotes)):
avId = self.avIdList[index]
oldCurrentVotes = self.currentVotes[avId]
self.currentVotes[avId] -= self.resultVotes[index]
self.putChoicesInScrollList()
self.makeTextMatchChoice()
def isLeaf(self, switchIndex):
"""
returns True if the switch is a leaf
"""
retval = False
links = TravelGameGlobals.BoardLayouts[self.boardIndex][switchIndex]['links']
if len(links) == 0:
retval = True
return retval
def __cleanupWinDialog(self, value):
"""
cleanup the win dialog
"""
if hasattr(self,'winDialog') and self.winDialog:
self.winDialog.cleanup()
self.winDialog = None
    def gameOverCallback(self,task):
        """doLater task: dismiss the win dialog and end the minigame."""
        self.__cleanupWinDialog(0)
        self.gameOver()
        return Task.done
def setMinigames(self, switches, minigames):
"""
we've received from the server 2 parallel arrays of switches
and the minigame for each switch
"""
if not self.hasLocalToon: return
self.switchToMinigameDict = {}
for index in range(len(switches)):
switch = switches[index]
minigame = minigames[index]
self.switchToMinigameDict[switch] = minigame
self.notify.debug('minigameDict = %s' % self.switchToMinigameDict)
self.loadMinigameIcons()
    def loadMinigameIcons(self):
        """
        load an icon, if missing use a direct Label

        For each mapped switch, place either the minigame's icon (from the
        mg_icons model) or a plain text label at the switch's projected
        screen position, and hang a copy of the icon as a sign over the
        tunnel.  All widgets start hidden.
        """
        self.mg_icons = loader.loadModel('phase_4/models/minigames/mg_icons')
        for switch in list(self.switchToMinigameDict.keys()):
            minigame = self.switchToMinigameDict[switch]
            switchPos = self.trainSwitches[switch].getPos()
            # Project the 3-D switch position into aspect2d coordinates.
            labelPos = map3dToAspect2d(render, switchPos)
            useText = True
            iconName = None
            if minigame in list(IconDict.keys()):
                iconName = IconDict[minigame]
            icon = None
            if self.mg_icons:
                icon = self.mg_icons.find('**/%s' % iconName)
                if not icon.isEmpty():
                    useText = False
            if labelPos:
                if useText:
                    # No icon available: fall back to the minigame's name.
                    labelPos.setZ( labelPos.getZ() - 0.1)
                    label = DirectLabel(text = self.idToNames[minigame],
                                        relief = None,
                                        scale =0.1,
                                        pos = labelPos,
                                        text_fg = (1.0,1.0,1.0,1.0),
                                        )
                    label.hide()
                    self.minigameLabels.append(label)
                else:
                    # Icon button showing the minigame name on rollover.
                    placeHolder = DirectButton(
                        image = icon,
                        relief = None,
                        text = ('','',self.idToNames[minigame],''),
                        text_scale = 0.3,
                        text_pos = (0, -0.7, 0),
                        text_fg = (1, 1, 1, 1),
                        clickSound = None,
                        pressEffect = 0,
                        )
                    placeHolder.setPos(labelPos)
                    placeHolder.setScale(0.2)
                    placeHolder.hide()
                    self.minigameIcons.append(placeHolder)
                    # show the yellow sign above the tunnel
                    tunnel = self.tunnels[switch]
                    sign = tunnel.attachNewNode('sign')
                    icon.copyTo(sign)
                    sign.setH(-90)
                    sign.setZ(26)
                    sign.setScale(10)
def showMinigamesAndBonuses(self):
for label in self.minigameLabels:
label.show()
for label in self.bonusLabels:
label.show()
for icon in self.minigameIcons:
icon.show()
def hideMinigamesAndBonuses(self):
for label in self.minigameLabels:
label.hide()
for label in self.bonusLabels:
label.hide()
for icon in self.minigameIcons:
icon.hide()
    def loadBonuses(self):
        """Create the on-screen label for the local toon's bonus goal.

        Only the local player's bonus is shown; the label is placed at the
        goal switch's projected screen position and starts hidden.
        """
        self.switchToBonusLabelDict = {}
        for avId in list(self.avIdBonuses.keys()):
            # make sure we show only this local toon's bonus
            if avId == self.localAvId:
                switch = self.avIdBonuses[avId][0]
                beans = self.avIdBonuses[avId][1]
                switchPos = self.trainSwitches[switch].getPos()
                labelPos = map3dToAspect2d(render, switchPos)
                if labelPos:
                    # Nudge right/down so the text clears the switch art.
                    labelPos.setX( labelPos.getX() + 0.1)
                    labelPos.setZ( labelPos.getZ() - 0.02)
                    bonusStr = TTLocalizer.TravelGameBonusBeans % {'numBeans' : beans}
                    label = DirectLabel(text = bonusStr,
                                        relief = None,
                                        scale =0.1, # 0.85,
                                        pos = labelPos,
                                        text_fg = (1.0,1.0,1.0,1.0),
                                        text_align = TextNode.ALeft,
                                        )
                    label.hide()
                    self.bonusLabels.append(label)
                    self.switchToBonusLabelDict[switch] = label
                # Found the local toon; no need to scan further.
                break
def setBonuses(self, switches, beans):
"""
server has sent the bonus beans for the leaf switches
"""
if not self.hasLocalToon: return
self.avIdBonuses = {}
for index in range(len(self.avIdList)):
avId = self.avIdList[index]
switch = switches[index]
bean = beans[index]
self.avIdBonuses[avId] = ( switch, bean)
self.notify.debug('self.avIdBonuses = %s' % self.avIdBonuses)
self.loadBonuses()
    def handleDisabledAvatar(self, avId):
        """
        Code to deal with an avatar unexpectedly being deleted
        If inheritors override, they should call this base function
        we must not request frameworkAvatarExited, since
        that leads to requesting the cleanup on self.gameFSM
        """
        self.notify.warning("DistrbutedTravelGame: handleDisabledAvatar: disabled avId: " +
                            str(avId))
        # Remember who left so their votes/labels are skipped later on.
        self.disconnectedAvIds.append(avId)
        # instead of aborting the game, we continue on if we still have a player
        # self.frameworkFSM.request('frameworkAvatarExited')
    def setBoardIndex(self, boardIndex):
        """ server setting which of the 4 board layouts we'll use """
        # Index into TravelGameGlobals.BoardLayouts.
        self.boardIndex = boardIndex
    def getIntroMovie(self):
        """
        create an intro movie of the trolley coming to switch 0

        Returns a Parallel interval: the trolley slides in from one column
        left of the root switch while the away/bell sounds play.
        """
        rootInfo = TravelGameGlobals.BoardLayouts[self.boardIndex][0]
        rootX, rootY, rootZ = rootInfo['pos']
        # Start one column to the left of the root switch.
        startX = rootX - TravelGameGlobals.xInc
        heading = 90
        moveTrolley = Sequence()
        moveTrolley.append( Func( self.trolleyCar.setH, 90))
        moveTrolley.append( LerpPosInterval(self.trolleyCar, 3, Vec3(rootX, rootY, 0), startPos = Vec3(startX, rootY, 0)))
        moveTrolley.append( LerpHprInterval(self.trolleyCar, 1, Vec3(heading,0,0)))
        soundTrack = Sequence()
        trolleyExitAwayInterval = SoundInterval(self.trolleyAwaySfx, duration=3)
        trolleyExitBellInterval = SoundInterval(self.trolleyBellSfx, duration=1)
        soundTrack.append(trolleyExitAwayInterval)
        soundTrack.append(trolleyExitBellInterval)
        retval = Parallel(moveTrolley, soundTrack)
        return retval
##### Miscellaneous support functions #####
def animateTrolley(self, t, keyAngle, wheelAngle):
""" Make key rotate at 1 rotation per second, and make wheels move """
for i in range(self.numKeys):
key = self.keys[i]
ref = self.keyRef[i]
key.setH(ref, t * keyAngle)
for i in range(self.numFrontWheels):
frontWheel = self.frontWheels[i]
ref = self.frontWheelRef[i]
frontWheel.setH(ref, t * wheelAngle)
for i in range(self.numBackWheels):
backWheel = self.backWheels[i]
ref = self.backWheelRef[i]
backWheel.setH(ref, t * wheelAngle)
def resetAnimation(self):
""" clear trolley animations """
for i in range(self.numKeys):
self.keys[i].setTransform(self.keyInit[i])
for i in range(self.numFrontWheels):
self.frontWheels[i].setTransform(self.frontWheelInit[i])
for i in range(self.numBackWheels):
self.backWheels[i].setTransform(self.backWheelInit[i])
| [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
be174d5d2aac5c1d6e5238f958f929ec9c085eab | f5a2f74783fc6c48a75e415f08493f33a5937e3b | /cloudbio/package/conda.py | 9caa900e5ce4c1e3a7f098bbd869001cd31519f6 | [
"MIT"
] | permissive | espritfollet/cloudbiolinux | 8feccd8ca20a1e7cbb70aedbe373e2e522400be8 | c7c41c2634d044c60abae2c2264ff7e9b6885485 | refs/heads/master | 2021-05-05T20:11:52.955336 | 2017-12-23T10:58:27 | 2017-12-23T10:58:27 | 115,296,610 | 0 | 0 | MIT | 2019-03-23T05:30:44 | 2017-12-25T01:34:43 | Python | UTF-8 | Python | false | false | 7,262 | py | """Install packages via the Conda package manager: http://conda.pydata.org/
"""
import collections
import json
import os
import yaml
from cloudbio.custom import shared
from cloudbio.fabutils import quiet
from cloudbio.flavor.config import get_config_file
from cloudbio.package.shared import _yaml_to_packages
def install_packages(env, to_install=None, packages=None):
    """Install conda packages defined by the flavor configuration.

    Args:
        env: fabric-style environment; must point at an Anaconda install,
            otherwise this function is a no-op.
        to_install: optional package groups to select from packages-conda.yaml.
        packages: optional explicit package list, bypassing the config file.
    """
    if shared._is_anaconda(env):
        conda_bin = shared._conda_cmd(env)
        # Either take the conda YAML directly from the environment or fall
        # back to the flavor's packages-conda.yaml.
        if hasattr(env, "conda_yaml"):
            Config = collections.namedtuple("Config", "base dist")
            config_file = Config(base=env.conda_yaml, dist=None)
        else:
            config_file = get_config_file(env, "packages-conda.yaml")
        if config_file.base is None and packages is None:
            packages = []
        else:
            if to_install:
                (packages, _) = _yaml_to_packages(config_file.base, to_install, config_file.dist)
            # Channel flags ("-c <name> ...") come from the YAML's channels key.
            with open(config_file.base) as in_handle:
                channels = " ".join(["-c %s" % x for x in yaml.safe_load(in_handle).get("channels", [])])
        conda_envs = _create_environments(env, conda_bin)
        conda_info = json.loads(env.safe_run_output("{conda_bin} info --json".format(**locals())))
        # Uninstall old R packages that clash with updated versions
        # Temporary fix to allow upgrades from older versions that have migrated
        # r-tximport is now bioconductor-tximport
        # py2cairo is incompatible with r 3.4.1
        # libedit pins to curses 6.0 but bioconda requires 5.9
        for problem in ["r-tximport", "py2cairo", "libedit"]:
            cur_packages = [x["name"] for x in
                            json.loads(env.safe_run_output("{conda_bin} list --json {problem}".format(**locals())))]
            if problem in cur_packages:
                env.safe_run("{conda_bin} remove --force -y {problem}".format(**locals()))
        # install our customized packages
        if len(packages) > 0:
            # Packages tagged "env=<name>" are installed into that named env.
            for env_name, env_packages in _split_by_condaenv(packages):
                if env_name:
                    assert env_name in conda_envs, (env_name, conda_envs)
                    env_str = "-n %s" % env_name
                else:
                    env_str = ""
                pkgs_str = " ".join(env_packages)
                env.safe_run("{conda_bin} install --quiet -y {env_str} {channels} {pkgs_str}".format(**locals()))
                conda_pkg_list = json.loads(env.safe_run_output(
                    "{conda_bin} list --json {env_str}".format(**locals())))
                # Expose each installed package's binaries in the system dir.
                for package in env_packages:
                    _link_bin(package, env, conda_info, conda_bin, conda_pkg_list,
                              conda_envdir=conda_envs.get(env_name))
        conda_pkg_list = json.loads(env.safe_run_output("{conda_bin} list --json".format(**locals())))
        # Always link prefixed copies of the core tools (bcbio_python, ...).
        for pkg in ["python", "conda", "pip"]:
            _link_bin(pkg, env, conda_info, conda_bin, conda_pkg_list, files=[pkg], prefix="bcbio_")
def _link_bin(package, env, conda_info, conda_bin, conda_pkg_list, files=None, prefix="", conda_env=None,
              conda_envdir=None):
    """Link files installed in the bin directory into the install directory.
    This is imperfect but we're trying not to require injecting everything in the anaconda
    directory into a user's path.

    Args:
        package: conda package name (any "=version" pin is stripped).
        conda_info: parsed `conda info --json` output (provides pkgs_dirs).
        conda_pkg_list: parsed `conda list --json` output for the target env.
        files: explicit file names to link; defaults to every file in the
            package's bin directory.
        prefix: optional prefix for the link names (e.g. "bcbio_").
        conda_env: NOTE(review): currently unused.
        conda_envdir: environment directory whose bin holds the live files;
            defaults to the directory containing conda_bin.
    """
    package = package.split("=")[0]
    final_bindir = os.path.join(env.system_install, "bin")
    if conda_envdir:
        base_bindir = os.path.join(conda_envdir, "bin")
    else:
        base_bindir = os.path.dirname(conda_bin)
    # resolve any symlinks in the final and base heirarchies
    with quiet():
        final_bindir = env.safe_run_output("cd %s && pwd -P" % final_bindir)
        base_bindir = env.safe_run_output("cd %s && pwd -P" % base_bindir)
    for pkg_subdir in [x for x in conda_pkg_list if x["name"] == package]:
        # dist_name may carry a "channel::" prefix; keep only the directory part.
        pkg_subdir = pkg_subdir["dist_name"].split("::")[-1]
        for pkg_dir in conda_info["pkgs_dirs"]:
            pkg_bindir = os.path.join(pkg_dir, pkg_subdir, "bin")
            if env.safe_exists(pkg_bindir):
                if not files:
                    with quiet():
                        files = env.safe_run_output("ls -1 {pkg_bindir}".format(**locals())).split()
                for fname in files:
                    # symlink to the original file in the /anaconda/bin directory
                    # this could be a hard or soft link
                    base_fname = os.path.join(base_bindir, fname)
                    if os.path.exists(base_fname) and os.path.lexists(base_fname):
                        _do_link(base_fname,
                                 os.path.join(final_bindir, "%s%s" % (prefix, fname)))
def _do_link(orig_file, final_file):
"""Perform a soft link of the original file into the final location.
We need the symlink to point to /anaconda/bin directory, not the real location
in the pkgs directory so conda can resolve LD_LIBRARY_PATH and the interpreters.
"""
needs_link = True
# working symlink, check if already in the right place or remove it
if os.path.exists(final_file):
if (os.path.realpath(final_file) == os.path.realpath(orig_file) and
orig_file == os.path.normpath(os.path.join(os.path.dirname(final_file), os.readlink(final_file)))):
needs_link = False
else:
os.remove(final_file)
# broken symlink
elif os.path.lexists(final_file):
os.unlink(final_file)
if needs_link:
os.symlink(os.path.relpath(orig_file, os.path.dirname(final_file)), final_file)
def _split_by_condaenv(packages):
"""Split packages into those requiring special conda environments.
"""
out = collections.defaultdict(list)
for p in packages:
parts = p.split(";")
name = parts[0]
metadata = parts[1:]
condaenv = None
for k, v in [x.split("=") for x in metadata]:
if k == "env":
condaenv = v
out[condaenv].append(name)
return dict(out).items()
def _create_environments(env, conda_bin):
    """Creates custom local environments that conflict with global dependencies.
    Available environments:
    - python3 -- support tools that require python 3. This is an initial step
      towards transitioning to more python3 tool support.
    - samtools0 -- For tools that require older samtools 0.1.19

    Returns:
        dict mapping environment name ("python3", "samtools0") to its
        environment directory path.
    """
    out = {}
    conda_envs = json.loads(env.safe_run_output("{conda_bin} info --envs --json".format(**locals())))["envs"]
    # Create each env only if no existing env path ends with its name, then
    # re-query so the freshly created path shows up in the listing.
    if not any(x.endswith("/python3") for x in conda_envs):
        env.safe_run("{conda_bin} create -y --name python3 python=3".format(**locals()))
        conda_envs = json.loads(env.safe_run_output("{conda_bin} info --envs --json".format(**locals())))["envs"]
    if not any(x.endswith("/samtools0") for x in conda_envs):
        env.safe_run("{conda_bin} create -y --name samtools0 python=2".format(**locals()))
        conda_envs = json.loads(env.safe_run_output("{conda_bin} info --envs --json".format(**locals())))["envs"]
    out["python3"] = [x for x in conda_envs if x.endswith("/python3")][0]
    out["samtools0"] = [x for x in conda_envs if x.endswith("/samtools0")][0]
    return out
| [
"chapmanb@50mail.com"
] | chapmanb@50mail.com |
8d09a372c57d6ed538800dbba426352b62552920 | e972a3db507312c58a7637a706464257e557725d | /controltowerlib/controltowerlib.py | c2548f58e5d10faa332916f7d3d2352c2e349569 | [
"MIT"
] | permissive | schubergphilis/controltowerlib | 5ddc6a9b6e02f19e914e636c71fee70031f40c09 | c92c0a50d7d6cb568a8e5660df4256a2976b5a4e | refs/heads/master | 2023-04-08T04:14:07.970825 | 2021-04-26T13:10:19 | 2021-04-26T13:10:19 | 328,966,855 | 5 | 0 | MIT | 2021-04-01T10:46:31 | 2021-01-12T11:34:30 | Python | UTF-8 | Python | false | false | 48,353 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: controltowerlib.py
#
# Copyright 2020 Costas Tyfoxylos
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# pylint: disable=too-many-lines
"""
Main code for controltowerlib.
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
import copy
import json
import logging
import time
from functools import lru_cache, wraps
from time import sleep
import boto3
import botocore
import requests
from awsauthenticationlib import AwsAuthenticator
from awsauthenticationlib.awsauthenticationlib import LoggerMixin
from opnieuw import retry
from .controltowerlibexceptions import (UnsupportedTarget,
OUCreating,
NoServiceCatalogAccess,
ServiceCallFailed,
ControlTowerBusy,
ControlTowerNotDeployed,
PreDeployValidationFailed,
EmailCheckFailed,
EmailInUse,
UnavailableRegion,
RoleCreationFailure)
from .resources import (LOGGER,
LOGGER_BASENAME,
ServiceControlPolicy,
CoreAccount,
ControlTowerAccount,
ControlTowerOU,
AccountFactory,
OrganizationsOU,
GuardRail,
CREATING_ACCOUNT_ERROR_MESSAGE)
__author__ = '''Costas Tyfoxylos <ctyfoxylos@schubergphilis.com>'''
__docformat__ = '''google'''
__date__ = '''18-02-2020'''
__copyright__ = '''Copyright 2020, Costas Tyfoxylos'''
__credits__ = ["Costas Tyfoxylos"]
__license__ = '''MIT'''
__maintainer__ = '''Costas Tyfoxylos'''
__email__ = '''<ctyfoxylos@schubergphilis.com>'''
__status__ = '''Development''' # "Prototype", "Development", "Production".
class ControlTower(LoggerMixin): # pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Models Control Tower by wrapping around service catalog."""
api_content_type = 'application/x-amz-json-1.1'
api_user_agent = 'aws-sdk-js/2.528.0 promise'
supported_targets = ['listManagedOrganizationalUnits',
'manageOrganizationalUnit',
'deregisterOrganizationalUnit',
'listManagedAccounts',
'getGuardrailComplianceStatus',
'describeManagedOrganizationalUnit',
'listGuardrailsForTarget',
'getAvailableUpdates',
'describeCoreService',
'getAccountInfo',
'listEnabledGuardrails',
'listGuardrails',
'listOrganizationalUnitsForParent',
'listDriftDetails',
'getLandingZoneStatus',
'setupLandingZone',
'getHomeRegion',
'listGuardrailViolations',
'getCatastrophicDrift',
'getGuardrailComplianceStatus',
'describeAccountFactoryConfig',
'performPreLaunchChecks',
'deleteLandingZone'
]
core_account_types = ['PRIMARY', 'LOGGING', 'SECURITY']
    def validate_availability(method):  # noqa
        """Validation decorator.

        Defined at class scope and applied to instance methods: it receives
        the method (not self) and returns a wrapper that raises
        ControlTowerNotDeployed or ControlTowerBusy before delegating.
        """
        @wraps(method)
        def wrap(*args, **kwargs):
            """Inner wrapper decorator."""
            logger = logging.getLogger(f'{LOGGER_BASENAME}.validation_decorator')
            # args[0] is the ControlTower instance the method is bound to.
            contol_tower_instance = args[0]
            logger.debug('Decorating method: %s', method)
            if not contol_tower_instance.is_deployed:
                raise ControlTowerNotDeployed
            if contol_tower_instance.busy:
                raise ControlTowerBusy
            return method(*args, **kwargs)  # pylint: disable=not-callable
        return wrap
    def __init__(self, arn, settling_time=90):
        """Initializes the clients and lazy caches for interacting with control tower.

        Args:
            arn (str): The arn of the role to assume for all aws interactions.
            settling_time (int): Seconds to wait after an OU registration so guardrails can apply.
        """
        self.aws_authenticator = AwsAuthenticator(arn)
        self.service_catalog = boto3.client('servicecatalog', **self.aws_authenticator.assumed_role_credentials)
        self.organizations = boto3.client('organizations', **self.aws_authenticator.assumed_role_credentials)
        self.session = self._get_authenticated_session()
        # Lazy caches populated by the corresponding properties.
        self._region = None
        self._is_deployed = None
        self.url = f'https://{self.region}.console.aws.amazon.com/controltower/api/controltower'
        # NOTE(review): the iam admin endpoint is hardcoded to eu-west-1 — confirm this is intentional.
        self._iam_admin_url = 'https://eu-west-1.console.aws.amazon.com/controltower/api/iamadmin'
        self._account_factory_ = None
        self.settling_time = settling_time
        self._root_ou = None
        self._update_data_ = None
        self._core_accounts = None
@property
def _account_factory(self):
if any([not self.is_deployed,
self.percentage_complete != 100]):
return None
if self._account_factory_ is None:
self._account_factory_ = self._get_account_factory(self.service_catalog)
return self._account_factory_
    @property
    def is_deployed(self):
        """The deployment status of control tower."""
        # Only a positive result is cached: while not deployed the status is re-queried on
        # every access, so a deployment in progress is eventually observed.
        if not self._is_deployed:
            # The home region may not exist yet, so the caller's own region is queried.
            caller_region = self.aws_authenticator.region
            url = f'https://{caller_region}.console.aws.amazon.com/controltower/api/controltower'
            payload = self._get_api_payload(content_string={},
                                            target='getLandingZoneStatus',
                                            region=caller_region)
            self.logger.debug('Trying to get the deployed status of the landing zone with payload "%s"', payload)
            response = self.session.post(url, json=payload)
            if not response.ok:
                self.logger.error('Failed to get the deployed status of the landing zone with response status '
                                  '"%s" and response text "%s"',
                                  response.status_code, response.text)
                raise ServiceCallFailed(payload)
            not_deployed_states = ('NOT_STARTED', 'DELETE_COMPLETED', 'DELETE_FAILED')
            self._is_deployed = response.json().get('LandingZoneStatus') not in not_deployed_states
        return self._is_deployed
@property
def region(self):
"""Region."""
if not self.is_deployed:
self._region = self.aws_authenticator.region
return self._region
if self._region is None:
caller_region = self.aws_authenticator.region
url = f'https://{caller_region}.console.aws.amazon.com/controltower/api/controltower'
payload = self._get_api_payload(content_string={}, target='getHomeRegion', region=caller_region)
response = self.session.post(url, json=payload)
if not response.ok:
raise ServiceCallFailed(payload)
self._region = response.json().get('HomeRegion') or self.aws_authenticator.region
return self._region
@staticmethod
def get_available_regions():
"""The regions that control tower can be active in.
Returns:
regions (list): A list of strings of the regions that control tower can be active in.
"""
url = 'https://api.regional-table.region-services.aws.a2z.com/index.json'
response = requests.get(url)
if not response.ok:
LOGGER.error('Failed to retrieve the info')
return []
return [entry.get('id', '').split(':')[1]
for entry in response.json().get('prices')
if entry.get('id').startswith('controltower')]
    @property
    @validate_availability
    def core_accounts(self):
        """The core accounts of the landing zone.
        Returns:
            core_accounts (list): A list of the primary, logging and security account.
        """
        # Cached for the lifetime of the instance; one api call per core account type.
        if self._core_accounts is None:
            core_accounts = []
            for account_type in self.core_account_types:
                payload = self._get_api_payload(content_string={'AccountType': account_type},
                                                target='describeCoreService')
                response = self.session.post(self.url, json=payload)
                if not response.ok:
                    raise ServiceCallFailed(f'Service call failed with payload {payload}')
                core_accounts.append(CoreAccount(self, account_type, response.json()))
            self._core_accounts = core_accounts
        return self._core_accounts
@property
@validate_availability
def root_ou(self):
"""The root ou of control tower.
Returns:
root_ou (ControlTowerOU): The root ou object.
"""
if self._root_ou is None:
self._root_ou = self.get_organizational_unit_by_name('Root')
return self._root_ou
    def _get_authenticated_session(self):
        """Returns a session authenticated against the control tower console api."""
        return self.aws_authenticator.get_control_tower_authenticated_session()
@property
def _active_artifact(self):
artifacts = self.service_catalog.list_provisioning_artifacts(ProductId=self._account_factory.product_id)
return next((artifact for artifact in artifacts.get('ProvisioningArtifactDetails', [])
if artifact.get('Active')),
None)
@staticmethod
def _get_account_factory(service_catalog_client):
filter_ = {'Owner': ['AWS Control Tower']}
try:
return AccountFactory(service_catalog_client,
service_catalog_client.search_products(Filters=filter_
).get('ProductViewSummaries', [''])[0])
except IndexError:
raise NoServiceCatalogAccess(('Please make sure the role used has access to the "AWS Control Tower Account '
'Factory Portfolio" in Service Catalog under "Groups, roles, and users"'))
def _validate_target(self, target):
if target not in self.supported_targets:
raise UnsupportedTarget(target)
return target
def _get_api_payload(self, # pylint: disable=too-many-arguments
content_string,
target,
method='POST',
params=None,
path=None,
region=None):
target = self._validate_target(target)
payload = {'contentString': json.dumps(content_string),
'headers': {'Content-Type': self.api_content_type,
'X-Amz-Target': f'AWSBlackbeardService.{target[0].capitalize() + target[1:]}',
'X-Amz-User-Agent': self.api_user_agent},
'method': method,
'operation': target,
'params': params or {},
'path': path or '/',
'region': region or self.region}
return copy.deepcopy(payload)
def _get_paginated_results(self, # pylint: disable=too-many-arguments
content_payload,
target,
object_group=None,
object_type=None,
method='POST',
params=None,
path=None,
region=None,
next_token_marker='NextToken'):
payload = self._get_api_payload(content_string=content_payload,
target=target,
method=method,
params=params,
path=f'/{path}/' if path else '/',
region=region)
response, next_token = self._get_partial_response(payload, next_token_marker)
if not object_group:
yield response.json()
else:
for data in response.json().get(object_group, []):
if object_type:
yield object_type(self, data)
else:
yield data
while next_token:
content_string = copy.deepcopy(json.loads(payload.get('contentString')))
content_string.update({next_token_marker: next_token})
payload.update({'contentString': json.dumps(content_string)})
response, next_token = self._get_partial_response(payload, next_token_marker)
if not object_group:
yield response.json()
else:
for data in response.json().get(object_group, []):
if object_type:
yield object_type(self, data)
else:
yield data
def _get_partial_response(self, payload, next_token_marker):
response = self.session.post(self.url, json=payload)
if not response.ok:
self.logger.debug('Failed getting partial response with payload :%s\n', payload)
self.logger.debug('Response received :%s\n', response.content)
raise ValueError(response.text)
next_token = response.json().get(next_token_marker)
return response, next_token
@property
def _update_data(self):
if self._update_data_ is None:
self._update_data_ = next(self._get_paginated_results(content_payload={},
target='getAvailableUpdates'))
return self._update_data_
    @property
    @validate_availability
    def baseline_update_available(self):
        """Whether a baseline update is available, per getAvailableUpdates."""
        return self._update_data.get('BaselineUpdateAvailable')
    @property
    @validate_availability
    def guardrail_update_available(self):
        """Whether a guardrail update is available, per getAvailableUpdates."""
        return self._update_data.get('GuardrailUpdateAvailable')
    @property
    @validate_availability
    def landing_zone_update_available(self):
        """Whether a landing zone update is available, per getAvailableUpdates."""
        return self._update_data.get('LandingZoneUpdateAvailable')
    @property
    @validate_availability
    def service_landing_zone_version(self):
        """The latest landing zone version offered by the service."""
        return self._update_data.get('ServiceLandingZoneVersion')
    @property
    @validate_availability
    def user_landing_zone_version(self):
        """The landing zone version currently deployed for this account."""
        return self._update_data.get('UserLandingZoneVersion')
    @property
    @validate_availability
    def landing_zone_version(self):
        """Landing zone version."""
        # NOTE(review): intentionally reads the same field as user_landing_zone_version —
        # presumably the deployed version is the canonical "landing zone version".
        return self._update_data.get('UserLandingZoneVersion')
    @property
    @validate_availability
    def organizational_units(self):
        """The organizational units under control tower.
        Returns:
            organizational_units (OrganizationalUnit): A list of organizational units objects under control tower's
            control.
        """
        # NOTE: returns a lazy generator; the api calls happen on iteration, on every access.
        return self._get_paginated_results(content_payload={'MaxResults': 20},
                                           target='listManagedOrganizationalUnits',
                                           object_type=ControlTowerOU,
                                           object_group='ManagedOrganizationalUnitList',
                                           next_token_marker='NextToken')
@validate_availability
def register_organizations_ou(self, name):
"""Registers an Organizations OU under control tower.
Args:
name (str): The name of the Organizations OU to register to Control Tower.
Returns:
result (bool): True if successfull, False otherwise.
"""
if self.get_organizational_unit_by_name(name):
self.logger.info('OU "%s" is already registered with Control Tower.', name)
return True
org_ou = self.get_organizations_ou_by_name(name)
if not org_ou:
self.logger.error('OU "%s" does not exist under organizations.', name)
return False
return self._register_org_ou_in_control_tower(org_ou)
@validate_availability
def create_organizational_unit(self, name):
"""Creates a Control Tower managed organizational unit.
Args:
name (str): The name of the OU to create.
Returns:
result (bool): True if successfull, False otherwise.
"""
self.logger.debug('Trying to create OU :"%s" under root ou', name)
try:
response = self.organizations.create_organizational_unit(ParentId=self.root_ou.id, Name=name)
except botocore.exceptions.ClientError as err:
status = err.response["ResponseMetadata"]["HTTPStatusCode"]
error_code = err.response["Error"]["Code"]
error_message = err.response["Error"]["Message"]
if not status == 200:
self.logger.error('Failed to create OU "%s" under Organizations with error code %s: %s',
name, error_code, error_message)
return False
org_ou = OrganizationsOU(response.get('OrganizationalUnit', {}))
self.logger.debug(response)
return self._register_org_ou_in_control_tower(org_ou)
    def _register_org_ou_in_control_tower(self, org_ou):
        """Moves management of an existing Organizations OU under Control Tower.

        Args:
            org_ou (OrganizationsOU): The organizations OU to register.
        Returns:
            bool: True on success, False otherwise.
        """
        self.logger.debug('Trying to move management of OU under Control Tower')
        payload = self._get_api_payload(content_string={'OrganizationalUnitId': org_ou.id,
                                                        'OrganizationalUnitName': org_ou.name,
                                                        'ParentOrganizationalUnitId': self.root_ou.id,
                                                        'ParentOrganizationalUnitName': self.root_ou.name,
                                                        'OrganizationalUnitType': 'CUSTOM'},
                                        target='manageOrganizationalUnit')
        response = self.session.post(self.url, json=payload)
        if not response.ok:
            self.logger.error('Failed to register OU "%s" to Control Tower with response status "%s" '
                              'and response text "%s"',
                              org_ou.name, response.status_code, response.text)
            return False
        # The registration call returns before guardrails are applied; wait a settling period.
        self.logger.debug('Giving %s seconds time for the guardrails to be applied', self.settling_time)
        time.sleep(self.settling_time)
        self.logger.debug('Successfully moved management of OU "%s" under Control Tower', org_ou.name)
        return response.ok
    @validate_availability
    def delete_organizational_unit(self, name):
        """Deletes a Control Tower managed organizational unit.
        Args:
            name (str): The name of the OU to delete.
        Returns:
            result (bool): True if successfull, False otherwise.
        """
        organizational_unit = self.get_organizational_unit_by_name(name)
        if not organizational_unit:
            self.logger.error('No organizational unit with name :"%s" registered with Control Tower', name)
            return False
        # Two steps: first deregister from Control Tower, then delete from Organizations.
        payload = self._get_api_payload(content_string={'OrganizationalUnitId': organizational_unit.id},
                                        target='deregisterOrganizationalUnit')
        self.logger.debug('Trying to unregister OU "%s" with payload "%s"', name, payload)
        response = self.session.post(self.url, json=payload)
        if not response.ok:
            self.logger.error('Failed to unregister OU "%s" with response status "%s" and response text "%s"',
                              name, response.status_code, response.text)
            return False
        self.logger.debug('Successfully unregistered management of OU "%s" from Control Tower', name)
        self.logger.debug('Trying to delete OU "%s" from Organizations', name)
        response = self.organizations.delete_organizational_unit(OrganizationalUnitId=organizational_unit.id)
        self.logger.debug(response)
        return bool(response.get('ResponseMetadata', {}).get('HTTPStatusCode') == 200)
@validate_availability
def get_organizational_unit_by_name(self, name):
"""Gets a Control Tower managed Organizational Unit by name.
Args:
name (str): The name of the organizational unit to retrieve.
Returns:
result (ControlTowerOU): A OU object on success, None otherwise.
"""
return next((ou for ou in self.organizational_units if ou.name == name), None)
@validate_availability
def get_organizational_unit_by_id(self, id_):
"""Gets a Control Tower managed Organizational Unit by id.
Args:
id_ (str): The id of the organizational unit to retrieve.
Returns:
result (ControlTowerOU): A OU object on success, None otherwise.
"""
return next((ou for ou in self.organizational_units if ou.id == id_), None)
@property
@validate_availability
def organizations_ous(self):
"""The organizational units under Organizations.
Returns:
organizational_units (OrganizationsOU): A list of organizational units objects under Organizations.
"""
response = self.organizations.list_organizational_units_for_parent(ParentId=self.root_ou.id)
return [OrganizationsOU(data)
for data in response.get('OrganizationalUnits', [])]
@validate_availability
def get_organizations_ou_by_name(self, name):
"""Gets an Organizations managed Organizational Unit by name.
Args:
name (str): The name of the organizational unit to retrieve.
Returns:
result (OrganizationsOU): A OU object on success, None otherwise.
"""
return next((ou for ou in self.organizations_ous if ou.name == name), None)
@validate_availability
def get_organizations_ou_by_id(self, id_):
"""Gets an Organizations managed Organizational Unit by id.
Args:
id_ (str): The id of the organizational unit to retrieve.
Returns:
result (OrganizationsOU): A OU object on success, None otherwise.
"""
return next((ou for ou in self.organizations_ous if ou.id == id_), None)
@validate_availability
def get_organizations_ou_by_arn(self, arn):
"""Gets an Organizations managed Organizational Unit by arn.
Args:
arn (str): The arn of the organizational unit to retrieve.
Returns:
result (OrganizationsOU): A OU object on success, None otherwise.
"""
return next((ou for ou in self.organizations_ous if ou.arn == arn), None)
    @property
    @validate_availability
    def accounts(self):
        """The accounts under control tower.
        Returns:
            accounts (Account): A list of account objects under control tower's control.
        """
        # NOTE: returns a lazy generator; the api calls happen on iteration, on every access.
        return self._get_paginated_results(content_payload={},
                                           target='listManagedAccounts',
                                           object_type=ControlTowerAccount,
                                           object_group='ManagedAccountList',
                                           next_token_marker='NextToken')
@property
def _service_catalog_accounts_data(self):
products = self.service_catalog.search_provisioned_products()
return [data for data in products.get('ProvisionedProducts', [])
if data.get('Type', '') == 'CONTROL_TOWER_ACCOUNT']
    @validate_availability
    def get_available_accounts(self):
        """Retrieves the available accounts from control tower.
        Returns:
            accounts (Account): A list of available account objects under control tower's control.
        """
        return self._filter_for_status('AVAILABLE')
    @validate_availability
    def get_erroring_accounts(self):
        """Retrieves the erroring accounts from control tower.
        Returns:
            accounts (Account): A list of erroring account objects under control tower's control.
        """
        return self._filter_for_status('ERROR')
@validate_availability
def get_accounts_with_available_updates(self):
"""Retrieves the accounts that have available updates from control tower.
Returns:
accounts (Account): A list of account objects under control tower's control with available updates.
"""
return [account for account in self.accounts if account.has_available_update]
@validate_availability
def get_updated_accounts(self):
"""Retrieves the accounts that have no available updates from control tower.
Returns:
accounts (Account): A list of account objects under control tower's control with no available updates.
"""
return [account for account in self.accounts if not account.has_available_update]
def get_changing_accounts(self):
"""Retrieves the under change accounts from control tower.
Returns:
accounts (Account): A list of under change account objects under control tower's control.
"""
products = self.service_catalog.search_provisioned_products()
return [ControlTowerAccount(self, {'AccountId': data.get('PhysicalId')})
for data in products.get('ProvisionedProducts', [])
if all([data.get('Type', '') == 'CONTROL_TOWER_ACCOUNT',
data.get('Status', '') == 'UNDER_CHANGE'])]
    def _filter_for_status(self, status):
        """Returns the managed accounts whose service catalog status equals `status`."""
        return [account for account in self.accounts if account.service_catalog_status == status]
def _get_by_attribute(self, attribute, value):
return next((account for account in self.accounts
if getattr(account, attribute) == value), None)
def _get_service_catalog_data_by_account_id(self, account_id):
return next((data for data in self._service_catalog_accounts_data
if data.get('PhysicalId') == account_id), None)
    @validate_availability
    def get_account_by_name(self, name):
        """Retrieves an account by name.

        Args:
            name (str): The name of the account to retrieve.
        Returns:
            account (Account): An account object that matches the name or None.
        """
        return self._get_by_attribute('name', name)
    @validate_availability
    def get_account_by_id(self, id_):
        """Retrieves an account by id.

        Args:
            id_ (str): The id of the account to retrieve.
        Returns:
            account (Account): An account object that matches the id or None.
        """
        return self._get_by_attribute('id', id_)
    @validate_availability
    def get_account_by_arn(self, arn):
        """Retrieves an account by arn.

        Args:
            arn (str): The arn of the account to retrieve.
        Returns:
            account (Account): An account object that matches the arn or None.
        """
        return self._get_by_attribute('arn', arn)
@retry(retry_on_exceptions=OUCreating, max_calls_total=7, retry_window_after_first_call_in_seconds=60)
@validate_availability
def create_account(self, # pylint: disable=too-many-arguments
account_name,
account_email,
organizational_unit,
product_name=None,
sso_first_name=None,
sso_last_name=None,
sso_user_email=None):
"""Creates a Control Tower managed account.
Args:
account_name (str): The name of the account.
account_email (str): The email of the account.
organizational_unit (str): The organizational unit that the account should be under.
product_name (str): The product name, if nothing is provided it uses the account name.
sso_first_name (str): The first name of the SSO user, defaults to "Control"
sso_last_name (str): The last name of the SSO user, defaults to "Tower"
sso_user_email (str): The email of the sso, if nothing is provided it uses the account email.
Returns:
result (bool): True on success, False otherwise.
"""
product_name = product_name or account_name
sso_user_email = sso_user_email or account_email
sso_first_name = sso_first_name or 'Control'
sso_last_name = sso_last_name or 'Tower'
if not self.get_organizational_unit_by_name(organizational_unit):
if not self.create_organizational_unit(organizational_unit):
self.logger.error('Unable to create the organizational unit!')
return False
arguments = {'ProductId': self._account_factory.product_id,
'ProvisionedProductName': product_name,
'ProvisioningArtifactId': self._active_artifact.get('Id'),
'ProvisioningParameters': [{'Key': 'AccountName',
'Value': account_name},
{'Key': 'AccountEmail',
'Value': account_email},
{'Key': 'SSOUserFirstName',
'Value': sso_first_name},
{'Key': 'SSOUserLastName',
'Value': sso_last_name},
{'Key': 'SSOUserEmail',
'Value': sso_user_email},
{'Key': 'ManagedOrganizationalUnit',
'Value': organizational_unit}]}
try:
response = self.service_catalog.provision_product(**arguments)
except botocore.exceptions.ClientError as err:
if CREATING_ACCOUNT_ERROR_MESSAGE in err.response['Error']['Message']:
raise OUCreating
raise
response_metadata = response.get('ResponseMetadata', {})
success = response_metadata.get('HTTPStatusCode') == 200
if not success:
self.logger.error('Failed to create account, response was :%s', response_metadata)
return False
# Making sure that eventual consistency is not a problem here,
# we wait for control tower to be aware of the service catalog process
while not self.busy:
time.sleep(1)
return True
    @property
    @validate_availability
    def service_control_policies(self):
        """The service control policies under organization.
        Returns:
            service_control_policies (list): A list of SCPs under the organization.
        """
        return [ServiceControlPolicy(data)
                for data in self.organizations.list_policies(Filter='SERVICE_CONTROL_POLICY').get('Policies', [])]
@validate_availability
def get_service_control_policy_by_name(self, name):
"""Retrieves a service control policy by name.
Args:
name (str): The name of the SCP to retrieve
Returns:
scp (ServiceControlPolicy): The scp if a match is found else None.
"""
return next((scp for scp in self.service_control_policies
if scp.name == name), None)
    @validate_availability
    def update(self):
        """Updates the control tower to the latest version.
        Returns:
            bool: True on success, False on failure.
        """
        if not self.landing_zone_update_available:
            self.logger.warning('Landing zone does not seem to need update, is at version %s',
                                self.landing_zone_version)
            return False
        # NOTE(review): assumes CoreAccount exposes `label` matching the requested account
        # type ('LOGGING'/'SECURITY') and an `email` attribute — defined in .resources.
        log_account = next((account for account in self.core_accounts if account.label == 'LOGGING'), None)
        if not log_account:
            raise ServiceCallFailed('Could not retrieve logging account to get the email.')
        security_account = next((account for account in self.core_accounts if account.label == 'SECURITY'), None)
        if not security_account:
            raise ServiceCallFailed('Could not retrieve security account to get the email.')
        # An update is performed by re-submitting setupLandingZone with the current emails.
        payload = self._get_api_payload(content_string={'HomeRegion': self.region,
                                                        'LogAccountEmail': log_account.email,
                                                        'SecurityAccountEmail': security_account.email},
                                        target='setupLandingZone')
        self.logger.debug('Trying to update the landing zone with payload "%s"', payload)
        response = self.session.post(self.url, json=payload)
        if not response.ok:
            self.logger.error('Failed to update the landing zone with response status "%s" and response text "%s"',
                              response.status_code, response.text)
            return False
        self.logger.debug('Successfully started updating landing zone')
        return True
    @property
    def busy(self):
        """True while a landing zone change or an account change is in progress."""
        # NOTE: `status` issues an api call per access, so it is queried twice here.
        return any([self.status == 'IN_PROGRESS',
                    self.status == 'DELETE_IN_PROGRESS',
                    self.get_changing_accounts()])
    @property
    def status(self):
        """The landing zone status string, or None when the status call fails."""
        return self._get_status().get('LandingZoneStatus')
    @property
    def percentage_complete(self):
        """The deployment completion percentage, or None when the status call fails."""
        return self._get_status().get('PercentageComplete')
    @property
    def deploying_messages(self):
        """The deployment progress messages, or None when the status call fails."""
        return self._get_status().get('Messages')
    @property
    def region_metadata_list(self):
        """The per-region metadata of the landing zone, or None when the status call fails."""
        return self._get_status().get('RegionMetadataList')
def _get_status(self):
payload = self._get_api_payload(content_string={},
target='getLandingZoneStatus')
self.logger.debug('Trying to get the landing zone status with payload "%s"', payload)
response = self.session.post(self.url, json=payload)
if not response.ok:
self.logger.error('Failed to get the landing zone status with response status "%s" and response text "%s"',
response.status_code, response.text)
return {}
self.logger.debug('Successfully got landing zone status.')
return response.json()
    @property
    @validate_availability
    def drift_messages(self):
        """The drift details of the landing zone; empty list when the call fails."""
        payload = self._get_api_payload(content_string={},
                                        target='listDriftDetails')
        self.logger.debug('Trying to get the drift messages of the landing zone with payload "%s"', payload)
        response = self.session.post(self.url, json=payload)
        if not response.ok:
            self.logger.error('Failed to get the drift message of the landing zone with response status "%s" and '
                              'response text "%s"',
                              response.status_code, response.text)
            return []
        return response.json().get('DriftDetails')
@property
@validate_availability
def enabled_guard_rails(self):
"""Enabled guard rails."""
output = []
for result in self._get_paginated_results(content_payload={}, target='listEnabledGuardrails'):
output.extend([GuardRail(self, data) for data in result.get('EnabledGuardrailList')])
return output
@property
@validate_availability
def guard_rails(self):
"""Guard rails."""
output = []
for result in self._get_paginated_results(content_payload={}, target='listGuardrails'):
output.extend([GuardRail(self, data) for data in result.get('GuardrailList')])
return output
@property
@validate_availability
def guard_rails_violations(self):
"""List guard rails violations."""
output = []
for result in self._get_paginated_results(content_payload={}, target='listGuardrailViolations'):
output.extend(result.get('GuardrailViolationList'))
return output
@property
@validate_availability
def catastrophic_drift(self):
"""List of catastrophic drift."""
output = []
for result in self._get_paginated_results(content_payload={}, target='getCatastrophicDrift'):
output.extend(result.get('DriftDetails'))
return output
    @property
    def _account_factory_config(self):
        """The config of the account factory; empty dict when the call fails."""
        payload = self._get_api_payload(content_string={},
                                        target='describeAccountFactoryConfig')
        self.logger.debug('Trying to get the account factory config of the landing zone with payload "%s"', payload)
        response = self.session.post(self.url, json=payload)
        if not response.ok:
            self.logger.error('Failed to get the the account factory config of the landing zone with response status '
                              '"%s" and response text "%s"',
                              response.status_code, response.text)
            return {}
        return response.json().get('AccountFactoryConfig')
    def _pre_deploy_check(self):
        """Performs the pre launch checks; returns their results, empty list on failure."""
        payload = self._get_api_payload(content_string={},
                                        target='performPreLaunchChecks')
        self.logger.debug('Trying the pre deployment check with payload "%s"', payload)
        response = self.session.post(self.url, json=payload)
        if not response.ok:
            self.logger.error('Failed to do the pre deployment checks with response status '
                              '"%s" and response text "%s"',
                              response.status_code, response.text)
            return []
        return response.json().get('PreLaunchChecksResult')
    def is_email_used(self, email):
        """Checks whether an email is already bound to an account.

        Args:
            email (str): The email to check.
        Returns:
            bool: True when an account with the email exists, False otherwise.
        Raises:
            EmailCheckFailed: When the api call fails.
        """
        payload = self._get_api_payload(content_string={'AccountEmail': email},
                                        target='getAccountInfo')
        self.logger.debug('Trying to check email with payload "%s"', payload)
        response = self.session.post(self.url, json=payload)
        if not response.ok:
            self.logger.error('Failed to check for email with response status '
                              '"%s" and response text "%s"',
                              response.status_code, response.text)
            raise EmailCheckFailed(response.text)
        return response.json().get('AccountWithEmailExists')
def _validate_regions(self, regions):
available_regions = self.get_available_regions()
if not set(available_regions).issuperset(set(regions)):
raise UnavailableRegion(set(regions) - set(available_regions))
return regions
def _create_system_role(self, parameters):
default_params = {'Action': 'CreateServiceRole',
'ContentType': 'JSON',
'ServicePrincipalName': 'controltower.amazonaws.com',
'TemplateVersion': 1}
default_params.update(parameters)
payload = {'headers': {'Content-Type': 'application/x-amz-json-1.1'},
'method': 'GET',
'params': default_params,
'path': '/',
'region': 'us-east-1'}
self.logger.debug('Trying to system role with payload "%s"', payload)
response = self.session.post(self._iam_admin_url, json=payload)
if all([not response.ok,
response.status_code == 409,
response.json().get('Error', {}).get('Code') == 'EntityAlreadyExists]']):
self.logger.error('Entity already exists, response status "%s" and response text "%s"',
response.status_code, response.text)
return True
if not response.ok:
self.logger.error('Entity already exists, response status "%s" and response text "%s"',
response.status_code, response.text)
return True
self.logger.debug('Successfully created system role.')
return True
    def _create_control_tower_admin(self):
        """Creates the AWSControlTowerAdmin service role; True on success or if it exists."""
        parameters = {'AmazonManagedPolicyArn': 'arn:aws:iam::aws:policy/service-role/AWSControlTowerServiceRolePolicy',
                      'Description': 'AWS Control Tower policy to manage AWS resources',
                      'PolicyName': 'AWSControlTowerAdminPolicy',
                      'RoleName': 'AWSControlTowerAdmin',
                      'TemplateName': 'AWSControlTowerAdmin',
                      'TemplateVersion': 2}
        return self._create_system_role(parameters)
    def _create_control_tower_cloud_trail_role(self):
        """Creates the AWSControlTowerCloudTrailRole; True on success or if it exists."""
        parameters = {'Description': 'AWS Cloud Trail assumes this role to create and '
                                     'publish Cloud Trail logs',
                      'PolicyName': 'AWSControlTowerCloudTrailRolePolicy',
                      'RoleName': 'AWSControlTowerCloudTrailRole',
                      'TemplateName': 'AWSControlTowerCloudTrailRole'}
        return self._create_system_role(parameters)
    def _create_control_tower_stack_set_role(self):
        """Creates the AWSControlTowerStackSetRole; True on success or if it exists."""
        parameters = {'Description': 'AWS CloudFormation assumes this role to deploy '
                                     'stacksets in accounts created by AWS Control Tower',
                      'PolicyName': 'AWSControlTowerStackSetRolePolicy',
                      'RoleName': 'AWSControlTowerStackSetRole',
                      'TemplateName': 'AWSControlTowerStackSetRole'}
        return self._create_system_role(parameters)
    def _create_control_tower_config_aggregator_role(self):
        """Creates the config aggregator role; True on success or if it exists."""
        parameters = {'AmazonManagedPolicyArn': 'arn:aws:iam::aws:policy/service-role/AWSConfigRoleForOrganizations',
                      'Description': 'AWS ControlTower needs this role to help in '
                                     'external config rule detection',
                      'RoleName': 'AWSControlTowerConfigAggregatorRoleForOrganizations',
                      'TemplateName': 'AWSControlTowerConfigAggregatorRole'}
        return self._create_system_role(parameters)
    def deploy(self, logging_account_email, security_account_email, regions=None, retries=10, wait=1):  # pylint: disable=too-many-arguments
        """Deploys control tower.

        Args:
            logging_account_email (str): The email for the logging account to be created.
            security_account_email (str): The email for the security account to be created.
            regions (list): Regions to enable; defaults to the home region only.
            retries (int): How many times to retry the setup call.
            wait (int): Seconds to wait between retries.
        Returns:
            bool: True on success, False on failure.
        """
        if self.is_deployed:
            self.logger.warning('Control tower does not seem to need deploying, already deployed.')
            return True
        regions = self._validate_regions(regions or [self.region])
        # The api wants every available region listed, flagged enabled or disabled.
        region_list = [{"Region": region, "RegionConfigurationStatus": "ENABLED" if region in regions else "DISABLED"}
                       for region in self.get_available_regions()]
        validation = self._pre_deploy_check()
        self.logger.debug('Got validation response %s.', validation)
        if not all([list(entry.values()).pop().get('Result') == 'SUCCESS' for entry in validation]):
            raise PreDeployValidationFailed(validation)
        invalid_emails = [email for email in [logging_account_email, security_account_email]
                          if self.is_email_used(email)]
        if invalid_emails:
            raise EmailInUse(invalid_emails)
        # All four system roles must exist before setupLandingZone can succeed.
        if not all([self._create_control_tower_admin(),
                    self._create_control_tower_cloud_trail_role(),
                    self._create_control_tower_stack_set_role(),
                    self._create_control_tower_config_aggregator_role()]):
            raise RoleCreationFailure('Unable to create required roles AWSControlTowerAdmin, '
                                      'AWSControlTowerCloudTrailRole, AWSControlTowerStackSetRole, '
                                      'AWSControlTowerConfigAggregatorRole, manual cleanup is required.')
        payload = self._get_api_payload(content_string={'HomeRegion': self.region,
                                                        'LogAccountEmail': logging_account_email,
                                                        'SecurityAccountEmail': security_account_email,
                                                        'RegionConfigurationList': region_list},
                                        target='setupLandingZone')
        self.logger.debug('Trying to deploy control tower with payload "%s"', payload)
        return self._deploy(payload, retries, wait)
def _deploy(self, payload, retries=10, wait=1):
succeded = False
while retries:
response = self.session.post(self.url, json=payload)
succeded = response.ok
retries -= 1
if response.ok:
retries = 0
if all([not response.ok,
retries]):
self.logger.error('Failed to deploy control tower with response status "%s" and response text "%s"'
'still have %s retries will wait for %s seconds', response.status_code,
response.text, retries, wait)
sleep(wait)
if not succeded:
self.logger.error('Failed to deploy control tower, retries were spent.. Maybe try again later?')
return False
self.logger.debug('Successfully started deploying control tower.')
return True
def decommission(self):
"""Decommissions a landing zone.
The api call does not seem to be enough and although the resources are decomissioned like with
the proper process, control tower responds with a delete failed on the api, so it seems that
aws needs to perform actions on their end for the decommissioning to be successful.
Returns:
response (bool): True if the process starts successfully, False otherwise.
"""
payload = self._get_api_payload(content_string={},
target='deleteLandingZone',
region=self.region)
response = self.session.post(self.url, json=payload)
if not response.ok:
self.logger.error('Failed to decommission control tower with response status "%s" and response text "%s"',
response.status_code, response.text)
return False
self._is_deployed = None
self.logger.debug('Successfully started decommissioning control tower.')
return True
| [
"costas.tyf@gmail.com"
] | costas.tyf@gmail.com |
049b2793c4f8775370e60d6bba96c8a4b0ea6e27 | dfd0797c88aec7b02866d3c559cb1bc64ce87b44 | /Chapter 11 - Testing Code/survey.py | e81028f4b1415ccfd157f4a15c6bb19fcccb5a81 | [] | no_license | 8BitJustin/2020-Python-Crash-Course | d97f9b79c7a1e1c88c9bc2b035b0e98b2ef23025 | 1f078d7fa62e2b07f8d6c01f85e60baed8293779 | refs/heads/master | 2020-12-20T00:19:44.173143 | 2020-06-14T18:42:08 | 2020-06-14T18:42:08 | 235,893,110 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | class AnonymousSurvey():
"""Collect anonymous answers to a survey question."""
    def __init__(self, question):
        """Store question and prepare to store responses.

        :param question: the survey question text shown to participants.
        """
        self.question = question
        # Collected answers, kept in the order they were stored.
        self.responses = []
    def show_question(self):
        """Show the survey question.

        Prints to stdout; returns nothing.
        """
        print(self.question)
    def store_response(self, new_response):
        """Store a single response to the survey.

        :param new_response: the answer text to record.
        """
        self.responses.append(new_response)
def show_results(self):
"""Show all the responses that have been given."""
print("Survey results:")
for response in self.responses:
print(f"- {response}")
| [
"j.olson.digital@gmail.com"
] | j.olson.digital@gmail.com |
9cb761ebc2d0cb43448126d9150ef9c8f0545248 | 478aa8e979226404fcba2a9aa1cc9c05b7b9b33b | /cars/views.py | feb184e0a125ee455f5b919ce6e6a79d0eb278d8 | [] | no_license | avramenkomy/module_E5 | 5054728c08df028e0f32642c92f6393c31a3d900 | 4bfaacfd4fb94cd2ba59eff5c0dfca1b846bc56a | refs/heads/master | 2023-03-18T15:50:09.901002 | 2021-03-09T11:03:42 | 2021-03-09T11:03:42 | 345,962,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,941 | py | from django.http import HttpResponse
from django.shortcuts import render
from cars.models import Car
from django.template import loader
from django.views.generic.detail import DetailView
from cars.forms import CarFilterForm, CarFullFilter
from django.db.models import Q
# Create your views here.
def index(request):
    """Render the car listing, narrowed by the two optional GET filter forms."""
    template = loader.get_template('base.html')
    queryset = Car.objects.select_related("manufacturer").all()
    filter_form = CarFilterForm(request.GET)
    search_form = CarFullFilter(request.GET)
    if filter_form.is_valid():
        data = filter_form.cleaned_data
        if data["min_year"]:
            queryset = queryset.filter(year__gte=data["min_year"])
        if data["max_year"]:
            queryset = queryset.filter(year__lte=data["max_year"])
        if data["model"]:
            # Match against the model name or the manufacturer title.
            queryset = queryset.filter(
                Q(model__icontains=data["model"]) |
                Q(manufacturer__title=data["model"]) |
                Q(manufacturer__title__icontains=data["model"]))
        if data["gear"] != "":
            queryset = queryset.filter(Q(gear=data["gear"]))
    if search_form.is_valid():
        term = search_form.cleaned_data["search"]
        if term:
            # Free-text search over model, manufacturer and gear fields.
            queryset = queryset.filter(
                Q(model=term) |
                Q(manufacturer__title=term) |
                Q(manufacturer__title__icontains=term) |
                Q(gear=term) |
                Q(gear__icontains=term))
    return HttpResponse(template.render({"cars": queryset, "form": filter_form, "full_form": search_form}))
class CarDetailView(DetailView):
    """Generic detail page for a single Car; template and context come from DetailView defaults."""
    model = Car
| [
"avramenkomy@yandex.ru"
] | avramenkomy@yandex.ru |
2f2e1030c13ce06b0ed7d9df9dc967b10e484184 | 42878cf22469c5adc3d92c9f5eb670b00001956d | /src/synamic/core/services/pre_processor/pre_processor_service.py | 119feec16e16f160b43344bfdb8f18d94db0b763 | [
"MIT"
] | permissive | SabujXi/Synamic | 4fb34f1b2c05df22e98f9b001b948c2a52248693 | c9c06ecf874be82dbb2cba890cb483300809de98 | refs/heads/master | 2021-05-16T00:27:50.197163 | 2020-08-10T18:20:59 | 2020-08-10T18:20:59 | 107,001,678 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,661 | py | """
author: "Md. Sabuj Sarker"
copyright: "Copyright 2017-2018, The Synamic Project"
credits: ["Md. Sabuj Sarker"]
license: "MIT"
maintainer: "Md. Sabuj Sarker"
email: "md.sabuj.sarker@gmail.com"
status: "Development"
"""
from synamic.core.standalones.functions.decorators import not_loaded, loaded
from .builtin_processors import _builtin_processor_classes
from synamic.exceptions import SynamicPreProcessorNotFound
class PreProcessorService:
    """Registry of pre-processors for a site.

    Discovers processor directories under the site's pre-process content dir,
    instantiates the matching builtin processor classes, and exposes them by
    name (including via attribute access through ``__getattr__``).
    """
    def __init__(self, site):
        self.__site = site
        # Maps processor name -> instantiated processor object.
        self.__name_to_processor = {}
        self.__is_loaded = False
    @property
    def is_loaded(self):
        # True once load() has completed; @not_loaded uses this as a guard.
        return self.__is_loaded
    @not_loaded
    def load(self):
        """Discover, register and load all pre-processors (runs only once).

        On-disk processor dirs are registered first; any builtin processor
        without a directory is then registered with a synthesized path, so
        every builtin is always available. Finally each processor is loaded.
        """
        # load builtin processor
        preprocess_cdir = self.__site.cpaths.pre_process_cdir
        if preprocess_cdir.exists():
            cdirs = preprocess_cdir.list_dirs(depth=1)
            for cdir in cdirs:
                processor_name = cdir.basename
                if processor_name in _builtin_processor_classes:
                    self.add_processor(processor_name, cdir, _builtin_processor_classes[processor_name])
        # Add builtin pre-processor even if the dir does not exist for it
        for processor_name, processor_class in _builtin_processor_classes.items():
            if self.get_processor(processor_name, default=None, error_out=False) is None:
                cdir = preprocess_cdir.join(processor_name, is_file=False)
                self.add_processor(processor_name, cdir, processor_class)
        for processor in self.__name_to_processor.values():
            processor.load()
        self.__is_loaded = True
    def add_processor(self, processor_name, processor_cpath, processor_class):
        """Instantiate *processor_class* and register it under *processor_name*.

        Asserts the name is not already registered. Returns the new processor.
        """
        assert type(processor_class) is type
        assert processor_name not in self.__name_to_processor
        processor = processor_class(self.__site, processor_cpath)
        self.__name_to_processor[processor_name] = processor
        return processor
    def get_processor(self, processor_name, default=None, error_out=True):
        """Look up a processor by name.

        :raises SynamicPreProcessorNotFound: when missing and *error_out* is True;
            otherwise *default* is returned.
        """
        processor = self.__name_to_processor.get(processor_name, None)
        if processor is None and error_out is True:
            raise SynamicPreProcessorNotFound(f'Processor {processor_name} could not be found')
        elif processor is None:
            return default
        else:
            return processor
    @property
    def pre_processors(self):
        # Immutable snapshot of all registered processors.
        return tuple(self.__name_to_processor.values())
    def __getattr__(self, key):
        # Unknown attributes resolve to processors; note this raises
        # SynamicPreProcessorNotFound (not AttributeError), which can confuse
        # hasattr()-style probing.
        return self.get_processor(key, error_out=True)
| [
"md.sabuj.sarker@gmail.com"
] | md.sabuj.sarker@gmail.com |
cde238fc9cdfd455c5a2a4901a72c3558c5c5567 | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/SaveSingleTaskForCancelingTransferOutRequest.py | 197a0ac4844e7d6ab5af56f639c3e4831f07be01 | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,489 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SaveSingleTaskForCancelingTransferOutRequest(RpcRequest):
    """RPC request wrapper for the Domain API action 'SaveSingleTaskForCancelingTransferOut'."""

    def __init__(self):
        RpcRequest.__init__(self, 'Domain', '2018-01-29',
                            'SaveSingleTaskForCancelingTransferOut')

    def get_DomainName(self):
        """Return the 'DomainName' query parameter."""
        return self.get_query_params().get('DomainName')

    def set_DomainName(self, DomainName):
        """Set the 'DomainName' query parameter."""
        self.add_query_param('DomainName', DomainName)

    def get_UserClientIp(self):
        """Return the 'UserClientIp' query parameter."""
        return self.get_query_params().get('UserClientIp')

    def set_UserClientIp(self, UserClientIp):
        """Set the 'UserClientIp' query parameter."""
        self.add_query_param('UserClientIp', UserClientIp)

    def get_Lang(self):
        """Return the 'Lang' query parameter."""
        return self.get_query_params().get('Lang')

    def set_Lang(self, Lang):
        """Set the 'Lang' query parameter."""
        self.add_query_param('Lang', Lang)
"1478458905@qq.com"
] | 1478458905@qq.com |
7547ee2982aa5a0f157c0e903540e4774050ac79 | 48df8f9a545b86caf0e52fbaa9f74d71c34285de | /oneshot/getstd.py | 1031bc3b11cb673aaf832f53853292edb9826731 | [] | no_license | Python3pkg/OneShot | 7954408376163279713396a6bfe06669b7c69eea | 26d8ad99a5bb8f925d358e9601005062f651ad4e | refs/heads/master | 2021-01-21T17:38:33.119792 | 2017-05-21T17:23:55 | 2017-05-21T17:23:55 | 91,974,963 | 0 | 0 | null | 2017-05-21T17:23:53 | 2017-05-21T17:23:53 | null | UTF-8 | Python | false | false | 1,450 | py | import os as _os
_on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not _on_rtd:
import numpy as _np
# Get std dev (spot size) {{{
def getstd(res, h, xval):
    """Return the per-column weighted variance of histogram *h* along *xval*.

    .. deprecated:: 0.0.0
       I'm not really sure what this function does, but it's not referenced
       anywhere else.

    The interactive plotting branches guarded by the hard-coded
    ``indivbool = False`` flag referenced an undefined ``plt`` (matplotlib was
    never imported) and would have raised NameError if enabled; they and the
    unused nested ``gauss`` helper have been removed. The computed result is
    unchanged.

    Parameters
    ----------
    res : int
        Number of columns in *h* (length of the returned array).
    h : array_like, shape (len(xval), res)
        Histogram counts; each column is treated as weights over *xval*.
    xval : array_like
        Sample positions corresponding to the rows of *h*.

    Returns
    -------
    numpy.ndarray
        Length-*res* array of weighted variances (sigma squared, not sigma,
        despite the function name).
    """
    stddevsq = _np.zeros(res)
    for i, row in enumerate(_np.transpose(h)):
        # Weighted mean and variance of xval, weighted by the column counts.
        mean = _np.sum(xval * row) / row.sum()
        var = _np.sum(_np.power(xval - mean, 2) * row) / row.sum()
        stddevsq[i] = var
    return stddevsq
# }}}
| [
"joelfred@slac.stanford.edu"
] | joelfred@slac.stanford.edu |
463090bb5b1d514884c43abfa3250022348fc00d | ad13583673551857615498b9605d9dcab63bb2c3 | /output/models/sun_data/mgroup/annotation/annotation00101m/annotation00101m4_xsd/__init__.py | c8a16b879136466c5d8c529de13e7558dae74f94 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 179 | py | from output.models.sun_data.mgroup.annotation.annotation00101m.annotation00101m4_xsd.annotation00101m4 import (
Root,
TheType,
)
__all__ = [
"Root",
"TheType",
]
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
95ad39c94360fcaceaef117c1a0f6f4658e6f408 | 7dc05dc9ba548cc97ebe96ed1f0dab8dfe8d8b81 | /tags/release-0.3.1/pida/services/python.py | 02d9e7df50ca75f178eb63503cecf85459db4c9d | [
"MIT"
] | permissive | BackupTheBerlios/pida-svn | b68da6689fa482a42f5dee93e2bcffb167a83b83 | 739147ed21a23cab23c2bba98f1c54108f8c2516 | refs/heads/master | 2020-05-31T17:28:47.927074 | 2006-05-18T21:42:32 | 2006-05-18T21:42:32 | 40,817,392 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,262 | py | # -*- coding: utf-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
#Copyright (c) 2005 Ali Afshar aafshar@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
# system import(s)
import os
import pida.core.actions as actions
# pida core import(s)
from pida.core import service
defs = service.definitions
types = service.types
class python(service.service):
    """Core Python service: options and commands for running Python code in pida."""
    class python_execution(defs.optiongroup):
        """Options pertaining to python execution"""
        class python_executable(defs.option):
            """The command to call when executing python scripts."""
            rtype = types.string
            default = 'python'
        class python_shell(defs.option):
            """The command to call when executing a python shell."""
            rtype = types.string
            default = 'python'
    def cmd_execute_shell(self):
        """Open the configured interactive python shell in a terminal view."""
        py = self.opt('python_execution', 'python_shell')
        command_args=[py]
        self.boss.call_command('terminal', 'execute',
                               command_args=command_args,
                               icon_name='execute',
                               short_title='Python Shell')
    def cmd_execute_file(self, filename):
        """Run *filename* with the configured python executable in a terminal."""
        py = self.opt('python_execution', 'python_executable')
        command_args=[py, filename]
        self.boss.call_command('terminal', 'execute',
                               command_args=command_args,
                               icon_name='execute')
    def act_python_shell(self, action):
        """Start an interactive python shell."""
        self.call('execute_shell')
    def get_menu_definition(self):
        # UI XML merged into the main menubar; action names must match the
        # act_* handlers above.
        return """
        <menubar>
          <menu name="base_python" action="base_python_menu">
            <menuitem name="pyshell" action="python+python_shell" />
          </menu>
        </menubar>
        """
class python(defs.project_type):
    """Project type definition for Python projects (options + project actions)."""
    project_type_name = 'python'
    class general(defs.optiongroup):
        """General options for Python projects"""
        class source_directory(defs.option):
            """The directory containing source code."""
            rtype = types.directory
            default = os.path.expanduser('~')
        class python_binary_location(defs.option):
            """The location of the python binary"""
            rtype = types.file
            default = '/usr/bin/python'
    class execution(defs.optiongroup):
        """Options relating to executing the project"""
        class project_file_to_execute(defs.option):
            """The python file to run for this project"""
            rtype = types.file
            default = ''
        class use_python_to_execute(defs.option):
            # NOTE(review): unlike its siblings this option has no docstring;
            # presumably the framework uses __doc__ as the UI description --
            # confirm before adding one.
            rtype = types.boolean
            default = True
    @actions.action(
        default_accel='<Shift><Control>x'
    )
    def act_project_execute(self, action):
        """Execute the current project."""
        proj = self.boss.call_command('projectmanager',
            'get_current_project')
        projfile = proj.get_option('execution',
            'project_file_to_execute').value
        use_py = proj.get_option('execution',
            'use_python_to_execute').value
        # Run through the interpreter, or as a plain shell script.
        if use_py:
            shell_cmd = 'python'
        else:
            shell_cmd = 'bash'
        if projfile:
            self.service.boss.call_command('terminal', 'execute',
                command_args=[shell_cmd, projfile], icon_name='run')
        else:
            self.service.log.info('project has not set an executable')
    # XXX: this should be on gazpach service
    @actions.action(
        label="Add UI Form...",
    )
    def act_add_ui_form(self, action):
        """Add a user interface form to the current project."""
        def callback(name):
            # Invoked with the name the user typed into the input dialog.
            if not name.endswith('.glade'):
                name = '%s.glade' % name
            proj = self.boss.call_command('projectmanager',
                'get_current_project')
            filepath = os.path.join(proj.source_directory, name)
            self.service.boss.call_command('gazpach', 'create',
                filename=filepath)
        self.service.boss.call_command('window', 'input',
            callback_function=callback,
            prompt='Form Name')
    def get_menu_definition(self):
        # UI XML contributing menu items and toolbar buttons for projects.
        return """
        <menubar>
          <menu name="base_project" action="base_project_menu">
            <separator />
            <menuitem name="addform" action="python+project+add_ui_form" />
            <menuitem name="expyproj" action="python+project+project_execute" />
            <separator />
          </menu>
        </menubar>
        <toolbar>
          <placeholder name="OpenFileToolbar">
          </placeholder>
          <placeholder name="SaveFileToolbar">
          </placeholder>
          <placeholder name="EditToolbar">
          </placeholder>
          <placeholder name="ProjectToolbar">
            <separator />
            <toolitem name="runproj" action="python+project+project_execute" />
            <separator />
          </placeholder>
          <placeholder name="VcToolbar">
          </placeholder>
          <placeholder name="ToolsToolbar">
          </placeholder>
        </toolbar>
        """
class python_executor(defs.language_handler):
    """Language handler matching Python files and offering a run action."""
    # Files recognised by name, or by a python shebang on the first line.
    file_name_globs = ['*.py']
    first_line_globs = ['*/bin/python']
    def init(self):
        # Framework-style initialiser (not __init__).
        self.__document = None
        # NOTE(review): self.cached/self.__cached appear unused in this class
        # -- possibly consumed by the framework; confirm before removing.
        self.__cached = self.cached = {}
    def load_document(self, document):
        """Remember the currently active document for the run action."""
        self.__document = document
    def act_execute_current_file(self, action):
        """Runs the current python script"""
        self.service.call('execute_file',
                          filename=self.__document.filename)
    def get_menu_definition(self):
        # UI XML adding the "execute current file" menu item and toolbar button.
        return """
        <menubar>
          <menu name="base_python" action="base_python_menu">
            <menuitem name="expyfile" action="python+language+execute_current_file" />
          </menu>
        </menubar>
        <toolbar>
          <placeholder name="OpenFileToolbar">
          </placeholder>
          <placeholder name="SaveFileToolbar">
          </placeholder>
          <placeholder name="EditToolbar">
          </placeholder>
          <placeholder name="ProjectToolbar">
            <separator />
            <toolitem name="runpy" action="python+language+execute_current_file"/>
            <separator />
          </placeholder>
          <placeholder name="VcToolbar">
          </placeholder>
          <placeholder name="ToolsToolbar">
          </placeholder>
        </toolbar>
        """
Service = python
| [
"aafshar@ef0b12da-61f9-0310-ba38-b2629ec279a7"
] | aafshar@ef0b12da-61f9-0310-ba38-b2629ec279a7 |
5f62ae2b41203e02f3bd6369cd5bf77f010b43ab | 5ba34cad2a933adfed6b5df5b1229e48038596d4 | /common/utils.py | e1e50b301d87bce558520bd4db37ed53dcafe48a | [
"MIT"
] | permissive | Firmicety/fomalhaut-panel | bececa59cd42edd8793440a652d206b250591cb9 | 3e662db65a7ca654f75a19e38cb0931be21f92e9 | refs/heads/master | 2020-06-06T07:52:27.211654 | 2019-06-20T11:38:39 | 2019-06-20T11:38:39 | 192,683,216 | 0 | 0 | MIT | 2019-06-19T07:39:07 | 2019-06-19T07:39:07 | null | UTF-8 | Python | false | false | 4,540 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# created by restran on 2016/1/2
from __future__ import unicode_literals
import logging
import json
import sys
from django.http import HttpResponse
import six
from six import binary_type, text_type
import time
import itertools
logger = logging.getLogger(__name__)
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# PyPy exposes sys.pypy_version_info; it is absent on CPython.
PYPY = True if getattr(sys, 'pypy_version_info', None) else False
if PY3:
    from io import BytesIO
    # NOTE(review): these shadow the six imports above; the values are
    # equivalent, but keeping both is redundant.
    text_type = str
    binary_type = bytes
else:
    # Python 2: cStringIO operates on byte strings, so it stands in for BytesIO.
    from cStringIO import StringIO as BytesIO
    text_type = unicode
    binary_type = str
def utf8(value):
    """Get the UTF8-encoded version of a value."""
    # Coerce non-string values to the native byte-string type first.
    if not isinstance(value, (binary_type, text_type)):
        value = binary_type(value)
    return value.encode('utf-8') if isinstance(value, text_type) else value
def text_type_dict(dict_data):
    """Return a copy of *dict_data* with byte-string keys/values decoded as UTF-8.

    :raises TypeError: if *dict_data* is not a dict.
    """
    if not isinstance(dict_data, dict):
        raise TypeError

    def _decode(item):
        return item.decode('utf-8') if isinstance(item, binary_type) else item

    return {_decode(key): _decode(value) for key, value in dict_data.items()}
def datetime_to_str(dt, format_str='%Y-%m-%d %H:%M:%S'):
    """Format *dt* with *format_str*; return '' when *dt* is falsy.

    :param dt: datetime instance (or None).
    :param format_str: strftime format to apply.
    :return: formatted string, or the empty string for a falsy *dt*.
    """
    if not dt:
        return ''
    return dt.strftime(format_str)
def datetime_to_timestamp(dt):
    """Convert *dt* to a millisecond-precision POSIX timestamp (local time).

    :param dt: naive datetime interpreted in local time, or None.
    :return: float milliseconds since the epoch, or None when *dt* is None.
    """
    if dt is None:
        return None
    # Whole seconds from mktime plus the sub-second part, both in ms.
    return time.mktime(dt.timetuple()) * 1e3 + dt.microsecond / 1e3
def error_404(request):
    """Fallback 404 handler returning a plain-text response."""
    return HttpResponse("page not found")
def http_response_json(dict_data, encoding='utf-8'):
    """Serialize *dict_data* and return it as a JSON HttpResponse.

    :param dict_data: dict payload to serialize.
    :param encoding: byte-string codec used only by the Python 2 path.
    :return: HttpResponse with content type application/json; charset=utf-8.
    """
    # ensure_ascii=False so non-ASCII (e.g. Chinese) text is emitted as-is.
    try:
        if six.PY3:
            # On Python 3, bytes keys/values must be decoded before dumping.
            dict_data = text_type_dict(dict_data)
            return HttpResponse(json.dumps(dict_data, ensure_ascii=False),
                                content_type="application/json; charset=utf-8")
        else:
            return HttpResponse(json.dumps(dict_data, encoding=encoding, ensure_ascii=False),
                                content_type="application/json; charset=utf-8")
    except Exception as e:
        logger.error(e)
        # Retry without ensure_ascii / encoding as a last resort.
        return HttpResponse(json.dumps(dict_data),
                            content_type="application/json; charset=utf-8")
def json_loads(content, encoding=None):
    """Deserialize JSON from *content* (bytes or text).

    The codec is applied here rather than passed to ``json.loads`` because the
    ``encoding`` keyword was removed from ``json.loads`` in Python 3.9 (it
    previously raised TypeError there). The old Python 3 path also crashed on
    ``str`` input by unconditionally calling ``.decode``; both are fixed.

    :param content: JSON document as bytes or str.
    :param encoding: codec for byte input (defaults to utf-8).
    :return: the deserialized Python object.
    """
    if isinstance(content, bytes):
        content = content.decode(encoding or 'utf-8')
    return json.loads(content)
def json_dumps(dict_data, encoding='utf-8', indent=None, sort_keys=False):
    """Serialize *dict_data* to a JSON string with non-ASCII text preserved.

    Python 3's ``json.dumps`` never accepted an ``encoding`` keyword, so the
    old code always raised TypeError there and silently fell back to
    ASCII-escaped output (losing ensure_ascii=False). The keyword is now only
    forwarded on Python 2.

    :param dict_data: object to serialize.
    :param encoding: byte-string codec, used on Python 2 only.
    :param indent: pretty-print indentation, forwarded to json.dumps.
    :param sort_keys: whether to sort object keys.
    :return: JSON text.
    """
    # ensure_ascii=False so non-ASCII (e.g. Chinese) text is emitted as-is.
    kwargs = {'ensure_ascii': False, 'indent': indent, 'sort_keys': sort_keys}
    if sys.version_info[0] == 2:
        kwargs['encoding'] = encoding
    try:
        return json.dumps(dict_data, **kwargs)
    except Exception as e:
        logger.error(e)
        # Last-resort dump with default (ASCII-escaped) behaviour.
        return json.dumps(dict_data, indent=indent, sort_keys=sort_keys)
def check_text_content_type(content_type):
    """Check whether *content_type* denotes a textual payload.

    The original compared a text string against *bytes* prefixes
    (``b'text'`` ...), which raises TypeError on Python 3 — the function could
    never return. Prefixes are now text, and byte input is decoded first.

    :param content_type: MIME type, bytes or text, case-insensitive.
    :return: True for text-like types (text/*, json, javascript, xml,
        form-urlencoded), False otherwise.
    """
    if isinstance(content_type, bytes):
        content_type = content_type.decode('utf-8', 'replace')
    content_type = str(content_type).lower()
    text_prefixes = (
        'text',
        'application/json',
        'application/x-javascript',
        'application/xml',
        'application/x-www-form-urlencoded',
    )
    # str.startswith accepts a tuple of prefixes.
    return content_type.startswith(text_prefixes)
def grouper(iterable, size):
    """Yield tuples of up to *size* consecutive items from *iterable*.

    A non-positive *size* yields the raw iterator once and stops.
    (Based on http://stackoverflow.com/a/8991553)
    """
    iterator = iter(iterable)
    if size <= 0:
        yield iterator
        return
    for first in iterator:
        # Pull one item via the for-loop, then up to size-1 more.
        yield (first,) + tuple(itertools.islice(iterator, size - 1))
__all__ = ['grouper', 'check_text_content_type',
'json_dumps', 'json_loads', 'http_response_json', 'error_404',
'datetime_to_timestamp', 'datetime_to_str', 'text_type_dict',
'utf8', 'BytesIO', 'text_type', 'binary_type', 'PY2', 'PY3', 'PYPY']
| [
"grestran@gmail.com"
] | grestran@gmail.com |
70854803177cbf6f526b08576fc9c7321e535049 | b10ee2d670cab2141cdee16dd99c9425cfeb24e1 | /oci_image.py | c25866a0a69c5b38e894eae5f67c13d823b48f5a | [] | no_license | VariableDeclared/resource-oci-image | c90ece9157c74cafc9ab2eaae74d327baa761432 | e583429139d874bbcab8330c13f3fde30912abdf | refs/heads/master | 2021-05-24T10:37:16.744676 | 2020-02-05T16:18:56 | 2020-02-05T16:19:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,893 | py | from pathlib import Path
import yaml
from ops.framework import Object
from ops.model import BlockedStatus, ModelError
class OCIImageResource(Object):
    """Wraps an OCI image charm resource and parses it into ImageInfo."""

    def __init__(self, charm, resource_name):
        super().__init__(charm, resource_name)
        self.resource_name = resource_name

    def fetch(self):
        """Fetch and parse the resource.

        Raises MissingResourceError when the resource file is absent or empty,
        and InvalidResourceError when its YAML cannot be parsed.
        """
        path = self.model.resources.fetch(self.resource_name)
        if not path.exists():
            raise MissingResourceError(self.resource_name)
        raw = Path(path).read_text()
        if not raw:
            raise MissingResourceError(self.resource_name)
        try:
            parsed = yaml.safe_load(raw)
        except yaml.YAMLError as e:
            raise InvalidResourceError(self.resource_name) from e
        return ImageInfo(parsed)
class ImageInfo(dict):
    """Dict view of an OCI image resource in Juju K8s pod-spec format."""

    def __init__(self, data):
        # Translate the charm-store field names into the keys the Juju K8s
        # pod spec expects, since that is how this is typically consumed.
        super().__init__(
            imagePath=data['registrypath'],
            username=data['username'],
            password=data['password'],
        )

    @property
    def image_path(self):
        """Registry path of the image."""
        return self['imagePath']

    @property
    def username(self):
        """Registry username."""
        return self['username']

    @property
    def password(self):
        """Registry password."""
        return self['password']
class ResourceError(ModelError):
    """Base resource error carrying a BlockedStatus describing the problem."""

    status_type = BlockedStatus
    status_message = 'Resource error'

    def __init__(self, resource_name):
        super().__init__(resource_name)
        # Build the charm status from the subclass-specific message.
        self.status = self.status_type(
            '{}: {}'.format(self.status_message, resource_name))
class MissingResourceError(ResourceError):
    """Raised when the image resource file is absent or empty.

    Subclasses ResourceError (not ModelError directly) so that
    ``status_message`` actually feeds the BlockedStatus built in
    ``ResourceError.__init__``; with ModelError as the base it was dead
    configuration and ``self.status`` was never set. ResourceError is itself
    a ModelError, so existing ``except ModelError`` handlers still match.
    """
    status_message = 'Missing resource'
class InvalidResourceError(ResourceError):
    """Raised when the image resource contains unparseable YAML.

    Subclasses ResourceError (not ModelError directly) so that
    ``status_message`` actually feeds the BlockedStatus built in
    ``ResourceError.__init__``; ResourceError is itself a ModelError, so
    existing ``except ModelError`` handlers still match.
    """
    status_message = 'Invalid resource'
| [
"johnsca@gmail.com"
] | johnsca@gmail.com |
d483a1ed0b3c259251024bcf7ffd220b7ec4e08f | c81745ea7c36fa03cd818b3986f605a6da036658 | /u_app/models.py | 1d2d6231ad49b573d94313e42613910c8668c6d9 | [] | no_license | mpresto/user_mgmt_site | 3f77fbbe4fa3fec4724e7ba8916eed9fd1d18aae | 063ed174f0af46fff3a7c5f7631f1587dc152511 | refs/heads/master | 2022-04-11T18:15:29.603694 | 2020-03-29T04:10:32 | 2020-03-29T04:10:32 | 237,847,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import connection
import sqlite3
# Create your models here.
class MyUser(AbstractBaseUser):
    """Custom user model keyed by unique email address."""
    email = models.EmailField(max_length=254, unique=True)
    # NOTE(review): AbstractBaseUser already provides a 128-char ``password``
    # column for hashed passwords; overriding it at max_length=50 can truncate
    # modern hashes -- confirm this override is intentional.
    password = models.CharField(max_length=50)
    full_name = models.CharField(max_length=50)
    birth_date = models.DateField(blank=True, null=True)
    registration_date = models.DateTimeField(auto_now_add=True)
    # NOTE(review): auto_now_add pins this at creation time, whereas Django
    # itself updates ``last_login`` on each login -- verify intended behaviour.
    last_login = models.DateTimeField(auto_now_add=True)
    objects = BaseUserManager()
    # Email is the login identifier; the extra fields are prompted for by
    # createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['full_name', 'birth_date',]
class Doggo(models.Model):
    """A class for our doggos"""
    name = models.CharField(max_length=200)
    image_url = models.CharField(max_length=500)
    # blank=True without null=True: forms may omit it, but the DB column is
    # still NOT NULL.
    age = models.IntegerField(blank=True)
    description = models.CharField(max_length=500)
    entry_date = models.DateTimeField(auto_now_add=True)
    # Deleting the submitting user also removes their doggos (CASCADE).
    submitter = models.ForeignKey(MyUser, on_delete=models.CASCADE)
    # Presumably a denormalised aggregate of related Rating.vote_value rows
    # -- confirm where it is kept in sync.
    average_rating = models.IntegerField(default=0)
class Rating(models.Model):
    """A model for our rating records"""
    # NOTE(review): no unique_together on (user_who_voted, rated_doggo), so a
    # user can record duplicate votes for the same doggo -- confirm intended.
    user_who_voted = models.ForeignKey(MyUser, on_delete=models.CASCADE)
    rated_doggo = models.ForeignKey(Doggo, on_delete=models.CASCADE)
    vote_value = models.IntegerField(default=0)
| [
"monty.preston5@gmail.com"
] | monty.preston5@gmail.com |
c3a6193d0572e0c981b8de79b098cda9d7eb1a93 | 09c595368ed7617381edb8ea87f56d5596ab4bdb | /Medium/103. Binary Tree Zigzag Level Order Traversal.py | bfdd1a6f73b158b43b2c1359cb6fcec8d8390eba | [] | no_license | h74zhou/leetcode | 38d989135b968b6947c36df7be288cb11ead3f0c | a5efedb34271160a7a776a7ce9bfff29f47b2389 | refs/heads/master | 2023-03-10T19:24:46.786706 | 2021-02-23T02:25:27 | 2021-02-23T02:25:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def zigzagLevelOrder(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]

        BFS one level at a time, reversing the collected values on every
        other level to produce the zigzag ordering.
        """
        if not root:
            return []
        levels = []
        frontier = [root]
        left_to_right = True
        while frontier:
            values = [node.val for node in frontier]
            if not left_to_right:
                values.reverse()
            levels.append(values)
            next_frontier = []
            for node in frontier:
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            frontier = next_frontier
            left_to_right = not left_to_right
        return levels
| [
"iamherunzhou@gmail.com"
] | iamherunzhou@gmail.com |
e25c1f9014a8ca9635d1989cfee9bc1ea967069c | 78eb766321c7ed3236fb87bb6ac8547c99d0d1a4 | /oneYou2/home/migrations/0007_sitesettings_header.py | 770b350040d29f6fff38be8d6bdb226d71ae63f1 | [] | no_license | danmorley/nhs-example | 9d7be76116ed962248e1f7e287355a6870534f5d | ae4b5f395d3518ee17ef89348ed756c817e0c08c | refs/heads/master | 2022-12-13T02:13:18.484448 | 2019-02-28T11:05:31 | 2019-02-28T11:05:31 | 203,353,840 | 1 | 0 | null | 2022-12-07T04:29:46 | 2019-08-20T10:30:15 | Python | UTF-8 | Python | false | false | 612 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-02-13 11:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the optional ``header`` foreign key (pages.Header) to SiteSettings."""
    dependencies = [
        ('pages', '0011_header'),
        ('home', '0006_sitesettings_footer'),
    ]
    operations = [
        migrations.AddField(
            model_name='sitesettings',
            name='header',
            # SET_NULL keeps the settings row if the referenced Header is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='pages.Header'),
        ),
    ]
| [
"andrewkenyon123@gmail.com"
] | andrewkenyon123@gmail.com |
b82f5cd0680f4b8d37be75d4084b93fdaaf2da86 | 0485a490f466bd1d02eaae96d277888781208c0e | /tests/single_instruction_translation_validation/mcsema/memory-variants/subq_r64_m64/Output/test-z3.py | 3786808e7a8d2a62a625335f4ec19aaf8a638f2c | [
"LicenseRef-scancode-unknown-license-reference",
"NCSA"
] | permissive | Mthandazo42/validating-binary-decompilation | c0e2d54cd79e609bfa35802975bddfa52e646fad | c0fcd6f099e38195dcbbac9e8c13a825865c5cb5 | refs/heads/master | 2022-11-11T13:18:13.033044 | 2020-06-25T05:49:01 | 2020-06-25T05:49:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,694 | py | #############################################
######## Auto Generated Proof Scripts #######
#############################################
import z3
import sys
# Overall verdict for this proof script: True = equivalent so far,
# False = counterexample found, "Unknown" = solver timed out.
status=True
test_name="UnK"
# Optional CLI argument overrides the placeholder test name used in reports.
if(len(sys.argv) > 1):
    test_name = sys.argv[1]
def solve(msg, lvar, xvar, s):
    """Check that the lifted (lvar) and native x86 (xvar) definitions agree.

    The caller has already added ``lvar != xvar`` to solver ``s``, so an
    unsat result means the two definitions are equivalent for this
    register/flag.  The module-level ``status`` is updated as follows:

    * unsat   -> left untouched (query passed).
    * unknown -> set to "Unknown" (solver timeout), but never downgrades
      an earlier failure.
    * sat     -> set to False (counterexample found) and the model is
      dumped; unless the x86 side contains an UNDEF term, in which case
      the mismatch is expected and ``status`` is left unchanged.
    """
    global status
    # Cap each individual query at 60s so one hard query cannot stall
    # the whole test run.
    s.set("timeout", 60000)
    res = s.check()
    if(z3.unknown == res):
        print(test_name + "::" + msg + "::unk")
        # Bug fix: only record Unknown while no query has failed yet;
        # previously a late timeout overwrote status=False and the
        # script reported "Test-Unk" instead of "Test-Fail".
        if status is True:
            status = "Unknown"
    if(z3.sat == res):
        if("UNDEF" in xvar.sexpr()):
            # The x86 semantics leave this value undefined, so a
            # difference is acceptable; report but do not fail.
            print(test_name + "::" + msg + "::undef-sat")
        else:
            m = s.model()
            print(test_name + "::" + msg + "::sat")
            print("\n")
            print("query", s)
            print("\n")
            print("model", m)
            print("\n")
            print("xvar =", m.evaluate(xvar))
            print("lvar =", m.evaluate(lvar))
            print("\n")
            status = False
##############################
## X86 specific variables ####
##############################
### GPRs
VX_RAX = z3.BitVec('VX_RAX',64)
VX_RBX = z3.BitVec('VX_RBX',64)
VX_RCX = z3.BitVec('VX_RCX',64)
VX_RDX = z3.BitVec('VX_RDX',64)
VX_RSI = z3.BitVec('VX_RSI',64)
VX_RDI = z3.BitVec('VX_RDI',64)
### Flags
VX_CF = z3.BitVec('VX_CF',1)
VX_PF = z3.BitVec('VX_PF',1)
VX_ZF = z3.BitVec('VX_ZF',1)
VX_SF = z3.BitVec('VX_SF',1)
VX_AF = z3.BitVec('VX_AF',1)
VX_OF = z3.BitVec('VX_OF',1)
### YMM Registers
VX_YMM1 = z3.BitVec('VX_YMM1', 256)
VX_YMM2 = z3.BitVec('VX_YMM2', 256)
## Undef
VX_UNDEF_1 = z3.BitVec('VX_UNDEF_1', 1)
VX_UNDEF_BOOL = z3.Bool('VX_UNDEF_BOOL')
## Memeory
VX_MEM_8 = z3.BitVec('VX_MEM_8',8)
VX_MEM_16 = z3.BitVec('VX_MEM_16',16)
VX_MEM_32 = z3.BitVec('VX_MEM_32',32)
VX_MEM_64 = z3.BitVec('VX_MEM_64',64)
VX_MEM_128 = z3.BitVec('VX_MEM_128',128)
VX_MEM_256 = z3.BitVec('VX_MEM_256',256)
##############################
## X86 specific variables ####
##############################
### GPRs
VL_RAX = z3.BitVec('VL_RAX',64)
VL_RBX = z3.BitVec('VL_RBX',64)
VL_RCX = z3.BitVec('VL_RCX',64)
VL_RDX = z3.BitVec('VL_RDX',64)
VL_RSI = z3.BitVec('VL_RSI',64)
VL_RDI = z3.BitVec('VL_RDI',64)
### Flags
VL_CF = z3.BitVec('VL_CF',8)
VL_PF = z3.BitVec('VL_PF',8)
VL_ZF = z3.BitVec('VL_ZF',8)
VL_SF = z3.BitVec('VL_SF',8)
VL_AF = z3.BitVec('VL_AF',8)
VL_OF = z3.BitVec('VL_OF',8)
### YMM Registers
VL_YMM1_0 = z3.BitVec('VL_YMM1_0', 64)
VL_YMM1_1 = z3.BitVec('VL_YMM1_1', 64)
VL_YMM1_2 = z3.BitVec('VL_YMM1_2', 64)
VL_YMM1_3 = z3.BitVec('VL_YMM1_3', 64)
VL_YMM2_0 = z3.BitVec('VL_YMM2_0', 64)
VL_YMM2_1 = z3.BitVec('VL_YMM2_1', 64)
VL_YMM2_2 = z3.BitVec('VL_YMM2_2', 64)
VL_YMM2_3 = z3.BitVec('VL_YMM2_3', 64)
## Memeory
VL_MEM_8 = z3.BitVec('VL_MEM_8',8)
VL_MEM_16 = z3.BitVec('VL_MEM_16',16)
VL_MEM_32 = z3.BitVec('VL_MEM_32',32)
VL_MEM_32_0 = z3.BitVec('VL_MEM_32_0',32)
VL_MEM_32_1 = z3.BitVec('VL_MEM_32_1',32)
VL_MEM_32_2 = z3.BitVec('VL_MEM_32_2',32)
VL_MEM_32_3 = z3.BitVec('VL_MEM_32_3',32)
VL_MEM_64 = z3.BitVec('VL_MEM_64',64)
VL_MEM_64_0 = z3.BitVec('VL_MEM_64_0',64)
VL_MEM_64_1 = z3.BitVec('VL_MEM_64_1',64)
VL_MEM_128 = z3.BitVec('VL_MEM_128',128)
VL_MEM_256 = z3.BitVec('VL_MEM_256',256)
##############################
## Proof variables ###########
##############################
V_R = z3.BitVec('V_R',64)
V_F = z3.BitVec('V_F',1)
V_Y = z3.BitVec('V_Y',256)
V_M8 = z3.BitVec('V_M8',8)
V_M16 = z3.BitVec('V_M16',16)
V_M32 = z3.BitVec('V_M32',32)
V_M64 = z3.BitVec('V_M64',64)
V_M128 = z3.BitVec('V_M128',128)
V_M256 = z3.BitVec('V_M256',256)
## Solver instance
s = z3.Solver()
##############################
## Default constraints #######
##############################
### GPRs
s.add(VX_RAX == VL_RAX)
s.add(VX_RBX == VL_RBX)
s.add(VX_RCX == VL_RCX)
s.add(VX_RDX == VL_RDX)
s.add(VX_RDI == VL_RDI)
s.add(VX_RSI == VL_RSI)
### Flags
s.add(z3.Or(VL_CF == 0, VL_CF == 1))
s.add(z3.Or(VL_ZF == 0, VL_ZF == 1))
s.add(z3.Or(VL_PF == 0, VL_PF == 1))
s.add(z3.Or(VL_SF == 0, VL_SF == 1))
s.add(z3.Or(VL_AF == 0, VL_AF == 1))
s.add(z3.Or(VL_OF == 0, VL_OF == 1))
s.add(z3.Extract(0,0, VL_CF) == VX_CF)
s.add(z3.Extract(0,0, VL_SF) == VX_SF)
s.add(z3.Extract(0,0, VL_ZF) == VX_ZF)
s.add(z3.Extract(0,0, VL_PF) == VX_PF)
s.add(z3.Extract(0,0, VL_AF) == VX_AF)
s.add(z3.Extract(0,0, VL_OF) == VX_OF)
### Ymms
s.add(z3.Concat(VL_YMM1_3, VL_YMM1_2, VL_YMM1_1, VL_YMM1_0) == VX_YMM1)
s.add(z3.Concat(VL_YMM2_3, VL_YMM2_2, VL_YMM2_1, VL_YMM2_0) == VX_YMM2)
## Memeory
s.add(VX_MEM_8 == VL_MEM_8)
s.add(VX_MEM_16 == VL_MEM_16)
s.add(VX_MEM_32 == VL_MEM_32)
s.add(VX_MEM_64 == VL_MEM_64)
s.add(VX_MEM_128 == VL_MEM_128)
s.add(VX_MEM_256 == VL_MEM_256)
s.add(VX_MEM_128 == z3.Concat(VL_MEM_64_1, VL_MEM_64_0))
s.add(VX_MEM_128 == z3.Concat(VL_MEM_32_3, VL_MEM_32_2, VL_MEM_32_1, VL_MEM_32_0))
## =******= AF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, ((((z3.LShR(((((VL_MEM_64 ^ VL_RBX) & z3.BitVecVal(18446744073709551616 - 1, 64)) ^ ((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64))) & z3.BitVecVal(18446744073709551616 - 1, 64)), z3.BitVecVal(4, 64)) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(256 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(256 - 1, 64)))))
xvar = (V_F == ((z3.Extract(4, 4, VX_MEM_64) ^ z3.Extract(4, 4, VX_RBX)) ^ z3.Extract(4, 4, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX)))))
s.add(lvar != xvar)
solve("AF", lvar, xvar, s)
s.pop()
## =******= CF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, z3.Concat(z3.BitVecVal(0, 7), z3.Extract(0, 0, z3.If(z3.UGT(VL_MEM_64, VL_RBX), z3.BitVecVal(1, 8), z3.BitVecVal(0, 8)))))))
xvar = (V_F == z3.If(z3.Not((z3.Extract(64, 64, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))) == z3.BitVecVal(1, 1))), z3.BitVecVal(1, 1), z3.BitVecVal(0, 1)))
s.add(lvar != xvar)
solve("CF", lvar, xvar, s)
s.pop()
## =******= MEM =******=
s.push()
lvar = (V_M64 == z3.Concat(z3.Extract(63, 56, VL_MEM_64), z3.Extract(55, 48, VL_MEM_64), z3.Extract(47, 40, VL_MEM_64), z3.Extract(39, 32, VL_MEM_64), z3.Extract(31, 24, VL_MEM_64), z3.Extract(23, 16, VL_MEM_64), z3.Extract(15, 8, VL_MEM_64), z3.Extract(7, 0, VL_MEM_64)))
xvar = (V_M64 == z3.Concat(z3.Extract(63, 56, VX_MEM_64), z3.Extract(55, 48, VX_MEM_64), z3.Extract(47, 40, VX_MEM_64), z3.Extract(39, 32, VX_MEM_64), z3.Extract(31, 24, VX_MEM_64), z3.Extract(23, 16, VX_MEM_64), z3.Extract(15, 8, VX_MEM_64), z3.Extract(7, 0, VX_MEM_64)))
s.add(lvar != xvar)
solve("MEM", lvar, xvar, s)
s.pop()
## =******= OF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, z3.Concat(z3.BitVecVal(0, 7), z3.Extract(0, 0, z3.If(((((((z3.LShR(((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)), z3.BitVecVal(63, 64)) & z3.BitVecVal(18446744073709551616 - 1, 64)) ^ (z3.LShR(VL_RBX, z3.BitVecVal(63, 64)) & z3.BitVecVal(18446744073709551616 - 1, 64))) & z3.BitVecVal(18446744073709551616 - 1, 64)) + (((z3.LShR(VL_MEM_64, z3.BitVecVal(63, 64)) & z3.BitVecVal(18446744073709551616 - 1, 64)) ^ (z3.LShR(VL_RBX, z3.BitVecVal(63, 64)) & z3.BitVecVal(18446744073709551616 - 1, 64))) & z3.BitVecVal(18446744073709551616 - 1, 64))) & z3.BitVecVal(18446744073709551616 - 1, 64)) == z3.BitVecVal(2, 64)), z3.BitVecVal(1, 8), z3.BitVecVal(0, 8)))))))
xvar = (V_F == z3.If(z3.And((((z3.Extract(63, 63, VX_MEM_64) ^ z3.BitVecVal(-1, 1)) == z3.BitVecVal(1, 1)) == (z3.Extract(63, 63, VX_RBX) == z3.BitVecVal(1, 1))), z3.Not((((z3.Extract(63, 63, VX_MEM_64) ^ z3.BitVecVal(-1, 1)) == z3.BitVecVal(1, 1)) == (z3.Extract(63, 63, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))) == z3.BitVecVal(1, 1))))), z3.BitVecVal(1, 1), z3.BitVecVal(0, 1)))
s.add(lvar != xvar)
solve("OF", lvar, xvar, s)
s.pop()
## =******= PF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(31, 64)) & z3.BitVecVal(4294967296 - 1, 64)) + (((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.UDiv((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(2, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(2, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(3, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(4, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & 
z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(5, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(6, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(7, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(8, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(9, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(10, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - 
VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(11, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(12, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(13, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(14, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(15, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(16, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + 
(((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(17, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(18, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(19, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(20, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(21, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(22, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & 
z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(23, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(24, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(25, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(26, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(27, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(28, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & 
z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(29, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) + (((z3.LShR((((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(255, 64)) & z3.BitVecVal(4294967296 - 1, 64)), z3.BitVecVal(30, 64)) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(4294967296 - 1, 64))) & z3.BitVecVal(4294967296 - 1, 64)) & z3.BitVecVal(256 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(256 - 1, 64)) ^ z3.BitVecVal(1, 64)) & z3.BitVecVal(256 - 1, 64)))))
xvar = (V_F == z3.If(z3.Not(z3.Xor(z3.Xor(z3.Xor(z3.Xor(z3.Xor(z3.Xor(z3.Xor((z3.Extract(0, 0, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))) == z3.BitVecVal(1, 1)), (z3.Extract(1, 1, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))) == z3.BitVecVal(1, 1))), (z3.Extract(2, 2, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))) == z3.BitVecVal(1, 1))), (z3.Extract(3, 3, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))) == z3.BitVecVal(1, 1))), (z3.Extract(4, 4, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))) == z3.BitVecVal(1, 1))), (z3.Extract(5, 5, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))) == z3.BitVecVal(1, 1))), (z3.Extract(6, 6, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))) == z3.BitVecVal(1, 1))), (z3.Extract(7, 7, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))) == z3.BitVecVal(1, 1)))), z3.BitVecVal(1, 1), z3.BitVecVal(0, 1)))
s.add(lvar != xvar)
solve("PF", lvar, xvar, s)
s.pop()
## =******= RAX =******=
s.push()
lvar = (V_R == z3.Concat(z3.Extract(63, 56, VL_RAX), z3.Extract(55, 48, VL_RAX), z3.Extract(47, 40, VL_RAX), z3.Extract(39, 32, VL_RAX), z3.Extract(31, 24, VL_RAX), z3.Extract(23, 16, VL_RAX), z3.Extract(15, 8, VL_RAX), z3.Extract(7, 0, VL_RAX)))
xvar = (V_R == VX_RAX)
s.add(lvar != xvar)
solve("RAX", lvar, xvar, s)
s.pop()
## =******= RBX =******=
s.push()
lvar = (V_R == z3.Concat(z3.Extract(63, 56, ((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64))), z3.Extract(55, 48, ((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64))), z3.Extract(47, 40, ((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64))), z3.Extract(39, 32, ((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64))), z3.Extract(31, 24, ((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64))), z3.Extract(23, 16, ((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64))), z3.Extract(15, 8, ((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64))), z3.Extract(7, 0, ((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)))))
xvar = (V_R == z3.Extract(63, 0, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))))
s.add(lvar != xvar)
solve("RBX", lvar, xvar, s)
s.pop()
## =******= RCX =******=
s.push()
lvar = (V_R == z3.Concat(z3.Extract(63, 56, VL_RCX), z3.Extract(55, 48, VL_RCX), z3.Extract(47, 40, VL_RCX), z3.Extract(39, 32, VL_RCX), z3.Extract(31, 24, VL_RCX), z3.Extract(23, 16, VL_RCX), z3.Extract(15, 8, VL_RCX), z3.Extract(7, 0, VL_RCX)))
xvar = (V_R == VX_RCX)
s.add(lvar != xvar)
solve("RCX", lvar, xvar, s)
s.pop()
## =******= RDX =******=
s.push()
lvar = (V_R == z3.Concat(z3.Extract(63, 56, VL_RDX), z3.Extract(55, 48, VL_RDX), z3.Extract(47, 40, VL_RDX), z3.Extract(39, 32, VL_RDX), z3.Extract(31, 24, VL_RDX), z3.Extract(23, 16, VL_RDX), z3.Extract(15, 8, VL_RDX), z3.Extract(7, 0, VL_RDX)))
xvar = (V_R == VX_RDX)
s.add(lvar != xvar)
solve("RDX", lvar, xvar, s)
s.pop()
## =******= SF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, ((z3.LShR(((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)), z3.BitVecVal(63, 64)) & z3.BitVecVal(18446744073709551616 - 1, 64)) & z3.BitVecVal(256 - 1, 64)))))
xvar = (V_F == z3.Extract(63, 63, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))))
s.add(lvar != xvar)
solve("SF", lvar, xvar, s)
s.pop()
## =******= ZF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, z3.Concat(z3.BitVecVal(0, 7), z3.Extract(0, 0, z3.If((((VL_RBX - VL_MEM_64) & z3.BitVecVal(18446744073709551616 - 1, 64)) == z3.BitVecVal(0, 64)), z3.BitVecVal(1, 8), z3.BitVecVal(0, 8)))))))
xvar = (V_F == z3.If((z3.Extract(63, 0, ((z3.Concat(z3.BitVecVal(0, 1), (VX_MEM_64 ^ z3.BitVecVal(-1, 64))) + z3.BitVecVal(1, 65)) + z3.Concat(z3.BitVecVal(0, 1), VX_RBX))) == z3.BitVecVal(0, 64)), z3.BitVecVal(1, 1), z3.BitVecVal(0, 1)))
s.add(lvar != xvar)
solve("ZF", lvar, xvar, s)
s.pop()
# Final verdict.  Bug fix: the color sequences were missing the ESC
# (\033) character, so the terminal printed literal "[6;30;42m" text
# instead of coloring the line.  "\033[<attrs>m" sets the color and
# "\033[0m" resets it.
if(status == True):
    print('\033[6;30;42m' + 'Test-Pass: ' + '\033[0m' + test_name)
elif(status == False):
    print('\033[0;30;41m' + 'Test-Fail: ' + '\033[0m' + test_name)
else:
    # status == "Unknown": at least one query timed out, none failed.
    print('\033[6;30;47m' + 'Test-Unk: ' + '\033[0m' + test_name)
| [
"sdasgup3@illinois.edu"
] | sdasgup3@illinois.edu |
c008da7634db24b7e928afe0d27575e8bf211931 | ad3339db839a9353ae445b7069c7f2d2c805fadc | /tribune/urls.py | 3dbd1848986508a951390659bd2309263a06e032 | [] | no_license | felkiriinya/Moringa-Tribune | 1a89ad0ab01618284eb8e4fb55587f60cb197fa1 | 5c936fac04b265e9a1ea98e3c2c296a7a1ef75e8 | refs/heads/master | 2023-01-13T19:45:02.966996 | 2020-11-24T17:24:55 | 2020-11-24T17:24:55 | 312,059,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | """tribune URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.auth import views
from rest_framework.authtoken.views import obtain_auth_token
# URL routes for the project.  Admin, app routes, auth, editor assets
# and the DRF token endpoint.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url('', include('news.urls')),
    url(r'^accounts/', include('registration.backends.simple.urls')),
    # Bug fix: an extra-kwargs dict in the URLconf is stored on
    # self.kwargs and ignored by class-based views, so the old
    # `{'next_page': '/'}` third argument had no effect.  next_page must
    # be passed to as_view() as an initkwarg to redirect after logout.
    url(r'^logout/$', views.LogoutView.as_view(next_page='/')),
    url(r'^tinymce/', include('tinymce.urls')),
    # Exchanges username/password for a DRF auth token.
    url(r'^api-token-auth/', obtain_auth_token),
]
| [
"felkiriinya@gmail.com"
] | felkiriinya@gmail.com |
f8460aaf86927b5c5035f75d913a717aa778cda9 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/counting_20200622225609.py | 00a400697539fd255487c113aa925cea6350e1ca | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | def counting(str):
str = str.split('-')
hour1 = int(convertTo24(str[0]).split(':')[0])
print('hour1',hour1)
hour2 = int(convertTo24(str[1]).split(':'))
def convertTo24(hour):
    """Convert a 12-hour time string such as '9:30pm' to 24-hour 'HH:MM'.

    Bug fix: the old version mapped '12:xxam' to '24:xx', returned the
    original minutes-only slice for every other input, and never added
    12 for pm times.  Correct rules: 12am -> 00, 12pm -> 12, other pm
    hours get +12, am hours pass through (zero-padded to two digits).

    Args:
        hour: a time like '12:00am', '9:05am' or '11:45pm' (1- or
            2-digit hour, lowercase/uppercase am-pm suffix).

    Returns:
        The equivalent 24-hour clock string 'HH:MM'.
    """
    period = hour[-2:].lower()          # 'am' or 'pm'
    hh, mm = hour[:-2].split(':')      # hour and minute components
    hh = int(hh)
    if period == 'am':
        if hh == 12:
            hh = 0                     # midnight hour
    elif hh != 12:
        hh += 12                       # 1pm..11pm -> 13..23
    return '{:02d}:{}'.format(hh, mm)
counting("12:00am-12:00am") | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
82f26b32b81eeb6214dd9fb7788ba27f86cba0d3 | b2ba78fb1e53f92efdc3b6e0be50c81e5dd036ed | /algos/mbl_trpo/defaults.py | ef455095211017613a659a721b6b9e08ca3ca5f2 | [
"MIT"
] | permissive | ShuoZ9379/Integration_SIL_and_MBL | 2dcfae10cb5929c4121a3a8bfceebae8c0b6ba08 | d7df6501a665d65eb791f7fd9b8e85fd660e6320 | refs/heads/master | 2020-07-23T20:04:17.304302 | 2019-09-23T18:58:57 | 2019-09-23T18:58:57 | 207,690,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | py | from baselines.common.models import mlp, cnn_small
def atari():
    """Default MBL-TRPO hyper-parameters for Atari environments."""
    return {
        'network': cnn_small(),
        'timesteps_per_batch': 512,
        'max_kl': 0.001,
        'cg_iters': 10,
        'cg_damping': 1e-3,
        'gamma': 0.98,
        'lam': 1.0,
        'vf_iters': 3,
        'vf_stepsize': 1e-4,
        'entcoeff': 0.00,
    }
def sparse_mujoco():
    """Default MBL-TRPO hyper-parameters for sparse-reward MuJoCo tasks."""
    return {
        'network': mlp(num_hidden=32, num_layers=2),
        'timesteps_per_batch': 1024,
        'max_kl': 0.01,
        'cg_iters': 10,
        'cg_damping': 0.1,
        'gamma': 0.99,
        'lam': 0.98,
        'vf_iters': 5,
        'vf_stepsize': 1e-3,
        # NOTE(review): key is spelt 'entcoef' here but 'entcoeff' /
        # 'ent_coef' in sibling functions — verify which spelling the
        # trainer actually reads.
        'entcoef': 1.0,
        'normalize_observations': True,
    }
def mujoco():
    """Default MBL-TRPO hyper-parameters for dense-reward MuJoCo tasks."""
    return {
        'network': mlp(num_hidden=32, num_layers=2),
        'timesteps_per_batch': 2048,
        'max_kl': 0.01,
        'cg_iters': 10,
        'cg_damping': 0.1,
        'gamma': 0.99,
        'lam': 0.98,
        'vf_iters': 5,
        'vf_stepsize': 1e-3,
        # Both spellings are kept because downstream consumers differ.
        'normalize_observations': True,
        'normalize_obs': True,
        'ent_coef': 0.0,
    }
def gym_locomotion_envs():
    """Default MBL-TRPO hyper-parameters for Gym locomotion environments."""
    return {
        'network': mlp(num_hidden=32, num_layers=2),
        'timesteps_per_batch': 1024,
        'max_kl': 0.01,
        'cg_iters': 10,
        'cg_damping': 0.1,
        'gamma': 0.99,
        'lam': 0.98,
        'vf_iters': 5,
        'vf_stepsize': 1e-3,
        'normalize_observations': True,
        # NOTE(review): key is spelt 'entcoef' here but 'entcoeff' /
        # 'ent_coef' in sibling functions — verify which spelling the
        # trainer actually reads.
        'entcoef': 0.0,
    }
def robotics():
    """Default MBL-TRPO hyper-parameters for robotics environments."""
    return {
        'network': mlp(num_hidden=32, num_layers=2),
        'timesteps_per_batch': 1024,
        'max_kl': 0.01,
        'cg_iters': 10,
        'cg_damping': 0.1,
        'gamma': 0.99,
        'lam': 0.98,
        'vf_iters': 5,
        'vf_stepsize': 1e-3,
        'normalize_observations': True,
    }
def classic_control():
    """Default MBL-TRPO hyper-parameters for classic-control environments.

    Only the network and normalization flags are overridden; batch size,
    KL bound, conjugate-gradient settings and gamma/lam fall back to the
    trainer's own defaults.
    """
    return {
        'network': mlp(num_hidden=32, num_layers=2),
        'normalize_observations': True,
        'normalize_obs': True,
    }
| [
"zhangshuo19930709@gmail.com"
] | zhangshuo19930709@gmail.com |
383e6330f03c52d4c051dbaae812f110ffd8383b | 7aac370bbd217d716ba53f0efd70047879bb1444 | /homeassistant/components/somfy_mylink/config_flow.py | 79fbf028b16a525c660d1986847854e8614a00c8 | [
"Apache-2.0"
] | permissive | OverloadUT/home-assistant | c6bd63edb7c087eaf81ff507fdeb3e1420062d3a | 7ccfaed7361604aa83cc55f059015327b544b5a7 | refs/heads/dev | 2023-02-23T01:04:13.441125 | 2021-10-25T16:26:03 | 2021-10-25T16:26:03 | 78,159,098 | 5 | 1 | Apache-2.0 | 2023-02-22T06:18:15 | 2017-01-06T00:21:06 | Python | UTF-8 | Python | false | false | 6,841 | py | """Config flow for Somfy MyLink integration."""
import asyncio
from copy import deepcopy
import logging
from somfy_mylink_synergy import SomfyMyLinkSynergy
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.components.dhcp import HOSTNAME, IP_ADDRESS, MAC_ADDRESS
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import callback
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_REVERSE,
CONF_REVERSED_TARGET_IDS,
CONF_SYSTEM_ID,
CONF_TARGET_ID,
CONF_TARGET_NAME,
DEFAULT_PORT,
DOMAIN,
MYLINK_STATUS,
)
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: core.HomeAssistant, data):
    """Validate the user input allows us to connect.

    Data has the keys from schema with values provided by the user.

    Raises:
        CannotConnect: if the status request to the hub times out.
        InvalidAuth: if the hub returns no data or an error payload.

    Returns:
        A dict with the ``title`` to use for the created config entry.
    """
    somfy_mylink = SomfyMyLinkSynergy(
        data[CONF_SYSTEM_ID], data[CONF_HOST], data[CONF_PORT]
    )
    try:
        status_info = await somfy_mylink.status_info()
    except asyncio.TimeoutError as ex:
        raise CannotConnect from ex
    # An empty response or one carrying an "error" key is treated as an
    # authentication failure (bad system ID).
    if not status_info or "error" in status_info:
        _LOGGER.debug("Auth error: %s", status_info)
        raise InvalidAuth
    return {"title": f"MyLink {data[CONF_HOST]}"}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Somfy MyLink."""

    VERSION = 1

    def __init__(self):
        """Initialize the somfy_mylink flow."""
        # Populated by DHCP discovery so async_step_user can pre-fill
        # the form; all None for a purely manual flow.
        self.host = None
        self.mac = None
        self.ip_address = None

    async def async_step_dhcp(self, discovery_info):
        """Handle dhcp discovery."""
        # Abort early if an entry for this IP address already exists.
        self._async_abort_entries_match({CONF_HOST: discovery_info[IP_ADDRESS]})
        formatted_mac = format_mac(discovery_info[MAC_ADDRESS])
        # NOTE(review): formatted_mac was already normalized above, so
        # this second format_mac() call looks redundant — confirm before
        # removing it.
        await self.async_set_unique_id(format_mac(formatted_mac))
        # If the device is already configured, just refresh its host.
        self._abort_if_unique_id_configured(
            updates={CONF_HOST: discovery_info[IP_ADDRESS]}
        )
        self.host = discovery_info[HOSTNAME]
        self.mac = formatted_mac
        self.ip_address = discovery_info[IP_ADDRESS]
        # Shown in the discovery card so the user can identify the hub.
        self.context["title_placeholders"] = {"ip": self.ip_address, "mac": self.mac}
        return await self.async_step_user()

    async def async_step_user(self, user_input=None):
        """Handle the initial step."""
        errors = {}
        if user_input is not None:
            self._async_abort_entries_match({CONF_HOST: user_input[CONF_HOST]})
            try:
                info = await validate_input(self.hass, user_input)
            except CannotConnect:
                errors["base"] = "cannot_connect"
            except InvalidAuth:
                errors["base"] = "invalid_auth"
            except Exception: # pylint: disable=broad-except
                # Unexpected failures surface as a generic form error.
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"
            else:
                return self.async_create_entry(title=info["title"], data=user_input)
        # First display, or redisplay with errors; the host field is
        # pre-filled from DHCP discovery when available.
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_HOST, default=self.ip_address): str,
                    vol.Required(CONF_SYSTEM_ID): str,
                    vol.Optional(CONF_PORT, default=DEFAULT_PORT): int,
                }
            ),
            errors=errors,
        )

    async def async_step_import(self, user_input):
        """Handle import."""
        # YAML import reuses the user step for validation and creation.
        self._async_abort_entries_match({CONF_HOST: user_input[CONF_HOST]})
        return await self.async_step_user(user_input)

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handle a option flow for somfy_mylink."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Initialize options flow."""
        self.config_entry = config_entry
        # Work on a deep copy so edits only persist when the user
        # finishes the flow.
        self.options = deepcopy(dict(config_entry.options))
        # Target currently being edited in async_step_target_config.
        self._target_id = None

    @callback
    def _async_callback_targets(self):
        """Return the list of targets (covers) reported by the hub."""
        return self.hass.data[DOMAIN][self.config_entry.entry_id][MYLINK_STATUS][
            "result"
        ]

    @callback
    def _async_get_target_name(self, target_id) -> str:
        """Find the name of a target in the api data.

        Raises KeyError if the target id is not present.
        """
        mylink_targets = self._async_callback_targets()
        for cover in mylink_targets:
            if cover["targetID"] == target_id:
                return cover["name"]
        raise KeyError

    async def async_step_init(self, user_input=None):
        """Handle options flow."""
        # Target data comes from the live connection, so options can
        # only be managed while the entry is loaded.
        if self.config_entry.state is not config_entries.ConfigEntryState.LOADED:
            _LOGGER.error("MyLink must be connected to manage device options")
            return self.async_abort(reason="cannot_connect")
        if user_input is not None:
            # Selecting a target drills into its per-target options;
            # submitting with no selection saves and finishes the flow.
            if target_id := user_input.get(CONF_TARGET_ID):
                return await self.async_step_target_config(None, target_id)
            return self.async_create_entry(title="", data=self.options)
        # Build the target picker; the None entry lets the user submit
        # without choosing a target.
        cover_dict = {None: None}
        mylink_targets = self._async_callback_targets()
        if mylink_targets:
            for cover in mylink_targets:
                cover_dict[cover["targetID"]] = cover["name"]
        data_schema = vol.Schema({vol.Optional(CONF_TARGET_ID): vol.In(cover_dict)})
        return self.async_show_form(step_id="init", data_schema=data_schema, errors={})

    async def async_step_target_config(self, user_input=None, target_id=None):
        """Handle options flow for target."""
        reversed_target_ids = self.options.setdefault(CONF_REVERSED_TARGET_IDS, {})
        if user_input is not None:
            # Only record a change; then return to the target picker.
            if user_input[CONF_REVERSE] != reversed_target_ids.get(self._target_id):
                reversed_target_ids[self._target_id] = user_input[CONF_REVERSE]
            return await self.async_step_init()
        self._target_id = target_id
        return self.async_show_form(
            step_id="target_config",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CONF_REVERSE,
                        default=reversed_target_ids.get(target_id, False),
                    ): bool
                }
            ),
            description_placeholders={
                CONF_TARGET_NAME: self._async_get_target_name(target_id),
            },
            errors={},
        )
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
    """Error to indicate there is invalid auth."""
| [
"noreply@github.com"
] | OverloadUT.noreply@github.com |
9e4ef2699511b1cda5dab70de5f4507eac7db2cb | 1c29948305793ced5835a5345903c6110a078bd2 | /examples/app/app/middleware.py | 6415e78ef30c1971be3b14c88310612d74a34735 | [
"MIT"
] | permissive | cmanallen/flask-compose | 84b7dbd5c68dce04de335afe03a522835847b82e | 560760e39ac32e888f0a4d84154a1bd26a2a3033 | refs/heads/master | 2020-03-30T15:09:03.387185 | 2018-10-27T20:27:42 | 2018-10-27T20:27:42 | 151,350,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | from flask import jsonify, make_response
def render_response(fn):
    """Response renderer middleware.

    Wraps a view that returns a ``(payload, status_code)`` tuple and turns
    it into a proper Flask JSON response.

    Fix: the wrapper is now decorated with ``functools.wraps`` so the
    wrapped view keeps its ``__name__``/``__doc__``.  Without it, every
    decorated view is named ``decorator``, which makes Flask raise
    "View function mapping is overwriting an existing endpoint" as soon
    as two routes use this middleware.
    """
    from functools import wraps  # local import keeps the module import block untouched

    @wraps(fn)
    def decorator(*args, **kwargs):
        response, code = fn(*args, **kwargs)
        return make_response(jsonify(response), code)

    return decorator
| [
"cmanallen90@gmail.com"
] | cmanallen90@gmail.com |
6958c46d143159426cf3e7865f78b0c9a69c09d1 | fc2fb2118ea02867d559bf8027e54e3c6b652cfd | /devItems/spring-2020/source-all/574_tfidf_traintest_p2_code_regression.py | a4d4d0c335155d8002b2ada302bdfe3e138c306f | [] | no_license | pdhung3012/SoftwareStoryPointsPrediction | 2431ad599e0fba37617cfd467de1f4f1afed56cc | 520990663cb42adcac315b75cd4eb1150c3fc86c | refs/heads/master | 2023-08-29T15:16:30.413766 | 2021-09-18T17:12:20 | 2021-09-18T17:12:20 | 254,596,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,117 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 18:19:24 2019
@author: hungphd
"""
# import modules
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.model_selection import cross_val_score, cross_val_predict, StratifiedKFold, train_test_split
import os
from sklearn.metrics import precision_score,accuracy_score
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.metrics import mean_squared_error,mean_absolute_error
def createDirIfNotExist(fopOutput):
    """Create directory *fopOutput* if it does not exist yet.

    Fix: uses ``os.makedirs`` so missing parent directories are created
    too — the old ``os.mkdir`` raised ``FileNotFoundError`` for nested
    result paths (e.g. ``result/574_code_reg/details/``) on a fresh
    checkout where the parents do not exist.
    """
    if os.path.isdir(fopOutput):
        print("Directory ", fopOutput, " already exists")
        return
    os.makedirs(fopOutput, exist_ok=True)
    print("Directory ", fopOutput, " Created ")
# set file directory
# Input: TF-IDF feature vectors; output: per-regressor MAE/MQE reports.
fopVectorAllSystems = 'data/vector574_Tfidf1_code/'
fopOverallResultReg= 'result/574_code_reg/'
createDirIfNotExist(fopOverallResultReg)
from os import listdir
from os.path import isfile, join
arrFiles = [f for f in listdir(fopVectorAllSystems) if isfile(join(fopVectorAllSystems, f))]
# Truncate the three MAE summary files before the run.
fpMAEMax = fopOverallResultReg + 'MAE_max.txt'
fpMAEMin = fopOverallResultReg + 'MAE_min.txt'
fpMAEAvg = fopOverallResultReg + 'MAE_avg.txt'
o3=open(fpMAEMax,'w')
o3.write('')
o3.close()
o3 = open(fpMAEMin, 'w')
o3.write('')
o3.close()
o3 = open(fpMAEAvg, 'w')
o3.write('')
o3.close()
# fileCsv = fopVectorAllSystems + file+
fpVectorItemTrainReg = fopVectorAllSystems + 'code_train_regression.csv'
fpVectorItemTestReg = fopVectorAllSystems + 'code_test_regression.csv'
fopOutputItemDetail = fopOverallResultReg + "/details/"
# fopOutputItemEachReg = fopOutputItemDetail + file + "/"
fopOutputItemResult = fopOverallResultReg + "/result/"
fopOutputItemChart = fopOverallResultReg + "/chart/"
fpResultAll=fopOutputItemResult+'overall.txt'
fpAllMAEInfo = fopOutputItemChart + 'MAE.txt'
createDirIfNotExist(fopOutputItemDetail)
# createDirIfNotExist(fopOutputItemEachReg)
createDirIfNotExist(fopOutputItemResult)
createDirIfNotExist(fopOutputItemChart)
# fnAll='_10cv.csv'
# load data for 10-fold cv
# Target column is 'star' (story points); 'no' is a row id, dropped.
df_train = pd.read_csv(fpVectorItemTrainReg)
print(list(df_train.columns.values))
y_train = df_train['star']
X_train = df_train.drop(['no','star'],axis=1)
df_test = pd.read_csv(fpVectorItemTestReg)
print(list(df_test.columns.values))
y_test = df_test['star']
X_test = df_test.drop(['no','star'],axis=1)
# create a list of classifiers
random_seed = 2
# classifiers = [GaussianNB(), LogisticRegression(random_state=random_seed),DecisionTreeClassifier(),
#                RandomForestClassifier(random_state=random_seed, n_estimators=50), AdaBoostClassifier(), LinearDiscriminantAnalysis(),QuadraticDiscriminantAnalysis(),
#                LinearSVC(random_state=random_seed), MLPClassifier(alpha=1), GradientBoostingClassifier(random_state=random_seed, max_depth=5)]
classifiers = [DecisionTreeRegressor(),
               RandomForestRegressor(random_state=2, n_estimators=1000), AdaBoostRegressor(),
               xgb.XGBRegressor(objective ='reg:linear', colsample_bytree = 0.3, learning_rate = 0.1,
                                max_depth = 5, alpha = 10, n_estimators = 10),
               LinearSVR(random_state=random_seed), MLPRegressor(alpha=1),
               GradientBoostingRegressor(random_state=random_seed, max_depth=5)]
# fit and evaluate for 10-cv
index = 0
# group = df_all['label']
# Short labels, index-aligned with `classifiers` above.
arrClassifierName = ['DTR', 'RFR', 'ABR', 'XGBR', 'LSVR', 'MLPR', 'GBR']
arrXBar = []
arrMAE = []
arrStrMAEAvg = []
arrIndex=[]
o2=open(fpResultAll,'w')
o2.close()
# NOTE(review): k_fold is created but never used below — the run is a
# plain train/test split, not cross-validation.
k_fold = StratifiedKFold(10,shuffle=True)
for classifier in classifiers:
    index=index+1
    try:
        filePredict = ''.join([fopOutputItemDetail,arrClassifierName[index-1], '.txt'])
        print("********", "\n", "10 fold CV Results Regression with: ", str(classifier))
        # X_train, X_test, y_train, y_test = train_test_split(all_data, all_label, test_size = 0.2,shuffle = False, stratify = None)
        classifier.fit(X_train, y_train)
        predicted = classifier.predict(X_test)
        # cross_val = cross_val_score(classifier, all_data, all_label, cv=k_fold, n_jobs=1)
        # predicted = cross_val_predict(classifier, all_data, all_label, cv=k_fold)
        # weightAvg = precision_score(all_label, predicted, average='weighted') * 100
        # maeAccuracy = mean_absolute_error(all_label, predicted)
        # mqeAccuracy = mean_squared_error(all_label, predicted)
        maeAccuracy = mean_absolute_error(y_test, predicted)
        mqeAccuracy = mean_squared_error(y_test, predicted)
        # maeAccuracy = mean_absolute_error(all_label, predicted)
        print('{:.2f}'.format(maeAccuracy))
        # Persist raw predictions for this regressor.
        np.savetxt(filePredict, predicted, fmt='%s', delimiter=',')
        o2 = open(fpResultAll, 'a')
        o2.write('Result for ' + str(classifier) + '\n')
        o2.write('MAE {}\nMQE {}\n'.format(maeAccuracy,mqeAccuracy))
        # o2.write(str(sum(cross_val) / float(len(cross_val))) + '\n')
        # o2.write(str(confusion_matrix(all_label, predicted)) + '\n')
        # o2.write(str(classification_report(all_label, predicted)) + '\n')
        o2.close()
        strClassX = str(arrClassifierName[index - 1])
        arrIndex.append(index)
        arrXBar.append(strClassX)
        arrMAE.append(maeAccuracy)
        arrStrMAEAvg.append('{:.2f}'.format(maeAccuracy))
        # break
    except Exception as inst:
        # A failing regressor is reported and skipped; its MAE is simply
        # absent from the summary arrays.
        print("Error ", index)
        print(type(inst))  # the exception instance
        print(inst.args)  # arguments stored in .args
        print(inst)
# Summarize best/worst/average MAE across regressors.
# NOTE(review): 'max' files record the LARGEST MAE (worst model) — the
# best/worst variable names below are presumably swapped; verify intent.
arrAlgm = np.array(arrMAE)
bestMAE=np.amax(arrAlgm)
worstMAE=np.amin(arrAlgm)
avgMAE=np.average(arrAlgm)
maxIndexMAE= np.argmax(arrAlgm)
minIndexMAE = np.argmin(arrAlgm)
print(maxIndexMAE)
o3=open(fpMAEMax,'a')
o3.write('{}\t{}\n'.format(arrClassifierName[maxIndexMAE], bestMAE))
o3.close()
o3 = open(fpMAEMin, 'a')
o3.write('{}\t{}\n'.format(arrClassifierName[minIndexMAE], worstMAE))
o3.close()
o3 = open(fpMAEAvg, 'a')
o3.write('{}\n'.format(avgMAE))
o3.close()
o3 = open(fpAllMAEInfo, 'w')
for i in range(0,len(arrMAE)):
    o3.write('{}\t{}\n'.format(arrXBar[i],arrMAE[i]))
o3.close()
# Disabled bar-chart rendering of the per-regressor MAE values.
# y_pos = np.arange(len(arrXBar))
# plt.bar(y_pos, arrMAE, align='center', alpha=0.5)
# plt.xticks(y_pos, arrIndex, rotation=90)
# plt.rcParams["figure.figsize"] = (40, 40)
# plt.ylabel('MAE Accuracy')
# plt.ylim(0, 50)
# for i in range(len(arrMAE)):
#     plt.text(x=i - 0.5, y=arrMAE[i] + 1, s=arrStrMAEAvg[i])
#     plt.text(x=i, y=arrMAE[i] - 1, s=arrXBar[i], rotation=90)
#
# plt.title(fpResultAll)
# plt.savefig(fpAllMAEInfo)
# plt.clf() | [
"pdhung3012@gmail.com"
] | pdhung3012@gmail.com |
695b5a756c2e0b025a91c5b0a72fc68c9b6b7b6a | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-antiddos/huaweicloudsdkantiddos/v1/model/show_d_dos_status_request.py | 62c9d00ea8af66a317f15806c1652a4f6f9548d0 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,667 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowDDosStatusRequest:
    """Request model for querying the Anti-DDoS status of one EIP.

    Auto-generated huaweicloud SDK model.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attributes whose values must be masked in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'floating_ip_id': 'str',
        'ip': 'str'
    }

    attribute_map = {
        'floating_ip_id': 'floating_ip_id',
        'ip': 'ip'
    }

    def __init__(self, floating_ip_id=None, ip=None):
        """ShowDDosStatusRequest - a model defined in huaweicloud sdk"""
        self._floating_ip_id = None
        self._ip = None
        self.discriminator = None

        self.floating_ip_id = floating_ip_id
        if ip is not None:
            self.ip = ip

    @property
    def floating_ip_id(self):
        """Gets the floating_ip_id of this ShowDDosStatusRequest.

        ID of the user's EIP (Elastic IP address).

        :return: The floating_ip_id of this ShowDDosStatusRequest.
        :rtype: str
        """
        return self._floating_ip_id

    @floating_ip_id.setter
    def floating_ip_id(self, floating_ip_id):
        """Sets the floating_ip_id of this ShowDDosStatusRequest.

        ID of the user's EIP (Elastic IP address).

        :param floating_ip_id: The floating_ip_id of this ShowDDosStatusRequest.
        :type: str
        """
        self._floating_ip_id = floating_ip_id

    @property
    def ip(self):
        """Gets the ip of this ShowDDosStatusRequest.

        The user's EIP (Elastic IP address).

        :return: The ip of this ShowDDosStatusRequest.
        :rtype: str
        """
        return self._ip

    @ip.setter
    def ip(self, ip):
        """Sets the ip of this ShowDDosStatusRequest.

        The user's EIP (Elastic IP address).

        :param ip: The ip of this ShowDDosStatusRequest.
        :type: str
        """
        self._ip = ip

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # Mask sensitive values in serialized output.
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowDDosStatusRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
d3f88b3096c9915223d4fa391e07bfc402c947fc | 8cc12b26221e7fa7060adb01989bd048134b3957 | /test_15450/urls.py | 764913f5d830c8bf2b431a1463080620205845e1 | [] | no_license | crowdbotics-apps/test-15450 | 842436977a2796648280b0ff2d53d0f1d100ae7b | 06c21efb558ed2f6ea3185943b1b7d84cb7d512d | refs/heads/master | 2022-02-22T02:54:29.345958 | 2020-04-04T23:57:33 | 2020-04-04T23:57:33 | 253,121,799 | 0 | 0 | null | 2022-02-10T13:56:45 | 2020-04-04T23:57:12 | Python | UTF-8 | Python | false | false | 1,890 | py | """test_15450 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# URL routing table: app routes, auth (allauth + rest_auth), admin and API.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]

# Branding for the Django admin site.
admin.site.site_header = "Test"
admin.site.site_title = "Test Admin Portal"
admin.site.index_title = "Test Admin"

# swagger
# Schema view is restricted to authenticated users.
schema_view = get_schema_view(
    openapi.Info(
        title="Test API",
        default_version="v1",
        description="API documentation for Test App",
    ),
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)

urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
83d7efe8ec51a03b89fc7457db7b1e9a0126db06 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow_estimator/python/estimator/head/multi_head.py | 2826514b2b5d57d3feeb26964d7a3ac7fc016c82 | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:0a250ae00433eead90a7bb128c31387543770c56715cc722483690c2c4b13f76
size 21311
| [
"github@cuba12345"
] | github@cuba12345 |
cf9c6c5035bcfd72c591b6a33dc2330d6f9960cc | 16c5a7c5f45a6faa5f66f71e043ce8999cb85d80 | /app/honor/student/games/word_flash_card.py | 1a917f3e75ac0c64386f4aeb152d453f54bd576f | [] | no_license | vectorhuztt/test_android_copy | ca497301b27f49b2aa18870cfb0fd8b4640973e5 | f70ab6b1bc2f69d40299760f91870b61e012992e | refs/heads/master | 2021-04-03T19:26:48.009105 | 2020-06-05T01:29:51 | 2020-06-05T01:29:51 | 248,389,861 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,841 | py | # @Author : Vector
# @Email : vectorztt@163.com
# @Time : 2019/8/1 11:35
# -----------------------------------------
import random
import re
import string
import time
from selenium.webdriver.common.by import By
from app.honor.student.games.all_game_common_element import GameCommonEle
from conf.decorator import teststep, teststeps
from utils.games_keyboard import Keyboard
from utils.get_attribute import GetAttribute
from utils.toast_find import Toast
class FlashCardGame(GameCommonEle):
    """Page object / flow driver for the student "flash card" word game.

    Covers three sub-modes (study, copy/transcription, sentence), the
    result page, and the end-to-end flows that exercise them.
    """

    @teststep
    def wait_check_flash_study_page(self):
        """Checkpoint for the study-mode page (keyed on the EN/CN toggle id)."""
        locator = (By.ID, self.id_type() + "side")
        return self.wait.wait_check_element(locator)

    @teststep
    def wait_check_copy_page(self):
        """Checkpoint for the copy (transcription) mode page, keyed on the input field id."""
        locator = (By.ID, self.id_type() + "mine_word")
        return self.wait.wait_check_element(locator)

    @teststep
    def wait_check_sentence_page(self):
        """Checkpoint based on the sentence id of "flash-card practice - sentence mode"."""
        locator = (By.ID, self.id_type() + "sentence")
        return self.wait.wait_check_element(locator, timeout=3)

    @teststep
    def wait_check_explain_page(self):
        """Check whether the Chinese explanation element is present."""
        locator = (By.ID, self.id_type() + 'tv_chinese')
        return self.wait.wait_check_element(locator, timeout=3)

    @teststep
    def wait_check_flash_result_page(self):
        """Checkpoint for the result page ("完成学习" text)."""
        locator = (By.XPATH, "//*[@text='完成学习']")
        return self.wait.wait_check_element(locator)

    @teststep
    def study_word(self):
        """Word shown on the study page."""
        locator = (By.ID, self.id_type() + 'tv_english')
        ele = self.wait.wait_find_element(locator)
        return ele.text

    @teststep
    def copy_word(self):
        """Word shown on the copy-mode page."""
        locator = (By.ID, self.id_type() + 'english')
        ele = self.wait.wait_find_element(locator)
        return ele.text

    @teststep
    def copy_explain(self):
        """Word explanation element in copy mode."""
        locator = (By.ID, self.id_type() + 'chinese')
        return self.wait.wait_find_element(locator)

    @teststep
    def copy_input(self):
        """Answer input field in copy mode."""
        locator = (By.ID, self.id_type() + 'mine_word')
        ele = self.wait.wait_find_element(locator)
        return ele

    @teststep
    def click_voice(self):
        """Tap the audio playback button."""
        locator = (By.ID, self.id_type() + 'play_voice')
        self.wait.wait_find_element(locator).click()

    @teststep
    def pattern_switch(self):
        """Tap the English-only / English-Chinese toggle (top-right) to switch mode."""
        locator = (By.ID, self.id_type() + 'side')
        self.wait.wait_find_element(locator).click()

    @teststep
    def author(self):
        """Teacher who recommended the example sentence."""
        locator = (By.ID, self.id_type() + 'author')
        ele = self.wait.wait_find_element(locator)
        return ele.text

    @teststep
    def english_study(self):
        """Word displayed on the page in English-only mode."""
        locator = (By.ID, self.id_type() + 'tv_english')
        ele = self.wait.wait_find_element(locator)
        return ele.text

    @teststep
    def study_word_explain(self):
        """Word-explanation element displayed in English-Chinese mode."""
        locator = (By.ID, self.id_type() + 'tv_chinese')
        return self.wait.wait_find_element(locator)

    @teststep
    def study_sentence(self):
        """Sentence displayed on the page."""
        locator = (By.ID, self.id_type() + 'sentence')
        ele = self.wait.wait_find_element(locator)
        return ele.text

    @teststep
    def study_sentence_explain(self):
        """Sentence translation displayed in English-Chinese mode."""
        locator = (By.ID, self.id_type() + 'sentence_explain')
        ele = self.wait.wait_find_element(locator)
        return ele.text

    @teststep
    def star_button(self):
        """Star (bookmark) button."""
        locator = (By.ID, self.id_type() + 'iv_star')
        return self.wait.wait_find_element(locator)

    @teststep
    def familiar_button(self):
        """'Known word' (familiar) button."""
        locator = (By.ID, self.id_type() + 'expert')
        return self.wait.wait_find_element(locator)

    @teststep
    def change_model_btn(self):
        """English/Chinese mode toggle button."""
        locator = (By.ID, self.id_type() + 'side')
        return self.wait.wait_find_element(locator)

    # ------- result page elements -------

    @teststep
    def study_sum(self):
        """Study summary text on the result page."""
        locator = (By.ID, self.id_type() + 'study_sum')
        ele = self.wait.wait_find_element(locator)
        return ele.text

    @teststep
    def get_start_sum(self):
        """Number of starred items, parsed as the 2nd integer of the summary."""
        return int(re.findall(r'\d+', self.study_sum())[1])

    @teststep
    def study_again(self):
        """'Study again' button."""
        locator = (By.ID, self.id_type() + 'study_again')
        return self.wait.wait_find_element(locator)

    @teststep
    def study_star_again(self):
        """'Practise the starred words again' button."""
        locator = (By.ID, self.id_type() + 'star_again')
        return self.wait.wait_find_element(locator)

    @teststep
    def result_words(self):
        """Word elements listed on the result page."""
        locator = (By.ID, self.id_type() + 'tv_word')
        return self.wait.wait_find_elements(locator)

    @teststep
    def word_voice(self, word):
        """Audio button to the left of *word*."""
        locator = (By.XPATH, '//*[@text="{}"]/preceding-sibling::android.widget.ImageView'
                             '[contains(@resource-id,"{}iv_voice")]'.format(word, self.id_type()))
        return self.wait.wait_find_element(locator, timeout=3)

    @teststep
    def word_explain(self, word):
        """Explanation element belonging to *word*."""
        locator = (By.XPATH, '//*[@text="{}"]/../following-sibling::android.widget.'
                             'LinearLayout/android.widget.TextView'.format(word))
        return self.wait.wait_find_element(locator, timeout=3)

    @teststep
    def word_star(self, word):
        """Star button belonging to *word*."""
        locator = (By.XPATH, '//*[@text="{}"]/../../following-sibling::android.widget.ImageView'.format(word))
        return self.wait.wait_find_element(locator, timeout=5)

    @teststep
    def sentence_voice(self, word):
        """Audio button to the left of the sentence *word*.

        NOTE(review): the second format argument (self.id_type()) has no
        matching placeholder and is ignored — confirm the xpath is intended.
        """
        locator = (By.XPATH, '//*[@text="{}"]/../preceding-sibling::android.widget.ImageView'.format(word, self.id_type()))
        return self.wait.wait_find_element(locator, timeout=5)

    @teststep
    def sentence_explain(self, word):
        """Translation element belonging to the sentence *word*."""
        locator = (By.XPATH, '//*[@text="{}"]/following-sibling::android.widget.TextView'.format(word))
        return self.wait.wait_find_element(locator, timeout=5)

    @teststep
    def sentence_star(self, word):
        """Star button belonging to the sentence *word*."""
        locator = (By.XPATH, '//*[@text="{}"]/../following-sibling::android.widget.ImageView'.format(word))
        return self.wait.wait_find_element(locator, timeout=5)

    @teststep
    def flash_card_game_operate(self, fq, half_exit, star_list=None):
        """Flash-card (study-mode) game flow inside library/homework.

        :param fq: pass number — 1 = first pass (stars every other word),
                   otherwise a verification pass against ``star_list``.
        :param half_exit: if True, back out mid-game on the 2nd item.
        :param star_list: starred words from a previous pass (fq != 1).
        :return: (total question count, list of starred words)
        """
        star_words = [] if fq == 1 else star_list
        if self.wait_check_flash_study_page():
            total_num = self.rest_bank_num()
            for x in range(0, total_num):
                self.rate_judge(total_num, x)  # check the remaining-count indicator
                self.next_btn_judge('true', self.fab_next_btn)  # next-button state check
                self.click_voice()
                word = self.study_word()
                print('单词:', word)
                if half_exit:
                    if x == 1:
                        # NOTE(review): unlike flash_copy_game_operate this
                        # does not break after exiting — confirm intended.
                        self.click_back_up_button()
                        self.tips_operate()
                # Verify the page defaults to English-Chinese mode.
                if not self.wait_check_explain_page():
                    self.base_assert.except_error('未发现单词解释,页面没有默认选择英汉模式' + word)
                else:
                    print('解释:', self.study_word_explain().text)  # word explanation
                if self.wait_check_sentence_page():
                    print("句子:", self.study_sentence())  # sentence
                    print("句子解释:", self.study_sentence_explain())  # sentence translation
                    print("推荐老师:", self.author())  # recommending teacher
                self.change_model_btn().click()  # switch to English-only mode
                if self.wait_check_explain_page():  # translation must disappear
                    self.base_assert.except_error('切换至全英模式依然存在解释' + word)
                self.change_model_btn().click()  # switch back to English-Chinese mode
                if x % 2 == 0:  # on even items: star (1st pass) / verify star (later pass)
                    if fq == 1:
                        self.star_button().click()
                        star_words.append(word)
                        print('加入标星单词')
                    else:
                        if GetAttribute().get_selected(self.star_button()) == 'true':
                            print('标星校验正确')
                        else:
                            self.base_assert.except_error('单词已标星但标星按钮未被选中')
                print('-' * 20, '\n')
                self.fab_next_btn().click()
                time.sleep(2)
            return total_num, star_words

    @teststep
    def flash_copy_game_operate(self, fq, half_exit, star_list=None,):
        """Copy (transcription) mode game flow.

        Same pass semantics as flash_card_game_operate; additionally on
        the 2nd item it first types a wrong word to verify the page does
        not advance, then clears it and types the correct word.
        """
        star_words = [] if fq == 1 else star_list
        if self.wait_check_copy_page():
            total_num = self.rest_bank_num()
            for x in range(total_num):
                self.click_voice()
                self.rate_judge(total_num, x)
                copy_word = self.copy_word()
                print('单词:', copy_word)
                if half_exit:
                    if x == 1:
                        self.click_back_up_button()
                        self.tips_operate()
                        break
                if x % 2 == 0:  # even items
                    if fq == 1:  # first pass: star the word
                        self.star_button().click()
                        star_words.append(copy_word)
                        print('加入标星单词')
                    else:  # later pass: verify the star stuck
                        if GetAttribute().get_selected(self.star_button()) == 'true':
                            print('标星校验正确')
                        else:
                            self.base_assert.except_error('单词已标星但标星按钮未被选中')
                self.copy_input().click()
                time.sleep(1)
                if x == 1:
                    random_str = random.sample(string.ascii_lowercase, 4)  # type a random wrong word
                    for j, s in enumerate(random_str):
                        Keyboard().keyboard_operate(s, j)
                    print('输入单词:', ''.join(random_str))
                    if self.copy_word() != copy_word:  # must NOT advance to the next item
                        self.base_assert.except_error('输入错误单词可以进入下一题')
                    for y in range(4):  # clear the typed word
                        Keyboard().games_keyboard('backspace')
                    time.sleep(1)
                for j, s in enumerate(list(copy_word)):  # type the correct word
                    Keyboard().keyboard_operate(s, j)
                time.sleep(3)
                print('-' * 30, '\n')
            return total_num, star_words

    @teststeps
    def flash_card_result_operate(self, flash_result):
        """Handle the flash-card result page: verify counts, then exercise
        star cancel/re-add and the 'practise starred again' button."""
        total, star_words = flash_result
        # if self.wait_check_medal_page():
        #     print('获取勋章')
        #     self.click_back_up_button()
        if self.wait_check_flash_result_page():
            print('完成学习!')
            summary = self.study_sum()
            print(summary, '\n')
            full_count = int(re.findall(r'\d+', summary)[0])  # total count in summary
            star_count = self.get_start_sum()
            if full_count != total:
                self.base_assert.except_error('页面统计个数与做题个数不一致')
            if len(star_words) != star_count:
                self.base_assert.except_error('标星个数与页面统计个数不一致')
            # Cancel every star, then the star count must be zero.
            self.cancel_or_add_star(total, star_words, cancel=True)
            if self.get_start_sum() != 0:
                self.base_assert.except_error('单词标星取消,页面标星统计数未发生变化,与实际标星数不一致')
            self.study_star_again().click()
            if Toast().find_toast('没有标记★的内容'):
                print('没有标记★的内容\n')
            else:
                self.base_assert.except_error('未提示没有标星单词')
            # Re-add some stars and re-enter starred practice.
            self.cancel_or_add_star(total, star_words)
            self.study_star_again().click()

    @teststep
    def cancel_or_add_star(self, total, star_words, cancel=False):
        """Walk the result list, playing audio for each item, and either
        cancel all stars (cancel=True) or star items #2 and #4.

        ``star_words`` is mutated in place to mirror the UI state.
        """
        word_list = []
        while True:
            words = self.result_words()
            for i, w in enumerate(words):
                if w.text in word_list:
                    continue
                else:
                    if i == len(words) - 1:
                        # Near the bottom of the visible list — scroll a bit.
                        self.screen_swipe_up(0.5, 0.8, 0.72, 1000)
                    result_word = w.text
                    word_list.append(result_word)
                    # Items may be words or sentences; try word locators first.
                    voice_btn = self.word_voice(result_word) or self.sentence_voice(result_word)
                    voice_btn.click()
                    word_explain = self.word_explain(result_word) or self.sentence_explain(result_word)
                    word_star = self.word_star(result_word) or self.sentence_star(result_word)
                    if cancel:
                        print('单词:', result_word)
                        print('解释', word_explain.text)
                        if GetAttribute().get_selected(word_star) == 'true':
                            word_star.click()
                            print('取消标星')
                            star_words.remove(result_word)
                        print('-' * 20, '\n')
                    else:
                        if i == 2 or i == 4:
                            print('单词:', result_word, end='\t')
                            word_star.click()
                            print('添加标星')
                            star_words.append(result_word)
                            print('-' * 20, '\n')
            if len(word_list) != total:
                self.screen_swipe_up(0.5, 0.8, 0.3, 1000)
            else:
                break
        self.screen_swipe_down(0.5, 0.2, 0.8, 1000)

    @teststep
    def play_flash_game(self, half_exit):
        """Overall flash-card flow: first pass, result handling, second
        (verification) pass, then back out from the result page."""
        if self.wait_check_flash_study_page():
            first_result = self.flash_card_game_operate(fq=1, half_exit=half_exit)
            if not half_exit:
                self.flash_card_result_operate(first_result)
                self.flash_card_game_operate(fq=2, half_exit=half_exit, star_list=first_result[1],)
        elif self.wait_check_copy_page():
            first_result = self.flash_copy_game_operate(fq=1, half_exit=half_exit)
            if not half_exit:
                self.flash_card_result_operate(first_result)
                self.flash_copy_game_operate(fq=2, half_exit=half_exit, star_list=first_result[1])
        if not half_exit:
            if self.wait_check_flash_result_page():
                print(self.study_sum())
                self.click_back_up_button()
| [
"vectorztt@163.com"
] | vectorztt@163.com |
2a314614273fd1f98a7a750f330a866ac7116ab5 | 4751a9daca11558dd0780f2e8b9477a484ebc7f4 | /src/qibo/tests/cirq_utils.py | c6ec7923450f40c20ffd28e6c9d73826c5a7d561 | [
"Apache-2.0"
] | permissive | drheli/qibo | f6875ed39883fe7bfa0b8939abb042fe636c5de7 | b99568aee9f978a5a82e92860c8d17e3358af7b9 | refs/heads/master | 2023-04-17T20:40:44.324689 | 2021-04-29T16:29:40 | 2021-04-29T16:29:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | import re
import cirq
import numpy as np
_QIBO_TO_CIRQ = {"CNOT": "CNOT", "RY": "Ry", "TOFFOLI": "TOFFOLI"}


def assert_gates_equivalent(qibo_gate, cirq_gate):
    """Asserts that qibo gate is equivalent to cirq gate.

    Checks that:
        * Gate type agrees.
        * Target and control qubits agree.
        * Parameter (if applicable) agrees.

    Cirq gate parameters are extracted by parsing the gate string.
    """
    pieces = [piece for piece in re.split("[()]", str(cirq_gate)) if piece]
    theta = None
    if len(pieces) == 2:
        gatename, targets = pieces
    elif len(pieces) == 3:
        gatename, theta, targets = pieces
    else:  # pragma: no cover
        # case not tested because it fails
        raise RuntimeError("Cirq gate parsing failed with {}.".format(pieces))

    # Last qubit listed is the target; the rest are controls.
    qubit_ids = [int(q) for q in targets.replace(" ", "").split(",")]
    target_qubits = (qubit_ids.pop(),)
    control_qubits = set(qubit_ids)

    assert _QIBO_TO_CIRQ[qibo_gate.__class__.__name__] == gatename
    assert qibo_gate.target_qubits == target_qubits
    assert set(qibo_gate.control_qubits) == control_qubits

    if theta is not None:
        if "π" in theta:
            theta = np.pi * float(theta.replace("π", ""))
        else:  # pragma: no cover
            # case doesn't happen in tests (could remove)
            theta = float(theta)
        np.testing.assert_allclose(theta, qibo_gate.parameters)
| [
"35475381+stavros11@users.noreply.github.com"
] | 35475381+stavros11@users.noreply.github.com |
74f1ce2400caeb1017978bc9cd7e592929ffa333 | 464850ba426263b17084fc71363ca14b8278b15e | /queue.py | 09c1d0d38a0d5d2c0a155264f7415f957a7588b0 | [] | no_license | eng-arvind/python | 8442c30ec10f979f913b354458b4f910539d8728 | 249f5f35f245a3f1742b10310de37ca6c6023af2 | refs/heads/master | 2020-12-23T06:40:16.911269 | 2020-02-02T18:42:01 | 2020-02-02T18:42:01 | 237,069,973 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from queue import Queue
def reversequeue(queue):
    """Reverse the order of the items in *queue* in place.

    Drains the queue into a stack and pushes the items back, inverting
    the front-to-back order.

    Fix: uses only the public ``Queue`` API (``get``/``put``) — the old
    code peeked at the private ``queue.queue`` deque and then called
    ``get()`` anyway, which was redundant and implementation-dependent.
    """
    stack = []
    while not queue.empty():
        stack.append(queue.get())
    while stack:
        queue.put(stack.pop())
# Demo driver: enqueue the strings '5'..'12', reverse the queue and print
# the items front-to-back (expected output: 12 11 10 9 8 7 6 5).
q=Queue()
q.put('5')
q.put('6')
q.put('7')
q.put('8')
q.put('9')
q.put('10')
q.put('11')
q.put('12')
reversequeue(q)
while(not q.empty()):
    print(q.get(),end=" ")
| [
"noreply@github.com"
] | eng-arvind.noreply@github.com |
ec79280c4e8b367009e17e371a77a8f3c6e29ec7 | 73bb9d0d50b96b3d7ee48e2d97b1d8128a5f2b1e | /backjoon/15/11054.py | ef1c7f92b461227fc42eda080384ef49f02a5cde | [] | no_license | Hoon94/Algorithm | a0ef211d72a2b78e08249501d197875065392084 | 6f6969214bbb6bacd165313b6d8c0feb1caa8963 | refs/heads/master | 2023-05-11T13:12:11.585285 | 2023-05-08T14:38:47 | 2023-05-08T14:38:47 | 244,936,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | n = int(input())
a = list(map(int, input().split()))
dpp = [0 for _ in range(n)]
dpm = [0 for _ in range(n)]
dpb = [0 for _ in range(n)]
for i in range(n):
for j in range(i):
if a[i] > a[j] and dpp[i] < dpp[j]:
dpp[i] = dpp[j]
dpp[i] += 1
for i in range(n - 1, -1, -1):
for j in range(n - 1, i, -1):
if a[i] > a[j] and dpm[i] < dpm[j]:
dpm[i] = dpm[j]
dpm[i] += 1
for i in range(n):
dpb[i] = dpp[i] + dpm[i] - 1
print(max(dpb))
| [
"dleognsdl1@naver.com"
] | dleognsdl1@naver.com |
a7dac03b28d59fbdba3a9c969905a7c20ffc2d7c | 23d8c1b24ce4eb9fe7ee7f790c58d411c5b6f185 | /.lint.py.tcf.py | fb97800befcfff47a7b69b560c9e7425329ea899 | [
"Apache-2.0"
] | permissive | d-scott-phillips/tcf | aca30a6faad3778a8eef28409dcc6cf88c702a05 | 4d5f06b25799f18c103d4e4e8b222652956ebc49 | refs/heads/master | 2020-05-14T09:56:20.700897 | 2019-04-16T15:47:32 | 2019-04-16T15:47:32 | 181,752,941 | 0 | 0 | Apache-2.0 | 2019-04-16T19:24:08 | 2019-04-16T19:24:08 | null | UTF-8 | Python | false | false | 2,783 | py | #! /usr/bin/python3
"""
Implement TCF specificy lints
"""
import re
lint_py_check_per_line_name = "misc Python checks"


def lint_py_check_per_line_filter(_repo, cf):
    """Decide whether *cf* should be line-checked as Python.

    Returns False for missing, binary or deleted files, and for files
    that neither end in ``.py`` nor mention ``python`` in their first
    line (shebang / mode hint); True otherwise.
    """
    if not cf or cf.binary or cf.deleted:
        return False
    with open(cf.name, 'r') as fobj:
        first_line = fobj.readline()
    looks_like_python = cf.name.endswith(".py") or 'python' in first_line
    if not looks_like_python:
        _repo.log.info("%s: skipping, not a Python file", cf.name)
        return False
    return True
def lint_py_check_per_line(_repo, cf):
    """
    Run multiple line-by-line checks

    For every line of *cf* that was modified, warn about:
      - 'from MODULE import SYMBOL' style imports
      - missing spaces around assignment/comparison operators
    Warnings are emitted through ``_repo.warning``.
    """
    with open(cf.name, "r") as f:
        line_cnt = 0
        regex_import = re.compile(r"\s*from\s+.*\s+import\s+.*")
        warnings = 0
        for line in f:
            line_cnt += 1
            line = line.strip()
            # NOTE(review): with 'or _repo.wide' this skips *every* line when
            # wide mode is on; 'and not _repo.wide' may have been intended --
            # confirm against the lint driver's semantics.
            if not line_cnt in cf.lines or _repo.wide:
                continue # Not a line modified, skip it
            # Check that imports are not done with *from HERE import
            # THAT* because it makes code very confusing when we can't
            # see where functions are coming from
            m = regex_import.match(line)
            if m:
                _repo.warning("""\
%s:%d: python style error: use 'import MODULE' vs 'from MODULE import SYMBOLs'
see https://securewiki.ith.intel.com/display/timo/Coding+Style+and+procedures#CodingStyleandprocedures-Importingcode"""
                              % (cf.name, line_cnt))
                warnings += 1
            # We like spacing around equal signs and operators in
            # general, the C way. The python way sucks. ARG OPERATOR
            # ARG beats ARGOPERATORARG. Ewks.
            # Likewise, [X] is an index, [ X ] is a list. Heaven's
            # sake. For consistency, dictionaries are { K: V }; it's
            # really had to check on those and a patch to pylint would
            # be needed for that.
            # NOTE(review): these three patterns are recompiled on every
            # modified line; hoisting them next to regex_import above would
            # avoid the redundant work.
            regex_bad_eqop = re.compile(r"\S(=|==|!=|\+=|-=|\*=|/=|\|=|&=|^=)\S")
            regex_config = re.compile("CONFIG_[^=]+=")
            # Catches things like blabla('--someswitch=', whatever) or
            # blabla("--something=that")
            regex_string = re.compile(r"=dd[^\s'\"]*['\"]")
            # Got a probable bad usage?
            m = regex_bad_eqop.search(line)
            if m:
                # Maybe a config assignment (this is actually shell code)
                if regex_config.search(line) or regex_string.search(line):
                    continue
                # Maybe rst code, ignore it
                if '===' in line:
                    continue
                _repo.warning("""\
%s:%d: python style error: always leave spaces around operators
('a = b' vs 'a=b')\
""" % (cf.name, line_cnt))
| [
"inaky.perez-gonzalez@intel.com"
] | inaky.perez-gonzalez@intel.com |
2badb5dc4737dd2ff29fa4c88e0c38d4282f9abe | f1766b34f25348b49303ca55a99aaf87f42d78ab | /backend/home/migrations/0002_load_initial_data.py | 723269172a48c32a19c73fb69e921379e67a086f | [] | no_license | crowdbotics-apps/test-25996 | 907a79dfad8a679b84df78679777a6037257ef51 | 2e05c8580a9aefc5ccec4d45c3fdf7406e92da3f | refs/heads/master | 2023-04-18T04:31:43.233315 | 2021-04-28T12:20:13 | 2021-04-28T12:20:13 | 362,441,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the home.CustomText table with the default title."""
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="test")
def create_homepage(apps, schema_editor):
    """Seed home.HomePage with the default welcome markup."""
    homepage_model = apps.get_model("home", "HomePage")
    default_body = """
<h1 class="display-4 text-center">test</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    homepage_model.objects.create(body=default_body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record at the app domain."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "test-25996.botics.co"
    site_defaults = {"name": "test"}
    # Only attach a domain when one is configured for the deployment.
    if custom_domain:
        site_defaults["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=site_defaults, id=1)
class Migration(migrations.Migration):
    """Data migration: seed CustomText, HomePage and the sites record."""

    # Requires the tables created by home.0001_initial and the unique-domain
    # constraint from the sites app.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    # Each seeder runs forward-only (no reverse function supplied).
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
b46eb3393d73726f9d7c429be423e933ee3bd82c | 866e5f850aed11ef49daab845fbafc2e4b2a585f | /vk.py | 2a8d101b46e93eae73160e0374aa59710325e357 | [] | no_license | SeregaFreeman/vk-robot | 4031bde2b8ee10a8dec5bdcdadf58ceb100547e4 | 3d27ea5e8d84c25e84a9e7ee07afe7f03f7dd801 | refs/heads/master | 2021-06-01T06:21:07.453525 | 2016-07-29T22:29:30 | 2016-07-29T22:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,627 | py | import vk_api
from random import randint, choice
def main():
    """Example: send a random message to a hard-coded user and print the
    authenticated profile's friend ids (translated from Russian)."""
    # Credentials are typed interactively (prompts are in Russian).
    login = input('Введи логин: ')
    password = input('Введи пароль: ')
    vk_session = vk_api.VkApi(login, password, api_version='5.53')
    try:
        vk_session.authorization()
    except vk_api.AuthorizationError as error_msg:
        # Authentication failed (bad credentials, captcha, ...): bail out.
        print(error_msg)
        return
    """
    Примеры можно посмотреть здесь: https://github.com/python273/vk_api/tree/master/examples
    VK API - здесь: https://new.vk.com/dev.php?method=methods
    """
    vk = vk_session.get_api()
    # users.get() with no ids returns info about the authenticated user.
    user = vk.users.get()[-1]
    print(user)
    '''Просто способ работы с api
    '''
    # Direct API call: send a greeting to a hard-coded recipient id.
    vk.messages.send(user_id='376365095', message='HI LOLKA')
    print(vk.friends.get(user_id=user['id'])['items'])
    print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
    '''Далее рабочий код, но он работает после подправки файла vk_tools.py (259 строка):
    for x in range(len(response)):
        if self.one_param:
            if response[x] is False:
                self.one_param['return'][cur_pool[x]] = {'_error': True}
            else:
                self.one_param['return'][cur_pool[x]] = response[x]
        else:
            if response[x] is False:
                self.pool[i + x][2].update({'_error': True})
            elif type(response[x]) is list:
                self.pool[i + x][2].update({'list': response[x]})
            else:
                if type(response[0]) is int:
                    self.pool[i + x][2].update()
                else:
                    self.pool[i + x][2].update(response[x])
    '''
    # Batched request: build a message from 1-10 random words and send it.
    with vk_api.VkRequestsPool(vk_session) as pool:
        message = ''
        for i in range(randint(1, 10)):
            message += choice(['Syka', 'bliat', 'соси кирпичь', 'улыбашка', 'ti', 'жОпа', 'mamky ipal'])\
                       + choice([' ', ', '])
        pool.method('messages.send', {'oauth': '1', 'user_id': '376365095', 'message': message})
    # Batched request: fetch the friend list; the pool result is available
    # after the with-block exits.
    with vk_api.VkRequestsPool(vk_session) as pool:
        friends = pool.method('friends.get', {'user_id': user['id']})
    if friends['items']:
        print(friends['items'])
    else:
        print('Ti odinokaia sychka')
if __name__ == '__main__':
    main()
"19941510metalhead@gmail.com"
] | 19941510metalhead@gmail.com |
ce58a280272597d54408ad8dd15c1be2287bd149 | 240c95e46d5cdb547f4500f00960dd96705cac34 | /functionalTools.py | 16c95a123d1533a81323304dd25b7767d16e0f2d | [] | no_license | CaMeLCa5e/dailysummer2015 | a23fda58c27fe42a7512b4150420827eb292305c | 378d386702db0814bd79e83de37de8ba442f9c7b | refs/heads/master | 2021-01-10T18:12:08.298279 | 2015-08-19T21:48:41 | 2015-08-19T21:48:41 | 36,266,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | def f(x): return x % 3 == 0 or x % 5 == 0
# NOTE(review): Python 2 syntax (print statements); under Python 2,
# filter() and map() return lists. f (defined above) keeps multiples
# of 3 or 5 from 2..24.
print filter(f, range(2, 25))
# Cube each value 1..10.
def cube(x): return x*x*x
print map(cube, range(1, 11))
seq = range(8)
# Element-wise sum of seq with itself: [0, 2, 4, ...].
def add(x, y): return x+y
print map(add, seq, seq)
"JM273606@gmail.com"
] | JM273606@gmail.com |
4b78f4a718bcee6036bc3f1536277611e5f3b666 | 3f28b697f570ded0502de70c706200005ab62525 | /env/lib/python2.7/site-packages/sklearn/externals/joblib/numpy_pickle.py | fb91c51dff1b1e891267ee3c52dd63a6ed9ae448 | [
"MIT"
] | permissive | Ram-Aditya/Healthcare-Data-Analytics | 5387e41ad8e56af474e10fa2d1c9d8a2847c5ead | d1a15d2cc067410f82a9ded25f7a782ef56b4729 | refs/heads/master | 2022-12-09T12:49:59.027010 | 2019-11-23T20:10:55 | 2019-11-23T20:10:55 | 223,639,339 | 0 | 1 | MIT | 2022-11-22T00:37:48 | 2019-11-23T19:06:20 | Jupyter Notebook | UTF-8 | Python | false | false | 15,580 | py | """
Utilities for fast persistence of big data, with optional compression.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import pickle
import traceback
import sys
import os
import zlib
import warnings
from ._compat import _basestring
from io import BytesIO
# Python 2/3 compatibility: pick the pure-Python pickler classes (which can
# be subclassed and have their dispatch table patched) and define asbytes().
if sys.version_info[0] >= 3:
    Unpickler = pickle._Unpickler
    Pickler = pickle._Pickler

    def asbytes(s):
        # Encode text to bytes; latin1 preserves byte values 0-255 1:1.
        if isinstance(s, bytes):
            return s
        return s.encode('latin1')
else:
    Unpickler = pickle.Unpickler
    Pickler = pickle.Pickler
    asbytes = str

# 1 MiB, used to size the in-memory compression cache.
_MEGA = 2 ** 20
# Fixed width of the hex-encoded length header in Z-files.
_MAX_LEN = len(hex(2 ** 64))

# To detect file types
_ZFILE_PREFIX = asbytes('ZF')
###############################################################################
# Compressed file with Zlib
def _read_magic(file_handle):
    """Return the leading magic bytes of *file_handle*, identifying Z-files.

    The stream is rewound to position 0 afterwards because the pickling
    machinery needs to read the file from its beginning.
    """
    signature = file_handle.read(len(_ZFILE_PREFIX))
    file_handle.seek(0)
    return signature
def read_zfile(file_handle):
    """Read the z-file and return the content as a string

    Z-files are raw data compressed with zlib used internally by joblib
    for persistence. Backward compatibility is not guaranteed. Do not
    use for external purposes.
    """
    file_handle.seek(0)
    assert _read_magic(file_handle) == _ZFILE_PREFIX, \
        "File does not have the right magic"
    # The header after the magic is the uncompressed size as fixed-width hex.
    prefix_len = len(_ZFILE_PREFIX)
    header = file_handle.read(prefix_len + _MAX_LEN)
    expected_size = int(header[prefix_len:], 16)
    # Telling zlib the final size up front lets it allocate the output
    # buffer in one go.
    payload = zlib.decompress(file_handle.read(), 15, expected_size)
    assert len(payload) == expected_size, (
        "Incorrect data length while decompressing %s."
        "The file could be corrupted." % file_handle)
    return payload
def write_zfile(file_handle, data, compress=1):
    """Write the data in the given file as a Z-file.

    Z-files are raw data compressed with zlib used internally by joblib
    for persistence. Backward compatibility is not guarantied. Do not
    use for external purposes.

    :param file_handle: binary file-like object open for writing
    :param data: bytes (or latin1-encodable text) payload to store
    :param compress: zlib compression level, 0-9
    """
    file_handle.write(_ZFILE_PREFIX)
    # Store the uncompressed length as a fixed-width hex header so that
    # read_zfile can pre-allocate the decompression buffer.
    length = hex(len(data))
    # Python 2 renders long integers as e.g. '0x10L'; strip the suffix so
    # the header parses with int(.., 16).  (The original guard
    # `type(length) is long` could never be true: hex() returns a str.)
    if length.endswith('L'):
        length = length[:-1]
    file_handle.write(asbytes(length.ljust(_MAX_LEN)))
    file_handle.write(zlib.compress(asbytes(data), compress))
###############################################################################
# Utility objects for persistence.
class NDArrayWrapper(object):
    """ An object to be persisted instead of numpy arrays.

        The only thing this object does, is to carry the filename in which
        the array has been persisted, and the array subclass.
    """
    def __init__(self, filename, subclass):
        "Store the useful information for later"
        # filename is relative to the directory of the main pickle file.
        self.filename = filename
        self.subclass = subclass

    def read(self, unpickler):
        "Reconstruct the array"
        filename = os.path.join(unpickler._dirname, self.filename)
        # Load the array from the disk
        np_ver = [int(x) for x in unpickler.np.__version__.split('.', 2)[:2]]
        if np_ver >= [1, 3]:
            array = unpickler.np.load(filename,
                                      mmap_mode=unpickler.mmap_mode)
        else:
            # Numpy does not have mmap_mode before 1.3
            array = unpickler.np.load(filename)
        # Reconstruct subclasses. This does not work with old
        # versions of numpy
        if (hasattr(array, '__array_prepare__')
                and not self.subclass in (unpickler.np.ndarray,
                                          unpickler.np.memmap)):
            # We need to reconstruct another subclass
            new_array = unpickler.np.core.multiarray._reconstruct(
                self.subclass, (0,), 'b')
            new_array.__array_prepare__(array)
            array = new_array
        return array

    #def __reduce__(self):
    #    return None
class ZNDArrayWrapper(NDArrayWrapper):
    """An object to be persisted instead of numpy arrays.

    This object store the Zfile filename in which
    the data array has been persisted, and the meta information to
    retrieve it.

    The reason that we store the raw buffer data of the array and
    the meta information, rather than array representation routine
    (tostring) is that it enables us to use completely the strided
    model to avoid memory copies (a and a.T store as fast). In
    addition saving the heavy information separately can avoid
    creating large temporary buffers when unpickling data with
    large arrays.
    """
    def __init__(self, filename, init_args, state):
        "Store the useful information for later"
        # init_args/state come from array.__reduce__(), minus the raw buffer.
        self.filename = filename
        self.state = state
        self.init_args = init_args

    def read(self, unpickler):
        "Reconstruct the array from the meta-information and the z-file"
        # Here we a simply reproducing the unpickling mechanism for numpy
        # arrays
        filename = os.path.join(unpickler._dirname, self.filename)
        array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
        with open(filename, 'rb') as f:
            data = read_zfile(f)
        # Re-append the raw buffer as the last element of the state tuple,
        # the layout __setstate__ expects.
        state = self.state + (data,)
        array.__setstate__(state)
        return array
###############################################################################
# Pickler classes
class NumpyPickler(Pickler):
    """A pickler to persist of big data efficiently.

        The main features of this object are:

         * persistence of numpy arrays in separate .npy files, for which
           I/O is fast.

         * optional compression using Zlib, with a special care on avoid
           temporaries.
    """

    def __init__(self, filename, compress=0, cache_size=10):
        self._filename = filename
        self._filenames = [filename, ]
        self.cache_size = cache_size
        self.compress = compress
        # Without compression, pickle straight to disk; with compression,
        # accumulate in memory and compress once in close().
        if not self.compress:
            self.file = open(filename, 'wb')
        else:
            self.file = BytesIO()
        # Count the number of npy files that we have created:
        self._npy_counter = 0
        Pickler.__init__(self, self.file,
                         protocol=pickle.HIGHEST_PROTOCOL)
        # delayed import of numpy, to avoid tight coupling
        try:
            import numpy as np
        except ImportError:
            np = None
        self.np = np

    def _write_array(self, array, filename):
        # Persist *array* as a companion file and return a lightweight
        # wrapper object to pickle in its place, plus the file name used.
        if not self.compress:
            self.np.save(filename, array)
            container = NDArrayWrapper(os.path.basename(filename),
                                       type(array))
        else:
            filename += '.z'
            # Efficient compressed storage:
            # The meta data is stored in the container, and the core
            # numerics in a z-file
            _, init_args, state = array.__reduce__()
            # the last entry of 'state' is the data itself
            with open(filename, 'wb') as zfile:
                write_zfile(zfile, state[-1], compress=self.compress)
            state = state[:-1]
            container = ZNDArrayWrapper(os.path.basename(filename),
                                        init_args, state)
        return container, filename

    def save(self, obj):
        """ Subclass the save method, to save ndarray subclasses in npy
            files, rather than pickling them. Of course, this is a
            total abuse of the Pickler class.
        """
        if self.np is not None and type(obj) in (self.np.ndarray,
                                                 self.np.matrix,
                                                 self.np.memmap):
            size = obj.size * obj.itemsize
            if self.compress and size < self.cache_size * _MEGA:
                # When compressing, as we are not writing directly to the
                # disk, it is more efficient to use standard pickling
                if type(obj) is self.np.memmap:
                    # Pickling doesn't work with memmaped arrays
                    obj = self.np.asarray(obj)
                return Pickler.save(self, obj)
            self._npy_counter += 1
            try:
                filename = '%s_%02i.npy' % (self._filename,
                                            self._npy_counter)
                # This converts the array in a container
                obj, filename = self._write_array(obj, filename)
                self._filenames.append(filename)
            except:
                # Fall back to regular pickling of the array below.
                self._npy_counter -= 1
                # XXX: We should have a logging mechanism
                print('Failed to save %s to .npy file:\n%s' % (
                    type(obj),
                    traceback.format_exc()))
        # Pickle either the original object or its wrapper container.
        return Pickler.save(self, obj)

    def close(self):
        # Flush the in-memory pickle stream to disk as a compressed Z-file.
        if self.compress:
            with open(self._filename, 'wb') as zfile:
                write_zfile(zfile, self.file.getvalue(), self.compress)
class NumpyUnpickler(Unpickler):
    """A subclass of the Unpickler to unpickle our numpy pickles.
    """
    # Copy the opcode dispatch table so patching BUILD below does not
    # affect the base Unpickler.
    dispatch = Unpickler.dispatch.copy()

    def __init__(self, filename, file_handle, mmap_mode=None):
        self._filename = os.path.basename(filename)
        # Companion .npy/.z files are resolved relative to this directory.
        self._dirname = os.path.dirname(filename)
        self.mmap_mode = mmap_mode
        self.file_handle = self._open_pickle(file_handle)
        Unpickler.__init__(self, self.file_handle)
        try:
            import numpy as np
        except ImportError:
            np = None
        self.np = np

    def _open_pickle(self, file_handle):
        # Hook for subclasses (e.g. decompression); identity by default.
        return file_handle

    def load_build(self):
        """ This method is called to set the state of a newly created
            object.

            We capture it to replace our place-holder objects,
            NDArrayWrapper, by the array we are interested in. We
            replace them directly in the stack of pickler.
        """
        Unpickler.load_build(self)
        if isinstance(self.stack[-1], NDArrayWrapper):
            if self.np is None:
                raise ImportError('Trying to unpickle an ndarray, '
                                  "but numpy didn't import correctly")
            # Swap the wrapper for the real array it points to.
            nd_array_wrapper = self.stack.pop()
            array = nd_array_wrapper.read(self)
            self.stack.append(array)

    # Be careful to register our new method.
    if sys.version_info[0] >= 3:
        # On Python 3 dispatch keys are integer opcode values.
        dispatch[pickle.BUILD[0]] = load_build
    else:
        dispatch[pickle.BUILD] = load_build
class ZipNumpyUnpickler(NumpyUnpickler):
    """A subclass of our Unpickler to unpickle on the fly from
    compressed storage."""

    def __init__(self, filename, file_handle):
        # mmap_mode is forced off: memory-mapping cannot apply to data that
        # must first be decompressed.
        NumpyUnpickler.__init__(self, filename,
                                file_handle,
                                mmap_mode=None)

    def _open_pickle(self, file_handle):
        # Decompress the whole Z-file into memory and unpickle from there.
        return BytesIO(read_zfile(file_handle))
###############################################################################
# Utility functions
def dump(value, filename, compress=0, cache_size=100):
    """Fast persistence of an arbitrary Python object into a files, with
    dedicated storage for numpy arrays.

    Parameters
    -----------
    value: any Python object
        The object to store to disk
    filename: string
        The name of the file in which it is to be stored
    compress: integer for 0 to 9, optional
        Optional compression level for the data. 0 is no compression.
        Higher means more compression, but also slower read and
        write times. Using a value of 3 is often a good compromise.
        See the notes for more details.
    cache_size: positive number, optional
        Fixes the order of magnitude (in megabytes) of the cache used
        for in-memory compression. Note that this is just an order of
        magnitude estimate and that for big arrays, the code will go
        over this value at dump and at load time.

    Returns
    -------
    filenames: list of strings
        The list of file names in which the data is stored. If
        compress is false, each array is stored in a different file.

    See Also
    --------
    joblib.load : corresponding loader

    Notes
    -----
    Memmapping on load cannot be used for compressed files. Thus
    using compression can significantly slow down loading. In
    addition, compressed files take extra extra memory during
    dump and load.

    """
    if compress is True:
        # By default, if compress is enabled, we want to be using 3 by
        # default
        compress = 3
    if not isinstance(filename, _basestring):
        # People keep inverting arguments, and the resulting error is
        # incomprehensible
        raise ValueError(
            'Second argument should be a filename, %s (type %s) was given'
            % (filename, type(filename))
        )
    try:
        pickler = NumpyPickler(filename, compress=compress,
                               cache_size=cache_size)
        pickler.dump(value)
        pickler.close()
    finally:
        # 'pickler' may not exist if NumpyPickler.__init__ itself raised
        # (e.g. the file could not be opened), hence the locals() check.
        if 'pickler' in locals() and hasattr(pickler, 'file'):
            pickler.file.flush()
            pickler.file.close()
    return pickler._filenames
def load(filename, mmap_mode=None):
    """Reconstruct a Python object from a file persisted with joblib.dump.

    Parameters
    -----------
    filename: string
        The name of the file from which to load the object
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, the arrays are memory-mapped from the disk. This
        mode has no effect for compressed files. Note that in this
        case the reconstructed object might not longer match exactly
        the originally pickled object.

    Returns
    -------
    result: any Python object
        The object stored in the file.

    See Also
    --------
    joblib.dump : function to save an object

    Notes
    -----

    This function can load numpy array files saved separately during the
    dump. If the mmap_mode argument is given, it is passed to np.load and
    arrays are loaded as memmaps. As a consequence, the reconstructed
    object might not match the original pickled object. Note that if the
    file was saved with compression, the arrays cannot be memmaped.
    """
    with open(filename, 'rb') as file_handle:
        # We are careful to open the file handle early and keep it open to
        # avoid race-conditions on renames. That said, if data are stored in
        # companion files, moving the directory will create a race when
        # joblib tries to access the companion files.
        if _read_magic(file_handle) == _ZFILE_PREFIX:
            # Compressed storage: mmap is impossible on decompressed data.
            if mmap_mode is not None:
                warnings.warn('file "%(filename)s" appears to be a zip, '
                              'ignoring mmap_mode "%(mmap_mode)s" flag passed'
                              % locals(), Warning, stacklevel=2)
            unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)
        else:
            unpickler = NumpyUnpickler(filename, file_handle=file_handle,
                                       mmap_mode=mmap_mode)

        try:
            obj = unpickler.load()
        finally:
            # ZipNumpyUnpickler creates its own in-memory handle; make sure
            # it is released even if unpickling fails.
            if hasattr(unpickler, 'file_handle'):
                unpickler.file_handle.close()
        return obj
| [
"ramaditya.danbrown@gmail.com"
] | ramaditya.danbrown@gmail.com |
4f03c3470bbba0f7c758d20b10865ae84540daca | 5a9d8c64c6478f3816b63f59f1cdaca73c0848eb | /pythonNet/ex11_re/regex.py | e958d05f261053f72450c4cb97517e4327dd014a | [] | no_license | wangredfei/nt_py | f68134977e6d1e05cf17cec727644509f084c462 | fedf03c0d52565f588e9b342d1c51df0b6dc2681 | refs/heads/master | 2020-04-08T07:55:08.302589 | 2018-11-23T09:53:48 | 2018-11-23T09:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | import re
# Demo of the `re` module: findall with position limits, split and subn.
s = '2008年发生了很多大事,08奥运,512地震'
s2 = "zhang:1994 li:1993"
'''
pattern = r'(\w)+:(\d+)'
# [('g', '1994'), ('i', '1993')]
pattern = r'(\w+)+:(\d+)'
# [('zhang', '1994'), ('li', '1993')]
l = re.findall(pattern,s2)
print(l)
'''
# Find runs of digits, restricted to positions 0..19 of s.
pattern = r'\d+'
regex = re.compile(pattern)
l = regex.findall(s,0,19)
print(l)
# Split on whitespace runs.
l = re.split(r'\s+',"Hello wo15 N44ao Beijing")
print(l)
# Replace at most one whitespace run; subn returns (new_string, count).
s = re.subn(r'\s+','##','hello word haha',1)
print(s)
| [
"289498360@qq.com"
] | 289498360@qq.com |
f98515e8184c3727c5d871f06b0705370ea3bfc8 | ce074998469af446e33d0fab7adb01320ccc77ed | /dst_procedures/Execute command writing output to local Admin Share.py | e8d4822c286ecd38edd7c220027f7a2e6937af2e | [] | no_license | parahaoer/detection_rules | 1341063568b0ccfa180da129a29aeec0a62e679e | c9f3408eccbcb4b61d1d441af31839872f9bb26c | refs/heads/master | 2023-02-09T13:54:40.254874 | 2020-12-28T09:25:31 | 2020-12-28T09:25:31 | 265,990,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | {"query": {"constant_score": {"filter": {"bool": {"must": [{"bool": {"must": [{"match_phrase": {"event_id": "5140"}}, {"match_phrase": {"share_name": "Admin$"}}]}}, {"bool": {"must_not": [{"bool": {"must": [{"wildcard": {"user_name.keyword": "*$"}}]}}]}}]}}}}}
# MITRE ATT&CK-style metadata describing this detection rule; the
# Elasticsearch query dict above is the rule body.
tactic = "Lateral Movement"
technique = "Windows Admin Shares"
procedure = "Execute command writing output to local Admin Share"
tech_code = "T1077"
| [
"33771109+parahaoer@users.noreply.github.com"
] | 33771109+parahaoer@users.noreply.github.com |
2223b95c0de46b2364e258f55bd107d6d44603c1 | f9529fb21c8c3bcd65d392b018b4885141c17376 | /moneymap/core.py | 7e5be0fd7e02a3b3924e0c95ed610eafce16fecc | [
"MIT"
] | permissive | kobiluria/MoneyMap | 3a71cd0a271f610ce16d0fb6c0e6dcfd9e432e3a | 78f329775df8695dbc15c8f04b5890b625663bf3 | refs/heads/master | 2021-01-10T19:58:03.726726 | 2014-04-14T17:35:55 | 2014-04-14T17:35:55 | 18,042,079 | 1 | 1 | null | 2014-03-28T06:04:44 | 2014-03-23T19:51:34 | null | UTF-8 | Python | false | false | 67 | py | from flask.ext.mongoengine import MongoEngine
db = MongoEngine()
| [
"paulywalsh@gmail.com"
] | paulywalsh@gmail.com |
9b83b8e8c296e4803184fccdac2033c1537c7f51 | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /nox_mesh_4_loop_repro_w_3_retries/interreplay_91_l.2/replay_config.py | c00a845bedd714e8a3156ea7c049b6468c541d45 | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
# STS replay configuration: a 4-switch mesh driven by a single NOX
# controller listening on ptcp:6635.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./nox_core -v -i ptcp:6635 routing', address='127.0.0.1', port=6635, cwd='nox_classic/build/src')],
                     topology_class=MeshTopology,
                     topology_params="num_switches=4",
                     patch_panel_class=BufferedPatchPanel,
                     multiplex_sockets=False)
# Replay the recorded event trace without waiting on deterministic values.
control_flow = Replayer(simulation_config, "experiments/nox_mesh_4_loop_repro_w_3_retries/interreplay_91_l.2/events.trace",
                        wait_on_deterministic_values=False)
# Invariant check: 'None'
| [
"cs@cs.berkeley.edu"
] | cs@cs.berkeley.edu |
2f551310aef9f44360448ad6ae031ec388a40da1 | 242733887bf50ffc91b2e305dfbea268fdfebae0 | /Regex/parseBytes.py | bdb567c5b6494ff4034cf86aff518e00bfb34981 | [] | no_license | aryabiju37/Python-mini-Proects | c2e4f59bf309b3d7e7696dfe92fb6ff63790114a | a84e03702665cf2e06a16637cfe24a6697163894 | refs/heads/master | 2023-07-25T18:56:54.649800 | 2021-08-27T06:49:32 | 2021-08-27T06:49:32 | 400,420,845 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | import re
def parse_bytes(inputStream):
    """Return every run of at least four consecutive '0'/'1' characters
    found in *inputStream*, in order of appearance."""
    return re.findall(r'[0-1]{4,}', inputStream)


# Quick demonstration on a few sample strings.
print(parse_bytes("11010101 101 323"))
print(parse_bytes("my data is: 10101010 11100010"))
print(parse_bytes("asdsa"))
| [
"riyabee123@gmail.com"
] | riyabee123@gmail.com |
1c17c87fa534798f87e424fc1c749678ab1f35ab | 17d23f404a20c34a406dd086b0a89f956c4ecac0 | /Django-Tutorials/accounts/migrations/0008_auto_20190305_0920.py | aedc1d26206c204d22e2425f605c4b62f37425b5 | [] | no_license | apabhishek178/ieltsonline | 69df682862d96bc04b318262e962e22a0919fe88 | 42061efa8293c948342a670f0a62c90d3b31ebff | refs/heads/master | 2020-04-26T09:19:55.712217 | 2019-03-20T13:36:31 | 2019-03-20T13:36:31 | 173,451,873 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-03-05 03:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: make UserProfile.website a non-null CharField."""

    dependencies = [
        ('accounts', '0007_userprofile_organisation'),
    ]

    # Redefine the field with an empty-string default and 100-char limit.
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='website',
            field=models.CharField(default='', max_length=100),
        ),
    ]
| [
"apabhishek178@gmail.com"
] | apabhishek178@gmail.com |
dde041f8735a4a188e6da4d287558879d50e5e3a | 5730110af5e4f0abe538ed7825ddd62c79bc3704 | /pacu/pacu/core/svc/vstim/stimulus/position_tuple.py | a16384e53fbb82b460e1b846638146f31903f41b | [] | no_license | jzeitoun/pacu-v2 | bdbb81def96a2d87171ca20b89c878b2f66975e7 | 0ccb254a658263b4fe8c80ea623f860cb7dc1428 | refs/heads/master | 2021-06-03T18:50:50.890399 | 2020-04-27T16:31:59 | 2020-04-27T16:31:59 | 110,889,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from pacu.util.spec.list import FloatListSpec
from pacu.core.svc.impl.pacu_attr import PacuAttr
from pacu.core.svc.impl.ember_attr import EmberAttr
class PositionTuple(PacuAttr, FloatListSpec):
    """Visual-stimulus position attribute rendered as a 2-float array input
    in the Ember UI (degrees of visual angle, per the description text)."""
    # Ember component used to edit this attribute in the front end.
    component = 'x-svc-comp-input-array'
    description = EmberAttr('2 floats in deg')
    placeholder = EmberAttr('')
    title = EmberAttr('Position')
    tooltip = EmberAttr('')
| [
"jzeitoun@uci.edu"
] | jzeitoun@uci.edu |
675d92022d928d36636dbf7fb19c6956b8fcde33 | 6039142144cb221f04e29e2c7359dc5bed7bb830 | /atividade06/model/__init__.py | abc61a6a013de4d5b185c09456ed6bbd7e291e45 | [
"Apache-2.0"
] | permissive | Yuri-Santiago/yuri-mateus-poo-python-ifce-p7 | f5f245345c38b1e08a1ce6d142204b30868023d0 | edbf0e945e01430eb14dff3c0c7806582430d1c2 | refs/heads/master | 2023-06-12T18:48:08.950423 | 2021-07-09T17:04:17 | 2021-07-09T17:04:17 | 349,737,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | from atividade06.model.cliente import Cliente
from atividade06.model.produto import Produto
from atividade06.model.notafiscal import NotaFiscal
from atividade06.model.itemnotafiscal import ItemNotaFiscal
# In-memory "database" of sample records (comments translated to English).
# Clients
cliente1 = Cliente(1, "Yuri Mateus", 100, '200.100.345-34', 'pessoa fisica')
cliente2 = Cliente(2, "Raquel Maciel", 200, '123.456.789-10', 'pessoa fisica')
cliente3 = Cliente(3, "Israel Leite", 300, '109.876.543-21', 'pessoa fisica')
clientes = [cliente1, cliente2, cliente3]
# Products
produto1 = Produto(1, 100, 'Arroz', 5.5)
produto2 = Produto(2, 200, 'Feijao', 4.5)
produto3 = Produto(3, 300, 'Batata', 6)
produtos = [produto1, produto2, produto3]
# Invoices
notafiscal1 = NotaFiscal(1, 100, cliente1)
notafiscal2 = NotaFiscal(2, 200, cliente2)
notafiscal3 = NotaFiscal(3, 300, cliente3)
notas = [notafiscal1, notafiscal2, notafiscal3]
# Invoice line items
item1 = ItemNotaFiscal(1, 1, 6, produto1)
item2 = ItemNotaFiscal(2, 1, 8, produto1)
item3 = ItemNotaFiscal(3, 2, 5, produto2)
item4 = ItemNotaFiscal(4, 1, 10, produto1)
item5 = ItemNotaFiscal(5, 2, 4, produto2)
item6 = ItemNotaFiscal(6, 3, 7, produto3)
itens = [item1, item2, item3, item4, item5, item6]
# Attach the line items to their invoices
notafiscal1.adicionarItem(item1)
notafiscal2.adicionarItem(item2)
notafiscal2.adicionarItem(item3)
notafiscal3.adicionarItem(item4)
notafiscal3.adicionarItem(item5)
notafiscal3.adicionarItem(item6)
| [
"yurimateussantiago@gmail.com"
] | yurimateussantiago@gmail.com |
dd97a9756058e4c5df2ac0b5327245d868e532b0 | fe856f232f21ee5f1b45d4c7c19d062b3b3261bc | /pyfr/solvers/navstokes/elements.py | 5c6de3d59176f9693955284cf792aadd3e4a498a | [
"CC-BY-4.0",
"BSD-3-Clause"
] | permissive | bartwozniak/PyFR | 5589c36e7dc0dcac9a7aed7c69c8964bda2c55d8 | d99120c1db245c7a2a35c72dae51ea72c49efef5 | refs/heads/master | 2021-01-20T16:03:21.409981 | 2013-12-05T18:08:48 | 2013-12-05T18:08:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | # -*- coding: utf-8 -*-
from pyfr.solvers.baseadvecdiff import BaseAdvectionDiffusionElements
from pyfr.solvers.euler.elements import BaseFluidElements
class NavierStokesElements(BaseFluidElements, BaseAdvectionDiffusionElements):
    """Element container for the compressible Navier-Stokes solver."""

    def set_backend(self, backend, nscalupts):
        # Let the base classes allocate their storage first, then register
        # the viscous transformed-flux pointwise kernel with the backend.
        super(NavierStokesElements, self).set_backend(backend, nscalupts)

        backend.pointwise.register('pyfr.solvers.navstokes.kernels.tflux')

    def get_tdisf_upts_kern(self):
        # Template arguments for the tflux kernel: dimensionality, number of
        # conserved variables and the physical constants from the config.
        tplargs = dict(ndims=self.ndims, nvars=self.nvars,
                       c=self._cfg.items_as('constants', float))

        # Kernel computing the transformed flux at the solution points.
        return self._be.kernel('tflux', tplargs, dims=[self.nupts, self.neles],
                               u=self.scal_upts_inb, smats=self._smat_upts,
                               f=self._vect_upts[0])
| [
"freddie@witherden.org"
] | freddie@witherden.org |
6c2149f2e2e124af3166ea13c3d9579008857761 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /120_design_patterns/003_factories/examples/factory/Factory/autos/jeepsahara.py | 2deb7ee4bf417e533292c263531291cac3658182 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 227 | py | from .abs_auto import AbsAuto
class JeepSahara(AbsAuto):
def __init__(self, name):
self._name = name
def start(self):
print('%s running ruggedly.' % self.name)
def stop(self):
print('%s shutting down.' % self.name) | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
6a9ea93a8f6c1c2225a44921839604c2585cc1a9 | e2afe7e135d4cf68fb88cbccf59a782c986fb171 | /getTwits.py | fac1fd1a4f85c8576b00c655b35f4bfee464c1f3 | [] | no_license | BUEC500C1/video-AIRICLEE | b09368ef9543c5f1599f80e5f21e7a0556a1c552 | f2a4ac0d8ce000673c1fe5e163eb841b56bab0c9 | refs/heads/master | 2020-12-29T09:30:30.298871 | 2020-02-25T01:29:33 | 2020-02-25T01:29:33 | 238,556,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,069 | py | import tweepy #https://github.com/tweepy/tweepy
import os
from PIL import Image, ImageFont, ImageDraw
import twitterAPIKey
def getTwitsFeed():
    """Search Twitter for the configured query and return the tweet texts.

    Credentials and the search keyword are read from the twitterAPIKey
    module. Returns a list of tweet text strings (English tweets only).
    """
    #Twitter API credentials
    consumer_key = twitterAPIKey.consumer_key
    consumer_secret = twitterAPIKey.consumer_secret
    access_key = twitterAPIKey.access_key
    access_secret = twitterAPIKey.access_secret
    # OAuth authentication against the Twitter API
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # key word to look for twitter
    query = twitterAPIKey.query
    language = "en"
    # Run the search and collect the raw text of each result.
    results = api.search(q=query, lang=language)
    list_tweet = []
    for tweet in results:
        list_tweet.append(tweet.text)
    return list_tweet
def transTwits2Image(list_tweet):
    """Render each tweet text onto a white 1080x720 PNG.

    Images are written to images/t0.png, images/t1.png, ... in the order
    of *list_tweet*. Requires the hard-coded TTF font to exist on disk.
    """
    # transform text to image
    i = 0
    for tweet in list_tweet:
        text = tweet
        im = Image.new('RGB',(1080,720),(255,255,255))
        dr = ImageDraw.Draw(im)
        # NOTE(review): absolute font path; rendering fails on machines
        # without this font installed.
        ttf='/usr/share/fonts/truetype/myfonts/puhui.ttf'
        font=ImageFont.truetype(ttf, 12)
        dr.text((10, 5), text, font=font, fill="#000000")
        # im.show()
        im.save("images/t%d.png"%i)
        i = i + 1;
def transImage2Video(usrName):
with open('videos.txt', 'w') as f:
for i in range(12):
command = "ffmpeg -loglevel quiet -y -ss 0 -t 3 -f lavfi -i color=c=0x000000:s=830x794:r=30 " \
"-i /home/lighao/EC500/assignment_3/images/t" + str(i+1) \
+ ".png -filter_complex \"[1:v]scale=830:794[v1];[0:v][v1]overlay=0:0[outv]\" " \
"-map [outv] -c:v libx264 /home/lighao/EC500/assignment_3/video" \
+ str(i+1) + ".mp4 -y"
p = os.popen(command)
p.close()
f.write("file video" + str(i+1) + ".mp4" + '\n')
f.close()
cd = "ffmpeg -loglevel quiet -y -f concat -i videos.txt -c copy OutputVideo" + "test" + usrName + ".mp4"
pp = os.popen(cd)
pp.close()
def getResult(usrName):
list_tweet = getTwitsFeed()
transTwits2Image(list_tweet)
transImage2Video(usrName)
if __name__ == '__main__':
getResult()
| [
"noreply@github.com"
] | BUEC500C1.noreply@github.com |
cf52fd255745206eefa21f318a7747bf99f10b47 | 63f9a0d150cbef75f4e6e8246dc7ecac3f3b6d09 | /rllib/agents/a3c/a2c.py | 0a71a359c014b6d352bd73ae29c0b0ff8ea4351d | [
"Apache-2.0",
"MIT"
] | permissive | ray-project/maze-raylit | 79f0a5af9fe4bdc13a2d5b3919da867ed5439aab | a03cd14a50d87d58effea1d749391af530d7609c | refs/heads/master | 2023-01-23T04:23:35.178501 | 2020-12-04T22:34:14 | 2020-12-04T22:34:14 | 318,274,659 | 5 | 0 | Apache-2.0 | 2020-12-04T22:34:15 | 2020-12-03T17:47:58 | Python | UTF-8 | Python | false | false | 2,487 | py | import math
from ray.rllib.agents.a3c.a3c import DEFAULT_CONFIG as A3C_CONFIG, \
validate_config, get_policy_class
from ray.rllib.agents.a3c.a3c_tf_policy import A3CTFPolicy
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.execution.metric_ops import StandardMetricsReporting
from ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches
from ray.rllib.execution.train_ops import ComputeGradients, AverageGradients, \
ApplyGradients, TrainOneStep
from ray.rllib.utils import merge_dicts
A2C_DEFAULT_CONFIG = merge_dicts(
A3C_CONFIG,
{
"rollout_fragment_length": 20,
"min_iter_time_s": 10,
"sample_async": False,
# A2C supports microbatching, in which we accumulate gradients over
# batch of this size until the train batch size is reached. This allows
# training with batch sizes much larger than can fit in GPU memory.
# To enable, set this to a value less than the train batch size.
"microbatch_size": None,
},
)
def execution_plan(workers, config):
rollouts = ParallelRollouts(workers, mode="bulk_sync")
if config["microbatch_size"]:
num_microbatches = math.ceil(
config["train_batch_size"] / config["microbatch_size"])
# In microbatch mode, we want to compute gradients on experience
# microbatches, average a number of these microbatches, and then apply
# the averaged gradient in one SGD step. This conserves GPU memory,
# allowing for extremely large experience batches to be used.
train_op = (
rollouts.combine(
ConcatBatches(min_batch_size=config["microbatch_size"]))
.for_each(ComputeGradients(workers)) # (grads, info)
.batch(num_microbatches) # List[(grads, info)]
.for_each(AverageGradients()) # (avg_grads, info)
.for_each(ApplyGradients(workers)))
else:
# In normal mode, we execute one SGD step per each train batch.
train_op = rollouts \
.combine(ConcatBatches(
min_batch_size=config["train_batch_size"])) \
.for_each(TrainOneStep(workers))
return StandardMetricsReporting(train_op, workers, config)
A2CTrainer = build_trainer(
name="A2C",
default_config=A2C_DEFAULT_CONFIG,
default_policy=A3CTFPolicy,
get_policy_class=get_policy_class,
validate_config=validate_config,
execution_plan=execution_plan)
| [
"noreply@github.com"
] | ray-project.noreply@github.com |
7472d35af0f5726c029f1b53e6644e87d50bde12 | 9c47c55873e88d747bccb397c4b8197f42317c99 | /main.py | 8e86847052413cc452bb67b2ca961a4967c68a4a | [] | no_license | balubankudi/LinkedIn-Course | 29eefc7a86df3be5dc1d1b2953b9e1ef6077a158 | 26907301529db251d048f3b5ae129ab445c8bf6e | refs/heads/master | 2022-11-18T08:38:15.960284 | 2020-06-23T17:45:37 | 2020-06-23T17:45:37 | 274,471,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,049 | py |
x = 100
y = 42
if x == 5:
print("do five stuff")
elif x == 6:
print("do six stuff")
else:
print("do something else")
# Copyright 2009-2017 BHG http://bw.org/
words = ['one', 'two', 'three', 'four', 'five']
n = 0
while(n < 5):
print(words[n])
n += 1
# Copyright 2009-2017 BHG http://bw.org/
words = ['one', 'two', 'three', 'four', 'five']
for i in words:
print(i)
# simple fibonacci series
# the sum of two elements defines the next set
a, b = 0, 1
while b < 1000:
print(b, end=' ', flush=True)
a, b = b, a + b
print() # line ending
# Copyright 2009-2017 BHG http://bw.org/
def function(n):
print(n)
function(12)
#!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
def isprime(n):
if n <= 1:
return False
for x in range(2, n):
if n % x == 0:
return False
else:
return True
#in line 62 i don´t understand the use of Flush
def list_primes():
for n in range(100):
if isprime(n):
print(n,end=" ",flush=True)
print()
list_primes()
n = 6
if isprime(n):
print(f'{n} is prime')
else:
print(f'{n} not prime')
#defining even numbers
def iseven(n):
if n <= 1:
return False
for x in range(2, n):
if n % x == 2:
return False
else:
return True
def list_even():
for n in range(100):
if iseven(n):
print(n,end=" ",flush=True)
print()
list_even()
# knowing format
x = 7.8
print('x is {}'.format(x))
print(type(x))
x = [ 1, 2, 3, 4, 5 ]
x[2] = 42
#eventhough initially the amount of x[2] is supposed to be 3, it is changed in the next line
for i in x:
print('i is {}'.format(i))
#a tuple is like a list except that it is inmutable so we use parenthesis instead of brackets. So the x[2]= 42 will not execute since the initially list is inmutable. See what happens next. it gives error. Conclusion: for inmutable use parenthesis over brackets
#x = ( 1, 2, 3, 4, 5 )
#x[2] = 42
#for i in x:
# print('i is {}'.format(i))
#print a list of even numbers from 1 to 100 and later add them"
x = range (2, 101, 2)
for i in x:
print(i)
x = range (2, 101, 2)
sum = 0
for i in x:
sum += i
print(sum)
#defining even numbers type 2
def iseven(n):
if n <= 1:
return False
for x in range(1, n):
if n % 2 == 0:
return True
else:
return False
def list_even():
for n in range(101):
if iseven(n):
print(n,end=" ",flush=True)
print()
list_even()
def list_evensum():
sum = 0
for n in range(101):
if iseven(n):
sum += n
print(sum)
list_evensum()
#Conditional
x = 2
if x == 0:
print('zero true')
elif x == 1:
print('one 1 true')
elif x == 2:
print('elif 2 true')
elif x == 3:
print('elif 3 true')
elif False:
print('elif true')
else:
print('neither true')
hungry = True
x = 'Feed the bear now!' if hungry else 'Do not feed the bear.'
print(x)
infectedwithcovid19 = True
x = 'stay at home!' if infectedwithcovid19 else 'still stay at home safe'
print(x)
#operators
x = 5
y = 3
z = x + y
z = -z
print(f'result is {z}')
#bitwise operators
x = 0x0a
y = 0x02
z = x | y
print(f'(hex) x is {x:02x}, y is {y:02x}, z is {z:02x}')
print(f'(bin) x is {x:08b}, y is {y:08b}, z is {z:08b}')
#boolean
a = True
b = False
x = ( 'bear', 'bunny', 'tree', 'sky', 'rain' )
y = 'bear'
if y in x:
print('expression is true')
else:
print('expression is false')
if y is x[0]:
print('expression is true')
else:
print('expression is false')
print(id(y))
print(id(x[0]))
#operator precedence: order in which orders are evaluates
#while loop
secret = 'swordfish'
pw = ''
#while pw != secret:
# pw = input("What's the secret word? ")
#for loop
animals = ( 'bear', 'bunny', 'dog', 'cat', 'velociraptor' )
for pet in animals:
print(pet)
animals = ( 'bear', 'bunny', 'dog', 'cat', 'velociraptor' )
for pet in range(5):
print(pet)
secret = 'swordfish'
pw = ''
auth = False
count = 0
max_attempt = 5
#while pw != secret:
#count += 1
#if count > max_attempt: break
#if count == 3: continue
#pw = input(f"{count}: What's the secret word?")
#else:
# auth = True
#print ("authorized" if auth else "Calling the FBI")
#normally the while function is built to be false and else is true
#defining functions
def main():
x = kitten(5, 6, 7)
print(x)
def kitten(a, b = 1, c = 0):
print ("Meow")
print (a, b, c)
if __name__ == '__main__': main()
#notabene: arguments without default should always be before arguement with default
def main():
x = 5
kitten (x)
print(f"in main x is {x}")
def kitten(a):
a = 3
print ("Meow")
print (a)
if __name__ == '__main__': main()
#this is what we call "call by value", and when you pass a variable to a f(x), the f(x) operates on a copy of the variable. the value is passed but not the object it self. A interger is not mutable,
#so this is important to understand: an integer is not mutable so it cannot change, so when you assign a new value to an integer, you're actually assigning an entirely different object to the name. The original integer is not changed, the name simply refers to a new object. Passing a value to a function acts exactly the same way. A reference to the object is passed and acts exactly like an assignment. So mutable objects may be changed, and those changes will be reflected in the caller. Immutable objects may not be changed. So function arguments in Python act exactly as assignments in Python, with all the quirks of Python's object model. For the most part, things will work as expected, but be careful with lists and other mutable objects.
#keyword arguments
def main():
kitten(Buffy = 'meow', Zilla = 'grr', Angel = 'rawr')
def kitten(**kwargs):
if len(kwargs):
for k in kwargs:
print('Kitten {} says {}'.format(k, kwargs[k]))
else: print('Meow.')
if __name__ == '__main__': main()
#GENERATORS: INCLUSIVE RANGE
def main():
for i in inclusive_range(25):
print(i, end = ' ')
print()
def inclusive_range(*args):
numargs = len(args)
start = 0
step = 1
# initialize parameters
if numargs < 1:
raise TypeError(f'expected at least 1 argument, got {numargs}')
elif numargs == 1:
stop = args[0]
elif numargs == 2:
(start, stop) = args
elif numargs == 3:
(start, stop, step) = args
else: raise TypeError(f'expected at most 3 arguments, got {numargs}')
# generator
i = start
while i <= stop:
yield i
i += step
if __name__ == '__main__': main()
def main():
seq = range(11)
print_list(seq)
def print_list(o):
for x in o: print(x, end = ' ')
print()
if __name__ == '__main__': main()
class RevStr(str):
def __str__(self):
return self[::-1]
def main():
hello = RevStr('Hello, World.')
print(hello)
if __name__ == '__main__': main()
print (chr(128406))
x = list(range(30))
print(x)
| [
"replituser@example.com"
] | replituser@example.com |
5377cede56b1c31c3c6e1d05f57ca234f77bbbf7 | 56afef87e593f4a09da95ebeceb3b04940d7069f | /Unified/stagor.py | 7efc149dc8a90ff89ef4b2494eeeff03c2cc2a03 | [] | no_license | lucacopa/WmAgentScripts | 8bf3d3377a84be6172eb2b13214d8e7b2596d130 | c6884f005574506c27b4a09099d987e70734c7e5 | refs/heads/master | 2021-01-18T07:21:55.940980 | 2015-07-13T19:16:48 | 2015-07-13T19:16:48 | 12,436,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,659 | py | #!/usr/bin/env python
from assignSession import *
from utils import checkTransferStatus, checkTransferApproval, approveSubscription, getWorkflowByInput
import sys
import itertools
import pprint
from htmlor import htmlor
def stagor(url,specific =None):
done_by_wf_id = {}
done_by_input = {}
completion_by_input = {}
good_enough = 100.0
for wfo in session.query(Workflow).filter(Workflow.status == 'staging').all():
## implement the grace period for by-passing the transfer.
pass
for transfer in session.query(Transfer).all():
if specific and str(transfer.phedexid)!=str(specific): continue
skip=True
for wfid in transfer.workflows_id:
tr_wf = session.query(Workflow).get(wfid)
if tr_wf:
if tr_wf.status == 'staging':
skip=False
break
if skip: continue
if transfer.phedexid<0: continue
## check the status of transfers
checks = checkTransferApproval(url, transfer.phedexid)
approved = all(checks.values())
if not approved:
print transfer.phedexid,"is not yet approved"
approveSubscription(url, transfer.phedexid)
continue
## check on transfer completion
checks = checkTransferStatus(url, transfer.phedexid, nocollapse=True)
if not specific:
for dsname in checks:
if not dsname in done_by_input: done_by_input[dsname]={}
if not dsname in completion_by_input: completion_by_input[dsname] = {}
done_by_input[dsname][transfer.phedexid]=all(map(lambda i:i>=good_enough, checks[dsname].values()))
completion_by_input[dsname][transfer.phedexid]=checks[dsname].values()
if checks:
print "Checks for",transfer.phedexid,[node.values() for node in checks.values()]
done = all(map(lambda i:i>=good_enough,list(itertools.chain.from_iterable([node.values() for node in checks.values()]))))
else:
## it is empty, is that a sign that all is done and away ?
print "ERROR with the scubscriptions API of ",transfer.phedexid
print "Most likely something else is overiding the transfer request. Need to work on finding the replacement automatically, if the replacement exists"
done = False
## the thing above is NOT giving the right number
#done = False
for wfid in transfer.workflows_id:
tr_wf = session.query(Workflow).get(wfid)
if tr_wf:# and tr_wf.status == 'staging':
if not tr_wf.id in done_by_wf_id: done_by_wf_id[tr_wf.id]={}
done_by_wf_id[tr_wf.id][transfer.phedexid]=done
if done:
## transfer.status = 'done'
print transfer.phedexid,"is done"
else:
print transfer.phedexid,"not finished"
pprint.pprint( checks )
#print done_by_input
print "\n----\n"
for dsname in done_by_input:
fractions = None
if dsname in completion_by_input:
fractions = itertools.chain.from_iterable([check.values() for check in completion_by_input.values()])
## the workflows in the waiting room for the dataset
using_its = getWorkflowByInput(url, dsname)
#print using_its
using_wfos = []
for using_it in using_its:
wf = session.query(Workflow).filter(Workflow.name == using_it).first()
if wf:
using_wfos.append( wf )
#need_sites = int(len(done_by_input[dsname].values())*0.7)+1
need_sites = len(done_by_input[dsname].values())
if need_sites > 10:
need_sites = int(need_sites/2.)
got = done_by_input[dsname].values().count(True)
if all([wf.status != 'staging' for wf in using_wfos]):
## not a single ds-using wf is in staging => moved on already
## just forget about it
print "presence of",dsname,"does not matter anymore"
print "\t",done_by_input[dsname]
print "\t",[wf.status for wf in using_wfos]
print "\tneeds",need_sites
continue #??
## should the need_sites reduces with time ?
# with dataset choping, reducing that number might work as a block black-list.
if all(done_by_input[dsname].values()):
print dsname,"is everywhere we wanted"
## the input dataset is fully transfered, should consider setting the corresponding wf to staged
for wf in using_wfos:
if wf.status == 'staging':
print wf.name,"is with us. setting staged and move on"
wf.status = 'staged'
session.commit()
elif fractions and len(list(fractions))>1 and set(fractions)==1:
print dsname,"is everywhere at the same fraction"
print "We do not want this in the end. we want the data we asked for"
continue
## the input dataset is fully transfered, should consider setting the corresponding wf to staged
for wf in using_wfos:
if wf.status == 'staging':
print wf.name,"is with us everywhere the same. setting staged and move on"
wf.status = 'staged'
session.commit()
elif got >= need_sites:
print dsname,"is almost everywhere we wanted"
#print "We do not want this in the end. we want the data we asked for"
#continue
## the input dataset is fully transfered, should consider setting the corresponding wf to staged
for wf in using_wfos:
if wf.status == 'staging':
print wf.name,"is almost with us. setting staged and move on"
wf.status = 'staged'
session.commit()
else:
print dsname
print "\t",done_by_input[dsname]
print "\tneeds",need_sites
print "\tgot",got
for wfid in done_by_wf_id:
#print done_by_wf_id[wfid].values()
## ask that all related transfer get into a valid state
if all(done_by_wf_id[wfid].values()):
pass
#tr_wf = session.query(Workflow).get(wfid)
#print "setting",tr_wf.name,"to staged"
#tr_wf.status = 'staged'
#session.commit()
if __name__ == "__main__":
url = 'cmsweb.cern.ch'
spec=None
if len(sys.argv)>1:
spec = sys.argv[1]
stagor(url, spec)
htmlor()
| [
"vlimant@cern.ch"
] | vlimant@cern.ch |
28519e7591dcb13b25682c4d4c25774be9b499d1 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/largestTime_20200903122443.py | 84a256da28e4d575cd155ab30b135a196baa028c | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from itertools import permutations
def Time(A):
# getting the different permutations
# get the one that falls between 0000 and 2359
# then place the semi colon in the proper place
# otherwise return an empty string
A = [str(i) for i in A]
perm = permutations(A)
time = ""
newArray = []
arr = []
for i in list(perm):
string = "".join(i)
newArray.append(string)
newArray = [int(i) for i in newArray]
for i in newArray:
if i > 0000 and i <= 2359:
arr.append(i)
newTime = arr[len(arr)-1]
newTime = str(newTime)
print(new)
Time([1,2,3,4])
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
19f811ca33bd32efecc9c62adc05120fc3c75251 | 4ae3b27a1d782ae43bc786c841cafb3ace212d55 | /Test_Slen/PythonSelFramework/tests/emu_nvdia.py | 873ce775dcbd7be2d18d25684ee30b9ab8eb2b11 | [] | no_license | bopopescu/Py_projects | c9084efa5aa02fd9ff6ed8ac5c7872fedcf53e32 | a2fe4f198e3ca4026cf2e3e429ac09707d5a19de | refs/heads/master | 2022-09-29T20:50:57.354678 | 2020-04-28T05:23:14 | 2020-04-28T05:23:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | < form
autocomplete = "false"
class ="modules-login-components-SignIn-components-Form-___style__sign-up-form___2z3Ug" >
< div class ="core-forms-components-___input__input-wrapper___17k2k" > < div class ="ui textfield invalid
">
<label for="email">Email Address</label><input autocomplete="true
" id="
email
" type="
email
" class="
input
" placeholder="
user @ domain.com
" value="">" \
"" \
"<i aria-hidden="
true
" class="
ui
icon
close - circle -
fill
"></i></div><div class="
core - forms - components - ___input__error - message___3ZeWU
">Required field</div></div><div class="
ui
buttons
"><button class="
ui
button
primary
disabled
" disabled=""
type = "submit" > Next < / button > < a
class ="forgot-password ui button flat" href="/signin/password/forgot" > Forgot Password < / a > < a class ="sign-up ui button default" href="/signup" > Create an Account < / a > < / div > < / form >
# create driver
# load webdriver
# load webpage(login url)
# locator for email address
# send_keys (email address)
# click on email locator
# find locator of new page
# locate successful message welcome : assert
def test_email(url, email):
driver = webdriver.Chrome()
driver.get(url)
locator = driver.find_element_by_id("email")
locator.send_keys(email)
welcome_page = driver.find_element_by_id("welcome")
assert welcome.title == "welcome"
menu.json
{"menu": {
"id": "file",
"value": "File",
"popup": {
"menuitem": [
{"value": "New", "onclick": "CreateNewDoc()"},
{"value": "Open", "onclick": "OpenDoc()"},
{"value": "Close", "onclick": "CloseDoc()"}
]
}
}}
{[(<>)]}
True
# input is a string, consisting of {[(<>)]} and letters
{{]} False
load
json
a
python
dict
json.load(open("menu.json", 'r")
def match(string):
| [
"sunusd@yahoo.com"
] | sunusd@yahoo.com |
d697f737208e9caf8f79d0bba9cceecc106bba98 | 40c927ea44653c645c9540e68a8f5b439990fddd | /Chap 10/10.6.5 Exercise.py | afd2bc4c30edcf594d0113cb259cefb66ad026a9 | [] | no_license | simrit1/asimo | 211ff255434637ac6ad396e8ff5ed5cee6ea971d | 12564ab591129ebbb0c2daaa3c538cc6d39aee39 | refs/heads/master | 2022-02-25T07:53:05.034587 | 2019-09-22T15:34:11 | 2019-09-22T15:34:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,405 | py | # Exercise 10.6.5
import turtle
turtle.setup(400,500)
wn = turtle.Screen()
wn.title("Tess becomes a traffic light!")
wn.bgcolor("lightgreen")
green = turtle.Turtle() # Create 3 turtles correspond with 3 traffic lights
orange = turtle.Turtle()
red = turtle.Turtle()
def draw_housing():
""" Draw a nice housing to hold the traffic lights """
green.pensize(3)
green.color("black", "darkgrey")
green.begin_fill()
green.forward(80)
green.left(90)
green.forward(200)
green.circle(40, 180)
green.forward(200)
green.left(90)
green.end_fill()
draw_housing()
green.penup()
# Position "green" onto the place where the green light should be
green.forward(40)
green.left(90)
green.forward(50)
# Turn "green" into a big green circle
green.shape("circle")
green.shapesize(3)
green.fillcolor("green")
a = green.position() # We get them to the same position
orange.goto(a)
red.goto(a)
orange.left(90) # Turn "orange" into a big orange circle
orange.forward(70)
orange.shape("circle")
orange.shapesize(3)
orange.fillcolor("orange")
red.left(90) # Turn "red" into a big red circle
red.forward(140)
red.shape("circle")
red.shapesize(3)
red.fillcolor("red")
# This variable holds the current state of the machine
state_num = 0
def advance_state_machine():
"""
Modify previous program then we can still realize
the rest of lights when they turn off.
Green, green and orange, orange, red. We number these states 0, 1, 2, 3.
With timer like exercise requisition.
"""
global state_num
if state_num == 0:
red.color("black")
orange.color("black")
green.color("green")
state_num = 1
wn.ontimer(advance_state_machine, 3000)
elif state_num == 1:
red.color("black")
orange.color("orange")
green.color("green")
state_num = 2
wn.ontimer(advance_state_machine, 1000)
elif state_num == 2:
red.color("black")
orange.color("orange")
green.color("black")
state_num = 3
wn.ontimer(advance_state_machine, 1000)
else:
red.color("red")
orange.color("black")
green.color("black") # Show up our green
state_num = 0
wn.ontimer(advance_state_machine, 2000)
advance_state_machine()
wn.exitonclick()
| [
"noreply@github.com"
] | simrit1.noreply@github.com |
d7200a1a5608907a5c84351d0b3f64523cdfda5a | 7b4bc42501e220d84a61a0e31da4609c6fec0939 | /lifelib/libraries/basiclife/BasicTerm_ME/Projection/__init__.py | 21991aa525a8f303b27cdccfc1dd97c3f2308998 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | odddkidout/lifelib | 14ec2ccce5b2e6b8ba2b3118300b1560b976712e | b787f43ddfd1d0eeac7e39f124ea49369c2445de | refs/heads/master | 2023-07-06T14:14:04.324826 | 2021-08-07T11:55:27 | 2021-08-07T11:55:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,637 | py | """The main Space in the :mod:`~basiclife.BasicTerm_M` model.
:mod:`~basiclife.BasicTerm_M.Projection` is the only Space defined
in the :mod:`~basiclife.BasicTerm_M` model, and it contains
all the logic and data used in the model.
.. rubric:: Parameters and References
(In all the sample code below,
the global variable ``Projection`` refers to the
:mod:`~basiclife.BasicTerm_M.Projection` Space.)
Attributes:
model_point_table: All model point data as a DataFrame.
The sample model point data was generated by
*generate_model_points.ipynb* included in the library.
By default, :func:`model_point` returns this :attr:`model_point_table`.
The DataFrame has columns labeled ``age_at_entry``,
``sex``, ``policy_term``, ``policy_count``
and ``sum_assured``.
Cells defined in :mod:`~basiclife.BasicTerm_M.Projection`
with the same names as these columns return
the corresponding columns.
(``policy_count`` is not used by default.)
.. code-block::
>>> Projection.model_poit_table
age_at_entry sex policy_term policy_count sum_assured
point_id
1 47 M 10 1 622000
2 29 M 20 1 752000
3 51 F 10 1 799000
4 32 F 20 1 422000
5 28 M 15 1 605000
... .. ... ... ...
9996 47 M 20 1 827000
9997 30 M 15 1 826000
9998 45 F 20 1 783000
9999 39 M 20 1 302000
10000 22 F 15 1 576000
[10000 rows x 5 columns]
The DataFrame is saved in the Excel file *model_point_table.xlsx*
placed in the model folder.
:attr:`model_point_table` is created by
Projection's `new_pandas`_ method,
so that the DataFrame is saved in the separate file.
The DataFrame has the injected attribute
of ``_mx_dataclident``::
>>> Projection.model_point_table._mx_dataclient
<PandasData path='model_point_table.xlsx' filetype='excel'>
.. seealso::
* :func:`model_point`
* :func:`age_at_entry`
* :func:`sex`
* :func:`policy_term`
* :func:`sum_assured`
disc_rate_ann: Annual discount rates by duration as a pandas Series.
.. code-block::
>>> Projection.disc_rate_ann
year
0 0.00000
1 0.00555
2 0.00684
3 0.00788
4 0.00866
146 0.03025
147 0.03033
148 0.03041
149 0.03049
150 0.03056
Name: disc_rate_ann, Length: 151, dtype: float64
The Series is saved in the Excel file *disc_rate_ann.xlsx*
placed in the model folder.
:attr:`disc_rate_ann` is created by
Projection's `new_pandas`_ method,
so that the Series is saved in the separate file.
The Series has the injected attribute
of ``_mx_dataclident``::
>>> Projection.disc_rate_ann._mx_dataclient
<PandasData path='disc_rate_ann.xlsx' filetype='excel'>
.. seealso::
* :func:`disc_rate_mth`
* :func:`disc_factors`
mort_table: Mortality table by age and duration as a DataFrame.
See *basic_term_sample.xlsx* included in this library
for how the sample mortality rates are created.
.. code-block::
>>> Projection.mort_table
0 1 2 3 4 5
Age
18 0.000231 0.000254 0.000280 0.000308 0.000338 0.000372
19 0.000235 0.000259 0.000285 0.000313 0.000345 0.000379
20 0.000240 0.000264 0.000290 0.000319 0.000351 0.000386
21 0.000245 0.000269 0.000296 0.000326 0.000359 0.000394
22 0.000250 0.000275 0.000303 0.000333 0.000367 0.000403
.. ... ... ... ... ... ...
116 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
117 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
118 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
119 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
120 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
[103 rows x 6 columns]
The DataFrame is saved in the Excel file *mort_table.xlsx*
placed in the model folder.
:attr:`mort_table` is created by
Projection's `new_pandas`_ method,
so that the DataFrame is saved in the separate file.
The DataFrame has the injected attribute
of ``_mx_dataclident``::
>>> Projection.mort_table._mx_dataclient
<PandasData path='mort_table.xlsx' filetype='excel'>
.. seealso::
* :func:`mort_rate`
* :func:`mort_rate_mth`
np: The `numpy`_ module.
pd: The `pandas`_ module.
.. _numpy:
https://numpy.org/
.. _pandas:
https://pandas.pydata.org/
.. _new_pandas:
https://docs.modelx.io/en/latest/reference/space/generated/modelx.core.space.UserSpace.new_pandas.html
"""
from modelx.serialize.jsonvalues import *
_formula = None
_bases = []
_allow_none = None
_spaces = []
# ---------------------------------------------------------------------------
# Cells
def model_point():
"""Target model points
Returns as a DataFrame the model points to be in the scope of calculation.
By default, this Cells returns the entire :attr:`model_point_table`
without change.
To select model points, change this formula so that this
Cells returns a DataFrame that contains only the selected model points.
Examples:
To select only the model point 1::
def model_point():
return model_point_table.loc[1:1]
To select model points whose ages at entry are 40 or greater::
def model_point():
return model_point_table[model_point_table["age_at_entry"] >= 40]
Note that the shape of the returned DataFrame must be the
same as the original DataFrame, i.e. :attr:`model_point_table`.
When selecting only one model point, make sure the
returned object is a DataFrame, not a Series, as seen in the example
above where ``model_point_table.loc[1:1]`` is specified
instead of ``model_point_table.loc[1]``.
Be careful not to accidentally change the original table.
"""
return model_point_table
def sum_assured():
"""The sum assured of the model points
The ``sum_assured`` column of the DataFrame returned by
:func:`model_point`.
"""
return model_point()["sum_assured"]
def age_at_entry():
"""The age at entry of the model points
The ``age_at_entry`` column of the DataFrame returned by
:func:`model_point`.
"""
return model_point()["age_at_entry"]
def sex():
"""The sex of the model points
The ``sex`` column of the DataFrame returned by
:func:`model_point`.
"""
return model_point()["sex"]
def proj_len():
"""Projection length in months
Projection length in months defined as::
12 * policy_term() + 1
.. seealso::
:func:`policy_term`
"""
return np.maximum(12 * policy_term() - duration_mth(0) + 1, 0)
max_proj_len = lambda: max(proj_len())
"""The max of all projection lengths"""
def disc_factors():
"""Discount factors.
Vector of the discount factors as a Numpy array. Used for calculating
the present values of cashflows.
.. seealso::
:func:`disc_rate_mth`
"""
return np.array(list((1 + disc_rate_mth()[t])**(-t) for t in range(max_proj_len())))
def net_cf(t):
"""Net cashflow
Net cashflow for the period from ``t`` to ``t+1`` defined as::
premiums(t) - claims(t) - expenses(t) - commissions(t)
.. seealso::
* :func:`premiums`
* :func:`claims`
* :func:`expenses`
* :func:`commissions`
"""
return premiums(t) - claims(t) - expenses(t) - commissions(t)
def premium_pp(t):
"""Monthly premium per policy
Monthly premium amount per policy defined as::
round((1 + loading_prem()) * net_premium(), 2)
.. seealso::
* :func:`loading_prem`
* :func:`net_premium_pp`
"""
return np.around((1 + loading_prem()) * net_premium_pp(), 2)
def claim_pp(t):
"""Claim per policy
The claim amount per plicy. Defaults to :func:`sum_assured`.
"""
return sum_assured()
def inflation_factor(t):
"""The inflation factor at time t
.. seealso::
* :func:`inflation_rate`
"""
return (1 + inflation_rate())**(t//12)
def premiums(t):
"""Premium income
Premium income during the period from ``t`` to ``t+1`` defined as::
premium_pp(t) * pols_if(t)
.. seealso::
* :func:`premium_pp`
* :func:`pols_if`
"""
return premium_pp(t) * pols_if_at(t, "BEF_DECR")
def duration(t):
"""Duration in force in years"""
return duration_mth(t) //12
def claims(t):
"""Claims
Claims during the period from ``t`` to ``t+1`` defined as::
claim_pp(t) * pols_death(t)
.. seealso::
* :func:`claim_pp`
* :func:`pols_death`
"""
return claim_pp(t) * pols_death(t)
def expenses(t):
"""Expenses
Expense during the period from ``t`` to ``t+1``.
At ``t=0``, it is defined as :func:`expense_acq`.
For ``t=1`` and onwards, defined as::
pols_if(t) * expense_maint()/12 * inflation_factor(t)
.. seealso::
* :func:`pols_if`
* :func:`expense_maint`
* :func:`inflation_factor`
"""
return expense_acq() * pols_new_biz(t) \
+ pols_if_at(t, "BEF_DECR") * expense_maint()/12 * inflation_factor(t)
def age(t):
"""The attained age at time t.
Defined as::
age_at_entry() + duration(t)
.. seealso::
* :func:`age_at_entry`
* :func:`duration`
"""
return age_at_entry() + duration(t)
def disc_rate_mth():
"""Monthly discount rate
Nummpy array of monthly discount rates from time 0 to :func:`max_proj_len` - 1
defined as::
(1 + disc_rate_ann)**(1/12) - 1
.. seealso::
:func:`disc_rate_ann`
"""
return np.array(list((1 + disc_rate_ann[t//12])**(1/12) - 1 for t in range(max_proj_len())))
def lapse_rate(t):
"""Lapse rate
By default, the lapse rate assumption is defined by duration as::
max(0.1 - 0.02 * duration(t), 0.02)
.. seealso::
:func:`duration`
"""
return np.maximum(0.1 - 0.02 * duration(t), 0.02)
def pv_pols_if():
"""Present value of policies in-force
The discounted sum of the number of in-force policies at each month.
It is used as the annuity factor for calculating :func:`net_premium_pp`.
"""
result = np.array(list(pols_if_at(t, "BEF_DECR") for t in range(max_proj_len()))).transpose()
return result @ disc_factors()[:max_proj_len()]
def pv_net_cf():
"""Present value of net cashflows.
Defined as::
pv_premiums() - pv_claims() - pv_expenses() - pv_commissions()
.. seealso::
* :func:`pv_premiums`
* :func:`pv_claims`
* :func:`pv_expenses`
* :func:`pv_commissions`
"""
return pv_premiums() - pv_claims() - pv_expenses() - pv_commissions()
def commissions(t):
    """Commissions

    By default, 100% of premiums during the first policy year, 0 afterwards.
    .. seealso::
        * :func:`premiums`
        * :func:`duration`
    """
    first_year = duration(t) == 0
    return first_year * premiums(t)
def inflation_rate():
    """Annual inflation rate assumption (1% by default)."""
    annual_rate = 0.01
    return annual_rate
def pols_death(t):
    """Number of deaths occurring at time t."""
    exposed = pols_if_at(t, "BEF_DECR")
    return exposed * mort_rate_mth(t)
def pols_if(t):
    """Number of Policies In-force

    The number of in-force policies at time ``t`` before maturity,
    i.e. ``pols_if_at(t, "BEF_MAT")``. The initial value is read from
    :func:`pols_if_init`; subsequent values follow the recursion in
    :func:`pols_if_at`.

    (The previous docstring showed a stale recursion that also
    subtracted ``pols_maturity(t)``; the "before maturity" count does
    not.)
    .. seealso::
        * :func:`pols_if_at`
        * :func:`pols_if_init`
    """
    return pols_if_at(t, "BEF_MAT")
def pols_lapse(t):
    """Number of lapses occurring at time t

    The annual lapse rate is converted to a monthly rate before being
    applied to the policies in force before decrement.
    .. seealso::
        * :func:`pols_if_at`
        * :func:`lapse_rate`
    """
    monthly_lapse = 1 - (1 - lapse_rate(t)) ** (1 / 12)
    return pols_if_at(t, "BEF_DECR") * monthly_lapse
def pv_claims():
    """Present value of claims
    .. seealso::
        * :func:`claims`
    """
    horizon = max_proj_len()
    cashflows = np.array([claims(t) for t in range(horizon)]).transpose()
    return cashflows @ disc_factors()[:horizon]
def pv_commissions():
    """Present value of commissions

    (The previous docstring's seealso wrongly referenced
    :func:`expenses`.)
    .. seealso::
        * :func:`commissions`
    """
    result = np.array(list(commissions(t) for t in range(max_proj_len()))).transpose()
    return result @ disc_factors()[:max_proj_len()]
def pv_expenses():
    """Present value of expenses
    .. seealso::
        * :func:`expenses`
    """
    horizon = max_proj_len()
    cashflows = np.array([expenses(t) for t in range(horizon)]).transpose()
    return cashflows @ disc_factors()[:horizon]
def pv_premiums():
    """Present value of premiums
    .. seealso::
        * :func:`premiums`
    """
    horizon = max_proj_len()
    cashflows = np.array([premiums(t) for t in range(horizon)]).transpose()
    return cashflows @ disc_factors()[:horizon]
def expense_acq():
    """Acquisition expense per policy (``300`` by default)."""
    per_policy_cost = 300
    return per_policy_cost
def expense_maint():
    """Annual maintenance expense per policy

    ``60`` by default. (Fixed the "expence" typo in the previous
    docstring.)
    """
    return 60
def loading_prem():
    """Loading per premium (``0.5`` by default).
    .. seealso::
        * :func:`premium_pp`
    """
    loading = 0.5
    return loading
def mort_rate(t):
    """Mortality rate to be applied at time t

    Looks up the rate for each model point from the (age, duration)
    MultiIndexed table built by :func:`mort_table_reindexed`. The
    duration is capped at 5 (the table's last column), and (age,
    duration) pairs missing from the table get a rate of 0. The result
    is re-labelled with the model point index.

    (Removed the commented-out legacy lookup that preceded this
    implementation.)
    .. seealso::
        * :attr:`mort_table`
        * :func:`mort_rate_mth`
    """
    mi = pd.MultiIndex.from_arrays([age(t), np.minimum(duration(t), 5)])
    return mort_table_reindexed().reindex(
        mi, fill_value=0).set_axis(model_point().index)
def mort_rate_mth(t):
    """Monthly mortality rate to be applied at time t

    Converted from the annual rate assuming uniform survival within
    the year.
    .. seealso::
        * :attr:`mort_table`
        * :func:`mort_rate`
    """
    annual_survival = 1 - mort_rate(t)
    return 1 - annual_survival ** (1 / 12)
def result_pv():
    """Result table of present value of cashflows

    One row per model point, one column per present-value measure.
    .. seealso::
        * :func:`pv_premiums`
        * :func:`pv_claims`
        * :func:`pv_expenses`
        * :func:`pv_commissions`
        * :func:`pv_net_cf`
    """
    columns = {
        "PV Premiums": pv_premiums,
        "PV Claims": pv_claims,
        "PV Expenses": pv_expenses,
        "PV Commissions": pv_commissions,
        "PV Net Cashflow": pv_net_cf,
    }
    data = {label: fn() for label, fn in columns.items()}
    return pd.DataFrame(data, index=model_point().index)
def result_cf():
    """Result table of cashflows

    One row per projection month, with each cashflow summed across all
    model points.
    .. seealso::
        * :func:`premiums`
        * :func:`claims`
        * :func:`expenses`
        * :func:`commissions`
        * :func:`net_cf`
    """
    horizon = range(max_proj_len())
    columns = {
        "Premiums": premiums,
        "Claims": claims,
        "Expenses": expenses,
        "Commissions": commissions,
        "Net Cashflow": net_cf,
    }
    data = {label: [sum(fn(t)) for t in horizon] for label, fn in columns.items()}
    return pd.DataFrame(data, index=horizon)
def pols_if_init():
    """Initial Number of Policies In-force

    Number of in-force policies at time 0, referenced from
    :func:`pols_if`. Model points already in force at time 0
    (``duration_mth(0) > 0``) start with their ``policy_count``;
    new-business model points start at 0 and enter through
    :func:`pols_new_biz` instead.

    (The previous docstring's "Defaults to 1" was stale.)
    """
    return model_point()["policy_count"].where(duration_mth(0) > 0, other=0)
def policy_term():
    """The policy term of the model points.

    The ``policy_term`` column of the DataFrame returned by
    :func:`model_point`.
    """
    return model_point().loc[:, "policy_term"]
def net_premium_pp():
    """Net premium per policy

    Defined so that the present value of net premiums equates to the
    present value of claims::
        pv_claims() / pv_pols_if()

    Model points with no in-force exposure would divide by zero, so the
    ratio is evaluated with divide/invalid warnings suppressed and any
    resulting NaN/inf replaced with 0 via ``np.nan_to_num``.

    (Removed the commented-out unguarded division.)
    .. seealso::
        * :func:`pv_claims`
        * :func:`pv_pols_if`
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.nan_to_num(pv_claims() / pv_pols_if())
def pols_maturity(t):
    """Number of maturing policies

    Maturity occurs when ``duration_mth(t) == 12 * policy_term()``, in
    which case all policies still in force before maturity
    (``pols_if_at(t, "BEF_MAT")``) mature; otherwise ``0``.

    (The previous docstring showed a stale expanded formula; the
    implementation delegates to :func:`pols_if_at`.)
    """
    return (duration_mth(t) == policy_term() * 12) * pols_if_at(t, "BEF_MAT")
def duration_mth(t):
    """Duration of the model points in months at time t.

    At ``t == 0`` the duration is read from the ``duration_mth`` column
    of :func:`model_point`; afterwards it increases by 1 each month.
    """
    if t == 0:
        return model_point()['duration_mth']
    else:
        return duration_mth(t-1) + 1
def pols_new_biz(t):
    """Number of new policies issued at time t.

    Model points whose duration is 0 at ``t`` contribute their
    ``policy_count``; all other model points contribute 0.
    """
    return model_point()['policy_count'].where(duration_mth(t) == 0, other=0)
def pols_if_at(t, timing):
    """Number of policies in force at time ``t`` at a given ``timing``.

    Within each period, decrements are applied in the order
    maturity -> new business -> lapse/death, so the valid timings are:

    - ``"BEF_MAT"``: after the previous period's lapses and deaths,
      before this period's maturities
    - ``"BEF_NB"``: after maturities, before new business is added
    - ``"BEF_DECR"``: after new business, before lapses and deaths

    Raises ``ValueError`` for any other ``timing``.
    """
    if timing == "BEF_MAT":
        # Recursion bottoms out at the initial in-force count.
        if t == 0:
            return pols_if_init()
        else:
            return pols_if_at(t-1, "BEF_DECR") - pols_lapse(t-1) - pols_death(t-1)
    elif timing == "BEF_NB":
        return pols_if_at(t, "BEF_MAT") - pols_maturity(t)
    elif timing == "BEF_DECR":
        return pols_if_at(t, "BEF_NB") + pols_new_biz(t)
    else:
        raise ValueError("invalid timing")
def mort_table_reindexed():
    """MultiIndexed Mortality Table

    Returns a Series of mortality rates reshaped from :attr:`mort_table`.
    The returned Series is indexed by age and duration.

    (Fixed the "mortlity" typo and the reference to a nonexistent
    ``mortality_table`` attribute.)
    """
    result = []
    for col in mort_table.columns:
        # Each column of mort_table is one duration; stack them into a
        # single Series indexed by (age, duration).
        df = mort_table[[col]]
        df = df.assign(Duration=int(col)).set_index('Duration', append=True)[col]
        result.append(df)
    return pd.concat(result)
def result_pols():
    """Result table of policy decrements

    Total in-force, maturity, new-business, death and lapse counts for
    each projection month, summed across all model points.

    (The previous docstring was copy-pasted from :func:`result_cf` and
    referenced the cashflow functions; corrected here.)
    .. seealso::
        * :func:`pols_if`
        * :func:`pols_maturity`
        * :func:`pols_new_biz`
        * :func:`pols_death`
        * :func:`pols_lapse`
    """
    t_len = range(max_proj_len())
    data = {
        "pols_if": [sum(pols_if(t)) for t in t_len],
        "pols_maturity": [sum(pols_maturity(t)) for t in t_len],
        "pols_new_biz": [sum(pols_new_biz(t)) for t in t_len],
        "pols_death": [sum(pols_death(t)) for t in t_len],
        "pols_lapse": [sum(pols_lapse(t)) for t in t_len]
    }
    return pd.DataFrame(data, index=t_len)
# ---------------------------------------------------------------------------
# References
# NOTE(review): the tuples below look like serialized external references
# (data clients / modules) resolved by the surrounding modelling framework
# rather than live objects -- confirm how they are bound before editing.
disc_rate_ann = ("DataClient", 2234521425280)
mort_table = ("DataClient", 2234521427152)
np = ("Module", "numpy")
pd = ("Module", "pandas")
model_point_table = ("DataClient", 2234543164384) | [
"fumito.ham@gmail.com"
] | fumito.ham@gmail.com |
27d0ef92e9aba96fde4dacfa572ed41d33a84647 | aba00d6272765b71397cd3eba105fc79b3a346e0 | /Digisig/digsigvenv/lib/python3.6/site-packages/ufl/objects.py | f34c47c6d4cba9fedb8e0c36d0e65630f1ee7ba7 | [] | no_license | JosteinGj/School | a2c7cc090571b867637003fe6c647898ba9d8d24 | 3b5f29846e443b97f042241237dbda3208b20831 | refs/heads/master | 2023-05-02T11:07:29.517669 | 2021-04-26T09:04:57 | 2021-04-26T09:04:57 | 295,340,194 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,694 | py | # -*- coding: utf-8 -*-
"Utility objects for pretty syntax in user code."
# Copyright (C) 2008-2016 Martin Sandve Alnæs
#
# This file is part of UFL.
#
# UFL is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UFL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with UFL. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Anders Logg, 2008
# Modified by Kristian Oelgaard, 2009
from ufl.core.multiindex import indices
from ufl.cell import Cell
from ufl.measure import Measure
from ufl.measure import integral_type_to_measure_name
# Default indices
i, j, k, l = indices(4)  # noqa: E741
p, q, r, s = indices(4)
# Inject a Measure object (dx, ds, dS, ...) into this module's namespace
# for every known integral type, so wildcard imports expose them all.
for integral_type, measure_name in integral_type_to_measure_name.items():
    globals()[measure_name] = Measure(integral_type)
# TODO: Firedrake hack, remove later
ds_tb = ds_b + ds_t  # noqa: F821 (defined dynamically in the loop above)
# Default measure dX including both uncut and cut cells
dX = dx + dC  # noqa: F821 (defined dynamically in the loop above)
# Create objects for builtin known cell types
vertex = Cell("vertex", 0)
interval = Cell("interval", 1)
triangle = Cell("triangle", 2)
tetrahedron = Cell("tetrahedron", 3)
quadrilateral = Cell("quadrilateral", 2)
hexahedron = Cell("hexahedron", 3)
# Facet is just a dummy declaration for RestrictedElement
facet = "facet"
| [
"jostein.gj@gmail.com"
] | jostein.gj@gmail.com |
8e838aceaeca95e269ad225ea7c65c28c335810c | e3030bb29b8c713daf360953e27b7752c6f9daa2 | /bubble_sort_test.py | 9c31b843dcadcdfa4fb66c1413e45d0460c932f7 | [] | no_license | vicvv/python_scripts | 4941316c92ec0bc7ebf6d7011071a2a727606de8 | ebe2858f2164085f75bdb1e832f894aa4ee5e729 | refs/heads/master | 2022-11-13T13:40:27.440346 | 2022-10-09T02:21:30 | 2022-10-09T02:21:30 | 217,444,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | list = [3,1,9,2,9]
print ("Original list: " , list)
changed = True
while changed:
print("New Loop!")
changed = False
for i in range(0,len(list) - 1):
print(list)
if list[i] > list[i+1]:
(list[i+1], list[i]) = (list[i], list[i+1])
changed = True
| [
"xolosno270@gmail.com"
] | xolosno270@gmail.com |
cfc80e9ee07487642639f480fc81954aaf00149f | e9261678450fee1b9f05b6b03972c62c79c2bc2c | /tensorflow_compression/python/ops/round_ops_test.py | 6e9ad7ae3c8393047da725a7d9e23868e3053372 | [
"Apache-2.0"
] | permissive | tensorflow/compression | 46aa22462eded425ea66d9f006da924d330e142f | 80d962f8f8532d9a3dbdaf0a97e249b7be7c29f6 | refs/heads/master | 2023-08-21T01:11:34.129210 | 2023-08-11T15:54:52 | 2023-08-11T15:55:37 | 133,584,278 | 818 | 296 | Apache-2.0 | 2022-11-10T19:48:07 | 2018-05-15T23:32:19 | Python | UTF-8 | Python | false | false | 3,791 | py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for soft round."""
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_compression.python.ops import round_ops
class SoftRoundTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for soft_round, soft_round_inverse and the conditional mean."""
  def test_soft_round_small_alpha_is_identity(self):
    # As alpha -> 0, soft rounding degenerates to the identity map.
    x = tf.linspace(-2., 2., 50)
    y = round_ops.soft_round(x, alpha=1e-13)
    self.assertAllClose(x, y)
  def test_soft_round_large_alpha_is_round(self):
    # We don't care what happens exactly near half-integer values:
    for offset in range(-5, 5):
      x = tf.linspace(offset - 0.499, offset + 0.499, 100)
      y = round_ops.soft_round(x, alpha=2000.0)
      self.assertAllClose(tf.round(x), y, atol=0.02)
  def test_soft_inverse_round_small_alpha_is_identity(self):
    # As alpha -> 0, the inverse also degenerates to the identity map.
    x = tf.linspace(-2., 2., 50)
    y = round_ops.soft_round_inverse(x, alpha=1e-13)
    self.assertAllEqual(x, y)
  def test_soft_inverse_is_actual_inverse(self):
    # Round-trip through soft_round then its inverse recovers the input.
    x = tf.constant([-1.25, -0.75, 0.75, 1.25], dtype=tf.float32)
    y = round_ops.soft_round(x, alpha=2.0)
    x2 = round_ops.soft_round_inverse(y, alpha=2.0)
    self.assertAllClose(x, x2)
  def test_soft_round_inverse_large_alpha_is_ceil_minus_half(self):
    # We don't care what happens exactly near integer values:
    for offset in range(-5, 5):
      x = tf.linspace(offset + 0.001, offset + 0.999, 100)
      y = round_ops.soft_round_inverse(x, alpha=5000.0)
      self.assertAllClose(tf.math.ceil(x) - 0.5, y, atol=0.001)
  def test_conditional_mean_large_alpha_is_round(self):
    # We don't care what happens exactly near integer values:
    for offset in range(-5, 5):
      x = tf.linspace(offset + 0.001, offset + 0.999, 100)
      y = round_ops.soft_round_conditional_mean(x, alpha=5000.0)
      self.assertAllClose(tf.math.round(x), y, atol=0.001)
  @parameterized.parameters(0., 1e-6, 1e-2, 5., 1e6)
  def test_soft_round_values_and_gradients_are_finite(self, alpha):
    x = tf.linspace(0., 1., 11)  # covers exact integers and half-integers
    with tf.GradientTape() as tape:
      tape.watch(x)
      y = round_ops.soft_round(x, alpha=alpha)
    dy = tape.gradient(y, x)
    self.assertAllEqual(tf.math.is_finite(y), tf.ones(x.shape, dtype=bool))
    self.assertAllEqual(tf.math.is_finite(dy), tf.ones(x.shape, dtype=bool))
  @parameterized.parameters(0., 1e-6, 1e-2, 5., 1e6)
  def test_soft_round_inverse_values_and_gradients_are_finite(self, alpha):
    x = tf.linspace(-.5, .5, 11)  # covers exact integers and half-integers
    with tf.GradientTape() as tape:
      tape.watch(x)
      y = round_ops.soft_round_inverse(x, alpha=alpha)
    dy = tape.gradient(y, x)
    self.assertAllEqual(tf.math.is_finite(y), tf.ones(x.shape, dtype=bool))
    is_finite = tf.math.is_finite(dy)
    expected_finite = tf.ones(dy.shape, dtype=bool)
    if alpha > 15:
      # We allow non-finite values at 0 for large alphas, since the function
      # simply is extremely steep there. (Index 5 is x == 0.)
      expected_finite = tf.tensor_scatter_nd_update(
          expected_finite, [[5]], [is_finite[5]])
    self.assertAllEqual(is_finite, expected_finite)
if __name__ == "__main__":
  tf.test.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
6c587ba032a7af68306a1a2527df857e66313f3b | ddb1bb5b8ec5c07b750a305b6095aba553739a90 | /gym/data/demo.py | cd17e3f5d00c70296288634ef91fc80d1046ad5f | [] | no_license | geyang/decision-transformer | 7f41b3af8b6bd11125781c0cb458d26254d6de5a | 181d021040f5fee8241d8a642fe99aff24775fe9 | refs/heads/main | 2023-06-02T08:40:37.812378 | 2021-06-15T23:27:05 | 2021-06-15T23:27:05 | 377,266,002 | 0 | 0 | null | 2021-06-15T18:57:13 | 2021-06-15T18:57:12 | null | UTF-8 | Python | false | false | 37 | py | import dm_control
print(dm_control)  # smoke check: prints the imported module object (includes its path)
| [
"yangge1987@gmail.com"
] | yangge1987@gmail.com |
1dd995ecbf18a55f8656020e123f70bf4bbaca0e | 002ce67bd8b405ef097741165c16af3ef9c89b06 | /test1234_dev_3126/wsgi.py | 9db98b92f57c0333439ed44a259d3ec968dbcf55 | [] | no_license | crowdbotics-apps/test1234-dev-3126 | cd9bc5a3926546a92e9ab39de2726065b6854730 | 226847552ae1f8273977b005b395701a36dd3498 | refs/heads/master | 2023-03-30T00:23:04.619342 | 2020-04-20T04:40:19 | 2020-04-20T04:40:19 | 257,172,780 | 0 | 0 | null | 2021-04-09T18:25:46 | 2020-04-20T04:39:40 | Python | UTF-8 | Python | false | false | 411 | py | """
WSGI config for test1234_dev_3126 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test1234_dev_3126.settings')
# The WSGI callable imported by application servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
764b96a217a92536ffe82d5123200c2afc5dfe5e | b853c16efafa74a9e1cb076008a17c9d85389fca | /HOME/笔记/guohao/6.py | 4f6d4ad03ced547e37db664de5c14298365081a8 | [] | no_license | Jason0221/backup | 14c48f1adb871b915d6f0ba49a26396e7cf0cd64 | dfd54cbcf7c27b0df6249104747e9a7ceffcb392 | refs/heads/master | 2020-06-03T13:14:39.751679 | 2017-05-15T08:50:38 | 2017-05-15T08:50:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | #!/usr/bin/python
#coding=utf-8
'''
6. 编写一个空洞文件,大小为1K
'''
f = open('empty','w')
f.seek(1024,0)
f.write(' ')
| [
"jasonlearning@outlook.com"
] | jasonlearning@outlook.com |
01aa1c46ef883e8b40c3d3a86289407768ff42aa | 297497957c531d81ba286bc91253fbbb78b4d8be | /testing/web-platform/tests/webdriver/tests/classic/new_session/create_alwaysMatch.py | 4c4610966a6447eda19413197a76f56166971d38 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 468 | py |
import pytest
from .conftest import product, flatten
from tests.support.asserts import assert_success
from tests.classic.new_session.support.create import valid_data
@pytest.mark.parametrize("key,value", flatten(product(*item) for item in valid_data))
def test_valid(new_session, add_browser_capabilities, key, value):
    # Every valid capability key/value pair must be accepted when sent
    # in the "alwaysMatch" part of a New Session request.
    response, _ = new_session({"capabilities": {
        "alwaysMatch": add_browser_capabilities({key: value})}})
    assert_success(response)
| [
"mcastelluccio@mozilla.com"
] | mcastelluccio@mozilla.com |
65a09fdbf361363f147f0d6bb1e6b93b054e8390 | 559e336386e02c0e5ebc7316424c3b4a41380d99 | /fullstack/statistics/distro_evaluation_iso.py | e36f45cff0a3455c400f50e31fb62509daeaf52b | [] | no_license | maranemil/howto | edf1e294544ef6980894dcd345d73160d8aa9620 | f6270ed0affcdbd899dd8a2ff9b0b98625e63a5a | refs/heads/master | 2023-09-05T03:02:18.526914 | 2023-09-04T11:27:52 | 2023-09-04T11:27:52 | 22,177,757 | 48 | 26 | null | 2022-10-17T19:43:31 | 2014-07-23T21:04:50 | Python | UTF-8 | Python | false | false | 4,269 | py |
# https://www.onlinegdb.com/online_python_compiler
# https://ideone.com/
# https://www.tutorialspoint.com/execute_python_online.php - support numpy pandas matplotlib math
# https://repl.it/repls/DimLoathsomeTwintext - support numpy pandas matplotlib math sklearn
import numpy as np
import pandas as pd
import matplotlib.pyplot as pt
from sklearn import linear_model
import math
# https://bigdata-madesimple.com/how-to-run-linear-regression-in-python-scikit-learn/
# https://www.programiz.com/python-programming/array#introduction
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.assign.html
# https://towardsdatascience.com/building-a-deployable-ml-classifier-in-python-46ba55e1d720
# https://data-science-blog.com/blog/2017/10/17/lineare-regression-in-python-scitkit-learn/
# https://pandas.pydata.org/pandas-docs/stable/basics.html
# https://www.kaggle.com/riteshdash/linear-regression-numpy-pandas-sklearn-matplotlib
# https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.polyfit.html
# https://www.ritchieng.com/pandas-selecting-multiple-rows-and-columns/
# https://pythonhow.com/accessing-dataframe-columns-rows-and-cells/
# https://bigdata-madesimple.com/how-to-run-linear-regression-in-python-scikit-learn/
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.set_index.html
# https://towardsdatascience.com/embedding-machine-learning-models-to-web-apps-part-1-6ab7b55ee428
# https://towardsdatascience.com/building-a-deployable-ml-classifier-in-python-46ba55e1d720
# https://dziganto.github.io/data%20science/online%20learning/python/scikit-learn/An-Introduction-To-Online-Machine-Learning/
# https://www.digitalocean.com/community/tutorials/how-to-build-a-machine-learning-classifier-in-python-with-scikit-learn
# Raw evaluation data: (distro, yes-features, no-features, speed score,
# total = yes-features + speed score). Row 0 holds the column headers.
arDistro = [
    ("distro","Yes Features","no Features","Score Seconds","Total Score Yes + Time"),
    ("Manjaro Xfce minimal 0.8.9 x86 64", 5, 0, 10, 15),
    ("Pop os 18 04 amd64 intel 37", 5, 0, 9, 14),
    ("Lubuntu 16.04.5 desktop amd64", 4, 1, 9, 13),
    ("Ubuntu 14.04.5 LTS (Trusty Tahr)", 5, 0, 8, 13),
    ("Ubuntu 9.04 desktop i386", 3, 2, 10, 13),
    ("Elementary OS 0.4.1 stable", 5, 0, 7, 12),
    ("Fuduntu 2013 2 i686 LiteDVD", 3, 2, 9, 12),
    ("OpenSUSE 11 1 GNOME LiveCD i686", 3, 2, 9, 12),
    ("Trisquel mini 8.0 amd64", 3, 2, 9, 12),
    ("Ubuntu 10.04 desktop i386", 3, 2, 9, 12),
    ("Ubuntu 12.04.5 LTS (Precise Pangolin)", 3, 2, 9, 12),
    ("Ubuntu 16.04.5 LTS (Xenial Xerus)", 5, 0, 7, 12),
    ("Black Lab bll 8 unity x86 64", 4, 1, 7, 11),
    ("LinuxMint 19 xfce 64bit", 4, 1, 7, 11),
    ("Pure-OS 8 0 gnome 20180904 amd64", 5, 0, 6, 11),
    ("Ubuntu 18.04.1 desktop amd64", 5, 0, 6, 11),
    ("Ubuntu 18.10 (Cosmic Cuttlefish) amd64", 5, 0, 6, 11),
    ("Feren_OS_x64", 5, 0, 5, 10),
    ("FreeBSD 11 2 RELEASE amd64", 2, 3, 8, 10),
    ("Kali Linux light 2018 3 amd64", 3, 2, 7, 10),
    ("Kali linux light 2018 2 amd64", 3, 2, 7, 10),
    ("RaspberryPi Debian 2017 x86 stretch", 3, 2, 7, 10),
    ("Debian live 8.5.0 i386 xfce desktop", 2, 3, 7, 9),
    ("CentOS 7 x86 64 Minimal 1804", 2, 3, 6, 8),
    ("Debian live 9.5.0 amd64 xfce", 2, 3, 6, 8),
    ("Red-Hat rhel server 7 5 x86 64", 2, 3, 6, 8),
    ("Debian 7.11 0 i386 xfce CD 1", 3, 2, 4, 7),
    ("Fedora Workstation Live x86 64", 5, 0, 2, 7),
    ("Debian 9.5.0 amd64 xfce CD 1", 2, 3, 4, 6),
    ("Scientific Linux SL 7 5 x86 64 2018", 2, 3, 3, 5),
    ("Linux Mint 18 3 xfce 64bit", 4, 1, 0, 4)
]
# Build a DataFrame from the raw tuples; row 0 is the header row, so the
# real observations live at integer labels 1..31.
df = pd.DataFrame(arDistro)
print(df.head())
print("--------------------------------------------------------------------")
print(df.columns)
print("--------------------------------------------------------------------")
print(df.index)
print(df.loc[3, 1])  # row 3, col 1 ("Yes Features" of the 3rd entry)
print("--------------------------------------------------------------------")

# Skip the header row and fit column 2 ("no Features") against column 1
# ("Yes Features"). scikit-learn requires X to be 2-D with shape
# (n_samples, n_features); the previous 1-D Series raised
# "Reshape your data ..." from LinearRegression.fit, hence reshape(-1, 1).
x = df.loc[1:22, 1].to_numpy(dtype=float).reshape(-1, 1)
y = df.loc[1:22, 2].to_numpy(dtype=float)

from sklearn.linear_model import LinearRegression

regr = LinearRegression()
print(regr.fit(x, y))
"maran.emil@gmail.com"
] | maran.emil@gmail.com |
1d8083b55d082f72f892cff1f599d23d9b04c0e4 | f41309da5e0d26b24d974a009fa309a02fcaa20c | /aws_s3_policies/aws_s3_bucket_name_dns_compliance.py | 81c0bbb2ecb50dcda200b7d0ea8a5f0124f5eab4 | [
"Apache-2.0"
def policy(resource):
    """Return True when the S3 bucket name is DNS-compliant for this
    check's purposes, i.e. contains no periods.

    ``resource`` is a dict-like object with a ``'Name'`` key holding the
    bucket name. (Also restores the ``def`` line that was mangled in the
    source.)
    """
    return '.' not in resource['Name']
| [
"noreply@github.com"
] | georgeSkoumas.noreply@github.com |
def quick_sort(items):
    """Sort ``items`` in place using quicksort (Lomuto partition scheme).

    Returns None, like ``list.sort``. (Also restores the ``def`` line
    that was mangled in the source, replaces the temp-variable swap with
    tuple assignment, and documents the helpers.)
    """
    def _swap(a, b):
        # No-op when the indices coincide.
        if a != b:
            items[a], items[b] = items[b], items[a]

    def _partition(lo, hi):
        # items[hi] is the pivot; everything smaller ends up left of it.
        pivot = items[hi]
        boundary = lo - 1
        for idx in range(lo, hi):
            if items[idx] < pivot:
                boundary += 1
                _swap(idx, boundary)
        _swap(boundary + 1, hi)
        return boundary + 1

    def _sort(lo, hi):
        if lo >= hi:
            return
        mid = _partition(lo, hi)
        _sort(lo, mid - 1)
        _sort(mid + 1, hi)

    _sort(0, len(items) - 1)
# Demo: sort a sample list in place and show the result.
ll = [10, 80, 30, 90, 40, 50, 70]
quick_sort(ll)
print(ll)
| [
"marceloe.fuentes@gmail.com"
] | marceloe.fuentes@gmail.com |
b3ef8f8b0b2b71c3623b3d8b0ba12a381961635c | f7d47249f7e74bec51eacaa05f381674b92e3611 | /interview/19_多线程threading.Thread.py | 729ee1370c66c0b35d3a2915947de85fc312ba42 | [] | no_license | jinlijiang123/crawler | f96764bc5e7ae6f254e397189c4228336889a0d1 | cd3f16d04cc7c83b78d5a78afa7a57951399d490 | refs/heads/master | 2020-06-15T19:44:48.892953 | 2019-03-26T04:35:29 | 2019-03-26T04:35:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | # -*- coding: utf-8-*-
import random
import time,threading
def thread_run(urls):
print "Current %s is running..."%threading.current_thread().name
for url in urls:
print '%s......%s'%(threading.current_thread().name,url)
time.sleep(random.random())
print "Current %s ends..."%threading.current_thread().name
print '%s is running...'%threading.currentThread().name
#通过threading.Thread()方法创建实例
t1 = threading.Thread(target = thread_run,name = 'Thread1',args = (['url1','url2','url3'],))#注意传入的args是一个tuole
t2 = threading.Thread(target = thread_run,name = 'Thread2',args = (['url4','url5','url6'],))
t1.start()
t2.start()
t1.join()
t2.join()
print '%s ends...'%threading.currentThread().name
| [
"648672371@qq.com"
] | 648672371@qq.com |
def004f06e653303926927e3b105f622f2d9984f | 9ca9cad46f2358717394f39e2cfac2af4a2f5aca | /Week04/01_basics/01_basics_LGY.py | f3d3afb393caffb65ae5b7cb0cc3a3bf563069da | [] | no_license | Artinto/Python_and_AI_Study | ddfd165d1598914e99a125c3019a740a7791f6f6 | 953ff3780287825afe9ed5f9b45017359707d07a | refs/heads/main | 2023-05-05T15:42:25.963855 | 2021-05-24T12:24:31 | 2021-05-24T12:24:31 | 325,218,591 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,924 | py | #01_basics.py
import numpy as np # 행렬계산을 용이하게 해주는 라이브러리
import matplotlib.pyplot as plt # 시각적으로 볼 수 있도록 그래프를 만들어주는 라이브러리
x_data = [1.0, 2.0, 3.0] # 학습시킬 문제 x data
y_data = [2.0, 4.0, 6.0] # 학습시킬 답안 y data
# 함수실행시 실행되는 함수
# linear regression
# y_pred_val = forward(x_val) // line 실행시 x_val이 통과하는 forward함수
def forward(x):
return x * w
# l = loss(x_val, y_val) // line 실행시 x_val, y_val가 통과하는 loss함수
def loss(x, y): # Loss function
y_pred = forward(x) # forward(x_val) 실행
# y_pred = x_val * w
return (y_pred - y) * (y_pred - y) # (x_val * w - y_val)^2
w_list = []
mse_list = []
for w in np.arange(0.0, 4.1, 0.1): # Weight 값 : 0.0에서 4.1전까지 0.1씩 증가한 array만들기
# Print the weights and initialize the lost
print("w=", w)
l_sum = 0 #loss값들의 합 : x_data*w(예측값)과 y_data(실제값)과의 오차들의 합
for x_val, y_val in zip(x_data, y_data): # 각각의 학습데이터를 가져옴
y_pred_val = forward(x_val) # 학습데이터 x를 forward라는 함수에 넣어줌. (forward함수 실행)
# (return x * w) y_pred_val에는 x_val * W 값이 들어감.
l = loss(x_val, y_val) # 두개의 데이터가 loss함수를 거침. (loss함수 실행)
# return (y_pred - y) * (y_pred - y) # (x_val * w - y_val)^2
l_sum += l #loss값들의 합
print("\t", x_val, y_val, y_pred_val, l) # /t : tab
print("MSE=", l_sum / len(x_data)) # MSE값 print
w_list.append(w)
mse_list.append(l_sum / len(x_data))
print(w_list,"asdfasdf")
plt.plot(w_list, mse_list) # x축에 w_list, y축엔 mse_list을 나타내기
plt.ylabel('Loss') # x축 이름
plt.xlabel('w') # y축 이름
plt.show() # 그래프 그려라
| [
"noreply@github.com"
] | Artinto.noreply@github.com |
9f11a8a4e2e6119aaf6a992994dea49a32aebe0d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_myspace.py | 5aa68abf70dd96af4471607b3e717ee92bc74843 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py |
#calss header
class _MYSPACE():
def __init__(self,):
self.name = "MYSPACE"
self.definitions = [u'a social media website used especially for sharing music, music videos, and information about musical artists ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
1df2a55cfa73babf59f39d8d79273ee2f586ee42 | 20e3010608e40a6ec5ea56f69d122a62182e4bdb | /1 - Python-2/10 - unit tests/test_prime_numbers.py | 846a190019cc7098811faedf3ee6b8c99fe68923 | [] | no_license | LarisaOvchinnikova/Python | ee65eac221cd03563d60110118175692564c5b2d | 9cc86a260828662995dec59a6d69528f96d37e79 | refs/heads/master | 2021-08-22T21:41:02.351589 | 2021-05-25T18:37:09 | 2021-05-25T18:37:09 | 253,842,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from prime_numbers import is_prime, fibonacci
def test_is_prime():
assert is_prime(2) == True
assert is_prime(3) == True
assert is_prime(-1) == False
assert is_prime(4) == False
def test_fibonacci():
assert fibonacci(1) == 1
| [
"larisaplantation@gmail.com"
] | larisaplantation@gmail.com |
c7480f1b1bec354bb6248ee2a68df0ae9e94dbc2 | 462c56e7454c97e0541588b9be66a4e216ea20fd | /453.minimum-moves-to-equal-array-elements.py | 2e6d96729301360e54f20d7f08acd741bd43ac3d | [] | no_license | LouisYLWang/leetcode_python | d5ac6289e33c5d027f248aa3e7dd66291354941c | 2ecaeed38178819480388b5742bc2ea12009ae16 | refs/heads/master | 2020-05-27T08:38:48.532000 | 2019-12-28T07:08:57 | 2019-12-28T07:08:57 | 188,549,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | #
# @lc app=leetcode id=453 lang=python
#
# [453] Minimum Moves to Equal Array Elements
#
class Solution:
def minMoves(self, nums):
minvalue = min(nums)
sum_ = sum(nums)
n = len(nums)
return sum_ - minvalue * n
| [
"louis.yl.wang@outlook.com"
] | louis.yl.wang@outlook.com |
492edc1fcfc189426a503a19e709f3bc7b819d30 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1245-042/sdB_pg_1245-042_lc.py | 91ce244322e683ec2e15ee1d01e0cc7f5bb72ef9 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from gPhoton.gAperture import gAperture
def main():
    # Extract a NUV light curve for sdB PG 1245-042 with 30 s bins.
    # Aperture values are in degrees: radius 0.00555556 deg = 20 arcsec,
    # background annulus 21.5-37.4 arcsec.
    gAperture(band="NUV", skypos=[192.058083,-4.513167], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1245-042/sdB_pg_1245-042_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
3b06e98026136575d9506393d4ebba3fb1fec542 | c1e46fb0a7d8d86fc52dfb54016e17d22f81c2b4 | /eterea_quickLocators/scripts/scale_selected_locators.py | 0bb1ed925f68f58a2d567fb23c92ac9f4ee4cd19 | [] | no_license | Tilapiatsu/modo-tila_customconfig | 130ac84397f87048c87cd670f152df74eefd6b26 | 749d02fcb4d05ec0dbe6895e3d415751f181592e | refs/heads/master | 2021-01-17T12:38:20.269540 | 2018-10-22T08:37:32 | 2018-10-22T08:37:32 | 59,156,743 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,947 | py | #python
# scale_selected_locators.py
#
# Version 1.2 - By Cristobal Vila, 2013 - With the help of other members from Luxology Forums :-)
# Special thanks to MonkeybrotherJR
#
# To scale all channels in a selected Locators,
# no matter the kind of Locators and if there are some channels greyed
#
# www.etereaestudios.com
import lx
import sys  # fix: the except handler below used sys without importing it,
            # so any failure raised NameError instead of being reported
try:
    scene_svc = lx.Service("sceneservice")
    # Define my argument (the scale factor, e.g. "2.0"):
    myargument = lx.args()[0]
    # get selected layers
    selected_layers = lx.evalN("query sceneservice selection ? all")
    # drop selection so that we can work on one item at a time
    lx.eval("select.drop item")
    # create empty list to put locators in
    locators = []
    for item in selected_layers:
        # select layer
        scene_svc.select("item",str(item))
        lx.eval('select.item {%s} set' % item)
        # get item type
        itemType = scene_svc.query("item.type")
        if itemType == 'locator':
            locators.append(item)
            # Ask if our locator has a default or custom shape:
            lx.eval('item.channel locator$drawShape ?')
            # This gives a result (default / custom)
            # Save that result into a variable:
            locatorShape = lx.eval1('item.channel locator$drawShape ?')
            if locatorShape == 'default':
                # Change size for standard default locator:
                lx.eval("item.channel locator$size ?*" + myargument)
            elif locatorShape == 'custom':
                # Ask which is actual shape:
                lx.eval("item.channel locator$isShape ?")
                # This gives a result (box, pyramid, plane…)
                # Save that result into a variable:
                originalShape = lx.eval("item.channel locator$isShape ?")
                # Change size for standard default locator:
                lx.eval("item.channel locator$size ?*" + myargument)
                # Set shape to Box so the XYZ size channels become available:
                lx.eval("item.channel locator$isShape box")
                lx.eval("item.channel locator$isSize.X ?*" + myargument)
                lx.eval("item.channel locator$isSize.Y ?*" + myargument)
                lx.eval("item.channel locator$isSize.Z ?*" + myargument)
                # Set shape to Circle so the Radius channel becomes available:
                lx.eval("item.channel locator$isShape circle")
                lx.eval("item.channel locator$isRadius ?*" + myargument)
                # Change shape back to the one saved inside our first variable:
                lx.eval("item.channel locator$isShape %s" % originalShape)
    # re-select the user selected layers
    for item in selected_layers:
        lx.eval('select.item {%s} add' % item)
except:
    lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno))
"tilapiatsu@hotmail.fr"
] | tilapiatsu@hotmail.fr |
a5c615b39fbe692f7ddbc540eb34891cbe602283 | ad20495c8df427211dba51c93c507365f9fce319 | /init_topics.py | 3f60ed5eadea3515bdf1c8b64457c332722eb00d | [
"LicenseRef-scancode-public-domain"
] | permissive | tilejet/tilejet-server | 779398257c65138c906f3989c63e029dfe45587e | 7bd0caa18cde98a8fd80aeea6e06bbe8aa2fa1be | refs/heads/master | 2021-01-10T02:41:23.553939 | 2015-12-06T07:18:56 | 2015-12-06T07:19:59 | 43,448,267 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | from django.conf import settings
from geowatchdjango.utils import provision_geowatch_client
verbose = True
enabled = settings.GEOWATCH_ENABLED
if not enabled:
print "GeoWatch not enabled via settings"
topic_requests = settings.TILEJET_GEOWATCH_TOPIC_REQUESTS
topic_logs = settings.TILEJET_GEOWATCH_TOPIC_LOGS
topic_stats = settings.TILEJET_GEOWATCH_TOPIC_STATS
if enabled:
client = provision_geowatch_client()
topics = client.list_topics(limit=100, verbose=verbose)
print topics
client.create_topic(topic_requests)
client.create_topic(topic_logs)
client.create_topic(topic_stats)
print "Final Check..."
print client.list_topics(limit=100, verbose=verbose)
else:
print "Missing settings"
| [
"pjdufour.dev@gmail.com"
] | pjdufour.dev@gmail.com |
0e828d914306ac83778d23de820a991cf5e6c1a2 | d257a3c9c96b919d7ba8ffe4b674437aea76afc7 | /zips/script.vistatv-installer/extractor.py | 7eedae1c68e2b89e9861ab34f4674f6ac0de2fe4 | [] | no_license | biglad/eptvinstall | 457053791684127c91bb847262d91cd76e9e0a12 | 4eaa522a7d9edc068e7824576147be190897fb09 | refs/heads/master | 2022-05-06T05:00:23.291801 | 2022-04-14T23:17:11 | 2022-04-14T23:17:11 | 230,219,713 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py |
import xbmcgui
import utils
import os
import xbmc
KODIV = float(xbmc.getInfoLabel("System.BuildVersion")[:4])
if KODIV > 17:
import zfile as zipfile #FTG mod for Kodi 18
else:
import zipfile
def extract(packedfile, unpackpath, dname, dp = None):
if not dname:
dname = "CerebroTV"
if not dp:
dp = xbmcgui.DialogProgress()
dp.create(dname,"Adding New Files to System",'[COLOR=black].[/COLOR]', 'DO NOT TURN OFF! ')
dp.update(0)
zfile = zipfile.ZipFile(packedfile, 'r')
nItem = float(len(zfile.infolist()))
index = 0
for item in zfile.infolist():
index += 1
percent = int(index / nItem *100)
filename = item.filename
dp.update(percent)
try:
zfile.extract(item, unpackpath)
except Exception, e:
utils.log('Changelog error in extractAll')
utils.log(e)
zfile.close()
dp.close()
dp.create("DOING HOUSE KEEPING",'[COLOR=black].[/COLOR]','CLEANING UP', ' ')
xbmc.sleep(2500)
try: os.unlink(packedfile)
except: pass
xbmc.sleep(2500)
try: utils.DeleteFile(packedfile)
except: pass
try: os.remove(packedfile)
except: pass
dp.close() | [
"biglad@mgawow.co.uk"
] | biglad@mgawow.co.uk |
5d2c11912d326b2c3506e24f1f9f563969a58800 | d3239c2e5652378b17932553f80be1dbcbbdfdbf | /python/week14/p_00.py | eff8eaff11bcd268f2727c5834064ef89fe0e157 | [] | no_license | jorge-alvarado-revata/code_educa | 673a8b10817c24b3fc2c5792d216837c15a701aa | 241e1e3f43586e486b73cee8f385ab74dd99caf1 | refs/heads/main | 2022-12-25T21:37:36.988225 | 2020-10-13T18:59:38 | 2020-10-13T18:59:38 | 303,801,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | class Foo:
def __init__(self, name):
self._name = name
def get_name(self):
return self._name
def __str__(self):
return 'object Foo: {}'.format(self._name)
a = Foo('myfoo')
print(a)
print(a.get_name())
| [
"serviciosplusapp@gmail.com"
] | serviciosplusapp@gmail.com |
44183f2c6594b09ae874bf3617411f1646150c47 | 3bc8823224b335c4bffe10db525e109a44d1000a | /backend/manage.py | b5e28faeb76b7beace9e0f99570e872daa1a9833 | [] | no_license | crowdbotics-apps/msm-mobile-231109-d-15521 | 55068537db8f5e2e47c139a49eaabbad1dd5ca07 | b5c8274463bbad7e68e1bb7bf5d274c5a9fdf5c1 | refs/heads/master | 2023-01-14T05:02:35.599404 | 2020-11-23T09:52:34 | 2020-11-23T09:52:34 | 315,269,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'msm_mobile_231109_d_15521.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
a7dc79a7ddb9579d57776b334612349c8cf06707 | 5537eec7f43098d216d2b550678c8d10b2a26f09 | /venv/tower/lib/python2.7/site-packages/azure/mgmt/logic/models/run_workflow_parameters.py | c0fe65944511e5100dc38f57793055e7c3a88f22 | [] | no_license | wipro-sdx/Automation | f0ae1512b8d9d491d7bacec94c8906d06d696407 | a8c46217d0fbe51a71597b5db87cbe98ed19297a | refs/heads/master | 2021-07-08T11:09:05.314435 | 2018-05-02T07:18:54 | 2018-05-02T07:18:54 | 131,812,982 | 0 | 1 | null | 2020-07-23T23:22:33 | 2018-05-02T07:15:28 | Python | UTF-8 | Python | false | false | 1,043 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RunWorkflowParameters(Model):
"""RunWorkflowParameters.
:param name: Gets or sets the name of workflow run trigger.
:type name: str
:param outputs: Gets or sets the outputs of workflow run trigger.
:type outputs: object
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'outputs': {'key': 'outputs', 'type': 'object'},
}
def __init__(self, name=None, outputs=None):
self.name = name
self.outputs = outputs
| [
"admin@example.com"
] | admin@example.com |
85b27e1850e9ab3f662cc6e402360ba7f1a4fbbf | c2f85286d1e21fb803c35f6d996abc850b993e53 | /mystorage/views.py | bac4b48fe57ef1003f3a3b1a4254cadca2c86c66 | [] | no_license | devdw98/likelion_drf | dfeec1bf5ee153918807f99040c8c33240c4344c | 6d0171961bc93f4edd7998b7351034e0a936079d | refs/heads/master | 2020-07-29T20:38:29.041098 | 2019-10-27T07:22:53 | 2019-10-27T07:22:53 | 209,951,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | from rest_framework import viewsets
from .models import Essay, Album, Files
from .serializers import EssaySerializer, AlbumSerializer, FilesSerializer
from rest_framework.filters import SearchFilter # 검색
from rest_framework.parsers import MultiPartParser,FormParser
from rest_framework.response import Response
from rest_framework import status
class PostViewSet(viewsets.ModelViewSet):
queryset = Essay.objects.all()
serializer_class = EssaySerializer
filter_backends = [SearchFilter]
search_fields = ('title', 'body')
def perform_create(self, serializer):
#직접 작성한 유저를 자동으로 저장
serializer.save(author=self.request.user)
#현재 request 낸 유저 == self.request.user >> 유저가 쓴 글만 나타남
def get_queryset(self):
qs = super().get_queryset()
if self.request.user.is_authenticated: #login
qs = qs.filter(author = self.request.user)
else: #not login
qs = qs.none
return qs
class ImgViewSet(viewsets.ModelViewSet):
queryset = Album.objects.all()
serializer_class = AlbumSerializer
class FileViewSet(viewsets.ModelViewSet):
queryset = Files.objects.all()
serializer_class = FilesSerializer
#parser_class 지정
parser_classes = (MultiPartParser, FormParser) #다양한 미디어 파일 형식을 수락할 수 있음
#create() Overriding - post()
def post(self, request, *args, **kwargs): #메소드 커스터마이징
serializer = FilesSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=HTTP_201_CREATED)
else:
return Response(serializer.error, status=HTTP_400_BAD_REQUEST) | [
"devdw98@gmail.com"
] | devdw98@gmail.com |
667ac49be64cde87e2044c48985e2d91cfce26bd | b9f0399cf7ea0a66fb76900f0c2ceac2d4859d34 | /venv/lib/python3.6/site-packages/pygments/lexers/csound.py | 2c9b1e97286cc2dd434d7e45ffb236c1d353fc40 | [] | no_license | huangtaosdt/QA-website-zsb | eea0fcd6a2415cf5c61f01f6692d39a544ed900a | 518470a3b37d6561797a38de42fe0c81d27c6ceb | refs/heads/master | 2021-09-20T15:19:44.559747 | 2018-08-11T03:53:17 | 2018-08-11T03:53:17 | 100,498,996 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,540 | py | # -*- coding: utf-8 -*-
"""
pygments.lexers.csound
~~~~~~~~~~~~~~~~~~~~~~
Lexers for CSound languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default, include, using, words
from pygments.token import Comment, Keyword, Name, Number, Operator, Punctuation, \
String, Text
from pygments.lexers._csound_builtins import OPCODES
from pygments.lexers.html import HtmlLexer
from pygments.lexers.python import PythonLexer
from pygments.lexers.scripting import LuaLexer
__all__ = ['CsoundScoreLexer', 'CsoundOrchestraLexer', 'CsoundDocumentLexer']
newline = (r'((?:(?:;|//).*)*)(\n)', bygroups(Comment.Single, Text))
class CsoundLexer(RegexLexer):
# Subclasses must define a 'single-line string' state.
tokens = {
'whitespace': [
(r'[ \t]+', Text),
(r'\\\n', Text),
(r'/[*](.|\n)*?[*]/', Comment.Multiline)
],
'macro call': [
(r'(\$\w+\.?)(\()', bygroups(Comment.Preproc, Punctuation),
'function macro call'),
(r'\$\w+(\.|\b)', Comment.Preproc)
],
'function macro call': [
(r"((?:\\['\)]|[^'\)])+)(')", bygroups(Comment.Preproc, Punctuation)),
(r"([^'\)]+)(\))", bygroups(Comment.Preproc, Punctuation), '#pop')
],
'whitespace or macro call': [
include('whitespace'),
include('macro call')
],
'preprocessor directives': [
(r'#(e(nd(if)?|lse)|ifn?def|undef)\b|##', Comment.Preproc),
(r'#include\b', Comment.Preproc, 'include'),
(r'#[ \t]*define\b', Comment.Preproc, 'macro name'),
(r'@+[ \t]*\d*', Comment.Preproc)
],
'include': [
include('whitespace'),
(r'"', String, 'single-line string')
],
'macro name': [
include('whitespace'),
(r'(\w+)(\()', bygroups(Comment.Preproc, Text),
'function macro argument list'),
(r'\w+', Comment.Preproc, 'object macro definition after name')
],
'object macro definition after name': [
include('whitespace'),
(r'#', Punctuation, 'object macro replacement text')
],
'object macro replacement text': [
(r'(\\#|[^#])+', Comment.Preproc),
(r'#', Punctuation, '#pop:3')
],
'function macro argument list': [
(r"(\w+)(['#])", bygroups(Comment.Preproc, Punctuation)),
(r'(\w+)(\))', bygroups(Comment.Preproc, Punctuation),
'function macro definition after name')
],
'function macro definition after name': [
(r'[ \t]+', Text),
(r'#', Punctuation, 'function macro replacement text')
],
'function macro replacement text': [
(r'(\\#|[^#])+', Comment.Preproc),
(r'#', Punctuation, '#pop:4')
]
}
class CsoundScoreLexer(CsoundLexer):
"""
For `Csound <http://csound.github.io>`_ scores.
.. versionadded:: 2.1
"""
name = 'Csound Score'
aliases = ['csound-score', 'csound-sco']
filenames = ['*.sco']
tokens = {
'partial statement': [
include('preprocessor directives'),
(r'\d+e[+-]?\d+|(\d+\.\d*|\d*\.\d+)(e[+-]?\d+)?', Number.Float),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer),
(r'"', String, 'single-line string'),
(r'[+\-*/%^!=<>|&#~.]', Operator),
(r'[]()[]', Punctuation),
(r'\w+', Comment.Preproc)
],
'statement': [
include('whitespace or macro call'),
newline + ('#pop',),
include('partial statement')
],
'root': [
newline,
include('whitespace or macro call'),
(r'[{}]', Punctuation, 'statement'),
(r'[abefimq-tv-z]|[nN][pP]?', Keyword, 'statement')
],
'single-line string': [
(r'"', String, '#pop'),
(r'[^\\"]+', String)
]
}
class CsoundOrchestraLexer(CsoundLexer):
"""
For `Csound <http://csound.github.io>`_ orchestras.
.. versionadded:: 2.1
"""
name = 'Csound Orchestra'
aliases = ['csound', 'csound-orc']
filenames = ['*.orc']
user_defined_opcodes = set()
def opcode_name_callback(lexer, match):
opcode = match.group(0)
lexer.user_defined_opcodes.add(opcode)
yield match.start(), Name.Function, opcode
def name_callback(lexer, match):
name = match.group(0)
if re.match('p\d+$', name) or name in OPCODES:
yield match.start(), Name.Builtin, name
elif name in lexer.user_defined_opcodes:
yield match.start(), Name.Function, name
else:
nameMatch = re.search(r'^(g?[aikSw])(\w+)', name)
if nameMatch:
yield nameMatch.start(1), Keyword.Type, nameMatch.group(1)
yield nameMatch.start(2), Name, nameMatch.group(2)
else:
yield match.start(), Name, name
tokens = {
'label': [
(r'\b(\w+)(:)', bygroups(Name.Label, Punctuation))
],
'partial expression': [
include('preprocessor directives'),
(r'\b(0dbfs|k(r|smps)|nchnls(_i)?|sr)\b', Name.Variable.Global),
(r'\d+e[+-]?\d+|(\d+\.\d*|\d*\.\d+)(e[+-]?\d+)?', Number.Float),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer),
(r'"', String, 'single-line string'),
(r'\{\{', String, 'multi-line string'),
(r'[+\-*/%^!=&|<>#~¬]', Operator),
(r'[](),?:[]', Punctuation),
(words((
# Keywords
'do', 'else', 'elseif', 'endif', 'enduntil', 'fi', 'if', 'ithen', 'kthen',
'od', 'then', 'until', 'while',
# Opcodes that act as control structures
'return', 'timout'
), prefix=r'\b', suffix=r'\b'), Keyword),
(words(('goto', 'igoto', 'kgoto', 'rigoto', 'tigoto'),
prefix=r'\b', suffix=r'\b'), Keyword, 'goto label'),
(words(('cggoto', 'cigoto', 'cingoto', 'ckgoto', 'cngoto'),
prefix=r'\b', suffix=r'\b'), Keyword,
('goto label', 'goto expression')),
(words(('loop_ge', 'loop_gt', 'loop_le', 'loop_lt'),
prefix=r'\b', suffix=r'\b'), Keyword,
('goto label', 'goto expression', 'goto expression', 'goto expression')),
(r'\bscoreline(_i)?\b', Name.Builtin, 'scoreline opcode'),
(r'\bpyl?run[it]?\b', Name.Builtin, 'python opcode'),
(r'\blua_(exec|opdef)\b', Name.Builtin, 'lua opcode'),
(r'\b[a-zA-Z_]\w*\b', name_callback)
],
'expression': [
include('whitespace or macro call'),
newline + ('#pop',),
include('partial expression')
],
'root': [
newline,
include('whitespace or macro call'),
(r'\binstr\b', Keyword, ('instrument block', 'instrument name list')),
(r'\bopcode\b', Keyword, ('opcode block', 'opcode parameter list',
'opcode types', 'opcode types', 'opcode name')),
include('label'),
default('expression')
],
'instrument name list': [
include('whitespace or macro call'),
(r'\d+|\+?[a-zA-Z_]\w*', Name.Function),
(r',', Punctuation),
newline + ('#pop',)
],
'instrument block': [
newline,
include('whitespace or macro call'),
(r'\bendin\b', Keyword, '#pop'),
include('label'),
default('expression')
],
'opcode name': [
include('whitespace or macro call'),
(r'[a-zA-Z_]\w*', opcode_name_callback, '#pop')
],
'opcode types': [
include('whitespace or macro call'),
(r'0|[]afijkKoOpPStV[]+', Keyword.Type, '#pop'),
(r',', Punctuation)
],
'opcode parameter list': [
include('whitespace or macro call'),
newline + ('#pop',)
],
'opcode block': [
newline,
include('whitespace or macro call'),
(r'\bendop\b', Keyword, '#pop'),
include('label'),
default('expression')
],
'goto label': [
include('whitespace or macro call'),
(r'\w+', Name.Label, '#pop'),
default('#pop')
],
'goto expression': [
include('whitespace or macro call'),
(r',', Punctuation, '#pop'),
include('partial expression')
],
'single-line string': [
include('macro call'),
(r'"', String, '#pop'),
# From https://github.com/csound/csound/blob/develop/Opcodes/fout.c#L1405
(r'%\d*(\.\d+)?[cdhilouxX]', String.Interpol),
(r'%[!%nNrRtT]|[~^]|\\([\\aAbBnNrRtT"]|[0-7]{1,3})', String.Escape),
(r'[^\\"~$%\^\n]+', String),
(r'[\\"~$%\^\n]', String)
],
'multi-line string': [
(r'\}\}', String, '#pop'),
(r'[^}]+|\}(?!\})', String)
],
'scoreline opcode': [
include('whitespace or macro call'),
(r'\{\{', String, 'scoreline'),
default('#pop')
],
'scoreline': [
(r'\}\}', String, '#pop'),
(r'([^}]+)|\}(?!\})', using(CsoundScoreLexer))
],
'python opcode': [
include('whitespace or macro call'),
(r'\{\{', String, 'python'),
default('#pop')
],
'python': [
(r'\}\}', String, '#pop'),
(r'([^}]+)|\}(?!\})', using(PythonLexer))
],
'lua opcode': [
include('whitespace or macro call'),
(r'"', String, 'single-line string'),
(r'\{\{', String, 'lua'),
(r',', Punctuation),
default('#pop')
],
'lua': [
(r'\}\}', String, '#pop'),
(r'([^}]+)|\}(?!\})', using(LuaLexer))
]
}
class CsoundDocumentLexer(RegexLexer):
"""
For `Csound <http://csound.github.io>`_ documents.
.. versionadded:: 2.1
"""
name = 'Csound Document'
aliases = ['csound-document', 'csound-csd']
filenames = ['*.csd']
# These tokens are based on those in XmlLexer in pygments/lexers/html.py. Making
# CsoundDocumentLexer a subclass of XmlLexer rather than RegexLexer may seem like a
# better idea, since Csound Document files look like XML files. However, Csound
# Documents can contain Csound comments (preceded by //, for example) before and
# after the root element, unescaped bitwise AND & and less than < operators, etc. In
# other words, while Csound Document files look like XML files, they may not actually
# be XML files.
tokens = {
'root': [
newline,
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'[^<&;/]+', Text),
(r'<\s*CsInstruments', Name.Tag, ('orchestra', 'tag')),
(r'<\s*CsScore', Name.Tag, ('score', 'tag')),
(r'<\s*[hH][tT][mM][lL]', Name.Tag, ('HTML', 'tag')),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag)
],
'orchestra': [
(r'<\s*/\s*CsInstruments\s*>', Name.Tag, '#pop'),
(r'(.|\n)+?(?=<\s*/\s*CsInstruments\s*>)', using(CsoundOrchestraLexer))
],
'score': [
(r'<\s*/\s*CsScore\s*>', Name.Tag, '#pop'),
(r'(.|\n)+?(?=<\s*/\s*CsScore\s*>)', using(CsoundScoreLexer))
],
'HTML': [
(r'<\s*/\s*[hH][tT][mM][lL]\s*>', Name.Tag, '#pop'),
(r'(.|\n)+?(?=<\s*/\s*[hH][tT][mM][lL]\s*>)', using(HtmlLexer))
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop')
],
'attr': [
(r'\s+', Text),
(r'".*?"', String, '#pop'),
(r"'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop')
]
}
| [
"huangtaosdt@163.com"
] | huangtaosdt@163.com |
c7dcf5d5e7428481fbe074379f43dd32bb6f556c | bd02769c3f8cbd250c0c488c580852b28bda2ff4 | /venv/Lib/site-packages/instruments/tests/test_thorlabs/test_thorlabs_lcc25.py | 1865b5f51c9e0c1dcc19af63b7187aeb37a1620f | [] | no_license | LiamDroog/DG645-Interface | 01ef09083b3e08de186ddef00cd0cc3a774e468b | 5f81ea76797893f807bb2a8b4a967de1f8effba0 | refs/heads/master | 2023-06-30T01:39:42.142594 | 2021-08-04T15:05:01 | 2021-08-04T15:05:01 | 387,855,182 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,417 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module containing tests for the Thorlabs LCC25
"""
# IMPORTS ####################################################################
from __future__ import absolute_import
import pytest
import quantities as pq
import instruments as ik
from instruments.tests import expected_protocol, unit_eq
# TESTS ######################################################################
def test_lcc25_name():
with expected_protocol(
ik.thorlabs.LCC25,
[
"*idn?"
],
[
"*idn?",
"bloopbloop",
">"
],
sep="\r"
) as lcc:
name = lcc.name
assert name == "bloopbloop", "got {} expected bloopbloop".format(name)
def test_lcc25_frequency():
with expected_protocol(
ik.thorlabs.LCC25,
[
"freq?",
"freq=10.0"
],
[
"freq?",
"20",
">freq=10.0",
">"
],
sep="\r"
) as lcc:
unit_eq(lcc.frequency, pq.Quantity(20, "Hz"))
lcc.frequency = 10.0
def test_lcc25_frequency_lowlimit():
with pytest.raises(ValueError):
with expected_protocol(
ik.thorlabs.LCC25,
[
"freq=0.0"
],
[
"freq=0.0",
">"
],
sep="\r"
) as lcc:
lcc.frequency = 0.0
def test_lcc25_frequency_highlimit():
with pytest.raises(ValueError):
with expected_protocol(
ik.thorlabs.LCC25,
[
"freq=160.0"
],
[
"freq=160.0",
">"
],
sep="\r"
) as lcc:
lcc.frequency = 160.0
def test_lcc25_mode():
with expected_protocol(
ik.thorlabs.LCC25,
[
"mode?",
"mode=1"
],
[
"mode?",
"2",
">mode=1",
">"
],
sep="\r"
) as lcc:
assert lcc.mode == ik.thorlabs.LCC25.Mode.voltage2
lcc.mode = ik.thorlabs.LCC25.Mode.voltage1
def test_lcc25_mode_invalid():
with pytest.raises(ValueError):
with expected_protocol(
ik.thorlabs.LCC25,
[],
[]
) as lcc:
lcc.mode = "blo"
def test_lcc25_enable():
with expected_protocol(
ik.thorlabs.LCC25,
[
"enable?",
"enable=1"
],
[
"enable?",
"0",
">enable=1",
">"
],
sep="\r"
) as lcc:
assert lcc.enable is False
lcc.enable = True
def test_lcc25_enable_invalid_type():
with pytest.raises(TypeError):
with expected_protocol(
ik.thorlabs.LCC25,
[],
[]
) as lcc:
lcc.enable = "blo"
def test_lcc25_extern():
with expected_protocol(
ik.thorlabs.LCC25,
[
"extern?",
"extern=1"
],
[
"extern?",
"0",
">extern=1",
">"
],
sep="\r"
) as lcc:
assert lcc.extern is False
lcc.extern = True
def test_tc200_extern_invalid_type():
with pytest.raises(TypeError):
with expected_protocol(
ik.thorlabs.LCC25,
[],
[]
) as tc:
tc.extern = "blo"
def test_lcc25_remote():
with expected_protocol(
ik.thorlabs.LCC25,
[
"remote?",
"remote=1"
],
[
"remote?",
"0",
">remote=1",
">"
],
sep="\r"
) as lcc:
assert lcc.remote is False
lcc.remote = True
def test_tc200_remote_invalid_type():
with pytest.raises(TypeError):
with expected_protocol(
ik.thorlabs.LCC25,
[],
[]
) as tc:
tc.remote = "blo"
def test_lcc25_voltage1():
with expected_protocol(
ik.thorlabs.LCC25,
[
"volt1?",
"volt1=10.0"
],
[
"volt1?",
"20",
">volt1=10.0",
">"
],
sep="\r"
) as lcc:
unit_eq(lcc.voltage1, pq.Quantity(20, "V"))
lcc.voltage1 = 10.0
def test_check_cmd():
assert ik.thorlabs.thorlabs_utils.check_cmd("blo") == 1
assert ik.thorlabs.thorlabs_utils.check_cmd("CMD_NOT_DEFINED") == 0
assert ik.thorlabs.thorlabs_utils.check_cmd("CMD_ARG_INVALID") == 0
def test_lcc25_voltage2():
with expected_protocol(
ik.thorlabs.LCC25,
[
"volt2?",
"volt2=10.0",
],
[
"volt2?",
"20",
">volt2=10.0",
">"
],
sep="\r"
) as lcc:
unit_eq(lcc.voltage2, pq.Quantity(20, "V"))
lcc.voltage2 = 10.0
def test_lcc25_minvoltage():
with expected_protocol(
ik.thorlabs.LCC25,
[
"min?",
"min=10.0"
],
[
"min?",
"20",
">min=10.0",
">"
],
sep="\r"
) as lcc:
unit_eq(lcc.min_voltage, pq.Quantity(20, "V"))
lcc.min_voltage = 10.0
def test_lcc25_maxvoltage():
with expected_protocol(
ik.thorlabs.LCC25,
[
"max?",
"max=10.0"
],
[
"max?",
"20",
">max=10.0",
">"
],
sep="\r"
) as lcc:
unit_eq(lcc.max_voltage, pq.Quantity(20, "V"))
lcc.max_voltage = 10.0
def test_lcc25_dwell():
with expected_protocol(
ik.thorlabs.LCC25,
[
"dwell?",
"dwell=10"
],
[
"dwell?",
"20",
">dwell=10",
">"
],
sep="\r"
) as lcc:
unit_eq(lcc.dwell, pq.Quantity(20, "ms"))
lcc.dwell = 10
def test_lcc25_dwell_positive():
with pytest.raises(ValueError):
with expected_protocol(
ik.thorlabs.LCC25,
[
"dwell=-10"
],
[
"dwell=-10",
">"
],
sep="\r"
) as lcc:
lcc.dwell = -10
def test_lcc25_increment():
with expected_protocol(
ik.thorlabs.LCC25,
[
"increment?",
"increment=10.0"
],
[
"increment?",
"20",
">increment=10.0",
">"
],
sep="\r"
) as lcc:
unit_eq(lcc.increment, pq.Quantity(20, "V"))
lcc.increment = 10.0
def test_lcc25_increment_positive():
with pytest.raises(ValueError):
with expected_protocol(
ik.thorlabs.LCC25,
[
"increment=-10"
],
[
"increment=-10",
">"
],
sep="\r"
) as lcc:
lcc.increment = -10
def test_lcc25_default():
with expected_protocol(
ik.thorlabs.LCC25,
[
"default"
],
[
"default",
"1",
">"
],
sep="\r"
) as lcc:
lcc.default()
def test_lcc25_save():
with expected_protocol(
ik.thorlabs.LCC25,
[
"save"
],
[
"save",
"1",
">"
],
sep="\r"
) as lcc:
lcc.save()
def test_lcc25_set_settings():
with expected_protocol(
ik.thorlabs.LCC25,
[
"set=2"
],
[
"set=2",
"1",
">"
],
sep="\r"
) as lcc:
lcc.set_settings(2)
def test_lcc25_set_settings_invalid():
with pytest.raises(ValueError):
with expected_protocol(
ik.thorlabs.LCC25,
[],
[],
sep="\r"
) as lcc:
lcc.set_settings(5)
def test_lcc25_get_settings():
with expected_protocol(
ik.thorlabs.LCC25,
[
"get=2"
],
[
"get=2",
"1",
">"
],
sep="\r"
) as lcc:
lcc.get_settings(2)
def test_lcc25_get_settings_invalid():
with pytest.raises(ValueError):
with expected_protocol(
ik.thorlabs.LCC25,
[],
[],
sep="\r"
) as lcc:
lcc.get_settings(5)
def test_lcc25_test_mode():
with expected_protocol(
ik.thorlabs.LCC25,
[
"test"
],
[
"test",
"1",
">"
],
sep="\r"
) as lcc:
lcc.test_mode()
def test_lcc25_remote_invalid_type():
with pytest.raises(TypeError):
with expected_protocol(
ik.thorlabs.LCC25,
[],
[]
) as lcc:
lcc.remote = "blo"
def test_lcc25_extern_invalid_type():
with pytest.raises(TypeError):
with expected_protocol(
ik.thorlabs.LCC25,
[],
[]
) as lcc:
lcc.extern = "blo"
| [
"droog@ualberta.ca"
] | droog@ualberta.ca |
76798c14c341150c22b20258d37b3a778b75999d | 88023c9a62994e91291c67088156a2894cc26e9e | /corral/run/alert.py | 158db6152901180cc09d4f126540afa3dd86c8dc | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | toros-astro/corral | 41e9d0224d734c4268bf5161d472b3c0375842f0 | 75474b38ff366330d33644461a902d07374a5bbc | refs/heads/master | 2023-06-10T15:56:12.264725 | 2018-09-03T17:59:41 | 2018-09-03T17:59:41 | 44,282,921 | 6 | 5 | BSD-3-Clause | 2023-03-24T12:03:17 | 2015-10-14T23:56:40 | Python | UTF-8 | Python | false | false | 7,350 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Cabral, Juan; Sanchez, Bruno & Berois, Martín
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
# IMPORTS
# =============================================================================
import inspect
import datetime
import collections
import six
from .. import db, util, exceptions
from ..db.default_models import Alerted
from ..core import logger
from .base import Processor, Runner
conf = util.dimport("corral.conf", lazy=True)
# =============================================================================
# CONSTANTS
# =============================================================================
ALERT_TEMPLATE = (
"[{project_name}-ALERT @ {now}-15s] Check the object '{obj}'\n")
# =============================================================================
# ALERT CLASSES
# =============================================================================
class AlertRunner(Runner):
def validate_target(self, alert_cls):
if not (inspect.isclass(alert_cls) and issubclass(alert_cls, Alert)):
msg = "alert_cls '{}' must be subclass of 'corral.run.Alert'"
raise TypeError(msg.format(alert_cls))
def run(self):
alert_cls = self.target
logger.info("Executing alert '{}'".format(alert_cls))
with db.session_scope() as session, alert_cls(session) as alert:
for obj in alert.generate():
alert.validate(obj)
generator = alert.process(obj) or []
if not hasattr(generator, "__iter__"):
generator = (generator,)
for proc_obj in generator:
alert.validate(proc_obj)
alert.save(proc_obj)
logger.info("Done Alert '{}'".format(alert_cls))
class Alert(Processor):
runner_class = AlertRunner
model = None
conditions = None
ordering = None
auto_register = True
@classmethod
def retrieve_python_path(cls):
for import_string in conf.settings.ALERTS:
if cls == util.dimport(import_string):
return import_string
def setup(self):
for ep in self.alert_to:
ep.setup(self)
def teardown(self, type, value, traceback):
for ep in self.alert_to:
ep.teardown(type, value, traceback)
def generate(self):
if self.model is None or self.conditions is None:
clsname = type(self).__name__
raise NotImplementedError(
"'{}' subclass with a default generate must redefine "
"'model' and 'conditions' class-attributes".format(clsname))
query = self.session.query(self.model).filter(*self.conditions)
if self.auto_register:
query = self._filter_auto_registered(query)
else:
query = self.filter_registered(query)
if self.ordering is not None:
query = query.order_by(*self.ordering)
return query
def _filter_auto_registered(self, query):
filters = Alerted.alert_to_columns(type(self))
filters.update(Alerted.model_class_to_column(self.model))
alerteds = self.session.query(Alerted.model_ids).filter_by(**filters)
if alerteds.count():
grouped_id = collections.defaultdict(set)
for row in alerteds.all():
for k, v in six.iteritems(row[0]):
grouped_id[k].add(v)
exclude = []
for k, v in grouped_id.items():
exclude.append(getattr(self.model, k).in_(v))
query = query.filter(~db.and_(*exclude))
return query
def _auto_register(self, obj):
register = Alerted()
register.alert = type(self)
register.model = obj
register.created_at = datetime.datetime.utcnow()
return register
def filter_registered(self, query):
raise NotImplementedError()
def register(self, obj):
raise NotImplementedError()
def process(self, obj):
for ep in self.alert_to:
ep.process(obj)
if self.auto_register:
return self._auto_register(obj)
else:
return self.register(obj)
def render_alert(self, utcnow, endpoint, obj):
return ALERT_TEMPLATE.format(
project_name=conf.PACKAGE, now=utcnow.isoformat(), obj=obj)
# =============================================================================
# FUNCTIONS
# =============================================================================
def alerts_groups():
groups = set()
for cls in load_alerts():
groups.update(cls.get_groups())
return tuple(sorted(groups))
def load_alerts(groups=None):
alerts = []
logger.debug("Loading Alert Classes")
for import_string in conf.settings.ALERTS:
cls = util.dimport(import_string)
if not (inspect.isclass(cls) and issubclass(cls, Alert)):
msg = "STEP '{}' must be subclass of 'corral.run.Alert'"
raise exceptions.ImproperlyConfigured(msg.format(import_string))
if groups is None or set(cls.get_groups()).intersection(groups):
alerts.append(cls)
alerts.sort(key=lambda cls: cls.__name__)
return tuple(alerts)
def execute_alert(alert_cls, sync=False):
if not (inspect.isclass(alert_cls) and issubclass(alert_cls, Alert)):
msg = "alert_cls '{}' must be subclass of 'corral.run.Alert'"
raise TypeError(msg.format(alert_cls))
procs = []
alert_cls.class_setup()
runner = alert_cls.runner_class()
runner.setup(alert_cls)
if sync:
runner.run()
else:
db.engine.dispose()
runner.start()
procs.append(runner)
alert_cls.class_teardown()
return tuple(procs)
| [
"jbc.develop@gmail.com"
] | jbc.develop@gmail.com |
0066ff8bc3fd0ad05dd8a0a3ad12977be023ba21 | a447f89a13573328dc09ebc267a436220cf0b521 | /tests/fixtures/common/models/hl7_v3/ne2008/multicacheschemas/coct_mt110000_uv04.py | f3c8ed144287df0b710a7f9531482412c96ddf37 | [
"MIT"
] | permissive | ansFourtyTwo/xsdata | 8260e6dda8cf6e963ddf782b6a22a5dfc9192514 | 525be01d12e4d8abd792969adedcfafcee3fcf9b | refs/heads/master | 2022-11-13T13:25:18.370403 | 2020-07-11T17:54:53 | 2020-07-12T17:31:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,616 | py | from enum import Enum
from dataclasses import dataclass, field
from typing import List, Optional, Union
from tests.fixtures.common.models.hl7_v3.ne2008.coreschemas.datatypes_base import (
AdExplicit,
Bl,
Ce,
Cs,
Ii,
IvlTsExplicit,
Mo,
OnExplicit,
PnExplicit,
St,
TelExplicit,
)
from tests.fixtures.common.models.hl7_v3.ne2008.coreschemas.voc import (
ActClass,
ActMood,
EntityClass,
EntityClassOrganization,
EntityDeterminer,
NullFlavor,
ParticipationType,
RoleClassMutualRelationship,
RoleClassOntological,
RoleClassPartitive,
RoleClassPassive,
XAccommodationRequestorRole,
XDocumentEntrySubject,
XDocumentSubject,
XInformationRecipientRole,
XRoleClassAccommodationRequestor,
XRoleClassCoverage,
XRoleClassCoverageInvoice,
XRoleClassCredentialedEntity,
XRoleClassPayeePolicyRelationship,
)
__NAMESPACE__ = "urn:hl7-org:v3"
@dataclass
class CoctMt110000Uv04AccountHolderLanguage:
    """Language record for an account holder (HL7 v3, namespace urn:hl7-org:v3).

    Carries a language code plus mode and preference indicator, together with
    the realm/type/template infrastructure elements common to HL7 v3 parts.
    """

    class Meta:
        name = "COCT_MT110000UV04.AccountHolderLanguage"

    realm_code: List[Cs] = field(
        default_factory=list,
        metadata={"name": "realmCode", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    type_id: Optional[Ii] = field(
        default=None,
        metadata={"name": "typeId", "type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    template_id: List[Ii] = field(
        default_factory=list,
        metadata={"name": "templateId", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    language_code: Optional[Cs] = field(
        default=None,
        metadata={"name": "languageCode", "type": "Element", "namespace": "urn:hl7-org:v3", "required": True},
    )
    mode_code: Optional[Cs] = field(
        default=None,
        metadata={"name": "modeCode", "type": "Element", "namespace": "urn:hl7-org:v3", "required": True},
    )
    preference_ind: Optional[Bl] = field(
        default=None,
        metadata={"name": "preferenceInd", "type": "Element", "namespace": "urn:hl7-org:v3", "required": True},
    )
    null_flavor: Optional[NullFlavor] = field(
        default=None,
        metadata={"name": "nullFlavor", "type": "Attribute"},
    )
@dataclass
class CoctMt110000Uv04AccountHolderOrganization:
    """Organization playing the account-holder role (HL7 v3).

    ``determiner_code`` is fixed to ``EntityDeterminer.INSTANCE`` and is not
    settable through ``__init__``.
    """

    class Meta:
        name = "COCT_MT110000UV04.AccountHolderOrganization"

    realm_code: List[Cs] = field(
        default_factory=list,
        metadata={"name": "realmCode", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    type_id: Optional[Ii] = field(
        default=None,
        metadata={"name": "typeId", "type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    template_id: List[Ii] = field(
        default_factory=list,
        metadata={"name": "templateId", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    id: List[Ii] = field(
        default_factory=list,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    code: Optional[Ce] = field(
        default=None,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    name: Optional[OnExplicit] = field(
        default=None,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    null_flavor: Optional[NullFlavor] = field(
        default=None,
        metadata={"name": "nullFlavor", "type": "Attribute"},
    )
    class_code: Optional[EntityClassOrganization] = field(
        default=None,
        metadata={"name": "classCode", "type": "Attribute", "required": True},
    )
    determiner_code: EntityDeterminer = field(
        init=False,
        default=EntityDeterminer.INSTANCE,
        metadata={"name": "determinerCode", "type": "Attribute", "required": True},
    )
@dataclass
class CoctMt110000Uv04RelationshipRole:
    """Relationship role linking entities in the account model (HL7 v3).

    ``class_code`` accepts any of several role-class vocabularies or the
    local ``Value`` enum; the value must match the pattern ``[^\\s]+``.
    """

    class Meta:
        name = "COCT_MT110000UV04.RelationshipRole"

    realm_code: List[Cs] = field(
        default_factory=list,
        metadata={"name": "realmCode", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    type_id: Optional[Ii] = field(
        default=None,
        metadata={"name": "typeId", "type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    template_id: List[Ii] = field(
        default_factory=list,
        metadata={"name": "templateId", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    code: Optional[Cs] = field(
        default=None,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    null_flavor: Optional[NullFlavor] = field(
        default=None,
        metadata={"name": "nullFlavor", "type": "Attribute"},
    )
    class_code: Optional[Union[RoleClassMutualRelationship, RoleClassPassive, str, RoleClassOntological, RoleClassPartitive, "CoctMt110000Uv04RelationshipRole.Value"]] = field(
        default=None,
        metadata={"name": "classCode", "type": "Attribute", "required": True, "pattern": r"[^\s]+"},
    )

    class Value(Enum):
        """Locally allowed class code."""
        ROL = "ROL"
@dataclass
class CoctMt110000Uv04AccountHolderPerson:
    """Person playing the account-holder role (HL7 v3).

    ``class_code`` (``EntityClass.PSN``) and ``determiner_code``
    (``EntityDeterminer.INSTANCE``) are fixed and excluded from ``__init__``.
    """

    class Meta:
        name = "COCT_MT110000UV04.AccountHolderPerson"

    realm_code: List[Cs] = field(
        default_factory=list,
        metadata={"name": "realmCode", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    type_id: Optional[Ii] = field(
        default=None,
        metadata={"name": "typeId", "type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    template_id: List[Ii] = field(
        default_factory=list,
        metadata={"name": "templateId", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    id: List[Ii] = field(
        default_factory=list,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    name: Optional[PnExplicit] = field(
        default=None,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    addr: Optional[AdExplicit] = field(
        default=None,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    as_relationship_role: Optional[CoctMt110000Uv04RelationshipRole] = field(
        default=None,
        metadata={"name": "asRelationshipRole", "type": "Element", "namespace": "urn:hl7-org:v3", "required": True, "nillable": True},
    )
    account_holder_language: List[CoctMt110000Uv04AccountHolderLanguage] = field(
        default_factory=list,
        metadata={"name": "accountHolderLanguage", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807, "nillable": True},
    )
    null_flavor: Optional[NullFlavor] = field(
        default=None,
        metadata={"name": "nullFlavor", "type": "Attribute"},
    )
    class_code: EntityClass = field(
        init=False,
        default=EntityClass.PSN,
        metadata={"name": "classCode", "type": "Attribute", "required": True},
    )
    determiner_code: EntityDeterminer = field(
        init=False,
        default=EntityDeterminer.INSTANCE,
        metadata={"name": "determinerCode", "type": "Attribute", "required": True},
    )
@dataclass
class CoctMt110000Uv04AccountHolder:
    """Account-holder role with contact details and the held person or
    organization (HL7 v3).

    ``class_code`` is fixed to ``RoleClassPassive.HLD`` and excluded from
    ``__init__``; the annotation lists every vocabulary the schema allows.
    """

    class Meta:
        name = "COCT_MT110000UV04.AccountHolder"

    realm_code: List[Cs] = field(
        default_factory=list,
        metadata={"name": "realmCode", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    type_id: Optional[Ii] = field(
        default=None,
        metadata={"name": "typeId", "type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    template_id: List[Ii] = field(
        default_factory=list,
        metadata={"name": "templateId", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    id: List[Ii] = field(
        default_factory=list,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    addr: Optional[AdExplicit] = field(
        default=None,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    telecom: Optional[TelExplicit] = field(
        default=None,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    held_account_holder_person: Optional[CoctMt110000Uv04AccountHolderPerson] = field(
        default=None,
        metadata={"name": "heldAccountHolderPerson", "type": "Element", "namespace": "urn:hl7-org:v3", "nillable": True},
    )
    held_account_holder_organization: Optional[CoctMt110000Uv04AccountHolderOrganization] = field(
        default=None,
        metadata={"name": "heldAccountHolderOrganization", "type": "Element", "namespace": "urn:hl7-org:v3", "nillable": True},
    )
    null_flavor: Optional[NullFlavor] = field(
        default=None,
        metadata={"name": "nullFlavor", "type": "Attribute"},
    )
    class_code: Union[RoleClassMutualRelationship, RoleClassPassive, str, RoleClassOntological, RoleClassPartitive, "CoctMt110000Uv04AccountHolder.Value", XAccommodationRequestorRole, XDocumentEntrySubject, XDocumentSubject, XInformationRecipientRole, XRoleClassAccommodationRequestor, XRoleClassCoverage, XRoleClassCoverageInvoice, XRoleClassCredentialedEntity, XRoleClassPayeePolicyRelationship] = field(
        init=False,
        default=RoleClassPassive.HLD,
        metadata={"name": "classCode", "type": "Attribute", "required": True, "pattern": r"[^\s]+"},
    )

    class Value(Enum):
        """Locally allowed class code."""
        ROL = "ROL"
@dataclass
class CoctMt110000Uv04Holder:
    """Holder participation connecting an account to its account holder
    (HL7 v3).

    ``type_code`` is fixed to ``ParticipationType.HLD`` and excluded from
    ``__init__``.
    """

    class Meta:
        name = "COCT_MT110000UV04.Holder"

    realm_code: List[Cs] = field(
        default_factory=list,
        metadata={"name": "realmCode", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    type_id: Optional[Ii] = field(
        default=None,
        metadata={"name": "typeId", "type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    template_id: List[Ii] = field(
        default_factory=list,
        metadata={"name": "templateId", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    time: Optional[IvlTsExplicit] = field(
        default=None,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    account_holder: Optional[CoctMt110000Uv04AccountHolder] = field(
        default=None,
        metadata={"name": "accountHolder", "type": "Element", "namespace": "urn:hl7-org:v3", "required": True, "nillable": True},
    )
    null_flavor: Optional[NullFlavor] = field(
        default=None,
        metadata={"name": "nullFlavor", "type": "Attribute"},
    )
    type_code: ParticipationType = field(
        init=False,
        default=ParticipationType.HLD,
        metadata={"name": "typeCode", "type": "Attribute", "required": True},
    )
@dataclass
class CoctMt110000Uv04Account:
    """Top-level Account act of the COCT_MT110000UV04 model (HL7 v3).

    Holds identifier, code, title, effective time, balance amount and the
    holder participation.  ``class_code`` (``ActClass.ACCT``) and
    ``mood_code`` (``ActMood.EVN``) are fixed and excluded from ``__init__``.
    """

    class Meta:
        name = "COCT_MT110000UV04.Account"

    realm_code: List[Cs] = field(
        default_factory=list,
        metadata={"name": "realmCode", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    type_id: Optional[Ii] = field(
        default=None,
        metadata={"name": "typeId", "type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    template_id: List[Ii] = field(
        default_factory=list,
        metadata={"name": "templateId", "type": "Element", "namespace": "urn:hl7-org:v3", "min_occurs": 0, "max_occurs": 9223372036854775807},
    )
    id: Optional[Ii] = field(
        default=None,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3", "required": True},
    )
    code: Optional[Cs] = field(
        default=None,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    title: Optional[St] = field(
        default=None,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    effective_time: Optional[IvlTsExplicit] = field(
        default=None,
        metadata={"name": "effectiveTime", "type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    balance_amt: Optional[Mo] = field(
        default=None,
        metadata={"name": "balanceAmt", "type": "Element", "namespace": "urn:hl7-org:v3"},
    )
    holder: Optional[CoctMt110000Uv04Holder] = field(
        default=None,
        metadata={"type": "Element", "namespace": "urn:hl7-org:v3", "nillable": True},
    )
    null_flavor: Optional[NullFlavor] = field(
        default=None,
        metadata={"name": "nullFlavor", "type": "Attribute"},
    )
    class_code: ActClass = field(
        init=False,
        default=ActClass.ACCT,
        metadata={"name": "classCode", "type": "Attribute", "required": True},
    )
    mood_code: ActMood = field(
        init=False,
        default=ActMood.EVN,
        metadata={"name": "moodCode", "type": "Attribute", "required": True},
    )
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
9880d0bf8f032a96170410d474dea6707d70f473 | 15102eb2c657a296eb00821dc378225b79fbc17e | /Homework/venv/Lib/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/cache.py | 0fdcffdef8a23b2e8997bf89d25ee3f7bc7c5fe9 | [] | no_license | yuju13488/pyworkspace | 746446b3573fa6241d979b205e964e7d52af009b | 0c77836185237450ee446542e6ff3856c7cd7de1 | refs/heads/master | 2020-08-02T03:56:55.577735 | 2019-10-04T05:50:56 | 2019-10-04T05:50:56 | 211,226,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,690 | py | """Cache Management
"""
import errno
import hashlib
import logging
import os
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.download import path_to_url
from pip._internal.models.link import Link
from pip._internal.utils.compat import expanduser
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.wheel import InvalidWheelFilename, Wheel
if MYPY_CHECK_RUNNING:
from typing import Optional, Set, List, Any # noqa: F401
from pip._internal.index import FormatControl # noqa: F401
logger = logging.getLogger(__name__)
class Cache(object):
    """An abstract class - provides cache directories for data from links

    :param cache_dir: The root of the cache.
    :param format_control: An object of FormatControl class to limit
        binaries being read from the cache.
    :param allowed_formats: which formats of files the cache should store.
        ('binary' and 'source' are the only allowed values)
    """

    def __init__(self, cache_dir, format_control, allowed_formats):
        # type: (str, FormatControl, Set[str]) -> None
        super(Cache, self).__init__()
        # A falsy cache_dir (None / "") disables caching entirely.
        self.cache_dir = expanduser(cache_dir) if cache_dir else None
        self.format_control = format_control
        self.allowed_formats = allowed_formats

        # Guard against typos: allowed_formats may only contain these values.
        _valid_formats = {"source", "binary"}
        assert self.allowed_formats.union(_valid_formats) == _valid_formats

    def _get_cache_path_parts(self, link):
        # type: (Link) -> List[str]
        """Get parts of the path that must be os.path.joined with cache_dir
        """
        # We want to generate an url to use as our cache key, we don't want to
        # just re-use the URL because it might have other items in the fragment
        # and we don't care about those.
        key_parts = [link.url_without_fragment]
        if link.hash_name is not None and link.hash is not None:
            key_parts.append("=".join([link.hash_name, link.hash]))
        key_url = "#".join(key_parts)

        # Encode our key url with sha224, we'll use this because it has similar
        # security properties to sha256, but with a shorter total output (and
        # thus less secure). However the differences don't make a lot of
        # difference for our use case here.
        hashed = hashlib.sha224(key_url.encode()).hexdigest()

        # We want to nest the directories some to prevent having a ton of top
        # level directories where we might run out of sub directories on some
        # FS.
        parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]

        return parts

    def _get_candidates(self, link, package_name):
        # type: (Link, Optional[str]) -> List[Any]
        """Return filenames cached for *link*, or [] when caching is off
        or the format control forbids binaries for this package.
        """
        can_not_cache = (
            not self.cache_dir or
            not package_name or
            not link
        )
        if can_not_cache:
            return []

        canonical_name = canonicalize_name(package_name)
        formats = self.format_control.get_allowed_formats(
            canonical_name
        )
        if not self.allowed_formats.intersection(formats):
            return []

        root = self.get_path_for_link(link)
        try:
            return os.listdir(root)
        except OSError as err:
            # A missing cache directory just means "no candidates".
            if err.errno in {errno.ENOENT, errno.ENOTDIR}:
                return []
            raise

    def get_path_for_link(self, link):
        # type: (Link) -> str
        """Return a directory to store cached items in for link.
        """
        raise NotImplementedError()

    def get(self, link, package_name):
        # type: (Link, Optional[str]) -> Link
        """Returns a link to a cached item if it exists, otherwise returns the
        passed link.
        """
        raise NotImplementedError()

    def _link_for_candidate(self, link, candidate):
        # type: (Link, str) -> Link
        """Build a file:// Link for *candidate* inside *link*'s cache dir."""
        root = self.get_path_for_link(link)
        path = os.path.join(root, candidate)

        return Link(path_to_url(path))

    def cleanup(self):
        # type: () -> None
        # Subclasses with temporary state override this; no-op by default.
        pass
class SimpleWheelCache(Cache):
    """A cache of wheels for future installs.
    """

    def __init__(self, cache_dir, format_control):
        # type: (str, FormatControl) -> None
        super(SimpleWheelCache, self).__init__(
            cache_dir, format_control, {"binary"}
        )

    def get_path_for_link(self, link):
        # type: (Link) -> str
        """Return the directory that caches wheels built from *link*.

        Many wheels can exist for one sdist, so each link gets its own
        directory that is consulted for cache hits.  Only plausibly
        versioned artifacts are inserted, so an edited local source tree
        (e.g. version 0.0) cannot poison the cache.

        :param link: The link of the sdist for which this will cache wheels.
        """
        # Nest all wheel directories under <cache_dir>/wheels.
        return os.path.join(
            self.cache_dir, "wheels", *self._get_cache_path_parts(link)
        )

    def get(self, link, package_name):
        # type: (Link, Optional[str]) -> Link
        """Return a link to the best cached wheel, or *link* when none fits."""
        usable = []
        for wheel_name in self._get_candidates(link, package_name):
            try:
                wheel = Wheel(wheel_name)
            except InvalidWheelFilename:
                continue
            if not wheel.supported():
                # Built for a different python/arch/etc
                continue
            usable.append((wheel.support_index_min(), wheel_name))

        if not usable:
            return link

        # Lowest support index == most specific / preferred wheel.
        return self._link_for_candidate(link, min(usable)[1])
class EphemWheelCache(SimpleWheelCache):
    """A SimpleWheelCache backed by a temporary directory it owns.
    """

    def __init__(self, format_control):
        # type: (FormatControl) -> None
        self._temp_dir = TempDirectory(kind="ephem-wheel-cache")
        self._temp_dir.create()

        super(EphemWheelCache, self).__init__(
            self._temp_dir.path, format_control
        )

    def cleanup(self):
        # type: () -> None
        # Remove the temporary directory and everything cached in it.
        self._temp_dir.cleanup()
class WheelCache(Cache):
    """Facade over a persistent SimpleWheelCache and an EphemWheelCache.

    Lookups consult the persistent wheel cache first and gracefully fall
    back to the ephemeral one when the link is not found there.
    """

    def __init__(self, cache_dir, format_control):
        # type: (str, FormatControl) -> None
        super(WheelCache, self).__init__(
            cache_dir, format_control, {'binary'}
        )
        self._wheel_cache = SimpleWheelCache(cache_dir, format_control)
        self._ephem_cache = EphemWheelCache(format_control)

    def get_path_for_link(self, link):
        # type: (Link) -> str
        return self._wheel_cache.get_path_for_link(link)

    def get_ephem_path_for_link(self, link):
        # type: (Link) -> str
        return self._ephem_cache.get_path_for_link(link)

    def get(self, link, package_name):
        # type: (Link, Optional[str]) -> Link
        cached = self._wheel_cache.get(link, package_name)
        if cached is not link:
            return cached
        # Persistent cache miss: try the ephemeral cache.
        return self._ephem_cache.get(link, package_name)

    def cleanup(self):
        # type: () -> None
        self._wheel_cache.cleanup()
        self._ephem_cache.cleanup()
| [
"shiyoo123@hotmail.com"
] | shiyoo123@hotmail.com |
9bf8399b0d96619d46fa8d08f62c9db0def0eaee | f842b77b50015456f1396b71e527180d48a2eadc | /demo/libdemo/write_names.py | 860bf2382a5dc182f36c923677e879b906d1374a | [] | no_license | srikanthpragada/PYTHON_16_JUNE_2020 | 75e4d2b42607e31e26d6a5df3ea0065df941c750 | 50c2d0c355eef94ed93c4e124796fe3add7a60d9 | refs/heads/master | 2022-11-24T12:42:28.594097 | 2020-07-29T02:28:21 | 2020-07-29T02:28:21 | 273,117,380 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | with open("names.txt", "wt") as f:
while True:
name = input("Enter name [end to stop] : ")
if name == 'end':
break
f.write(name + "\n")
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
21675dfebc1da3e403944ba6c29730499d64b6c6 | d9dbeafdcbe65f1121acb6f3d2ea789c33dc9edf | /data_structures/binary_tree.py | f1744e98626cbacd893c6a2a5cdc96cc357f4f4f | [] | no_license | Ethic41/LearningAlgorithms | 2227547064f0027a265e62a48d12923013cf2511 | 614fcf534344e643cda4867c0e45be507ebe46b8 | refs/heads/master | 2022-11-28T11:57:56.899894 | 2022-11-24T12:28:14 | 2022-11-24T12:28:14 | 192,438,021 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,833 | py | # Author: Dahir Muhammad Dahir
# Date: 04-July-2019 10:14 PM
# About: Binary Tree is a non-linear, finite,
# connected, undirected graph with vertex
# or node of degree !> 3, data structure.
# this is my first implementation of a
# binary tree, note that due to it's varying
# nature, this is not the only possible implementation
from collections import deque
from tree import Tree # type: ignore
class BinaryTree(Tree):
    """A binary tree built on the base ``Tree`` class.

    Nodes are expected to expose ``left``, ``right``, ``parent`` and
    ``data`` attributes, and the tree root lives in ``self.root``
    (presumably provided by ``Tree`` / its node type — TODO confirm,
    the ``tree`` module is not visible here).
    """

    def __init__(self):
        super().__init__()

    def add_node(self, node):
        """Add node to the first found leaf.

        If the tree is empty, *node* becomes the root; otherwise it is
        attached to the first node (in breadth-first order) that still
        has a free ``left`` or ``right`` slot.
        """
        if self.root is None:
            # Empty tree: current_node stays None so the branch below
            # installs node as the root.
            current_node = self.root
        else:
            current_node = self.find_leaf()

        if current_node is None:
            self.root = node
        elif current_node.left is None:
            current_node.left = node
            current_node.left.parent = current_node
        elif current_node.right is None:
            current_node.right = node
            current_node.right.parent = current_node

    def find_leaf(self):
        """Return the first node (breadth-first) with a free child slot.

        Returns the last visited node if the queue drains, or None for an
        empty tree.
        """
        que = deque()
        que.append(self.root)
        current_node = None

        while len(que) > 0:
            current_node = que.popleft()

            # A missing child means this node can accept an attachment.
            if current_node.left is not None:
                que.append(current_node.left)
            else:
                return current_node

            if current_node.right is not None:
                que.append(current_node.right)
            else:
                return current_node

        return current_node

    def get_size_dft(self):
        """Count the nodes using an iterative, parent-pointer depth-first
        traversal (no stack or recursion).

        The previous/current node pair encodes the traversal direction:
        arriving from the parent means the node is seen for the first time.
        """
        current_node = self.root
        previous_node = None
        node_count = 0

        while current_node is not None:
            if previous_node == current_node.parent:  # we are at a new node
                node_count += 1
                if current_node.left is not None:
                    next_node = current_node.left
                elif current_node.right is not None:
                    next_node = current_node.right
                else:
                    next_node = current_node.parent
            elif previous_node == current_node.left:
                # Done with the left subtree: descend right or go back up.
                if current_node.right is not None:
                    next_node = current_node.right
                else:
                    next_node = current_node.parent
            else:
                # Done with the right subtree (or a leaf): go back up.
                next_node = current_node.parent

            previous_node = current_node
            current_node = next_node

        return node_count

    def breadth_first_traverse(self):
        """Visit every node level by level.

        NOTE(review): the traversal currently performs no action per node —
        looks like a skeleton to extend with a visitor/callback.
        """
        que = deque()
        if self.root is not None:
            que.append(self.root)

        while len(que) > 0:
            current_node = que.popleft()
            if current_node.left is not None:
                que.append(current_node.left)
            if current_node.right is not None:
                que.append(current_node.right)

    def add_internal_node(self, internal_node, node):
        # TODO: not implemented yet.
        pass

    def print_tree_dfs(self):
        """Print node data in pre-order ("a=>b=>...=>None") using the same
        parent-pointer traversal as get_size_dft."""
        current_node = self.root
        previous_node = None

        while current_node is not None:
            if previous_node == current_node.parent:
                # First visit: emit the node's data (pre-order position).
                print("{}=>".format(current_node.data), end="")
                if current_node.left is not None:
                    next_node = current_node.left
                elif current_node.right is not None:
                    next_node = current_node.right
                else:
                    next_node = current_node.parent
            elif previous_node == current_node.left:
                if current_node.right is not None:
                    next_node = current_node.right
                else:
                    next_node = current_node.parent
            else:
                next_node = current_node.parent

            previous_node = current_node
            current_node = next_node

        print(None)
| [
"dahirmuhammad3@gmail.com"
] | dahirmuhammad3@gmail.com |
ac6d39306311581386b425c673286cf0df918c6c | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/intentions/addMissingVarargsInGoogleDocString.py | 4ac878d317527059d445618173bfd0bbe605f9c5 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 72 | py | def <caret>f(x, *args, **kwargs):
"""
Args:
x: foo
""" | [
"mikhail.golubev@jetbrains.com"
] | mikhail.golubev@jetbrains.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.