text stringlengths 8 6.05M |
|---|
import torch
from torch_geometric.datasets.molecule_net import x_map as x_map_default
from torch_geometric.datasets.molecule_net import e_map as e_map_default
def get_atom_feature_dims():
    """Return the cardinality of each categorical atom (node) feature.

    The order mirrors the node-feature columns produced by PyG's
    MoleculeNet dataset: atomic_num, chirality, degree, formal_charge,
    num_hs, num_radical_electrons, hybridization, is_aromatic, is_in_ring.
    """
    feature_keys = (
        'atomic_num',
        'chirality',
        'degree',
        'formal_charge',
        'num_hs',
        'num_radical_electrons',
        'hybridization',
        'is_aromatic',
        'is_in_ring',
    )
    return [len(x_map_default[key]) for key in feature_keys]
def get_bond_feature_dims():
    """Return the cardinality of each categorical bond (edge) feature.

    The order mirrors the edge-feature columns produced by PyG's
    MoleculeNet dataset: bond_type, stereo, is_conjugated.
    """
    feature_keys = ('bond_type', 'stereo', 'is_conjugated')
    return [len(e_map_default[key]) for key in feature_keys]
class AtomEncoder(torch.nn.Module):
    """Encode integer atom features into a single dense vector.

    One ``torch.nn.Embedding`` table is created per categorical atom
    feature (sizes from :func:`get_atom_feature_dims`); the per-feature
    embeddings are summed to produce the node representation.
    """

    def __init__(self, emb_dim):
        super(AtomEncoder, self).__init__()
        self.atom_embedding_list = torch.nn.ModuleList()
        # One embedding table per atom feature, Xavier-initialized.
        for num_categories in get_atom_feature_dims():
            table = torch.nn.Embedding(num_categories, emb_dim)
            torch.nn.init.xavier_uniform_(table.weight.data)
            self.atom_embedding_list.append(table)

    def forward(self, x):
        """Sum per-feature embeddings; ``x`` is an integer feature matrix
        with one column per atom feature."""
        out = 0
        for col in range(x.shape[1]):
            out = out + self.atom_embedding_list[col](x[:, col])
        return out
class BondEncoder(torch.nn.Module):
    """Encode integer bond features into a single dense vector.

    One ``torch.nn.Embedding`` table is created per categorical bond
    feature (sizes from :func:`get_bond_feature_dims`); the per-feature
    embeddings are summed to produce the edge representation.
    """

    def __init__(self, emb_dim):
        super(BondEncoder, self).__init__()
        self.bond_embedding_list = torch.nn.ModuleList()
        # One embedding table per bond feature, Xavier-initialized.
        for num_categories in get_bond_feature_dims():
            table = torch.nn.Embedding(num_categories, emb_dim)
            torch.nn.init.xavier_uniform_(table.weight.data)
            self.bond_embedding_list.append(table)

    def forward(self, edge_attr):
        """Sum per-feature embeddings; ``edge_attr`` is an integer feature
        matrix with one column per bond feature."""
        out = 0
        for col in range(edge_attr.shape[1]):
            out = out + self.bond_embedding_list[col](edge_attr[:, col])
        return out
|
# Recamán's sequence (OEIS A005132), Python 2 script:
#   a(0) = 0; a(n) = a(n-1) - n if that value is positive and not yet
#   seen, otherwise a(n) = a(n-1) + n.
# Precomputes the first 500001 terms, then answers index queries from
# stdin until the sentinel -1 is read.
a = [0]
memo = {}  # values already present in the sequence (dict used as a set)
memo[0] = 1
for x in xrange(1,500001):
    # Candidate: step backwards by x from the previous term.
    test = a[x-1] - x
    if test > 0 and test not in memo:
        memo[test] = 1
        a.append(test)
    else:
        # Step forwards instead: test + 2*x == a[x-1] + x.
        memo[test + 2*x] = 1
        a.append(test + 2*x)
k = int(raw_input())
while k != -1:
    print a[k]
    k = int(raw_input())
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the initial tables of the `lottery` app.

    Auto-generated by South's `schemamigration`. The `models` dict at the
    bottom is a frozen snapshot of the ORM at generation time and is read by
    later migrations — it must not be hand-edited.

    NOTE(review): the column names 'staright_and_box' and 'ten_of_tweleve'
    look misspelled, but they must mirror the model's field names exactly;
    renaming them in a migration would desynchronize schema and models.
    """

    def forwards(self, orm):
        """Create all lottery tables (FK targets are created first)."""
        # Adding model 'LotteryCountryDivision'
        db.create_table(u'lottery_country_division', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('remote_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=8)),
            ('remote_country', self.gf('django.db.models.fields.CharField')(max_length=32)),
        ))
        db.send_create_signal(u'lottery', ['LotteryCountryDivision'])
        # Adding model 'LotteryGame'
        db.create_table(u'lottery_game', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ))
        db.send_create_signal(u'lottery', ['LotteryGame'])
        # Adding model 'LotteryGameComponent'
        db.create_table(u'lottery_game_component', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='components', null=True, to=orm['lottery.LotteryGame'])),
            ('remote_id', self.gf('django.db.models.fields.CharField')(max_length=8)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('format', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('identifier', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ))
        db.send_create_signal(u'lottery', ['LotteryGameComponent'])
        # Adding M2M table for field division on 'LotteryGameComponent'
        m2m_table_name = db.shorten_name(u'lottery_game_component_division')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('lotterygamecomponent', models.ForeignKey(orm[u'lottery.lotterygamecomponent'], null=False)),
            ('lotterycountrydivision', models.ForeignKey(orm[u'lottery.lotterycountrydivision'], null=False))
        ))
        db.create_unique(m2m_table_name, ['lotterygamecomponent_id', 'lotterycountrydivision_id'])
        # Adding model 'LotteryDraw'
        # One row per draw; the many nullable IntegerFields are per-game
        # prize-tier winner counts (powerball, megamillions, pick-N, etc.).
        db.create_table(u'lottery_draw', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('component', self.gf('django.db.models.fields.related.ForeignKey')(related_name='draws', to=orm['lottery.LotteryGameComponent'])),
            ('date', self.gf('django.db.models.fields.DateField')()),
            ('jackpot', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('result', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('division', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lottery.LotteryCountryDivision'], null=True)),
            ('official', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('frenzied', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('powerplay', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('five_of_five_only', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('five_of_five_powerplay', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('four_of_five_powerball', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('four_of_five_only', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('three_of_five_with_powerball', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('three_of_five_only', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('two_of_five_powerball', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('two_of_five_only', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('one_of_five_powerball', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('powerball_only', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('megaplier', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('megaball', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('four_of_five_megaball', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('three_of_five_with_megaball', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('two_of_five_megaball', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('one_of_five_megaball', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('extra', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('six_of_six_only', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('five_of_six_only', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('five_of_six_extra', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('four_of_six_only', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('four_of_six_extra', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('three_of_six_only', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('three_of_six_extra', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('two_of_six_only', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('two_of_six_extra', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('bonus', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('four_of_four', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('four_of_four_bonus', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('three_of_four', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('three_of_four_bonus', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('two_of_four', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('two_of_four_bonus', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('one_of_four', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('one_of_four_bonus', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('straight', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('box', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('staright_and_box', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('box_only', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('win', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('exacta', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('trifecta', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('race_time', self.gf('django.db.models.fields.CharField')(max_length=80, null=True, blank=True)),
            ('exacta_with_racetime', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('win_with_racetime', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('race_time_amount', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('twelve_of_twelve', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('eleven_of_twelve', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('ten_of_tweleve', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('nine_of_twelve', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('eight_of_twelve', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('four_of_twelve', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('three_of_twelve', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('two_of_twelve', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('one_of_twelve', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('zero_of_twelve', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('number_of_winners', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True)),
            ('odds', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ))
        db.send_create_signal(u'lottery', ['LotteryDraw'])
        # Adding model 'LotteryDrawFrenzy'
        db.create_table(u'lottery_draw_frenzy', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('draw', self.gf('django.db.models.fields.related.ForeignKey')(related_name='frenzies', to=orm['lottery.LotteryDraw'])),
            ('added_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'lottery', ['LotteryDrawFrenzy'])
        # Adding model 'LotteryTicket'
        db.create_table(u'lottery_ticket', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('draw', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tickets', to=orm['lottery.LotteryDraw'])),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tickets', to=orm['user.YooLottoUser'])),
            ('division', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lottery.LotteryCountryDivision'], null=True)),
            ('winnings', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=15, decimal_places=2, blank=True)),
            ('notified', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('added_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'lottery', ['LotteryTicket'])
        # Adding model 'LotteryTicketClient'
        db.create_table(u'lottery_ticket_client', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('draw', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tickets_client', to=orm['lottery.LotteryDraw'])),
            ('device', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('email', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('added_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'lottery', ['LotteryTicketClient'])
        # Adding model 'LotteryTicketSubmission'
        db.create_table(u'lottery_ticket_submission', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('submission', self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True)),
            ('ticket', self.gf('django.db.models.fields.related.ForeignKey')(related_name='submissions', to=orm['lottery.LotteryTicket'])),
            ('checked', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('added_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'lottery', ['LotteryTicketSubmission'])
        # Adding model 'LotteryTicketPlay'
        db.create_table(u'lottery_ticket_play', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('ticket', self.gf('django.db.models.fields.related.ForeignKey')(related_name='plays', to=orm['lottery.LotteryTicket'])),
            ('play', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('division', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lottery.LotteryCountryDivision'], null=True)),
            # 'submission_old' keeps the legacy CharField in db_column
            # 'submission'; the new FK 'submission' gets its own column.
            ('submission_old', self.gf('django.db.models.fields.CharField')(max_length=16, null=True, db_column='submission', blank=True)),
            ('submission', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='plays', null=True, to=orm['lottery.LotteryTicketSubmission'])),
            ('winnings', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=15, decimal_places=2, blank=True)),
            ('winnings_base', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=15, decimal_places=2, blank=True)),
            ('winnings_sum', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=15, decimal_places=2, blank=True)),
            ('added_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'lottery', ['LotteryTicketPlay'])
        # Adding model 'LotteryTicketAvailable'
        db.create_table(u'lottery_ticket_available', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('ticket', self.gf('django.db.models.fields.related.ForeignKey')(related_name='ticket_submissions', to=orm['lottery.LotteryTicket'])),
            ('play', self.gf('django.db.models.fields.related.ForeignKey')(related_name='ticket_play', to=orm['lottery.LotteryTicketPlay'])),
            ('available', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('json', self.gf('django.db.models.fields.TextField')()),
            ('device', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('image_first', self.gf('django.db.models.fields.files.ImageField')(max_length=300, null=True, blank=True)),
            ('image_second', self.gf('django.db.models.fields.files.ImageField')(max_length=300, null=True, blank=True)),
            ('image_third', self.gf('django.db.models.fields.files.ImageField')(max_length=300, null=True, blank=True)),
            ('valid_image_name', self.gf('django.db.models.fields.TextField')(max_length=256, null=True, blank=True)),
            ('rejected', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('reason', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('added_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'lottery', ['LotteryTicketAvailable'])
        # Adding model 'LotteryTicketEdit'
        db.create_table(u'lottery_ticket_edit', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('available', self.gf('django.db.models.fields.related.ForeignKey')(related_name='ticket_available', to=orm['lottery.LotteryTicketAvailable'])),
            ('numbers', self.gf('django.db.models.fields.CharField')(max_length=256)),
        ))
        db.send_create_signal(u'lottery', ['LotteryTicketEdit'])

    def backwards(self, orm):
        """Drop every table created by :meth:`forwards`."""
        # Deleting model 'LotteryCountryDivision'
        db.delete_table(u'lottery_country_division')
        # Deleting model 'LotteryGame'
        db.delete_table(u'lottery_game')
        # Deleting model 'LotteryGameComponent'
        db.delete_table(u'lottery_game_component')
        # Removing M2M table for field division on 'LotteryGameComponent'
        db.delete_table(db.shorten_name(u'lottery_game_component_division'))
        # Deleting model 'LotteryDraw'
        db.delete_table(u'lottery_draw')
        # Deleting model 'LotteryDrawFrenzy'
        db.delete_table(u'lottery_draw_frenzy')
        # Deleting model 'LotteryTicket'
        db.delete_table(u'lottery_ticket')
        # Deleting model 'LotteryTicketClient'
        db.delete_table(u'lottery_ticket_client')
        # Deleting model 'LotteryTicketSubmission'
        db.delete_table(u'lottery_ticket_submission')
        # Deleting model 'LotteryTicketPlay'
        db.delete_table(u'lottery_ticket_play')
        # Deleting model 'LotteryTicketAvailable'
        db.delete_table(u'lottery_ticket_available')
        # Deleting model 'LotteryTicketEdit'
        db.delete_table(u'lottery_ticket_edit')

    # Frozen ORM snapshot (auto-generated by South) — do not edit by hand.
    models = {
        u'lottery.lotterycountrydivision': {
            'Meta': {'object_name': 'LotteryCountryDivision', 'db_table': "u'lottery_country_division'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'remote_country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '8'})
        },
        u'lottery.lotterydraw': {
            'Meta': {'object_name': 'LotteryDraw', 'db_table': "u'lottery_draw'"},
            'bonus': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'box': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'box_only': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'component': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'draws'", 'to': u"orm['lottery.LotteryGameComponent']"}),
            'date': ('django.db.models.fields.DateField', [], {}),
            'division': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lottery.LotteryCountryDivision']", 'null': 'True'}),
            'eight_of_twelve': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'eleven_of_twelve': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'exacta': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'exacta_with_racetime': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'extra': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'five_of_five_only': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'five_of_five_powerplay': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'five_of_six_extra': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'five_of_six_only': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'four_of_five_megaball': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'four_of_five_only': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'four_of_five_powerball': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'four_of_four': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'four_of_four_bonus': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'four_of_six_extra': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'four_of_six_only': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'four_of_twelve': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'frenzied': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jackpot': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'megaball': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'megaplier': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'nine_of_twelve': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'number_of_winners': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'odds': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'official': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'one_of_five_megaball': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'one_of_five_powerball': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'one_of_four': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'one_of_four_bonus': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'one_of_twelve': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'powerball_only': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'powerplay': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'race_time': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'race_time_amount': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'result': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'six_of_six_only': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'staright_and_box': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'straight': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'ten_of_tweleve': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'three_of_five_only': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'three_of_five_with_megaball': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'three_of_five_with_powerball': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'three_of_four': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'three_of_four_bonus': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'three_of_six_extra': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'three_of_six_only': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'three_of_twelve': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'trifecta': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'twelve_of_twelve': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'two_of_five_megaball': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'two_of_five_only': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'two_of_five_powerball': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'two_of_four': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'two_of_four_bonus': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'two_of_six_extra': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'two_of_six_only': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'two_of_twelve': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'win': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'win_with_racetime': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'zero_of_twelve': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
        },
        u'lottery.lotterydrawfrenzy': {
            'Meta': {'object_name': 'LotteryDrawFrenzy', 'db_table': "u'lottery_draw_frenzy'"},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'draw': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'frenzies'", 'to': u"orm['lottery.LotteryDraw']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'lottery.lotterygame': {
            'Meta': {'object_name': 'LotteryGame', 'db_table': "u'lottery_game'"},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'lottery.lotterygamecomponent': {
            'Meta': {'object_name': 'LotteryGameComponent', 'db_table': "u'lottery_game_component'"},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'division': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components'", 'symmetrical': 'False', 'to': u"orm['lottery.LotteryCountryDivision']"}),
            'format': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'components'", 'null': 'True', 'to': u"orm['lottery.LotteryGame']"}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '8'})
        },
        u'lottery.lotteryticket': {
            'Meta': {'object_name': 'LotteryTicket', 'db_table': "u'lottery_ticket'"},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'division': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lottery.LotteryCountryDivision']", 'null': 'True'}),
            'draw': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tickets'", 'to': u"orm['lottery.LotteryDraw']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tickets'", 'to': u"orm['user.YooLottoUser']"}),
            'winnings': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'})
        },
        u'lottery.lotteryticketavailable': {
            'Meta': {'object_name': 'LotteryTicketAvailable', 'db_table': "u'lottery_ticket_available'"},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'device': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_first': ('django.db.models.fields.files.ImageField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'image_second': ('django.db.models.fields.files.ImageField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'image_third': ('django.db.models.fields.files.ImageField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'json': ('django.db.models.fields.TextField', [], {}),
            'play': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ticket_play'", 'to': u"orm['lottery.LotteryTicketPlay']"}),
            'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'rejected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ticket': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ticket_submissions'", 'to': u"orm['lottery.LotteryTicket']"}),
            'valid_image_name': ('django.db.models.fields.TextField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
        },
        u'lottery.lotteryticketclient': {
            'Meta': {'object_name': 'LotteryTicketClient', 'db_table': "u'lottery_ticket_client'"},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'device': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'draw': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tickets_client'", 'to': u"orm['lottery.LotteryDraw']"}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'lottery.lotteryticketedit': {
            'Meta': {'object_name': 'LotteryTicketEdit', 'db_table': "u'lottery_ticket_edit'"},
            'available': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ticket_available'", 'to': u"orm['lottery.LotteryTicketAvailable']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'numbers': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        u'lottery.lotteryticketplay': {
            'Meta': {'object_name': 'LotteryTicketPlay', 'db_table': "u'lottery_ticket_play'"},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'division': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lottery.LotteryCountryDivision']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'play': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'submission': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'plays'", 'null': 'True', 'to': u"orm['lottery.LotteryTicketSubmission']"}),
            'submission_old': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'db_column': "'submission'", 'blank': 'True'}),
            'ticket': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'plays'", 'to': u"orm['lottery.LotteryTicket']"}),
            'winnings': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
            'winnings_base': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
            'winnings_sum': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'})
        },
        u'lottery.lotteryticketsubmission': {
            'Meta': {'object_name': 'LotteryTicketSubmission', 'db_table': "u'lottery_ticket_submission'"},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'submission': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'ticket': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': u"orm['lottery.LotteryTicket']"})
        },
        u'user.yoolottouser': {
            'Meta': {'object_name': 'YooLottoUser', 'db_table': "u'user'"},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'referral': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['lottery']
import numpy as np
import cv2
import imutils
from collections import deque
# HSV range that isolates the red marker used as the "brush".
red_lower_bound = np.array([0, 100, 100])  # HSV format
red_upper_bound = np.array([20, 255, 255])
lower_bound = red_lower_bound
upper_bound = red_upper_bound
# BGR format
# Blue Red Green Yellow White
color_list = [(255, 0, 0), (0, 0, 255), (0, 255, 0), (0, 255, 255), (255, 255, 255)]
# Blue Red
color_palette_list = [(255, 0, 0), (0, 0, 255)]
# index for the colors in our palette
idx = 0
# Each colour keeps a list of strokes; each stroke is a deque of points.
trace_blue = [deque(maxlen=1500)]
trace_red = [deque(maxlen=1500)]
# indexes of the stroke currently being drawn
idx_blue = 0
idx_red = 0
camera = cv2.VideoCapture(0)
while True:
    (cam_rec, cam_frame) = camera.read()
    if not cam_rec:
        # Fix: camera.read() can fail (no device / unplugged); the original
        # crashed inside cv2.flip on a None frame.
        break
    cam_frame = cv2.flip(cam_frame, 1)
    cam_frame = imutils.resize(cam_frame, width=1000)
    feed = cv2.cvtColor(cam_frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(feed, lower_bound, upper_bound)
    # NOTE(review): 2-tuple unpacking matches OpenCV 4.x; OpenCV 3.x returns a
    # 3-tuple here (the commented variant in the original) -- confirm version.
    (contours, _) = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    center = None
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 0.5
    t = 2
    # On-screen buttons: CLEAR, BLUE, RED.
    cam_frame = cv2.rectangle(cam_frame, (125,60), (275,120), (90,0,100), -1)
    cv2.putText(cam_frame, "CLEAR", (170, 95), font, font_scale, color_list[4], t, cv2.LINE_AA)
    cam_frame = cv2.rectangle(cam_frame, (425,60), (575,120), color_palette_list[0], -1)
    cv2.putText(cam_frame, "BLUE", (480, 95), font, font_scale, color_list[4], t, cv2.LINE_AA)
    cam_frame = cv2.rectangle(cam_frame, (725,60), (875,120), color_palette_list[1], -1)
    cv2.putText(cam_frame, "RED", (785, 95), font, font_scale, color_list[4], t, cv2.LINE_AA)
    if len(contours) > 0:
        cont = sorted(contours, key=cv2.contourArea, reverse=True)[0]
        ((x, y), radius) = cv2.minEnclosingCircle(cont)
        cv2.circle(cam_frame, (int(x), int(y)), int(radius), color_list[2], 2)
        M = cv2.moments(cont)
        if M['m00'] != 0:
            # Fix: a zero-area contour made the original raise
            # ZeroDivisionError when computing the centroid.
            center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
        if center is not None:
            if center[1] <= 120:
                # Marker is inside the button strip.
                if 125 <= center[0] <= 275:
                    # CLEAR: drop all strokes and start over.
                    trace_blue = [deque(maxlen=1500)]
                    trace_red = [deque(maxlen=1500)]
                    idx_blue = 0
                    idx_red = 0
                elif 425 <= center[0] <= 575:
                    idx = 0
                elif 725 <= center[0] <= 875:
                    idx = 1
            else:
                # Record the point on the currently selected colour's stroke.
                if idx == 0:
                    trace_blue[idx_blue].appendleft(center)
                elif idx == 1:
                    trace_red[idx_red].appendleft(center)
    else:
        # Marker left the frame: open new strokes so lines don't connect.
        trace_blue.append(deque(maxlen=1500))
        idx_blue += 1
        trace_red.append(deque(maxlen=1500))
        idx_red += 1
    # Replay every stroke of both colours as line segments.
    traced = [trace_blue, trace_red]
    for p in range(len(traced)):
        for m in range(len(traced[p])):
            for n in range(1, len(traced[p][m])):
                if traced[p][m][n] is None:
                    continue
                cv2.line(cam_frame, traced[p][m][n - 1], traced[p][m][n], color_palette_list[p], 2)
    cv2.imshow("Canvas Drawing", cam_frame)
    if cv2.waitKey(1) & 0xFF == ord("w"):
        break
camera.release()
cv2.destroyAllWindows()
|
import json
import os
import warnings
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from skimage.transform import downscale_local_mean
from skimage import io, img_as_uint
from tqdm import tqdm_notebook, tqdm
from zipfile import ZipFile
import torch
import cv2
from DataLoader import ImagesetDataset, ImageSet
from DeepNetworks.HRNet import HRNet
from Evaluator import shift_cPSNR, shift_cMSE, cSSIM, cMSE
from utils import getImageSetDirectories, readBaselineCPSNR, collateFunction
def get_sr_and_score(imset, model, aposterior_gt, next_sr, num_frames, min_L=16):
    '''
    Super resolves an imset with a given model and computes quality metrics.
    Args:
        imset: ImageSet, or a (lrs, alphas, hrs, hr_maps, names) batch tuple
        model: HRNet, pytorch model
        aposterior_gt: ndarray or None, reference for the a-posteriori SSIM
        next_sr: ndarray or None, next SR image for the delta metrics
        num_frames: int, number of low-res frames passed to the collator
        min_L: int, pad length
    Returns:
        12-tuple (sr, val_gt_SSIM, val_aposterior_SSIM, val_cPSNR,
        val_usual_PSNR, val_shift_cPSNR, val_cMSE, val_L2, val_shift_cMSE,
        val_delta_cMSE, val_delta_L2, val_delta_shift_cMSE); metric slots are
        None when the corresponding reference is unavailable.
    '''
    if imset.__class__ is ImageSet:
        collator = collateFunction(num_frames, min_L=min_L)
        lrs, alphas, hrs, hr_maps, names = collator([imset])
    elif isinstance(imset, tuple):  # imset is a tuple of batches
        lrs, alphas, hrs, hr_maps, names = imset
    else:
        # Fix: the original fell through with `lrs` undefined (NameError).
        raise TypeError('imset must be an ImageSet or a batch tuple')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    lrs = lrs.float().to(device)
    alphas = alphas.float().to(device)
    sr = model(lrs, alphas)[:, 0]
    sr = sr.detach().cpu().numpy()[0]
    sr = np.clip(sr, 0, 1)
    cur_hr = hrs.numpy()[0]
    cur_hr_map = hr_maps.numpy()[0]
    cur_sr = sr
    assert(cur_sr.ndim == 2)
    assert(cur_hr.ndim == 2)
    assert(cur_hr_map.ndim == 2)
    if cur_sr.dtype.type is np.uint16:  # integer array is in the range [0, 65535]
        cur_sr = cur_sr / np.iinfo(np.uint16).max  # normalize in the range [0, 1]
    else:
        assert 0 <= cur_sr.min() and cur_sr.max() <= 1, 'sr.dtype must be either uint16 (range 0-65536) or float64 in (0, 1).'
    if cur_hr.dtype.type is np.uint16:
        cur_hr = cur_hr / np.iinfo(np.uint16).max
    if len(hrs) > 0:
        val_gt_SSIM = cSSIM(sr=cur_sr, hr=cur_hr)
        val_L2 = mean_squared_error(cur_hr, cur_sr)
    else:
        val_gt_SSIM = None
        val_L2 = None
    # Fix: replaced `str(type(x)) == "<class 'NoneType'>"` with `x is None`.
    if aposterior_gt is None:
        val_aposterior_SSIM = 1.0
    else:
        val_aposterior_SSIM = cSSIM(sr=cur_sr, hr=aposterior_gt)
    if next_sr is None:
        val_delta_L2 = None
    else:
        assert (next_sr.ndim == 2)
        val_delta_L2 = mean_squared_error(next_sr, cur_sr)
    if len(cur_sr.shape) == 2:
        # The clearance-aware metrics below expect a leading channel axis.
        cur_sr = cur_sr[None, ]
        cur_hr = cur_hr[None, ]
        cur_hr_map = cur_hr_map[None, ]
    if len(hrs) > 0:
        val_cMSE = cMSE(sr=cur_sr, hr=cur_hr, hr_map=cur_hr_map)
        val_cPSNR = -10 * np.log10(val_cMSE)
        val_usual_PSNR = -10 * np.log10(val_L2)
        val_shift_cPSNR = shift_cPSNR(sr=cur_sr, hr=cur_hr, hr_map=cur_hr_map)
        val_shift_cMSE = shift_cMSE(sr=cur_sr, hr=cur_hr, hr_map=cur_hr_map)
    else:
        val_cMSE = None
        val_cPSNR = None
        val_usual_PSNR = None
        val_shift_cPSNR = None
        val_shift_cMSE = None
    if next_sr is None:
        val_delta_cMSE = None
        val_delta_shift_cMSE = None
    else:
        if next_sr.dtype.type is np.uint16:  # integer array is in the range [0, 65535]
            next_sr = next_sr / np.iinfo(np.uint16).max  # normalize in the range [0, 1]
        else:
            assert 0 <= next_sr.min() and next_sr.max() <= 1, 'sr.dtype must be either uint16 (range 0-65536) or float64 in (0, 1).'
        if len(cur_sr.shape) == 2:
            next_sr = next_sr[None, ]
        val_delta_cMSE = cMSE(sr=cur_sr, hr=next_sr, hr_map=cur_hr_map)
        val_delta_shift_cMSE = shift_cMSE(sr=cur_sr, hr=next_sr, hr_map=cur_hr_map)
    return sr, val_gt_SSIM, val_aposterior_SSIM, val_cPSNR, val_usual_PSNR, val_shift_cPSNR, val_cMSE, \
        val_L2, val_shift_cMSE, val_delta_cMSE, val_delta_L2, val_delta_shift_cMSE
def load_data(config_file_path, val_proportion=0.10, top_k=-1):
    '''
    Loads all the data for the ESA Kelvin competition (train, val, test, baseline).
    Args:
        config_file_path: str, path of the configuration file
        val_proportion: float, validation/train fraction
        top_k: int, number of low-resolution images to read. Default (top_k=-1)
            reads all low-res images, sorted by clearance.
    Returns:
        train_dataset: torch.Dataset
        val_dataset: torch.Dataset
        test_dataset: torch.Dataset
        baseline_cpsnrs: dict, shift cPSNR scores of the ESA baseline
    '''
    with open(config_file_path, "r") as read_file:
        config = json.load(read_file)
    data_directory = config["paths"]["prefix"]
    baseline_cpsnrs = readBaselineCPSNR(os.path.join(data_directory, "norm.csv"))
    train_set_directories = getImageSetDirectories(os.path.join(data_directory, "train"))
    test_set_directories = getImageSetDirectories(os.path.join(data_directory, "test"))
    # Deterministic split so validation imagesets are stable across runs.
    train_list, val_list = train_test_split(
        train_set_directories, test_size=val_proportion, random_state=1, shuffle=True)
    # Patch creation is disabled for all three datasets.
    config["training"]["create_patches"] = False
    train_cfg = config["training"]
    train_dataset = ImagesetDataset(imset_dir=train_list, config=train_cfg, top_k=top_k)
    val_dataset = ImagesetDataset(imset_dir=val_list, config=train_cfg, top_k=top_k)
    test_dataset = ImagesetDataset(imset_dir=test_set_directories, config=train_cfg, top_k=top_k)
    return train_dataset, val_dataset, test_dataset, baseline_cpsnrs
def load_model(config, checkpoint_file):
    '''
    Loads a pretrained model from disk.
    Args:
        config: dict, configuration file
        checkpoint_file: str, checkpoint filename
    Returns:
        model: HRNet, a pytorch model
    '''
    use_cuda = torch.cuda.is_available()
    target_device = torch.device('cuda' if use_cuda else 'cpu')
    net = HRNet(config["network"]).to(target_device)
    net.load_state_dict(torch.load(checkpoint_file))
    return net
def evaluate(model, train_dataset, val_dataset, test_dataset, min_L=16):
    '''
    Evaluates a pretrained model on the train/val/test splits.
    Args:
        model: HRNet, a pytorch model
        train_dataset: torch.Dataset
        val_dataset: torch.Dataset
        test_dataset: torch.Dataset
        min_L: int, pad length
    Returns:
        scores: dict, results
        clerances: dict, clearance scores
        part: dict, data split (train, val or test)
    '''
    model.eval()
    scores = {}
    clerances = {}
    part = {}
    # NOTE(review): `device` is computed but never used in this function.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    for s, imset_dataset in [('train', train_dataset),
                             ('val', val_dataset),
                             ('test', test_dataset)]:
        # NOTE(review): __IPYTHON__ only exists inside IPython -- outside a
        # notebook this raises NameError.  Assigning `tqdm` here also makes it
        # a local name, so the loop below raises UnboundLocalError whenever
        # this branch is skipped.
        if __IPYTHON__:
            tqdm = tqdm_notebook
        for imset in tqdm(imset_dataset):
            # NOTE(review): get_sr_and_score now takes (imset, model,
            # aposterior_gt, next_sr, num_frames, min_L) and returns a
            # 12-tuple; this legacy 2-value call raises TypeError as written.
            # See custom_evaluate for the updated call pattern.
            sr, scPSNR = get_sr_and_score(imset, model, min_L=min_L)
            scores[imset['name']] = scPSNR
            clerances[imset['name']] = imset['clearances']
            part[imset['name']] = s
    return scores, clerances, part
def custom_evaluate(model, train_dataset, val_dataset, test_dataset, num_frames, min_L=16):
    '''
    Evaluates a pretrained model using `num_frames` low-res frames per sample.
    Args:
        model: HRNet, a pytorch model
        train_dataset: torch.Dataset
        val_dataset: torch.Dataset
        test_dataset: torch.Dataset
        num_frames: int, number of low-res frames passed to get_sr_and_score
        min_L: int, pad length
    Returns:
        scores: dict, shift cPSNR per imageset name
        clerances: dict, clearance scores per imageset name
        part: dict, data split (train, val or test) per imageset name
    '''
    model.eval()
    scores = {}
    clerances = {}
    part = {}
    for s, imset_dataset in [('train', train_dataset),
                             ('val', val_dataset),
                             ('test', test_dataset)]:
        # Fix: removed the `if __IPYTHON__:` guard, which raised NameError
        # outside IPython and made `tqdm` an unbound local otherwise.
        for imset in tqdm(imset_dataset):
            # Fix: the original passed num_frames/min_L into the
            # aposterior_gt/next_sr slots and unpacked the 12-tuple result
            # into only 4 names (ValueError).
            (sr, val_gt_SSIM, val_aposterior_SSIM, val_cPSNR, val_usual_PSNR,
             val_shift_cPSNR, val_cMSE, val_L2, val_shift_cMSE, val_delta_cMSE,
             val_delta_L2, val_delta_shift_cMSE) = get_sr_and_score(
                imset, model, None, None, num_frames, min_L=min_L)
            scores[imset['name']] = val_shift_cPSNR
            clerances[imset['name']] = imset['clearances']
            part[imset['name']] = s
    return scores, clerances, part
def benchmark(baseline_cpsnrs, scores, part, clerances):
    '''
    Benchmark scores against the ESA baseline.
    Args:
        baseline_cpsnrs: dict, shift cPSNR scores of the ESA baseline
        scores: dict, results
        part: dict, data split (train, val or test)
        clerances: dict, clearance scores
    Returns:
        results: pandas.Dataframe, results
    '''
    # TODO HR mask clearance
    columns = {
        'ESA': baseline_cpsnrs,
        'model': scores,
        'clr': clerances,
        'part': part,
    }
    results = pd.DataFrame(columns)
    # Ratio > 1 means the model beats the ESA baseline on that imageset.
    results['score'] = results['ESA'] / results['model']
    results['mean_clr'] = results['clr'].apply(np.mean)
    results['std_clr'] = results['clr'].apply(np.std)
    return results
def generate_submission_file(model, imset_dataset, out='../submission'):
    '''
    USAGE: generate_submission_file [path to testfolder] [name of the submission folder]
    EXAMPLE: generate_submission_file data submission

    Super-resolves every imageset, writes each result as <name>.png under
    `out`, then zips all PNGs into <out>/submission.zip.
    '''
    # NOTE(review): flush expects a bool; the string 'True' is merely truthy.
    print('generating solutions: ', end='', flush='True')
    os.makedirs(out, exist_ok=True)
    # NOTE(review): __IPYTHON__ only exists inside IPython (NameError
    # elsewhere), and assigning `tqdm` makes it module-global-shadowing local
    # state at module level -- confirm this file is only used from notebooks.
    if __IPYTHON__:
        tqdm = tqdm_notebook
    for imset in tqdm(imset_dataset):
        folder = imset['name']
        # NOTE(review): get_sr_and_score now requires aposterior_gt, next_sr
        # and num_frames and returns a 12-tuple; this legacy 2-value call
        # raises TypeError as written.
        sr, _ = get_sr_and_score(imset, model)
        sr = img_as_uint(sr)
        # normalize and safe resulting image in temporary folder (complains on low contrast if not suppressed)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            io.imsave(os.path.join(out, folder + '.png'), sr)
        print('*', end='', flush='True')
    print('\narchiving: ')
    sub_archive = out + '/submission.zip' # name of submission archive
    zf = ZipFile(sub_archive, mode='w')
    try:
        # Only the imgset*.png results go into the archive.
        for img in os.listdir(out):
            if not img.startswith('imgset'): # ignore the .zip-file itself
                continue
            zf.write(os.path.join(out, img), arcname=img)
            print('*', end='', flush='True')
    finally:
        zf.close()
    print('\ndone. The submission-file is found at {}. Bye!'.format(sub_archive))
class Model(object):
    """Thin wrapper bundling config, checkpoint loading and evaluation."""
    def __init__(self, config):
        # config: dict parsed from the training configuration JSON.
        self.config = config
    def load_checkpoint(self, checkpoint_file):
        """Load model weights; must be called before __call__/evaluate."""
        self.model = load_model(self.config, checkpoint_file)
    def __call__(self, imset, aposterior_gt, next_sr, num_frames, custom_min_L = 16):
        """Super-resolve `imset` and return the SR image plus all metrics."""
        sr, val_gt_SSIM, val_aposterior_SSIM, val_cPSNR, val_usual_PSNR, val_shift_cPSNR, val_cMSE, \
        val_L2, val_shift_cMSE, val_delta_cMSE, val_delta_L2, \
        val_delta_shift_cMSE = get_sr_and_score(imset, self.model, aposterior_gt, next_sr, num_frames, min_L= custom_min_L)
        return sr, val_gt_SSIM, val_aposterior_SSIM, val_cPSNR, val_usual_PSNR, val_shift_cPSNR, val_cMSE, \
               val_L2, val_shift_cMSE, val_delta_cMSE, val_delta_L2, val_delta_shift_cMSE
    def evaluate(self, train_dataset, val_dataset, test_dataset, baseline_cpsnrs):
        """Run the module-level evaluate() and benchmark the results."""
        scores, clearance, part = evaluate(self.model, train_dataset, val_dataset, test_dataset,
                                           min_L=self.config['training']['min_L'])
        return benchmark(baseline_cpsnrs, scores, part, clearance)
    def custom_evaluate(self, train_dataset, val_dataset, test_dataset, baseline_cpsnrs, num_frames, min_L):
        """Run the module-level custom_evaluate() and benchmark the results."""
        scores, clearance, part = custom_evaluate(self.model, train_dataset, val_dataset, test_dataset, num_frames, min_L)
        return benchmark(baseline_cpsnrs, scores, part, clearance)
    def generate_submission_file(self, imset_dataset, out='../submission'):
        """Write one PNG per imageset plus a zip archive under `out`."""
        # Fix: the original ignored the caller's `out` and always wrote to
        # the default '../submission' directory.
        generate_submission_file(self.model, imset_dataset, out=out)
|
from .trainer import Trainer
from .make_optimizer import make_optimizer,make_scheduler |
from typing import Dict
import logging
import os
import random
from overrides import overrides
from allennlp.data.instance import Instance
from allennlp.data.tokenizers.tokenizer import Tokenizer
from allennlp.data.tokenizers import WordTokenizer
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def filename(folder, i):
    """Return the expanded path of shard *i* of the billion-word corpus."""
    shard = "news.en-%05d-of-00100" % i
    return os.path.expanduser(folder + shard)
@DatasetReader.register("billion_words")
class BillionWordsReader(DatasetReader):
    """
    Reads shards of the Billion Word corpus and yields one ``Instance`` per
    sentence for language modelling.

    Parameters
    ----------
    tokenizer : ``Tokenizer``, optional (default=``WordTokenizer()``)
        We use this ``Tokenizer`` for the text. See :class:`Tokenizer`.
    token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``)
        We use this to define the input representation for the text. See :class:`TokenIndexer`.
        Note that the `output` representation will always be single token IDs - if you've specified
        a ``SingleIdTokenIndexer`` here, we use the first one you specify. Otherwise, we create
        one with default parameters.
    lazy : ``bool``, optional (default=``False``)
        Passed through to the ``DatasetReader`` base class.
    shuffle : ``bool``, optional (default=``True``)
        Shuffle the shard order and the lines within each shard.
    """
    def __init__(self,
                 tokenizer: Tokenizer = None,
                 token_indexers: Dict[str, TokenIndexer] = None,
                 lazy: bool = False,
                 shuffle: bool = True) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or WordTokenizer()
        self._shuffle = shuffle
        self._token_indexers = token_indexers or {
            "tokens": SingleIdTokenIndexer()
        }
        # No matter how you want to represent the input, we'll always represent the output as a
        # single token id. This code lets you learn a language model that concatenates word
        # embeddings with character-level encoders, in order to predict the word token that comes
        # next.
        self._output_indexer: Dict[str, TokenIndexer] = None
        # Reuse the first SingleIdTokenIndexer among the input indexers.
        for name, indexer in self._token_indexers.items():
            if isinstance(indexer, SingleIdTokenIndexer):
                self._output_indexer = {name: indexer}
                break
        else:
            # for/else: no SingleIdTokenIndexer was found, build a default one.
            self._output_indexer = {"tokens": SingleIdTokenIndexer()}
    @overrides
    def _read(self, dataset_path: str):
        # NOTE(review): range(1, 2) reads only shard 1 of 100 -- presumably a
        # debugging restriction; confirm before training on the full corpus.
        file_ids = list(range(1, 2))
        if self._shuffle:
            random.shuffle(file_ids)
        for file_id in file_ids:
            with open(filename(dataset_path, file_id), "r") as text_file:
                lines = text_file.readlines()
                print("read file")
            lines = [line.strip() for line in lines]
            #lines = lines[:500]
            if self._shuffle:
                random.shuffle(lines)
            for line in lines:
                tokenized_string = self._tokenizer.tokenize(line)
                # Input drops the last token; target (below) is shifted by one.
                input_field = TextField(tokenized_string[:-1],
                                        self._token_indexers)
                output_field = TextField(tokenized_string[1:],
                                        self._output_indexer)
                # NOTE(review): output_field is built but not yielded -- the
                # commented-out key below suggests it was disabled on purpose.
                yield Instance({
                    'tokens': input_field,
                    # 'output_tokens': output_field
                })
    @overrides
    def text_to_instance(self, sentence: str) -> Instance: # type: ignore
        # pylint: disable=arguments-differ
        """Tokenize ``sentence`` into input/output ``TextField`` pairs."""
        tokenized_string = self._tokenizer.tokenize(sentence)
        input_field = TextField(tokenized_string[:-1], self._token_indexers)
        output_field = TextField(tokenized_string[1:], self._output_indexer)
        return Instance({
            'input_tokens': input_field,
            'output_tokens': output_field
        })
|
"""
Something goes here right?
"""
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.html import escape
from django.http import JsonResponse
import datetime
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.db.models import Q
import json as json
from .forms import RecipeForm
from .models import Recipe
# Create your views here.
def index(request):
    """Render the recipe listing page, newest recipes first."""
    recipes = Recipe.objects.all().order_by('-id')
    return render(request, 'recipes.html', {
        'sitename': "EdgarRaw",
        'page_name': "Recipes",
        'all_recipes': recipes,
    })
@login_required(login_url="/login/")
def submitRecipe(request):
    """Show the add-recipe form; on a valid POST, save and redirect to the list."""
    if request.method == 'POST':
        form = RecipeForm(request.POST, request.FILES)
        if form.is_valid():
            form.save(request)
            return HttpResponseRedirect('/recipes/')
        # Invalid POST: fall through with the bound form so errors render.
    else:
        form = RecipeForm()
    return render(request, 'addrecipe.html', {
        'sitename': "EdgarRaw",
        'page_name': "Add a Recipe - EdgarRaw",
        'form': form,
    })
def viewRecipe(request, recipe_id):
    """Render the detail page for a single recipe."""
    return render(request, 'recipeDetails.html', {
        'sitename': "EdgarRaw",
        'page_name': "Recipes",
        'recipe': Recipe.objects.get(pk=recipe_id),
    })
def searchRecipes(request):
    """Search recipes by a term across title, description, recipe and ingredients."""
    if request.method != 'POST':
        return HttpResponseRedirect("/recipes/")
    search_text = request.POST['term']
    if not search_text:
        return HttpResponseRedirect("/recipes/")
    any_field = (Q(title__contains=search_text) |
                 Q(description__contains=search_text) |
                 Q(recipe__contains=search_text) |
                 Q(ingredients__contains=search_text))
    context = {
        'page_name': "Search Recipes",
        'searchallrecipes': Recipe.objects.filter(any_field),
        'search_text': search_text,
        'searchtitles': Recipe.objects.filter(title__contains=search_text),
        'searchdescriptions': Recipe.objects.filter(description__contains=search_text),
        'searchrecipes': Recipe.objects.filter(recipe__contains=search_text),
        'searchingredients': Recipe.objects.filter(ingredients__contains=search_text),
    }
    return render(request, 'searchforrecipes.html', context)
def editRecipe(request, recipe_id):
    """Edit an existing recipe: GET shows the bound form, valid POST saves."""
    post = Recipe.objects.get(pk=recipe_id)
    # Keep the current image so a save without a new upload preserves it.
    thefile = post.image
    if request.method == "POST":
        form = RecipeForm(request.POST, request.FILES, instance=post)
        if form.is_valid():
            # NOTE(review): `update` is a custom RecipeForm method (not the
            # standard ModelForm API) and receives the *id*, not the model
            # instance -- confirm the signature against forms.py.
            post = form.update(instance=recipe_id, thefile=thefile, commit=False)
            post.save()
            context = {
                'recipe':post,
            }
            return render(request, 'recipeDetails.html', context)
    else:
        form = RecipeForm(instance=post)
    # Invalid POSTs fall through here and re-render the edit page with errors.
    context = {
        'post':post,
        'form':form,
    }
    return render(request, 'editrecipe.html', context)
def delete(request, recipe_id):
    """Delete the given recipe and return to the listing.

    NOTE(review): reachable via plain GET and has no permission check --
    confirm whether this should require login like submitRecipe does.
    """
    Recipe.objects.get(pk=recipe_id).delete()
    return HttpResponseRedirect("/recipes/")
|
# -*- coding: utf-8 -*-
from jinja2 import Environment, FileSystemLoader
from modules.httpcore import HttpRequest, HttpException
from models.demo import say_hello
from config import *
# Jinja2 environment rooted at the configured TEMPLATES directory; autoescape
# is on, so rendered values are HTML-escaped by default.
j2_env = Environment(loader=FileSystemLoader(TEMPLATES), trim_blocks=True, autoescape=True)
class Index(HttpRequest):
    """Handler for the site root: renders index.html with a demo greeting."""
    def get(self, request, response):
        """Return a (content_type, body) pair for a GET on the index page."""
        # u = request.uri - site root path [string]
        # h = request.headers[key] - REQUEST HEADERS [list]
        # p = request.form[key] - FORM DATA (POST) [list]
        # g = request.query[key] - QUERYSTRING DATA (GET) [list]
        # c = request.cookies[key] - COOKIES [list]
        # p = request.params[index] - URL PARAMS [list] eg: /path_to_file/param1/param2
        # rows = self.conn.execute(sql, [vals]).fetchall()
        # self.conn.execute(sql, [vals]).commit()
        # response.headers[key] = val - Case sensitive key name
        # response.status_code = xxx - HttpStatus code. Default: 200
        # raise HttpException(code, message) - Overwrites response.status_code. Message is optional.
        # return "application/json", '{"Hello, Person!"}'
        return 'text/html', j2_env.get_template('index.html').render({
            'site': {'title': SITE_TITLE},
            'message': say_hello()
        })
|
def merge_sort(arr, left, right):
    """Sort arr[left..right] (inclusive bounds) in place via merge sort."""
    if left < right:
        # Fix: use floor division instead of int((left + right) / 2); the
        # float round-trip loses precision for very large indices.
        mid = (left + right) // 2
        merge_sort(arr, left, mid)
        merge_sort(arr, mid + 1, right)
        merge(arr, left, mid, right)


def merge(arr, left, mid, right):
    """Merge the sorted runs arr[left..mid] and arr[mid+1..right] in place."""
    # Slice copies replace the original element-by-element append loops.
    larr = arr[left:mid + 1]
    rarr = arr[mid + 1:right + 1]
    i = 0  # index into the left run
    j = 0  # index into the right run
    k = left  # write position in arr
    while i < len(larr) and j < len(rarr):
        # <= keeps equal elements from the left run first (stable merge).
        if larr[i] <= rarr[j]:
            arr[k] = larr[i]
            i += 1
        else:
            arr[k] = rarr[j]
            j += 1
        k += 1
    # Copy whichever run still has elements left.
    while i < len(larr):
        arr[k] = larr[i]
        i += 1
        k += 1
    while j < len(rarr):
        arr[k] = rarr[j]
        j += 1
        k += 1
# Quick demonstration: sort a sample list in place, printing before and after.
sample_values = [6, 3, 4, 8, 5, 1, 7, 0]
print(sample_values)
merge_sort(sample_values, 0, len(sample_values) - 1)
print(sample_values)
|
# Grading students (HackerRank): round each passing grade up to the next
# multiple of 5 when it is fewer than 3 points away.
n = int(input().strip())  # number of grades to read from stdin
res = []  # rounded grades, printed at the end
def get5(num, base=5):
    """Round *num* up to the nearest multiple of *base* (default 5).

    Generalized from the hard-coded 5 while remaining backward compatible
    with the original single-argument calls; closed-form replaces the
    increment loop (O(1) instead of O(base)).
    """
    return num + (-num % base)
for i in range(n):
    grade = int(input().strip())
    if grade < 38:
        # Failing grades below 38 are never rounded.
        res.append(grade)
    else:
        mul = get5(grade)
        diff = mul - grade
        if diff < 3:
            # Round up only when the next multiple of 5 is under 3 away.
            res.append(mul)
        else:
            res.append(grade)
for g in res:
    print(g)
|
# Generated by Django 3.0.7 on 2020-07-27 00:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the Categorias and
    # Subcategorias tables; each subcategory cascade-deletes with its
    # parent category via the ForeignKey below.
    dependencies = [
        ('Risk_project_ufps', '0005_auto_20200701_2149'),
    ]
    operations = [
        migrations.CreateModel(
            name='Categorias',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=30)),
                ('descripcion', models.CharField(blank=True, max_length=100, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Subcategorias',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=30)),
                ('descripcion', models.CharField(blank=True, max_length=100, null=True)),
                ('categoria', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Risk_project_ufps.Categorias')),
            ],
        ),
    ]
|
import os, sys, random, time
import enum
from random import choice
from baseClasses import genders, Countries, fileName
# macOS `say` voice names, grouped by country then gender.
voices = {
    Countries.Australian : {
        genders.male:["Lee"],
        genders.female:["Karen"]
    },
    Countries.Indian : {
        genders.male:["Rishi"],
        genders.female:["Veena"]
    },
    Countries.Irish : {
        genders.female:["Moira"]
    },
    Countries.South_Africa : {
        genders.female:["Tessa"]
    },
    Countries.British : {
        genders.male:["Daniel","Oliver"],
        genders.female:["Kate","Serena"]
    },
    Countries.American : {
        # NOTE(review): "Vicki" is a female macOS voice but is listed under
        # male here -- confirm the intended grouping.
        genders.male:["Alex","Tom","Vicki"],
        genders.female:["Agnes","Allison","Ava","Samantha","Susan"]
    }
}
def uniqueAudioFileNameGenerator()->str:
    # TODO: not implemented; intended to produce unique output file names.
    pass
def getAudioSample(word:str,voice:str):
    """Record `word` spoken by macOS voice `voice` into dataset/<word>/.

    Generates a temporary .aiff with `say`, converts it with `lame`, then
    removes the intermediate file.
    """
    import subprocess  # stdlib; local import keeps module-level imports unchanged
    if(not os.path.isdir(f"dataset/{word}")):
        os.mkdir(f"dataset/{word}")
    filePath = fileName%(word,voice,"SIRI")
    # Fix: argument lists instead of shell-string concatenation (no shell
    # injection via word/voice); the original also lacked a space after -o,
    # gluing the flag to the output path.
    subprocess.run(["say", "-v", voice, word, "-o", filePath + ".aiff"])
    # NOTE(review): `lame -b` expects a bitrate, not the input file, and lame
    # emits MP3 data even when the output is named .wav -- confirm intent.
    subprocess.run(["lame", "-b", filePath + ".aiff", filePath + ".wav"])
    subprocess.run(["rm", filePath + ".aiff"])
if __name__ == "__main__" :
    # Smoke test: record a single word with the British "Daniel" voice.
    getAudioSample("malware","Daniel")
import pygame
import random
pygame.init()
# ----- Create the main game window
WIDTH = 800
HEIGHT = 400
janela_jogo = pygame.display.set_mode((WIDTH,HEIGHT))
pygame.display.set_caption('Catch em` all ')
white = (255,255,255)
color_dark = (0,0,0)
#=== TITLE SCREEN ===
# Shown until the player presses any key; quitting here also skips the game.
tela_inicio=False
instrucoes = True
game = True
while (tela_inicio==False):
    for event in pygame.event.get():
        # ----- Handle consequences
        if event.type == pygame.QUIT:
            tela_inicio = True
            game = False
        if event.type== pygame.KEYDOWN:
            tela_inicio=True
    janela_jogo.fill(color_dark)
    # NOTE(review): the image is reloaded and rescaled every frame -- it could
    # be hoisted above the loop.
    titulo_na_tela = pygame.image.load('tela_de_inicio.png')
    titulo_na_tela = pygame.transform.scale(titulo_na_tela, (WIDTH,HEIGHT))
    janela_jogo.blit(titulo_na_tela,(0,0))
    pygame.display.flip()
# ----- Load and scale the sprite artwork
cama_elastica_imagem = pygame.image.load('bombeiros.png')
resgatado_imagem = pygame.image.load('RESGATADO.png')
ambulancia_imagem = pygame.image.load('Ambulancia.png')
predio_imagem = pygame.image.load('images.jpg') # placeholder: should be replaced by a building image
cama_elastica_imagem = pygame.transform.scale(cama_elastica_imagem, (150,150))
resgatado_imagem = pygame.transform.scale(resgatado_imagem, (30,30))
ambulancia_imagem = pygame.transform.scale(ambulancia_imagem, (175,175))
predio_imagem = pygame.transform.scale(predio_imagem,(100,300))
# Player sprite: the trampoline the player moves left/right.
class Jogador(pygame.sprite.Sprite):
    def __init__(self, img):
        # Base Sprite constructor.
        pygame.sprite.Sprite.__init__(self)
        self.image = img
        self.rect = self.image.get_rect()
        self.rect.centerx = 150
        self.rect.centery = HEIGHT - 50
        self.speedx = 0
        self.mask = pygame.mask.from_surface(self.image)

    def update(self):
        self.rect.x += self.speedx
        # Clamp to the playable strip of the screen.
        self.rect.right = min(self.rect.right, WIDTH - 120)
        self.rect.left = max(self.rect.left, 70)
# Rescued-person sprite: enters from the left and bounces on the trampoline.
class Resgatado(pygame.sprite.Sprite):
    def __init__(self,img):
        pygame.sprite.Sprite.__init__(self)
        self.image = img
        self.rect = self.image.get_rect()
        # Starts just off-screen at the bottom-left corner.
        self.rect.centerx = -25
        self.rect.centery = 400
        self.speedx = 2
        self.speedy = 4.5
        self.mask = pygame.mask.from_surface(self.image)
    def update(self):
        self.rect.x += self.speedx
        self.rect.y -= self.speedy
        # NOTE(review): exact-equality checks on rect coordinates are fragile
        # with the fractional 4.5 speed (Rect stores ints) -- confirm these
        # thresholds actually trigger.
        if self.rect.y == 110 :
            self.speedy = - 5
        elif self.rect.centerx == 700 :
            self.rect.x -= self.speedx
            self.rect.y -= self.speedy
    def quicar(self):
        # Bounce: restore upward motion after hitting the trampoline.
        self.speedy = 4.5
# Ambulance sprite: the static goal the rescued people must reach.
class Ambulance(pygame.sprite.Sprite) :
    def __init__(self,img) :
        pygame.sprite.Sprite.__init__(self)
        self.image = img
        self.rect = self.image.get_rect()
        self.rect.x = WIDTH - 150
        self.rect.y = 300
        self.mask = pygame.mask.from_surface(self.image)
    def update(self):
        # Intentionally static: self-assignment leaves the position unchanged.
        self.rect.x = self.rect.x
        self.rect.y = self.rect.y
# NOTE(review): Perigo ("danger") is defined but never instantiated below --
# presumably a planned hazard sprite; confirm whether it is still needed.
class Perigo(pygame.sprite.Sprite) :
    def __init__(self,img) :
        pygame.sprite.Sprite.__init__(self)
        self.image = img
        self.rect = self.image.get_rect()
        self.rect.x = WIDTH - 400
        self.rect.y = 300
        self.mask = pygame.mask.from_surface(self.image)
    def update(self):
        # Intentionally static, mirroring Ambulance.update.
        self.rect.x = self.rect.x
        self.rect.y = self.rect.y
# Clock used to cap the frame rate
clock = pygame.time.Clock()
FPS = 30
# Sprite groups: everything drawable, plus the rescued people on their own
all_sprites = pygame.sprite.Group()
all_resgatados = pygame.sprite.Group()
# Create the initial sprites
player = Jogador(cama_elastica_imagem)
all_sprites.add(player)
rescue = Resgatado(resgatado_imagem)
all_sprites.add(rescue)
all_resgatados.add(rescue)
ambulancia = Ambulance(ambulancia_imagem)
all_sprites.add(ambulancia)
# Custom event that periodically spawns new rescued people
ADDRESCUE = pygame.USEREVENT + 1
# NOTE(review): CHANGE_VEL is defined but no timer is ever set for it.
CHANGE_VEL = pygame.USEREVENT + 2
pygame.time.set_timer(ADDRESCUE,10000)
# Initial score
Score = 0
# Number of lives
Lifes = 3
# ===== Main loop =====
while game:
    clock.tick(FPS)
    # ----- Handle events
    for event in pygame.event.get():
        # ----- Handle consequences
        if event.type == pygame.QUIT:
            game = False
        # Key pressed: adjust the player's horizontal speed.
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                player.speedx -= 16
            if event.key == pygame.K_RIGHT:
                player.speedx += 16
        # Key released: undo the corresponding speed change.
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT:
                player.speedx += 16
            if event.key == pygame.K_RIGHT:
                player.speedx -= 16
        if event.type == ADDRESCUE :
            # Spawn a new rescued person and re-arm the timer at a random
            # 4-7 second interval.
            rescue = Resgatado(resgatado_imagem)
            all_sprites.add(rescue)
            all_resgatados.add(rescue)
            Random_vel=(random.randint(4,7))*1000
            pygame.time.set_timer(ADDRESCUE,Random_vel)
            print(Random_vel)
            print(len(all_resgatados))
            print(rescue.speedx)
            # Gets harder as the player's score grows.
            # NOTE(review): `Score//2 == 0` is only true for Score 0 or 1 --
            # likely `Score % 2 == 0` or a threshold was intended; confirm.
            if Score//2 == 0:
                rescue.speedx = 2.7
    # Collision checks between each rescued person and trampoline/ambulance.
    for elemento in all_resgatados :
        if pygame.sprite.collide_mask(elemento,player) : # collision happened
            elemento.quicar()
        if pygame.sprite.collide_mask(elemento,ambulancia) :
            elemento.kill()
            Score += 1
        # No collision: losing a life when the rescue hits the ground.
        # NOTE(review): exact equality with HEIGHT - 10 may never trigger
        # given the fractional vertical speeds -- confirm.
        if elemento.rect.y == HEIGHT - 10 :
            Lifes -= 1
            elemento.kill()
    # ----- Produce output
    janela_jogo.fill((255, 255, 0)) # fill the game screen background
    all_sprites.draw(janela_jogo)
    # NOTE(review): vertices_ambulancia is computed but never used.
    vertices_ambulancia = ((WIDTH - 150,350),(WIDTH,350),(WIDTH,400),(WIDTH - 150,400))
    janela_jogo.blit(predio_imagem,(0,100)) # draw the building image
    # ----- Score display
    font = pygame.font.Font(None, 30)
    text_pontos = font.render(str(Score), 1, color_dark)
    text_pontuacao = font.render('pontuacao :',1,color_dark)
    janela_jogo.blit(text_pontos, (600,10))
    janela_jogo.blit(text_pontuacao, (450,10))
    # ----- Lives display
    text_life = font.render(str(Lifes),1,color_dark)
    text_lifes = font.render('vidas :',1,color_dark)
    janela_jogo.blit(text_life, (750,10))
    janela_jogo.blit(text_lifes,(650,10))
    if Lifes == 0:
        game = False
    all_sprites.update() # update sprite positions
    pygame.display.update() # present the new frame to the player
#===== Game Over screen =====
GameOver=False
instrucoes = True
game = True
while (GameOver==False):
    for event in pygame.event.get():
        # Quit or any key press leaves the game-over screen.
        if event.type == pygame.QUIT:
            GameOver = True
        if event.type== pygame.KEYDOWN:
            GameOver = True
    janela_jogo.fill(color_dark)
    GameOver_tela = pygame.image.load('tela_de_inicio.png')
    # Fix: the original scaled `titulo_na_tela` (the already-scaled intro
    # surface) instead of the freshly loaded GameOver_tela.
    GameOver_tela = pygame.transform.scale(GameOver_tela, (WIDTH,HEIGHT))
    janela_jogo.blit(GameOver_tela,(0,0))
    pygame.display.flip()
# ===== Shutdown =====
pygame.quit() # releases the resources pygame acquired
|
def test_sample():
    """Smoke-test placeholder: emit a build marker on stdout."""
    marker = "BUILD"
    print(marker)
#!/usr/bin/python3
""" Does deployment"""
from fabric.api import *
import os
from datetime import datetime
import tarfile
# Target web servers and SSH user shared by every fabric task below.
env.hosts = ["35.237.254.224", "34.73.109.66"]
env.user = "ubuntu"
def deploy():
    """Pack web_static into an archive and push it to the web servers.

    Returns False when packing fails, otherwise the do_deploy result.
    """
    archive = do_pack()
    return do_deploy(archive) if archive else False
def do_pack():
    """Create a timestamped .tgz of the web_static folder.

    The archive is written to versions/ (created on demand); returns the
    archive path on success, None if the file was not produced.
    """
    savedir = "versions/"
    stamp = datetime.now().strftime("%Y%m%d%H%M%S")
    archive_path = savedir + "web_static_" + stamp + ".tgz"
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    with tarfile.open(archive_path, "w:gz") as tar:
        tar.add("web_static", arcname=os.path.basename("web_static"))
    # Only report the path if the archive actually landed on disk.
    return archive_path if os.path.exists(archive_path) else None
def do_deploy(archive_path):
    """Upload an archive to the web servers and activate it.

    :param archive_path: local path to a .tgz produced by do_pack
    :return: True on success; False when the archive is missing, is not
             a .tgz file, or the upload fails.
    """
    if not os.path.exists(archive_path):
        return False
    basename = os.path.basename(archive_path)
    # Fix: the original only bound `name` inside this check but used it
    # unconditionally afterwards, raising NameError for non-.tgz input.
    if basename[-4:] != ".tgz":
        return False
    name = basename[:-4]
    res = put(archive_path, "/tmp")
    # Fix: the original collected res.succeeded into a list and ignored
    # it; abort instead of extracting an archive that never arrived.
    if not res.succeeded:
        return False
    newdir = "/data/web_static/releases/" + name
    run("mkdir -p " + newdir)
    run("tar -xzf /tmp/" + basename + " -C " + newdir)
    run("rm /tmp/" + basename)
    # Flatten the extracted web_static/ directory into the release dir.
    run("mv " + newdir + "/web_static/* " + newdir)
    run("rm -rf " + newdir + "/web_static")
    # Re-point the `current` symlink at the new release.
    run("rm -rf /data/web_static/current")
    run("ln -s " + newdir + " /data/web_static/current")
    return True
|
from flask import render_template, redirect, url_for,request
from . import main
from ..models import Sources
from ..request import get_sources, get_articles, topheadlines, everything
@main.route('/')
def index():
    """Render the landing page with news sources for every category."""
    categories = ['general', 'business', 'entertainment', 'sports',
                  'technology', 'science', 'health']
    # One API lookup per category, in a fixed order.
    sources = {name: get_sources(name) for name in categories}
    title = 'Home | Best News Update Site'
    return render_template('index.html', title=title,
                           general=sources['general'],
                           business=sources['business'],
                           entertainment=sources['entertainment'],
                           sports=sources['sports'],
                           tech=sources['technology'],
                           science=sources['science'],
                           health=sources['health'])
@main.route('/articles/<source_id>')
def articles(source_id):
    '''
    Function that returns articles based on their sources
    '''
    # Fix: the original called get_articles(source_id) first and then
    # immediately overwrote the result with the paginated call below,
    # wasting one API request per page view.
    per_page = 40
    news_source = get_articles(source_id, per_page)
    title = f'{source_id} | All articles'
    return render_template('articles.html', title=title, news=news_source)
@main.route('/topheadlines')
def headlines():
    """Render the page listing the current top-headline articles."""
    news = topheadlines(40)  # page size
    return render_template('topheadlines.html', title='Top Headlines',
                           name='Top Headlines', news=news)
@main.route('/everything')
def all_news():
    """Render the page listing articles from all sources."""
    news = everything(40)  # page size
    return render_template('topheadlines.html', title='All News',
                           name='All News', news=news)
|
import flask
import json
import requests
import settings
import scraper
app = flask.Flask(__name__)
@app.route("/")
def route():
    """Serve the map page with window data read from windows.json."""
    # windows.json holds a list whose elements are themselves
    # JSON-encoded strings, hence the second decoding pass per entry.
    with open('windows.json', 'r') as f:
        windows = json.load(f)
    wins = [json.loads(window) for window in windows]
    return flask.render_template('index.html', windows=wins,
                                 google_maps_key=settings.GOOGLE_MAPS_KEY)
if __name__ == "__main__":
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
|
# Demo: Python's argument passing ("call by object reference").
# Rebinding a name inside a function never affects the caller; mutating
# a mutable object (list/dict) does.
wort = "unverändertes Wort"
zahl = 10
liste = [10, 20, 25, 35]
# bonus
dicte = {"a": "1"}
inner_print = "Innerhalb der Funktion: "
outer_print = "Außerhalb der Funktion: "
def parameteruebergabe(_):
    # Local rebinding shadows the module-level `wort`; the global stays intact.
    wort = "verändertes Wort"
    print(f"{inner_print}{wort}")
def parameteruebergabeInt(_):
    # Same for an int: the local name hides the global.
    zahl = 5
    print(f"{inner_print}{zahl}")
def parameteruebergabeListe(_):
    # Mutates the module-level list in place — visible to the caller.
    liste.append(10)
    print(f"{inner_print}{liste}")
def parameteruebergabeDict(_):
    # Mutates the module-level dict in place — visible to the caller.
    dicte["b"] = "2"
    print(f"{inner_print}{dicte.keys()}, {dicte.values()}")
# behaves like call by value (rebinding does not escape)
parameteruebergabe(wort)
print(f"{outer_print}{wort}")
parameteruebergabeInt(zahl)
print(f"{outer_print}{zahl}")
# behaves like call by reference (in-place mutation escapes)
parameteruebergabeListe(liste)
print(f"{outer_print}{liste}") # only mutable objects can be changed "by reference"
parameteruebergabeDict(dicte)
print(f"{outer_print}{dicte.keys()}, {dicte.values()}") # only mutable objects can be changed "by reference"
|
import numpy
import struct
import pyaudio
import threading
import struct
from collections import deque
from bibliopixel import LEDMatrix
from bibliopixel.animation import BaseMatrixAnim
import bibliopixel.colors as colors
class Recorder:
    """Simple, cross-platform class to record from the microphone.

    Audio is captured on a background thread into ``self.audio``; ``fft``
    folds the latest window into per-band levels for display, auto-scaled
    against a rolling history of recent peaks.
    """
    def __init__(self):
        """minimal garb is executed when class is loaded."""
        # NOTE(review): 48100 Hz is an unusual sample rate (44100/48000
        # are the common hardware rates) — confirm the card accepts it.
        self.RATE=48100
        self.BUFFERSIZE=2**12 #4096 is a good buffer size
        self.secToRecord=.1
        self.threadsDieNow=False
        self.newAudio=False
        # Rolling history of recent FFT peaks, used to normalize levels.
        self.maxVals = deque(maxlen=500)
    def setup(self):
        """initialize sound card."""
        #TODO - windows detection vs. alsa or something for linux
        #TODO - try/except for sound card selection/initiation
        self.buffersToRecord=int(self.RATE*self.secToRecord/self.BUFFERSIZE)
        if self.buffersToRecord==0: self.buffersToRecord=1
        self.samplesToRecord=int(self.BUFFERSIZE*self.buffersToRecord)
        self.chunksToRecord=int(self.samplesToRecord/self.BUFFERSIZE)
        self.secPerPoint=1.0/self.RATE
        self.p = pyaudio.PyAudio()
        self.inStream = self.p.open(format=pyaudio.paInt16,channels=1,rate=self.RATE,input=True, output=False,frames_per_buffer=self.BUFFERSIZE)
        self.xsBuffer=numpy.arange(self.BUFFERSIZE)*self.secPerPoint
        self.xs=numpy.arange(self.chunksToRecord*self.BUFFERSIZE)*self.secPerPoint
        self.audio=numpy.empty((self.chunksToRecord*self.BUFFERSIZE),dtype=numpy.int16)
    def close(self):
        """cleanly back out and release sound card."""
        self.p.close(self.inStream)
    ### RECORDING AUDIO ###
    def getAudio(self):
        """get a single buffer size worth of audio."""
        audioString=self.inStream.read(self.BUFFERSIZE)
        # Fix: numpy.fromstring is deprecated (removed in NumPy 2.0) for
        # binary input; frombuffer is the supported equivalent.
        return numpy.frombuffer(audioString,dtype=numpy.int16)
    def record(self,forever=True):
        """record secToRecord seconds of audio."""
        while True:
            if self.threadsDieNow: break
            for i in range(self.chunksToRecord):
                self.audio[i*self.BUFFERSIZE:(i+1)*self.BUFFERSIZE]=self.getAudio()
            self.newAudio=True
            if not forever: break
    def continuousStart(self):
        """CALL THIS to start running forever."""
        self.t = threading.Thread(target=self.record)
        self.t.start()
    def continuousEnd(self):
        """shut down continuous recording."""
        self.threadsDieNow=True
    ### MATH ###
    def fft(self,xMax, yMax):
        """Return xMax spectrum levels scaled into [0, yMax - 1]."""
        data=self.audio.flatten()
        # Fold the mirrored half of the spectrum onto the first half.
        left,right=numpy.split(numpy.abs(numpy.fft.fft(data)),2)
        ys=numpy.add(left,right[::-1])
        #FFT max values can vary widely depending on the hardware/audio setup.
        #Take the average of the last few values which will keep everything
        #in a "normal" range (visually speaking). Also makes it volume independent.
        self.maxVals.append(numpy.amax(ys))
        ys = ys[:xMax]
        m = max(100000, numpy.average(self.maxVals))
        ys = numpy.rint(numpy.interp(ys,[0,m],[0,yMax-1]))
        return ys
class EQ(BaseMatrixAnim):
    """Matrix animation rendering a live audio spectrum (VU/EQ bars)."""
    def __init__(self, led):
        super(EQ, self).__init__(led)
        # Begin capturing audio on a background thread immediately.
        self.rec = Recorder()
        self.rec.setup()
        self.rec.continuousStart()
        # Precompute one hue per row so drawing is a plain lookup.
        self.colors = [colors.hue_helper(y, self.height, 0) for y in range(self.height)]
    def endRecord(self):
        # Stop the background capture thread (must be called on shutdown).
        self.rec.continuousEnd()
    def step(self, amt = 1):
        self._led.all_off()
        # One FFT band per column, values scaled into [0, height].
        eq_data = self.rec.fft(self.width, self.height + 1)
        # Draw each column as a bar rising from the bottom row.
        for x in range(self.width):
            for y in range(self.height):
                if y < int(eq_data[x]):
                    self._led.set(x, self.height - y - 1, self.colors[y])
        # NOTE(review): assigns rather than increments; BaseMatrixAnim
        # subclasses usually advance with `self._step += amt` — confirm.
        self._step = amt
#Load driver for your hardware, visualizer just for example
# from bibliopixel.drivers.visualizer import DriverVisualizer
# driver = DriverVisualizer(width = 24, height = 24, stayTop = True)
# from bibliopixel.drivers.serial_driver import *
# import bibliopixel.gamma as gamma
# num = 24*24
# print "Pixel Count: {}".format(num)
# driver = DriverSerial(LEDTYPE.LPD8806, num, c_order=ChannelOrder.BRG, SPISpeed=2, gamma = gamma.LPD8806)
#
# #load the LEDMatrix class
# from bibliopixel.led import *
# #change rotation and vert_flip as needed by your display
# led = LEDMatrix(driver, rotation = MatrixRotation.ROTATE_0, vert_flip = True)
# led.setMasterBrightness(128)
# import bibliopixel.log as log
# #log.setLogLevel(log.DEBUG)
#
# try:
# anim = EQ(led)
# anim.run(fps=30)
# except KeyboardInterrupt:
# anim.endRecord()
# led.all_off()
# led.update()
# Registration manifest consumed by the BiblioPixel animation loader;
# exposes EQ as a matrix-controller animation with no parameters.
MANIFEST = [
    {
        "class": EQ,
        "controller": "matrix",
        "desc": "Reads system audio output and displays VU meter.",
        "display": "EQ",
        "id": "EQ",
        "params": [],
        "type": "animation"
    }
]
|
from queries.base import Base
from queries.message import Message
class QueryTypePagination(Base):
    """Message type that renders a long list of text blocks as pages
    navigated with an inline keyboard of page-number buttons.
    """

    def __init__(self, sdk):
        super().__init__(sdk)
        # Number of text blocks shown on a single page.
        self.limit_per_page = 5

    @staticmethod
    def name():
        return 'pagination'

    async def create(self, payload, data):
        """
        Send a new message
        :param payload:
        :param data:
        :return:
        """
        # Attach pagination metadata (total page count) to the raw data.
        self.wrapped_data = self.__wrap_data(data)
        # Persist the message so page-switch callbacks can find its data.
        self.message = Message(self.sdk)
        self.message.create(self.wrapped_data, self.name())
        # First page of text plus a keyboard positioned on page 1.
        text = self.__generate_text(self.wrapped_data['data'])
        keyboard = self.__generate_keyboard(1, self.wrapped_data['total_pages'])
        await self.sdk.send_inline_keyboard_to_chat(
            payload["chat"],
            text,
            keyboard,
            parse_mode="HTML",
            disable_web_page_preview=True,
            want_response=self.message.hash,
            bot=payload.get('bot', None)
        )

    async def process(self, payload, requested_page):
        """
        Handle a page-switch callback by editing the existing message.
        :param payload:
        :param requested_page: page number
        :return:
        """
        # Callback data arrives as a string.
        requested_page = int(requested_page)
        text = self.__generate_text(self.message.data['data'], requested_page)
        keyboard = self.__generate_keyboard(requested_page, self.message.data['total_pages'])
        await self.sdk.send_inline_keyboard_to_chat(
            payload["chat"],
            text,
            keyboard,
            parse_mode="HTML",
            disable_web_page_preview=True,
            update_id=self.message.id,
            bot=payload.get('bot', None)
        )

    def __wrap_data(self, data):
        """
        Add additional information from this Type and return wrapped data
        :param data:
        :return:
        """
        return {
            "total_pages": self.__count_chunks(data),
            "data": data
        }

    def __count_chunks(self, data):
        """
        Count number of chunks (pages)
        :param data:
        :return number:
        """
        # Ceiling division without floats: -(-n // d) == ceil(n / d).
        # (Also replaces the original direct dunder call data.__len__().)
        return -(-len(data) // self.limit_per_page)

    def __generate_text(self, data, page=1):
        """Concatenate the text blocks belonging to *page* (1-based)."""
        skip = (page - 1) * self.limit_per_page
        return ''.join(data[skip:skip + self.limit_per_page])

    def __page_button(self, page, cursor):
        """Build one numbered page button; the current page is marked • n •."""
        return {
            "text": page if page != cursor else "• {} •".format(page),
            "callback_data": self.message.wrap_callback_data(page)
        }

    def __generate_keyboard(self, cursor, total, keys_per_row=5):
        """
        Generates a keyboard for a current page
        :param number cursor: number of current page
        :param number total: total number of pages
        :param number keys_per_row: number of keys in a row. should be between 5 and 8
        :return list:
        """
        keyboard_row = []
        if total <= 1:
            # A single page needs no navigation keyboard at all.
            pass
        elif total <= keys_per_row:
            # Everything fits on one row:  [ 1 ] [ 2 ] [ •3• ]
            for page in range(1, total + 1):
                keyboard_row.append(self.__page_button(page, cursor))
        else:
            # Centre position of the row (works for odd and even widths).
            half = keys_per_row // 2 + keys_per_row % 2
            if cursor <= half:
                # Near the start:  [ 1 ] [ 2 ] [ •3• ] [ 4 ] [ 16 » ]
                for page in range(1, keys_per_row):
                    keyboard_row.append(self.__page_button(page, cursor))
                keyboard_row.append({
                    "text": "{} »".format(total),
                    "callback_data": self.message.wrap_callback_data(total)
                })
            elif cursor > total - half:
                # Near the end:  [ « 1 ] [ 46 ] [ •47• ] [ 48 ] [ 49 ] [ 50 ]
                keyboard_row.append({
                    "text": "« {}".format(1),
                    "callback_data": self.message.wrap_callback_data(1)
                })
                for page in range(total - keys_per_row + 2, total + 1):
                    keyboard_row.append(self.__page_button(page, cursor))
            else:
                # Middle of the list: overjump arrows on both sides.
                # [ « 1 ] [ 13 ] [ •14• ] [ 15 ] [ 50 » ]
                keyboard_row.append({
                    "text": "« {}".format(1),
                    "callback_data": self.message.wrap_callback_data(1)
                })
                span = (keys_per_row - 2) // 2
                # For even row widths the cursor sits just left of centre,
                # hence the (keys_per_row + 1) % 2 correction on the left edge.
                for page in range(cursor - span + (keys_per_row + 1) % 2,
                                  cursor + span + 1):
                    keyboard_row.append(self.__page_button(page, cursor))
                keyboard_row.append({
                    "text": "{} »".format(total),
                    "callback_data": self.message.wrap_callback_data(total)
                })
        return [keyboard_row]
|
#coding:utf-8
import tornado.web
import tornado.ioloop
import hashlib
class htmlHandle(tornado.web.RequestHandler):
    """Handler for GET /html: renders index.html with a demo list."""
    def get(self):
        print('html ----> start 0')
        # list_info is consumed by the template's loop.
        self.render('index.html',list_info = [11,22,33])
# Tornado application settings: template and static file locations.
settings = {
    'template_path': 'template',
    'static_path': 'static',
    # 'static_url_prefix': '/static/',
}
# Route table: only /html is served, by htmlHandle above.
application=tornado.web.Application(
    [
        (r'/html',htmlHandle),
    ],**settings)
if __name__ == '__main__':
application.listen(8001)
tornado.ioloop.IOLoop.instance().start() |
from django.contrib import admin
from explorer.models import Query
from explorer.actions import generate_report_action
class QueryAdmin(admin.ModelAdmin):
    """Django admin configuration for explorer Query objects."""
    list_display = ('title', 'description', 'created_by_user',)
    list_filter = ('title',)
    raw_id_fields = ('created_by_user',)
    # generate_report_action() returns the admin-action callable.
    actions = [generate_report_action()]
admin.site.register(Query, QueryAdmin)
|
from stemming.porter2 import stem
import sys
import numpy as np
# Token -> frequency vocabulary built from the training corpus.
vocab = {}
# Punctuation tokens to drop before stemming.
foobar = [".", ",", "(", ")", '"', "'"]
with open("sentiment.txt") as f:
    lines = f.readlines()
    for line in lines:
        # Strip trailing newline, lowercase; the first token is the label.
        line = line[:-1].lower().strip()
        words = line.split(" ")[1:]
        words = [word.strip() for word in words]
        words = filter(lambda x: x not in foobar, words)
        words = list(map(stem, words))
        for word in words:
            if word in foobar:
                continue
            if word in vocab:
                vocab[word] += 1
            else:
                vocab[word] = 1
def build_idx(vocab):
    """Map each vocabulary word to a unique consecutive index.

    Indices follow the dict's insertion order, starting at 0.
    """
    word2idx = {word: idx for idx, word in enumerate(vocab)}
    assert len(word2idx) == len(vocab)
    return word2idx
def sentence2features(words, vocab, word2idx):
    """Bag-of-words count vector for *words* over the vocabulary.

    :param words: token list; every token must appear in word2idx
    :param vocab: vocabulary dict — only its size is used here
    :param word2idx: word -> column index mapping
    :return: 1-D integer numpy array of length len(vocab)
    """
    # Fix: np.int was removed in NumPy 1.24; plain `int` yields the same
    # default integer dtype.
    x = np.zeros(len(vocab), dtype=int)
    for word in words:
        x[word2idx[word]] += 1
    return x
# Treat the K most frequent tokens as stopwords.
K = 13
# Sort ascending by count, reverse, take the top K (word, count) pairs.
stopwords = sorted(vocab.items(), key=lambda x: x[1])[:: -1][: K]
for k, v in stopwords:
    print(k, v)
stopwords_dict = dict(stopwords)
print(len(vocab))
print(vocab.get("like"))
word2idx = build_idx(vocab)
def is_stopword(x, stopwords_dict=stopwords_dict):
    """Return True when *x* is one of the high-frequency stopword tokens."""
    # The membership test already yields the boolean; the original
    # if/return-True/return-False chain was redundant.
    return x in stopwords_dict
def is_not_stopword(x, stopwords_dict=stopwords_dict):
    # Complement of is_stopword, usable directly as a filter() predicate.
    return not is_stopword(x, stopwords_dict)
# Build the design matrix X (bag-of-words counts) and label vector Y.
X = []
Y = []
with open("sentiment.txt") as f:
    lines = f.readlines()
    for line in lines:
        line = line[:-1].lower().strip()
        y, words = line.split(" ")[0], line.split(" ")[1:]
        # Label mapping: "+1" -> class 0 (positive), otherwise class 1.
        y = 0 if (y == "+1") else 1
        words = [word.strip() for word in words]
        words = filter(lambda x: x not in foobar, words)
        words = list(map(stem, words))
        words = list(filter(is_not_stopword, words))
        x = sentence2features(words, vocab, word2idx)
        X.append(x)
        Y.append(y)
X = np.array(X)
Y = np.array(Y)
print(X.shape)
print(Y.shape)
# Train/evaluate an L2-regularized logistic-regression classifier.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.33, random_state=12345)
model = LogisticRegression(penalty="l2", random_state=12345)
model.fit(X_train, Y_train)
Y_hat = model.predict(X_test)
# Accuracy on the held-out split.
print(np.mean(Y_hat == Y_test))
from sklearn.metrics import classification_report
print(classification_report(Y_test, Y_hat))
# Demo: classify two hand-picked sentences (expected +1, then -1),
# applying the same preprocessing pipeline as for training.
sentences = [
    "the actors are so terrific at conveying their young angst , we do indeed feel for them .",
    "a big meal of cliches that the talented cast generally chokes on . "
]
for idx, s in enumerate(sentences):
    words = s.lower().strip().split(" ")
    words = [word.strip() for word in words]
    words = filter(lambda x: x not in foobar, words)
    words = list(map(stem, words))
    words = list(filter(is_not_stopword, words))
    print(idx, words)
    # Add a batch axis for predict_proba.
    fv = np.array(sentence2features(words, vocab, word2idx))[None, :]
    p = model.predict_proba(fv)
    p = p[0]
    y = np.argmax(p)
    # Class 0 encodes "+1" (see the label mapping above).
    label = "+1" if y == 0 else "-1"
    print("{} {}".format(label, p[y]))
sys.exit(0)
|
import unittest
class MyTestCase(unittest.TestCase):
    """Smoke-tests for the two README usage examples of funowl.

    NOTE(review): these tests contain no assertions — they pass as long
    as ontology construction and serialization do not raise.
    """
    def test_readme_example1(self):
        # Build a minimal ontology and print its functional-syntax form.
        from rdflib import RDFS, OWL, Namespace
        from funowl import OntologyDocument, Ontology
        EX = Namespace("http://www.example.com/ontology1#")
        o = Ontology("http://www.example.com/ontology1")
        o.imports("http://www.example.com/ontology2")
        o.annotation(RDFS.label, "An example")
        o.subClassOf(EX.Child, OWL.Thing)
        doc = OntologyDocument(EX, o)
        print(str(doc.to_functional()))
    def test_readme_example2(self):
        from rdflib import Namespace, XSD, Literal
        from funowl import Ontology, DataProperty, Class, DataAllValuesFrom, DataOneOf, SubClassOf, DataSomeValuesFrom, \
            ClassAssertion, OntologyDocument
        EX = Namespace("http://example.org/")
        # Ontology represents the OWLF OntologyDocument production
        o = Ontology(EX.myOntology, "http://example.org/myOntolology/version/0.1")
        # namedIndividual, objectProperty, class, et. properties add to declarations
        o.namedIndividuals(EX.a)
        # Declarations can also be added explicitly
        o.declarations(DataProperty(EX.dp), Class(EX.A))
        # Axioms are added by type
        o.subClassOf(EX.A, DataAllValuesFrom(EX.dp, DataOneOf(3, Literal(4, datatype=XSD.int))))
        # or as an array
        o.axioms.append(SubClassOf(EX.A, DataAllValuesFrom(EX.dp, DataOneOf(Literal(2, datatype=XSD.short),
                                                                            Literal(3, datatype=XSD.int)))))
        o.axioms.append(ClassAssertion(DataSomeValuesFrom(EX.dp, DataOneOf(3)), EX.a))
        print(str(OntologyDocument(EX, ontology=o).to_functional()))
if __name__ == '__main__':
    unittest.main()
|
import requests
import json
from django.http import HttpResponse
def getEdgeinfo():
    """Query the websensor search API for two fixed names and return the
    result as a JSON HttpResponse with permissive CORS headers.

    NOTE(review): the query names are hard-coded and the function takes
    no request argument, so it is not wired up as a Django view as-is —
    confirm intended usage before reuse.
    """
    name1 = "扎克伯格"
    name2 = "文继荣"
    url = "http://websensor.playbigdata.com/fss3/service.svc/GetSearchResults"
    # Search for co-occurrences of both names; 5 results starting at 1.
    querystring = {"query": name1 + " " + name2, "num": "5", "start": "1"}
    # Browser-like headers copied from a Postman session.
    headers = {
        'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
        'upgrade-insecure-requests': "1",
        'accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        'accept-encoding': "gzip, deflate",
        'accept-language': "zh-CN,zh;q=0.8",
        'cache-control': "no-cache",
        'postman-token': "5549dc28-2253-f247-d5f9-1f8e87bd830f"
    }
    response = requests.request("GET", url, headers=headers, params=querystring)
    print(response)
    # Re-serialize the upstream JSON and attach wide-open CORS headers.
    res = json.loads(response.text)
    response = HttpResponse(json.dumps(res), content_type="application/json")
    response["Access-Control-Allow-Origin"] = "*"
    response["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS"
    response["Access-Control-Max-Age"] = "1000"
    response["Access-Control-Allow-Headers"] = "*"
    return response
getEdgeinfo() |
import csv
import sys
import collections
import operator
import datetime
# NOTE(review): this script is Python 2 (xrange, iteritems, print
# statements, csv file opened in "rb") and will not run under Python 3.
# It tallies rows of a CSV (path in argv[1]) per weekday of the
# "Created Date" column and prints one count per day.
today = datetime.date.today()
# Most recent Sunday (weekday() is Mon=0..Sun=6, hence the +1).
sunday = today - datetime.timedelta(today.weekday()+1)
ifile = open(sys.argv[1],"rb")
reader = csv.reader(ifile)
rownum = 0
low = 0
high = 0
datelist = []
# Per-weekday counters.
datelistDict = {}
datelistDict['Monday'] = 0
datelistDict['Tuesday'] = 0
datelistDict['Wednesday'] = 0
datelistDict['Thursday'] = 0
datelistDict['Friday'] = 0
datelistDict['Saturday'] = 0
datelistDict['Sunday'] = 0
for row in reader:
    if rownum == 0:
        # First row is the header; remember it to locate columns by name.
        header = row
    else:
        colnum = 0
        for col in row:
            if header[colnum] == "Created Date":
                # Parse e.g. "05/14/2014 03:02:00 PM", bump its weekday count.
                datelistDict[str(datetime.datetime.strptime(col,'%m/%d/%Y %I:%M:%S %p').strftime('%A'))]+=1
            #low = min(col,low)
            #high = max(col,high)
            #datelist.append(date_object)
            colnum+=1
    rownum+=1
#print datelist
#counter = collections.Counter(datelist)
#print counter
#counter = sorted(counter, key = operator.itemgetter(1),reverse = True)
#ans1 = counter.keys()
#ans2 = counter.values()
#print datelistDict
# NOTE(review): `d` below is computed and then never used after the sort.
d = {name:val for val, name in enumerate(datelistDict)}
n = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
d = sorted(n, key=d.get)
#print d
for i,j in datelistDict.iteritems():
    datelist.append([i,j])
#print datelist
datelist1=[]
# NOTE(review): datelist1 keeps dict-iteration order but is indexed
# positionally against n below, so on Python 2's unordered dicts the
# counts can be printed against the wrong day; sorting datelist1 by its
# numeric key before printing would fix this.
for i in datelist:
    if i[0] == 'Monday':
        datelist1.append([0,i[1]])
    elif i[0] == 'Tuesday':
        datelist1.append([1,i[1]])
    elif i[0] == 'Wednesday':
        datelist1.append([2,i[1]])
    elif i[0] == 'Thursday':
        datelist1.append([3,i[1]])
    elif i[0] == 'Friday':
        datelist1.append([4,i[1]])
    elif i[0] == 'Saturday':
        datelist1.append([5,i[1]])
    elif i[0] == 'Sunday':
        datelist1.append([6,i[1]])
for i in xrange(7):
    print n[i],"==",datelist1[i][1]
#print rownum-1, "complaints between",low, "and", high
ifile.close()
|
'''
Created on Dec 10, 2013
@author: Raul
'''
class UserType():
    '''
    User Type

    Enumerates the user roles; Value maps the numeric role code
    (0..3, presumably stored elsewhere) to its display name.
    '''
    # Symbolic names for each role.
    visitor="Visitor"
    student="Student"
    tutor="Tutor"
    admin="Admin"
    # Numeric code -> display name.
    Value={0:"Visitor",1:"Student",2:"Tutor",3:"Admin"}
|
"""
Code by Matteo Zanotto and Riccardo Volpi on top of
CRBM code (by Graham Taylor).
Theano CRBM implementation.
For details, see:
http://www.uoguelph.ca/~gwtaylor/publications/nips2006mhmublv
Sample data:
http://www.uoguelph.ca/~gwtaylor/publications/nips2006mhmublv/motion.mat
"""
import numpy
import numpy as np
import matplotlib.pyplot as plt
import time
import os
import sys
import tables
import zmq
import cPickle
import pickle
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from numpy.random import RandomState
from scipy.io import loadmat, savemat
from ConfigParser import *
from datetime import datetime
from reader import FileHandlerPyTable
from DataPreproc import DataPreproc
# code adapted fomr original script by Graham Taylor
class condRBM(object):
    def __init__(self, refDir, expConfigFilename, modelConfigFilename, gpuId):
        """Load experiment and model configuration located in refDir.

        :param refDir: directory holding both configuration files
        :param expConfigFilename: experiment config (data paths, flags)
        :param modelConfigFilename: model config (training hyper-parameters)
        :param gpuId: index of the GPU to use (coerced to int)
        """
        self.refDir = refDir
        self.expConfigFilename = refDir + '/' + expConfigFilename
        self.modelConfigFilename = refDir + '/' + modelConfigFilename
        self.dpp = DataPreproc()
        self.loadExpConfig()
        self.loadModelConfig()
        self.gpuId = int(gpuId)
    def loadExpConfig(self):
        """Read experiment settings (paths, sequence ids, preprocessing
        flags) from the experiment config file into attributes."""
        config = ConfigParser()
        config.read(self.expConfigFilename)
        self.npRandState = config.getint('PARAMETERS','npRandState')
        self.dataDir = config.get('EXP_DETAILS','dataDir')
        self.phase = config.get('EXP_DETAILS','phase')
        #~ self.seqId = config.getint('EXP_DETAILS','seqId')
        # seqId is a comma-separated list of sequence indices.
        seqList = config.get('EXP_DETAILS','seqId')
        self.seqId = [int(k) for k in seqList.split(',')]
        self.hdf5File = config.get('EXP_DETAILS','hdf5File')
        self.npyDataFile = config.get('EXP_DETAILS','npyDataFile')
        self.modelFile = config.get('EXP_DETAILS','modelFile')
        self.pcaFile = config.get('EXP_DETAILS','pcaFile')
        # patch is a comma-separated list of patch names.
        patchList = config.get('EXP_DETAILS','patch')
        self.patch = [str(k) for k in patchList.split(',')]
        self.minmaxFile = config.get('EXP_DETAILS','minmaxFile')
        self.electrodeFlag = config.getboolean('EXP_DETAILS','electrodeFlag')
        self.subsetFlag = config.getboolean('EXP_DETAILS','subsetFlag')
        self.subsetCycles = config.getint('EXP_DETAILS','subsetCycles')
        self.doPCA = config.getboolean('EXP_DETAILS','doPCA')
        self.whitenFlag = config.getboolean('EXP_DETAILS','whitenFlag')
        self.logFlag = config.getboolean('EXP_DETAILS','logFlag')
        self.subUnitsFlag = config.getboolean('EXP_DETAILS','subUnitsFlag')
        self.epBin = config.getint('EXP_DETAILS','epBin')
        self.scaling = config.get('EXP_DETAILS','scaling')
        # Data layout depends on whether electrode-level files are used.
        # NOTE(review): tsDir is computed but not used in this method.
        if self.electrodeFlag:
            tsDir = '/timestampsElectrode/'
            lgcpDir = '/lgcpElectrode/'
        else:
            tsDir = '/timestamps/'
            lgcpDir = '/lgcp/'
        self.dataFilename = self.dataDir + self.phase + lgcpDir + 'ep' +str(self.epBin) +'/npy_data/'
    def loadModelConfig(self):
        """Read training and model hyper-parameters from the model config."""
        config = ConfigParser()
        config.read(self.modelConfigFilename)
        self.training_epochs = config.getint('MAIN_PARAMETER_SETTING','training_epochs')
        self.batch_size = config.getint('MAIN_PARAMETER_SETTING','batch_size')
        self.k = config.getint('MAIN_PARAMETER_SETTING','k') # steps in CDk
        self.n_hidden = config.getint('MODEL_PARAMETER_SETTING','n_hidden')
        self.delay = config.getint('MODEL_PARAMETER_SETTING','delay')
        self.learning_rate = config.getfloat('MODEL_PARAMETER_SETTING','learning_rate')
        self.n_gibbs_generate = config.getint('MODEL_PARAMETER_SETTING','n_gibbs_generate') # Gibbs iterations when generating predictions
        self.delta = config.getint('MODEL_PARAMETER_SETTING','delta')
    def initialiseParams(self, input=None, input_history=None,
                         A=None, B=None, W=None, hbias=None,
                         vbias=None, numpy_rng=None,
                         theano_rng=None):
        """
        :param input: None for standalone RBMs or symbolic variable if RBM is
        part of a larger graph.

        :param A: None for standalone CRBMs or symbolic variable pointing to a
        shared weight matrix in case CRBM is part of a CDBN network; in a CDBN,
        the weights are shared between CRBMs and layers of a MLP

        :param B: None for standalone CRBMs or symbolic variable pointing to a
        shared weight matrix in case CRBM is part of a CDBN network; in a CDBN,
        the weights are shared between CRBMs and layers of a MLP

        :param W: None for standalone CRBMs or symbolic variable pointing to a
        shared weight matrix in case CRBM is part of a CDBN network; in a CDBN,
        the weights are shared between CRBMs and layers of a MLP

        :param hbias: None for standalone CRBMs or symbolic variable pointing
        to a shared hidden units bias vector in case CRBM is part of a
        different network

        :param vbias: None for standalone RBMs or a symbolic variable
        pointing to a shared visible units bias
        """
        # NOTE(review): self.d (the data matrix) is assumed to be set
        # elsewhere before this call — confirm against the caller.
        n_visible = self.d.shape[1]
        self.n_visible = n_visible
        # To use GPU.
        import theano.sandbox.cuda
        theano.sandbox.cuda.use('gpu'+str(self.gpuId))
        theano.config.floatX = 'float32'
        if numpy_rng is None:
            # create a number generator
            numpy_rng = numpy.random.RandomState(self.npRandState)
        if theano_rng is None:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        if W is None:
            # the output of uniform if converted using asarray to dtype
            # theano.config.floatX so that the code is runable on GPU
            initial_W = np.asarray(0.01 * numpy_rng.randn(n_visible,
                                                          self.n_hidden),
                                   dtype=theano.config.floatX)
            # theano shared variables for weights and biases
            W = theano.shared(value=initial_W, name='W')
        if A is None:
            # A: autoregressive weights, visible history -> visibles.
            initial_A = np.asarray(0.01 * numpy_rng.randn(n_visible * self.delay,
                                                          n_visible),
                                   dtype=theano.config.floatX)
            # theano shared variables for weights and biases
            A = theano.shared(value=initial_A, name='A')
        if B is None:
            # B: conditioning weights, visible history -> hiddens.
            initial_B = np.asarray(0.01 * numpy_rng.randn(n_visible * self.delay,
                                                          self.n_hidden),
                                   dtype=theano.config.floatX)
            # theano shared variables for weights and biases
            B = theano.shared(value=initial_B, name='B')
        if hbias is None:
            # create shared variable for hidden units bias
            hbias = theano.shared(value=numpy.zeros(self.n_hidden,
                                  dtype=theano.config.floatX), name='hbias')
        if vbias is None:
            # create shared variable for visible units bias
            vbias = theano.shared(value=numpy.zeros(n_visible,
                                  dtype=theano.config.floatX), name='vbias')
        # initialize input layer for standalone CRBM or layer0 of CDBN
        self.input = input
        if not input:
            self.input = T.matrix('input')
        self.input_history = input_history
        if not input_history:
            self.input_history = T.matrix('input_history')
        self.W = W
        self.A = A
        self.B = B
        self.hbias = hbias
        self.vbias = vbias
        self.numpy_rng = numpy_rng
        self.theano_rng = theano_rng
        # **** WARNING: It is not a good idea to put things in this list
        # other than shared variables created in this function.
        self.params = [self.W, self.A, self.B, self.hbias, self.vbias]
def free_energy(self, v_sample, v_history):
''' Function to compute the free energy of a sample conditional
on the history '''
wx_b = T.dot(v_sample, self.W) + T.dot(v_history, self.B) + self.hbias
ax_b = T.dot(v_history, self.A) + self.vbias
visible_term = T.sum(0.5 * T.sqr(v_sample - ax_b), axis=1)
hidden_term = T.sum(T.log(1 + T.exp(wx_b)), axis=1)
return visible_term - hidden_term
    def propup(self, vis, v_history):
        ''' This function propagates the visible units activation upwards to
        the hidden units

        Note that we return also the pre-sigmoid activation of the layer. As
        it will turn out later, due to how Theano deals with optimizations,
        this symbolic variable will be needed to write down a more
        stable computational graph (see details in the reconstruction cost
        function)
        '''
        # Hidden pre-activation conditions on both the current visibles
        # (through W) and the visible history (through B).
        pre_sigmoid_activation = T.dot(vis, self.W) + \
                                 T.dot(v_history, self.B) + self.hbias
        return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
    def sample_h_given_v(self, v0_sample, v_history):
        ''' This function infers state of hidden units given visible units '''
        # compute the activation of the hidden units given a sample of the
        # visibles
        pre_sigmoid_h1, h1_mean = self.propup(v0_sample, v_history)
        # get a sample of the hiddens given their activation
        # Note that theano_rng.binomial returns a symbolic sample of dtype
        # int64 by default. If we want to keep our computations in floatX
        # for the GPU we need to specify to return the dtype floatX
        h1_sample = self.theano_rng.binomial(size=h1_mean.shape, n=1,
                                             p=h1_mean,
                                             dtype=theano.config.floatX)
        return [pre_sigmoid_h1, h1_mean, h1_sample]
def propdown(self, hid, v_history):
'''This function propagates the hidden units activation downwards to
the visible units
Note that we return also the pre_sigmoid_activation of the layer. As
it will turn out later, due to how Theano deals with optimizations,
this symbolic variable will be needed to write down a more
stable computational graph (see details in the reconstruction cost
function)
'''
mean_activation = T.dot(hid, self.W.T) + T.dot(v_history, self.A) + \
self.vbias
return mean_activation
    def sample_v_given_h(self, h0_sample, v_history):
        ''' This function infers state of visible units given hidden units '''
        # compute the activation of the visible given the hidden sample
        v1_mean = self.propdown(h0_sample, v_history)
        # No stochastic sampling of the Gaussian visibles here: the
        # mean-field value is used directly (the binomial sampling that
        # a binary-visible RBM would use is intentionally left disabled).
        #v1_sample = self.theano_rng.binomial(size=v1_mean.shape,
        #                                     n=1, p=v1_mean,
        #                                     dtype = theano.config.floatX)
        v1_sample = v1_mean # mean-field
        return [v1_mean, v1_sample]
def gibbs_hvh(self, h0_sample, v_history):
''' This function implements one step of Gibbs sampling,
starting from the hidden state'''
v1_mean, v1_sample = self.sample_v_given_h(h0_sample, v_history)
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample,
v_history)
return [v1_mean, v1_sample, pre_sigmoid_h1, h1_mean, h1_sample]
def gibbs_vhv(self, v0_sample, v_history):
''' This function implements one step of Gibbs sampling,
starting from the visible state'''
#pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
#pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample,
v_history)
v1_mean, v1_sample = self.sample_v_given_h(h1_sample, v_history)
return [pre_sigmoid_h1, h1_mean, h1_sample, v1_mean, v1_sample]
    def get_cost_updates(self):
        """Build one step of CD-k training for the CRBM.

        Uses ``self.learning_rate`` (lr) and ``self.k`` (number of Gibbs
        steps).  Returns ``(monitoring_cost, updates)``: *updates* holds
        the parameter update rules plus the random-generator state
        updates produced by ``theano.scan``.
        """
        lr=self.learning_rate
        k=self.k
        # compute positive phase
        pre_sigmoid_ph, ph_mean, ph_sample = \
            self.sample_h_given_v(self.input, self.input_history)
        # for CD, start the negative chain from the freshly sampled hiddens
        chain_start = ph_sample
        # perform actual negative phase
        # in order to implement CD-k we need to scan over the
        # function that implements one gibbs step k times.
        # Read Theano tutorial on scan for more information :
        # http://deeplearning.net/software/theano/library/scan.html
        # the scan will return the entire Gibbs chain
        # updates dictionary is important because it contains the updates
        # for the random number generator
        [nv_means, nv_samples, pre_sigmoid_nhs, nh_means,
            nh_samples], updates = theano.scan(self.gibbs_hvh,
                    # the None are place holders, saying that
                    # chain_start is the initial state corresponding to the
                    # 5th output
                    outputs_info=[None, None, None, None, chain_start],
                    non_sequences=self.input_history,
                    n_steps=k)
        # determine gradients on CRBM parameters
        # note that we only need the sample at the end of the chain
        chain_end = nv_samples[-1]
        cost = T.mean(self.free_energy(self.input, self.input_history)) - \
            T.mean(self.free_energy(chain_end, self.input_history))
        # We must not compute the gradient through the gibbs sampling
        gparams = T.grad(cost, self.params, consider_constant=[chain_end])
        # constructs the update dictionary
        for gparam, param in zip(gparams, self.params):
            # make sure that the learning rate is of the right dtype
            # NOTE(review): `param == self.A` relies on default identity
            # equality of Theano shared variables; `param is self.A`
            # would state the intent explicitly — confirm before changing.
            if param == self.A:
                # slow down autoregressive updates
                updates[param] = param - gparam * 0.01 * \
                    T.cast(lr, dtype=theano.config.floatX)
            else:
                updates[param] = param - gparam * \
                    T.cast(lr, dtype=theano.config.floatX)
        # reconstruction error is a better proxy for CD
        monitoring_cost = self.get_reconstruction_cost(updates, nv_means[-1])
        return monitoring_cost, updates
def get_reconstruction_cost(self, updates, pre_sigmoid_nv):
"""Approximation to the reconstruction error
"""
# sum over dimensions, mean over cases
recon = T.mean(T.sum(T.sqr(self.input - pre_sigmoid_nv), axis=1))
return recon
def generate(self, orig_data, orig_history, n_samples):
""" Given initialization(s) of visibles and matching history, generate
n_samples in future.
orig_data : n_seq by n_visibles array
initialization for first frame
orig_history : n_seq by delay * n_visibles array
delay-step history
n_samples : int
number of samples to generate forward
n_gibbs : int
number of alternating Gibbs steps per iteration"""
n_gibbs=self.n_gibbs_generate
n_seq = orig_data.shape[0]
persistent_vis_chain = theano.shared(orig_data)
persistent_history = theano.shared(orig_history)
#persistent_history = T.matrix('persistent_history')
[presig_hids, hid_mfs, hid_samples, vis_mfs, vis_samples], updates = \
theano.scan(crbm.gibbs_vhv,
outputs_info=[None, None, None, None,
persistent_vis_chain],
non_sequences=persistent_history,
n_steps=n_gibbs)
# add to updates the shared variable that takes care of our persistent
# chain
# initialize next visible with current visible
# shift the history one step forward
updates.update({persistent_vis_chain: vis_samples[-1],
persistent_history: T.concatenate(
(vis_samples[-1],
persistent_history[:, :(self.delay - 1) * \
self.n_visible],
), axis=1)})
# construct the function that implements our persistent chain.
# we generate the "mean field" activations for plotting and the actual
# samples for reinitializing the state of our persistent chain
sample_fn = theano.function([], [vis_mfs[-1], vis_samples[-1]],
updates=updates,
name='sample_fn')
#vis_mf, vis_sample = sample_fn()
#print orig_data[:,1:5]
#print vis_mf[:,1:5]
generated_series = np.empty((n_seq, n_samples, self.n_visible))
for t in xrange(n_samples):
print "Generating frame %d" % t
vis_mf, vis_sample = sample_fn()
generated_series[:, t, :] = vis_mf
return generated_series
def __getstate__(self):
state = dict(self.__dict__)
#np.save('input.npy', self.input)
#np.save('input_history.npy', self.input_history)
del state['input']
del state['input_history']
return state
    def loadData(self):
        """Load spiking-rate data into ``self.d``.

        Supports two on-disk formats: a legacy NumPy ``.npz`` file
        (keys ``d`` plus ``electIdx``/``neuronIdx``) and per-sequence
        HDF5 files, one per (sequence, patch) pair, which are stacked
        vertically per patch and then concatenated horizontally across
        patches.  Side effects: sets ``self.d``, ``self.obsKeys``,
        ``self.seqs``, ``self.neuronsIdx`` and ``self.patchNoNeurons``.
        """
        # loading spiking rate data
        # deals with the old data format (Numpy)
        if 'npz' in self.dataFilename:
            with np.load(self.dataFilename) as dataFile:
                self.d = dataFile['d'].astype(np.float32)
                # filenames ending in 'elect' carry electrode indices,
                # otherwise neuron indices
                if self.dataFilename[-9:-4] == 'elect':
                    self.obsKeys = dataFile['electIdx']
                else:
                    self.obsKeys = dataFile['neuronIdx']
        # deals with the new data format (HDF5)
        else:
##            dataFile = tables.open_file(self.dataFilename,'r')
##            self.d = dataFile.root.d[:].astype(np.float32)
##            self.obsKeys = dataFile.root.neuronIdx[:]
##            dataFile.close()
            dataDir = self.dataFilename
            seqNumbers = self.seqId
            allData = dict()
            self.patchNoNeurons = dict()
            self.neuronsIdx = np.array([])
            for pth in self.patch:
                print pth
                self.seqs = []
                data = []
                for seqNum in seqNumbers:
                    # NOTE(review): this compares the whole patch *list*
                    # to 'whole'; `pth == 'whole'` looks intended — confirm.
                    if self.patch == 'whole':
                        dataFile = tables.open_file(dataDir+'seq'+str(seqNum)+'_th50_ClassStimconsistent.hdf5','r')
                    else:
                        dataFile = tables.open_file(dataDir+'seq'+str(seqNum)+'_'+str(pth)+'_th50_ClassStimconsistent.hdf5','r')
                    d_tmp = dataFile.root.d[:].astype(np.float32)
                    d_tmp = d_tmp.transpose()
                    data.append(d_tmp)
                    # remember each sequence's length (frames)
                    self.seqs.append(d_tmp.shape[0])
                    self.obsKeys = dataFile.root.neuronIdx[:]
                    dataFile.close()
                    self.neuronsIdx = np.hstack((self.neuronsIdx,self.obsKeys))
                    del d_tmp
                data = np.array(data)
                # stack all sequences of this patch vertically (time axis)
                self.d = data[0]
                print self.d.shape
                if data.shape[0] > 1:
                    for i in range(1,data.shape[0]):
                        self.d = np.vstack((self.d,data[i]))
                allData[str(pth)] = self.d
                self.patchNoNeurons[str(pth)] = len(self.d)
                del data
                self.d = 0
            print 'Loaded.'
            # concatenate the per-patch matrices horizontally (neuron axis)
            self.d = allData[self.patch[0]]
            del allData[self.patch[0]]
            print self.patch
            print dir()
            for i in self.patch[1:]:
                print i
                self.d = np.hstack((self.d,allData[i]))
                del allData[i]
            print self.d.shape
        print 'Ready to train'
        # if just a part of the experiment is going to be used (subsetFlag id True)
        # find how much data needs to be dropped
        if self.subsetFlag:
            fh = FileHandlerPyTable(self.dataDir+'hdf5Data/' + self.hdf5File)
            fh.openAndLoadFile()
            interestFrames = fh.getSubsetFrames(self.seqId, self.subsetCycles)
            cutoff = interestFrames[1] - interestFrames[0] - 1
            self.d = self.d[:cutoff,:]
            fh.close()
    def train(self):
        """Train the CRBM with CD-k over minibatches of ``self.d``.

        Builds the Theano training function, iterates over epochs and
        minibatches, periodically pickles the model to ``crbm.pkl`` and
        notifies a monitoring process over ZeroMQ.  Training can be
        aborted early by creating a 'stop_now' file in ``self.refDir``.
        """
        context = zmq.Context()
        socket = context.socket(zmq.DEALER)
        socket.connect('tcp://localhost:'+str(556)+str(self.gpuId))
        contextSt = zmq.Context()
        # NOTE(review): this uses `context`, leaving `contextSt` unused;
        # `contextSt.socket(zmq.REQ)` looks intended — confirm.
        socketSt = context.socket(zmq.REQ)
        socketSt.connect ('tcp://localhost:'+str(559)+str(self.gpuId))
        # perform training following what was done in the original code
        rng = self.numpy_rng
        theano_rng = self.theano_rng
        # batchdata is returned as theano shared variable floatX
        # batchdata, seqlen, data_mean, data_std = load_data(dataset)
        data_mean = self.d.mean(axis=0)
        data_std = self.d.std(axis=0)
        seqlen = np.array([self.d.shape[0]])
        batchdata = theano.shared(np.asarray(self.d, dtype=theano.config.floatX))
        # valid starting indices: each frame must have delay*delta frames
        # of history available within its own sequence
        batchdataindex = []
        last = 0
        # seqs: length (in frames) of each sequence
        for s in self.seqs:
            batchdataindex += range(last + self.delay*self.delta, last + s)
            last = last + s
        permindex = np.array(batchdataindex)
        # with large delays this form must be used
        n_train_batches = permindex.shape[0]/self.batch_size
        n_dim = batchdata.get_value(borrow=True).shape[1]
        # used only by the code computing spatio-temporal features
        self.dataIdx = permindex
        self.histIdx = np.array([self.dataIdx - n*self.delta for n in xrange(1, self.delay + 1)]).T
        # Python 2 built-in file(); persist the index arrays for later use
        f = file(self.refDir + '/' +'crbm_idx.pkl', 'wb')
        cPickle.dump([self.dataIdx,self.histIdx], f, protocol=cPickle.HIGHEST_PROTOCOL)
        f.close()
        rng.shuffle(permindex)
        # allocate symbolic variables for the data
        index = T.lvector() # index to a [mini]batch
        index_hist = T.lvector() # index to history
        x = T.matrix('x') # the data
        x_history = T.matrix('x_history')
        #theano.config.compute_test_value='warn'
        #x.tag.test_value = np.random.randn(batch_size, n_dim)
        #x_history.tag.test_value = np.random.randn(batch_size, n_dim*delay)
        # initialize storage for the persistent chain
        # (state = hidden layer of chain)
        self.input = x
        self.input_history = x_history
        self.n_visible = n_dim
        self.numpy_rng=rng
        self.theano_rng=theano_rng
        # get the cost and the gradient corresponding to one step of CD-k
        cost, updates = self.get_cost_updates()
        #################################
        # Training the CRBM #
        #################################
        # the purpose of train_crbm is solely to update the CRBM parameters
        train_crbm = theano.function([index, index_hist], cost,
                       updates=updates,
                       givens={x: batchdata[index], \
                            x_history: batchdata[index_hist].reshape((
                            self.batch_size, self.delay * n_dim))},
                       name='train_crbm')
        plotting_time = 0.
        # NOTE(review): time.clock() is deprecated/removed in Python 3
        start_time = time.clock()
        print('Starting learning loop')
        # go through training epochs
        for epoch in xrange(self.training_epochs):
            # go through the training set
            mean_cost = []
            for batch_index in xrange(n_train_batches):
                #~ print str(epoch) + ' - ' + str(batch_index) + '/' + str(n_train_batches)
                # indexing is slightly complicated
                # build a linear index to the starting frames for this batch
                # (i.e. time t) gives a batch_size length array for data
                data_idx = permindex[batch_index * self.batch_size:(batch_index + 1) \
                            * self.batch_size]
                # now build a linear index to the frames at each delay tap
                # (i.e. time t-1 to t-delay)
                # gives a batch_size x delay array of indices for history
                hist_idx = np.array([data_idx - n*self.delta for n in xrange(1, self.delay + 1)]).T
                #~ print '===' + str(batch_index * self.batch_size) + ':' + str((batch_index + 1)*self.batch_size) + '==='
                #~ print '[' + str(batch_index) + ']' + str(data_idx.shape) + ' - ' + str(hist_idx.shape)
                this_cost = train_crbm(data_idx, hist_idx.ravel())
                #print batch_index, this_cost
                mean_cost += [this_cost]
            # stop the learning if the "stop_now" file has been created
            # in the experiment folder
            # NOTE(review): this break exits only the epoch loop at the
            # end of the current epoch, not mid-batch — confirm intended.
            if os.path.isfile(self.refDir + '/' + 'stop_now'):
                break
            # backup the model every five epochs and ping the monitor
            if epoch % 5 == 0:
                self.epoch = epoch
                f = file(self.refDir + '/' +'crbm.pkl', 'wb')
                pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)
                f.close()
                socket.send_string(self.refDir)
            #~ if epoch == 300:
                #~ print 'Sending...'
                #~ socketSt.send('OK')
            print 'Training epoch %d, cost is ' % epoch, numpy.mean(mean_cost)
        end_time = time.clock()
        pretraining_time = (end_time - start_time)
        # touch a 'done' marker with a completion timestamp
        with open(self.refDir + '/done', 'w') as doneFile:
            doneFile.write(datetime.strftime(datetime.now(), '%d/%m/%Y %H:%M:%S'))
        #~ np.save(self.refDir + '/' + 'self_d',self.d)
        #~ del self.d
        f = file(self.refDir + '/' +'crbm.pkl', 'wb')
        pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)
        f.close()
        print ('Training took %f minutes' % (pretraining_time / 60.))
|
from cnn.lstm import add
import pytest
def test_init():
    """Smoke-test placeholder.

    BUG FIX: this function was decorated with @pytest.fixture, which
    made pytest register it as a fixture instead of collecting it as a
    test — so it never ran.  The decorator is removed.
    """
    # assert add(2, 3) == 5
    print('init test mock')
def test_add():
    # add() is imported from cnn.lstm; 2 + 10 must give 12.
    expected = 12
    assert add(2, 10) == expected
|
# -*- coding: utf-8 -*-
from django.apps import AppConfig as BaseAppConfig
from django.utils.translation import ugettext_lazy as _
class AppConfig(BaseAppConfig):
    """Django application configuration for the 'knowledge' app."""
    name = 'knowledge'
    verbose_name = _('База знаний')  # Russian: "Knowledge base"
    def ready(self):
        # Import for its side effect: registers the app's signal handlers.
        from knowledge import signals # flake8: NOQA
|
from datetime import date
from onegov.ballot import Election
from onegov.ballot import ElectionCompound
from onegov.ballot import Vote
from onegov.core.utils import Bunch
from onegov.election_day.models import Notification
from onegov.election_day.models import WebsocketNotification
from onegov.election_day.utils import get_last_notified
from onegov.election_day.utils import get_parameter
from onegov.election_day.utils import replace_url
from pytest import raises
def test_get_last_notified(session):
    """get_last_notified() must track only WebsocketNotification rows:
    None before any, set by the first, unchanged by a plain
    Notification, and advanced by a newer WebsocketNotification."""
    vote = Vote(
        title="Vote",
        domain='federation',
        date=date(2011, 1, 1),
    )
    election = Election(
        title="election",
        domain='region',
        date=date(2011, 1, 1),
    )
    compound = ElectionCompound(
        title="Elections",
        domain='canton',
        date=date(2011, 1, 1),
    )
    for model in (vote, election, compound):
        session.add(model)
        session.flush()
        # no notifications yet
        assert get_last_notified(model) is None
        notification = WebsocketNotification()
        notification.update_from_model(model)
        session.add(notification)
        session.flush()
        last_notified = get_last_notified(model)
        assert last_notified is not None
        # a base Notification must NOT move the last-notified timestamp
        notification = Notification()
        notification.update_from_model(model)
        session.add(notification)
        session.flush()
        assert get_last_notified(model) == last_notified
        # a newer WebsocketNotification must advance it
        notification = WebsocketNotification()
        notification.update_from_model(model)
        session.add(notification)
        session.flush()
        assert get_last_notified(model) > last_notified
def test_get_param():
    """get_parameter() coercion rules: unsupported types raise,
    int/list/bool values are parsed (with whitespace stripped) and fall
    back to the default when missing or empty."""
    # unsupported target types
    for unsupported in (float, tuple, dict):
        with raises(NotImplementedError):
            get_parameter(Bunch(params={}), 'name', unsupported, None)
    # int coercion
    int_cases = (
        ({}, None, None),
        ({}, 10, 10),
        ({'name': ''}, 10, 10),
        ({'name': 5}, 10, 5),
        ({'name': '5'}, 10, 5),
        ({'name': ' 5 '}, 10, 5),
    )
    for params, default, expected in int_cases:
        assert get_parameter(Bunch(params=params), 'name', int, default) \
            == expected
    # list coercion (comma-separated, items stripped)
    assert get_parameter(Bunch(params={}), 'name', list, None) is None
    assert get_parameter(Bunch(params={}), 'name', list, [1, 2]) == [1, 2]
    assert get_parameter(Bunch(params={'name': ''}), 'name', list, [1, 2]) \
        == [1, 2]
    assert get_parameter(Bunch(params={'name': 'a,b'}), 'name', list, None) \
        == ['a', 'b']
    assert get_parameter(Bunch(params={'name': ' a, b '}), 'name', list, 1) \
        == ['a', 'b']
    # bool coercion ('1'/'true' case-insensitively, stripped)
    assert get_parameter(Bunch(params={}), 'name', bool, None) is None
    assert get_parameter(Bunch(params={}), 'name', bool, False) is False
    assert get_parameter(Bunch(params={'name': ''}), 'name', bool, None) \
        is None
    for truthy in ('1', 'True', 'trUe', ' 1 '):
        assert get_parameter(
            Bunch(params={'name': truthy}), 'name', bool, None) is True
def test_replace_url():
    """replace_url() swaps the scheme+host of *url* for *replacement*,
    keeping the path; falsy replacements leave the url unchanged."""
    cases = (
        (None, None, None),
        (None, '', None),
        ('', '', ''),
        ('', None, ''),
        ('', 'https://b.y', 'https://b.y'),
        ('http://a.x', 'https://b.y', 'https://b.y'),
        # scheme-only replacement keeps the original host
        ('http://a.x', 'https://', 'https://a.x'),
        # the path is preserved
        ('http://a.x/m', 'https://b.y', 'https://b.y/m'),
    )
    for url, replacement, expected in cases:
        assert replace_url(url, replacement) == expected
|
#!/usr/bin/env python3
# Print every language in the set, one per line
# (set iteration order is arbitrary).
langs = {"Rust", "C++", "Go", "Java", "Python", "Perl"}
for language in langs:
    print(language)
|
#!/usr/bin/python
from lwr_incr2 import *
#from lwr_incr3 import *
def ToStr(*lists):
    '''Join the elements of every given iterable into a single
    space-separated string (sequences are themselves separated by a
    single space).'''
    pieces = []
    for seq in lists:
        pieces.append(' '.join(map(str, list(seq))))
    return ' '.join(pieces)
def ToList(x):
    '''Convert *x* to a flat Python list.

    None -> []; a list is returned as-is; a 1-D ndarray, or a 2-D
    ndarray/matrix with a single row or column, is flattened to a list.
    Anything else raises.  NOTE(review): `x==None` would be clearer as
    `x is None` (and misbehaves for arrays) — confirm before changing.
    '''
    if x==None: return []
    elif isinstance(x,list): return x
    elif isinstance(x,(np.ndarray,np.matrix)):
        if len(x.shape)==1: return x.tolist()
        if len(x.shape)==2:
            # single row -> that row; single column -> transposed row
            if x.shape[0]==1: return x.tolist()[0]
            if x.shape[1]==1: return x.T.tolist()[0]
    # debugging aid before failing (Python 2 print statement)
    print 'ToList: x=',x
    raise Exception('ToList: Impossible to serialize:',x)
def Median(array):
    '''Return the median element (upper middle for even lengths), or
    None for an empty sequence.

    Improved: uses sorted() instead of copy.deepcopy() + in-place sort
    (the deep copy was unnecessary for ordering and returned a *copy*
    of the middle element), and uses // so the index stays an int under
    Python 3 as well (identical to / on ints in Python 2).
    '''
    if len(array)==0: return None
    a_sorted = sorted(array)
    return a_sorted[len(a_sorted) // 2]
import math
# Ground-truth target used to synthesize training data; the commented
# variants are alternative test surfaces kept for experimentation.
#TrueFunc= lambda x: 1.2+math.sin(2.0*(x[0]+x[1]))
TrueFunc= lambda x: 0.1*(x[0]*x[0]+x[1]*x[1])  # paraboloid
#TrueFunc= lambda x: 4.0-x[0]*x[1]
#TrueFunc= lambda x: 4.0-x[0]-x[1] if x[0]**2+x[1]**2<2.0 else 0.0
def GenData(n=100, noise=0.3):
    '''Sample n random 2-D points in [-3,3]^2 and their (noisy)
    TrueFunc targets.'''
    #data_x= [[x+1.0*Rand()] for x in FRange1(-3.0,5.0,n)]
    data_x = []
    for _ in range(n):
        data_x.append([Rand(-3.0, 3.0), Rand(-3.0, 3.0)])
    data_y = [[TrueFunc(point) + noise * Rand()] for point in data_x]
    return data_x, data_y
def Main():
    """Train a locally weighted regression (TLWR) model on synthetic
    2-D data and dump training samples, the true surface, and the LWR
    estimate on a 25x25 grid to /tmp for plotting (see PlotGraphs)."""
    #def PrintEq(s): print '%s= %r' % (s, eval(s))
    data_x, data_y = GenData(20, noise=0.0) #TEST: n samples, noise
    model= TLWR()
    #model.Init(c_min=0.6, f_reg=0.00001)
    #model.Init(c_min=0.3, f_reg=0.001)
    model.Init(c_min=0.01, f_reg=0.001)
    #model.Init(c_min=0.002, f_reg=0.001)
    #model.Init(c_min=0.0001, f_reg=0.0000001)
    # incrementally feed every training sample
    for x,y in zip(data_x, data_y):
        model.Update(x,y)
    #model.C= [0.01]*len(model.C)
    #model.C= model.AutoWidth(model.CMin)
    # evaluation grid: nt x nt points over [-3,3]^2
    nt= 25
    N_test= nt*nt
    x_test= np.array(sum([[[x1,x2] for x2 in FRange1(-3.0,3.0,nt)] for x1 in FRange1(-3.0,3.0,nt)],[])).astype(np.float32)
    y_test= np.array([[TrueFunc(x)] for x in x_test]).astype(np.float32)
    # Dump data for plot (Python 2 built-in file()):
    fp1= file('/tmp/smpl_train2.dat','w')
    for x,y in zip(data_x,data_y):
        fp1.write('%s #%i# %s\n' % (' '.join(map(str,x)),len(x)+1,' '.join(map(str,y))))
    fp1.close()
    # Dump data for plot:
    fp1= file('/tmp/smpl_test2.dat','w')
    for x,y,i in zip(x_test,y_test,range(len(y_test))):
        # blank line at each grid row break (gnuplot surface format)
        if i%(nt+1)==0: fp1.write('\n')
        fp1.write('%s #%i# %s\n' % (' '.join(map(str,x)),len(x)+1,' '.join(map(str,y))))
    fp1.close()
    # LWR predictions over the same grid
    pred= [[model.Predict(x).Y[0,0]] for x in x_test]
    fp1= file('/tmp/lwr_est.dat','w')
    for x,y,i in zip(x_test,pred,range(len(pred))):
        if i%(nt+1)==0: fp1.write('\n')
        fp1.write('%s #%i# %s\n' % (' '.join(map(str,x)),len(x)+1,' '.join(map(str,y))))
    fp1.close()
def PlotGraphs():
    """Plot the /tmp data files written by Main() using the external
    `qplot` tool, then wait for Enter and kill the plot window.

    Extra command-line arguments (sys.argv[2:]) are forwarded to qplot.
    """
    print 'Plotting graphs..'
    import os,sys
    opt= sys.argv[2:]
    commands=[
        '''qplot -x2 aaa -3d {opt} -s 'set xlabel "x";set ylabel "y";set ticslevel 0;'
                -cs 'u 1:2:4' /tmp/smpl_train2.dat pt 6 ps 2 t '"sample"'
                /tmp/smpl_test2.dat w l lw 3 t '"true"'
                /tmp/lwr_est.dat w l t '"LWR"' &''',
        '''''',
        '''''',
        ]
    for cmd in commands:
        if cmd!='':
            # collapse the multi-line template into one shell command
            cmd= ' '.join(cmd.format(opt=' '.join(opt)).splitlines())
            print '###',cmd
            os.system(cmd)
    print '##########################'
    print '###Press enter to close###'
    print '##########################'
    raw_input()
    os.system('qplot -x2kill aaa')
if __name__=='__main__':
    import sys
    # A first CLI argument of 'p'/'plot' (any case) switches to
    # plotting mode; otherwise run the training/dump pipeline.
    plot_requested = len(sys.argv) > 1 and \
        sys.argv[1] in ('p', 'plot', 'Plot', 'PLOT')
    if plot_requested:
        PlotGraphs()
        sys.exit(0)
    Main()
|
"""
VMWare Backup Internals
Some References that might of been used (or not)
VMWare Command Line
https://www.vmware.com/support/ws5/doc/ws_learning_cli_vmrun.html
Linux Scheduler:
http://stackoverflow.com/questions/1603109/how-to-make-a-python-script-run-like-a-service-or-daemon-in-linux
http://unix.stackexchange.com/questions/69098/how-can-i-schedule-a-python-program-to-run-from-another-python-program
Windows Scheduler (not supported):
http://stackoverflow.com/questions/2725754/schedule-python-script-windows-7
Linux Daemon (not needed?):
http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
"""
### EXTERNAL INCLUDES ###
from .default_settings import DEFAULT_SETTINGS, FOLDER_TS_FORMAT, MUTLIPLE_TAPE_SYSTEM
from .virtual_machine import VirtualMachine, execute_backup
from .cron import enable_backup, disable_backup
### CONSTANTS ###
## Meta Data##
__version__ = '1.02.13'
__author__ = 'Kirill V. Belyayev'
__license__ = 'MIT'
|
from ED6ScenarioHelper import *
def main():
    """Auto-generated (decompiled) ED6 scenario declaration for map
    C4311 ('Sealed area - fourth level', Grancel).  Declares the scene
    header, character chips, NPCs, monsters, trigger events, chest
    actors and the script-function table.  Do not hand-edit the numeric
    values — they are byte-exact engine data."""
    # Sealed area - fourth level
    CreateScenaFile(
        FileName = 'C4311 ._SN',
        MapName = 'Grancel',
        Location = 'C4311.x',
        MapIndex = 1,
        MapDefaultBGM = "ed60035",
        Flags = 0,
        EntryFunctionIndex = 0xFFFF,
        Reserved = 0,
        IncludedScenario = [
            '',
            '',
            '',
            '',
            '',
            '',
            '',
            ''
        ],
    )
    BuildStringList(
        '@FileName', # 8
    )
    # Map entry-point camera/spawn parameters
    DeclEntryPoint(
        Unknown_00 = 0,
        Unknown_04 = 0,
        Unknown_08 = 6000,
        Unknown_0C = 4,
        Unknown_0E = 0,
        Unknown_10 = 0,
        Unknown_14 = 9500,
        Unknown_18 = -10000,
        Unknown_1C = 0,
        Unknown_20 = 0,
        Unknown_24 = 0,
        Unknown_28 = 2800,
        Unknown_2C = 262,
        Unknown_30 = 45,
        Unknown_32 = 0,
        Unknown_34 = 360,
        Unknown_36 = 0,
        Unknown_38 = 0,
        Unknown_3A = 0,
        InitScenaIndex = 0,
        InitFunctionIndex = 0,
        EntryScenaIndex = 0,
        EntryFunctionIndex = 1,
    )
    # Character sprite chips and their pattern files (index-aligned)
    AddCharChip(
        'ED6_DT09/CH11090 ._CH', # 00
        'ED6_DT09/CH11091 ._CH', # 01
        'ED6_DT09/CH11100 ._CH', # 02
        'ED6_DT09/CH11101 ._CH', # 03
        'ED6_DT09/CH10920 ._CH', # 04
        'ED6_DT09/CH10921 ._CH', # 05
        'ED6_DT09/CH10940 ._CH', # 06
        'ED6_DT09/CH10941 ._CH', # 07
        'ED6_DT09/CH10950 ._CH', # 08
        'ED6_DT09/CH10951 ._CH', # 09
        'ED6_DT09/CH10990 ._CH', # 0A
        'ED6_DT09/CH10991 ._CH', # 0B
    )
    AddCharChipPat(
        'ED6_DT09/CH11090P._CP', # 00
        'ED6_DT09/CH11091P._CP', # 01
        'ED6_DT09/CH11100P._CP', # 02
        'ED6_DT09/CH11101P._CP', # 03
        'ED6_DT09/CH10920P._CP', # 04
        'ED6_DT09/CH10921P._CP', # 05
        'ED6_DT09/CH10940P._CP', # 06
        'ED6_DT09/CH10941P._CP', # 07
        'ED6_DT09/CH10950P._CP', # 08
        'ED6_DT09/CH10951P._CP', # 09
        'ED6_DT09/CH10990P._CP', # 0A
        'ED6_DT09/CH10991P._CP', # 0B
    )
    # NPC declarations (spawn pos/orientation plus init/talk functions)
    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 10,
        ChipIndex = 0xA,
        NpcIndex = 0x1C5,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )
    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 8,
        ChipIndex = 0x8,
        NpcIndex = 0x1C5,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )
    # Monster spawns (position plus battle table index)
    DeclMonster(
        X = -205700,
        Z = 0,
        Y = -291370,
        Unknown_0C = 180,
        Unknown_0E = 0,
        Unknown_10 = 1,
        Unknown_11 = 1,
        Unknown_12 = 0xFFFFFFFF,
        BattleIndex = 0x291,
        Unknown_18 = 0,
        Unknown_1A = 0,
    )
    DeclMonster(
        X = -110910,
        Z = -4000,
        Y = -303160,
        Unknown_0C = 180,
        Unknown_0E = 4,
        Unknown_10 = 1,
        Unknown_11 = 1,
        Unknown_12 = 0xFFFFFFFF,
        BattleIndex = 0x30D,
        Unknown_18 = 0,
        Unknown_1A = 0,
    )
    DeclMonster(
        X = 6950,
        Z = -4000,
        Y = -349460,
        Unknown_0C = 180,
        Unknown_0E = 0,
        Unknown_10 = 1,
        Unknown_11 = 1,
        Unknown_12 = 0xFFFFFFFF,
        BattleIndex = 0x291,
        Unknown_18 = 0,
        Unknown_1A = 0,
    )
    DeclMonster(
        X = -32200,
        Z = -8000,
        Y = -171090,
        Unknown_0C = 180,
        Unknown_0E = 4,
        Unknown_10 = 1,
        Unknown_11 = 1,
        Unknown_12 = 0xFFFFFFFF,
        BattleIndex = 0x30D,
        Unknown_18 = 0,
        Unknown_1A = 0,
    )
    DeclMonster(
        X = -58060,
        Z = 0,
        Y = -104990,
        Unknown_0C = 180,
        Unknown_0E = 4,
        Unknown_10 = 1,
        Unknown_11 = 1,
        Unknown_12 = 0xFFFFFFFF,
        BattleIndex = 0x30D,
        Unknown_18 = 0,
        Unknown_1A = 0,
    )
    # Trigger area: runs scena function 3 (exit warp) when entered
    DeclEvent(
        X = -60000,
        Y = -1000,
        Z = -32000,
        Range = 3000,
        Unknown_10 = 0x5DC,
        Unknown_14 = 0x0,
        Unknown_18 = 0x40,
        Unknown_1C = 3,
    )
    # Interactable actors (treasure chests) -> talk functions 5..8
    DeclActor(
        TriggerX = -261269,
        TriggerZ = -4000,
        TriggerY = -297640,
        TriggerRange = 1000,
        ActorX = -261100,
        ActorZ = -4000,
        ActorY = -296970,
        Flags = 0x7C,
        TalkScenaIndex = 0,
        TalkFunctionIndex = 5,
        Unknown_22 = 0,
    )
    DeclActor(
        TriggerX = 17060,
        TriggerZ = -4000,
        TriggerY = -165400,
        TriggerRange = 1000,
        ActorX = 17040,
        ActorZ = -4000,
        ActorY = -164810,
        Flags = 0x7C,
        TalkScenaIndex = 0,
        TalkFunctionIndex = 6,
        Unknown_22 = 0,
    )
    DeclActor(
        TriggerX = -204610,
        TriggerZ = -4000,
        TriggerY = -359520,
        TriggerRange = 1000,
        ActorX = -204860,
        ActorZ = -4000,
        ActorY = -360270,
        Flags = 0x7C,
        TalkScenaIndex = 0,
        TalkFunctionIndex = 7,
        Unknown_22 = 0,
    )
    DeclActor(
        TriggerX = 16350,
        TriggerZ = -8000,
        TriggerY = -118970,
        TriggerRange = 1000,
        ActorX = 16920,
        ActorZ = -8000,
        ActorY = -119010,
        Flags = 0x7C,
        TalkScenaIndex = 0,
        TalkFunctionIndex = 8,
        Unknown_22 = 0,
    )
    # Script function table: indices map to the Function_N defs below
    ScpFunction(
        "Function_0_286", # 00, 0
        "Function_1_295", # 01, 1
        "Function_2_2FD", # 02, 2
        "Function_3_313", # 03, 3
        "Function_4_39E", # 04, 4
        "Function_5_464", # 05, 5
        "Function_6_663", # 06, 6
        "Function_7_869", # 07, 7
        "Function_8_9AF", # 08, 8
    )
def Function_0_286(): pass
label("Function_0_286")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x7F, 2)), scpexpr(EXPR_END)), "loc_294")
OP_A3(0x3FA)
Event(0, 4)
label("loc_294")
Return()
# Function_0_286 end
def Function_1_295(): pass
label("Function_1_295")
OP_10(0x17, 0x0)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xD2, 7)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2AA")
OP_6F(0x1, 0)
Jump("loc_2B1")
label("loc_2AA")
OP_6F(0x1, 60)
label("loc_2B1")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xD3, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2C3")
OP_6F(0x3, 0)
Jump("loc_2CA")
label("loc_2C3")
OP_6F(0x3, 60)
label("loc_2CA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xD3, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2DC")
OP_6F(0x2, 0)
Jump("loc_2E3")
label("loc_2DC")
OP_6F(0x2, 60)
label("loc_2E3")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xD3, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2F5")
OP_6F(0x4, 0)
Jump("loc_2FC")
label("loc_2F5")
OP_6F(0x4, 60)
label("loc_2FC")
Return()
# Function_1_295 end
def Function_2_2FD(): pass
label("Function_2_2FD")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_312")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("Function_2_2FD")
label("loc_312")
Return()
# Function_2_2FD end
def Function_3_313(): pass
label("Function_3_313")
EventBegin(0x0)
Fade(1000)
OP_89(0x0, -59200, 20000, -32800, 0)
OP_89(0x1, -60800, 20000, -32800, 0)
OP_89(0x2, -60800, 20000, -31200, 0)
OP_89(0x3, -59200, 20000, -31200, 0)
OP_6D(-60000, 0, -32000, 1500)
Sleep(100)
SetMapFlags(0x100000)
OP_22(0xEB, 0x0, 0x64)
OP_6F(0x0, 0)
OP_70(0x0, 0x12C)
Sleep(2000)
OP_A2(0x3FA)
NewScene("ED6_DT01/C4302 ._SN", 100, 0, 0)
IdleLoop()
Return()
# Function_3_313 end
def Function_4_39E(): pass
label("Function_4_39E")
EventBegin(0x0)
SetPlaceName(0xE1) # 封印区域 第四层
OP_4F(0x31, (scpexpr(EXPR_PUSH_LONG, 0xE1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_6F(0x0, 150)
OP_70(0x0, 0x0)
OP_48()
OP_89(0x0, -59200, 20000, -32800, 0)
OP_89(0x1, -60800, 20000, -32800, 0)
OP_89(0x2, -60800, 20000, -31200, 0)
OP_89(0x3, -59200, 20000, -31200, 0)
OP_6D(-60000, 0, -32000, 0)
OP_73(0x0)
Sleep(100)
Fade(1000)
OP_89(0x0, -60000, 0, -35200, 180)
OP_89(0x1, -60000, 0, -35200, 180)
OP_89(0x2, -60000, 0, -35200, 180)
OP_89(0x3, -60000, 0, -35200, 180)
EventEnd(0x0)
Return()
# Function_4_39E end
# Chest handler (actor 0, object 0x1): guarded by an ambush battle
# (monster chr 0x8, battle 0x312); grants the weapon item 0x29 if the
# chest flag is unset and inventory allows, else shows an empty-chest
# message.  Chinese string literals are engine data — left untouched.
def Function_5_464(): pass

label("Function_5_464")

    SetMapFlags(0x8000000)
    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xD2, 7)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_625")
    OP_22(0x2B, 0x0, 0x64)
    OP_70(0x1, 0x3C)
    Sleep(500)
    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xD3, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_552")
    OP_9F(0x8, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
    SetChrPos(0x8, -261100, -1500, -296970, 320)
    TurnDirection(0x8, 0x0, 0)

    def lambda_4B3():
        OP_8F(0xFE, 0xFFFC0414, 0xFFFFF448, 0xFFFB77F6, 0x4B0, 0x0)
        ExitThread()

    QueueWorkItem(0x8, 1, lambda_4B3)

    def lambda_4CE():
        OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x4B0)
        ExitThread()

    QueueWorkItem(0x8, 2, lambda_4CE)
    ClearChrFlags(0x8, 0x80)
    AnonymousTalk(
        (
            scpstr(SCPSTR_CODE_COLOR, 0x5),
            "机械人形出现了!\x07\x00\x02",
        )
    )

    CloseMessageWindow()
    OP_56(0x0)
    # ambush battle; the switch below dispatches on the battle result
    Battle(0x312, 0x0, 0x0, 0x0, 0xFF)
    SetChrFlags(0x8, 0x80)
    Switch(
        (scpexpr(EXPR_PUSH_VALUE_INDEX, 0x3), scpexpr(EXPR_END)),
        (0, "loc_52D"),
        (2, "loc_53F"),
        (1, "loc_54F"),
        (SWITCH_DEFAULT, "loc_552"),
    )


    label("loc_52D")

    OP_A2(0x698)
    OP_6F(0x1, 60)
    Sleep(500)
    Jump("loc_552")

    label("loc_53F")

    OP_6F(0x1, 0)
    TalkEnd(0xFF)
    ClearMapFlags(0x8000000)
    Return()

    label("loc_54F")

    OP_B4(0x0)
    Return()

    label("loc_552")

    Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x29, 1)"), scpexpr(EXPR_END)), "loc_5AA")
    FadeToDark(300, 0, 100)
    OP_22(0x11, 0x0, 0x64)
    SetMessageWindowPos(-1, -1, -1, -1)
    AnonymousTalk(
        (
            scpstr(SCPSTR_CODE_COLOR, 0x0),
            "得到了\x07\x02",
            "合金乌剑\x07\x00",
            "。\x02",
        )
    )

    CloseMessageWindow()
    OP_56(0x0)
    SetMessageWindowPos(72, 320, 56, 3)
    FadeToBright(300, 0)
    OP_A2(0x697)
    Jump("loc_622")

    label("loc_5AA")

    FadeToDark(300, 0, 100)
    AnonymousTalk(
        (
            "宝箱里装有\x07\x02",
            "合金乌剑\x07\x00",
            "。\x01",
            "不过现有的数量太多,\x07\x02",
            "合金乌剑\x07\x00",
            "不能再拿更多了。\x02",
        )
    )

    CloseMessageWindow()
    OP_56(0x0)
    FadeToBright(300, 0)
    OP_22(0x2C, 0x0, 0x64)
    OP_6F(0x1, 60)
    OP_70(0x1, 0x0)

    label("loc_622")

    Jump("loc_655")

    label("loc_625")

    FadeToDark(300, 0, 100)
    AnonymousTalk(
        (
            scpstr(SCPSTR_CODE_COLOR, 0x5),
            "宝箱里什么东西都没有。\x07\x00\x02",
        )
    )

    CloseMessageWindow()
    OP_56(0x0)
    FadeToBright(300, 0)
    OP_83(0xF, 0x72)

    label("loc_655")

    Sleep(30)
    TalkEnd(0xFF)
    ClearMapFlags(0x8000000)
    Return()

    # Function_5_464 end
# Chest handler (actor 1, object 0x3): same battle-guarded chest
# pattern as Function_5, with monster chr 0x9, battle 0x313 and the
# armor item granted via OP_3E(0xFE, 1).
def Function_6_663(): pass

label("Function_6_663")

    SetMapFlags(0x8000000)
    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xD3, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_82D")
    OP_22(0x2B, 0x0, 0x64)
    OP_70(0x3, 0x3C)
    Sleep(500)
    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xD3, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_751")
    OP_9F(0x9, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
    SetChrPos(0x9, 17040, -1500, -164810, 320)
    TurnDirection(0x9, 0x0, 0)

    def lambda_6B2():
        OP_8F(0xFE, 0x4290, 0xFFFFF448, 0xFFFD7C36, 0x4B0, 0x0)
        ExitThread()

    QueueWorkItem(0x9, 1, lambda_6B2)

    def lambda_6CD():
        OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x4B0)
        ExitThread()

    QueueWorkItem(0x9, 2, lambda_6CD)
    ClearChrFlags(0x9, 0x80)
    AnonymousTalk(
        (
            scpstr(SCPSTR_CODE_COLOR, 0x5),
            "机械人形出现了!\x07\x00\x02",
        )
    )

    CloseMessageWindow()
    OP_56(0x0)
    # ambush battle; dispatch on the result below
    Battle(0x313, 0x0, 0x0, 0x0, 0xFF)
    SetChrFlags(0x9, 0x80)
    Switch(
        (scpexpr(EXPR_PUSH_VALUE_INDEX, 0x3), scpexpr(EXPR_END)),
        (0, "loc_72C"),
        (2, "loc_73E"),
        (1, "loc_74E"),
        (SWITCH_DEFAULT, "loc_751"),
    )


    label("loc_72C")

    OP_A2(0x69A)
    OP_6F(0x3, 60)
    Sleep(500)
    Jump("loc_751")

    label("loc_73E")

    OP_6F(0x3, 0)
    TalkEnd(0xFF)
    ClearMapFlags(0x8000000)
    Return()

    label("loc_74E")

    OP_B4(0x0)
    Return()

    label("loc_751")

    Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0xFE, 1)"), scpexpr(EXPR_END)), "loc_7AC")
    FadeToDark(300, 0, 100)
    OP_22(0x11, 0x0, 0x64)
    SetMessageWindowPos(-1, -1, -1, -1)
    AnonymousTalk(
        (
            scpstr(SCPSTR_CODE_COLOR, 0x0),
            "得到了\x07\x02",
            "女武神战甲\x07\x00",
            "。\x02",
        )
    )

    CloseMessageWindow()
    OP_56(0x0)
    SetMessageWindowPos(72, 320, 56, 3)
    FadeToBright(300, 0)
    OP_A2(0x699)
    Jump("loc_82A")

    label("loc_7AC")

    FadeToDark(300, 0, 100)
    AnonymousTalk(
        (
            "宝箱里装有\x07\x02",
            "女武神战甲\x07\x00",
            "。\x01",
            "不过现有的数量太多,\x07\x02",
            "女武神战甲\x07\x00",
            "不能再拿更多了。\x02",
        )
    )

    CloseMessageWindow()
    OP_56(0x0)
    FadeToBright(300, 0)
    OP_22(0x2C, 0x0, 0x64)
    OP_6F(0x3, 60)
    OP_70(0x3, 0x0)

    label("loc_82A")

    Jump("loc_85B")

    label("loc_82D")

    FadeToDark(300, 0, 100)
    AnonymousTalk(
        (
            scpstr(SCPSTR_CODE_COLOR, 0x5),
            "宝箱里什么东西都没有。\x07\x00\x02",
        )
    )

    CloseMessageWindow()
    OP_56(0x0)
    FadeToBright(300, 0)
    OP_83(0xF, 0x73)

    label("loc_85B")

    Sleep(30)
    TalkEnd(0xFF)
    ClearMapFlags(0x8000000)
    Return()

    # Function_6_663 end
def Function_7_869(): pass
label("Function_7_869")
SetMapFlags(0x8000000)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xD3, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_961")
OP_22(0x2B, 0x0, 0x64)
OP_70(0x2, 0x3C)
Sleep(500)
Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x1FF, 1)"), scpexpr(EXPR_END)), "loc_8E2")
FadeToDark(300, 0, 100)
OP_22(0x11, 0x0, 0x64)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x0),
"得到了\x07\x02",
"EP改良填充剂\x07\x00",
"。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToBright(300, 0)
OP_A2(0x69B)
Jump("loc_95E")
label("loc_8E2")
FadeToDark(300, 0, 100)
SetChrName("")
AnonymousTalk(
(
"宝箱里装有\x07\x02",
"EP改良填充剂\x07\x00",
"。\x01",
"不过现有的数量太多,\x07\x02",
"EP改良填充剂\x07\x00",
"不能再拿更多了。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_22(0x2C, 0x0, 0x64)
OP_6F(0x2, 60)
OP_70(0x2, 0x0)
label("loc_95E")
Jump("loc_9A1")
label("loc_961")
FadeToDark(300, 0, 100)
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"宝箱里什么东西都没有。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_83(0xF, 0x74)
label("loc_9A1")
Sleep(30)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
# Function_7_869 end
# Auto-generated (decompiled) scenario function: treasure-chest event for
# chest object 0x4 containing "全回复药".
# Same three-branch structure as Function_7_869 (granted / full / empty).
def Function_8_9AF(): pass
label("Function_8_9AF")
SetMapFlags(0x8000000)
# "Already opened" scenario flag 0xD3 bit 4: when set, jump to the empty-chest branch.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xD3, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_AAA")
OP_22(0x2B, 0x0, 0x64)
OP_70(0x4, 0x3C)
Sleep(500)
# OP_3E presumably checks whether item 0x1F7 can still be added — TODO confirm.
Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x1F7, 1)"), scpexpr(EXPR_END)), "loc_A29")
# Branch: item obtained.
FadeToDark(300, 0, 100)
OP_22(0x11, 0x0, 0x64)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x0),
"得到了\x07\x02",
"全回复药\x07\x00",
"。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToBright(300, 0)
# Set the "opened" scenario flag (0x69C).
OP_A2(0x69C)
Jump("loc_AA7")
# Branch: inventory full — item cannot be taken.
label("loc_A29")
FadeToDark(300, 0, 100)
SetChrName("")
AnonymousTalk(
(
"宝箱里装有\x07\x02",
"全回复药\x07\x00",
"。\x01",
"不过现有的数量太多,\x07\x02",
"全回复药\x07\x00",
"不能再拿更多了。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_22(0x2C, 0x0, 0x64)
OP_6F(0x4, 60)
OP_70(0x4, 0x0)
label("loc_AA7")
Jump("loc_AFA")
# Branch: chest already opened — show the "empty" message.
label("loc_AAA")
FadeToDark(300, 0, 100)
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"宝箱里什么东西都没有。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_83(0xF, 0x75)
label("loc_AFA")
Sleep(30)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
# Function_8_9AF end
SaveToFile()
Try(main)
|
#========================================================================#
# Submit LSWT L3U processing job to lotus
# (To be run from cron server)
#------------------------------------------------------------------------#
# This script invokes L3U processing, automatically followed by L3C-daily
# and L3C-dekadal processing.
#------------------------------------------------------------------------#
# R. Maidment
#========================================================================#
#------------------------------------------------------------------------#
# Import modules
#------------------------------------------------------------------------#
import os.path
import subprocess
import config_lswt as config
import lswt_operational as so
#------------------------------------------------------------------------#
# Define job to submit
#------------------------------------------------------------------------#
# Command run inside the batch job: the L3U runner plus its CLI flags.
# NOTE(review): this is a tuple of argv tokens; so.tidyup_job() presumably
# flattens it to one string before the os.path.join below — verify against
# lswt_operational.tidyup_job.
pycommand = 'run_lswt_l3u.py', '--rerun', 'no', '--run_l3cdaily', 'yes'
# LSF submission: short-serial queue, 24 h wall clock, 40 GB memory,
# stdout/stderr redirected into the operational log directory.
job = ['bsub',
       '-q', 'short-serial',
       '-W', '24:00',
       '-oo', os.path.join(config.log_opsdir,'submit_l3u.out'),
       '-eo', os.path.join(config.log_opsdir,'submit_l3u.err'),
       '-R', 'rusage[mem=40000]',
       '-M', '40000',
       'python2.7',
       os.path.join(config.homedir, so.tidyup_job(pycommand))]
# NOTE(review): with shell=True a *list* argument would execute only its first
# element — this relies on so.tidyup_job(job) returning a single command
# string. Confirm, or pass the list with shell=False.
result = subprocess.check_output(so.tidyup_job(job), shell=True)
|
"""
#------------------------------------------------------------------------------
# Create ZV-IC Shaper
#
# This script will take a generalized input from an undamped second order system subject
# to nonzero initial conditions and solve the minimum-time ZV shaper using optimization
#
# Created: 6/20/17 - Daniel Newman -- dmn3669@louisiana.edu
#
# Modified:
# * 6/20/17 - DMN -- dmn3669@louisiana.edu
# - Added documentation for this script
#------------------------------------------------------------------------------
"""
# Ignore user warnings to keep the terminal clean
import warnings
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", RuntimeWarning)
# Import the necessary python library modules
import numpy as np
from scipy.signal import lsim
from scipy.special import gamma
from scipy import integrate
import control
from scipy import optimize
import os
import sys
import pdb
# Add my local path to the relevant modules list
sys.path.append('/Users/Daniel/Github/Crawlab-Student-Code/Daniel Newman/Python Modules')
# Import my python modules
import crawlab_toolbox.inputshaping as shaping
import crawlab_toolbox.utilities as craw_utils
import crawlab_toolbox.plotting as genplt
import kanes_2link as twolink
# Output folder for figures, keyed by the running script's name.
# NOTE(review): sys.argv[0] is the script *path* (including ".py") — confirm
# that is the intended folder name.
folder = 'Figures/{}/'.format(
    sys.argv[0],
)
# Number of elements per link
n = 3
# Time array
tmax = 7
t_step = 1/60
t = np.arange(0,tmax,t_step)
StartTime = 0.0
# Conversion for degrees to radians
DEG_TO_RAD = np.pi / 180
# Link Length
L_1 = 0.5
L_2 = 0.5
L = [L_1,L_2]
# Mass density per unity length
rho = 0.2
# Mass of the links
m_1 = rho * L_1
m_2 = rho * L_2
M = [m_1,m_2]
# Mass of the link payloads
m_p = 0.1
m_h2 = 1
# Rotational inertias: hub 1, hub 2, payload
J_h1 = 0.1
J_h2 = 0.1
J_p = 0.0005
J = [J_h1,J_h2,J_p]
# Initial states
theta1_0 = 0
theta1_dot_0 = 0
theta2_0 = 0
theta2_dot_0 = 0
X0 = [np.rad2deg(theta1_0),np.rad2deg(theta2_0),theta1_dot_0,theta2_dot_0]
# Stiffness of the links
E = 1
I = 1
# Maximum allowed actuator effort
tau_max = 10
# Desired joint displacements (step commands), in radians
theta1_d = 90. * DEG_TO_RAD
theta2_d = 90. * DEG_TO_RAD
Distance = [theta1_d,theta2_d]
Disturb_Dist = [0.001,0.001]
# Previously solved ZV shapers, loaded from disk
ZV_Shaper = np.load('zv-shaper.npy')[0]
ZV_Shaper_sequential = np.load('zv-shaper-sequential.npy')[0]
# Arguments to pass to the solver
p = [tau_max, M, J, I, E, L, StartTime, t_step, t, X0, Distance]
p_error = [tau_max, M, J, I, E, L, StartTime, t_step, t, X0, Distance]
# Derive the system using Kane's method
Kane = twolink.derive_sys(n,[[m_p,m_h2],J, E, I])
# "Error" model: payload/hub-2 masses zeroed to create a deliberate modeling
# error case — TODO confirm this is intentional rather than a typo.
Kane_error = twolink.derive_sys(n,[[0. * m_p,.0 * m_h2],J, E, I])
A,B,C = twolink.linearize_system(n,Kane,p)
A_error,B_error,C_error = twolink.linearize_system(n,Kane_error,p)
# NOTE(review): this Q/R pair is dead code — it is overwritten immediately below.
Q = np.diagflat([0.0100, 0.0100, 0.0101, 0.0100, 0.0100, 1.4729, 0.0100, 4.7198, 0.0100, 0.0100, 0.0416, 4.2659])
R = np.diagflat([0.2504, 0.1214])
# Weights actually used for the "damped" (concurrent) design
Q = np.diagflat([0.9486, 0.2993, 0.0744, 0.0010, 0.2299, 1.8036, 0.0240, 4.4930, 0.0010, 0.0034, 0.0010, 2.7230])
R = np.diagflat([0.5574, 0.0742])
# Weights for the plain-LQR baseline
Q_LQR = np.diagflat([0.0010, 0.0010, 0.0010, 1.3499, 0.0644, 2.7491, 0.0184, 4.5188, 0.5126, 0.0722, 0.1133, 1.0610 ])
R_LQR = np.diagflat([1.0818, 0.1238])
# NOTE(review): control.lqr also returns S and E, which shadows the stiffness
# E defined above. E is not read again in this file, but rename if it ever is.
LQR_Gains,S,E = control.lqr(A,B,Q_LQR,R_LQR)
LQR_Gains = LQR_Gains.T
K_damped,S,E = control.lqr(A,B,Q,R)
K_damped = K_damped.T
# Zero selected gain rows (indices 1, 2n-2, 3n-2, 4n-2) — presumably the
# unmeasurable deflection-state channels; confirm against the state ordering.
K_damped[1,:] = 0
K_damped[2*n-2,:] = 0
K_damped[3*n-2,:] = 0
K_damped[4*n-2,:] = 0.
# Desired full-state vector and its (x, y) workspace coordinates
des_xy = np.zeros([1,4*n])
des_xy[:,0:n] = np.tile(theta1_d,(n,1)).T
des_xy[:,n:2*n] = np.tile(theta2_d,(n,1)).T
des_x,des_y = twolink.get_xy_coords(n,des_xy,L)
# Pulse disturbance on the last state channel
Disturbance = np.zeros([len(t),4*n])
Disturbance[:,4*n-1] = craw_utils.pulse(t,20,0.1,1)
#Disturbance[:,3*n-1] = shaping.pulse(t,20,0.1,1)
# Same row-zeroing applied to the LQR gain matrix
LQR_Gains[1,:] = 0
LQR_Gains[2*n-2,:] = 0
LQR_Gains[3*n-2,:] = 0
LQR_Gains[4*n-2,:] = 0.
print('LQR Gains: {}'.format(LQR_Gains))
# Natural frequencies / damping ratios of the nominal and error models
Omegas_damp,Zetas_damp = twolink.nominal_omega(n,Kane,p,K_damped)
LQR_Omegas,LQR_Zetas = twolink.nominal_omega(n,Kane,p,LQR_Gains)
Omegas_damp_error,Zetas_damp_error = twolink.nominal_omega(n,Kane_error,p,K_damped)
LQR_Omegas_error,LQR_Zetas_error = twolink.nominal_omega(n,Kane_error,p,LQR_Gains)
print('Damped Omegas: {}'.format(Omegas_damp))
print('Damped Zetas: {}'.format(Zetas_damp))
print('LQR Omegas: {}'.format(LQR_Omegas))
print('LQR Zetas: {}'.format(LQR_Zetas))
# BUG FIX: these prints used a comma before format ("'...{}' , format(x)"),
# which printed the literal "{}" and the value as a second argument instead of
# interpolating it. Use the .format method, matching the prints above.
print('Damped Error: {}'.format((Omegas_damp - Omegas_damp_error) / Omegas_damp_error))
print('LQR Error: {}'.format((LQR_Omegas - LQR_Omegas_error) / LQR_Omegas_error))
print('Damped Gains: {}'.format(K_damped))
def actuator_effort(response, Shaper, Gains, dist_1=Distance[0], dist_2=Distance[1]):
    """Return the saturated actuator torques implied by a simulated response.

    Parameters
    ----------
    response : (len(t), 4*n) array of simulated states.
    Shaper : input shaper applied to the step reference commands.
    Gains : (4*n, 2) feedback gain matrix (transposed before use).
    dist_1, dist_2 : commanded joint displacements (default: module-level Distance).

    Returns
    -------
    (tau1, tau2) : torque time histories for joints 1 and 2, clipped to +/- tau_max.

    Note: the original version extracted ten state slices (theta*, delta*,
    shaped_vel*) that were never used; that dead code has been removed.
    """
    # Shaped step references for each joint, offset by the initial conditions.
    shaped_pos1 = craw_utils.shaped_input(craw_utils.step_input, t, Shaper, dist_1) + X0[0]
    shaped_pos2 = craw_utils.shaped_input(craw_utils.step_input, t, Shaper, dist_2) + X0[1]
    # Full-state reference: position commands tiled across each link's n
    # elements; the remaining (deflection/velocity) channels stay zero.
    X_ref = np.zeros([len(t), 4 * n])
    X_ref[:, 0:n] = np.tile(shaped_pos1, (n, 1)).T
    X_ref[:, n:2 * n] = np.tile(shaped_pos2, (n, 1)).T
    # Proportional state feedback on the tracking error, then saturate.
    tau = np.zeros([len(t), 2])
    for i in range(len(t)):
        tau[i, :] = np.matmul(Gains.T, (X_ref[i, :] - response[i, :]))
    tau = np.clip(tau, -tau_max, tau_max)
    return tau[:, 0], tau[:, 1]
#anim = twolink.animate_pendulum(n,Kane,p,K,Omegas,Zetas,Shaper1='ZV ZV ZV',Shaper2='ZV ZV ZV',motion='Step')
#anim.save(folder + 'Shaped.mp4', bitrate = 2500, fps = 60)
#anim = twolink.animate_pendulum(n,Kane,p,K,Omegas,Zetas,Shaper1='LQR',Shaper2='LQR',motion='Step')
#anim.save(folder + 'LQR.mp4', bitrate = 2500, fps = 60)
import numpy as np
import matplotlib.pylab as plt
import scipy
from scipy import ndimage
import matplotlib.patheffects as path_effects
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
from matplotlib.animation import FuncAnimation, writers
# Maintain consistency between different plot colors
UNSHAPED_COLOR = '#e41a1c'
SHAPED_COLOR = '#377eb8'
SEQUENTIAL_COLOR = '#4daf4a'
# Determine whether we want to make a transparent background
# for clearner presentation
OPAQUE = False
TRANSPARENT = not OPAQUE
# These are some constants for my plotting
# I have multiplicative factors to give decent room above
# and below the plotted data, making my figures look cleaner
Y_MIN = 0.1
Y_MAX = 0.3
SCALING_FACTOR = 600
# Specify axis labels
X_LABEL = 'Time (s)'
Y1_LABEL = 'X Position (m)'
Y2_LABEL = 'Y Position (m)'
# Let's load response data from a previously run simulation
time = t
# Framerate of the animation, derived from the sampling time
# of the simulation
fps = int(np.round(1 / (time[1] - time[0])))
# Simulate each controller/shaper combination on both the nominal model and
# the "error" model, then convert joint responses to (x, y) coordinates.
shaped_resp,shaper,Gains = twolink.response(n,Kane,p,K_damped,Omegas_damp,Zetas_damp,Shaper1=ZV_Shaper,Shaper2=ZV_Shaper,motion='Step')
shaped_x, shaped_y = twolink.get_xy_coords(n,shaped_resp,L)
shaped_resp_error, shaper ,Gains = twolink.response(n,Kane_error,p,K_damped,Omegas_damp,Zetas_damp,Shaper1=ZV_Shaper,Shaper2=ZV_Shaper,motion='Step')
shaped_x_error, shaped_y_error = twolink.get_xy_coords(n,shaped_resp_error,L)
#np.save('zv-shaper.npy',ZV_Shaper)
unshaped_resp, shaper ,Gains = twolink.response(n,Kane,p,LQR_Gains,LQR_Omegas,LQR_Zetas,Shaper1='Unshaped',Shaper2='Unshaped',motion='Step')
unshaped_x, unshaped_y = twolink.get_xy_coords(n,unshaped_resp,L)
unshaped_resp_error,shaper,Gains = twolink.response(n,Kane_error,p,LQR_Gains,LQR_Omegas,LQR_Zetas,Shaper1='Unshaped',Shaper2='Unshaped',motion='Step')
unshaped_x_error, unshaped_y_error = twolink.get_xy_coords(n,unshaped_resp_error,L)
sequential_resp,shaper,Gains = twolink.response(n,Kane,p,LQR_Gains,LQR_Omegas,LQR_Zetas,Shaper1=ZV_Shaper_sequential,Shaper2=ZV_Shaper_sequential,motion='Step')
sequential_x, sequential_y = twolink.get_xy_coords(n,sequential_resp,L)
sequential_resp_error,shaper,Gains = twolink.response(n,Kane_error,p,LQR_Gains,LQR_Omegas,LQR_Zetas,Shaper1=ZV_Shaper_sequential,Shaper2=ZV_Shaper_sequential,motion='Step')
sequential_x_error, sequential_y_error = twolink.get_xy_coords(n,sequential_resp_error,L)
#np.save('zv-shaper-sequential.npy',ZV_Shaper)
# Stack the last-element trajectories (presumably the payload tip — confirm)
# for the comparison plots below.
x_responses = np.vstack((shaped_x[:,-1],unshaped_x[:,-1],sequential_x[:,-1]))
y_responses = np.vstack((shaped_y[:,-1],unshaped_y[:,-1],sequential_y[:,-1]))
x_concurrent_compare = np.vstack((shaped_x[:,-1],shaped_x_error[:,-1]))
y_concurrent_compare = np.vstack((shaped_y[:,-1],shaped_y_error[:,-1]))
x_lqr_compare = np.vstack((unshaped_x[:,-1],unshaped_x_error[:,-1]))
y_lqr_compare = np.vstack((unshaped_y[:,-1],unshaped_y_error[:,-1]))
x_sequential_compare = np.vstack((sequential_x[:,-1],sequential_x_error[:,-1]))
y_sequential_compare = np.vstack((sequential_y[:,-1],sequential_y_error[:,-1]))
# Legend labels for the three controllers and for nominal-vs-error plots.
labels = ['Concurrent','LQR','Sequential']
labels_error = ['Nominal','Error']
# The original repeated the same genplt.generate_plot call eight times with
# only the data/labels/ylabel/filename varying. Drive the identical calls
# (same order, same arguments) from a spec table instead.
# Plot specs: (data, legend labels, y-axis label, output filename).
_plot_specs = [
    (x_responses, labels, 'X Position (m)', 'Payload_X'),
    (y_responses, labels, 'Y Position (m)', 'Payload_Y'),
    (x_concurrent_compare, labels_error, 'X Position (m)', 'Concurrent_Compare_X'),
    (y_concurrent_compare, labels_error, 'Y Position (m)', 'Concurrent_Compare_Y'),
    (x_lqr_compare, labels_error, 'X Position (m)', 'LQR_Compare_X'),
    (y_lqr_compare, labels_error, 'Y Position (m)', 'LQR_Compare_Y'),
    (x_sequential_compare, labels_error, 'X Position (m)', 'Sequential_Compare_X'),
    (y_sequential_compare, labels_error, 'Y Position (m)', 'Sequential_Compare_Y'),
]
for _data, _labels, _ylabel, _fname in _plot_specs:
    genplt.generate_plot(t,
                         _data, _labels,
                         'Time (s)', _ylabel, filename=_fname,
                         folder=folder, save_plot=True, num_col=1,
                         legend_loc='lower right',
                         )
# Cable length of the crane
# NOTE(review): `length` is never read in the visible code below.
length = SCALING_FACTOR
# Create a new figure large enough to fill a powerpoint screen
fig = plt.figure(figsize=(16,9))
# Create a 2x2 subplot
gs1 = gridspec.GridSpec(2, 2)
# Set the grid spacing as appropriate
gs1.update(left=0, right=0.95, hspace=0.2, wspace=0.1)
gs1.tight_layout(fig)
# Set the axes for the subplots.
# ax1: large left pane (linkage drawing); ax2/ax3: right column (x / y traces).
ax1 = plt.subplot(gs1[:, :-1])
ax2 = plt.subplot(gs1[0, -1])
ax3 = plt.subplot(gs1[-1, -1])
# Create a line for the unshaped velocity plot
unshaped_x_position, = ax2.plot(
    [], [], # Set the data to null at first
    lw=2, # Line width
    label='LQR Only',
    #path_effects=[path_effects.SimpleLineShadow(),path_effects.Normal()], # Add a shadow
    color=UNSHAPED_COLOR, # Set the color
    linestyle='-') # Simple linestyle
# Create a line for the unshaped theta plot
unshaped_y_position, = ax3.plot(
    [],[], # Set the data to null at first
    lw=2, # Line width
    #path_effects=[path_effects.SimpleLineShadow(),path_effects.Normal()], # Add a shadow
    color=UNSHAPED_COLOR, # Set the color
    linestyle='-') # Simple linestyle
# Create a line for the shaped velocity plot
shaped_x_position, = ax2.plot(
    [], [], # Set the data to null at first
    lw=2, # Line width
    label='Concurrent Design',
    #path_effects=[path_effects.SimpleLineShadow(),path_effects.Normal()], # Add a shadow
    color=SHAPED_COLOR, # Set the color
    linestyle='--') # Simple linestyle
# Create a line for the shaped theta plot
shaped_y_position, = ax3.plot(
    [], [], # Set the data to null at first
    lw=2, # Line width
    #path_effects=[path_effects.SimpleLineShadow(),path_effects.Normal()], # Add a shadow
    color=SHAPED_COLOR, # Set the color
    linestyle='--') # Simple linestyle
# Create a line for the unshaped velocity plot
sequential_x_position, = ax2.plot(
    [], [], # Set the data to null at first
    lw=2, # Line width
    label='Sequential Design',
    #path_effects=[path_effects.SimpleLineShadow(),path_effects.Normal()], # Add a shadow
    color=SEQUENTIAL_COLOR, # Set the color
    linestyle='-.') # Simple linestyle
# Create a line for the unshaped theta plot
sequential_y_position, = ax3.plot(
    [],[], # Set the data to null at first
    lw=2, # Line width
    #path_effects=[path_effects.SimpleLineShadow(),path_effects.Normal()], # Add a shadow
    color=SEQUENTIAL_COLOR, # Set the color
    linestyle='-.') # Simple linestyle
# Set the legend with the number of columns
ax2.legend(ncol=2,mode=None,loc='lower right').get_frame().set_edgecolor('k')
# Create null lists for all the data (filled incrementally by update())
x_data = []
unshaped_xdata = []
unshaped_ydata = []
shaped_xdata = []
shaped_ydata = []
sequential_xdata = []
sequential_ydata = []
# Set the X limits for the velocity and theta plots
ax2.set_xlim(np.amin(time), np.amax(time))
ax3.set_xlim(np.amin(time), np.amax(time))
# Set the Y limits for the velocity and theta plots based on the constants already defined
ax2.set_ylim(
    np.amin(unshaped_x) - Y_MIN * abs(np.amin(unshaped_x)),
    np.amax(unshaped_x) + Y_MAX * abs(np.amax(unshaped_x)-np.amin(unshaped_x)))
ax3.set_ylim(
    np.amin(unshaped_y) - Y_MIN * abs(np.amin(unshaped_y)),
    np.amax(unshaped_y) + Y_MAX * abs(np.amax(unshaped_y)-np.amin(unshaped_y)))
# Set the plot window and labels for the first plot
ax2.spines['right'].set_color('none')
ax2.spines['top'].set_color('none')
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.grid(False)
ax2.set_xlabel('{}'.format(X_LABEL), fontsize=24, weight='bold', labelpad=5)
ax2.set_ylabel('{}'.format(Y1_LABEL), fontsize=24, weight='bold', labelpad=5)
# Set the plot window and labels for the first plot
ax3.spines['right'].set_color('none')
ax3.spines['top'].set_color('none')
ax3.xaxis.set_ticks_position('bottom')
ax3.yaxis.set_ticks_position('left')
ax3.grid(False)
ax3.set_xlabel('{}'.format(X_LABEL), fontsize=24, weight='bold', labelpad=5)
ax3.set_ylabel('{}'.format(Y2_LABEL), fontsize=24, weight='bold', labelpad=5)
def update(i):
    """FuncAnimation callback: render frame ``i``.

    Appends the frame-``i`` sample of each error-model response to the
    running trace data, refreshes the trace lines on ax2/ax3, and redraws
    the two-link linkage segments on ax1. Mutates the module-level data
    lists and Line2D objects (closure over globals).
    """
    # Since we're using loaded images, we need to clear the axis
    # on which we display the images
    ax1.cla()
    if not (i % fps): # print notice every 30th frame
        print('Processing frame {}'.format(i))
    # Add the next data point to the plot lines
    x_data.append(time[i])
    unshaped_xdata.append(unshaped_x_error[i,-1])
    unshaped_ydata.append(unshaped_y_error[i,-1])
    shaped_xdata.append(shaped_x_error[i,-1])
    shaped_ydata.append(shaped_y_error[i,-1])
    sequential_xdata.append(sequential_x_error[i,-1])
    sequential_ydata.append(sequential_y_error[i,-1])
    # Set the plot lines with the up-to-date data
    unshaped_x_position.set_data(x_data, unshaped_xdata)
    unshaped_y_position.set_data(x_data,unshaped_ydata)
    shaped_x_position.set_data(x_data, shaped_xdata)
    shaped_y_position.set_data(x_data,shaped_ydata)
    sequential_x_position.set_data(x_data, sequential_xdata)
    sequential_y_position.set_data(x_data,sequential_ydata)
    # Hide grid lines on the trolley window
    ax1.grid(False)
    ax1.set_ylim(-650,300)
    ax1.set_xlim(-300,650)
    # Hide the window
    ax1.spines['right'].set_color('none')
    ax1.spines['top'].set_color('none')
    ax1.spines['bottom'].set_color('none')
    ax1.spines['left'].set_color('none')
    # Hide axes ticks
    ax1.set_xticks([])
    ax1.set_yticks([])
    # Add the line representing the unshaped cable motion
    # (one segment per element: endpoints j and j+1, scaled into plot units)
    for j in range(2*n):
        line1 = ax1.add_line(
            Line2D(
                [unshaped_x_error[i,j] * SCALING_FACTOR,unshaped_x_error[i,j+1] * SCALING_FACTOR], # Start and end X coordinates
                [unshaped_y_error[i,j] * SCALING_FACTOR,unshaped_y_error[i,j+1] * SCALING_FACTOR], # Start and end Y coordinates
                linewidth=5, # Line weight
                linestyle='-', # Simple line style
                marker='o', # Add circles at the endpoints
                label='Bar',
                color='#e41a1c', # Set the color to be consistent for unshaped motion
                #path_effects=[path_effects.SimpleLineShadow(),path_effects.Normal()]) # Add a shadow
                ))
        # Add the line representing the shaped cable motion
        line2 = ax1.add_line(
            Line2D(
                [shaped_x_error[i,j] * SCALING_FACTOR,shaped_x_error[i,j+1] * SCALING_FACTOR], # Start and end X coordinates
                [shaped_y_error[i,j] * SCALING_FACTOR,shaped_y_error[i,j+1] * SCALING_FACTOR], # Start and end Y coordinates
                linewidth=5, # Line weight
                linestyle='-', # Simple line style
                marker='o', # Add circles at the endpoints
                label='Bar',
                color='#377eb8', # Set the color to be consistent for unshaped motion
                #path_effects=[path_effects.SimpleLineShadow(),path_effects.Normal()]) # Add a shadow
                ))
        # Add the line representing the shaped cable motion
        line3 = ax1.add_line(
            Line2D(
                [sequential_x_error[i,j] * SCALING_FACTOR,sequential_x_error[i,j+1] * SCALING_FACTOR], # Start and end X coordinates
                [sequential_y_error[i,j] * SCALING_FACTOR,sequential_y_error[i,j+1] * SCALING_FACTOR], # Start and end Y coordinates
                linewidth=5, # Line weight
                linestyle='-', # Simple line style
                marker='o', # Add circles at the endpoints
                label='Bar',
                color=SEQUENTIAL_COLOR, # Set the color to be consistent for unshaped motion
                #path_effects=[path_effects.SimpleLineShadow(),path_effects.Normal()]) # Add a shadow
                ))
# Set the alpha as 1 or 0 based on whether "OPAQUE" is True or False
fig.patch.set_alpha(float(OPAQUE))
# Create the animation
anim = FuncAnimation(
    fig, # Use the predefined figure
    update, # Call the update function
    frames=fps * int(np.amax(time)), # Use a number of frames based on the framerate and length of the time array
    interval=fps)
# anim.save(
#     'crane_anim.mov', # Set the file name
#     codec="png",
#     dpi=100,
#     bitrate=-1,
#     savefig_kwargs={
#         'transparent': TRANSPARENT,
#         'facecolor': 'none'})
# Added: 03/31/18 - Joshua Vaughan - joshua.vaughan@louisiana.edu
#
# I had a hard time getting good quality videos using Daniel's settings, without
# reprocessing the video in QuickTime. I'm guessing that we have different sets
# of video codecs installed. Below is what gave me the best results on my home
# iMac.
#
# These encoder I'm using (h264) also doesn't seem to get along with the video
# being transparent. So, this version will have a white background. This also
# may make it better as a "standalone" video. For me, these settings also seem
# to result in smaller filesizes for approximately the same quality.
FFMpegWriter = writers['ffmpeg']
# We can also add some metadata to the video.
metadata = dict(title='Input Shaping Animation', artist='CRAWLAB',
                comment='Shows a point-to-poitn move of a planar crane with and without input shaping.')
# Change the video bitrate as you like and add some metadata.
writer = FFMpegWriter(codec="h264", fps=fps, bitrate=-1, metadata=metadata)
# Encode and write the animation to disk (requires ffmpeg on PATH).
anim.save(
    'two_link_anim.mp4', # Set the file name
    dpi=240, # Bump up to 4K resolution 3840x2160
    writer=writer,
    savefig_kwargs={
        'transparent': TRANSPARENT, # h264 doesn't seem to like transparency
        'facecolor': 'none'})
# plt.show() # JEV - This seemed to force encoding into an endless loop for me
plt.close() |
import torch
import torch.nn as nn
import numpy as np
from edflow.util import retrieve, get_obj_from_str
class Shuffle(nn.Module):
    """Invertible channel permutation.

    Applies a fixed random permutation along dim 1 in the forward pass and
    undoes it when ``reverse=True``. A permutation is volume-preserving, so
    the forward log-determinant is always 0.
    """

    def __init__(self, in_channels, **kwargs):
        super(Shuffle, self).__init__()
        self.in_channels = in_channels
        idx = torch.randperm(in_channels)
        # FIX: register plain tensors as buffers, not nn.Parameter objects.
        # These index tensors are not trainable; wrapping them in Parameter is
        # misleading and unidiomatic. Buffers still follow .to()/.cuda() moves
        # and are saved in state_dict.
        self.register_buffer('forward_shuffle_idx', idx)
        self.register_buffer('backward_shuffle_idx', torch.argsort(idx))

    def forward(self, x, reverse=False, conditioning=None):
        # NOTE the asymmetric return contract relied on by callers (e.g. Flow):
        # forward returns (output, logdet); reverse returns just the output.
        if not reverse:
            return x[:, self.forward_shuffle_idx, ...], 0
        else:
            return x[:, self.backward_shuffle_idx, ...]
class BasicFullyConnectedNet(nn.Module):
    """Plain MLP: ``Linear -> [BatchNorm1d] -> LeakyReLU`` repeated, ending in
    a final ``Linear`` (to ``out_dim`` if given, else back to ``dim``), with an
    optional ``Tanh`` on the output.

    ``depth`` counts the number of *hidden* Linear layers after the input one.
    """

    def __init__(self, dim, depth, hidden_dim=256, use_tanh=False, use_bn=False, out_dim=None):
        super(BasicFullyConnectedNet, self).__init__()
        # NB: layers are created in the exact same order as before, so the
        # random-initialization stream is unchanged.
        stack = [nn.Linear(dim, hidden_dim)]
        if use_bn:
            stack.append(nn.BatchNorm1d(hidden_dim))
        stack.append(nn.LeakyReLU())
        for _ in range(depth):
            stack.append(nn.Linear(hidden_dim, hidden_dim))
            if use_bn:
                stack.append(nn.BatchNorm1d(hidden_dim))
            stack.append(nn.LeakyReLU())
        final_dim = dim if out_dim is None else out_dim
        stack.append(nn.Linear(hidden_dim, final_dim))
        if use_tanh:
            stack.append(nn.Tanh())
        self.main = nn.Sequential(*stack)

    def forward(self, x):
        return self.main(x)
class DoubleVectorCouplingBlock(nn.Module):
    """In contrast to VectorCouplingBlock, this module assures alternating chunking in upper and lower half."""

    def __init__(self, in_channels, hidden_dim, depth=2, use_hidden_bn=False, n_blocks=2):
        super(DoubleVectorCouplingBlock, self).__init__()
        assert in_channels % 2 == 0
        # s: per-block scale networks (tanh-bounded log-scales);
        # t: per-block translation networks. Each acts on half the channels.
        self.s = nn.ModuleList([BasicFullyConnectedNet(dim=in_channels // 2, depth=depth, hidden_dim=hidden_dim,
                                                       use_tanh=True) for _ in range(n_blocks)])
        self.t = nn.ModuleList([BasicFullyConnectedNet(dim=in_channels // 2, depth=depth, hidden_dim=hidden_dim,
                                                       use_tanh=False) for _ in range(n_blocks)])

    def forward(self, x, reverse=False):
        # Affine coupling. Returns (y, logdet) forward; plain x in reverse.
        if not reverse:
            logdet = 0
            for i in range(len(self.s)):
                idx_apply, idx_keep = 0, 1
                # Swap the two channel halves on odd steps so that both halves
                # get transformed over the course of the blocks.
                if i % 2 != 0:
                    x = torch.cat(torch.chunk(x, 2, dim=1)[::-1], dim=1)
                x = torch.chunk(x, 2, dim=1)
                scale = self.s[i](x[idx_apply])
                # y2 = x2 * exp(s(x1)) + t(x1)
                x_ = x[idx_keep] * (scale.exp()) + self.t[i](x[idx_apply])
                x = torch.cat((x[idx_apply], x_), dim=1)
                # Per-sample logdet contribution of this block.
                logdet_ = torch.sum(scale.view(x.size(0), -1), dim=1)
                logdet = logdet + logdet_
            return x, logdet
        else:
            idx_apply, idx_keep = 0, 1
            # Undo the blocks back-to-front, mirroring the forward swaps.
            for i in reversed(range(len(self.s))):
                if i % 2 == 0:
                    x = torch.cat(torch.chunk(x, 2, dim=1)[::-1], dim=1)
                x = torch.chunk(x, 2, dim=1)
                # x2 = (y2 - t(y1)) * exp(-s(y1))
                x_ = (x[idx_keep] - self.t[i](x[idx_apply])) * (self.s[i](x[idx_apply]).neg().exp())
                x = torch.cat((x[idx_apply], x_), dim=1)
            return x
class VectorActNorm(nn.Module):
    """Activation normalization (Glow-style ActNorm) for vector inputs.

    Per-channel affine transform ``scale * (x + loc)`` whose parameters are
    data-dependently initialized from the first forward batch so the output
    has (approximately) zero mean and unit variance per channel.
    """

    def __init__(self, in_channel, logdet=True, **kwargs):
        super().__init__()
        # Per-channel shift/scale, shaped for (B, C, H, W) broadcasting.
        self.loc = nn.Parameter(torch.zeros(1, in_channel, 1, 1))
        self.scale = nn.Parameter(torch.ones(1, in_channel, 1, 1))
        # 0 until the data-dependent init has run; persisted in state_dict.
        self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
        self.logdet = logdet

    def initialize(self, input):
        # Data-dependent init: set loc/scale from the first batch's statistics.
        if len(input.shape) == 2:
            input = input[:, :, None, None]
        with torch.no_grad():
            # Collapse all non-channel dims: (C, B*H*W)
            flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
            mean = (
                flatten.mean(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )
            std = (
                flatten.std(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )
            self.loc.data.copy_(-mean)
            self.scale.data.copy_(1 / (std + 1e-6))

    def forward(self, input, reverse=False, conditioning=None):
        if len(input.shape) == 2:
            # Promote (B, C) vectors to (B, C, 1, 1).
            input = input[:, :, None, None]
        if not reverse:
            _, _, height, width = input.shape
            if self.initialized.item() == 0:
                self.initialize(input)
                self.initialized.fill_(1)
            log_abs = torch.log(torch.abs(self.scale))
            # logdet of the per-channel affine map, one value per sample.
            logdet = height * width * torch.sum(log_abs)
            logdet = logdet * torch.ones(input.shape[0]).to(input)
            # NOTE(review): .squeeze() drops ALL size-1 dims — for batch size 1
            # this also removes the batch dimension; confirm callers tolerate it.
            if not self.logdet:
                return (self.scale * (input + self.loc)).squeeze()
            return (self.scale * (input + self.loc)).squeeze(), logdet
        else:
            return self.reverse(input)

    def reverse(self, output, conditioning=None):
        # Inverse affine: x = y / scale - loc.
        return (output / self.scale - self.loc).squeeze()
class Flow(nn.Module):
    """A chain of invertible modules applied in sequence.

    Each module in ``module_list`` is a constructor called as
    ``module(in_channels, hidden_dim=..., depth=...)`` and must return
    ``(output, logdet)`` in the forward direction and plain ``output`` when
    called with ``reverse=True``.
    """

    def __init__(self, module_list, in_channels, hidden_dim, hidden_depth):
        super(Flow, self).__init__()
        self.in_channels = in_channels
        stages = [
            build(in_channels, hidden_dim=hidden_dim, depth=hidden_depth)
            for build in module_list
        ]
        self.flow = nn.ModuleList(stages)

    def forward(self, x, condition=None, reverse=False):
        if reverse:
            # Undo the chain back-to-front.
            for stage in reversed(self.flow):
                x = stage(x, reverse=True)
            return x
        # Forward pass accumulates the log-determinant of every stage.
        total_logdet = 0
        for stage in self.flow:
            x, stage_logdet = stage(x)
            total_logdet = total_logdet + stage_logdet
        return x, total_logdet
class EfficientVRNVP(nn.Module):
    """Stack of ``n_flow`` :class:`Flow` blocks forming a RealNVP-style
    invertible network over vectors with an even number of channels.

    Forward returns ``(x, logdet)``; the inverse direction returns
    ``(x, None)`` to keep a uniform tuple shape.
    """

    def __init__(self, module_list, in_channels, n_flow, hidden_dim, hidden_depth):
        super().__init__()
        assert in_channels % 2 == 0
        self.flow = nn.ModuleList([Flow(module_list, in_channels, hidden_dim, hidden_depth)
                                   for n in range(n_flow)])

    def forward(self, x, condition=None, reverse=False):
        if not reverse:
            # Accumulate the log-determinants of all flows.
            logdet = 0
            for i in range(len(self.flow)):
                x, logdet_ = self.flow[i](x, condition=condition)
                logdet = logdet + logdet_
            return x, logdet
        else:
            # Invert the flows back-to-front.
            for i in reversed(range(len(self.flow))):
                x = self.flow[i](x, condition=condition, reverse=True)
            return x, None

    def reverse(self, x, condition=None):
        # BUG FIX: the previous implementation called ``self.flow(x, ...)``
        # directly, but ``self.flow`` is an nn.ModuleList and is not callable,
        # so this method always raised TypeError. Delegate to the working
        # reverse path in forward(); returns (x, None) like forward does.
        return self.forward(x, condition=condition, reverse=True)
class VectorTransformer(nn.Module):
    """RealNVP-style invertible transformer for (B, C, 1, 1) latent vectors.

    All hyperparameters are read from ``config`` via edflow's ``retrieve``
    under the "Transformer/*" keys.
    """

    def __init__(self, config):
        super().__init__()
        import torch.backends.cudnn as cudnn
        # NOTE(review): global side effect — enables cudnn autotuning for the
        # whole process, not just this module.
        cudnn.benchmark = True
        self.config = config
        self.in_channel = retrieve(config, "Transformer/in_channel")
        self.n_flow = retrieve(config, "Transformer/n_flow")
        self.depth_submodules = retrieve(config, "Transformer/hidden_depth")
        self.hidden_dim = retrieve(config, "Transformer/hidden_dim")
        # One flow step = ActNorm -> affine coupling -> channel shuffle.
        modules = [VectorActNorm, DoubleVectorCouplingBlock, Shuffle]
        self.realnvp = EfficientVRNVP(modules, self.in_channel, self.n_flow, self.hidden_dim,
                                      hidden_depth=self.depth_submodules)

    def forward(self, input, reverse=False):
        # (B, C, 1, 1) -> ((B, C, 1, 1), logdet); inverse when reverse=True.
        if reverse:
            return self.reverse(input)
        input = input.squeeze()
        out, logdet = self.realnvp(input)
        return out[:, :, None, None], logdet

    def reverse(self, out):
        # Inverse transform; restores the (B, C, 1, 1) shape.
        out = out.squeeze()
        return self.realnvp(out, reverse=True)[0][:, :, None, None]
class FactorTransformer(VectorTransformer):
    """VectorTransformer whose output is split into disentangled factors."""

    def __init__(self, config):
        super().__init__(config)
        # Number of equal-size factors, or explicit per-factor channel sizes.
        self.n_factors = retrieve(config, "Transformer/n_factors", default=2)
        self.factor_config = retrieve(config, "Transformer/factor_config", default=list())

    def forward(self, input):
        out, logdet = super().forward(input)
        if self.factor_config:
            # Explicit split sizes along the channel dimension.
            out = torch.split(out, self.factor_config, dim=1)
        else:
            # Equal split into n_factors chunks.
            out = torch.chunk(out, self.n_factors, dim=1)
        return out, logdet

    def reverse(self, out):
        # Concatenate the factors back together before inverting.
        out = torch.cat(out, dim=1)
        return super().reverse(out)
|
from django.contrib import admin
from member.models import Profile, Organisation, Competition, Ticket
import logging
g_logger = logging.getLogger(__name__)
class TicketInline(admin.TabularInline):
    """Read-only tabular inline listing a competition's tickets."""
    model = Ticket
    extra = 0
    readonly_fields = ('used', 'token')

    def has_delete_permission(self, request, obj=None):
        # Tickets can never be deleted through the admin.
        return False

    def has_add_permission(self, request, obj=None):
        # Tickets are created via the add_tickets admin action, not inline.
        return False
class ParticipantInline(admin.TabularInline):
    """Inline for a competition's participants (the M2M through table)."""
    model = Competition.participants.through
    extra = 0

    def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
        field = super(ParticipantInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
        if db_field.name == 'participant':
            # request._obj_ is stashed by CompetitionAdmin.get_form: restrict
            # the selectable participants to the competition's tournament.
            if request._obj_ is not None:
                field.queryset = field.queryset.filter(tournament = request._obj_.tournament)
            else:
                # No competition yet (add form): offer no choices.
                field.queryset = field.queryset.none()
        return field
def add_tickets(modeladmin, request, queryset, count=10):
    """Admin action: create ``count`` fresh tickets for each selected competition.

    ``count`` has a default so the standard admin-action call signature
    ``(modeladmin, request, queryset)`` keeps working unchanged.
    """
    g_logger.debug("add_tickets(%r, %r, %r)", modeladmin, request, queryset)
    for comp in queryset:
        for _ in range(count):  # loop index is unused
            Ticket.objects.create(competition=comp)
class CompetitionAdmin(admin.ModelAdmin):
    """Admin for Competition: ticket/participant inlines, bulk ticket action,
    and a participant count column in the changelist."""
    inlines = (TicketInline, ParticipantInline)
    actions = [add_tickets]
    list_display = ('organisation', 'tournament', 'participant_count')
    fields = ('organisation', 'tournament', 'token_len')

    def participant_count(self, obj):
        """Number of participants, shown in the changelist."""
        return obj.participants.count()  # stray trailing semicolon removed

    def get_readonly_fields(self, request, obj):
        # Once created, a competition's organisation/tournament are immutable.
        # (Replaces the fragile `obj and (...) or []` idiom with a ternary.)
        return ('organisation', 'tournament') if obj else []

    def get_inline_instances(self, request, obj=None):
        # Hide inlines on the add form: tickets/participants need a saved object.
        if not obj:
            return []
        return super(CompetitionAdmin, self).get_inline_instances(request, obj)

    def get_form(self, request, obj=None, **kwargs):
        # save obj reference for future processing in Inline
        request._obj_ = obj
        return super(CompetitionAdmin, self).get_form(request, obj, **kwargs)
class ProfileAdmin(admin.ModelAdmin):
    """Admin for Profile: shows the test-features flag in the changelist."""
    list_display = ('user', 'test_features_enabled')


# BUG FIX: ProfileAdmin was defined but Profile was registered without it,
# leaving the customized changelist unused. Register the model with its admin.
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Organisation)
admin.site.register(Competition, CompetitionAdmin)
|
import numpy as np
import pprint
import pandas as pd
import datetime as dt
from collections import defaultdict
import matplotlib.pyplot as plt
from investagram_data_loader.repository.sqlite_dao import SqliteDao
# Query parameters: stock symbol, brokers to plot, and the date window.
STOCK_CODE = '2GO'
BROKERS = ['BDO', 'ATR']
START_DATE, END_DATE = dt.date(2021, 1, 1), dt.date(2021, 2, 24)
# broker_code -> {date -> net_value}
transactions = defaultdict(dict)
with SqliteDao() as dao:
    for trans in dao.get_transactions_by_stock_code(STOCK_CODE, START_DATE, END_DATE):
        transactions[trans.broker_code][trans.date] = trans.net_value
pprint.pprint(transactions)
# Columns = broker codes, index = transaction dates.
df = pd.DataFrame(transactions)
# df.rename(columns={'Unnamed: 0': 'Date'}, inplace=True)
# df.set_index(['Date'], inplace=True)
df.sort_index(inplace=True)
df.fillna(0, inplace=True)  # days with no trades contribute zero net value
df = df.cumsum()  # running cumulative net value per broker
df[BROKERS].plot(marker='.')
plt.show()
|
# Read the number of points, the second target K, and the coordinates.
N = int(input())
K = int(input())
X = list(map(int, input().split()))
# Each point costs a round trip (x2) to whichever target is nearer:
# the origin (|x|) or the point K (|x - K|).
print(sum(2 * min(abs(X[i]), abs(X[i] - K)) for i in range(N)))
|
#!/usr/bin/env python
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-i", "--file", dest="inputFileName",
help="path to input file")
parser.add_option("-o", "--output", dest="outputFileName",
help="output file name (without extension)")
parser.add_option("-p", "--path", dest="pathToHistogram",
help="path to histogram, e.g. 'BB3/rescaledThr'")
parser.add_option("-t", "--type", dest="outputType",default="png",
help="output type (default = png)")
parser.add_option("-v", "--version", dest="version", default=0,
help="specify which version of plots to get (default = 0)")
parser.add_option("-z", "--logz", action="store_true", dest="logZ", default=False,
help="sets the z axis to a log scale")
(arguments, args) = parser.parse_args()
if not arguments.inputFileName:
print "please specify input file";
sys.exit(0)
if not arguments.pathToHistogram:
print "please specify input histogram";
sys.exit(0)
from moduleSummaryPlottingTools import *
from ROOT import TFile
gROOT.SetBatch()
if not os.path.exists(arguments.inputFileName):
print "invalid input file, quitting"
sys.exit(0)
canvas = produce2DSummaryPlot(arguments.inputFileName,
arguments.pathToHistogram,
arguments.version)
if canvas is None:
sys.exit(0)
if arguments.outputFileName:
name = arguments.outputFileName
else:
name = arguments.pathToHistogram.replace("/","_") + "_V" + str(arguments.version)
if arguments.logZ:
canvas.SetLogz()
if arguments.outputType is "root":
outputFile = TFile(name+".root", "RECREATE")
outputFile.cd()
canvas.Write()
outputFile.Close()
else:
canvas.SaveAs(name+"."+arguments.outputType)
|
import random
class Environment(object):
    """A 1-D vacuum world: a row of cells, each either dirty or clean."""

    def __init__(self, size):
        self.agent_pos = 0      # agent starts in the leftmost cell
        self.agent_reward = 0   # +1 for every successful suck
        self.agent_step = 0     # total actions taken
        self.size = size
        self.map = [False] * self.size  # True marks a dirty cell

    def add_random_dirt(self):
        """Dirty size/5 + 1 random cells (repeat picks are allowed)."""
        for _ in range(int(self.size / 5) + 1):
            self.map[random.randrange(len(self.map))] = True

    def percept(self):
        """Sprinkle fresh dirt, then report [position, cell-is-dirty]."""
        self.add_random_dirt()
        return [self.agent_pos, self.map[self.agent_pos]]

    def act(self, action):
        """Apply one action; walls block movement off either end."""
        self.agent_step += 1
        if action == 'left-right':
            action = random.choice(['left', 'right'])
        if action == 'left' and self.agent_pos != 0:
            self.agent_pos -= 1
        elif action == 'right' and self.agent_pos != self.size - 1:
            self.agent_pos += 1
        elif action == 'suck' and self.map[self.agent_pos]:
            # Log step count and reward *before* this suck is credited.
            print(self.agent_step, self.agent_reward)
            self.agent_reward += 1
            self.map[self.agent_pos] = False
class Agent(object):
    """Table-driven reflex vacuum agent."""

    def __init__(self, env):
        self.env = env
        # One rule per cell: suck when dirty, otherwise wander randomly.
        self.rules = {
            cell: {True: 'suck', False: 'left-right'}
            for cell in range(self.env.size)
        }

    def live(self):
        """Perceive-act loop; runs forever."""
        while True:
            pos, is_dirty = self.env.percept()
            self.env.act(self.rules[pos][is_dirty])
# Demo: run the reflex agent forever in a two-cell world.
env = Environment(2)
agent = Agent(env)
agent.live()
|
def isSelfCrossing(self, x):
    """Return True if the spiral path with move lengths x ever crosses itself.

    Slides a six-value window (zero-padded at the tail) over the moves and
    applies the standard crossing conditions.  `range` replaces the
    Python-2-only `xrange` so the code runs on both Python 2 and 3.
    """
    return any(d >= b > 0 and (a >= c or a >= c-e >= 0 and f >= d-b)
               for a, b, c, d, e, f in ((x[i:i+6] + [0] * 6)[:6]
                                        for i in range(len(x))))
class Solution:
    """Detect whether a spiral path (LeetCode 335) crosses itself in O(1) space."""

    def isSelfCrossing(self, x):
        # Keep the last five move lengths; a crossing can only involve the
        # current move and at most five predecessors.
        second = third = fourth = fifth = 0
        for current in x:
            if fourth >= second > 0 and (
                    current >= third
                    or current >= third - fifth >= 0 and sixth >= fourth - second):
                return True
            second, third, fourth, fifth, sixth = (
                current, second, third, fourth, fifth)
        return False
"""
kullanıcıdan 3 basamaklı bir sayı okuyup sayının bas tersten yazılımı ile
elde edilecek sayının okunan sayıya eşit olup olmadığını kontrol eden bir algo
ÖRN: girdi 575 ise algo "eşit" çıktısını, 134 olduğunda ise "eşit değil" çıktısını vermelidir
"""
sayi=int(input("3 Basamaklı Bir Sayi Giriniz: "))
a=sayi%10 #birler
b=sayi//10
b=b%10 #onlar
c=sayi//100 #yüzler
ters=100*c+10*b+c
if(sayi==ters):
print("Tersi Birbirine Eşittir!")
else:
print("Eşit Değildir!")
|
#!/usr/bin/python
import os
import subprocess
import sipconfig
import PyQt5.QtCore
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
class BuildExt(build_ext):
    """Custom build step: run Qt's moc and SIP before the normal C++ build."""
    def run(self):
        # Generate moc_*.cpp for every header that declares Q_OBJECT classes.
        for path in files_to_moc:
            moc = path_of_moc_file(path)
            cmd = 'moc-qt5 -o %s %s' % (moc, path)
            print(cmd)
            subprocess.check_call(cmd.split())
        # SIP must run from src/ so its generated files land next to the code.
        os.chdir(os.path.join(base_dir, 'src'))
        subprocess.call(sip_cmd.split())
        os.chdir(base_dir)
        build_ext.run(self)
os.chdir(os.path.abspath(os.path.dirname(__file__)))
base_dir = os.getcwd()
sip_config = sipconfig.Configuration()
# Command line that generates the C++ SIP bindings from pygqrx.sip.
sip_cmd = ' '.join([
    sip_config.sip_bin,
    '-c .',
    '-o',
    '-I /usr/share/sip/PyQt5',
    PyQt5.QtCore.PYQT_CONFIGURATION['sip_flags'],
    'pygqrx.sip'
])
# Compiler flags: Python 2.7 headers plus Qt5 flags from pkg-config.
cflags = [
    '-g',
    '-Wall',
    '-std=c++14',
    '-fPIC',
    '-mtune=generic',
    '-O2',
    '-fstack-protector-strong',
    '-I.',
    '-I/usr/include/python2.7'
]
cflags.extend(subprocess.check_output('pkg-config --cflags Qt5Core Qt5Widgets'.split()).split())
# Linker flags for the shared extension module.
ldflags = [
    '-lpython2.7',
    '-shared',
    '-Wl,-O1,--sort-common,--as-needed,-z,relro'
]
ldflags.extend(subprocess.check_output('pkg-config --libs Qt5Core Qt5Widgets'.split()).split())
# Headers declaring Q_OBJECT classes; each needs a generated moc_*.cpp.
files_to_moc = [
    'src/bookmarks.h',
    'src/bookmarkstablemodel.h',
    'src/bookmarkstaglist.h',
    'src/freqctrl.h',
    'src/meter.h',
    'src/plotter.h',
]
def path_of_moc_file(path):
    """Return the moc output path for a header: dir/foo.h -> dir/moc_foo.cpp."""
    directory, filename = os.path.split(path)
    stem = os.path.splitext(filename)[0]
    return os.path.join(directory, 'moc_%s.cpp' % stem)
mocked_files = list(map(path_of_moc_file, files_to_moc))
# Extension combining the hand-written Gqrx widgets with the SIP-generated
# binding sources and the moc outputs.
pygqrx = Extension(
    name = 'pygqrx',
    sources = [
        'src/bookmarks.cpp',
        'src/bookmarkstablemodel.cpp',
        'src/bookmarkstaglist.cpp',
        'src/freqctrl.cpp',
        'src/meter.cpp',
        'src/plotter.cpp',
        'src/fftbuffer.cpp',
        'src/sippygqrxCFreqCtrl.cpp',
        'src/sippygqrxCMeter.cpp',
        'src/sippygqrxCPlotter.cpp',
        'src/sippygqrxBookmarks.cpp',
        'src/sippygqrxTagInfo.cpp',
        'src/sippygqrxBookmarkInfo.cpp',
        'src/sippygqrxcmodule.cpp',
        'src/sippygqrxQList0100BookmarkInfo.cpp',
        'src/sippygqrxQList0100TagInfo.cpp',
    ] + mocked_files,
    include_dirs = ['src'],
    extra_compile_args=cflags,
    extra_link_args=ldflags
)
# BuildExt (above) hooks moc/SIP generation into the normal build_ext step.
setup(name='pygqrx',
      version='0.1',
      description='Python wrapper for Qt widgets used by Gqrx',
      long_description='Uses SIP to export the CFreqCtrl, CPlotter and CMeter widget used by Gqrx',
      author='Alexander Fasching',
      author_email='fasching.a91@gmail.com',
      maintainer='Alexander Fasching',
      maintainer_email='fasching.a91@gmail.com',
      url='https://github.com/alexf91/pygqrx',
      license='GPL',
      cmdclass={'build_ext': BuildExt},
      ext_modules=[pygqrx]
)
|
"""Login urls."""
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
path('signup', views.signup, name='signup'),
url('account_activation_sent/$', views.account_activation_sent,
name='account_activation_sent'),
path('activate/<uidb64>/<token>/', views.activate, name='activate'),
path('login', views.login, name='login'),
path('logout', views.logout, name='logout')
]
|
from __future__ import absolute_import, unicode_literals
from celery import shared_task
from datetime import datetime, date
from django.utils import timezone
from product.models import Product
from amdtelecom.celery import app
# @shared_task
# def new_prod_published_date():
# products = Product.objects.filter(is_new_expired__lte=timezone.datetime.today()).update(is_new = False)
@app.task()
def changed_is_new(prod_id):
    # Celery task: clear the `is_new` flag on the given product once its
    # `is_new_expired` date has passed.
    print('changed_is_new is working')
    # NOTE(review): timezone.datetime.today() returns a naive datetime; if
    # is_new_expired is timezone-aware this comparison may misbehave -- confirm.
    # The assigned value is the number of rows updated (unused).
    product = Product.objects.filter(id=prod_id).filter(is_new_expired__lte=timezone.datetime.today()).update(is_new = False)
|
from django import forms
import datetime
from django.forms.fields import ChoiceField, IntegerField
from django.utils.text import slugify
import datetime
class InputDataForm(forms.Form):
    """Numeric water-quality measurements collected from the user.

    All fields are required floats; validation beyond type coercion is left
    to the consumer of the cleaned data.
    """
    ph = forms.FloatField()
    hardness = forms.FloatField()
    solids = forms.FloatField()
    chloramines = forms.FloatField()
    sulphate = forms.FloatField()
    conductivity = forms.FloatField()
    organic_carbon = forms.FloatField()
    trihalomethanes = forms.FloatField()
    turbidity = forms.FloatField()
|
#!/usr/bin/env python
import os
import sys
from text_template import TextTemplate as view
# Render the YouTube template with the given link/image and publish the
# result as adoption/<url_name>.html next to this script.
video_url = sys.argv[1]
thumbnail = sys.argv[2]
page_slug = sys.argv[3]

script_dir = os.path.dirname(os.path.realpath(__file__))
page_html = view.render(
    template=script_dir + '/youtube_template.txt',
    image_name=thumbnail,
    youtube_link=video_url
)

with open(script_dir + "/adoption/" + page_slug + ".html", 'w') as writer:
    writer.write(page_html)
print("Link: http://tnachen.github.io/adoption/" + page_slug + ".html")
|
# Read a count and a list of tokens; keep tokens whose parity disagrees with
# their position's parity (odd values at even indices, even values at odd
# indices), then print them space-separated.
a = int(input())
b = input().split()
c = []
for i in range(a):
    if i % 2 == 0 and int(b[i]) % 2 == 1:
        c.append(b[i])
    if i % 2 == 1 and int(b[i]) % 2 == 0:
        c.append(b[i])
# BUGFIX: the original printed s + c[-1], which raised IndexError whenever no
# element qualified; join() produces identical output and handles empty c.
print(" ".join(c))
|
"""
Module that runs the Flask app in either development or production
mode, after setting up environment variables appropriately.
"""
import argparse
import os
import shlex
import subprocess
import sys
import hyperschedule.util as util
def exec_cmd(cmd):
    """Echo *cmd* shell-quoted, run it, and exit with its return code.

    A Ctrl-C while the child runs makes this process exit with status 1
    instead of propagating KeyboardInterrupt.
    """
    print(" ".join(shlex.quote(word) for word in cmd))
    try:
        status = subprocess.run(cmd).returncode
    except KeyboardInterrupt:
        status = 1
    sys.exit(status)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Hyperschedule backend server")
parser.add_argument(
"config", metavar="key=val", nargs="*", help="config var settings (see README)"
)
config_args = parser.parse_args().config
config = {}
for config_arg in config_args:
if "=" not in config_arg:
util.die("malformed key=val argument: {}".format(repr(config_arg)))
var, val = config_arg.split("=", maxsplit=1)
if var not in util.ENV_DEFAULTS:
util.die("unknown config var: {}".format(repr(var)))
config[var] = val
for var, val in util.ENV_DEFAULTS.items():
if var not in config:
config[var] = val
val = config[var]
env_var = "HYPERSCHEDULE_" + var.upper()
os.environ[env_var] = val
app = "hyperschedule.app:app"
port = util.get_env("port")
host = "0.0.0.0" if util.get_env_boolean("expose") else "127.0.0.1"
if util.get_env_boolean("debug"):
os.environ["FLASK_ENV"] = "development"
os.environ["FLASK_APP"] = app
os.environ["FLASK_SKIP_DOTENV"] = "1"
exec_cmd(["flask", "run", "--host", host, "--port", port, "--no-reload"])
else:
exec_cmd(["gunicorn", "-w", "1", "-b", "{}:{}".format(host, port), app])
|
class Dog():
    """Simple record of a dog's breed (`type`), name and coat colour."""

    def __init__(self, type, name, color):
        # Echo the freshly created instance, matching the module's tracing style.
        print(self, "class")
        self.type = type
        self.name = name
        self.color = color
dog1 = Dog("Alabai", "Sharik", "brown")
print(dog1, "code")
dog2 = Dog("Alabai", "Simba", "black")
print(dog2, "simba") |
# -*- coding: utf-8 -*-
# @Date : 2018-02-28 11:19:48
# @Author : jym
# @Description:
# @Version : v0.0
import datetime
import functools
import itertools
import time
import tornado
from tornado import gen
from tornado import web
from math import *
def distance(lat1, lng1, lat2, lng2):
    """Great-circle distance in kilometres between two lat/lng points (degrees).

    Uses the haversine formula with an equatorial Earth radius of 6378.137 km.
    """
    radlat1 = radians(lat1)
    radlat2 = radians(lat2)
    dlat = radlat1 - radlat2
    dlng = radians(lng1) - radians(lng2)
    s = 2 * asin(sqrt(sin(dlat / 2) ** 2 +
                      cos(radlat1) * cos(radlat2) * sin(dlng / 2) ** 2))
    # asin() is non-negative here, so the original `if s < 0` branch was dead;
    # abs() keeps the same guarantee against floating-point noise more simply.
    return abs(s * 6378.137)
# Print the current local time.
def showTime():
    dt = datetime.datetime.now()
    localtime = dt.strftime('%Y-%m-%d %H:%M:%S') # format as display string
    print localtime
def delayTime(interval):
    # Block the calling thread for `interval` seconds.
    time.sleep(interval)
# Blocking retry decorator.
def retry(exceptions=(Exception,), interval=0, max_retries=10, success=None, retry_func=None, **retry_func_kwargs):
    '''
    Blocking retry decorator.

    exceptions: exception types that trigger a retry.
    interval: seconds to sleep (blocking) between attempts.
    max_retries: number of attempts; a negative value retries forever.
    success: optional predicate validating the result, typically a lambda,
             e.g. success=lambda x: x > 0; a failing result also retries.
    retry_func: name of a method looked up on the first positional argument
                (i.e. on `self`) and invoked before each attempt after the first.
    '''
    if not exceptions and success is None:
        # BUGFIX: the original `raise u"..."` raised a bare string, which is
        # itself a TypeError on Python >= 2.6; raise a real exception instead.
        raise ValueError(u"exceptions与success参数不能同时为空")
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Negative max_retries means "retry forever".
            if max_retries < 0:
                iterator = itertools.count()
            else:
                iterator = range(max_retries)
            for num, _ in enumerate(iterator, 1):
                try:
                    if num != 1 and retry_func is not None:
                        getattr(args[0], retry_func)()
                    result = func(*args, **kwargs)
                    if success is None or success(result):
                        return result
                except exceptions as e:
                    print(e)
                    if num == max_retries:
                        raise
                time.sleep(interval)  # blocking pause between attempts
        return wrapper
    return decorator
# Windows cannot use signal-based timers, so this variant borrows tornado
# coroutines to implement a non-blocking retry timer.
def retry_unblock(exceptions=(Exception,), interval=0, max_retries=10, success=None, retry_func=None, **retry_func_kwargs):
    """Non-blocking retry decorator built on tornado coroutines.

    Same parameters as `retry`, except `retry_func` is itself a coroutine
    called with **retry_func_kwargs before each attempt after the first.
    """
    if not exceptions and success is None:
        # BUGFIX: raising a bare string is a TypeError on Python >= 2.6;
        # raise a real exception instead.
        raise ValueError(u"exceptions与success参数不能同时为空")
    def decorator(func):
        @functools.wraps(func)
        @gen.coroutine
        def wrapper(*args, **kwargs):
            # Negative max_retries means "retry forever".
            if max_retries < 0:
                iterator = itertools.count()
            else:
                iterator = range(max_retries)
            for num, _ in enumerate(iterator, 1):
                try:
                    if num != 1 and retry_func is not None:
                        yield retry_func(**retry_func_kwargs)
                    result = yield func(*args, **kwargs)
                    if success is None or success(result):
                        yield result
                        return
                except exceptions as e:
                    print(e)
                    if num == max_retries:
                        print(u"已达到最大重试次数")
                        return
                yield gen.sleep(interval)  # non-blocking pause between attempts
        return wrapper
    return decorator
def run_retry_unblock(func):
    # Run `func` to completion on the current tornado IOLoop (blocks until done).
    def wrapper(*args,**kwargs):
        tornado.ioloop.IOLoop.current().run_sync(lambda:func(*args,**kwargs))
    return wrapper
# Test helpers below.
def retry_func(a,b,c):
    # Demo retry hook: prints the sum of its keyword-configured arguments.
    print "a + b + c = ",sum([a,b,c])
@run_retry_unblock
@retry_unblock(interval=3,max_retries=3,retry_func=retry_func,a=1,b=2,c=3)
def test(a,b):
    # Divides by zero when b == 0, deliberately triggering the retry path.
    print a/b
if __name__ == '__main__':
    test(10,0)
from typing import TYPE_CHECKING
from django import forms
from budget.models import Pattern
if TYPE_CHECKING:
from budget.models import Category
class UploadFileForm(forms.Form):
    """Form accepting a short title plus an uploaded file."""
    title = forms.CharField(max_length=50)
    file = forms.FileField()
class CategoryClassChoiceField(forms.ModelChoiceField):
    """Use this to show class with categories in a drop-down."""
    def label_from_instance(self, obj: "Category") -> str:
        # Render each option as "<class> - <name>" instead of str(obj).
        return f"{obj.class_field} - {obj.name}"
class PatternForm(forms.ModelForm):
    """Edit a Pattern's regex and category, with class-aware category labels."""
    class Meta:
        model = Pattern
        fields = ["pattern", "category"]
        field_classes = {"category": CategoryClassChoiceField}
class PatternBulkUpdateForm(forms.Form):
    """Upload a CSV of patterns for bulk update."""
    csv = forms.FileField()
|
import argparse
def get_args():
    """Parse and return the command-line training options."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--cuda", default=1, type=int, help="Which GPU to train.")
    arg_parser.add_argument("--batch_size", default=8, type=int, help="Batch size to use during training.")
    arg_parser.add_argument("--size", default=512, type=int, help="model1 input size")
    arg_parser.add_argument("--datamore", default=1, type=int, help="DataAugmentation")
    arg_parser.add_argument("--optim", default=0, type=int, help="Optimizer: 0: Adam, 1: SGD, 2:SGD with Nesterov")
    arg_parser.add_argument("--display_freq", default=10, type=int, help="Display frequency")
    arg_parser.add_argument("--lr1", default=0.00005, type=float, help="Learning rate for optimizer")
    arg_parser.add_argument("--epochs", default=50, type=int, help="Number of epochs to train")
    arg_parser.add_argument("--eval_per_epoch", default=1, type=int, help="eval_per_epoch ")
    return arg_parser.parse_args()
|
import discord
import os
from discord.ext import tasks
from discord.ext import commands
class checkmsg(commands.Cog):
    """Cog scaffold for message checking; currently only logs readiness."""
    def __init__(self, client):
        # Keep a reference to the bot client for listeners/commands to use.
        self.client = client
    @commands.Cog.listener()
    async def on_ready(self):
        # Fired when the bot connection is ready; confirms the cog loaded.
        print("Checkmsg.py Cog has loaded Succesfully")
def setup(client):
    # discord.py extension entry point: register this cog on the bot.
    client.add_cog(checkmsg(client))
import sys, collections
from numpy import *
from matplotlib import pyplot as plt
fn="../data/data_provinces.csv"
# loading file with 3 columns
name=loadtxt(fn, unpack=True, delimiter=',', skiprows=1, dtype='a', usecols=arange(1))
# array defined for the first column
region=loadtxt(fn, unpack=True, delimiter=',', skiprows=1, dtype='a', usecols=arange(1)+1)
# array defined for the second column
population,lifeExpectancy,incomePerCapita,expenditurePerCapita=loadtxt(fn,unpack=True, delimiter=',', skiprows=1,usecols=arange(4)+2) # array defined for the remaining columns
def main():
    # Compute the population-weighted life expectancy per region, print the
    # values in alphabetical order, then plot them.
    region_list = unique(region)
    age_dict = {}
    for i in range(len(region_list)):
        age_dict[region_list[i]] = ageCounter(region_list[i])
    sort_dict = collections.OrderedDict(sorted(age_dict.items()))
    for key in sort_dict:
        print "%s: %s" % (key, sort_dict[key])
    graph(sort_dict)
    # NOTE(review): exit status 1 usually signals failure -- confirm the
    # non-zero exit code is intentional here.
    sys.exit(1)
# Iterates through each row with the same Region
# and adds age to sum and returns sum value
def ageCounter(region_val):
    """Population-weighted average life expectancy of one region."""
    rows = where(region == region_val)[0]
    weighted_total = 0
    for row in rows:
        weighted_total += lifeExpectancy[row] * population[row]
    return weighted_total / popCounter(region_val)
def popCounter(region_val):
    """Total population across all rows belonging to `region_val`."""
    rows = where(region == region_val)[0]
    total = 0
    for row in rows:
        total += population[row]
    return total
# Set x and y labels and values for barplot
# and then graph values and save in fig folder
def graph(sort_dict):
    # Tick labels are ordered by value so they line up with the sorted bars.
    plt.yticks(range(len(sort_dict)), sorted(sort_dict.keys(), key=sort_dict.get), rotation=45, size='small')
    plt.barh(range(len(sort_dict)), sorted(sort_dict.values()))
    plt.xlim((0,100))
    plt.xlabel("Life Expectancy")
    plt.ylabel("Regions")
    plt.title("Regional Average Life Expectancy of the Philippines")
    plt.savefig("../fig/Life Expectancy.png")
    plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
|
########################
# the default config should work out of the box with minimal change
# Under the '## User specific parameter' line need to be changed to make the config correctly
########################
from WMCore.Configuration import Configuration
from os import environ, path
import WMCore.WMInit
config = Configuration()
config.component_('Webtools')
config.Webtools.application = 'AgentMonitoring'
config.component_('AgentMonitoring')
config.AgentMonitoring.templates = path.join( WMCore.WMInit.getWMBASE(), 'src/templates/WMCore/WebTools' )
## User specific parameter:
config.AgentMonitoring.admin = 'your@email.com'
config.AgentMonitoring.title = 'WMAgent Monitoring'
config.AgentMonitoring.description = 'Monitoring of a WMAgentMonitoring'
config.AgentMonitoring.section_('views')
# These are all the active pages that Root.py should instantiate
active = config.AgentMonitoring.views.section_('active')
# REST API view backed by the agent database.
wmagent = active.section_('wmagent')
# The class to load for this view/page
wmagent.object = 'WMCore.WebTools.RESTApi'
wmagent.templates = path.join( WMCore.WMInit.getWMBASE(), 'src/templates/WMCore/WebTools/')
wmagent.section_('database')
## User specific parameter:
wmagent.database.connectUrl = 'mysql://metson@localhost/wmagent'
# http://www.sqlalchemy.org/docs/reference/sqlalchemy/connections.html
#wmagent.database.database.engineParameters = {'pool_size': 10, 'max_overflow': 0}
wmagent.section_('model')
wmagent.model.object = 'WMCore.HTTPFrontEnd.Agent.AgentRESTModel'
wmagent.section_('formatter')
wmagent.formatter.object = 'WMCore.WebTools.RESTFormatter'
# HTML monitoring page served alongside the REST API.
wmagentmonitor = active.section_('wmagentmonitor')
# The class to load for this view/page
wmagentmonitor.object = 'WMCore.HTTPFrontEnd.Agent.AgentMonitorPage'
wmagentmonitor.templates = path.join(WMCore.WMInit.getWMBASE(), 'src/templates/WMCore/WebTools/')
wmagentmonitor.javascript = path.join(WMCore.WMInit.getWMBASE(), 'src/javascript/')
wmagentmonitor.html = path.join(WMCore.WMInit.getWMBASE(), 'src/html/')
|
print ("questao 1")
a = float(input("Digite a altura:CM"))
b = float(input("Digite o peso "))
imc = b/a**2
print ("seu imc é:",imc)
if imc <= 18.5:
print ("abaixo do peso")
elif imc >25:
print ("acima do peso ")
else :
print ("peso ideal")
|
# Generated by Django 2.1.5 on 2019-03-15 18:31
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add `end_datetime` to UserAttemptedChallenge."""
    dependencies = [
        ('userprogress', '0011_userattemptedchallenge'),
    ]
    operations = [
        migrations.AddField(
            model_name='userattemptedchallenge',
            name='end_datetime',
            # NOTE(review): datetime.datetime.now is naive local time; with
            # USE_TZ enabled Django warns about naive datetimes -- confirm.
            field=models.DateTimeField(blank=True, default=datetime.datetime.now),
        ),
    ]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TDNN(nn.Module):
    def __init__(
        self,
        input_dim=23,
        output_dim=512,
        context_size=5,
        stride=1,
        dilation=1,
        batch_norm=False,
        dropout_p=0
    ):
        '''
        TDNN as defined by https://www.danielpovey.com/files/2015_interspeech_multisplice.pdf
        Affine transformation not applied globally to all frames but smaller windows with local context
        batch_norm: True to include batch normalisation after the non linearity
        Context size and dilation determine the frames selected
        (although context size is not really defined in the traditional sense)
        For example:
            context size 5 and dilation 1 is equivalent to [-2,-1,0,1,2]
            context size 3 and dilation 2 is equivalent to [-2, 0, 2]
            context size 1 and dilation 1 is equivalent to [0]
        '''
        super(TDNN, self).__init__()
        self.context_size = context_size
        self.stride = stride
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.dilation = dilation
        self.dropout_p = dropout_p
        self.batch_norm = batch_norm
        # One affine map shared across all temporal windows.
        self.kernel = nn.Linear(self.input_dim * self.context_size, self.output_dim)
        self.nonlinearity = nn.ReLU()
        if self.batch_norm:
            self.bn = nn.BatchNorm1d(output_dim)
        if self.dropout_p:
            self.drop = nn.Dropout(p=self.dropout_p)

    def forward(self, x):
        '''
        input: size (batch, seq_len, input_features)
        output: size (batch, new_seq_len, output_features)
        '''
        _, _, d = x.shape
        assert (d == self.input_dim), 'Input dimension was wrong. Expected ({}), got ({})'.format(self.input_dim, d)
        x = x.unsqueeze(1)
        # Unfold input into smaller temporal contexts.
        # BUGFIX: the temporal stride was hard-coded to 1, silently ignoring
        # the `stride` constructor argument; pass it through (the default of 1
        # preserves the previous behaviour).
        x = F.unfold(
            x,
            (self.context_size, self.input_dim),
            stride=(self.stride, self.input_dim),
            dilation=(self.dilation, 1)
        )
        # x is now (batch, input_dim * context_size, new_seq_len)
        x = x.transpose(1, 2)
        if self.dropout_p:
            x = self.drop(x)
        x = self.kernel(x)
        x = self.nonlinearity(x)
        if self.batch_norm:
            # BatchNorm1d expects (batch, channels, seq), so flip around it.
            x = x.transpose(1, 2)
            x = self.bn(x)
            x = x.transpose(1, 2)
        return x
class StatsPooling(nn.Module):
    """Collapse the time axis by adding per-feature mean and std together."""

    def __init__(self):
        super(StatsPooling, self).__init__()

    def forward(self, varient_length_tensor):
        # (batch, seq_len, features) -> (batch, features)
        avg = varient_length_tensor.mean(dim=1)
        spread = varient_length_tensor.std(dim=1)
        return avg + spread
class FullyConnected(nn.Module):
    """Two 512-wide linear layers with dropout in between.

    Note: intentionally no non-linearity between the layers.
    """

    def __init__(self):
        super(FullyConnected, self).__init__()
        self.hidden1 = nn.Linear(512, 512)
        self.hidden2 = nn.Linear(512, 512)
        self.dropout = nn.Dropout(p=0.25)

    def forward(self, x):
        return self.hidden2(self.dropout(self.hidden1(x)))
class MyModel(nn.Module):
    """x-vector style TDNN: five frame-level layers, stats pooling, FC head,
    and a softmax over the 512 output units."""

    def __init__(self):
        super(MyModel, self).__init__()
        self.frame1 = TDNN(input_dim=23, output_dim=512, context_size=5, dilation=1)
        self.frame2 = TDNN(input_dim=512, output_dim=512, context_size=3, dilation=2)
        self.frame3 = TDNN(input_dim=512, output_dim=512, context_size=3, dilation=3)
        self.frame4 = TDNN(input_dim=512, output_dim=512, context_size=1, dilation=1)
        self.frame5 = TDNN(input_dim=512, output_dim=512, context_size=1, dilation=1)
        self.pooling = StatsPooling()
        self.fully = FullyConnected()
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # Run the frame-level stack, pool over time, classify, normalise.
        out = x
        for stage in (self.frame1, self.frame2, self.frame3,
                      self.frame4, self.frame5, self.pooling, self.fully):
            out = stage(out)
        return self.softmax(out)
class Model(nn.Module):
    """Siamese speaker-similarity model: a shared TDNN embeds both utterances
    of a pair, scored by cosine similarity."""

    def __init__(self):
        super(Model, self).__init__()
        self.cos = nn.CosineSimilarity(dim=1, eps=1e-6)
        self.TDNN = MyModel()
        self.softmax = nn.Softmax(dim=1)

    def load_(self):
        # Restore the embedding network from the best checkpoint on disk.
        self.TDNN = torch.load('best.pkl')

    def save_(self, epoch_):
        # Persist the embedding network tagged with the epoch number.
        torch.save(self.TDNN, str(epoch_) + '_model.pkl')

    def forward(self, x):
        # x: (batch, 2, seq, feat) -- the two channels are the utterance pair.
        # NOTE(review): squeeze() also drops a batch dimension of size 1,
        # so this assumes batch > 1 -- confirm.
        first = torch.squeeze(x[:, 0:1, :, :])
        second = torch.squeeze(x[:, 1:2, :, :])
        return self.cos(self.TDNN(first), self.TDNN(second))
|
import os
import maya.cmds as cmds
import maya.mel as mel
import pb.general.assets as assets
def create_export_skeleton():
    """Duplicate the character skeleton into world space and constrain it to
    the character (and, where applicable, weapon) rig for baking/export."""
    rigs = cmds.ls("*:RIG")
    character_rig = None
    weapon_rig = None
    # Rig namespaces carry asset-type prefixes: ch* = character, wp* = weapon.
    for rig in rigs:
        if rig.startswith('ch'):
            character_rig = rig
        elif rig.startswith('wp'):
            weapon_rig = rig
    # NOTE(review): assumes a character rig always exists; if none is found,
    # character_rig stays None and the next line raises -- confirm.
    character_namespace = character_rig.rpartition(':')[0]
    character_skel_group = cmds.listConnections(character_rig + '.skeleton_group', source=True)[0]
    character_root_joint = cmds.listRelatives(character_skel_group)[0]
    export_skeleton = cmds.duplicate(character_root_joint)
    cmds.parent(export_skeleton[0], world=True)
    # Drive every duplicated joint from its namespaced original.
    for joint in export_skeleton:
        character_joint = character_namespace + ':' + joint
        cmds.parentConstraint(character_joint, joint)
    if weapon_rig is not None:
        weapon_namespace = weapon_rig.rpartition(':')[0]
        # Re-home weapon joints onto the weapon rig where a match exists.
        for joint in export_skeleton:
            weapon_joint = weapon_namespace + ':' + joint
            if joint.startswith('weapon') and cmds.objExists(weapon_joint):
                old_constraint = cmds.listRelatives(joint, type='constraint')
                cmds.delete(old_constraint)
                cmds.parentConstraint(weapon_joint, joint)
def delete_export_skeleton():
    """Remove the temporary export skeleton rooted at |root, if present."""
    export_root = "|root"
    if cmds.objExists(export_root):
        cmds.delete(export_root)
def release_fbx_name(version):
    """Build the release FBX file name, e.g. ``A_asset_desc_v003.fbx``.

    (Removed an unused `highest_release_version()` lookup the previous
    revision performed but never used.)
    """
    work_file = assets.asset_file()
    decomposed = assets.decompose_file_name(work_file)
    return "A_{0}_{1}_v{2:0>3}.fbx".format(
        decomposed['asset'],
        decomposed['description'],
        version,
    )
def release_scene_name(version):
    """Build the release Maya scene name, e.g. ``type_asset_step_desc_v003.ma``.

    (Removed an unused `highest_release_version()` lookup the previous
    revision performed but never used.)
    """
    work_file = assets.asset_file()
    decomposed = assets.decompose_file_name(work_file)
    return "{0}_{1}_{2}_{3}_v{4:0>3}.ma".format(
        decomposed["type"],
        decomposed["asset"],
        decomposed["step"],
        decomposed["description"],
        version,
    )
def export_animation(version):
    """Bake and export the |root skeleton to the versioned release FBX."""
    release_dir = assets.release_dir()
    file_name = release_fbx_name(version)
    # Maya's FBX command wants forward slashes, even on Windows.
    fbx_path = os.path.join(release_dir, file_name).replace("\\", "/")
    skeleton = cmds.listRelatives(
        '|root', allDescendents=True, type='joint'
    )
    skeleton.insert(0, "|root")
    cmds.select(skeleton, replace=True)
    mel.eval("FBXExportBakeComplexAnimation -v true")
    mel.eval("FBXExportInputConnections -v false")
    mel.eval("FBXExportUpAxis z")
    # FBXExport -s exports only the current selection (the skeleton above).
    command = 'FBXExport -f "{}" -s'.format(fbx_path)
    mel.eval(command)
def save_release_scene(version):
    """Save the current scene as the versioned release .ma file."""
    release_dir = assets.release_dir()
    file_name = release_scene_name(version)
    full_path = os.path.join(release_dir, file_name)
    cmds.file(rename=full_path)
    cmds.file(save=True, type='mayaAscii')
def publish():
    """Full release: bump version, snapshot work file, export FBX, save scene."""
    new_version = assets.highest_release_version() + 1
    assets.incremental_save()
    create_export_skeleton()
    export_animation(version=new_version)
    # The temporary export skeleton is only needed for the FBX bake.
    delete_export_skeleton()
    save_release_scene(version=new_version)
if __name__ == "__main__":
create_export_skeleton()
|
cpf = input("CPF(xxx.xxx.xxx-xx) :")
while (cpf[3] !=".") or (cpf[7] !=".") or (cpf[11] !="-"):
cpf = input("O formato deve ser: (xxx.xxx.xxx-xx) :")
else: print("O formato está correto")
|
class CredentialSet:
    """Mutable holder for a username/password pair, initialised to defaults."""

    def __init__(self):
        self.UserName = "default"
        self.Password = "default"
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author: cg
# time : 2017-12-04
import pymysql.cursors
class DoMysql:
    """Thin helper that opens a MySQL connection from a settings dict."""

    def __init__(self, dictMsgForMysql):
        # Pull connection settings out of the dict; missing keys become None.
        self.strHost = dictMsgForMysql.get('host')
        self.strPort = dictMsgForMysql.get('port')
        self.strUser = dictMsgForMysql.get('user')
        self.strPasswd = dictMsgForMysql.get('passwd')
        self.strDatabase = dictMsgForMysql.get('database')

    def connectionMySQL(self):
        """Open and return a pymysql connection, or None on failure."""
        connection = None
        try:
            connection = pymysql.connect(host=self.strHost, port=int(self.strPort), user=self.strUser,
                                         passwd=self.strPasswd, db=self.strDatabase,
                                         charset="utf8mb4", cursorclass=pymysql.cursors.DictCursor)
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; Exception keeps the best-effort behaviour
            # without masking interpreter-level signals.
            print('请重新检查数据库配置(可能配置出错或者网络出错)')
        return connection
|
import random
from onegov.core.collection import Pagination
from onegov.ticket import handlers as global_handlers
from onegov.ticket.model import Ticket
from sqlalchemy import desc, distinct, func
from sqlalchemy.orm import joinedload, undefer
from uuid import UUID
from typing import Any, Literal, NamedTuple, TYPE_CHECKING
if TYPE_CHECKING:
from onegov.ticket.model import TicketState
from sqlalchemy.orm import Query, Session
from typing_extensions import Self, TypeAlias, TypedDict
ExtendedTicketState: TypeAlias = TicketState | Literal['all', 'unfinished']
class StateCountDict(TypedDict, total=False):
open: int
pending: int
closed: int
archived: int
class TicketCollectionPagination(Pagination[Ticket]):
    """Shared pagination/filtering base for ticket collections.

    Filters by state, handler code, group and owner; ``extra_parameters``
    are only honoured when a concrete handler is selected.
    """

    if TYPE_CHECKING:
        # forward declare query
        def query(self) -> 'Query[Ticket]': ...

    def __init__(
        self,
        session: 'Session',
        page: int = 0,
        state: 'ExtendedTicketState' = 'open',
        handler: str = 'ALL',
        group: str | None = None,
        owner: str = '*',
        extra_parameters: dict[str, Any] | None = None
    ):
        self.session = session
        self.page = page
        self.state = state
        self.handler = handler
        self.handlers = global_handlers
        self.group = group
        self.owner = owner
        # Extra parameters only make sense for a specific handler.
        if self.handler != 'ALL':
            self.extra_parameters = extra_parameters or {}
        else:
            self.extra_parameters = {}

    def __eq__(self, other: object) -> bool:
        # NOTE(review): this compares against TicketCollection (a subclass),
        # not this class, and only considers state/page -- confirm intended.
        return (
            isinstance(other, TicketCollection)
            and self.state == other.state
            and self.page == other.page
        )

    def subset(self) -> 'Query[Ticket]':
        """The filtered, newest-first query backing the current page."""
        query = self.query()
        query = query.order_by(desc(Ticket.created))
        query = query.options(joinedload(Ticket.user))
        query = query.options(undefer(Ticket.created))
        if self.state == 'unfinished':
            query = query.filter(
                Ticket.state != 'closed',
                Ticket.state != 'archived'
            )
        elif self.state == 'all':
            # 'all' still hides archived tickets.
            query = query.filter(Ticket.state != 'archived')
        elif self.state != 'all':
            # Any remaining value is a concrete ticket state.
            query = query.filter(Ticket.state == self.state)
        # (idiom fix: `is not None` instead of `!= None`)
        if self.group is not None:
            query = query.filter(Ticket.group == self.group)
        if self.owner != '*':
            query = query.filter(Ticket.user_id == self.owner)
        if self.handler != 'ALL':
            query = query.filter(Ticket.handler_code == self.handler)
        if self.extra_parameters:
            # extra_parameters is only ever non-empty when a concrete handler
            # was selected (see __init__).
            handler_class = self.handlers.get(self.handler)
            query = handler_class.handle_extra_parameters(
                self.session, query, self.extra_parameters
            )
        return query

    @property
    def page_index(self) -> int:
        return self.page

    def page_by_index(self, index: int) -> 'Self':
        """The same collection, pointed at another page."""
        return self.__class__(
            self.session, index, self.state, self.handler, self.group,
            self.owner, self.extra_parameters
        )

    def available_groups(self, handler: str = '*') -> tuple[str, ...]:
        """Distinct ticket groups, optionally limited to one handler."""
        query = self.query().with_entities(distinct(Ticket.group))
        query = query.order_by(Ticket.group)
        if handler != '*':
            query = query.filter(Ticket.handler_code == handler)
        return tuple(r[0] for r in query.all())

    def for_state(self, state: 'ExtendedTicketState') -> 'Self':
        """Copy of this collection filtered to ``state`` (page reset)."""
        return self.__class__(
            self.session, 0, state, self.handler, self.group, self.owner,
            self.extra_parameters
        )

    def for_handler(self, handler: str) -> 'Self':
        """Copy of this collection filtered to ``handler`` (page reset)."""
        return self.__class__(
            self.session, 0, self.state, handler, self.group, self.owner,
            self.extra_parameters
        )

    def for_group(self, group: str) -> 'Self':
        """Copy of this collection filtered to ``group`` (page reset)."""
        return self.__class__(
            self.session, 0, self.state, self.handler, group, self.owner,
            self.extra_parameters
        )

    def for_owner(self, owner: str | UUID) -> 'Self':
        """Copy of this collection filtered to ``owner`` (page reset)."""
        if isinstance(owner, UUID):
            owner = owner.hex
        return self.__class__(
            self.session, 0, self.state, self.handler, self.group, owner,
            self.extra_parameters
        )
class TicketCount(NamedTuple):
    """Number of tickets per state; unmentioned states default to zero."""
    open: int = 0
    pending: int = 0
    closed: int = 0
    archived: int = 0
class TicketCollection(TicketCollectionPagination):
def query(self) -> 'Query[Ticket]':
return self.session.query(Ticket)
def random_number(self, length: int) -> int:
range_start = 10 ** (length - 1)
range_end = 10 ** length - 1
return random.randint(range_start, range_end) # nosec B311
def random_ticket_number(self, handler_code: str) -> str:
number = str(self.random_number(length=8))
return f'{handler_code}-{number[:4]}-{number[4:]}'
def is_existing_ticket_number(self, ticket_number: str) -> bool:
query = self.query().filter(Ticket.number == ticket_number)
return self.session.query(query.exists()).scalar()
def issue_unique_ticket_number(self, handler_code: str) -> str:
""" Randomly generates a new ticket number, ensuring it is unique
for the given handler_code.
The resulting code is of the following form::
XXX-0000-1111
Where ``XXX`` is the handler_code and the rest is a 12 character
sequence of random numbers separated by dashes.
This gives us 10^8 or 100 million ticket numbers for each handler.
Though we'll never reach that limit, there is an increasing chance
of conflict with existing ticket numbers, so we have to check
against the database.
Still, this number is not unguessable (say in an URL) - there we have
to rely on the internal ticket id, which is a uuid.
In a social engineering setting, where we don't have the abilty to
quickly try out thousands of numbers, the ticket number should
be pretty unguessable however.
"""
# usually we won't have any conflict, so we just run queries
# against the existing database, even if this means to run more than
# one query once in forever
while True:
candidate = self.random_ticket_number(handler_code)
if not self.is_existing_ticket_number(candidate):
return candidate
def open_ticket(
self,
handler_code: str,
handler_id: str,
**handler_data: Any
) -> Ticket:
""" Opens a new ticket using the given handler. """
ticket = Ticket.get_polymorphic_class(handler_code, default=Ticket)()
ticket.number = self.issue_unique_ticket_number(handler_code)
# add it to the session before invoking the handler, who expects
# each ticket to belong to a session already
self.session.add(ticket)
ticket.handler_id = handler_id
ticket.handler_code = handler_code
ticket.handler_data = handler_data
ticket.handler.refresh()
self.session.flush()
return ticket
# FIXME: It seems better to return a query here...
def by_handler_code(self, handler_code: str) -> list[Ticket]:
return self.query().filter(Ticket.handler_code == handler_code).all()
def by_id(
self,
id: UUID,
ensure_handler_code: str | None = None
) -> Ticket | None:
query = self.query().filter(Ticket.id == id)
if ensure_handler_code:
query = query.filter(Ticket.handler_code == ensure_handler_code)
return query.first()
def by_handler_id(self, handler_id: str) -> Ticket | None:
    """Return the first ticket bound to *handler_id*, or None."""
    matching = self.query().filter(Ticket.handler_id == handler_id)
    return matching.first()
def get_count(self, excl_archived: bool = True) -> TicketCount:
    """Count tickets grouped by state.

    Archived tickets are excluded unless *excl_archived* is False.
    """
    query: 'Query[tuple[str, int]]' = self.query().with_entities(
        Ticket.state, func.count(Ticket.state)
    )
    if excl_archived:
        query = query.filter(Ticket.state != 'archived')
    # The grouped query yields (state, count) pairs, which map straight
    # onto TicketCount keyword arguments.
    return TicketCount(**dict(query.group_by(Ticket.state)))
def by_handler_data_id(
    self,
    handler_data_id: str | UUID
) -> 'Query[Ticket]':
    """Query tickets whose nested handler_data id equals *handler_data_id*."""
    wanted = str(handler_data_id)
    return self.query().filter(
        Ticket.handler_data['handler_data']['id'] == wanted)
# FIXME: Why is this its own subclass? shouldn't this at least override
# __init__ to pin state to 'archived'?!
class ArchivedTicketsCollection(TicketCollectionPagination):
    """Paginated ticket collection; note that despite the name the base
    query is NOT restricted to archived tickets (see FIXME above)."""

    def query(self) -> 'Query[Ticket]':
        # Unfiltered base query over all tickets.
        return self.session.query(Ticket)
|
from django.urls import path
from . import views
urlpatterns = [
    # Dashboard landing page (function-based view).
    path('', views.load_dashboard, name='home-page'),
    # Prediction form/endpoint (class-based view).
    path('predictor/', views.PredictorView.as_view(), name='predictor'),
]
|
def preorder(self, root: 'Node') -> List[int]:
    """Iterative preorder traversal of an n-ary tree.

    Mirrors the classic iterative binary-tree preorder: pop a node,
    record its value, then push its children right-to-left so the
    leftmost child is visited first.  O(N) time, O(N) space.
    """
    order = []
    if not root:
        return order
    pending = [root]
    while pending:
        current = pending.pop()
        order.append(current.val)
        # Push children in reverse so they pop in left-to-right order.
        for child in reversed(current.children):
            pending.append(child)
    return order
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005-2006 CamptoCamp
# Copyright (c) 2006-2010 OpenERP S.A
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import time
import re
from report import report_sxw
import logging
from openerp.tools.amount_to_text_en import amount_to_text
# Module-level logger shared by this report parser.
_logger = logging.getLogger('reportes')
class stock_picking(report_sxw.rml_parse):
    """RML parser adding date/amount formatting helpers for the Rio Bueno
    stock picking report (registered below via report_sxw.report_sxw)."""

    # Spanish month names keyed by the two-digit month, exactly as the RML
    # layout expects them.  Trailing spaces are preserved from the original
    # if/elif chain (note: 'Septiembre' intentionally has none).
    MESES = {
        '01': 'Enero ',
        '02': 'Febrero ',
        '03': 'Marzo ',
        '04': 'Abril ',
        '05': 'Mayo ',
        '06': 'Junio ',
        '07': 'Julio ',
        '08': 'Agosto ',
        '09': 'Septiembre',
        '10': 'Octubre ',
        '11': 'Noviembre ',
        '12': 'Diciembre ',
    }

    def __init__(self, cr, uid, name, context):
        super(stock_picking, self).__init__(cr, uid, name, context)
        # Expose the helpers to the RML template context.
        self.localcontext.update({
            'time': time,
            '_date': self._date,
            '_amounts': self._amounts,
            '_amount_to_text': self._amount_to_text,
        })

    def _date(self, date):
        """Format a 'DD?MM?YYYY' date string as 'DD <Mes> YYYY'.

        Returns '' for a false/empty date.  An unknown month code yields
        an empty month name, matching the original if/elif behaviour.
        """
        if not date:
            return ''
        fecha = str(date)
        dia = fecha[0:2]
        mes = fecha[3:5]
        ano = fecha[6:10]
        mes_string = self.MESES.get(mes, '')
        return dia + ' ' + mes_string + ' ' + ano

    def _amounts(self, valor):
        """Format *valor* as a thousands-grouped integer using '.' separators.

        NOTE(review): the integer part is truncated via int(valor), not
        rounded — the original computed int(round(valor)) but never used
        it; that truncation behaviour is preserved here.
        """
        if not valor:
            return '0'
        monto = str(format(int(valor), ',d'))
        return monto.replace(',', '.')

    def _amount_to_text(self, amount):
        """Spell out *amount* in English words."""
        return amount_to_text(amount)
# Register the parser so OpenERP renders the RML template with it for
# stock.picking records.
report_sxw.report_sxw('report.stock_picking_rio_bueno', 'stock.picking', 'trunk/econube_invoice_print_rio_bueno/reportes/print_picking.rml', parser=stock_picking, header='false')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# Read a line of text and print its words in reverse order.
print("program to reverse the given words")
message = input("enter the string")
# Split on single spaces (same as the original); `words` avoids shadowing
# the builtin `list`, which the original did.
words = message.split(" ")
print(len(words))
# Emit the words last-to-first, space-separated, no trailing newline.
for word in reversed(words):
    print(word, end=" ")
|
#!/usr/bin/python3
import time
from threading import Thread
# Number of worker threads the line list is split across (round-robin).
NUMBER_OF_THREADS = 1
def process_line(in_line):
    """Swap the case of every letter and rotate every digit by one (9 -> 0).

    Single-pass rewrite: the original rebuilt the whole string with
    slicing for every digit, which is O(n^2) in the line length.
    """
    swapped = in_line.swapcase()
    # (d + 1) % 10 covers both branches of the original: d < 9 -> d + 1,
    # d == 9 -> 0.
    return ''.join(
        str((int(ch) + 1) % 10) if ch.isdigit() else ch
        for ch in swapped
    )
def process_thread(lines, offset, threads_count):
    """Worker body: transform every threads_count-th line (starting at
    *offset*) in place via process_line.

    Disjoint strides mean workers never touch the same index, so no
    locking is needed for the element assignments.
    """
    for idx in range(offset, len(lines), threads_count):
        lines[idx] = process_line(lines[idx])
if __name__ == '__main__':
    start = time.time()
    # BUG FIX: initialise data_load so the timing report at the bottom
    # cannot raise NameError when loading fails before it is assigned.
    data_load = start
    try:
        print("Load input data")
        # `with` guarantees the files are closed even on error (the
        # original left them open on exceptions).
        with open("input.data", "r") as in_file:
            lines = in_file.readlines()
        data_load = time.time()
        threads_pool = []
        print("Start {} thread".format(NUMBER_OF_THREADS))
        # Fan the work out round-robin across the worker threads.
        for i in range(0, NUMBER_OF_THREADS):
            thread = Thread(target=process_thread, args=(lines, i, NUMBER_OF_THREADS))
            thread.start()
            threads_pool.append(thread)
        print("Wait for finish")
        # Wait for every worker before writing results.
        for t in threads_pool:
            t.join()
        print("Save output data")
        with open("output_py.data", "w") as out_file:
            out_file.writelines(lines)
        print("Done.")
    except Exception as e:
        print(e)
    end = time.time()
    print("Data load time: {}".format(data_load - start))
    print("Total Execution time: {}".format(end - start))
|
import pandas as pd
import random
## The missing links should be extracted from the previous snapshot of the
## network for which you want to predict links (if you want a negative
## dataset for your predictions).
## For example, if you want a list of links that were present neither
## in 2016 nor in 2017, and the positive dataset is from 2017,
## then these links can go in our negative
## dataset.
'''
# num_of_edges = int(input("How much edges you want to be generated: "))
num_of_edges = 100000
old_edges_file_path = input(" Please enter OLD edges file path: \n >>> ")
new_edges_file_path = input(" Please enter NEW edges file path: \n >>> ")
# old_nodes_info_file_path = input(" Please enter OLD nodes info file path: \n >>> ")
# new_nodes_info_file_path = input(" Please enter NEW nodes info file path: \n >>> ")
output_file_path = input(" Please enter the output file path: \n >>> ")
'''
def get_missing_links(old_edges_file_path, new_edges_file_path, output_file_path, num_of_edges):
    """Sample *num_of_edges* undirected node pairs absent from both edge lists.

    Used to build a negative dataset for link prediction: candidate nodes
    are those present in BOTH snapshots; a sampled pair qualifies only if
    it appears in neither snapshot.

    :param output_file_path: unused here (kept for backward compatibility;
        the caller writes the CSV from the returned dataframe).
    :return: dict with keys "output_dataframe" (Source/Target DataFrame),
        "new_edges" (set of edges in the new snapshot) and
        "missing_edges" (the sampled negative edges).
    """
    df_old_edges = pd.read_csv(old_edges_file_path, sep=",")
    df_new_edges = pd.read_csv(new_edges_file_path, sep=",")

    def _collect(df):
        # One pass per file: undirected edges as sorted tuples, plus nodes.
        edges, nodes = set(), set()
        for s, t in zip(df.Source, df.Target):
            edges.add(tuple(sorted((s, t))))
            nodes.add(s)
            nodes.add(t)
        return edges, nodes

    old_edges, old_nodes = _collect(df_old_edges)
    new_edges, new_nodes = _collect(df_new_edges)
    # Only nodes present in both snapshots can form a negative example.
    nodes = list(old_nodes & new_nodes)
    missing_edges = set()
    # NOTE(review): like the original, this never terminates if
    # num_of_edges exceeds the number of absent pairs.
    while len(missing_edges) < num_of_edges:
        source = random.choice(nodes)
        target = random.choice(nodes)
        if source == target:
            continue
        edge = tuple(sorted((source, target)))
        if edge not in old_edges and edge not in new_edges and edge not in missing_edges:
            missing_edges.add(edge)
        # Progress: how many negatives are still needed.
        print("\r{}".format(num_of_edges - len(missing_edges)), end="")
    # sorted() makes the output dataframe deterministic for a given sample.
    output_missing_edges = pd.DataFrame(sorted(missing_edges), columns=["Source", "Target"])
    return {"output_dataframe": output_missing_edges, "new_edges": new_edges, "missing_edges": missing_edges}
###################################################################
'''
ot_mss_egs = get_missing_links(old_edges_file_path, new_edges_file_path, output_file_path, num_of_edges)["output_dataframe"]
ot_mss_egs.to_csv(output_file_path, sep=',', index=False)
'''
#
|
from handControl.axis import Axis
from handControl.communication.serial_connection import SerialConnection
import time
class Hand(object):
    """Drives a 4-axis robotic hand over a serial connection."""

    def __init__(self, port):
        # Four axes, all centred at 90 degrees on startup.
        self._axis = []
        for i in range(0, 4):
            self._axis.append(Axis())
            self._axis[i].set_angle(90)
        self._serial = SerialConnection()
        # NOTE(review): 9600 baud assumed to match the firmware — confirm.
        self._serial.connect(port, 9600)

    def _move(self):
        """Serialise the current axis angles into a packet and send it."""
        packet = self._serial.create_packet(self._axis)
        self._serial.write(packet)

    def move(self, angles):
        """Set all four axis angles (sequence of 4) and transmit them."""
        for i in range(0, 4):
            self._axis[i].set_angle(angles[i])
        self._move()

    def move_axis(self, axis, angle):
        """Set a single axis angle (clamped roughly to [0, 180]) and
        retransmit every axis."""
        if angle>180:
            angle=180
        # NOTE(review): any angle below 1 is forced to 0 — if sub-degree
        # values are meaningful this was probably meant to be `angle < 0`;
        # confirm before changing.
        if angle<1:
            angle =0
        self._axis[axis].set_angle(angle)
        # The comprehension variable shadows the `axis` parameter; safe
        # here only because the parameter is no longer needed afterwards.
        self.move([axis._angle for axis in self._axis])
# Module-level side effect: importing this module opens the serial port.
hand = Hand('/dev/cu.usbmodem1411')
from selenium import webdriver
import time
import os
# Demo: open a local HTML fixture, trigger an alert and type into it.
driver = webdriver.Chrome()
file_path = 'file:///'+os.path.abspath("C:/课件/我的课件/测试/selenium2/locateElement/selenium2html/send.html")
driver.get(file_path)
time.sleep(3)
driver.maximize_window()
# NOTE(review): find_element_by_xpath is removed in Selenium 4; newer
# drivers need find_element(By.XPATH, ...) — confirm the pinned version.
driver.find_element_by_xpath("//html//body//input").click()
time.sleep(3)
# Switch focus to the JavaScript alert/prompt that just opened.
alert = driver.switch_to.alert
# NOTE(review): send_keys on an alert is only honoured for prompt()
# dialogs, and not by every driver — confirm with the target browser.
alert.send_keys("蔡徐坤")
time.sleep(3)
alert.accept()
time.sleep(6)
driver.quit()
import os
from distutils.core import setup
def read(fname):
    """Return the contents of *fname*, resolved relative to this setup
    script's directory.

    Uses a context manager so the file handle is closed (the original
    leaked it).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
        return fh.read()
# Package metadata; README.rst doubles as the PyPI long description.
setup(
    name = 'BigRig',
    version = '0.1-pre',
    license = 'BSD',
    description = 'A pure Python ECMAScript 5.1 engine.',
    long_description = read('README.rst'),
    author = 'Jeff Kistler',
    author_email = 'jeff@jeffkistler.com',
    packages = ['bigrig'],
    package_dir = {'bigrig': 'bigrig'},
    # Command-line entry point installed alongside the package.
    scripts = ['scripts/bigrig'],
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
    ]
)
|
import sympy as sp
import numpy as np
from kaa.bundle import Bundle, BundleTransformer
from kaa.model import Model
def test_bund_trans_1():
    """Transform the unit-box bundle under dx = x + 1, dy = y + 1 and
    print the result for inspection."""
    x,y = sp.Symbol('x'), sp.Symbol('y')
    dx = x + 1
    dy = y + 1
    dyns =[dx, dy]
    # NOTE: shadows the builtin vars(); harmless inside this test body.
    vars = [x, y]
    # Axis-aligned directions (identity template).
    L = np.empty([2,2])
    T = np.empty(2)
    L[0] = [1, 0]
    L[1] = [0, 1]
    T[0] = 0
    T[1] = 1
    T = [T]
    # Upper/lower offsets of 1 in every direction.
    offu = np.empty(2)
    offl = np.empty(2)
    offu[0] = 1
    offu[1] = 1
    offl[0] = 1
    offl[1] = 1
    init_bund = Bundle(T, L, offu, offl, vars)
    trans = BundleTransformer(dyns)
    trans_bund = trans.transform(init_bund)
    print(trans_bund)
    # NOTE(review): deliberate failure so pytest shows the printed bundle
    # (stdout is only displayed for failing tests); replace with a real
    # assertion once the expected bundle is known.
    assert False
|
from .data_processor import DataProcessor
from torch import nn
class SourceTargetDataProcessor(nn.Module):
    """
    Abstract class used for preprocessing and embedding.
    It basically contains two DataProcessors (one for the source and one for the target).
    Preprocessing: from ? -> (source, target, metadata_dict) where
    - source is (batch_size, num_events_source, num_channels_source)
    - target is (batch_size, num_events_target, num_channels_target)
    - metadata_dict is a dictionary of (batch_size, ...) tensors that can be used by positional encodings and first token.
    Embedding: from (batch_size, num_events, num_channels) ->
    (batch_size, num_events, num_channels, embedding_size)
    """

    def __init__(self,
                 encoder_data_processor: DataProcessor,
                 decoder_data_processor: DataProcessor):
        super(SourceTargetDataProcessor, self).__init__()
        # One processor per side; every property/method below simply
        # delegates to the relevant one.
        self.encoder_data_processor = encoder_data_processor
        self.decoder_data_processor = decoder_data_processor

    @property
    def embedding_size_source(self):
        # Embedding width of the source (encoder) side.
        return self.encoder_data_processor.embedding_size

    @property
    def embedding_size_target(self):
        # Embedding width of the target (decoder) side.
        return self.decoder_data_processor.embedding_size

    @property
    def num_channels_source(self):
        return self.encoder_data_processor.num_channels

    @property
    def num_channels_target(self):
        return self.decoder_data_processor.num_channels

    @property
    def num_events_source(self):
        return self.encoder_data_processor.num_events

    @property
    def num_events_target(self):
        return self.decoder_data_processor.num_events

    @property
    def num_tokens_per_channel_target(self):
        # Vocabulary sizes per target channel (decoder side only).
        return self.decoder_data_processor.num_tokens_per_channel

    def embed_source(self, x):
        """
        :param x: (..., num_channels)
        :return: (..., num_channels, embedding_size)
        """
        return self.encoder_data_processor.embed(x)

    def embed_target(self, x):
        """
        :param x: (..., num_channels)
        :return: (..., num_channels, embedding_size)
        """
        return self.decoder_data_processor.embed(x)

    def embed_step_source(self, x, channel_index):
        """Embed a single channel step on the source side.

        :param x: (..., num_channels)
        :return: (..., num_channels, embedding_size)
        """
        return self.encoder_data_processor.embed_step(
            x, channel_index=channel_index)

    def embed_step_target(self, x, channel_index):
        """Embed a single channel step on the target side.

        :param x: (..., num_channels)
        :return: (..., num_channels, embedding_size)
        """
        return self.decoder_data_processor.embed_step(
            x, channel_index=channel_index)

    def preprocess(self, x):
        """
        Subclasses must implement this method.
        x comes directly from the data_loader_generator.
        source and target must be put on the GPUs if needed.
        :param x: ?
        :return: (source, target, metadata_dict)
        of size (batch_size, num_events_source, num_channels_source)
        (batch_size, num_events_target, num_channels_target)
        """
        raise NotImplementedError
|
class Objeto:
    """Simple pose record: 2-D position plus orientation, zero-initialised."""

    def __init__(self):
        # Everything starts at the origin, facing angle 0.
        self.x, self.y, self.orientation = 0, 0, 0
class Frame:
    """Snapshot of a match: three robots per team plus the ball."""

    def __init__(self):
        self.robots_blue = [Objeto() for _ in range(3)]
        self.robots_yellow = [Objeto() for _ in range(3)]
        self.ball = Objeto()
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import itertools
import statsmodels.api as sm
import streamlit as st
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from pylab import rcParams
from sklearn.metrics import mean_squared_error
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import seaborn as sns
sns.set_context("paper", font_scale=1.3)
sns.set_style('white')
import math
from sklearn.preprocessing import MinMaxScaler
plt.style.use('fivethirtyeight')
st.title("Time Series Prediction Model for Crude Palm Oil Price")
st.markdown(
"""
This is a simple time series prediction model with parameter to adjust in order
to get best prediction accurancy result
"""
)
# NOTE(review): pd.datetime is deprecated/removed in recent pandas —
# datetime.datetime.strptime would be the replacement; confirm the pinned
# pandas version before upgrading.
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d')
# Hard-coded local paths: this script only runs on the author's machine.
os.chdir('C:\\Users\\oryza\\OneDrive\\Desktop\\DataScience\\DataMiningOnClass\\project\\WQD7005_DataMining\\B_Processed_Data')
CPO_Price=pd.read_csv("..\\A_Raw_Data\\CPOPrices\\investing_Bursa_CPO_USD_price.csv",parse_dates=['Date'], date_parser=dateparse)
# Keep prices from September 2014 onward, and only the closing Price column.
CPO_Price_September=CPO_Price[CPO_Price['Date']>='2014-09-01']
CPO_Price_September=CPO_Price_September.drop(['Open','High','Low','Vol.','Change %'],axis=1)
CPO_Price_September.set_index('Date',inplace=True)
print(CPO_Price_September)
# Semi-month-start resampled mean, just for the exploratory plots below.
y = CPO_Price_September['Price'].resample('SMS').mean()
y.plot(figsize=(15, 6))
# st.pyplot()
# plt.show()
rcParams['figure.figsize'] = 18, 8
decomposition = sm.tsa.seasonal_decompose(y, model='additive',freq=40)
fig = decomposition.plot()
# st.pyplot()
# plt.show()
# normalize the data_set
sc = MinMaxScaler(feature_range = (0, 1))
df = sc.fit_transform(CPO_Price_September)
# split into train and test sets
train_size = int(len(df) * 0.75)
test_size = len(df) - train_size
train, test = df[0:train_size, :], df[train_size:len(df), :]
# convert an array of values into a data_set matrix def
def create_data_set(_data_set, _look_back=1):
    """Build supervised pairs from a (n, 1) series.

    X[i] is the window of _look_back consecutive values starting at i;
    y[i] is the value immediately after that window.
    """
    pairs = [
        (_data_set[i:i + _look_back, 0], _data_set[i + _look_back, 0])
        for i in range(len(_data_set) - _look_back - 1)
    ]
    if not pairs:
        return np.array([]), np.array([])
    xs, ys = zip(*pairs)
    return np.array(xs), np.array(ys)
# reshape into X=t and Y=t+1
look_back =90
# NOTE(review): `Ytest` here is a typo for `Y_test`; harmless, since both
# are reassigned by create_data_set below.
X_train,Y_train,X_test,Ytest = [],[],[],[]
X_train,Y_train=create_data_set(train,look_back)
# LSTM expects (samples, timesteps, features).
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test,Y_test=create_data_set(test,look_back)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
# create and fit the LSTM network regressor = Sequential()
# Three stacked 100-unit LSTM layers with light dropout, then a scalar head.
regressor = Sequential()
regressor.add(LSTM(units = 100, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(0.1))
regressor.add(LSTM(units = 100, return_sequences = True))
regressor.add(Dropout(0.1))
regressor.add(LSTM(units = 100))
regressor.add(Dropout(0.1))
regressor.add(Dense(units = 1))
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
reduce_lr = ReduceLROnPlateau(monitor='val_loss',patience=5)
# shuffle=False keeps temporal ordering within each epoch.
history =regressor.fit(X_train, Y_train, epochs = 20, batch_size = 15,validation_data=(X_test, Y_test), callbacks=[reduce_lr],shuffle=False)
train_predict = regressor.predict(X_train)
test_predict = regressor.predict(X_test)
# invert predictions
# Undo the MinMax scaling so errors are reported in price units.
train_predict = sc.inverse_transform(train_predict)
Y_train = sc.inverse_transform([Y_train])
test_predict = sc.inverse_transform(test_predict)
Y_test = sc.inverse_transform([Y_test])
# print('Train Mean Absolute Error:', mean_absolute_error(Y_train[0], train_predict[:,0]))
# print('Train Root Mean Squared Error:',np.sqrt(mean_squared_error(Y_train[0], train_predict[:,0])))
# print('Test Mean Absolute Error:', mean_absolute_error(Y_test[0], test_predict[:,0]))
# print('Test Root Mean Squared Error:',np.sqrt(mean_squared_error(Y_test[0], test_predict[:,0])))
plt.figure(figsize=(8,4))
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Test Loss')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.legend(loc='upper right')
# st.pyplot()
# plt.show()
#Compare Actual vs. Prediction
aa=[x for x in range(180)]
plt.figure(figsize=(8,4))
plt.plot(aa, Y_test[0][:180], marker='.', label="actual")
plt.plot(aa, test_predict[:,0][:180], 'r', label="prediction")
plt.tight_layout()
sns.despine(top=True)
plt.subplots_adjust(left=0.07)
plt.ylabel('Price', size=15)
plt.xlabel('Time step', size=15)
plt.legend(fontsize=15)
st.pyplot()
# plt.show()
# Print Parameters
# Bare expressions below are Streamlit "magic": each tuple is rendered in
# the app, not discarded.
'Train Mean Absolute Error:', mean_absolute_error(Y_train[0], train_predict[:,0])
'Train Root Mean Squared Error:',np.sqrt(mean_squared_error(Y_train[0], train_predict[:,0]))
'Test Mean Squared Error:', mean_absolute_error(Y_test[0], test_predict[:,0])
'Test Root Mean Squared Error:',np.sqrt(mean_squared_error(Y_test[0], test_predict[:,0]))
|
from tile import Tile
class Item(Tile):
    """Contains all the items functions"""

    # Class-wide count of items collected so far; drives the inventory
    # slot position assigned to each newly collected item.
    itemscollected = 0

    def __init__(self, img, text):
        Tile.__init__(self, img, text)
        self.collected = False
        # Off-screen coordinates until a slot is assigned on pickup.
        self.invx = -100
        self.invy = -100

    def set_inventory_pos(self, x, y):
        """Set the position for the inventory tile"""
        self.invx = x
        self.invy = y

    def item_event(self):
        """Event who happen when an item is collected"""
        self.collected = True
        self.displayed = False
        # Slot is derived from itemscollected BEFORE the increment below;
        # keep this ordering.
        self.get_inventory_pos()
        Item.itemscollected += 1
        self.clean_a_tile()
        self.clean_a_tile("images/MacGyver.png")
        self.message_display_text(1)

    def display_inventory(self):
        """Display an inventory tile"""
        # 30x30 icon blitted at the assigned inventory slot.
        Tile.gamedisplay.blit(self.get_sized(30, 30), (self.invx, self.invy))

    def should_inv_be_displayed(self):
        """Check if an inventory tile should be displayed"""
        if self.collected:
            self.display_inventory()

    def get_inventory_pos(self):
        """Attribuate a free inventory position to tile"""
        # Slots advance 40px to the right along the bottom bar (y=565).
        self.set_inventory_pos(125 + 40 * Item.itemscollected, 565)

    def reset_item(self):
        """Reset an item to it's starting attributes"""
        self.collected = False
        self.displayed = True
        self.clean_ui()
|
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
def index(request):
    """Render the site landing page."""
    return render_to_response('index.html', {})
def index500(request):
    """Render the 500 error template directly (debug/preview view)."""
    return render_to_response('500.html', {})
def trac(request):
    """Redirect to the externally hosted Trac instance."""
    return HttpResponseRedirect('http://ericolstad.com:8080')
|
import click
from utilities import decorators
from contacts.controller import ContactsController
@click.group()
def cli():
    """ Manage my directory """
    # Group stub: subcommands attach themselves via @cli.command below.
    pass
@cli.command(name="list", help="Show my contact list")
@decorators.title
def list_contacts():
    """ Show my contact list """
    contact_list = ContactsController().list_contacts()
    click.echo("ID | FULL NAME | EMAIL | PHONE")
    for contact in contact_list:
        # Zero-pad the id to two digits so the columns line up.
        contact["id"] = "{:02d}".format(contact["id"])
        click.echo("{id} | {name} {lastname} | {email} | {phone}".format(**contact))
@cli.command(name="create", help="Create a new contact into my directory")
@click.option("-n", "--name", type=str, prompt=True, help="The contact name")
@click.option("-l", "--lastname", type=str, prompt=True, help="The contact lastname")
@click.option("-e", "--email", type=str, prompt=True, help="The contact email")
@click.option("-p", "--phone", type=str, prompt=True, help="The contact phone")
@decorators.title
def create_contact(name, lastname, email, phone):
    """Create a contact; options not supplied are prompted interactively."""
    contacts_ctrl = ContactsController()
    insertion_id = contacts_ctrl.create_contact(name, lastname, email, phone)
    # An int result is presumably the new row id; anything else is failure.
    if isinstance(insertion_id, int):
        click.echo(f"Contact was inserted with id: {insertion_id}")
    else:
        click.secho("There was a problem trying to insert the contact", fg="red")
@cli.command(name="delete", help="Delete a contact by id")
@click.argument("cid", required=True, type=int)
@decorators.title
def delete_contact(cid):
    """ Delete a contact by id """
    contacts_ctrl = ContactsController()
    contact = contacts_ctrl.get_contact(cid)
    if not contact:
        click.echo("Contact was not found".center(50))
    else:
        # abort=True exits the command if the user declines.
        click.confirm(
            f"Are you sure you want to delete: {contact['name']} {contact['lastname']}?",
            abort=True,
        )
        # BUG FIX: the original only printed "Yes" after confirmation and
        # never deleted anything.  NOTE(review): assumes ContactsController
        # exposes delete_contact(cid) alongside get/create/update — confirm.
        deleted = contacts_ctrl.delete_contact(cid)
        if deleted:
            click.echo(f"Contact with id {cid} was deleted successfully")
        else:
            click.secho("There was a problem trying to delete the contact", fg="red")
@cli.command(name="update", help="Update a contact by id")
@click.argument("cid", required=True, type=int)
@decorators.title
def update_contact(cid):
    """ Update a contact by id """
    contacts_ctrl = ContactsController()
    contact = contacts_ctrl.get_contact(cid)
    if not contact:
        click.echo("Contact was not found".center(50))
    else:
        # abort=True exits the command if the user declines.
        click.confirm(
            f"Would you like to update contact: {contact['name']}?", abort=True
        )
        # Controller prompts field-by-field using the current values.
        data_contact = contacts_ctrl.ask_for_values(contact)
        updated = contacts_ctrl.update_contact(cid, data_contact)
        if updated:
            click.echo(f"Contact with id {cid} was updated successfully")
        else:
            click.secho("There was a problem trying to update the contact", fg="red")
# CLI entry point when executed as a script.
if __name__ == "__main__":
    cli()
|
def get_triangles(n):
    """Return the triangle numbers T(0)..T(n) as exact integers.

    Uses floor division: i*(i+1) is always even so // is exact, and the
    result stays int (the original `/` produced floats under Python 3).
    """
    return [i * (i + 1) // 2 for i in range(n + 1)]
def value(word):
    """Alphabetical value of *word*: A=1, B=2, ... summed over its letters."""
    base = ord('A') - 1
    return sum(ord(ch) - base for ch in word)
if __name__ == "__main__":
    # Set membership is O(1); the original scanned a list per word.
    # T(50) = 1275 comfortably exceeds any word's letter value here.
    triangles = set(get_triangles(50))
    # `with` closes the file (the original leaked the handle).
    with open("../../problem_inputs/p042_words.txt", "r") as f:
        names = f.readline().split(",")
    # Strip the surrounding double quotes from each name.
    names = [s[1:-1] for s in names]
    answer = sum(1 for n in names if value(n) in triangles)
    print(answer)
|
import alyssa
import valentina
import peppermint
import json
from flask import Flask
from flask import request
# Single Flask application serving the PATE-01 endpoint defined below.
app = Flask(__name__)
@app.route('/PATE-01/', methods = ['POST'])
def postJsonHandler():
    """Handle a PATE-01 job request posted as JSON.

    Expects a JSON body with 'school' and 'schoolpwd' keys; dispatches
    the work via alyssa unless valentina reports it as already done.
    """
    if request.is_json:
        peppermint.ascii_art()
        content = request.get_json()
        #print(content)
        if not valentina.valentina(content['school']):
            work = alyssa.alyssa(content['school'], content['schoolpwd'])
            return str(work)
        else:
            print("Work done today for "+ content['school']+ " you may leave the stage")
            return "3"
    # BUG FIX: the original fell through and returned None for non-JSON
    # requests, which Flask turns into a 500; answer explicitly instead.
    return "Request must be JSON", 415
# NOTE(review): binds Flask's development server on all interfaces
# (0.0.0.0:8090) — not suitable for production exposure.
app.run(host='0.0.0.0', port=8090)
|
#
#
# Server side image processing
# Adding PCA dimensionality reduction
#
from __future__ import print_function
from time import time
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn.externals import joblib
from sklearn.decomposition import RandomizedPCA
import numpy as np
if __name__ == "__main__":
    # Dataset location and file structure
    dataDir = '/Users/andy/Documents/Software/imageProcessing/'
    dataFile = 'X.csv'
    labelFile = 'y.csv'
    testDataFile = 'Xtest.csv'
    testLabelFile = 'ytest.csv'
    testNameFile = 'NamesTest.csv'
    modelName = 'svmImageClassifier.pkl'
    ############################################################################
    # Load training data: rows are flattened 80x40 grayscale images.
    X = np.genfromtxt(dataDir+dataFile, delimiter=',')
    X = X[:,0:3200] # TODO Fix nan column
    y = np.genfromtxt(dataDir+labelFile, delimiter=',')
    n_samples,n_features = X.shape
    ############################################################################
    # PCA for dimensionality reduction
    ############################################################################
    n_components = 25
    pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X)
    # Persist the fitted transform so inference can reuse it.
    joblib.dump(pca, dataDir+'transform.pkl')
    eigenpeople = pca.components_.reshape((n_components, 80, 40)) # TODO: automatically get h and w
    X_train_pca = pca.transform(X)
    ############################################################################
    # Train a SVM classification model
    ############################################################################
    print("Fitting the classifier to the training set")
    t0 = time()
    param_grid = {'C': [1e2, 5e2, 1e3, 5e3, 1e4],
                  'gamma': [0.00001, 0.00005, 0.0001, 0.0005, 0.001, 0.005], }
    clf = GridSearchCV(SVC(kernel='linear', class_weight='auto'), param_grid) # 13 errors in 107 test set
    # clf = GridSearchCV(SVC(kernel='rbf', class_weight='auto'), param_grid)
    clf = clf.fit(X_train_pca, y)
    print("done in %0.3fs" % (time() - t0))
    print("Best estimator found by grid search:")
    print(clf.best_estimator_)
    # Save model to disk
    clf = clf.best_estimator_
    joblib.dump(clf, dataDir+'imageSvmClassifier.pkl')
    y_pred = clf.predict(X_train_pca)
    # NOTE(review): target_names=list(str(y)) passes individual characters
    # of the stringified label array — almost certainly not the intended
    # class names; confirm and replace with real label names.
    print(classification_report(y, y_pred, target_names=list(str(y))))
    ############################################################################
    # Quantitative evaluation of the model quality on the test set
    ############################################################################
    Xtest = np.genfromtxt(dataDir+testDataFile, delimiter=',')
    Xtest = Xtest[:, 0:3200]
    ytest = np.genfromtxt(dataDir+testLabelFile, delimiter=',')
    nameListTest = []
    with open(dataDir+testNameFile) as fName:
        for line in fName:
            nameListTest.append(line)
    print("Predicting presence of people in the test set")
    t0 = time()
    X_test_pca = pca.transform(Xtest)
    y_pred = clf.predict(X_test_pca)
    print("done in %0.3fs" % (time() - t0))
    print(y_pred)
    nn = ytest.shape[0]
    errorCount = 0
    for i in range(ytest.shape[0]):
        flag = ''
        # BUG FIX: `<>` is Python-2-only syntax (removed in Python 3);
        # `!=` is equivalent and valid in both.
        if (ytest[i] != y_pred[i]):
            errorCount += 1
            flag = '---- error ---'
        print('For '+nameListTest[i].strip()+' '+'Actual: '+str(ytest[i])+
              ' Predicted: '+str(y_pred[i])+flag)
    print(str(nn)+' test set elements')
    print(str(errorCount)+' incorrectly classified')
    # print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
|
# -*- coding: utf-8 -*-
import os
from StringIO import StringIO
import pycurl
from . import ForeignDataWrapper
from .utils import log_to_postgres
from logging import WARNING
import csv
import chardet
import os
class WebCsvFdw(ForeignDataWrapper):
    """Multicorn foreign data wrapper exposing a remote CSV file as a table.

    Options: url (required), delimiter (default ','), quotechar
    (default '"'), skip_header (default 0).
    """

    def __init__(self, fdw_options, fdw_columns):
        super(WebCsvFdw, self).__init__(fdw_options, fdw_columns)
        self.url = fdw_options["url"]
        self.delimiter = fdw_options.get("delimiter", ",")
        self.quotechar = fdw_options.get("quotechar", '"')
        self.skip_header = int(fdw_options.get('skip_header', 0))
        self.columns = fdw_columns

    def execute(self, quals, columns):
        """Fetch the CSV over HTTP and yield one sequence per table row.

        BUG FIX: removed a leftover debug line that shelled out
        (os.system) to append "123" to a hard-coded home-directory file
        on every single query.
        """
        url = self.url
        storage = StringIO()
        c = pycurl.Curl()
        c.setopt(c.URL, url)
        c.setopt(c.WRITEFUNCTION, storage.write)
        c.perform()
        c.close()
        content = storage.getvalue()
        L = content.split("\n")
        for i in range(len(L)):
            if not L[i]:
                L[i] = "NULL"
                continue
            # Re-encode non-UTF-8 lines, assuming a Big5 source (Python 2).
            if chardet.detect(L[i])['encoding']!='utf-8':
                L[i]=L[i].decode('Big5','ignore').encode('utf-8')
        # Drop the trailing element produced by the final newline split.
        reader = csv.reader(L[:len(L)-1], delimiter=self.delimiter)
        count = 0
        checked = False
        for line in reader:
            if count >= self.skip_header:
                if not checked:
                    # On first iteration, check if the lines are of the
                    # appropriate length
                    checked = True
                    if len(line) > len(self.columns):
                        log_to_postgres("There are more columns than "
                                        "defined in the table", WARNING)
                    if len(line) < len(self.columns):
                        log_to_postgres("There are less columns than "
                                        "defined in the table", WARNING)
                # Truncate to the declared column count.
                yield line[:len(self.columns)]
            count += 1

    def insert(self, values):
        """Insert a row through the SQLAlchemy table bound to this FDW."""
        self.connection.execute(self.table.insert(values=values))
|
# Databricks notebook source
# MAGIC %md # Introduction to Deep Learning Frameworks
# MAGIC
# MAGIC In this notebook, we're going to experiment with image classification using a variant of logistic regression.
# MAGIC
# MAGIC We're not going to do any deep learning quite yet; we're going to use this as an opportunity to become familiar with concepts found across deep learning frameworks. We'll cover these things:
# MAGIC
# MAGIC * [Acquiring Training Data](#acquire)
# MAGIC * [Configuring the model training process](#configure)
# MAGIC * [Defining the model](#define)
# MAGIC * [Training the model](#train)
# MAGIC * [Hyperparameter Tuning](#tune)
# MAGIC
# MAGIC First, we need to get a few Python package imports out of the way (as well as a *%matplotlib inline* to allow us to display graphs/plots inline within our notebook).
# COMMAND ----------
# MAGIC %matplotlib inline
# COMMAND ----------
import tensorflow as tf
import numpy as np
import pandas
from matplotlib import pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# COMMAND ----------
# MAGIC %md ## Acquiring Training Data
# MAGIC <a name="acquire"> </a>
# MAGIC
# MAGIC The first thing we need to do is acquire training data. In this notebook, we're going to be working with the [MNIST Database of handwritten digits](http://yann.lecun.com/exdb/mnist/). The MNIST dataset contains 60,000 examples of handwritten digits (0-9) and 10,000 seperate test examples.
# MAGIC
# MAGIC <img style="float: left;" src="https://avanadeaibootcamp.blob.core.windows.net/images/mnist.png">
# COMMAND ----------
# MAGIC %md TensorFlow's tutorials package contains helper functions to download and ingest the MNIST dataset. Run the following cell to download the MNIST dataset to your Azure Notebook environment.
# COMMAND ----------
# Download (if needed) and load MNIST with one-hot labels; 12,000 training
# examples are held out as a validation split.
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True, validation_size=12000)
# COMMAND ----------
# MAGIC %md ### Visualizing a single training example
# MAGIC
# MAGIC Let's take a look at what a single training example looks like. A single training example contains two parts - a 28 x 28 grayscale input image, and a label. We'll use the **next_batch()** function to get a single random training example.
# COMMAND ----------
# Draw one random (image, label) pair from the training split.
sample_x, sample_y = mnist.train.next_batch(1)
# COMMAND ----------
# (1, 784): one flattened 28x28 image; (1, 10): one one-hot label.
print("Image data shape: {}".format(sample_x.shape))
print("Image label shape: {}".format(sample_y.shape))
# COMMAND ----------
# MAGIC %md The shape of the input image data is pretty straightforward - we have a single 28 x 28 image - the first component of the shape (1, 784), flattened into a 784 dimensional vector - the second component of the shape (1, 784).
# MAGIC
# MAGIC But the image label is less obvious - why is a single label a 10-dimensional vector? To understand, let's take a look at the label:
# COMMAND ----------
print(sample_y[0])
# COMMAND ----------
# MAGIC %md This is what is known as a **one-hot encoding**. We're classifying against 10 possible digits (0-9); a one-hot encoded label is a vector where all elements are 0 except for the single index that maps to the categorical value of the label - e.g.:
# MAGIC
# MAGIC ```
# MAGIC 0: [1 0 0 0 0 0 0 0 0 0]
# MAGIC 1: [0 1 0 0 0 0 0 0 0 0]
# MAGIC 2: [0 0 1 0 0 0 0 0 0 0]
# MAGIC 3: [0 0 0 1 0 0 0 0 0 0]
# MAGIC 4: [0 0 0 0 1 0 0 0 0 0]
# MAGIC 5: [0 0 0 0 0 1 0 0 0 0]
# MAGIC 6: [0 0 0 0 0 0 1 0 0 0]
# MAGIC 7: [0 0 0 0 0 0 0 1 0 0]
# MAGIC 8: [0 0 0 0 0 0 0 0 1 0]
# MAGIC 9: [0 0 0 0 0 0 0 0 0 1]
# MAGIC ```
# MAGIC
# MAGIC The sample's label can be interpreted like this:
# COMMAND ----------
print('Label: {}'.format(sample_y[0]))
print('')
# np.where on the one-hot vector recovers the digit's index.
_, index = np.where(sample_y == 1)
print("Corresponds to digit: {}".format(index[0]))
# COMMAND ----------
# MAGIC %md Now that we've examined a label, let's take a quick look at the image data for a single training example. Below is a small function that reshapes the 784-dimensional flat image vector into a 28x28 image and displays it inline within your notebook.
# COMMAND ----------
def show_image(input):
    """Render a flattened 784-pixel grayscale vector as a 28x28 image inline."""
    as_grid = np.reshape(input, (28, 28))
    as_bytes = (as_grid * 255).astype(np.uint8)
    plt.imshow(as_bytes, interpolation='nearest', cmap="Greys")
    plt.show()
# COMMAND ----------
show_image(sample_x)
# COMMAND ----------
# MAGIC %md Try this with a few other sample images. To do so, scroll back up to this cell:
# MAGIC
# MAGIC ```sample_x, sample_y = mnist.train.next_batch(1)```
# MAGIC
# MAGIC Re-run that cell, then run each subsequent cell back down to this point to see the output.
# COMMAND ----------
# MAGIC %md ## Configuring the model training process
# MAGIC <a name="configure"> </a>
# MAGIC
# MAGIC Next, we'll configure ***hyperparameters*** for our model. These hyperparameters are levers & knobs we can use to control the training process, as we'll see shortly. For our model, the important hyperparameters we're working with are:
# MAGIC
# MAGIC * **Learning rate** - determines how fast weights (in case of a neural network) or the coefficients (in case of linear regression or logistic regression) change
# MAGIC * **Training iterations** - how long we train the model for
# MAGIC * **Batch size** - how many examples are included in each minibatch
# COMMAND ----------
hyperparameters = {
    #############################
    # Hyperparameters for model
    #############################
    'learning_rate': 0.01,  # SGD step size; the tuning section varies this
    'training_iters': 100000,  # compared against step * batch_size in train()
    'batch_size': 100,  # examples per minibatch
    #############################
    # Input data configuration
    #############################
    'n_pixels': 784, # MNIST data input (img shape: 28*28)
    'n_classes': 10, # MNIST total classes (0-9 digits)
    #############################
    # Debug verbosity
    #############################
    'display_step': 10  # record/print metrics every N steps
}
# COMMAND ----------
# MAGIC %md ## Defining the model
# MAGIC <a name="define"> </a>
# MAGIC
# MAGIC Now, we'll define our **model graph**.
# MAGIC
# MAGIC ### Inputs to model graph
# MAGIC
# MAGIC First, we need *entry points* for data flowing into our model. In TensorFlow, these entry points are defined with *tensor placeholders*. These represent tensors that are fed at model evaluation time with input data (e.g. training data). We'll define shapes for our input tensors which correspond to the shapes of our MNIST data - both the shape of the input image data, and the shape of the target image label.
# COMMAND ----------
# Entry points for data flowing into the graph: rows of flattened 28x28
# images and their one-hot labels. The leading None dimension is the
# batch size, left dynamic so any number of examples can be fed.
x = tf.placeholder(tf.float32, [None, hyperparameters['n_pixels']])
y = tf.placeholder(tf.float32, [None, hyperparameters['n_classes']])
# COMMAND ----------
# MAGIC %md You might ask yourself - what does **None** mean in this context? In this example, **None** is itself a placeholder for how many training examples exist within a single batch. Using **None** lets us dynamically control how many training examples flow into the model at training and at inference time.
# MAGIC
# MAGIC You can see the placeholder nature of **None** if you print the shape of ***x*** and ***y***:
# COMMAND ----------
print(x.shape)
print(y.shape)
# COMMAND ----------
# MAGIC %md ### Model weights
# MAGIC
# MAGIC The next thing we need to do is define TensorFlow **variable tensors** for our model's coefficients. If this were a neural network, these variables might represent the *weights* for a given layer. For the sake of convenience, we'll refer to them as weights (e.g. W).
# MAGIC
# MAGIC Notes regarding the shape of the variables:
# MAGIC 1. They are explicitly defined, and importantly, there are no placeholder numbers for batch size. This is because these variables contain tensors that represent a single instance of the model.
# MAGIC 2. We have two variables - W (weights) and b (biases). Note for both the output is framed in terms of number of output classes.
# COMMAND ----------
# Trainable parameters, zero-initialised: one weight per (pixel, class)
# pair plus a per-class bias. No batch dimension here - these describe a
# single instance of the model.
W = tf.Variable(tf.zeros([hyperparameters['n_pixels'], hyperparameters['n_classes']]))
b = tf.Variable(tf.zeros([hyperparameters['n_classes']]))
# COMMAND ----------
# MAGIC %md ### Model graph definition
# MAGIC
# MAGIC Then, we'll define the model itself. Note the flow here - training data enters the model through the **x** placeholder tensor; image vectors (**x**) are multiplied against the **W** weight matrix (our **W** variable tensor), which **b**, our bias vector, is added to. The result of this is passed through the TensorFlow softmax operation (which can be thought of as a node within our model graph). The result of the softmax operation is return as the output of the model graph.
# COMMAND ----------
# Create model
def model(x, W, b):
    """Softmax regression: class probabilities for each input row."""
    logits = tf.matmul(x, W) + b
    return tf.nn.softmax(logits)
# COMMAND ----------
# MAGIC %md Visually, this is what our model is doing:
# COMMAND ----------
# MAGIC %md <div>
# MAGIC <img style="float: left; width: 600px" src="https://avanadeaibootcamp.blob.core.windows.net/images/logistic_reg_vs_softmax_reg.png">
# MAGIC </div>
# MAGIC <div style="clear: both;"></div>
# MAGIC <br/>
# MAGIC From [What is Softmax Regression and How is it Related to Logistic Regression?](https://www.kdnuggets.com/2016/07/softmax-regression-related-logistic-regression.html)
# COMMAND ----------
# MAGIC %md ## Training the model
# MAGIC <a name="train"> </a>
# MAGIC
# MAGIC ### Preparing to train the model
# MAGIC
# MAGIC We're almost ready to train the model. Before we do, we need to "inflate" or construct our graph. Importantly, no data will actually flow through the graph yet - data does not flow within a TensorFlow graph until it is run within the context of a TensorFlow ***session***. But to run a model graph in a session, first, we need to construct the graph.
# MAGIC
# MAGIC We also need to define a few critical nodes in our graph:
# MAGIC
# MAGIC * **Cost** - this *operation* node is a function, evaluated *every* training iteration, that calculates the cost (loss/error) for the network given the current minibatch's training examples as the input to the network, evaluated against the current minibatch's training targets.
# MAGIC * **Optimizer** - this node is the TensorFlow optimizer that implements the gradient descent algorithm that is used during training to update the weights of the model. Note that we're effectively implementing ***minibatch stochastic*** gradient descent by virtue of our incremental approach to updating the weights (minibatch by minibatch of training examples).
# MAGIC * **Accuracy** - this *operation* node is a function, evaluated periodically during training, that calculates accuracy for the ***current*** minibatch.
# COMMAND ----------
def construct_training_graph(hyperparameters):
    """Assemble the softmax-regression training graph.

    Returns a dict with the prediction op, cross-entropy cost, SGD
    optimizer step, minibatch accuracy op, and the variable initializer.
    """
    prediction = model(x, W, b)
    # Cross-entropy against the one-hot targets, averaged over the batch.
    per_example_loss = -tf.reduce_sum(y * tf.log(prediction), reduction_indices=1)
    cross_entropy = tf.reduce_mean(per_example_loss)
    # One SGD parameter update per run of this op.
    sgd_step = tf.train.GradientDescentOptimizer(hyperparameters['learning_rate']).minimize(cross_entropy)
    # Fraction of the batch whose predicted class matches the label.
    hits = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
    batch_accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))
    initializer = tf.global_variables_initializer()
    return {
        'pred': prediction,
        'cost': cross_entropy,
        'optimizer': sgd_step,
        'accuracy': batch_accuracy,
        'init': initializer,
    }


# Build the graph once with the current hyperparameters.
training_graph = construct_training_graph(hyperparameters)
# COMMAND ----------
# MAGIC %md ### TensorFlow Session and Model Training
# MAGIC
# MAGIC We're now ready to create a TensorFlow [session](https://www.tensorflow.org/programmers_guide/graphs) and run our SGD training algorithm. Each iteration, we:
# MAGIC
# MAGIC - Extract **batch_size** training examples (**batch_x**, **batch_y**)
# MAGIC - Run a single step of gradient descent optimization, feeding the batch into the graph via **feed_dict**.
# MAGIC - Periodically, we'll calculate and print metrics for the current batch.
# MAGIC
# MAGIC Once we've iterated **training_iter** steps, we print a set of final metrics for a select number of images from the separate MNIST database test set.
# MAGIC
# MAGIC Now, run the cell below to train your model.
# COMMAND ----------
def train(training_graph, hyperparameters, verbose=True):
    """Run minibatch SGD on the MNIST training set.

    Args:
        training_graph: dict of graph nodes from construct_training_graph()
            ('init', 'cost', 'optimizer', 'accuracy').
        hyperparameters: dict with 'batch_size', 'training_iters' and
            'display_step' ('learning_rate' is already baked into the
            optimizer node).
        verbose: when True, print minibatch metrics every display_step steps.

    Returns:
        (train_costs, valid_costs): lists of periodic minibatch costs,
        one entry per display_step interval.
    """
    # Periodic metrics, appended every `display_step` steps.
    train_costs = []
    valid_costs = []
    train_accs = []
    valid_accs = []
    # Nodes from training graph
    init = training_graph['init']
    cost = training_graph['cost']
    accuracy = training_graph['accuracy']
    optimizer = training_graph['optimizer']
    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)
        step = 1
        # Stop once roughly training_iters examples have been consumed.
        while step * hyperparameters['batch_size'] < hyperparameters['training_iters']:
            x_train, y_train = mnist.train.next_batch(hyperparameters['batch_size'])
            # Run single step of gradient descent optimization
            sess.run(optimizer, feed_dict={x: x_train, y: y_train})
            # Periodically calculate current batch loss and accuracy
            if step % hyperparameters['display_step'] == 0:
                # Calculate training loss, accuracy
                cost_train, acc_train = sess.run([cost, accuracy], feed_dict={x: x_train, y: y_train})
                train_costs.append(cost_train)
                train_accs.append(acc_train)
                # Calculate validation loss, accuracy
                x_valid, y_valid = mnist.validation.next_batch(hyperparameters['batch_size'])
                cost_valid, acc_valid = sess.run([cost, accuracy], feed_dict={x: x_valid, y: y_valid})
                valid_costs.append(cost_valid)
                valid_accs.append(acc_valid)
                if (verbose):
                    # BUG FIX: the validation accuracy previously printed
                    # acc_train; it now reports acc_valid.
                    print("Iter " + str(step * hyperparameters['batch_size']) + " Train Cost: " + \
                          "{:.5f}".format(cost_train) + " Accuracy: " + \
                          "{:.2f}".format(acc_train * 100.0) + "% Validation Cost: " + \
                          "{:.5f}".format(cost_valid) + " Accuracy: " + \
                          "{:.2f}".format(acc_valid * 100.0) + "%")
            step += 1
        print('')
        print("Optimization Finished!")
        print('')
        # Calculate accuracy for withheld test image set
        acc_test = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Testing Accuracy: " + "{:.2f}".format(acc_test * 100.0) + "%")
    return train_costs, valid_costs
# Baseline run with the default learning rate of 0.01.
hyperparameters['learning_rate'] = 0.01
training_graph = construct_training_graph(hyperparameters)
train_costs, valid_costs = train(training_graph, hyperparameters)
# COMMAND ----------
# MAGIC %md ## Hyperparameter Tuning
# MAGIC <a name="tune"> </a>
# MAGIC
# MAGIC Congratulations, at this point, you've trained a working model with TensorFlow. How did your model do? You're likely in the 86-88% accuracy range on the withheld test set.
# MAGIC
# MAGIC The question now is - can we do any better? What are the levers and switches we can play with to improve the final accuracy of the model on the test data?
# MAGIC
# MAGIC Let's take a look at the end-to-end process of training - specifically, the **cost over time** on the training and validation sets. In particular, we're interested how the **learning rate** we defined much earlier impacts the process of training.
# COMMAND ----------
def plot_costs(train_costs, valid_costs):
    """Plot the periodic training and validation cost series on one axis."""
    fig, ax = plt.subplots()
    plt.rcParams["figure.figsize"] = [12, 9]
    for series, tag in ((train_costs, 'Training Cost'), (valid_costs, 'Validation Cost')):
        plt.plot(series, label=tag)
    plt.title('Cost over time during training')
    ax.legend(loc='upper right')


# Plot the training and validation costs over time of the model we just trained
plot_costs(train_costs, valid_costs)
# COMMAND ----------
# MAGIC %md
# COMMAND ----------
# MAGIC %md What if we try a different learning rate? Let's try a much smaller learning rate - 0.00000001:
# COMMAND ----------
# Experiment: a tiny learning rate (1e-8) - updates are minuscule, so
# the cost curves should be nearly flat.
hyperparameters['learning_rate'] = 0.00000001
training_graph = construct_training_graph(hyperparameters)
train_costs, valid_costs = train(training_graph, hyperparameters, verbose=False)
plot_costs(train_costs, valid_costs)
# COMMAND ----------
# MAGIC %md Not particularly great. Why is performance worse in this case?
# MAGIC
# MAGIC The intuition here is: because the learning rate impacts how fast weights are updated, a much smaller learning rate means the rate of learning itself is much slower. You can see that in the slopes of the training/validation curves - a steeper curve - a quicker reduction in cost - roughly maps to faster learning. We might be able to get back up to that 87% range if we trained longer, but that isn't the ideal solution here.
# MAGIC
# MAGIC What if we try a much higher learning rate? Let's try a learning rate of 1.0:
# COMMAND ----------
# MAGIC %md
# COMMAND ----------
# Experiment: a large learning rate (1.0) - faster cost reduction,
# at the price of a noisier curve.
hyperparameters['learning_rate'] = 1.0
training_graph = construct_training_graph(hyperparameters)
train_costs, valid_costs = train(training_graph, hyperparameters, verbose=False)
plot_costs(train_costs, valid_costs)
# COMMAND ----------
# MAGIC %md The end result here is better - you're likely hitting accuracy around the 90-91% range. Learning proceeds much faster (steep drop in cost early in training) - but notice how the cost is a bit less stable than our original learning rate (of 0.01).
# MAGIC
# MAGIC Let's go even faster now - let's double the learning rate to 2.0:
# COMMAND ----------
# Experiment: doubling the learning rate to 2.0.
hyperparameters['learning_rate'] = 2.0
training_graph = construct_training_graph(hyperparameters)
train_costs, valid_costs = train(training_graph, hyperparameters, verbose=False)
plot_costs(train_costs, valid_costs)
# COMMAND ----------
# MAGIC %md Hmm - it likely didn't help as much as you might have hoped - and the magnitude of the instability is a bit higher than before. What if we go even faster, with an even higher learning rate - 3.0:
# COMMAND ----------
# Experiment: learning rate 3.0 - per the notebook text, training is
# expected to destabilise at this setting.
hyperparameters['learning_rate'] = 3.0
training_graph = construct_training_graph(hyperparameters)
train_costs, valid_costs = train(training_graph, hyperparameters, verbose=False)
plot_costs(train_costs, valid_costs)
# COMMAND ----------
# MAGIC %md The accuracy has most likely collapsed here into the < 10% range. So for this problem, it seems there is a rough upper bound somewhere between 2.0 and 3.0.
# MAGIC
# MAGIC We'll try one more learning rate - 0.1. This is a bit closer to the sweet spot we want to be in for this particular problem, and you should most likely get accuracy in the 90-91% range:
# COMMAND ----------
# Final experiment: learning rate 0.1, closer to the sweet spot for
# this problem according to the accompanying text.
hyperparameters['learning_rate'] = 0.1
training_graph = construct_training_graph(hyperparameters)
train_costs, valid_costs = train(training_graph, hyperparameters, verbose=False)
plot_costs(train_costs, valid_costs)
# COMMAND ----------
# MAGIC %md ## Conclusion
# COMMAND ----------
# MAGIC %md Congratulations, you've completed the Introduction to Deep Learning Frameworks, having covered these concepts:
# MAGIC
# MAGIC * [Acquiring Training Data](#acquire)
# MAGIC * [Configuring the model training process](#configure)
# MAGIC * [Defining the model](#define)
# MAGIC * [Training the model](#train)
# MAGIC * [Hyperparameter Tuning](#tune)
# COMMAND ----------
|
#!/usr/bin/env python3
from subprocess import Popen, PIPE
import os
import sys
import urllib.parse
import urllib.request
params = {
    "count_active": "on",
    "count_enabled": "on",
}
query = urllib.parse.urlencode(params)
url = "https://myosg.grid.iu.edu/miscproject/xml?count_sg_1&%s" % query

# Fetch the MyOSG project listing as UTF-8 text.
with urllib.request.urlopen(url) as req:
    data = req.read().decode("utf-8")

# xmllint honours XMLLINT_INDENT for its --format output.
newenv = os.environ.copy()
newenv["XMLLINT_INDENT"] = "\t"

# Pretty-print the XML to stdout. Pass the command as an argument list
# with shell=False: no intermediate shell, so nothing can mis-parse the
# command string.
proc = Popen(["xmllint", "--format", "-"], stdin=PIPE, stdout=sys.stdout,
             shell=False, encoding="utf-8", env=newenv)
proc.communicate(data)
|
import numpy as np
import matplotlib.pyplot as plt
# Build a 24x24 Game-of-Life board seeded with a pentadecathlon pattern:
# a vertical run of 10 live cells in column 10 with the cells at rows 12
# and 17 pushed out sideways (columns 9 and 11).
pentadecathlon = np.zeros( ( 24,24 ) )
pentadecathlon[ 10:20,10 ] = 1
pentadecathlon[ 12, 9 ] = 1
pentadecathlon[ 12,10 ] = 0
pentadecathlon[ 12,11 ] = 1
pentadecathlon[ 17, 9 ] = 1
pentadecathlon[ 17,10 ] = 0
pentadecathlon[ 17,11 ] = 1

# History of board states, one entry per generation.
pd_list = [pentadecathlon]
nt = 30  # number of generations to simulate
for t in range(0,nt ):
    # NOTE(review): `evolve` is not defined in this file - presumably a
    # Game-of-Life step function provided elsewhere; confirm it is in scope.
    evolved = evolve(pd_list[-1]) # evolve the simulation and append it to `pd_list`
    pd_list.append(evolved)
# Show the final generation.
plt.imshow( pd_list[ -1 ] )
plt.show() |
from django.conf import settings
from osgeo import gdal,osr
from osgeo.gdalnumeric import *
from osgeo.gdalconst import *
import numpy as np
import os
import sys
def to_rgb(bandarray):
    """Map a single-band raster array to five-class (r, g, b) colour arrays.

    Cell values are normalised to percent of the array maximum, binned
    into five contiguous classes, and each class is assigned a fixed
    colour constant per channel. Non-positive cells are left
    unclassified (so they stay near 0 and can be rendered transparent
    downstream).

    Returns:
        (r_ba, g_ba, b_ba): float arrays, one per colour channel,
        scaled back by /100.
    """
    maxvalue = np.amax(bandarray)
    # Percent of maximum: positive cells land in (0, 100].
    scaled = np.divide(bandarray, maxvalue) * 100

    # Class boundaries and per-channel colour constants. The bins are
    # contiguous (lo inclusive, hi exclusive; the last bin includes 100).
    # BUG FIX: the original strict '<'/'>' comparisons left exact
    # boundary values unclassified - including the maximum cell, which
    # always normalises to exactly 100 and was therefore never coloured.
    bounds = ((0, 33), (33, 50), (50, 66), (66, 83), (83, 100))
    reds = (1100, 1400, 24500, 23000, 19400)
    greens = (4400, 19600, 21500, 14200, 8200)
    blues = (12200, 6500, 700, 2800, 6000)

    def colourise(class_values):
        # Build one channel. Masks are computed on the normalised data,
        # so an earlier assignment can never be re-matched by a later bin.
        channel = scaled * 1
        for (lo, hi), value in zip(bounds, class_values):
            if lo == 0:
                mask = (scaled > lo) & (scaled < hi)
            elif hi == 100:
                mask = (scaled >= lo) & (scaled <= hi)
            else:
                mask = (scaled >= lo) & (scaled < hi)
            channel[mask] = value
        return np.divide(channel, 100)

    return (colourise(reds), colourise(greens), colourise(blues))
def project(inputdsname, rasterxsize, rasterysize, datatype, projection, geotransform):
    """Reproject a raster in MEDIA_ROOT onto the given grid and projection.

    Creates a single-band GTiff named '<inputdsname>projected.tiff' in
    MEDIA_ROOT with the supplied size/geotransform/projection, bilinear
    resampling, and returns the new file name.
    """
    inputds = gdal.Open(os.path.join(settings.MEDIA_ROOT, inputdsname))
    outputfilename = inputdsname + 'projected.tiff'
    outputfile = os.path.join(settings.MEDIA_ROOT, outputfilename)
    driver = gdal.GetDriverByName('GTiff')
    output = driver.Create(outputfile, rasterxsize, rasterysize, 1, datatype)
    output.SetGeoTransform(geotransform)
    output.SetProjection(projection)
    # BUG FIX: the module never imports the `gdalconst` module itself
    # (only its names via `from osgeo.gdalconst import *`), so the
    # original `gdalconst.GRA_Bilinear` raised NameError. The same
    # constant is exposed on the gdal module.
    gdal.ReprojectImage(inputds, output, inputds.GetProjection(), projection, gdal.GRA_Bilinear)
    # Drop the reference so the GTiff is flushed/closed before callers reopen it.
    del output
    return outputfilename
def calculatedr(timenow, DW, DF_name, RW, RF_name, AW, AF_name, SW, SF_name, TW, TF_name, IW, IF_name, CW, CF_name, UW, UF_name):
    """Compute a weighted DRASTIC raster from up to eight input layers.

    Each (weight, filename) pair is one DRASTIC factor; filenames equal
    to 'blank' (or layers that fail to load) contribute a zero layer and
    are removed from the averaging divisor. Writes a GTiff with the raw
    result, a 4-band RGBA GTiff render and a PNG copy into MEDIA_ROOT.

    Returns:
        (output_url, render_url, png_url, minx, miny, maxx, maxy).
    """
    divisor = 7

    # Base layer: every other raster is reprojected onto this grid.
    DF_ds = gdal.Open(os.path.join(settings.MEDIA_ROOT, DF_name))
    projection = DF_ds.GetProjection()
    geotransform = DF_ds.GetGeoTransform()
    rasterxsize = DF_ds.RasterXSize
    rasterysize = DF_ds.RasterYSize
    minx = geotransform[0]
    maxy = geotransform[3]
    maxx = minx + geotransform[1] * DF_ds.RasterXSize
    miny = maxy + geotransform[5] * DF_ds.RasterYSize
    DF_bd = DF_ds.GetRasterBand(1)
    datatype = DF_bd.DataType
    DF_ba = BandReadAsArray(DF_bd)

    # 0/1 mask of in-range cells. NOTE(review): kept for parity with the
    # original, where the `CALC_ba * CLIP_ba` step is commented out.
    CLIP_ba = DF_ba * 1
    CLIP_ba[(CLIP_ba > 0) & (CLIP_ba < 255)] = 1
    CLIP_ba[CLIP_ba > 254] = 0

    def load_band(name, count_in_divisor=True):
        # Reproject `name` onto the base grid and return its array with
        # negatives clamped to 0. Falls back to an all-zero layer (and,
        # when count_in_divisor, removes the layer from the divisor)
        # when the name is 'blank' or loading fails for any reason.
        nonlocal divisor
        if name != 'blank':
            try:
                projected = project(name, rasterxsize, rasterysize, datatype, projection, geotransform)
                ds = gdal.Open(os.path.join(settings.MEDIA_ROOT, projected))
                ba = BandReadAsArray(ds.GetRasterBand(1))
                ba[ba < 0] = 0
                return ba
            except Exception:
                pass
        if count_in_divisor:
            divisor = divisor - 1
        return DF_ba * 0

    RF_ba = load_band(RF_name)
    AF_ba = load_band(AF_name)
    SF_ba = load_band(SF_name)
    TF_ba = load_band(TF_name)
    IF_ba = load_band(IF_name)
    CF_ba = load_band(CF_name)
    # The urban-layer term is currently disabled below, so it never
    # affects the divisor either.
    UF_ba = load_band(UF_name, count_in_divisor=False)

    # Weighted sum of the factor layers (UF term intentionally disabled).
    CALC_ba = DF_ba * DW + RF_ba * RW + AF_ba * AW + SF_ba * SW + TF_ba * TW + IF_ba * IW + CF_ba * CW  # + UF_ba * UW
    CALC_ba = CALC_ba.astype(float)
    CALC_ba = np.divide(CALC_ba, divisor)

    # Classify into fixed colours for rendering.
    r_ba, g_ba, b_ba = to_rgb(CALC_ba)

    # Alpha channel: fully opaque wherever the red band is non-zero.
    trsp_ba = r_ba * r_ba * r_ba
    trsp_ba[trsp_ba > 0] = 1
    trsp_ba = trsp_ba * 255

    # Raw single-band result.
    outdriver = gdal.GetDriverByName('GTiff')
    output_ds_name = timenow + '-output-drastic' + '.tiff'
    print("File processed by Gdal GEOTIFF saved by name" + output_ds_name)
    output_ds = outdriver.Create(os.path.join(settings.MEDIA_ROOT, output_ds_name), rasterxsize, rasterysize, 1, datatype)
    CopyDatasetInfo(DF_ds, output_ds)
    output_bd = output_ds.GetRasterBand(1)
    BandWriteArray(output_bd, CALC_ba)

    # RGBA render.
    renderdriver = gdal.GetDriverByName('GTiff')
    render_ds_name = timenow + '-render-drastic' + '.tiff'
    render_ds = renderdriver.Create(os.path.join(settings.MEDIA_ROOT, render_ds_name), rasterxsize, rasterysize, 4, gdal.GDT_Int32)
    CopyDatasetInfo(DF_ds, render_ds)
    render_ds.GetRasterBand(1).WriteArray(r_ba)
    render_ds.GetRasterBand(2).WriteArray(g_ba)
    render_ds.GetRasterBand(3).WriteArray(b_ba)
    render_ds.GetRasterBand(4).WriteArray(trsp_ba)
    render_ds.SetGeoTransform(geotransform)
    render_ds.SetProjection(projection)

    # PNG copy of the render for web display.
    pngdriver = gdal.GetDriverByName('PNG')
    png_ds_name = timenow + '-render-drastic' + '.png'
    png_ds = pngdriver.CreateCopy(os.path.join(settings.MEDIA_ROOT, png_ds_name), render_ds, 0)
    CopyDatasetInfo(DF_ds, png_ds)

    return (os.path.join(settings.MEDIA_URL, output_ds_name), os.path.join(settings.MEDIA_URL, render_ds_name), os.path.join(settings.MEDIA_URL, png_ds_name), minx, miny, maxx, maxy)
def calculatega(timenow, GW, GF_name, HW, HF_name, GTW, GTF_name, GDW, GDF_name, IW, IF_name, TW, TF_name, UW, UF_name):
    """Compute a weighted GALDIT raster from up to seven input layers.

    Each (weight, filename) pair is one GALDIT factor; filenames equal
    to 'blank' (or layers that fail to load) contribute a zero layer and
    are removed from the averaging divisor. Writes a GTiff with the raw
    result, a 4-band RGBA GTiff render and a PNG copy into MEDIA_ROOT.

    BUG FIX: the original read the H layer from `RF_ds` (a leftover from
    calculatedr that is undefined here), so the NameError was swallowed
    by the bare except and the H layer was silently replaced with zeros.
    It is now read from its own dataset.

    Returns:
        (output_url, render_url, png_url, minx, miny, maxx, maxy).
    """
    divisor = 6

    # Base layer: every other raster is reprojected onto this grid.
    GF_ds = gdal.Open(os.path.join(settings.MEDIA_ROOT, GF_name))
    projection = GF_ds.GetProjection()
    geotransform = GF_ds.GetGeoTransform()
    rasterxsize = GF_ds.RasterXSize
    rasterysize = GF_ds.RasterYSize
    minx = geotransform[0]
    maxy = geotransform[3]
    maxx = minx + geotransform[1] * GF_ds.RasterXSize
    miny = maxy + geotransform[5] * GF_ds.RasterYSize
    GF_bd = GF_ds.GetRasterBand(1)
    datatype = GF_bd.DataType
    GF_ba = BandReadAsArray(GF_bd)

    # 0/1 mask of in-range cells; applied to the weighted sum below.
    CLIP_ba = GF_ba * 1
    CLIP_ba[(CLIP_ba > 0) & (CLIP_ba < 255)] = 1
    CLIP_ba[CLIP_ba > 254] = 0

    def load_band(name, count_in_divisor=True):
        # Reproject `name` onto the base grid and return its array with
        # negatives clamped to 0. Falls back to an all-zero layer (and,
        # when count_in_divisor, removes the layer from the divisor)
        # when the name is 'blank' or loading fails for any reason.
        nonlocal divisor
        if name != 'blank':
            try:
                projected = project(name, rasterxsize, rasterysize, datatype, projection, geotransform)
                ds = gdal.Open(os.path.join(settings.MEDIA_ROOT, projected))
                ba = BandReadAsArray(ds.GetRasterBand(1))
                ba[ba < 0] = 0
                return ba
            except Exception:
                pass
        if count_in_divisor:
            divisor = divisor - 1
        return GF_ba * 0

    HF_ba = load_band(HF_name)
    GTF_ba = load_band(GTF_name)
    GDF_ba = load_band(GDF_name)
    IF_ba = load_band(IF_name)
    TF_ba = load_band(TF_name)
    # The U-layer term is currently disabled below, so it never affects
    # the divisor either.
    UF_ba = load_band(UF_name, count_in_divisor=False)

    # Weighted sum of the factor layers (UF term intentionally disabled).
    CALC_ba = GF_ba * GW + HF_ba * HW + GTF_ba * GTW + GDF_ba * GDW + IF_ba * IW + TF_ba * TW  # + UF_ba * UW
    CALC_ba = CALC_ba.astype(float)
    CALC_ba = CALC_ba * CLIP_ba
    # The GALDIT average uses triple the per-layer divisor.
    divisor = divisor * 3
    CALC_ba = np.divide(CALC_ba, divisor)

    # Classify into fixed colours for rendering.
    r_ba, g_ba, b_ba = to_rgb(CALC_ba)

    # Alpha channel: fully opaque wherever the red band is non-zero.
    trsp_ba = r_ba * r_ba * r_ba
    trsp_ba[trsp_ba > 0] = 1
    trsp_ba = trsp_ba * 255

    # Raw single-band result.
    outdriver = gdal.GetDriverByName('GTiff')
    output_ds_name = timenow + '-output-galdit' + '.tiff'
    print("File processed by Gdal GEOTIFF saved by name" + output_ds_name)
    output_ds = outdriver.Create(os.path.join(settings.MEDIA_ROOT, output_ds_name), rasterxsize, rasterysize, 1, datatype)
    CopyDatasetInfo(GF_ds, output_ds)
    output_bd = output_ds.GetRasterBand(1)
    BandWriteArray(output_bd, CALC_ba)

    # RGBA render.
    renderdriver = gdal.GetDriverByName('GTiff')
    render_ds_name = timenow + '-render-galdit' + '.tiff'
    render_ds = renderdriver.Create(os.path.join(settings.MEDIA_ROOT, render_ds_name), rasterxsize, rasterysize, 4, gdal.GDT_Int32)
    CopyDatasetInfo(GF_ds, render_ds)
    render_ds.GetRasterBand(1).WriteArray(r_ba)
    render_ds.GetRasterBand(2).WriteArray(g_ba)
    render_ds.GetRasterBand(3).WriteArray(b_ba)
    render_ds.GetRasterBand(4).WriteArray(trsp_ba)
    render_ds.SetGeoTransform(geotransform)
    render_ds.SetProjection(projection)

    # PNG copy of the render for web display.
    pngdriver = gdal.GetDriverByName('PNG')
    png_ds_name = timenow + '-render-galdit' + '.png'
    png_ds = pngdriver.CreateCopy(os.path.join(settings.MEDIA_ROOT, png_ds_name), render_ds, 0)
    CopyDatasetInfo(GF_ds, png_ds)

    return (os.path.join(settings.MEDIA_URL, output_ds_name), os.path.join(settings.MEDIA_URL, render_ds_name), os.path.join(settings.MEDIA_URL, png_ds_name), minx, miny, maxx, maxy)
|
#!/usr/bin/python
#\file head2.py
#\brief Baxter: head control 2
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Oct.09, 2015
import roslib
import rospy
import actionlib
import control_msgs.msg
import baxter_interface
import time, math, sys
if __name__=='__main__':
    rospy.init_node('baxter_test')
    # Enable the robot, remembering whether it was already enabled so
    # the shutdown hook can restore the original state.
    rs= baxter_interface.RobotEnable(baxter_interface.CHECK_VERSION)
    init_state= rs.state().enabled
    def clean_shutdown():
        # Only disable the robot if it was disabled when we started.
        if not init_state:
            print 'Disabling robot...'
            rs.disable()
    rospy.on_shutdown(clean_shutdown)
    rs.enable()
    # Exercise the head interface: pan to several angles at different
    # speeds, nodding in between, and print the pan angle after each move.
    head= baxter_interface.Head()
    head.set_pan(0.0) #NOTE: Default speed=100, timeout=10
    head.command_nod()
    head.set_pan(0.0)
    print 'Head pan=',head.pan()
    head.set_pan(-1.57, speed=10, timeout=10) #NOTE: Set timeout=0 for non-blocking
    print 'Head pan=',head.pan()
    head.set_pan(0.0, speed=80)
    print 'Head pan=',head.pan()
    head.set_pan(1.0, speed=20)
    print 'Head pan=',head.pan()
    # Non-blocking pan (timeout=0): the nod and print run while the head
    # is still moving, so the printed angle may be mid-motion.
    head.set_pan(0.0, speed=10, timeout=0)
    head.command_nod()
    print 'Head pan=',head.pan()
    head.set_pan(0.0)
    head.command_nod()
    rospy.signal_shutdown('Done.')
|
from ED6ScenarioHelper import *
def main():
# 蔡斯
CreateScenaFile(
FileName = 'T3222 ._SN',
MapName = 'Zeiss',
Location = 'T3222.x',
MapIndex = 1,
MapDefaultBGM = "ed60084",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'拜舍尔', # 9
'艾德', # 10
'林', # 11
'莉西亚', # 12
'希利尔', # 13
'艾缇', # 14
'拉克', # 15
'希玛', # 16
'库安', # 17
'艾德尔', # 18
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH01040 ._CH', # 00
'ED6_DT07/CH01270 ._CH', # 01
'ED6_DT07/CH01030 ._CH', # 02
'ED6_DT07/CH01150 ._CH', # 03
'ED6_DT07/CH01120 ._CH', # 04
'ED6_DT07/CH01130 ._CH', # 05
'ED6_DT07/CH01160 ._CH', # 06
'ED6_DT07/CH01020 ._CH', # 07
'ED6_DT07/CH01060 ._CH', # 08
'ED6_DT07/CH01130 ._CH', # 09
)
AddCharChipPat(
'ED6_DT07/CH01040P._CP', # 00
'ED6_DT07/CH01270P._CP', # 01
'ED6_DT07/CH01030P._CP', # 02
'ED6_DT07/CH01150P._CP', # 03
'ED6_DT07/CH01120P._CP', # 04
'ED6_DT07/CH01130P._CP', # 05
'ED6_DT07/CH01160P._CP', # 06
'ED6_DT07/CH01020P._CP', # 07
'ED6_DT07/CH01060P._CP', # 08
'ED6_DT07/CH01130P._CP', # 09
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 4,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 5,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 2,
ChipIndex = 0x2,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 6,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 3,
ChipIndex = 0x3,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 7,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 4,
ChipIndex = 0x4,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 8,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 5,
ChipIndex = 0x5,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 9,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 6,
ChipIndex = 0x6,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 10,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 7,
ChipIndex = 0x7,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 8,
ChipIndex = 0x8,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 13,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 9,
ChipIndex = 0x9,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 3,
)
DeclActor(
TriggerX = 2440,
TriggerZ = 250,
TriggerY = 2960,
TriggerRange = 400,
ActorX = 2550,
ActorZ = 1750,
ActorY = 4470,
Flags = 0x7E,
TalkScenaIndex = 0,
TalkFunctionIndex = 11,
Unknown_22 = 0,
)
ScpFunction(
"Function_0_25E", # 00, 0
"Function_1_3EA", # 01, 1
"Function_2_3EB", # 02, 2
"Function_3_401", # 03, 3
"Function_4_4B4", # 04, 4
"Function_5_4BB", # 05, 5
"Function_6_4C2", # 06, 6
"Function_7_5E9", # 07, 7
"Function_8_5F0", # 08, 8
"Function_9_5F7", # 09, 9
"Function_10_5FE", # 0A, 10
"Function_11_605", # 0B, 11
"Function_12_60A", # 0C, 12
"Function_13_FD1", # 0D, 13
)
def Function_0_25E(): pass
label("Function_0_25E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC0, 1)), scpexpr(EXPR_END)), "loc_27E")
ClearChrFlags(0xF, 0x80)
SetChrPos(0xF, 2550, 250, 4470, 192)
Jump("loc_3E9")
label("loc_27E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xAB, 5)), scpexpr(EXPR_END)), "loc_29E")
ClearChrFlags(0xF, 0x80)
SetChrPos(0xF, 2550, 250, 4470, 192)
Jump("loc_3E9")
label("loc_29E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA6, 7)), scpexpr(EXPR_END)), "loc_2BE")
ClearChrFlags(0xF, 0x80)
SetChrPos(0xF, 2550, 250, 4470, 192)
Jump("loc_3E9")
label("loc_2BE")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA5, 0)), scpexpr(EXPR_END)), "loc_2F4")
ClearChrFlags(0xF, 0x80)
SetChrPos(0xF, 2550, 250, 4470, 192)
ClearChrFlags(0x10, 0x80)
SetChrPos(0x10, -960, 250, -2580, 188)
Jump("loc_3E9")
label("loc_2F4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA4, 6)), scpexpr(EXPR_END)), "loc_32A")
ClearChrFlags(0xF, 0x80)
SetChrPos(0xF, 2550, 250, 4470, 192)
ClearChrFlags(0x10, 0x80)
SetChrPos(0x10, -5240, 500, -330, 108)
Jump("loc_3E9")
label("loc_32A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA4, 2)), scpexpr(EXPR_END)), "loc_376")
ClearChrFlags(0xA, 0x80)
SetChrPos(0xA, 590, 250, 2540, 10)
ClearChrFlags(0xF, 0x80)
SetChrPos(0xF, 2550, 250, 4470, 192)
ClearChrFlags(0x10, 0x80)
SetChrPos(0x10, 4130, 0, -2220, 291)
Jump("loc_3E9")
label("loc_376")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA3, 7)), scpexpr(EXPR_END)), "loc_3AC")
ClearChrFlags(0x11, 0x80)
SetChrPos(0x11, -3250, 250, 4820, 348)
ClearChrFlags(0xF, 0x80)
SetChrPos(0xF, 2550, 250, 4470, 192)
Jump("loc_3E9")
label("loc_3AC")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA3, 2)), scpexpr(EXPR_END)), "loc_3CC")
ClearChrFlags(0xF, 0x80)
SetChrPos(0xF, 2550, 250, 4470, 192)
Jump("loc_3E9")
label("loc_3CC")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 2)), scpexpr(EXPR_END)), "loc_3E9")
ClearChrFlags(0xF, 0x80)
SetChrPos(0xF, 2550, 250, 4470, 192)
label("loc_3E9")
Return()
# Function_0_25E end
def Function_1_3EA(): pass
label("Function_1_3EA")
Return()
# Function_1_3EA end
def Function_2_3EB(): pass
label("Function_2_3EB")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_400")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("Function_2_3EB")
label("loc_400")
Return()
# Function_2_3EB end
def Function_3_401(): pass
label("Function_3_401")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC0, 1)), scpexpr(EXPR_END)), "loc_40E")
Jump("loc_4B0")
label("loc_40E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xAB, 5)), scpexpr(EXPR_END)), "loc_418")
Jump("loc_4B0")
label("loc_418")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA6, 7)), scpexpr(EXPR_END)), "loc_422")
Jump("loc_4B0")
label("loc_422")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA5, 0)), scpexpr(EXPR_END)), "loc_42C")
Jump("loc_4B0")
label("loc_42C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA4, 6)), scpexpr(EXPR_END)), "loc_436")
Jump("loc_4B0")
label("loc_436")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA4, 2)), scpexpr(EXPR_END)), "loc_440")
Jump("loc_4B0")
label("loc_440")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA3, 7)), scpexpr(EXPR_END)), "loc_49F")
ChrTalk(
0xFE,
"嘿~真有意思。\x02",
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"这个就是东方的陶器吧\x01",
"素朴又可爱呢。\x02",
)
)
CloseMessageWindow()
Jump("loc_4B0")
label("loc_49F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA3, 2)), scpexpr(EXPR_END)), "loc_4A9")
Jump("loc_4B0")
label("loc_4A9")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 2)), scpexpr(EXPR_END)), "loc_4B0")
label("loc_4B0")
TalkEnd(0xFE)
Return()
# Function_3_401 end
def Function_4_4B4(): pass
label("Function_4_4B4")
TalkBegin(0xFE)
TalkEnd(0xFE)
Return()
# Function_4_4B4 end
def Function_5_4BB(): pass
label("Function_5_4BB")
TalkBegin(0xFE)
TalkEnd(0xFE)
Return()
# Function_5_4BB end
def Function_6_4C2(): pass
label("Function_6_4C2")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC0, 1)), scpexpr(EXPR_END)), "loc_4CF")
Jump("loc_5E5")
label("loc_4CF")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xAB, 5)), scpexpr(EXPR_END)), "loc_4D9")
Jump("loc_5E5")
label("loc_4D9")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA6, 7)), scpexpr(EXPR_END)), "loc_4E3")
Jump("loc_5E5")
label("loc_4E3")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA5, 0)), scpexpr(EXPR_END)), "loc_4ED")
Jump("loc_5E5")
label("loc_4ED")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA4, 6)), scpexpr(EXPR_END)), "loc_4F7")
Jump("loc_5E5")
label("loc_4F7")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA4, 2)), scpexpr(EXPR_END)), "loc_5CA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 2)), scpexpr(EXPR_END)), "loc_55D")
ChrTalk(
0xFE,
(
"唔~\x01",
"有没有其他要买的东西呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
"……呀?\x02",
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"那个碟子,\x01",
"还真是可爱啊。\x02",
)
)
CloseMessageWindow()
Jump("loc_5C7")
label("loc_55D")
OP_A2(0x2)
ChrTalk(
0xFE,
(
"嗯……我记得……\x01",
"酱油和酒都用完了啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"唔~\x01",
"有没有其他要买的东西呢。\x02",
)
)
CloseMessageWindow()
label("loc_5C7")
Jump("loc_5E5")
label("loc_5CA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA3, 7)), scpexpr(EXPR_END)), "loc_5D4")
Jump("loc_5E5")
label("loc_5D4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA3, 2)), scpexpr(EXPR_END)), "loc_5DE")
Jump("loc_5E5")
label("loc_5DE")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 2)), scpexpr(EXPR_END)), "loc_5E5")
label("loc_5E5")
TalkEnd(0xFE)
Return()
# Function_6_4C2 end
def Function_7_5E9(): pass
label("Function_7_5E9")
TalkBegin(0xFE)
TalkEnd(0xFE)
Return()
# Function_7_5E9 end
def Function_8_5F0(): pass
label("Function_8_5F0")
TalkBegin(0xFE)
TalkEnd(0xFE)
Return()
# Function_8_5F0 end
def Function_9_5F7(): pass
label("Function_9_5F7")
TalkBegin(0xFE)
TalkEnd(0xFE)
Return()
# Function_9_5F7 end
def Function_10_5FE(): pass
label("Function_10_5FE")
TalkBegin(0xFE)
TalkEnd(0xFE)
Return()
# Function_10_5FE end
def Function_11_605(): pass
label("Function_11_605")
Call(0, 12)
Return()
# Function_11_605 end
def Function_12_60A(): pass
label("Function_12_60A")
TalkBegin(0xF)
FadeToDark(300, 0, 100)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0x18), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Menu(
0,
10,
100,
1,
(
"对话\x01", # 0
"买东西\x01", # 1
"离开\x01", # 2
)
)
MenuEnd(0x0)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0xFFFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_5F(0x0)
FadeToBright(300, 0)
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_66A")
OP_0D()
OP_A9(0x44)
OP_56(0x0)
TalkEnd(0xF)
Return()
label("loc_66A")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_67B")
TalkEnd(0xF)
Return()
label("loc_67B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC0, 1)), scpexpr(EXPR_END)), "loc_770")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 7)), scpexpr(EXPR_END)), "loc_6EA")
ChrTalk(
0xF,
(
"因为女王的诞辰庆典快到了,\x01",
"客人也越来越少了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"我这个看店的\x01",
"也困得要命……\x02",
)
)
CloseMessageWindow()
Jump("loc_76D")
label("loc_6EA")
OP_A2(0x7)
ChrTalk(
0xF,
(
"哎呀哎呀……\x01",
"还以为是谁,原来有客人来了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"因为几乎没人来,\x01",
"我都已经在想\x01",
"是不是该关门了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"啊,想帮帮我的话,\x01",
"就买点东西吧。\x02",
)
)
CloseMessageWindow()
label("loc_76D")
Jump("loc_FCD")
label("loc_770")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xAB, 5)), scpexpr(EXPR_END)), "loc_8C4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 7)), scpexpr(EXPR_END)), "loc_7F4")
ChrTalk(
0xF,
(
"很遗憾啊,我想在这个村子里\x01",
"大概也不会找到什么线索吧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"但是难得来了,\x01",
"不如买点温泉鸡蛋之类的特产吧?\x02",
)
)
CloseMessageWindow()
Jump("loc_8C1")
label("loc_7F4")
OP_A2(0x7)
ChrTalk(
0xF,
(
"啊,是你们啊……\x01",
"工作辛苦了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"我从毛婆婆那里\x01",
"听说了你们的事情……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"我想在这个村子里\x01",
"大概也不会找到什么线索吧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"算了,难得来一次,\x01",
"至少看看这些特产,\x01",
"挑点想买的带回去吧。\x02",
)
)
CloseMessageWindow()
label("loc_8C1")
Jump("loc_FCD")
label("loc_8C4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA6, 7)), scpexpr(EXPR_END)), "loc_A84")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 7)), scpexpr(EXPR_END)), "loc_992")
ChrTalk(
0xF,
(
"仔细想想看,\x01",
"还是小时候最幸福啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"……我和妻子相遇的时候,\x01",
"年纪正好像库安那么大呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"那时候自己\x01",
"还是个和库安一样的少年……\x01",
"现在想想简直不敢相信。\x02",
)
)
CloseMessageWindow()
Jump("loc_A81")
label("loc_992")
OP_A2(0x7)
ChrTalk(
0xF,
(
"关于蔡斯事件的话题\x01",
"在大人们当中引起了很大的骚动……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"但是库安他们一点也没受影响,\x01",
"还是和往常一样天真无邪地在外边玩耍。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"仔细想想看,\x01",
"还是小时候最幸福啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"……我和妻子相遇的时候,\x01",
"年纪正好像库安那么大呢。\x02",
)
)
CloseMessageWindow()
label("loc_A81")
Jump("loc_FCD")
label("loc_A84")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA5, 0)), scpexpr(EXPR_END)), "loc_B19")
ChrTalk(
0xF,
"哟,早上好。\x02",
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"已经要回去了吗?\x01",
"买点特产作纪念怎么样?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"来这里的游客们\x01",
"都会买很多特产带回去的。\x02",
)
)
CloseMessageWindow()
Jump("loc_FCD")
label("loc_B19")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA4, 6)), scpexpr(EXPR_END)), "loc_BFA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 7)), scpexpr(EXPR_END)), "loc_B91")
ChrTalk(
0xF,
"呼,腰痛……\x02",
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"把店里收拾完以后\x01",
"就去『红叶亭』休息一下吧。\x02",
)
)
CloseMessageWindow()
Jump("loc_BF7")
label("loc_B91")
OP_A2(0x7)
ChrTalk(
0xF,
(
"咦……\x01",
"还以为是谁,原来有客人来了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"马上就关门了,\x01",
"要买东西请尽快哦。\x02",
)
)
CloseMessageWindow()
label("loc_BF7")
Jump("loc_FCD")
label("loc_BFA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA4, 2)), scpexpr(EXPR_END)), "loc_CF7")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 7)), scpexpr(EXPR_END)), "loc_C42")
ChrTalk(
0xF,
(
"哎呀……\x01",
"又到天黑的时候了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"这一天\x01",
"就这样平平淡淡地结束了。\x02",
)
)
CloseMessageWindow()
Jump("loc_CF4")
label("loc_C42")
OP_A2(0x7)
ChrTalk(
0xF,
(
"我们会向观光的客人\x01",
"推荐这里的土特产品……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"当然,\x01",
"本店里同样也卖村民的生活必需品。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"因为村子里\x01",
"没有其他的商店了。\x02",
)
)
CloseMessageWindow()
label("loc_CF4")
Jump("loc_FCD")
label("loc_CF7")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA3, 7)), scpexpr(EXPR_END)), "loc_DF0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 7)), scpexpr(EXPR_END)), "loc_D70")
ChrTalk(
0xF,
(
"啊,\x01",
"终于有客人来了呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"这些陶器很值得推荐,\x01",
"请慢慢看吧。\x02",
)
)
CloseMessageWindow()
Jump("loc_DED")
label("loc_D70")
OP_A2(0x7)
ChrTalk(
0xF,
"啊,欢迎光临。\x02",
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"等了好久,\x01",
"终于有客人来了呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"这些陶器很值得推荐,\x01",
"请慢慢看吧。\x02",
)
)
CloseMessageWindow()
label("loc_DED")
Jump("loc_FCD")
label("loc_DF0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA3, 2)), scpexpr(EXPR_END)), "loc_EB0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 7)), scpexpr(EXPR_END)), "loc_E55")
ChrTalk(
0xF,
(
"我的儿子库安\x01",
"性格十分活泼外向。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"这一点不太像我,\x01",
"倒更像我已经去世的妻子。\x02",
)
)
CloseMessageWindow()
Jump("loc_EAD")
label("loc_E55")
OP_A2(0x7)
ChrTalk(
0xF,
(
"呼,\x01",
"今天没什么客人光顾呀。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"库安那小子,\x01",
"有没有在用心招呼客人啊。\x02",
)
)
CloseMessageWindow()
label("loc_EAD")
Jump("loc_FCD")
label("loc_EB0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 2)), scpexpr(EXPR_END)), "loc_FCD")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 7)), scpexpr(EXPR_END)), "loc_F4B")
ChrTalk(
0xF,
"欢迎~请进吧。\x02",
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"温泉鸡蛋的食用方法\x01",
"也有很多种哦。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
"请一定要试试看。\x02",
)
CloseMessageWindow()
Jump("loc_FCD")
label("loc_F4B")
OP_A2(0x7)
ChrTalk(
0xF,
(
"啊啊……\x01",
"还以为是谁,原来有客人来了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"欢迎。\x01",
"请、请慢慢看吧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"我个人最推荐的特产\x01",
"就要数温泉蛋了。\x02",
)
)
CloseMessageWindow()
label("loc_FCD")
TalkEnd(0xF)
Return()
# Function_12_60A end
def Function_13_FD1(): pass
label("Function_13_FD1")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC0, 1)), scpexpr(EXPR_END)), "loc_FDE")
Jump("loc_12A4")
label("loc_FDE")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xAB, 5)), scpexpr(EXPR_END)), "loc_FE8")
Jump("loc_12A4")
label("loc_FE8")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA6, 7)), scpexpr(EXPR_END)), "loc_FF2")
Jump("loc_12A4")
label("loc_FF2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA5, 0)), scpexpr(EXPR_END)), "loc_104D")
ChrTalk(
0xFE,
"啊,真没劲。\x02",
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"店里的事做完之后\x01",
"就去外边玩吧。\x02",
)
)
CloseMessageWindow()
Jump("loc_12A4")
label("loc_104D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA4, 6)), scpexpr(EXPR_END)), "loc_11C2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 0)), scpexpr(EXPR_END)), "loc_1111")
ChrTalk(
0xFE,
(
"虽然这么说,\x01",
"没有客人来真是不太安心。\x01",
"爸爸让我去招揽顾客。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"我的爸爸就像\x01",
"图画书里的那个没用的爸爸。\x02",
)
)
CloseMessageWindow()
Jump("loc_11BF")
label("loc_1111")
OP_A2(0x8)
ChrTalk(
0xFE,
(
"我的爸爸\x01",
"对生意不很热心啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"要是正累的时候\x01",
"有客人前来光顾,\x01",
"他就会摆出一副臭脸。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"那样可不行啊。\x01",
"爸爸根本不适合做生意。\x02",
)
)
CloseMessageWindow()
label("loc_11BF")
Jump("loc_12A4")
label("loc_11C2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA4, 2)), scpexpr(EXPR_END)), "loc_1289")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 0)), scpexpr(EXPR_END)), "loc_1210")
ChrTalk(
0xFE,
(
"啊,怎么了?\x01",
"要买温泉蛋吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
"咕噜咕噜的,咕噜咕噜的。\x02",
)
CloseMessageWindow()
Jump("loc_1286")
label("loc_1210")
OP_A2(0x8)
ChrTalk(
0xFE,
(
"啊~~\x01",
"来点温泉蛋怎么样~\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"咕噜咕噜的\x01",
"好吃的温泉蛋啊~~\x02",
)
)
CloseMessageWindow()
label("loc_1286")
Jump("loc_12A4")
label("loc_1289")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA3, 7)), scpexpr(EXPR_END)), "loc_1293")
Jump("loc_12A4")
label("loc_1293")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA3, 2)), scpexpr(EXPR_END)), "loc_129D")
Jump("loc_12A4")
label("loc_129D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 2)), scpexpr(EXPR_END)), "loc_12A4")
label("loc_12A4")
TalkEnd(0xFE)
Return()
# Function_13_FD1 end
SaveToFile()
Try(main)
|
import requests
import logging
import re
from bs4 import BeautifulSoup
import hashlib
import sqlite3
import os
from unfurl.util import timeit
LOG = logging.getLogger(__name__)
def get_page(url):
    """Fetch *url* via HTTP GET and return the ``requests`` Response.

    Returns None (after logging the error) when *url* has no scheme,
    e.g. ``example.com`` instead of ``http://example.com``.
    """
    # Lazy %-args so the message is only formatted when DEBUG is enabled.
    LOG.debug('fetching page: %s', url)
    try:
        page = requests.get(url)
    # BUG FIX: ``except X, e`` is Python-2-only syntax (SyntaxError on
    # Python 3); use the ``as`` form.
    except requests.exceptions.MissingSchema as e:
        LOG.error(e.args[0])
        return None
    LOG.debug('response headers: %s', page.headers)
    return page
class Page(object):
    """A web page whose anchor (``<a href>``) links can be harvested.

    The page is fetched lazily: call :meth:`load` (or pass ``autoload=True``)
    before reading :attr:`markup` / :attr:`links`.
    """

    def __init__(self, url, regex=None, autoload=False):
        # Trailing slashes are stripped so equivalent URLs compare equal.
        self.url = self._normalize_url(url)
        self._request = None
        # Pattern used to filter hrefs; default matches any non-empty href.
        self.regex = regex or '.+'
        if autoload:
            self.load()

    @timeit('page load')
    def load(self):
        """Fetch the page; safe to call again to refresh."""
        self._request = get_page(self.url)

    @property
    def loaded(self):
        """True once load() has stored a response."""
        return self._request is not None

    def _normalize_url(self, url):
        return url.rstrip('/')

    @property
    def markup(self):
        # BUG FIX: previously raised AttributeError when accessed before
        # load(); now returns None, matching what links() already assumes.
        if not self.loaded:
            return None
        return self._request.text

    @property
    def links(self):
        """Sorted, de-duplicated hrefs matching self.regex, or None if unloaded."""
        if not self.markup:
            return None
        pattern = re.compile(self.regex)
        # Explicit parser avoids bs4's "no parser specified" warning and keeps
        # results stable across environments (the default parser varies with
        # what is installed).
        soup = BeautifulSoup(self.markup, 'html.parser')
        items = [a['href'] for a in soup.findAll('a', href=pattern)]
        return sorted(set(items))

    @property
    def snapshot(self):
        """
        Return a snapshot encapsulating the current state of the links on this
        page, including:

        * The list of links
        * A cryptographic hash representing the data
        """
        # BUG FIX: a property getter cannot accept extra arguments; the old
        # 'regex' parameter was dead code and has been removed.
        return PageSnapshot(self.url, self.links, self.regex)
class PageSnapshot(object):
    """Point-in-time record of a page's links, comparable via a checksum."""

    DEFAULT_HASH_FUNCTION = hashlib.sha512
    DEFAULT_HASH_ENCODING = 'hex'

    def __init__(self, url=None, links=None, regex=None, hasher=None,
                 encoding=None):
        """
        :param url: normalized page URL this snapshot belongs to.
        :param links: iterable of hrefs; stored as a sorted copy.
        :param regex: the filter pattern the links were harvested with.
        :param hasher: hashlib-style constructor (default sha512).
        :param encoding: digest encoding label (default 'hex').
        """
        self.url = url
        # BUG FIX: the old signature used a mutable default (links=[]) and
        # then sorted it in place, so every default-constructed snapshot
        # shared -- and mutated -- one list. Store a sorted copy instead.
        self.links = sorted(links) if links else []
        self.regex = regex
        self.hasher = hasher or self.DEFAULT_HASH_FUNCTION
        self.encoding = encoding or self.DEFAULT_HASH_ENCODING

    def __eq__(self, other):
        if not isinstance(other, PageSnapshot):
            return NotImplemented
        return other.url == self.url and \
            other.regex == self.regex and \
            other.checksum == self.checksum

    def __hash__(self):
        # Defined alongside __eq__ so snapshots stay usable in sets/dicts.
        return hash((self.url, self.regex, self.checksum))

    @property
    def blob(self):
        """
        Create a unique representation of the link data
        """
        # NUL is a safe separator: it cannot appear in an href.
        return '\x00'.join(sorted(self.links))

    @classmethod
    def unblob(cls, blob):
        # BUG FIX: the old implementation used the Python-2-only ``unicode``
        # builtin (NameError on Python 3) and split bytes with a str
        # separator; a plain split restores the link list.
        return blob.split('\x00')

    @property
    def checksum(self):
        # BUG FIX: hashlib requires bytes on Python 3; encode the text blob.
        return self.hasher(self.blob.encode('utf-8')).hexdigest()

    def json(self):
        """Return a JSON-serializable dict of this snapshot."""
        return {
            'url': self.url,
            'links': self.links,
            'regex': self.regex,
            'checksum': self.checksum,
        }
|
from Data import Data
def main():
    """Run the full Data pipeline: vectors, classifier, report, clustering."""
    pipeline = Data()
    # The steps must run in this exact order; each builds on the previous.
    for step_name in ("createVectors", "createClassifier",
                      "printResults", "kmeansClustering"):
        getattr(pipeline, step_name)()


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 17 19:40:26 2018
@author: cmdrlias
"""
class Cliente:
    """A Telegram customer: display name, chat id and a purchase list."""

    def __init__(self, nm_cliente, id_telegram):
        self.nm_cliente = nm_cliente            # customer's display name
        self.id_telegram = id_telegram          # Telegram chat/user id
        self.lista_compra_produto = []          # products bought, per instance

    # Accessor pairs kept with their original names for existing callers.

    def getNm_cliente(self):
        """Return the customer's display name."""
        return self.nm_cliente

    def setNm_cliente(self, nm_cliente):
        """Replace the customer's display name."""
        self.nm_cliente = nm_cliente

    def getId_telegram(self):
        """Return the Telegram id."""
        return self.id_telegram

    def setId_telegram(self, id_telegram):
        """Replace the Telegram id."""
        self.id_telegram = id_telegram

    def getLista_compra_produto(self):
        """Return the purchase list (the live list, not a copy)."""
        return self.lista_compra_produto

    def setLista_compra_produto(self, lista_compra_produto):
        """Replace the purchase list wholesale."""
        self.lista_compra_produto = lista_compra_produto
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class GaowenboItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
from scrapy.item import Item, Field
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose, TakeFirst, Join
class ExampleItem(Item):
name = Field()
description = Field()
link = Field()
crawled = Field()
spider = Field()
url = Field()
class ExampleLoader(ItemLoader):
default_item_class = ExampleItem
default_input_processor = MapCompose(lambda s: s.strip())
default_output_processor = TakeFirst()
description_out = Join()
class StatsItem(scrapy.Item):
prov_code=scrapy.Field()
prov_name=scrapy.Field()
city_code=scrapy.Field()
city_name=scrapy.Field()
county_code=scrapy.Field()
county_name=scrapy.Field()
town_code=scrapy.Field()
town_name=scrapy.Field()
village_code = scrapy.Field()
village_name = scrapy.Field() |
import math
import torch
import numpy
from scipy.optimize import fsolve
inf = [[-368461.739, 26534822.568, -517664.322, 21966984.2427, -0.000104647296],
[10002180.758, 12040222.131, 21796269.831, 23447022.1136, -0.000308443058],
[-7036480.928, 22592611.906, 11809485.040, 20154521.4618, -0.000038172460]]
def get_position(unsolved_value, stations=None):
    """Residuals of the pseudorange equations at a candidate receiver position.

    Parameters
    ----------
    unsolved_value : sequence of 3 floats
        Candidate receiver coordinates (x, y, z).
    stations : list of [x, y, z, pseudorange, clock_bias] rows, optional
        Observation table; defaults to the module-level ``inf`` table
        (backward compatible with the original zero-argument use by fsolve).

    Returns
    -------
    list of float
        One residual per station: geometric range minus pseudorange minus
        the clock-bias term (signal speed fixed at 3e8 m/s, as before).
    """
    if stations is None:
        stations = inf
    x, y, z = unsolved_value[0], unsolved_value[1], unsolved_value[2]
    c = 3 * 10 ** 8  # propagation speed used by the original model
    residuals = []
    for sx, sy, sz, prange, bias in stations:
        geometric = math.sqrt((x - sx) ** 2 + (y - sy) ** 2 + (z - sz) ** 2)
        residuals.append(geometric - prange - c * bias)
    return residuals
def print_revpos1():
    """Solve the pseudorange system for the receiver position and echo it."""
    solution = fsolve(get_position, [0, 0, 0])
    print("接收机位置", solution)
    return solution
def get_distance1(pos1):
    """Euclidean distance from *pos1* to the fixed reference receiver."""
    reference = numpy.array([-2280736.13132096, 5004753.28331651, 3220020.98543618])
    candidate = numpy.array([pos1[0], pos1[1], pos1[2]])
    return numpy.linalg.norm(candidate - reference)
def getgrad(x, y, z):
    """Per-axis sign (+/-1.0) of the gradient of the distance from (x, y, z)
    to the fixed reference receiver; keeps the original debug print of the
    per-axis offsets."""
    dx = x + 2280736.13132096
    dy = y - 5004753.28331651
    dz = z - 3220020.98543618
    print(dx, dy, dz)
    norm = math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
    direction = [dx / norm, dy / norm, dz / norm]
    # print(direction)
    signs = torch.tensor(direction).sign()
    return list(numpy.array(signs))
def inter():
global inf
dis = 1000000
while dis > 746:
r = print_revpos1()
dis = get_distance1(r)
print(dis)
sign = getgrad(r[0], r[1], r[2])
# sign[0] = -sign[0]
# sign[1] = -sign[1]
# sign[2] = -sign[2]
print(sign)
i = 0
while i < 3:
inf[i][3] = inf[i][3] + 1 * sign[i]
i = i + 1
inter()
|
if __name__ == "__main__":
    import AssetReader

    # Smoke test: list the images embedded in the demo document.
    docx_reader = AssetReader.DocXReader()
    # BUG FIX: 'print x' is Python-2-only syntax (SyntaxError on Python 3);
    # print is a function.
    print(docx_reader.find_images("GestaltBoxDemo.docx"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.