# Generated by Django 3.0.3 on 2020-05-05 19:29
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BugType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
],
),
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
],
),
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=200, null=True)),
('last_name', models.CharField(blank=True, max_length=200, null=True)),
('level', models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3')], default=1)),
('department', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='issues.Department')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='FunctionalArea',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
],
),
migrations.CreateModel(
name='Priority',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
],
),
migrations.CreateModel(
name='Resolution',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
],
),
migrations.CreateModel(
name='Severity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
],
),
migrations.CreateModel(
name='Status',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
],
),
migrations.CreateModel(
name='Program',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.IntegerField(validators=[django.core.validators.MinValueValidator(1)])),
('release', models.IntegerField(validators=[django.core.validators.MinValueValidator(1)])),
('name', models.CharField(max_length=200)),
('areas', models.ManyToManyField(to='issues.FunctionalArea')),
],
options={
'unique_together': {('name', 'release', 'version')},
},
),
migrations.CreateModel(
name='Issue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('summary', models.CharField(blank=True, max_length=500, null=True)),
('problem', models.CharField(blank=True, max_length=500, null=True)),
('suggestedFix', models.CharField(blank=True, max_length=500, null=True)),
('comments', models.CharField(blank=True, max_length=500, null=True)),
('reproducible', models.BooleanField(null=True)),
('issueDate', models.DateTimeField(default=django.utils.timezone.now)),
('resolveByDate', models.DateTimeField(blank=True, null=True)),
('testByDate', models.DateTimeField(blank=True, null=True)),
('attachment', models.FileField(blank=True, null=True, upload_to='issue_images/')),
('attachment2', models.FileField(blank=True, null=True, upload_to='issue_images/')),
('attachment3', models.FileField(blank=True, null=True, upload_to='issue_images/')),
('attachment4', models.FileField(blank=True, null=True, upload_to='issue_images/')),
('attachment5', models.FileField(blank=True, null=True, upload_to='issue_images/')),
('assignedTo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='employee_assignedToID', to='issues.Employee')),
('bugtype', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='issues.BugType')),
('functionalArea', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='issues.FunctionalArea')),
('priority', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='issues.Priority')),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='issues.Program')),
('reportedBy', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='employee_reportedByID', to='issues.Employee')),
('resolution', models.ForeignKey(default='PENDING', on_delete=django.db.models.deletion.CASCADE, to='issues.Resolution')),
('severity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='issues.Severity')),
('status', models.ForeignKey(default='OPEN', on_delete=django.db.models.deletion.CASCADE, to='issues.Status')),
('testedBy', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='employee_testedByID', to='issues.Employee')),
],
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('members', models.ManyToManyField(to='issues.Employee')),
],
),
]
|
from Items import Invoice, Item
def main():
print("Welcome!")
invoice = Invoice()
item_name = input ("Enter the name of the first item purchased.")
while (len(item_name) > 0):
item_count = int(input ("How many " + item_name + " were purchased? "))
item_price = float(input ("What was the price for each " + item_name + "? "))
item = Item(item_name, item_count, item_price)
invoice.addItem(item)
item_name = input("\nEnter the name of the next item purchased.")
print ("\n\n%-30s%10s%12s%10s" % ("Item Name", "Cost Each", "Quantity", "Total"))
itemlist = invoice.getItems()
for i in itemlist:
print ("%-30s%10.2f%12d%10.2f" % (i.name, i.cost, i.quantity, i.getTotalCost()))
print("\n\tGrand Total: %.2f" % (invoice.getTotalCost()))
main()
|
# shows a user's playlists (need to be authenticated via oauth)
import sys
import spotipy
import spotipy.util as util
def show_tracks(tracks):
print '==============='
print tracks['items']
print '==============='
for i, item in enumerate(tracks['items']):
track = item['track']
output = u' '.join( (track['artists'][0]['name'], track['name'], track['uri']) ).encode('utf-8').strip()
print ' '+str(i)+ ' ' +output
if __name__ == '__main__':
if len(sys.argv) > 1:
username = sys.argv[1]
else:
print "Whoops, need your username!"
print "usage: python user_playlists.py [username]"
sys.exit()
token = util.prompt_for_user_token(username)
if token:
sp = spotipy.Spotify(auth=token)
playlists = sp.user_playlists(username)
pcount = len(playlists['items'])
print 'Number of Playlists: ',str(pcount)
for playlist in playlists['items']:
print '- - - -'
print playlist['owner']['id']
print playlist['name']
print playlist['uri']
print ' total tracks', playlist['tracks']['total']
results = sp.user_playlist(username, playlist['id'],fields="tracks,next")
tracks = results['tracks']
print ' alt total: '+str(len(tracks['items']))
show_tracks(tracks)
while tracks['next']:
tracks = sp.next(tracks)
show_tracks(tracks)
else:
print "Can't get token for", username
|
from xml.sax.saxutils import escape
class TmxFile:
def __init__(self, file_path, src_lang):
self._file_path = file_path
self._src_lang = src_lang
self.add_header()
def add_header(self):
with open(self._file_path, 'w', encoding='utf8') as f:
f.write(r'<?xml version="1.0" encoding="UTF-8"?>'
+ "\n"
+ r'<!DOCTYPE tmx SYSTEM "tmx11.dtd">'
+ "\n"
+ r'<tmx version="1.1">'
+ "\n"
+ r" <header"
+ "\n"
+ r' creationtool="org.omegat.OmegaT"'
+ "\n"
+ r' creationtoolversion="1"'
+ "\n"
+ r' segtype="paragraph"'
+ "\n"
+ r' o-tmf="org.omegat.OmegaT TMX"'
+ "\n"
+ r' adminlang="'
+ self._src_lang
+ '"'
+ "\n"
+ r' srclang="'
+ self._src_lang
+ '"'
+ "\n"
+ r' datatype="plaintext"'
+ "\n"
+ r' >'
+ "\n"
+ r' </header>'
+ "\n"
+ r' <body>'
+ "\n")
def add_translation_unit(self, target_str, src_str, target_lang, src_lang='default'):
if src_lang == 'default':
src_lang = self._src_lang
with open(self._file_path, 'a', encoding='utf8') as f:
f.write(r' <tu>'
+ "\n"
+ r' <tuv lang="'
+ src_lang
+ r'">'
+ "\n"
+ r' <seg>'
+ escape(src_str.strip())
+ r'</seg>'
+ "\n"
+ r' </tuv>'
+ "\n"
+ r' <tuv lang="'
+ target_lang
+ '">'
+ "\n"
+ r' <seg>'
+ escape(target_str.strip())
+ r'</seg>'
+ "\n"
+ r' </tuv>'
+ "\n"
+ r' </tu>'
+ "\n")
def add_footer(self):
with open(self._file_path, 'a', encoding='utf8') as f:
f.write(r' </body>' + "\n" + r'</tmx>')
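# A minimal usage sketch (the file name and language codes below are assumed
# values for illustration, not defined elsewhere in this module):
if __name__ == "__main__":
    tmx = TmxFile("sample.tmx", "en")                            # writes the TMX header
    tmx.add_translation_unit("Hallo Welt", "Hello world", "de")  # appends one <tu> pair
    tmx.add_footer()                                             # closes <body> and <tmx>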
|
# test = input("waiting for you: ")
#
# test #ignored...
#
#
#
#
test = 5
# print(test+"ok") #not working
print(5+5)
print("test"+"ok")
print(4*"ok") #whoaaaaa...
print(int("4"))
# print(int("4f"))
# print("okok" - "ok")
|
### Study of loops
### While
### Breaking out of while loops
nome = 'Marccus'
idade = 18
print(f'{nome:->20} is {idade:.2f} years old!')  # f-string
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import math
import six
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class BERTLayerNorm(nn.Module):
def __init__(self, config, variance_epsilon=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BERTLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(config.hidden_size))
self.beta = nn.Parameter(torch.zeros(config.hidden_size))
self.variance_epsilon = variance_epsilon
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BERTEmbeddings(nn.Module):
def __init__(self, config):
super(BERTEmbeddings, self).__init__()
"""Construct the embedding module from word, position and token_type embeddings.
"""
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BERTLayerNorm(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BERTSelfAttention(nn.Module):
def __init__(self, config):
super(BERTSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(
config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[
:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(
query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / \
math.sqrt(self.attention_head_size)
# Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[
:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BERTSelfOutput(nn.Module):
def __init__(self, config):
super(BERTSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BERTLayerNorm(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BERTAttention(nn.Module):
def __init__(self, config):
super(BERTAttention, self).__init__()
self.self = BERTSelfAttention(config)
self.output = BERTSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BERTIntermediate(nn.Module):
def __init__(self, config):
super(BERTIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = gelu
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BERTOutput(nn.Module):
def __init__(self, config):
super(BERTOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BERTLayerNorm(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BERTLayer(nn.Module):
def __init__(self, config):
super(BERTLayer, self).__init__()
self.attention = BERTAttention(config)
self.intermediate = BERTIntermediate(config)
self.output = BERTOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BERTEncoder(nn.Module):
def __init__(self, config):
super(BERTEncoder, self).__init__()
layer = BERTLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer)
for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BERTPooler(nn.Module):
def __init__(self, config):
super(BERTPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertModel(nn.Module):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config: BertConfig):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
"""
super(BertModel, self).__init__()
self.embeddings = BERTEmbeddings(config)
self.encoder = BERTEncoder(config)
self.pooler = BERTPooler(config)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT; we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.float()
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
all_encoder_layers = self.encoder(
embedding_output, extended_attention_mask)
sequence_output = all_encoder_layers[-1]
pooled_output = self.pooler(sequence_output)
return all_encoder_layers, pooled_output
class BertForSequenceClassification(nn.Module):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
config = BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels):
super(BertForSequenceClassification, self).__init__()
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
def init_weights(module):
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(
mean=0.0, std=config.initializer_range)
elif isinstance(module, BERTLayerNorm):
module.beta.data.normal_(
mean=0.0, std=config.initializer_range)
module.gamma.data.normal_(
mean=0.0, std=config.initializer_range)
if isinstance(module, nn.Linear):
module.bias.data.zero_()
self.apply(init_weights)
def forward(self, input_ids, token_type_ids, attention_mask, labels=None, n_class=1):
seq_length = input_ids.size(2)
_, pooled_output = self.bert(input_ids.view(-1, seq_length),
token_type_ids.view(-1, seq_length),
attention_mask.view(-1, seq_length))
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
logits = logits.view(-1, n_class)
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.view(-1)
loss = loss_fct(logits, labels)
return loss, logits
else:
return logits
# FIXME (kai): not modified accordingly yet
class BertForQuestionAnswering(nn.Module):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
config = BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__()
self.bert = BertModel(config)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
def init_weights(module):
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(
mean=0.0, std=config.initializer_range)
elif isinstance(module, BERTLayerNorm):
module.beta.data.normal_(
mean=0.0, std=config.initializer_range)
module.gamma.data.normal_(
mean=0.0, std=config.initializer_range)
if isinstance(module, nn.Linear):
module.bias.data.zero_()
self.apply(init_weights)
def forward(self, input_ids, token_type_ids, attention_mask, start_positions=None, end_positions=None):
all_encoder_layers, _ = self.bert(
input_ids, token_type_ids, attention_mask)
sequence_output = all_encoder_layers[-1]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, the positions may carry an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
else:
return start_logits, end_logits
class BertForRace(nn.Module):
def __init__(self, config):
super(BertForRace, self).__init__()
self.bert = BertModel(config)
self.final_match = BertMultiAttention(config)
def init_weights(module):
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(
mean=0.0, std=config.initializer_range)
elif isinstance(module, BERTLayerNorm):
module.beta.data.normal_(
mean=0.0, std=config.initializer_range)
module.gamma.data.normal_(
mean=0.0, std=config.initializer_range)
if isinstance(module, nn.Linear):
module.bias.data.zero_()
self.apply(init_weights)
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
sequence_length=None,
labels=None):
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
flat_sequence_length = sequence_length.view(
-1, sequence_length.size(-1))
all_encoder_layers, _ = self.bert(
flat_input_ids, flat_token_type_ids, flat_attention_mask)
# sequence_output:8*512*768
sequence_output = all_encoder_layers[-1]
# create the necessary tensors
chidden_states = torch.zeros_like(
sequence_output, device=sequence_output.device)
qhidden_states = torch.zeros_like(
sequence_output, device=sequence_output.device)
ahidden_states = torch.zeros_like(
sequence_output, device=sequence_output.device)
cattention_mask = torch.zeros_like(
flat_attention_mask, device=sequence_output.device
)
qattention_mask = torch.zeros_like(
flat_attention_mask, device=sequence_output.device
)
aattention_mask = torch.zeros_like(
flat_attention_mask, device=sequence_output.device
)
for i in range(flat_input_ids.size(0)):
chidden_states[i, :(flat_sequence_length[i, 0] + 2),
:] = sequence_output[i, :(flat_sequence_length[i, 0] + 2),
:]
qhidden_states[i, (flat_sequence_length[i, 0] + 2):(flat_sequence_length[i, 0] + 2 + flat_sequence_length[i, 1]),
:] = sequence_output[i, (flat_sequence_length[i, 0] + 2):(flat_sequence_length[i, 0] + 2 + flat_sequence_length[i, 1]),
:]
ahidden_states[i, (flat_sequence_length[i, 0] + 2 + flat_sequence_length[i, 1]):(flat_sequence_length[i, 0] + 3 + flat_sequence_length[i, 1] + flat_sequence_length[i, 2]),
:] = sequence_output[i, (flat_sequence_length[i, 0] + 2 + flat_sequence_length[i, 1]): (flat_sequence_length[i, 0] + 3 + flat_sequence_length[i, 1] + flat_sequence_length[i, 2]),
:]
cattention_mask[i, : (flat_sequence_length[i, 0] + 2)
] = torch.ones(1, flat_sequence_length[i, 0] + 2)
qattention_mask[i, (flat_sequence_length[i, 0] + 2):(
flat_sequence_length[i, 0] + 2 + flat_sequence_length[i, 1])] = torch.ones(1, flat_sequence_length[i, 1])
aattention_mask[i, (flat_sequence_length[i, 0] + 2 + flat_sequence_length[i, 1]):(
flat_sequence_length[i, 0] + 3 + flat_sequence_length[i, 1] + flat_sequence_length[i, 2])] = torch.ones(1, flat_sequence_length[i, 2] + 1)
cattention_mask = cattention_mask.unsqueeze(1)
cattention_mask = cattention_mask.to(
dtype=next(self.parameters()).dtype)
cattention_mask = (1 - cattention_mask) * -10000.0
qattention_mask = qattention_mask.unsqueeze(1)
qattention_mask = qattention_mask.to(
dtype=next(self.parameters()).dtype)
qattention_mask = (1 - qattention_mask) * -10000.0
aattention_mask = aattention_mask.unsqueeze(1)
aattention_mask = aattention_mask.to(
dtype=next(self.parameters()).dtype)
aattention_mask = (1 - aattention_mask) * -10000.0
logits = self.final_match(chidden_states, qhidden_states, ahidden_states,
cattention_mask, qattention_mask, aattention_mask)
logits = logits.view(-1, 3)
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.view(-1)
loss = loss_fct(logits, labels)
acc = self.accuracy(logits, labels)
acc = acc.sum()
return loss, acc
else:
return logits
def accuracy(self, out, tgt):
out = torch.argmax(out, -1)
return (out == tgt).float()
class SoftSel(nn.Module):
def __init__(self, config):
super(SoftSel, self).__init__()
self.attention = nn.Linear(
config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(
self,
m_hidden_states,
n_hidden_states,
nattention_mask):
attention_matrix = self.attention(n_hidden_states)
attention_matrix = torch.transpose(attention_matrix, 1, 2)
attention_matrix = torch.matmul(m_hidden_states, attention_matrix)
attention_matrix = attention_matrix + nattention_mask
attention_matrix = nn.Softmax(dim=-1)(attention_matrix)
attention_matrix = self.dropout(attention_matrix)
return torch.matmul(attention_matrix, n_hidden_states)
class BertMultiAttention(nn.Module):
def __init__(self, config):
super(BertMultiAttention, self).__init__()
self._c = SoftSel(config)
self._q = SoftSel(config)
self.a_ = SoftSel(config)
self._a = SoftSel(config)
self.c_ = SoftSel(config)
self.qpa = nn.Linear(2 * config.hidden_size,
config.hidden_size)
self.eam = nn.Linear(2 * config.hidden_size,
config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.last_dropout = nn.Dropout(p=0.2)
self.aff = nn.Linear(config.hidden_size * 6, 1)
def forward(self,
chidden_states,
qhidden_states,
ahidden_states,
cattention_mask,
qattention_mask,
aattention_mask):
# eam1
q_hidden_ = self._c(qhidden_states, chidden_states, cattention_mask)
aq_hidden = self._q(ahidden_states, q_hidden_, qattention_mask)
aq_hidden_ = self._a(aq_hidden, ahidden_states, aattention_mask)
a_hidden_ = self.a_(ahidden_states, aq_hidden, aattention_mask)
row1 = torch.cat(
[
aq_hidden * aq_hidden_,
aq_hidden - aq_hidden_
],
dim=2
)
row2 = torch.cat(
[
ahidden_states * a_hidden_,
ahidden_states - a_hidden_
],
dim=2
)
eam1_1, eam1_2 = torch.chunk(self.dropout(
nn.ReLU()(self.eam(torch.cat([row1, row2], dim=1)))), 2, 1)
# can add layernorm here later
eam1_1 = self.masked_max(eam1_1, aattention_mask)
eam1_2 = self.masked_max(eam1_2, aattention_mask)
# eam2
a_hidden_ = self._q(ahidden_states, qhidden_states, qattention_mask)
ac_hidden = self._c(a_hidden_, chidden_states, cattention_mask)
ac_hidden_ = self._a(ac_hidden, ahidden_states, aattention_mask)
a_hidden__ = self.a_(ahidden_states, ac_hidden, aattention_mask)
row1 = torch.cat(
[
ac_hidden * ac_hidden_,
ac_hidden - ac_hidden_
],
dim=2
)
row2 = torch.cat(
[
ahidden_states * a_hidden__,
ahidden_states - a_hidden__
],
dim=2
)
eam2_1, eam2_2 = torch.chunk(self.dropout(
nn.ReLU()(self.eam(torch.cat([row1, row2], dim=1)))), 2, 1)
# can add layernorm here later
eam2_1 = self.masked_max(eam2_1, aattention_mask)
eam2_2 = self.masked_max(eam2_2, aattention_mask)
# qpam
cq_hidden = self.c_(chidden_states, qhidden_states, qattention_mask)
ca_hidden = self.c_(chidden_states, ahidden_states, aattention_mask)
row1 = torch.cat(
[
cq_hidden * chidden_states,
cq_hidden - chidden_states
],
dim=2
)
row2 = torch.cat(
[
ca_hidden * chidden_states,
ca_hidden - chidden_states
],
dim=2
)
qpa1, qpa2 = torch.chunk(self.dropout(
nn.ReLU()(self.qpa(torch.cat([row1, row2], dim=1)))), 2, 1)
qpa1 = self.masked_max(qpa1, cattention_mask)
qpa2 = self.masked_max(qpa2, cattention_mask)
res = torch.cat(
[
eam1_1,
eam1_2,
eam2_1,
eam2_2,
qpa1,
qpa2
],
dim=1
)
# can remove layernorm here and add dropout(0.5); that may work better
# removed layernorm and added dropout
# res = self.last_dropout(res)
return self.aff(res)
def masked_max(self, v, v_mask):
tmp_v_mask = ((v_mask + 10000.0) / 10000.0)
tmp_v_mask = tmp_v_mask.permute(0, 2, 1).contiguous()
v = v * tmp_v_mask
return torch.max(v, 1)[0]
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('falcon_heavy.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
e1 = cv2.getTickCount()
# your code execution
rocket = img[22:123,80:100]
img[22:123,40:60] = rocket
e2 = cv2.getTickCount()
time = (e2 - e1)/ cv2.getTickFrequency()
print('Time taken: {:.2f} µs'.format(time * 10**6))
# for i in range(50):
# img.itemset((i,10,1),122)
# print(img.item(px))
plt.imshow(img)
plt.show()
|
from django.apps import AppConfig
class ListyConfig(AppConfig):
name = 'listy'
|
import telegram
from typing import Tuple, Optional, List
from core.TelegramMessageWrapper import TelegramMessageWrapper
import logging
logger = logging.getLogger(__name__)
class TelegramPresenter:
def __init__(self, bot: telegram.Bot):
self._bot = bot
def send_message(self,
chat_id: str,
message: TelegramMessageWrapper) -> None:
if chat_id is None:
logger.warn(f"Skipping attempt to send to chat_id=None message: {message.text}")
return
logger.info(f"Sending to chat_id: {chat_id} msg: {message.text}")
self._bot.sendMessage(
chat_id=chat_id,
text=message.text,
parse_mode=message.parse_mode,
reply_markup=message.reply_markup
)
|
import tkinter as tk
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD) # to use Raspberry Pi board pin numbers
GPIO.setup(3, GPIO.OUT) # set up GPIO output channel
mainwindow=tk.Tk()
mainwindow.title('Test ')
mainwindow.geometry('640x340')
my_label=tk.Label(mainwindow,text="My First UI", font=("Arial",22), bg= "Green",fg="white")
my_label.grid(row=0,column=0,sticky='NSEW',padx=10,pady=10)
button_on=tk.Button(mainwindow,text="On",bg="black",fg="white", command=lambda :my_on())
button_on.grid(row=1,column=0,sticky='NSEW',padx=10,pady=10)
button_off=tk.Button(mainwindow,text="OFF",bg="black",fg="white", command=lambda:my_off())
button_off.grid(row=1,column=1,columnspan=1,sticky='NSEW',padx=10,pady=10)
def my_on():
print('Led Turn On !!!!! ')
GPIO.output(3, GPIO.HIGH) # set board pin 3 high to turn the LED on
print('Yes you did it !')
def my_off():
print('Led Turned Off !!!!!! ')
GPIO.output(3, GPIO.LOW) # set board pin 3 low to turn the LED off
print('Great Work ! ')
mainwindow.mainloop()
|
num_pieces = int(input())
library = {}
output = []
while True:
while num_pieces > 0:
piece, composer, note_key = input().split('|')
if piece not in library:
library[piece] = {'name': composer, 'note_key': note_key}
num_pieces -= 1
command = input().split('|')
act = command[0]
if act == 'Stop':
break
piece = command[1]
if act == 'Add':
composer, note_key = command[2], command[3]
if piece in library:
output.append(f"{piece} is already in the collection!")
else:
library[piece] = {'name': composer, 'note_key': note_key}
output.append(f"{piece} by {composer} in {note_key} added to the collection!")
elif act == 'Remove':
if piece in library:
output.append(f"Successfully removed {piece}!")
library.pop(piece)
else:
output.append(f"Invalid operation! {piece} does not exist in the collection.")
elif act == 'ChangeKey':
new_key = command[2]
if piece in library:
library[piece]["note_key"] = new_key
output.append(f"Changed the key of {piece} to {new_key}!")
else:
output.append(f"Invalid operation! {piece} does not exist in the collection.")
print(*output, sep='\n')
for piece, info in sorted(library.items(), key=lambda x: (x[0], x[1]['name'])):
print(f'{piece} -> Composer: {info["name"]}, Key: {info["note_key"]}')
# 3
# Fur Elise|Beethoven|A Minor
# Moonlight Sonata|Beethoven|C# Minor
# Clair de Lune|Debussy|C# Minor
# Add|Sonata No.2|Chopin|B Minor
# Add|Hungarian Rhapsody No.2|Liszt|C# Minor
# Add|Fur Elise|Beethoven|C# Minor
# Remove|Clair de Lune
# ChangeKey|Moonlight Sonata|C# Major
# Stop
|
def geometric_sequence_elements(a, r, n):
num = a
output = ''
for _ in range(n):
output+='{}, '.format(num)
num = num * r
return output[:-2]
'''
In your class, you have started lessons about geometric progression.
Since you are also a programmer, you have decided to write a function
that will print first n elements of the sequence with the given constant
r and first element a.
Result should be separated by comma and space.
Example
geometric_sequence_elements(2, 3, 5) == '2, 6, 18, 54, 162'
'''
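# Quick check against the example given in the problem statement above:
assert geometric_sequence_elements(2, 3, 5) == '2, 6, 18, 54, 162'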
|
import pygame
import glob
from threading import Thread
def get_alarm_sound():
alarmArray = glob.glob('alarm/*')
return alarmArray[0]
class AlarmHandler:
def __init__(self, src=0):
self.stopped = False
self.src = get_alarm_sound() if src == 0 else src
self.objectID = None
def start(self, objectID=None):
self.objectID = objectID
self.stopped = False
Thread(target=self.sound_alarm, args=()).start()
return self
def sound_alarm(self):
pygame.mixer.init()
pygame.mixer.music.load(self.src)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
if self.stopped:
pygame.mixer.music.stop()
return
def stop(self):
self.stopped = True
self.objectID = None
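# A minimal usage sketch (assumes at least one audio file exists under alarm/,
# which get_alarm_sound() above requires):
if __name__ == "__main__":
    import time
    handler = AlarmHandler().start(objectID=1)  # plays the alarm on a background thread
    time.sleep(2)                               # let it play briefly
    handler.stop()                              # sets the flag so sound_alarm() stops playback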
|
from .cifar10_dataset import *
from .load_dataset import *
|
preçoNormal = float(input('Enter the normal price of the product: '))
print(''' PAYMENT OPTIONS
[1] Cash/cheque, paid in full
[2] Paid in full on the card
[3] 2 installments on the card
[4] 3 or more installments on the card
''')
opcPag = int(input('Which payment option?: '))
if opcPag == 1:
valorFinal = preçoNormal * 0.90
print('The price comes to R${:.2f}.'.format(valorFinal))
elif opcPag == 2:
valorFinal = preçoNormal * 0.95
print('The price paid in full on the card comes to R${:.2f}'.format(valorFinal))
elif opcPag == 3:
valorFinal = preçoNormal
parcela = preçoNormal / 2
print('Your purchase was split into 2 installments of R${:.2f}'.format(parcela))
elif opcPag == 4:
valorFinal = preçoNormal * 1.20
print('Your purchase of R${:.2f} will cost R${:.2f} in the end.'.format(preçoNormal, valorFinal))
|
# print out the first n primes
import numpy as np
import matplotlib.pyplot as plt
import sys
import math
if len(sys.argv) < 2:
n = 1000
else:
n = int(sys.argv[1])
def is_prime(n):
if n % 2 == 0 and n > 2:
return False
return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2))
def get_primes(n, lower=2):
primes = np.array([])
number = lower
i = 0
while i < int(n):
if is_prime(number):
primes = np.append(primes, number)
i+=1
number+=1
return primes
def main(n=1000,lower=2):
# primes = get_primes(n, lower)
# one_list = [i for i in primes if i%10 == 1 ]
# three_list = [i for i in primes if i%10 == 3 ]
# seven_list = [i for i in primes if i%10 == 7 ]
# nine_list = [i for i in primes if i%10 == 9 ]
# print len(one_list), len(three_list), len(seven_list), len(nine_list)
# Idea: plot length of 1's list versus n as n grows
# repeat for 3's, 7's and 9's list
for m in range(int(n)):
primes = get_primes(m)
one_list = np.array([i for i in primes if i%10 == 1 ])
three_list = np.array([i for i in primes if i%10 == 3 ])
seven_list = np.array([i for i in primes if i%10 == 7 ])
nine_list = np.array([i for i in primes if i%10 == 9 ])
plt.plot(m, len(one_list), 'b.', m, len(three_list), 'r.', m, len(seven_list), 'g.', m, len(nine_list), 'c.')
# Line of best fit
# least squares method
plt.xlabel('natural numbers')
plt.ylabel('number of primes ending in 1,3,7, and 9')
plt.title('Number of primes ending in 1,3,7 or 9 \n in a list of the first n natural numbers')
plt.show()
# Conclusion: number of primes ending in 1,3,7, and 9 remain about the same as
# the list of primes grow.
# Other things to look at:
# - sparsity of primes
# - distribution of primes as n list grows
# - ...
if __name__ == '__main__':
main(n)
# print get_primes(sys.argv[1])
|
import sqlite3
from PasswordRecovery import Ui_Dialog
from PyQt5 import QtCore, QtWidgets
from AdminWindow import Ui_AdminWindow
from PyQt5.QtWidgets import QMessageBox
from Usercreation import Ui_Registration
class Ui_MainWindow(object):
def open_admin_window(self):
"""
After succesfull login to system this function will close Login Window and open Admin Window
"""
self.window = QtWidgets.QMainWindow()
self.ui = Ui_AdminWindow()
self.ui.setupUi(self.window)
self.window.show()
def sign_Up_window(self):
"""
After pressing "Registery" button this function is opening UserRegister Window
"""
self.window = QtWidgets.QDialog()
self.ui = Ui_Registration()
self.ui.setupUi(self.window)
self.window.show()
def login_Check(self):
"""
Function to check with DB and see is user can login succesfully
"""
username = self.admin_login_info()
password = self.lineEdit_2.text()
connection = sqlite3.connect("login.db")
result = connection.execute("SELECT * FROM USERS WHERE USERNAME = ? AND PASSWORD = ?", (username, password))
if (len(result.fetchall()) > 0): # If User is found in DB
self.open_admin_window()
else:
QMessageBox.warning(MainWindow, "Error", "Invalid Username or Password")
def password_recovery(self):
"""
After pressing "Forgot password" button this function is opening "Password recovery" Window
"""
self.window = QtWidgets.QDialog()
self.ui = Ui_Dialog()
self.ui.setupUi(self.window)
self.window.show()
def admin_login_info(self):
return self.lineEdit.text()
def setupUi(self, MainWindow):
"""
Creating Main window with log in screen
"""
MainWindow.setObjectName("MainWindow")
MainWindow.resize(920, 549)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.ButtonEnter = QtWidgets.QPushButton(self.centralwidget)
self.ButtonEnter.setGeometry(QtCore.QRect(210, 250, 141, 51))
self.ButtonEnter.setObjectName("ButtonEnter")
#------------
self.ButtonEnter.clicked.connect(self.login_Check)
#------------
self.ButtonRegistery = QtWidgets.QPushButton(self.centralwidget)
self.ButtonRegistery.setGeometry(QtCore.QRect(370, 250, 141, 51))
self.ButtonRegistery.setObjectName("ButtonRegistery")
#------------
self.ButtonRegistery.clicked.connect(self.sign_Up_window)
#------------
self.ButtonCancel = QtWidgets.QPushButton(self.centralwidget)
self.ButtonCancel.setGeometry(QtCore.QRect(530, 250, 141, 51))
self.ButtonCancel.setObjectName("ButtonCancel")
#------------
self.ButtonCancel.clicked.connect(MainWindow.close)
#------------
self.checkBox_Rem_my_Username = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_Rem_my_Username.setGeometry(QtCore.QRect(290, 330, 311, 17))
self.checkBox_Rem_my_Username.setObjectName("checkBox_Rem_my_Username")
self.checkBox_Show_pass = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_Show_pass.setGeometry(QtCore.QRect(290, 360, 311, 17))
self.checkBox_Show_pass.setObjectName("checkBox_Show_pass")
#------------
self.checkBox_Show_pass.stateChanged.connect(self.Password_hide)
#------------
self.Button_Forgot_pass = QtWidgets.QPushButton(self.centralwidget)
self.Button_Forgot_pass.setGeometry(QtCore.QRect(290, 390, 171, 21))
self.Button_Forgot_pass.setObjectName("Button_Forgot_pass")
#------------
self.Button_Forgot_pass.clicked.connect(self.password_recovery)
#------------
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(210, 140, 461, 20))
self.lineEdit.setObjectName("lineEdit")
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(210, 190, 461, 20))
self.lineEdit_2.setObjectName("lineEdit_2")
#------------
self.lineEdit_2.setEchoMode(self.lineEdit_2.Password)
#------------
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 920, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionNew = QtWidgets.QAction(MainWindow)
self.actionNew.setObjectName("actionNew")
self.actionSave = QtWidgets.QAction(MainWindow)
self.actionSave.setObjectName("actionSave")
self.menuFile.addAction(self.actionNew)
self.menuFile.addAction(self.actionSave)
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def Password_hide(self):
"""
Checks status of "Show password" checkbox
"""
if self.checkBox_Show_pass.isChecked():
self.lineEdit_2.setEchoMode(self.lineEdit_2.Normal)
else:
self.lineEdit_2.setEchoMode(self.lineEdit_2.Password)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.ButtonEnter.setText(_translate("MainWindow", "Enter"))
self.ButtonRegistery.setText(_translate("MainWindow", "Register"))
self.ButtonCancel.setText(_translate("MainWindow", "Exit"))
self.checkBox_Rem_my_Username.setText(_translate("MainWindow", "Remember my Username"))
self.checkBox_Show_pass.setText(_translate("MainWindow", "Show password "))
self.Button_Forgot_pass.setText(_translate("MainWindow", "Forgot password?"))
self.lineEdit.setPlaceholderText(_translate("MainWindow", "Username"))
self.lineEdit_2.setPlaceholderText(_translate("MainWindow", "Password"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.actionNew.setText(_translate("MainWindow", "New"))
self.actionNew.setStatusTip(_translate("MainWindow", "Create a new file"))
self.actionSave.setText(_translate("MainWindow", "Save"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from generate_docs import DocUrlRewriter, find_doc_urls, get_doc_slug, value_strs_iter
from pants.util.docutil import doc_url
def test_gather_value_strs():
help_info = {
"a": "foo",
"b": ["bar", 5, "baz"],
"c": 42,
"d": True,
"e": {"f": 5, "g": "qux", "h": {"i": "quux"}},
}
assert set(value_strs_iter(help_info)) == {"foo", "bar", "baz", "qux", "quux"}
@pytest.mark.parametrize("arg", ["foo-bar", "baz3", "qux#anchor"])
def test_slug_for_url(arg: str) -> None:
expected_slug = arg.split("#")[0]
assert get_doc_slug(doc_url(arg)) == expected_slug
def test_slug_for_url_error() -> None:
with pytest.raises(ValueError) as excinfo:
get_doc_slug("https://notthedocsite.com/v2.6/foobar")
assert "Not a docsite URL" in str(excinfo.value)
def test_find_doc_urls() -> None:
strs = [
f"See {doc_url('foo-bar')} for details.",
f"See {doc_url('qux')}.", # Don't capture trailing dot.
f"See {doc_url('foo-bar')} and {doc_url('baz3')}", # Multiple urls in string.
]
assert find_doc_urls(strs) == {doc_url(slug) for slug in ["foo-bar", "baz3", "qux"]}
def test_doc_url_rewriter():
dur = DocUrlRewriter(
{
"foo": "Foo",
"bar": "Welcome to Bar!",
}
)
assert dur.rewrite(f"See {doc_url('foo')} for details.") == "See [Foo](doc:foo) for details."
assert (
dur.rewrite(f"Check out {doc_url('bar#anchor')}.")
== "Check out [Welcome to Bar!](doc:bar#anchor)."
)
|
"""App related serializers."""
from rest_framework import serializers
from modoboa.admin import models as admin_models
from ... import models
class MXRecordSerializer(serializers.ModelSerializer):
"""Serializer for MXRecord."""
class Meta:
model = admin_models.MXRecord
fields = ("name", "address", "updated")
class DNSBLResultSerializer(serializers.ModelSerializer):
"""Serializer for DNSBLResult."""
mx = MXRecordSerializer()
class Meta:
model = admin_models.DNSBLResult
fields = ("provider", "mx", "status")
class DNSRecordSerializer(serializers.ModelSerializer):
"""Serializer for DNSRecord."""
class Meta:
model = models.DNSRecord
fields = ("type", "value", "is_valid", "error", "updated")
class DNSDetailSerializer(serializers.ModelSerializer):
mx_records = MXRecordSerializer(many=True, source="mxrecord_set")
autoconfig_record = DNSRecordSerializer()
autodiscover_record = DNSRecordSerializer()
spf_record = DNSRecordSerializer()
dkim_record = DNSRecordSerializer()
dmarc_record = DNSRecordSerializer()
dnsbl_results = DNSBLResultSerializer(many=True, source="dnsblresult_set")
class Meta:
model = admin_models.Domain
fields = (
"mx_records", "dnsbl_results", "autoconfig_record",
"autodiscover_record", "spf_record", "dkim_record", "dmarc_record"
)
|
from .kv_clients import MemcachedClient
from .query import MemcachedQuery
|
# Consider the following class declarations
class A:
pass
class B(A):
pass
class C:
pass
class D(C):
pass
class E(B, C, D):
pass
# Which of these sequences could be a valid method resolution order (MRO) for class E?
E, B, C, D, A, object
E, B, A, C, D, object
E, B, D, C, A, object
E, B, A, D, C, object
*None of the above
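# Why "None of the above" is marked: base C is listed before its own subclass D
# in E's bases, so no consistent C3 linearization exists and Python raises a
# TypeError at the class E(B, C, D) statement itself. A minimal standalone
# check (illustrative, separate from the quiz code above) would be:
#
#   try:
#       type("E", (B, C, D), {})
#   except TypeError as err:
#       print(err)  # Cannot create a consistent method resolution order (MRO) for bases C, D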
|
"""ChunkSet class.
Used by the OctreeLoader.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List, Set
if TYPE_CHECKING:
from napari.components.experimental.chunk._request import OctreeLocation
from napari.layers.image.experimental.octree_chunk import OctreeChunk
class ChunkSet:
"""A set of chunks with fast location membership test.
We use a dict as an ordered set, and then a set with just the locations
so OctreeLoader._cancel_futures() can quickly test if a location is
in the set.
"""
def __init__(self) -> None:
self._dict: Dict[OctreeChunk, int] = {}
self._locations: Set[OctreeLocation] = set()
def __len__(self) -> int:
"""Return the size of the size.
Returns
-------
int
The size of the set.
"""
return len(self._dict)
def __contains__(self, chunk: OctreeChunk) -> bool:
"""Return true if the set contains this chunk.
Returns
-------
bool
True if the set contains the given chunk.
"""
return chunk in self._dict
def add(self, chunks: List[OctreeChunk]) -> None:
"""Add these chunks to the set.
Parameters
----------
chunks : List[OctreeChunk]
Add these chunks to the set.
"""
for chunk in chunks:
self._dict[chunk] = 1
self._locations.add(chunk.location)
def chunks(self) -> List[OctreeChunk]:
"""Get all the chunks in the set.
Returns
-------
List[OctreeChunk]
All the chunks in the set.
"""
return list(self._dict.keys())
def has_location(self, location: OctreeLocation) -> bool:
"""Return True if the set contains this location.
Returns
-------
bool
True if the set contains this location.
"""
return location in self._locations
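# A minimal illustrative sketch (uses a hypothetical stand-in class instead of a
# real napari OctreeChunk, just to show the membership behaviour described above):
if __name__ == "__main__":
    class _FakeChunk:  # hypothetical stand-in for an OctreeChunk
        def __init__(self, location: str) -> None:
            self.location = location  # stand-in for an OctreeLocation
    fake = _FakeChunk("level0-(0,0)")
    chunk_set = ChunkSet()
    chunk_set.add([fake])
    print(fake in chunk_set)                       # True
    print(chunk_set.has_location("level0-(0,0)"))  # True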
|
#!/usr/bin/env python
##############################################################################
#
# NAME: SRM-probe
#
# FACILITY: SAM (Service Availability Monitoring)
#
# COPYRIGHT:
# Copyright (c) 2009, Members of the EGEE Collaboration.
# http://www.eu-egee.org/partners/
# Licensed under the Apache License, Version 2.0.
# http://www.apache.org/licenses/LICENSE-2.0
# This software is provided "as is", without warranties
# or conditions of any kind, either express or implied.
#
# DESCRIPTION:
#
# SRM probe.
#
# AUTHORS: Konstantin Skaburskas, CERN
#
# CREATED: 21-Nov-2008
#
# NOTES:
#
# MODIFIED:
# 2009-12-07 : Konstantin Skaburskas
# - using 'gridmon' and 'gridmetrics' packages after merging
# 'gridmonsam' with 'gridmon'
# - metrics implementation class was moved into gridmetrics.srmmetrics
##############################################################################
"""
SRM probe.
Konstantin Skaburskas <konstantin.skaburskas@cern.ch>, CERN
SAM (Service Availability Monitoring)
"""
import sys
try:
from gridmon import probe
from gridmetrics.srmmetrics import SRMMetrics as SRM
except ImportError,e:
summary = "UNKNOWN: Error loading modules : %s" % (e)
sys.stdout.write(summary+'\n')
sys.stdout.write(summary+'\nsys.path: %s\n'% str(sys.path))
sys.exit(3)
class SRMMetrics(SRM):
def __init__(self, tuples):
SRM.__init__(self, tuples, 'SRM')
runner = probe.Runner(SRMMetrics, probe.ProbeFormatRenderer())
sys.exit(runner.run(sys.argv))
|
from setuptools import setup, find_packages
setup(
name='fasth',
version='0.1',
packages=find_packages(),
install_requires=[
'screed',
'click',
'biopython'
],
author='Jordan Gumm',
author_email='jordan@variantanalytics.com',
description='A tool for quick fasta/fastq calculations'
)
|
from ..type import SimpleType
class NullType(SimpleType):
def __init__(self):
super(NullType, self).__init__()
self.typereference = "NULL"
|
import tensorflow as tf
import numpy as np
import functools
import argparse
import glob
import json
import os
from scipy.io import wavfile
from dataset import nsynth_input_fn
from models import GANSynth
from networks import generator, discriminator
from utils import Dict
from sys import exit
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", type=str, default="gan_synth_model/")
parser.add_argument('--filenames', type=str, default="nsynth_test.tfrecord")
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--num_epochs", type=int, default=None)
parser.add_argument("--total_steps", type=int, default=1000000)
parser.add_argument("--growing_steps", type=int, default=1000000)
parser.add_argument('--train', default=False, action="store_true")
parser.add_argument('--generate', default=True, action="store_true")
args = parser.parse_args()
with tf.Graph().as_default():
tf.set_random_seed(0)
growing_level=tf.cast(tf.divide(x=tf.train.create_global_step(),y=args.growing_steps), tf.float32)
gan_synth = GANSynth(
args = args,
generator=generator,
discriminator=discriminator,
growing_level = growing_level
)
os.makedirs("results", exist_ok=True)
generator = gan_synth.generate(
model_dir=args.model_dir
)
num_waveforms = 0
print("generator--->", generator)
# for waveforms in generator:
# print("Arriva ------>", waveforms)
# for waveform in waveforms:
# wavfile.write(f"results/{num_waveforms}.wav", rate=16000, data=waveform)
# num_waveforms += 1
print(f"{num_waveforms} waveforms are generated in `results` directory")
|
import os
import sys
import cv2
from multiprocessing.dummy import Pool as ThreadPool
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
voc_dict = {
'aeroplane' : 0,
'bicycle' : 1,
'bird' : 2,
'boat' : 3,
'bottle' : 4,
'bus' : 5,
'car' : 6,
'cat' : 7,
'chair' : 8,
'cow' : 9,
'diningtable' : 10,
'dog' : 11,
'horse' : 12,
'motorbike' : 13,
'person' : 14,
'pottedplant' : 15,
'sheep' : 16,
'sofa' : 17,
'train' : 18,
'tvmonitor' : 19
}
crop_label = list()
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_struct)
return objects
def crop_img(image_id):
img_folder = "/localSSD/yyq/VOCdevkit0712/VOC0712/JPEGImages"
anno_folder = "/localSSD/yyq/VOCdevkit0712/VOC0712/Annotations"
img_save_folder = "/localSSD/yyq/VOCdevkit0712/VOC0712/class/images"
img_path = os.path.join(img_folder, image_id+".jpg")
anno_path = os.path.join(anno_folder, image_id+".xml")
im = cv2.imread(img_path)
im_w, im_h = im.shape[1], im.shape[0]
anno_bbox = parse_rec(anno_path)
for idx, obj_bbox in enumerate(anno_bbox):
bbox = obj_bbox['bbox']
cls = obj_bbox['name']
xmin, ymin, xmax, ymax = bbox[0], bbox[1], bbox[2], bbox[3]
xmin, ymin = max(0, xmin), max(0, ymin)
xmax, ymax = min(xmax, im_w), min(ymax, im_h)
w, h = xmax - xmin, ymax - ymin
if max(w, h) > 100:
crop_img = im[ymin:ymax, xmin:xmax, :]
crop_img_name = os.path.join(img_save_folder, image_id+"_{}_{}.jpg".format(str(cls), str(idx)))
cv2.imwrite(crop_img_name, crop_img)
crop_label.append((crop_img_name, cls))
image_id_list = list()
with open("/localSSD/yyq/VOCdevkit0712/VOC0712/ImageSets/Main/0712_trainval.txt", "r") as f:
for i in f.readlines():
image_id_list.append(i.strip())
pool = ThreadPool(processes=20)
pool.map(crop_img, image_id_list)
pool.close()
pool.join()
with open("cls_train.txt", "w") as f:
for item in crop_label:
print(item[0], voc_dict[item[1]], file=f)
|
txt = input()
cro_alpha = ['dz=','c=', 'c-' ,'d-','lj','nj','s=', 'z=']
for alpha in cro_alpha:
txt = txt.replace(alpha,'1')
print(len(txt))
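# Illustrative note (an addition, not part of the original submission): the tokens above are
# ASCII transliterations of Croatian digraphs and accented letters (e.g. 'dz=' for dž, 's=' for š),
# so each one collapses to a single character before the length is printed. For the input
# "s=al" the replacement yields "1al" and the program prints 3.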
|
# Generated by Django 2.2.6 on 2019-11-01 17:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('work', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='surveyqty',
name='pole_ht',
),
migrations.RemoveField(
model_name='surveyqty',
name='pole_lt',
),
migrations.AddField(
model_name='surveyqty',
name='dtr_100',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='surveyqty',
name='dtr_25',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='surveyqty',
name='dtr_63',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='surveyqty',
name='pole_8m',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='surveyqty',
name='pole_9m',
field=models.IntegerField(default=1),
preserve_default=False,
),
]
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import unittest
import time
from TestUtils import TestUtilsMixin, ROOT_PASSWORD
log = logging.getLogger('test.auto')
class WriteLots(unittest.TestCase, TestUtilsMixin):
"""Start a clean accumulo, ingest some data using lots of clients at once,
and verify it was stored properly"""
order = 30
settings = TestUtilsMixin.settings.copy()
settings['instance.zookeeper.timeout'] = '40s'
def ingest(self, host, start, count, **kwargs):
klass = 'org.apache.accumulo.test.TestIngest'
args = '--user root --size 50 --random 56 --rows %d --start %d --cols 1 -p %s' % (count, start, ROOT_PASSWORD)
return self.runClassOn(host, klass, args.split(), **kwargs)
def setUp(self):
        TestUtilsMixin.setUp(self)
# initialize the database
self.createTable("test_ingest")
def tearDown(self):
TestUtilsMixin.tearDown(self)
self.pkill(self.masterHost(), 'TestIngest')
def runTest(self):
N = 10*len(self.hosts)
waitTime = 60 * N * self.options.rows / 200000 + 90
log.info("Starting %d clients", N)
handles = []
for i in range(N):
# start test ingestion
handle = self.ingest(self.hosts[i % len(self.hosts)],
i * self.options.rows,
self.options.rows)
handles.append(handle)
end = time.time() + waitTime
for handle in handles:
waitTime = end - time.time()
log.debug("Waiting %s seconds", waitTime)
self.waitForStop(handle, waitTime)
log.info("Verifying Ingestion")
self.waitForStop(self.verify(self.masterHost(), self.options.rows * N),
waitTime)
self.shutdown_accumulo()
def suite():
result = unittest.TestSuite()
result.addTest(WriteLots())
return result
|
'''
Name: Neil Shah
UCID: ns642
Section: 005
'''
import sys
from socket import*
serverIP = "192.168.1.122"
serverPort = 8000
dataLen = 1000000
#Create a UDP socket
serverSocket = socket(AF_INET,SOCK_DGRAM)
#Assign IP address and port number to socket
serverSocket.bind((serverIP, serverPort))
print('The server is ready to receive on port: '+str(serverPort))
#loop forever listening for incoming datagram messages
while True:
    #Receive and print the client data from "data" socket
    data,address = serverSocket.recvfrom(dataLen)
    print("Received data from client "+address[0]+", "+str(address[1])+": "+data.decode())
#Echo back to client
print("Sending data to client "+address[0]+", "+str(address[1])+": "+data.decode())
serverSocket.sendto(data,address)
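# Minimal matching client sketch (an addition for illustration; the server loop above never
# exits, so this is left as comments):
# clientSocket = socket(AF_INET, SOCK_DGRAM)
# clientSocket.sendto("hello".encode(), (serverIP, serverPort))
# reply, _ = clientSocket.recvfrom(dataLen)
# print("Echo from server: " + reply.decode())
# clientSocket.close()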
|
import os
import torch
from torch.autograd import Variable
def make_folder(path, version):
if not os.path.exists(os.path.join(path, version)):
os.makedirs(os.path.join(path, version))
def tensor2var(x, grad=False):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x, requires_grad=grad)
def var2tensor(x):
return x.data.cpu()
def var2numpy(x):
return x.data.cpu().numpy()
def denorm(x):
out = (x + 1) / 2
return out.clamp_(0, 1)
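# Minimal usage sketch (an addition, not part of the original module): round-trip a random
# tensor through the helpers above.
if __name__ == "__main__":
    t = torch.randn(2, 3)
    v = tensor2var(t)            # moves to GPU when available and wraps in a Variable
    print(var2numpy(v).shape)    # (2, 3)
    print(denorm(t))             # values mapped from [-1, 1] into [0, 1]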
|
from datetime import datetime, timedelta
import time
import smtplib
import email.utils
from email.mime.text import MIMEText
from privateConstants import *
def toMsEpoch(date):
tt = datetime.timetuple(date)
sec_epoch_loc = int(time.mktime(tt) * 1000)
return sec_epoch_loc
def fromMsEpoch(ms):
s = ms / 1000.0
date = datetime.fromtimestamp(s)
return date
def costString(cost, usingPoints):
return "%s%s %s" % ("$" if not usingPoints else "", cost, "points" if usingPoints else "")
def diffCostString(diff, usingPoints):
return "%s%s %s" % ("$" if not usingPoints else "", diff, "points" if usingPoints else "")
def sendEmail(to, subject, message):
msg = MIMEText(message)
msg['To'] = email.utils.formataddr(('Recipient', to))
msg['From'] = email.utils.formataddr(('Dragon Fare Scanner', author))
msg['Subject'] = subject
try:
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(author, password)
server.sendmail(author, to, msg.as_string())
server.close()
print 'successfully sent the mail'
except Exception as e:
print 'fail to send mail'
print(e)
|
# -*- coding: utf-8 -*-
"""
_version.py
~~~~~~~~~~~
Provides Viki version information.
:license: Apache2, see LICENSE for more details.
"""
__version__ = "0.0.1.dev2"
__all__ = ["__version__"]
|
from time import time
def isPrime(n):
if n%2 == 0 and not(n==2):
return False
for i in range(3,n,2):
if n%i == 0:
return False
return True
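# Usage sketch (an addition): time a few checks with the otherwise-unused `time` import above.
# Note: with this implementation isPrime(1) returns True.
if __name__ == "__main__":
    start = time()
    for n in (2, 15, 97):
        print(n, isPrime(n))
    print("elapsed seconds:", time() - start)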
|
#!/usr/bin/python
import sys
oldWeekday = None
sum = 0
count = 0
def printline (key, value): print key, "\t", value
def printResult (weekday, sum, count):
if sum == 0 or count == 0:
printline(weekday, 0)
else:
printline(weekday, (sum/count))
for line in sys.stdin:
data = line.strip().split("\t")
thisWeekday, thisPrice = data
if oldWeekday == None:
oldWeekday = thisWeekday
if oldWeekday != thisWeekday:
printResult(oldWeekday, sum, count)
oldWeekday = thisWeekday
sum = 0
count = 0
sum += float(thisPrice)
count += 1
if oldWeekday != None:
printResult(oldWeekday, sum, count)
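# Example of the expected tab-separated stdin for this reducer (illustrative values only):
#   Monday\t12.50
#   Monday\t7.25
#   Tuesday\t3.00
# which would print the per-weekday averages 9.875 and 3.0 (input must be sorted by key,
# as Hadoop streaming guarantees).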
|
import os
import csv
path = os.path.join("budget_data.csv")
with open(path) as budget_data:
# using the csv.reader method to read the data
reader = csv.reader(budget_data)
# skipping the first row because it's not valuable for our analysis
header = next(reader)
# set a counter before the for loop, put it inside the for loop and let it add 1 at every iteration
Total_month = 0
for row_of_data in reader:
#print(row_of_data[2])
Total_month += 1
import pandas as pd
df=pd.read_csv("budget_data.csv")
df.head()
x=0
for i in df['Profit/Losses']:
x+=i
print(x)
df['PPl']= df['Profit/Losses'].shift(1)
df.head()
df['PC']=df['Profit/Losses']-df['PPl']
df.head()
Total_PC=df['PC'].sum()
Total_PC
AVG_change= Total_PC/(Total_month-1)
AVG_change
Greatest_increase_index= df['PC'].idxmax()
Greatest_increase_index
Greatest_increase= df['PC'][df['PC'].idxmax()]
Greatest_increase
Greatest_increase_date = df['Date'][Greatest_increase_index]
Greatest_increase_date
Greatest_decrease_index= df['PC'].idxmin()
Greatest_decrease_index
Greatest_decrease_date = df['Date'][Greatest_decrease_index]
Greatest_decrease_date
Greatest_decrease= df['PC'][df['PC'].idxmin()]
Greatest_decrease
print ("Total Month is :",Total_month)
print(f"Greatest Increase in Profits: {Greatest_increase_date} (${str(Greatest_increase)})")
print(f"Greatest Decrease in Profits: {Greatest_decrease_date} (${str(Greatest_decrease)})")
|
# TODO: flesh out the try/except handling
# Account balance must not go negative
# Registration feature is not finished yet
import Choice
import datetime
import cx_Oracle as ora
import time
# Register an account
def register(curs, conn):
conn.commit()
# Log in to the system
def logon(curs, conn):
account_id = input("请输入账号ID:")
passwd = input("请输入账号密码:")
global db_passwd
logon_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(logon_date)
check_pass = "select passwd from account where id = {}".format(account_id)
for item in curs.execute(check_pass):
db_passwd = item[0]
if passwd == db_passwd:
logon_status = 'Y'
print("Logon Successfully!")
choice(curs, conn, account_id)
else:
logon_status = 'N'
print("Logon Failed!")
logon(curs, conn)
logon_hist_insert = "insert into logon_hist values({},to_date('{}','yyyy-mm-dd hh24:mi:ss')," \
"'{}')".format(account_id, logon_date, logon_status)
curs.execute(logon_hist_insert)
return account_id
# Choose an operation
def choice(curs, conn, account_id):
cho = input("""请输入你要干啥:
1. 存款
2. 取款
3. 转账
4. 查询""")
if cho == '1':
deposit(curs, account_id)
elif cho == '2':
withdraw(curs, account_id)
elif cho == '3':
transfer(curs, account_id)
elif cho == '4':
sql_money = "select money from account where id={}".format(account_id)
curs.execute(sql_money)
money = curs.fetchone()[0]
print("您账户的余额为 {} RMB".format(money))
elif cho == '0':
return True
else:
print("请重新登陆:")
logon(curs, conn)
# Deposit
def deposit(curs, account_id):
amount = input("请输入存入的数额:")
try:
upd_money = "update account set money = money + {} where id={}".format(amount, account_id)
curs.execute(upd_money)
except:
print('***** 连接/查询 失败! *****')
# Withdrawal
def withdraw(curs, account_id):
amount = input("请输入取出的数额:")
upd_money = "update account set money = money - {} where id={}".format(amount, account_id)
curs.execute(upd_money)
# Transfer
def transfer(curs, account_id):
to_id = input("请输入您要转向的账户ID:")
amount = input("请输入转账的数额:")
from_money = "update account set money = money - {} where id={}".format(amount, account_id)
to_money = "update account set money = money + {} where id={}".format(amount, to_id)
try:
curs.execute(from_money)
curs.execute(to_money)
except ora.DatabaseError as exc:
        # Unpack the exception details into the variable named error
print('***** 连接/查询 失败! *****')
error, = exc.args
time_now = time.strftime("%Y%m%d %H:%M:%S", time.localtime())
print("数据库错误代码: {}\n发生错误时间为: {}\n".format(error.message, time_now))
if error.code == 936:
print('请重新输入!')
transfer(curs, account_id)
# ORA-12899: value too large for column
def vbank(curs, conn):
reg = input("您是否已有账户:1.有 2.没有")
if reg == '2':
        # Register with the banking system
register(curs, conn)
elif reg == '1':
        # Log in to the banking system
account_id = logon(curs, conn)
print("登陆用户为", account_id)
incont = input("是否继续:(输入1继续,输入0返回上一菜单,其他输入退出)")
if incont == '1':
choice(curs, conn, account_id)
elif incont == '0':
Choice.choice(curs, conn)
else:
return True
conn.commit()
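# Hedged note (an addition, not part of the original script): the string-formatted SQL above
# is open to SQL injection. cx_Oracle supports bind variables, e.g. (sketch only):
# curs.execute("update account set money = money + :amt where id = :acct",
#              amt=amount, acct=account_id)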
|
from celery import shared_task
from django.core.mail import send_mail
from django.contrib.auth import get_user_model
from django.core.mail import EmailMultiAlternatives
from django.template import loader
UserModel = get_user_model()
# Send the signup email
@shared_task
def signup_mail(subject, message, sender, receivers):
send_mail(subject, message, sender, receivers)
# Password reset email
@shared_task
def password_mail(subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, 'text/html')
email_message.send()
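# Usage sketch (an addition; the addresses are placeholders): queue the signup mail
# asynchronously from a view or signal handler.
# signup_mail.delay("Welcome", "Thanks for signing up!", "noreply@example.com", ["user@example.com"])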
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
from flask import Blueprint
department_api = Blueprint('department', __name__, url_prefix='/api/department')
from . import views
|
from appengine_django.models import BaseModel
from google.appengine.ext import db
# Create your models here.
|
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
import codecs
def load_data_file(data_file):
print('loading file ', data_file)
raw_data = []
if not isinstance(data_file, list):
data_file = [data_file]
for file_name in data_file:
with codecs.open(file_name, 'r', 'utf-8') as f:
for line in f:
line = line.strip()
if not line:
continue
sample = parse_line(line)
raw_data.append((sample, len(raw_data)))
return raw_data
def parse_line(line):
    '''You should write specific parsing code here'''
fields = line.split('\t')
pID = fields[0]
sourceID = fields[1]
targetID = fields[2]
rels = fields[3].split(',')
sent = fields[4]
return pID,sourceID,targetID,rels,sent
def load_confidence(conf_file):
raw_conf = []
with codecs.open(conf_file, 'r', 'utf-8') as f:
for line in f:
line = line.strip()
if not line:
continue
fields = line.split('\t')
prediction = fields[0]
conf = fields[1]
conf = float(conf)
raw_conf.append(((prediction,conf), len(raw_conf)))
return raw_conf
def combine(raw_data, raw_conf):
return [(pid,sid,tid,rlabels,sent,rpred,conf,id1) for ((pid,sid,tid,rlabels,sent), id1), ((rpred,conf), id2) in zip(raw_data,raw_conf)]
def group(combined_raw):
grouped_data = dict()
for pid,sid,tid,rlabels,sent,rpred,conf,id in combined_raw:
if (pid,sid,tid) not in grouped_data:
grouped_data[(pid,sid,tid)] = [(rlabels,sent,rpred,conf,id)]
else:
grouped_data[(pid,sid,tid)].append((rlabels,sent,rpred,conf,id))
return grouped_data
raw_data = load_data_file('/home/gyc/Data/held_out_tester/test.sent.txt')
raw_conf = load_confidence('/home/gyc/Data/held_out_tester/test.scores.txt')
y_true = [rlabels[0] for (pid,sid,tid,rlabels,sent), id1 in raw_data]
y_pred = [rpred for (rpred,conf), id2 in raw_conf]
# multi-class and multi-label evaluation
# return f1_score(y_true, y_pred, average='macro')
# return f1_score(y_true, y_pred, average='micro')
print f1_score(y_true, y_pred, average='weighted')
# print f1_score(y_true, y_pred, average=None)
print confusion_matrix(y_true, y_pred)
y_true_set = set([(sid,tid,rlabels[0]) for (pid,sid,tid,rlabels,sent), id1 in raw_data])
y_pred_conf = [((sid,tid,rpred),conf) for ((pid,sid,tid,rlabels,sent), id1), ((rpred,conf), id2) in zip(raw_data,raw_conf)]
y_pred_sorted = sorted(y_pred_conf, key=lambda d:d[1], reverse=True)
count_pred,count_correct = 0.0,0.0
count_total = len(y_true_set)
for y,p in y_pred_sorted:
sid,tid,r = y
count_pred += 1
if y in y_true_set:
count_correct += 1
precision = count_correct/count_pred
recall = count_correct/count_total
f1score = 2*precision*recall/(precision+recall)
print 'precision, recall, f1 :: ', precision,recall,f1score
combined_raw = combine(raw_data,raw_conf)
grouped_data = group(combined_raw)
y_true_dict = dict()
for (pid,sid, tid, rlabels, sent), id1 in raw_data:
if (pid,sid,tid) not in y_true_dict:
y_true_dict[(pid,sid,tid)] = set(rlabels)
else:
for r in rlabels:
y_true_dict[(pid,sid,tid)].add(r)
y_true_num = [len(rs) for (pid,sid,tid),rs in y_true_dict.items()]
num_relations = sum(y_true_num)
print num_relations
y_pred_dict = dict()
for (pid,sid,tid),menList in grouped_data.items():
for rlabels,sent,rpred,conf,id in menList:
if rpred!='NA':
if (pid,sid,tid) not in y_pred_dict:
y_pred_dict[(pid,sid,tid)] = set([rpred])
else:
y_pred_dict[(pid,sid, tid)].add(rpred)
y_pred_num = [len(rs) for (pid,sid,tid),rs in y_pred_dict.items()]
num_predictions = sum(y_pred_num)
print num_predictions
num_correct = 0.0
for (pid,sid,tid),rpreds in y_pred_dict.items():
if (pid,sid,tid) in y_true_dict:
rtrues = y_true_dict[(pid,sid,tid)]
rinters = rpreds.intersection(rtrues)
num_correct += len(rinters)
print num_correct
print 'precision, recall', num_correct/num_predictions, num_correct/num_relations
|
import flask
import json
import os
import logging
import shlex
import time
from datetime import datetime, timedelta
app = flask.Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'upload'
@app.route('/status/<user_name>')
def status(user_name):
d = datetime.today() - timedelta(days=2)
start_time = d.strftime('%Y-%m-%d')
partition = 'experiment'
result = os.popen('sacct -S {} -u {} -b -n -P -r {}'.format(start_time, user_name, partition)).read()
response = []
for row in result.split('\n')[:-1]:
job_id, state, _ = row.split('|')
response.append({'job_id': job_id, 'slurm_state': state})
response = app.response_class(
response=json.dumps(response),
status=200,
mimetype='application/json'
)
return response
@app.route('/download/<experiment_name>/<file_name>')
def download(experiment_name, file_name):
working_dir = os.path.join(app.config['UPLOAD_FOLDER'], experiment_name)
return flask.send_from_directory(working_dir, file_name)
@app.route('/submit/<experiment_name>', methods=['POST'])
def submit(experiment_name):
config = flask.request.get_json()
wafer_module = config['wafer_module']
command = config['command']
software_version = config.get('software_version', 'nmpm_software/current')
working_directory = os.path.join(app.config['UPLOAD_FOLDER'], experiment_name)
app.logger.info(json.dumps(config))
app.logger.info(working_directory)
cwd = os.getcwd()
os.chdir(working_directory)
output = ''
try:
command = shlex.split(command)
shell_command = ['sbatch', '-p', 'experiment', '--wmod', str(wafer_module), '../../wafer_wrap.sh', 'singularity', 'exec', '--app',
'visionary-wafer', '$CONTAINER_IMAGE_NMPM_SOFTWARE', 'python'] + command
start_time = time.time()
out = os.popen(' '.join(shell_command))
output = out.read()
slurm_jobid = int(output.split(' ')[3])
data = {'result': 'ok', 'job_id': slurm_jobid, 'start_time': start_time }
except:
data = {'result': 'failed', 'output': output}
response = app.response_class(
response=json.dumps(data),
status=200,
mimetype='application/json'
)
os.chdir(cwd)
return response
@app.route('/upload/<experiment_name>', methods=['POST'])
def upload(experiment_name):
wafer_config = flask.request.files['wafer_config']
wafer_script = flask.request.files['wafer_script']
os.mkdir(os.path.join('upload', experiment_name))
wafer_config.save(os.path.join(app.config['UPLOAD_FOLDER'], experiment_name, wafer_config.filename))
wafer_script.save(os.path.join(app.config['UPLOAD_FOLDER'], experiment_name, wafer_script.filename))
data = {'result': 'ok'}
response = app.response_class(
response=json.dumps(data),
status=200,
mimetype='application/json'
)
return response
if __name__ == "__main__":
app.run()
|
import math
from function import FunctionManager
from regression import minimise_loss, find_classification
from lossfunction import squared_error
from plotting import plot_ideal_functions, plot_points_with_their_ideal_function
from utils import write_deviation_results_to_sqlite
# This constant is the factor for the criterion. It is specific to the assignment
ACCEPTED_FACTOR = math.sqrt(2)
if __name__ == '__main__':
# Provide paths for csv files
ideal_path = "data/ideal.csv"
train_path = "data/train.csv"
# The FunctionManager accepts a path to a csv and parses Function objects from the data.
# A Function stores X and Y points of a function. It uses Pandas to do this efficiently.
candidate_ideal_function_manager = FunctionManager(path_of_csv=ideal_path)
train_function_manager = FunctionManager(path_of_csv=train_path)
# A FunctionManager uses the .to_sql function from Pandas
    # The suffix is added to comply with the required table structure
train_function_manager.to_sql(file_name="training", suffix=" (training func)")
candidate_ideal_function_manager.to_sql(file_name="ideal", suffix=" (ideal func)")
    # As a recap:
    # Within train_function_manager 4 functions are stored.
    # Within ideal_function_manager 50 functions are stored.
    # In the next step we can use this data to compute an IdealFunction.
    # An IdealFunction stores, among other things, the best fitting function and the train data, and can compute the tolerance.
# All we now need to do is iterate over all train_functions
# Matching ideal functions are stored in a list.
ideal_functions = []
for train_function in train_function_manager:
# minimise_loss is able to compute the best fitting function given the train function
ideal_function = minimise_loss(training_function=train_function,
list_of_candidate_functions=candidate_ideal_function_manager.functions,
loss_function=squared_error)
ideal_function.tolerance_factor = ACCEPTED_FACTOR
ideal_functions.append(ideal_function)
# We can use the classification to do some plotting
plot_ideal_functions(ideal_functions, "train_and_ideal")
# Now it is time to look at all points within the test data
    # The FunctionManager provides everything necessary to load a CSV, so it is reused here.
# Instead of multiple Functions like before, it will now contain a single "Function" at location [0]
# The benefit is that we can iterate over each point with the Function object
test_path = "data/test.csv"
test_function_manager = FunctionManager(path_of_csv=test_path)
test_function = test_function_manager.functions[0]
points_with_ideal_function = []
for point in test_function:
ideal_function, delta_y = find_classification(point=point, ideal_functions=ideal_functions)
result = {"point": point, "classification": ideal_function, "delta_y": delta_y}
points_with_ideal_function.append(result)
    # Recap: points_with_ideal_function now holds a list of dictionaries.
# These dictionaries represent the classification result of each point.
# We can plot all the points with the corresponding classification function
plot_points_with_their_ideal_function(points_with_ideal_function, "point_and_ideal")
    # Finally the list of dicts is written to a sqlite database
    # In this method a pure SQLAlchemy approach with a MetaData object was chosen to avoid writing raw SQL
write_deviation_results_to_sqlite(points_with_ideal_function)
print("following files created:")
print("training.db: All training functions as sqlite database")
print("ideal.db: All ideal functions as sqlite database")
print("mapping.db: Result of point test in which the ideal function and its delta is computed")
print("train_and_ideal.html: View the train data as scatter and the best fitting ideal function as curve")
print("points_and_ideal.html: View for those point with a matching ideal function the distance between them in a figure")
print("Author: Maurice ten Koppel")
print("Date: 01. September 2020")
print("Script completed successfully")
|
# coding: utf-8
# In[1]:
# Import pandas package
import pandas as pd
# Define a dictionary containing employee data
data = {'Name':['Jai', 'Princi', 'Gaurav', 'Anuj'],
'Age':[27, 24, 22, 32],
'Address':['Delhi', 'Kanpur', 'Allahabad', 'Kannauj'],
'Qualification':['Msc', 'MA', 'MCA', 'Phd']}
# Convert the dictionary into DataFrame
df = pd.DataFrame(data)
# select two columns
df[['Name', 'Qualification']]
print(df)
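# Note (an addition): the column selection above is not kept; assign it to reuse the
# two-column frame, e.g.:
# subset = df[['Name', 'Qualification']]
# print(subset)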
|
__author__ = 'sjaku'
import requests
from bs4 import BeautifulSoup, Tag
import csv
import pyodbc
import urllib
import os
url = "https://www.otodom.pl/oferta/dom-w-poznaniu-cena-tylko-539-000zl-ID3qyMk.html"
r = requests.get(url)
soup = BeautifulSoup(r.content, "html.parser")
#for dom in soup.find_all('section', class_='section-offer-title'):
title = soup.find("h1", {"itemprop": "name"}).text
print title
price = soup.find("strong", class_='box-price-value no-estimates').text
print price
phone = soup.find("span", class_="phone-number").text
#print phone
pelna_cena = soup.find("li", class_="param_price").text
print pelna_cena
powierzchnia = soup.find("li", class_="param_m").text
p = powierzchnia.split(" ")[1]
print powierzchnia
powierzchnia_dzialki = soup.find("li", class_="param_terrain_area").text
print powierzchnia_dzialki
liczba_pieter = soup.find("li", class_="param_floors_num").text
print liczba_pieter
main_list = soup.find("ul", class_="main-list")
#print main_list
|
from flask import Flask
class Decorator_Class:
def __init__(self):
self.metric = "metric"
def decorator_p(self, value):
print(f"Value received from declaring usage of decorator {value}")
def decorator_c(func):
print("First entry")
def inner(*args, **kwargs):
print("Second entry and function return")
return func(*args, **kwargs)
return inner
return decorator_c
def decorator_x(self, func):
print("One less level decorator trying")
def inner_x(*args, **kwargs):
print("Function return")
return func(*args, **kwargs)
return inner_x
d = Decorator_Class()
# @d.decorator_p("rrr111")
# def mock(str):
# print(str)
#mock("jjjj")
if __name__ == '__main__':
app = Flask(__name__)
#monitor(app)
@app.route('/with_passing_value')
@d.decorator_p("calling api")
def index():
return "one more level wrapped"
@app.route('/without_passing_value')
@d.decorator_x
def index():
return "one less level wrapped"
# Run the application!
app.run()
|
# 1. In the code provided, there are three mistakes that stop the code from running successfully; find those mistakes and explain why they need to be corrected for the code to run
# 2. Add an embedding layer to the model; did you experience any improvement?
# Task 1
# importing the required libraries
from keras.models import Sequential
from keras import layers
from keras.preprocessing.text import Tokenizer
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# reading the data file
df = pd.read_csv('imdb_master.csv', encoding='latin-1')
print(df.head())
# extracting the features and target
sentences = df['review'].values
y = df['label'].values
# tokenizing the data
tokenizer = Tokenizer(num_words=2000)
tokenizer.fit_on_texts(sentences)
# getting the vocabulary of data
sentences = tokenizer.texts_to_matrix(sentences)
# label encoding the target and splitting the data
le = preprocessing.LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(sentences, y, test_size=0.25, random_state=1000)
input_dim = np.prod(X_train.shape[1:])
print(input_dim)
# implementing the model
model = Sequential()
model.add(layers.Dense(300, input_dim=input_dim, activation='relu'))
model.add(layers.Dense(3, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam',metrics=['acc'])
history = model.fit(X_train, y_train, epochs=5, verbose=True, validation_data=(X_test, y_test), batch_size=256)
# evaluating the model
[test_loss, test_acc] = model.evaluate(X_test, y_test)
print("Evaluation result on Test Data : Loss = {}, accuracy = {}".format(test_loss, test_acc))
# loss and accuracy plot
import matplotlib.pyplot as mplt
# summarize history for accuracy
mplt.plot(history.history['acc'])
mplt.plot(history.history['val_acc'])
mplt.title('model accuracy')
mplt.ylabel('accuracy')
mplt.xlabel('epoch')
mplt.legend(['accuracy', 'val_accuracy'], loc='upper left')
mplt.show()
# summarize history for loss
mplt.plot(history.history['loss'])
mplt.plot(history.history['val_loss'])
mplt.title('model loss')
mplt.ylabel('loss')
mplt.xlabel('epoch')
mplt.legend(['loss', 'val_loss'], loc='upper right')
mplt.show()
# Task 2
# Adding embedding layer to the model
# embedding Layer pre-processing
from keras.preprocessing.sequence import pad_sequences
pureSentences = df['review'].values
max_review_len = max([len(s.split()) for s in pureSentences])
vocab_size = len(tokenizer.word_index)+1
sentencesPre = tokenizer.texts_to_sequences(pureSentences)
padded_docs = pad_sequences(sentencesPre, maxlen=max_review_len)
X_train, X_test, y_train, y_test = train_test_split(padded_docs, y, test_size=0.25, random_state=1000)
print(vocab_size)
print(max_review_len)
# implementing the model by adding the embedding layer to the model
from keras.layers import Embedding, Flatten
m = Sequential()
m.add(Embedding(vocab_size, 50, input_length=max_review_len))
m.add(Flatten())
m.add(layers.Dense(300, activation='relu',input_dim=max_review_len))
m.add(layers.Dense(3, activation='softmax'))
m.compile(loss='sparse_categorical_crossentropy', optimizer='adam',metrics=['acc'])
history2 = m.fit(X_train, y_train, epochs=5, verbose=True, validation_data=(X_test, y_test), batch_size=256)
# evaluating the model
[test_loss1, test_acc1] = m.evaluate(X_test, y_test)
print("Evaluation result on Test Data : Loss = {}, accuracy = {}".format(test_loss1, test_acc1))
# loss and accuracy plot after adding the embedding layer
# history summarization for accuracy plot
mplt.plot(history2.history['acc'])
mplt.plot(history2.history['val_acc'])
mplt.title('model accuracy')
mplt.ylabel('accuracy')
mplt.xlabel('epoch')
mplt.legend(['accuracy', 'val_accuracy'], loc='upper left')
mplt.show()
# history summarization for loss plot
mplt.plot(history2.history['loss'])
mplt.plot(history2.history['val_loss'])
mplt.title('model loss')
mplt.ylabel('loss')
mplt.xlabel('epoch')
mplt.legend(['loss', 'val_loss'], loc='upper right')
mplt.show()
predt = m.predict_classes(X_test[[2], :])
print("Actual label", y_test[2], "Predicted label", predt)
|
##############################################################################
#
# Copyright (C) 2020-2030 Thorium Corp FP <help@thoriumcorp.website>
#
##############################################################################
from .product_template import *
from .lab_product import *
from .thoriumcorp_lab import *
|
from random import random
N = 30
# r = [random() for i in range(N)]
r = [0.32, 0.01, 0.23, 0.28, 0.89, 0.31, 0.64, 0.28, 0.83, 0.93, 0.99, 0.15, 0.33, 0.35, 0.91, 0.41, 0.6, 0.27, 0.75, 0.88, 0.68, 0.49, 0.05, 0.43, 0.95, 0.58, 0.19, 0.36, 0.69, 0.87]
i, m = 3, 5
# i + (M+1)m <= N
M = (N - i) // m - 1
# seq = [(r[i + k*m - 1], r[i + (k+1)*m - 1]) for k in range(M + 1)]
# print(seq)
rho = sum([r[i + k*m - 1] * r[i + (k+1)*m - 1] for k in range(M + 1)]) / (M + 1) - 0.25
sigma = ((13 * M + 7) ** 0.5) / (12 * (M + 1))
z_alpha_by_two = 1.96
z = rho / sigma
# print(rho, sigma)
print(z, z_alpha_by_two)
if -z_alpha_by_two <= z <= z_alpha_by_two:
print('Failed to reject Autocorrelation Test.')
else:
print('Rejected Autocorrelation Test!')
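# Hedged helper sketch (an addition, not part of the original test): the same two-sided
# decision wrapped as a function, reusable for other (i, m) lag choices.
def autocorrelation_accepts(z_stat, z_crit=1.96):
    """Return True when z_stat lies inside the acceptance region [-z_crit, z_crit]."""
    return -z_crit <= z_stat <= z_crit
# autocorrelation_accepts(z) -> True means "failed to reject" the independence hypothesis.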
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:utils.py
# @Author: Michael.liu
# @Date:2020/4/23 14:06
# @Desc: utility helpers
import numpy as np
from .Relation import *
from .Sentence import *
def cut_sentences(content):
    # Sentence-ending punctuation, covering both Chinese and English marks
end_flag = ['?', '!', '.', '?', '!', '。', '…']
content_len = len(content)
sentences = []
tmp_char = ''
for idx, char in enumerate(content):
        # Append the current character
tmp_char += char
        # Check whether this is the last character
if (idx + 1) == content_len:
sentences.append(tmp_char)
break
        # Check whether this character is an end marker
if char in end_flag:
            # If the next character is not also an end marker, split the sentence here
next_idx = idx + 1
if not content[next_idx] in end_flag:
sentences.append(tmp_char)
tmp_char = ''
return sentences
class DataUtil:
def __init__(self,squence_length):
self.wordvector_dim = 0
self.sequence_length =squence_length
self.word2index = {}
self.index2vector =[]
self.relations ={}
self.bags_train ={}
self.training_data =[]
self.bags_test = {}
self.load_word2vec()
self.load_relations()
def load_word2vec(self):
wordvector =list(open("../data/vector1.txt").readlines())
wordvector =[s.split() for s in wordvector]
self.wordvector_dim = len(wordvector[0]) -1
self.word2index["UNK"] = 0
self.index2vector.append(np.zeros(self.wordvector_dim))
index = 1
for vec in wordvector :
            item = np.zeros(self.wordvector_dim)
for i in range(self.wordvector_dim):
item[i] =float(vec[i+1])
self.word2index[vec[0]] = index
self.index2vector.append(item)
index +=1
print("WordTotal=\t",len(self.index2vector))
print("Word dimension=\t",self.wordvector_dim)
    def load_relations(self):
relation_data = list(open("../data/RE/relation2id.txt").readlines())
relation_data = [s.split() for s in relation_data]
for relation in relation_data:
r = Relation(relation[0],int(relation[1]))
self.relations[relation[0]] = r
for r in self.relations:
self.relations[r].generate_vector(len(self.relations))
print("RelationTotal" + str(len(self.relations)))
def load_training_data(self,fileName):
f = open("","a")#open training data
print("Start loading training data")
print(">>>>>>>>>>>>>>>")
training_data =list(open(fileName).readlines())
training_data = [s.split() for s in training_data]
for data in training_data:
entity1 = data[2]
entity2 = data[3]
if data[4] not in self.relations:
relation = self.relations["NA"]
else:
relation = self.relations[data[4]]
s = Sententce(entity1,entity2,relation,data[5:-1])
self.training_data.append(s)
return self.training_data
def load_testing_data(self):
print("Start loading testing data")
print("==================")
testing_data = list(open("").readlines())
testing_data = [s.split() for s in testing_data]
for data in testing_data:
entity1 = data[2]
entity2 = data[3]
if data[4] not in self.relations:
relation = self.relations["NA"]
else:
relation = self.relations[data[4]]
s = Sententce(entity1,
entity2,
relation,
data[5:-1])
            if entity1 + " " + entity2 not in self.bags_test:
self.bags_test[entity1 + " " + entity2] = [s]
else:
self.bags_test[entity1 + " " + entity2].append(s)
return self.bags_test
def relation_analyze(self):
for r in self.relations:
print(r + ""+ str(self.relations[r].number))
def batch_iter(self,data,batch_size,num_epochs,shuffle=True):
data = np.asarray(data)
data_size = len(data)
num_batches_per_epoch = int(len(data)/batch_size) + 1
for epoch in range(num_epochs):
#Shuffle the data at each epoch
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
if start_index == end_index:
continue
else:
yield shuffled_data[start_index:end_index]
def generate_x(self, data):
x = []
for d in data:
v = []
words = d.words
e1 = d.entity1
e2 = d.entity2
for i, w in enumerate(words):
if w not in self.word2index:
tmp = self.index2vector[0]
else:
tmp = self.index2vector[self.word2index[w]]
v.append(tmp)
vectors = self.padding(v)
x.append(vectors)
return x
def generate_y(self, data):
return [d.relation.vector for d in data]
def generate_p(self, data):
p1 = []
p2 = []
for d in data:
p11 = []
p22 = []
e1 = d.entity1
e2 = d.entity2
words = d.words
l1 = 0
l2 = 0
for i, w in enumerate(words):
if w == e1:
l1 = i
if w == e2:
l2 = i
for i, w in enumerate(words):
a = i - l1
b = i - l2
if a > 30:
a = 30
if b > 30:
b = 30
if a < -30:
a = -30
if b < -30:
b = -30
p11.append(a + 31)
p22.append(b + 31)
a = self.sequence_length - len(p11)
if a > 0:
                front = a // 2
back = a - front
front_vec = [0 for i in range(front)]
back_vec = [0 for i in range(back)]
p11 = front_vec + p11 + back_vec
p22 = front_vec + p22 + back_vec
else:
p11 = p11[:self.sequence_length]
p22 = p22[:self.sequence_length]
p1.append(p11)
p2.append(p22)
return p1, p2
def padding(self, vectors):
a = self.sequence_length - len(vectors)
if a > 0:
            front = a // 2
back = a - front
front_vec = [np.zeros(self.wordvector_dim) for i in range(front)]
back_vec = [np.zeros(self.wordvector_dim) for i in range(back)]
vectors = front_vec + vectors + back_vec
else:
vectors = vectors[:self.sequence_length]
return vectors
def word2num(self, words):
        return [self.word2index[w] for w in words]
|
import logging
import sys
import io
import zmq
from PIL import Image
from flask import Flask
from flask import render_template
from flask import jsonify
from flask import request
from os import environ
from zmq import ssh
# initialize Flask web server
app = Flask(__name__)
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.INFO)
app.config.from_object('config')
# initialize zmq message queue
context = zmq.Context()
port = "6666"
ssh_password = environ.get("SSH_PASSWORD")
app.logger.info("Connecting to titan.elka.pw.edu.pl...")
socket = context.socket(zmq.REQ)
ssh.tunnel_connection(socket, "tcp://localhost:6666", "msucheck@mion.elka.pw.edu.pl", password=ssh_password)
app.logger.info("Connected!")
@app.route('/')
def homepage():
return render_template('index.html')
@app.route('/rate-photo', methods=["PUT"])
def rate_photo():
app.logger.info('Received image: {} bytes, {}.'.format(request.content_length, request.content_type))
app.logger.info('Reading data...')
stream = io.BytesIO(request.data)
image = Image.open(stream)
app.logger.info('Done. Size of the image is {}x{} pixels.'.format(image.size[0], image.size[1]))
app.logger.info('Sending the image to titan.elka.pw.edu.pl...')
socket.send(request.data)
response = socket.recv_json()
app.logger.info('Done. Photo got score of {}%.'.format(response['score']))
return jsonify(response)
if __name__ == '__main__':
app.run(debug=True, use_reloader=True)
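# Hedged client sketch (an addition, not part of the server; the file path is a placeholder):
# import requests
# with open("photo.jpg", "rb") as fh:
#     r = requests.put("http://localhost:5000/rate-photo", data=fh.read(),
#                      headers={"Content-Type": "image/jpeg"})
# print(r.json()["score"])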
|
#!/bin/python3
nap = ""
v = 0
def new_line():
print("\n")
print("A hét napjai: ")
while v != 7:
v += 1
new_line()
if v == 1:
nap = "Hétfő"
elif v == 2:
nap = "Kedd"
elif v == 3:
nap = "Szerda"
elif v == 4:
nap = "Csütörtök"
elif v == 5:
nap = "Péntek"
elif v == 6:
nap = "Szombat"
elif v == 7:
nap = "Vasárnap"
print(v, ". ", nap, sep="")
|
''' setup file for mqttgateway '''
from setuptools import setup#, find_packages
from mqttgateway import __version__
# Get the long description from the README file
with open('README.rst') as f:
long_description = f.read()
setup(
name='mqttgateway',
version=__version__,
description='Framework for MQTT Gateways.',
long_description=long_description,
long_description_content_type='text/x-rst',
url='http://mqttgateway.readthedocs.io/en/latest/',
author='Pier Paolo Taddonio',
author_email='paolo.taddonio@empiluma.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Embedded Systems',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
keywords='mqtt gateway',
packages=['mqttgateway'],#find_packages(),
install_requires=['paho-mqtt >= 1.4.0'],
package_data={'': ['*.conf', '*.json']},
entry_points={'console_scripts': ['dummy2mqtt = mqttgateway.dummy_start:main']}
)
|
# -*- coding: utf-8 -*-
# Sum with a while loop
sum1=0
n=99
while(n>0):
sum1 = sum1 + n
n=n-2
print('奇数的和是:%d' % sum1)
sum2=0
i=100
while(i>0):
sum2 = sum2 + i
i=i-2
print('偶数的和是:%d' % sum2)
|
#coding : utf-8
#Ewan GRIGNOUX LEVERT
# April 2020
from PIL import Image, ImageTk
import csv
def ChargementImage(ListeNom):
ListeImages = {}
for nom in ListeNom:
img = Image.open(f"{nom}.png")
ListeImages[nom] = ImageTk.PhotoImage(img)
return ListeImages
def lireFichierCSV(nomFichier):
tableau =[]
with open (nomFichier, newline = '', encoding = 'utf-8') as fichier:
lecteur = csv.DictReader(fichier, delimiter=',')
for element in lecteur:
tableau.append(element)
return tableau
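# Usage sketch (an addition; file names are placeholders, and ImageTk needs a Tk root first):
# import tkinter as tk
# racine = tk.Tk()
# images = ChargementImage(["pion"])      # loads "pion.png", keyed by its base name
# lignes = lireFichierCSV("scores.csv")   # list of dicts, one per CSV row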
|
import comm
import gevent
from itm import UCFunctionality
class ITMKatzSFE(UCFunctionality):
def __init__(self, sid, pid, channels, handlers):
self.sid = sid; self.pid = pid
self.ssid = self.sid[0]
self.Rnd = self.sid[1]
self.parties = self.sid[2]
self.x = dict( (p,None) for p in self.parties )
self.y = dict( (p,None) for p in self.parties )
self.t = dict( (p,len(self.parties)) for p in self.parties )
self.l = 1
self.crupted = set()
UCFunctionality.__init__(self, sid, pid, channels, handlers)
def function(self):
raise Exception("ITMSyncFunctinality.function must be defined in the deriving class!")
def outputs_set(self):
for i in self.y.values():
if i is None: return False
return True
def are_all_honest_0(self):
for i in self.parties:
if i not in self.crupted and self.t[i] != 0: return False
return True
def input_input(self, pid, v):
# TODO can input be set more than once??
self.x[pid] = v
self.f2a.write( ('input', pid, v) )
def input_output(self, pid):
#if pid not in self.crupted and self.x[pid] is None:
if comm.ishonest(self.sid, pid) and self.x[pid] is None:
dump.dump(); return
if self.t[pid] > 0:
self.t[pid] = self.t[pid]-1
if self.are_all_honest_0() and self.l < self.Rnd:
self.l += 1
for i in self.t: self.t[i] = len(self.parties)
self.f2a.write( ('activated',pid) )
elif self.t[pid] == 0 and self.l < self.Rnd:
self.f2p.write( (pid, ('early',)) )
else:
if self.x[1] is not None and not self.outputs_set():
self.y = self.function()
self.f2p.write( (pid, self.y[pid]) )
|
# coding=utf8
import joblib
import re
class SubJobProcess(joblib.JobProcess):
info_from = '卓博人才网'
def __init__(self, queue):
setting = {
'corplist_url': 'http://www.jobcn.com/search/result_servlet.ujson',
'corp_url': 'http://www.jobcn.com/position/company.xhtml?comId={0.code}',
'corplist_post_data': {
'p.keyword': '',
'p.keyword2': '',
'p.keywordType': '2',
'p.pageNo': '1',
'p.pageSize': '20',
'p.sortBy': 'postdate',
'p.statistics': 'false',
},
'corplist_reg': None,
'corp_regs': [
re.compile(r'<dl><dt>联系人:</dt><dd>(?P<contact_person>[^<]+)', re.S),
re.compile(r'<dl><dt>联?系?电话:</dt><dd>(?P<contact_phone>[^<]+)', re.S),
re.compile(r'<dl><dt>主页:</dt><dd><a rel="external nofollow" href="(?P<website>[^"]+)', re.S),
re.compile(r'<dl><dt>地址:</dt><dd>(?P<address>[^<]+)', re.S),
],
'pages': 30,
'encoding': 'gbk',
}
super().__init__(queue, setting)
def get_corplist_urls(self):
for job_location in self.get_setting('job_locations'):
for page in range(1, self.get_setting('pages')+1):
yield self.get_setting('corplist_url').format(job_location['code'], page)
def get_corplist_urls(self):
for page in range(1, self.get_setting('pages')+1):
self.get_setting('corplist_post_data')['p.pageNo'] = page
yield self.get_setting('corplist_url')
def fetch_corplist(self, page_url):
json = self.retrieve_json(page_url, data=self.get_setting('corplist_post_data'))
corp_list = json['rows']
return ({
'name': corp_info['comName'],
'code': str(corp_info['comId']),
#'insert_date': corp_info['postDate'],
} for corp_info in corp_list)
|
from collections import deque
import numpy as np
import os
from PIL import Image
import pickle
import datetime
###################################################################################
class ListUtils(object):
@staticmethod
def deque_to_ndarray(deque):
deque_length = len(deque)
result = np.ndarray(shape=(deque_length, 1))
for i in range(0, deque_length):
result[i] = deque.pop()
return result
@staticmethod
def deque_to_numpy_array(deque):
result = np.array(deque)
return result
@staticmethod
def display_deque(deque):
for i in deque:
print(i)
###################################################################################
class Utils():
@staticmethod
def get_path(data=''):
current_directory = os.getcwd()
data_directory = current_directory + '/Resources/DataSets/' + data
return data_directory
@staticmethod
def get_parent_directory():
current_directory = os.getcwd()
parent_directory = os.path.abspath(os.path.join(current_directory, os.pardir))
return parent_directory
@staticmethod
def load_image(infilename):
"""Grap the image and convert it to the needed structure."""
data = Image.open(infilename)
image = np.asarray(data, dtype="uint8")
        # Reformat the image so that a for loop
        # would iterate over col, x, y.
image = image.ravel('F').tolist()
return image
@staticmethod
def unpickle_data(filename):
with open(filename, 'rb') as f:
data = pickle.load(f)
return data
@staticmethod
def create_directory(directory):
"""Creates directory if it is not already there."""
if not os.path.exists(directory):
os.makedirs(directory)
@staticmethod
def check_directory(directory):
"""Raises an exception if directory is not there. This might
be helpful if it is the input directory for data and
necessary for the following executions."""
message = 'Necessary directory was not found. Please check your ' \
'path.'
if not os.path.exists(directory):
raise SystemExit(message)
@staticmethod
def get_current_date():
"""Returns a string which holds a manipulated from of the
current date and time."""
now = datetime.datetime.now()
# Prettify the string
now = str(now).replace(' ', '-')
now = now[:-7]
return now
# @staticmethod
# def display_image(path_to_image):
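    # A possible completion of the stub above (an assumption, not the original author's code):
    # open the file with PIL and hand it to the default image viewer.
    # @staticmethod
    # def display_image(path_to_image):
    #     Image.open(path_to_image).show()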
|
import collections
import os
# MANDATORY. Set this to be the Project Name.
# e.g. "RTP2021", "TIP2021", etc
PROJECT = "STIP2022"
# MANDATORY. Set this to be the Scenario Name
# Pass this as --scenario to build_network_mtc.py
assert(SCENARIO in ["NoProject","Project"])
# MANDATORY. Set this to be the git tag for checking out network projects.
TAG = "HEAD"
# MANDATORY. Set this to the directory in which to write your outputs.
# "hwy" and "trn" subdirectories will be created here.
OUT_DIR = PROJECT + "_" + SCENARIO + "_network_{}" # YEAR
# MANDATORY. Should be a dictionary with keys "hwy", "muni", "rail", "bus"
# to a list of projects. A project can either be a simple string, or it can be
# a dictionary with with keys 'name', 'tag' (optional), and 'kwargs' (optional)
# to specify a special tag or special keyword args for the projects apply() call.
# For example:
# {'name':"Muni_TEP", 'kwargs':{'servicePlan':"'2012oct'"}}
###########################################################
COMMITTED_PROJECTS = collections.OrderedDict([
(2015, {
'hwy':['PROJ_attributes', # adds PROJ attributes to NODE and LINK
{'name':'Bridge_Toll_Updates_2_2pct', 'kwargs':{'MODELYEAR':'2015'}}],
'trn':[]
}),
(2020, {
'hwy':[{'name':'Bridge_Toll_Updates_2_2pct', 'kwargs':{'MODELYEAR':'2020'}},
{'name':'EXP_237B', 'kwargs':{'FUTURE':"PBA50"}}, # todo: update this to support PBA50
'EXP_580C',
'EXP_680D',
'EXP_880A',
'HOV_680F',
'SCL130001_237_101_MAT_Int_Mod',
'REG090003_SCLARA_FIP',
'ALA130005_Dougherty_road_widening',
'ALA130006_Dublin_Blvd_widening',
'ALA130014_7th_St_road_diet',
'ALA130026_Shattuck_Complete_Streets',
'ALA170049_Central_AVE_Safety_Improvements',
'ALA150004_EastBay_BRT',
'CC_130001_BaileyRd_SR4',
'CC_130046_I680_SR4_Int_Rec',
'CC_070035_I80_SPDamRd_Int_Phase1',
'CC_070011_Brentwood_Blvd_Widening',
'CC_070075_Kirker_Pass_Truck_Lane',
'CC_090019_Bollinger_Canyon_Widening',
'CC_130006_Concord_BART_road_diets',
'CC_170001_SanRamonValleyBlvd_Lane_Addition',
'MRN150009_San_Rafael_Bridge_Improvements',
'SF_070027_Yerba_Buena_Ramp_Imp',
'SF_070005_VanNess_BRT',
'SF_130011_2ndSt_Road_Diet',
'SF_Market_Street_Closure',
'SM_110047_SR92_ElCam_Ramp_Mod',
'SOL110005_Jepson_Van_to_Com',
'FBP_SL_042_Jepson_2A',
'SON070004_101_MarinSonNarrows_Phase1',
'ALA050014_SR84_Widening',
'ALA170011_BayBridge_HOV_Connectors',
'ALA150047_TelegraphAve_Complete_Streets',
'SM_110047_SR92_ElCam_Ramp_Mod',
'SCL190002_280_Foothill_improvement',
'SCL190006_101SB_offramp_improvement',
'I80_AdaptiveRampMetering',
'VAR170021_Freeway_Performance_I880',
'SonomaCounty_Transit_NoBuild2050',
'SF_MuniForward_Committed',
'FBP_MU_029_Broadway_Transit_Only_Lanes',
'EXP_Blueprint_NoProject',
'FBP_AL_067_Rte84Wide',
'FBP_AL_065_Bancroft_Bus_Only',
'FBP_SM_032_US101_Willow_Interchange'],
'trn':['ALA050015_BART_to_WarmSprings',
'ACGo',
'CC_050025_EBart_to_Antioch',
'GGTransit_Committed',
'SCL110005_BART_to_Berryessa',
'SF_010015_Transbay_Terminal',
'SF_010037_Muni_Central_Subway',
'SF_070027_Yerba_Buena_Ramp_Imp',
'SOL030002_FairfieldVacaville_Stn',
'SON090002_SMART',
'SON090002_SMART_to_Larkspur',
'CC_070062_Richmond_Ferry',
'SF_MuniForward_Committed',
'VTA_Next',
'SCL130001_237_101_MAT_Int_Mod',
'SonomaCounty_Transit_NoBuild2050',
'SMART_Novato',
'Xfare_update_2020',
'ACTransit_Committed',
'ferry_update_2019',
'Napa_Solano_Updates_2020',
'FBP_Beale_Transit_Only_Lane',
'SamTrans_ECR_Rapid',
'ALA150004_EastBay_BRT',
{'name':'FBP_SL_026_SolExpressBus', 'kwargs':{'MODELYEAR':'2020'}}],
}),
(2025, {
'hwy':[{'name':'Bridge_Toll_Updates_2_2pct', 'kwargs':{'MODELYEAR':'2025'}},
'EXP_CC_050028_I680_SB_HOV_Completion',
'EXP_101B1',
'EXP_101B2',
'EXP_680C1',
'EXP_680F',
'EXP_85D',
'EXP_101C',
'ALA150001_I680_SR84_Int_Wid',
'ALA150043_Claremont_road_diet',
'CC_070009_Slatten_Ranch_Rd_Extension',
'SF_070004_Geary_BRT_Phase1',
'SON070004_101_MarinSonNarrows_Phase2',
'SOL110006_Jepson_1B_1C',
'SCL190008_US101_DLC_Int_Imp',
'I880_US101_AdaptiveRampMetering',
'SOL070020_I80_I680_SR12_Int_1_2A',
'FBP_NP_036_SR29_Imola_PNR',
'ALA170052_Fruitvale_Ave_ped_improvements',
'EXP_Blueprint_NoProject',
{'name': 'RRSP_Alameda_Point_Transit_Improvements', 'kwargs':{'BUILT':"'built'"}}],
'trn':['SF_010028_Caltrain_Modernization',
'SON090002_SMART_to_Windsor',
'REG090037_New_BART_Trains',
'FBP_NP_036_SR29_Imola_PNR',
'SOL070020_I80_I680_SR12_Int_1_2A',
{'name': 'RRSP_Alameda_Point_Transit_Improvements', 'kwargs':{'BUILT':"'built'"}}]
}),
(2030, {
'hwy':[{'name':'Bridge_Toll_Updates_2_2pct', 'kwargs':{'MODELYEAR':'2030'}},
'EXP_Blueprint_NoProject'],
'trn':['BART_NoProject']
}),
(2035, {
'hwy':[{'name':'Bridge_Toll_Updates_2_2pct', 'kwargs':{'MODELYEAR':'2035'}},
'EXP_Blueprint_NoProject'],
'trn':[]
}),
(2040, {
'hwy':[{'name':'Bridge_Toll_Updates_2_2pct', 'kwargs':{'MODELYEAR':'2040'}},
'EXP_Blueprint_NoProject'],
'trn':[]
}),
(2045, {
'hwy':[{'name':'Bridge_Toll_Updates_2_2pct', 'kwargs':{'MODELYEAR':'2045'}},
'EXP_Blueprint_NoProject'],
'trn':[]
}),
(2050, {
'hwy':[{'name':'Bridge_Toll_Updates_2_2pct', 'kwargs':{'MODELYEAR':'2050'}},
'EXP_Blueprint_NoProject'],
'trn':[]
})
])
###########################################################
# STIP projects
STIP_PROJECTS = collections.OrderedDict([
(2015, {
'hwy':[],
'trn':[]
}),
(2020, {
'hwy':[],
'trn':[],
}),
(2025, {
'hwy':['STIP2022_SCL110001_US101SV_EXP_P5',
'FBP_CC_050_SR4_Operation_Improvements_EB'],
'trn':[]
}),
(2030, {
'hwy':['STIP2022_SM190009_US101SM_EXP',
'STIP2022_CC170017_I680NB_EXP_P1',
'FBP_AL_045_Oak_Ala_Access_Pr'],
'trn':['MAJ_BRT030001_BART_to_SanJose',
'FBP_AL_045_Oak_Ala_Access_Pr']
}),
(2035, {
'hwy':[],
'trn':[]
}),
(2040, {
'hwy':['FBP_CC_051_SR4_Operation_Improvements_WB'],
'trn':[]
}),
(2045, {
'hwy':[],
'trn':[]
}),
(2050, {
'hwy':['STIP_ITS_SM',
'STIP2022_CC170017_I680_ITS_TOS'],
'trn':[]
})
])
###########################################################
# Put them together for NETWORK_PROJECTS
NETWORK_PROJECTS = collections.OrderedDict()
for YEAR in COMMITTED_PROJECTS.keys():
if SCENARIO == "NoProject":
# baseline: just committed
NETWORK_PROJECTS[YEAR] = {
'hwy':COMMITTED_PROJECTS[YEAR]['hwy'],
'trn':COMMITTED_PROJECTS[YEAR]['trn']
}
else:
# stip
NETWORK_PROJECTS[YEAR] = {
'hwy':COMMITTED_PROJECTS[YEAR]['hwy'] + STIP_PROJECTS[YEAR]['hwy'],
'trn':COMMITTED_PROJECTS[YEAR]['trn'] + STIP_PROJECTS[YEAR]['trn']
}
# handle net_remove, nets keywords
for netmode in ['hwy','trn']:
# iterate backwards via index to delete cleanly
for project_idx in range(len(NETWORK_PROJECTS[YEAR][netmode])-1,-1,-1):
project = NETWORK_PROJECTS[YEAR][netmode][project_idx]
# special handling requires project to be specified as dictionary
if not isinstance(project, dict): continue
# variants_exclude: specifies list of network variants for which this project should be *excluded*
if 'variants_exclude' in project.keys() and NET_VARIANT in project['variants_exclude']:
Wrangler.WranglerLogger.info("Removing {} {} {}".format(YEAR, netmode, project))
del NETWORK_PROJECTS[YEAR][netmode][project_idx]
continue
# variants_include: specifies list of network variants for which this project should be *included*
# if this keyword is present, then this project is included *only* for variants in this list
if 'variants_include' in project.keys() and NET_VARIANT not in project['variants_include']:
Wrangler.WranglerLogger.info("Removing {} {} {}".format(YEAR, netmode, project))
del NETWORK_PROJECTS[YEAR][netmode][project_idx]
continue
#
# For every year where a project is applied do the following:
# Convert all zero-length links to 0.01
# Move buses to HOV/Express lanes at the end
#
for YEAR in NETWORK_PROJECTS.keys():
# if anything is applied
if ((len(NETWORK_PROJECTS[YEAR]['hwy']) > 0) or (len(NETWORK_PROJECTS[YEAR]['trn']) > 0)):
NETWORK_PROJECTS[YEAR]['hwy'].append('No_zero_length_links')
if ((len(NETWORK_PROJECTS[YEAR]['hwy']) > 0) or (len(NETWORK_PROJECTS[YEAR]['trn']) > 0)):
NETWORK_PROJECTS[YEAR]['trn'].append('Move_buses_to_HOV_EXP_lanes')
# OPTIONAL. The default route network project directory is Y:\networks. If
# projects are stored in another directory, then use this variable to specify it.
# For example: Y:\networks\projects
# NETWORK_BASE_DIR = None
# NETWORK_PROJECT_SUBDIR = None
# NETWORK_SEED_SUBDIR = None
# NETWORK_PLAN_SUBDIR = None
# OPTIONAL. A list of project names which have been previously applied in the
# PIVOT_DIR network that projects in this project might rely on. For example
# if DoyleDrive exists, then Muni_TEP gets applied differently so transit lines
# run on the new Doyle Drive alignment
APPLIED_PROJECTS = None
# OPTIONAL. A list of project names. For test mode, these projects won't use
# the TAG. This is meant for developing a network project.
TEST_PROJECTS = []
|
import os
import sys
import tarfile
import zipfile
import numpy as np
import tensorflow as tf
import six.moves.urllib as urllib
from PIL import Image
from io import StringIO
from collections import defaultdict
from matplotlib import pyplot as plt
sys.path.append("..")
from utils import label_map_util
from utils import visualization_utils as vis_util
'''
Step 1: configure the model to use
'''
# Model to download
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Frozen inference graph used for object detection
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# Label map used to look up class-name strings
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
'''
Step 2: download the pretrained model (SSD + MobileNet)
'''
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
'''
Step 3: after downloading, read the model into the default computation graph
'''
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
'''
Step 4: define some helper functions before running the actual detection
'''
# This part maps the class indices returned by the network to category-name strings
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Helper that converts an image into a NumPy array
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
'''
Step 5: run detection on the images
'''
# Only two test images are processed
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3)]
# Output figure size
IMAGE_SIZE = (12, 8)
# Detection code
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
        # Input and output tensors of the detection graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents one detected object in the image
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score is the confidence for the predicted class
        # Scores are shown in the figure together with the class labels
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        # Each class entry gives the category of the corresponding box
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
for image_path in TEST_IMAGE_PATHS:
image = Image.open(image_path)
            # Convert the image to a NumPy array
image_np = load_image_into_numpy_array(image)
            # Add a batch dimension; the final input shape is [1, ?, ?, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
            # Run the detection
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
            # Visualize the detection results
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(image_np)
plt.show()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from pants.backend.codegen.protobuf.scala.subsystem import ScalaPBSubsystem
from pants.backend.codegen.protobuf.target_types import ProtobufDependenciesField
from pants.backend.scala.subsystems.scala import ScalaSubsystem
from pants.build_graph.address import Address
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules, rule
from pants.engine.target import FieldSet, InferDependenciesRequest, InferredDependencies
from pants.engine.unions import UnionRule
from pants.jvm.dependency_inference.artifact_mapper import (
AllJvmArtifactTargets,
find_jvm_artifacts_or_raise,
)
from pants.jvm.resolve.common import Coordinate
from pants.jvm.subsystems import JvmSubsystem
from pants.jvm.target_types import JvmResolveField
_SCALAPB_RUNTIME_GROUP = "com.thesamet.scalapb"
_SCALAPB_RUNTIME_ARTIFACT = "scalapb-runtime"
@dataclass(frozen=True)
class ScalaPBRuntimeDependencyInferenceFieldSet(FieldSet):
required_fields = (ProtobufDependenciesField, JvmResolveField)
dependencies: ProtobufDependenciesField
resolve: JvmResolveField
class InferScalaPBRuntimeDependencyRequest(InferDependenciesRequest):
infer_from = ScalaPBRuntimeDependencyInferenceFieldSet
@dataclass(frozen=True)
class ScalaPBRuntimeForResolveRequest:
resolve_name: str
@dataclass(frozen=True)
class ScalaPBRuntimeForResolve:
addresses: frozenset[Address]
@rule
async def resolve_scalapb_runtime_for_resolve(
request: ScalaPBRuntimeForResolveRequest,
jvm_artifact_targets: AllJvmArtifactTargets,
jvm: JvmSubsystem,
scala_subsystem: ScalaSubsystem,
scalapb: ScalaPBSubsystem,
) -> ScalaPBRuntimeForResolve:
scala_version = scala_subsystem.version_for_resolve(request.resolve_name)
# TODO: Does not handle Scala 3 suffix which is just `_3` nor X.Y.Z versions.
scala_binary_version, _, _ = scala_version.rpartition(".")
version = scalapb.version
addresses = find_jvm_artifacts_or_raise(
required_coordinates=[
Coordinate(
group=_SCALAPB_RUNTIME_GROUP,
artifact=f"{_SCALAPB_RUNTIME_ARTIFACT}_{scala_binary_version}",
version=version,
)
],
resolve=request.resolve_name,
jvm_artifact_targets=jvm_artifact_targets,
jvm=jvm,
subsystem="the ScalaPB runtime",
target_type="protobuf_sources",
requirement_source="the `[scalapb].version` option",
)
return ScalaPBRuntimeForResolve(addresses)
@rule
async def infer_scalapb_runtime_dependency(
request: InferScalaPBRuntimeDependencyRequest,
jvm: JvmSubsystem,
) -> InferredDependencies:
resolve = request.field_set.resolve.normalized_value(jvm)
scalapb_runtime_target_info = await Get(
ScalaPBRuntimeForResolve, ScalaPBRuntimeForResolveRequest(resolve)
)
return InferredDependencies(scalapb_runtime_target_info.addresses)
def rules():
return (
*collect_rules(),
UnionRule(InferDependenciesRequest, InferScalaPBRuntimeDependencyRequest),
)
|
#!/usr/bin/python
import xsocket
from xia_address import *
import random
import sys
xsocket.set_conf("xsockconf_python.ini","hello_service.py")
xsocket.print_conf()
while(True):
try:
sock=xsocket.Xsocket(0)
if (sock<0):
print "error opening socket"
exit(-1)
# Make the sDAG (the one the server listens on)
dag = "RE %s %s %s" % (AD1, HID1, SID_HELLO)
# Bind to the DAG
ret= xsocket.Xbind(sock,dag);
print "listening on %s" % dag
print "bind returns %d socket %d" % (ret, sock)
xsocket.Xaccept(sock);
replyto = None
dlen = None
#n = xsocket.Xrecvfrom(sock, 1500, 0, replyto, dlen)
n = xsocket.Xrecv(sock, 1500, 0)
hello_message = "<html><body><h1>Hello World!</h1></body></html>"
http_header = "HTTP/1.1 200 OK\nDate: Sat, 08 Jan 2011 22:25:07 GMT\nServer: Apache/2.2.17 (Unix)\nAccess-Control-Allow-Origin: *\nCache-Control: no-cache\nConnection: close\nContent-Type: text/html\n\n"
#xsocket.Xsendto(sock, stock_feed, len(stock_feed), 0, replyto, dlen)
response = http_header+ hello_message
print "response len %d" % len(response)
xsocket.Xsend(sock, response, len(response), 0)
xsocket.Xclose(sock)
except (KeyboardInterrupt, SystemExit), e:
sys.exit()
xsocket.Xclose(sock)
|
from sexpdata import dumps, loads, Symbol
import signal
import traceback
from euslime.bridge import EuslispResult
from euslime.handler import DebuggerHandler
from euslime.logger import get_logger
log = get_logger(__name__)
class Protocol(object):
def __init__(self, handler, *args, **kwargs):
self.handler = handler(*args, **kwargs)
def dumps(self, sexp):
def with_header(sexp):
res = dumps(sexp, false_as='nil', none_as='nil')
# encode to adapt to japanese characters
header = '{0:06x}'.format(len(res.encode('utf-8')))
return header + res
try:
return with_header(sexp)
except UnicodeDecodeError:
# For example in (apropos "default")
log.warn('UnicodeDecodeError at %s' % sexp)
assert isinstance(sexp, list)
sexp = [unicode(x, 'utf-8', 'ignore') if isinstance(x, str)
else x for x in sexp]
return with_header(sexp)
def make_error(self, id, err):
debug = DebuggerHandler(id, err)
self.handler.debugger.append(debug)
res = [
Symbol(':debug'),
0, # the thread which threw the condition
len(self.handler.debugger), # the depth of the condition
[debug.message, str(), None], # s-exp with a description
debug.restarts, # list of available restarts
debug.stack, # stacktrace
[None], # pending continuation
]
yield self.dumps(res)
def make_response(self, id, sexp):
try:
res = [Symbol(':return'), {'ok': sexp}, id]
yield self.dumps(res)
except Exception as e:
for r in self.make_error(id, e):
yield r
def interrupt(self):
yield self.dumps([Symbol(":read-aborted"), 0, 1])
self.handler.euslisp.process.send_signal(signal.SIGINT)
self.handler.euslisp.reset()
yield self.dumps([Symbol(':return'),
{'abort': "'Keyboard Interrupt'"},
self.handler.command_id])
def process(self, data):
data = loads(data)
if data[0] == Symbol(":emacs-rex"):
cmd, form, pkg, thread, comm_id = data
self.handler.command_id = comm_id
self.handler.package = pkg
else:
form = data
comm_id = None
func = form[0].value().replace(':', '_').replace('-', '_')
args = form[1:]
log.info("func: %s" % func)
log.info("args: %s" % args)
try:
gen = getattr(self.handler, func)(*args)
if not gen:
if comm_id:
for r in self.make_response(comm_id, None):
yield r
return
for resp in gen:
if isinstance(resp, EuslispResult):
for r in self.make_response(self.handler.command_id,
resp.value):
yield r
else:
yield self.dumps(resp)
except Exception as e:
log.error(traceback.format_exc())
for r in self.make_error(self.handler.command_id, e):
yield r
|
import zipfile
import sys
import os.path as op
name = 'OKMIlLVft'
while True:
path = op.join(sys.path[0],name+'.tar.gz')
zf = zipfile.ZipFile(path)
zf.extractall(path = sys.path[0],pwd = bytes(name,"utf8"))
name = zf.filelist[0].filename
name = name.split(".tar.gz")[0]
|
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from datasets import audio
import os
import numpy as np
from hparams import hparams
from tacotron.utils.utils import mulaw_quantize
def build_from_path(input_dirs, mel_dir, linear_dir, wav_dir, n_jobs=4):
executor = ProcessPoolExecutor(max_workers=n_jobs)
futures = []
index = 1
for input_dir in input_dirs:
with open(os.path.join(input_dir, 'metadata.csv'), encoding='utf-8') as f:
for line in f:
parts = line.strip().split('|')
wav_path = os.path.join(input_dir, 'wavs', '{}.wav'.format(parts[0]))
text = parts[2]
futures.append(executor.submit(partial(_process_utterance, mel_dir, linear_dir, wav_dir, index, wav_path, text)))
index += 1
return [future.result() for future in futures if future.result() is not None]
def _process_utterance(mel_dir, linear_dir, wav_dir, index, wav_path, text):
try:
# Load the audio as numpy array
wav = audio.load_wav(wav_path)
    except Exception:
        print('file {} listed in metadata.csv was not found in the wav folder'.format(
            wav_path))
return None
if hparams.rescale:
wav = wav / np.abs(wav).max() * hparams.rescaling_max
if hparams.trim_silence:
wav = audio.trim_silence(wav)
out = mulaw_quantize(wav, hparams.quantize_channels)
start, end = audio.start_and_end_indices(out, hparams.silence_threshold)
wav = wav[start: end]
out = out[start: end]
constant_values = mulaw_quantize(0, hparams.quantize_channels)
out_dtype = np.int16
mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)
mel_frames = mel_spectrogram.shape[1]
linear_spectrogram = audio.linearspectrogram(wav).astype(np.float32)
linear_frames = linear_spectrogram.shape[1]
assert linear_frames == mel_frames
l, r = audio.pad_lr(wav, hparams.fft_size, audio.get_hop_size())
out = np.pad(out, (l, r), mode='constant', constant_values=constant_values)
time_steps = len(out)
assert time_steps >= mel_frames * audio.get_hop_size()
out = out[:mel_frames * audio.get_hop_size()]
assert time_steps % audio.get_hop_size() == 0
audio_filename = 'speech-audio-{:05d}.npy'.format(index)
mel_filename = 'speech-mel-{:05d}.npy'.format(index)
linear_filename = 'speech-linear-{:05d}.npy'.format(index)
np.save(os.path.join(wav_dir, audio_filename), out.astype(out_dtype), allow_pickle=False)
np.save(os.path.join(mel_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)
np.save(os.path.join(linear_dir, linear_filename), linear_spectrogram.T, allow_pickle=False)
return (audio_filename, mel_filename, linear_filename, time_steps, mel_frames, text)
|
# You are given two non-empty linked lists representing two non-negative integers. The
# digits are stored in reverse order, and each node contains a single digit.
#
# Add the two numbers and return the sum as a new linked list.
#
# You may assume the two numbers do not contain any leading zero, except the number 0 itself.
#
# Example:
#
# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
# Output: 7 -> 0 -> 8
# Explanation: 342 + 465 = 807
#
# Related Topics: Linked List, Math
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
head = ListNode(0)
res = head
carry = 0
while l1 and l2:
add = l1.val + l2.val + carry
carry = 1 if add >= 10 else 0
head.next = ListNode(add % 10)
head = head.next
l1, l2 = l1.next, l2.next
l = l1 if l1 else l2
while l:
add = l.val + carry
carry = 1 if add >= 10 else 0
head.next = ListNode(add % 10)
head = head.next
l = l.next
if carry:
head.next = ListNode(1)
return res.next
# leetcode submit region end(Prohibit modification and deletion)
|
import autodisc as ad
import numpy as np
import plotly
def plotly_meanstd_scatter(data=None, config=None, **kwargs):
'''
param repetition_ids: Either scalar int with single id, list with several that are used for each experiment, or a dict with repetition ids per experiment.
'''
default_config = dict(
subplots=dict(
rows=None,
cols=None,
print_grid=False
),
std=dict(
style='shaded', # or errorbar
steps=1,
visible=True
),
init_mode='mean_std', # mean_std, mean, elements
plotly_format = 'webgl', # webgl or svg
layout=dict(
default_xaxis=dict(), # if several subplots, then these are the default values for all xaxis config in fig.layout
default_yaxis=dict(), # if several subplots, then these are the default values for all yaxis config in fig.layout
xaxis=dict(),
yaxis=dict(),
updatemenus=[
dict(type="buttons",
active=0,
buttons=[
dict(label='mean + std',
method='restyle',
args=[{'visible': []}]),
dict(label='mean',
method='restyle',
args=[{'visible': []}]),
dict(label='elements',
method='restyle',
args=[{'visible': []}]),
],
direction='right',
pad={'t': 70},
x=1,
xanchor='right',
y=0,
yanchor='top'
),
]
),
default_trace=dict(),
default_mean_trace=dict(
legendgroup='<subplot_idx>-<data_idx>', # subplot_idx, data_idx
hoverinfo='text+x',
),
default_subplot_mean_traces=[], # default config of traces per subplot
mean_traces=[],
default_std_trace=dict(
legendgroup='<mean_trace_legendgroup>', # subplot_idx, data_idx, mean_trace_legendgroup
hoverinfo='none',
showlegend=False,
),
default_subplot_std_traces=[], # default config of traces per subplot
std_traces=[],
default_element_trace=dict( # overall default
legendgroup=None, # subplot_idx, data_idx, elem_idx, subelem_idx, mean_trace_legendgroup, std_trace_legendgroup
),
default_subplot_element_traces=[], # default per subplot
default_data_element_traces=[], # default per data item
element_traces=[], # individual items
default_mean_label='<data_idx>',
mean_labels=[],
default_element_label='<mean_label> - <subelem_idx>', # possible replacements: <mean_label>, <subelem_idx>, <elem_idx>, <data_idx>
element_labels=[],
default_colors=plotly.colors.DEFAULT_PLOTLY_COLORS,
)
config = ad.config.set_default_config(kwargs, config, default_config)
if data is None:
data = np.array([])
# format data in form [subplot_idx:list][trace_idx:list][elems_per_trace:numpy.ndarray]
if isinstance(data, np.ndarray):
data = [[data]]
elif isinstance(data[0], np.ndarray):
data = [data]
# identify the number of subplots
n_subplots = len(data)
# if not defined, set rows and cols of subplots
if config.subplots.rows is None and config.subplots.cols is None:
config.subplots.rows = n_subplots
config.subplots.cols = 1
elif config.subplots.rows is not None and config.subplots.cols is None:
config.subplots.cols = int(np.ceil(n_subplots / config.subplots.rows))
elif config.subplots.rows is None and config.subplots.cols is not None:
config.subplots.rows = int(np.ceil(n_subplots / config.subplots.cols))
if config.plotly_format.lower() == 'webgl':
plotly_scatter_plotter = plotly.graph_objs.Scattergl
elif config.plotly_format.lower() == 'svg':
plotly_scatter_plotter = plotly.graph_objs.Scatter
else:
        raise ValueError('Unknown config {!r} for plotly_format! Allowed values: \'webgl\', \'svg\'.'.format(config.plotly_format))
# make figure with subplots
fig = plotly.tools.make_subplots(**config.subplots)
mean_traces = []
elem_traces = []
elem_idx = 0
    # iterate over subplots
for subplot_idx, subplot_data in enumerate(data):
subplot_mean_traces = []
subplot_elem_traces = []
# create for each experiment a trace
for data_idx, cur_data in enumerate(subplot_data):
mean_data = np.nanmean(cur_data, axis=0)
std_data = np.nanstd(cur_data, axis=0)
            # TODO: allow setting of custom x values
            # this cannot simply be done by setting the x attribute of the trace, because the std trace uses an extended x range (x + reversed x)
x_values = list(range(len(mean_data)))
# handle trace for mean values
info_text = ['{} ± {}'.format(mean_data[idx], std_data[idx]) for idx in range(len(mean_data))]
mean_label = config.default_mean_label
if len(config.mean_labels) > data_idx:
mean_label = config.mean_labels[data_idx]
mean_label = ad.gui.jupyter.misc.replace_str_from_dict(str(mean_label), {'data_idx': data_idx})
mean_trace_params = dict(
x=x_values,
y=mean_data,
line=dict(color=config.default_colors[data_idx % len(config.default_colors)]),
name=mean_label,
text=info_text,
)
mean_trace_config = ad.config.set_default_config(config.default_mean_trace, config.default_trace)
if len(config.default_subplot_mean_traces) > subplot_idx:
mean_trace_config = ad.config.set_default_config(config.default_subplot_mean_traces[subplot_idx], mean_trace_config)
if len(config.mean_traces) > data_idx:
mean_trace_config = ad.config.set_default_config(config.mean_traces[data_idx], mean_trace_config)
mean_trace_params = ad.config.set_default_config(mean_trace_config, mean_trace_params)
# handle legendgroup
mean_trace_legendgroup = mean_trace_params.legendgroup
if isinstance(mean_trace_legendgroup, str):
mean_trace_legendgroup = ad.gui.jupyter.misc.replace_str_from_dict(mean_trace_legendgroup, {'data_idx': data_idx,
'subplot_idx': subplot_idx})
mean_trace_params.legendgroup = mean_trace_legendgroup
cur_mean_trace = plotly_scatter_plotter(**mean_trace_params)
subplot_mean_traces.append(cur_mean_trace)
# handle trace for std values
if config.std.style.lower() == 'shaded':
fill_color = config.default_colors[data_idx % len(config.default_colors)]
fill_color = fill_color.replace('rgb', 'rgba')
fill_color = fill_color.replace(')', ', 0.2)')
std_trace_params = dict(
x=x_values + x_values[::-1],
y=np.hstack((mean_data + std_data, mean_data[::-1] - std_data[::-1])),
fill='tozerox',
line=dict(color='rgba(255,255,255,0)'),
fillcolor=fill_color,
)
elif config.std.style.lower() == 'errorbar':
std_trace_params = dict(
x=x_values[::config.std.steps],
y=mean_data[::config.std.steps],
error_y=dict(type='data', array=std_data, visible=True),
mode='markers',
line=dict(color=config.default_colors[data_idx % len(config.default_colors)]),
marker=dict(size=0, opacity=0),
)
else:
                raise ValueError('Unknown config.std.style ({!r})! Options: \'shaded\', \'errorbar\''.format(config.std.style))
std_trace_config = ad.config.set_default_config(config.default_std_trace, config.default_trace)
if len(config.default_subplot_std_traces) > subplot_idx:
std_trace_config = ad.config.set_default_config(config.default_subplot_std_traces[subplot_idx], std_trace_config)
if len(config.std_traces) > data_idx:
std_trace_config = ad.config.set_default_config(config.std_traces[data_idx], std_trace_config)
std_trace_params = ad.config.set_default_config(std_trace_config, std_trace_params)
# handle legendgroup
std_trace_legendgroup = std_trace_params.legendgroup
if isinstance(std_trace_legendgroup, str):
std_trace_legendgroup = ad.gui.jupyter.misc.replace_str_from_dict(std_trace_legendgroup, {'data_idx': data_idx,
'subplot_idx': subplot_idx,
'mean_trace_legendgroup': mean_trace_legendgroup}
)
std_trace_params.legendgroup = std_trace_legendgroup
cur_std_trace = plotly_scatter_plotter(**std_trace_params)
subplot_mean_traces.append(cur_std_trace)
# traces for each data element
n_elems = cur_data.shape[0]
color_coeff_step = 1 / n_elems
cur_color_coeff = 0 + color_coeff_step
for cur_elem_idx in range(n_elems):
cur_elem_data = cur_data[cur_elem_idx, :]
element_label = config.default_element_label
if len(config.element_labels) > data_idx:
element_label = config.element_labels[data_idx]
element_label = ad.gui.jupyter.misc.replace_str_from_dict(str(element_label), {'data_idx': data_idx,
'subelem_idx': cur_elem_idx,
'elem_idx': elem_idx,
'mean_label': mean_label})
color = ad.gui.jupyter.misc.transform_color_str_to_tuple(config.default_colors[data_idx % len(config.default_colors)])
color = (color[0],
int(color[1] * cur_color_coeff),
int(color[2] * cur_color_coeff),
int(color[3] * cur_color_coeff))
color = ad.gui.jupyter.misc.transform_color_tuple_to_str(color)
cur_color_coeff += color_coeff_step
element_trace_params = dict(
x=x_values,
y=cur_data[cur_elem_idx, :],
line=dict(color=color),
name=element_label,
visible=True,
)
element_trace_config = ad.config.set_default_config(config.default_element_trace, config.default_trace)
if len(config.default_subplot_element_traces) > subplot_idx:
element_trace_config = ad.config.set_default_config(config.default_subplot_element_traces[subplot_idx], element_trace_config)
if len(config.default_data_element_traces) > cur_elem_idx:
element_trace_config = ad.config.set_default_config(config.default_data_element_traces[cur_elem_idx], element_trace_config)
if len(config.element_traces) > elem_idx:
element_trace_config = ad.config.set_default_config(config.element_traces[elem_idx], element_trace_config)
element_trace_params = ad.config.set_default_config(element_trace_config, element_trace_params)
# handle legendgroup
element_trace_legendgroup = element_trace_params.legendgroup
if isinstance(element_trace_legendgroup, str):
element_trace_legendgroup = ad.gui.jupyter.misc.replace_str_from_dict(element_trace_legendgroup, {'subelem_idx': cur_elem_idx,
'elem_idx': elem_idx,
'data_idx': data_idx,
'subplot_idx': subplot_idx,
'mean_trace_legendgroup': mean_trace_legendgroup,
'std_trace_legendgroup': std_trace_legendgroup}
)
element_trace_params.legendgroup = element_trace_legendgroup
cur_elem_trace = plotly_scatter_plotter(**element_trace_params)
subplot_elem_traces.append(cur_elem_trace)
elem_idx += 1
mean_traces.append(subplot_mean_traces)
elem_traces.append(subplot_elem_traces)
# set for the std toggle buttons which traces should be hidden and which ones should be shown
layout = config.layout
# set default values for all layouts
def set_axis_properties_by_default(axis_name, fig_layout, config_layout):
# sets the axis properties to default values
def set_single_axis_property_default(cur_axis_name, default_name):
if cur_axis_name in fig_layout or cur_axis_name in config_layout:
cur_config = config_layout[cur_axis_name] if cur_axis_name in config_layout else dict()
config_layout[cur_axis_name] = ad.config.set_default_config(cur_config, config_layout[default_name])
default_name = 'default_' + axis_name
set_single_axis_property_default(axis_name, default_name)
set_single_axis_property_default(axis_name + '1', default_name)
axis_idx = 2
while True:
cur_axis_name = axis_name + str(axis_idx)
if cur_axis_name not in fig_layout and cur_axis_name not in config_layout:
break
set_single_axis_property_default(cur_axis_name, default_name)
axis_idx += 1
set_axis_properties_by_default('xaxis', fig['layout'], layout)
set_axis_properties_by_default('yaxis', fig['layout'], layout)
    # remove default fields, because they are not true properties of the plotly layout
del (layout['default_xaxis'])
del (layout['default_yaxis'])
update_menus_visible_meanstd = []
update_menus_visible_mean = []
update_menus_visible_elements = []
for subplot_idx in range(len(mean_traces)):
update_menus_visible_meanstd.extend([True, True] * int(len(mean_traces[subplot_idx]) / 2) + [False] * int(len(elem_traces[subplot_idx])))
update_menus_visible_mean.extend([True, False] * int(len(mean_traces[subplot_idx]) / 2) + [False] * int(len(elem_traces[subplot_idx])))
element_default_visibility = [elem_trace['visible'] for elem_trace in elem_traces[subplot_idx]]
update_menus_visible_elements.extend([False, False] * int(len(mean_traces[subplot_idx]) / 2) + element_default_visibility)
if layout.updatemenus:
layout.updatemenus[0]['buttons'][0]['args'][0]['visible'] = update_menus_visible_meanstd
layout.updatemenus[0]['buttons'][1]['args'][0]['visible'] = update_menus_visible_mean
layout.updatemenus[0]['buttons'][2]['args'][0]['visible'] = update_menus_visible_elements
if config.init_mode == 'mean_std':
config.layout.updatemenus[0]['active'] = 0
elif config.init_mode == 'mean':
config.layout.updatemenus[0]['active'] = 1
elif config.init_mode == 'elements':
config.layout.updatemenus[0]['active'] = 2
else:
raise ValueError('Value {!r} for \'config.init_mode\' is not supported! Only \'mean_std\',\'mean\',\'elements\'.'.format(config.init_mode))
if config.init_mode == 'mean_std':
trace_visibility = update_menus_visible_meanstd
elif config.init_mode == 'mean':
trace_visibility = update_menus_visible_mean
elif config.init_mode == 'elements':
trace_visibility = update_menus_visible_elements
else:
raise ValueError('Value {!r} for \'config.init_mode\' is not supported! Only \'mean_std\',\'mean\',\'elements\'.'.format(config.init_mode))
cur_row = 1
cur_col = 1
for subplot_idx in range(n_subplots):
n_traces = len(mean_traces[subplot_idx]) + len(elem_traces[subplot_idx])
fig.add_traces(mean_traces[subplot_idx] + elem_traces[subplot_idx],
rows=[cur_row] * n_traces,
cols=[cur_col] * n_traces)
if cur_col < config.subplots.cols:
cur_col += 1
else:
cur_col = 1
cur_row += 1
for trace_idx in range(len(fig['data'])):
fig['data'][trace_idx]['visible'] = trace_visibility[trace_idx]
fig['layout'].update(layout)
plotly.offline.iplot(fig)
return fig
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
There is a difference here in the default behavior of the underlying
build tools. Specifically, when building the entire "solution", Xcode
puts the output of each project relative to the .xcodeproj directory,
while Visual Studio (and our implementation of Make) put it
in a build directory relative to the "solution"--that is, the entry-point
from which you built the entire tree.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('prog1.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('prog1.gyp', test.ALL, chdir='relocate/src')
test.run_built_executable('prog1',
stdout="Hello from prog1.c\n",
chdir='relocate/src')
if test.format == 'xcode':
chdir = 'relocate/src/subdir'
else:
chdir = 'relocate/src'
test.run_built_executable('prog2',
chdir=chdir,
stdout="Hello from prog2.c\n")
test.pass_test()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import *
admin.site.register(Editorial)
admin.site.register(Genero)
admin.site.register(Autor)
admin.site.register(Dealer)
admin.site.register(Lector)
admin.site.register(Libro)
admin.site.register(Direccion)
admin.site.register(Pedido)
admin.site.register(Ciudad)
admin.site.register(Region)
admin.site.register(Pedido_Libro)
admin.site.register(Dealer_Catalogo)
admin.site.register(Pedido_Lector)
|
"""src/experiment_data_viz/utils.py"""
import os
import uuid
from pathlib import Path
from shutil import rmtree
import pandas as pd
import streamlit as st
def streamlit_static_downloads_folder() -> Path:
"""Create a downloads directory within the streamlit static asset directory.
HACK: This only works when we've installed streamlit with pipenv,
so the permissions during install are the same as the running process.
Returns
-------
Path
The path to the downloads directory.
"""
streamlit_static_path = Path(st.__path__[0]).joinpath("static")
downloads_path = streamlit_static_path.joinpath("downloads")
if downloads_path.exists():
rmtree(downloads_path)
downloads_path.mkdir()
return downloads_path
def get_table_download_link(df: pd.DataFrame, downloads_path: Path) -> str:
"""Create a table download link for a dataframe.
Parameters
----------
df : pd.DataFrame
The input dataframe to download.
downloads_path : Path
The path to the downloads directory.
Returns
-------
str
The download link for the data.
"""
temp_file_path = downloads_path.joinpath(f"{str(uuid.uuid4())}.csv")
df.to_csv(temp_file_path)
return f"[Download as .csv file](downloads/{os.path.basename(temp_file_path)})"
|
import numpy as np
class LabelDictionary:
def __init__(self, uniqueLabels):
self.uniqueLabels = uniqueLabels
self.dictionary = self.getDictionary()
def getDictionary(self):
# return dict(zip(np.arange(len(self.uniqueLabels)), self.uniqueLabels))
return dict(zip(self.uniqueLabels, np.arange(len(self.uniqueLabels))))
# labels = np.array(['clothes', 'device', 'food'])
# labelDict = LabelDictionary(labels)
# print(labelDict.dictionary)
|
from __future__ import print_function
import sys
from operator import add
from pyspark import SparkContext
from csv import reader
import re
def check_latitude(input):
if len(input) == 0:
return 'NULL\tNULL\tNULL'
try:
x = float(input)
return 'FLOAT\tLATITUDE\tVALID' if x >= 40.47 and x<= 40.93 else 'FLOAT\tLATITUDE\tINVALID/OUTLIER'
except ValueError as err:
return 'FLOAT\tLATITUDE\tINVALID/OUTLIER'
if __name__ == "__main__":
sc = SparkContext()
lines = sc.textFile(sys.argv[1], 1)
lines = lines.mapPartitions(lambda x: reader(x)).filter(lambda x: x[0] != 'CMPLNT_NUM')
    results = lines.map(lambda x: check_latitude(x[21]))
    results.saveAsTextFile('check_latitude.out')
sc.stop()
|
# Copyright 2021 Vittorio Mazzia & Francesco Salvetti. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from utils.layers import PrimaryCaps, FCCaps, Length, Mask
def efficient_capsnet_graph(input_shape):
"""
Efficient-CapsNet graph architecture.
Parameters
----------
input_shape: list
network input shape
"""
inputs = tf.keras.Input(input_shape)
x = tf.keras.layers.Conv2D(32,5,activation="relu", padding='valid', kernel_initializer='he_normal')(inputs)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2D(64,3, activation='relu', padding='valid', kernel_initializer='he_normal')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2D(64,3, activation='relu', padding='valid', kernel_initializer='he_normal')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Conv2D(128,3,2, activation='relu', padding='valid', kernel_initializer='he_normal')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = PrimaryCaps(128, 9, 16, 8)(x)
digit_caps = FCCaps(10,16)(x)
digit_caps_len = Length(name='length_capsnet_output')(digit_caps)
return tf.keras.Model(inputs=inputs,outputs=[digit_caps, digit_caps_len], name='Efficient_CapsNet')
def generator_graph(input_shape):
"""
Generator graph architecture.
Parameters
----------
input_shape: list
network input shape
"""
inputs = tf.keras.Input(16*10)
x = tf.keras.layers.Dense(512, activation='relu', kernel_initializer='he_normal')(inputs)
x = tf.keras.layers.Dense(1024, activation='relu', kernel_initializer='he_normal')(x)
x = tf.keras.layers.Dense(np.prod(input_shape), activation='sigmoid', kernel_initializer='glorot_normal')(x)
x = tf.keras.layers.Reshape(target_shape=input_shape, name='out_generator')(x)
return tf.keras.Model(inputs=inputs, outputs=x, name='Generator')
def build_graph(input_shape, mode, verbose):
"""
    Efficient-CapsNet graph architecture with reconstruction regularizer. The network can be initialized with different modalities.
Parameters
----------
input_shape: list
network input shape
mode: str
working mode ('train', 'test' & 'play')
verbose: bool
"""
inputs = tf.keras.Input(input_shape)
y_true = tf.keras.layers.Input(shape=(10,))
noise = tf.keras.layers.Input(shape=(10, 16))
efficient_capsnet = efficient_capsnet_graph(input_shape)
if verbose:
efficient_capsnet.summary()
print("\n\n")
digit_caps, digit_caps_len = efficient_capsnet(inputs)
noised_digitcaps = tf.keras.layers.Add()([digit_caps, noise]) # only if mode is play
masked_by_y = Mask()([digit_caps, y_true])
masked = Mask()(digit_caps)
masked_noised_y = Mask()([noised_digitcaps, y_true])
generator = generator_graph(input_shape)
if verbose:
generator.summary()
print("\n\n")
x_gen_train = generator(masked_by_y)
x_gen_eval = generator(masked)
x_gen_play = generator(masked_noised_y)
if mode == 'train':
return tf.keras.models.Model([inputs, y_true], [digit_caps_len, x_gen_train], name='Efficinet_CapsNet_Generator')
elif mode == 'test':
return tf.keras.models.Model(inputs, [digit_caps_len, x_gen_eval], name='Efficinet_CapsNet_Generator')
elif mode == 'play':
return tf.keras.models.Model([inputs, y_true, noise], [digit_caps_len, x_gen_play], name='Efficinet_CapsNet_Generator')
else:
raise RuntimeError('mode not recognized')
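# A hedged usage sketch (assumes MNIST-like 28x28x1 inputs and the 10-class capsule
# head defined above; `x_test` is illustrative, not defined in this file):
# model = build_graph((28, 28, 1), mode='test', verbose=True)
# digit_caps_len, reconstruction = model.predict(x_test[:8])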
|
from django.shortcuts import render,HttpResponse,redirect
from django.contrib.auth import authenticate,login as loginUser,logout
from todoapp.models import TODO
from django.contrib.auth.forms import UserCreationForm , AuthenticationForm
from todoapp.forms import TODOForm
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url = 'login')
def home(request):
if request.user.is_authenticated:
user = request.user
form = TODOForm()
todos = TODO.objects.filter(user=user).order_by('priority')
return render(request,'index.html',context={'form' : form,'todos':todos})
def login(request):
if request.method == 'GET':
form1 = AuthenticationForm()
context = {
"form" : form1
}
return render(request , 'login.html' , context=context )
else:
form = AuthenticationForm(data=request.POST)
print(form.is_valid())
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username = username , password = password)
if user is not None:
loginUser(request , user)
return redirect('home')
else:
context = {
"form" : form
}
return render(request , 'login.html' , context=context )
def signup(request):
if request.method == 'GET':
form = UserCreationForm()
context = {
"form":form
}
return render(request,'signup.html',context=context)
else:
print(request.POST)
form = UserCreationForm(request.POST)
context = {
"form":form
}
if form.is_valid():
user = form.save()
print(user)
if user is not None:
return redirect('login')
else:
return render(request,'signup.html',context=context)
@login_required(login_url = 'login')
def addTask(request):
if request.user.is_authenticated:
user = request.user
print(user)
form = TODOForm(request.POST)
if form.is_valid():
print(form.cleaned_data)
task = form.save(commit=False)
task.user = user
task.save()
print("Taks is : ",task)
return redirect('home')
else:
return render(request,'index.html',context = {'form':form})
def deleteTodo(request , id):
TODO.objects.get(pk = id).delete()
return redirect('home')
def changestatus(request , id, status):
todo = TODO.objects.get(pk = id)
todo.status = status
todo.save()
return redirect('home')
def signout(request):
logout(request)
return redirect('login')
|
from kadi import events
# This script assumes an interactive Ska analysis session; the imports below make
# the implicit dependencies explicit (pylab-style plotting, Chandra time handling
# and telemetry access).
from Chandra.Time import DateTime
import Ska.engarchive.fetch_eng as fetch
from Ska.Matplotlib import plot_cxctime
from matplotlib.pyplot import *
from numpy import array
# Range 1
t_start = '2000:001'
t_stop = None
# Range 2
#t_start = '2006:220:00:00:00.000'
#t_stop = '2007:100:00:00:00.000'
# Range 3
#t_start = '2006:349:00:00:00.000'
#t_stop = '2006:354:00:00:00.000'
# Range 4
#t_start = '2006:351:00:00:00.000'
#t_stop = '2006:351:12:00:00.000'
t_event = '2006:351:04:38:00.000'
old_range = [[59.55,96.9],[58.32, 97.77]]
close('all')
dumps = events.dumps
dumps.interval_pad = (10, 7200)
if DateTime(t_stop).secs - DateTime(t_start).secs > 3600*24*365:
x = fetch.Msidset(['PM3THV1T','PM3THV2T'], t_start, t_stop, stat='5min')
x['PM3THV1T'].remove_intervals(dumps)
x['PM3THV2T'].remove_intervals(dumps)
    dt = x['PM3THV1T'].vals - x['PM3THV2T'].vals
else:
x = fetch.Msidset(['PM3THV1T','PM3THV2T'], t_start, t_stop)
x.interpolate(dt=32.8)
x['PM3THV1T'].remove_intervals(dumps)
x['PM3THV2T'].remove_intervals(dumps)
dt = x['PM3THV1T'].vals - x['PM3THV2T'].vals
post = x['PM3THV1T'].times > DateTime(t_event).secs
for ab in range(1,3):
subplot(3,1,ab)
temp = 'PM3THV' + str(ab) + 'T'
x[temp].plot('b', label='Pre-event')
plot_cxctime(x[temp].times[post], x[temp].vals[post], 'r', label='Post-event')
title(temp)
ylabel('deg F')
ylim([45,115])
old_on = array([old_range[ab-1][0], old_range[ab-1][0]])
old_off = array([old_range[ab-1][1], old_range[ab-1][1]])
plot(xlim(), old_on, 'b:', label='Pre-Event Range')
plot(xlim(), old_off, 'b:')
legend()
subplot(3,1,3)
plot_cxctime(x['PM3THV1T'].times[~post], dt[~post], 'b', label='Pre-event')
plot_cxctime(x['PM3THV1T'].times[post], dt[post], 'r', label='Post-event')
title('PM3THV1T minus PM3THV2T')
ylabel('deg F')
ylim([-5,5])
plot(xlim(), array([2.45,2.45]), 'b:', label='Pre-Event Range')
plot(xlim(), array([-.86,-.86]), 'b:')
legend(loc='best')
|
#!/usr/bin/python
"""
Implementation of the IA for the Sudoku problem,
using AC-3 and Backtracking
Miguel de la Ossa July, 2017
"""
import sys
import copy
from collections import deque, OrderedDict
from heapq import heapify, heappush, heappop
import time
_DEBUG_LEVEL = 1
def main(script, *args):
initialTime = time.clock()
values = {}
values["1"] = 50
values["2"] = 25
values["3"] = 150
values["4"] = 100
values["5"] = 75
values["6"] = 10
values["7"] = 200
values["8"] = 120
values["9"] = 300
    ordered_keys = []
    # TODO: check that this works
    for w in sorted(values, key=values.get, reverse=True):
        ordered_keys.append(w)
    for x in ordered_keys:
        print x
finalTime = time.clock()
print ('time', finalTime - initialTime)
if __name__ == '__main__':
main(*sys.argv)
|
import math
while True:
x = int(input())
if x == 0: break
n = list(str(math.factorial(x)))
con = 0
for i in range(len(n)-1,0,-1):
if n[i] != '0':
break
con += 1
print(con)
|
import numpy as np
import itertools
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
class Visualize:
"""Implements different visualization methods on top of matplotlib."""
def __init__(self):
"""Initialize the size of the plots."""
plt.rcParams['figure.figsize'] = [15, 8]
def plot_confusion_matrix(self, y_test, y_pred, classes, normalize=False,
title='Confusion Matrix', cmap=plt.cm.get_cmap('Reds')):
"""Plot a confusion matrix from the given parameters.
Args:
            y_test (1darray): 1D array representing the ground-truth values.
            y_pred (1darray): 1D array representing the predicted values.
            classes (List[str]): List of class names.
            normalize (bool): Whether to normalize the confusion matrix or not.
            title (str): The title of the confusion matrix plot.
            cmap (matplotlib.cmap): The color map used for the confusion matrix plot.
"""
        cm = confusion_matrix(y_test, y_pred)
        if normalize:
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, round (cm[i, j],2), horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
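# A minimal usage sketch (hypothetical labels; assumes an interactive matplotlib backend):
# viz = Visualize()
# viz.plot_confusion_matrix(y_test=[0, 1, 1, 0], y_pred=[0, 1, 0, 0],
#                           classes=['negative', 'positive'], normalize=True)
# plt.show()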
|
# -*- coding: utf-8 -*-
from .models import Repo
from .views import repo
|
import tkinter as tk
from show_train import show_train
from mix_recommend import mix_re
class MainPage():
def __init__(self, window):
self.window_main = tk.Toplevel(window, bg='pink')
self.window_main.geometry('300x250')
self.window_main.title('主 界 面')
btn_train = tk.Button(self.window_main, bg='Light blue', text='训 练 模 型',
command=self.start_train)
btn_train.place(x=100, y=50)
btn_recd = tk.Button(self.window_main, bg='Light blue', text='混 合 推 荐',
command=self.start_recommend)
btn_recd.place(x=100, y=120)
def start_train(self):
self.train_model = show_train(self.window_main)
def start_recommend(self):
self.recommend = mix_re(self.window_main)
# ------------ Mixed recommendation window --------------#
|
# File reading and writing
# StringIO and BytesIO
# Working with files and directories
# Serialization
# f.read() f.close()
from io import StringIO
f = StringIO()
print(f.write('hello'))
print(f.write(' '))
print(f.write('world!'))
print(f.getvalue())
f = StringIO('hello!\nHi\nGoogbye!')
while True:
s = f.readline()
if s == '':
break
print(s.strip())
from io import BytesIO
f = BytesIO()
# We write UTF-8 encoded bytes, not str
print(f.write('中文'.encode('utf-8')))
print(f.getvalue())
f = BytesIO(b'\xe4\xb8\xad\xe6\x96\x87')
print(f.read())
# Working with files and directories
import os
# 'posix' means the system is Linux, Unix or Mac OS X; 'nt' means Windows.
print(os.name)
# Detailed system information; os.uname() is not supported on Windows.
print(os.uname())
# View the environment variables defined in the operating system
print(os.environ)
# Get the value of a specific environment variable
print(os.environ.get('PATH'))
# Get the absolute path of the current directory
print(os.path.abspath('.'))
# To create a new directory under some path, first build the new directory's full path
print(os.path.join('d:/', 'testdir'))
# Create a directory
os.mkdir('d:/testdir')
# Remove a directory
os.rmdir('d:/testdir')
# When combining two paths, don't concatenate strings directly; use os.path.join()
print(os.path.split('d:/testdir/file.txt'))
# os.path.splitext() returns the file extension directly
print(os.path.splitext('d:/testdir/file.txt'))
# Rename a file:
os.rename('test.txt', 'test.py')
# Delete a file:
os.remove('test.py')
# List all directories under the current directory
print([x for x in os.listdir('.') if os.path.isdir(x)])
# List all .py files
print([x for x in os.listdir('.') if os.path.isfile(x) and os.path.splitext(x)[1] == '.py'])
# Serialization
'''
pickle.dumps() serializes an arbitrary object into bytes, which can then be written to a file.
Alternatively, pickle.dump() serializes the object and writes it directly to a file-like object:
'''
import pickle
d = dict(name='Bob', age=20, score=88)
print(pickle.dumps(d))
f = open('dump.txt', 'wb')
pickle.dump(d, f)
f.close()
f = open('dump.txt', 'rb')
d = pickle.load(f)
f.close()
print(d)
# Convert a Python object to JSON
import json
d = dict(name='changjie', age=21, score=99)
# dumps() returns a str whose content is standard JSON.
print(json.dumps(d))
# To deserialize JSON into a Python object, use loads() or the corresponding load() method.
json_str = '{"age": 21, "score": 99, "name": "changjie"}'
print(json.loads(json_str))
class Student(object):
def __init__(self, name, age, score):
self.name = name
self.age = age
self.score = score
# Kept separate on purpose; do not put this inside the class above.
def student2dict(std):
return {
'name': std.name,
'age': std.age,
'score': std.score
}
s = Student('changjie', 21, 99)
# print(json.dumps(s))
print(json.dumps(s, default=student2dict))
# Turn an instance of any class into a dict
print(json.dumps(s, default=lambda obj: obj.__dict__))
|
from django.shortcuts import get_object_or_404
from django.views import generic
from ..models import Question
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
context_object_name = 'question'
pk_url_kwarg = 'question_id'
success_url = 'polls/results'
def get_object(self, queryset=None):
return get_object_or_404(self.model, id=self.kwargs['question_id'])
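# A hedged sketch of the matching URLconf entry (module and route names assumed,
# not taken from this app):
# from django.urls import path
# urlpatterns = [
#     path('<int:question_id>/results/', ResultsView.as_view(), name='results'),
# ]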
|
from datetime import datetime
from lxml import etree
class UserParser:
type_converter = {
1: 'TV',
2: 'OVA',
3: 'Movie',
4: 'Special',
5: 'ONA',
6: 'Music'
}
def __init__(self, html, title_type):
self._html = html
self._title_type = title_type
def get_user_name(self):
username_regex = etree.fromstring(self._html).xpath("//a[re:match(text(), '<title>(.*?)\'s(.*)</title>')]")
if username_regex:
username = username_regex
else:
username = None
return username
def get_list(self):
result = []
tree = etree.fromstring(self._html)
error_list = tree.xpath('//myanimelist/error/text()')
if error_list:
raise ValueError(error_list)
user_node = tree.xpath('//myanimelist/myinfo/user_id/text()')
if not user_node:
raise ValueError('User not found')
user_id = int(user_node[0])
if self._title_type == 'anime':
xml_list = tree.xpath('//anime')
else:
xml_list = tree.xpath('//manga')
dict_len = len(xml_list)
scores_len = 0
for xml_node in xml_list:
id = int(xml_node.xpath('series_' + self._title_type + 'db_id/text()')[0])
status = int(xml_node.xpath('my_status/text()')[0])
score = int(xml_node.xpath('my_score/text()')[0])
scores_len = scores_len + 1 if score else scores_len
time = int(xml_node.xpath('my_last_updated/text()')[0])
date = datetime.fromtimestamp(time)
int_type = int(xml_node.xpath('series_type/text()')[0])
result.append({'title': id, 'type': self._title_type, 'score': score, 'status': status,
'last_update': date, 'user': user_id})
return user_id, result, dict_len, scores_len
|
#!/usr/local/bin/python3.8
# There are only two boolean values
print ( True )
print ( False )
### Example of a True boolean
print ( 4.5e9 == 4.5 * (10 ** 9) )
# Integer
print ( 2 + 2 )
# Float (Scientific)
print ( 2.0 + 2.0 )
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : Jerry Zhu
import numpy as np
import cv2
import random
import os
# calculate means and std
train_txt_path = './train_label.csv'
# How many images to sample for the computation
CNum = 4572
img_h, img_w = 32, 32
imgs = np.zeros([img_w, img_h, 3, 1])
means, stdevs = [], []
with open(train_txt_path, 'r') as f:
lines = f.readlines()
# random.shuffle(lines)
    # The first row is the header, so skip it
for i in range(1,CNum):
img_path = os.path.join(lines[i].rstrip().split(',')[1])
img = cv2.imread(img_path)
img = cv2.resize(img, (img_h, img_w))
img = img[:, :, :, np.newaxis]
if (i+1)%100==0:
print(i)
imgs = np.concatenate((imgs, img), axis=3)
imgs = imgs.astype(np.float32) / 255.
for i in range(3):
    # Flatten each slice of the third dimension (one of the RGB channels) into a single row
pixels = imgs[:, :, i, :].ravel()
means.append(np.mean(pixels))
stdevs.append(np.std(pixels))
# cv2 reads images as BGR; PIL/Skimage read RGB, which would not need this conversion
# BGR --> RGB
means.reverse()
stdevs.reverse()
print("normMean = {}".format(means))
print("normStd = {}".format(stdevs))
print('transforms.Normalize({},{})'.format(means, stdevs))
|
__author__ = "Panagiotis Garefalakis"
__copyright__ = "Imperial College London"
# The MIT License (MIT)
#
# Copyright (c) 2016 Panagiotis Garefalakis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import sys
import os
import plots.utils as utils
# Global style configuration
utils.set_rcs()
colors = ['r', 'g', 'b', 'c', 'm']
markers = ['o', '^', 'v', 'h']
linestyle_list = ['--', '-.', '-', ':']
# workloads = ["A", "B", "C", "D", "E", "F"]
systems_compared = ['YARN', 'MEDEA (intra-only)', 'MEDEA']
systems_labels = ['YARN', 'MEDEA \n (intra-only)', 'MEDEA']
def cdf(data, label_count, label):
data_size=len(data)
# Set bins edges
data_set=sorted(set(data))
bins=np.append(data_set, data_set[-1]+1)
# Use the histogram function to bin the data
counts, bin_edges = np.histogram(data, bins=bins, density=False)
counts=counts.astype(float)/data_size
# Find the cdf
cdf = np.cumsum(counts)
# Plot the cdf
utils.plt.plot(bin_edges[0:-1], cdf,linestyle=linestyle_list[label_count], label=systems_labels[label_count], color=colors[label_count])
#############################################
# Message Format:
# {READ | INSERT | TOTAL},TIMESTAMP,LATENCY (us)
#############################################
def file_parser(fnames):
i = 0
for f in fnames:
print "Analyzing %s:" % (f)
# parsing
j = 0
minrto_outliers = 0
values = []
for line in open(f).readlines():
if "STDIO [INFO] READ:" in line:
fields = [x.strip() for x in line.split("READ:")]
if j == 100:
print 'Sample Latency: %s -> '% int(fields[1])
print line
print f
req_latency = int(fields[1]) # Latency in millis
values.append(req_latency)
#print "request: %s ts %s latency %d" % (req_type, str( req_ts), req_latency)
j += 1
print "--------------------------------------"
print "%s (%s)" % (labels[i], f)
print "--------------------------------------"
print "%d total samples" % (j)
# print "Runtime: %d seconds"% (end_ts-start_ts).total_seconds()
print "%d outliers due to MinRTO" % (minrto_outliers)
print "--------------------------------------"
# print "===== TYPE: %s ====="% (type)
# print "Throughput: %d req/sec" % (j / (end_ts - start_ts).total_seconds())
avg = np.mean(values)
print "AVG: %f" % (avg)
median = np.median(values)
print "MEDIAN: %f" % (median)
min_val = np.min(values)
print "MIN: %ld" % (min_val)
max_val = np.max(values)
print "MAX: %ld" % (max_val)
stddev = np.std(values)
print " STDEV: %f" % (stddev)
print " PERCENTILES:"
perc1 = np.percentile(values, 1)
print " 1st: %f" % (perc1)
perc10 = np.percentile(values, 10)
print " 10th: %f" % (np.percentile(values, 10))
perc25 = np.percentile(values, 25)
print " 25th: %f" % (np.percentile(values, 25))
perc50 = np.percentile(values, 50)
print " 50th: %f" % (np.percentile(values, 50))
perc75 = np.percentile(values, 75)
print " 75th: %f" % (np.percentile(values, 75))
perc90 = np.percentile(values, 90)
print " 90th: %f" % (np.percentile(values, 90))
perc99 = np.percentile(values, 99)
print " 99th: %f" % (np.percentile(values, 99))
values = utils.reject_outliers(np.array(values))
cdf(values, i, labels[i])
i += 1
if __name__ == '__main__':
print "Sytem Path {}".format(os.environ['PATH'])
if len(sys.argv) < 2:
print "Usage: storm_cdf.py <input PATH 1> <label 1> ... " \
"<input PATH n> <label n> [output file]"
if (len(sys.argv) - 1) % 2 != 0:
outname = sys.argv[-1]
else:
outname = "storm_placement_latency_cdf"
fpaths = []
labels = []
for i in range(0, len(sys.argv) - 1, 2):
fpaths.append(sys.argv[1 + i])
labels.append(sys.argv[2 + i])
print 'Paths given: {}'.format(" | ".join(fname for fname in fpaths))
print 'Labels given: {}'.format(" | ".join(label for label in labels))
fnames = []
for path in fpaths:
fnames.append(path + "trendingHashTags.log")
print "Processing.. "+ str(fnames)
file_parser(fnames)
utils.plot_cdf(outname, ylabel="Cache request latency [ms]")
|
from .uibase import UIBase
class Button(UIBase):
def __init__(self,text='',size=None):
self.html='<button>{}</button>'.format(text)
return
class Label(UIBase):
def __init__(self,text=''):
self.html='<b>{}</b>'.format(text)
return
class Field(UIBase):
def __init__(self):
self.html="<input type='text' />"
return
|
test_case = int(input())
for _ in range(test_case):
x, y, n = map(int, input().split())
r = n % x
# print(r, y, x)
if r >= y:
print(n - r + y)
else:
print(n - r - x + y)
|
import test
a= test.foo()
#print a
|
from osgeo import gdal, osr
import math
src_filename ='/home/user/thesis/IMG_0048_4.tif'
dst_filename = '/home/user/thesis/output.tif'
def myImageGeoReference(src_filename,dst_filename):
# Opens source dataset
src_ds = gdal.Open(src_filename)
format = "GTiff"
driver = gdal.GetDriverByName(format)
# Open destination dataset
dst_ds = driver.CreateCopy(dst_filename, src_ds, 0)
# Specify raster location through geotransform array
    # (upperleftx, scalex, skewx, upperlefty, skewy, scaley)
    # Scale = size of one pixel in units of the raster projection
    # (the stock GDAL example assumed 100x100; here the scale is derived from the EXIF data below)
myEXIF = main_dic[src_filename]
coor = myEXIF["gps"]
groundheight = 238
Radius = 6371000.
flightHeight = coor[2]-groundheight
dx=89.04573706*flightHeight/100
dy=66.9190639*flightHeight/100
rx=myEXIF["size"][0]
ry=myEXIF["size"][1]
width = dx #( Radius*math.cos( math.radians(coor[1]) ) ))
height = -dy
x_scale = width/rx
y_scale = height/ry
alpha = myEXIF["azimuth"]
x_skew = -math.sin(math.radians(alpha)) * x_scale
y_skew = math.sin(math.radians(alpha)) * y_scale#math.cos(math.radians(alpha)) * y_scale
x_scale = math.cos(math.radians(alpha)) * x_scale
y_scale = math.cos(math.radians(alpha)) * y_scale
alpha = alpha + 306.79876698156386
d=(width**2+height**2)**0.5
x_coor = coor[0]+d/2*math.sin(math.radians(alpha))
y_coor = coor[1]+d/2*math.cos(math.radians(alpha))
#x_coor = coor[0]-width/2
#y_coor = coor[1]-height/2
gt = [x_coor, x_scale, x_skew, y_coor, y_skew, y_scale]
# Set location
dst_ds.SetGeoTransform(gt)
# Get raster projection
epsg = 3857
srs = osr.SpatialReference()
srs.ImportFromEPSG(epsg)
dest_wkt = srs.ExportToWkt()
# Set projection
dst_ds.SetProjection(dest_wkt)
# Close files
dst_ds = None
src_ds = None
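# Worked example of the geotransform layout for the simple north-up case (azimuth 0,
# no skew): an image of rx x ry pixels covering dx units in x and dy units in y,
# with its upper-left corner at (x0, y0), would use
#   gt = [x0, dx / rx, 0.0, y0, 0.0, -dy / ry]
# Note: main_dic (mapping a file path to its EXIF info) must be defined elsewhere
# before calling myImageGeoReference(); the values above are purely illustrative.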
|
# Copyright 2015 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""X264 baseline codec definition.
This file defines how to run encode and decode for the x264 implementation
of H.264 using options suitable for realtime - this includes no lookahead.
"""
import encoder
import x264
class X264RealtimeCodec(x264.X264Codec):
def __init__(self, name='x264-rt', formatter=None):
super(X264RealtimeCodec, self).__init__(name, formatter)
self.option_set.LockOption('rc-lookahead', '0')
def StartEncoder(self, context):
return encoder.Encoder(context, encoder.OptionValueSet(
self.option_set,
'--rc-lookahead 0 --preset faster --tune psnr',
formatter=self.option_formatter))
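# A hedged usage sketch (assumes the surrounding compare-codecs framework; the
# `context` object below is illustrative, not defined in this file):
# codec = X264RealtimeCodec()
# my_encoder = codec.StartEncoder(context)  # encoder locked to --rc-lookahead 0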
|
def calculate_net(gross, vat = 0.23):
net_price = gross / (1 + vat)
return round(net_price, 2)
print(calculate_net(100))
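# Worked check: with the default 23% VAT, 123 / 1.23 == 100.0
print(calculate_net(123))
# A hypothetical reduced VAT rate, for illustration
print(calculate_net(100, vat=0.08))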
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
class MLP (nn.Module):
def __init__(self, n_inputs, n_hidden, n_classes):
super(MLP, self).__init__()
self.n_inputs = n_inputs # scalar integer. 3072, nb_hiddenUnits, nb_hiddenUnits..., nb_classes
self.n_hidden = n_hidden # a list
print ("self.n_hidden",self.n_hidden)
self.n_classes = n_classes # =10
self.layers = nn.ModuleList() # an empty list
for i in range (0, len (self.n_hidden)):
nb_hiddenUnits = n_hidden[i]
self.layers.append(nn.Linear(self.n_inputs , nb_hiddenUnits))
self.layers.append(nn.BatchNorm1d(nb_hiddenUnits))
self.layers.append(nn.ELU())
self.n_inputs = nb_hiddenUnits
self.layers.append( nn.Linear(self.n_inputs, self.n_classes))
self.layers.append( nn.Softmax(dim=1)) #dim (int) – A dimension along which Softmax will be computed (so every slice along dim will sum to 1).
def forward(self, x):
"""
Performs forward pass of the input. Here an input tensor x is transformed through
several layer transformations.
Args:
x: input to the network
Returns:
out: outputs of the network
"""
out = x
# print ("forward type input: ",type(x))
for module in self.layers:
out = module(out)
return out
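# A minimal smoke test (hypothetical sizes, assuming CIFAR-10-like 3072-dim inputs):
if __name__ == '__main__':
    net = MLP(n_inputs=3072, n_hidden=[100, 100], n_classes=10)
    dummy = torch.randn(8, 3072)
    probs = net(dummy)  # shape (8, 10); rows sum to 1 because of the Softmax
    print(probs.shape, probs.sum(dim=1))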
|
deck=[['b',1],['b',1],['b',1],['b',2],
['b',2],['b',3],['b',3],['b',4],
['w',1],['w',1],['w',1],['w',2],
['w',2],['w',3],['w',3],['w',4]]
comp_hand=[["b",1],["b",3],["w",4]]
play_hand=deck[0:3]
print("player hand : ",play_hand)
print("computer hand : ",comp_hand)
new_comp_hand=str(comp_hand[0][1])+" "+str(comp_hand[0][0])
del deck[0:6]
print(deck)
trash=["1 b"]
stack=[['1 b',"2 b"],['1 w']]
tips=3
if tips==0:
print("var trash'e yükleneceksin")
x=(comp_hand[0][0])+" "+str(comp_hand[0][1])
print(x)
tips+=1
for x in stack:
if new_comp_hand in x:
#print(comp_hand.index(x))
trash.append(new_comp_hand)
print(trash)
break
else:
print("yok peki sıralama yapıp stack yapıcam")
|
from datetime import date
anoAtual = date.today().year
sexo = str(input('Qual o seu sexo? (M)masculino (F)feminino\n')).upper()
if sexo == 'F':
print('Você não precisa fazer o alistamento')
elif sexo == 'M':
anoNasc = int(input('Em que ano você nasceu?\n'))
idade = anoAtual - anoNasc
if idade == 18:
print('Você tem {} anos se aliste IMEDIATAMENTE'.format(idade))
elif idade > 18:
anosDeveria = idade - 18
anoAlistamento = anoAtual-anosDeveria
print('Você tem {} anos deveria ter se alistado em {}'.format(idade, anoAlistamento))
else:
anosFaltam = 18 - idade
print('Você tem {} anos ainda faltam {} anos para se alistar'.format(idade, anosFaltam))
else:
print('Você deve ter digitado alguma tecla errada')
|
A=int(input("A= "))
B=int(input("B= "))
C=int(input("C= "))
print((A==B)or(A==C)or(B==C))
|
import pandas as pd
import copy
import uuid
class User:
def __init__(self, user_id, name, password, currentAuthority):
self.user_id = user_id
self.token = ""
self.name = name
self.password = password
self.currentAuthority = currentAuthority
def set_token(self):
self.token = str(uuid.uuid4())
return self.token
def clean_token(self):
self.token = None
|
# coding=UTF-8
import scream
import urllib2
import mechanize
import time
from bs4 import BeautifulSoup
import threading
import unicodedata
from unique import NamesCollection
# import ElementTree based on the python version
try:
import elementtree.ElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
names = NamesCollection.names
MALE = 1
FEMALE = 2
UNISEX = 3
UNKNOWN = 99
latin_letters = {}
symbols = (u"абвгдеёзийклмнопрстуфхъыьэАБВГДЕЁЗИЙКЛМНОПРСТУФХЪЫЬЭ",
u"abvgdeezijklmnoprstufh'y'eABVGDEEZIJKLMNOPRSTUFH'Y'E")
#tr = {ord(a): ord(b) for a, b in zip(*symbols)}
tr = dict()
#moving to python 2.7.3
for a, b in zip(*symbols):
    tr[ord(a)] = ord(b)
def cyrillic2latin(input):
return input.translate(tr)
def is_latin(uchr):
try:
return latin_letters[uchr]
except KeyError:
return latin_letters.setdefault(uchr,
'LATIN' in unicodedata.name(uchr))
def only_roman_chars(unistr):
return all(is_latin(uchr)
for uchr in unistr
if uchr.isalpha()) # isalpha suggested by John Machin
def StripNonAlpha(s):
return "".join(c for c in s if c.isalpha())
class GeneralGetter(threading.Thread):
finished = False
name = None
browser = None
def __init__(self, threadId, name):
scream.say('Initiating GeneralGetter, running __init__ procedure.')
self.threadId = threadId
threading.Thread.__init__(self)
self.daemon = True
self.finished = False
self.name = name
        #initialize the browser: the ultimate hacking and web-scraping TARDIS
self.my_browser = mechanize.Browser()
#my_browser.set_all_readonly(False) # allow everything to be written to
self.my_browser.set_handle_robots(False) # ignore robots
self.my_browser.set_handle_refresh(False) # can sometimes hang without this
#end
def run(self):
        scream.cout('GeneralGetter thread(' + str(self.threadId) + ') ' + 'starts working on name ' + str(self.name.encode('utf-8')))
self.finished = False
self.get_data(self.name)
def is_finished(self):
return self.finished if self.finished is not None else False
def set_finished(self, finished):
scream.say('Marking the thread ' + str(self.threadId) + ' as finished..')
self.finished = finished
def cleanup(self):
        scream.say('Marking thread on ' + str(self.threadId) + "/" + str(self.name.encode('utf-8')) + ' as definitely finished.')
self.finished = True
scream.say('Terminating/join() thread on ' + str(self.threadId) + ' ...')
self.my_browser.close()
def get_data(self, first_name):
global names
global UNKNOWN
global MALE
global FEMALE
global UNISEX
        scream.say('Asking the internet for the gender now')
while True:
try:
self.response = self.my_browser.open('http://genderchecker.com/')
self.response.read()
break
except urllib2.URLError:
                scream.say('Site genderchecker.com seems to be down. '
                           'Waiting 60s before retrying.')
time.sleep(60)
scream.say('Response read. Mechanize selecting form.')
self.my_browser.select_form("aspnetForm")
self.my_browser.form.set_all_readonly(False)
# allow everything to be written
self.control = self.my_browser.form.find_control("ctl00$TextBoxName")
if only_roman_chars(first_name):
self.control.value = StripNonAlpha(first_name.encode('utf-8'))
else:
self.control.value = StripNonAlpha(cyrillic2latin(first_name).encode('utf-8'))
#check if value is enough
#control.text = first_name
scream.say('Control value is set to :' + str(self.control.value))
self.submit_retry_counter = 4
while True:
try:
self.response = self.my_browser.submit()
self.html = self.response.read()
break
except mechanize.HTTPError, e:
self.submit_retry_counter -= 1
if self.submit_retry_counter < 1:
raise StopIteration
                self.error_message = ('Site genderchecker.com seems to have internal problems, '
                                      'or my request is wibbly-wobbly nonsense. HTTPError ' +
                                      str(e.code) + '. Waiting 60s before retrying.')
scream.say(self.error_message)
scream.log_error(str(e.code) + ': ' + self.error_message)
time.sleep(60)
except:
self.submit_retry_counter -= 1
if self.submit_retry_counter < 1:
raise StopIteration
                self.error_message = ('Site genderchecker.com seems to have internal problems, '
                                      'or my request is wibbly-wobbly nonsense. '
                                      'Waiting 60s before retrying.')
scream.say(self.error_message)
scream.log_error(self.error_message)
time.sleep(60)
self.local_soup = BeautifulSoup(self.html)
self.failed = self.local_soup.find("span",
{"id":
"ctl00_ContentPlaceHolder1_" +
"LabelFailedSearchedFor"})
        if self.failed is not None:
            scream.say("Name not found in the gender database")
            names[first_name]['classification'] = UNKNOWN
        else:
            self.gender_tag = self.local_soup.find("span",
                                                   {"id":
                                                    "ctl00_ContentPlaceHolder1_" +
                                                    "LabelGenderFound"})
            if ((self.gender_tag is not None) and (self.gender_tag.contents is not None) and (len(self.gender_tag.contents) > 0)):
                self.gender = self.gender_tag.contents[0].string
                scream.say(self.gender)
                if self.gender.lower() == 'male':
                    names[first_name]['classification'] = MALE
                elif self.gender.lower() == 'female':
                    names[first_name]['classification'] = FEMALE
                elif self.gender.lower() == 'unisex':
                    names[first_name]['classification'] = UNISEX
            else:
                # the page showed neither the not-found label nor a gender result
                scream.log_warning('Something is really wrong: the result page had ' +
                                   'neither a not-found label nor a proper result', True)
                names[first_name]['classification'] = UNKNOWN
        self.set_finished(True)
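
# A tiny self-check sketch (not part of the original module): it exercises the
# corrected transliteration table in isolation; the sample name is arbitrary.
if __name__ == '__main__':
    sample = u"Иван"
    print(cyrillic2latin(sample))    # -> Ivan
    print(only_roman_chars(sample))  # -> False, so the Cyrillic branch is taken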
|