blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9125851b1d61361d8d141a4dca0d69cccfebbf50 | b62d6c292e573dca91d33dfbe61a417e18330d50 | /app/KS/image/helpers.py | b773d4bef9998935d10af95089338174274ed702 | [] | no_license | MCapallera/PatternRecognition_JungleSpeed | 7dd2e7854df8c4585612e5455505fcfb6f443972 | d8254abc26152af449fc17881283da46359b712d | refs/heads/master | 2020-05-01T17:54:37.985790 | 2019-05-26T17:22:27 | 2019-05-26T17:22:27 | 177,611,998 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | import numpy
from skimage.filters import threshold_yen
def get_background_color(img):
    """Estimate the background colour of *img*.

    Yen's method picks an intensity threshold; the mean of every pixel
    brighter than that threshold is taken as the background colour.
    """
    bright_pixels = img[img > threshold_yen(img)]
    return numpy.mean(bright_pixels)
| [
"melania.grotti@unifr.ch"
] | melania.grotti@unifr.ch |
f748579f1a20a23d901c31b88322c26f451af433 | 44ce370130c7cd9a6a763be8dcc62362f57724e7 | /vagrant/data/Math2/cont2_8/run | eb3597973d86ef0dc6f0ed2817058872d9b99942 | [] | no_license | akemery/cnp3-docker | c122aac493868ec4fa2b7795b1fd7110998f93dc | 3f24649ab81e828cf6babcfac7965251cb4dc7e2 | refs/heads/main | 2023-06-21T10:06:05.668090 | 2021-07-26T07:39:42 | 2021-07-26T07:39:42 | 363,279,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | #! /usr/bin/python3
# -*- coding: utf-8 -*-
import sys
sys.path.insert(1, '/course/common')
from inginious import input, feedback, rst
from parsingDomain import compareDomains
# Reference answer for question "q1"; compared against the student's input.
correct = "{}" # to be modified /!\ (placeholder expected answer)
answer = input.get_input("q1")
grade = 0
# compareDomains returns a pair: (is_correct, explanation_message).
result = compareDomains(answer, correct)
if result[0]:
    feedback.set_problem_result("success","q1")
    feedback.set_problem_feedback("Bravo!","q1")
    grade += 100
else:
    feedback.set_problem_result("failed","q1")
    # Surface the comparison's explanation of the mismatch to the student.
    feedback.set_problem_feedback(result[1],"q1")
feedback.set_grade(grade)
# Global result is all-or-nothing: only a fully correct answer passes.
if grade == 100 :
    feedback.set_global_result("success")
else :
    feedback.set_global_result("failed")
| [
"assogba.emery@gmail.com"
] | assogba.emery@gmail.com | |
f93a39f3c7ce5dc35b811f46c70586ec4a00c270 | 4d93acd63ce2835fcd7ea610fcd412b727a4f03e | /08-Markov/decay.py | aa454eea1ad7fb4d3765d62e0e5f8e83dfc8525a | [] | no_license | jsbarbosa/JuanBarbosa_MCA | 41ebcc27bb7dd8a886c9b4c1b416bd7e3cad2e57 | 4f49d17282679ae1fa81d7cc892b6560edf93828 | refs/heads/master | 2021-01-11T17:53:44.115810 | 2017-04-24T17:58:09 | 2017-04-24T17:58:09 | 79,863,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 13 18:21:00 2017
@author: juan
"""
import numpy as np
import matplotlib.pyplot as plt
# Observed decay samples; the likelihood below is evaluated against these.
obs = np.array([1.5, 1.7, 2])
def rand():
    """Return a uniform random float in the half-open interval [-1, 1)."""
    u = np.random.random()
    return 2.0 * u - 1.0
def integral(a, b, lm):
    """Definite integral of exp(-t/lm) over [a, b] (closed form)."""
    return -lm * (func(b, lm) - func(a, lm))

def func(x, lm):
    """Unnormalised exponential-decay density exp(-x/lm)."""
    return np.exp(-x / lm)

def probability(x, lm):
    """Likelihood of the samples *x* under decay constant *lm*.

    Each sample density is normalised over the observable window [1, 20].
    """
    z = integral(1, 20, lm)
    likelihood = 1
    for sample in x:
        likelihood *= func(sample, lm) / z
    return likelihood

def bayesian(x, lm):
    """Posterior score for *lm*: a flat prior, so just the likelihood."""
    return probability(x, lm)
def hastings(N, dx = 1):
    """Metropolis-Hastings sampler for the decay constant.

    Produces a chain of N+1 values.  Proposals are uniform steps of
    width *dx* around the current state; a proposal is accepted with
    probability min(1, posterior ratio), and non-positive proposals are
    always rejected.
    """
    chain = np.ones(N + 1)
    chain[0] = np.random.random() * 10.0
    for step in range(N):
        current = chain[step]
        proposal = current + dx * rand()
        ratio = bayesian(obs, proposal) / bayesian(obs, current)
        accept_prob = min(ratio, 1.0)
        # Single uniform draw decides acceptance (same draw order as before).
        accepted = np.random.random() <= accept_prob and proposal > 0
        chain[step + 1] = proposal if accepted else current
    return chain
def rubin(N, M, dl):
    """Gelman-Rubin-style convergence diagnostic.

    Runs M independent Metropolis-Hastings chains of length N (proposal
    step width *dl*) and, for each prefix length j, compares the
    between-chain variance (B) with the averaged within-chain variance
    (W).  Returns the N-2 successive values of the statistic R.
    """
    avs = np.zeros(M)  # per-chain means over the current prefix
    vas = np.zeros(M)  # per-chain variances over the current prefix
    R = np.zeros(N-2)
    chains = np.array([hastings(N, dl) for i in range(M)])
    for j in range(2, N):
        for i in range(M):
            avs[i] = np.mean(chains[i, :j])
            vas[i] = np.std(chains[i, :j])**2
        total = np.mean(avs)
        # B: between-chain variance; W: mean within-chain variance.
        B = j/(M-1)*np.sum((avs-total)**2)
        W = vas.mean()
        R[j-2] = (j-1)/j + (B/W)*(M+1)/(j*M)
    return R
# Chain length and a log-spaced grid of proposal step widths to compare.
N = 10000
lm = np.logspace(-3, 3, 5)
for l in lm:
    # Convergence trace of 5 chains for this step width.
    R = rubin(N, 5, l)
    plt.plot(R, label="%f"%l)
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.show()
| [
"js.barbosa10@uniandes.edu.co"
] | js.barbosa10@uniandes.edu.co |
78d9bf44728572f4d21268ca5a3a81c35e52cf7e | 3dfb23604deb956cabd3e7d014389548f2b14e27 | /app/trade/apps.py | 33c34302eb16e58b685b3962cd5f3962890a1753 | [] | no_license | huanpython/mysite | 74c285f093a4af888d554d780997a23c25bc626e | 05bdba6174446117efd01f6f8c7d94768cb330fa | refs/heads/master | 2020-07-02T09:25:43.061976 | 2019-08-14T04:03:20 | 2019-08-14T04:03:20 | 201,485,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | from django.apps import AppConfig
class TradeConfig(AppConfig):
    """Django application config for the trade app."""
    name = 'app.trade'
    # Human-readable label shown in the Django admin.
    verbose_name = "交易管理"
"huanfuan@163.com"
] | huanfuan@163.com |
1ed5d148e48f6eaac83bf81e500e00be6515e921 | e9534ef4fbaea3cfee556e5da32927ba3860834c | /autoScale/main.py | 0f5a11984e217850e3df919739199b47384e4c69 | [] | no_license | joaoleite/marathon-autoscaling-app | 91791b831428105f4c4a778ffcacecad3edefe99 | ce45b562f006958dc14b88af6611092604cf4cfb | refs/heads/master | 2021-01-19T19:05:17.509686 | 2017-05-09T00:35:50 | 2017-05-09T00:35:50 | 88,398,742 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | from marathonAPI import MarathonAPI
from rabbitMQ import rabbitMQ
from settings import VAR_MARATHON_PORT, VAR_MARATHON_USE_HTTPS, VAR_MARATHON_PASSWORD, VAR_MARATHON_USER, \
VAR_RABBITMQ_WEB_PORT, VAR_RABBITMQ_PASSWORD, VAR_RABBITMQ_USER, VAR_RABBITMQ_HOST
from settings import MARATHON_INTERVAL_REFRESH_APP
from settings import logger, VAR_MARATHON_HOST
# Build the Marathon client and the RabbitMQ source from values in settings.
logger.info('Configurating MarathonAPI...')
host = MarathonAPI(host=VAR_MARATHON_HOST, port=VAR_MARATHON_PORT, use_https=VAR_MARATHON_USE_HTTPS, user=VAR_MARATHON_USER, password=VAR_MARATHON_PASSWORD)
logger.info('Configurating RabbitMQ...')
# `target` is handed to host.scaleApps() on every autoscaling pass below.
target = rabbitMQ(host=VAR_RABBITMQ_HOST, user=VAR_RABBITMQ_USER, password=VAR_RABBITMQ_PASSWORD, web_port=VAR_RABBITMQ_WEB_PORT)
import asyncio
def callback(n, loop):
    """Run one autoscaling pass and reschedule itself *n* seconds later.

    Errors are logged and swallowed so a single failed pass never stops
    the periodic loop.
    """
    try:
        host.findAppsWithAutoscaleLabels()
        host.scaleApps(target)
    except Exception as e:
        logger.error(e)
    finally:
        # Re-arm regardless of success or failure.
        now = loop.time()
        loop.call_at(now + n, callback, n, loop)
async def main(loop):
    """Schedule the first autoscaling callback, then idle forever."""
    delta_time = MARATHON_INTERVAL_REFRESH_APP
    loop.call_soon(callback, delta_time, loop)
    while True:
        # Keep the coroutine alive; the real work happens in callback().
        await asyncio.sleep(1)
event_loop = asyncio.get_event_loop()
try:
    event_loop.run_until_complete(main(event_loop))
finally:
    # main() loops forever, so we only get here on interruption/error.
    event_loop.close()
| [
"joaoleite@gmail.com"
] | joaoleite@gmail.com |
bed6c7b8a9b18521ccb6830724efd339b5523cb9 | a8cb99c512946691b6c53cf14538b44c39c62e88 | /models.py | a8f9314eba0b89e27e3eaf4139a8ce19fb2b8f63 | [] | no_license | Louise-LuLin/debias-gcn | 3d3f0c57cd22ed6506841c463820eac980ae4769 | 82ee00cce6f5672c8823cf31a2fe9e5b54eed56e | refs/heads/main | 2023-05-03T05:24:45.506722 | 2021-05-17T18:30:15 | 2021-05-17T18:30:15 | 360,814,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,250 | py | import dgl
from dgl.nn import SAGEConv # Define a GraphSAGE model
from dgl.nn import GATConv # Define a GAT model
import torch
import torch.nn as nn
import torch.nn.functional as F
######################################################################
# build a two-layer GraphSAGE model
class GraphSAGE(nn.Module):
    """Two-layer GraphSAGE network with mean aggregation.

    The DGL graph is fixed at construction time; ``forward`` only takes
    the node feature matrix.
    """
    def __init__(self, graph, in_dim, hidden_dim, out_dim):
        super(GraphSAGE, self).__init__()
        self.conv1 = SAGEConv(in_feats=in_dim, out_feats=hidden_dim,
                              aggregator_type='mean')
        self.conv2 = SAGEConv(in_feats=hidden_dim, out_feats=out_dim,
                              aggregator_type='mean')
        self.graph = graph

    def forward(self, in_feat):
        """Apply both convolutions with a ReLU in between."""
        hidden = F.relu(self.conv1(self.graph, in_feat))
        return self.conv2(self.graph, hidden)
######################################################################
# build a two-layer GAT model
class GATLayer(nn.Module):
    """Single-head graph attention layer over a fixed DGL graph."""
    def __init__(self, graph, in_dim, out_dim):
        super(GATLayer, self).__init__()
        self.graph = graph
        # Linear projection of node features, plus a scorer over the
        # concatenated (source, destination) projections.
        self.fc = nn.Linear(in_dim, out_dim, bias=False)
        self.attn_fc = nn.Linear(2 * out_dim, 1, bias=False)
        self.reset_parameters()
    def reset_parameters(self):
        """Reinitialize learnable parameters."""
        gain = nn.init.calculate_gain('relu')
        nn.init.xavier_normal_(self.fc.weight, gain=gain)
        nn.init.xavier_normal_(self.attn_fc.weight, gain=gain)
    def edge_attention(self, edges):
        # Raw attention score from the concatenated endpoint projections.
        z2 = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
        a = self.attn_fc(z2)
        return {'e': F.leaky_relu(a)}
    def message_func(self, edges):
        # Forward each edge's source projection and its attention score.
        return {'z': edges.src['z'], 'e': edges.data['e']}
    def reduce_func(self, nodes):
        # Softmax incoming scores, then the attention-weighted feature sum.
        alpha = F.softmax(nodes.mailbox['e'], dim=1)
        h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
        return {'h': h}
    def forward(self, in_feat):
        """Project features, score every edge, aggregate per node."""
        z = self.fc(in_feat)
        self.graph.ndata['z'] = z
        self.graph.apply_edges(self.edge_attention)
        self.graph.update_all(self.message_func, self.reduce_func)
        return self.graph.ndata.pop('h')
class MultiHeadGATLayer(nn.Module):
    """Runs several independent GATLayer heads over the same graph.

    merge='cat' concatenates per-head outputs along the feature
    dimension; any other value averages the heads element-wise.
    """
    def __init__(self, graph, in_dim, out_dim, num_heads, merge='cat'):
        super(MultiHeadGATLayer, self).__init__()
        self.heads = nn.ModuleList()
        for i in range(num_heads):
            self.heads.append(GATLayer(graph, in_dim, out_dim))
        self.merge = merge
    def forward(self, h):
        # One (N, out_dim) tensor per attention head.
        head_outs = [attn_head(h) for attn_head in self.heads]
        if self.merge == 'cat':
            return torch.cat(head_outs, dim=1)
        else:
            # Average across the head dimension only.  The previous
            # torch.mean(torch.stack(head_outs)) had no dim argument and
            # collapsed the output to a single scalar instead of an
            # (N, out_dim) tensor.
            return torch.mean(torch.stack(head_outs), dim=0)
class GAT(nn.Module):
    """Two-layer graph attention network.

    Layer one uses ``num_heads`` concatenated attention heads; layer two
    collapses the result to ``out_dim`` with a single head.
    """
    def __init__(self, graph, in_dim, hidden_dim, out_dim, num_heads):
        super(GAT, self).__init__()
        self.layer1 = MultiHeadGATLayer(graph, in_dim, hidden_dim, num_heads)
        self.layer2 = MultiHeadGATLayer(graph, hidden_dim * num_heads, out_dim, 1)

    def forward(self, in_feat):
        """ELU non-linearity between the two attention layers."""
        hidden = F.elu(self.layer1(in_feat))
        return self.layer2(hidden)
"lulin199209@gmail.com"
] | lulin199209@gmail.com |
da6084b7a1b5f3aa319565d6778d11e1160946a3 | d7b403a8e4b124e42feb0b72c502df438f5552f6 | /speech/TextToSpeech.py | afda438ef79dc3cb346c9c3df8dea77969416e10 | [] | no_license | Mallington/Hacker-The-Hardware-bear | ea3b7b7bd1a908ff613cce414d623b2d263c0955 | 4f21318879bedac726c00a26f9c80095f73c1881 | refs/heads/master | 2020-09-16T04:10:39.319071 | 2019-11-24T11:01:43 | 2019-11-24T11:01:43 | 223,648,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | from gtts import gTTS
import os
from datetime import date
class tts():
    """Thin wrapper around Google Text-to-Speech (gTTS).

    Synthesises English speech for a message, saves it to an mp3 named
    after today's date and plays it with the external `mpg321` player.
    """
    def __init__(self):
        pass
    def say(self, message):
        """Speak *message* aloud (blocks while mpg321 plays the file)."""
        # NOTE: this local name shadows the class name; it is the gTTS object.
        tts = gTTS(text=message, lang='en')
        now = date.today()
        # File name is keyed by date only, so repeated calls on the same
        # day overwrite (and replay) the same mp3 file.
        tts.save("{}.mp3".format(now))
        os.system("mpg321 {}.mp3".format(now))
| [
"funkymallington@gmail.com"
] | funkymallington@gmail.com |
cf8828a7f7d1d13855bca279de6c79655a778dcd | 97825c904d512fdfd6a4418d6820eb8fd3cdd202 | /largest_number.py | cf7f5a702a9e0821a28b1fd3e4074a3fe1484bf5 | [] | no_license | charliedmiller/coding_challenges | 97426741778d0498321cb2fec2e7d3c3d3c0b26e | ea6aa0cffb55b2d88f71f108f16c34e9779da9c7 | refs/heads/master | 2023-02-21T21:54:26.500399 | 2021-01-28T23:50:42 | 2021-01-28T23:50:42 | 287,142,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | # Charlie Miller
# Leetcode - 179. Largest Number
# https://leetcode.com/problems/largest-number/
"""
I had the right idea, though needed to look at the sln to get there
https://leetcode.com/problems/largest-number/solution/
Create a comparator that compares the numbers created by ordering
them differently (a then b or b then a) Sort using this
"""
from functools import cmp_to_key
class Solution:
    """LeetCode 179: arrange numbers so their concatenation is largest."""

    def cmp(self, a, b):
        """Order two number-strings so the better concatenation comes first.

        Negative when ``a + b`` forms the larger number, i.e. *a* should
        sort before *b* (descending concatenation order).
        """
        a_first = int(a + b)
        b_first = int(b + a)
        return b_first - a_first

    def remove_leading_zeros(self, string):
        """Strip leading zeros, keeping a single '0' for all-zero input.

        Matches the original loop's behaviour, including returning ''
        for an empty input string.
        """
        return string.lstrip("0") or string[-1:]

    def largestNumber(self, nums: List[int]) -> str:
        """Return the largest number formed by concatenating *nums*."""
        stringified = [str(num) for num in nums]
        # Sort with a pairwise comparator: "a then b" vs "b then a".
        stringified.sort(key=cmp_to_key(self.cmp))
        return self.remove_leading_zeros(''.join(stringified))
"charliedmiller@gmail.com"
] | charliedmiller@gmail.com |
d39c8a61833fc2f4123d6803bf8dce614ed0a12a | cfc9a8831e5946d738329fad2763d643dec8566f | /src/encoded/tests/test_create_mapping.py | 44d89dea3b1ec2d190ef281061e331a2302547be | [
"MIT"
] | permissive | emi80/encoded | 8e244a66b0d36610dcf8d9a47d385640dfa7987d | 2fe2c2afbd3be21b65b10a189a3bd623ecdaee37 | refs/heads/master | 2021-01-18T12:34:56.060690 | 2015-03-05T21:56:05 | 2015-03-05T21:56:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | import pytest
from ..loadxl import ORDER
# One parametrized test case per content type, in loading order.
@pytest.mark.parametrize('item_type', ORDER)
def test_create_mapping(registry, item_type):
    """type_mapping must return a truthy mapping for every item type."""
    # Imported lazily so collection works without the full app configured.
    from ..commands.create_mapping import type_mapping
    from ..contentbase import TYPES
    mapping = type_mapping(registry[TYPES], item_type)
    assert mapping
| [
"laurence@lrowe.co.uk"
] | laurence@lrowe.co.uk |
89390f2b4c8bd7d25ec8c7791c9b3502343ed13a | ebe20199181927f3eb36aedfe66d5d179f28628f | /concierge/event/models.py | d66ac24619b9e9b28680c89204f8dfd3e80b9f4c | [] | no_license | rajat404/concierge | 8f1c1d826334f8a0364d4b64efebc722b6f6f061 | 60290e70741060c78c860779a19bf81a90058675 | refs/heads/master | 2021-07-01T08:28:27.020737 | 2017-09-03T22:23:12 | 2017-09-03T22:23:12 | 103,437,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,103 | py | # Third Party Imports
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from simple_history.models import HistoricalRecords
# Concierge Imports
from concierge.base.models import SlugModel, TimeStampedModel, UUIDModel
from concierge.quiz.models import Quiz
class Speaker(UUIDModel, TimeStampedModel):
    """A person who speaks at events."""
    # Full audit trail of row changes (django-simple-history).
    history = HistoricalRecords(table_name='event_speaker_history')
    first_name = models.CharField(max_length=120)
    last_name = models.CharField(max_length=120)
    email = models.EmailField(unique=True, db_index=True)
    about = models.TextField(blank=True)
    class Meta:
        db_table = 'event_speaker'
        verbose_name = _('Speaker')
        verbose_name_plural = _('Speakers')
    def __str__(self):
        return '{} {}'.format(self.first_name, self.last_name)
class Event(UUIDModel, TimeStampedModel, SlugModel):
    """An event (conference, talk, workshop, ...) with optional
    registration/feedback quizzes and a participation time window."""
    EVENT_CHOICES = (
        ('EVENT', 'EVENT'),
        ('SESSION', 'SESSION'),
        ('MEETUP', 'MEETUP'),
        ('CONFERENCE', 'CONFERENCE'),
        ('TALK', 'TALK'),
        ('WORKSHOP', 'WORKSHOP'),
        ('DEV_SPRINT', 'DEV SPRINT'),
        ('PANEL_DISCUSSION', 'PANEL DISCUSSION'),
        # TODO: BOF & Open Spaces
    )
    VISIBILITY_CHOICES = (
        ('PUBLIC', 'PUBLIC'),
        ('PRIVATE', 'PRIVATE'),
    )
    # Need to be nullable, as the value will be populated after creation of the `Event` instance
    registration_quiz = models.ForeignKey(Quiz, related_name='event_registration', null=True)
    feedback_quiz = models.ForeignKey(Quiz, related_name='event_feedback', null=True)
    history = HistoricalRecords(table_name='event_event_history')
    kind = models.CharField(max_length=15, choices=EVENT_CHOICES)
    # Self-reference: a session/talk can belong to a parent happening.
    happening = models.ForeignKey('self', blank=True, null=True)
    speaker = models.ForeignKey(Speaker, related_name='events', null=True, blank=True)
    venue = models.CharField(max_length=100, null=True, blank=True)
    description = models.TextField(blank=True)
    start = models.DateTimeField()
    end = models.DateTimeField()
    participation_open = models.BooleanField(default=False, help_text='can a user participate in this event')
    participation_start = models.DateTimeField(null=True, blank=True)
    participation_end = models.DateTimeField(null=True, blank=True)
    is_offline = models.BooleanField(default=True)
    class Meta:
        db_table = 'event_event'
        verbose_name = _('Event')
        verbose_name_plural = _('Events')
    def __str__(self):
        return self.slug
    def can_participate(self):
        """Whether a user may participate in this event right now.

        Returns False instead of raising when the window bounds are
        unset: the serializer normally validates `participation_start`
        and `participation_end`, but objects created through other paths
        (admin, shell) may still carry None here.
        """
        if not self.participation_open:
            return False
        if self.participation_start is None or self.participation_end is None:
            return False
        return self.participation_start <= timezone.now() < self.participation_end
class OfflineEvent(UUIDModel, TimeStampedModel):
    """Physical-venue details attached one-to-one to an Event."""
    history = HistoricalRecords(table_name='event_offline_event_history')
    event = models.OneToOneField(Event, related_name='offline')
    # Optional geographic coordinates of the venue.
    longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
    latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
    address = models.TextField()
    address_guidelines = models.TextField()
    # RSVP window mirrors the participation window on Event.
    rsvp_open = models.BooleanField(default=False, help_text='can a participant RSVP for this event')
    rsvp_start = models.DateTimeField(null=True, blank=True)
    rsvp_end = models.DateTimeField(null=True, blank=True)
    class Meta:
        db_table = 'event_offline_event'
        verbose_name = _('Offline Event')
        verbose_name_plural = _('Offline Events')
    def __str__(self):
        return self.event.slug
class Organisation(UUIDModel, TimeStampedModel, SlugModel):
    """An organisation involved in events (as host, sponsor, or other)."""
    ORG_CHOICES = (
        ('HOST', 'HOST'),
        ('SPONSOR', 'SPONSOR'),
        ('OTHER', 'OTHER'),
    )
    history = HistoricalRecords(table_name='organisation_organisation_history')
    kind = models.CharField(max_length=15, choices=ORG_CHOICES)
    class Meta:
        db_table = 'organisation_organisation'
        verbose_name = _('Organisation')
        verbose_name_plural = _('Organisations')
    def __str__(self):
        return self.slug
class SponsorCategory(models.Model):
    """To be added via Admin Panel (or Fixture), prior to adding Sponsors."""
    # Unique label; Sponsor.category references this via to_field='name'.
    name = models.CharField(max_length=50, unique=True)
    class Meta:
        db_table = 'event_sponsor_category'
        verbose_name = _('Sponsor Category')
        verbose_name_plural = _('Sponsor Categories')
    def __str__(self):
        return self.name
class Sponsor(TimeStampedModel):
    """Links an Organisation to an Event as a sponsor of a given category."""
    history = HistoricalRecords(table_name='event_sponsor_history')
    event = models.ForeignKey(Event)
    organisation = models.ForeignKey(Organisation)
    # Joined on the category's unique name rather than its primary key.
    category = models.ForeignKey(SponsorCategory, to_field='name')
    class Meta:
        db_table = 'event_sponsor'
        verbose_name = _('Sponsor')
        verbose_name_plural = _('Sponsors')
    def __str__(self):
        return '{}--{}'.format(self.organisation, self.event)
| [
"404rajat@gmail.com"
] | 404rajat@gmail.com |
9c93902848978428f5ced5f6c21786783cea6d85 | a4623b72797d87baf17ca48406e36da8af78e7eb | /backend/settings.py | b4dcb752ef5348911c4f31d20ef9747037555f2c | [] | no_license | janefwp/task | f410965a381769864f51d485f72e5c4a0738ebad | f7366c9f8e77a3927acaff3758916779e670cf53 | refs/heads/main | 2023-04-13T19:47:36.614907 | 2021-04-25T09:02:43 | 2021-04-25T09:02:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,957 | py | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from datetime import timedelta
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded secret key committed to source control —
# move it to an environment variable before deploying.
SECRET_KEY = '#js5i*nzoc5w(4a2v@+m@i0j8z^7()6+ne#^@q^%iur06fdg&7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): ALLOWED_HOSTS entries are hostnames; the full-URL entry
# (with the https:// scheme) will never match a Host header — confirm and drop.
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '.herokuapp.com',
                 'https://hospitalbooksystem.herokuapp.com/']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'corsheaders',
    'storages',
    'base.apps.BaseConfig',
]
# All API authentication goes through JWT (djangorestframework-simplejwt).
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_simplejwt.authentication.JWTAuthentication',
    )
}
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            # Serve the built React frontend's index template.
            os.path.join(BASE_DIR, 'frontend/build')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = [
    BASE_DIR / 'static',
    BASE_DIR / 'frontend/build/static'
]
MEDIA_ROOT = BASE_DIR / 'static/images'
STATIC_ROOT = BASE_DIR / 'staticfiles'
# NOTE(review): allows every origin — acceptable for development only.
CORS_ALLOW_ALL_ORIGINS = True
# Heroku dynos run from /app; presumably used as the production marker — TODO confirm.
if os.getcwd() == '/app':
    # SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    # SECURE_SSL_REDIRECT = True
    DEBUG = False
| [
"cuij1012@gmail.com"
] | cuij1012@gmail.com |
007d263722520390990a2b56a70c7cb4328ec8b9 | 2846b0779aec695c3d2b6673e274a14f5bad374b | /Session info to table/macOS app/Tracks to Table/tracks_to_table.py | f7d4b2789f5e0e70306dadea74eacbeb13a6aed0 | [
"MIT"
] | permissive | fantopop/post-production-scripts | 43aeeec3cd2f0e21b487ce1e8a6d762f4b79a333 | 8192499d6ba716a0f72094c63c167cd2ae384eab | refs/heads/master | 2022-04-30T15:44:37.249831 | 2022-04-19T15:32:34 | 2022-04-19T15:32:34 | 91,468,867 | 24 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,676 | py | #!/usr/bin/python
"""
tracks_to_table.py
Author: Ilya Putilin
https://github.com/fantopop/post-production-scripts
Special thanks to Philippe Lagadec for HTML.py module for generating HTML tables.
http://www.decalage.info/python/html
This script converts a .txt file, exported from Pro Tools
using the "Export Session Info as Text" command, into a .csv file.
This CSV file can be easily opened with Number app.
There are two formats available:
- with TRACK_NAME column as one table.
- grouped by TRACK NAME with [--tracks] option.
"""
import sys, csv, argparse
from os import path
# Column separator used for tab-delimited output.
sep = '\t'
header = ['#', 'EVENT', 'START', 'END', 'DURATION']
footer = [''] * 5
# TABLE_STYLE_THINBORDER = "border: 1px solid #000000; border-collapse: collapse;"
# NOTE(review): this empty value is dead — TABLE_STYLE_THINBORDER is
# reassigned a few lines below; consider removing one of the two.
TABLE_STYLE_THINBORDER = ""
table_style = 'table {border-collapse: collapse;} th, td {border: 1px solid #ccc;padding: 8px;}'
#--- CONSTANTS -----------------------------------------------------------------
# Table style to get thin black lines in Mozilla/Firefox instead of 3D borders
TABLE_STYLE_THINBORDER = "border: 1px solid #000000; border-collapse: collapse;"
#=== CLASSES ===================================================================
class TableCell (object):
    """One HTML table cell: a TD tag, or TH when *header* is true.

    Constructor parameters mirror the HTML 4 cell attributes
    (bgcolor, width, align, char, charoff, valign, style); *attribs*
    accepts any additional attribute name/value pairs.
    Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.6
    """

    def __init__(self, text="", bgcolor=None, header=False, width=None,
                 align=None, char=None, charoff=None, valign=None, style=None,
                 attribs=None):
        """Store the cell content and its presentation attributes."""
        self.text = text
        self.bgcolor = bgcolor
        self.header = header
        self.width = width
        self.align = align
        self.char = char
        self.charoff = charoff
        self.valign = valign
        self.style = style
        self.attribs = {} if attribs is None else attribs

    def __str__(self):
        """Render the cell as an HTML TD/TH tag string."""
        # Fold the dedicated attributes into the generic dict first, in the
        # historical order, so the emitted markup is unchanged.
        for name, value in (('bgcolor', self.bgcolor), ('width', self.width),
                            ('align', self.align), ('char', self.char),
                            ('charoff', self.charoff), ('valign', self.valign),
                            ('style', self.style)):
            if value:
                self.attribs[name] = value
        attribs_str = ''.join(' %s="%s"' % (key, self.attribs[key])
                              for key in self.attribs)
        # A falsy cell still needs some content to render.
        content = str(self.text) if self.text else ' '
        tag = 'TH' if self.header else 'TD'
        return ' <%s%s>%s</%s>\n' % (tag, attribs_str, content, tag)
#-------------------------------------------------------------------------------
class TableRow (object):
    """
    a TableRow object is used to create a row in a HTML table. (TR tag)
    Attributes:
    - cells: list, tuple or any iterable, containing one string or TableCell
    object for each cell
    - header: bool, true for a header row (TH), false for a normal data row (TD)
    - bgcolor: str, background color
    - col_align, col_valign, col_char, col_charoff, col_styles: see Table class
    - attribs: dict, additional attributes for the TR tag
    Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.5
    """
    def __init__(self, cells=None, bgcolor=None, header=False, attribs=None,
                 col_align=None, col_valign=None, col_char=None,
                 col_charoff=None, col_styles=None):
        """TableRow constructor"""
        self.bgcolor = bgcolor
        self.cells = cells
        self.header = header
        self.col_align = col_align
        self.col_valign = col_valign
        self.col_char = col_char
        self.col_charoff = col_charoff
        self.col_styles = col_styles
        self.attribs = attribs
        if attribs==None:
            self.attribs = {}
    def __str__(self):
        """return the HTML code for the table row as a string"""
        attribs_str = ""
        if self.bgcolor: self.attribs['bgcolor'] = self.bgcolor
        for attr in self.attribs:
            attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
        # Header rows are wrapped in a THEAD element.
        if self.header:
            result = '<THEAD>'
        else:
            result = ''
        result += ' <TR%s>\n' % attribs_str
        for cell in self.cells:
            # NOTE(review): index() returns the FIRST matching cell, so
            # duplicate cell values share a column index; enumerate()
            # would be safer when per-column attributes are used.
            col = self.cells.index(cell) # cell column index
            if not isinstance(cell, TableCell):
                cell = TableCell(cell, header=self.header)
            # apply column alignment if specified:
            if self.col_align and cell.align==None:
                cell.align = self.col_align[col]
            if self.col_char and cell.char==None:
                cell.char = self.col_char[col]
            if self.col_charoff and cell.charoff==None:
                cell.charoff = self.col_charoff[col]
            if self.col_valign and cell.valign==None:
                cell.valign = self.col_valign[col]
            # apply column style if specified:
            if self.col_styles and cell.style==None:
                cell.style = self.col_styles[col]
            result += str(cell)
        result += ' </TR>\n'
        if self.header:
            result += '</THEAD>'
        return result
#-------------------------------------------------------------------------------
class Table (object):
    """
    a Table object is used to create a HTML table. (TABLE tag)
    Attributes:
    - rows: list, tuple or any iterable, containing one iterable or TableRow
    object for each row
    - header_row: list, tuple or any iterable, containing the header row (optional)
    - border: str or int, border width
    - style: str, table style in CSS syntax (thin black borders by default)
    - width: str, width of the table on the page
    - attribs: dict, additional attributes for the TABLE tag
    - col_width: list or tuple defining width for each column
    - col_align: list or tuple defining horizontal alignment for each column
    - col_char: list or tuple defining alignment character for each column
    - col_charoff: list or tuple defining charoff attribute for each column
    - col_valign: list or tuple defining vertical alignment for each column
    - col_styles: list or tuple of HTML styles for each column
    Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.1
    """
    def __init__(self, rows=None, border='1', style=None, width=None,
                 cellspacing=None, cellpadding=4, attribs=None, header_row=None,
                 col_width=None, col_align=None, col_valign=None,
                 col_char=None, col_charoff=None, col_styles=None):
        """Table constructor"""
        self.border = border
        self.style = style
        # style for thin borders by default
        if style == None: self.style = TABLE_STYLE_THINBORDER
        self.width = width
        self.cellspacing = cellspacing
        self.cellpadding = cellpadding
        self.header_row = header_row
        self.rows = rows
        if not rows: self.rows = []
        self.attribs = attribs
        if not attribs: self.attribs = {}
        self.col_width = col_width
        self.col_align = col_align
        self.col_char = col_char
        self.col_charoff = col_charoff
        self.col_valign = col_valign
        self.col_styles = col_styles
    def __str__(self):
        """return the HTML code for the table as a string"""
        attribs_str = ""
        if self.border: self.attribs['border'] = self.border
        if self.style: self.attribs['style'] = self.style
        if self.width: self.attribs['width'] = self.width
        if self.cellspacing: self.attribs['cellspacing'] = self.cellspacing
        if self.cellpadding: self.attribs['cellpadding'] = self.cellpadding
        for attr in self.attribs:
            attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
        result = '<TABLE%s>\n' % attribs_str
        # insert column tags and attributes if specified:
        if self.col_width:
            for width in self.col_width:
                result += ' <COL width="%s">\n' % width
        # First insert a header row if specified:
        if self.header_row:
            if not isinstance(self.header_row, TableRow):
                result += str(TableRow(self.header_row, header=True))
            else:
                result += str(self.header_row)
        # Then all data rows:
        for row in self.rows:
            if not isinstance(row, TableRow):
                row = TableRow(row)
            # apply column alignments and styles to each row if specified:
            # (Mozilla bug workaround)
            if self.col_align and not row.col_align:
                row.col_align = self.col_align
            if self.col_char and not row.col_char:
                row.col_char = self.col_char
            if self.col_charoff and not row.col_charoff:
                row.col_charoff = self.col_charoff
            if self.col_valign and not row.col_valign:
                row.col_valign = self.col_valign
            if self.col_styles and not row.col_styles:
                row.col_styles = self.col_styles
            result += str(row)
        result += '</TABLE>'
        return result
def table(*args, **kwargs):
    """Return HTML code for a table as a string.

    Convenience wrapper; see the Table class for the parameters.
    """
    return str(Table(*args, **kwargs))
#-------------------------------------------------------------------------------
# One indentation unit used by Tag/HTML when nesting elements.
tab = ' '
class Tag():
    '''
    A class to provide correct opening and closing tags,
    with indentation support via HTML class instance.
    Implies usage of the "with" statement:
    with Tag('tag', HTML-instance):
        <code>
    '''
    def __init__(self, name, HTML):
        self.name = name
        self.HTML = HTML
    def __enter__(self):
        # Emit the opening tag at the current depth, then indent deeper.
        self.HTML.content += tab * self.HTML.indent + '<' + self.name + '>\n'
        self.HTML.indent += 1
    def __exit__(self, exc_type, exc_value, traceback):
        # Dedent first so the closing tag lines up with the opening one.
        self.HTML.indent -= 1
        self.HTML.content += tab * self.HTML.indent + '</' + self.name + '>\n'
class HTML():
'''
HTML() class instance accumulates generated HTML code, handles indentation
and provides several html-tags as methods, returning Tag() class instances.
Common usage pattern:
h = HTML()
with h.html():
with h.head():
with h.title()
h.add('Hello world page')
with h.body():
with h.h1():
h.add('Hello World!')
with h.p():
h.add('This is the HTML code')
print(str(h))
'''
def __init__(self):
self.indent = 0
self.content = '<!DOCTYPE html>\n'
def __str__(self):
return self.content
def add(self, text):
for line in text.split('\n'):
self.content += tab * self.indent + line + '\n'
def html(self):
return Tag('html', self)
def body(self):
return Tag('body', self)
def head(self):
return Tag('head', self)
def title(self):
return Tag('title', self)
def h1(self):
return Tag('h1', self)
def h2(self):
return Tag('h2', self)
def style(self):
return Tag('style', self)
def p(self):
return Tag('p', self)
#-------------------------------------------------------------------------------
class Track():
'''
Stores track name and list of track events:
[NUMBER, CLIP_NAME, START TC, END TC, DURATION TC]
'''
def __init__(self, name):
self.name = name
self.events = []
class Session():
'''
Session() instance reads .txt file, exported from Pro Tools and
stores every tracks EDL as list of Track() instances.
Supports export to .csv and .html formats.
'''
def __init__(self, filename):
# Open session info file for reading
csv_reader = csv.reader(filename, dialect='excel-tab')
# Create array for Track() objects
self.tracks = []
for raw_row in csv_reader:
# Check, whether the row is not empty.
if raw_row:
# Remove all whitespaces from start and end of the cells.
row = [cell.strip() for cell in raw_row]
# Get track name.
if row[0].startswith('TRACK NAME:'):
track = Track(name=row[1])
self.tracks.append(track)
continue
# Skip original header lines.
if row[0].startswith('CHANNEL'):
continue
if len(row) > 6:
track.events.append(row[1:6])
def to_csv(self, filename):
with open(filename, 'w') as outputfile:
csv_writer = csv.writer(outputfile, dialect='excel-tab')
for track in self.tracks:
csv_writer.writerow([''] + [track.name] + ['']*3)
csv_writer.writerow(header)
for line in track.events:
csv_writer.writerow(line)
csv_writer.writerow(footer)
def to_html(self, filename):
h = HTML()
with h.html():
with h.head():
h.add('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">')
with h.title():
# Add document title
h.add(filename.split('.')[-2].split('/')[-1])
with h.style():
h.add('@media print {')
h.indent += 1
# Add page break after each track's table when printing
h.add('TABLE { page-break-after: always}')
# Configure correct display of table over multiple printing pages
h.add('TR { page-break-inside:avoid; page-break-after:auto }')
h.add('TD { page-break-inside:avoid; page-break-after:auto }')
h.add('THEAD { display:table-header-group }')
h.add('TFOOT { display:table-footer-group }')
# Set default landscape orientation when printing
h.add('@page {size: landscape;}}')
h.indent -= 1
h.add(table_style)
with h.body():
for track in self.tracks:
# Add track name as header
with h.h2():
h.add(track.name)
# Add track's EDL table
h.add(table(track.events,
header_row=header,
width='100%',
border=None,
cellpadding=None,
col_width=['2.5%', '', '5%', '5%', '5%'],
col_align=['center', 'left', 'center', 'center', 'center'],
style=TABLE_STYLE_THINBORDER
))
with open(filename, 'w') as outputfile:
outputfile.write(str(h))
def export(self, filename, to):
outputfile = outname(filename, to)
if to == 'csv':
self.to_csv(outputfile)
else:
self.to_html(outputfile)
print('Source: ' + filename)
print('Result: ' + outputfile)
def outname(filename, ext='csv'):
"""
Constructs output filename from input file,
replacing extension with '.csv'.
Example:
input.txt >>> input.csv
"""
split = (path.basename(filename)).split('.')
l = len(split)
if l > 1:
output = '.'.join(split[0:l-1] + [ext])
else:
output = filename + '.' + ext
return path.join(path.dirname(filename), output)
def main():
parser = argparse.ArgumentParser(
description="Converts '.txt' file from Pro Tools 'Export Session Info as Text' command to '.csv' or '.html' file")
parser.add_argument(
'txt', metavar='textfile', type=argparse.FileType(mode='rU'),
help='session info text file from Pro Tools')
parser.add_argument(
'--to', choices=['csv', 'html'], required=True,
help='export format: "csv" or "html"')
args = parser.parse_args()
# Read session info to Session() object
session = Session(args.txt)
args.txt.close()
# Export to the file of choses format.
session.export(filename=args.txt.name, to=args.to)
if __name__ == '__main__':
main()
| [
"fantopop@gmail.com"
] | fantopop@gmail.com |
f7076115a366f407af38c60d3ee22cb4242a040a | 4a008af61a508c73a41d6907b57272b16bbf4b32 | /ephios/modellogging/models.py | eaddb22fb930e22cc477e58f5723af00c7e68613 | [
"MIT"
] | permissive | alexanderKuehling/ephios | dbe6cf9198d1f9b5c9bb27927726c32271c11eda | 2bec784b1bf5300201701ae1710f699b95cdd0e3 | refs/heads/main | 2023-05-08T02:22:23.163347 | 2021-05-06T16:20:07 | 2021-05-06T16:20:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,744 | py | from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from ephios.modellogging.json import LogJSONDecoder, LogJSONEncoder
from ephios.modellogging.recorders import (
InstanceActionType,
capitalize_first,
recorder_types_by_slug,
)
# pylint: disable=protected-access
class LogEntry(models.Model):
content_type = models.ForeignKey(
ContentType,
on_delete=models.CASCADE,
related_name="logentries",
)
content_object_id = models.PositiveIntegerField(db_index=True)
content_object = GenericForeignKey("content_type", "content_object_id")
attached_to_object_type = models.ForeignKey(
ContentType, on_delete=models.CASCADE, related_name="associated_logentries"
)
attached_to_object_id = models.PositiveIntegerField(db_index=True)
attached_to_object = GenericForeignKey("attached_to_object_type", "attached_to_object_id")
datetime = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
blank=True,
on_delete=models.PROTECT,
related_name="logging_entries",
)
action_type = models.CharField(
max_length=255, choices=[(value, value) for value in InstanceActionType]
)
request_id = models.CharField(max_length=36, null=True, blank=True)
data = models.JSONField(default=dict, encoder=LogJSONEncoder, decoder=LogJSONDecoder)
class Meta:
ordering = ("-datetime", "-id")
verbose_name = _("Log entry")
verbose_name_plural = _("Log entries")
@cached_property
def records(self):
recorder_types = recorder_types_by_slug(self.content_type.model_class())
for recorder in self.data.values():
if not isinstance(recorder, dict) or "slug" not in recorder:
continue
yield recorder_types[recorder["slug"]].deserialize(
recorder["data"], self.content_type.model_class(), self.action_type
)
@property
def content_object_classname(self):
return capitalize_first(self.content_type.model_class()._meta.verbose_name)
@property
def content_object_or_str(self):
return self.content_object or self.data.get("__str__")
def __str__(self):
if self.content_object:
return f"{self.action_type} {type(self.content_object)._meta.verbose_name} {str(self.content_object)}"
return f"{self.action_type} {self.content_type.model} {self.content_object_or_str}"
| [
"noreply@github.com"
] | alexanderKuehling.noreply@github.com |
8fc10d35f9fa5cced3f4939ab0d2ca50d42ab5cb | b5dbf732d26a2a924c85c5a107035be48bfe69cd | /2.7.py | a41cca6bfe45aaf10f7b7a81df3ea5680c11f318 | [] | no_license | Beks667/2.7Hw | 2435bfa58e252357c46819f6987639ca025549be | 4e03706bdfc70f2f94145a50f493f36995d08cdb | refs/heads/main | 2023-04-19T13:10:24.348768 | 2021-05-07T12:44:27 | 2021-05-07T12:44:27 | 365,230,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,341 | py | # class Phone :
# def __init__ (self,brand,model,color):
# self.brand = brand
# self.model = model
# self.color = color
# def show (self):
# print(f"{self.brand},{self.model},{self.color}")
# phone = Phone("Apple", "XS", "black")
# phone.show()
# class Monkey:
# max_age = 12
# loves_bananas = True
# def climb(self):
# print('I am climbing the tree')
# abc = Monkey()
# abc.climb()
# print(abc.max_age)
# abc.climb()
# print(abc.loves_bananas)
# Это через input----------------------------------------------------------------
# class Person:
# def __init__(self,name,age,gender):
# self.name = name
# self.age = age
# self.gender = gender
# def calculate_age(self):
# self.number = int(input('enter year:'))
# print(self.age + self.number)
# p = Person('John', 23, 'male')
# p.calculate_age()
# #Это через self-----------------------------------------------------------------------
# class Person:
# def __init__(self,name,age,gender):
# self.name = name
# self.age = age
# self.gender = gender
# def calculate_age(self,year):
# self.year = year
# print(self.age + self.year)
# p = Person('John', 23, 'male')
# p.calculate_age(10)
# | [
"you@example.com"
] | you@example.com |
b892725d3b4dcb01c4b54bf97ca0a1f759db0640 | 87a7e63bf86dad9ca5dbe812ea8caaa013ab1856 | /Step6RectangleDivision.py | 31a102235257fba937444e2cf0323933e2ceefe3 | [] | no_license | sugoigroup/CodeTestPython | 884ee5aa7abec588eb04df930b32c824b1a37a93 | ba08dc8c2a7c8f385055b4e21a6de10e73f7e8fd | refs/heads/master | 2023-03-18T12:01:30.055923 | 2021-03-15T13:32:58 | 2021-03-15T13:32:58 | 347,641,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | # 긴변의 길이가 1000이하에서 만들어질수 있는 정사각형 개수가 딱 20개인 직사각형의 가로세로 길이 쌍이 몇 쌍인지 구하시요? 응?
# 단 직사각형의 가로세로 길ㄹ이를 바꾼 경우는 하나로 취급됨.
# 뭔개소리냐
W, N = 1000, 20
def cut(w, h, n):
if w==h:
return n==0
if w>h:
w, h = h, w
q, r = divmod(h, w)
if (n-q<0) or (r==0):
return (n-q==0)
else:
return cut(w,r,n-q)
cnt = 0
for i in range(1, W+1):
for j in range(i, W+1):
if cut(i, j, N):
cnt += 1
print(cnt) | [
""
] | |
04d3c52147bf2d6f5af145bd01926191bd945680 | d4af57cf5b00e13e78966b20b21c0a052d078d3a | /Lab1/Source/wsgi.py | 617a28297304cf4bfa7b756309ba4adf0c6fed80 | [] | no_license | Bersik/Web-Technology-Course | e717aab6df994e21bc6bb4c3944d094bf95328f9 | 3d955921b8187987d86e3339aedba6c4bf9cf01c | refs/heads/master | 2021-05-04T06:56:27.566335 | 2016-12-13T02:14:35 | 2016-12-13T02:14:35 | 70,544,089 | 0 | 1 | null | 2016-12-03T21:45:11 | 2016-10-11T01:32:59 | Python | UTF-8 | Python | false | false | 388 | py | """
WSGI config for Lab1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Source.settings")
application = get_wsgi_application()
| [
"k.sergyk@gmail.com"
] | k.sergyk@gmail.com |
c2b8476c5f2885a7c220049fadf41636f702b471 | f2e063c84f0cfa36b47ca5ee0752790ce7ae7a7a | /22 april/second.py | 85c9b8cf0f154f60577713c5eb85ff1cdcd2501c | [] | no_license | niksanand1717/TCS-434 | a1b1ca9ca96b389cb30fb3a104dda122408a52c9 | 0c1c16426d127d84733d26475278f148a1e512d8 | refs/heads/master | 2023-04-17T12:48:11.475162 | 2021-05-01T19:05:38 | 2021-05-01T19:05:38 | 347,687,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # Input n strings in a list and print all the strings ending with a specific character provided by the user
def seive(string):
index = (len(string)-1) - len(match)
if string[index + 1: ] == match:
return string
strnum = int(input("Enter num of strings: "))
strs = []
for i in range(0, strnum):
strs.append(input(f"Enter string {i+1}: "))
global match
matchstr: str = input("Enter the matching character at end of string: ")
match = matchstr
output = list(filter(seive, strs))
print(f"Strings ending with {matchstr}:", output) | [
"nikhilanandgaya01@gmail.com"
] | nikhilanandgaya01@gmail.com |
ecca9f66970644f8ebd7962b370b64e54427a5c2 | 339ec05910ea45e91222a33ef926d8f108f87486 | /server.py | 24f5afea45ca70ff80b5c4b8f1bc8566d864ad53 | [] | no_license | ajeebbasheer/chat-server | 199eda1b67a2449e300ba693a1f735521c89d236 | 9ffdd57a617bed06f256e3fe2fd1926c34064cc9 | refs/heads/master | 2021-09-13T17:45:04.167524 | 2018-05-02T15:44:19 | 2018-05-02T15:44:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | #/usr/bin/env python
import socket
import sys
import select
CONNECTION_LIST=[]
RECV_BUFFER=4096
PORT=5000
def broadcast(sock,message):
for s in CONNECTION_LIST:
if s != server_socket and socket!=sock:
try:
s.send(message)
except:
s.close()
CONNECTION_LIST.remove(socket)
server_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
server_socket.bind(("localhost", PORT))
server_socket.listen(10)
CONNECTION_LIST.append(server_socket)
print "Chat server started on port " + str(PORT)
while 1:
read_sockets,write_sockets,error_sockets = select.select(CONNECTION_LIST,[],[])
for sock in read_sockets:
if sock==server_socket:
sockfd, addr = server_socket.accept()
CONNECTION_LIST.append(sockfd)
print "client (%s,%s) is connected" %addr
broadcast(sockfd,"[%s:%s] entered room\n" %addr)
else:
try:
data=sock.recv(RECV_BUFFER)
if data:
broadcast(sock, "\r" + 'machan::<' + str(sock.getpeername()) + '> ::' + data)
except:
broadcast(sock, "client(%s,%s) is offline" %addr)
print "client(%s,%s) is offline " %addr
server_socket.close()
CONNECTION_LIST.remove(sock)
continue
server_socket.close()
| [
"to.ajeeb@gmail.com"
] | to.ajeeb@gmail.com |
ecd72f46add5e5f213fc1533ff3e25f25160af31 | 9de18e1e39c941aeba1781630711cef1d3d4d44c | /experiments/cifar10/conv.py | 41757c9d21758f8c35cf7d9e176d18cd6ff88602 | [] | no_license | BINDS-LAB-UMASS/bindsnet_experiments | cee786ae7e087845f58e0af4a49fa319d4fb81d5 | 8a20be9d1ede021b70ff95cc7e85024ff5a222db | refs/heads/master | 2022-11-12T11:33:20.451028 | 2019-10-01T15:40:28 | 2019-10-01T15:40:28 | 135,615,246 | 41 | 10 | null | 2022-10-28T00:35:03 | 2018-05-31T17:28:04 | Python | UTF-8 | Python | false | false | 14,333 | py | import os
import sys
import torch
import argparse
import numpy as np
import matplotlib.pyplot as plt
from time import time as t
sys.path.append('..')
from utils import print_results, update_curves
from bindsnet.datasets import CIFAR10
from bindsnet.network import Network
from bindsnet.learning import Hebbian
from bindsnet.encoding import bernoulli
from bindsnet.network.monitors import Monitor
from bindsnet.network.nodes import Input, DiehlAndCookNodes
from bindsnet.evaluation import update_ngram_scores, assign_labels
from bindsnet.network.topology import Conv2dConnection, SparseConnection
from bindsnet.analysis.plotting import plot_input, plot_spikes, plot_conv2d_weights
print()
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n_train', type=int, default=60000)
parser.add_argument('--n_test', type=int, default=10000)
parser.add_argument('--kernel_size', type=int, nargs='+', default=[16])
parser.add_argument('--stride', type=int, nargs='+', default=[4])
parser.add_argument('--n_filters', type=int, default=25)
parser.add_argument('--padding', type=int, default=0)
parser.add_argument('--inhib', type=float, default=100.0)
parser.add_argument('--time', type=int, default=100)
parser.add_argument('--dt', type=float, default=1.0)
parser.add_argument('--intensity', type=float, default=0.5)
parser.add_argument('--progress_interval', type=int, default=10)
parser.add_argument('--update_interval', type=int, default=250)
parser.add_argument('--train', dest='train', action='store_true')
parser.add_argument('--test', dest='train', action='store_false')
parser.add_argument('--plot', dest='plot', action='store_true')
parser.add_argument('--gpu', dest='gpu', action='store_true')
parser.set_defaults(plot=False, gpu=False, train=True)
args = parser.parse_args()
seed = args.seed
n_train = args.n_train
n_test = args.n_test
kernel_size = args.kernel_size
stride = args.stride
n_filters = args.n_filters
padding = args.padding
inhib = args.inhib
time = args.time
dt = args.dt
intensity = args.intensity
progress_interval = args.progress_interval
update_interval = args.update_interval
train = args.train
plot = args.plot
gpu = args.gpu
if len(kernel_size) == 1:
kernel_size = [kernel_size[0], kernel_size[0]]
if len(stride) == 1:
stride = [stride[0], stride[0]]
args = vars(args)
print('\nCommand-line argument values:')
for key, value in args.items():
print('-', key, ':', value)
print()
model = 'conv'
data = 'cifar10'
assert n_train % update_interval == 0 and n_test % update_interval == 0, \
'No. examples must be divisible by update_interval'
params = [seed, n_train, kernel_size, stride, n_filters,
padding, inhib, time, dt, intensity, update_interval]
model_name = '_'.join([str(x) for x in params])
if not train:
test_params = [seed, n_train, n_test, kernel_size, stride, n_filters,
padding, inhib, time, dt, intensity, update_interval]
np.random.seed(seed)
if gpu:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
torch.cuda.manual_seed_all(seed)
else:
torch.manual_seed(seed)
n_examples = n_train if train else n_test
input_shape = [32, 32, 3]
if kernel_size == input_shape:
conv_size = [1, 1]
else:
conv_size = (int((input_shape[0] - kernel_size[0]) / stride[0]) + 1,
int((input_shape[1] - kernel_size[1]) / stride[1]) + 1)
n_classes = 10
n_neurons = n_filters * np.prod(conv_size)
per_class = int(n_neurons / n_classes)
total_kernel_size = int(np.prod(kernel_size))
total_conv_size = int(np.prod(conv_size))
# Build network.
network = Network()
input_layer = Input(n=32*32*3, shape=(1, 3, 32, 32), traces=True)
conv_layer = DiehlAndCookNodes(n=n_filters * total_conv_size, shape=(1, n_filters, *conv_size),
thresh=-64.0, traces=True, theta_plus=0.05, refrac=0)
conv_layer2 = DiehlAndCookNodes(n=n_filters * total_conv_size, shape=(1, n_filters, *conv_size), refrac=0)
conv_conn = Conv2dConnection(input_layer, conv_layer, kernel_size=kernel_size, stride=stride, update_rule=Hebbian,
norm=0.5 * int(np.sqrt(total_kernel_size)), nu=(1e-3, 1e-3), wmax=2.0)
conv_conn2 = Conv2dConnection(input_layer, conv_layer2, w=conv_conn.w, kernel_size=kernel_size, stride=stride,
update_rule=None, nu=(0, 1e-3), wmax=2.0)
w = torch.ones(1, n_filters, conv_size[0], conv_size[1], 1, n_filters, conv_size[0], conv_size[1])
for f in range(n_filters):
for i in range(conv_size[0]):
for j in range(conv_size[1]):
w[0, f, i, j, 0, f, i, j] = 0
w = w.view(conv_layer.n, conv_layer.n)
i = w.nonzero()
v = -inhib * torch.ones(i.shape[0])
w = torch.sparse.FloatTensor(i.t(), v, w.size())
# for fltr1 in range(n_filters):
# for fltr2 in range(n_filters):
# for i1 in range(conv_size):
# for j1 in range(conv_size):
# for i2 in range(conv_size):
# for j2 in range(conv_size):
# if not (i1 == i2 and j1 == j2):
# w[0, fltr1, i1, j1, 0, fltr2, i2, j2] = -inhib
# if fltr1 != fltr2:
# for i in range(conv_size):
# for j in range(conv_size):
# w[0, fltr1, i, j, 0, fltr2, i, j] = -inhib
# for i1 in range(conv_size[0]):
# for j1 in range(conv_size[1]):
# for i2 in range(conv_size[0]):
# for j2 in range(conv_size[1]):
# if not (fltr1 == fltr2 and i1 == i2 and j1 == j2):
# w[0, fltr1, i1, j1, 0, fltr2, i2, j2] = -inhib
# if fltr1 != fltr2:
# for i1 in range(conv_size):
# for j1 in range(conv_size):
# for i2 in range(conv_size):
# for j2 in range(conv_size):
# w[0, fltr1, i1, j1, 0, fltr2, i2, j2] = -inhib
recurrent_conn = SparseConnection(conv_layer, conv_layer, w=w)
network.add_layer(input_layer, name='X')
network.add_layer(conv_layer, name='Y')
network.add_layer(conv_layer2, name='Y_')
network.add_connection(conv_conn, source='X', target='Y')
network.add_connection(conv_conn2, source='X', target='Y_')
network.add_connection(recurrent_conn, source='Y', target='Y')
# Voltage recording for excitatory and inhibitory layers.
voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time)
network.add_monitor(voltage_monitor, name='output_voltage')
# Load CIFAR-10 data.
dataset = CIFAR10(path=os.path.join('..', '..', 'data', 'CIFAR10'), download=True)
if train:
images, labels = dataset.get_train()
else:
images, labels = dataset.get_test()
images *= intensity
# Record spikes during the simulation.
spike_record = torch.zeros(update_interval, time, n_neurons)
# Neuron assignments and spike proportions.
if train:
assignments = -torch.ones_like(torch.Tensor(n_neurons))
proportions = torch.zeros_like(torch.Tensor(n_neurons, n_classes))
rates = torch.zeros_like(torch.Tensor(n_neurons, n_classes))
ngram_scores = {}
else:
path = os.path.join('..', '..', 'params', data, model)
path = os.path.join(path, '_'.join(['auxiliary', model_name]) + '.pt')
assignments, proportions, rates, ngram_scores = torch.load(open(path, 'rb'))
# Sequence of accuracy estimates.
curves = {'all': [], 'proportion': [], 'ngram': []}
if train:
best_accuracy = 0
spikes = {}
for layer in set(network.layers):
spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time)
network.add_monitor(spikes[layer], name='%s_spikes' % layer)
# Train the network.
if train:
print('\nBegin training.\n')
else:
print('\nBegin test.\n')
inpt_ims = None
inpt_axes = None
spike_ims = None
spike_axes = None
weights_im = None
start = t()
for i in range(n_examples):
if i % progress_interval == 0:
print('Progress: %d / %d (%.4f seconds)' % (i, n_train, t() - start))
start = t()
if i % update_interval == 0 and i > 0:
if i % len(labels) == 0:
current_labels = labels[-update_interval:]
else:
current_labels = labels[i % len(images) - update_interval:i % len(images)]
# Update and print accuracy evaluations.
curves, predictions = update_curves(
curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments,
proportions=proportions, ngram_scores=ngram_scores, n=2
)
print_results(curves)
if train:
if any([x[-1] > best_accuracy for x in curves.values()]):
print('New best accuracy! Saving network parameters to disk.')
# Save network to disk.
path = os.path.join('..', '..', 'params', data, model)
if not os.path.isdir(path):
os.makedirs(path)
network.save(os.path.join(path, model_name + '.pt'))
path = os.path.join(path, '_'.join(['auxiliary', model_name]) + '.pt')
torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb'))
best_accuracy = max([x[-1] for x in curves.values()])
# Assign labels to excitatory layer neurons.
assignments, proportions, rates = assign_labels(spike_record, current_labels, n_classes, rates)
# Compute ngram scores.
ngram_scores = update_ngram_scores(spike_record, current_labels, n_classes, 2, ngram_scores)
print()
# Get next input sample.
image = images[i].permute(2, 0, 1)
sample = bernoulli(datum=image, time=time, dt=dt, max_prob=1.0).unsqueeze(1)
inpts = {'X': sample}
# Run the network on the input.
network.run(inpts=inpts, time=time)
retries = 0
while spikes['Y_'].get('s').sum() < 5 and retries < 3:
retries += 1
sample = bernoulli(datum=image, time=time, dt=dt, max_prob=1.0).unsqueeze(1)
inpts = {'X': sample}
network.run(inpts=inpts, time=time)
# Add to spikes recording.
spike_record[i % update_interval] = spikes['Y_'].get('s').view(time, -1)
# Optionally plot various simulation information.
if plot:
_input = image.permute(1, 2, 0).float()
_input /= _input.max()
reconstruction = inpts['X'].sum(0).view(3, 32, 32).permute(1, 2, 0).float()
reconstruction /= reconstruction.max()
w = conv_conn.w
_spikes = {'X': spikes['X'].get('s').view(32*32*3, time),
'Y': spikes['Y'].get('s').view(n_filters * total_conv_size, time),
'Y_': spikes['Y_'].get('s').view(n_filters * total_conv_size, time)}
inpt_axes, inpt_ims = plot_input(
images[i].view(32, 32, 3), reconstruction, label=labels[i], ims=inpt_ims, axes=inpt_axes
)
spike_ims, spike_axes = plot_spikes(spikes=_spikes, ims=spike_ims, axes=spike_axes)
weights_im = plot_conv2d_weights(w, im=weights_im, wmax=0.1)
plt.pause(1e-8)
network.reset_() # Reset state variables.
print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)')
i += 1
if i % len(labels) == 0:
current_labels = labels[-update_interval:]
else:
current_labels = labels[i % len(images) - update_interval:i % len(images)]
# Update and print accuracy evaluations.
curves, predictions = update_curves(
curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments,
proportions=proportions, ngram_scores=ngram_scores, n=2
)
print_results(curves)
if train:
if any([x[-1] > best_accuracy for x in curves.values()]):
print('New best accuracy! Saving network parameters to disk.')
# Save network to disk.
path = os.path.join('..', '..', 'params', data, model)
if not os.path.isdir(path):
os.makedirs(path)
network.save(os.path.join(path, model_name + '.pt'))
path = os.path.join(path, '_'.join(['auxiliary', model_name]) + '.pt')
torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb'))
best_accuracy = max([x[-1] for x in curves.values()])
if train:
print('\nTraining complete.\n')
else:
print('\nTest complete.\n')
print('Average accuracies:\n')
for scheme in curves.keys():
print('\t%s: %.2f' % (scheme, np.mean(curves[scheme])))
# Save accuracy curves to disk.
path = os.path.join('..', '..', 'curves', data, model)
if not os.path.isdir(path):
os.makedirs(path)
if train:
to_write = ['train'] + params
else:
to_write = ['test'] + params
to_write = [str(x) for x in to_write]
f = '_'.join(to_write) + '.pt'
torch.save((curves, update_interval, n_examples), open(os.path.join(path, f), 'wb'))
# Save results to disk.
path = os.path.join('..', '..', 'results', data, model)
if not os.path.isdir(path):
os.makedirs(path)
results = [
np.mean(curves['all']), np.mean(curves['proportion']), np.mean(curves['ngram']),
np.max(curves['all']), np.max(curves['proportion']), np.max(curves['ngram'])
]
if train:
to_write = params + results
else:
to_write = test_params + results
to_write = [str(x) for x in to_write]
name = 'train.csv' if train else 'test.csv'
if not os.path.isfile(os.path.join(path, name)):
with open(os.path.join(path, name), 'w') as f:
if train:
columns = [
'seed', 'n_train', 'kernel_size', 'stride', 'n_filters', 'padding', 'inhib', 'time', 'dt',
'intensity', 'update_interval', 'mean_all_activity', 'mean_proportion_weighting',
'mean_ngram', 'max_all_activity', 'max_proportion_weighting', 'max_ngram'
]
header = ','.join(columns) + '\n'
f.write(header)
else:
columns = [
'seed', 'n_train', 'n_test', 'kernel_size', 'stride', 'n_filters', 'padding', 'inhib', 'time',
'dt', 'intensity', 'update_interval', 'mean_all_activity', 'mean_proportion_weighting',
'mean_ngram', 'max_all_activity', 'max_proportion_weighting', 'max_ngram'
]
header = ','.join(columns) + '\n'
f.write(header)
with open(os.path.join(path, name), 'a') as f:
f.write(','.join(to_write) + '\n')
print()
| [
"djsaunde@umass.edu"
] | djsaunde@umass.edu |
e1bccde57c18d31ab7ae91528e51e89563c8c9b2 | 3e7b2ebb64e9e324ce47d19def21ae62cc1e56a6 | /Problem-solving/HackerRank/p14- sWAP cASE.py | 5f4f5a0512103085cb85a010c0c4672a7a9a5c87 | [] | no_license | shuvo14051/python-data-algo | 9b6622d9260e95ca9ffabd39b02996f13bdf20d1 | 8f66ff6f2bd88a0ae48dac72e4ea6c5382a836ec | refs/heads/master | 2023-02-03T03:04:01.183093 | 2020-12-13T10:13:15 | 2020-12-13T10:13:15 | 274,106,480 | 0 | 0 | null | 2020-07-05T06:33:28 | 2020-06-22T10:24:05 | Python | UTF-8 | Python | false | false | 194 | py | # n = input()
#
# swaped_n = n.swapcase()
#
# print(swaped_n)
def swap_case(s):
return s.swapcase()
if __name__ == '__main__':
s = input()
result = swap_case(s)
print(result) | [
"shuvo1137017@gmail.com"
] | shuvo1137017@gmail.com |
96522b5e865f8371b780903ffd0f11fcf1ca2dfa | 6a18c479a83e238484fd481c97fdca776091d4aa | /deepracer/RL_Model_Training/benchmark_dr.py | 4d10f2d5ad61ca5e66c85c7cf425a6a26321f5ae | [
"BSD-3-Clause",
"MIT"
] | permissive | Currycurrycurry/Time-in-State-RL | f07e91fc3f6111869dfa380de87970086895e046 | f106d92c3556b955c108bf3e147bb0b12e60259c | refs/heads/main | 2023-03-20T16:13:53.414126 | 2021-03-09T02:24:31 | 2021-03-09T02:24:31 | 345,659,435 | 0 | 0 | BSD-3-Clause | 2021-03-08T13:07:45 | 2021-03-08T13:07:45 | null | UTF-8 | Python | false | false | 24,399 | py | # Code credits: The model loading code is taken from open AI baselines with modifications done to allow
# variable timing characteristics during evaluation. The deepracer environment is taken from the aws deepracer github
# code with modifications for the variable timing characteristics.
# Changing the sampling and latency input to the model
#multiple models and multiple paths can be added
path1 = 'Deepracer-checkpoints/Latency_138.pb' #Path of saved model
#path2 = 'dr_tf_frozen_model.pb' #Path of saved model
#path3 = 'dr_tf_frozen_model.pb' #Path of saved model
#paths = [path1, path2, path3]
paths = [path1]
#the folder to save the data
data_folder = 'data_dr/'
#the data saved in in folder with this name
#experiments = ['dr_1', 'dr_2', 'dr_3']
experiments = ['dr_1']
latencies = [20, 20, 40, 40, 60, 60, 80, 80, 100, 100, 120, 120]
sampling_sleeps = [0.033, 0.033, 0.040, 0.040, 0.060, 0.060, 0.080, 0.080, 0.100, 0.100, 0.120, 0.120]
directions = [ 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]
#number of continuous steps to run the car
steps = 5000
# Changing the sampling and latency input to the model
import time
import gym
import queue
import numpy as np
from gym import spaces
from PIL import Image
import os
import math
from rotation import Rotation
from collections import OrderedDict
import random
import bisect
import json
import math
# Type of worker
SIMULATION_WORKER = "SIMULATION_WORKER"
SAGEMAKER_TRAINING_WORKER = "SAGEMAKER_TRAINING_WORKER"
# Which role this process plays; defaults to the simulation worker.
node_type = os.environ.get("NODE_TYPE", SIMULATION_WORKER)
#saving the debug data
import pickle
if node_type == SIMULATION_WORKER:
    # ROS/Gazebo dependencies are only available (and needed) on the
    # simulation worker, so they are imported conditionally.
    import rospy
    from ackermann_msgs.msg import AckermannDriveStamped
    from gazebo_msgs.msg import ModelState
    from gazebo_msgs.srv import GetLinkState, GetModelState, JointRequest
    from gazebo_msgs.srv import SetModelState
    from std_msgs.msg import Float64
    from sensor_msgs.msg import Image as sensor_image
    from deepracer_msgs.msg import Progress
    from shapely.geometry import Point, Polygon
    from shapely.geometry.polygon import LinearRing, LineString
# (width, height) of the observation fed to the model.
TRAINING_IMAGE_SIZE = (160, 120)
FINISH_LINE = 1000
# REWARD ENUM
CRASHED = -30.0
NO_PROGRESS = -1
FINISHED = 10000000.0
MAX_STEPS = 100000000
# WORLD NAME
EASY_TRACK_WORLD = 'easy_track'
MEDIUM_TRACK_WORLD = 'medium_track'
HARD_TRACK_WORLD = 'hard_track'
# Normalized track distance to move with each reset
ROUND_ROBIN_ADVANCE_DIST = 0.02#0.02 #0.01
# List of required velocity topics, one topic per wheel
VELOCITY_TOPICS = ['/racecar/left_rear_wheel_velocity_controller/command',
                   '/racecar/right_rear_wheel_velocity_controller/command',
                   '/racecar/left_front_wheel_velocity_controller/command',
                   '/racecar/right_front_wheel_velocity_controller/command']
# List of required steering hinges
STEERING_TOPICS = ['/racecar/left_steering_hinge_position_controller/command',
                   '/racecar/right_steering_hinge_position_controller/command']
# List of all effort joints
EFFORT_JOINTS = ['/racecar/left_rear_wheel_joint', '/racecar/right_rear_wheel_joint',
                 '/racecar/left_front_wheel_joint','/racecar/right_front_wheel_joint',
                 '/racecar/left_steering_hinge_joint','/racecar/right_steering_hinge_joint']
# Radius of the wheels of the car in meters
WHEEL_RADIUS = 0.1
# Size of the image queue buffer, we want this to be one so that we consume 1 image
# at a time, but may want to change this as we add more algorithms
IMG_QUEUE_BUF_SIZE = 1
#print(delays_array)
class DeepRacerEnv(gym.Env):
def __init__(self):
self.sampling_rate = 30.0
self.sampling_sleep = (1.0/self.sampling_rate)
#self.sampling_rates = [15.0, 30.0]
self.sampling_rates = [30.0, 30.0]
self.sampling_rate_index = 0
self.latencies = [10.0, 20.0, 40.0, 60.0, 80.0, 100.0, 120.0]
self.latency_index = 0
self.latency_max_num_steps = 500 # for these steps latency will be fixed or change on reset or done after 500.
self.latency_steps = 0
self.latency = 10.0 #10 is the starting latency
self.model_running_time = (2.0/1000.0) #model runtime
screen_height = TRAINING_IMAGE_SIZE[1]
screen_width = TRAINING_IMAGE_SIZE[0]
self.on_track = 0
self.progress = 0
self.yaw = 0
self.x = 0
self.y = 0
self.z = 0
self.distance_from_center = 0
self.distance_from_border_1 = 0
self.distance_from_border_2 = 0
self.steps = 0
self.progress_at_beginning_of_race = 0
self.reverse_dir = False
self.start_ndist = 0.0
# actions -> steering angle, throttle
self.action_space = spaces.Box(low=np.array([-1, 0]), high=np.array([+1, +1]), dtype=np.float32)
# given image from simulator
self.observation_space = spaces.Box(low=0, high=255,
shape=(screen_height, screen_width, 1), dtype=np.uint8)
self.allow_servo_step_signals = True
#stores the time when camera images are received
self.cam_update_time=[]
#stores the time when consequetive actions are send
self.cons_action_send_time=[]
#stores the time when progress updates are received
self.progress_update_time = []
#folder location to store the debug data
self.debug_data_folder = []
self.debug_index = 0
if node_type == SIMULATION_WORKER:
# ROS initialization
rospy.init_node('rl_coach', anonymous=True)
self.ack_publisher = rospy.Publisher('/vesc/low_level/ackermann_cmd_mux/output',
AckermannDriveStamped, queue_size=100)
self.racecar_service = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
self.clear_forces_client = rospy.ServiceProxy('/gazebo/clear_joint_forces',
JointRequest)
# Subscribe to ROS topics and register callbacks
rospy.Subscriber('/progress', Progress, self.callback_progress)
rospy.Subscriber('/camera/zed/rgb/image_rect_color', sensor_image, self.callback_image)
self.world_name = 'hard_track'#rospy.get_param('WORLD_NAME')
self.set_waypoints()
waypoints = self.waypoints
is_loop = np.all(waypoints[0,:] == waypoints[-1,:])
if is_loop:
self.center_line = LinearRing(waypoints[:,0:2])
else:
self.center_line = LineString(waypoints[:,0:2])
self.center_dists = [self.center_line.project(Point(p), normalized=True) for p in self.center_line.coords[:-1]] + [1.0]
self.track_length = self.center_line.length
self.reward_in_episode = 0
self.prev_progress = 0
self.steps = 0
# Create the publishers for sending speed and steering info to the car
self.velocity_pub_dict = OrderedDict()
self.steering_pub_dict = OrderedDict()
for topic in VELOCITY_TOPICS:
self.velocity_pub_dict[topic] = rospy.Publisher(topic, Float64, queue_size=1)
for topic in STEERING_TOPICS:
self.steering_pub_dict[topic] = rospy.Publisher(topic, Float64, queue_size=1)
def get_data_debug(self):
    # Debug helper: print the track geometry computed in __init__.
    print("center_line",self.center_line)
    print("track_length",self.track_length)

def reset(self,inp_x=1.75,inp_y=0.6):
    """Start a new episode and return the first observation.

    NOTE(review): inp_x/inp_y are accepted but never used — confirm intent.
    """
    if node_type == SAGEMAKER_TRAINING_WORKER:
        # The training worker has no simulator; hand back a dummy observation.
        return self.observation_space.sample()
    # print('Total Reward Reward=%.2f' % self.reward_in_episode,
    #       'Total Steps=%.2f' % self.steps)
    #self.send_reward_to_cloudwatch(self.reward_in_episode)
    self.reward_in_episode = 0
    self.reward = None
    self.done = False
    self.next_state = None
    self.image = None
    self.steps = 0
    self.prev_progress = 0
    # Reset car in Gazebo
    self.send_action(0, 0) # set the throttle to 0
    self.racecar_reset(0, 0)
    # Populates self.next_state/self.reward/self.done from the new pose.
    self.infer_reward_state(0, 0)
    # Clear the per-episode timing debug buffers.
    self.cam_update_time = []
    self.cons_action_send_time = []
    self.progress_update_time = []
    self.debug_index= self.debug_index+1
    return self.next_state
def add_latency_to_image(self, observation):
    """Return *observation* with a trailing channel axis of size 1.

    Historically this method also embedded the current latency/sampling
    rate into corner pixels; that behavior is disabled and only the
    (height, width) -> (height, width, 1) reshape remains.
    """
    height, width = observation.shape[0], observation.shape[1]
    return observation.reshape(height, width, 1)
def convert_rgb_to_gray(self, observation):
    """Collapse an (H, W, 3) RGB image to (H, W) grayscale.

    Uses the ITU-R BT.601 luma weights (0.2989, 0.5870, 0.1140).
    """
    weights = (0.2989, 0.5870, 0.1140)
    return (weights[0] * observation[:, :, 0]
            + weights[1] * observation[:, :, 1]
            + weights[2] * observation[:, :, 2])
def set_next_state(self):
    """Convert the most recent camera frame into self.next_state.

    Decodes the raw ROS image message, resizes it to TRAINING_IMAGE_SIZE,
    converts to grayscale and adds the trailing channel axis. If no frame
    has arrived yet (self.image is None), next_state is left unchanged.
    """
    # BUG FIX: was `if(self.image!=None)`; identity comparison is the
    # correct (and safe) way to test for a missing message object.
    if self.image is not None:
        image_data = self.image
        # Read the image and resize to get the state
        image = Image.frombytes('RGB', (image_data.width, image_data.height),
                                image_data.data, 'raw', 'RGB', 0, 1)
        # resample=2 is PIL's bilinear filter.
        image = image.resize(TRAINING_IMAGE_SIZE, resample=2)
        image = np.array(image)
        #image = do_randomization(image)
        image = self.convert_rgb_to_gray(image)
        image = self.add_latency_to_image(image)
        self.next_state = image
def racecar_reset(self, ndist, next_index):
    """Teleport the car to the current round-robin start point in Gazebo.

    NOTE(review): the ndist/next_index parameters are ignored; the start
    point comes from self.start_ndist — confirm intent.
    """
    rospy.wait_for_service('gazebo/set_model_state')
    #random_start = random.random()
    prev_index, next_index = self.find_prev_next_waypoints(self.start_ndist)
    # Compute the starting position and heading
    #start_point = self.center_line.interpolate(ndist, normalized=True)
    start_point = self.center_line.interpolate(self.start_ndist, normalized=True)
    # Heading points toward the next waypoint on the center line.
    start_yaw = math.atan2(self.center_line.coords[next_index][1] - start_point.y,
                           self.center_line.coords[next_index][0] - start_point.x)
    start_quaternion = Rotation.from_euler('zyx', [start_yaw, 0, 0]).as_quat()
    # Construct the model state and send to Gazebo
    model_state = ModelState()
    model_state.model_name = 'racecar'
    model_state.pose.position.x = start_point.x
    model_state.pose.position.y = start_point.y
    model_state.pose.position.z = 0
    model_state.pose.orientation.x = start_quaternion[0]
    model_state.pose.orientation.y = start_quaternion[1]
    model_state.pose.orientation.z = start_quaternion[2]
    model_state.pose.orientation.w = start_quaternion[3]
    # Zero all velocities so the car starts from rest.
    model_state.twist.linear.x = 0
    model_state.twist.linear.y = 0
    model_state.twist.linear.z = 0
    model_state.twist.angular.x = 0
    model_state.twist.angular.y = 0
    model_state.twist.angular.z = 0
    self.racecar_service(model_state)
    # Clear any residual joint forces left over from the previous episode.
    for joint in EFFORT_JOINTS:
        self.clear_forces_client(joint)
    #keeping track where to start the car
    self.reverse_dir = not self.reverse_dir
    self.start_ndist = (self.start_ndist + ROUND_ROBIN_ADVANCE_DIST) % 1.0
    self.progress_at_beginning_of_race = self.progress
def find_prev_next_waypoints(self, ndist):
    """Locate the waypoints bracketing normalized distance *ndist*.

    Returns (prev_index, next_index) into self.center_dists, with the
    roles of the two neighbors swapped when driving in reverse. Indices
    wrap around, since the track is a loop.
    """
    count = len(self.center_dists)
    if self.reverse_dir:
        nxt = bisect.bisect_left(self.center_dists, ndist) - 1
        prev = nxt + 1
        if nxt == -1:
            nxt = count - 1
    else:
        nxt = bisect.bisect_right(self.center_dists, ndist)
        prev = nxt - 1
        if nxt == count:
            nxt = 0
    return prev, nxt
def step(self, action):
    """Apply one [steering, throttle] action and return (obs, reward, done, info).

    Sleeps to emulate the configured actuation latency, then pads the rest
    of the sampling period so actions are issued at self.sampling_rate Hz.
    """
    self.latency_steps = self.latency_steps+1
    #print('latency set in env:',self.latency)
    #bookeeping when the action was send
    #self.cons_action_send_time.append([self.steps,time.time()])
    # Convert ms -> s; 2 ms is subtracted as baseline machine overhead.
    latency = (self.latency-2.0)/1000.0
    #10ms latency is substracted, because that is the avg default latency observed on the training machine
    if latency>0.001:
        time.sleep(latency)
    else:
        latency = 0.0
    # Initialize next state, reward, done flag
    self.next_state = None
    self.reward = None
    self.done = False
    # Send this action to Gazebo and increment the step count
    self.steering_angle = float(action[0])
    self.speed = float(action[1])
    self.send_action(self.steering_angle, self.speed)
    self.steps += 1
    # Sleep the remainder of the sampling period; model runtime and the
    # injected latency already consumed part of it.
    to_sleep = (self.sampling_sleep - self.model_running_time - latency)
    if to_sleep>0.001:
        time.sleep(to_sleep)
    # [disabled] periodic randomization of latency and sampling rate:
    # if self.latency_steps == self.latency_max_num_steps:
    #     #update the latency
    #     self.latency_index = (self.latency_index+1) % (len(self.latencies))
    #     self.latency = self.latencies[self.latency_index]
    #     #update the sampling rate
    #     self.sampling_rate_index = random.randint(0,1)
    #     self.sampling_rate = self.sampling_rates[self.sampling_rate_index]
    #     self.sampling_sleep = (1.0/self.sampling_rate)
    #     if (self.latency/1000.0)> self.sampling_sleep: # match sampling input to the model and latency
    #         self.sampling_rate = 1000.0/self.latency
    #     self.latency_steps = 0
    # Compute the next state and reward
    self.infer_reward_state(self.steering_angle, self.speed)
    return self.next_state, self.reward, self.done, {}
def send_action(self, steering_angle, speed):
    """Publish wheel RPM and steering-hinge commands to all controller topics."""
    # Simple v/r to computes the desired rpm
    wheel_rpm = speed/WHEEL_RADIUS
    for _, pub in self.velocity_pub_dict.items():
        pub.publish(wheel_rpm)
    for _, pub in self.steering_pub_dict.items():
        pub.publish(steering_angle)

def callback_image(self, data):
    # ROS subscriber callback: keep only the most recent camera frame.
    self.image = data
    #bookeeping when the image was received
    #self.cam_update_time.append([self.steps,time.time()])

def callback_progress(self, data):
    # ROS subscriber callback: mirror the latest /progress message fields
    # into instance attributes read by infer_reward_state().
    self.on_track = not (data.off_track)
    self.progress = data.progress
    self.yaw = data.yaw
    self.x = data.x
    self.y = data.y
    self.z = data.z
    self.distance_from_center = data.distance_from_center
    self.distance_from_border_1 = data.distance_from_border_1
    self.distance_from_border_2 = data.distance_from_border_2
    #bookeeping when the progress was received
    #self.progress_update_time.append([self.steps,time.time()])
def reward_function(self, on_track, x, y, distance_from_center,
                    throttle, steering, track_width):
    """Centerline-following reward.

    Base reward is (track_width - distance_from_center), scaled up the
    closer the car is to the center line (three concentric bands), and
    nearly zeroed outside the outermost band. Slow throttle values are
    penalized multiplicatively (both penalties can stack).
    """
    near_band = 0.1 * track_width
    mid_band = 0.15 * track_width
    far_band = 0.20 * track_width
    reward = track_width - distance_from_center  # max reward = 0.44
    if 0.0 <= distance_from_center <= near_band:
        reward *= 2.5    # scales the 0.44 maximum up to 1.1
    elif distance_from_center <= mid_band:
        reward *= 1.33   # scales the 0.375 maximum to ~0.5
    elif distance_from_center <= far_band:
        reward *= 0.71   # scales the 0.352 maximum to ~0.25
    else:
        reward = 0.001   # may go close to off track
    # Penalize slow actions; each threshold applies its own 5% cut.
    if throttle < 1.6 and reward > 0:
        reward *= 0.95
    if throttle < 1.4 and reward > 0:
        reward *= 0.95
    return float(reward)
def infer_reward_state(self, steering_angle, throttle):
    """Refresh self.next_state from the camera and compute reward/done."""
    #state has to be set first, because we need most accurate reward signal
    self.set_next_state()
    on_track = self.on_track
    done = False
    if on_track != 1:
        # Off the track: terminal state with a fixed crash penalty.
        reward = CRASHED
        done = True
    else:
        reward = self.reward_function(on_track, self.x, self.y, self.distance_from_center,
                                      throttle, steering_angle, self.road_width)
    #after 500 steps in episode we want to restart it
    if self.steps==500:
        done = True
        if reward > 0: #car is not crashed
            reward = reward *5.0 #bonus on completing 500 steps
    self.reward_in_episode += reward
    self.reward = reward
    self.done = done
def set_waypoints(self):
    """Populate self.waypoints and self.road_width for the selected world.

    Coordinates are hard-coded (x, y) vertices of each track's center line;
    the default branch is the hard track with 30 waypoints.
    """
    if self.world_name.startswith(MEDIUM_TRACK_WORLD):
        self.waypoints = vertices = np.zeros((8, 2))
        self.road_width = 0.50
        vertices[0][0] = -0.99; vertices[0][1] = 2.25;
        vertices[1][0] = 0.69; vertices[1][1] = 2.26;
        vertices[2][0] = 1.37; vertices[2][1] = 1.67;
        vertices[3][0] = 1.48; vertices[3][1] = -1.54;
        vertices[4][0] = 0.81; vertices[4][1] = -2.44;
        vertices[5][0] = -1.25; vertices[5][1] = -2.30;
        vertices[6][0] = -1.67; vertices[6][1] = -1.64;
        vertices[7][0] = -1.73; vertices[7][1] = 1.63;
    elif self.world_name.startswith(EASY_TRACK_WORLD):
        # Easy track: a simple straight segment.
        self.waypoints = vertices = np.zeros((2, 2))
        self.road_width = 0.90
        vertices[0][0] = -1.08; vertices[0][1] = -0.05;
        vertices[1][0] = 1.08; vertices[1][1] = -0.05;
    else:
        # Hard track (default).
        self.waypoints = vertices = np.zeros((30, 2))
        self.road_width = 0.44
        vertices[0][0] = 1.5; vertices[0][1] = 0.58;
        vertices[1][0] = 5.5; vertices[1][1] = 0.58;
        vertices[2][0] = 5.6; vertices[2][1] = 0.6;
        vertices[3][0] = 5.7; vertices[3][1] = 0.65;
        vertices[4][0] = 5.8; vertices[4][1] = 0.7;
        vertices[5][0] = 5.9; vertices[5][1] = 0.8;
        vertices[6][0] = 6.0; vertices[6][1] = 0.9;
        vertices[7][0] = 6.08; vertices[7][1] = 1.1;
        vertices[8][0] = 6.1; vertices[8][1] = 1.2;
        vertices[9][0] = 6.1; vertices[9][1] = 1.3;
        vertices[10][0] = 6.1; vertices[10][1] = 1.4;
        vertices[11][0] = 6.07; vertices[11][1] = 1.5;
        vertices[12][0] = 6.05; vertices[12][1] = 1.6;
        vertices[13][0] = 6; vertices[13][1] = 1.7;
        vertices[14][0] = 5.9; vertices[14][1] = 1.8;
        vertices[15][0] = 5.75; vertices[15][1] = 1.9;
        vertices[16][0] = 5.6; vertices[16][1] = 2.0;
        vertices[17][0] = 4.2; vertices[17][1] = 2.02;
        vertices[18][0] = 4; vertices[18][1] = 2.1;
        vertices[19][0] = 2.6; vertices[19][1] = 3.92;
        vertices[20][0] = 2.4; vertices[20][1] = 4;
        vertices[21][0] = 1.2; vertices[21][1] = 3.95;
        vertices[22][0] = 1.1; vertices[22][1] = 3.92;
        vertices[23][0] = 1; vertices[23][1] = 3.88;
        vertices[24][0] = 0.8; vertices[24][1] = 3.72;
        vertices[25][0] = 0.6; vertices[25][1] = 3.4;
        vertices[26][0] = 0.58; vertices[26][1] = 3.3;
        vertices[27][0] = 0.57; vertices[27][1] = 3.2;
        vertices[28][0] = 1; vertices[28][1] = 1;
        vertices[29][0] = 1.25; vertices[29][1] = 0.7;
def get_closest_waypoint(self):
    """Return the index of the waypoint nearest the car's current (x, y).

    Linear scan over self.waypoints; on a distance tie the earliest index
    wins (strict less-than comparison).
    """
    best_index = 0
    best_distance = float('inf')
    for index, row in enumerate(self.waypoints):
        distance = math.sqrt((row[0] - self.x) ** 2 + (row[1] - self.y) ** 2)
        if distance < best_distance:
            best_distance = distance
            best_index = index
    return best_index
class DeepRacerDiscreteEnv(DeepRacerEnv):
    """Discrete-action wrapper around DeepRacerEnv.

    The discrete action space is read from custom_files/model_metadata.json;
    each action index maps to a {steering_angle, speed} entry that step()
    converts into the parent's continuous [steering, throttle] pair.
    """

    def __init__(self):
        DeepRacerEnv.__init__(self)
        # Load the action table shipped with the trained model.
        with open('custom_files/model_metadata.json', 'r') as f:
            model_metadata = json.load(f)
        self.json_actions = model_metadata['action_space']
        self.action_space = spaces.Discrete(len(self.json_actions))
        print("Intialized action space")
        print(self.json_actions)
        print("num of actions",self.action_space )

    def step(self, action):
        action = int(action)
        # Convert discrete to continuous: degrees -> radians for steering.
        steering_angle = float(self.json_actions[action]['steering_angle']) * math.pi / 180.0
        throttle = float(self.json_actions[action]['speed'])
        continous_action = [steering_angle, throttle]
        return super().step(continous_action)
import tensorflow as tf
# Allow TF to grow GPU memory on demand instead of grabbing it all upfront.
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
import json
# Action table for the evaluation loop below (mirrors the env's table).
with open('custom_files/model_metadata.json', 'r') as f:
    model_metadata = json.load(f)
json_actions = model_metadata['action_space']

def get_session(frozen_graph):
    """Load a frozen TF1 graph and return (session, input_tensor, output_tensor).

    Tensor names are specific to the exported PPO2 model:
    'ppo2_model/Ob:0' (observation input) and 'ppo2_model/pi_1/add:0'
    (policy logits output).
    """
    with tf.gfile.GFile(frozen_graph, "rb") as f:
        restored_graph_def = tf.GraphDef()
        restored_graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(restored_graph_def, name="",input_map=None)
        x = graph.get_tensor_by_name('ppo2_model/Ob:0')
        y = graph.get_tensor_by_name('ppo2_model/pi_1/add:0')
        sess = tf.Session(graph=graph, config=config)
    return sess, x,y
# Automate the testing of the models
#Runs a simple setting
def test_in_simulator(sess, latency, sampling_sleep, total_steps, direction, x, y):
    """Roll out the frozen policy in the simulator for `total_steps` steps.

    Args:
        sess: TF session holding the frozen policy graph.
        latency: artificial actuation latency (ms) to inject into the env.
        sampling_sleep: control period in seconds (1 / sampling rate).
        total_steps: total number of environment steps to execute.
        direction: 2 means drive the track in the reverse direction.
        x, y: input/output tensors of the policy graph.

    Returns:
        (total_rewards, Actions_Taken, dist_and_speed, time_taken) lists.
    """
    time_taken = []
    Actions_Taken = [] #stores speed and steering of the actions taken
    total_rewards = []
    env = DeepRacerDiscreteEnv()
    env.sampling_sleep = sampling_sleep
    env.sampling_rate = 1.0/(env.sampling_sleep)
    #print(env.sampling_sleep, env.sampling_rate)
    env.latency = latency
    env.dist_and_speed = []
    if direction==2: # when 2, we want to reverse the direction
        env.reverse_dir = not env.reverse_dir
    steps_done = 0
    local_steps = 0
    obs = env.reset()
    #warmup — one inference so the first timed step excludes TF startup cost
    obs = obs.reshape(1,120,160,1)
    action = sess.run(y, feed_dict={x: obs})
    while local_steps<=total_steps:
        done = False
        obs = env.reset()
        while not done and local_steps<=total_steps:
            t1 = time.time()
            obs = obs.reshape(1,120,160,1)
            # Greedy action: argmax over the policy logits.
            action = sess.run(y, feed_dict={x: obs})
            action = np.argmax(action)
            steering_angle = json_actions[action]['steering_angle']
            throttle = json_actions[action]['speed']
            Actions_Taken.append([steering_angle,throttle])
            #updating the exact model runtime
            env.model_running_time = (time.time() - t1)
            obs, rew, done, _ = env.step(action)
            total_rewards.append(rew)
            t2 = time.time()
            time_taken.append(t2-t1)
            local_steps = local_steps + 1
            if done:
                obs = env.reset()
    dist_and_speed = env.dist_and_speed
    del env
    return total_rewards, Actions_Taken, dist_and_speed, time_taken
# save the data
import pickle
def save_data(path, total_rewards, dist_and_speed, Actions_Taken):
    """Pickle the three result lists to *path* as a single 3-element list.

    The stored layout is [total_rewards, dist_and_speed, Actions_Taken].
    """
    payload = [total_rewards, dist_and_speed, Actions_Taken]
    with open(path, 'wb') as out_file:
        print("Saving the data", path)
        pickle.dump(payload, out_file)
def do_testing(sess, x, y, exp_name):
    """Sweep all configured (latency, sampling, direction) combinations.

    NOTE(review): relies on module-level globals `latencies`,
    `sampling_sleeps`, `directions`, `steps` and `data_folder`, which are
    defined earlier in this file (outside this excerpt) — verify they exist.
    """
    for i in range(len(latencies)):
        latency = latencies[i]
        sampling_sleep = sampling_sleeps[i]
        direction = directions[i]
        total_rewards, Actions_Taken, dist_and_speed, time_taken = test_in_simulator(sess, latency, sampling_sleep, steps, direction, x, y)
        # One output file per (experiment, direction, latency) combination.
        path = data_folder + exp_name+'_'+str(direction)+'_'+str(latency)
        save_data(path, total_rewards, dist_and_speed, Actions_Taken)
        # Free the large result lists before the next configuration.
        del total_rewards, Actions_Taken, dist_and_speed, time_taken
# Entry point: evaluate every frozen model listed in the module-level
# `paths`/`experiments` globals (defined earlier in this file).
for i in range(len(paths)):
    frozen_graph = paths[i]
    exp_name = experiments[i]
    sess, x,y = get_session(frozen_graph)
    do_testing(sess,x,y, exp_name)
    # Release the TF session and tensors before loading the next model.
    del sess, x, y
print('done')
| [
"sandha.iitr@gmail.com"
] | sandha.iitr@gmail.com |
4e9439bccd64614f8be9f6d1df393b1b365247a7 | 86059cbbba04e62a1a6b217ea043081d10c55174 | /Tutorial 2/Code/Lambda/lambda2.py | cd103bfd0dd8cfcf1ccfe882d4d4c68f8bb162ff | [] | no_license | AdityaJNair/PythonAssignment | 4d0190a8bf2576fcf7863fea5cd7b195e6060bc5 | 3c0b8cb1d47b29382bc40239fe4735034db1965e | refs/heads/master | 2021-01-20T11:50:55.030343 | 2016-08-18T05:21:20 | 2016-08-18T05:21:20 | 65,005,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #!/usr/bin/env python3
# Number of supported calculator operations (add, subtract, multiply, divide).
OPERATION_COUNT = 4
# Operation table; slots are filled by setup_lambdas().
simple_collection_of_lambdas = [None] * OPERATION_COUNT


def setup_lambdas():
    """Populate the operation table: add, subtract, multiply, divide."""
    simple_collection_of_lambdas[0] = lambda a, b: a + b   # add
    simple_collection_of_lambdas[1] = lambda a, b: a - b   # subtract
    simple_collection_of_lambdas[2] = lambda a, b: a * b   # multiply
    simple_collection_of_lambdas[3] = divide               # divide


def divide(x, y):
    """Return the true-division quotient x / y."""
    return x / y
if __name__ == '__main__':
    # Build the operation table, read two integers from stdin, then print
    # the result of each operation (add, subtract, multiply, divide) in order.
    setup_lambdas()
    number_one = int(input("Enter the first number: "))
    number_two = int(input("Enter the second number: "))
    for x in range(0, OPERATION_COUNT):
        print(simple_collection_of_lambdas[x](number_one, number_two))
| [
"adijnair@gmail.com"
] | adijnair@gmail.com |
5934e0899c738d89b998b2594786891958736c99 | 933a4f98b3ab1df987bce525d20ca904b225140f | /scripts/common/gerrit/query.py | ffea6bdf65921d27e1df63902163c310d54f0365 | [
"BSD-3-Clause"
] | permissive | mcgreevy/chromium-build | 3881c489b4d9be2f113da755487808b3593f8156 | f8e42c70146c1b668421ee6358dc550a955770a3 | refs/heads/master | 2020-12-30T12:32:15.685191 | 2017-05-17T06:58:18 | 2017-05-17T06:58:18 | 91,419,271 | 0 | 2 | NOASSERTION | 2020-07-22T09:27:35 | 2017-05-16T05:52:45 | Python | UTF-8 | Python | false | false | 5,859 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import urllib
################################################################################
# Gerrit API
################################################################################
class QueryBuilder(object):
  """Class to iteratively construct a Gerrit query string.

  This functions as a helper class to simplify explicit versus implicit
  quoting and nesting of Gerrit query strings.

  Gerrit query semantics are documented here:
  https://gerrit-review.googlesource.com/Documentation/user-search.html
  """

  def __init__(self, terms, operator):
    """
    Initializes a Gerrit query object. This should not be used directly;
    instead, one of the supplied constructors (New, NewOR, NewAND) should be
    used to create a new builder.

    Args:
      terms: (list) A list of explicit query parameters to start with. If
          'terms' is an existing Query instance, the current instance will be
          initialized as a clone.
      operator: (str) If not 'None', this term will be implicitly added after
          each explicit query term. Suggested values are 'AND' and 'OR'.
    """
    self._terms = tuple(terms)
    self._operator = operator

  @classmethod
  def _New(cls, terms, operator=None):
    # Shared constructor body: normalize/quote every term up front.
    return cls(
        [cls._prepareTerm(t) for t in terms],
        operator)

  @classmethod
  def New(cls, *terms):
    # Query with no implicit operator between terms.
    return cls._New(terms)

  @classmethod
  def NewOR(cls, *terms):
    # Query whose terms are implicitly joined with 'OR'.
    return cls._New(terms, operator='OR')

  @classmethod
  def NewAND(cls, *terms):
    # Query whose terms are implicitly joined with 'AND'.
    return cls._New(terms, operator='AND')

  @classmethod
  def _prepareTerm(cls, value):
    """Analyze the type of 'value' and generates a term from it (see 'add()')"""
    if isinstance(value, basestring):
      # 'key:value' strings become selectors; everything else is quoted.
      parts = value.split(':', 1)
      if len(parts) == 2:
        return cls._prepareSelector(parts[0], parts[1])
      else:
        return cls._prepareString(value, quoted=True)
    if isinstance(value, QueryBuilder):
      # Return its query verbatim, enclosed in parenthesis
      return list(value.termiter())
    # Try iterator
    it = None
    try:
      it = iter(value)
    except TypeError:
      pass
    if it is not None:
      return tuple(cls._prepareTerm(x) for x in it)
    # Default to stringify
    return cls._prepareString(str(value), quoted=True)

  @classmethod
  def _prepareString(cls, value, quoted=False):
    """Constructs a string term."""
    if quoted:
      value = urllib.quote(value)
    return value

  @classmethod
  def _prepareSelector(cls, key, value):
    """Constructs a selector (e.g., 'label:Code-Review+1') term"""
    # Quote key/value individually; the colon does not get quoted
    return '%s:%s' % (
        cls._prepareString(key, quoted=True),
        cls._prepareString(value, quoted=True))

  def _cloneWithTerms(self, *terms):
    """Creates a new 'QueryBuilder' with an augmented term set."""
    new_terms = self._terms + terms
    return self.__class__(new_terms, self._operator)

  def add(self, *values):
    """Adds a new query term to the Query.

    This is a generic 'add' function that infers how to add 'value' based on
    its type and contents. For more specific control, use the specialised
    'add*' functions.

    The query term ('value') may be any of the following:
    - A key:value term, in which case the key and value are quoted but the
      colon is left unquoted.
    - A single term string, in which case the entire term is quoted
    - A QueryBuilder instance, in which case it is embedded as a single term
      bounded by parenthesis.
    - An iterable of query terms, in which case each term will be formatted
      recursively and placed inside parenthesis.

    Args:
      values: The query term to add (see above).

    Returns: (Query) this Query object
    """
    terms = []
    for value in values:
      term = self._prepareTerm(value)
      if term is not None:
        terms.append(term)
    if len(terms) == 0:
      return self
    return self._cloneWithTerms(*terms)

  def addSelector(self, key, value):
    """Adds a 'key:value' term to the query.

    The 'key' and 'value' terms will be URL quoted.

    Args:
      key: (str) the key
      value: (str) the value

    Returns: (Query) this Query object
    """
    return self._cloneWithTerms(self._prepareSelector(key, value))

  def addQuoted(self, value):
    """Adds a URL-quoted term to the query.

    Args:
      value: (str) the value to quote and add

    Returns: (Query) this Query object
    """
    return self._cloneWithTerms(self._prepareString(value, quoted=True))

  def addUnquoted(self, value):
    """Directly adds a term to the query.

    Args:
      value: (str) the value to add

    Returns: (Query) this Query object
    """
    return self._cloneWithTerms(self._prepareString(value, quoted=False))

  @classmethod
  def _formatQuery(cls, terms):
    """Recursive method to convert internal nested string/list to a query"""
    formatted_terms = []
    for term in terms:
      if isinstance(term, (list, tuple)):
        if len(term) == 0:
          continue
        # Nested term groups are rendered inside parentheses.
        term = '(%s)' % (cls._formatQuery(term))
      formatted_terms.append(term)
    return '+'.join(formatted_terms)

  def termiter(self):
    """Iterator overload to iterate over individual query terms"""
    first = True
    for param in self._terms:
      # Interleave the implicit operator (if any) between explicit terms.
      if first:
        first = False
      elif self._operator is not None:
        yield self._operator
      yield param

  def __len__(self):
    """Returns: (int) the number of explicit query terms"""
    return len(self._terms)

  def __str__(self):
    """Constructs a URL-quoted query string from this query's terms"""
    return self._formatQuery(self.termiter())
| [
"zty@chromium.org"
] | zty@chromium.org |
14c4afa7c0d18bc0eb4ddc092102498ed554f5ad | 1af050f5fce1e500d688e325876107d696eb8159 | /pythonprac/bin/easy_install-3.7 | 55d12364ae5d829cf69d629922b828061c904d7d | [] | no_license | Novandev/pythonprac | 30c587b1eb6ba6c1cd9a6f66b59c03c5c98ec0d4 | 78eded9f1d6d9b2f9ffc16e57139b10cc13355e1 | refs/heads/master | 2022-12-13T20:13:53.976110 | 2020-07-27T05:04:55 | 2020-07-27T05:04:55 | 108,077,649 | 0 | 0 | null | 2022-12-08T01:30:30 | 2017-10-24T04:42:31 | Python | UTF-8 | Python | false | false | 278 | 7 | #!/Users/novan/Desktop/Github/pythonprac/pythonprac/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper for setuptools' easy_install.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip the '-script.py(w)'/'.exe' suffix so argv[0] shows the tool name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
"donovan.adams@students.makeschool.com"
] | donovan.adams@students.makeschool.com |
7dc54bea595127ad1357734ba33347c1f4cb9188 | c26e98676846ab321065e416ee8b3e2f5b1d4b43 | /PyPoll_Challenge.py | 6fa47eaa8b51b4dfb0beb98ed3f51a144c38e5f7 | [] | no_license | jaredcclarke/Election_Analysis | d510f401555515fdf8e601bfdab5b401b9ef9de9 | 512c2ee41ed8aec440e6e3b7f3459e58211a4e51 | refs/heads/main | 2022-12-26T15:11:30.816855 | 2020-10-12T06:03:51 | 2020-10-12T06:03:51 | 301,816,774 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,497 | py | # -*- coding: UTF-8 -*-
# """PyPoll Homework Challenge Solution."""
# Add our dependencies.
import csv
import os
# Add a variable to load a file from a path.
file_to_load = os.path.join("Resources", "election_results.csv")
# Add a variable to save the file to a path.
file_to_save = os.path.join("analysis", "election_analysis.txt")
# Initialize a total vote counter.
total_votes = 0
total_county_votes = 0
# Candidate Options and candidate votes.
candidate_options = []
candidate_votes = {}
# 1: Create a county list and county votes dictionary.
county_list = []
county_votes = {}
# Track the winning candidate, vote count and percentage
winning_candidate = ""
winning_count = 0
winning_percentage = 0
# 2: Track the largest county and county voter turnout.
county_largest_turnout = ""
largest_turnout_count = 0
largest_county_percentage = 0
# Read the csv and convert it into a list of dictionaries
with open(file_to_load) as election_data:
reader = csv.reader(election_data)
# Read the header
header = next(reader)
# For each row in the CSV file.
for row in reader:
# Add to the total vote count
total_votes = total_votes + 1
# Get the candidate name from each row.
candidate_name = row[2]
# If the candidate does not match any existing candidate add it to
# the candidate list
if candidate_name not in candidate_options:
# Add the candidate name to the candidate list.
candidate_options.append(candidate_name)
# And begin tracking that candidate's voter count.
candidate_votes[candidate_name] = 0
# Add a vote to that candidate's count
candidate_votes[candidate_name] += 1
# 4a: Write a decision statement that checks that the
total_county_votes = total_votes + 1
# 3: Extract the county name from each row.
county_name = row[1]
# county does not match any existing county in the county list.
if county_name not in county_list:
# 4b: Add the existing county to the list of counties.
county_list.append(county_name)
# 4c: Begin tracking the county's vote count.
county_votes[county_name] = 0
# 5: Add a vote to that county's vote count.
county_votes[county_name] += 1
# Save the results to our text file.
with open(file_to_save, "w") as txt_file:
# Print the final vote count (to terminal)
election_results = (
f"\nElection Results\n"
f"-------------------------\n"
f"Total Votes: {total_votes:,}\n"
f"-------------------------\n\n"
f"County Votes:\n")
print(election_results, end="")
txt_file.write(election_results)
# 6a: Write a repetition statement to get the county from the county dictionary.
for county_name in county_votes:
# 6b: Retrieve the county vote count.
votes_county = county_votes[county_name]
# 6c: Calculate the percent of total votes for the county.
county_vote_percentage = float(votes_county) / float(total_votes) * 100
# 6d: Print the county results to the terminal.
county_results = (
f"{county_name}: {county_vote_percentage:.1f}% ({votes_county:,})\n")
print(county_results)
# 6e: Save the county votes to a text file.
txt_file.write(county_results)
# 6f: Write a decision statement to determine the winning county and get its vote count.
if (votes_county > largest_turnout_count) and (county_vote_percentage > largest_county_percentage):
largest_turnout_count = votes_county
largest_county_percentage = county_vote_percentage
county_largest_turnout = county_name
# 7: Print the county with the largest turnout to the terminal.
largest_turnout_summary = (
f"\n"
f"-------------------------\n"
f"Largest County Turnout: {county_largest_turnout}\n"
f"-------------------------\n")
print(largest_turnout_summary)
# 8: Save the county with the largest turnout to a text file.
txt_file.write(largest_turnout_summary)
# Save the final candidate vote count to the text file.
for candidate_name in candidate_votes:
# Retrieve vote count and percentage
votes = candidate_votes.get(candidate_name)
vote_percentage = float(votes) / float(total_votes) * 100
candidate_results = (
f"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\n")
# Print each candidate's voter count and percentage to the
# terminal.
print(candidate_results)
# Save the candidate results to our text file.
txt_file.write(candidate_results)
# Determine winning vote count, winning percentage, and candidate.
if (votes > winning_count) and (vote_percentage > winning_percentage):
winning_count = votes
winning_candidate = candidate_name
winning_percentage = vote_percentage
# Print the winning candidate (to terminal)
winning_candidate_summary = (
f"-------------------------\n"
f"Winner: {winning_candidate}\n"
f"Winning Vote Count: {winning_count:,}\n"
f"Winning Percentage: {winning_percentage:.1f}%\n"
f"-------------------------\n")
print(winning_candidate_summary)
# Save the winning candidate's name to the text file
txt_file.write(winning_candidate_summary)
| [
"jared@Jareds-Air.fios-router.home"
] | jared@Jareds-Air.fios-router.home |
d547d4935857ad9e36b591b0da66d196ef409bef | 51903bfb827916bcf437cb29bf0eeefc438685e3 | /337easy.py | c18de0d1e97c766789122fad9d5b0573eb2f973d | [] | no_license | flyingdan/dailyprogrammer | 186a82e9324a7948058fe66291f9b9ab1828a8f8 | 684f3cb6d83478ddeb125805d69b81a773e22196 | refs/heads/master | 2021-07-19T06:35:20.670278 | 2017-10-25T20:10:07 | 2017-10-25T20:10:07 | 105,844,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | # https://www.reddit.com/r/dailyprogrammer/comments/784fgr/20171023_challenge_337_easy_minimize_maximize/
# Area of circle = (pi)r^2
# Circumference of circle = 2(pi)r
# Area of sector covering x of circumference = rx/2
# x - angle subtended by arc | [
"1051205+flyingdan@users.noreply.github.com"
] | 1051205+flyingdan@users.noreply.github.com |
91a9bfc31c26aa2120baa0b5004b7fc8989683ab | 2fb2291259b27291d379df07712c5449819992a5 | /config.py | 31aadb55b848c588abbf76a2d9b57469d09fbf93 | [] | no_license | ChoungJX/doweb_server | 6d4dcb68c4eab5eda8125543f4c644bdaa4d7dfb | bbbfa34713badcf7f8e58c23171269dca0085437 | refs/heads/master | 2021-07-13T15:59:40.674541 | 2021-04-27T17:23:03 | 2021-04-27T17:23:03 | 244,540,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | import datetime
class index():
    # Flask/SQLAlchemy configuration object.
    # SQLAlchemy connection string: local SQLite database file.
    SQLALCHEMY_DATABASE_URI = 'sqlite:///service.db'
    # Emit signals on object modifications (SQLAlchemy event system).
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    # Sessions expire after 2.5 hours of inactivity.
    PERMANENT_SESSION_LIFETIME = datetime.timedelta(hours=2,minutes=30)
    # Static files may be cached by clients for up to 7 days.
    SEND_FILE_MAX_AGE_DEFAULT = datetime.timedelta(days=7)
    #PERMANENT_SESSION_LIFETIME = datetime.timedelta(seconds=10)
    DEBUG = True
"zhenglinfeng43@gmail.com"
] | zhenglinfeng43@gmail.com |
824806407a297dd5bce984576799615452155162 | ee6fc02e8392ff780a4f0d1a5789776e4d0b6a29 | /code/practice/abc/abc068/a.py | b3800d499a667ec8af18bf78ae9c85b83569e746 | [] | no_license | mollinaca/ac | e99bb5d5c07159b3ef98cd7067424fa2751c0256 | 2f40dd4333c2b39573b75b45b06ad52cf36d75c3 | refs/heads/master | 2020-12-22T11:02:13.269855 | 2020-09-18T01:02:29 | 2020-09-18T01:02:29 | 236,757,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
print("ABC"+input()) | [
"morisyot@square-enix.com"
] | morisyot@square-enix.com |
048078b6d92b6a6cea61415ef5bbcb2f3fb1edbb | aef857a784b9028e6f13eddb3584660ac7575c3a | /Homeworks/Homework 2/Q5/Q5b.py | c2c7ea469d32a4a082125212b241531d5c65f572 | [] | no_license | pratikmistry0410/DSA-Spring2020 | 2a72cd21cefb0cce9e5c679be6825c3bbe74503d | 4b6d9a7a0d6ce025cdf0084de99ccface45be2cb | refs/heads/master | 2022-07-19T09:49:20.843643 | 2020-05-19T19:52:45 | 2020-05-19T19:52:45 | 259,977,141 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,145 | py | import time
# Global variable to calculate the complexity for algorithm and store the cutoff value to insertion sort
cutoff = 6
complexity_count = 0
# Function to read the dataset
def readDataset():
filename = "data0.32768" # Dataset file name
file = "/Users/learning/Documents/Pratik Mistry/Sem2/Data Structures and Algorithms/dataset-problem2-hw2/" + filename # Complete File Path
file_object = open(file,"r")
lines = file_object.readlines() # Reading all the lines from the file opened
dataset = []
for line in lines:
line = line.strip()
dataset.append(int(line)) # Casting to int as numbers are read as strings while reading file
return dataset # Return the dataset
# Function to sort the dataset using insertion sort
def insertionSort(arr,low,high):
global complexity_count # Referring global scope variable for counting complexity
for i in range(low+1,high+1,1): # Traversing each array element
temp = arr[i]
index = i
complexity_count+=1 # Increment the count complexity
while index > 0 and arr[index-1] > temp: # Sort the left subarray of the current index
complexity_count+=1 # Increment the count complexity
arr[index] = arr[index-1]
index-=1
arr[index] = temp
# Function to calculate median of the array
def medianOf3(arr,low,mid,high):
if arr[low] > arr [high]:
if arr[high] > arr[mid]:
return high
elif arr[mid]> arr[low]:
return low
else:
return mid
else:
if arr[low] > arr[mid]:
return low
elif arr[mid] > arr[high]:
return high
else:
return mid
# Function to quick sort the array with median of 3 and Cutoff to insertion method
def medianQuickSort_CutOff(data_list,low,high):
if (high - low + 1)<= cutoff: # Base condition to stop resursion while sorting the elements using insertion sort
insertionSort(data_list,low,high)
return
mid = int((low+high)/2)
median = medianOf3(data_list,low,mid,high) # Calculate the median of array
swap(data_list,low,median) # Swap median with lowest index of the array
pivot_partition = partition(data_list,low,high) # Find the pivot/partition
medianQuickSort_CutOff(data_list,low,pivot_partition-1) # Apply quick sort to left subarray
medianQuickSort_CutOff(data_list,pivot_partition+1,high) # Apply quick sort to right subarray
# Function to partition the array and returning the pivot element
def partition(arr,low,high):
global complexity_count # Referring global scope variable for counting complexity
pivot = arr[low] # Selecting lowest element as pivot
left = low
right = high
while left < right:
while arr[right] >= pivot and left < right: # Move from right towards left and check for element less than pivot
complexity_count +=1 # Increment the count complexity
right-=1
if right!=left:
arr[left] = arr[right] # Swap the smaller element at the right to the left of pivot
left+=1
while arr[left] <= pivot and left < right: # Move from left towards right and check for element greater than pivot
complexity_count +=1 # Increment the count complexity
left += 1
if right!=left:
arr[right] = arr[left] # Swap the greater element at the left to the right of pivot
right-=1
arr[left] = pivot
return left
# Function to swap the median and lowest index of the subarray
def swap(data_list,low,median):
temp = data_list[median]
data_list[median] = data_list[low]
data_list[low] = temp
# Driver/Main program to read dataset, and call quick sort with median of 3 and cutoff to insertion printing output
if __name__ == "__main__":
data_list = readDataset() # Reading the dataset
start = time.time()
medianQuickSort_CutOff(data_list,0,len(data_list)-1) # Calling Quick Sort: cutoff to insertion sort
end = time.time()
total_time = end-start # Calculating physical clock time
# Printing the outputs
print("\nThe sorted list using quick sort with cutoff to insertion sort is: ")
print(data_list)
print("\nThe total time taken for quick sort with cutoff to insertion sort is:",total_time*1000 , " ms")
print("\nThe complexity count for quick sort with cutoff to insertion sort is:",complexity_count) | [
"noreply@github.com"
] | pratikmistry0410.noreply@github.com |
4676da0d782299dd3de0559176956456b31fd9e1 | 5bf1c5acaa09e7c3604a08cadac05fd913401491 | /run_grain_facet_from_params.py | 3b6dbf7a87ba349cac4c67f36cbd37db19a1195d | [] | no_license | gregtucker/mountain_front_model | b836641521c4627e2c6f2267cb7b4449a088d87f | 49ad583c55dc20aaa2bc08729068f2ec39e56f57 | refs/heads/master | 2021-01-17T14:46:53.591850 | 2018-09-24T13:45:04 | 2018-09-24T13:45:04 | 48,451,368 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | # -*- coding: utf-8 -*-
"""
run_grain_facet_from_params.py: demonstrates how to instantiate and run
a GrainFacetSimulator from another Python script, passing parameters via
a dictionary rather than using a separate input file.
A note on time scales, in seconds:
Duration
(sec) (equiv)
--------------
1 s ~ 1 s
10 s ~ 1 min
100 s ~ 1 min
1000 s ~ 1 hr
10,000 s ~ 1 hr
10^5 s ~ 1 day (28 hrs)
10^6 s ~ 1 week (12 days)
10^7 s ~ 3 months
10^8 s ~ 3 years
Created on Sun Jun 26 09:13:46 2016
@author: gtucker
"""
import time
from grain_facet_model import GrainFacetSimulator
params = {
'number_of_node_rows' : 20,
'number_of_node_columns' : 31,
'report_interval' : 5.0,
'run_duration' : 150.0,
'output_interval' : 1000.0,
'plot_interval' : 10.0,
'uplift_interval' : 10.0,
'disturbance_rate' : 0.01,
'weathering_rate' : 0.002,
'friction_coef' : 1.0,
'fault_x' : 8.0,
'cell_width' : 1.0
}
start = time.time()
gridsize = (params['number_of_node_rows'], params['number_of_node_columns'])
gfs = GrainFacetSimulator(gridsize, **params)
gfs.run()
print('Run complete. Run time (sec):')
print(time.time() - start)
| [
"gtucker@colorado.edu"
] | gtucker@colorado.edu |
16a8903ece9e78204eed8acc202a5f650cf59dd2 | 8a932cf081cdbcdee998b2e71ff5cc57a4799cbb | /pentakillers.py | dadc28ec0e1d3ffb3f9aae8a2664550075afd334 | [
"MIT"
] | permissive | vitaum88/pentakillers | 2c0b910daf38b9b47f315361e0046e4fc6e992ff | c930493bb4ff3ced65a48492569a7f5770c88d41 | refs/heads/main | 2023-06-30T13:42:08.659121 | 2021-08-02T19:04:20 | 2021-08-02T19:04:20 | 392,063,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,711 | py | import cassiopeia as cass
import arrow
import pandas as pd
import time
import requests
'''
Go to https://developer.riotgames.com/ and create a LOGIN. After that, you'll be taken to a screen with the API key.
There are 3 types of API keys in Riot Games:
- Development API (which is the default once you create a developer account): it's a key that needs to be refreshed every 24h
- Personal API: after registering a product (I didn't do it, so the API I've been using is Development), you don't need to
refreseh your api key. There are some restrcitions in the access (such as how many calls per minute/hour etc)
- Production API: this is for a real product, deployed, etc. I didn't even read details about it because it's way out of
the scope of this project.
You can get reference for them in https://developer.riotgames.com/docs/portal#product-registration_application-process
'''
API_KEY = "RGAPI-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
REGION = 'NA' # can be any region (NA, BR, TR, etc)
def get_curr_data(pentakiller, kill, start_time,):
'''
This function returns the requested info from the pentakiller (items, position, timestamp, etc)
'''
curr_data = {
"summoner": pentakiller['summoner'],
"match id": pentakiller['match'],
"champion": pentakiller['champion'],
"region": REGION,
"x_pos": tuple(kill.get('position').values())[0],
"y_pos": tuple(kill.get('position').values())[1],
"item_1": list(map(lambda x: x if x else "empty slot", pentakiller.get("items")))[0],
"item_2": list(map(lambda x: x if x else "empty slot", pentakiller.get("items")))[1],
"item_3": list(map(lambda x: x if x else "empty slot", pentakiller.get("items")))[2],
"item_4": list(map(lambda x: x if x else "empty slot", pentakiller.get("items")))[3],
"item_5": list(map(lambda x: x if x else "empty slot", pentakiller.get("items")))[4],
"item_6": list(map(lambda x: x if x else "empty slot", pentakiller.get("items")))[5],
"timestamp": start_time
}
return curr_data
def new_kills_heatmap(self):
'''
I am MonkeyPatching the cassiopedia.core.match.Match.kills_heatmap method (because originally it didn't return the FIG image)
Now that it is being returned, I can save to a file. That method was already written by the developers of the
cassiopedia module, and I'm simply updating it for our needs.
'''
if self.map.name == "Summoner's Rift":
rx0, ry0, rx1, ry1 = 0, 0, 14820, 14881
elif self.map.name == "Howling Abyss":
rx0, ry0, rx1, ry1 = -28, -19, 12849, 12858
else:
raise NotImplemented
imx0, imy0, imx1, imy1 = self.map.image.image.getbbox()
def position_to_map_image_coords(position):
x, y = position.x, position.y
x -= rx0
x /= (rx1 - rx0)
x *= (imx1 - imx0)
y -= ry0
y /= (ry1 - ry0)
y *= (imy1 - imy0)
return x, y
import matplotlib.pyplot as plt
size = 8
fig = plt.figure(figsize=(size, size)) # updated this line
plt.imshow(self.map.image.image.rotate(-90))
for p in self.participants:
for kill in p.timeline.champion_kills:
x, y = position_to_map_image_coords(kill.position)
if p.team.side == cass.data.Side.blue:
plt.scatter([x], [y], c="b", s=size * 10)
else:
plt.scatter([x], [y], c="r", s=size * 10)
plt.axis('off')
plt.show()
return fig # added this line
cass.core.match.Match.kills_heatmap = new_kills_heatmap # updating the method
def setup(key, region):
'''
Basic setups for the cassiopedia module - logging, API_KEY and REGION
'''
cass.apply_settings({"logging": {
"print_calls": False,
"print_riot_api_key": False,
"default": "WARNING",
"core": "WARNING"
}})
cass.set_riot_api_key(API_KEY)
cass.set_default_region(REGION)
def get_week_matches(summoner):
'''
This function takes the 'summoner' object and returns the match history for the period of 7 days that the summoner played
'''
now = arrow.utcnow()
last_week = now.shift(days=-7)
since = last_week.floor('day')
until = now.floor('day')
matches = cass.get_match_history(summoner, begin_time=since, end_time=until)
return matches
def get_uri_region(region=REGION):
mapping = {
'BR':'BR1',
'EUNE':'EUN1',
'EUW':'EUW1',
'JP':'JP1',
'KR':'KR',
'LAN':'LA1',
'LAS':'LA2',
'NA':'NA1',
'OCE':'OC1',
'TR':'TR1',
'RU':'RU'
}
return mapping.get(region)
def get_diamonds(page, tier):
'''
Generator for diamond players. Since there's no implementation in the module Cass for diamond (and the # of players is vast), I
created this function. Handle with care not overload the server with thousands of requests.
'''
headers_dict = {"X-Riot-Token": API_KEY}
region_api = str.lower(get_uri_region(REGION))
URL = f"https://{region_api}.api.riotgames.com/lol/league/v4/entries/RANKED_SOLO_5x5/DIAMOND/{tier}?page={page}"
response = requests.get(URL, headers=headers_dict)
players_list = map(lambda x: x.get('summonerId'), response.json())
for player in players_list:
yield player
def get_masters():
'''
Generator for all masters in 'master league'
'''
masters = cass.get_master_league(queue=cass.Queue.ranked_solo_fives)
for master in masters:
yield master
def get_grandmasters():
'''
Generator for all grandmasters in 'grandmaster league'
'''
grandmasters = cass.get_grandmaster_league(queue=cass.Queue.ranked_solo_fives)
for gm in grandmasters:
yield gm
def get_challengers():
'''
Generator for all challengers in 'challenger league'
'''
challengers = cass.get_challenger_league(queue=cass.Queue.ranked_solo_fives)
for challenger in challengers:
yield challenger
def get_participant_info(match):
'''
This function generates a dictionary with the required data from a match if it had a pentakill
'''
pentakiller = None
for participant in match.participants:
if participant.stats.largest_multi_kill >= 5:
pentakiller = {
'summoner':participant.summoner.name,
'match':match.id,
'region':match.region.value,
'champion':participant.champion.name,
'participant':participant,
'participant_id':participant.id,
'items':list(map(lambda x: x.name if x is not None else None, participant.stats.items)),
}
return pentakiller
def get_kills_dict(participant_no, match_id):
'''
This function takes the match that had the kill and the participant that had the pentakill.
It then access the 'frames' of that match's timeline and creates a list of dictionaries of frames events (kills, drops, items built, etc)
Then I only keep the events that had the property 'killerId' (which means it's a kill that a player did, and not a NPC) and
filter only CHAMPION_KILLS (so PvP, and not PvE, for instance).
Then I save into kills_list and return that information
'''
kills_list = []
events = []
match = cass.get_match(match_id)
for frame in match.timeline.frames:
events.extend([x.to_dict() for x in frame.events])
kill_events = [x for x in events if 'killerId' in x]
kills = filter(lambda x: x['killerId']==participant_no and x['type']=='CHAMPION_KILL', kill_events)
kills_list += kills
return kills_list
def get_pentakill(kills_list):
'''
According to LoL wiki, the kills interval must be under 10 seconds until the 4th kill and then 30s (max) in the 5th kill.
That way, I'm looping through all kills and checking if the next 1, 2, 3 and 4 kills are in the time range in relation to
the 0, 1, 2 and 3 kill. The timestamp comes in miliseconds, so I have to multiply by 1000.
When it finds a group of 5 kills that fits the restrictions, breaks out of the loop and returns the first kill.
'''
for i, kill in enumerate(kills_dict):
if all([(kills_dict[i+4]['timestamp'] - kills_dict[i+3]['timestamp'] <= 1000 * 30),
(kills_dict[i+3]['timestamp'] - kills_dict[i+2]['timestamp'] <= 1000 * 10),
(kills_dict[i+2]['timestamp'] - kills_dict[i+1]['timestamp'] <= 1000 * 10),
(kills_dict[i+1]['timestamp'] - kills_dict[i]['timestamp'] <= 1000 * 10)]):
break
return kill
def generate_heatmap(match_id):
'''
Simple function that takes the match_id and saves the heatmap with the match_id in the filename.
'''
match = cass.get_match(match_id)
fig = match.kills_heatmap()
fig.savefig(f"{match_id}_heatmap.png")
setup(API_KEY, REGION)
print('Fetching data for Challengers League:\n')
counter = 0 # I added a counter so we could stop early if we wanted
MATCH_LIST = [] # this match_list is a list where I append all matches that are processed. That way, we can avoid repeated calls for similar matches
PENTAKILLERS_LIST = [] # a list with data from matches that happened to have pentakills
players = get_challengers() # assigned the challengers generator to the variable 'players'
player = next(players, None) # tried to retrieve the next challenger. if the generator is exhausted, this will return None
while player: # loops until the challengers generator is exhausted
counter += 1
print(f"\n{counter}. Evaluating Player: {player.summoner.name}")
matches = get_week_matches(player.summoner)
if not matches:
print(f"No matches in the last 7 days for {player.summoner.name}")
player = next(players, None)
continue
for i, match in enumerate(matches):
print(f"Fetching data for Match {i+1}/{len(matches)}")
if MATCH_LIST.count(match.id):
print("Already fetched this Match")
continue
MATCH_LIST.append(match.id)
pentakillers = get_participant_info(match)
if not pentakillers:
print(f"Match {match.id} did not have any pentakillers...")
continue
print(f"Pentakillers on Match {match.id}: {pentakillers}")
PENTAKILLERS_LIST.append(pentakillers)
print(f"Finished fetching data for Player: {player.summoner.name}")
print('\n--- Waiting 5 seconds to start next Player ---\n') # this is to try to avoig making too many requests and being interrupted
time.sleep(5)
player = next(players, None)
if counter == 50:
break
print("Finished fetching data for Challenger League.\n")
print('Fetching data for GrandMasters League:\n')
counter = 0
players = get_grandmasters() # assigned the grandmasters generator to the variable 'players'
player = next(players, None) # tried to retrieve the next grandmaster. if the generator is exhausted, this will return None
while player: # loops until the challengers generator is exhausted
counter += 1
print(f"\n{counter}. Evaluating Player: {player.summoner.name}")
matches = get_week_matches(player.summoner)
if not matches:
print(f"No matches in the last 7 days for {player.summoner.name}")
player = next(players, None)
continue
for i, match in enumerate(matches):
print(f"Fetching data for Match {i+1}/{len(matches)}")
if MATCH_LIST.count(match.id):
print("Already fetched this Match")
continue
MATCH_LIST.append(match.id)
pentakillers = get_participant_info(match)
if not pentakillers:
print(f"Match {match.id} did not have any pentakillers...")
continue
print(f"Pentakillers on Match {match.id}: {pentakillers}")
PENTAKILLERS_LIST.append(pentakillers)
print(f"Finished fetching data for Player: {player.summoner.name}")
print('\n--- Waiting 5 seconds to start next Player ---\n') # this is to try to avoig making too many requests and being interrupted
time.sleep(5)
player = next(players, None)
if counter == 50:
break
print("Finished fetching data for GrandMaster League.\n")
print('Fetching data for Masters League:\n')
counter = 0
players = get_masters() # assigned the challengers generator to the variable 'players'
player = next(players, None) # tried to retrieve the next master. if the generator is exhausted, this will return None
while player: # loops until the challengers generator is exhausted
counter += 1
print(f"\n{counter}. Evaluating Player: {player.summoner.name}")
matches = get_week_matches(player.summoner)
if not matches:
print(f"No matches in the last 7 days for {player.summoner.name}")
player = next(players, None)
continue
for i, match in enumerate(matches):
print(f"Fetching data for Match {i+1}/{len(matches)}")
if MATCH_LIST.count(match.id):
print("Already fetched this Match")
continue
MATCH_LIST.append(match.id)
pentakillers = get_participant_info(match)
if not pentakillers:
print(f"Match {match.id} did not have any pentakillers...")
continue
print(f"Pentakillers on Match {match.id}: {pentakillers}")
PENTAKILLERS_LIST.append(pentakillers)
print(f"Finished fetching data for Player: {player.summoner.name}")
print('\n--- Waiting 5 seconds to start next Player ---\n') # this is to try to avoig making too many requests and being interrupted
time.sleep(5)
player = next(players, None)
if counter == 50:
break
print("Finished fetching data for Master League.\n")
print('Fetching data for Diamond League:\n')
counter = 0
players = get_diamonds(page=1, tier='I') # assigned the challengers generator to the variable 'players'
player = next(players, None) # tried to retrieve the next diamond. if the generator is exhausted, this will return None
while player: # loops until the challengers generator is exhausted
counter += 1
summoner = cass.get_summoner(id=player)
print(f"\n{counter}. Evaluating Player: {summoner.name}")
matches = get_week_matches(summoner)
if not matches:
print(f"No matches in the last 7 days for {summoner.name}")
player = next(players, None)
continue
for i, match in enumerate(matches):
print(f"Fetching data for Match {i+1}/{len(matches)}")
if MATCH_LIST.count(match.id):
print("Already fetched this Match")
continue
MATCH_LIST.append(match.id)
pentakillers = get_participant_info(match)
if not pentakillers:
print(f"Match {match.id} did not have any pentakillers...")
continue
print(f"Pentakillers on Match {match.id}: {pentakillers}")
PENTAKILLERS_LIST.append(pentakillers)
print(f"Finished fetching data for Player: {summoner.name}")
print('\n--- Waiting 5 seconds to start next Player ---\n') # this is to try to avoig making too many requests and being interrupted
time.sleep(5)
player = next(players, None)
if counter == 50:
break
print("Finished fetching data for Diamond League.\n")
data = []
'''
general printing and returning images for the pentakills
'''
for pentakiller in PENTAKILLERS_LIST:
print(f"Fetching data for Pentakiller '{pentakiller['summoner']}' in Match {pentakiller['match']}:")
print("Generating kills heatmap...",end=' ')
generate_heatmap(pentakiller['match'])
print("Done!")
kills_dict = get_kills_dict(pentakiller['participant_id'], pentakiller['match'])
kill = get_pentakill(kills_dict)
minutes = kill['timestamp']//60000
seconds = int(60*(kill['timestamp']/60000 - minutes))
start_time = f"{minutes:02}:{seconds:02}"
print(f"The Pentakill started at the {start_time} mark, with coordinates {tuple(kill.get('position').values())}.")
print(f"The player finished the game with the following items:\n{pentakiller.get('items')}")
data.append(get_curr_data(pentakiller, kill, start_time))
print('\n')
# exporting datat to a csv file.
pd.DataFrame(data).to_csv('pentakills.csv', index=False, header=True, encoding='utf-8')
| [
"noreply@github.com"
] | vitaum88.noreply@github.com |
0d8d56fe358d827f22efe436159e711e93ae5a8c | 87ba7263b2dcffcd952d0dee8dd42a9cc5142286 | /tensforflow_mnist.py | 4568bbd70fd6d989aa360fb55688e845d8d71b85 | [] | no_license | kpodlaski/NeuralNetworks2018 | 40064acf14522229a66333523a3cc7342ce507b4 | 1d143bc51bce94e80eb9e9d6c9b465ef174689ee | refs/heads/master | 2020-04-01T13:39:57.231298 | 2019-01-15T11:02:31 | 2019-01-15T11:02:31 | 153,261,867 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | import tensorflow as tf
import numpy as np
net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28,28)),
tf.keras.layers.Dense(30,activation=tf.nn.sigmoid),#tf.nn.relu
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
net.compile(optimizer=tf.train.GradientDescentOptimizer(0.5),
loss='sparse_categorical_crossentropy',#tf.keras.losses.mean_absolute_error , #
metrics=['accuracy']#[tf.keras.metrics.mean_squared_error]
)
mnist = tf.keras.datasets.mnist
(train_inputs, train_labels), (test_inputs, test_labels) = mnist.load_data()
train_inputs, test_inputs = train_inputs / 255.0, test_inputs / 255.0
net.fit(train_inputs, train_labels, epochs=10, batch_size=100)
test_loss, test_acc = net.evaluate(test_inputs, test_labels)
print('Test accuracy:', test_acc)
print('Test loss:', test_loss)
predictions = net.predict(test_inputs)
print("Result : ")
good_response = 0
for elem in range(0,len(test_inputs)):
if np.argmax(predictions[elem]) == test_labels[elem]:
good_response+=1
print(predictions[len(test_inputs)-1])
print(np.argmax(predictions[len(test_inputs)-1]))
print(test_labels[len(test_inputs)-1])
print(good_response/len(test_inputs)*100.0) | [
"podlaski@uni.lodz.pl"
] | podlaski@uni.lodz.pl |
4e2cb97de7241505f45b49f05c410dc4676b145b | e94dc3776101d5f3083cb2321ff47f76a269791d | /examples_save/examples_save/medusaCsvProcess.py | 0364397467cc80c76fad33293461c82638994854 | [] | no_license | nanjunda/fc_iscsi_scripts | d004b57c1d5b95eb9a1f196238d887640ecaf96f | 8ea16efb39833daa52223a1fcbd9a8dabe84589e | refs/heads/nanjunda-dev | 2021-09-04T10:11:49.252915 | 2018-01-17T21:38:59 | 2018-01-17T21:38:59 | 112,405,356 | 0 | 0 | null | 2018-01-17T21:35:36 | 2017-11-29T00:26:22 | Python | UTF-8 | Python | false | false | 499 | py | import csv
with open('x.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
next(readCSV)
count = 0
sum4=0.0
sum8=0.0
sum11=0.0
for row in readCSV:
s = str(row[4])
if "Avg" not in s:
count += 1
print(row[4],row[8],row[11])
sum4 += float(row[4])
sum8 += float(row[8])
sum11 += float(row[11])
avg4=sum4/count
avg8=sum8/count
avg11=sum11/count
print (avg4, avg8, avg11)
| [
"noreply@github.com"
] | nanjunda.noreply@github.com |
866ea041035d1cf1c46a7455c4a1351d919e81e8 | 5b3eb673f6597b90acc98b48852417982924c5d6 | /users/views.py | 1919f9cdb270a67aa66acf8dce15ecc79da27b2b | [] | no_license | ritiksingh8/Buy-It-An-Ecommerce-WebApplication | 3e78d8ca35d167660bdbc5092ddabef41211335c | 6b9fd48e30660fd58ee0b6f3256fdab1b6a9b9a9 | refs/heads/master | 2020-12-01T18:13:46.229547 | 2020-02-16T12:34:15 | 2020-02-16T12:34:15 | 230,723,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,594 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from .forms import UserRegisterForm
from shop.models import Products
from django.contrib.auth.models import User
from .models import Cart
from django.contrib.auth.decorators import login_required
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'Account created for {username}!')
return redirect('index')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form': form})
@login_required
def new_cart(request,param1,param2):
user=request.user
product=Products.objects.filter(title=param2).first()
cart_row=Cart.objects.filter(user=user).filter(product=product).first()
if param1=='add':
if cart_row is None:
new_cart_row=Cart(user=user,product=product)
new_cart_row.save()
else:
cart_row.quantity=cart_row.quantity+1
cart_row.save()
print("in the else")
elif param1=='remove':
cart_row.quantity=cart_row.quantity-1
cart_row.save()
if cart_row.quantity==0:
cart_row.delete()
if len(Cart.objects.filter(user=user))==0:
empty=True
else:
empty=False
return render(request,'users/cart.html',{'cart_items':Cart.objects.filter(user=user),'add':'add','remove':'remove','empty':empty})
| [
"ritik.singh@spit.ac.in"
] | ritik.singh@spit.ac.in |
67ef6df236a7de311ac3d78f352c53cc03be5d79 | e278055ba8efb122e88e6af2bff1e56b207942de | /extractors.py | f48d8f939973ded002832670ca897cbac198a5e1 | [] | no_license | ilay32/wals-project | 3e7f789bda1874909c02c2ea204b66de672f3a7f | 9ff9d16326d4af88655efb952b98a304bb80d028 | refs/heads/master | 2021-04-06T10:59:34.236027 | 2018-03-16T08:40:51 | 2018-03-16T08:40:51 | 83,216,194 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | import re,logging
### globals ####
"""
match first numeric sequence as group(1), and make sure there are no
other numbers after it.
"""
numerizer = re.compile("(^\d+)([^\d]*$)")
"""
(tmp?) fallback for failed numerization.
"""
simplenumerizer = re.compile("(^\d+)")
### extractors ###
def natural(c):
"""
just get the numeric value of the cell
"""
return numerize(c)
def mult2bin(target_value,value):
"""
binarize a multi-valued feature, returning -1 if the value is n,
and 1 otherwise, returns the function that does that
"""
def which(c):
return value if numerize(c) == target_value else -1*value
return which
### helpers ###
def numerize(txt):
"""
if there's no match, it means there is more
than one numeric sequence in the cell, in which
case, print the cell contents so, we can see what's what
"""
m = numerizer.match(txt)
if m:
return int(m.group(1))
else:
logging.warning("can't numerize cell contents: %s",txt)
return int(simplenumerizer.match(txt).group(1))
| [
"silayster@gmail.com"
] | silayster@gmail.com |
106c381e4786484cc282efd00c668dcd74a0a99b | 420b062ab05f6dcbe718acfbb7fa92e380b03d25 | /runit_sfdc.py | de72019a86fd4cd675992b4640518d513d99dc89 | [] | no_license | fkoncz/sfdc---PoC-code | e130e5f8addfbd7dcbca55b5a93a3657baa694b6 | 076f27cddac17a7be65a04469cd509373ae62156 | refs/heads/master | 2022-04-03T10:23:24.234233 | 2020-02-14T03:10:00 | 2020-02-14T03:10:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,884 | py | import requests
import xml.dom.minidom
import collections
import time
import os
import zipfile
import base64
import beatbox
from runit_sfdc_ui import *
from random import choice
from string import ascii_lowercase
from Config.config_sfdc import *
from simple_salesforce import Salesforce
sf = Salesforce(username=ADMIN1_USERNAME, password=ADMIN1_PASSWORD, security_token=ADMIN1_TOKEN)
def main():
# -----Admin 1--Getting global Administrator Session ID.
admin_sid = get_user_sid(ADMIN1_USERNAME, ADMIN1_PTK)
# Admin 1--Making sure we will be able to manipulate without any identification
set_ip_range(sysadmin_profile_name, admin_sid)
# -----Super-Admin-----
# -----Admin 1--Because of weak lockout policy, it triggers
# Security Control: Lockout effective period -super-admin
change_lockout_period(admin_sid)
# -----Admin 1--Disable clickjack protection for customer Visualforce pages with standard headers
disable_clickjack_with_standard_headers(admin_sid)
# -----Admin 1--Creating 4 users - due to license limitations,
# the other 2 will be Force.com Free users.
create_user(LSL_USER1_USERNAME, LSL_USER1_ALIAS, LSL_USER1_USERNAME, LSL_USER1_USERNAME, 'Standard Platform User')
create_user(LSL_USER2_USERNAME, LSL_USER2_ALIAS, LSL_USER2_USERNAME, LSL_USER2_USERNAME, 'Force.com - Free User')
create_user(LSL_USER3_USERNAME, LSL_USER3_ALIAS, LSL_USER3_USERNAME, LSL_USER3_USERNAME, 'Force.com - Free User')
create_user(LSL_USER4_USERNAME, LSL_USER4_ALIAS, LSL_USER4_USERNAME, LSL_USER4_USERNAME, 'Force.com - App'
'Subscription User')
# -----Admin 1--set IP range (for admin profile) - making sure we
# will be able to manipulate without any identification
set_ip_range(sysadmin_profile_name, admin_sid)
# Path 1: Account compromise -- User1
# -----User 1--brute force login, Attacker brute forced account successfully,
# triggers Threat: Failed login(e.g. 5 average, 2x)
switch_user_profile_or_role(LSL_USER1_USERNAME, 'System Administrator')
# failUserLogins(SFDC_TEST_USER1, "X", num_failed_attempts)
# -----User 1--Login from remote triggers UBA Risk User: High, activity from unseen browser,
# device, OS, unseen location(including unseen IPs v2) (score approx: 45-50)
# failUserLogins(SFDC_TEST_USER1, SFDC_TEST_USER1_PASSWORD, num_failed_attempts, tor_proxy_ip,
# tor_proxy_port, "Mozilla/1.0 (Windows CE 0.1; Win63; x63; rv:1.1) GeckoX/20100101 Firebug/0.1")
# -----User 1-----UBA Risk User: 10x High, Data export --- Instead of this,
# Attacker set Trusted IP Range to enable backdoor access, triggers Policy alert.
# To verify, in the UI this is at "Network Access"
set_trusted_ip_range(howmany_trusted_ip_range_sets, 'lsl-TrustRange-' + random_string_generator(4), '192.168.0.11',
'192.168.0.200', LSL_USER1_USERNAME, default_user_password)
switch_user_profile_or_role(LSL_USER1_USERNAME, 'Standard Platform User')
# Path 2: Data exfiltration -- User2
# -----User 2--Grant Admin permissions
switch_user_profile_or_role(LSL_USER2_USERNAME, 'System Administrator')
# -----User 2--60+(configurable) Mass Transfer to another account,
# triggers UBA Risk User: Medium, Mass Transfer+After-hr.
# Creating given numbers of mockup account data to have something to transfer.
LSL_USER2_FULLNAME = get_user_full_name(LSL_USER2_USERNAME)
admin1_full_name = get_user_full_name(ADMIN1_USERNAME)
create_mockup_account(howManyMockupAccounts, ADMIN1_USERNAME)
mass_transfer(LSL_USER2_USERNAME, default_user_password, admin1_full_name, LSL_USER2_FULLNAME,
how_many_mass_transfers)
switch_user_profile_or_role(LSL_USER2_USERNAME, 'Force.com - Free User')
# Path#3: Insider Threat--User3
# -----User 3--Admin grant excessive permissions to insider user, triggers Policy alert:
# Profile/Change user permissions
switch_user_profile_or_role(LSL_USER3_USERNAME, 'System Administrator')
# -----User 3--We deploy new Sharing Rules as an insider threat.
# We have some static XML content and if we want to add multiple rules,
# don't want to add the header all the time.
# create some mockup sharing rules.
create_zip_objects()
add_lead_sharing_rule(how_many_sharing_rules, "Read")
close_rules()
deploy_zipfile(LSL_USER3_USERNAME, default_user_password)
# -----User 3--3-Insider user is corrupted by a vendor, he helped vendor to extend
# contract term, triggers Policy alert: Contract Create+Update
response = create_mockup_contract(LSL_USER3_USERNAME, "lsl-Account-firstMockup", "3", "2016-03-01")
update_contract(response['id'])
# -----User 3--4-Before termination, insider user also Mass deleting data,
# triggers UBA Risk User: High, Mass Delete
for x in range(0, howManyMassDelete):
create_mockup_account(howManyMockupAccounts, LSL_USER3_USERNAME)
mass_delete(LSL_USER3_USERNAME, default_user_password)
print("Mass Delete iteration nr.: " + str(x))
# -----User 3--Policy alert: Change user profile
switch_user_profile_or_role(LSL_USER3_USERNAME, 'Force.com - Free User')
# Path 4: Insider Threat--User4
# -----User 4--UBA Risk User: 20x Medium, Reports export, Report Run
# 2 - The 3rd party has the permission to access sensitive data and function,
# he run and export the reports, sale to competitor, triggers UBA Risk User: Medium,
# Reports exported, Report Run
# 3 - The 3rd party also export data, triggers UBA Risk User: High, Data Export
# 4 - For all report activities by the 3rd party, stand out in KSI:
# Top customer report run and Top customer report exported
switch_user_profile_or_role(LSL_USER4_USERNAME, 'System Administrator')
report_name = create_report(howManyReportsCreate, LSL_USER4_USERNAME, default_user_password, "Accounts")
export_report(how_many_export_reports, report_name, LSL_USER4_USERNAME, default_user_password)
switch_user_profile_or_role(LSL_USER4_USERNAME, 'Force.com - App Subscription User')
# Creating a user
def create_user(username, alias, email, last_name, profile_name):
    """Create a Salesforce user and default its password.

    If creation fails (typically because the username already exists), fall
    back to re-activating the existing user; as a last resort just reset the
    password.

    :param username: login name for the new user
    :param alias: short alias shown in the Salesforce UI
    :param email: contact e-mail address
    :param last_name: user's last name
    :param profile_name: name of the Profile to assign (resolved to an Id)
    :return: None
    """
    profile_id = get_profile_id(profile_name)
    try:
        # NOTE(review): the Salesforce REST field is normally 'ProfileId';
        # the 'profile_id' key below looks like a mechanical rename — confirm
        # the profile actually gets assigned.
        sf.User.create({'userName': username,
                        'Alias': alias,
                        'Email': email,
                        'lastName': last_name,
                        'EmailEncodingKey': 'UTF-8',
                        'TimeZoneSidKey': 'America/New_York',
                        'LocaleSidKey': 'en_US',
                        'profile_id': profile_id,
                        'LanguageLocaleKey': 'en_US'})
        set_password(username, default_user_password)
    except Exception as e:
        # Creation failed — assume the user already exists and try to
        # re-activate it before defaulting the password.
        try:
            activate_user(username)
            set_password(username, default_user_password)
        except Exception as e:
            # Activation failed too; best effort: still reset the password.
            set_password(username, default_user_password)
def get_user_full_name(username):
    """Return "First Last" for *username*, or just the last name when the
    FirstName field is null.

    :param username: Salesforce login name to look up
    :return: full name string
    """
    userinfo = sf.query("SELECT FirstName, LastName FROM User WHERE username = '" + username + "'")
    # The query result is an OrderedDict whose third value is the list of
    # matching records.
    records = list(collections.OrderedDict(userinfo).values())[2]
    # Read BOTH name fields from the same record. The original popped the
    # records list twice, which raises IndexError whenever the query returns
    # a single row (the normal case for a unique username).
    record_values = list(collections.OrderedDict(records.pop()).values())
    firstname = record_values[1]
    lastname = record_values[2]
    if firstname is None:
        fullname = lastname
    else:
        fullname = firstname + " " + lastname
    return fullname
# Resetting a user's password
def set_password(username, default_user_password):
    """Reset *username*'s password to *default_user_password* (best effort).

    Uses a separate beatbox client logged in as the admin, because the
    simple-salesforce client does not expose setPassword.

    :param username: login name whose password is reset
    :param default_user_password: the password to set
    :return: None
    """
    uid = get_user_id(username)
    print("\nDefaulting Password for user with UID: " + uid + "\n")
    sf2 = beatbox.PythonClient()
    sf2.login(ADMIN1_USERNAME, ADMIN1_PASSWORD)
    try:
        sf2.setPassword(uid, default_user_password)
    except Exception as e:
        # Still best effort, but report the failure instead of silently
        # swallowing it (the original did `pass`, hiding real problems).
        print("Could not set password for " + username + ": " + str(e))
# Login for all users, keep session Ids
def get_user_sid(username, password):
    """Log in through the SOAP partner endpoint and return the session Id.

    :param username: Salesforce login name
    :param password: matching password
    :return: session Id string, or None when the login failed
    """
    login_headers = {
        'content-type': 'text/xml',
        'charset': 'UTF-8',
        'SOAPAction': 'login'
    }
    login_envelope = """
    <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:urn="urn:enterprise.soap.sforce.com">
    <soapenv:Header>
    </soapenv:Header>
    <soapenv:Body>
    <urn:login>
    <urn:username>""" + '' + username + '' + """</urn:username>
    <urn:password>""" + '' + password + '' + """</urn:password>
    </urn:login>
    </soapenv:Body>
    </soapenv:Envelope>
    """
    login_response = requests.post(partnerURL, login_envelope, headers=login_headers)
    dom = xml.dom.minidom.parseString(login_response.text)
    user_sid_result = dom.getElementsByTagName('sessionId')
    # A failed login comes back as a SOAP fault with no <sessionId> element
    # at all; the original indexed [0] unconditionally and crashed with
    # IndexError instead of reporting the error.
    if not user_sid_result or user_sid_result[0].firstChild is None:
        print("\nI wasn't successful. Error was:\n")
        print(login_response.text + '\n')
        return None
    user_sid = user_sid_result[0].firstChild.nodeValue
    return user_sid
# This is useful in general to manipulate any user's details
def get_user_id(username):
    """Return the Salesforce Id of the user with the given username."""
    # The query response is an OrderedDict whose third value holds the list
    # of matching records; each record is itself an OrderedDict whose second
    # value is the Id field.
    response = sf.query("SELECT Id FROM User WHERE username = '" + username + "'")
    records = list(collections.OrderedDict(response).values())[2]
    record = collections.OrderedDict(records.pop())
    return list(record.values())[1]
def get_user_profile_id(which_user):
    """Return the ProfileId currently assigned to *which_user*.

    :param which_user: Salesforce login name
    :return: ProfileId string, or None when the user was not found
    """
    query = sf.query("SELECT ProfileId FROM User where username = '" + which_user + "'")
    dictitems = list(collections.OrderedDict(query).values())[2]
    if len(dictitems) == 0:
        # The original message was copy-pasted from get_profile_id() and
        # wrongly referred to the System Administrator profile.
        print("Could not get Profile Id for user " + which_user + ". Continuing...\n")
        return None
    else:
        prof_id = list(collections.OrderedDict(dictitems.pop()).values())[1]
        return prof_id
def get_profile_id(profile_name):
    """Return the Id of the Profile named *profile_name*.

    :param profile_name: Profile name to resolve
    :return: Profile Id string, or None when no such profile exists
    """
    query = sf.query("SELECT Id FROM Profile WHERE name = '" + profile_name + "'")
    dictitems = list(collections.OrderedDict(query).values())[2]
    if len(dictitems) == 0:
        # Report the profile that was actually requested instead of the
        # hardcoded "System Administrator" of the original message.
        print("Could not get Profile Id for profile '" + profile_name + "'. Continuing...\n")
        return None
    else:
        prof_id = list(collections.OrderedDict(dictitems.pop()).values())[1]
        return prof_id
def switch_user_profile_or_role(user1, user1_profile, user2_profile=None, how_many_times=None):
    """Assign a profile to a user, or bounce the user between two profiles.

    With only *user1_profile* given, performs a single one-way switch.
    With *user2_profile* and *how_many_times* given, flips the user to
    *user2_profile* and back *how_many_times* times (used to generate
    "change user profile" audit events).

    :param user1: login name of the user whose profile is changed
    :param user1_profile: profile name to assign (or to return to)
    :param user2_profile: optional second profile to alternate with
    :param how_many_times: optional number of back-and-forth switches
    :return: None
    """
    if how_many_times is None:
        user_id = get_user_id(user1)
        switch_to_profile_id = get_profile_id(user1_profile)
        sf.User.update(user_id, {'ProfileId': '' + switch_to_profile_id + ''})
    else:
        while how_many_times > 0:
            user_id = get_user_id(user1)
            # NOTE(review): this result is discarded — the call only issues an
            # extra query; presumably kept for its API-activity side effect.
            get_user_profile_id(user1)
            switch_between1 = get_profile_id(user1_profile)
            switch_between2 = get_profile_id(user2_profile)
            sf.User.update(user_id, {'ProfileId': '' + switch_between2 + ''})
            print("The " + user1 + "'s profile switched from " + switch_between1 + " to " + switch_between2 +
                  " Profile Id.")
            # Same: discarded lookup between the two updates.
            get_user_profile_id(user1)
            sf.User.update(user_id, {'ProfileId': '' + switch_between1 + ''})
            print("The " + user1 + "'s profile switched from " + switch_between2 + " to " + switch_between1 +
                  " Profile Id.")
            print("UserProfile switches left: " + str(how_many_times - 1))
            how_many_times -= 1
# Reactivate a user if existing
def activate_user(username):
    """Re-activate *username* if it exists but is currently inactive.

    :param username: Salesforce login name
    :return: None
    """
    userinfo = sf.query("SELECT IsActive FROM User WHERE username = '" + username + "'")
    # The original also computed `(userinfo.values())[2]` here; the result
    # was never used and a dict view is not indexable on Python 3, so the
    # dead line has been removed.
    dictitems = list(collections.OrderedDict(userinfo).values())[2]
    is_active = list(collections.OrderedDict(dictitems.pop()).values())[1]
    if not is_active:
        print("User exists, but is not active. Activating.")
        sf.User.update(get_user_id(username), {'IsActive': 'true'})
    else:
        print("User is active, no need to re-enable.")
def create_mockup_account(how_many, owner):
    """Create *how_many* mock Accounts owned by *owner*.

    The first account always gets the fixed name 'lsl-Account-firstMockup'
    (other routines reference it by that name); the remaining how_many - 1
    accounts get random 'lsl-Account-XXXXXXXX' names.

    :param how_many: total number of accounts to create (>= 1)
    :param owner: login name of the owning user
    :return: list of the account names that were created
    """
    owner_id = get_user_id(owner)
    # NOTE(review): the Salesforce field is normally 'OwnerId'; the
    # 'owner_id' key looks like a mechanical rename — confirm ownership is
    # actually assigned.
    sf.Account.create({'type': 'Account',
                       'Name': 'lsl-Account-firstMockup',
                       'Website': 'http://www.IamJustAtestWebSite.com',
                       'owner_id': '' + owner_id + ''})
    acc_list = ['lsl-Account-firstMockup']
    how_many -= 1
    while how_many > 0:
        test_data = "lsl-Account-" + random_string_generator(8)
        owner_id = get_user_id(owner)
        sf.Account.create({'type': 'Account',
                           'Name': '' + test_data + '',
                           'Website': 'http://www.IamJustAtestWebSite.com',
                           'owner_id': '' + owner_id + ''})
        print("Some mockup Account " + test_data + " for user: " + owner + " created.")
        acc_list.append(test_data)
        how_many -= 1
    print("Following mockup Accounts have been created: " + str(acc_list))
    return acc_list
def get_account_id(account_name):
    """Return the Salesforce Id of the Account named *account_name*."""
    response = sf.query("SELECT Id FROM Account WHERE Name = '" + account_name + "'")
    # Unpack the nested OrderedDict response step by step instead of the
    # original one-liner: third value -> record list -> last record -> Id.
    records = list(collections.OrderedDict(response).values())[2]
    record = collections.OrderedDict(records.pop())
    return list(record.values())[1]
def create_mockup_contract(owner, account_name, contract_term, start_date):
    """Create a mock Contract on the named Account.

    :param owner: login name of the contract owner
    :param account_name: name of an existing Account (resolved to an Id)
    :param contract_term: contract term in months, as a string
    :param start_date: contract start date, 'YYYY-MM-DD'
    :return: the simple-salesforce create() response (contains the new 'id')
    """
    account_id = get_account_id(account_name)
    # NOTE(review): Salesforce's field is normally 'OwnerId'; confirm the
    # lowercase 'owner_id' key is accepted.
    data1 = sf.Contract.create({'AccountId': account_id,
                                'ContractTerm': contract_term,
                                'StartDate': start_date,
                                'owner_id': get_user_id(owner)})
    print("Mockup contract for Account " + account_id + " created.")
    return data1
def update_contract(user_id):
    """Extend the given contract's term to 75 months.

    :param user_id: despite its name, this is a Contract Id (callers pass
        the 'id' from create_mockup_contract's response), not a User Id.
    :return: None
    """
    sf.Contract.update(user_id, {'ContractTerm': '75'})
def set_ip_range(profile_name, admin_sid):
    """Open the login IP range of *profile_name* to 0.0.0.0-255.255.255.255
    via the Metadata API.

    :param profile_name: full name of the Profile to update
    :param admin_sid: admin session Id used for the SOAP call
    :return: None (also None on failure, after printing the response)
    """
    update_metadata_envelope = """
    <env:Envelope xmlns:env="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <env:Header>
    <urn:SessionHeader xmlns:urn="http://soap.sforce.com/2006/04/metadata">
    <urn:sessionId>""" + admin_sid + """</urn:sessionId>
    </urn:SessionHeader>
    </env:Header>
    <env:Body>
    <updateMetadata xmlns="http://soap.sforce.com/2006/04/metadata">
    <metadata xsi:type="Profile">
    <fullName>""" + profile_name + """</fullName>
    <loginIpRanges>
    <endAddress>255.255.255.255</endAddress>
    <startAddress>0.0.0.0</startAddress>
    </loginIpRanges>
    </metadata>
    </updateMetadata>
    </env:Body>
    </env:Envelope>
    """
    soap_response = requests.post(metadata_url, update_metadata_envelope, headers=updateMetadataHeader)
    dom = xml.dom.minidom.parseString(soap_response.text)
    result_element = dom.getElementsByTagName('success')
    result_value = result_element[0].firstChild.nodeValue
    if len(result_value) == 0:
        print("I've encountered an issue. Request response:\n")
        print(soap_response.text + "\n")
        return None
    else:
        # <success> carries the strings 'true'/'false'. Any non-empty string
        # is truthy, so the original `if nodeValue:` reported success even
        # when the server answered 'false'. Compare the text explicitly.
        if result_value == 'true':
            print("Login IP range successfully set.")
        else:
            print("I've encountered an issue. Request response:\n")
            print(soap_response.text + "\n")
            return None
def change_lockout_period(admin_sid):
    """Set the org-wide password lockout interval (module-level
    `lockout_interval`) via the Metadata API SecuritySettings.

    :param admin_sid: admin session Id used for the SOAP call
    :return: None (also None on failure, after printing the response)
    """
    soap_body = """
    <env:Envelope xmlns:env="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <env:Header>
    <urn:SessionHeader xmlns:urn="http://soap.sforce.com/2006/04/metadata">
    <urn:sessionId>""" + admin_sid + """</urn:sessionId>
    </urn:SessionHeader>
    </env:Header>
    <env:Body>
    <updateMetadata xmlns="http://soap.sforce.com/2006/04/metadata">
    <metadata xsi:type="SecuritySettings">
    <fullName>*</fullName>
    <passwordPolicies>
    <lockoutInterval>""" + lockout_interval + """</lockoutInterval>
    </passwordPolicies>
    </metadata>
    </updateMetadata>
    </env:Body>
    </env:Envelope>
    """
    soap_result = requests.post(metadata_url, soap_body, headers=updateMetadataHeader)
    dom = xml.dom.minidom.parseString(soap_result.text)
    result_element = dom.getElementsByTagName('success')
    result_value = result_element[0].firstChild.nodeValue
    if len(result_value) == 0:
        print("I've encountered an issue. Request response:\n")
        print(soap_result.text + "\n")
        return None
    else:
        # <success> holds 'true'/'false' as text; 'false' is truthy, so the
        # original reported success on failed updates. Compare explicitly.
        if result_value == 'true':
            print("New Lockout time successfully set.")
        else:
            print("I've encountered an issue. Request response:\n")
            print(soap_result.text + "\n")
            return None
def disable_clickjack_with_standard_headers(admin_sid):
    """Disable clickjack protection for customer Visualforce pages with
    standard headers, via the Metadata API SecuritySettings.

    :param admin_sid: admin session Id used for the SOAP call
    :return: None (also None on failure, after printing the response)
    """
    soap_body = """
    <env:Envelope xmlns:env="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <env:Header>
    <urn:SessionHeader xmlns:urn="http://soap.sforce.com/2006/04/metadata">
    <urn:sessionId>""" + admin_sid + """</urn:sessionId>
    </urn:SessionHeader>
    </env:Header>
    <env:Body>
    <updateMetadata xmlns="http://soap.sforce.com/2006/04/metadata">
    <metadata xsi:type="SecuritySettings">
    <fullName>*</fullName>
    <sessionSettings>
    <enableClickjackNonsetupUser>false</enableClickjackNonsetupUser>
    </sessionSettings>
    </metadata>
    </updateMetadata>
    </env:Body>
    </env:Envelope>
    """
    soap_result = requests.post(metadata_url, soap_body, headers=updateMetadataHeader)
    dom = xml.dom.minidom.parseString(soap_result.text)
    result_element = dom.getElementsByTagName('success')
    result_value = result_element[0].firstChild.nodeValue
    if len(result_value) == 0:
        print("I've encountered an issue. Request response:\n")
        print(soap_result.text + "\n")
        return None
    else:
        # <success> holds 'true'/'false' text; 'false' is a truthy non-empty
        # string, so the original printed the success message on failure.
        if result_value == 'true':
            print("Successfully disabled clickjack protection for customer Visualforce pages with standard headers.")
        else:
            print("I've encountered an issue. Request response:\n")
            print(soap_result.text + "\n")
            return None
def random_string_generator(nr):
    """Return a random string of *nr* lowercase ASCII letters."""
    letters = []
    for _ in range(nr):
        letters.append(choice(ascii_lowercase))
    return ''.join(letters)
def create_zip_objects():
    """Initialize the metadata deployment workspace.

    Writes the opening <SharingRules> header to the module-level `rulefile`
    (creating its directory if needed) and a package.xml manifest that
    deploys all SharingRules at API version 35.0.

    :return: None
    """
    if not os.path.exists(os.path.dirname(rulefile)):
        try:
            os.makedirs(os.path.dirname(rulefile))
        except Exception as e:
            # Best effort: ignore races / permission issues and let the
            # open() below fail loudly if the directory really is missing.
            pass
    with open(rulefile, "w") as f:
        f.write("""<?xml version="1.0" encoding="UTF-8"?>
    <SharingRules xmlns="http://soap.sforce.com/2006/04/metadata">""" + "\n")
    # NOTE(review): this assumes './tmp/unpackaged' already exists (only the
    # rulefile's directory is created above) — confirm rulefile lives there.
    with open('./tmp/unpackaged/package.xml', "w") as f:
        f.write("""<?xml version="1.0" encoding="UTF-8"?>
    <Package xmlns="http://soap.sforce.com/2006/04/metadata">
    <types>
    <members>*</members>
    <name>SharingRules</name>
    </types>
    <version>35.0</version>
    </Package>""" + "\n")
def add_lead_sharing_rule(how_many, access_level):
    """Append *how_many* randomly-named sharingOwnerRules entries to the
    module-level `rulefile`.

    :param how_many: number of rules to generate
    :param access_level: accessLevel value for each rule (e.g. "Read")
    :return: None
    """
    while how_many > 0:
        full_name = "lsl_" + random_string_generator(4)
        label = "lsl-" + random_string_generator(5)
        with open(rulefile, "a") as f:
            # The Metadata API element is <fullName> (camelCase, like every
            # surrounding tag); the original wrote <full_name>, which the
            # deploy rejects as an invalid element.
            f.write("""    <sharingOwnerRules>
    <fullName>""" + full_name + """</fullName>
    <accessLevel>""" + access_level + """</accessLevel>
    <label>""" + label + """</label>
    <sharedTo>
    <allInternalUsers></allInternalUsers>
    </sharedTo>
    <sharedFrom>
    <allInternalUsers></allInternalUsers>
    </sharedFrom>
    </sharingOwnerRules>""" + "\n")
        print("Lead sharing rule with label: " + label + " successfully created.")
        how_many -= 1
def close_rules():
    """Append the closing </SharingRules> tag to the rule file."""
    with open(rulefile, "a+") as rules:
        rules.write("</SharingRules>\n")
def get_report_id(report_name, as_user, as_password):
    """Resolve a report name to its Id, querying as the given user.

    :param report_name: name of the Report to look up
    :param as_user: login name used for the query
    :param as_password: matching password
    :return: (report_id, user_sid) tuple, or None when no report matched
    """
    user_sid = get_user_sid(as_user, as_password)
    sf2 = Salesforce(instance_url=instanceURL, session_id=user_sid)
    query = sf2.query("SELECT Id FROM Report WHERE Name = '" + report_name + "'")
    dictitems = list(collections.OrderedDict(query).values())[2]
    # Check for an empty result BEFORE popping. The original popped the
    # records list twice (IndexError on the normal single-match case) and
    # only tested emptiness after it had already extracted the id.
    if len(dictitems) == 0:
        print("Could not get report_id.\n")
        return None
    report_id = list(collections.OrderedDict(dictitems.pop()).values())[1]
    return report_id, user_sid
def export_report(how_many, report_name, as_user, as_password):
    """Export the named report *how_many* times as CSV files, as *as_user*.

    Each export hits the report-export URL and writes the response body to a
    randomly-named 'lsl-report-XXXX.csv' in the current directory.

    :param how_many: number of export requests to perform
    :param report_name: name of the Report to export
    :param as_user: login name used for the requests
    :param as_password: matching password
    :return: None
    """
    # NOTE(review): get_report_id returns None when the report is missing,
    # which would make this unpacking raise TypeError — confirm callers
    # always pass an existing report name.
    (report_id, user_sid) = get_report_id(report_name, as_user, as_password)
    while how_many > 0:
        response = requests.get(instanceURL + "/" + report_id + "?view=d&snip&export=1&enc=UTF-8&excel=1",
                                headers=sf.headers, cookies={'sid': user_sid})
        # Context manager so the handle is closed even if the write fails
        # (the original used bare open()/close()).
        with open("lsl-report-" + random_string_generator(4) + ".csv", 'w') as f:
            f.write(response.text)
        how_many -= 1
def deploy_zipfile(as_user, as_password):
    """Zip './tmp', deploy it through the Metadata API as *as_user*, and
    check the deployment status once.

    Flow: zip the workspace -> base64-encode it -> SOAP deploy() -> wait two
    seconds -> SOAP checkDeployStatus().

    :param as_user: login name used for the deployment
    :param as_password: matching password
    :return: None (also None on any failure, after printing the response)
    """
    user_sid = get_user_sid(as_user, as_password)
    # Zip everything under ./tmp, storing paths relative to that directory.
    new_zip = zipfile.ZipFile(packageZipFile, "w")
    dir_path = './tmp'
    len_dir_path = len(dir_path)
    for root, _, files in os.walk(dir_path):
        for file in files:
            file_path = os.path.join(root, file)
            new_zip.write(file_path, file_path[len_dir_path:])
    new_zip.close()
    # The deploy() call carries the archive inline as base64 text.
    with open(packageZipFile, "rb") as f:
        bytes_read = f.read()
    encoded = base64.b64encode(bytes_read)
    b64code = encoded.decode("utf-8")
    deploy_headers = {
        'content-type': 'text/xml',
        'charset': 'UTF-8',
        'SOAPAction': 'deploy'
    }
    deploy_body = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:met="http://soap.sforce.com/2006/04/metadata">
    <soapenv:Header>
    <met:SessionHeader>
    <sessionId>""" + user_sid + """</sessionId>
    </met:SessionHeader>
    </soapenv:Header>
    <soapenv:Body>
    <deploy xmlns="http://soap.sforce.com/2006/04/metadata">
    <ZipFile>""" + b64code + """</ZipFile>
    <DeployOptions>
    <allowMissingFiles>false</allowMissingFiles>
    <autoUpdatePackage>true</autoUpdatePackage>
    <checkOnly>false</checkOnly>
    <ignoreWarnings>false</ignoreWarnings>
    <performRetrieve>false</performRetrieve>
    <rollbackOnError>true</rollbackOnError>
    <runAllTests>false</runAllTests>
    <singlePackage>false</singlePackage>
    </DeployOptions>
    </deploy>
    </soapenv:Body>
    </soapenv:Envelope>"""
    soap_result = requests.post(metadata_url, deploy_body, headers=deploy_headers)
    dom = xml.dom.minidom.parseString(soap_result.text)
    # deploy() is asynchronous: a successful submit returns a job <id>.
    result_element = dom.getElementsByTagName('id')
    result_value = result_element[0].firstChild.nodeValue
    if len(result_value) == 0:
        print("I've encountered an issue. Request response:\n")
        print(soap_result.text + "\n")
        return None
    else:
        if result_element[0].firstChild.nodeValue:
            print("Got deployment ID.")
            did = result_element[0].firstChild.nodeValue
        else:
            print("I've encountered an issue. Request response:\n")
            print(soap_result.text + "\n")
            return None
    # NOTE(review): a fixed 2s wait then a single status poll — slow
    # deployments will still report 'InProgress' and be treated as failures.
    time.sleep(2)
    check_deploy_headers = {
        'content-type': 'text/xml',
        'charset': 'UTF-8',
        'SOAPAction': 'checkDeployStatus'
    }
    check_deploy_status = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:met="http://soap.sforce.com/2006/04/metadata">
    <soapenv:Header>
    <met:SessionHeader>
    <sessionId>""" + user_sid + """</sessionId>
    </met:SessionHeader>
    </soapenv:Header>
    <soapenv:Body>
    <met:checkDeployStatus>
    <met:asyncProcessId>""" + did + """</met:asyncProcessId>
    <met:includeDetails>true</met:includeDetails>
    </met:checkDeployStatus>
    </soapenv:Body>
    </soapenv:Envelope>"""
    soap_result = requests.post(metadata_url, check_deploy_status, headers=check_deploy_headers)
    dom = xml.dom.minidom.parseString(soap_result.text)
    result_element = dom.getElementsByTagName('status')
    result_value = result_element[0].firstChild.nodeValue
    if len(result_value) == 0:
        print("I've encountered an issue. Request response:\n")
        print(soap_result.text + "\n")
        return None
    else:
        if result_element[0].firstChild.nodeValue == 'Succeeded':
            print("Deployment succeeded.")
        else:
            print("I've encountered an issue. Request response:\n")
            print(soap_result.text + "\n")
            return None
# UBA Risk User: 10x High, Set Trusted IP range.
def set_trusted_ip_range(count, description, start_ip, end_ip, owner, password):
    """Repeatedly add and then remove a trusted network IP range, *count*
    times, to generate 'Set Trusted IP range' audit/UBA events.

    Each iteration posts SecuritySettings with the range included, then posts
    it again with an empty <networkAccess> (which clears the ranges).

    :param count: number of add/delete cycles to perform
    :param description: description text for the added range
    :param start_ip: first address of the range
    :param end_ip: last address of the range
    :param owner: login name used for the SOAP calls
    :param password: matching password
    :return: None
    """
    user_sid = get_user_sid(owner, password)
    soap_body_part1 = """
    <env:Envelope xmlns:env="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <env:Header>
    <urn:SessionHeader xmlns:urn="http://soap.sforce.com/2006/04/metadata">
    <urn:sessionId>""" + user_sid + """</urn:sessionId>
    </urn:SessionHeader>
    </env:Header>
    <env:Body>
    <updateMetadata xmlns="http://soap.sforce.com/2006/04/metadata">
    <metadata xsi:type="SecuritySettings">
    <fullName>*</fullName>
    <networkAccess>"""
    soap_body_part2 = """
    </networkAccess>
    </metadata>
    </updateMetadata>
    </env:Body>
    </env:Envelope>
    """
    while count > 0:
        ip_range = """
        <ipRanges>
        <description>""" + description + """</description>
        <start>""" + start_ip + """</start>
        <end>""" + end_ip + """</end>
        </ipRanges>"""
        requests.post(metadata_url, soap_body_part1 + ip_range + soap_body_part2, headers=updateMetadataHeader)
        # NOTE(review): `count` counts DOWN, so these messages report the
        # remaining iterations, not how many have been done so far.
        print("Added trusted IP Range " + str(count) + " time(s).")
        requests.post(metadata_url, soap_body_part1 + soap_body_part2, headers=updateMetadataHeader)
        print("Deleted trusted IP Ranges " + str(count) + " times.")
        count -= 1
# Script entry point: run the full simulation defined in main().
if __name__ == "__main__":
    main()
| [
"fkoncz@outlook.com"
] | fkoncz@outlook.com |
191e1a055ae6199a5c8835115f3f4c9f0708d3b9 | 68151600bd725c87047020e3f3e10e7b609ef113 | /main/migrations/0013_auto_20200527_2144.py | 55849a1241f1fa2d2e4b1b47eb46e5d3d8f0a01c | [] | no_license | rezendi/scanvine | 8a8bff3dad5342bf257319a35fbc0dd7b322e438 | cc8a41112e2f178617faa42056189d54b0d99785 | refs/heads/master | 2023-07-03T20:39:36.060393 | 2020-11-24T20:15:44 | 2020-11-24T20:15:44 | 257,799,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | # Generated by Django 3.0.5 on 2020-05-27 21:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2020-05-27).

    Alters Sharer.category's choices (indexed IntegerField, now including
    the -2 'Personal' value) and creates the FeedShare model linking a
    Share to the user whose feed it appeared in.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('main', '0012_auto_20200525_2338'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sharer',
            name='category',
            field=models.IntegerField(choices=[(-2, 'Personal'), (-1, 'None'), (0, 'Health'), (1, 'Science'), (2, 'Tech'), (3, 'Business'), (4, 'Media')], db_index=True),
        ),
        migrations.CreateModel(
            name='FeedShare',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('share', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Share')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"jon@rezendi.com"
] | jon@rezendi.com |
e8b0c3235cae1f212444dfb8a51751d4dc4ad88f | c0717724c7dc3937252bb4a7bd7c796088db4c5d | /solutions/rijeci.py | f7bb3e0093b8d9f71b50155f01803ba70acde38f | [] | no_license | matthew-cheney/kattis-solutions | 58cd03394ad95e9ca7ffa3de66b69d90647b31ff | d9397ca4715a3ad576046a62bdd6c0fb9542d838 | refs/heads/main | 2023-01-24T12:49:18.871137 | 2020-12-10T04:10:48 | 2020-12-10T04:10:48 | 318,857,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | K = int(input())
# Each step applies the substitution (A, B) -> (B, A + B), so the two
# counters follow the Fibonacci recurrence over K iterations.
A, B = 1, 0
for k in range(K):
    A, B = B, A + B
print(A, B) | [
"m.cheney95@outlook.com"
] | m.cheney95@outlook.com |
601c2feab2272fe4859dece473351049ed440a94 | 9117cee84a90c3c8a93028b5af67799e7ac5a802 | /CCC/CCC '12 S5 - Mouse Journey.py | 8e5e740163672ae2e0b5794ac50f7ba607b0f032 | [] | no_license | Stevan-Zhuang/DMOJ | 7a1fc3b00d553dcbb2e7069d046c4922fdf2286e | b0c7b1e52473e71e3b4a1f15fc34e35c9f5bd92c | refs/heads/master | 2023-06-16T16:34:36.497282 | 2021-07-15T20:09:32 | 2021-07-15T20:09:32 | 292,061,961 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | num_rows, num_cols = [int(data) for data in input().split()]
# Grid DP: num_paths[r][c] = number of down/right paths from (0,0) to (r,c);
# cells holding a cat are marked -1 and contribute nothing.
num_paths = [[0] * num_cols for row in range(num_rows)]
num_paths[0][0] = 1
num_cats = int(input())
for cat in range(num_cats):
    row, col = input().split()
    # Input is 1-based; convert to 0-based indices.
    row, col = int(row) - 1, int(col) - 1
    num_paths[row][col] = -1
for row in range(num_rows):
    for col in range(num_cols):
        if num_paths[row][col] != -1:
            # NOTE(review): at row 0 / col 0 the [-1] index wraps to the last
            # row/column; those cells are still 0 (or -1, mapped to 0) during
            # the sweep, so the wraparound is benign — confirm if edited.
            left = num_paths[row][col - 1] if num_paths[row][col - 1] != -1 else 0
            up = num_paths[row - 1][col] if num_paths[row - 1][col] != -1 else 0
            cur = num_paths[row][col]
            num_paths[row][col] = max(left + up, cur)
print(num_paths[-1][-1])
| [
"noreply@github.com"
] | Stevan-Zhuang.noreply@github.com |
3e63ae681a9a108917e4e8eb68534010225514bd | cfc804a95325bba903300bdc0984d57316e20861 | /ID Scanner/yolo/frontend.py | 83633e175c23fcf9d88e6e36a170c019421bf846 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Haomin-Yu/Smart-Bartender | 3408d849779d3cec56a8dfb4822790eb5b39afc0 | 52e9ff7e7ab8ffc13e248f20cebf110ed9897a5d | refs/heads/master | 2022-07-22T18:02:04.083038 | 2019-12-10T19:56:33 | 2019-12-10T19:56:33 | 213,276,187 | 1 | 0 | MIT | 2022-07-06T20:25:19 | 2019-10-07T02:00:31 | Jupyter Notebook | UTF-8 | Python | false | false | 6,181 | py | # -*- coding: utf-8 -*-
# This module is responsible for communicating with the outside of the yolo package.
# Outside the package, someone can use yolo detector accessing with this module.
import os
import numpy as np
from yolo.backend.decoder import YoloDecoder
from yolo.backend.loss import YoloLoss
from yolo.backend.network import create_yolo_network
from yolo.backend.batch_gen import create_batch_generator
from yolo.backend.utils.fit import train
from yolo.backend.utils.annotation import get_train_annotations, get_unique_labels
from yolo.backend.utils.box import to_minmax
def get_object_labels(ann_directory):
    """Collect the unique object labels from every annotation file found in
    *ann_directory*."""
    annotation_files = [os.path.join(ann_directory, fname)
                        for fname in os.listdir(ann_directory)]
    return get_unique_labels(annotation_files)
def create_yolo(architecture,
                labels,
                input_size = 416,
                anchors = None,
                coord_scale=1.0,
                class_scale=1.0,
                object_scale=5.0,
                no_object_scale=1.0):
    """Build a ready-to-use YOLO instance: network + loss + box decoder.

    # Args
        architecture : str, backbone architecture name
        labels : list of class label strings
        input_size : int, network input resolution
        anchors : flat list of (w, h) anchor pairs; defaults to the standard
            five YOLOv2 anchors when None
        coord_scale, class_scale, object_scale, no_object_scale : loss weights

    # Returns
        YOLO instance
    """
    # Default resolved inside the function: a mutable default argument would
    # be a single shared list across all calls.
    if anchors is None:
        anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843,
                   5.47434, 7.88282, 3.52778, 9.77052, 9.16828]
    n_classes = len(labels)
    n_boxes = int(len(anchors)/2)
    yolo_network = create_yolo_network(architecture, input_size, n_classes, n_boxes)
    yolo_loss = YoloLoss(yolo_network.get_grid_size(),
                         n_classes,
                         anchors,
                         coord_scale,
                         class_scale,
                         object_scale,
                         no_object_scale)
    yolo_decoder = YoloDecoder(anchors)
    yolo = YOLO(yolo_network, yolo_loss, yolo_decoder, labels, input_size)
    return yolo
class YOLO(object):
    """End-to-end YOLO detector: wires together a backbone network, a loss
    function and a box decoder, and exposes weight loading, prediction and
    training."""
    def __init__(self,
                 yolo_network,
                 yolo_loss,
                 yolo_decoder,
                 labels,
                 input_size = 416):
        """
        # Args
            yolo_network : network wrapper created by create_yolo_network()
            yolo_loss : YoloLoss instance
            yolo_decoder : YoloDecoder instance
            labels : list of class label strings
            input_size : int, network input resolution
        """
        self._yolo_network = yolo_network
        self._yolo_loss = yolo_loss
        self._yolo_decoder = yolo_decoder
        self._labels = labels
        # Used only when building batch generators (see _get_batch_generator).
        self._input_size = input_size
    def load_weights(self, weight_path, by_name=False):
        """Load pre-trained weights from *weight_path* if the file exists;
        otherwise just warn and keep the randomly-initialized weights."""
        if os.path.exists(weight_path):
            print("Loading pre-trained weights in", weight_path)
            self._yolo_network.load_weights(weight_path, by_name=by_name)
        else:
            print("Fail to load pre-trained weights. Make sure weight file path.")
    def predict(self, image, threshold=0.3):
        """
        # Args
            image : 3d-array (BGR ordered)
        # Returns
            boxes : array, shape of (N, 4), pixel min/max coordinates
            probs : array, shape of (N, nb_classes)
        """
        def _to_original_scale(boxes):
            # Decoder boxes are normalized to [0, 1]; scale back to pixels.
            height, width = image.shape[:2]
            minmax_boxes = to_minmax(boxes)
            minmax_boxes[:,0] *= width
            minmax_boxes[:,2] *= width
            minmax_boxes[:,1] *= height
            minmax_boxes[:,3] *= height
            # NOTE(review): np.int is removed in NumPy >= 1.24 — confirm the
            # pinned NumPy version, or this line needs `int`/`np.int64`.
            return minmax_boxes.astype(np.int)
        netout = self._yolo_network.forward(image)
        boxes, probs = self._yolo_decoder.run(netout, threshold)
        if len(boxes) > 0:
            boxes = _to_original_scale(boxes)
            return boxes, probs
        else:
            return [], []
    def train(self,
              img_folder,
              ann_folder,
              nb_epoch,
              saved_weights_name,
              batch_size=8,
              jitter=True,
              learning_rate=1e-4,
              train_times=1,
              valid_times=1,
              valid_img_folder="",
              valid_ann_folder="",
              first_trainable_layer=None,
              is_only_detect=False):
        """Train the detector on annotated images and save the best weights
        to *saved_weights_name*."""
        # 1. gather train/validation annotations
        train_annotations, valid_annotations = get_train_annotations(self._labels,
                                                                     img_folder,
                                                                     ann_folder,
                                                                     valid_img_folder,
                                                                     valid_ann_folder,
                                                                     is_only_detect)
        # 2. build batch generators (no jitter on the validation set)
        train_batch_generator = self._get_batch_generator(train_annotations, batch_size, train_times, jitter=jitter)
        valid_batch_generator = self._get_batch_generator(valid_annotations, batch_size, valid_times, jitter=False)
        # 3. get the keras model instance & loss function
        model = self._yolo_network.get_model(first_trainable_layer)
        loss = self._get_loss_func(batch_size)
        # 4. run the training loop
        train(model,
              loss,
              train_batch_generator,
              valid_batch_generator,
              learning_rate = learning_rate,
              nb_epoch  = nb_epoch,
              saved_weights_name = saved_weights_name)
    def _get_loss_func(self, batch_size):
        # Loss closure is specialized per batch size.
        return self._yolo_loss.custom_loss(batch_size)
    def _get_batch_generator(self, annotations, batch_size, repeat_times=1, jitter=True):
        """
        # Args
            annotations : Annotations instance
            batch_size : int
            repeat_times : int, how many times the dataset is repeated
            jitter : bool, whether to apply image augmentation
        # Returns
            batch_generator : BatchGenerator instance
        """
        batch_generator = create_batch_generator(annotations,
                                                 self._input_size,
                                                 self._yolo_network.get_grid_size(),
                                                 batch_size,
                                                 self._yolo_loss.anchors,
                                                 repeat_times,
                                                 jitter=jitter,
                                                 norm=self._yolo_network.get_normalize_func())
        return batch_generator
| [
"xinyug2@uw.edu"
] | xinyug2@uw.edu |
c4a735ef7e38d09ec514e43a3557b1d581e8db67 | b3e39afef4ddac88f1391964995c2eefd02818e7 | /CantUseFour.py | e114529ae5c0e7df55d188f30bf5f8355f1ac7c3 | [] | no_license | ommmishra/randomPrograms | 44628e3c4b381e14b8d908342dcb9b7a854284c2 | f52f3b0b485b310b9d5429667fba9422b6af045c | refs/heads/master | 2022-02-25T08:12:25.656096 | 2022-01-19T15:08:14 | 2022-01-19T15:08:14 | 179,322,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | n = "44523432342343243234237"
# "Can't use four" rule: replace every digit 4 with 3, then report the
# substituted number and (below) how much was subtracted from the original.
x = n.replace("4","3")
z = int(n) - int(x)
print(x)
print(z) | [
"ommmishra830@gmail.com"
] | ommmishra830@gmail.com |
eccd89a0b9800c91f111c41cba906f0bace5a4ff | f361f4477577716ab3e8796c787157e43b2f9178 | /training/python-training/number_game_2.py | ac3f9357ec9eb22351ea7a4f999f5241704c8e9f | [] | no_license | drewmullen/personal | edba910f5ea3a6311094d2b2047e522e700e5219 | 37995ad786bf1926d02cdb02ac7bb11ead9641ae | refs/heads/master | 2021-09-13T02:48:53.239667 | 2018-04-24T03:15:00 | 2018-04-24T03:15:00 | 83,120,385 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | import random
# Target number for this round, chosen once at import time.
secret_num = random.randint(1,10)
def main():
    """Run up to six guessing rounds, then end the game as a loss.

    input_eval() exits the program on a correct guess, so reaching the end
    of the loop means the player never got it."""
    for _ in range(6):
        guess = take_input()
        input_eval(guess)
    print("Too many guesses, you lose!")
    exit()
def take_input():
    """Prompt until the player enters an integer between 1 and 10.

    :return: the validated guess (int)
    """
    try:
        guess = int(input("Guess a number between 1 and 10: "))
    except ValueError:
        print("That's not a number! Try again...")
        # Return the retry's result. The original recursed without
        # returning, so `guess` was unbound here and the function fell
        # through to the range checks with an UnboundLocalError.
        return take_input()
    if guess > 10:
        print("Thats too high! Try again...")
        # Same fix: propagate the retry's value instead of returning None.
        return take_input()
    elif guess <= 0:
        print("Thats too low! Try again...")
        return take_input()
    else:
        return guess
def input_eval(guess):
    """Compare *guess* against the module-level secret number.

    Prints the outcome; on a correct guess the whole game exits."""
    if guess != secret_num:
        print("That's not it! Try again...")
        return
    print("You got it! The number was {}.".format(secret_num))
    exit()
# Start the game as soon as the script runs.
main()
| [
"drew.mullen@ge.com"
] | drew.mullen@ge.com |
e957134d472c84d28c43e089b40cdb76ab62d681 | 64314270bfa5d14d13944ccf9d1fb15085a1ca68 | /Problema 1/naive_bayes/naive_bayes.py | 3a8f4ac2c700be49419fb56625b482b91b6f575c | [
"MIT"
] | permissive | jarvis-fga/Projetos | bfef5becc31e15850410194a847a3e4396dd31a3 | 59b89527e8b9ddb4caf353eb85b229ec27eae2a3 | refs/heads/master | 2021-01-19T13:41:15.855101 | 2017-11-28T17:12:24 | 2017-11-28T17:12:24 | 100,855,314 | 2 | 1 | null | 2017-11-28T17:12:24 | 2017-08-20T10:54:56 | Jupyter Notebook | UTF-8 | Python | false | false | 974 | py | import csv
def carregar_acessos(arquivo_nome):
    """Load a CSV of 13 numeric features plus an 'Origem' label column.

    :param arquivo_nome: path to the CSV file (first row is a header)
    :return: (dados, marcacoes) — list of 13-float feature rows and the
        parallel list of labels
    """
    dados = []
    marcacoes = []
    # `with` closes the file (the original leaked the handle), and next()
    # replaces the Python 2-only leitor.next() call; text mode also works
    # with csv on Python 3, unlike the original 'rb'.
    with open(arquivo_nome) as arquivo:
        leitor = csv.reader(arquivo)
        next(leitor)  # skip the header row
        for P1, P2, P3, P4, P5, P6, P7, P8, P9, P10, P11, P12, P13, Origem in leitor:
            dados.append([float(P1), float(P2), float(P3), float(P4), float(P5), float(P6), float(P7),
                          float(P8), float(P9), float(P10), float(P11), float(P12), float(P13)])
            marcacoes.append(Origem)
    return dados, marcacoes
def taxa_acerto(resultado, gabarito):
    """Return the accuracy (percentage) of predictions vs. expected labels.

    :param resultado: sequence of predicted labels
    :param gabarito: sequence of expected labels, aligned by position
    :return: percentage of positions where prediction == expected
    """
    acertos = 0
    # Compare position-by-position. The original kept i = 0 forever, so
    # every prediction was compared against gabarito[0] only.
    for r, g in zip(resultado, gabarito):
        if r == g:
            acertos = acertos + 1
    taxa = 100.0 * acertos / len(resultado)
    return taxa
# Load the training and test splits from disk.
dados, marcacoes = carregar_acessos('dados_tratados.csv')
teste, marcacoes_teste = carregar_acessos('dados_teste.csv')
# NOTE(review): mid-file import — conventionally this belongs at the top of
# the module with the other imports.
from sklearn.naive_bayes import MultinomialNB
# Fit a multinomial Naive Bayes model and score it on the held-out set.
modelo = MultinomialNB()
modelo.fit(dados, marcacoes)
resultado1 = modelo.predict(teste)
taxa_final = taxa_acerto(resultado1, marcacoes_teste)
print("Taxa de acerto em % :")
print(taxa_final)
| [
"lucasandradeunb@gmail.com"
] | lucasandradeunb@gmail.com |
adef9bc871758731eed912cc5563b6f355987ed3 | ec03d9949eb202c9de6c163566d7edffd583db93 | /dags/hello_world.py | 515b4e25dbc49b07eb81865d92302d7164e7deff | [] | no_license | tomatohope/airflow | 770ba89dd68259566969259c08bb63071d24248a | 8cbb9f94edf10fb66bddcd522fa71c8a186b4e6d | refs/heads/master | 2021-07-21T03:26:34.251577 | 2021-01-08T09:31:44 | 2021-01-08T09:31:44 | 231,941,543 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,614 | py | # coding: utf-8
import os
import airflow
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from datetime import datetime as datetime1, timedelta
import datetime as datetime2
# Anchor the start_date one schedule_interval (1 day) in the past so the
# first run is eligible immediately.
dt = datetime1.now() - datetime2.timedelta(days=1)
airflow_home = '/root/airflow'
os.environ['airflow_home'] = str(airflow_home)
# Default arguments applied to every task in this DAG.
default_args = {
    'owner': 'user1',
    'depends_on_past': False,
    # start time: year month day hour minutes seconds
    'start_date': datetime1(dt.year, dt.month, dt.day, 10, 2, 0),
    'email': ['user1@xxx.com', 'user2@xxx.com'],
    'email_on_failure': True,
    'email_on_retry': True,
    'retries': 0,
    'retry_delay': timedelta(seconds=5)
}
# DAG definition.
dag = DAG(
    # display DAG name
    dag_id='test',
    default_args=default_args,
    # Must stay consistent with the 1-day offset applied to `dt` above.
    schedule_interval=timedelta(days=1),
    # Disable backfill: never run missed past schedules.
    catchup=False
)
def hello_world_1(ds, **kwargs):
    """Task callable: print a greeting and append a line to /tmp/a.

    :param ds: execution date string injected by Airflow
    :param kwargs: remaining Airflow context (provide_context=True)
    """
    print("hello_world1")
    with open("/tmp/a", "at") as f:
        f.write("hello----word" + "\n")
    # An assert inside the callable can be used to signal failure to
    # Airflow; raising an exception directly also works.
    assert 1 == 1
def hello_world_2(ds, **kwargs):
    """Task callable: run an external script and fail hard on a non-zero
    exit status.

    :param ds: execution date string injected by Airflow
    :param kwargs: remaining Airflow context (provide_context=True)
    """
    ret = os.system("python $airflow_home/python_file/print.py")
    # Check the subprocess exit status. NOTE(review): os._exit skips all
    # interpreter cleanup — confirm this abrupt kill is intended rather
    # than raising an exception for Airflow to record.
    if ret != 0:
        os._exit(-1)
    print("Continued....")
# task 1
t1 = PythonOperator(
    task_id='hello_world_1',
    # the callable this task runs
    python_callable=hello_world_1,
    # pass the Airflow context into the callable's **kwargs
    provide_context=True,
    # the DAG this task belongs to
    dag=dag,
    retries=0,
)
# task 2
t2 = PythonOperator(
    task_id='hello_world_2',
    python_callable=hello_world_2,
    provide_context=True,
    dag=dag,
)
# task plan
#t2.set_upstream(t1) # t2 depends on t1; equivalent to t1.set_downstream(t2) and to dag.set_dependency('hello_world_1', 'hello_world_2')
# i.e. t2 only runs once t1 has finished successfully
# t1 ##only t1
# t1 >> t2 ## t1 first success && t2
t1 >> t2
# airflow.note
# http://note.youdao.com/noteshare?id=bb4888b561b3468e732361de74c7794e&sub=FD605AE047F04575A92C1DF2BCF9E7EA
# exec time
###############################################
# start_date + schedule_interval
# https://www.jianshu.com/p/5aa4447f48ea
#
# # start_date
#
# if now time ago:
# real
# start
# time: now
# time + schedule_interval
#
# # schedule_interval
# if cron:
# not now
# time: now
# time + schedule_interval | [
"hope.gong@jingdigital.com"
] | hope.gong@jingdigital.com |
0769073e54f97a7b28ca46674614b73ce89d67c6 | 37906b41991719dff0590f9161f9b69af8d7e491 | /tensorflow/python/tpu/tensor_tracer.py | b9aec3f2e26e5272030fbfb380877f6d6a789d29 | [
"Apache-2.0"
] | permissive | nauman07/tensorflow | 7ae4277564bb596c0f8ba5d107a35d9505c3c2fb | f88cf68393e60525506a567e0081b8e2e6db409b | refs/heads/master | 2020-08-28T15:55:35.510154 | 2019-10-26T15:34:58 | 2019-10-26T15:39:08 | 217,742,698 | 3 | 0 | Apache-2.0 | 2019-10-26T17:11:10 | 2019-10-26T17:11:09 | null | UTF-8 | Python | false | false | 66,668 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""A utility to trace tensor values on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import sys
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import analytics
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.tpu import tensor_tracer_flags
from tensorflow.python.tpu import tensor_tracer_report
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import training_util
# Device types supported by Tensor Tracer.
_DEVICE_TYPE_TPU = 'tpu'
_DEVICE_TYPE_CPU = 'cpu'
# Number of tensor elements printed in TRACE_MODE_PART_TENSOR mode.
_TRACE_MODE_PART_TENSOR_SIZE = 3
# Human-readable reasons recorded in the trace report explaining why an
# op/tensor was (or was not) traced.
_REASON_OUTSIDE_OP_RANGE = 'not-traced-outside-op-range'
_REASON_UNSAFE_OP = 'not-traced-unsafe-op'
_REASON_WHILELOOP_OP = 'not-traced-special-whileloop-op'
_REASON_UNSAFE_SCALAR = 'not-traced-unsafe-scalar'
_REASON_SKIP_SCALAR = 'not-traced-scalar'
_REASON_LESS_INTERESTING_OP = 'not-traced-less-interesting-op'
_REASON_DEVICE_MISMATCH = 'not-traced-device-mismatch'
_REASON_DYNAMIC_SHAPE = 'not-traced-dynamic-shape'
_REASON_SCALAR_GET_TRACED = 'traced-scalar'
_REASON_TENSOR_GET_TRACED = 'traced-tensor'
_REASON_USER_INCLUDED = 'traced-user-included'
_REASON_USER_EXCLUDED = 'not-traced-user-excluded'
_REASON_NOT_EXECUTED = 'not-traced-not-in-exec-path'
_REASON_NON_NUMERIC_TENSOR = 'not-traced-non-numeric-tensor'
_REASON_FEEDS_WHILELOOP_OP = 'not-traced-feeds-special-whileloop-op'
# Prefix that makes print_v2 write to a file instead of a stream.
_OUTPUT_STREAM_ESCAPE = 'file://'
_TENSOR_TRACER_COLLECTION = 'tensor_tracer_variables'
# Trace output file names.
_TRACE_FILE_NAME = 'trace.all'
_COMPACT_TRACE_FILE_PREFIX = 'compact_trace.'
_COMPACT_TRACE_ENTRY_INIT_VALUE = -1.0
_TENSOR_TRACER_STORAGE = 'tensor_tracer_storage'
_TT_SNAPSHOT = 'tensor_tracer_snapshot'
_REPLICA_ID_TAG = '#replica-id: '
# Signature names re-exported from tensor_tracer_flags for local use.
_TT_SUMMARY_NORM = tensor_tracer_flags.TT_SUMMARY_NORM
_TT_SUMMARY_MAX = tensor_tracer_flags.TT_SUMMARY_MAX
_TT_SUMMARY_MIN = tensor_tracer_flags.TT_SUMMARY_MIN
_TT_SUMMARY_MEAN = tensor_tracer_flags.TT_SUMMARY_MEAN
_TT_SUMMARY_VAR = tensor_tracer_flags.TT_SUMMARY_VAR
_TT_SUMMARY_SIZE = tensor_tracer_flags.TT_SUMMARY_SIZE
_TT_SUMMARY_TAG = 'tensor_tracer_summary'
_TT_TENSORBOARD_PLUGIN_NAME = 'tensor_tracer'
_TT_HOSTCALL_KEY = 'tensor_tracer_host_call'
_TT_EVENT_FILE_SUFFIX = '.tensor_tracer'
# Maximum number of pending summary events kept in the writer queue.
_TT_SUMMARY_MAX_QUEUE = 100
def op_priority(op_type):
  """Returns the priority of the op.

  If the priority of the op is k, it will be traced if trace_level>=k.

  Args:
    op_type: String name of the operation type.
  Returns:
    Integer value corresponding the priority of the op.
  """
  # Each entry maps a priority level to the op types at that level.  Lower
  # levels are more "interesting": level 7 holds constant-like ops that rarely
  # change across steps, level 1 holds numerically significant ops, and any
  # unlisted op type defaults to 0 (always traced).
  priority_table = (
      (7, ('Const', 'Shape', 'BroadcastGradientArgs', 'Range',
           'VariableShape', 'Fill', 'OneHot')),
      # Operations without numerical effects.
      (6, ('Identity', 'Cast', 'Reshape', 'ExpandDims', 'StopGradient',
           'PreventGradient', 'Squeeze')),
      # Operations that merge or slice an input.
      (5, ('ConcatV2', 'Concat', 'StridedSlice', 'Slice', 'Pack', 'Tile')),
      # Operations less likely to provide useful information.
      (4, ('Pad', 'RandomUniformInt', 'GreaterEqual')),
      # Add operations that are less likely to create any issues.
      (3, ('Sum', 'AddV2', 'Add', 'AddN', 'BiasAdd', 'CrossReplicaSum')),
      # Sub operations that are less likely to create any issues.
      (2, ('Neg', 'Sub')),
      # Multiplication and some other numerically relevant operations.
      (1, ('Mul', 'Square', 'MatMul', 'RandomUniform', 'Select',
           'Maximum', 'Mean', 'Variance')),
  )
  for level, type_names in priority_table:
    if op_type in type_names:
      return level
  return 0
def read_tensor_tracer_event_file(event_file):
  """Reads the event file written by tensor tracer.

  Args:
    event_file: Path to the event file that contains only tensor tracer events.
  Returns:
    An event dictionary in the form of
    {step_number: {tensor_name: tensor_content}}
  Raises:
    ValueError: If an unexpected trace is found.
  """
  event_dict = {}
  for trace_event in summary_iterator.summary_iterator(event_file):
    # First event is an event with file_version: "brain.Event:2"
    if not trace_event.HasField('summary'):
      continue
    step = trace_event.step
    if step not in event_dict:
      event_dict[step] = {}
    # Tensor tracer writes exactly one summary value per event; anything else
    # means the file was not produced by tensor tracer.
    if len(trace_event.summary.value) != 1:
      raise ValueError('Single step contains %d summary values,'
                       ' expected 1.' % len(trace_event.summary.value))
    tensor_value = trace_event.summary.value[0]
    tensor_name = tensor_value.tag
    # Reconstruct the numpy array from the serialized bytes, dtype, and the
    # dimensions recorded in the tensor proto.
    real_shape = [d.size for d in tensor_value.tensor.tensor_shape.dim]
    tensor_content = np.frombuffer(
        tensor_value.tensor.tensor_content,
        dtypes.DType(tensor_value.tensor.dtype).as_numpy_dtype()
    ).reshape(real_shape)
    event_dict[step][tensor_name] = tensor_content
  return event_dict
def tensor_tracepoint(tensor, checkpoint_name):
  """Adds a checkpoint with the given checkpoint name for the given tensor.

  The tensor will be added to the list of tensors that will be traced by the
  tensor tracer.

  Args:
    tensor: the tensor object for which the tracing is requested.
    checkpoint_name: a string name for the checkpoint. This name has to be a
      unique name if used within model comparison. The tensors that have the
      same checkpoint identifier is compared in model comparison.
  Returns:
    The provided tensor.
  """
  graph = tensor.graph
  # Touch the collection first, then register (tensor, name) for tracing.
  graph.get_collection(_TENSOR_TRACER_COLLECTION)
  graph.add_to_collection(_TENSOR_TRACER_COLLECTION, (tensor, checkpoint_name))
  return tensor
def keras_layer_tracepoint(layer, checkpoint_name):
  """An interface for adding the tensor outputs of a keras layer.

  Encapsulates tensor_tracepoint.

  Args:
    layer: A keras layer.
    checkpoint_name: a string name for the checkpoint. This name has to be a
      unique name if used within model comparison. The tensors that have the
      same checkpoint identifier is compared in model comparison.
  Returns:
    The provided layer.
  """
  try:
    outputs = layer.output
    if tensor_util.is_tensor(outputs):
      tensor_tracepoint(outputs, '%s' % (checkpoint_name))
    else:
      # Multi-output layer: register each tensor under an indexed name.
      # Bug fix: the original code tested is_tensor(outputs) (the whole
      # collection) inside the loop instead of the individual element, so
      # per-element filtering never worked.
      for idx, output_tensor in enumerate(outputs):
        if tensor_util.is_tensor(output_tensor):
          tensor_tracepoint(output_tensor, '%s_%d' % (checkpoint_name, idx))
  except AttributeError:
    # layer.output raises AttributeError if the layer has never been called.
    pass
  except RuntimeError:
    # layer.output raises RuntimeError for layers with multiple inbound nodes.
    pass
  return layer
def _trace_files_need_precreated(output_dir):
"""Return True if trace files must be pre-created by users."""
if not output_dir.startswith('/'):
return False
if len(output_dir) < 5:
return False
if output_dir[2] != 'n':
return False
if output_dir[3] != 's':
return False
if output_dir[1] != 'c':
return False
if output_dir[4] != '/':
return False
return True
class TensorTracer(object):
  """A software construct for tracing tensor values in a TF graph on TPU.

  This utility is disabled by default. It can be enabled by setting
  the TENSOR_TRACER_FLAGS env variable as:
  export TENSOR_TRACER_FLAGS="--enable=1"
  If it is enabled, it will trace the output tensor values of
  selected Ops in the graph. It has two outputs: (1) the traces and (2)
  a report. The traces are dumped to a specified local file on the TPU
  host. The report is printed to the log.info of the TPU job.
  By passing options via the env variable, users can change:
  (1) the trace mode (e.g., detecting NaN/Inf, printing partial or
  full tensor values)
  (2) which Ops to be traced (via op.name or op.type)
  (3) output trace file path.
  """
  # The set of graphs that are rewritten by tensor tracer. Class-level, so it
  # is shared by every TensorTracer instance in the process.
  _traced_graphs = set()
@staticmethod
def is_enabled():
"""Returns True if TensorTracer is enabled."""
return tensor_tracer_flags.TTParameters().is_enabled()
@staticmethod
def check_device_type(device_type):
"""Checks if the given device type is valid."""
if device_type not in (_DEVICE_TYPE_TPU, _DEVICE_TYPE_CPU):
raise ValueError('Invalid device_type "%s"'%device_type)
@staticmethod
def check_trace_mode(device_type, trace_mode):
"""Checks if the given trace mode work on the given device type.
Args:
device_type: Device type, TPU, GPU, CPU.
trace_mode: Tensor tracer trace mode.
Raises:
ValueError: If the given trace mode is not supported for the device.
"""
if trace_mode in (tensor_tracer_flags.TRACE_MODE_SUMMARY,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY):
if device_type != _DEVICE_TYPE_TPU:
raise ValueError('Device_type "%s" is not yet supported for '
'trace mode "%s"' % (device_type, trace_mode))
@staticmethod
def loop_cond_op(op):
return op.type in ('LoopCond', 'RefLoopCond')
@staticmethod
def while_loop_op(op):
"""Returns true if op is one of the special ops of in a while loop.
Args:
op: A tf.Operation.
Returns:
True if the given op is one of [Switch, Merge, Enter, Exit,
NextIteration, LoopCond], which are all building blocks for TF while
loops.
"""
return (control_flow_util.IsLoopSwitch(op) or
control_flow_util.IsLoopMerge(op) or
control_flow_util.IsLoopEnter(op) or
control_flow_util.IsLoopExit(op) or
TensorTracer.loop_cond_op(op) or
op.type in ('RefNextIteration', 'NextIteration'))
@staticmethod
def unsafe_op(op):
"""Returns True if this op is not safe to be traced."""
if control_flow_util.IsInCond(op):
return True
# Reasons for not including following op types:
# Assign: cause incorrect result with CPU tracing.
if op.type == 'Assign':
return True
return False
@staticmethod
def device_mismatch(device_type, op):
if device_type == _DEVICE_TYPE_TPU:
# pylint: disable=protected-access
return tpu._TPU_REPLICATE_ATTR not in op.node_def.attr
# pylint: enable=protected-access
return False
@staticmethod
def unsafe_scalar_trace(op):
"""Return true if scalar output tensor from Op is not safe to be traced."""
# Tracing the following causes cycle in the graph on TPU.
if op.type in ('LoopCond', 'Enter', 'Merge', 'Const',
'Switch', 'Less', 'ReadVariableOp'):
return True
# Tracing the following will cause casting-issue
# with the norm tracing mode or other compilation issues on CPU.
if op.type in ('VarHandleOp', 'IteratorToStringHandle',
'IteratorGetNext', 'OneShotIterator',
'IteratorV2', 'MakeIterator',
'BatchDatasetV2', 'MapDataset',
'FixedLengthRecordDataset', 'TakeDataset', 'ZipDataset',
'Placeholder', 'PlaceholderWithDefault', 'StridedSlice'):
return True
return False
def _is_interesting_op(self, op):
"""Returns True if the given op is not an interesting one to be traced."""
# If flag is set to include less interesting ops, then include everything.
if self._parameters.include_less_interesting_ops:
return True
return op_priority(op.type) <= self._parameters.trace_level
@staticmethod
def reason(op_idx, details):
"""Returns reason why the Op at op_idx is traced or not."""
return '%d %s'%(op_idx, details)
  def __init__(self):
    """Initializes a TensorTracer.

    Sets the various member fields from the flags (if given) or the defaults.
    """
    self._replica_id = None  # Set later by _add_replica_id_to_graph.
    self._tt_config = tensor_tracer_report.TensorTracerConfig()
    self._parameters = tensor_tracer_flags.TTParameters()
    # Memoization cache of op names already determined to be user-included.
    self._included_op_full_names = set()
    # Maps key -> host-call function registered for summary export.
    self._host_call_fn = {}
    # Maps cache name -> tf.Variable storing intermediate tensor signatures.
    self._cache_variables = {}
  def _get_all_cache_variables(self):
    # Returns the mapping of cache name -> tf.Variable holding signatures.
    return self._cache_variables
  def _create_or_get_tensor_values_cache(self, cache_name, graph=None,
                                         shape=None, dtype=dtypes.float32):
    """Creates a variable as the cache to store intermediate tensor values.

    Args:
      cache_name: Name to be given to the cache (an instance of tf.variable).
      graph: Tensorflow graph.
      shape: A list of dimensions.
      dtype: Data type of created cache.
    Returns:
      A ref to newly created or existing cache with the given dimensions.
    Raises:
      ValueError: If missing a parameter to create the cache.
    """
    def _escape_namescopes(variable_name):
      # TODO(deveci): This might cause name collisions as in "foo/bar/mytensor"
      # and "foo_bar/mytensor".
      return variable_name.replace('/', '_').replace(':', '_')
    if cache_name not in self._cache_variables:
      # graph and shape are only required on first creation of this cache.
      if graph is None:
        raise ValueError('Graph must be provided at cache creation.')
      if shape is None:
        raise ValueError('shape must be provided at cache creation.')
      # NOTE(review): graph is guaranteed non-None here by the check above, so
      # the fallback to the default graph looks unreachable — confirm.
      graph = graph or ops.get_default_graph()
      if dtype.is_integer:
        init_val = int(_COMPACT_TRACE_ENTRY_INIT_VALUE)
      else:
        init_val = _COMPACT_TRACE_ENTRY_INIT_VALUE
      # Create in proper graph and base name_scope.
      with graph.as_default() as g, g.name_scope(None):
        self._cache_variables[cache_name] = variable_scope.get_variable(
            _TT_SNAPSHOT + '_' + _escape_namescopes(cache_name),
            shape=shape, dtype=dtype,
            initializer=init_ops.constant_initializer(init_val),
            trainable=False,
            use_resource=True,
            collections=[_TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES])
    return self._cache_variables[cache_name]
  def _add_replica_id_to_graph(self):
    """Adds nodes for computing the replica ID to the graph."""
    if self._tt_config.num_replicas:
      with ops.control_dependencies(None):
        # Uses None as dependency to run outside of TPU graph rewrites.
        self._replica_id = tpu_ops.tpu_replicated_input(
            list(range(self._tt_config.num_replicas)),
            name='tt_replica_id')
    else:
      # Without replica information, traces are tagged with a literal string.
      self._replica_id = 'unknown'
def _inside_op_range(self, idx):
"""Return True if the given index is inside the selected range."""
if idx < self._parameters.op_range[0]:
return False
return (self._parameters.op_range[1] < 0 or
idx <= self._parameters.op_range[1])
  def _is_user_included_op(self, op):
    """Checks whether the op is included in the tensor tracer flags.

    Args:
      op: tf Operation
    Returns:
      True, if the op is included.
      An op is included if:
      - Its op name is given in included_opnames
      - Its op type is given in included_optypes
      - The op is at most _trace_ops_before_included hops before an included op
      - The op is at most _trace_ops_after_included hops after an included op
    """
    # _included_op_full_names memoizes positive answers so that the regex
    # matching and graph walk below run at most once per op name.
    def _is_op_or_any_neighbor_included(op, check_before=0, check_after=0):
      """Helper function to check if op is included or not."""
      if op.name in self._included_op_full_names:
        return True
      for opname_re in self._parameters.included_opname_re_list:
        if opname_re.match(op.name):
          self._included_op_full_names.add(op.name)
          return True
      for optype_re in self._parameters.included_optype_re_list:
        if optype_re.match(op.type):
          self._included_op_full_names.add(op.name)
          return True
      # Walk forward through consumers up to check_after hops.
      if check_after > 0:
        for out_tensor in op.outputs:
          for consumer in out_tensor.consumers():
            if _is_op_or_any_neighbor_included(consumer, check_after - 1, 0):
              self._included_op_full_names.add(op.name)
              return True
      # Walk backward through inputs up to check_before hops.
      if check_before > 0:
        for input_tensor in op.inputs:
          if _is_op_or_any_neighbor_included(input_tensor.op,
                                             0,
                                             check_before - 1):
            self._included_op_full_names.add(op.name)
            return True
      return False
    # check_after and check_before are swapped below, as below operation
    # checks the distance from an arbitrary op to included ops.
    return _is_op_or_any_neighbor_included(
        op, self._parameters.trace_ops_after_included,
        self._parameters.trace_ops_before_included)
def _is_user_excluded_op(self, op):
for opname_re in self._parameters.excluded_opname_re_list:
if opname_re.match(op.name):
return True
for optype_re in self._parameters.excluded_optype_re_list:
if optype_re.match(op.type):
return True
return False
def _signature_types(self):
"""Returns a dictionary holding the order of signatures in the cache for the selected trace mode."""
if self._parameters.trace_mode in set([
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS]):
return {self._parameters.trace_mode: 0}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
return self._parameters.summary_signatures
return {}
  def _num_signature_dimensions(self):
    # Number of signature values stored per tensor in the cache (1 for the
    # single-signature modes, several for summary mode).
    return len(self._signature_types())
def _use_tensor_values_cache(self):
"""Returns True if immediate tensors should be first saved to a cache."""
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
# For summary tace mode only compact format is supported.
return True
if self._parameters.trace_mode not in set([
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS,
tensor_tracer_flags.TRACE_MODE_SUMMARY
]):
return False
if (self._parameters.trace_dir and
_trace_files_need_precreated(self._parameters.trace_dir)):
return True
return self._parameters.use_compact_trace
def _use_tensor_buffer(self):
"""Returns true if the whole tensor needs to be cached/buffered in memory."""
return (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)
  def _save_tensor_value_to_cache_op(self, cache_idx, updates):
    """Returns an op that will save the given updates to an entry in the cache.

    Args:
      cache_idx: The cache index of the tensor within the cache.
      updates: A dictionary of the signature updates.
    Returns:
      Cache update operation.
    """
    # state_ops.scatter_update allows updates only along the first dimension.
    # Make a compact array by concantating different signatures, and update
    # them all together.
    sorted_update = []
    if self._num_signature_dimensions() > 1:
      # Order the signature values by their fixed slot index so each one lands
      # in its assigned column of the cache row.
      signature_indices = self._signature_types()
      for _, val in sorted(updates.items(),
                           key=lambda item: signature_indices[item[0]]):
        sorted_update.append(val)
      updates = array_ops.stack(sorted_update, axis=0)
      updates = array_ops.reshape(updates, [1,
                                            self._num_signature_dimensions()])
    else:
      # Single-signature mode: unpack the lone (name, value) pair.
      (_, val), = updates.items()
      updates = array_ops.reshape(val, [1, self._num_signature_dimensions()])
    indices = constant_op.constant([cache_idx])
    cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG)
    return state_ops.scatter_update(cache, indices, updates).op
  def _snapshot_tensor(self, tensor):
    """Creates a new tf.Variable and a new tf.Operation that assigns the value of the tensor to this variable.

    Args:
      tensor: tensor whose values will be stored in a new tf.Variable.
    Returns:
      An assignment operation.
    """
    # The cache variable mirrors the tensor's full shape and dtype, so the
    # assign below copies the entire tensor value.
    snapshot_variable = self._create_or_get_tensor_values_cache(
        tensor.name, tensor.op.graph,
        tensor.shape.as_list(), tensor.dtype)
    return state_ops.assign(snapshot_variable, tensor).op
  def _preprocess_traced_tensor(self, tensor):
    """Computes NAN/Norm/Max on TPUs before sending to CPU.

    Args:
      tensor: The tensor to be traced.
    Returns:
      A tensor that should be input to the trace_function.
    Raises:
      RuntimeError: If the trace mode is invalid.
    """
    def _detect_nan_inf(tensor):
      """Trace function for detecting any NaN/Inf in the tensor."""
      if tensor.dtype.is_floating:
        mask = math_ops.reduce_any(
            gen_math_ops.logical_or(
                gen_math_ops.is_nan(tensor), gen_math_ops.is_inf(tensor)))
        # Encode the boolean result as a [1]-shaped float: 1.0 = has NaN/Inf.
        output_tensor = control_flow_ops.cond(
            mask,
            lambda: constant_op.constant([1.0]),
            lambda: constant_op.constant([0.0]))
      else:
        # Non-floating dtypes cannot hold NaN/Inf.
        output_tensor = constant_op.constant([0.0])
      return output_tensor
    def _compute_signature(tensor, tf_op, cast_to_f32=True):
      # Applies the given reduction op, optionally casting to float32 first so
      # all signatures share one cache dtype.
      if cast_to_f32:
        tensor = math_ops.cast(tensor, dtypes.float32)
      output_tensor = tf_op(tensor)
      # Return type should be scalar. Set it if it does not have the
      # information.
      if not output_tensor.get_shape().is_fully_defined():
        output_tensor = array_ops.reshape(output_tensor, [])
      return output_tensor
    def _show_size(tensor):
      # In order to check the size of a tensor.
      # Not all sizes are known at the compile time, also, different replicas
      # sometimes get different sizes of tensors.
      # Collect it here to be used in merging replica data.
      tsize = _compute_signature(tensor, array_ops.size, cast_to_f32=False)
      # Cast to float32, so that it can be placed into same cache with other
      # signatures.
      return math_ops.cast(tsize, dtypes.float32)
    def _show_max(tensor, cast_to_f32=True):
      # returns -inf for empty tensor
      return _compute_signature(tensor, math_ops.reduce_max, cast_to_f32)
    def _show_min(tensor, cast_to_f32=True):
      # returns inf for empty tensor
      return _compute_signature(tensor, math_ops.reduce_min, cast_to_f32)
    def _show_norm(tensor, cast_to_f32=True):
      # returns 0 for empty tensor
      return _compute_signature(tensor, linalg_ops.norm, cast_to_f32)
    def _show_mean_and_variance(tensor, cast_to_f32=True):
      """Returns the mean and variance of the given tensor."""
      if cast_to_f32:
        tensor = math_ops.cast(tensor, dtypes.float32)
      # returns nan for empty tensor
      mean, var = nn_impl.moments(array_ops.reshape(tensor, [-1]), axes=[0])
      # The shape has to be 1. Set it if it does not have the information.
      if not mean.get_shape().is_fully_defined():
        mean = array_ops.reshape(mean, [])
      if not var.get_shape().is_fully_defined():
        var = array_ops.reshape(var, [])
      return mean, var
    def _show_max_abs(tensor):
      tensor = math_ops.cast(tensor, dtypes.float32)
      output_tensor = math_ops.reduce_max(math_ops.abs(tensor))
      # Clamp at zero; reduce_max of an empty tensor yields -inf.
      zero = constant_op.constant(0, dtypes.float32)
      output_tensor = gen_math_ops.maximum(zero, output_tensor)
      # The shape has to be 1. Set it if it does not have the information.
      output_tensor = array_ops.reshape(output_tensor, [1])
      return output_tensor
    def _detect_inf_nan_producer(tensor):
      """Checks if the tensor is the first NaN/Inf tensor in the computation path."""
      if tensor.op.inputs:
        inp_check = [
            _detect_nan_inf(inp_tensor) for inp_tensor in tensor.op.inputs
        ]
        is_any_input_inf_nan = math_ops.add_n(inp_check)
      else:
        # NOTE(review): this branch yields a bool constant while the branch
        # above yields a float sum; the subtraction below mixes the two
        # dtypes for source ops with no inputs — confirm intended.
        is_any_input_inf_nan = constant_op.constant(0, dtypes.bool)
      is_current_tensor_inf_nan = _detect_nan_inf(tensor)
      # An op is NaN/INF producer only when all inputs are nan/inf free (
      # is_any_input_inf_nan = 0), and its output has nan/inf (
      # is_current_tensor_inf_nan=1). Below will be 1 if op nan/inf is producer.
      is_nan_producer = is_current_tensor_inf_nan - is_any_input_inf_nan
      is_nan_producer = math_ops.reduce_any(is_nan_producer > 0)
      return is_nan_producer
    # Dispatch on the configured trace mode; each branch returns a dict of
    # {signature_name: tensor}.
    if (self._parameters.trace_mode ==
        tensor_tracer_flags.TRACE_MODE_FULL_IF_NAN):
      return {self._parameters.trace_mode: _detect_inf_nan_producer(tensor)}
    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:
      return {self._parameters.trace_mode: _detect_nan_inf(tensor)}
    if (self._parameters.trace_mode ==
        tensor_tracer_flags.TRACE_MODE_PART_TENSOR):
      return {self._parameters.trace_mode: tensor}
    if (self._parameters.trace_mode in (
        tensor_tracer_flags.TRACE_MODE_FULL_TENSOR,
        tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)):
      return {self._parameters.trace_mode: tensor}
    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NORM:
      return {self._parameters.trace_mode: array_ops.reshape(
          _show_norm(tensor), [1])}
    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_MAX_ABS:
      return {self._parameters.trace_mode: _show_max_abs(tensor)}
    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
      tensor = math_ops.cast(tensor, dtypes.float32)
      result_dict = {}
      # Call mean and variance computation here to avoid adding the same nodes
      # twice.
      if (_TT_SUMMARY_MEAN in self._signature_types() or
          _TT_SUMMARY_VAR in self._signature_types()):
        mean, variance = _show_mean_and_variance(tensor, cast_to_f32=False)
      for signature_name, _ in sorted(self._signature_types().items(),
                                      key=lambda x: x[1]):
        if signature_name == _TT_SUMMARY_NORM:
          signature_result_tensor = _show_norm(tensor, cast_to_f32=False)
        elif signature_name == _TT_SUMMARY_MAX:
          signature_result_tensor = _show_max(tensor, cast_to_f32=False)
        elif signature_name == _TT_SUMMARY_MIN:
          signature_result_tensor = _show_min(tensor, cast_to_f32=False)
        elif signature_name == _TT_SUMMARY_SIZE:
          signature_result_tensor = _show_size(tensor)
        elif signature_name == _TT_SUMMARY_MEAN:
          signature_result_tensor = mean
        elif signature_name == _TT_SUMMARY_VAR:
          signature_result_tensor = variance
        else:
          raise ValueError('Unknown signature type :%s.' % signature_name)
        result_dict[signature_name] = signature_result_tensor
      return result_dict
    raise RuntimeError(
        'Tensor trace fun for %s is not yet implemented'
        % self._parameters.trace_mode)
def _make_tensor_trace_fun(self, tensor_name, tensor_trace_order):
"""Makes the tensor tracing function called by outside compilation.
Args:
tensor_name: name of the tensor being traced.
tensor_trace_order: TensorTraceOrder object holding tensorname to id map.
Returns:
A function to be passed as the first argument to outside compilation.
Raises:
RuntimeError: If the trace mode is invalid.
"""
def _print_tensor(tensor_name, num_elements, tensor, output_tensor):
"""Prints a tensor value to a file.
Args:
tensor_name: name of the tensor being traced.
num_elements: number of elements to print (-1 means print all).
tensor: the tensor needs to be returned.
output_tensor: the tensor needs to be printed.
Returns:
The same tensor passed via the "tensor" argument.
Raises:
ValueError: If tensor_name is not already in
self._tensorname_idx_map.
"""
if self._parameters.is_brief_mode():
if tensor_name not in tensor_trace_order.tensorname_idx_map:
raise ValueError(
'Tensor name %s is not in the tensorname_idx_map'%tensor_name)
msg = '%d'%self._tensorname_idx_map[tensor_name]
else:
msg = '"%s"'%tensor_name
if self._parameters.trace_dir:
output_path = os.path.join(self._parameters.trace_dir, _TRACE_FILE_NAME)
output_stream = _OUTPUT_STREAM_ESCAPE + output_path
else:
output_stream = sys.stderr
return logging_ops.print_v2(msg, array_ops.shape(output_tensor),
'@', self._replica_id,
'\n', output_tensor, '\n',
summarize=num_elements,
output_stream=output_stream)
def _show_part_tensor(tensor):
"""Trace function for printing part of the tensor."""
return _print_tensor(tensor_name, _TRACE_MODE_PART_TENSOR_SIZE,
tensor, tensor)
def _show_full_tensor(tensor):
"""Trace function for printing the entire tensor."""
return _print_tensor(tensor_name, -1, tensor, tensor)
def _show_full_tensors(tensor):
"""Prints the full tensor values for the tensors that are _trace_stack_size hops away from a given tensor."""
def _get_distance_k_tensors(k_before=0):
"""Returns the tensors that are at most k_before hops away from the tensor."""
if k_before < 0:
return []
visited_tensors = {tensor: 0}
visitor_queue = [tensor]
head = 0
while head < len(visitor_queue):
current_tensor = visitor_queue[head]
head += 1
distance = visited_tensors[current_tensor]
if distance == k_before:
break
for input_tensor in current_tensor.op.inputs:
if input_tensor in visited_tensors:
continue
visitor_queue.append(input_tensor)
visited_tensors[input_tensor] = distance + 1
return visitor_queue
tensors_to_print = _get_distance_k_tensors(
self._parameters.trace_stack_size)
print_ops = [_print_tensor(t.name, -1, t, t) for t in tensors_to_print]
with ops.control_dependencies(print_ops):
return constant_op.constant(True)
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_IF_NAN):
return _show_full_tensors
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_PART_TENSOR):
return _show_part_tensor
# The input tensor has a shape of "[1]" for TRACE_MODE_NAN_INF,
# TRACE_MODE_NORM, and TRACE_MODE_MAX_ABS, as related computations are
# performed within TPUs and only their results are transferred to CPU.
# Simply, print the full tensor for these trace modes.
if self._parameters.trace_mode in (
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR,
tensor_tracer_flags.TRACE_MODE_MAX_ABS,
tensor_tracer_flags.TRACE_MODE_SUMMARY
):
return _show_full_tensor
raise RuntimeError('Tensor trace fun for %s is not yet implemented'
%self._parameters.trace_mode)
  def _skip_op(self, op_id, op, ops_in_exec_path, report_handler):
    """Returns True if we should not trace Op.

    Args:
      op_id: Topological index of the op.
      op: tf.Operation
      ops_in_exec_path: Set of operations that are in the execution path.
      report_handler: An instance of tensor_tracer_report.TTReportHandle.
    Returns:
      True if the op should not be traced, false otherwise.
    """
    # Safety checks come first; a user inclusion cannot override them.
    if TensorTracer.while_loop_op(op):
      report_handler.instrument_op(
          op, TensorTracer.reason(op_id, _REASON_WHILELOOP_OP))
      return True
    if TensorTracer.unsafe_op(op):
      report_handler.instrument_op(
          op, TensorTracer.reason(op_id, _REASON_UNSAFE_OP))
      return True
    if TensorTracer.device_mismatch(self._tt_config.device_type, op):
      report_handler.instrument_op(
          op, TensorTracer.reason(op_id, _REASON_DEVICE_MISMATCH))
      return True
    if op not in ops_in_exec_path:
      report_handler.instrument_op(
          op, TensorTracer.reason(op_id, _REASON_NOT_EXECUTED))
      return True
    # An explicit user inclusion wins over the range/interestingness/exclusion
    # filters below.
    if self._is_user_included_op(op):
      report_handler.instrument_op(
          op, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
      return False
    if not self._inside_op_range(op_id):
      report_handler.instrument_op(
          op, TensorTracer.reason(op_id, _REASON_OUTSIDE_OP_RANGE))
      return True
    if not self._is_interesting_op(op):
      report_handler.instrument_op(
          op, TensorTracer.reason(op_id, _REASON_LESS_INTERESTING_OP))
      return True
    if self._is_user_excluded_op(op):
      report_handler.instrument_op(
          op, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
      return True
    return False
def _skip_tensor(self, op_id, out_tensor, report_handler):
  """Returns True if we should not trace out_tensor.

  Every decision path records its reason through report_handler before
  returning, so the report explains why each tensor was (not) traced.

  Args:
    op_id: Topological index of the op producing tensor.
    out_tensor: tf.Tensor
    report_handler: An instance of tensor_tracer_report.TTReportHandle.

  Returns:
    True if the tensor should not be traced, false otherwise.
  """
  # Skips a tensor if the tensor has a non-numeric type.
  # Note: we cannot use check_ops.is_numeric_tensor(out_tensor)
  # because it also excludes tensors with dtypes, bool, and
  # float32_ref, which we actually want to trace.
  non_numeric_tensor_types = set([dtypes.variant, dtypes.resource,
                                  dtypes.string])
  if out_tensor.dtype in non_numeric_tensor_types:
    report_handler.instrument_tensor(
        out_tensor, TensorTracer.reason(op_id, _REASON_NON_NUMERIC_TENSOR))
    return True
  # Skip a tensor if it feeds a special while loop op.
  if [consumer for consumer in out_tensor.consumers() if
      TensorTracer.while_loop_op(consumer)]:
    report_handler.instrument_tensor(
        out_tensor, TensorTracer.reason(op_id, _REASON_FEEDS_WHILELOOP_OP))
    return True
  # Explicit user inclusion wins over all later filters (checked before the
  # dynamic-shape and scalar filters below).
  if self._is_user_included_op(out_tensor.op):
    report_handler.instrument_tensor(
        out_tensor, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
    return False
  if self._is_user_excluded_op(out_tensor.op):
    report_handler.instrument_tensor(
        out_tensor, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
    return True
  if not out_tensor.get_shape().is_fully_defined():
    # If trace mode is nan-inf, norm or max, then the tensor will be reduced
    # to a scalar before the outside compilation call, so a dynamic shape is
    # acceptable in those modes.
    if self._parameters.trace_mode in (
        tensor_tracer_flags.TRACE_MODE_NAN_INF,
        tensor_tracer_flags.TRACE_MODE_NORM,
        tensor_tracer_flags.TRACE_MODE_MAX_ABS,
        tensor_tracer_flags.TRACE_MODE_SUMMARY
    ):
      report_handler.instrument_tensor(
          out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
      return False
    else:
      report_handler.instrument_tensor(
          out_tensor, TensorTracer.reason(op_id, _REASON_DYNAMIC_SHAPE))
      return True
  rank = len(out_tensor.shape)
  if rank < 1:
    # scalar: only traced when explicitly enabled and safe to trace.
    if self._parameters.trace_scalar_ops:
      if TensorTracer.unsafe_scalar_trace(out_tensor.op):
        report_handler.instrument_tensor(
            out_tensor, TensorTracer.reason(op_id, _REASON_UNSAFE_SCALAR))
        return True
      else:
        report_handler.instrument_tensor(
            out_tensor, TensorTracer.reason(op_id, _REASON_SCALAR_GET_TRACED))
        return False
    else:
      report_handler.instrument_tensor(
          out_tensor, TensorTracer.reason(op_id, _REASON_SKIP_SCALAR))
      return True
  else:
    # tensor of rank >= 1: traced.
    report_handler.instrument_tensor(
        out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
    return False
def _filter_execution_path_operations(self, operations, fetches):
  """Returns the set of ops in the execution path to compute given fetches."""
  # With no fetches every operation is considered reachable.
  if fetches is None:
    return set(operations)
  # Normalize a single fetch into a list.
  if not isinstance(fetches, (list, tuple)):
    fetches = [fetches]
  # Map tensor fetches to their producing ops.
  op_fetches = []
  for fetch in fetches:
    if isinstance(fetch, ops.Operation):
      op_fetches.append(fetch)
    elif isinstance(fetch, ops.Tensor):
      op_fetches.append(fetch.op)
    else:
      raise RuntimeError('Given fetch:%s is neither a tensor nor an op.'
                         %fetch)
  # Depth-first walk backwards over both data and control edges.
  reachable = set(op_fetches)
  pending = list(op_fetches)
  while pending:
    current_op = pending.pop()
    predecessors = [tensor_input.op for tensor_input in current_op.inputs]
    predecessors.extend(current_op.control_inputs)
    for predecessor in predecessors:
      if predecessor in reachable:
        continue
      # Filter out loop condition operations, tracing them causes a cycle.
      # Trace only the loop-body.
      if TensorTracer.loop_cond_op(predecessor):
        continue
      reachable.add(predecessor)
      pending.append(predecessor)
  return reachable
def _determine_and_instrument_traced_tensors(self, graph_order,
ops_in_exec_path,
tensor_trace_points,
report_handler):
"""Determines the tensors to trace and instruments the trace details.
Args:
graph_order: graph_order tuple containing graph (tf.graph), operations
(list of operations), op_to_idx (op id mapping), (tensors) list of
tensors, tensor_to_idx (tensor id mapping), contains_cycle (whether
there is a cycle in the graph), topological_order_or_cycle (list of ops
in topological order or list of ops creating a cycle).
ops_in_exec_path: Set of ops in the execution path.
tensor_trace_points: Collection of programatic tensor trace points.
report_handler: An instance of tensor_tracer_report.TTReportHandle.
Returns:
List of tensors to be traced.
"""
traced_tensors = []
checkpoint_operations = set([tensor.op
for (tensor, _) in tensor_trace_points])
for op_id, op in enumerate(graph_order.operations):
if checkpoint_operations and op not in checkpoint_operations:
continue
if self._skip_op(op_id, op, ops_in_exec_path, report_handler):
continue
for i in range(len(op.outputs)):
out_tensor = op.outputs[i]
if not self._skip_tensor(op_id, out_tensor, report_handler):
traced_tensors.append(out_tensor)
return traced_tensors
def _check_trace_files(self):
  """Checks if any requirements for trace files are satisfied."""
  trace_dir = self._parameters.trace_dir
  if not trace_dir:
    # Traces go to stderr; there are no trace files to verify.
    return
  if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
    # tf.summary operations manage their own files; nothing to pre-create.
    return
  if _trace_files_need_precreated(trace_dir):
    # Every replica must already have a pre-created per-replica trace file.
    for replica_id in range(0, self._tt_config.num_replicas):
      trace_file_path = os.path.join(
          trace_dir, _COMPACT_TRACE_FILE_PREFIX) + '%d'%replica_id
      if not gfile.Exists(trace_file_path):
        raise RuntimeError(
            '%s must be pre-created with the '
            'appropriate properties.'%trace_file_path)
  else:
    # Otherwise create the trace directory on demand and verify it exists.
    if not gfile.Exists(trace_dir):
      gfile.MkDir(trace_dir)
      if not gfile.Exists(trace_dir):
        raise RuntimeError('Failed to create %s'%trace_dir)
def _determine_trace_and_create_report(self, graph, ops_in_exec_path):
  """Work needs to be done prior to TPU or CPU tracing.

  Args:
    graph: tf.graph
    ops_in_exec_path: Set of operations in the execution path.

  Returns:
    An instance of tensor_tracer_report.TensorTraceOrder, containing list of
    tensors to be traced with their topological order information.
  """
  self._check_trace_files()

  order_info = tensor_tracer_report.sort_tensors_and_ops(graph)
  trace_points = graph.get_collection(_TENSOR_TRACER_COLLECTION)

  handler = tensor_tracer_report.TTReportHandle()
  traced = self._determine_and_instrument_traced_tensors(
      order_info, ops_in_exec_path, trace_points, handler)

  trace_order = tensor_tracer_report.TensorTraceOrder(order_info, traced)
  signature_count = self._num_signature_dimensions()
  if signature_count:
    # Reserve one cache row per traced tensor, one column per signature.
    self._create_or_get_tensor_values_cache(
        _TT_SUMMARY_TAG, graph, [len(traced), signature_count])

  summary_modes = (tensor_tracer_flags.TRACE_MODE_SUMMARY,
                   tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)
  if self._parameters.trace_mode in summary_modes:
    # Summary modes serialize the report as a proto next to the event files.
    report_proto = handler.create_report_proto(self._tt_config,
                                               self._parameters,
                                               trace_order,
                                               trace_points,
                                               self._signature_types())
    handler.write_report_proto(report_proto, self._parameters)
  else:
    handler.create_report(self._tt_config, self._parameters,
                          trace_order, trace_points)
  return trace_order
def _create_host_call(self):
  """Returns True when trace results are written through host-call summaries."""
  summary_modes = (tensor_tracer_flags.TRACE_MODE_SUMMARY,
                   tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)
  return self._parameters.trace_mode in summary_modes
def _generate_flush_cache_op(self, num_replicas, on_tpu):
  """Generates an Op that will flush the cache to file.

  Args:
    num_replicas: total number of replicas.
    on_tpu: if the graph is executed on TPU.

  Returns:
    The Op to flush the cache to file.
  """
  def _flush_fun(cache, replica_id):
    """Flushes the cache to a file corresponding to replica_id."""

    def _f(file_index):
      """Generates a func that flushes the cache to a file."""
      def _print_cache():
        """Flushes the cache to a file."""
        replica_str = ('%d' % file_index)
        if self._parameters.trace_dir:
          # Write to the per-replica compact trace file under trace_dir.
          output_path = (os.path.join(self._parameters.trace_dir,
                                      _COMPACT_TRACE_FILE_PREFIX)
                         + replica_str)
          output_stream = _OUTPUT_STREAM_ESCAPE + output_path
        else:
          output_stream = sys.stderr
        # Header line identifying the replica, followed by one print per
        # signature column of the cache.
        new_step_line = _REPLICA_ID_TAG + replica_str
        print_ops = []
        for i in range(self._num_signature_dimensions()):
          print_ops.append(logging_ops.print_v2(
              new_step_line, '\n',
              cache[:, i], '\n',
              summarize=-1,
              output_stream=output_stream))
        with ops.control_dependencies(print_ops):
          return constant_op.constant(0).op
      return _print_cache

    def _eq(file_index):
      # Predicate tensor: does this replica own file_index?
      return math_ops.equal(replica_id, file_index)

    flush_op_cases = {}
    for i in range(num_replicas):
      flush_op_cases[_eq(i)] = _f(i)
    # Each replica needs to determine where to write their output.
    # To do this, we check if replica_id is 0, then 1, ..., and then
    # num_replicas - 1 statically; and return the corresponding static file
    # name. We cannot simply set the file name in python, as replica_id is
    # only known during tf runtime, and we cannot create dynamic filenames.
    return control_flow_ops.case(flush_op_cases, exclusive=True)

  cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG)
  if on_tpu:
    # Printing must happen on the host, hence outside compilation on TPU.
    flush_op = tpu.outside_compilation(_flush_fun,
                                       cache.value(), self._replica_id)
  else:
    flush_op = _flush_fun(cache.value(), self._replica_id)
  # After flushing, reset the cache to its init value so the next step
  # starts from a clean slate.
  with ops.control_dependencies([flush_op]):
    reset_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE,
                                       dtype=cache.dtype,
                                       shape=cache.shape)
    assign_op = state_ops.assign(cache, reset_value).op
    with ops.control_dependencies([assign_op]):
      return constant_op.constant(0).op
def _flush_tensor_values_cache(self, tensor_fetches, op_fetches, on_tpu):
  """Flushes the intermediate tensor values in the graph to the cache.

  Args:
    tensor_fetches: list of tensor results returned by the model_fn.
    op_fetches: list of ops that are returned by the model_fn, e.g., train_op.
    on_tpu: if the graph is executed on TPU.

  Returns:
    An identical copy of tensor_fetches.
  """
  # The flush must run only after every tracing op has executed, so it
  # depends on all op fetches and on the producers of all tensor fetches.
  flush_dependencies = op_fetches + [fetch.op for fetch in tensor_fetches]
  with ops.control_dependencies(flush_dependencies):
    flush_cache_op = self._generate_flush_cache_op(
        self._tt_config.num_replicas, on_tpu)
  return control_flow_ops.tuple(tensor_fetches,
                                control_inputs=[flush_cache_op])
def _process_tensor_fetches(self, tensor_fetches):
  """Check that tensor_fetches is not empty and have valid tensors."""
  # Reject a missing fetch list outright.
  if tensor_fetches is None:
    raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
                       'None.')
  # A single fetch is wrapped into a list; an empty list is an error.
  if not isinstance(tensor_fetches, (list, tuple)):
    tensor_fetches = [tensor_fetches]
  elif not tensor_fetches:
    raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
                       'empty list.')
  fetches = []
  for fetch in tensor_fetches:
    if not isinstance(fetch, ops.Tensor):
      raise RuntimeError('Given tensor_fetch:%s is not a tensor.' % fetch)
    fetches.append(fetch)
  return fetches
def _process_op_fetches(self, op_fetches):
  """Check that op_fetches have valid ops."""
  if op_fetches is None:
    return []
  # Normalize a single fetch into a list of candidates.
  candidates = op_fetches if isinstance(op_fetches, (list, tuple)) else [op_fetches]
  valid_ops = []
  for candidate in candidates:
    if isinstance(candidate, ops.Operation):
      valid_ops.append(candidate)
    elif isinstance(candidate, ops.Tensor):
      # A tensor fetch contributes its producing op.
      valid_ops.append(candidate.op)
    else:
      # Invalid entries are dropped with a warning rather than failing.
      logging.warning('Ignoring the given op_fetch:%s, which is not an op.' %
                      candidate)
  return valid_ops
def _convert_fetches_to_input_format(self, input_fetches, current_fetches):
  """Changes current_fetches' format, so that it matches input_fetches.

  Args:
    input_fetches: the fetches as originally given to session.run; either a
      single tensor, a list, or a tuple.
    current_fetches: list of processed fetches mirroring input_fetches.

  Returns:
    current_fetches re-packaged in the same container shape (single tensor,
    tuple, or list) as input_fetches.

  Raises:
    RuntimeError: if the number of processed fetches does not match the
      number of input fetches.
  """
  if isinstance(input_fetches, ops.Tensor):
    if len(current_fetches) != 1:
      raise RuntimeError('Tensor tracer input/output fetches do not match.')
    return current_fetches[0]
  else:
    # Bug fix: compare against the *input* fetches. The original code
    # compared len(current_fetches) with itself, which can never differ,
    # so mismatches were silently ignored.
    if len(input_fetches) != len(current_fetches):
      raise RuntimeError('Tensor tracer input/output fetches do not match.')
    elif isinstance(input_fetches, tuple):
      return tuple(current_fetches)
    else:
      return current_fetches
def _get_op_control_flow_context(self, op):
  """Returns the control flow of the given op.

  Args:
    op: tf.Operation for which the control flow context is requested.

  Returns:
    The op's control flow context; for a LoopExit op, the outer control
    flow context of the loop is returned instead.
  """
  # pylint: disable=protected-access
  flow_context = op._control_flow_context
  # pylint: enable=protected-access
  if control_flow_util.IsLoopExit(op):
    # LoopExit reports the loop's own context; callers need the enclosing one.
    return flow_context.outer_context
  return flow_context
def _prepare_host_call_fn(self, processed_t_fetches, op_fetches):
  """Creates a host call function that will write the cache as tb summary.

  Args:
    processed_t_fetches: List of tensor provided to session.run.
    op_fetches: List of operations provided to session.run.

  Raises:
    ValueError if trace_dir is not set.
  """
  if self._parameters.trace_dir is None:
    raise ValueError('Provide a trace_dir for tensor tracer in summary mode. '
                     '--trace_dir=/model/dir')

  def _write_cache(step, **kwargs):
    """Writes the given caches as tensor summary.

    Args:
      step: Step tensor with dimension [num_cores].
      **kwargs: The dictionary of tensors that needs to be written as
        summaries. Key and value pairs within kwargs correspond to the tag
        name, and tensor content that will be written using summary.write.
        The trace_modes that use this function are:
          - summary: In summary mode, kwargs includes a single (tag, content)
            pair which are, _TT_SUMMARY_TAG and a tf.float32 signature_cache
            variable. The dimension of the signature_cache is:
              num_cores x num_traced_tensors x num_signatures.
          - full_tensor_summary: kwargs will include all traced tensors. Tag
            and content correspond to the name of the tensor, and its actual
            content.

    Returns:
      A tf.Operation that needs to be executed for the host call dependencies.
    """
    # TODO(deveci): Parametrize max_queue, so that flushing op can be called
    # less frequently.
    # Setting max_queue to 100 appears to be safe even when the number of
    # iterations are much lower, as the destructor of the writer will flushes
    # it.
    summary_write_ops = []
    with summary.create_file_writer_v2(
        self._parameters.trace_dir,
        filename_suffix=_TT_EVENT_FILE_SUFFIX,
        max_queue=_TT_SUMMARY_MAX_QUEUE).as_default():
      summary_metadata = summary_pb2.SummaryMetadata(
          plugin_data=summary_pb2.SummaryMetadata.PluginData(
              plugin_name=_TT_TENSORBOARD_PLUGIN_NAME))
      # Every cache is tagged under the common Tensor Tracer namespace so the
      # tensorboard plugin can find them.
      for key, value in kwargs.items():
        summary_write_ops.append(summary.write(
            _TT_SUMMARY_TAG + '/' + key, value, metadata=summary_metadata,
            step=step[0]))
    return control_flow_ops.group(summary_write_ops)

  # Global step reshaped to rank 1 so per-core values concatenate on axis 0.
  step = array_ops.reshape(training_util.get_or_create_global_step(), [1])
  self._host_call_fn = {}

  # The host call must run after all tracing ops have executed.
  host_call_deps = op_fetches + [tensor.op for tensor in processed_t_fetches]

  caches_to_write = {}
  with ops.control_dependencies(host_call_deps):
    all_caches = self._get_all_cache_variables()
    for cache_name, cache_variable in all_caches.items():
      # Increase the cache rank by 1, so that when host call concatenates
      # tensors from different replicas, we can identify them with [core_id].
      new_cache_shape = [1]
      new_cache_shape.extend(cache_variable.shape.as_list())
      cache = array_ops.reshape(cache_variable.value(), new_cache_shape)
      caches_to_write[cache_name] = cache
  # Add step to parameter dictionary.
  caches_to_write['step'] = step
  # Other options without adding step to parameter dictionary are
  # * host_call_fn = (_write_cache(step, caches_to_write)) : fails as it
  #   considers caches_to_write as a single parameter, rather than a keyword
  #   parameters.
  # * host_call_fn = (_write_cache(step, **caches_to_write)) : fails with
  #   a syntax error.
  self._host_call_fn[_TT_HOSTCALL_KEY] = (_write_cache, caches_to_write)
def host_call_deps_and_fn(self):
  """Returns self._host_call_fn: dict of host-call key -> (fn, kwargs).

  The dict is (re)built by _prepare_host_call_fn, which stores the summary
  writer callback and its cache arguments under _TT_HOSTCALL_KEY.
  """
  return self._host_call_fn
def _trace_execution(self, graph,
                     tensor_fetches,
                     op_fetches=None,
                     on_tpu=True):
  """Common tracing function for both CPU and TPUs.

  The caller function should set device_type, num_replicas,
  num_replicas_per_host, num_hosts and replica_id before calling
  _trace_execution.

  Args:
    graph: the graph of Ops executed on the TPU.
    tensor_fetches: a (list,tuple,or a single object) of tensor fetches
      returned by model_fn given to session.run. Function must be provided
      with as least one tensor to fetch.
    op_fetches: A list of op fetches returned by model_fn given to
      session.run. op_fetches and tensor_fetches are used to determine the
      nodes that will be executed. Can be None.
    on_tpu: True if executing on TPU.

  Returns:
    tensor_fetches: an exact copy of tensor_fetches that has additional
                    dependencies.
  Raises:
    RuntimeError: If tensor_fetches is None or empty.
  """
  def _cast_unsupported_dtypes(tensor):
    """Casts tensor to a supported type."""
    if tensor.dtype.__eq__(dtypes.int64):
      # outside-compilation doesn't support int64 input yet.
      return math_ops.cast(tensor, dtypes.int32)
    if tensor.dtype.__eq__(dtypes.bfloat16) or tensor.dtype.__eq__(
        dtypes.float16):
      # Since host can't handle bf16, convert tensor to f32.
      return math_ops.cast(tensor, dtypes.float32)
    return tensor

  trace_mode = self._parameters.trace_mode
  device_type = self._tt_config.device_type
  analytics.track_usage('tensor_tracer', [trace_mode, device_type])
  TensorTracer.check_device_type(device_type)
  TensorTracer.check_trace_mode(device_type, trace_mode)
  # Check in_tensor_fetches, and op_fetches and convert them to lists.
  processed_t_fetches = self._process_tensor_fetches(tensor_fetches)
  op_fetches = self._process_op_fetches(op_fetches)
  all_fetches = op_fetches + [tensor.op for tensor in processed_t_fetches]
  # Filter out the operations that won't be executed.
  # if fetches=None, then ops_in_exec_path = set(operations)
  exec_op_set = self._filter_execution_path_operations(graph.get_operations(),
                                                       all_fetches)
  # Write report file, and determine the traced tensors.
  tensor_trace_order = self._determine_trace_and_create_report(
      graph, exec_op_set)
  tensor_fetch_set = set(processed_t_fetches)
  tracing_ops = []
  # pylint: disable=protected-access
  current_control_flow_context = graph._get_control_flow_context()
  # pylint: enable=protected-access
  # Sort by name for a deterministic rewrite order.
  sorted_exec_op_list = list(exec_op_set)
  sorted_exec_op_list.sort(key=lambda op: op.name)
  # Trace ops only if they are in the execution path.
  for op in sorted_exec_op_list:
    for i in range(len(op.outputs)):
      out_tensor = op.outputs[i]
      tensor_name = out_tensor.name
      if tensor_name not in tensor_trace_order.tensorname_to_cache_idx:
        continue
      # Create the list of consumers before calling _preprocess_traced_tensor.
      # Otherwise, adding control input below, will introduce a cycle in the
      # graph.
      consumers = out_tensor.consumers()
      # Not all consumers may be in the exec path. Filter out the consumers
      # to keep the graph simpler.
      consumers = [cop for cop in consumers if cop in exec_op_set]
      # If there is no consumer of the tensor, there is no need to trace it;
      # unless the tensor itself is one of the fetches.
      is_a_fetched_tensor = out_tensor in tensor_fetch_set
      if (not consumers) and (not is_a_fetched_tensor):
        continue
      # The tracing op must live in the same control flow context as the
      # traced op, otherwise the added control edges would be invalid.
      op_control_flow_context = self._get_op_control_flow_context(op)
      # pylint: disable=protected-access
      graph._set_control_flow_context(op_control_flow_context)
      # pylint: enable=protected-access
      processed_tensors = self._preprocess_traced_tensor(out_tensor)
      if on_tpu:
        for signature in processed_tensors.keys():
          processed_tensors[signature] = _cast_unsupported_dtypes(
              processed_tensors[signature])
      if self._use_tensor_values_cache():
        # Use a small cache to store the characteristics of the tensor.
        cache_idx = tensor_trace_order.tensorname_to_cache_idx[tensor_name]
        trace_op = self._save_tensor_value_to_cache_op(cache_idx,
                                                       processed_tensors)
      elif self._use_tensor_buffer():
        if len(processed_tensors) != 1:
          raise RuntimeError('Multiple stats are only allowed in compact '
                             'mode.')
        # Bug fix: dict.values() returns a non-indexable view on Python 3,
        # so .values()[0] raised TypeError. Take the single value the same
        # way as the non-compact branch below does.
        processed_out_tensor = six.next(six.itervalues(processed_tensors))
        # Store the whole tensor in a buffer.
        trace_op = self._snapshot_tensor(processed_out_tensor)
      else:

        def tpu_wrap_trace_fn(tensor, out_tensor_name):
          """Wraps the trace_fn with outside compilation if on TPUs."""
          tensor_trace_fn = self._make_tensor_trace_fun(out_tensor_name,
                                                        tensor_trace_order)
          if on_tpu:
            return tpu.outside_compilation(tensor_trace_fn, tensor)
          else:
            return tensor_trace_fn(tensor)

        def conditional_trace_fn(predicate_tensor, out_tensor, trace_fn,
                                 out_tensor_name):
          """Creates a cond op that traces the out_tensor if predicate is satisfied."""
          return control_flow_ops.cond(
              predicate_tensor, lambda: trace_fn(out_tensor, out_tensor_name),
              lambda: constant_op.constant(False)).op

        if len(processed_tensors) != 1:
          raise RuntimeError('Multiple stats are only allowed in compact '
                             'mode.')
        # Collecting multiple statistics are only supported in the summary
        # mode that uses compact format(self._use_tensor_values_cache = true).
        # Non-compact mode currently allows single stat per tensor.
        processed_out_tensor = six.next(six.itervalues(processed_tensors))
        if self._parameters.is_conditional_trace:
          trace_op = conditional_trace_fn(processed_out_tensor, out_tensor,
                                          tpu_wrap_trace_fn, tensor_name)
        elif self._parameters.included_cores:
          # Trace only on the explicitly requested cores.
          should_print = constant_op.constant(False)
          for core in self._parameters.included_cores:
            should_print = gen_math_ops.logical_or(
                should_print, gen_math_ops.equal(self._replica_id, core))
          trace_op = conditional_trace_fn(should_print, processed_out_tensor,
                                          tpu_wrap_trace_fn, tensor_name)
        else:
          trace_op = tpu_wrap_trace_fn(processed_out_tensor, tensor_name)

      if is_a_fetched_tensor:
        tracing_ops.append(trace_op)
        continue
      # Add it to all consumers, as some consumers may not be executed if they
      # are in a control flow.
      for consumer_op in consumers:
        # pylint: disable=protected-access
        consumer_op._add_control_input(trace_op)
        # pylint: enable=protected-access
  # pylint: disable=protected-access
  graph._set_control_flow_context(current_control_flow_context)
  # pylint: enable=protected-access
  if tracing_ops:
    # If we are tracing a fetched tensor, their dependency is stored in
    # tracing_ops.
    processed_t_fetches = control_flow_ops.tuple(processed_t_fetches,
                                                 control_inputs=tracing_ops)
  if self._use_tensor_values_cache() or self._use_tensor_buffer():
    if self._create_host_call() and on_tpu:
      self._prepare_host_call_fn(processed_t_fetches, op_fetches)
    else:
      processed_t_fetches = self._flush_tensor_values_cache(
          processed_t_fetches, op_fetches, on_tpu=on_tpu)
  # processed_t_fetches is a list at this point. Convert it to the same
  # format as given in tensor_fetches.
  return self._convert_fetches_to_input_format(tensor_fetches,
                                               processed_t_fetches)
def trace_tpu(self, graph,
              tensor_fetches,
              op_fetches=None,
              num_replicas=None,
              num_replicas_per_host=None,
              num_hosts=None):
  """Traces the tensors generated by TPU Ops in a TF graph.

  Args:
    graph: the graph of Ops executed on the TPU.
    tensor_fetches: a (list,tuple,or a single object) of tensor fetches
      returned by model_fn given to session.run. Function must be provided
      with as least one tensor to fetch.
    op_fetches: A list of op fetches returned by model_fn given to
      session.run. op_fetches and tensor_fetches are used to determine the
      nodes that will be executed. Can be None.
    num_replicas: number of replicas used on the TPU.
    num_replicas_per_host: number of replicas per TPU host.
    num_hosts: total number of TPU hosts.

  Returns:
    tensor_fetches: an exact copy of tensor_fetches that has additional
                    dependencies.
  Raises:
    RuntimeError: If num_replicas_per_host > 8.
    RuntimeError: If tensor_fetches is None or empty.
  """
  # A graph is only instrumented once; repeated calls are no-ops.
  if graph in TensorTracer._traced_graphs:
    logging.warning('Graph is already rewritten with tensor tracer, ignoring '
                    'multiple calls.')
    return tensor_fetches
  TensorTracer._traced_graphs.add(graph)

  self._tt_config.device_type = _DEVICE_TYPE_TPU
  self._tt_config.num_replicas = num_replicas
  self._tt_config.num_replicas_per_host = num_replicas_per_host
  self._tt_config.num_hosts = num_hosts
  if self._tt_config.num_replicas is not None:
    if self._tt_config.num_replicas_per_host is None:
      self._tt_config.num_replicas_per_host = 8
    if self._tt_config.num_hosts is None:
      # Ceiling division: a partially filled host counts as a full host.
      self._tt_config.num_hosts = (
          num_replicas // self._tt_config.num_replicas_per_host +
          (num_replicas % self._tt_config.num_replicas_per_host > 0))

  if self._parameters.graph_dump_path:
    graph_io.write_graph(graph, self._parameters.graph_dump_path,
                         'graph_before_tt.pbtxt')
  with graph.as_default():
    self._add_replica_id_to_graph()
    tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,
                                           on_tpu=True)
  if self._parameters.graph_dump_path:
    graph_io.write_graph(graph, self._parameters.graph_dump_path,
                         'graph_after_tt.pbtxt')
  return tensor_fetches
def trace_cpu(self, graph, tensor_fetches, op_fetches=None):
  """Traces the tensors generated by CPU Ops in a TF graph.

  Args:
    graph: the graph of Ops executed on the CPU.
    tensor_fetches: a (list,tuple,or a single object) of tensor fetches
      returned by model_fn given to session.run. Function must be provided
      with as least one tensor to fetch.
    op_fetches: A list of op fetches returned by model_fn given to
      session.run. op_fetches and tensor_fetches are used to determine the
      nodes that will be executed. Can be None.

  Returns:
    tensor_fetches: an exact copy of tensor_fetches that has additional
                    dependencies.
  Raises:
    RuntimeError: If tensor_fetches is None or empty.
  """
  # A graph is only instrumented once; repeated calls are no-ops.
  if graph in TensorTracer._traced_graphs:
    logging.warning('Graph is already rewritten with tensor tracer, ignoring '
                    'multiple calls.')
    return tensor_fetches
  TensorTracer._traced_graphs.add(graph)

  # CPU execution is modelled as a single replica on a single host.
  self._tt_config.device_type = _DEVICE_TYPE_CPU
  self._tt_config.num_replicas = 1
  self._tt_config.num_replicas_per_host = 1
  self._tt_config.num_hosts = 1
  self._replica_id = 0

  dump_path = self._parameters.graph_dump_path
  if dump_path:
    graph_io.write_graph(graph, dump_path, 'graph_before_tt.pbtxt')
  with graph.as_default():
    tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,
                                           on_tpu=False)
  if dump_path:
    graph_io.write_graph(graph, dump_path, 'graph_after_tt.pbtxt')
  return tensor_fetches
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
f6d5d30640cd3cfa6990ed9c790c621a34e1b867 | db7aa767430bab121142bf00812751ac6c0da3cc | /bin/2020_day_06.py | f12a032c0798fe86ed0c73d74032f4a5cb698525 | [] | no_license | osterbek/adventofcode | 64f4383e1532987732d1ed9fa673f279c4106bd3 | fdb017f0efdf4fdccc4e41874579e826ec1d02fc | refs/heads/main | 2023-02-05T19:30:25.868594 | 2020-12-25T09:18:18 | 2020-12-25T09:18:18 | 320,618,733 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | from pathlib import Path
if __name__ == '__main__':
    # Groups are separated by blank lines; each group is a list of one
    # answer-string per person.
    groups = Path('../input/input_2020_06.txt').read_text().split('\n\n')
    dataset = [group_text.split('\n') for group_text in groups]
    # The 26 candidate questions 'a'..'z'.
    questions = [chr(code) for code in range(97, 123)]
    part1_total = 0  # questions answered by ANYONE in the group
    part2_total = 0  # questions answered by EVERYONE in the group
    for members in dataset:
        # Per-question count of members who answered it.
        counts = [sum(question in member for member in members)
                  for question in questions]
        part1_total += sum(1 for count in counts if count > 0)
        part2_total += sum(1 for count in counts if count == len(members))
    print('Answer part 1 = {:d} '.format(part1_total))
    print('Answer part 2 = {:d} '.format(part2_total))
| [
"noreply@github.com"
] | osterbek.noreply@github.com |
1a5134d5029d306a341b613d4b8f710505dd01e7 | 2d18404c29d4031d92d2eea5fdb583d01822824c | /Ecommerce_Project/App_Order/apps.py | 0db55b0dd6104f9a9a0f4c17eed8e851574a5c95 | [] | no_license | ShamimMSAS7/CSE470-Project | 5eb22eb189f51131c27b843f69470c3459579b83 | c0063b42612cead988e5b13c652f18459931a3f9 | refs/heads/main | 2023-08-04T17:00:02.200085 | 2021-09-17T17:33:20 | 2021-09-17T17:33:20 | 402,445,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from django.apps import AppConfig
class AppOrderConfig(AppConfig):
    """Django application configuration for the App_Order app."""
    # Use 64-bit integer primary keys for models that don't declare one.
    default_auto_field = 'django.db.models.BigAutoField'
    # Application name; must match the app's module path.
    name = 'App_Order'
| [
"shamimmsas7@gmail.com"
] | shamimmsas7@gmail.com |
b1c18147b5e2ae4f5dac680af85aad00eeae7872 | 1d8b108cb5720917c2de0b87f58db40349b82c3d | /get_data_gov_sg_met.py | 06904d7660f5904214d6e6b1282f0d4256dbab09 | [
"MIT"
] | permissive | tangshiyuan/access-data-gov-sg | baff5b3124d2b17bc7b3006fbdbf26d0d15f8ec3 | a5b013475162c6ea2deccf42e4ab9d0ea2d97dd5 | refs/heads/master | 2020-03-11T14:31:45.266000 | 2018-04-23T14:09:42 | 2018-04-23T14:09:42 | 130,057,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,666 | py | #!/usr/bin/env python3
"""
get_data_gov_sg_met.py:
Download meteorological station data (and/or air quality data) for a specific month via the
data.gov.sg APIs.
API key requirement:
In order to use this script, an API key needs to be obtained via
https://developers.data.gov.sg.
Usage:
To download a specific month and variable, specify the month (e.g. 2017_02) and variable, e.g.:
./get_data_gov_sg_met.py 2017_02 rainfall
To download data for all variables in a specific month, specify just the month:
./get_data_gov_sg_met.py 2017_02
To download data for all variables from last month, just run the script with no command-line
arguments:
./get_data_gov_sg_met.py
Output files:
Gzipped CSV files, corresponding to different variables, will be saved in data_gov_sg_met_v1/
The file naming convention is as follows:
data_gov_sg_met_v1/<variable>_<yyyy-mm>_c<today>.csv.gz
where <today> is the date on which the file was created.
For example,
data_gov_sg_met_v1/wind-speed_2017-02_c20170526.csv.gz
Information about input data:
For information about the input data used to derive the output CSV files, please see
https://developers.data.gov.sg, https://data.gov.sg/open-data-licence, and
https://data.gov.sg/api-terms.
Author:
Benjamin S. Grandey, 2017
"""
import calendar
import os
import pandas as pd
import requests
import sys
import time
# Get my API keys
from my_api_keys import my_api_dict
# Note: this module, containing my API keys, will not be shared via GitHub
# You can obtain your own API key(s) by registering at https://developers.data.gov.sg
my_key = my_api_dict['data.gov.sg']  # API key for data.gov.sg

# Output directory: data_gov_sg_met_v1/ next to this script.
here = os.path.abspath(os.path.dirname(__file__))
data_dir = os.path.join(here, 'data_gov_sg_met_v1')
# If directory does not exist, create it (side effect at import time).
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
    print('Created {}'.format(data_dir))
def retrieve_data_via_api(variable: str, dt, n_attempts: int = 10):
    """
    Function to attempt to retrieve data for a specific datetime.

    Retries recursively (with a 10s pause) on HTTP failures and connection
    errors, and retries once on a KeyError in the response payload.

    Args:
        variable: string of variable name used by API (e.g. 'rainfall')
        dt: pd.datetime, corresponding to 'date_time' in the API
        n_attempts: number of attempts to retry if API connection fails

    Returns:
        pd.DataFrame containing data (if successful), or None
    """
    try:
        # Try to connect to API
        r = requests.get('https://api.data.gov.sg/v1/environment/{}'.format(variable),
                         headers={'api-key': my_key},
                         params={'date_time': dt.strftime('%Y-%m-%dT%H:%M:%S')},
                         timeout=30)
        if r.status_code == 200:
            # If API connection was successful, load data into DataFrame, unless no data present
            if len(r.json()['items'][0]['readings']) >= 1:
                result = pd.DataFrame(r.json()['items'][0]['readings'])
                if variable == 'pm25':  # necessary due to diff in pm25 API return format
                    result = result.reset_index()
                    result = result.rename(columns={'index': 'region'})
                # Timestamp carries a '+08:00' offset; keep only the naive SGT part.
                result['timestamp_sgt'] = pd.to_datetime(r.json()['items'][0]['timestamp']
                                                         .split('+')[0])
            else:
                result = None
        else:
            # If API query failed, sleep 10s, then retry recursively (up to n_attempts)
            if n_attempts > 1:
                print(' dt = {}, r.status_code = {}, (n_attempts-1) = {}. '
                      'Retrying in 10s.'.format(dt, r.status_code, (n_attempts-1)))
                time.sleep(10)
                result = retrieve_data_via_api(variable, dt, n_attempts=(n_attempts-1))
            else:
                print(' dt = {}, r.status_code = {}, (n_attempts-1) = {}. '
                      'FAILED TO RETRIEVE DATA.'.format(dt, r.status_code, (n_attempts-1)))
                result = None
        r.close()
    except (requests.exceptions.SSLError, requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout, requests.exceptions.ReadTimeout):
        # If connection failed, sleep 10s, then retry recursively (up to n_attempts)
        if n_attempts > 1:
            print(' dt = {}, error = {}, (n_attempts-1) = {}. '
                  'Retrying in 10s.'.format(dt, sys.exc_info()[0], (n_attempts-1)))
            time.sleep(10)
            result = retrieve_data_via_api(variable, dt, n_attempts=(n_attempts-1))
        else:
            print(' dt = {}, error = {}, (n_attempts-1) = {}. '
                  'FAILED TO CONNECT.'.format(dt, sys.exc_info()[0], (n_attempts-1)))
            result = None
    except KeyError:
        # KeyError is encountered, sleep 10s, then retry once only
        if n_attempts > 1:
            print(' dt = {}, error = {}, (n_attempts-1) = {}. '
                  'Retrying ONCE in 10s.'.format(dt, sys.exc_info()[0], (n_attempts-1)))
            time.sleep(10)
            result = retrieve_data_via_api(variable, dt, n_attempts=1)
        else:
            print(' dt = {}, error = {}, (n_attempts-1) = {}. '
                  'FAILED TO RETRIEVE DATA.'.format(dt, sys.exc_info()[0], (n_attempts - 1)))
            result = None
    return result
def download_month(variable, yyyy, mm):
    """
    Function to attempt to retrieve data for a specific month.

    Args:
        variable: string of variable name used by API (e.g. 'rainfall')
        yyyy: string containing year (e.g. '2017')
        mm: string containing month (e.g. '05')

    Output file:
        CSV file:
            data_gov_sg_met_v1/<variable>_<yyyy>_<mm>_c<today>.csv.gz
        where <today> is today's date. No file is written when the month
        yields no records at all.

    Returns:
        0 on completion.
    """
    print('variable = {}, yyyy = {}, mm = {}'.format(variable, yyyy, mm))
    # Number of days in month
    ndays = calendar.monthrange(int(yyyy), int(mm))[1]  # supports leap years
    # Query interval depends on the variable's native API resolution
    if variable == 'rainfall':
        freq = '5 min'
        periods = (ndays * 24 * 12) + 1
    elif variable == 'pm25':
        freq = '60 min'
        periods = (ndays * 24 * 1) + 1
    else:
        freq = '1 min'
        periods = (ndays * 24 * 60) + 1
    # Datetime range to search through (the extra period reaches 00:00 of the
    # first day of the next month)
    datetime_range = pd.date_range('{}-{}-01 00:00:00'.format(yyyy, mm),
                                   periods=periods, freq=freq)
    # Collect per-timestamp frames in a list: DataFrame.append in a loop was
    # quadratic and has been removed in pandas >= 2.0; one concat at the end.
    frames = []
    # Loop over datetimes
    for i, dt in enumerate(datetime_range):
        # Attempt to retrieve data via API
        temp_df = retrieve_data_via_api(variable, dt)
        # Keep only records whose timestamp indicates the correct month
        # (querying 00:00 on the 1st may return 23:59 on the previous day)
        if temp_df is not None and temp_df['timestamp_sgt'].loc[0].month == int(mm):
            frames.append(temp_df)
        # Indicate progress
        perc = i / periods * 100  # percentage progress
        print(' {:000.1f}%'.format(perc), end='\r', flush=True)
    print()  # start new line
    if not frames:
        # Guard: the original raised UnboundLocalError below when an entire
        # month returned no data, because df was never assigned.
        print(' 0 records - no output file written')
        return 0
    df = pd.concat(frames, ignore_index=True)
    # Print summary of number of records
    print(' {} records'.format(len(df)))
    # Remove duplicates
    df = df.drop_duplicates()
    print(' {} records after removing duplicates'.format(len(df)))
    # Save DataFrame to CSV file (pd.datetime is removed in modern pandas;
    # pd.Timestamp.today() is the supported equivalent)
    out_filename = '{}/{}_{}_{}_c{}.csv.gz'.format(data_dir, variable, yyyy, mm,
                                                   pd.Timestamp.today().strftime('%Y%m%d'))
    df.to_csv(out_filename, index=False, compression='gzip')
    print(' Written {}'.format(out_filename))
    return 0
if __name__ == '__main__':
    # Year and month to get data for
    try:
        yyyy, mm = sys.argv[1].split('_')  # if specified via command-line
    except IndexError:  # otherwise get data for last month
        # pd.datetime and the 'M' Timedelta unit are no longer supported by
        # pandas; DateOffset(months=1) is an exact calendar-month step.
        month_ago = pd.Timestamp.today() - pd.DateOffset(months=1)
        yyyy, mm = month_ago.strftime('%Y_%m').split('_')
    # Variable(s) to get data for
    try:
        variables = [sys.argv[2], ]  # if specified via command-line
    except IndexError:  # otherwise get data for all variables
        variables = ['rainfall', 'wind-speed', 'wind-direction', 'air-temperature',
                     'relative-humidity', 'pm25']
    # Loop over variables
    for variable in variables:
        download_month(variable, yyyy, mm)
| [
"benjamin.grandey@gmail.com"
] | benjamin.grandey@gmail.com |
52f7b8091977541e4b8412d8743831f8ae5c963c | 28c69e41de8054279d6b8ebf2788693c1dfde159 | /33 雷家硕 上海/第一次作业/第二节课之熟悉embedding.py | 3aeb07dacb8bdd2d6d78da08db406710abad0ac9 | [] | no_license | edmlover/badouai-tujiban | 45260aad27da78c6c34c991f06568fe2adb7c324 | 088080ea1f617e0ce64655f8389288101e277702 | refs/heads/main | 2023-08-15T11:33:57.830821 | 2021-10-16T14:25:31 | 2021-10-16T14:25:31 | 409,547,422 | 0 | 2 | null | 2021-09-23T10:33:02 | 2021-09-23T10:33:01 | null | UTF-8 | Python | false | false | 422 | py | import torch
import torch.nn as nn
# Size of the "vocabulary": how many distinct token ids can be embedded.
num_embeddings = 6
# Dimensionality of each embedded vector.
embedding_dim = 3
# The layer owns a randomly initialised (num_embeddings x embedding_dim) matrix.
embedding_layer = nn.Embedding(num_embeddings, embedding_dim)
print(embedding_layer.weight,"随机初始化矩阵")
# Input token ids to look up (embedding lookups require an integer tensor).
x = torch.tensor([1, 2, 5], dtype=torch.long)
embedding_out = embedding_layer(x)
print(embedding_out)
| [
"noreply@github.com"
] | edmlover.noreply@github.com |
545c14b1b076d85bfb13ffa68e1b7ccb88aab197 | 8ec32cd65ba298a194887566ba9b4d0f8cd18893 | /moviedatabase.py | ea0bf255e32330d3eab089ee0229fca0cc70aa02 | [] | no_license | yvonnebutselaar/data-driven-design | b82f4ec2ab6efbf80852cd654e9e69b337af44ef | 7a723fb57d04eb64a15a5521cd4d87324599ebb7 | refs/heads/master | 2020-03-29T15:49:43.483908 | 2019-01-17T19:41:11 | 2019-01-17T19:41:11 | 150,082,676 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | import json
# Load the movie catalogue; the `with` block closes the file on exit, so the
# original trailing f.close() (a redundant second close) has been removed.
with open("movies.json") as f:
    movies = json.load(f)
ayear = input("What year do you want to see?")
# Print every movie released in the requested year.
for items in movies:
    if str(items["year"]) == ayear:
        print(f'{items["title"]} is a movie from {items["year"]}')
"yvonnebutselaar@gmail.com"
] | yvonnebutselaar@gmail.com |
edb367901d6a40a0fc07e0cb1fbefce67e8619fe | 08b640c667f69b449f02ff6b41d9c6a4bc804947 | /csduarte/ex35/sd2.py | a346aaf891197513b52bd00e419bef3593e7c2f1 | [] | no_license | csduarte/FunPy | db2ad9e60849820a823b0fcf7dd5c15d70ec0c5a | 2a73e975dc6617f1fe11fc5b5ed7243f95bb865d | refs/heads/master | 2021-05-04T11:21:02.010283 | 2016-10-07T00:24:13 | 2016-10-07T00:24:13 | 48,732,399 | 0 | 0 | null | 2016-10-07T00:24:14 | 2015-12-29T06:48:26 | Python | UTF-8 | Python | false | false | 50 | py | # Looks good. Cleared up a few caps and what not.
| [
"csduarte@gmail.com"
] | csduarte@gmail.com |
d5c8cacfbea754c24822fed7168c9ea69133b51e | 56fe5ad50f01e347182f75b984e97f5b7ac4d647 | /security_in_memory_db.py | b3c8dd1f3784fda121e1d4c45d15b0505afd46e2 | [] | no_license | leenjiru/Sonia_books | e6a18033155e808339b6786018c3bdaca99fcf72 | 3cb752160eb789d9155482701fd581eb4aa8d170 | refs/heads/master | 2023-03-24T11:37:41.607157 | 2020-07-22T23:38:17 | 2020-07-22T23:38:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | # this applies when using an in memory db
from models.users import UserModel
from werkzeug.security import safe_str_cmp
# users = [
# {
# 'id': 1,
# 'username': 'Nick',
# 'Password': 'password'
# }
# ]
# In-memory "user table" used in place of a real database.
users = [
    UserModel(1, 'Nicky', 'password1')
]
# Index the user list both ways for O(1) lookups during authentication.
username_mapping = {u.username: u for u in users}
user_id_mapping = {u.id: u for u in users}
# user_id_mapping = {
# 1: {
# 'id': 1,
# 'username': 'Nick',
# 'Password': 'password'
# }
# }
def authenticate(username, password):
    """Return the matching user when the username exists and the password
    compares equal (constant-time); otherwise return None."""
    candidate = username_mapping.get(username, None)
    if not candidate:
        return None
    if safe_str_cmp(candidate.password, password):
        return candidate
    return None
def identity(payload):
    """Resolve a JWT payload to its user via the id stored under 'identity'."""
    return user_id_mapping.get(payload['identity'], None)
| [
"www.leenick@gmail.com"
] | www.leenick@gmail.com |
e8a67b91c6b02523eb9741b4118764bca75190c1 | cbb29f7a30b5de0dc2cb421edc259d6ce2586279 | /tests/fixtures/readme_book.py | a9e759336c311c41d1228ce0baafcb0cb304adf2 | [] | no_license | Madoshakalaka/python-typing-to-typescript | a4ebd58ff22f85fe1d2affcd32390ae5e8f19104 | d5fd19b7f804a5e95f8b62a4d208c98d5b830593 | refs/heads/master | 2023-02-02T05:55:23.095977 | 2020-12-22T22:38:23 | 2020-12-22T22:38:23 | 321,219,843 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | from typing import TypedDict
class Book(TypedDict):
    pages: int
    # Forward references are quoted: Chapter is defined later in this module
    # and List/Union are never imported, so the original eager evaluation of
    # these annotations raised NameError on import.
    chapters: "List[Chapter]"
    Authors: "List[str]"
class Chapter(TypedDict):
    title: str
    # short chapters only has one paragraph
    # Quoted forward reference: Paragraph is defined below and List/Union are
    # never imported, so eager evaluation raised NameError on import.
    content: "Union[str, List[Paragraph]]"
class Paragraph(TypedDict):
    """A single paragraph of a chapter's body text."""
    content: str
"syan4@ualberta.ca"
] | syan4@ualberta.ca |
25dd87758892b414426ec0e9c48e05fb4ac4a527 | a4a44ad46cd1306e2da72ff89483b0102fc9787d | /SamplePython/Developer Tool/fab_fabric/pengenalan_dr_web/11_settings.py | 8a018fc9d73ccd0692dcf6cf3d3a40dad2777d35 | [] | no_license | okipriyadi/NewSamplePython | 640eb3754de98e6276f0aa1dcf849ecea22d26b1 | e12aeb37e88ffbd16881a20a3c37cd835b7387d0 | refs/heads/master | 2020-05-22T01:15:17.427350 | 2017-02-21T04:47:08 | 2017-02-21T04:47:08 | 30,009,299 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | """
settings (fabric.context_managers.settings)
When you need to temporarily (i.e. for a certain command chain),
you can use the settings statement (i.e. override env values).
Usage examples:
"""
from fabric.api import settings, sudo
# Perform actions using a different *user*
# The override applies only inside the `with` block; afterwards env values
# revert to what they were before.
with settings(user="avionics"):
    sudo("cmd")
| [
"oki.priyadi@pacificavionics.net"
] | oki.priyadi@pacificavionics.net |
ad5ff59ea22ba3812b95850738f8c40ca4e9036d | d19a463c154da8e66330450d8462d1c4d6de3cc1 | /server/data/sp500.py | c725d6384d1d1d94739c2f624948942607e13ade | [] | no_license | nhatduy227/StockApp | 7209ede008205de1f8b2997ed2d8d8677bad43ea | 5a2dc2153d92eb8afba1dfd4b61b6a849f237b9b | refs/heads/master | 2023-07-14T06:48:02.937346 | 2021-09-02T18:22:13 | 2021-09-02T18:22:13 | 390,561,887 | 3 | 1 | null | 2021-08-29T23:35:36 | 2021-07-29T01:33:13 | JavaScript | UTF-8 | Python | false | false | 1,728 | py | '''S&P 500 Stock Reader
The script reads in all companines in the Standard & Poor Index, provided by Wikipedia.
It then grabs the historical stock price of each ticker and saves them in PyStore. Then,
continuously adds in end-of-day stocks daily.
'''
import pandas as pd
import yfinance as yf
from sqlalchemy import create_engine
import psycopg2
import io
def read_in_sp500(
        URL: str = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies',
) -> pd.DataFrame:
    '''Read in all of Standard & Poor's 500 largest companies

    Args:
        URL (str): a Wikipedia url holding a data table of all S&P 500 companies

    Returns:
        pd.DataFrame: a data frame with info on all S&P 500 companies
    '''
    sp500_df = pd.read_html(URL)[0]
    # Normalise tickers for Yahoo Finance (e.g. 'BRK.B' -> 'BRK-B').
    # regex=False is essential: with regex=True the pattern '.' matches EVERY
    # character, turning each symbol into a string of dashes.
    sp500_df['Symbol'] = sp500_df['Symbol'].str.replace('.', '-', regex=False)
    return sp500_df
# ----------------- Run only once -------------------
def get_stock_data(
        interval: str = '1d',
) -> pd.DataFrame:
    '''Retrieves all previous day-by-day stock prices in the S&P500

    Note: This function should only run once

    Args:
        interval: bar size passed through to yfinance (default one day).

    Returns:
        Long-format DataFrame with one row per (date, ticker).
    '''
    # Ticker symbols for every current S&P 500 constituent.
    sp500_tickers = read_in_sp500()['Symbol']
    # Batch calls all stock tickers
    sp500_df = yf.download(
        tickers=sp500_tickers.to_list(),
        interval=interval,
    )
    # yf.download returns wide (field, ticker) columns; stack to long format
    # and give the ticker level a proper column name.
    sp500_df = sp500_df.stack().reset_index().rename(
        columns={'level_1': 'Ticker'})
    # Normalise column names to snake_case for the database.
    sp500_df.columns = sp500_df.columns.str.lower().str.replace(' ', '_')
    return sp500_df
# ---------------------------------------------------
if __name__ == '__main__':
    # NOTE(review): database credentials are hard-coded; move them to
    # environment variables or a config file.
    db_url = 'postgresql://postgres:password@localhost:5432/stocks'
    engine = create_engine(db_url)
    df = get_stock_data()
    # Writes the full history into 'stock_data'; pandas' default
    # if_exists='fail' raises if the table already exists.
    df.to_sql('stock_data', engine)
| [
"aowangphilly@gmail.com"
] | aowangphilly@gmail.com |
4e8a125a7458dd004507e648e9417922ad85affe | 14d7f5f83b6f84871ff6ebfa0af4c17b7115a33f | /eco_models/mpb/integration_stop.py | f391a20c2a14bae90e14d4ebe8bd5777a3fa7d32 | [] | no_license | tonychangmsu/Python_Scripts | 8ca7bc841c94dcab36743bce190357ac2b1698a5 | 036f498b1fc68953d90aac15f0a5ea2f2f72423b | refs/heads/master | 2016-09-11T14:32:17.133399 | 2016-03-28T16:34:40 | 2016-03-28T16:34:40 | 10,370,475 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | # Title: integration_stop.py
# Author: Tony Chang
# Date: 10.26.2015
# Abstract: Attempt to find a solution to determining where the cumulative sum (numerical integration), of a array of
2D matricies sum up to one (find the index)
import numpy as np
#first suppose we have a 3D matrix of values under 1
# NOTE: (365, 500, 400) float64 values is roughly 580 MB; the cumulative sum
# below allocates a second array of the same size.
G = np.random.uniform(0,.05, (365,500,400))
#now develop a cumulative sum for each step
integral_G = np.cumsum(G, axis =0)
#now find out the index of the first axis where the value is equal to one.
# np.argmax on a boolean array returns the FIRST index where the condition
# holds along axis 0 (the "day" axis).
index = np.argmax(integral_G>1, axis = 0)
#if any of these equals to 0 then we have a development that didn't complete, and we have a problem
# (argmax yields 0 for columns where the condition never becomes True).
#need more time to finish (i.e. more years to inspect).
#done!
| [
"tony.chang@msu.montana.edu"
] | tony.chang@msu.montana.edu |
b593fcc836a48a7354d9240bfcd92cf6de664747 | 992f080016e7de4e3de9ff875a9778404c6c8fdc | /app.py | 528c58018f342d6301ce0278c345f7c43269f087 | [] | no_license | Garasuno/lineBot2 | 9c50903c26fc27947dfca7b5fd8915779aa2b1a4 | 034cf2ed295710cb3f04483cc36f8d0bc15a462d | refs/heads/master | 2020-06-10T13:37:15.646984 | 2019-09-03T07:09:11 | 2019-09-03T07:09:11 | 193,651,533 | 0 | 0 | null | 2019-06-25T06:51:46 | 2019-06-25T06:51:46 | null | UTF-8 | Python | false | false | 1,592 | py | from flask import Flask, jsonify, request
import os
import json
import requests
app = Flask(__name__)
@app.route('/')
def index():
    """Landing page: returns the author's name/class string."""
    # The original read os.environ['Authorization'] into an unused local,
    # which raised KeyError whenever the config var was missing; the lookup
    # served no purpose here and has been removed.
    return "นางสาวนฤภร สัมมา เลขที่ 10 ชั้น ม.4/9"
@app.route("/webhook", methods=['POST'])
def webhook():
    # Simple acknowledgement endpoint: the route only accepts POST, so this
    # branch is always taken for routed requests.
    if request.method == 'POST':
        return "OK"
@app.route('/callback', methods=['POST'])
def callback():
    """Dialogflow fulfillment webhook: pushes a canned LINE reply per intent."""
    # request.get_json() already returns the parsed body; the original
    # json.dumps/json.loads round-trip was a no-op and has been dropped.
    decoded = request.get_json()
    # Reply token identifying the LINE conversation to answer.
    # NOTE(review): Dialogflow v2 spells this key 'payload'; the original used
    # 'playload', which always raised KeyError -- confirm against a real request.
    user = decoded['originalDetectIntentRequest']['payload']['data']['replyToken']
    # Matched intent name ('displayName' in Dialogflow v2). The original bound
    # the value to userText but then referenced an undefined 'usertext', and
    # called the nonexistent 'senttext' instead of sendText below.
    usertext = decoded['queryResult']['intent']['displayName']
    if usertext == 'สวัสดี':
        sendText(user, 'เอ่อดีด้วย')
    elif usertext == 'บายจ้า':
        sendText(user, 'บาย')
    else:
        sendText(user, 'กาว')
    return '', 200
def sendText(user, text):
    """Send `text` to the LINE conversation identified by reply token `user`
    via the Messaging API reply endpoint."""
    LINE_API = 'https://api.line.me/v2/bot/message/reply'
    headers = {
        'Content-Type': 'application/json; charset=UTF-8',
        'Authorization': os.environ['Authorization'] # set as a Heroku Config var holding the channel access token
    }
    data = json.dumps({
        "replyToken":user,
        "messages":[{"type":"text","text":text}]
    })
    r = requests.post(LINE_API, headers=headers, data=data) # send the payload
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
| [
"noreply@github.com"
] | Garasuno.noreply@github.com |
64940b59557a57f6050239f90c6e4c8057f3ca09 | 7becb767c5536f450d9aa83821c2a62a0b66175a | /admintools/round_lib/get_round_id.py | 90fc6b42835245bf12461b4b1a8a8f3cf9a678a8 | [] | no_license | urirot/planetWarsRunner | af69837cc8f83902505bbe1f4628aaee7476348a | 76e0458115ebbe581c104d569ad17899dae78fb3 | refs/heads/master | 2021-01-01T17:55:33.012912 | 2018-07-04T19:06:04 | 2018-07-04T19:06:04 | 98,202,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | #! /usr/bin/python
from urllib import urlopen
import json
import sys
from config import *
# Python 2 script: expects a tournament id and a round number on the CLI.
if len(sys.argv) != 3:
    print "Usage: ./get_round_id.py <tournament id> <round number>"
    sys.exit(2)
tourn_id = sys.argv[1]
round_number = sys.argv[2]
# HOST comes from the star import of `config` above.
url = HOST + "tournaments/%s/round_by_name?name=%s" % (tourn_id, round_number)
result = urlopen(url).read()
# An empty response body means the round was not found on the server.
if not result:
    sys.stderr.write("Can't find this round (round number = %s). Are you sure you created it?\n" % round_number)
    sys.exit(1)
round_id = str(json.loads(result)["id"])
print round_id
| [
"gazolin@gmail.com"
] | gazolin@gmail.com |
986d5bf4bc52b34aa41124711f6c80c7e1957253 | 4538728d33a8700e5bec08ec0d7b05480e042902 | /utils/send_email.py | 7f3a23ccb391e13913a08307e567cd922bcea390 | [] | no_license | Faouzizi/Alerting-System | 5b841a7374da0012bc0a9a8111d8925139ce89a7 | 98fdf263efb0f3e007bf666fb355ec3cede44076 | refs/heads/main | 2023-01-19T02:00:19.773848 | 2020-11-26T01:11:01 | 2020-11-26T01:11:01 | 314,676,909 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | #############################################################################
########### Import python packages
#############################################################################
import smtplib
import config
from utils.get_templates import get_html_template
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
#############################################################################
########### Send the email
#############################################################################
def send_alerting_email(alert_message,type_alert):
# get the email
message_template = get_html_template(type_alert)
#connect to the SMTP server
s = smtplib.SMTP(host='smtp.gmail.com', port=587)
s.starttls()
s.login(config.smtp_email, config.smtp_password)
# Send the email for each email on the recipient list
for email in config.recipient_list:
msg = MIMEMultipart() # create a message
# add in the actual person name to the message template
message = message_template.substitute()
# setup the parameters of the message
msg['From']=config.smtp_email
msg['To']=email
msg['Subject'] = alert_message
# add in the message body
msg.attach(MIMEText(message, 'html'))
# send the message via the server set up earlier.
s.send_message(msg)
del msg
# Terminate the SMTP session and close the connection
s.quit()
return('email sent :)')
| [
"noreply@github.com"
] | Faouzizi.noreply@github.com |
a15574410724ba4ab74e9a614969967bd761fc75 | 242fc8beff7e1e51b5cdd9bfa3d118267f56861d | /shunting_yard/parser.py | dd42d72177d0963e445e9387f19599b0eaaefeaf | [] | no_license | gmkvaal/shunting_yard | dac0dd780cf2c779f410edad54f72618c5379f80 | d84b0c4aa3c36aff435ede5252f143182f656fc0 | refs/heads/master | 2021-07-23T12:14:48.666536 | 2017-11-03T13:08:13 | 2017-11-03T13:08:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,794 | py | from typing import List
from collections import namedtuple
from enum import Enum, auto
from .tokenizer import tokenizer
StateRet = namedtuple('StateRet', ['next_state', 'increment'])
class StateType(Enum):
NUMBER = auto()
def classify_token(token: dict, operator_stack: List[str], output_queue: List[str]) -> StateRet:
"""Classifies tokens
Args:
token: Currently read token.
operator_stack: Stack of operators
output_queue: Tokens in RPN order
Returns:
Tuple of: Next state, if increment
"""
print(token['name'], [operator['name'] for operator in output_queue],
[operator['name'] for operator in operator_stack])
if token['type'] == StateType.NUMBER:
output_queue.append(token)
return StateRet(classify_token, True)
if token['type'] == 'OPERATOR':
return StateRet(operator, False)
if token['type'] == 'FUNCTION':
operator_stack.append(token)
return StateRet(classify_token, True)
if token['type'] == 'LEFT_PARENTHESIS':
operator_stack.append(token)
return StateRet(classify_token, True)
if token['type'] == 'RIGHT_PARENTHESIS':
return StateRet(right_parenthesis, False)
if token['type'] == 'SKIP':
return StateRet(classify_token, True)
def operator(token: dict, operator_stack: List[str], output_queue: List[str]) -> StateRet:
"""Called when a token is classified as an operator
Appends to stack of the operator stack is empty, if the last token
in the stack is a function, or if the token is right associative.
Else, pops operators from the stack
Args:
token: Currently read token.
operator_stack: Stack of operators
output_queue: Tokens in RPN order
Returns:
Tuple of: Next state, if increment
"""
del output_queue # Not used in this state
if len(operator_stack) == 0 or operator_stack[-1]['precedence'] is None:
operator_stack.append(token)
return StateRet(classify_token, True)
elif token['associativity'] == 'RIGHT':
operator_stack.append(token)
return StateRet(classify_token, True)
else:
return StateRet(pop_operators, False)
def pop_operators(token: dict, operator_stack: List[str], output_queue: List[str]) -> StateRet:
"""Pops operators from the stack
Operators are popped from the operator stack to the output queue
until reaching an operator with lower precedence or the stack is empty
Args:
token: Currently read token.
operator_stack: Stack of operators
output_queue: Tokens in RPN order
Returns:
Tuple of: Next state, if increment
"""
if (len(operator_stack) > 0
and operator_stack[-1]['precedence'] is not None
and operator_stack[-1]['precedence'] >= token['precedence']
and operator_stack[-1]['associativity'] == 'LEFT'):
output_queue.append(operator_stack.pop())
return StateRet(pop_operators, False)
else:
operator_stack.append(token)
return StateRet(classify_token, True)
def right_parenthesis(token: dict, operator_stack: List[str], output_queue: List[str]) -> StateRet:
"""Called when a token is classified as a right parenthesis
Operators are popped from the operator stack to the output queue
until reaching a left parenthesis
Args:
token: Currently read token.
operator_stack: Stack of operators
output_queue: Tokens in RPN order
Returns:
Tuple of: Next state, if increment
"""
del token # Not used in this state
if operator_stack == []:
raise Exception('Mismatching parentheses')
elif operator_stack[-1]['type'] != 'LEFT_PARENTHESIS':
output_queue.append(operator_stack.pop())
return StateRet(right_parenthesis, False)
else:
operator_stack.pop()
return StateRet(post_right_parenthesis, False)
def post_right_parenthesis(token: dict, operator_stack: List[str], output_queue: List[str]) -> StateRet:
"""Called after brackets are matched
If a function is atop of the stack it is poped to the output queue
Args:
token: Currently read token.
operator_stack: Stack of operators
output_queue: Tokens in RPN order
Returns:
Tuple of: Next state, if increment
"""
if len(operator_stack) > 0 and operator_stack[-1]['type'] == 'FUNCTION':
output_queue.append(operator_stack.pop())
return StateRet(classify_token, True)
def empty_operator_stack(operator_stack: List[str], output_queue: List[str]) -> None:
""" Pops remaining operators from the operator stack to the output queue
Args:
token: Currently read token.
operator_stack: Stack of operators
output_queue: Tokens in RPN order
"""
while len(operator_stack) > 0:
output_queue.append(operator_stack.pop())
def shunting_yard(input_string: str) -> List[str]:
""" Engine of shunting yard parser finite state machine algorithm
Args:
input_string: A mathematical expression
Returns:
A list of tokens ordered in Reverse Polish Notation
"""
operator_stack = []
output_queue = []
token_list = tokenizer(input_string)
state = classify_token
idx = 0
while True:
token = token_list[idx]
#print(token['name'], state.__name__, operator_stack)
return_state = state(token, operator_stack, output_queue)
if return_state.increment:
idx += 1
state = return_state.next_state
if idx == len(token_list):
empty_operator_stack(operator_stack, output_queue)
break
return output_queue
| [
"guttorm.kvaal@gmail.com"
] | guttorm.kvaal@gmail.com |
455eb5dcfc9b2227454cfd2a1ff46a485409a8ad | 3b7d8027e34f2338517d9c34f82bb02eb88de201 | /Vote.py | b8027b299ae5b4a3e556f3b8b20d8304c2ad44ca | [] | no_license | amishofficial/decentralizeVoting | 44dd2dd0fb4f4779a59503ff87e8cbfeea9d0028 | e957e317958173b4ba006518c8b87dfbb765593a | refs/heads/main | 2023-03-08T13:03:45.835971 | 2021-02-24T11:27:40 | 2021-02-24T11:27:40 | 341,876,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | from collections import OrderedDict
from utility.printable import Printable
class Vote(Printable):
"""A transaction which can be added to a vote_block in the votechain.
Attributes:
:voter: The voter of the coins.
:vote_to: The vote_to of the coins.
:signature: The signature of the transaction.
:amount:
"""
def __init__(self, voter, vote_to, signature):
self.voter = voter
self.vote_to = vote_to
self.signature = signature
def to_ordered_dict(self):
"""Converts this transaction into a (hashable) OrderedDict."""
return OrderedDict([('voter', self.voter), ('vote_to', self.vote_to)])
| [
"noreply@github.com"
] | amishofficial.noreply@github.com |
9ae067e5cd4eccc2e3a324cc2e07669caccf8637 | 6630694f401f6f475dd81bb01ff9368db844ccff | /configs/_base_/models/hrnet/hrnet-w48.py | f0604958481ba2af277e3a0f9515dc1423def6c6 | [
"Apache-2.0"
] | permissive | open-mmlab/mmpretrain | 98a4d6b3bb747efc3d50decebf84fc3ffa41076a | d2ccc44a2c8e5d49bb26187aff42f2abc90aee28 | refs/heads/main | 2023-08-30T19:11:24.771498 | 2023-08-23T02:45:18 | 2023-08-23T02:45:18 | 278,415,292 | 652 | 186 | Apache-2.0 | 2023-09-08T08:01:40 | 2020-07-09T16:25:04 | Python | UTF-8 | Python | false | false | 418 | py | # model settings
model = dict(
    type='ImageClassifier',
    # HRNet backbone, W48 variant.
    backbone=dict(type='HRNet', arch='w48'),
    neck=[
        # Fuse the four multi-resolution branch outputs (channel widths
        # 48/96/192/384) into one feature map, then pool to a vector.
        dict(type='HRFuseScales', in_channels=(48, 96, 192, 384)),
        dict(type='GlobalAveragePooling'),
    ],
    head=dict(
        type='LinearClsHead',
        in_channels=2048,
        num_classes=1000,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        # Report top-1 and top-5 accuracy.
        topk=(1, 5),
    ))
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
170f4291b543e014fadf954a0e8b37173c22f52f | 965e1e205bf053d93b32be0dab4d45455b42b3a2 | /NLP/PartsSpeech.py | 29aa35ab37a1d1ca416e2d528400a686da8f4ba4 | [] | no_license | panditdandgule/DataScience | 9e58867dd960ec554e0bbb8e4ce93baa226ab927 | 3eb59c129d81a6ba6b45e24113e25e63d19c60cb | refs/heads/master | 2021-07-22T21:44:12.700518 | 2020-05-14T12:01:05 | 2020-05-14T12:01:05 | 166,497,260 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 15 19:59:50 2018
@author: pandit
"""
import nltk
from nltk.corpus import state_union
from nltk.tokenize import PunktSentenceTokenizer
# Train the unsupervised Punkt sentence tokenizer on the 2005 State of the
# Union address, then sentence-tokenize the sample text with it.
train_text=state_union.raw("2005-GWBush.txt")
# NOTE(review): sample_text loads the same file as train_text -- confirm
# whether a different year's speech was intended as the sample.
sample_text=state_union.raw("2005-GWBush.txt")
custom_sent_tokenizer=PunktSentenceTokenizer(train_text)
tokenized =custom_sent_tokenizer.tokenize(sample_text)
def process_content():
    """POS-tag every tokenized sentence and print the (word, tag) pairs."""
    try:
        for sentence in tokenized:
            tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
            print(tagged)
    except Exception as err:
        print(str(err))
process_content()
| [
"panditdandgule777@gmail.com"
] | panditdandgule777@gmail.com |
5d9a526f41762caa210b4add3fe922444501227d | 1546f47a68577c55371d61d9805d890fddf843d5 | /Arthur/events/filters.py | 4c0aeb65c0fcf2f8dd463090bb5dc37135f85a58 | [] | no_license | tadgh/Arthur | b24299fc4edc5eba424e3007389e2a469a38e1a0 | 6ff839904973b8c3ad3eecb677cb9f3e3bbc1934 | refs/heads/master | 2022-12-14T13:34:44.610442 | 2020-04-28T04:37:07 | 2020-04-28T04:37:07 | 189,875,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py |
from django_filters import rest_framework as filters
class LeaderboardFilter(filters.FilterSet):
    """Filter set exposing a posted-date range filter for leaderboard entries."""

    # Maps the ?posted_after=/&posted_before= query parameters onto the
    # model's ``date`` field.
    posted = filters.DateFromToRangeFilter(field_name='date')
"garygrantgraham@gmail.com"
] | garygrantgraham@gmail.com |
f70d4e2e4894ba7b8637af7ba93f753c0b5faa18 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /115_testing/examples/Github/_Level_1/python_unittests-master/sample_functions.py | 8d63bc99d2a30ac0321b97976440c0d8474e1244 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 204 | py |
def sum(a, b):
    """Return the sum of ``a`` and ``b``.

    NOTE(review): shadows the builtin ``sum``; kept as-is because renaming
    would change this sample module's public API.
    """
    return a + b
def contains_numbers(alpha_num_str):
    """Return True if the string contains at least one digit character.

    The original returned False on BOTH paths, so it could never report a
    digit as present.
    """
    for char in alpha_num_str:
        if char.isdigit():
            return True
    return False
def div(a, b):
    """Return the quotient of ``a`` divided by ``b`` (true division)."""
    return a / b
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
ee1b43a7f7c4f3012ce2cae10f1509013f318252 | 4564b5786932bd5a94f442a2eae170564e12640b | /python/NaverMovieCrawling.py | 9f08c4b89a304193341b12545e911ac0ae60723a | [] | no_license | slomeer/sparta | 60feb9d5e9ecebedca1d08735f4c99912deb39c6 | bd96e3700c10bebd0c3e742fb35f4151f88a7e89 | refs/heads/master | 2021-03-15T04:44:54.479873 | 2020-06-15T06:44:46 | 2020-06-15T06:44:46 | 246,825,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | import requests
from bs4 import BeautifulSoup
# Fetch the target URL and receive its HTML.
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get('https://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=pnt&date=20200303',headers=headers)
# Parse the HTML with the BeautifulSoup library so it is easy to search.
# `soup` now holds the parsed, queryable HTML document;
# the pieces we need can be extracted from it with selectors below.
soup = BeautifulSoup(data.text, 'html.parser') # data.text is the raw HTML received from the site
# The parsed HTML document now lives in `soup`.
movies = soup.select('#old_content > table > tbody > tr')
for i, movie in enumerate(movies):
    # If the row contains a title link,
    a_tag = movie.select_one('td.title > div > a')
    rate = movie.select_one('td.point')
    if a_tag is not None:
        # print the rank, the title text and the rating.
        print(i, end=' ')
        print(a_tag.text, end=' ')
        print(rate.text)
| [
"61731412+slomeer@users.noreply.github.com"
] | 61731412+slomeer@users.noreply.github.com |
d4a278c814384d490f690a077bab77a109b60b57 | 0ad79e7104500b5988f07e9f19f17a540f07605a | /Python算法指南/动态规划/67_最小和子数组_灵活运用动态规划.py | 3273d8c3606054f4d873463057975d507015c93a | [] | no_license | tonyyo/PycharmProjects | f0ce458ed662e33e75ddffbfcf28b0d1ed638743 | a28620923336c352103858e0ccfc4117d1c4ea01 | refs/heads/master | 2022-09-19T02:02:15.919201 | 2020-06-03T12:57:38 | 2020-06-03T12:57:38 | 263,204,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | class Solution:
def minSubArray(self, nums):
MIN = 65536
SUM = nums[0]
for i in range(1, len(nums)):
SUM = SUM + nums[i] if SUM < 0 else nums[i] # SUM > 0有害于最小和
MIN = min(MIN, SUM)
return MIN
if __name__ == '__main__':
    temp = Solution()
    # Two sample arrays; the expected minimum subarray sums are -3 and -2.
    List1 = [1, -1, -2, 1]
    List2 = [3, -2, 2, 1]
    print("输入:" + str(List1))
    print(("输出:" + str(temp.minSubArray(List1))))
    print("输入:" + str(List2))
    print(("输出:" + str(temp.minSubArray(List2))))
| [
"1325338208@qq.com"
] | 1325338208@qq.com |
6c34ab7d080c5769a3dcf15d2a5cfd4dce22ea9d | 394742b366c0eed8997e8c4058daa1e122fffdf3 | /Lesson23/manage.py | 3081204679636e0df707aa01ca17a805669e6dc5 | [
"MIT"
] | permissive | IslamRaslambekov/HomeWork | e293468d73998f7f5e5e8f50a318546a01a6d593 | a1454c3539edf7475de686383cee8db9f1bdf448 | refs/heads/master | 2022-05-07T02:46:48.916601 | 2022-04-29T01:38:42 | 2022-04-29T01:38:42 | 243,369,694 | 0 | 1 | MIT | 2022-04-14T10:25:28 | 2020-02-26T21:29:16 | CSS | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # NOTE(review): the settings module is 'Lesson24.settings' although this
    # file lives under Lesson23/ -- confirm the intended project package.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Lesson24.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"rodoslav12345@gmail.com"
] | rodoslav12345@gmail.com |
13989c3455665529fd642327e8ef34cd1679e3aa | 743057b36fda035eb8e6378f0c192ca4e2abf76e | /toMap.py | 4e527f566d14f32355e1c1308db25b20b02582a2 | [] | no_license | haegray/Python-and-Java-Files | f53ffa0a6872d7f29328353bbf7e63d98b796250 | 0b6fcc0158711b184ee1bf4b59f6dc835361e03a | refs/heads/master | 2021-01-13T14:39:06.230924 | 2016-12-23T17:10:45 | 2016-12-23T17:10:45 | 76,681,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | #toNumbers.py
def toMap(strList):
    """Convert a list of numeric strings to a list of floats.

    Fixes two defects: the original mapped ``float`` over the characters of
    only the FIRST element, and the module-level call below invoked
    ``toMap()`` with no argument, raising TypeError on import.
    """
    return [float(item) for item in strList]


if __name__ == "__main__":
    print(toMap(["1", "2.5", "-3"]))
| [
"anaisgray.ag@gmail.com"
] | anaisgray.ag@gmail.com |
716139a42c9d3176dc33f024f607d86df864e7be | b5cd2a7828acdaebd49964ac93be7085bea0d011 | /carte.py | c4e4e9aa68d71d16ec5026c7e3144cfd0e940827 | [] | no_license | stevchen99/pythonRoboc | e5576930cd9295c08336ad3754c7a4e326e5a8a5 | 5301be8e918ce6b3c36b491e02c8aaddd520eb4e | refs/heads/master | 2020-12-08T16:44:46.439760 | 2020-01-17T18:03:39 | 2020-01-17T18:03:39 | 233,036,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | # -*-coding:Utf-8 -*
"""Ce module contient la classe Carte."""
class Carte:
    """Transition object between a map file and a labyrinth."""

    def __init__(self, nom, chaine):
        # NOTE(review): creer_labyrinthe_depuis_chaine is not imported in this
        # module -- presumably provided elsewhere; confirm the import exists.
        self.nom = nom
        self.labyrinthe = creer_labyrinthe_depuis_chaine(chaine)

    def __repr__(self):
        return f"<Carte {self.nom}>"
| [
"stevchen99@gmail.com"
] | stevchen99@gmail.com |
3f2d06c3d1274caa259fdb14604ed63acc54c4a3 | e950cafc9feeeacba9d40b18477dab43bb8737bf | /src/lab1/tfs_client.py | a184cf7077222781caa9649835a758e543e1b1d3 | [
"MIT-0"
] | permissive | aws-samples/aws-inf1-gcr-workshop | fe21b4637f09a2c51a977aaea999a20c31b43277 | a5712c17a66809fc60cd57a056a00df3b9b2fc8e | refs/heads/master | 2022-04-23T20:34:53.914422 | 2020-04-28T02:36:42 | 2020-04-28T02:36:42 | 254,085,220 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,791 | py | '''
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
'''
import numpy as np
import grpc
import tensorflow as tf
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.applications.resnet50 import decode_predictions
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
tf.keras.backend.set_image_data_format('channels_last')
if __name__ == '__main__':
    # Connect to a TensorFlow Serving instance listening on the local host.
    channel = grpc.insecure_channel('localhost:8500')
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    # Download a sample image and preprocess it for ResNet50
    # (channels-last layout is forced at module import time above).
    img_file = tf.keras.utils.get_file(
        "./kitten_small.jpg",
        "https://raw.githubusercontent.com/awslabs/mxnet-model-server/master/docs/images/kitten_small.jpg")
    img = image.load_img(img_file, target_size=(224, 224))
    # [None, ...] adds the batch dimension expected by the served model.
    img_array = preprocess_input(image.img_to_array(img)[None, ...])
    # Build the gRPC PredictRequest against the deployed model name.
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'resnet50_inf1_serve'
    request.inputs['input'].CopyFrom(
        tf.contrib.util.make_tensor_proto(img_array, shape=img_array.shape))
    result = stub.Predict(request)
    # Decode the returned class-score tensor into ImageNet labels.
    prediction = tf.make_ndarray(result.outputs['output'])
    print(decode_predictions(prediction))
| [
"zhazhn@amazon.com"
] | zhazhn@amazon.com |
5e85990864fdcde2e416a6a7d1e2c645b29cd5de | 93b866284ca1ac29c5005555f2cb30454a0fb5cf | /Problems/59-Problem/Problem 59.py | 7f38e437f006d4d25166047a46688def172fbf69 | [] | no_license | FrancoisdeFouchecour/Projet-Euler | c2b17d1e35fbd10a708ba3221825a62a17818382 | 0cf70457c0418264c2eff7cdd0e92a07b61ecb07 | refs/heads/master | 2021-12-25T05:44:08.054648 | 2021-11-27T21:47:42 | 2021-11-27T21:47:42 | 168,253,571 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,087 | py | import time
import numpy as np
problem_number = 59
def decimal_to_binar(n):
    """Return the binary digits of *n* packed into a base-10 integer.

    e.g. 5 -> 101, 10 -> 1010, 0 -> 0.
    """
    result = 0
    place = 1
    # Iterative instead of recursive: peel bits off the low end, writing
    # each one into the next decimal place of the result.
    while n:
        result += (n % 2) * place
        place *= 10
        n //= 2
    return result
def binar_to_decimal(n):
    """Convert a binary representation (int like 1010 or str like '1010') to an int.

    Bug fix: the original compared its argument against the *int* base cases
    1 and 0, so single-character string inputs ('1' == 1 is False) fell
    through to the recursive branch and crashed with int('') ValueError.
    Iterating over the digits of str(n) handles ints and strings of any
    length uniformly, including '0' and '1'.
    """
    value = 0
    for digit in str(n):
        value = value * 2 + int(digit)
    return value
def XOR(A, B):
    """Bitwise XOR of two non-negative integers.

    The original round-tripped both operands through decimal-encoded binary
    strings, zero-padded them to equal length and compared character by
    character; Python's native ^ operator computes the identical result
    directly and in O(1) Python-level work.
    """
    return A ^ B
def decipher(text, key):
    """Decrypt *text* (iterable of int byte values) with a repeating XOR key.

    Generalized: the key length was hard-coded to 3 (key[i % 3]); indexing
    with len(key) is byte-identical for 3-element keys while supporting any
    non-empty key. Also uses the native ^ operator and a join instead of
    quadratic string concatenation through the string-based XOR() helper.
    """
    return ''.join(chr(c ^ key[i % len(key)]) for i, c in enumerate(text))
# read data: the cipher file is a comma-separated list of integer byte values.
number_file = open("data.txt", "r")
raw_data = number_file.read()
number_file.close()
list_name = []
word = ""
# Hand-rolled parse: accumulate digit characters until a separator appears.
for char in raw_data:
    if char == ',':
        list_name.append(int(word))
        word = ""
    elif char == '\n':
        # Flush the final value before the newline.
        # NOTE(review): word is not reset here, so input with more than one
        # line would crash in int() — assumes a single-line file; confirm.
        list_name.append(int(word))
    elif char != '"':
        # Skip surrounding quotes, keep everything else (digits).
        word += char
#Solution
def solution(input_list):
    """Brute-force the 3-letter lowercase XOR key (Project Euler problem 59).

    For every key (a, b, c) with each letter in 'a'..'z', decrypt the cipher
    bytes and score the candidate plaintext by comparing its per-key-slot
    letter frequencies against the reference table below; the decryption
    with the smallest total deviation wins. Returns the sum of the ASCII
    values of that best plaintext.
    """
    result = 0
    length = len(input_list)
    # Reference letter frequencies for 'a'..'z' (source unverified).
    normal_frequency = [11.682, 4.434, 5.238, 3.174, 2.799, 4.027, 1.642, 4.200, 7.294, 0.511, 0.456, 2.415, 3.826, 2.284, 7.631, 4.319, 0.222, 2.826, 6.686, 15.978, 1.183, 0.824, 5.497, 0.045, 0.763, 0.045]
    score = np.infty
    for a in range(97, 123):
        for b in range(97, 123):
            for c in range(97, 123):
                key = [a, b, c]
                new_text = [0 for i in range(length)]
                for i in range(len(new_text)):
                    new_text[i] = XOR(input_list[i], key[i%3])
                # Count A-Z / a-z occurrences per key position, case-folded
                # into one 26-bin table for each of the three slots.
                letter_frequency = [[0 for j in range(26)] for i in range(3)]
                for i in range(len(new_text)):
                    if 65 <= new_text[i] and new_text[i] <= 90:
                        letter_frequency[i%3][new_text[i] - 65] += 1
                    elif 97 <= new_text[i] and new_text[i] <= 122:
                        letter_frequency[i%3][new_text[i] - 97] += 1
                new_score = 0
                for i in range(3):
                    for j in range(26):
                        # NOTE(review): this normalises counts into [0, 1]
                        # while normal_frequency appears to be in percent —
                        # the scales differ; confirm the ranking still
                        # behaves as intended.
                        letter_frequency[i][j] = letter_frequency[i][j]/(length//3)
                        new_score += abs(letter_frequency[i][j] - normal_frequency[j])
                if new_score < score:
                    score = new_score
                    result = sum(new_text)
    return result
#Test & Result
# Time the key search, then write the report. Improvements over the
# original: the output file is opened only after the computation finishes
# (it used to sit open and empty for the whole search, leaking on error),
# and a with-block guarantees the handle is closed even if writing fails.
begin_problem = time.time()
problem_value = solution(list_name)
end_problem = time.time()
problem_time = end_problem - begin_problem
string = ""
string += "RESULT PROBLEM #"+str(problem_number)+"\n\n"
string += "Output: "+str(problem_value)+"\n"
string += "Computation time: "+str(problem_time)+" sec\n"
string += "\n\n\nCurrent date & time: " + time.strftime("%c")
with open("Solution "+str(problem_number)+".txt", "w") as fichier:
    fichier.write(string)
| [
"francois.de-salivet-de-fouchecour@polytechnique.edu"
] | francois.de-salivet-de-fouchecour@polytechnique.edu |
962ad189b3695ad55e5db43027b6e869b2817147 | fb408595c1edee0be293302c6d7bfc0c77d37c46 | /python/DP/DP_2096.py | a5753e0e8dda2057310f4dee0f056e7940fbb74d | [] | no_license | as950118/Algorithm | 39ad25519fd0e42b90ddf3797a61239862ad79b5 | 739a7d4b569057cdb6b6faa74254512b83d02bb1 | refs/heads/master | 2023-07-21T12:38:00.653579 | 2023-07-19T06:57:17 | 2023-07-19T06:57:17 | 125,176,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | n = int(input())
# DP_2096: sliding DP over an n x 3 grid, keeping only the previous row's
# best/worst path sums (O(1) extra memory).
arr = [0]*(3)
dp_max = [0]*(3)
dp_min = [0]*(3)
# The first row seeds both DP tables.
arr = list(map(int, input().split()))
temp = arr[:]
dp_max = temp[:]
dp_min = temp[:]
for i in range(1, n):
    arr = list(map(int, input().split()))
    # Column j of a row is reachable only from columns j-1, j, j+1 above.
    temp[0] = max(dp_max[0], dp_max[1]) + arr[0]
    temp[1] = max(dp_max[0], dp_max[1], dp_max[2]) + arr[1]
    temp[2] = max(dp_max[1], dp_max[2]) + arr[2]
    dp_max = temp[:]
    # temp is safely reused: all three slots are overwritten before dp_min
    # takes its copy.
    temp[0] = min(dp_min[0], dp_min[1]) + arr[0]
    temp[1] = min(dp_min[0], dp_min[1], dp_min[2]) + arr[1]
    temp[2] = min(dp_min[1], dp_min[2]) + arr[2]
    dp_min = temp[:]
print(max(dp_max), min(dp_min))
| [
"na_qa@icloud.com"
] | na_qa@icloud.com |
f443e27275903b151314c40311f6464aafca1b44 | 72784799e5436e8a96462bdbcb29baeb644dcc7f | /utilities/animate.py | 2c562e41c8ec2e736db293f0f772a55ff0091345 | [] | no_license | simonsben/undergrad_thesis | 31dd205cb734f7c876b5053040e5ab0bf8fbd5cb | 8458d00ae6525602b944279c2c280149a5957cb1 | refs/heads/master | 2020-04-02T10:46:55.255322 | 2019-04-08T06:01:48 | 2019-04-08T06:01:48 | 154,354,775 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | from matplotlib.pylab import figure, show, savefig, title, axis, draw
from networkx import spring_layout, draw_networkx_edges, draw_networkx_nodes
from matplotlib.animation import FuncAnimation
def add_node(graph, i, plot_layout):
    """FuncAnimation frame callback: meant to reveal node *i* of *graph*.

    NOTE(review): the drawing calls are still commented out, so each frame
    currently just forces a matplotlib redraw; incrementing *i* only
    rebinds the local name and never reaches the caller.
    """
    # draw_networkx_edges(graph, plot_layout, alpha=.3)
    # draw_networkx_nodes(node, plot_layout, node_size=100, edgecolors='k', node_color='w')
    i += 1
    draw()
def animate_creation(network, blocking=True, save_plot=True):
    """Draw the first three nodes / two edges of the network and set up a
    FuncAnimation intended to add the remaining nodes frame by frame.

    NOTE(review): *blocking* and *save_plot* are currently unused, and the
    FuncAnimation handle is only bound to a local variable, so the
    animation object is dropped when the function returns — confirm this
    is intended.
    """
    _title = 'Free-Scale Network'
    fig = figure(_title)
    axis('off')
    graph = network.network_plot
    plot_layout = spring_layout(graph)
    # Seed drawing: first three nodes and first two edges.
    # NOTE(review): slicing graph.nodes / graph.edges assumes they are
    # sequence-like; networkx NodeView/EdgeView are not sliceable in
    # current releases — verify against the networkx version in use.
    init_nodes = graph.nodes[:3]
    init_edges = graph.edges[:2]
    draw_networkx_nodes(graph, plot_layout, nodelist=init_nodes, node_size=100, edgecolors='k', node_color='w')
    draw_networkx_edges(graph, plot_layout, edgelist=init_edges, alpha=.3)
    draw()
    show()
    i = 3
    # NOTE(review): FuncAnimation calls func(frame, *fargs), which would
    # pass four arguments to the three-parameter add_node — confirm.
    animation = FuncAnimation(fig, add_node, fargs=(graph, i, plot_layout))
| [
"simons.ben0@gmail.com"
] | simons.ben0@gmail.com |
52a608c85aa5b18e530c6cb0cae1d8d2f58b7ec4 | 14d8418ca5990217be67aee89fdaa310db03fbba | /test_stats_d_graph_display.py | fffe014750a15f323e8f39408530e03c6133cae4 | [
"Apache-2.0"
] | permissive | sachanta/lm-sdk-python | 3a16457bd2d5b880a0d238a88a9d1d5b8d9675f0 | e476d415c7279457f79b5d032a73d950af2fe96b | refs/heads/master | 2023-08-03T08:39:42.842790 | 2021-09-13T07:20:56 | 2021-09-13T07:20:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. Note: For Python SDKs, the REQUEST parameters can contain camelCase or an underscore. However, the RESPONSE parameters will always contain an underscore. For example, the REQUEST parameter can be testLocation or test_location. The RESPONSE parameter will be test_location. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import logicmonitor_sdk
from logicmonitor_sdk.models.stats_d_graph_display import StatsDGraphDisplay # noqa: E501
from logicmonitor_sdk.rest import ApiException
class TestStatsDGraphDisplay(unittest.TestCase):
    """StatsDGraphDisplay unit test stubs (swagger-codegen generated)."""

    def setUp(self):
        # No fixtures required while the test body remains a stub.
        pass

    def tearDown(self):
        pass

    def testStatsDGraphDisplay(self):
        """Test StatsDGraphDisplay"""
        # FIXME: construct object with mandatory attributes with example values
        # model = logicmonitor_sdk.models.stats_d_graph_display.StatsDGraphDisplay()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"bamboo@build01.us-west-1.logicmonitor.net"
] | bamboo@build01.us-west-1.logicmonitor.net |
1bcb03882cd8aba3be9ee674c15f8f2ca62224bf | 42170b78e5277d96a33b8d796e3075a378723aa8 | /dataxHWSp2021/Practice Homework/Numpy_Pandas/student/tests/q1d.py | dca0b114bbcac2ff4f73cc5d33860855e83c8837 | [
"Apache-2.0"
] | permissive | 6shun/datax | e3d4d32295c26a6e62c6cd1ae2cabdd9b2f1addf | f912d22c838b511d3ada4ecfa3548afd80437b74 | refs/heads/main | 2023-03-03T09:51:35.255111 | 2022-01-21T16:13:34 | 2022-01-21T16:13:34 | 338,253,155 | 1 | 0 | null | 2021-02-12T07:42:14 | 2021-02-12T07:42:14 | null | UTF-8 | Python | false | false | 392 | py | test = { 'name': 'q1d',
'points': 1,
'suites': [ { 'cases': [ { 'code': '>>> isinstance(d, float)\nTrue',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| [
"52470060+Mahan-Tajrobehkar@users.noreply.github.com"
] | 52470060+Mahan-Tajrobehkar@users.noreply.github.com |
d88adc7061a3c48c6d2afe7420444e5c70762261 | 5297bdd3ccf64be915c05bfc599cb66d52fa6b17 | /memword/api/lessons.py | d2fb313eb68375d2f5e5ce06628007a4cb09c546 | [] | no_license | scdekov/memword | ee04ef4d4ca55084bf678d354ff77b0cb42403ba | bd011358c252ac6e3930dcae15df76c7103c61e0 | refs/heads/master | 2022-12-11T09:13:27.507111 | 2019-11-13T21:37:10 | 2019-11-13T21:37:10 | 146,096,522 | 1 | 1 | null | 2022-12-08T02:50:40 | 2018-08-25T13:14:04 | Python | UTF-8 | Python | false | false | 5,519 | py | from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404
from django.utils import timezone
from rest_framework import serializers, viewsets, decorators, status
from rest_framework.response import Response
from memword.api.serializers import TargetSerializer
from memword.models.lesson import Lesson, Question
from memword.logic.target_picker import TargetPicker
from memword.logic.learning_intervals_manager import LearningIntervalsManager
User = get_user_model()
class SubmitQuestionSerializer(serializers.Serializer):
    """Validates a student's answer submission for a single question.

    The lesson being answered is supplied via serializer context by the
    view ('lesson' key), alongside the request.
    """

    confidence_level = serializers.IntegerField()
    question_id = serializers.IntegerField()
    answer = serializers.CharField(allow_blank=True, required=False)

    def validate(self, data):
        """Require a non-empty answer when the lesson is an exam (it gets graded)."""
        if self.context['lesson'].lesson_type == 'exam' and not data.get('answer'):
            raise serializers.ValidationError('answer is required when submitting exam question')
        return data

    def validate_confidence_level(self, confidence_level):
        """Self-reported confidence must be on the 1..10 scale."""
        if confidence_level not in range(1, 11):
            raise serializers.ValidationError('confidence_level should be between 1 and 10')
        return confidence_level
class QuestionSerializer(serializers.ModelSerializer):
    """Read-only representation of a question with its target embedded."""
    class Meta:
        model = Question
        fields = ('id', 'target', 'passed', 'correct', 'confidence_level')
    # Nested serializer: embeds the full target payload instead of a pk.
    target = TargetSerializer()
class LessonSerializer(serializers.ModelSerializer):
    """Serializes a lesson and creates its questions from target_ids on save."""
    class Meta:
        model = Lesson
        fields = ('id', 'student_id', 'questions', 'lesson_type', 'start_time',
                  'end_time', 'expected_duration', 'title', 'target_ids', 'planned_start_time')
    questions = QuestionSerializer(many=True, read_only=True)
    lesson_type = serializers.ChoiceField(allow_blank=True, default=Lesson.TYPE_LECTURE, choices=Lesson.TYPES)
    # Write-only input: ids of the targets to build questions from.
    target_ids = serializers.ListField(child=serializers.IntegerField(), write_only=True)
    planned_start_time = serializers.DateTimeField(default=timezone.now)
    expected_duration = serializers.DurationField(default='60')

    def save(self):
        """Save the lesson for the requesting user and bulk-create one
        Question per entry of target_ids.

        TODO(review): target_ids may need to be validated as belonging to
        the current user (carried over from the original author's note).
        """
        target_ids = self.validated_data.pop('target_ids', [])
        student_id = self.context['request'].user.id
        lesson = super().save(student_id=student_id)
        Question.objects.bulk_create([Question(lesson=lesson, target_id=target_id) for target_id in target_ids])
        return lesson
class TopTargetsQuerySerializer(serializers.Serializer):
    """Query-string parameters for the @get-top-targets action."""
    # How many targets to return; defaults to 10 when omitted.
    targets_count = serializers.IntegerField(required=False, default=10)
class LessonsViewSet(viewsets.ModelViewSet):
    """CRUD plus custom actions for the requesting student's lessons."""
    queryset = Lesson.objects.all().order_by('-id')
    serializer_class = LessonSerializer

    def filter_queryset(self, queryset):
        """Restrict every list/detail view to the current user's lessons."""
        queryset = super().filter_queryset(queryset)
        return queryset.filter(student=self.request.user)

    @decorators.action(detail=True, methods=['POST'], url_path='@submit-answer')
    def submit_answer(self, request, pk):
        """Record one answered question on the lesson.

        Marks the question as passed with its confidence level and pass
        time; for exams also grades the submitted answer by exact match
        against the target description. Finalizes the lesson when it
        reports it should finish, then notifies LearningIntervalsManager.
        """
        lesson = self.get_object()
        serializer = SubmitQuestionSerializer(data=request.data,
                                              context={'request': request, 'lesson': lesson})
        serializer.is_valid(raise_exception=True)
        question = get_object_or_404(Question, lesson_id=pk, id=serializer.validated_data['question_id'])
        question.confidence_level = serializer.validated_data['confidence_level']
        question.passed = True
        question.pass_time = timezone.now()
        if lesson.lesson_type == 'exam':
            # Exact-match grading against the target's description.
            question.correct = serializer.validated_data['answer'] == question.target.description
        question.save()
        if question.lesson.should_finish():
            question.lesson.finalize()
        LearningIntervalsManager.handle_submitted_question(question)
        return Response({'question': QuestionSerializer(question).data})

    @decorators.action(detail=True, methods=['POST'], url_path='@start')
    def start(self, request, **kwargs):
        """Stamp the lesson's actual start time with the current time."""
        lesson = self.get_object()
        lesson.start_time = timezone.now()
        lesson.save()
        return Response({'lesson': LessonSerializer(lesson).data})

    @decorators.action(detail=True, methods=['POST'], url_path='@duplicate')
    def duplicate(self, request, **kwargs):
        """Clone a lesson (type, duration, questions) for the current user."""
        original_lesson = self.get_object()
        # TODO(review): lesson + question creation is supposed to run inside
        # an atomic transaction (carried over from the original note).
        new_lesson = Lesson.objects.create(student_id=request.user.id,
                                           lesson_type=original_lesson.lesson_type,
                                           expected_duration=original_lesson.expected_duration,
                                           planned_start_time=timezone.now())
        # start time should be calculated somehow
        Question.objects.bulk_create([Question(target_id=question.target_id, lesson_id=new_lesson.id)\
                                      for question in original_lesson.questions.all()])
        return Response({'lesson': LessonSerializer(new_lesson).data}, status=status.HTTP_201_CREATED)

    @decorators.action(detail=False, url_path='@get-top-targets')
    def get_top_targets(self, request):
        """Return the top-N study targets chosen by TargetPicker (N from the query string)."""
        serializer = TopTargetsQuerySerializer(data=request.GET)
        serializer.is_valid(raise_exception=True)
        top_targets = TargetPicker.pick_top(request.user, serializer.validated_data['targets_count'])
        return Response({'targets': TargetSerializer(top_targets, many=True).data})
| [
"svetlio1994@gmail.com"
] | svetlio1994@gmail.com |
d5816bf17fecd71e306ba321ee4fd6bda1913e63 | 2c4f00e65671467ed14b33f4f6b574bd9944eaea | /test.py | eceeb7bd27460d1c67779648392af96cb4ff1456 | [
"MIT"
] | permissive | ashafer01/chain | 20a459a7a4f0bc43668cfe71f58d568689c61f23 | e7ea9b3fb2b92459c581dfc0ebc424300cd333c0 | refs/heads/master | 2020-07-06T14:18:42.242072 | 2019-08-18T20:37:19 | 2019-08-18T20:37:19 | 203,046,524 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | import unittest
from chain import chain, args
class TestChain(unittest.TestCase):
    """Unit tests for the chain() pipeline helper and its args() packer."""

    def test_chain(self):
        """chain() must equal the hand-written nested composition below."""
        def _test_x(x):
            return x + 'x'
        def _test_y(x):
            return x + 'y'
        def _test_z(x):
            return x + 'z'
        def _test_2(a,b):
            return a+b
        def _test_3(a,b,c):
            return a+b+c
        # args(...) in the pipe supplies extra positional arguments to the
        # next callable, as mirrored by the native_res expression.
        chain_res = chain(args('w') | _test_x | _test_y | args('2'), _test_2, _test_z, args('3', '4'), _test_3)
        native_res = _test_3(_test_z(_test_2(_test_y(_test_x('w')), '2')), '3', '4')
        self.assertEqual(chain_res, native_res)

    def test_return_args(self):
        """A step may return an args(...) bundle that is unpacked into the
        next callable's positional and keyword parameters."""
        def _test_ret_args(x):
            res = args('hello', world=x)
            return res
        def _test_accept_args(hello, world=''):
            return hello + ' ' + world
        def _test_1(x):
            return '1 ' + x
        def _test_2(x):
            return x + ' 2'
        res = chain(args('test') | _test_1 | _test_2 | _test_ret_args | _test_accept_args)
        expected = 'hello 1 test 2'
        self.assertEqual(res, expected)
| [
"ashafer@pm.me"
] | ashafer@pm.me |
ed3cea97ae571dfe1f0a45dba14fc43b93212a84 | fb21a8f1fc02f5cee6f0a759e336561726d3b184 | /experiments/lstm-notcut/run.py | 74e6c8e8a8f1be2abab441657d41651360c17bd5 | [] | no_license | yamaguchi-milkcocholate/GoogleBrain-VentilatorPressurePrediction | 76632353ff25a0c9ad8db51ef1f4b728954537b5 | 1996bb81f5b6880a20b8e39c681fecef0bc8a201 | refs/heads/main | 2023-09-05T17:25:46.980274 | 2021-11-24T04:40:50 | 2021-11-24T04:40:50 | 410,795,933 | 0 | 0 | null | 2021-11-04T01:28:27 | 2021-09-27T08:06:55 | Python | UTF-8 | Python | false | false | 6,082 | py | from random import seed
import numpy as np
import pandas as pd
import json
import os
import sys
import gc
import shutil
from pprint import pprint
from pathlib import Path
from typing import *
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from sklearn.metrics import mean_absolute_error as mae
from sklearn.preprocessing import RobustScaler, PowerTransformer, MinMaxScaler
from sklearn.model_selection import KFold
import sys
print(str(Path(__file__).resolve().parent.parent.parent))
sys.path.append(str(Path(__file__).resolve().parent.parent.parent))
from src.utils import (
seed_every_thing,
fetch_data,
Config,
plot_metric,
reduce_tf_gpu_memory,
reduce_mem_usage,
fetch_custom_data,
CustomL1Loss
)
def build_model(config: Config, n_features) -> keras.models.Sequential:
    """Build and compile the bidirectional-LSTM regressor.

    Stacks one Bidirectional(LSTM(units, return_sequences=True)) per entry
    of config.n_units, then selu-activated Dense layers per
    config.n_dense_units, and a final linear Dense(1) head. Compiled with
    Adam(config.lr) and an MAE loss.
    """
    # Assemble the full layer list first, then hand it to Sequential in
    # one go (equivalent to the incremental model.add calls).
    stack = [keras.layers.Input(shape=(config.cut, n_features))]
    for units in config.n_units:
        stack.append(
            keras.layers.Bidirectional(
                keras.layers.LSTM(units, return_sequences=True)
            )
        )
    for units in config.n_dense_units:
        stack.append(keras.layers.Dense(units, activation="selu"))
    stack.append(keras.layers.Dense(1))
    model = keras.models.Sequential(stack)
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=config.lr),
        loss='mae')
    return model
def main(config: Dict[str, Any]):
    """Train the LSTM ventilator-pressure model with K-fold cross-validation.

    Loads cached, pre-featurised train/test frames, robust-scales the
    continuous features, reshapes rows into (sequence, config.cut, feature)
    tensors, trains one model per fold, and writes out-of-fold predictions,
    per-fold weights/logs, and (outside debug mode) the median-blended test
    submission under the run's log directory.
    """
    config = Config().update(config)
    seed_every_thing(seed=config.seed)
    reduce_tf_gpu_memory(gpu_id=config.gpu_id)

    # Resolve the repo-relative data/log/cache directories for this run.
    basedir = Path(__file__).resolve().parent
    datadir = basedir / ".." / ".." / "data"
    logdir = basedir / ".." / ".." / "logs" / config.dirname
    cachedir = basedir / ".." / ".." / "cache"
    os.makedirs(logdir, exist_ok=True)
    config.to_json(logdir / "config.json")

    # Keep only the first config.cut timesteps of each 80-step sequence;
    # test_preds_idx later maps truncated predictions back into the
    # full-length submission frame.
    _, test_df, submission_df = fetch_custom_data(datadir=datadir, n_splits=config.n_splits)
    test_df["count"] = (np.arange(test_df.shape[0]) % 80).astype(int)
    test_preds_idx = test_df["count"] < config.cut
    test_df = test_df[test_preds_idx].reset_index(drop=True)
    test_df["pressure"] = 0

    # The cached CSVs already carry engineered features and fold labels.
    train_df = reduce_mem_usage(pd.read_csv(cachedir / f"train-10fold-debug{config.debug}.csv"))
    test_df = reduce_mem_usage(pd.read_csv(cachedir / f"test-10fold-debug{config.debug}.csv"))
    # One fold label per sequence (rows are grouped in blocks of config.cut).
    kfolds = train_df.iloc[0::config.cut]['kfold'].values

    features = list(train_df.drop(["kfold", "pressure"], axis=1).columns)
    pprint(features)
    print(len(features))
    # Scale only continuous columns; R_/C_/RC_ and u_out columns are excluded.
    cont_features = [f for f in features if ("RC_" not in f) and ("R_" not in f) and ("C_" not in f) and ("u_out" not in f)]
    pprint(cont_features)

    RS = RobustScaler()
    train_df[cont_features] = RS.fit_transform(train_df[cont_features])
    test_df[cont_features] = RS.transform(test_df[cont_features])

    # Reshape flat rows into (n_sequences, config.cut, n_features) tensors.
    train_data, test_data = train_df[features].values, test_df[features].values
    train_data = train_data.reshape(-1, config.cut, train_data.shape[-1])
    targets = train_df[["pressure"]].to_numpy().reshape(-1, config.cut)
    test_data = test_data.reshape(-1, config.cut, test_data.shape[-1])

    with tf.device(f"/GPU:{config.gpu_id}"):
        # Out-of-fold predictions plus per-fold test predictions
        # (median-blended after the loop).
        valid_preds = np.empty_like(targets)
        test_preds = []
        for fold in range(config.n_splits):
            train_idx, test_idx = (kfolds != fold), (kfolds == fold)
            print("-" * 15, ">", f"Fold {fold+1}", "<", "-" * 15)
            savedir = logdir / f"fold{fold}"
            os.makedirs(savedir, exist_ok=True)

            X_train, X_valid = train_data[train_idx], train_data[test_idx]
            y_train, y_valid = targets[train_idx], targets[test_idx]

            model = build_model(config=config, n_features=len(features))
            # es = EarlyStopping(
            #     monitor="val_loss",
            #     patience=config.es_patience,
            #     verbose=1,
            #     mode="min",
            #     restore_best_weights=True,
            # )
            customL1 = CustomL1Loss(
                X_valid=X_valid,
                y_valid=y_valid,
                u_outs=X_valid[:, :, features.index("u_out")],
                filepath=savedir / "weights_custom_best.h5"
            )
            check_point = ModelCheckpoint(
                filepath=savedir / "weights_best.h5",
                monitor="val_loss",
                verbose=1,
                save_best_only=True,
                mode="min",
                save_weights_only=True,
            )
            schedular = ReduceLROnPlateau(
                mode="min", **config.schedular
            )
            history = model.fit(
                X_train,
                y_train,
                validation_data=(X_valid, y_valid),
                epochs=config.epochs,
                batch_size=config.batch_size,
                callbacks=[check_point, schedular, customL1]
            )
            model.save_weights(savedir / "weights_final.h5")
            # Reload the best weights tracked by the CustomL1Loss callback
            # before predicting.
            model.load_weights(savedir / "weights_custom_best.h5")

            pd.DataFrame(history.history).to_csv(savedir / "log.csv")
            plot_metric(filepath=savedir / "log.csv", metric="loss")

            valid_preds[test_idx, :] = model.predict(X_valid).squeeze()
            test_preds.append(model.predict(test_data).squeeze().reshape(-1, 1).squeeze())

            # Free GPU/host memory between folds.
            del model, X_train, X_valid, y_train, y_valid
            keras.backend.clear_session()
            gc.collect()

    pd.DataFrame(valid_preds).to_csv(logdir / "valid_preds.csv")
    if not config.debug:
        # Median blend across folds, written only into the truncated rows.
        submission_df.loc[test_preds_idx, "pressure"] = np.median(test_preds, axis=0)
        submission_df.to_csv(logdir / "submission.csv", index=False)
    # Snapshot this script next to its outputs for reproducibility.
    shutil.copyfile(Path(__file__), logdir / "script.py")
if __name__ == "__main__":
    # Usage: python run.py <config-file.json>; the file is resolved next to
    # this script and its contents override the Config defaults.
    cnf_file = sys.argv[1]
    cfg_file_path = Path(__file__).resolve().parent / cnf_file
    with open(cfg_file_path, "rb") as f:
        config = json.load(f)
    main(config=config)
| [
"zuuuubo.tetsu@outlook.jp"
] | zuuuubo.tetsu@outlook.jp |
ed0a4b5efada1939d7898d3beec9436407d38b31 | 7c208711405aa6bd99106b94697028734e3fb1f9 | /app/campaign_rec.py | f3bbb988956ae76c790a81ac4f90a42ced4f46be | [] | no_license | tb16/fundraising-campaign | 8e280dcec7820c6b65ac7ce0b7a4edc68cde735d | a091d67fb3e314d081b6c8d3ec3a447bf134d498 | refs/heads/master | 2021-01-11T18:17:27.329103 | 2016-10-18T19:32:35 | 2016-10-18T19:32:35 | 69,335,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,454 | py | import pandas as pd
import numpy as np
import requests
import bs4
from string import punctuation
import cPickle as pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from tokenizer import tokenize
'''
campaign recommendation using cosine similarity of vectorised stories.
'''
df = pd.read_csv('../data/featured_data1.csv')
def bag_of_words(df):
    '''
    Vectorise the campaign stories with tf-idf.

    Returns (vectorizer, sparse): the fitted TfidfVectorizer and the
    document-term sparse matrix for df.story.

    Fix: the original called vectorizer.fit(df.story) and then
    fit_transform(df.story) on the same data, refitting the model and doing
    the work twice; a single fit_transform is equivalent and half the cost.
    '''
    vectorizer = TfidfVectorizer(max_features = 4000, decode_error='ignore', max_df = 0.90, min_df= 2, stop_words = 'english', tokenizer = tokenize)
    sparse = vectorizer.fit_transform(df.story)
    return vectorizer, sparse
def pickle_vec(vectorizer, sparse):
    '''
    Persist the fitted vectorizer and sparse matrix under ../data/.

    Improvements: with-blocks close the file handles even if pickling
    raises (the original only closed them on success), and the files are
    opened in binary mode, which is what pickle streams require.
    '''
    with open('../data/vectorizer.pkl', 'wb') as v:
        pickle.dump(vectorizer, v)
    with open('../data/sparse_mat.pkl', 'wb') as s:
        pickle.dump(sparse, s)
def get_success_index(df):
    '''
    Return (as a numpy array) the indices of "successful" campaigns,
    i.e. rows whose percentage column is at least 0.5.
    '''
    successful = df.index[df.percentage >= 0.5]
    return np.array(successful.tolist())
def download(url, *a, **kw):
    '''
    Download *url* and return it parsed as a BeautifulSoup document.

    Extra positional/keyword arguments are forwarded to requests.get. A
    desktop Chrome User-Agent is always written into the headers (any
    other caller-provided headers are preserved) -- presumably to avoid
    bot filtering; unverified.
    '''
    _user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 '
                   '(KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36')
    kw.setdefault('headers', {})['User-Agent'] = _user_agent
    return bs4.BeautifulSoup(requests.get(url, *a, **kw).text, 'html.parser')
def search_url(title):
    '''
    Search gofundme for *title* and return the first campaign URL found.

    Falls back to the literal string 'link not found' when no search tile
    exposes a usable link, so callers must treat that value as a sentinel.
    '''
    # NOTE(review): this local name shadows the function itself.
    search_url = 'https://www.gofundme.com/mvc.php?'
    soup = download(search_url, params={'term' : title, 'route': 'search'})
    for tile in soup.select('.search_tile'):
        try:
            # The extracted href lacks a scheme, hence the 'https:' prefix.
            return 'https:'+tile.select('.name')[0]['href']
        except:
            # Best-effort: a tile without a usable .name anchor is skipped.
            continue
    return 'link not found'
def similar_campaign(vector, vectorizer, sparse_mat):
    '''
    Find the successful campaigns most similar to the given campaign vector.

    Ranks all stories by cosine-style similarity (linear_kernel on the
    tf-idf matrix), keeps only campaigns whose funding percentage is at
    least 0.5, and returns the top 3 as a dataframe enriched with their
    ten highest-tfidf keywords and a gofundme URL looked up by title.

    NOTE: relies on the module-level df loaded at import time.
    '''
    feature_names = np.array(vectorizer.get_feature_names())
    similarity = linear_kernel(vector, sparse_mat)
    # Story indices ordered from most to least similar.
    top_indices_story = np.argsort(similarity.flatten())[-1::-1]
    success_indices = []
    for top_index in top_indices_story:
        # Keep only "successful" campaigns (>= 50% of goal raised).
        if df.percentage[top_index] >= 0.5:
            success_indices.append(top_index)
    keywords = []
    for idx in success_indices[:3]:
        # Ten features with the largest tf-idf weight in this story.
        keywords_indices = np.argsort(sparse_mat[idx].toarray()).flatten()[-1:-11:-1]
        keywords.append(' '.join(feature_names[keywords_indices]))
    print success_indices[:3]
    output_df = df.iloc[success_indices[:3]]
    output_df['keywords'] = keywords
    output_df['url'] = map(search_url, output_df.title)
    output_df.reset_index(inplace = True)
    return output_df[['category', 'days','title', 'story', 'friends','shares', 'goal', 'percentage', 'keywords', 'url']]
if __name__ == '__main__':
    # One-off preprocessing kept for reference:
    # df = df[df['percentage'] >= 0.50]
    # df.to_csv('../data/featured_data_success.csv', index = False)
    # Fit the tf-idf model on every story and cache both artifacts to disk.
    vectorizer, sparse = bag_of_words(df)
    pickle_vec(vectorizer, sparse)
| [
"Komal@Teks-MacBook-Pro.local"
] | Komal@Teks-MacBook-Pro.local |
0649ed61513fbe6a09949d36e72df66a81bef44a | ec1eb0dd3ceeff03ab1e20c80259c5868a1a5004 | /project2/rl_federated_nas/glace_search_svhn.py | 556cb4600ca063cf21cae147f2417b3d5384845e | [] | no_license | dixiyao/CS385 | 601a7aebaa9a7644be472fbdaa002c7ca76a5816 | 405a5632dde1fef96ccb301c0994d783776c7108 | refs/heads/main | 2023-06-02T01:10:12.892223 | 2021-06-20T09:30:17 | 2021-06-20T09:30:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,604 | py | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import random
import copy
from torch.autograd import Variable
from model_search import Network
from model_search_local import MaskedNetwork
from architect import Architect
from federated import sample_mask, client_update, fuse_weight_gradient, init_gradient, client_weight_param, extract_index
from data_distribution import _data_transforms_cifar10, even_split, none_iid_split
from noniid_svhn import client_data
# Command-line hyper-parameters for the federated architecture search run.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--client_batch_size', type=int, default=256, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=1000, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=10000, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=8, help='total number of layers')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.9, help='portion of training data')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_learning_rate', type=float, default=3e-3, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--arch_baseline_decay', type=float, default=0.99, help='weight decay for reward baseline')
parser.add_argument('--client', type=int, default=10, help='number of clients')
parser.add_argument('--glace_epoch', type=int, default=10000, help='number of epoch for freezing alpha')
parser.add_argument('--non_iid', action='store_true', default=False, help='use non iid dataset')
parser.add_argument('--fed_non_iid',action='store_true',default=False,help='use non iid distribution in FedNAS(CVPR20)')
parser.add_argument('--fed_selection',default=None,help='prepared distribution')
args = parser.parse_args()
# Each run writes into a unique timestamped directory with a source snapshot.
args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
# Log both to stdout and to <save>/log.txt with the same format.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# SVHN has 10 digit classes (same class count as CIFAR-10).
CIFAR_CLASSES = 10
def main():
    """Federated architecture-search training loop.

    Each epoch: sample one masked sub-network per client from the global
    alphas, copy global weights into the clients, train every client on its
    own data shard, then fuse the client gradients back into the global
    model and take one optimizer step.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Fix all RNG seeds for reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    criterion = nn.CrossEntropyLoss()
    # criterion = criterion.cuda()
    # Global supernet holding the shared weights and architecture alphas.
    global_model = Network(args.init_channels, CIFAR_CLASSES, args.layers, criterion)
    # global_model = global_model.cuda()
    # logging.info("param size = %fMB", utils.count_parameters_in_MB(global_model))
    global_optimizer = torch.optim.SGD(
        global_model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay)

    #train_transform, valid_transform = _data_transforms_cifar10()
    #dataset = dset.SVHN(root='../data', download=True, transform=train_transform)
    '''
    testds=[]
    for (img, label) in dataset:
        if label in [0,2,3,5,9]:
            testds.append((img,label))
    random.shuffle(testds)
    dataset=copy.deepcopy(testds)
    '''
    # Build one DataLoader per client.
    # NOTE(review): in the non-fed_non_iid branches `dataset` is only defined
    # in the commented-out code above — confirm it is provided elsewhere,
    # otherwise those branches raise NameError.
    train_queues = []
    if args.fed_non_iid:
        train_queues=client_data(args.data,args.client,args.client_batch_size)
    else:
        if args.non_iid:
            user_split = none_iid_split(dataset, num_user=args.client)
        else:
            user_split = even_split(dataset, args.client)
        for i in range(args.client):
            train_data = user_split[i]
            num_train = len(train_data)
            indices = list(range(num_train))
            train_queue = torch.utils.data.DataLoader(
                train_data, batch_size=args.client_batch_size,
                sampler=torch.utils.data.sampler.SubsetRandomSampler(indices),
                pin_memory=True, num_workers=2)
            train_queues.append(train_queue)
    # valid_data = user_split[-1]
    # num_train = len(valid_data)
    # indices = list(range(num_train))
    # valid_queue = torch.utils.data.DataLoader(
    #     valid_data, batch_size=32,
    #     sampler=torch.utils.data.sampler.SubsetRandomSampler(indices),
    #     pin_memory=True, num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        global_optimizer, int(args.epochs), eta_min=args.learning_rate_min)
    global_architect = Architect(global_model, args)
    init_gradient(global_model)

    global_accuracy = []
    client_accuracy = []
    total_loss = []
    for epoch in range(args.epochs):
        scheduler.step()
        lr = scheduler.get_lr()[0]
        logging.info('epoch %d lr %e', epoch, lr)
        # genotype = global_model.genotype()
        # logging.info('genotype = %s', genotype)
        #
        # Sample a masked sub-network per client from the current alphas.
        client_models = []
        epoch_acc = []
        epoch_loss = []
        epoch_index_normal = []
        epoch_index_reduce = []
        for client_idx in range(args.client):
            mask_normal = sample_mask(global_model.alphas_normal)
            mask_reduce = sample_mask(global_model.alphas_reduce)
            index_normal = extract_index(mask_normal)
            index_reduce = extract_index(mask_reduce)
            client_model = MaskedNetwork(args.init_channels, CIFAR_CLASSES, args.layers, criterion, mask_normal, mask_reduce)
            client_models.append(client_model)
            epoch_index_normal.append(index_normal)
            epoch_index_reduce.append(index_reduce)
        # copy weight of global model to client models
        # alphas in client models are actually gates, and equal to 1 forever
        client_weight_param(global_model, client_models)
        # Local training on each client's shard.
        for client_idx in range(args.client):
            client_model = client_models[client_idx]
            client_models[client_idx], acc, loss = client_update(train_queues[client_idx], client_model, criterion)
            epoch_acc.append(acc)
            epoch_loss.append(loss)
        avg_acc = float(torch.mean(torch.Tensor(epoch_acc)))
        avg_loss = float(torch.mean(torch.Tensor(epoch_loss)))
        logging.info("client accuracy: " + str(epoch_acc))
        logging.info("client loss: " + str(epoch_loss))
        logging.info("client accuracy: "+str(avg_acc)+" , loss: "+str(avg_loss))
        client_accuracy.append(avg_acc)
        total_loss.append(avg_loss)
        # Aggregate client gradients into the global model and step once.
        fuse_weight_gradient(global_model,client_models)
        global_optimizer.step()
        global_optimizer.zero_grad()
        # if epoch > args.glace_epoch:
        #     global_architect.step(epoch_acc,epoch_index_normal,epoch_index_reduce)
        if (epoch+1) % args.report_freq == 0:
            # valid_acc, valid_obj = infer(valid_queue,global_model,criterion)
            # logging.info('valid_acc %f', valid_acc)
            # global_accuracy.append(valid_acc)
            logging.info("alphas normal")
            logging.info(F.softmax(global_model.alphas_normal, dim=-1))
            logging.info("alphas reduce")
            logging.info(F.softmax(global_model.alphas_reduce, dim=-1))
            utils.save(global_model, os.path.join(args.save, 'weights_epoch'+str(epoch)+'.pt'))

    logging.info("*** final log ***")
    logging.info("loss")
    logging.info(total_loss)
    logging.info("client accuracy")
    logging.info(client_accuracy)
    logging.info("global accuracy")
    logging.info(global_accuracy)
def infer(valid_queue, model, criterion):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model = model.cuda()
model.eval()
for step, (input, target) in enumerate(valid_queue):
input = Variable(input, volatile=True).cuda()
target = Variable(target, volatile=True).cuda(async=True)
logits = model(input)
loss = criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data.item(), n)
top1.update(prec1.data.item(), n)
top5.update(prec5.data.item(), n)
# if step % args.report_freq == 0:
# logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
model = model.cpu()
return top1.avg, objs.avg
# Script entry point.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | dixiyao.noreply@github.com |
7bc0cb59175906afc9c0c195273cb6c3843800f3 | f213d968b6d43ca27c52d8d61311c054be0e204c | /tutorial2.py | 84a2831dee2e41157438f719de429b18aaf113bf | [] | no_license | datasqr/OpenCV | f9a8c161735fd3b31f6a65b73b41e5ed7fda783c | 3fe8d52e1ea133f3c402ae7fd2606d1f716e0215 | refs/heads/master | 2021-01-25T04:57:50.930615 | 2015-01-07T19:25:34 | 2015-01-07T19:25:34 | 28,535,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | # http://docs.opencv.org/trunk/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.html
# Drawing Functions tutorial (OpenCV):
# http://docs.opencv.org/trunk/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.html
import numpy as np
import cv2

# Create a black image (250x250 pixels, 3 channels, uint8).
img = np.zeros((250, 250, 3), np.uint8)
# print() works on both Python 2 and 3; the original bare `print img`
# is a SyntaxError under Python 3.
print(img)

# Draw a diagonal blue line with thickness of 5 px
# cv2.line(img,(0,0),(511,511),(255,0,0),5)
# cv2.rectangle(img,(384,0),(510,128),(0,255,0),3)
# cv2.circle(img,(447,63), 63, (0,0,255), -1)
# cv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1)

# Closed yellow polyline through three vertices; polylines expects the
# points reshaped to (n_points, 1, 2).
pts = np.array([[10,5],[20,30],[50,10]], np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(img,[pts],True,(0,255,255))

# Show the result until a key is pressed.
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"mateusz.zbikowski@gmail.com"
] | mateusz.zbikowski@gmail.com |
2804da30f453e1a5fd2133a3e59538c1b210397e | 8b37b0378f1b423629633c7cc8e22becca640641 | /runs/deep_forest_ranger_outpost.py | dbfba7bec08ca23410551657f4d2fcd1b330c2b2 | [] | no_license | destor/zombii | 11b7617a1205b04da10f351b834151651d1e150b | f9e658c128e6fc8c4551237143d2c1c1a5ccea9f | refs/heads/master | 2021-09-23T08:56:06.863946 | 2018-09-21T04:39:30 | 2018-09-21T04:39:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # vim: syn=python
from align import *
# Route/run definition consumed by the align-based runner: each dict is one
# step of the run.
# NOTE(review): field meanings below are inferred from names/values — confirm
# against the `align` module:
#   'path'      - semicolon-separated movement commands ('6 w' = six west moves)
#   'announce'  - text announced when the step is reached
#   'target'/'alignment' - mob to engage and the alignment constant from align
FILE = [
    {
        'announce': '9w',
    },
    {
        'path': '6 w;sw;7 w;sw;5 w;4 sw;6 w;sw;w;sw;enter',
        'name': '__announce__',
        'announce': 'Deep Forest Ranger Outpost: Grizmogh',
        'summary': True,
        'skip': 4,
    },
    {
        'target': 'commander',
        'alignment': SLIGHTLY_GOOD,
        'announce': 'Grizmogh 4.5m',
        'out': 'out',
        'in': 'enter',
        'warnings': "Uses 'hurl axes' randomly",
        'skip': 2,
    },
    {
        'announce': 'Deep Forest Ranger Outpost',
    },
    {
        'path': 'out;ne;e;ne;6 e;4 ne;5 e;ne;7 e;ne;6 e',
        'name': '__announce__',
        'announce': '9w',
    },
    {
        'name': 'Unknown',
    },
]
| [
"alexander@schrepfer.us"
] | alexander@schrepfer.us |
75b4c345054f9757d6e642ce84b0d8c16a1c82c6 | eb00755d9d0f2630ffdb21e3ab6685b2fbcb0d9e | /tests/bench/bench_scripts/bench_sampleData.py | 729fcf79af5383d0af68875e3179d971fe99aff2 | [
"BSD-3-Clause"
] | permissive | mlangill/biom-format | aca45518c71b807cf30b0f548ad726880802a2b5 | 4cebfbdba8b6b64ff0d503df33634e3d52de1de0 | refs/heads/master | 2021-01-16T21:59:51.218830 | 2013-12-04T16:41:50 | 2013-12-04T16:41:50 | 9,486,201 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | #!/usr/bin/env python
from sys import argv
from gzip import open as gzip_open
from biom.parse import parse_biom_table
from random import choice
if __name__ == '__main__':
    # Benchmark target: load a gzipped BIOM table and fetch the data for
    # one randomly chosen sample.
    biom_table = parse_biom_table(gzip_open(argv[1]))
    sample_values = biom_table.sampleData(choice(biom_table.SampleIds))
| [
"mcdonadt@colorado.edu"
] | mcdonadt@colorado.edu |
85f5b6db2f09ac1feae49616d3363ce62a7724e1 | bab70d19d523e616ebaa1f74c114e88fd5d2ad83 | /CAhw2/RunMeForTest.py | af2bd375ebc9796f9d89d82792c8c2ae4b9716d8 | [] | no_license | FuHsinyu/Cellular-Automata-model | 4ef3351735aee8a4d268f67d3d04b36809bb33d2 | 2a0bdf505fd0b793caee5b1b708fb9d591532ebb | refs/heads/master | 2021-05-06T19:49:16.148410 | 2018-06-13T01:55:14 | 2018-06-13T01:55:14 | 112,194,748 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | from pycx_gui import GUI
from myModel import CAmodel
import matplotlib.pyplot as plt
import numpy as np
import random
#change digit in line 18 and run to get a FAST TESTING RESULT
# Fast-test driver: change the bound in `for rule in range(...)` below and
# run to get a quick result.

# Random binary starting row of 100 cells.
initial_row = [random.randrange(2) for _ in range(100)]

neighbourhood_range = 1                          # r: neighbourhood radius
cells_in_neighbourhood = neighbourhood_range * 2 + 1  # system size argument
num_states = 2                                   # k: number of cell states

# One entry per rule: detected cycle length, or -1 if none within budget.
cycle_lengths = []
for rule in range(6):
    # CAmodel signature: __init__(self, range, base, rule, initRow, systemSize)
    model = CAmodel(neighbourhood_range, num_states, rule,
                    initial_row, cells_in_neighbourhood)
    seen_at = {}  # row state (as tuple) -> first step it was observed
    for step_count in range(100000):
        state = tuple(model.currentRow)
        if state in seen_at:
            # Revisited state: cycle length is the gap since first sighting.
            cycle_lengths.append(step_count - seen_at[state])
            break
        seen_at[state] = step_count
        model.step()
    else:
        cycle_lengths.append(-1)  # no cycle found within the step budget

model.draw(cycle_lengths)
"247047.niuitmo.ru"
] | 247047.niuitmo.ru |
904f11ece1f3a1f0e9f815aa7965f064e2510a83 | dbe770c12a3186e439ffe7bd1f3853a1b3ec6e4f | /test1.py | dab87f2a97cd837ab8954612da96924a871cd88a | [] | no_license | ankurmishra727/JenkinsWithJenkinsFile2 | d5d2f659b514c334e22736a1809946b6165dbc4e | 80632d059612583a9d8e1991415ecd603657146b | refs/heads/master | 2020-03-19T06:07:45.947120 | 2018-06-04T10:54:06 | 2018-06-04T10:54:06 | 135,992,780 | 0 | 0 | null | 2018-06-04T09:51:34 | 2018-06-04T08:15:14 | Python | UTF-8 | Python | false | false | 44 | py | print("merging into master from branch 1")
| [
"ankurgargmishra@gmail.com"
] | ankurgargmishra@gmail.com |
17e16a08041f1fc5702bff45cbade47ad9622093 | eceeef628f926a51797f6bbe1bfd409c566d3d3b | /Res18_T2_transfer.py | cb27dac94ca41ab0b66f921c24551c18adcb1558 | [] | no_license | wangshuai-bit/T2_classification | 2ca33cb6b52be4f12846e245ca2bbd6a87d3ec7f | 94488e168d618abe8228c75f07f58209c1cbccbc | refs/heads/main | 2023-02-18T20:14:50.913843 | 2021-01-22T01:30:44 | 2021-01-22T01:30:44 | 331,675,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,511 | py | # try to print the error image 20190520
import tensorflow as tf
import pickle
import time
from tflearn.layers.conv import global_avg_pool
from tensorflow.contrib.layers import batch_norm, flatten
from tensorflow.contrib.layers import xavier_initializer
from tensorflow.contrib.framework import arg_scope
from PIL import Image
from load_data import *
import matplotlib.pyplot as plt
from tensorflow.python import pywrap_tensorflow
import math
from itertools import cycle
from sklearn.metrics import roc_curve,auc
from scipy import interp
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
# Pin the process to GPU 0.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Hyperparameter
growth_k = 24        # DenseNet growth rate (kept from the DenseNet variant of this script)
nb_block = 2  # how many (dense block + Transition Layer) ?
#init_learning_rate = 2.0e-4
init_learning_rate = 0.01
#tmp_learning_rate = 0.1
init_lamda_1 = 0.00
init_lamda_2 = 1.00
epsilon = 1e-4  # AdamOptimizer epsilon
dropout_rate = 0.30
keep_prob = 1.0

# Momentum Optimizer will use
nesterov_momentum = 0.9
weight_decay = 8e-4
weight_decay_l1 = 0

# Label & batch_size
batch_size = 32
dataset_size = 4800
iteration = 150
# batch_size * iteration = data_set_number
test_iteration = 10  # Evaluate() averages loss/acc over this many test batches
# total_epochs = 300
total_epochs = 300

# regularzer
reg_scale = 0.4
# train
isTrain =False
#datasets
datasets = {}

# Output locations: checkpoints, summaries and the text log all live under root_path.
root_path = '/home/wangshuai/ckpts_for_zhengyao/pt_5_to_2_lr_transfer_test_5_5to2'
#os.mkdir(root_path)
txt_path = os.path.join(root_path, 'logs.txt')
print(txt_path)
ckpt_path = root_path
summary_path = root_path
save_path = os.path.join(root_path, 'train_64_pt_5_to_2')

# Header line written to the text log describing this run's hyperparameters.
write_title = "train_64_pt_5_to_2, init_learning_rate:%.6f, dropout_rate:%.2f, " \
              "weight_decay%.4f,total_epochs%.4f, batch_size%.1f\n" \
              % (init_learning_rate,dropout_rate,weight_decay,total_epochs,batch_size)

start_time = time.time()
print("start time is", start_time)
def variable_summaries(var, name):
    """Record a scalar 'mean' summary and a histogram for `var` under `name`."""
    with tf.name_scope(name):
        tf.summary.scalar('mean', tf.reduce_mean(var))
        tf.summary.histogram('histogram', var)
def conv2d(x, W):
    """Stride-1 2-D convolution of `x` with kernel `W`, SAME padding."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')
def bias_variable(shape):
    """Create a float32 bias Variable of `shape`, initialised to 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape, dtype=tf.float32))
def Global_Average_Pooling(x, stride=1):
    """Global average pooling over the spatial dimensions (tflearn helper).

    `stride` is unused; it is kept only for signature compatibility with a
    tf.layers-based alternative implementation.
    """
    return global_avg_pool(x, name='Global_avg_pooling')
# But maybe you need to install h5py and curses or not
def Batch_Normalization(x, training, scope):
    """Batch normalization whose mode follows the boolean `training` tensor.

    tf.cond selects between a fresh batch_norm op (training branch) and a
    reuse=True copy (inference branch) within the same variable scope, so
    both branches share gamma/beta and the moving statistics (decay 0.9).
    """
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,   # update moving averages in place
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=False):
        return tf.cond(training,
                       lambda: batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda: batch_norm(inputs=x, is_training=training, reuse=True))
def Drop_out(x, rate, training):
    """Dropout that is active only while `training` is True."""
    return tf.layers.dropout(x, rate=rate, training=training)
def Relu(x):
    """Element-wise rectified linear activation."""
    return tf.nn.relu(x)
def Average_pooling(x, pool_size=[2, 2], stride=2, padding='VALID'):
    """2-D average pooling wrapper around tf.layers.average_pooling2d."""
    return tf.layers.average_pooling2d(x, pool_size, stride, padding=padding)
def max_pool(input, k_size=1, stride=1, name=None):
    """Square max-pooling with SAME padding (k_size x k_size window)."""
    window = [1, k_size, k_size, 1]
    hops = [1, stride, stride, 1]
    return tf.nn.max_pool(input, ksize=window, strides=hops,
                          padding='SAME', name=name)
def Concatenation(layers):
    """Concatenate a list of feature maps along the channel axis (NHWC)."""
    return tf.concat(layers, 3)
def Linear(x):
    """Two-layer classification head: features -> 10 units -> class_num logits.

    Both dense layers are L2-regularised with the module-level `reg_scale`;
    the scope names 'linear_1'/'linear_5' are preserved for checkpoint
    compatibility.
    """
    hidden = tf.layers.dense(
        inputs=x, units=10, name='linear_1', use_bias=True,
        kernel_regularizer=tf.contrib.layers.l2_regularizer(reg_scale))
    return tf.layers.dense(
        inputs=hidden, units=class_num, name='linear_5', use_bias=True,
        kernel_regularizer=tf.contrib.layers.l2_regularizer(reg_scale))
def Evaluate(sess, epoch):
    """Run the held-out test set through the network.

    Averages loss/accuracy over `test_iteration` batches of size `add`.
    On the final epoch it additionally accumulates raw logits to build a
    per-class confusion breakdown, per-class probability averages and a
    one-vs-rest ROC plot saved to "ROC of 5_to_2.jpg".

    Returns (test_acc, test_loss, tf.Summary) for TensorBoard logging.
    """
    test_acc = 0.0
    test_acc_norm = 0.0      # unused accumulators kept from earlier experiments
    test_acc_arc = 0.0
    test_loss = 0.0
    test_pre_index = 0
    train_pre_index = 0      # unused
    add = 67                 # test batch size: 67 samples * 10 iterations
    #add = 930
    # per-true-class sample counts
    y_amount_0 = 0
    y_amount_1 = 0
    y_amount_2 = 0
    y_amount_3 = 0
    equal = 0                # unused
    # per-class correct-prediction counts
    y_equal_0 = 0
    y_equal_1 = 0
    y_equal_2 = 0
    y_equal_3 = 0
    # confusion-matrix off-diagonals: y_AtoB = true class A predicted as B
    y_0to1 = 0
    y_0to2 = 0
    y_0to3 = 0
    y_1to0 = 0
    y_1to2 = 0
    y_1to3 = 0
    y_2to0 = 0
    y_2to1 = 0
    y_2to3 = 0
    y_3to0 = 0
    y_3to1 = 0
    y_3to2 = 0
    # summed sigmoid/softmax probability vectors of correctly classified samples
    y_equal_0_pro_sigmoid = 0
    y_equal_1_pro_sigmoid = 0
    y_equal_0_pro_softmax = 0
    y_equal_1_pro_softmax = 0
    y_equal_2_pro_softmax = 0
    y_equal_3_pro_softmax = 0
    # summed probability vectors over all samples of each class
    y_all_1_pro_sigmoid = 0
    y_all_0_pro_sigmoid = 0
    y_all_0_pro_softmax = 0
    y_all_1_pro_softmax = 0
    y_all_2_pro_softmax = 0
    y_all_3_pro_softmax = 0
    y_equal_0_pro_sigmoid_wrong = 0   # unused
    mid = 0                           # unused
    mid_1 = 0                         # unused
    y_score = np.empty(shape=[0, 4])   # raw logits, gathered on the final epoch
    y_onehot = np.empty(shape=[0, 4])  # matching one-hot labels

    for it in range(test_iteration):
        test_batch_x = test_x[test_pre_index: test_pre_index + add]
        test_batch_y = test_y[test_pre_index: test_pre_index + add]
        test_batch_p = test_p[test_pre_index: test_pre_index + add]
        test_pre_index = test_pre_index + add
        test_feed_dict = {
            x: test_batch_x,
            label: test_batch_y,
            path: test_batch_p,
            learning_rate: epoch_learning_rate,
            training_flag: False
        }
        loss_, acc_ = sess.run([cost, accuracy], feed_dict=test_feed_dict)
        '''
        logits_watch = sess.run(logits, feed_dict=test_feed_dict)
        print("logit is", logits_watch)
        print("label is ", test_batch_y)
        '''
        if epoch >= total_epochs-1:
            # Final epoch: also collect raw logits/labels for the ROC curves.
            result_one = sess.run(logits, feed_dict=test_feed_dict)
            loss_, acc_= sess.run([cost, accuracy], feed_dict=test_feed_dict)
            y_score = np.append(y_score, result_one, axis=0)
            y_onehot = np.append(y_onehot, test_batch_y, axis=0)
        test_loss += loss_ / 10.0   # 10 == test_iteration
        test_acc += acc_ / 10.0

        if epoch >= total_epochs-1:
            # Per-sample confusion bookkeeping on the final epoch.
            result_one_sigmoid = sess.run(tf.nn.sigmoid(result_one))
            result_one_softmax = sess.run(tf.nn.softmax(result_one))
            result_one_argmax = sess.run(tf.argmax(result_one, 1))
            test_batch_y_argmax = sess.run(tf.argmax(test_batch_y, 1))
            path_one = test_batch_p
            for i in range(len(test_batch_y_argmax)):
                if test_batch_y_argmax[i] == 0:
                    y_amount_0 = y_amount_0 + 1
                    y_all_0_pro_softmax = y_all_0_pro_softmax + result_one_softmax[i]
                    if result_one_argmax[i] == 1:
                        y_0to1 = y_0to1 + 1
                        #print("y_0to1 is ", path_one[i])
                    elif result_one_argmax[i] == 2:
                        y_0to2 = y_0to2 + 1
                        #print("y_0to2 is ", path_one[i])
                    elif result_one_argmax[i] == 3:
                        y_0to3 = y_0to3 + 1
                        #print("y_0to3 is ", path_one[i])
                    elif result_one_argmax[i] == test_batch_y_argmax[i]:
                        y_equal_0 = y_equal_0 + 1
                        y_equal_0_pro_sigmoid = y_equal_0_pro_sigmoid + result_one_sigmoid[i]
                        y_equal_0_pro_softmax = y_equal_0_pro_softmax + result_one_softmax[i]
                        #print("0 is", path_one[i])
                elif test_batch_y_argmax[i] == 1:
                    y_amount_1 = y_amount_1 + 1
                    y_all_1_pro_softmax = y_all_1_pro_softmax + result_one_softmax[i]
                    if result_one_argmax[i] == 0:
                        y_1to0 = y_1to0 + 1
                        #print("y_1to0 is", path_one[i])
                    elif result_one_argmax[i] == 2:
                        y_1to2 = y_1to2 + 1
                        #print("y_1to2 is", path_one[i])
                    elif result_one_argmax[i] == 3:
                        y_1to3 = y_1to3 + 1
                        #print("y_1to3 is", path_one[i])
                    elif result_one_argmax[i] == test_batch_y_argmax[i]:
                        y_equal_1 = y_equal_1 + 1
                        y_equal_1_pro_sigmoid = y_equal_1_pro_sigmoid + result_one_sigmoid[i]
                        y_equal_1_pro_softmax = y_equal_1_pro_softmax + result_one_softmax[i]
                        #print("1 is", path_one[i])
                elif test_batch_y_argmax[i] == 2:
                    y_amount_2 = y_amount_2 + 1
                    y_all_2_pro_softmax = y_all_2_pro_softmax + result_one_softmax[i]
                    if result_one_argmax[i] == 0:
                        y_2to0 = y_2to0 + 1
                        #print("y_2to0 is", path_one[i])
                    elif result_one_argmax[i] == 1:
                        y_2to1 = y_2to1 + 1
                        #print("y_2to1 is", path_one[i])
                    elif result_one_argmax[i] == 3:
                        y_2to3 = y_2to3 + 1
                        #print("y_2to3 is", path_one[i])
                    # NOTE: plain `if` here (not elif), unlike the other classes.
                    if result_one_argmax[i] == test_batch_y_argmax[i]:
                        y_equal_2 = y_equal_2 + 1
                        y_equal_2_pro_softmax = y_equal_2_pro_softmax + result_one_softmax[i]
                        #print("2 is" , path_one[i])
                elif test_batch_y_argmax[i] == 3:
                    y_amount_3 = y_amount_3 + 1
                    y_all_3_pro_softmax = y_all_3_pro_softmax + result_one_softmax[i]
                    if result_one_argmax[i] == 0:
                        y_3to0 = y_3to0 + 1
                        #print("y_3to0 is", path_one[i])
                    elif result_one_argmax[i] == 1:
                        y_3to1 = y_3to1 + 1
                        #print("y_3to1 is", path_one[i])
                    elif result_one_argmax[i] == 2:
                        y_3to2 = y_3to2 + 1
                        #print("y_3to2 is", path_one[i])
                    elif result_one_argmax[i] == test_batch_y_argmax[i]:
                        y_equal_3 = y_equal_3 + 1
                        y_equal_3_pro_softmax = y_equal_3_pro_softmax + result_one_softmax[i]
                        #print("3 is", path_one[i])
            # print("the result_one_argmax is ", result_one_argmax)
            # print("the test_batch_y_argmax is ", test_batch_y_argmax)
            # print("result_one_softmax is ", result_one_softmax)
            # print("test_batch_y is ", test_batch_y)

    if epoch >=total_epochs-1:
        # One-vs-rest ROC curves plus a macro-averaged curve (scikit-learn recipe).
        print("y_score and y_onehot shape is ", y_score.shape, y_onehot.shape)
        fpr = dict()
        tpr = dict()
        roc_auc = dict()
        for i in range(class_num):
            fpr[i], tpr[i], _ = roc_curve(y_onehot[:, i], y_score[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])
        # first aggregate all the false positive rates
        all_fpr = np.unique(np.concatenate([fpr[i] for i in range(class_num)]))
        # then interpolate all ROC curves at this point
        mean_tpr = np.zeros_like(all_fpr)
        for i in range(class_num):
            mean_tpr += interp(all_fpr, fpr[i], tpr[i])
        # finally average it and compute AUC
        mean_tpr /= class_num
        fpr["macro"] = all_fpr
        tpr["macro"] = mean_tpr
        roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
        fpr_macro = fpr["macro"]
        tpr_macro = tpr["macro"]
        roc_auc_macro = roc_auc["macro"]
        # plot all ROC curves
        subtype = ["ccRCC","CRCC","AML","PRCC"]
        plt.plot(fpr["macro"], tpr["macro"], label="macro-average ROC curve(area = {0:0.2f})".format(roc_auc["macro"]),
                 color="navy", linestyle=":", linewidth=4)
        colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
        for i, color in zip(range(class_num), colors):
            plt.plot(fpr[i], tpr[i], color=color, lw=2,
                     label="ROC curve of {0}(area = {1:0.2f})".format(subtype[i], roc_auc[i]))
        print(fpr[0].shape)
        plt.plot([0, 1], [0, 1], "k--", lw=2)
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel("false positive rate")
        plt.ylabel("true positive rate")
        plt.title("ROC to multi-classification")
        plt.legend(loc="lower right")
        plt.savefig("ROC of 5_to_2.jpg")
        plt.show()

    if epoch >= total_epochs-1:
        # Final per-class accuracy / confusion-matrix / precision report.
        print("the amount of 0 is and the equal is ", y_amount_0, y_equal_0)
        print("the amount of 1 is and the equal is ", y_amount_1, y_equal_1)
        print("the amount of 2 is and the equal is ", y_amount_2, y_equal_2)
        print("the amount of 3 is and the equal is ", y_amount_3, y_equal_3)
        print("the equal pro of 0 is sigmoid, softmax", y_equal_0_pro_sigmoid / y_amount_0,
              y_equal_0_pro_softmax / y_amount_0)
        print("the equal pro of 1 is sigmoid, softmax", y_equal_1_pro_sigmoid / y_amount_1,
              y_equal_1_pro_softmax / y_amount_1)
        print("the equal pro of 2 is sigmoid, softmax", y_equal_2_pro_softmax / y_amount_2)
        print("the equal pro of 3 is sigmoid, softmax", y_equal_3_pro_softmax / y_amount_3)
        print("the all pro of 0 is sigmoid ", y_all_0_pro_softmax / y_amount_0)
        print("the all pro of 1 is sigmoid ", y_all_1_pro_softmax / y_amount_1)
        print("the all pro of 0 is sigmoid ", y_all_2_pro_softmax / y_amount_2)
        print("the all pro of 1 is sigmoid ", y_all_3_pro_softmax / y_amount_3)
        # print("the pro of 0 wrong is , and the mid0, mid1 is ", y_equal_0_pro_sigmoid_wrong/(y_amount_0-y_equal_0), mid, mid_1)
        y_0_acc = y_equal_0 / y_amount_0
        y_1_acc = y_equal_1 / y_amount_1
        y_2_acc = y_equal_2 / y_amount_2
        y_3_acc = y_equal_3 / y_amount_3
        print("the acc of 0 is ", y_0_acc)
        print("the acc of 1 is ", y_1_acc)
        print("the acc of 2 is ", y_2_acc)
        print("the acc of 3 is ", y_3_acc)
        print("the 0 class is", y_equal_0, y_0to1, y_0to2, y_0to3, y_amount_0)
        print("the 1 class is", y_1to0, y_equal_1, y_1to2, y_1to3, y_amount_1)
        print("the 2 class is", y_2to0, y_2to1, y_equal_2, y_2to3, y_amount_2)
        print("the 3 class is", y_3to0, y_3to1, y_3to2, y_equal_3, y_amount_3)
        print("the precision of 0,1,2,3", "0", (y_equal_0 + y_1to0 + y_2to0 + y_3to0),
              y_equal_0 / (y_equal_0 + y_1to0 + y_2to0 + y_3to0),
              "1", (y_0to1 + y_equal_1 + y_2to1 + y_3to1), y_equal_1 / (y_0to1 + y_equal_1 + y_2to1 + y_3to1),
              "2", (y_0to2 + y_1to2 + y_equal_2 + y_3to2), y_equal_2 / (y_0to2 + y_1to2 + y_equal_2 + y_3to2),
              "3", (y_0to3 + y_1to3 + y_2to3 + y_equal_3), y_equal_3 / (y_0to3 + y_1to3 + y_2to3 + y_equal_3)
              )

    summary = tf.Summary(value=[tf.Summary.Value(tag='test_loss', simple_value=test_loss),
                                tf.Summary.Value(tag='test_accuracy', simple_value=test_acc)])
    return test_acc, test_loss, summary
class RESNet():
    """ResNet wrapper: builds the 18-layer variant on `x` at construction.

    The final dense layer is named 'fully_connected' so transfer-learning
    restores can exclude it (see restore_variable_list below).
    """
    def __init__(self, x, training, labels):
        # `labels` is accepted for interface parity with other models but is
        # not used when building the graph.
        self.training = training
        self.model = self.ResNet18(x, is_training=training, pooling_and_fc=True,
                                   reuse=False, kernel_initializer = tf.contrib.layers.variance_scaling_initializer())

    def identity_block2d(self,input_tensor, kernel_size, filters, stage, block, is_training, reuse,
                         kernel_initializer=tf.contrib.layers.variance_scaling_initializer()):
        """Residual block with identity shortcut: conv-BN-ReLU, conv-BN, add, ReLU.

        `filters` is a 3-tuple; filters1 is unpacked but unused here.
        """
        filters1, filters2, filters3 = filters

        conv_name_2 = 'conv' + str(stage) + '_' + str(block) + '_3x3'
        bn_name_2 = 'bn' + str(stage) + '_' + str(block) + '_3x3'
        x = tf.layers.conv2d(input_tensor, filters2, kernel_size, use_bias=False, padding='SAME',
                             kernel_initializer=kernel_initializer, name=conv_name_2, reuse=reuse)
        x = Batch_Normalization(x, training=is_training, scope=bn_name_2)
        x = tf.nn.relu(x)

        conv_name_3 = 'conv' + str(stage) + '_' + str(block) + '_1x1_increase'
        bn_name_3 = 'bn' + str(stage) + '_' + str(block) + '_1x1_increase'
        x = tf.layers.conv2d(x, filters3, (kernel_size, kernel_size), use_bias=False, padding='SAME',
                             kernel_initializer=kernel_initializer, name=conv_name_3, reuse=reuse)
        x = Batch_Normalization(x, training=is_training, scope=bn_name_3)

        # Identity shortcut: input added directly (shapes must already match).
        x = tf.add(input_tensor, x)
        x = tf.nn.relu(x)
        return x

    def conv_block_2d(self,input_tensor, kernel_size, filters, stage, block, is_training, reuse, strides=(2, 2),
                      kernel_initializer=tf.contrib.layers.variance_scaling_initializer()):
        """Residual block with a projection shortcut (strided conv) for downsampling."""
        filters1, filters2, filters3 = filters

        conv_name_2 = 'conv' + str(stage) + '_' + str(block) + '_3x3'
        bn_name_2 = 'bn' + str(stage) + '_' + str(block) + '_3x3'
        x = tf.layers.conv2d(input_tensor, filters2, (kernel_size, kernel_size), use_bias=False, strides=strides,
                             padding='SAME', kernel_initializer=kernel_initializer, name=conv_name_2, reuse=reuse)
        x = Batch_Normalization(x, training=is_training, scope=bn_name_2)
        x = tf.nn.relu(x)

        conv_name_3 = 'conv' + str(stage) + '_' + str(block) + '_1x1_increase'
        bn_name_3 = 'bn' + str(stage) + '_' + str(block) + '_1x1_increase'
        x = tf.layers.conv2d(x, filters3, (kernel_size, kernel_size), use_bias=False, padding='SAME',
                             kernel_initializer=kernel_initializer, name=conv_name_3, reuse=reuse)
        x = Batch_Normalization(x, training=is_training, scope=bn_name_3)

        # Projection shortcut: strided conv + BN so the shapes match before add.
        conv_name_4 = 'conv' + str(stage) + '_' + str(block) + '_1x1_shortcut'
        bn_name_4 = 'bn' + str(stage) + '_' + str(block) + '_1x1_shortcut'
        shortcut = tf.layers.conv2d(input_tensor, filters3, (kernel_size, kernel_size), use_bias=False, strides=strides,
                                    padding='SAME', kernel_initializer=kernel_initializer, name=conv_name_4,
                                    reuse=reuse)
        shortcut = Batch_Normalization(shortcut, training=is_training, scope=bn_name_4)

        x = tf.add(shortcut, x)
        x = tf.nn.relu(x)
        return x

    def ResNet18(self,input_tensor, is_training=True, pooling_and_fc=True, reuse=False,
                 kernel_initializer=tf.contrib.layers.variance_scaling_initializer()):
        """Build the 18-layer network; returns class logits (4 classes).

        A single-channel input is tiled to 3 channels so ImageNet-style
        3-channel weights can be transferred. `pooling_and_fc` is unused.
        """
        print("the input_tensor is ", input_tensor)
        input_tensor_tile = tf.tile(input_tensor, [1,1,1,3])
        print("after tf.tile, the input tensor is", input_tensor_tile)
        x = tf.layers.conv2d(input_tensor_tile, 32, (3, 3), strides=(1, 1), kernel_initializer=kernel_initializer,
                             use_bias=False, padding='SAME', name='conv1_1/3x3_s1', reuse=reuse)
        x = Batch_Normalization(x, training=is_training,scope ='bn1_1/3x3_s1')
        x = tf.nn.relu(x)

        x1 = self.identity_block2d(x, 3, [48, 32, 32], stage=2, block='1b', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        x1 = self.identity_block2d(x1, 3, [48, 32, 32], stage=3, block='1c', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)

        x2 = self.conv_block_2d(x1, 3, [96, 64, 64], stage=3, block='2a', strides=(2, 2), is_training=is_training,
                                reuse=reuse, kernel_initializer=kernel_initializer)
        x2 = self.identity_block2d(x2, 3, [96, 64, 64], stage=3, block='2b', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)

        x3 = self.conv_block_2d(x2, 3, [128, 128, 128], stage=4, block='3a', strides=(2, 2), is_training=is_training,
                                reuse=reuse, kernel_initializer=kernel_initializer)
        x3 = self.identity_block2d(x3, 3, [128, 128, 128], stage=4, block='3b', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)

        x4 = self.conv_block_2d(x3, 3, [256, 256, 256], stage=5, block='4a', strides=(2, 2), is_training=is_training,
                                reuse=reuse, kernel_initializer=kernel_initializer)
        x4 = self.identity_block2d(x4, 3, [256, 256, 256], stage=5, block='4b', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)

        # print('before gap: ', x4)
        # Global average pooling over spatial dims, then dropout + classifier.
        x4 = tf.reduce_mean(x4, [1, 2])
        x4 = Drop_out(x4, dropout_rate, is_training)
        # print('after gap: ', x4)
        # flatten = tf.contrib.layers.flatten(x4)
        prob = tf.layers.dense(x4, 4, reuse=reuse, kernel_initializer=tf.contrib.layers.xavier_initializer(),
                               use_bias=True, name="fully_connected")
        return prob

    def ResNet34(self, input_tensor, is_training, pooling_and_fc=True,
                 reuse=False, kernel_initializer = tf.contrib.layers.variance_scaling_initializer()):
        """34-layer variant (not used by __init__); logs activation summaries per stage."""
        x = tf.layers.conv2d(input_tensor, 32, (5, 5), strides=(1, 1), kernel_initializer=kernel_initializer,
                             use_bias=False, padding='SAME', name='conv1_1/3x3_s1', reuse=reuse)
        x = Batch_Normalization(x, training=is_training,scope ='bn1_1/3x3_s1')
        x = tf.nn.relu(x)
        variable_summaries(x, name='x_0')

        x1 = self.identity_block2d(x, 3, [48, 32, 32], stage=1, block='1a', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        x1 = self.identity_block2d(x1, 3, [48, 32, 32], stage=1, block='1b', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        x1 = self.identity_block2d(x1, 3, [48, 32, 32], stage=1, block='1c', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        variable_summaries(x1, name='x_1')

        x2 = self.conv_block_2d(x1, 3, [96, 64, 64], stage=2, block='2a', strides=(2, 2), is_training=is_training,
                                reuse=reuse, kernel_initializer=kernel_initializer)
        x2 = self.identity_block2d(x2, 3, [96, 64, 64], stage=2, block='2b', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        x2 = self.identity_block2d(x2, 3, [96, 64, 64], stage=2, block='2c', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        x2 = self.identity_block2d(x2, 3, [96, 64, 64], stage=2, block='2d', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        variable_summaries(x2, name='x_2')

        x3 = self.conv_block_2d(x2, 3, [128, 128, 128], stage=3, block='3a', strides=(2, 2), is_training=is_training,
                                reuse=reuse, kernel_initializer=kernel_initializer)
        x3 = self.identity_block2d(x3, 3, [128, 128, 128], stage=3, block='3b', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        x3 = self.identity_block2d(x3, 3, [128, 128, 128], stage=3, block='3c', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        x3 = self.identity_block2d(x3, 3, [128, 128, 128], stage=3, block='3d', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        x3 = self.identity_block2d(x3, 3, [128, 128, 128], stage=3, block='3e', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        x3 = self.identity_block2d(x3, 3, [128, 128, 128], stage=3, block='3f', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        variable_summaries(x3, name='x_3')

        x4 = self.conv_block_2d(x3, 3, [256, 256, 256], stage=4, block='4a', strides=(2, 2), is_training=is_training,
                                reuse=reuse, kernel_initializer=kernel_initializer)
        x4 = self.identity_block2d(x4, 3, [256, 256, 256], stage=4, block='4b', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)
        x4 = self.identity_block2d(x4, 3, [256, 256, 256], stage=4, block='4c', is_training=is_training, reuse=reuse,
                                   kernel_initializer=kernel_initializer)

        # print('before gap: ', x4)
        x4 = tf.reduce_mean(x4, [1, 2])
        x4 = Drop_out(x4, dropout_rate, is_training)
        # print('after gap: ', x4)
        # flatten = tf.contrib.layers.flatten(x4)
        prob = tf.layers.dense(x4, 4, reuse=reuse, kernel_initializer=tf.contrib.layers.xavier_initializer(seed=1),
                               name="fully_connected")
        return prob
#train_x_pre, train_y_pre, test_x_pre, test_y_pre = prepare_data(train_files = '/training_64_4class_pk.pickle', test_files = '/test_64_4class_pk.pickle')
# Load the pickled train/test splits (images, one-hot labels, sample paths)
# and normalise the images.
train_x, train_y, train_p, test_x, test_y, test_p = prepare_data(train_files = '/train_64_pt_all_sel_5_to_2', test_files = '/test_64_pt_all_sel_5_to_2')
train_x, test_x = color_preprocessing(train_x, test_x)
print("after select,the shape of train data and label is ", train_x.shape, train_y.shape)
print("aftre select, the shape of test data and label is ", test_x.shape, test_y.shape)

# image_size = 32, img_channels = 3, class_num = 10 in cifar10
# Graph inputs.
x = tf.placeholder(tf.float32, shape=[None, image_size, image_size, img_channels])
label = tf.placeholder(tf.float32, shape=[None, class_num])
path = tf.placeholder(tf.string)
training_flag = tf.placeholder(tf.bool)
learning_rate = tf.placeholder(tf.float32, name='learning_rate')

# Build the ResNet18 classifier; `logits` are the raw class scores.
logits = RESNet(x=x, training=training_flag, labels=label).model
#logits, cos_t, s_train, logits_2, logits_3, logits_4, logits_5, logits_6, center_loss= DenseNet(x=x, nb_blocks=nb_block, filters=growth_k, training=training_flag, labels = label).model
# reg_ws = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,"DenseNet")
# weights_regularizer = tf.contrib.layers.l1_regularizer(0.4)
reg_ws = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, 'DenseNet')
print("label", label, "logits", logits)

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits))
# Manual L2/L1 penalties over all trainable variables.
l2 = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
l1 = tf.add_n([tf.contrib.layers.l1_regularizer(0.5)(var) for var in tf.trainable_variables()])
"""
l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=nesterov_momentum, use_nesterov=True)
train = optimizer.minimize(cost + l2_loss * weight_decay)
In paper, use MomentumOptimizer
init_learning_rate = 0.1
but, I'll use AdamOptimizer
"""
# NOTE(review): L_metric_l2_regularizer is not defined anywhere in this file;
# unless it arrives via the `from load_data import *` star import this line
# raises NameError at runtime — confirm.
cost = cross_entropy + L_metric_l2_regularizer
#optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=epsilon)

# Dump the trainable-variable list (used to pick the transfer-learning cutoff).
tr_vars = tf.trainable_variables()
var_list = [t for t in tr_vars]
print("type of var is ", type(var_list),var_list)
i=0
for ttt in var_list:
    print("t is ", i,ttt)
    i+=1
new_var_list = var_list[30:]   # variables after the frozen prefix (currently unused)

optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)
train = optimizer.minimize(cost + l2 * weight_decay )
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

#merge all the summary
# Restore everything except the classifier head and BN is_training flag, so
# pretrained backbone weights can be loaded while the head is re-trained.
restore_variable_list = tf.contrib.framework.get_variables_to_restore(exclude=["fully_connected","is_training"])
saver = tf.train.Saver(restore_variable_list)
saver_2 = tf.train.Saver()

# Write the run-description header to the text log.
with open(txt_path, 'a') as f:
    f.write(write_title)
print("write finished,\n")
with tf.Session() as sess:
''' '''
ckpt = tf.train.get_checkpoint_state(ckpt_path)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
print("tensor name",key)
sess.run(tf.global_variables_initializer())
saver_2.restore(sess, ckpt.model_checkpoint_path)
print("load the model")
else :
sess.run(tf.global_variables_initializer())
print("new initial")
#sess.run(tf.global_variables_initializer())
#print("new initial")
summary_writer = tf.summary.FileWriter(summary_path, sess.graph)
epoch_learning_rate = init_learning_rate
#epoch_learning_rate = tmp_learning_rate
if isTrain:
test_acc_old=0
for epoch in range(1, total_epochs + 1):
#for epoch in range(1, 2):
if epoch == (total_epochs * 0.5) or epoch == (total_epochs * 0.75):
epoch_learning_rate = epoch_learning_rate / 10
pre_index = 0
train_acc = 0.0
train_acc_norm = 0.0
train_acc_arcface = 0.0
train_loss = 0.0
train_center_loss = 0.0
train_y_equal_0 = 0
train_y_equal_1 = 0
train_y_equal_2 = 0
train_y_equal_3 = 0
for step in range(1, iteration + 1):
if pre_index + batch_size < dataset_size:
batch_x = train_x[pre_index: pre_index + batch_size]
batch_y = train_y[pre_index: pre_index + batch_size]
batch_p = train_p[pre_index: pre_index + batch_size]
else:
batch_x = train_x[pre_index:]
batch_y = train_y[pre_index:]
batch_p = train_p[pre_index:]
batch_x = data_augmentation(batch_x)
train_feed_dict = {
x: batch_x,
label: batch_y,
path: batch_p,
learning_rate: epoch_learning_rate,
training_flag: True
}
_, batch_loss= sess.run([train, cost], feed_dict=train_feed_dict)
batch_acc = accuracy.eval(feed_dict=train_feed_dict)
'''
logits_watch = sess.run(logits,feed_dict=train_feed_dict)
print("logit is",logits_watch )
print("label is ", batch_y)
'''
train_loss += batch_loss
#train_center_loss += batch_center_loss
train_acc += batch_acc
pre_index += batch_size
if step == iteration:
train_loss /= iteration # average loss
train_acc /= iteration # average accuracy
train_center_loss /= iteration
if epoch >= total_epochs-1:
train_acc_norm /= iteration
train_acc_arcface /= iteration
train_summary = tf.Summary(value=[tf.Summary.Value(tag='train_loss', simple_value=train_loss),
tf.Summary.Value(tag='train_accuracy', simple_value=train_acc)])
test_acc, test_loss, test_summary= Evaluate(sess, epoch)
summary_writer.add_summary(summary=train_summary, global_step=epoch)
summary_writer.add_summary(summary=test_summary, global_step=epoch)
summary_writer.flush()
line = "epoch: %d/%d, train_loss: %.4f, train_acc: %.4f, test_loss: %.4f, test_acc: %.4f\n" % (
epoch, total_epochs, train_loss, train_acc, test_loss, test_acc)
print(line)
with open(txt_path, 'a') as f :
f.write(line)
if epoch >= total_epochs-10:
test_acc_new = test_acc
if test_acc_new >= test_acc_old:
saver_2.save(sess=sess, save_path=save_path)
print("model saved ,acc is", test_acc_new)
test_acc_old = test_acc
if epoch >= total_epochs-1:
train_result = sess.run(tf.argmax(logits, 1),feed_dict=train_feed_dict)
label_argmax = sess.run(tf.argmax(label, 1), feed_dict=train_feed_dict)
for itrain in range(len(batch_y)):
if label_argmax[itrain] == 0:
if train_result[itrain] == label_argmax[itrain]:
train_y_equal_0 = train_y_equal_0 + 1
elif label_argmax[itrain] == 1:
if train_result[itrain] == label_argmax[itrain]:
train_y_equal_1 = train_y_equal_1 + 1
elif label_argmax[itrain] == 2:
if train_result[itrain] == label_argmax[itrain]:
train_y_equal_2 = train_y_equal_2 + 1
elif label_argmax[itrain] == 3:
if train_result[itrain] == label_argmax[itrain]:
train_y_equal_3 = train_y_equal_3 + 1
#s_train_val = sess.run(s_train, feed_dict=train_feed_dict)
if epoch >= total_epochs-1:
#print("s_train_val is ", s_train_val)
print("the right amount of train of 0 and 1 and 2 and 3 is ", train_y_equal_0, train_y_equal_1, train_y_equal_2, train_y_equal_3)
else:
epoch = total_epochs-1
test_acc, test_loss, test_summary = Evaluate(sess, epoch)
print("test_loss:",test_loss,"test_acc",test_acc)
end_time = time.time()
print("end time is", end_time)
time_dur = end_time - start_time
print("time_dur is ", time_dur)
| [
"noreply@github.com"
] | wangshuai-bit.noreply@github.com |
667615d24df3f447ef773eb76c4de08b7f9c84c4 | aa5db0b160300c61c6a243c10a9ae4f24e61acbe | /main.py | 38d11176559c61a5e2a98abcb2dfe3406902bd6e | [] | no_license | ShlomiRex/Twitter-Slack-Bot-Interview-Home-Assignment | 68e7a36baae49653b8e151c128e822db4cd057f9 | c2c8d9421046f792acf35e4cdf1d62bc85a7dfc3 | refs/heads/main | 2023-08-29T21:41:23.942498 | 2021-11-15T17:46:13 | 2021-11-15T17:46:13 | 427,901,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,811 | py | import configparser
import datetime
import os.path
import pickle
import threading
import time
from dotenv import load_dotenv
from flask import Flask, Response, request
import logging
import slack_worker
import twitter_worker
# Environment
from twitter_worker.twitter_worker import Tweet
load_dotenv()
# Configuration files
config = configparser.ConfigParser()
config.read("config.ini")
# Logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
# Flask
app = Flask(__name__)
# Globals / others
running = False
pickled_timestamps_file = "scan_timestamps.pkl"
@app.route("/new-content", methods=["POST"])
def command_new_content():
"""
Command handler for '/new-content'.
:return:
"""
logger.info("Command 'new-content' called")
# In order to not get "operation_timeout" we can run this in another thread
def threaded_task():
for page in twitter_worker.pages_to_pull:
scan_timestamp = get_last_scan_timestamp(page)
if not scan_timestamp:
# Defaults to one hour as per instructions.
tweets = twitter_worker.pull_tweets_last_hour(page)
push_scan_timestamp(page, datetime.datetime.utcnow() - datetime.timedelta(hours=1))
else:
# Else, we scan again from the scan timestamp. If new tweets appear, it will be because of from the delta
# timing.
tweets = twitter_worker.pull_tweets(page, start_time=scan_timestamp)
push_scan_timestamp(page, datetime.datetime.utcnow())
slack_worker.post_new_content(page, tweets)
threading.Thread(target=threaded_task).start()
return Response(), 200
@app.route("/now", methods=["POST"])
def command_now():
logger.info("Command 'now' called")
slack_worker.post_current_datetime()
return Response(), 200
@app.route("/tweet", methods=["POST"])
def command_tweet():
logger.info("Command 'tweet' called")
command_text = request.form.get("text")
if command_text:
s = command_text.split(" ", 1)
if len(s) != 2:
return Response("No recipient and no message was given.", 400)
twitter_id = s[0]
msg = s[1]
success, reason = twitter_worker.tweet(twitter_id, msg)
if success:
return Response(), 200
else:
return Response(reason, 400)
else:
return Response("No tweeter id specified.", 400)
def get_last_scan_timestamp(twitter_id: str):
"""
Read pickle file and return the scan timestamp for this user.
:param twitter_id:
:return:
"""
if os.path.exists(pickled_timestamps_file):
with open(pickled_timestamps_file, "rb") as file:
obj = pickle.load(file)
if obj and obj.get(twitter_id):
return obj[twitter_id]
def push_scan_timestamp(twitter_id: str, timestamp: datetime.datetime):
"""
Write scan timestamp for a user.
:param twitter_id:
:param timestamp:
:return:
"""
if not os.path.exists(pickled_timestamps_file):
open(pickled_timestamps_file, "x")
with open(pickled_timestamps_file, "rb") as file:
try:
obj = pickle.load(file)
except EOFError:
obj = None
with open(pickled_timestamps_file, "wb") as file:
if obj:
obj[twitter_id] = timestamp
else:
obj = {twitter_id: timestamp}
pickle.dump(obj, file)
def dispatch_bot(twitter_username: str, every: int):
"""
Run the time bot. It writes to channel every X seconds the current time. It also scans for new tweets.
:param twitter_username:
:param every:Amount of seconds to wait between sends.
:return:
"""
def time_loop():
while running:
timestamp = get_last_scan_timestamp(twitter_username)
#utc_now = datetime.datetime.utcnow() - datetime.timedelta(minutes=60) # TODO: Remove timedelta
utc_now = datetime.datetime.utcnow()
push_scan_timestamp(twitter_username, utc_now)
if timestamp:
tweets = twitter_worker.pull_tweets(twitter_username, timestamp)
if tweets:
slack_worker.post_tweets(twitter_username, tweets)
slack_worker.post_current_datetime()
time.sleep(every)
threading.Thread(target=time_loop).start()
if __name__ == "__main__":
running = True
# Run flask
kwargs = {'host': '127.0.0.1', 'port': 5000, 'threaded': True, 'use_reloader': False, 'debug': False}
flaskThread = threading.Thread(target=app.run, daemon=True, kwargs=kwargs).start()
# Run bot's time functionality in separate thread
dispatch_bot(twitter_username="DomnenkoShlomi", every=3600)
| [
"vgtvgy1@gmail.com"
] | vgtvgy1@gmail.com |
9e954532ac48853decc0acb672da57bb9369029b | 94aa3e2f78a8cc9a5b32af6a78197e79e876af3d | /Ex09_4_uses_only.py | e2e7285eefc034e3f5338572a40ccb07a565c9ac | [] | no_license | ishwarjindal/Think-Python | 7c41e190a86280d99b6e5bd1d2dcdcf84394bb92 | 9e997f87189357ad12b9893e2da3960843b0b853 | refs/heads/master | 2020-05-19T13:31:14.417642 | 2019-10-06T03:44:27 | 2019-10-06T03:44:27 | 185,041,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | #Author : Ishwar Jindal
#Created On : 26-May-2019 05:19 PM IST
#Purpose : Find words that uses only the specificed letters
def has_no_char(word, char_to_be_missing):
for char in word:
if char == char_to_be_missing:
return False
return True
def uses_only(word, letters_to_use):
for letter in word:
if has_no_char(letters_to_use, letter) == False:
continue
else:
return False
return True
print("main started")
total = 0
matched = 0
letters_to_use = input("Enter the superset of letters to be used in word : ")
fin = open("words.txt")
for line in fin:
total = total + 1
word = line.strip()
if uses_only(word, letters_to_use):
matched = matched + 1
print(str.format("{0} does have all letters in {1}", word, letters_to_use))
print(str.format("{0} words out of {1} i.e. {2}% have all their letters in {3}", matched, total, round(matched*100/total, 2), letters_to_use))
print("main ended")
| [
"ishwarjindal@MacBook-Pro-4.local"
] | ishwarjindal@MacBook-Pro-4.local |
00556680676e49944ba71fefdd6fed4756bfb9a5 | 17f75be58052605ddf4da0af2dd3abba69dc3bc4 | /api/migrations/0001_initial.py | bd6e5a842c3771f5a8eb56800966d4f2ba674a6b | [] | no_license | assasin-lv/my-first-blog | 9f8547a84091ebba2d91d73a7554f2279d463a21 | f068517e7df5d6f3ed026213a3afc6528dd944dc | refs/heads/master | 2021-02-19T05:03:52.086526 | 2020-03-10T02:56:37 | 2020-03-10T02:56:37 | 245,278,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | # Generated by Django 2.0.6 on 2019-06-04 19:15
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Terminal',
fields=[
('id_terminal', models.AutoField(primary_key=True, serialize=False)),
('serie', models.CharField(max_length=50)),
('mac', models.CharField(max_length=50)),
('android_id', models.CharField(max_length=50)),
('terminal', models.CharField(max_length=50)),
],
),
]
| [
"noob.assasin.lv@gmail.com"
] | noob.assasin.lv@gmail.com |
7019b6b2ccb716f997d7907943ca21567284877d | 091eb8602b0de816222b3c2dabbdb84cc4c63298 | /game_8_puzzle.py | 573945d64189f0df47ca0ed7ec53352372908d3c | [] | no_license | 888yzbt888/game_8_puzzle | 5b32f1aaf8aca4a09aafa333a0c06a83292b8d30 | 1b9bdc2cb6d707e11455d49245a8501b7e69b587 | refs/heads/master | 2021-06-30T19:44:09.575701 | 2017-09-18T02:10:31 | 2017-09-18T02:10:31 | 103,508,934 | 0 | 0 | null | 2017-09-22T14:00:54 | 2017-09-14T08:45:40 | Python | UTF-8 | Python | false | false | 6,697 | py | import sys
import pygame
import random
import time
import numpy as np
import algorithm_8_puzzle
REPLAY_SPEED=0.4
XOFFSET = 30
YOFFSET = 15
WINDOW_HEIGHT=440
WINDOW_WIDTH=400
FINAL_STATE=[[1,2,3],[4,5,6],[7,8,0]]
def initgame():
img = []
for i in range(0, 9):
img.append(pygame.image.load(str(i) + ".bmp"))
game=Game()
state=game.getState()
return game,state,img
#move to algorithm
def find_0_posi(block):
return [int(np.where(block == 0)[0]), int(np.where(block == 0)[1])]#[row,col]
#move to algorithm
def if_solvable(block):
block=block.reshape(9)
posi=int(np.where(block==0)[0])
total_rev=0
for i in range(1,9):
for k in range(i):
if block[k]>block[i]:
total_rev=total_rev+1
if (total_rev+posi)%2==0:
return True
else:
return False
class Game:
def __init__(self):
self.block=np.array(random.sample(range(9),9))
self.block=self.block.reshape((3,3))
print("yes" if if_solvable(self.block) else "no")##
def move(self,action):
#print(action)
if self.checkvalid(action)==False:
return self.block,"invalid"
else:
posi = find_0_posi(self.block)
if action=="down":
tem=self.block[posi[0]-1,posi[1]]
self.block[posi[0]-1,posi[1]]=self.block[posi[0],posi[1]]
self.block[posi[0],posi[1]]=tem
if action=="up":
tem = self.block[posi[0]+1, posi[1]]
self.block[posi[0]+1, posi[1]] = self.block[posi[0], posi[1]]
self.block[posi[0], posi[1]] = tem
if action=="left":
tem = self.block[posi[0], posi[1]+1]
self.block[posi[0], posi[1]+1] = self.block[posi[0], posi[1]]
self.block[posi[0], posi[1]] = tem
if action=="right":
tem = self.block[posi[0], posi[1] - 1]
self.block[posi[0], posi[1] - 1] = self.block[posi[0], posi[1]]
self.block[posi[0], posi[1]] = tem
return self.block,"done"
def checkvalid(self,action):
if action=="down" or action=="up" or action=="left" or action=="right":
posi = find_0_posi(self.block)
if posi[0]==0 and action=="down":
return False
if posi[0]==2 and action=="up":
return False
if posi[1]==0 and action=="right":
return False
if posi[1]==2 and action=="left":
return False
return True
else:
return False
def getState(self):
return self.block
def display_img(state,screen,img):
pygame.display.update()
screen.blit(img[state[0, 0]], (0 + XOFFSET, 0 + YOFFSET))
screen.blit(img[state[0, 1]], (120 + XOFFSET, 0 + YOFFSET))
screen.blit(img[state[0, 2]], (240 + XOFFSET, 0 + YOFFSET))
screen.blit(img[state[1, 0]], (0 + XOFFSET, 140 + YOFFSET))
screen.blit(img[state[1, 1]], (120 + XOFFSET, 140 + YOFFSET))
screen.blit(img[state[1, 2]], (240 + XOFFSET, 140 + YOFFSET))
screen.blit(img[state[2, 0]], (0 + XOFFSET, 280 + YOFFSET))
screen.blit(img[state[2, 1]], (120 + XOFFSET, 280 + YOFFSET))
screen.blit(img[state[2, 2]], (240 + XOFFSET, 280 + YOFFSET))
def user(screen):
game, state, img = initgame()
sol=if_solvable(state)
esc=False
while True:
if sol==False and esc==True:
break
if (state==FINAL_STATE).all():
break
action=""
for event in pygame.event.get():
if event.type==pygame.KEYDOWN:
k=event.key
if k==pygame.K_LEFT:
action="left"
elif k==pygame.K_RIGHT:
action="right"
elif k==pygame.K_UP:
action="up"
elif k==pygame.K_DOWN:
action="down"
elif k==pygame.K_ESCAPE:
esc=True
state,msg=game.move(action)
#print(msg,action)
display_img(state,screen,img)
if esc==False:
while True:
end = False
display_img(state, screen, img)
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
end = True
if end == True:
break
else:
pass
def auto(screen):
game, state, img = initgame()
if if_solvable(state):
while True:
print(state)#
procedure = algorithm_8_puzzle.solve(state)
print(procedure)#
l=len(procedure)
if l>0:
if procedure[0]=="finish":
break
for action in procedure:
state, msg = game.move(action)
#print(msg, action)
display_img(state,screen,img)
time.sleep(REPLAY_SPEED)
else:
print("unsolvable")
while True:
end = False
display_img(state, screen, img)
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
end = True
if end == True:
break
def menu():
screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT), 0, 32)
pygame.display.set_caption("Game")
pygame.init()
menu_option_img=[]
menu_option_img.append(pygame.image.load("Manual.bmp"))
menu_option_img.append(pygame.image.load("Auto.bmp"))
menu_option_img.append(pygame.image.load("Exit.bmp"))
while True:
pygame.display.update()
screen.fill([0,0,0])
screen.blit(menu_option_img[0],(10,80))
screen.blit(menu_option_img[1],(210,80))
screen.blit(menu_option_img[2],(110,300))
option=""
for event in pygame.event.get():
if event.type==pygame.KEYDOWN:
k=event.key
if k==pygame.K_LEFT:
option="manual"
elif k==pygame.K_RIGHT:
option="auto"
elif k==pygame.K_DOWN:
option="exit"
if option=="manual":
screen.fill([0,0,0])
user(screen)
elif option=="auto":
screen.fill([0, 0, 0])
auto(screen)
elif option=="exit":
print("exit")
pygame.quit()
sys.exit()
def main():
menu()
if __name__ == '__main__' :
main()
| [
"1002789177@qq.com"
] | 1002789177@qq.com |
a9cc0883b47e3569797ac2468dfcffe5081ffe26 | 7787db9eaf80ac4a366648902ee945112bca127a | /Leetcode300/14. Longest Common Prefix.py | 692f5ec20f2b5de8345b7f4b768d6f26010650f4 | [] | no_license | LYXalex/Leetcode-PythonSolution | 0de7af69373171affe15f2074bacc74955d09a2c | 2ae3529366227efb5f2ad81a8b039ad71e8d1ed5 | refs/heads/main | 2023-06-22T18:49:32.492547 | 2021-07-14T02:12:05 | 2021-07-14T02:12:05 | 325,213,787 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | class Solution:
def longestCommonPrefix(self, strs):
if not strs: return ""
shortest = min(strs,key=len)
for i,char in enumerate(shortest):
for each in strs:
if each[i] != char:
return shortest[:i]
return shortest | [
"yul801@ucsd.edu"
] | yul801@ucsd.edu |
0dac53703ab198d385005c1bd7a5a57d670af88e | caee06b143be5117a667e0f14ed6cf54958e85c1 | /page/showreview.py | 0b3dcc9721d9eed2b09256eae20318e2959a16f8 | [
"Apache-2.0",
"MIT"
] | permissive | ctesting/critic | 720b68331aac81443e53ccd0c4c6cb4c3b75b5ec | 8ba956d124279d0fca9d4522fb0ee6970e863588 | refs/heads/master | 2021-01-17T13:43:41.205293 | 2013-03-15T20:34:47 | 2013-03-15T20:34:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,272 | py | # -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 Jens Lindström, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import time
import re
import datetime
import calendar
import traceback
import dbutils
import gitutils
import htmlutils
import page.utils
import log.html
import review.utils as review_utils
import review.html as review_html
import review.comment as review_comment
import configuration
import diff
import profiling
import linkify
import extensions
try:
    from customization.paths import getModuleFromFile
except ImportError:
    def getModuleFromFile(repository, filename):
        """Return the top-level module prefix ("<dir>/") of 'filename', or None.

        Fallback used when no site-specific 'customization.paths' module is
        installed.  The 'repository' argument is unused here but kept for
        signature compatibility with the customized implementation.
        """
        try:
            base, rest = filename.split("/", 1)
            return base + "/"
        except ValueError:
            # 'filename' has no directory component.
            return None
class SummaryColumn(log.html.SummaryColumn):
    """Commit log summary column that tags each commit with the set of
    reviewers that still have pending changes in it."""

    def __init__(self, review, linkToCommit):
        log.html.SummaryColumn.__init__(self, linkToCommit)
        self.__review = review
        self.__pending_reviewers = {}

    def fillCache(self, db, review):
        """Populate the commit => {pending reviewer ids} mapping for 'review'."""
        cursor = db.cursor()
        cursor.execute("""SELECT DISTINCT assignee, child
                            FROM fullreviewuserfiles
                            JOIN changesets ON (changesets.id=changeset)
                           WHERE review=%s
                             AND state='pending'""",
                       (review.id,))
        for assignee_id, commit_id in cursor:
            if commit_id not in self.__pending_reviewers:
                self.__pending_reviewers[commit_id] = set()
            self.__pending_reviewers[commit_id].add(assignee_id)

    def render(self, db, commit, target):
        """Render the summary cell, annotating it with a 'critic-reviewers'
        attribute listing pending reviewers as "fullname:status"."""
        pending = self.__pending_reviewers.get(commit.getId(db))
        if pending:
            reviewers = dbutils.User.fromIds(db, [reviewer_id for reviewer_id in pending])
            labels = sorted("%s:%s" % (reviewer.fullname, reviewer.status)
                            for reviewer in reviewers)
            target.setAttribute("critic-reviewers", ",".join(labels))
        log.html.SummaryColumn.render(self, db, commit, target)
class ApprovalColumn:
    """Commit log column showing per-commit review progress.

    Depending on '__type', a cell renders either the pending percentage
    (APPROVED) or the size of the commit's changes (TOTAL).
    """

    # Column flavours (value of the 'type' constructor argument).
    APPROVED = 1
    TOTAL = 2

    def __init__(self, user, review, type, cache):
        # 'type' is ApprovalColumn.APPROVED or ApprovalColumn.TOTAL.
        # 'cache' maps commit id => a twelve-element counter list; see
        # fillCache() for the layout.  The cache is shared between the
        # APPROVED and TOTAL column instances.
        self.__user = user
        self.__review = review
        self.__type = type
        self.__cache = cache

    @staticmethod
    def fillCache(db, user, review, cache, profiler):
        """Populate 'cache' with per-commit review progress counters.

        Each cache value is a list of twelve integers:
          [0:3]  total (nfiles, deleted, inserted) for the whole review
          [3:6]  reviewed (nfiles, deleted, inserted) for the whole review
          [6:9]  total (nfiles, deleted, inserted) assigned to 'user'
          [9:12] reviewed (nfiles, deleted, inserted) assigned to 'user',
                 with the user's unsubmitted (draft) changes applied
        """
        cursor = db.cursor()

        profiler.check("fillCache")

        # Review-wide counters, grouped per commit and per file state.
        cursor.execute("""SELECT child, state, COUNT(*), SUM(deleted), SUM(inserted)
                            FROM changesets
                            JOIN reviewfiles ON (changeset=changesets.id)
                           WHERE review=%s
                        GROUP BY child, state""",
                       (review.id,))

        for commit_id, state, nfiles, deleted, inserted in cursor:
            data = cache.get(commit_id)
            if not data: data = cache[commit_id] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            if state == 'reviewed':
                data[3] += nfiles
                data[4] += deleted
                data[5] += inserted
            data[0] += nfiles
            data[1] += deleted
            data[2] += inserted

        profiler.check("fillCache: total")

        # Per-user counters.  A 'draft' row in reviewfilechanges overrides
        # the submitted state for this user only ("effective state").
        cursor.execute("""SELECT child, COALESCE(reviewfilechanges.to, reviewfiles.state) AS effective_state, COUNT(*), SUM(deleted), SUM(inserted)
                            FROM changesets
                            JOIN reviewfiles ON (changeset=changesets.id)
                            JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
                 LEFT OUTER JOIN reviewfilechanges ON (reviewfilechanges.file=reviewfiles.id
                                                   AND reviewfilechanges.uid=reviewuserfiles.uid
                                                   AND reviewfilechanges.state='draft')
                           WHERE review=%s
                             AND reviewuserfiles.uid=%s
                        GROUP BY child, effective_state""",
                       (review.id, user.id))

        for commit_id, state, nfiles, deleted, inserted in cursor:
            # NOTE(review): assumes every commit returned here was already
            # seen by the review-wide query above (user files are a subset
            # of all files), so 'data' is never None.
            data = cache.get(commit_id)
            if state == 'reviewed':
                data[9] += nfiles
                data[10] += deleted
                data[11] += inserted
            data[6] += nfiles
            data[7] += deleted
            data[8] += inserted

        profiler.check("fillCache: user")

    def __calculate(self, db, commit):
        # Return the commit's twelve counters, or all zeroes if not cached.
        return self.__cache.get(commit.id, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

    def className(self, db, commit):
        """Return the cell's CSS class ('commit' may be None, in which case
        only the base class name is returned)."""
        if commit:
            (total_nfiles, total_deleted, total_inserted,
             approved_nfiles, approved_deleted, approved_inserted,
             user_total_nfiles, user_total_deleted, user_total_inserted,
             user_approved_nfiles, user_approved_deleted, user_approved_inserted) = self.__calculate(db, commit)

            # The extra " user" class marks commits where this user still
            # has files of their own left to review.
            if user_approved_nfiles == user_total_nfiles:
                category = ""
            else:
                category = " user"
        else:
            category = ""

        if self.__type == ApprovalColumn.APPROVED:
            return "approval" + category
        else:
            return "total" + category

    def heading(self, target):
        # Column heading text for the chosen flavour.
        if self.__type == ApprovalColumn.APPROVED:
            target.text("Pending")
        else:
            target.text("Total")

    def render(self, db, commit, target):
        """Render the cell: a pending percentage (APPROVED) or the change's
        size as "-deleted/+inserted" (TOTAL).

        When the user has pending files of their own, the user-specific
        counters are shown instead of the review-wide ones.
        """
        (total_nfiles, total_deleted, total_inserted,
         approved_nfiles, approved_deleted, approved_inserted,
         user_total_nfiles, user_total_deleted, user_total_inserted,
         user_approved_nfiles, user_approved_deleted, user_approved_inserted) = self.__calculate(db, commit)

        if self.__type == ApprovalColumn.APPROVED:
            if user_approved_nfiles == user_total_nfiles:
                # Nothing of the user's own left: show review-wide progress.
                if approved_nfiles == total_nfiles:
                    # Everything reviewed: render an empty cell.
                    target.text()
                elif approved_deleted == total_deleted and approved_inserted == total_inserted:
                    # Line counters say everything is reviewed but some files
                    # are not; a computed percentage would read "0 %" (or
                    # divide by zero), so show a placeholder instead.
                    target.span().text("?? %")
                else:
                    # Percentage of changed lines still pending, review-wide.
                    target.span().text("%d %%" % int(100.0 * ((total_deleted + total_inserted) - (approved_deleted + approved_inserted)) / (total_deleted + total_inserted)))
            elif user_approved_deleted == user_total_deleted and user_approved_inserted == user_total_inserted:
                # Same inconsistency as above, for the user's own counters.
                target.span().text("?? %")
            else:
                # Percentage of the user's assigned lines still pending.
                target.span().text("%d %%" % int(100.0 * ((user_total_deleted + user_total_inserted) - (user_approved_deleted + user_approved_inserted)) / (user_total_deleted + user_total_inserted)))
        else:
            if user_approved_deleted == user_total_deleted and user_approved_inserted == user_total_inserted:
                # User has nothing pending: show the full change's size.
                target.span().text("-%d/+%d" % (total_deleted, total_inserted))
            else:
                # Show only the part assigned to this user.
                target.span().text("-%d/+%d" % (user_total_deleted, user_total_inserted))
def notModified(req, db, user, review):
    """Return True if the client's cached copy of the review front-page is
    still valid, i.e. its If-None-Match header matches the review's ETag."""
    client_etag = req.getRequestHeader("If-None-Match")
    current_etag = review.getETag(db, user)
    return current_etag == client_etag
def usesExperimentalFeature(req, db, review):
    """Report whether 'review' depends on experimental features.

    Currently always returns False; the arguments are unused."""
    return False
def renderShowReview(req, db, user):
profiler = profiling.Profiler()
cursor = db.cursor()
if user.getPreference(db, "commit.diff.compactMode"): default_compact = "yes"
else: default_compact = "no"
compact = req.getParameter("compact", default_compact) == "yes"
highlight = req.getParameter("highlight", None)
review_id = req.getParameter("id", filter=int)
review = dbutils.Review.fromId(db, review_id, load_commits=False, profiler=profiler)
profiler.check("create review")
if not review:
raise page.utils.DisplayMessage, ("Invalid Review ID", "%d is not a valid review ID." % review_id)
if review.getETag(db, user) == req.getRequestHeader("If-None-Match"):
raise page.utils.NotModified
profiler.check("ETag")
# if usesExperimentalFeature(req, db, review):
# def renderMessage(target):
# url = "%s/r/%d" % (configuration.URL_PER_TYPE['development'], review.id)
# p = target.p(style="padding-top: 1em")
# p.text("Sorry, this review uses experimental features currently only available in the development version of Critic. Because of that, it can only be displayed there.")
# p = target.p(style="padding-top: 1em")
# p.b().a(href=url).text(url)
# yield page.utils.displayMessage(db, req, user, "Experimental Feature Alert!", message=renderMessage)
# return
repository = review.repository
prefetch_commits = {}
cursor.execute("""SELECT sha1, child
FROM changesets
JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id)
JOIN commits ON (commits.id=changesets.child)
WHERE review=%s""",
(review.id,))
prefetch_commits.update(dict(cursor))
profiler.check("commits (query)")
cursor.execute("""SELECT old_head, commits1.sha1, new_head, commits2.sha1, new_upstream, commits3.sha1
FROM reviewrebases
LEFT OUTER JOIN commits AS commits1 ON (commits1.id=old_head)
LEFT OUTER JOIN commits AS commits2 ON (commits2.id=new_head)
LEFT OUTER JOIN commits AS commits3 ON (commits3.id=new_upstream)
WHERE review=%s""",
(review.id,))
rebases = cursor.fetchall()
if rebases:
has_finished_rebases = False
for old_head_id, old_head_sha1, new_head_id, new_head_sha1, new_upstream_id, new_upstream_sha1 in rebases:
if old_head_id:
prefetch_commits[old_head_sha1] = old_head_id
if new_head_id:
prefetch_commits[new_head_sha1] = new_head_id
has_finished_rebases = True
if new_upstream_id:
prefetch_commits[new_upstream_sha1] = new_upstream_id
profiler.check("auxiliary commits (query)")
if has_finished_rebases:
cursor.execute("""SELECT commits.sha1, commits.id
FROM commits
JOIN reachable ON (reachable.commit=commits.id)
WHERE branch=%s""",
(review.branch.id,))
prefetch_commits.update(dict(cursor))
profiler.check("actual commits (query)")
prefetch_commits = gitutils.FetchCommits(repository, prefetch_commits)
document = htmlutils.Document(req)
html = document.html()
head = html.head()
body = html.body(onunload="void(0);")
def flush(target=None):
return document.render(stop=target, pretty=not compact)
def renderHeaderItems(target):
has_draft_items = review_utils.renderDraftItems(db, user, review, target)
target = target.div("buttons")
if not has_draft_items:
if review.state == "open":
if review.accepted(db):
target.button(id="closeReview", onclick="closeReview();").text("Close Review")
else:
if user in review.owners or user.getPreference(db, "review.pingAnyReview"):
target.button(id="pingReview", onclick="pingReview();").text("Ping Review")
if user in review.owners or user.getPreference(db, "review.dropAnyReview"):
target.button(id="dropReview", onclick="dropReview();").text("Drop Review")
if user in review.owners and not review.description:
target.button(id="writeDescription", onclick="editDescription();").text("Write Description")
else:
target.button(id="reopenReview", onclick="reopenReview();").text("Reopen Review")
target.span("buttonscope buttonscope-global")
profiler.check("prologue")
page.utils.generateHeader(body, db, user, renderHeaderItems)
cursor.execute("SELECT 1 FROM fullreviewuserfiles WHERE review=%s AND state='pending' AND assignee=%s", (review.id, user.id))
hasPendingChanges = bool(cursor.fetchone())
if hasPendingChanges:
head.setLink("next", "showcommit?review=%d&filter=pending" % review.id)
profiler.check("header")
document.addExternalStylesheet("resource/showreview.css")
document.addExternalStylesheet("resource/review.css")
document.addExternalStylesheet("resource/comment.css")
document.addExternalScript("resource/showreview.js")
document.addExternalScript("resource/review.js")
document.addExternalScript("resource/comment.js")
document.addExternalScript("resource/autocomplete.js")
document.addInternalScript(user.getJS())
document.addInternalScript("var owners = [ %s ];" % ", ".join(owner.getJSConstructor() for owner in review.owners))
document.addInternalScript("var updateCheckInterval = %d;" % user.getPreference(db, "review.updateCheckInterval"));
log.html.addResources(document)
document.addInternalScript(review.getJS())
target = body.div("main")
basic = target.table('paleyellow basic', align='center')
basic.col(width='10%')
basic.col(width='60%')
basic.col(width='30%')
h1 = basic.tr().td('h1', colspan=3).h1()
h1.text("r/%d: " % review.id)
h1.span(id="summary").text("%s" % review.summary, linkify=linkify.Context(db=db, review=review))
h1.a("edit", href="javascript:editSummary();").text("[edit]")
def linkToCommit(commit):
cursor.execute("SELECT 1 FROM commits JOIN changesets ON (child=commits.id) JOIN reviewchangesets ON (changeset=changesets.id) WHERE sha1=%s AND review=%s", (commit.sha1, review.id))
if cursor.fetchone():
return "%s/%s?review=%d" % (review.repository.name, commit.sha1, review.id)
return "%s/%s" % (review.repository.name, commit.sha1)
def row(heading, value, help, right=None, linkify=False, cellId=None):
main_row = basic.tr('line')
main_row.td('heading').text("%s:" % heading)
if right is False: colspan = 2
else: colspan = None
if callable(value): value(main_row.td('value', id=cellId, colspan=colspan).preformatted())
else: main_row.td('value', id=cellId, colspan=colspan).preformatted().text(value, linkify=linkify, repository=review.repository)
if right is False: pass
elif callable(right): right(main_row.td('right', valign='bottom'))
else: main_row.td('right').text()
if help: basic.tr('help').td('help', colspan=3).text(help)
    def renderBranchName(target):
        # Render the review branch name and repository; when the branch is
        # tracked from a remote, also show the tracked remote branch, the
        # last-fetch time, and (for review owners) tracking controls.
        target.code("branch").text(review.branch.name, linkify=linkify.Context())
        if repository.name != user.getPreference(db, "defaultRepository"):
            target.text(" in ")
            target.code("repository").text("%s:%s" % (configuration.base.HOSTNAME, repository.path))
        cursor.execute("""SELECT id, remote, remote_name, disabled, previous
                            FROM trackedbranches
                           WHERE repository=%s
                             AND local_name=%s""",
                       (repository.id, review.branch.name))
        row = cursor.fetchone()
        if row:
            trackedbranch_id, remote, remote_name, disabled, previous = row
            target.p("tracking disabled" if disabled else "tracking").text("tracking")
            target.code("branch").text(remote_name, linkify=linkify.Context(remote=remote))
            target.text(" in ")
            target.code("repository").text(remote, linkify=linkify.Context())
            if previous:
                # Timestamp is rendered client-side so it appears in the
                # viewer's locale/timezone.
                target.span("lastupdate").script(type="text/javascript").text("document.write('(last fetched: ' + shortDate(new Date(%d)) + ')');" % (calendar.timegm(previous.utctimetuple()) * 1000))
            if user in review.owners:
                buttons = target.div("buttons")
                if disabled:
                    buttons.button("enabletracking", onclick="enableTracking(%d);" % trackedbranch_id).text("Enable Tracking")
                else:
                    buttons.button("disabletracking", onclick="triggerUpdate(%d);" % trackedbranch_id).text("Update Now")
                    buttons.button("disabletracking", onclick="disableTracking(%d);" % trackedbranch_id).text("Disable Tracking")
    def renderReviewers(target):
        # List the assigned reviewers (with absent/retired annotations) and
        # any per-review custom 'reviewer' filters, plus controls for adding
        # reviewers and managing assignments.
        if review.reviewers:
            for index, reviewer in enumerate(review.reviewers):
                if index != 0: target.text(", ")
                span = target.span("user %s" % reviewer.status)
                span.span("name").text(reviewer.fullname)
                if reviewer.status == 'absent':
                    span.span("status").text(" (%s)" % reviewer.getAbsence(db))
                elif reviewer.status == 'retired':
                    span.span("status").text(" (retired)")
        else:
            target.i().text("No reviewers.")
        cursor.execute("""SELECT reviewfilters.id, reviewfilters.uid, reviewfilters.directory, reviewfilters.file
                            FROM reviewfilters
                            JOIN users ON (reviewfilters.uid=users.id)
                           WHERE reviewfilters.review=%s
                             AND reviewfilters.type='reviewer'
                             AND users.status!='retired'""",
                       (review.id,))
        rows = cursor.fetchall()
        reviewer_filters_hidden = []
        if rows:
            table = target.table("reviewfilters reviewers")
            row = table.thead().tr("h1")
            row.th("h1", colspan=4).text("Custom filters:")
            filter_data = {}
            reviewfilters = {}
            # Group filter paths per user so each user gets one multi-row
            # cell spanning all of their filters.
            for filter_id, user_id, directory_id, file_id in rows:
                filter_user = dbutils.User.fromId(db, user_id)
                if file_id: path = dbutils.describe_file(db, file_id)
                else: path = dbutils.describe_directory(db, directory_id) + "/"
                reviewfilters.setdefault(filter_user.fullname, []).append(path)
                filter_data[(filter_user.fullname, path)] = (filter_id, filter_user)
            count = 0
            tbody = table.tbody()
            for fullname in sorted(reviewfilters.keys()):
                original_paths = sorted(reviewfilters[fullname])
                trimmed_paths = diff.File.eliminateCommonPrefixes(original_paths[:])
                first = True
                for original_path, trimmed_path in zip(original_paths, trimmed_paths):
                    row = tbody.tr("filter")
                    if first:
                        row.td("username", rowspan=len(original_paths)).text(fullname)
                        row.td("reviews", rowspan=len(original_paths)).text("reviews")
                        first = False
                    row.td("path").span().innerHTML(trimmed_path)
                    filter_id, filter_user = filter_data[(fullname, original_path)]
                    href = "javascript:removeReviewFilter(%d, %s, 'reviewer', %s, %s);" % (filter_id, filter_user.getJSConstructor(), htmlutils.jsify(original_path), "true" if filter_user != user else "false")
                    row.td("remove").a(href=href).text("[remove]")
                    count += 1
            tfoot = table.tfoot()
            tfoot.tr().td(colspan=4).text("%d line%s hidden" % (count, "s" if count > 1 else ""))
            # Collapse long filter lists by default; the footer summarises
            # how many rows are hidden.
            if count > 10:
                tbody.setAttribute("class", "hidden")
                reviewer_filters_hidden.append(True)
            else:
                tfoot.setAttribute("class", "hidden")
                reviewer_filters_hidden.append(False)
        buttons = target.div("buttons")
        if reviewer_filters_hidden:
            buttons.button("showfilters", onclick="toggleReviewFilters('reviewers', $(this));").text("%s Custom Filters" % ("Show" if reviewer_filters_hidden[0] else "Hide"))
        if review.applyfilters and review.repository.parent and not review.applyparentfilters:
            buttons.button("applyparentfilters", onclick="applyParentFilters();").text("Apply Upstream Filters")
        buttons.button("addreviewer", onclick="addReviewer();").text("Add Reviewer")
        buttons.button("manage", onclick="location.href='managereviewers?review=%d';" % review.id).text("Manage Assignments")
    def renderWatchers(target):
        # Mirror of renderReviewers for watchers: list the watching users,
        # any custom 'watcher' filters, and watch/unwatch controls for the
        # viewing user.
        if review.watchers:
            for index, watcher in enumerate(review.watchers):
                if index != 0: target.text(", ")
                span = target.span("user %s" % watcher.status)
                span.span("name").text(watcher.fullname)
                if watcher.status == 'absent':
                    span.span("status").text(" (%s)" % watcher.getAbsence(db))
                elif watcher.status == 'retired':
                    span.span("status").text(" (retired)")
        else:
            target.i().text("No watchers.")
        cursor.execute("""SELECT reviewfilters.id, reviewfilters.uid, reviewfilters.directory, reviewfilters.file
                            FROM reviewfilters
                            JOIN users ON (reviewfilters.uid=users.id)
                           WHERE reviewfilters.review=%s
                             AND reviewfilters.type='watcher'
                             AND users.status!='retired'""",
                       (review.id,))
        rows = cursor.fetchall()
        watcher_filters_hidden = []
        if rows:
            table = target.table("reviewfilters watchers")
            row = table.thead().tr("h1")
            row.th("h1", colspan=4).text("Custom filters:")
            filter_data = {}
            reviewfilters = {}
            # Group filter paths per user, as in renderReviewers.
            for filter_id, user_id, directory_id, file_id in rows:
                filter_user = dbutils.User.fromId(db, user_id)
                if file_id: path = dbutils.describe_file(db, file_id)
                else: path = dbutils.describe_directory(db, directory_id) + "/"
                reviewfilters.setdefault(filter_user.fullname, []).append(path)
                filter_data[(filter_user.fullname, path)] = (filter_id, filter_user)
            count = 0
            tbody = table.tbody()
            for fullname in sorted(reviewfilters.keys()):
                original_paths = sorted(reviewfilters[fullname])
                trimmed_paths = diff.File.eliminateCommonPrefixes(original_paths[:])
                first = True
                for original_path, trimmed_path in zip(original_paths, trimmed_paths):
                    row = tbody.tr("filter")
                    if first:
                        row.td("username", rowspan=len(original_paths)).text(fullname)
                        row.td("reviews", rowspan=len(original_paths)).text("watches")
                        first = False
                    row.td("path").span().innerHTML(trimmed_path)
                    filter_id, filter_user = filter_data[(fullname, original_path)]
                    href = "javascript:removeReviewFilter(%d, %s, 'watcher', %s, %s);" % (filter_id, filter_user.getJSConstructor(), htmlutils.jsify(original_path), "true" if filter_user != user else "false")
                    row.td("remove").a(href=href).text("[remove]")
                    count += 1
            tfoot = table.tfoot()
            tfoot.tr().td(colspan=4).text("%d line%s hidden" % (count, "s" if count > 1 else ""))
            # Collapse long filter lists by default.
            if count > 10:
                tbody.setAttribute("class", "hidden")
                watcher_filters_hidden.append(True)
            else:
                tfoot.setAttribute("class", "hidden")
                watcher_filters_hidden.append(False)
        buttons = target.div("buttons")
        if watcher_filters_hidden:
            buttons.button("showfilters", onclick="toggleReviewFilters('watchers', $(this));").text("%s Custom Filters" % ("Show" if watcher_filters_hidden[0] else "Hide"))
        buttons.button("addwatcher", onclick="addWatcher();").text("Add Watcher")
        if user not in review.reviewers and user not in review.owners:
            if user not in review.watchers:
                buttons.button("watch", onclick="watchReview();").text("Watch Review")
            elif review.watchers[user] == "manual":
                # Only manually-added watchers can opt out here.
                buttons.button("watch", onclick="unwatchReview();").text("Stop Watching Review")
    def renderEditOwners(target):
        # Right-hand cell control for the "Owners" row of the basic table.
        target.button("description", onclick="editOwners();").text("Edit Owners")
    def renderEditDescription(target):
        # Right-hand cell control for the "Description" row of the basic table.
        target.button("description", onclick="editDescription();").text("Edit Description")
    def renderRecipientList(target):
        # Describe who receives e-mails about the review: the inclusion
        # mode ("Everyone"/"No-one" plus exceptions) and, for users involved
        # in the review, opt-in/opt-out buttons.
        cursor.execute("SELECT uid, fullname, include FROM reviewrecipientfilters JOIN users ON (uid=id) WHERE review=%s", (review.id,))
        default_include = True
        included = dict((owner.fullname, owner.id) for owner in review.owners)
        excluded = {}
        # uid 0 holds the per-review default; other rows are per-user
        # overrides.  Owners are never added to the excluded set.
        for user_id, fullname, include in cursor:
            if user_id == 0: default_include = include
            elif include: included[fullname] = user_id
            elif user_id not in review.owners: excluded[fullname] = user_id
        mode = None
        users = None
        buttons = []
        opt_in_button = False
        opt_out_button = False
        if default_include:
            if excluded:
                mode = "Everyone except "
                users = excluded
                opt_out_button = user.fullname not in excluded
                opt_in_button = not opt_out_button
            else:
                mode = "Everyone."
                opt_out_button = True
        else:
            if included:
                mode = "No-one except "
                users = included
                opt_in_button = user.fullname not in included
                opt_out_button = not opt_in_button
            else:
                mode = "No-one at all."
                opt_in_button = True
        # Only users actually involved in the review get the opt buttons.
        if user in review.owners or user in review.reviewers or user in review.watchers:
            if opt_in_button:
                buttons.append(("Include me, please!", "includeRecipient(%d);" % user.id))
            if opt_out_button:
                buttons.append(("Exclude me, please!", "excludeRecipient(%d);" % user.id))
        target.span("mode").text(mode)
        if users:
            container = target.span("users")
            first = True
            for fullname in sorted(users.keys()):
                if first: first = False
                else: container.text(", ")
                container.span("user", critic_user_id=users[fullname]).text(fullname)
            container.text(".")
        if buttons:
            container = target.div("buttons")
            for label, onclick in buttons:
                container.button(onclick=onclick).text(label)
row("Branch", renderBranchName, "The branch containing the commits to review.", right=False)
row("Owner%s" % ("s" if len(review.owners) > 1 else ""), ", ".join(owner.fullname for owner in review.owners), "The users who created and/or owns the review.", right=renderEditOwners)
if review.description:
row("Description", review.description, "A longer description of the changes to be reviewed.", linkify=linkToCommit, cellId="description", right=renderEditDescription)
row("Reviewers", renderReviewers, "Users responsible for reviewing the changes in this review.", right=False)
row("Watchers", renderWatchers, "Additional users who receive e-mails about updates to this review.", right=False)
row("Recipient List", renderRecipientList, "Users (among the reviewers and watchers) who will receive any e-mails about the review.", right=False)
profiler.check("basic")
review_state = review.getReviewState(db)
profiler.check("review state")
progress = target.table('paleyellow progress', align='center')
progress_header = progress.tr().td('h1', colspan=3).h1()
progress_header.text("Review Progress")
progress_header_right = progress_header.span("right")
progress_header_right.text("Display log: ")
progress_header_right.a(href="showreviewlog?review=%d&granularity=module" % review.id).text("[per module]")
progress_header_right.text()
progress_header_right.a(href="showreviewlog?review=%d&granularity=file" % review.id).text("[per file]")
progress_h1 = progress.tr().td('percent', colspan=3).h1()
title_data = { 'id': 'r/%d' % review.id,
'summary': review.summary,
'progress': str(review_state) }
if review.state == "closed":
progress_h1.img(src=htmlutils.getStaticResourceURI("seal-of-approval-left.png"),
style="position: absolute; margin-left: -80px; margin-top: -100px")
progress_h1.text("Finished!")
elif review.state == "dropped":
progress_h1.text("Dropped...")
elif review.state == "open" and review_state.accepted:
progress_h1.img(src=htmlutils.getStaticResourceURI("seal-of-approval-left.png"),
style="position: absolute; margin-left: -80px; margin-top: -100px")
progress_h1.text("Accepted!")
progress_h1.div().span("remark").text("Hurry up and close it before anyone has a change of heart.")
else:
progress_h1.text(review_state.getProgress())
if review_state.issues:
progress_h1.span("comments").text(" and ")
progress_h1.text("%d" % review_state.issues)
progress_h1.span("comments").text(" issue%s" % (review_state.issues > 1 and "s" or ""))
if review_state.getPercentReviewed() != 100.0:
cursor = db.cursor()
cursor.execute("""SELECT 1
FROM reviewfiles
LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
WHERE reviewfiles.review=%s
AND reviewfiles.state='pending'
AND reviewuserfiles.uid IS NULL""",
(review.id,))
if cursor.fetchone():
progress.tr().td('stuck', colspan=3).a(href="showreviewlog?review=%d&granularity=file&unassigned=yes" % review.id).text("Not all changes have a reviewer assigned!")
cursor.execute("""SELECT uid, MIN(reviewuserfiles.time)
FROM reviewfiles
JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
WHERE reviewfiles.review=%s
AND reviewfiles.state='pending'
GROUP BY reviewuserfiles.uid""",
(review.id,))
        def total_seconds(delta):
            # Convert a timedelta to whole seconds (microseconds ignored);
            # presumably kept instead of timedelta.total_seconds() for
            # compatibility with older Python -- TODO confirm.
            return delta.days * 60 * 60 * 24 + delta.seconds
now = datetime.datetime.now()
pending_reviewers = [(dbutils.User.fromId(db, user_id), total_seconds(now - timestamp)) for (user_id, timestamp) in cursor.fetchall() if total_seconds(now - timestamp) > 60 * 60 * 8]
if pending_reviewers:
progress.tr().td('stragglers', colspan=3).text("Needs review from")
for reviewer, seconds in pending_reviewers:
if reviewer.status == 'retired': continue
elif reviewer.status == 'absent': warning = " absent"
elif not reviewer.getPreference(db, "email.activated"): warning = " no-email"
else: warning = ""
if seconds < 60 * 60 * 24:
hours = seconds / (60 * 60)
duration = " (%d hour%s)" % (hours, "s" if hours > 1 else "")
elif seconds < 60 * 60 * 24 * 7:
days = seconds / (60 * 60 * 24)
duration = " (%d day%s)" % (days, "s" if days > 1 else "")
elif seconds < 60 * 60 * 24 * 30:
weeks = seconds / (60 * 60 * 24 * 7)
duration = " (%d week%s)" % (weeks, "s" if weeks > 1 else "")
else:
duration = " (wake up!)"
progress.tr().td('straggler' + warning, colspan=3).text("%s%s" % (reviewer.fullname, duration))
if user in review.owners:
progress.tr().td('pinging', colspan=3).span().text("Send a message to these users by pinging the review.")
title_format = user.getPreference(db, 'ui.title.showReview')
try:
document.setTitle(title_format % title_data)
except Exception, exc:
document.setTitle(traceback.format_exception_only(type(exc), exc)[0].strip())
profiler.check("progress")
check = profiler.start("ApprovalColumn.fillCache")
    def linkToCommit(commit):
        # Redefines the earlier linkToCommit helper: from this point on,
        # commit links use the abbreviated sha1 and are always scoped to
        # the review (used by the commit-log summary column).
        return "%s?review=%d" % (commit.sha1[:8], review.id)
approval_cache = {}
ApprovalColumn.fillCache(db, user, review, approval_cache, profiler)
check.stop()
summary_column = SummaryColumn(review, linkToCommit)
summary_column.fillCache(db, review)
profiler.check("SummaryColumn.fillCache")
columns = [(10, log.html.WhenColumn()),
(60, summary_column),
(16, log.html.AuthorColumn()),
(7, ApprovalColumn(user, review, ApprovalColumn.APPROVED, approval_cache)),
(7, ApprovalColumn(user, review, ApprovalColumn.TOTAL, approval_cache))]
    def renderReviewPending(db, target):
        # Filter links in the commit-log header: quick filters for what the
        # viewing user needs/can review, plus manual file selection links.
        if not user.isAnonymous():
            target.text("Filter: ")
            if hasPendingChanges:
                target.a(href="showcommit?review=%d&filter=pending" % review.id, title="All changes you need to review.").text("[pending]")
                target.text()
            if user in review.reviewers:
                target.a(href="showcommit?review=%d&filter=reviewable" % review.id, title="All changes you can review, including what you've already reviewed.").text("[reviewable]")
                target.text()
            target.a(href="showcommit?review=%d&filter=relevant" % review.id, title="All changes that match your filters.").text("[relevant]")
            target.text()
        target.text("Manual: ")
        target.a(href="filterchanges?review=%d" % review.id, title="Manually select what files to display of the changes from all commits.").text("[full]")
        target.text()
        target.a(href="javascript:void(filterPartialChanges());", title="Manually select what what files to display of the changes in a selection of commits.").text("[partial]")
req.addResponseHeader("ETag", review.getETag(db, user))
if user.getPreference(db, "review.useMustRevalidate"):
req.addResponseHeader("Cache-Control", "must-revalidate")
yield flush(target)
try:
try: prefetch_commits.getCommits(db)
except AttributeError: raise Exception, prefetch_commits.error
profiler.check("FetchCommits.getCommits()")
cursor.execute("""SELECT child
FROM changesets
JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id)
JOIN commits ON (commits.id=changesets.child)
WHERE review=%s""",
(review.id,))
commits = [gitutils.Commit.fromId(db, repository, commit_id) for (commit_id,) in cursor]
cursor.execute("""SELECT id, old_head, new_head, new_upstream, uid, branch
FROM reviewrebases
WHERE review=%s""",
(review.id,))
all_rebases = [(rebase_id,
gitutils.Commit.fromId(db, repository, old_head),
gitutils.Commit.fromId(db, repository, new_head) if new_head else None,
dbutils.User.fromId(db, user_id),
gitutils.Commit.fromId(db, repository, new_upstream) if new_upstream is not None else None,
branch_name)
for rebase_id, old_head, new_head, new_upstream, user_id, branch_name in cursor]
bottom_right = None
finished_rebases = filter(lambda item: item[2] is not None, all_rebases)
current_rebases = filter(lambda item: item[2] is None, all_rebases)
if current_rebases:
assert len(current_rebases) == 1
            def renderCancelRebase(db, target):
                # Bottom-right log button: offered to the user who prepared
                # the rebase currently in progress.
                target.button("cancelrebase").text("Cancel Rebase")
if user == current_rebases[0][3]:
bottom_right = renderCancelRebase
else:
            def renderPrepareRebase(db, target):
                # Bottom-right log button: offered when no rebase is in progress.
                target.button("preparerebase").text("Prepare Rebase")
bottom_right = renderPrepareRebase
if finished_rebases:
cursor.execute("""SELECT commit
FROM reachable
WHERE branch=%s""",
(review.branch.id,))
actual_commits = [gitutils.Commit.fromId(db, repository, commit_id) for (commit_id,) in cursor]
else:
actual_commits = []
log.html.render(db, target, "Commits (%d)", commits=commits, columns=columns, title_right=renderReviewPending, rebases=finished_rebases, branch_name=review.branch.name, bottom_right=bottom_right, review=review, highlight=highlight, profiler=profiler, user=user, extra_commits=actual_commits)
yield flush(target)
profiler.check("log")
except gitutils.GitError, error:
div = target.div("error")
div.h1().text("Error!")
if error.sha1:
div.text("The commit %s is missing from the repository." % error.sha1)
else:
div.text("Failed to read commits from the repository: %s" % error.message)
all_chains = review.getCommentChains(db, user, skip=set(['commits', 'lines']))
profiler.check("chains (load)")
if all_chains:
issue_chains = filter(lambda chain: chain.type == "issue", all_chains)
draft_issues = filter(lambda chain: chain.state == "draft", issue_chains)
open_issues = filter(lambda chain: chain.state == "open", issue_chains)
addressed_issues = filter(lambda chain: chain.state == "addressed", issue_chains)
closed_issues = filter(lambda chain: chain.state == "closed", issue_chains)
note_chains = filter(lambda chain: chain.type == "note", all_chains)
draft_notes = filter(lambda chain: chain.state == "draft", note_chains)
open_notes = filter(lambda chain: chain.state != "draft" and chain.state != "empty", note_chains)
else:
open_issues = []
open_notes = []
chains = target.table("paleyellow comments", align="center", cellspacing=0)
h1 = chains.tr("h1").td("h1", colspan=3).h1().text("Comments")
links = h1.span("links")
if all_chains:
links.a(href="showcomments?review=%d&filter=all" % review.id).text("[display all]")
if not user.isAnonymous():
links.a(href="showcomments?review=%d&filter=all&blame=%s" % (review.id, user.name)).text("[in my commits]")
cursor.execute("""SELECT count(commentstoread.comment) > 0
FROM commentchains
JOIN comments ON (comments.chain=commentchains.id)
JOIN commentstoread ON (commentstoread.comment=comments.id)
WHERE commentchains.review=%s
AND commentstoread.uid=%s""",
[review.id, user.id])
if cursor.fetchone()[0]:
links.a(href="showcomments?review=%d&filter=toread" % review.id).text("[display unread]")
    def renderChains(target, chains):
        # Render one table row per comment chain: author, chain leader text
        # linked to the comment page, and a reply/unread summary cell.
        for chain in chains:
            row = target.tr("comment %s %s" % (chain.type, chain.state))
            row.td("author").text(chain.user.fullname)
            row.td("title").a(href="showcomment?chain=%d" % chain.id).innerHTML(chain.leader())
            ncomments = chain.countComments()
            nunread = chain.countUnread()
            cell = row.td("when")
            # First comment is the chain itself, so replies = ncomments - 1.
            if ncomments == 1:
                if nunread: cell.b().text("Unread")
                else: cell.text("No replies")
            else:
                if nunread: cell.b().text("%d of %d unread" % (nunread, ncomments))
                else: cell.text("%d repl%s" % (ncomments - 1, "ies" if ncomments > 2 else "y"))
if draft_issues:
h2 = chains.tr("h2", id="draft-issues").td("h2", colspan=3).h2().text("Draft Issues")
h2.a(href="showcomments?review=%d&filter=draft-issues" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=draft-issues&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, draft_issues)
if open_issues:
h2 = chains.tr("h2", id="open-issues").td("h2", colspan=3).h2().text("Open Issues")
h2.a(href="showcomments?review=%d&filter=open-issues" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=open-issues&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, open_issues)
if addressed_issues:
h2 = chains.tr("h2", id="addressed-issues").td("h2", colspan=3).h2().text("Addressed Issues")
h2.a(href="showcomments?review=%d&filter=addressed-issues" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=addressed-issues&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, addressed_issues)
if closed_issues:
h2 = chains.tr("h2", id="closed-issues").td("h2", colspan=3).h2().text("Resolved Issues")
h2.a(href="showcomments?review=%d&filter=closed-issues" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=closed-issues&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, closed_issues)
if draft_notes:
h2 = chains.tr("h2", id="draft-notes").td("h2", colspan=3).h2().text("Draft Notes")
h2.a(href="showcomments?review=%d&filter=draft-notes" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=draft-notes&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, draft_notes)
if open_notes:
h2 = chains.tr("h2", id="notes").td("h2", colspan=3).h2().text("Notes")
h2.a(href="showcomments?review=%d&filter=open-notes" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=open-notes&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, open_notes)
buttons = chains.tr("buttons").td("buttons", colspan=3)
buttons.button(onclick="CommentChain.create('issue');").text("Raise Issue")
buttons.button(onclick="CommentChain.create('note');").text("Write Note")
profiler.check("chains (render)")
yield flush(target)
cursor.execute("""SELECT DISTINCT reviewfiles.file, theirs.uid
FROM reviewfiles
JOIN reviewuserfiles AS yours ON (yours.file=reviewfiles.id)
JOIN reviewuserfiles AS theirs ON (theirs.file=yours.file AND theirs.uid!=yours.uid)
WHERE reviewfiles.review=%s
AND yours.uid=%s""",
(review.id, user.id))
rows = cursor.fetchall()
profiler.check("shared assignments (query)")
if rows:
reviewers = {}
for file_id, user_id in rows:
reviewers.setdefault(file_id, {})[user_id] = set()
shared = target.table('paleyellow shared', align='center', cellspacing=0)
row = shared.tr('h1')
shared_header = row.td('h1', colspan=2).h1()
shared_header.text("Shared Assignments")
shared_buttons = row.td('buttons', colspan=2).span(style="display: none")
shared_buttons.button("confirm").text("Confirm")
shared_buttons.button("cancel").text("Cancel")
granularity = "module"
        def moduleFromFile(file_id):
            # Map a file id to its module name, falling back to the file's
            # own path when no module matches.
            filename = dbutils.describe_file(db, file_id)
            return getModuleFromFile(repository, filename) or filename
        def formatFiles(files):
            # Format a set of file ids for display, grouped according to the
            # current `granularity` ("file" keeps individual paths; anything
            # else collapses files into their modules where possible).
            paths = sorted([dbutils.describe_file(db, file_id) for file_id in files])
            if granularity == "file":
                return diff.File.eliminateCommonPrefixes(paths)
            else:
                modules = set()
                files = []
                for path in paths:
                    # NOTE(review): called with a single argument here, while
                    # moduleFromFile above calls getModuleFromFile(repository,
                    # filename) with two -- one of the call sites looks wrong;
                    # confirm getModuleFromFile's signature.
                    module = getModuleFromFile(path)
                    if module: modules.add(module)
                    else: files.append(path)
                return sorted(modules) + diff.File.eliminateCommonPrefixes(files)
files_per_team = review_utils.collectReviewTeams(reviewers)
teams_per_modules = {}
profiler.check("shared assignments (collect teams)")
for team, files in files_per_team.items():
modules = set()
for file_id in files:
modules.add(moduleFromFile(file_id))
teams_per_modules.setdefault(frozenset(modules), set()).update(team)
for modules, team in teams_per_modules.items():
row = shared.tr("reviewers")
cell = row.td("reviewers")
members = sorted([dbutils.User.fromId(db, user_id).fullname for user_id in team])
for member in members: cell.text(member).br()
row.td("willreview").innerHTML("<span class='also'>also</span> review changes in")
cell = row.td("files")
for path in diff.File.eliminateCommonPrefixes(sorted(modules)):
cell.span("file").innerHTML(path).br()
directory_ids = "[ %s ]" % ", ".join([str(dbutils.find_directory(db, path=path[:-1])) for path in modules if path.endswith("/")])
file_ids = "[ %s ]" % ", ".join([str(dbutils.find_file(db, path=path)) for path in modules if not path.endswith("/")])
user_ids = "[ %s ]" % ", ".join(map(str, team))
cell = row.td("buttons")
cell.button("accept", critic_directory_ids=directory_ids, critic_file_ids=file_ids, critic_user_ids=user_ids).text("I will review this!")
cell.button("deny", critic_directory_ids=directory_ids, critic_file_ids=file_ids, critic_user_ids=user_ids).text("They will review this!")
yield flush(target)
profiler.check("shared assignments")
cursor.execute("SELECT batches.id, users.fullname, batches.comment, batches.time FROM batches JOIN users ON (users.id=batches.uid) WHERE batches.review=%s ORDER BY batches.id DESC", [review.id])
rows = cursor.fetchall()
if rows:
notes = dict([(chain.id, chain) for chain in open_notes])
batches = target.table("paleyellow batches", align="center", cellspacing=0)
batches.tr().td("h1", colspan=3).h1().text("Work Log")
for batch_id, user_fullname, chain_id, when in rows:
row = batches.tr("batch")
row.td("author").text(user_fullname)
title = "<i>No comment</i>"
if chain_id:
if chain_id in notes:
title = notes[chain_id].leader()
else:
for chain in all_chains:
if chain.id == chain_id:
title = chain.leader()
break
row.td("title").a(href="showbatch?batch=%d" % batch_id).innerHTML(title)
row.td("when").text(time.strftime("%Y-%m-%d %H:%M", when.timetuple()))
profiler.check("batches")
profiler.output(db, user, target)
yield flush()
if review.branch.head:
try: head_according_to_git = repository.revparse(review.branch.name)
except: head_according_to_git = None
head_according_to_us = review.branch.head.sha1
if head_according_to_git != head_according_to_us:
# The git repository disagrees with us. Potentially harmful updates
# to the branch will be rejected by the git hook while this is the
# case, but this means that "our" head might not be referenced at
# all and thus that it might be GC:ed by the git repository at some
# point. To avoid that, add a keepalive reference.
repository.keepalive(head_according_to_us)
yield "\n<!-- branch head mismatch: git=%s, us=%s (corrected) -->" % (head_according_to_git[:8] if head_according_to_git else "N/A", head_according_to_us[:8])
| [
"jl@opera.com"
] | jl@opera.com |
0c2fddd11b78d0ae7d34b0e19aadb724ad55b1a1 | 9d652cc94bf07c149cd6c7c6060b0f97875a78d4 | /apps/my_app/views.py | 7b94dacae39a7d56bf02bd6f0ab841340dae1466 | [] | no_license | herimiguel/cdExam | 2c84a46f526518b691de0f6bfe215d2713664f76 | a119b9b6f336b035ad7f003ac4e44a9ce4d67ee1 | refs/heads/master | 2020-03-19T08:25:56.222809 | 2018-06-05T19:13:29 | 2018-06-05T19:13:29 | 136,203,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,136 | py | from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.contrib import messages
from models import *
from django.db import IntegrityError
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def index(request):
    """Render the landing page with the login and registration forms."""
    template = 'my_app/index.html'
    return render(request, template)
def register(request):
    """Validate and create a new User, then log them in via the session.

    On validation failure, flashes the errors and redirects back to the
    index page; on success, stores the new user's id in the session and
    redirects to the wishlist view.
    """
    if request.method != 'POST':
        # BUG FIX: the original only handled POST and fell off the end
        # (returning None) for other methods, which is a server error in
        # Django.  Send stray GETs back to the landing page instead.
        return redirect('/')
    firstName = request.POST['firstName']
    lastName = request.POST['lastName']
    email = request.POST['email']
    password = request.POST['password']
    conPassword = request.POST['conPassword']
    isValid = True
    minVal = 3
    if len(firstName) < minVal:
        messages.error(request, 'Name needs to be at least 3 characters!')
        isValid = False
    if len(lastName) < minVal:
        messages.error(request, 'Last Name needs to be at least 3 characters!')
        isValid = False
    if len(email) < minVal:
        messages.error(request, 'Email is required!')
        isValid = False
    elif User.objects.filter(email=email).exists():
        # BUG FIX: the original wrote `request.POST['email'] != email`,
        # comparing the submitted email to itself, so the duplicate check
        # could never fire.  Query the database for an existing account.
        messages.error(request, 'Email is already registered!')
        isValid = False
    if len(password) < minVal:
        messages.error(request, 'Password is required!')
        isValid = False
    if conPassword != password:
        messages.error(request, 'Password confirmation failed!')
        isValid = False
    if not isValid:
        return redirect('/')
    try:
        # NOTE(review): passwords are stored in plain text here and compared
        # in plain text in login(); hashing would require a coordinated
        # change to both views (and a data migration).
        user = User.objects.create(firstName=firstName, lastName=lastName,
                                   email=email, password=password)
    except IntegrityError:
        # Fallback for the race where another request registered the same
        # email between our existence check and the insert.
        messages.error(request, 'This Email is already registered!')
        return redirect('/')
    request.session['user.id'] = user.id
    return redirect('my_app:viewItems')
def login(request):
    """Authenticate a user by email/password and start a session.

    On failure, flashes the validation errors and returns to the index
    page; on success, stores the user's id in the session and redirects
    to the wishlist view.
    """
    if request.method != 'POST':
        # BUG FIX: the original only handled POST and returned None for
        # other methods (a server error in Django).
        return redirect('/')
    email = request.POST['email']
    password = request.POST['password']
    isValid = True
    minVal = 3
    if len(email) < minVal:
        messages.error(request, 'Email is required!')
        isValid = False
    if len(password) < minVal:
        messages.error(request, 'Password is required!')
        isValid = False
    user = None
    try:
        # NOTE(review): plain-text password comparison, mirroring how
        # register() stores passwords.
        user = User.objects.get(email=email, password=password)
    except ObjectDoesNotExist:
        messages.error(request, "Email and Password don't match!")
        isValid = False
    # BUG FIX: the original's `else: messages.error(request, " ")` flashed a
    # blank error message on every successful login; it has been removed.
    if not isValid:
        return redirect('/')
    request.session['user.id'] = user.id
    return redirect('my_app:viewItems')
# def success(request):
# if 'user.id' in request.session.keys():
# user= User.objects.get(id=request.session['user.id'])
# context={
# 'user': user
# }
# return render(request, 'my_app/success.html', context)
def viewItems(request):
    """Show the wishlist dashboard: items the logged-in user has not added,
    the user's own additions, all additions, and the user record itself."""
    current_user_id = request.session['user.id']
    context = {
        'items': Item.objects.all().exclude(additions__user_id=current_user_id),
        'myItems': Addition.objects.filter(user_id=current_user_id),
        'additions': Addition.objects.all(),
        'user': User.objects.get(id=current_user_id),
    }
    return render(request, 'my_app/success.html', context)
def logOut(request):
    """Log the visitor out by clearing all session data, then return to
    the landing page with a confirmation flash message."""
    request.session.clear()
    messages.success(request, 'Successfully logged out')
    return redirect('/')
def addItem(request):
    """Create a new wishlist Item owned by the logged-in user.

    Flashes a validation error for names shorter than 3 characters,
    otherwise creates the item and flashes a confirmation; always ends by
    redirecting to the wishlist view.
    """
    if request.method != 'POST':
        # BUG FIX: the original only handled POST and returned None for
        # other methods (a server error in Django).
        return redirect('my_app:viewItems')
    user = User.objects.get(id=request.session['user.id'])
    itemName = request.POST['itemName']
    minVal = 3
    if len(itemName) < minVal:
        # Typo fixed in the message ("contian" -> "contain").
        messages.error(request, 'COVFEFE! Your Wishlist Item must contain at least 3 characters!')
        return redirect('my_app:viewItems')
    Item.objects.create(itemName=itemName, creator=user)
    # BUG FIX: this confirmation was flashed via messages.error; it is a
    # success notice, matching logOut's use of messages.success.
    messages.success(request, "HOPE YOUR WISH COMES TRUE")
    return redirect('my_app:viewItems')
def toItems(request, id):
    """Render the detail page for one wishlist item and its additions."""
    # NOTE(review): this session read is otherwise unused, but it raises
    # KeyError for visitors without a logged-in session -- effectively (if
    # accidentally) gating the page.  Kept to preserve behaviour.
    user = request.session['user.id']
    context = {
        'item': Item.objects.get(id=id),
        'additions': Addition.objects.filter(item_id=id),
    }
    return render(request, 'my_app/show.html', context)
def addToMyItem(request, item_id):
    """Link an existing item to the logged-in user's own wishlist."""
    current_user_id = request.session['user.id']
    Addition.objects.create(item_id=item_id, user_id=current_user_id)
    return redirect('my_app:viewItems')
def deleteItem(request, item_id):
    """Remove an item from the logged-in user's wishlist.

    Only the user's Addition link is deleted; the Item itself survives.
    """
    current_user_id = request.session['user.id']
    Addition.objects.get(item_id=item_id, user_id=current_user_id).delete()
    return redirect('my_app:viewItems')
def deleteFromD(request, id):
    """Delete an item entirely from the database."""
    Item.objects.get(id=id).delete()
    return redirect('my_app:viewItems')
| [
"herimiguel84@hotmail.com"
] | herimiguel84@hotmail.com |
f64ca4a352ebd20fb444b43b39e98c4f44f8f5c4 | c146bce0f8585307877b53448088000ad5b6e690 | /setupStimuliandWalks.py | bd13ee914a88eb0faa08e328c78e5016ec583777 | [] | no_license | utooley/netlearn_task_v1 | 07b4dbbc5a8856a45118901709903607c0582d15 | 914411c34fc9551e704c1e8f67519308e35cdc0a | refs/heads/master | 2021-01-16T08:24:39.363760 | 2020-02-25T16:06:18 | 2020-02-25T16:06:18 | 243,041,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,508 | py | #Internet says that to run scripts from Terminal on new Macs, modules need to be imported in this order
from pandas import DataFrame, read_csv
from psychopy import core, gui
from psychopy import data, event, logging, visual
# Import modules
import os
import random
import re
import urllib
import csv
import numpy as np
from psychopy import prefs
#prefs.general['audioLib'] = ['pyo']
prefs.general['audioLib'] = ['pygame']
prefs.general['shutdownKey'] = 'q'
from psychopy import sound
from config import *
#print prefs
################
# Set up instruction stimuli #
################
#prior to task
pretask_instruc_1="""Now we're going to play the alien game.
\n\n
You'll see two alien friends. You can tap either alien to see the next set of friends.
\n\n
Try to tap on the aliens as fast as you can.
\n\n
Now, let's practice!
"""
#Set up instructions to show
fixation = visual.TextStim(win, text="+", height=2, color="#FFFFFF")
pretask_instrucScreen_1= visual.TextStim(win, text=pretask_instruc_1, wrapWidth=30, alignHoriz="center", height=1.0, color="#FFFFFF")
#set up a mouse
mymouse = event.Mouse(win=win)
mymouse.setPos((0,0))
#transition to task
transition_instruc_1="""Great! Now, let's play for real.
\n\n
Remember, your job as a scientist is to watch the aliens and try to figure out who's friends with who!\
\n\n
Ready? Let's go!
"""
transition_screen_1= visual.TextStim(win, text=transition_instruc_1, wrapWidth=30, alignHoriz="center", height=1.0, color="#FFFFFF")
# Final SCREEN
completion_instruc_1="""Great job!
\n\n
Now you're back on Planet Earth...
\n\n
Remember how when you saw two aliens together, that meant they were friends?
\n\n\
Now we're going to ask you about the aliens you just saw.
"""
completion_screen_1= visual.TextStim(win, text=completion_instruc_1, wrapWidth=30, alignHoriz="center", height=1.0, color="#FFFFFF")
################
# Import trial lists #
################
# def get_trials(subj_id):
# # import trial list and info and set up trial handler
# trialFile = 'subjects/subj{}/walks1.csv'.format(subj_id)
# trial_list = [ item for item in csv.DictReader(open(trialFile,'rU'))]
# trials = data.TrialHandler(trial_list,nReps=1,method='sequential')
# return trials
#####
# SHOW INSTRUCTIONS
#####
#define a function to show instructions
def show_instructions():
    """Display the pre-task instruction screen until any key is pressed.

    Uses the module-level PsychoPy window, mouse, and instruction stim.
    Removed: four dead press flags and the commented-out mouse-move loop
    from the original (the key-wait path was the only live code).
    """
    print('started instructionss')
    # park the cursor at the center so a stray position doesn't linger
    mymouse.setPos((0,0))
    mymouse.getPos()
    print('started instruct 2')
    pretask_instrucScreen_1.draw()
    win.flip()
    core.wait(3)  # minimum reading time before a key press is accepted
    event.waitKeys()
#####
# READY SCREEN INSTRUCTIONS
#####
#define a function to show instructions
def show_ready_screen():
    """Display the 'let's go' transition screen until any key is pressed.

    Removed the dead press1/press2 flags and commented-out mouse loop from
    the original; behavior (draw, flip, wait for key) is unchanged.
    """
    mymouse.setPos((0,0))
    mymouse.getPos()
    transition_screen_1.draw()
    win.flip()
    event.waitKeys()
############
# Set up trial stimuli #
##############
#background image
background_image = visual.ImageStim(win, 'stimuli/Monster-Bkg-1-BW.jpg')
#Set up a mouse?
mymouse = event.Mouse(win=win)
#Import audio wav files
#soundL = sound.Sound('sounds/low_200.wav')
#soundR = sound.Sound('sounds/high_200.wav')
#Set Trial Stimuli
img = visual.ImageStim(win,'stimuli/null.png')
imgL = visual.ImageStim(win,'stimuli/null.png',pos=(-7,-4), size=10)
imgR = visual.ImageStim(win,'stimuli/null.png',pos=(7,-4), size=10)
#Completion sound
donesound=sound.Sound('sounds/high_200.wav')
#####
#Make a function to get the practice trial data #
####
def set_practicedata(subj_id):
    """Build a TrialHandler holding the first four exposure trials as practice.

    Reads subjData/<subj_id>/exposure_walk1.csv and returns a sequential
    psychopy ``data.TrialHandler`` with 'onset' and 'rt' data columns
    registered (filled in later by do_runpractrials()).

    Fix vs original: the CSV file handle was opened inline and never closed;
    a ``with`` block now closes it.
    """
    trialFile = 'subjData/{}/exposure_walk1.csv'.format(subj_id)
    with open(trialFile, 'rU') as f:
        trial_list = list(csv.DictReader(f))
    # only the first 4 rows are used for practice
    prac_trials = data.TrialHandler(trial_list[:4], nReps=1, method='sequential')
    prac_trials.data.addDataType('onset')
    prac_trials.data.addDataType('rt')
    return prac_trials
#####
#Make a function to get the walk data #
####
def set_walkdata(subj_id):
    """Set up logging and the main exposure-walk TrialHandler for a subject.

    Returns (log_file, logname, trials):
      log_file -- a psychopy logging.LogFile writing to logs/<subj_id>/
      logname  -- a collision-free CSV path for saveAsWideText()
      trials   -- sequential TrialHandler over exposure_walk1.csv rows 5+

    NOTE: this file uses Python 2 print statements, so it must run under
    Python 2 / legacy PsychoPy.
    """
    #########
    # log file
    # Get logfile name
    expdir = os.getcwd()
    logdir = '{}/logs/{}'.format(expdir,subj_id)
    print logdir
    #if one participant is run more than once, make sure their log is saved separately
    # append _1, _2, ... until the candidate logname doesn't already exist
    ct = 0
    while 'logname' not in locals() or os.path.exists(logname):
        if ct > 0:
            lognum = '_%d' % (ct)
        else:
            lognum = ''
        logname = '{}/{}_log{}.csv'.format(logdir, subj_id, lognum)
        ct += 1
    if not os.path.exists(os.path.join('logs/%s/' % subj_id)):
        print "creating subject data directory"
        directory="logs/%s/" % subj_id
        os.makedirs(directory)
    #Split trials into here runs if desired
    #trials=get_trials(subj_id)
    # import trial list and info and set up trial handler
    # NOTE(review): the file handle from open() is never closed here, and
    # practice uses rows [0:4] while this starts at [5:], so the row at
    # index 4 is never shown — confirm whether that skip is intentional.
    trialFile = 'subjData/{}/exposure_walk1.csv'.format(subj_id)
    trial_list = [ item for item in csv.DictReader(open(trialFile,'rU'))]
    trial_list=trial_list[5:len(trial_list)]
    trials = data.TrialHandler(trial_list,nReps=1,method='sequential')
    #return trials
    # import animation conditions and info and set up list
    #animateFile = 'stimuli/animation_conds.csv'
    #animate_list = [ item for item in csv.DictReader(open(animateFile,'rU'))]
    #Add data types to trials
    #trials.data.addDataType('resp')
    trials.data.addDataType('onset')
    trials.data.addDataType('rt')
    # setup logging #  ('logging' here is psychopy.logging, not the stdlib module)
    log_file = logging.LogFile("logs/%s/subj%s.log" % (subj_id, subj_id), level=logging.DATA, filemode="w")
    return (log_file,logname,trials)
#####
#Make a function to run the practice trials #
####
def do_runpractrials(subj_id,prac_trials,runID):
    """Run the practice trials: animate alien pairs until the child taps one.

    For each trial, the two alien images are animated (via exec() of the
    movement strings from the trial CSV) with their sounds until the mouse
    touches either image; the tapped pair then slides off screen, RT is
    recorded into prac_trials, and a fixation is shown.

    NOTE(review): runID is only referenced in the commented-out break block
    — confirm whether it is still needed.
    NOTE(review): extraInfo is assigned twice, so the 'START' timestamp is
    overwritten by the 'participant' dict — looks unintended; verify.
    'logging' here is psychopy.logging, not the stdlib module.
    """
    #log_file = logging.LogFile("logs/subj%s.log" % (subj_id), level=logging.DATA, filemode="w")
    #change logging level to DATA if don't want so much info
    ########################
    # SHOW READY SCREEN #
    ########################
    mymouse.getPos()
    # brief fixation before the first trial
    atimer=core.CountdownTimer(1.5)
    while atimer.getTime() > 0:
        fixation.draw()
        win.flip()
    # wait for trigger from scanner
    #specify a key here
    #event.waitKeys()
    # set clock
    globalClock = core.Clock()
    logging.setDefaultClock(globalClock)
    logging.log(level=logging.DATA, msg="** START TASK **")
    prac_trials.extraInfo={'START':globalClock.getTime()}
    prac_trials.extraInfo={'participant':subj_id}
    # # disdaq fixation
    # logging.log(level=logging.DATA, msg="FIXATION")
    # for frame in range(frames['disdaq']):
    #     fixation.draw()
    #     win.flip()
    #size_list=[-= 0.1, -= 2, += 0.5, += 0.1]
    tidx = 0
    for tidx, trial in enumerate(prac_trials):
        print('In trial {} - node1 = {} node2 = {}'. format(tidx+1, trial['node1'], trial['node2']))
        print(trial['path1'],trial['path2'])
        logging.log(level=logging.DATA, msg="Trial %i - Stimuli1 %s - Stimuli2 %s" % (tidx+1, trial['path1'], trial['path2']))
        #Set values for trial
        imgL.setImage(trial['path1'])
        imgR.setImage(trial['path2'])
        # animation code strings come straight from the trial CSV and are
        # exec()'d below — safe only because the CSVs are generated locally
        animateone=trial['movement1']
        animatetwo=trial['movement2']
        #print(animateone,animatetwo)
        #add sounds here
        soundL=sound.Sound(trial['sound1'], secs=0.1)
        soundR=sound.Sound(trial['sound2'], secs=0.1)
        #soundR=sound.Sound(trial['sound2'])
        #imgR.size(0.1, '+')
        onset = globalClock.getTime()
        prac_trials.addData('onset', onset)
        #event.Mouse.clickReset(mouseclick)
        #correct=None
        #responses=None
        mymouse.setPos((0,0))
        mymouse.getPos()
        key=None
        rt=None
        Pressed=False
        #while not mouseclick.getPressed():
        #while globalClock.getTime() < (tidx+1)*trialDur:
        #timeimg1 = core.CountdownTimer(alien_duration_short)#how long the entire trial lasts for
        # loop the two-alien animation until the mouse touches an image
        while not Pressed:
            #img_rect.draw()
            #set moving animation characteristics here after resetting normal!
            imgL.ori=(0)
            imgR.ori=(0)
            imgL.opacity=(1)
            imgR.opacity=(1)
            imgL.size=(10)
            imgR.size=(10)
            # imgL.pos=(-7,-4)
            # imgR.pos = (7,-4)
            #exec('imgR.'+ animateone['animation'])
            #exec('imgL.' + animatetwo['animation'])
            #print(animateone)
            #print(animatetwo)
            #show the result of the above
            background_image.draw()
            imgL.draw()
            imgR.draw()
            win.flip()
            soundL.play()
            timeimg1 = core.CountdownTimer(alien_duration_short)
            #mymouse.getPos()
            #while (timeimg1.getTime() > 2 and np.all(mymouse.getPos()) == 0):
            while (timeimg1.getTime() > 0 and not (imgL.contains(mymouse) or imgR.contains(mymouse))):
                #while localClock.getTime() < fixDur:
                #for frame in range(10*frame_rate):
                exec(animateone)#first have the left image zoom off
                background_image.draw()
                imgL.draw()
                imgR.draw()
                win.flip()
            #mymouse.getPos()
            soundR.play(loops=0)
            timeimg2=core.CountdownTimer(alien_duration_short)
            while (timeimg2.getTime() > 0 and not (imgL.contains(mymouse) or imgR.contains(mymouse))):
                #while (timeimg1.getTime() > 0 and timeimg1.getTime() < 2 and np.all(mymouse.getPos()) == 0):
                #while localClock.getTime() < fixDur:
                #for frame in range(10*frame_rate):
                exec(animatetwo)#first have the left image zoom off
                background_image.draw()
                imgL.draw()
                imgR.draw()
                win.flip()
            # escape key aborts the whole task
            if len(event.getKeys(['escape'])):
                logging.flush()
                win.close()
                core.quit()
                break
            if imgL.contains(mymouse) or imgR.contains(mymouse):
                #if np.any(mymouse.getPos()) != 0 or timeimg1.getTime() < 0:
                donesound.play()
                rt=globalClock.getTime()-onset
                soundL.stop()
                soundR.stop()
                timer1 = core.CountdownTimer(.6)#how fast L image moves off screen.
                while timer1.getTime() > 0:
                    #while localClock.getTime() < fixDur:
                    #for frame in range(10*frame_rate):
                    imgL.pos-=(.25,0)#first have the left image zoom off
                    background_image.draw()
                    imgL.draw()
                    imgR.draw()
                    win.flip()
                #imgL.size += 10
                donesound.stop()
                timer2 = core.CountdownTimer(.9)
                while timer2.getTime() > 0:
                    imgR.pos-=(.25,0)#then move the right image over
                    background_image.draw()
                    imgR.draw()
                    win.flip()
                core.wait(.25)
                Pressed= True
                event.clearEvents()
        soundL.stop()
        soundR.stop()
        # reset positions for the next trial
        imgL.pos=(-7,-4)
        imgR.pos = (7,-4)
        #event.clearEvents()
        # If no response, play low sound
        #if responses==None:
        #low.play()
        #responses='NA'
        #rt='NA'
        #correct=0
        # record response
        #trials.addData('resp',responses)
        prac_trials.addData('rt',rt)
        # final fixation
        timer = core.CountdownTimer(fixDur)
        while timer.getTime() > 0:
            #while localClock.getTime() < fixDur:
            #for frame in range(10*frame_rate):
            fixation.draw()
            win.flip()
    # # break
    # if runID<5:
    #     NS_breakScreen.draw()
    #     win.flip()
    #     event.waitKeys(keyList=('1'))
    logging.log(level=logging.DATA, msg="*** END ****")
    prac_trials.extraInfo['END']=globalClock.getTime()
#####
#Make a function to run the trials #
####
def do_runtrials(subj_id,trials,logname,runID):
    """Run the main exposure-walk trials and save results to logname.

    Same structure as do_runpractrials(), plus: a per-subject psychopy log
    file is opened, and the trial data are written with saveAsWideText()
    both at the end and on escape-key abort.

    NOTE(review): runID is only referenced in the commented-out break block.
    NOTE(review): extraInfo is assigned twice, so the 'START' timestamp is
    overwritten by the 'participant' dict — looks unintended; verify.
    'logging' here is psychopy.logging, not the stdlib module.
    """
    log_file = logging.LogFile("logs/%s/subj%s.log" % (subj_id, subj_id), level=logging.DATA, filemode="w")
    #change logging level to DATA if don't want so much info
    ########################
    # SHOW READY SCREEN #
    ########################
    # brief fixation before the first trial
    atimer=core.CountdownTimer(1.5)
    while atimer.getTime() > 0:
        fixation.draw()
        win.flip()
    # wait for trigger from scanner
    # set clock
    globalClock = core.Clock()
    logging.setDefaultClock(globalClock)
    logging.log(level=logging.DATA, msg="** START TASK **")
    trials.extraInfo={'START':globalClock.getTime()}
    trials.extraInfo={'participant':subj_id}
    # # disdaq fixation
    # logging.log(level=logging.DATA, msg="FIXATION")
    # for frame in range(frames['disdaq']):
    #     fixation.draw()
    #     win.flip()
    #size_list=[-= 0.1, -= 2, += 0.5, += 0.1]
    tidx = 0
    for tidx, trial in enumerate(trials):
        print('In trial {} - node1 = {} node2 = {}'. format(tidx+1, trial['node1'], trial['node2']))
        print(trial['path1'],trial['path2'])
        logging.log(level=logging.DATA, msg="Trial %i - Stimuli1 %s - Stimuli2 %s" % (tidx+1, trial['path1'], trial['path2']))
        #Set values for trial
        imgL.setImage(trial['path1'])
        imgR.setImage(trial['path2'])
        # animation code strings come straight from the trial CSV and are
        # exec()'d below — safe only because the CSVs are generated locally
        animateone=trial['movement1']
        animatetwo=trial['movement2']
        #print(animateone,animatetwo)
        #add sounds here
        soundL=sound.Sound(trial['sound1'], secs=0.1)
        soundR=sound.Sound(trial['sound2'], secs=0.1)
        #soundR=sound.Sound(trial['sound2'])
        #imgR.size(0.1, '+')
        onset = globalClock.getTime()
        trials.addData('onset', onset)
        #event.Mouse.clickReset(mouseclick)
        #correct=None
        #responses=None
        mymouse.setPos((0,0))
        mymouse.getPos()
        key=None
        rt=None
        Pressed=False
        #while not mouseclick.getPressed():
        #while globalClock.getTime() < (tidx+1)*trialDur:
        #timeimg1 = core.CountdownTimer(alien_duration)
        # loop the two-alien animation until the mouse touches an image
        while not Pressed:
            #img_rect.draw()
            #set moving animation characteristics here after resetting normal!
            imgL.ori=(0)
            imgR.ori=(0)
            imgL.opacity=(1)
            imgR.opacity=(1)
            imgL.size=(10)
            imgR.size=(10)
            imgL.pos=(-7,-4)
            imgR.pos = (7,-4)
            #print(animateone)
            #print(animatetwo)
            #show the result of the above
            background_image.draw()
            imgL.draw()
            imgR.draw()
            win.flip()
            timeimg1 = core.CountdownTimer(alien_duration_short)#how fast L image moves off screen.
            soundL.play()
            while (timeimg1.getTime() > 0 and not (imgL.contains(mymouse) or imgR.contains(mymouse))):
                #while (timeimg1.getTime() > 0 and not (imgL.contains(mymouse) or imgR.contains(mymouse))):
                #while localClock.getTime() < fixDur:
                #for frame in range(10*frame_rate):
                exec(animateone)#first have the left image zoom off
                background_image.draw()
                imgL.draw()
                imgR.draw()
                win.flip()
            timeimg2 = core.CountdownTimer(alien_duration_short)#how fast L image moves off screen.
            soundR.play()
            while (timeimg2.getTime() > 0 and not (imgL.contains(mymouse) or imgR.contains(mymouse))):
                #while localClock.getTime() < fixDur:
                #for frame in range(10*frame_rate):
                exec(animatetwo)#first have the left image zoom off
                background_image.draw()
                imgL.draw()
                imgR.draw()
                win.flip()
            # escape key aborts the task, saving what has been collected so far
            if len(event.getKeys(['escape'])):
                logging.flush()
                trials.saveAsWideText(fileName=logname, delim='\t', appendFile=False)
                win.close()
                core.quit()
                break
            if imgL.contains(mymouse) or imgR.contains(mymouse):
                #if np.any(mymouse.getPos()) != 0 or timeimg1.getTime() < 0:
                donesound.play()
                rt=globalClock.getTime()-onset
                soundL.stop()
                soundR.stop()
                timer1 = core.CountdownTimer(.6)#how fast L image moves off screen.
                while timer1.getTime() > 0:
                    #while localClock.getTime() < fixDur:
                    #for frame in range(10*frame_rate):
                    imgL.pos-=(.25,0)#first have the left image zoom off
                    background_image.draw()
                    imgL.draw()
                    imgR.draw()
                    win.flip()
                donesound.stop()
                timer2 = core.CountdownTimer(.9)
                while timer2.getTime() > 0:
                    imgR.pos-=(.25,0)#then move the right image over
                    background_image.draw()
                    imgR.draw()
                    win.flip()
                core.wait(.25)
                Pressed= True
                event.clearEvents()
        soundL.stop()
        soundR.stop()
        #event.clearEvents()
        # record response
        #trials.addData('resp',responses)
        #imgL.pos-=(1,0)
        trials.addData('rt',rt)
    # # break
    # if runID<5:
    #     NS_breakScreen.draw()
    #     win.flip()
    #     event.waitKeys(keyList=('1'))
    logging.log(level=logging.DATA, msg="*** END ****")
    trials.extraInfo['END']=globalClock.getTime()
    trials.saveAsWideText(fileName=logname, delim='\t', appendFile=False)
#####
# COMPLETION SCREEN
#####
#define a function to show instructions
def show_completion_screen():
    """Show the end-of-task screen, wait for any key, then close the window.

    Removed the dead press1/press2 flags and commented-out mouse loop from
    the original; the live path (draw, flip, waitKeys, close) is unchanged.
    """
    mymouse.setPos((0,0))
    mymouse.getPos()
    completion_screen_1.draw()
    win.flip()
    event.waitKeys()
    print('done')
    win.close()
####
# If this script is run by itself, not loaded as a module, do the below:
####
if __name__ == '__main__':
    # Stand-alone debug entry point: run subject 1 through the practice
    # trials and the first exposure walk.
    subj_id = 1
    log_file, logname, trials = set_walkdata(subj_id)
    practrials = set_practicedata(subj_id)
    # round 1 — do_runpractrials() takes (subj_id, prac_trials, runID);
    # the original omitted runID and raised TypeError at runtime
    do_runpractrials(subj_id, practrials, 1)
    do_runtrials(subj_id, trials, logname.replace('.csv', '_run1.csv'), 1)
| [
"utooley@gmail.com"
] | utooley@gmail.com |
37a2620996f5b4f1543105bffdc6fb58220c624c | 6a4ebebbe0d7f81efc4f1749054a2ed7242c0e58 | /granary/test/test_googleplus.py | e12902c656d570b1ffc904713e8a4b875bb87829 | [
"LicenseRef-scancode-public-domain"
] | permissive | skylarker/granary | 6e192ecd2475febb3585728d5ba7afe34742107d | 2fd8ef017588b955e78606242ce582849cfd57ac | refs/heads/master | 2020-12-26T21:35:04.155528 | 2016-04-18T18:15:30 | 2016-04-18T18:15:30 | 56,891,160 | 1 | 0 | null | 2016-04-22T23:43:09 | 2016-04-22T23:43:09 | null | UTF-8 | Python | false | false | 21,560 | py | # coding=utf-8
"""Unit tests for googleplus.py.
See googleapiclient/http.py for details on using RequestMockBuilder to mock out
Google API calls. (This is the current doc on apiclient mocks, but it doesn't
mention RequestMockBuilder:
https://developers.google.com/api-client-library/python/guide/mocks )
TODO: figure out how to check the query parameters. Right now they're ignored. :/
"""
__author__ = ['Ryan Barrett <granary@ryanb.org>']
import copy
from email.message import Message
from email.mime.multipart import MIMEMultipart
import json
import os
from apiclient import discovery
from apiclient import http
import httplib2
from oauth_dropins import googleplus as oauth_googleplus
from oauth_dropins.webutil import util
from oauth_dropins.webutil import testutil
from granary import appengine_config
appengine_config.GOOGLE_CLIENT_ID = 'my client id'
appengine_config.GOOGLE_CLIENT_SECRET = 'my client secret'
from granary import googleplus
DISCOVERY_DOC = appengine_config.read(
os.path.join(os.path.dirname(__file__), '../../googleplus_api_discovery.json'))
def tag_uri(name):
  """Return a tag: URI for the given id, scoped to plus.google.com."""
  return util.tag_uri('plus.google.com', name)
ACTIVITY_GP = { # Google+
'kind': 'plus#activity',
'verb': 'post',
'id': '001',
'actor': {'id': '444', 'displayName': 'Charles'},
'object': {
'content': 'my post',
'url': 'http://plus.google.com/001',
},
}
ACTIVITY_AS = { # ActivityStreams
'kind': 'plus#activity',
'verb': 'post',
'id': tag_uri('001'),
'actor': {'id': tag_uri('444'), 'displayName': 'Charles'},
'object': {
'content': 'my post',
'url': 'http://plus.google.com/001',
'author': {'id': tag_uri('444'), 'displayName': 'Charles'},
'to': [{'objectType':'group', 'alias':'@public'}],
},
}
COMMENT_GP = { # Google+
'kind': 'plus#comment',
'verb': 'post',
'id': 'zyx.888',
'actor': {'id': '777', 'displayName': 'Eve'},
'object': {'content': 'my content'},
'inReplyTo': [{'url': 'http://post/url'}],
}
COMMENT_AS = { # ActivityStreams
'kind': 'plus#comment',
'verb': 'post',
'id': tag_uri('zyx.888'),
'url': 'http://post/url#zyx%23888',
'author': {'id': tag_uri('777'), 'displayName': 'Eve'},
'content': 'my content',
'object': {'content': 'my content'},
'inReplyTo': [{'url': 'http://post/url'}],
'to': [{'objectType':'group', 'alias':'@public'}],
}
PLUSONER = { # Google+
'kind': 'plus#person',
'id': '222',
'displayName': 'Alice',
'url': 'https://profiles.google.com/alice',
'image': {'url': 'https://alice/picture'},
}
LIKE = { # ActivityStreams
'id': tag_uri('001_liked_by_222'),
'url': 'http://plus.google.com/001#liked-by-222',
'objectType': 'activity',
'verb': 'like',
'object': {'url': 'http://plus.google.com/001'},
'author': {
'kind': 'plus#person',
'id': tag_uri('222'),
'displayName': 'Alice',
'url': 'https://profiles.google.com/alice',
'image': {'url': 'https://alice/picture'},
},
}
RESHARER = { # Google+
'kind': 'plus#person',
'id': '444',
'displayName': 'Bob',
'url': 'https://plus.google.com/bob',
'image': {'url': 'https://bob/picture'},
}
SHARE = { # ActivityStreams
'id': tag_uri('001_shared_by_444'),
'url': 'http://plus.google.com/001#shared-by-444',
'objectType': 'activity',
'verb': 'share',
'object': {'url': 'http://plus.google.com/001'},
'author': {
'kind': 'plus#person',
'id': tag_uri('444'),
'displayName': 'Bob',
'url': 'https://plus.google.com/bob',
'image': {'url': 'https://bob/picture'},
},
}
ACTIVITY_GP_EXTRAS = copy.deepcopy(ACTIVITY_GP) # Google+
ACTIVITY_GP_EXTRAS['object'].update({
'replies': {'totalItems': 1},
'plusoners': {'totalItems': 1},
'resharers': {'totalItems': 1},
})
ACTIVITY_AS_EXTRAS = copy.deepcopy(ACTIVITY_AS) # ActivityStreams
ACTIVITY_AS_EXTRAS['object'].update({
'replies': {'totalItems': 1, 'items': [COMMENT_AS]},
'plusoners': {'totalItems': 1},
'resharers': {'totalItems': 1},
'tags': [LIKE, SHARE],
})
# HTML from http://plus.google.com/
HTML_ACTIVITY_GP = [
["..."],
[1002, None, None, None, None, [1001, "z13gjrz4ymeldtd5f04chnrixnvpjjqy42o"],
{"33558957" : [
"",
"",
"",
"David Barrett",
"",
1440425513401,
None,
[], # first comment (if any) would be here
"z13gjrz4ymeldtd5f04chnrixnvpjjqy42o",
"",
"a:ext:client.sharebox.108380595987.apps.googleusercontent.com",
[None],
[None],
"",
None,
[None],
"105815303293125791402",
[None],
"https://lh4.googleusercontent.com/-OvNQMFbbks0/AAAAAAAAAAI/AAAAAAAAOuo/YXnsx5bfWxo/photo.jpg",
None,
u"Hi! It’s been a while since I’ve written because we’ve been hard at work, but I’m very happy to take the wraps off our latest feature (or really, series of features): Realtime Expense Reports. I know I’ve been hyping this up for a long time, and you’re…",
"+DavidBarrettQuinthar/posts/VefFHLMoCqV",
0,
0,
"./105815303293125791402",
[None], None,
[ # location
41.230564,
9.172682,
"(41.2305630, 9.1726818)",
"",
None,
"/maps/api/staticmap?center=41.230564,9.172682&zoom=14&size=300x220&sensor=false&markers=41.230564,9.172682&client=google-buzz&signature=GDLZ49Fe0-uc4BoVt-e7p-OmZ50%3D",
["1152921504606846977", "-7273273746059208260"],
"",
"https://maps.google.com?ll=41.230564,9.172682&q=41.230564,9.172682",
None,
"https://maps-api-ssl.google.com/maps/api/staticmap?center=41.230564,9.172682&zoom=15&size=100x100&sensor=false&client=google-buzz&signature=Doqggt3WB5BQzKieZRSA2VwHRXM%3D",
0, None, 412305629, 91726818, None, None, [None]
],
"", 0, 0, 0, 1, None, 0, 1, None, 0,
1440425513401,
] + [None] * 58 + [ # collapsed for brevity
[
[335, 0],
"http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/",
None, None, None, None,
[
1440425513266,
"http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/",
"http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/",
"http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/",
[None], [None], [None]
],
"http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/",
{
"39748951" : [
"http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/",
"http://0.gravatar.com/blavatar/ee4c59993abdb971416349dee59ca9d1?s=200&ts=1440425508",
"Realtime Expense Reports are Here! (And so much more...)",
"Hi! It's been a while since I've written because we've been hard at work, but I'm very happy to take the wraps off our latest feature (or really, series of features): Realtime Expense Reports. I kn...",
None,
["//lh6.googleusercontent.com/proxy/IvWQIbjjvIWCUhTACtHDQRysGY2NYqf-A6XWPOGMLdr4W5BHFjIeQw4ZOTDrkDA2oc1kKfCgkV7gT-iQIFvOaeUhtfEf_3BPBTNsmesTGSawvh5kednyc-Oi8MPmpdRZ_SE2=w120-h120",
120, 120, None, None, None, None, 120,
[2,
"https://lh6.googleusercontent.com/proxy/IvWQIbjjvIWCUhTACtHDQRysGY2NYqf-A6XWPOGMLdr4W5BHFjIeQw4ZOTDrkDA2oc1kKfCgkV7gT-iQIFvOaeUhtfEf_3BPBTNsmesTGSawvh5kednyc-Oi8MPmpdRZ_SE2=w800-h800"]],
"//s2.googleusercontent.com/s2/favicons?domain=blog.expensify.com",
[[[350, 335, 0], "http://quinthar.com/",
{"41007156" : ["http://quinthar.com/", None, None, None, None, None,
None, [None], None, None, [None]]}]],
None, None, [None], "blog.expensify.com",] + [None] * 172 + [# collapsed for brevity
[[339, 338, 336, 335, 0],
"http://0.gravatar.com/blavatar/ee4c59993abdb971416349dee59ca9d1?s=200&ts=1440425508",
{"40265033" : [
"http://0.gravatar.com/blavatar/ee4c59993abdb971416349dee59ca9d1?s=200&ts=1440425508",
"http://0.gravatar.com/blavatar/ee4c59993abdb971416349dee59ca9d1?s=200&ts=1440425508",
None, None, None,
["//lh6.googleusercontent.com/proxy/IvWQIbjjvIWCUhTACtHDQRysGY2NYqf-A6XWPOGMLdr4W5BHFjIeQw4ZOTDrkDA2oc1kKfCgkV7gT-iQIFvOaeUhtfEf_3BPBTNsmesTGSawvh5kednyc-Oi8MPmpdRZ_SE2=w120-h120",
120, 120, None, None, None, None, 120,
[2,
"https://lh6.googleusercontent.com/proxy/IvWQIbjjvIWCUhTACtHDQRysGY2NYqf-A6XWPOGMLdr4W5BHFjIeQw4ZOTDrkDA2oc1kKfCgkV7gT-iQIFvOaeUhtfEf_3BPBTNsmesTGSawvh5kednyc-Oi8MPmpdRZ_SE2=w800-h800"]],
# ...
]}]]}], # ...
]}],
# second element is non-post, under 7 items long
[1002, None, None],
# third element is non-post, item 6 is empty
[1002, None, None, None, None, None, {}],
] # ...
HTML_ACTIVITIES_GP_HEADER = """
<!DOCTYPE html><html lang="en" dir="ltr" ><head><meta name="referrer" content="origin"><base href="https://plus.google.com/"><style>
...
</style></head><body class="Td lj"><input type="text" name="hist_state" id="hist_state" style="display:none;"><iframe id="hist_frame" name="hist_frame1623222153" class="ss" tabindex="-1"></iframe><script>window['OZ_wizstart'] && window['OZ_wizstart']()</script>
<script>AF_initDataCallback({key: '199', isError: false , hash: '13', data:[2,0]
});</script><script>AF_initDataCallback({key: '161', isError: false , hash: '14', data:["os.con",[[]
,"these few lines test the code that collapses commas",
[,1,1,,,,20,,"social.google.com",[,]
,,,2,,,0,,15,,[[1002,2],"..."]],,[,],,,"""
HTML_ACTIVITIES_GP_FOOTER = """
]
]
});</script></body></html>"""
HTML_ACTIVITY_AS = { # Google+
'id': tag_uri('z13gjrz4ymeldtd5f04chnrixnvpjjqy42o'),
'url': 'https://plus.google.com/+DavidBarrettQuinthar/posts/VefFHLMoCqV',
'actor': {
'id': tag_uri('105815303293125791402'),
'url': 'https://plus.google.com/105815303293125791402',
'objectType': 'person',
'displayName': 'David Barrett',
'image': {
'url': 'https://lh4.googleusercontent.com/-OvNQMFbbks0/AAAAAAAAAAI/AAAAAAAAOuo/YXnsx5bfWxo/photo.jpg',
},
},
'verb': 'post',
'object': {
'id': tag_uri('z13gjrz4ymeldtd5f04chnrixnvpjjqy42o'),
'url': 'https://plus.google.com/+DavidBarrettQuinthar/posts/VefFHLMoCqV',
'objectType': 'note',
'published': '2015-08-24T14:11:53Z',
'updated': '2015-08-24T14:11:53Z',
'content': u'Hi! It’s been a while since I’ve written because we’ve been hard at work, but I’m very happy to take the wraps off our latest feature (or really, series of features): Realtime Expense Reports. I know I’ve been hyping this up for a long time, and you’re…',
'attachments': [
{
'objectType': 'article',
'displayName': 'Realtime Expense Reports are Here! (And so much more...)',
'content': "Hi! It's been a while since I've written because we've been hard at work, but I'm very happy to take the wraps off our latest feature (or really, series of features): Realtime Expense Reports. I kn...",
'url': 'http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/',
'image': {
'url': 'http://0.gravatar.com/blavatar/ee4c59993abdb971416349dee59ca9d1?s=200&ts=1440425508',
}
}
]
},
'location': {
'displayName': '(41.2305630, 9.1726818)',
'url': 'https://maps.google.com?ll=41.230564,9.172682&q=41.230564,9.172682',
'latitude': 41.230564,
'longitude': 9.172682,
},
# 'access': {
# 'kind': 'plus#acl',
# 'description': 'Public',
# 'items': [
# {
# 'type': 'public'
# }
# ]
# }
}
CREDS_JSON = json.dumps({
'access_token': 'my token',
'client_id': appengine_config.GOOGLE_CLIENT_ID,
'client_secret': appengine_config.GOOGLE_CLIENT_SECRET,
'refresh_token': 'my refresh token',
'token_expiry': '',
'token_uri': '',
'user_agent': '',
'invalid': '',
})
class GooglePlusTest(testutil.HandlerTest):
  def setUp(self):
    """Create a GooglePlusAuth entity with canned creds and a GooglePlus source."""
    super(GooglePlusTest, self).setUp()
    self.auth_entity = oauth_googleplus.GooglePlusAuth(
      id='my_string_id',
      user_json=json.dumps({
          'displayName': 'Bob',
          }),
      creds_json=CREDS_JSON)
    self.googleplus = googleplus.GooglePlus(auth_entity=self.auth_entity)
  def tearDown(self):
    # reset the module-level mock API service so it can't leak between tests
    oauth_googleplus.json_service = None
  def init(self, **kwargs):
    """Sets up the API service from the checked-in discovery document.

    Pass a requestBuilder or http kwarg to inject expected HTTP requests and
    responses.
    """
    oauth_googleplus.json_service = discovery.build_from_document(
      DISCOVERY_DOC, **kwargs)
def test_get_comment(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.comments.get': (None, json.dumps(COMMENT_GP)) # None means 200 OK
}))
self.assert_equals(COMMENT_AS, self.googleplus.get_comment('234'))
def test_get_activity(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.get': (None, json.dumps(ACTIVITY_GP))
}))
self.assert_equals([ACTIVITY_AS],
self.googleplus.get_activities(activity_id='234'))
  def test_get_activities_no_extras_to_fetch(self):
    """No extra comment/+1/reshare fetches when totalItems says there are none."""
    self.init(requestBuilder=http.RequestMockBuilder({
          'plus.activities.list': (None, json.dumps({
                'items': [ACTIVITY_GP, ACTIVITY_GP],
                })),
          },
        # ACTIVITY_GP doesn't say there are any comments, +1s, or shares (via
        # totalItems), so we shouldn't ask for them.
        check_unexpected=True))

    got = self.googleplus.get_activities(fetch_replies=True, fetch_likes=True,
                                         fetch_shares=True)
    self.assert_equals([ACTIVITY_AS, ACTIVITY_AS], got)
  def test_get_activities_fetch_extras(self):
    """Comments, +1s, and reshares are fetched in a batch request and cached."""
    self.init()

    # Generate minimal fake responses for each request in the batch.
    #
    # Test with multiple activities to cover the bug described in
    # https://github.com/snarfed/bridgy/issues/22#issuecomment-56329848 :
    # util.CacheDict.get_multi() didn't originally handle generator args.
    batch = MIMEMultipart()
    for i, item in enumerate((COMMENT_GP, PLUSONER, RESHARER) * 2):
      msg = Message()
      msg.set_payload('HTTP/1.1 200 OK\n\r\n\r\n' + json.dumps({'items': [item]}))
      msg['Content-ID'] = '<response-abc+%d>' % (i + 1)
      batch.attach(msg)

    # as_string() must be called before get_boundary() to generate the
    # boundaries between parts, but can't be called again, so we capture the
    # result.
    batch_str = batch.as_string()

    gpe_1 = ACTIVITY_GP_EXTRAS
    gpe_2 = copy.deepcopy(gpe_1)
    gpe_2['id'] = '002'
    http_seq = http.HttpMockSequence(
      [({'status': '200'}, json.dumps({'items': [gpe_1, gpe_2]})),
       ({'status': '200',
         'content-type': 'multipart/mixed; boundary="%s"' % batch.get_boundary()},
        batch_str),
       ({'status': '200'}, json.dumps({'items': [gpe_1, gpe_2]})),
       ])
    self.auth_entity.http = lambda: http_seq

    ase_1 = ACTIVITY_AS_EXTRAS
    ase_2 = copy.deepcopy(ase_1)
    ase_2['id'] = tag_uri('002')
    ase_2['object']['tags'][0]['id'] = tag_uri('002_liked_by_222')
    ase_2['object']['tags'][1]['id'] = tag_uri('002_shared_by_444')
    cache = util.CacheDict()
    self.assert_equals([ase_1, ase_2], self.googleplus.get_activities(
        fetch_replies=True, fetch_likes=True, fetch_shares=True, cache=cache))
    # per-activity +1 and reshare counts should now be cached
    for id in '001', '002':
      for prefix in 'AGL ', 'AGS ':
        self.assertEquals(1, cache[prefix + id])

    # no new extras, so another request won't fill them in
    as_1 = copy.deepcopy(ACTIVITY_AS)
    for field in 'replies', 'plusoners', 'resharers':
      as_1['object'][field] = {'totalItems': 1}
    as_2 = copy.deepcopy(as_1)
    as_2['id'] = tag_uri('002')
    self.assert_equals([as_1, as_2], self.googleplus.get_activities(
        fetch_replies=True, fetch_likes=True, fetch_shares=True, cache=cache))
def test_get_activities_search(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.search': (None, json.dumps({'items': [ACTIVITY_GP]})),
}))
self.assert_equals([ACTIVITY_AS],
self.googleplus.get_activities(search_query='qwert'))
# TODO: resurrect?
# def test_get_activities_request_etag(self):
# self.init()
# http_seq = http.HttpMockSequence(
# [({'status': '200'}, json.dumps({'items': [item]}))])
# self.auth_entity.http = lambda: http_seq
# resp = self.googleplus.get_activities_response(
# fetch_replies=True, fetch_likes=True, fetch_shares=True)
# self.assertEquals('"my etag"', resp['etag'])
  def test_get_activities_response_etag(self):
    """The ETag from the API response is surfaced in the activities response."""
    self.init(requestBuilder=http.RequestMockBuilder({
          'plus.activities.list': (httplib2.Response({'status': 200}),
                                   json.dumps({'etag': '"my etag"'})),
          }))
    resp = self.googleplus.get_activities_response(
      fetch_replies=True, fetch_likes=True, fetch_shares=True)
    self.assertEquals('"my etag"', resp['etag'])
  def test_get_activities_304_not_modified(self):
    """Requests with matching ETags return 304 Not Modified."""
    self.init(requestBuilder=http.RequestMockBuilder({
          'plus.activities.list': (httplib2.Response({'status': 304}), '{}'),
          }))
    # a 304 yields no activities rather than raising
    self.assert_equals([], self.googleplus.get_activities(
        fetch_replies=True, fetch_likes=True, fetch_shares=True))
def test_postprocess_actor_url_field(self):
  """postprocess_actor promotes the first urls entry to a top-level url."""
  pa = self.googleplus.postprocess_actor

  # Without a urls field the actor passes through unchanged.
  self.assertEqual({'foo': 'bar'}, pa({'foo': 'bar'}))

  # A single urls entry is copied into url.
  expected = {'url': 'x',
              'urls': [{'value': 'x'}]}
  self.assertEqual(expected, pa({'urls': [{'value': 'x'}]}))

  # With several entries, the first one wins.
  expected = {'url': 'x',
              'urls': [{'value': 'x'}, {'value': 'y'}]}
  self.assertEqual(expected, pa({'urls': [{'value': 'x'}, {'value': 'y'}]}))

  # check alias
  self.assertEquals(self.googleplus.postprocess_actor,
                    self.googleplus.user_to_actor)
def test_get_actor_minimal(self):
  # Presumably the default fixture's user_json holds only a displayName,
  # which passes through untouched — set up outside this view; confirm in init().
  self.assert_equals({'displayName': 'Bob'}, self.googleplus.get_actor())
def test_get_actor(self):
  # Raw Google+ profile as stored on the auth entity.
  user = {
    'id': '222',
    'displayName': 'Alice',
    'urls': [{'value': 'https://profiles.google.com/alice'}],
  }
  self.auth_entity.user_json = json.dumps(user)
  # get_actor should convert the id to a tag URI and promote the first
  # urls entry to a top-level url.
  user.update({
    'id': tag_uri('222'),
    'url': 'https://profiles.google.com/alice',
  })
  self.assert_equals(user, self.googleplus.get_actor())
def test_get_actor_other_user(self):
  """Fetching any user id other than the authed user is unsupported."""
  self.assertRaises(NotImplementedError, self.googleplus.get_actor, 'other')
def test_get_activities_extra_fetches_fail(self):
  """Sometimes the extras fetches return errors. Ignore that."""
  self.init()
  # Build a multipart batch response where every part is an HTTP 500,
  # simulating failures of the comments/+1s/reshares batch fetches.
  batch = MIMEMultipart()
  for i in range(3):
    msg = Message()
    msg.set_payload('HTTP/1.1 500 Foo Bar\n\r\n\r\n')
    msg['Content-ID'] = '<response-abc+%d>' % (i + 1)
    batch.attach(msg)
  # as_string() must be called before get_boundary() to generate the
  # boundaries between parts, but can't be called again, so we capture the
  # result.
  batch_str = batch.as_string()
  # First response: the activity list. Second: the failing batch request.
  self.auth_entity.http = lambda: http.HttpMockSequence(
    [({'status': '200'}, json.dumps({'items': [ACTIVITY_GP_EXTRAS]})),
     ({'status': '200',
       'content-type': 'multipart/mixed; boundary="%s"' % batch.get_boundary()},
      batch_str),
     ])
  cache = util.CacheDict()
  self.assert_equals([ACTIVITY_AS], self.googleplus.get_activities(
    fetch_replies=True, fetch_likes=True, fetch_shares=True, cache=cache))
  # Failed extras fetches must not leave entries in the cache.
  for prefix in 'AGC ', 'AGL ', 'AGS ':
    self.assertNotIn(prefix + '001', cache)
def test_html_to_activities(self):
  """A single HTML-embedded Google+ activity converts to ActivityStreams."""
  html = ''.join([HTML_ACTIVITIES_GP_HEADER,
                  json.dumps(HTML_ACTIVITY_GP),
                  HTML_ACTIVITIES_GP_FOOTER])
  self.assert_equals([HTML_ACTIVITY_AS], self.googleplus.html_to_activities(html))
def test_html_to_activities_plusoned(self):
  # Inject a +1 record at slot 69 of the activity payload. The nested
  # indices mirror Google+'s undocumented HTML-embedded JSON format.
  # NOTE(review): `dict.values()[0]` implies Python 2 (dict_values is not
  # subscriptable on Python 3) — confirm target interpreter.
  html_gp = copy.deepcopy(HTML_ACTIVITY_GP)
  html_gp[1][6].values()[0][69] = [
    202,
    [['Billy Bob',
      '1056789',
      1,
      1,
      'https://lh3.googleusercontent.com/billybob.jpg',
      'https://plus.google.com/+BillyBob',
      'male',
      ]],
    # ...
    ]
  # The +1 should surface as a 'like' activity attributed to Billy Bob.
  expected = copy.deepcopy(HTML_ACTIVITY_AS)
  expected.update({
    'verb': 'like',
    'actor': {
      'id': tag_uri('1056789'),
      'url': 'https://plus.google.com/+BillyBob',
      'objectType': 'person',
      'displayName': 'Billy Bob',
      'image': {'url': 'https://lh3.googleusercontent.com/billybob.jpg'},
    },
  })
  html = (HTML_ACTIVITIES_GP_HEADER + json.dumps(html_gp) +
          HTML_ACTIVITIES_GP_FOOTER)
  self.assert_equals([expected], self.googleplus.html_to_activities(html))
def test_html_to_activities_similar_to_plusoned(self):
  # Payloads whose slot 69 is empty or degenerate (no actual +1 data)
  # must not be misinterpreted as 'like' activities.
  html_gp = copy.deepcopy(HTML_ACTIVITY_GP)
  for data_at_69 in None, [], [None], [None, None], [None, [None]]:
    html_gp[1][6].values()[0][69] = data_at_69
    html = (HTML_ACTIVITIES_GP_HEADER + json.dumps(html_gp) +
            HTML_ACTIVITIES_GP_FOOTER)
    self.assert_equals([HTML_ACTIVITY_AS],
                       self.googleplus.html_to_activities(html))
def test_html_to_activities_missing_data(self):
  # Empty input should yield no activities rather than raising.
  self.assert_equals([], self.googleplus.html_to_activities(''))
| [
"git@ryanb.org"
] | git@ryanb.org |
b9299ec6d17a4f7f9476a364ca7ba6aac57cba1c | 39debb4a11094caffa06e0c026cc40fe3e298c6c | /staff/staff_login_interface.py | 4685702e395e0bc6a05bc55bee0c32a393a151bd | [] | no_license | sumanbashyal007/Clinic_management_system | 66204c5628a4dd8085a73c76adfb743ee7f3635d | 0a318697ad04fc61bfe289be7490d01e393a9a7a | refs/heads/master | 2022-12-04T22:22:31.640301 | 2020-08-27T08:21:39 | 2020-08-27T08:21:39 | 290,690,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,260 | py | # ====================================== Importing Necessary photos ========================================#
from tkinter import *
from tkinter import messagebox
from PIL import Image, ImageTk
from admin.connection import MyDatabase
from staff.staff_registration import Staffregistrationwindow
from staff.staff_interface import Staff_interface
class Staffwindow:
    """Staff login window.

    Builds the Tk login form, validates the entered credentials against the
    staff records fetched via MyDatabase, and routes to the registration
    page, the staff dashboard, or back to the first window.

    NOTE(review): widget image paths are absolute Windows paths — this only
    runs on that one machine; consider relative paths.
    """

    # ====================================== Generating Windows ========================================#
    def __init__(self):
        # Root window: fixed-size, non-resizable login screen.
        self.wn=Tk()
        self.wn.title("Staff Login")
        self.wn.geometry("1370x735+0+0")
        self.wn.resizable(False,False)
        self.my_db = MyDatabase()
        # ====================================== Necessary Photos ========================================#
        self.title_photo = PhotoImage(file="C:\\Users\\Aashrit\\Desktop\\Clinic_management_system\\pictures\\ad.png")
        self.title_photo_lable = Label(self.wn, image=self.title_photo)
        # Keep a reference on the widget so the image is not garbage-collected.
        self.title_photo_lable.image = self.title_photo
        self.title_photo_lable.place(x=0, y=0)
        self.title01_photo = PhotoImage(file="C:\\Users\\Aashrit\\Desktop\\Clinic_management_system\\pictures\\nurse.png")
        self.title01_photo_lable = Label(self.wn, image=self.title01_photo,bg="white")
        self.title01_photo_lable.image = self.title01_photo
        self.title01_photo_lable.place(x=355, y=177)
        self.title02_photo = PhotoImage(file="C:\\Users\\Aashrit\\Desktop\\Clinic_management_system\\pictures\\username_logo.png")
        self.title02_photo_lable = Label(self.wn, image=self.title02_photo)
        self.title02_photo_lable.image = self.title02_photo
        self.title03_photo = PhotoImage(file="C:\\Users\\Aashrit\\Desktop\\Clinic_management_system\\pictures\\password.png")
        self.title03_photo_lable = Label(self.wn, image=self.title03_photo)
        self.title03_photo_lable.image = self.title03_photo
        # ====================================== All Frames ========================================#
        self.staff_frame=Frame(self.wn,bg="white")
        self.staff_frame.place(x=683, y=256)
        self.staff_frame1 = Frame(self.wn, bg="white")
        self.staff_frame1.place(x=683, y=177)
        self.staff_frame2 = Frame(self.wn, bg="white")
        self.staff_frame2.place(x=824, y=177)
        # ====================================== All Labels ========================================#
        self.lb_heading = Label(self.staff_frame1, text="Staff",font=('Impact',37,'bold','underline'),justify="center", fg='red',bg="white")
        self.lb_heading.grid(row=0, column=0,columnspan=1,padx=40,pady=10)
        self.lb_heading2 = Label(self.staff_frame2, text="Login",font=('Impact',37,'bold','underline'),justify="center", fg='blue',bg="white")
        self.lb_heading2.grid(row=0, column=1,columnspan=1,padx=22,pady=10)
        self.lb_username = Label(self.staff_frame, text="Username:", bg="white",fg="Blue", font=("cambria", 15, 'bold','underline'),image=self.title02_photo,compound=LEFT)
        self.lb_username.grid(row=5, column=0, padx=10, pady=5)
        self.lb_password = Label(self.staff_frame, text="Password:", bg="white", fg="Blue", font=("cambria", 15, 'bold','underline'),image=self.title03_photo,compound=LEFT)
        self.lb_password.grid(row=10, column=0, padx=10, pady=5)
        # ====================================== All Entries ========================================#
        self.ent_username = Entry(self.staff_frame, bg="white", fg="black", font=("arial", 15, "bold"))
        self.ent_username.grid(row=6, column=0,padx=40, pady=5)
        # Password entry masks input with '*'.
        self.ent_pass = Entry(self.staff_frame, bg="white", fg="black", font=("arial", 15, "bold"), show="*")
        self.ent_pass.grid(row=11, column=0, padx=40, pady=5)
        self.butn_forget = Button(self.staff_frame, text="Forgot your password?", fg="#000080", bg="white",font=("Arial", 10, "underline"),cursor="hand2",command=self.forgotpassword, relief=FLAT)
        self.butn_forget.grid(row=14, columnspan=3, pady=5)
        # ====================================== Buttons Required ========================================#
        # NOTE(review): the "Remember me" checkbox has no variable bound, so
        # its state is never read.
        self.ch_btn = Checkbutton(self.staff_frame, text="Remember me", bg="white", fg="Black",font=("Arial MT", 10, "bold"),cursor="hand2")
        self.ch_btn.grid(row=18, columnspan=2, padx=5, pady=2)
        self.loginbtn_photo = PhotoImage(file="C:\\Users\\Aashrit\\Desktop\\Clinic_management_system\\pictures\\loginbutn.png")
        self.loginbtn_photo_button = Button(self.staff_frame, image=self.loginbtn_photo,bg='white', fg="#3498eb", activebackground="#73C2FB",cursor="hand2",command=self.checking_credentials, font=("bold", 13), height=39, width=120,relief=RAISED)
        self.loginbtn_photo_button.image = self.loginbtn_photo
        self.loginbtn_photo_button.grid(row=20, columnspan=2, padx=0, pady=6)
        self.butn_dont_have_an_account = Button(self.staff_frame, text="Don't have an account? | Sign Up", fg="#000080", bg="white", font=("Arial", 10, "underline"),command=self.open_staffregpage,cursor="hand2", relief=FLAT)
        self.butn_dont_have_an_account.grid(row=22, columnspan=3, pady=5)
        self.show_menu()
        self.wn.mainloop()

    # ====================================== Open Staff Registration Page ========================================#
    def open_staffregpage(self):
        """Close the login window and open the staff registration window."""
        self.wn.destroy()
        Staffregistrationwindow()

    # ====================================== Opening Staff Dashboard ========================================#
    def open_staff_dashboard(self,usrlgn):
        """Close the login window and open the dashboard for user `usrlgn`."""
        self.wn.destroy()
        Staff_interface(usrlgn)

    # ====================================== Checking Credentials ========================================#
    def checking_credentials(self):
        """Validate the entered username/password against the staff table.

        Expects each fetched row as (username, password, display_name,
        approved_flag, ...). Only users whose approval flag is "yes" may
        log in. NOTE(review): passwords are compared in plain text.
        """
        username=self.ent_username.get().lower()
        password=self.ent_pass.get().lower()
        if len(username)==0 or len(password)==0:
            messagebox.showerror("Missing data entry","You can't leave any of the sections empty.")
        else:
            values=self.my_db.fetchingdata_staff()
            # Build a lowercase username list for a case-insensitive lookup.
            username_mylist = []
            for i in values:
                data = (i[0]).lower()
                username_mylist.append(data)
            if username in username_mylist:
                required_index=username_mylist.index(username)
                name_logged_in_user=values[required_index][0]
                if (username == values[required_index][0].lower() and password == values[required_index][1].lower()):
                    # Column 3 is the admin-approval flag.
                    if values[required_index][3] == "yes" or values[required_index][3] == "Yes":
                        messagebox.showinfo("Login Successful",f"Welcome Mr {values[required_index][2]}")
                        self.open_staff_dashboard(name_logged_in_user)
                    else:
                        messagebox.showerror("User not authenticated","Your registration hasn't been\n approved by the admin yet.")
                else:
                    messagebox.showerror("Login Credintials didn't matched","The given username and password didn't matched")
            else:
                messagebox.showerror("User Doesn't Exist","Sorry you aren't registered yet")

    # =================================== MENU Button ===================================#
    def show_menu(self):
        """Attach a menubar with a single "<-- Back" cascade that logs out."""
        my_menu = Menu(self.wn)
        self.wn.config(menu=my_menu)
        log_out = Menu(my_menu)
        my_menu.add_cascade(label="<-- Back", menu=log_out)
        log_out.add_cascade(label="<-- Back", command=self.logout)

    # =================================== Logging out ===================================#
    def logout(self):
        """Close this window and return to the application's first window."""
        self.wn.destroy()
        # Imported here to avoid a circular import at module load time.
        from interface.first_window import Firstwindow
        Firstwindow()

    # =================================== Forgot Password ==============================#
    def forgotpassword(self):
        """Password recovery is not implemented; show an informational dialog."""
        messagebox.showinfo("Service Unavailable","The system is in its inital phase."
                                                  "\n Service regarding credintials shall"
                                                  "\n be provided very soon.\n"
                                                  "Please consult admin desk for more info.")
"Please consult admin desk for more info.") | [
"suman.bashyal007@gmail.com"
] | suman.bashyal007@gmail.com |
2b8b167f852914d1fd4dbd941c92ebeffbc7c63a | de033d5aba647555fa4fd4844df9b563cfc1e2f4 | /py/elfs/debuginfo.py | b699b2d0abeab20dad29e6c8fe6e2f91ed3f87f3 | [
"Apache-2.0"
] | permissive | eth-sri/debin | 16fc0499901149bdc9818f268178569469f197df | 715771c1e1468eaafbb599d8bf81a19b5b2e22d2 | refs/heads/master | 2022-08-14T12:31:13.648564 | 2022-05-20T15:12:01 | 2022-05-20T15:12:01 | 160,524,006 | 392 | 64 | Apache-2.0 | 2022-06-22T05:14:48 | 2018-12-05T13:40:37 | Python | UTF-8 | Python | false | false | 33,661 | py | import traceback
import sys
import ctypes
from common import utils
from elfs.framebase import FrameBase
from elftools.dwarf.callframe import ZERO
from elftools.dwarf.locationlists import LocationEntry
from elftools.elf.elffile import ELFFile
from elements.regs import GivReg
from common.constants import UNKNOWN_LABEL
from common.constants import ENUM_DW_FORM_exprloc, ENUM_DW_TAG, ENUM_DW_AT, ENUM_DW_FORM
from common.constants import ENUM_ABBREV_CODE, ENUM_DW_CHILDREN, ENUM_DW_AT_language
from common.constants import POINTER, ENUM, ARRAY, UNION, STRUCT, VOID
from common.constants import SHORT, UNSIGNED_SHORT, CHAR, UNSIGNED_CHAR, LONG_LONG
from common.constants import UNSIGNED_LONG_LONG, LONG, UNSIGNED_LONG
from common.constants import INT, UNSIGNED_INT, BOOL
from common.constants import TEXT, RODATA, DATA, BSS, MAX_UPPER_BOUND
from common.constants import SYMTAB, STRTAB
from common.utils import decode_sleb128, decode_uleb128, decode_address, encode_address
class DebugInfo:
def __init__(self, *args, **kwargs):
    # `binary`: the project's wrapper around the stripped target binary.
    # `debug_elffile`: open file object of the matching debug-info ELF.
    self.binary = kwargs['binary']
    self.dies = dict()  # DIE offset -> DIE; populated in binary_train_info()
    self.debug_elffile = ELFFile(kwargs['debug_elffile'])
    # NOTE(review): when the debug file has no DWARF info, dwarf_info /
    # location_lists are never set, and init_call_frames() below would
    # raise AttributeError — presumably debug builds always carry DWARF.
    if self.debug_elffile.has_dwarf_info():
        self.dwarf_info = self.debug_elffile.get_dwarf_info()
        self.location_lists = self.dwarf_info.location_lists()
    self.symtab = self.debug_elffile.get_section_by_name(SYMTAB)
    self.strtab = self.debug_elffile.get_section_by_name(STRTAB)
    self.call_frames = []
    self.init_call_frames()
def init_call_frames(self):
    """Collect CFA (canonical frame address) rows from the stripped
    binary's .eh_frame and the debug file's .debug_frame into a list of
    FrameBase entries sorted by pc, with high_pc derived from the next
    entry's low_pc."""
    cfi_entries = []
    if self.binary.elffile.get_dwarf_info().has_EH_CFI():
        cfi_entries += self.binary.elffile.get_dwarf_info().EH_CFI_entries()
    if self.dwarf_info.has_CFI():
        cfi_entries += self.dwarf_info.CFI_entries()
    call_frames = []
    for entry in cfi_entries:
        # ZERO entries are .eh_frame terminators; they carry no table.
        if not isinstance(entry, ZERO):
            for row in entry.get_decoded().table:
                cfa = row['cfa']
                pc = row['pc']
                # Only keep register+offset CFAs whose register maps onto
                # the configured architecture register names.
                if cfa.reg is not None and cfa.offset is not None and cfa.reg in self.binary.config.REG_MAPPING:
                    call_frames.append(FrameBase(base_register=self.binary.config.REG_MAPPING[cfa.reg], offset=cfa.offset, low_pc=pc, high_pc=None))
    call_frames = sorted(call_frames, key=lambda f: f.low_pc)
    # Each frame is valid from its own pc up to just before the next one.
    for i, frame in enumerate(call_frames):
        if i < len(call_frames) - 1:
            frame.high_pc = call_frames[i + 1].low_pc - 1
    if len(call_frames) > 0:
        call_frames[-1].high_pc = self.binary.config.HIGH_PC
    self.call_frames = call_frames
def get_pointer_ttype_die(self, die):
    """Return the DIE of the pointed-to type reachable from `die`.

    Walks DW_AT_type links down to the first DW_TAG_pointer_type and
    returns that pointer's target type DIE; follows abstract_origin /
    specification links when there is no direct type. Returns None when
    the chain cannot be resolved.
    """
    die_type_offset = die.attributes.get('DW_AT_type', None)
    cu_offset = die.cu.cu_offset
    die_type = None
    if die_type_offset is not None and die_type_offset.value + cu_offset in self.dies:
        die_type = self.dies[die_type_offset.value + cu_offset]
    else:
        # No direct DW_AT_type: retry on the origin/specification DIE.
        abstract_origin_attr = die.attributes.get('DW_AT_abstract_origin', None)
        specification_attr = die.attributes.get('DW_AT_specification', None)
        if abstract_origin_attr is not None:
            origin_offset = abstract_origin_attr.value + die.cu.cu_offset
            return self.get_pointer_ttype_die(self.dies[origin_offset])
        elif specification_attr is not None:
            specification_offset = specification_attr.value + die.cu.cu_offset
            return self.get_pointer_ttype_die(self.dies[specification_offset])
    if die_type is None:
        return None
    else:
        if die.tag == 'DW_TAG_pointer_type':
            # `die` is the pointer itself; its DW_AT_type is the pointee.
            return die_type
        else:
            # Keep descending the typedef/qualifier chain.
            return self.get_pointer_ttype_die(die_type)
def get_ttype_name(self, die):
    """Map a type DIE to one of the coarse type labels used for training
    (POINTER/ENUM/ARRAY/UNION/STRUCT, the base-type constants, or VOID)."""
    if die.tag == 'DW_TAG_pointer_type':
        return POINTER
    elif die.tag == 'DW_TAG_enumeration_type':
        return ENUM
    elif die.tag == 'DW_TAG_array_type':
        return ARRAY
    elif die.tag == 'DW_TAG_union_type':
        return UNION
    elif die.tag in ('DW_TAG_structure_type', 'DW_TAG_class_type'):
        return STRUCT
    elif die.tag == 'DW_TAG_base_type':
        type_name_attr = die.attributes.get('DW_AT_name', None)
        if type_name_attr is None:
            return VOID
        else:
            # Classify by substrings of the DWARF type name; check order
            # matters ('short'/'char' before 'long'/'int' fallbacks).
            type_name = type_name_attr.value.decode('ascii')
            if 'short' in type_name:
                if 'unsigned' in type_name:
                    return UNSIGNED_SHORT
                else:
                    return SHORT
            elif 'char' in type_name:
                if 'unsigned' in type_name:
                    return UNSIGNED_CHAR
                else:
                    return CHAR
            elif type_name.count('long') == 2:
                if 'unsigned' in type_name:
                    return UNSIGNED_LONG_LONG
                else:
                    return LONG_LONG
            elif type_name.count('long') == 1:
                if 'unsigned' in type_name:
                    return UNSIGNED_LONG
                else:
                    return LONG
            elif 'int' in type_name:
                if 'unsigned' in type_name:
                    return UNSIGNED_INT
                else:
                    return INT
            elif 'bool' in type_name.lower():
                return BOOL
            else:
                # Unrecognized base types (e.g. float) fall back to VOID.
                return VOID
    else:  # ('DW_TAG_typedef', 'DW_TAG_const_type', 'DW_TAG_volatile_type'):
        # Wrapper tags: recurse on the underlying type, or on the
        # origin/specification DIE when no direct type link exists.
        die_type_offset = die.attributes.get('DW_AT_type', None)
        cu_offset = die.cu.cu_offset
        if die_type_offset is not None and die_type_offset.value + cu_offset in self.dies:
            die_type = self.dies[die_type_offset.value + cu_offset]
            return self.get_ttype_name(die_type)
        else:
            abstract_origin_attr = die.attributes.get('DW_AT_abstract_origin', None)
            specification_attr = die.attributes.get('DW_AT_specification', None)
            if abstract_origin_attr is not None:
                origin_offset = abstract_origin_attr.value + die.cu.cu_offset
                return self.get_ttype_name(self.dies[origin_offset])
            elif specification_attr is not None:
                specification_offset = specification_attr.value + die.cu.cu_offset
                return self.get_ttype_name(self.dies[specification_offset])
            else:
                return VOID
def get_name_origin(self, die):
    """Return the DIE carrying the DW_AT_name for `die`.

    If `die` has no name, follow its DW_AT_abstract_origin (preferred)
    or DW_AT_specification link and retry; when neither link exists,
    `die` itself is returned.
    """
    if die.attributes.get('DW_AT_name', None) is not None:
        return die
    base = die.cu.cu_offset
    for link in ('DW_AT_abstract_origin', 'DW_AT_specification'):
        link_attr = die.attributes.get(link, None)
        if link_attr is not None:
            return self.get_name_origin(self.dies[link_attr.value + base])
    return die
def get_die_type(self, die):
    """Return the DIE of `die`'s type with typedef/const/volatile wrappers
    stripped, following origin/specification links when there is no direct
    DW_AT_type.

    NOTE(review): when no type link exists at all this returns `die`
    itself rather than None — callers appear to rely on that fallback.
    """
    if die is None:
        return None
    die_type_offset = die.attributes.get('DW_AT_type', None)
    cu_offset = die.cu.cu_offset
    if die_type_offset is None:
        abstract_origin_attr = die.attributes.get('DW_AT_abstract_origin', None)
        specification_attr = die.attributes.get('DW_AT_specification', None)
        if abstract_origin_attr is not None:
            origin_offset = abstract_origin_attr.value + cu_offset
            return self.get_die_type(self.dies[origin_offset])
        elif specification_attr is not None:
            origin_offset = specification_attr.value + cu_offset
            return self.get_die_type(self.dies[origin_offset])
        else:
            return die
    else:
        die_type = self.dies[die_type_offset.value + cu_offset]
        # Strip wrapper tags by recursing on the wrapped type.
        if die_type.tag in ('DW_TAG_typedef', 'DW_TAG_const_type', 'DW_TAG_volatile_type'):
            return self.get_die_type(die_type)
        else:
            return die_type
def get_byte_size(self, die):
    """Byte size of `die`, following DW_AT_type links when the DIE has no
    DW_AT_byte_size of its own; None when it cannot be determined."""
    size_attr = die.attributes.get('DW_AT_byte_size', None)
    if size_attr is not None:
        return size_attr.value
    type_attr = die.attributes.get('DW_AT_type', None)
    if type_attr is None:
        return None
    referenced = type_attr.value + die.cu.cu_offset
    if referenced not in self.dies:
        return None
    return self.get_byte_size(self.dies[referenced])
def get_array_upper_bound(self, die):
    """Return the DW_AT_upper_bound of the first DW_TAG_subrange_type
    child of an array-type DIE, or None when absent or unparseable.

    The bound is stored either as constant data (DW_FORM_data1..8) or as
    a DWARF expression pushing a constant (DW_OP_const* / constu/consts).
    """
    for child in die.iter_children():
        if child.tag != 'DW_TAG_subrange_type':
            continue
        upper_bound_attr = child.attributes.get('DW_AT_upper_bound', None)
        if upper_bound_attr is None:
            return None
        if upper_bound_attr.form in ('DW_FORM_data1',
                                     'DW_FORM_data2',
                                     'DW_FORM_data4',
                                     'DW_FORM_data8'):
            return upper_bound_attr.value
        elif upper_bound_attr.form == 'DW_FORM_exprloc':
            loc = upper_bound_attr.value
            op = loc[0]
            if op == ENUM_DW_FORM_exprloc['DW_OP_const1u']:
                return ctypes.c_uint8(loc[1]).value
            elif op == ENUM_DW_FORM_exprloc['DW_OP_const1s']:
                return ctypes.c_int8(loc[1]).value
            elif op == ENUM_DW_FORM_exprloc['DW_OP_const2u']:
                return ctypes.c_uint16(utils.decode_kbytes(loc[1:], 2)).value
            elif op == ENUM_DW_FORM_exprloc['DW_OP_const2s']:
                return ctypes.c_int16(utils.decode_kbytes(loc[1:], 2)).value
            # BUG FIX: DW_OP_const4* and DW_OP_const8* operands are 4 and
            # 8 bytes wide; they were previously decoded with k=2, reading
            # only the low two bytes of the constant.
            elif op == ENUM_DW_FORM_exprloc['DW_OP_const4u']:
                return ctypes.c_uint32(utils.decode_kbytes(loc[1:], 4)).value
            elif op == ENUM_DW_FORM_exprloc['DW_OP_const4s']:
                return ctypes.c_int32(utils.decode_kbytes(loc[1:], 4)).value
            elif op == ENUM_DW_FORM_exprloc['DW_OP_const8u']:
                return ctypes.c_uint64(utils.decode_kbytes(loc[1:], 8)).value
            elif op == ENUM_DW_FORM_exprloc['DW_OP_const8s']:
                return ctypes.c_int64(utils.decode_kbytes(loc[1:], 8)).value
            elif op == ENUM_DW_FORM_exprloc['DW_OP_constu']:
                return utils.decode_uleb128(loc[1:])
            elif op == ENUM_DW_FORM_exprloc['DW_OP_consts']:
                return utils.decode_sleb128(loc[1:])
            else:
                return None
        else:
            return None
def binary_train_info(self):
    """Transfer ground-truth names and types from the debug DWARF info
    onto the stripped binary's functions and direct offsets (training
    labels), matching first by address, then by symbol table, then by
    name."""
    # Index every DIE by absolute offset for cross-reference lookups.
    for cu in self.dwarf_info.iter_CUs():
        for die in cu.iter_DIEs():
            self.dies[die.offset] = die
    added_die = set()
    # Pass 1: match subprograms to functions by DW_AT_low_pc, and global
    # variables to absolute offsets via DW_OP_addr locations.
    for cu in self.dwarf_info.iter_CUs():
        top_die = cu.get_top_DIE()
        low_pc_attr = top_die.attributes.get('DW_AT_low_pc', None)
        if low_pc_attr is not None:
            cu_low_pc = low_pc_attr.value
        else:
            cu_low_pc = 0
        for die in cu.iter_DIEs():
            if die.tag == 'DW_TAG_subprogram':
                low_pc_attr = die.attributes.get('DW_AT_low_pc', None)
                # high_pc_attr = die.attributes.get('DW_AT_high_pc', None)
                origin = self.get_name_origin(die)
                if low_pc_attr is not None:
                    low_pc = low_pc_attr.value
                    if self.binary.functions.is_lowpc_function(low_pc):
                        function = self.binary.functions.get_function_by_lowpc(low_pc)
                        if function.is_run_init:
                            self.function_train_info(function, die, cu_low_pc, True)
                            added_die.add(die)
                        else:
                            pass
                    else:
                        pass
            if die.tag == 'DW_TAG_variable':
                loc_attr = die.attributes.get('DW_AT_location', None)
                if loc_attr is not None:
                    loc = loc_attr.value
                    form = loc_attr.form
                    if form == 'DW_FORM_block1' or form == 'DW_FORM_exprloc':
                        # Only simple absolute addresses: DW_OP_addr
                        # followed by exactly one machine address.
                        if loc[0] == ENUM_DW_FORM_exprloc['DW_OP_addr'] and len(loc) == self.binary.config.ADDRESS_BYTE_SIZE + 1:
                            offset = utils.decode_address(loc[1:], self.binary)
                            self.direct_offset_train_info(offset, die)
                        else:
                            pass
                    else:
                        pass
                else:
                    pass
    # Pass 2: fill in still-unknown names from the debug symbol table.
    for sym in self.symtab.iter_symbols():
        ttype = sym.entry['st_info']['type']
        name = self.strtab.get_string(sym.entry['st_name'])
        # Drop symbol-versioning suffixes like "name@@GLIBC_2.2.5".
        if '@@' in name:
            name = name[:name.find('@@')]
        value = sym.entry['st_value']
        if ttype == 'STT_FUNC' and self.binary.functions.is_lowpc_function(value):
            function = self.binary.functions.get_function_by_lowpc(value)
            if function.train_name == UNKNOWN_LABEL:
                function.train_name = name
        if ttype == 'STT_OBJECT' and value in self.binary.direct_offsets:
            direct_offset = self.binary.direct_offsets[value]
            if direct_offset.train_name == UNKNOWN_LABEL:
                direct_offset.train_name = name
    # Pass 3: match remaining DIEs to functions/offsets by (linkage) name.
    for cu in self.dwarf_info.iter_CUs():
        top_die = cu.get_top_DIE()
        low_pc_attr = top_die.attributes.get('DW_AT_low_pc', None)
        if low_pc_attr is not None:
            cu_low_pc = low_pc_attr.value
        else:
            cu_low_pc = 0
        for die in cu.iter_DIEs():
            if die.tag == 'DW_TAG_subprogram':
                origin = self.get_name_origin(die)
                name_attr = origin.attributes.get('DW_AT_name', None)
                if name_attr is not None:
                    name = name_attr.value.decode('ascii')
                    for function in self.binary.functions.functions:
                        if function.is_run_init \
                                and (function.name == name or function.train_name == name):
                            self.function_train_info(function, die, cu_low_pc, True)
                            break
                # Also try mangled linkage names (C++).
                die_linkage_name_attr = die.attributes.get('DW_AT_linkage_name', None)
                origin_linkage_name_attr = origin.attributes.get('DW_AT_linkage_name', None)
                name = None
                if die_linkage_name_attr is not None:
                    name = die_linkage_name_attr.value.decode('ascii')
                elif origin_linkage_name_attr is not None:
                    name = origin_linkage_name_attr.value.decode('ascii')
                if name is not None:
                    for function in self.binary.functions.functions:
                        if function.is_run_init \
                                and (function.name == name or function.train_name == name):
                            self.function_train_info(function, die, cu_low_pc, True)
                            break
            if die.tag == 'DW_TAG_variable':
                origin = self.get_name_origin(die)
                name_attr = origin.attributes.get('DW_AT_name', None)
                if name_attr is not None:
                    name = name_attr.value.decode('ascii')
                    for direct_offset in self.binary.direct_offsets.values():
                        if direct_offset.train_name == name \
                                and direct_offset.ttype.train_name == UNKNOWN_LABEL:
                            ttype = self.get_ttype_name(die)
                            direct_offset.ttype.train_info(ttype)
    # for f in self.binary.functions.functions:
    #     if f.train_name != UNKNOWN_LABEL \
    #             and f.ttype.train_name == UNKNOWN_LABEL:
    #         f.ttype.train_info(VOID)
def function_train_info(self, function, die, cu_low_pc, add_info):
    """Label `function` from its matching DW_TAG_subprogram DIE.

    Registers the DIE's frame base, optionally (add_info) sets the
    function's return-type label and name, then walks the DIE's
    non-inlined parameter/variable descendants and dispatches each one's
    DW_AT_location to the appropriate training routine.
    """
    frame_base_attr = die.attributes.get('DW_AT_frame_base', None)
    function.add_frame_bases(frame_base_attr, cu_low_pc)
    function.init_run = True
    if add_info:
        name = self.get_ttype_name(die)
        function.ttype.train_info(name)
        origin = self.get_name_origin(die)
        name_attr = origin.attributes.get('DW_AT_name', None)
        if name_attr is not None:
            function.train_name = name_attr.value.decode('ascii')
    descendants = []
    def get_die_descendants(d):
        # Skip inlined subroutines and call sites: their variables belong
        # to other functions.
        if d.tag in ('DW_TAG_inlined_subroutine', 'DW_TAG_GNU_call_site'):
            pass
        else:
            if d.tag in ('DW_TAG_formal_parameter', 'DW_TAG_variable'):
                descendants.append(d)
            for child in d.iter_children():
                get_die_descendants(child)
    get_die_descendants(die)
    for desc in descendants:
        if desc.tag in ('DW_TAG_formal_parameter', 'DW_TAG_variable'):
            loc_attr = desc.attributes.get('DW_AT_location', None)
            if loc_attr is not None:
                loc = loc_attr.value
                form = loc_attr.form
                if form == 'DW_FORM_exprloc':
                    # Single location expression.
                    self.loc_train_info(function, loc, desc)
                elif form in ('DW_FORM_data4', 'DW_FORM_sec_offset'):
                    # Offset into .debug_loc (location list).
                    self.location_list_train_info(function, loc, desc, cu_low_pc)
                elif form == 'DW_FORM_block1':
                    if len(loc) == 1:
                        # Bare DW_OP_regN: variable lives in a register.
                        if ENUM_DW_FORM_exprloc['DW_OP_reg0'] <= loc[0] <= ENUM_DW_FORM_exprloc['DW_OP_reg31'] \
                                and (loc[0] - ENUM_DW_FORM_exprloc['DW_OP_reg0']) in self.binary.config.REG_MAPPING:
                            base_register = self.binary.config.REG_MAPPING[loc[0] - ENUM_DW_FORM_exprloc['DW_OP_reg0']]
                            self.reg_add_info(function, base_register, desc, None, None)
                    else:
                        self.loc_train_info(function, loc, desc)
                else:
                    pass
            else:
                pass
def fbreg_train_info(self, function, offset, die, low_pc=None, high_pc=None):
    """Resolve a DW_OP_fbreg offset against the function's frame base(s)
    and forward to indirect_offset_train_info."""
    if len(function.frame_bases) == 0:
        pass
    elif len(function.frame_bases) == 1:
        # One frame base: valid over the whole function.
        frame_base = function.frame_bases[0]
        base_pointer = frame_base.base_register
        frame_offset = frame_base.offset + offset
        self.indirect_offset_train_info(function, base_pointer, frame_offset, die, self.get_die_type(die))
    else:
        # Several frame bases: intersect each one's pc range with the
        # variable's live range (when one was supplied).
        for frame_base in function.frame_bases:
            base_pointer = frame_base.base_register
            frame_offset = frame_base.offset + offset
            frame_low_pc = frame_base.low_pc
            frame_high_pc = frame_base.high_pc
            if low_pc is None and high_pc is None:
                self.indirect_offset_train_info(function, base_pointer, frame_offset, die, self.get_die_type(die), frame_low_pc, frame_high_pc)
            elif high_pc > frame_low_pc and low_pc < frame_high_pc:
                self.indirect_offset_train_info(function, base_pointer, frame_offset, die, self.get_die_type(die), max(frame_low_pc, low_pc), min(frame_high_pc, high_pc))
def indirect_offset_add_info(self, function, base_pointer, offset, die, low_pc, high_pc, ttype):
    """Attach ground-truth (name/type) info to every indirect offset
    keyed by (base_pointer, offset) in `function`.

    When a pc range is given, only offsets with at least one use site in
    [low_pc, high_pc) are labeled; with both bounds None, all are.
    """
    key = (base_pointer, offset)
    if key in function.indirect_offsets:
        for indirect_offset in function.indirect_offsets[key].values():
            if low_pc is None and high_pc is None:
                indirect_offset.train_info(die, ttype)
            else:
                for pc in indirect_offset.pcs:
                    if pc >= low_pc and pc < high_pc:
                        indirect_offset.train_info(die, ttype)
                        break
def reg_add_info(self, function, base_register, die, low_pc, high_pc):
    """Label the function's registers that hold the variable `die`,
    restricted to use sites in [low_pc, high_pc) when bounds are given."""
    ttype = self.get_ttype_name(die)
    for reg in function.regs.values():
        # GivReg entries are given/ABI registers and are never labeled.
        if not isinstance(reg, GivReg) and reg.base_register == base_register:
            for pc in reg.pcs:
                if (low_pc is None and high_pc is None) or low_pc <= pc < high_pc:
                    reg.train_info(die, ttype)
                    break
    if ttype == POINTER:
        # A pointer in a register also describes what [reg+0] points at,
        # so propagate the pointee type to that indirect offset.
        pointer_ttype_die = self.get_pointer_ttype_die(die)
        pointer_ttype_name = self.get_ttype_name(pointer_ttype_die) if pointer_ttype_die is not None else VOID
        self.indirect_offset_train_info(function, base_register, 0, die, self.get_die_type(pointer_ttype_die), low_pc, high_pc, pointer_ttype_name)
def indirect_offset_train_info(self, function, base_pointer, offset, die, die_type, low_pc=None, high_pc=None, ttype=None):
    """Label the indirect offsets covered by a variable at
    base_pointer+offset.

    Scalars label the single offset; arrays/unions/structs fan out over
    the whole byte range of the type, and struct/class members recurse
    with their DW_AT_data_member_location offsets.

    BUG FIXES vs. the previous version of the struct-member loop:
    - member attributes were read from `die` (the variable DIE, which
      has no DW_AT_data_member_location) instead of each `child`;
    - the ULEB128 member offset was decoded from a slice of the
      AttributeValue namedtuple instead of its `.value` bytes;
    - the recursion re-passed `die`/`die_type` (the struct itself),
      which could never terminate, instead of the member DIE and the
      member's type.
    """
    if ttype is None:
        ttype = self.get_ttype_name(die)

    def fan_out(byte_range):
        # Label every offset in [offset, offset + byte_range). For very
        # large types, only touch offsets that actually occur in the
        # function instead of enumerating every byte.
        if byte_range > MAX_UPPER_BOUND:
            for key in function.indirect_offsets:
                if key[0] == base_pointer and offset <= key[1] < byte_range + offset:
                    self.indirect_offset_add_info(function, key[0], key[1], die, low_pc, high_pc, ttype)
        else:
            for i in range(0, byte_range):
                self.indirect_offset_add_info(function, base_pointer, offset + i, die, low_pc, high_pc, ttype)

    if die_type is None:
        self.indirect_offset_add_info(function, base_pointer, offset, die, low_pc, high_pc, ttype)
    elif die_type.tag == 'DW_TAG_array_type':
        byte_size = self.get_byte_size(die_type)
        upper_bound = self.get_array_upper_bound(die_type)
        if byte_size is not None and upper_bound is not None:
            fan_out(upper_bound * byte_size)
        else:
            self.indirect_offset_add_info(function, base_pointer, offset, die, low_pc, high_pc, ttype)
    elif die_type.tag == 'DW_TAG_union_type':
        byte_size = self.get_byte_size(die_type)
        if byte_size is not None:
            fan_out(byte_size)
        else:
            self.indirect_offset_add_info(function, base_pointer, offset, die, low_pc, high_pc, ttype)
    elif die_type.tag in ('DW_TAG_structure_type', 'DW_TAG_class_type'):
        byte_size = self.get_byte_size(die_type)
        if byte_size is not None:
            fan_out(byte_size)
        else:
            self.indirect_offset_add_info(function, base_pointer, offset, die, low_pc, high_pc, ttype)
        # Recurse into each member at its data_member_location offset.
        for child in die_type.iter_children():
            child_offset_attr = child.attributes.get('DW_AT_data_member_location', None)
            if child_offset_attr is None:
                continue
            if child_offset_attr.form == 'DW_FORM_block1':
                # Expression form: DW_OP_plus_uconst (0x23) + ULEB128 offset.
                if child_offset_attr.value[0] == 0x23:
                    child_offset = utils.decode_uleb128(child_offset_attr.value[1:])
                else:
                    continue
            elif child_offset_attr.form == 'DW_FORM_data1':
                child_offset = child_offset_attr.value
            else:
                continue
            self.indirect_offset_train_info(function, base_pointer, offset + child_offset,
                                            child, self.get_die_type(child), low_pc, high_pc)
    else:
        byte_size = self.get_byte_size(die_type)
        if byte_size is not None:
            fan_out(byte_size)
        else:
            self.indirect_offset_add_info(function, base_pointer, offset, die, low_pc, high_pc, ttype)
def direct_offset_train_info(self, offset, die, ttype=None):
    """Record training info for a variable DIE placed at an absolute (direct)
    memory offset, fanning out over every offset the variable's type covers.

    Scalars (or DIEs with no resolvable type) train only the entry at
    ``offset`` in ``self.binary.direct_offsets``. Arrays, unions, structs and
    classes train every covered offset; when the type is larger than
    MAX_UPPER_BOUND, only the already-known offsets inside the covered range
    are trained instead of walking every byte. Struct/class types additionally
    iterate their member DIEs and recurse.

    :param offset: absolute offset of the variable (e.g. from DW_OP_addr)
    :param die: the variable's DIE
    :param ttype: pre-computed type name; looked up from the DIE when None
    """
    die_type = self.get_die_type(die)
    if ttype is None:
        ttype = self.get_ttype_name(die)
    if die_type is None:
        # No type information: train the exact offset only, if known.
        if offset in self.binary.direct_offsets:
            self.binary.direct_offsets[offset].train_info(die, ttype)
        else:
            pass
    elif die_type.tag == 'DW_TAG_array_type':
        byte_size = self.get_byte_size(die_type)
        upper_bound = self.get_array_upper_bound(die_type)
        if byte_size is not None and upper_bound is not None:
            if upper_bound * byte_size > MAX_UPPER_BOUND:
                # Huge array: iterate the known offsets instead of every byte.
                # NOTE(review): the range test compares against
                # `upper_bound * byte_size` WITHOUT adding `offset`, unlike
                # the union/struct branches below — looks like a missing
                # `offset +`; confirm intent before changing.
                for off in self.binary.direct_offsets:
                    if offset <= off < upper_bound * byte_size:
                        self.binary.direct_offsets[off].train_info(die, ttype)
            else:
                # Small array: train each byte offset the array spans.
                for i in range(0, upper_bound * byte_size):
                    off = offset + i
                    if off in self.binary.direct_offsets:
                        self.binary.direct_offsets[off].train_info(die, ttype)
        elif offset in self.binary.direct_offsets:
            # Size or bound unknown: fall back to the base offset only.
            self.binary.direct_offsets[offset].train_info(die, ttype)
        else:
            pass
    elif die_type.tag == 'DW_TAG_union_type':
        byte_size = self.get_byte_size(die_type)
        if byte_size is not None:
            if byte_size > MAX_UPPER_BOUND:
                # Huge union: only visit offsets we already track.
                for off in self.binary.direct_offsets:
                    if offset <= off < offset + byte_size:
                        self.binary.direct_offsets[off].train_info(die, ttype)
            else:
                for i in range(0, byte_size):
                    off = offset + i
                    if off in self.binary.direct_offsets:
                        self.binary.direct_offsets[off].train_info(die, ttype)
        elif offset in self.binary.direct_offsets:
            self.binary.direct_offsets[offset].train_info(die, ttype)
        else:
            pass
    elif die_type.tag in ('DW_TAG_structure_type', 'DW_TAG_class_type'):
        byte_size = self.get_byte_size(die_type)
        if byte_size is not None:
            if byte_size > MAX_UPPER_BOUND:
                for off in self.binary.direct_offsets:
                    if offset <= off < offset + byte_size:
                        self.binary.direct_offsets[off].train_info(die, ttype)
            else:
                for i in range(0, byte_size):
                    off = offset + i
                    if off in self.binary.direct_offsets:
                        self.binary.direct_offsets[off].train_info(die, ttype)
        elif offset in self.binary.direct_offsets:
            self.binary.direct_offsets[offset].train_info(die, ttype)
        else:
            pass
        # Recurse into the aggregate's members.
        # NOTE(review): this loop reads DW_AT_data_member_location from `die`
        # (the variable DIE) rather than from `child`, and passes `die` back
        # into the recursion — `child` is never used, so the loop is almost
        # certainly a no-op for typical DWARF. Each member's own attribute
        # (`child.attributes`) and `child` as the recursion argument look
        # intended. Also `child_offset_attr[1:]` slices the attribute object
        # itself instead of `child_offset_attr.value[1:]`; verify.
        for child in die_type.iter_children():
            child_offset_attr = die.attributes.get('DW_AT_data_member_location', None)
            if child_offset_attr is not None:
                if child_offset_attr.form == 'DW_FORM_block1':
                    # 0x23 == DW_OP_plus_uconst: member offset follows as ULEB128.
                    if child_offset_attr.value[0] == 0x23:
                        child_offset = utils.decode_uleb128(child_offset_attr[1:])
                        off = offset + child_offset
                        self.direct_offset_train_info(off, die, ttype)
                    else:
                        pass
                elif child_offset_attr.form == 'DW_FORM_data1':
                    child_offset = child_offset_attr.value
                    off = offset + child_offset
                    self.direct_offset_train_info(off, die, ttype)
                else:
                    pass
    elif offset in self.binary.direct_offsets:
        # Any other typed DIE whose base offset we track: spread over its size
        # when known, otherwise train just the base offset.
        byte_size = self.get_byte_size(die_type)
        if byte_size is not None:
            if byte_size > MAX_UPPER_BOUND:
                for off in self.binary.direct_offsets:
                    if offset <= off < byte_size + offset:
                        self.binary.direct_offsets[off].train_info(die, ttype)
            else:
                for i in range(0, byte_size):
                    off = offset + i
                    if off in self.binary.direct_offsets:
                        self.binary.direct_offsets[off].train_info(die, ttype)
        else:
            self.binary.direct_offsets[offset].train_info(die, ttype)
    else:
        pass
def location_list_train_info(self, function, loc_offset, die, cu_low_pc):
    """Walk the DWARF location list at *loc_offset* and train on each entry.

    Non-location entries (e.g. base-address selection) and entries with an
    empty location expression are skipped; every remaining entry is handed
    to loc_train_info with its PC range rebased onto the CU's low_pc.
    """
    entries = self.location_lists.get_location_list_at_offset(loc_offset)
    for entry in entries:
        if not isinstance(entry, LocationEntry):
            continue
        loc = entry.loc_expr
        if not loc:
            continue
        # begin/end offsets in the list are relative to the CU base address.
        self.loc_train_info(function,
                            loc,
                            die,
                            entry.begin_offset + cu_low_pc,
                            entry.end_offset + cu_low_pc)
def loc_train_info(self, function, loc, die, low_pc=None, high_pc=None):
    """Dispatch a single DWARF location expression to the matching trainer.

    Handled forms: DW_OP_fbreg (frame-base relative), DW_OP_breg0..31
    (register-relative, only for registers present in REG_MAPPING),
    DW_OP_addr (absolute address) and DW_OP_reg0..31 (value held in a
    register). Any other opcode is ignored.
    """
    ops = ENUM_DW_FORM_exprloc
    op = loc[0]
    in_breg_range = ops['DW_OP_breg0'] <= op <= ops['DW_OP_breg31']
    in_reg_range = ops['DW_OP_reg0'] <= op <= ops['DW_OP_reg31']
    if op == ops['DW_OP_fbreg']:
        # Variable lives at a signed offset from the frame base.
        self.fbreg_train_info(function, decode_sleb128(loc[1:]), die, low_pc, high_pc)
    elif in_breg_range and (op - ops['DW_OP_breg0']) in self.binary.config.REG_MAPPING:
        # Register-relative: indirect offset off a mapped base register.
        base_pointer = self.binary.config.REG_MAPPING[op - ops['DW_OP_breg0']]
        self.indirect_offset_train_info(function,
                                        base_pointer,
                                        decode_sleb128(loc[1:]),
                                        die,
                                        self.get_die_type(die))
    elif op == ops['DW_OP_addr']:
        # Absolute address: direct offset into the binary.
        self.direct_offset_train_info(decode_address(loc[1:], self.binary), die)
    elif in_reg_range and (op - ops['DW_OP_reg0']) in self.binary.config.REG_MAPPING:
        # Value resides directly in a mapped register.
        base_register = self.binary.config.REG_MAPPING[op - ops['DW_OP_reg0']]
        self.reg_add_info(function, base_register, die, low_pc, high_pc)
    # Other opcodes carry no trainable location information.
| [
"he4444mingtian@gmail.com"
] | he4444mingtian@gmail.com |
fc9eada358e8a8bab6e2d5cabb8ef8dc7c58307a | 7ff9410466d608d5fc1df2a0d3c6f4ddfc3b713c | /xml_to_csv.py | 8eea90b9897b514f6a7356f7affd001615bc52a9 | [] | no_license | wilson-boca/identify-objects | 03d0b539d9ad1358cf3e95922e3003bd874b7127 | 07626727c31b1ae65e40ff99ff5c68ae8ed54d1b | refs/heads/master | 2023-04-05T05:43:53.356305 | 2020-03-30T14:59:22 | 2020-03-30T14:59:22 | 250,657,993 | 0 | 0 | null | 2023-03-24T22:34:29 | 2020-03-27T22:02:32 | Python | UTF-8 | Python | false | false | 1,189 | py | import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
def xml_to_csv(path):
    """Collect Pascal-VOC style annotations from every XML file in *path*.

    Each ``<object>`` element in every annotation file becomes one row with
    the image filename, image dimensions, class label and bounding-box
    coordinates.

    :param path: directory containing the annotation ``.xml`` files
    :return: pandas.DataFrame with columns
             filename, width, height, class, xmin, ymin, xmax, ymax
    """
    xml_list = []
    for xml_file in glob.glob(os.path.join(path, '*.xml')):
        tree = ET.parse(xml_file)
        root = tree.getroot()
        # Look children up by tag name instead of positional indices
        # (root.find('size')[0], member[4][0], ...) so the parser does not
        # break when the annotation tool emits elements in a different order.
        size = root.find('size')
        for member in root.findall('object'):
            bndbox = member.find('bndbox')
            value = (root.find('filename').text,
                     int(size.find('width').text),
                     int(size.find('height').text),
                     member.find('name').text,
                     int(bndbox.find('xmin').text),
                     int(bndbox.find('ymin').text),
                     int(bndbox.find('xmax').text),
                     int(bndbox.find('ymax').text)
                     )
            xml_list.append(value)
    column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    xml_df = pd.DataFrame(xml_list, columns=column_name)
    return xml_df
def main():
    """Build images/<split>_labels.csv from the XMLs in images/<split>/."""
    for split in ['train', 'test']:
        annotations_dir = os.path.join(os.getcwd(), ('images/' + split))
        labels = xml_to_csv(annotations_dir)
        labels.to_csv(('images/' + split + '_labels.csv'), index=None)
        print('Successfully converted xml to csv.')
# Run the conversion only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"wilson.boca@gmail.com"
] | wilson.boca@gmail.com |
9663d2aacfb226c81bf9757446cc9fe4df27d7ce | a732353686f6b22561edc6905e0243c2a2038667 | /szamlazz/models.py | 2ccb14534612b5d25db06aac0c8d415cc442cf5f | [
"MIT"
] | permissive | freemanPy/szamlazz.py | 34fac9bf94df6f628119d27908e43a834e4c1371 | 2822d1c6ea19178131fc38c283d68ef350dcfde9 | refs/heads/master | 2023-08-12T04:07:37.381429 | 2021-09-12T21:18:18 | 2021-09-12T21:18:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,854 | py | import base64
import logging
from pathlib import Path
from requests.models import Response
from typing import NamedTuple, Tuple
from urllib.parse import unquote
# noinspection PyPep8Naming
import xml.etree.ElementTree as ET
__all__ = ["Header", "Merchant", "Buyer", "Item", "Disbursement", "SzamlazzResponse", "PdfDataMissingError", "EmailDetails", ] # "WayBill"
logger = logging.getLogger(__name__)
class PdfDataMissingError(Exception):
    """Raised by SzamlazzResponse.get_pdf_base64() when the response
    carried no PDF data (e.g. PDF download was not requested)."""
    pass
class Header(NamedTuple):
    """Invoice header data — serialized as the <fejlec> XML element.

    Dates are "YYYY-MM-DD" strings; the boolean flags select the invoice
    kind (pro forma, deposit, correction, ...). ``invoice_number`` is only
    needed when reversing (storno) an existing invoice.
    """
    creating_date: str = ""  # <keltDatum>2020-01-20</keltDatum>
    payment_date: str = ""  # <teljesitesDatum>2020-01-20</teljesitesDatum>
    due_date: str = ""  # <fizetesiHataridoDatum>2020-01-20</fizetesiHataridoDatum>
    payment_type: str = "Átutalás"  # <fizmod>Átutalás</fizmod>
    currency: str = "HUF"  # <penznem>HUF</penznem>
    invoice_language: str = "hu"  # <szamlaNyelve>hu</szamlaNyelve> // can be: de, en, it, hu, fr, ro, sk, hr
    invoice_comment: str = ""  # <megjegyzes>Invoice comment</megjegyzes>
    name_of_bank: str = "MNB"  # <arfolyamBank>MNB</arfolyamBank>
    exchange_rate: float = 0.0  # <arfolyam>0.0</arfolyam>
    order_number: str = ""  # <rendelesSzam></rendelesSzam>
    pro_forma_number_ref: str = ""  # <dijbekeroSzamlaszam></dijbekeroSzamlaszam>
    deposit_invoice: bool = False  # <elolegszamla>false</elolegszamla>
    invoice_after_deposit_invoice: bool = False  # <vegszamla>false</vegszamla>
    correction_invoice: bool = False  # <helyesbitoszamla>false</helyesbitoszamla>
    number_of_corrected_invoice: str = ""  # <helyesbitettSzamlaszam></helyesbitettSzamlaszam>
    proforma_invoice: bool = False  # <dijbekero>false</dijbekero>
    invoice_prefix: str = ""  # <szamlaszamElotag></szamlaszamElotag>
    invoice_number: str = ""  # <szamlaszam>E-TST-2011-1</szamlaszam> // needed for reverse_invoice|storno only
    invoice_template: str = ""  # <!-- Codomain: 'SzlaMost' | 'SzlaAlap' | 'SzlaNoEnv' | 'Szla8cm' | 'SzlaTomb' | 'SzlaFuvarlevelesAlap' -->
class Merchant(NamedTuple):
    """Seller data — serialized as the <elado> XML element: bank details
    plus the notification e-mail fields."""
    bank_name: str = ""  # <bank>BB</bank>
    bank_account_number: str = ""  # <bankszamlaszam>11111111-22222222-33333333</bankszamlaszam>
    reply_email_address: str = ""  # <emailReplyto> </emailReplyto>
    email_subject: str = ""  # <emailTargy>Invoice notification</emailTargy>
    email_text: str = ""  # <emailSzoveg>mail text</emailSzoveg>
class Buyer(NamedTuple):
    """Customer data — serialized as the <vevo> XML element: billing
    address, tax numbers and an optional separate mailing address."""
    name: str = ""  # <nev>Kovacs Bt.</nev>
    zip_code: str = ""  # <irsz>2030</irsz>
    city: str = ""  # <telepules>Érd</telepules>
    address: str = ""  # <cim>Tárnoki út 23.</cim>
    email: str = ""  # <email>buyer@example.com</email>
    send_email: bool = False  # <sendEmail>false</sendEmail>
    tax_number: str = ""  # <adoszam>12345678-1-42</adoszam>
    tax_number_eu: str = ""  # <adoszamEU>HU55555555</adoszamEU> // needed for reverse_invoice|storno only
    delivery_name: str = ""  # <postazasiNev>Kovács Bt. mailing name</postazasiNev>
    delivery_zip: str = ""  # <postazasiIrsz>2040</postazasiIrsz>
    delivery_city: str = ""  # <postazasiTelepules>Budaörs</postazasiTelepules>
    delivery_address: str = ""  # <postazasiCim>Szivárvány utca 8.</postazasiCim>
    identification: str = ""  # <azonosito>1234</azonosito>
    phone_number: str = ""  # <telefonszam>Tel:+3630-555-55-55, Fax:+3623-555-555</telefonszam>
    comment: str = ""  # <megjegyzes>Call extension 214 from the reception</megjegyzes>
# class WayBill(NamedTuple):
# """<fuvarlevel>"""
# <!-- waybill/confinement note, you do not need this: omit the entire tag -->
# uticel: str = "" #
# futarSzolgalat: str = "" #
class ItemLedger(NamedTuple):
    # language=XML
    """
    <sequence>
        <element name="gazdasagiEsem" type="string" maxOccurs="1" minOccurs="0"></element>
        <element name="gazdasagiEsemAfa" type="string" maxOccurs="1" minOccurs="0"></element>
        <element name="arbevetelFokonyviSzam" type="string" maxOccurs="1" minOccurs="0"></element>
        <element name="afaFokonyviSzam" type="string" maxOccurs="1" minOccurs="0"></element>
        <element name="elszDatumTol" type="date" maxOccurs="1" minOccurs="0"></element>
        <element name="elszDatumIg" type="date" maxOccurs="1" minOccurs="0"></element>
    </sequence>
    """
    # Per-item ledger (accounting) data; fields map 1:1, in order, onto the
    # XSD sequence in the docstring above.
    economic_event: str = ""  # <gazdasagiesemeny></gazdasagiesemeny>
    economic_event_tax: str = ""  # <gazdasagiesemenyafa></gazdasagiesemenyafa>
    sales_ledger_number: str = ""  # <arbevetelFokonyviSzam>
    vat_ledger_number: str = ""  # <afaFokonyviSzam>
    settlement_date_from: str = ""  # <elszDatumTol>
    settlement_date_to: str = ""  # <elszDatumIg>
class Item(NamedTuple):
    """One invoice line item.

    Quantities and amounts are kept as strings, exactly as they will be
    serialized into the request XML.
    """
    name: str = ""  # <megnevezes>Elado izé</megnevezes>
    identifier: str = ""  # <azonosito>ASD-123</azonosito>
    quantity: str = ""  # <mennyiseg>1.0</mennyiseg>
    quantity_unit: str = ""  # <mennyisegiEgyseg>db</mennyisegiEgyseg>
    unit_price: str = ""  # <nettoEgysegar>10000</nettoEgysegar>
    vat_rate: str = ""  # <afakulcs>27</afakulcs>
    # Annotation fixed from `float` to `str`: the default is "" and every
    # other amount field in this tuple is a string as well.
    margin_tax_base: str = ""  # <arresAfaAlap>10.25</arresAfaAlap>
    net_price: str = ""  # <nettoErtek>10000.0</nettoErtek>
    vat_amount: str = ""  # <afaErtek>2700.0</afaErtek>
    gross_amount: str = ""  # <bruttoErtek>12700.0</bruttoErtek>
    comment_for_item: str = ""  # <megjegyzes>lorem ipsum</megjegyzes>
    # NOTE(review): default is "" (empty string), not an ItemLedger or None —
    # consumers must check before treating it as an ItemLedger.
    item_ledger: ItemLedger = ""  # <element name="tetelFokonyv" type="tns:tetelFokonyvTipus" maxOccurs="1" minOccurs="0"></element>
class Disbursement(NamedTuple):
    """A single payment record: date, title, amount and an optional
    free-text description."""
    date: str
    title: str
    amount: float
    description: str = ""
class EmailDetails(NamedTuple):
    """Details of an outgoing e-mail: recipient address(es), reply-to
    address, subject and optional body text."""
    addresses: str
    reply_to_address: str
    subject: str
    body_text: str = ""
class SzamlazzResponse:
    """Wrapper around the HTTP response of a Számlázz.hu Agent call.

    Extracts the success flag and the generated PDF from the response body,
    plus the error and invoice details the Agent reports through ``szlahu_*``
    HTTP headers (error code/message, invoice number, net/gross totals,
    receivables, buyer account URL, payment method).
    """
    def __init__(self,
                 response: Response,
                 xml_namespace: str,
                 ):
        """
        :param response: raw response object returned by the requests package
        :param xml_namespace: namespace prefix used when looking up tags in
            an XML response body (see __get_tag_text)
        """
        self.xml_namespace = xml_namespace
        self.__response = response
        self.__action_success: bool = False
        content_type = response.headers.get("Content-Type")
        if content_type == "application/octet-stream":
            # Parse XML and map into class members
            # (the PDF, when present, arrives base64-encoded in a <pdf> tag).
            root = ET.fromstring(self.__response.text)
            self.__pdf: str = self.__get_tag_text(root, "pdf")
            self.__pdf_bytes: bytes = b""
            self.__action_success: bool = True if (self.__get_tag_text(root, "sikeres") == "true") else False
        else:
            # Non-XML response: the body itself is the PDF binary.
            self.__pdf_bytes: bytes = response.content
            self.__pdf: str = base64.b64encode(self.__pdf_bytes).decode("ascii")
        # Error Handling
        self.error_code: str = response.headers.get("szlahu_error_code")
        self.error_message: str = response.headers.get("szlahu_error")
        if self.error_message:
            # Header values arrive URL-encoded.
            self.error_message = unquote(self.error_message)
        self.http_request_success: str = "false" if self.error_code else "true"
        # Extract Details
        self.invoice_number: str = response.headers.get("szlahu_szamlaszam")
        self.invoice_net_price: str = response.headers.get("szlahu_nettovegosszeg")
        self.invoice_gross_price: str = response.headers.get("szlahu_bruttovegosszeg")
        self.receivables: str = response.headers.get("szlahu_kintlevoseg")
        self.buyer_account_url: str = response.headers.get("szlahu_vevoifiokurl")
        if self.buyer_account_url:
            self.buyer_account_url = unquote(response.headers.get("szlahu_vevoifiokurl"))
        self.payment_method: str = response.headers.get("szlahu_fizetesmod")
        self.__has_errors = self.error_code or self.error_message
        if self.has_errors:
            logger.error(f"Error Code: {self.error_code}")
            logger.error(f"Error Message: {self.error_message}")
    @property
    def action_success(self) -> bool:
        """True when an XML response reported <sikeres>true</sikeres>."""
        return self.__action_success
    @property
    def has_errors(self):
        """Truthy when the response carried an error code or error message header."""
        return self.__has_errors
    @property
    def ok(self):
        """
        Shortcut to the original response's attribute with the same name
        """
        return self.__response.ok
    @property
    def response(self) -> Response:
        """
        Original HTTP Response object returned by the requests package
        :return: requests.models.Response
        """
        return self.__response
    @property
    def text(self) -> str:
        """
        Shortcut to the original response's attribute with the same name
        """
        return self.__response.text
    def get_pdf_base64(self) -> str:
        """
        Get PDF from response in Base64 format
        :return: PDF (in Base64 format)
        :rtype: str
        :raises PdfDataMissingError: when the response carried no PDF data
        """
        if (not self.__pdf) and (not self.__pdf_bytes):
            raise PdfDataMissingError("No PDF was returned. Check the value of szamlaLetoltes|invoice_download")
        return self.__pdf
    def get_pdf_bytes(self) -> bytes:
        """Return the PDF as raw bytes (decoded from Base64 when needed).

        :raises PdfDataMissingError: when the response carried no PDF data
        """
        pdf_base64 = self.get_pdf_base64()
        return base64.b64decode(pdf_base64) if pdf_base64 else self.__pdf_bytes
    def write_pdf_to_disk(self, pdf_output_path: Path):
        """Write the returned PDF to *pdf_output_path*.

        :raises FileNotFoundError: when the parent directory does not exist
        :raises PdfDataMissingError: when the response carried no PDF data
        """
        if not pdf_output_path.parent.exists():
            raise FileNotFoundError(f"Output file's parent folder is missing: {pdf_output_path.parent.as_posix()}")
        data = self.get_pdf_bytes()
        with open(pdf_output_path, "wb+") as f:
            f.write(data)
    def print_errors(self) -> Tuple[str, str]:
        """
        Prints the returned error_code and error_message
        :return: Tuple[error_code, error_message]
        """
        if self.has_errors:
            print("error_code:", self.error_code)
            print("error_message:", self.error_message)
        return self.error_code, self.error_message
    def __get_tag_text(self, root: ET.Element, tag_name):
        # Look up "<namespace><tag_name>" under root; None when absent.
        tag = root.find(f"{self.xml_namespace}{tag_name}")
        return tag.text if tag is not None else None
| [
"kristof.daja@semcon.com"
] | kristof.daja@semcon.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.