blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2959c1b6c50ff7a27e9df376d62a132185731b61 | d8eb32d7df364342421f31276611f35c7407274d | /report3.py | 7e48bcbe78a5ed9dfc795f9076ef350c5e39a55a | [] | no_license | polinatea/databases | d8db882f94d16cee1efb67afd0351d79c9e0676d | 484a7e24a72cb2a8fe768af1313ee4fa5ce61aef | refs/heads/main | 2023-07-16T15:19:11.506659 | 2021-09-03T12:01:22 | 2021-09-03T12:01:22 | 389,042,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,235 | py | import pyodbc
import tkinter as tk
from tkinter import ttk
from tkinter import *
from reportlab.pdfgen import canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
import re
def report3(txtNum):
    """Render "report3.pdf": an account statement for account number
    *txtNum* — a header, the account/client details, then one line per
    operation (newest first), with a simple page-break at the bottom margin.
    """
    # Establish a connection to the database
    conn = pyodbc.connect(driver = '{SQL Server Native Client 11.0}',server = '192.168.112.103' , database = 'db22205', user = 'User053', password = 'User053#!31')
    cursor = conn.cursor()
    report=canvas.Canvas("report3.pdf")
    # FreeSans is registered so the Cyrillic strings below render in the PDF.
    pdfmetrics.registerFont(TTFont('FreeSans', 'FreeSans.ttf'))
    report.setFont('FreeSans', 10)
    report.drawString(200, 810, "Выписка по счету")
    report.setStrokeColorRGB(0.2,0.5,0.3)
    report.line(0, 805, 1000, 805)
    report.line(0, 805, 1000, 806) # thicker line (second line one point higher)
    #txtNum = '40817810570000123456'
    # Account type, client full name and opening date for this account number.
    cursor.execute("""Select tblAccountType.txtAccountTypeName, tblClient.txtClientSurname, tblClient.txtClientName,
    tblClient.txtClientSecondName, tblAccount.datAccountBegin
    from tblAccountType, tblClient, tblAccount
    Where (tblAccount.intAccountTypeId=tblAccountType.intAccountTypeId ) AND
    (tblAccount.intClientId=tblClient.intClientId) AND
    (tblAccount.txtAccountNumber = ?)""", txtNum)
    row = cursor.fetchone() # fetch the single matching record
    # (x, y) is the current drawing cursor; the PDF origin is bottom-left.
    x = 20
    y = 780
    pdfmetrics.registerFont(TTFont('FreeSans', 'FreeSans.ttf'))
    report.setFont('FreeSans', 16)
    report.setFillColorRGB(1,0,0)
    report.drawString(x + 160, y,txtNum)
    y-= 5
    report.setFillColorRGB(0,0,0)
    report.setFont('FreeSans', 10)
    report.drawString(x , y-10, "Тип счета: " + re.sub(r'\s+', ' ', row[0]))
    report.drawString(x , y-20,"Клиент: "+ row[1].replace(" ", "")+" "+row[2].replace(" ", "")+" "+row[3].replace(" ", ""))
    report.drawString(x , y-30, "Дата открытия: "+str(row[4]))
    y-= 60
    pdfmetrics.registerFont(TTFont('FreeSans','FreeSans.ttf'))
    report.setFont('FreeSans', 12)
    report.setFillColorRGB(0,0,0)
    report.drawString(x, y, "Список операций: ")
    y=y-15
    pdfmetrics.registerFont(TTFont('FreeSans','FreeSans.ttf'))
    report.setFont('FreeSans', 12)
    report.drawString(x, y, "Дата проведения операции:"+"          " + "Тип операции:" +"          "+ "Сумма операции:")
    y=y-5
    report.line(0, y, 1000, y - 3)
    y=y-20
    # All operations on this account, most recent first.
    cursor.execute(""" SELECT tblOperation.datOperation, tblOperationType.txtOperationTypeName, tblOperation.fltValue
    FROM tblOperation, tblOperationType,tblAccountType,tblAccount
    WHERE (tblOperation.intOperationTypeId=tblOperationType.intOperationTypeId) AND (tblOperation.intAccountId=tblAccount.intAccountId)
    AND (tblAccount.intAccountTypeId=tblAccountType.intAccountTypeId) AND (tblAccount.txtAccountNumber=?)
    order by tblOperation.datOperation desc """,txtNum)
    row2=cursor.fetchone()
    print(row2)
    y-=15
    report.setFillColorRGB(0,0,0)
    report.setFont('FreeSans', 12)
    # One line per operation; start a fresh page when the bottom margin is hit.
    while row2:
        report.drawString(x, y, str(row2[0]).replace(" ", "") +"          "+re.sub(r'\s+', ' ', row2[1]) +"          "+ str(row2[2]) )
        row2=cursor.fetchone()
        y=y-15
        y=y-20
        if y <= 40:
            x = 20
            y = 790
            report.showPage() # new page
            pdfmetrics.registerFont(TTFont('FreeSans','FreeSans.ttf'))
            report.setFont('FreeSans', 12)
            # NOTE(review): the continuation-page header reads "Закрытые счета"
            # ("closed accounts") — looks copy-pasted from another report; confirm.
            report.drawString(200, 810, "Закрытые счета")
            report.setStrokeColorRGB(1,0,0)
    y=y-5
    report.line(0, y, 1000, y - 3)
    y=y-20
    report.save()
    # Close the cursor and the database connection
    print ('privet3')
    cursor.close()
    conn.close()
| [
"noreply@github.com"
] | noreply@github.com |
8e80b5a407f1aeb26dc2bd7ddb84d6a2065cd7ff | c03d565ec39b250a373e62431f88d2044f09864f | /backend/catalog/migrations/0001_initial.py | 8f520f8f3fb214080f37d1811599bf544f350069 | [
"MIT"
] | permissive | aevtikheev/full-text-search | e7d7e8ea613e2d93025cf02a5737b2244297d7cc | 25909bbcb6361898f4920c94fa51f93918981124 | refs/heads/master | 2023-04-03T18:13:37.705378 | 2021-04-08T21:48:18 | 2021-04-08T21:48:18 | 353,132,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | # Generated by Django 3.1.7 on 2021-03-31 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial catalog schema: creates the ``Wine`` table."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Wine',
            fields=[
                # Implicit auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('country', models.CharField(max_length=255)),
                ('description', models.TextField(blank=True)),
                ('points', models.IntegerField()),
                # Price is optional, stored with two decimal places.
                ('price', models.DecimalField(blank=True, decimal_places=2,
                                              max_digits=10, null=True)),
                ('variety', models.CharField(max_length=255)),
                ('winery', models.CharField(max_length=255)),
            ],
        ),
    ]
| [
"eft000@gmail.com"
] | eft000@gmail.com |
11f2fe4d1f01ee2b1139b7b7076221a351cbf9e4 | cb26d3f745628cd113f4f954ceac23ce262afbb5 | /Day4/answer_day4_question2.py | 93ae2c3f566d420feb9820a96e4d196c55632b24 | [] | no_license | nama-aman/Qiskit-Challenge-India-2020 | 88e56982012221b4153dc9d73e4f09508c3d7807 | 038956e5b241868637c06cbbf81928f3f9a1d796 | refs/heads/master | 2022-12-16T00:11:15.135572 | 2020-09-06T12:58:47 | 2020-09-06T12:58:47 | 293,277,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py |
### WRITE YOUR CODE BETWEEN THESE LINES - START
# import libraries that are used in the functions below.
from qiskit import QuantumCircuit
import numpy as np
### WRITE YOUR CODE BETWEEN THESE LINES - END
def init_circuit():
    """Return a fresh two-qubit circuit prepared as (|0>+|1>)/sqrt(2) on
    qubit 0 and |1> on qubit 1."""
    circuit = QuantumCircuit(2)
    circuit.h(0)  # Hadamard: qubit 0 into an equal superposition
    circuit.x(1)  # NOT: qubit 1 flipped to |1>
    return circuit
# The initial state has been defined above.
# You'll now have to apply necessary gates in the build_state() function to convert the state as asked in the question.
def build_state():
    """Apply the single required cu3 gate to the initialized circuit."""
    state = init_circuit()
    # One controlled-U3 with angles (theta=0, phi=-pi/2, lambda=0),
    # control on qubit 1, target on qubit 0.
    state.cu3(0, -np.pi / 2, 0, 1, 0)
    return state
| [
"noreply@github.com"
] | noreply@github.com |
49256118e79555242d05bc0d7a022c34619aa4ae | c86cd75be4f5b4eef605fb0f40743406ae19685f | /core/ui_test.py | cd1ce62099cf077a55dbf0934f3f6763c20bac3b | [
"Apache-2.0"
] | permissive | jyn514/oil | 3de53092c81e7f9129c9d12d51a8dfdbcacd397b | 42adba6a1668ff30c6312a6ce3c3d1f1acd529ec | refs/heads/master | 2022-02-23T08:12:48.381272 | 2019-03-15T08:54:31 | 2019-03-15T08:54:31 | 176,316,917 | 0 | 0 | Apache-2.0 | 2019-03-18T15:36:14 | 2019-03-18T15:36:13 | null | UTF-8 | Python | false | false | 279 | py | #!/usr/bin/python -S
from __future__ import print_function
"""
ui_test.py: Tests for ui.py
"""
import unittest
from core import ui # module under test
class UiTest(unittest.TestCase):
  """Smoke test for the core.ui module."""

  def testFoo(self):
    # usage() should emit its message without raising.
    ui.usage('oops')
if __name__ == '__main__':
  # Run the tests when this module is executed directly.
  unittest.main()
| [
"andy@oilshell.org"
] | andy@oilshell.org |
aede3fd3cb4197fe6a020021747a399fd4b17719 | 978ba4aa0a767589f663341c39d39d6b1b301cb2 | /experiment.py | eec95805bf5bc2d77ca6040f770676147482037e | [] | no_license | dalj170/ML-Toy-Case-Framework | fcb0746121b5035e918466e7367159ca05478bb9 | 49ac5d85630181611eaf35ed7c2ef4baba0b3bc5 | refs/heads/master | 2021-05-27T07:53:58.922008 | 2020-04-09T01:16:21 | 2020-04-09T01:16:21 | 254,238,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,583 | py | import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from dataloader import SimpleDataset
from torch.utils.data import DataLoader
from torch.utils.data.dataset import random_split
from datagenerator import Generator, Dynamics
from models import *
# default parameters can be modified pretty easily, no real basis for a 1e-4 learning rate or any of the other settings.
# changing batch sizes or n_data may cause some issues though, as the data is split 80%/20% train/test, and both of
# those resulting numbers must be divisible by the batch_size_train and batch_size_val
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--n_epochs', type=int, default=1000)
parser.add_argument('--batch_size_train', type=int, default=100)
parser.add_argument('--batch_size_val', type=int, default=100)
parser.add_argument('--t_span', type=float, default=None)
parser.add_argument('--dt', type=float, default=0.1)
parser.add_argument('--n_data', type=int, default=500)
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# generating data using functions from datagenerator.py
f = Dynamics
data = Generator(f, args.dt, args.n_data, args.t_span)
# works for an n x 2 matrix but would not for a larger dimensional one
x = data[:, 0]
y = data[:, 1]
# making the dataset and randomly splitting for test/validation (80/20);
# both split sizes must stay divisible by their batch sizes (see header note).
dataset = SimpleDataset(x, y)
train_data, val_data = random_split(dataset, [int(len(x)*.80), int(len(y)*.20)])
train_loader = DataLoader(dataset=train_data, batch_size=args.batch_size_train, shuffle=True)
val_loader = DataLoader(dataset=val_data, batch_size=args.batch_size_val, shuffle=True)
lr = args.lr
n_epochs = args.n_epochs
# define model and optimizer. Adam seems to perform better than SGD
model = EncoderDecoder(args.batch_size_train).to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
# define loss and training functions (functions in models.py)
loss_fn = loss_calc2
train_step = make_train_step(model, loss_fn, optimizer)
losses = []      # per-batch training losses
val_losses = []  # per-batch validation losses
for epoch in range(n_epochs):
    for x_batch, y_batch in train_loader:
        x_batch = x_batch.to(device)
        y_batch = y_batch.to(device)
        # the unsqueezing is done to make the x and y (which is x(t+1)) vectors from vectors that only have one
        # dimension into two dimensional matrices with one dimension being 1. Also done to get orientation correct for
        # matrix multiplication.
        x_batch = x_batch.unsqueeze(1)
        y_batch = y_batch.unsqueeze(1)
        loss = train_step(x_batch, y_batch, args.dt)
        losses.append(loss)
    # Evaluate on the held-out split without tracking gradients.
    with torch.no_grad():
        for x_val, y_val in val_loader:
            x_val = x_val.to(device)
            x_val = x_val.unsqueeze(1)
            y_val = y_val.to(device)
            y_val = y_val.unsqueeze(1)
            model.eval()
            yhat = model(x_val, args.dt)
            val_loss = loss_fn(x_val, y_val, args.dt, yhat, model)
            val_losses.append(val_loss.item())
    if epoch % 25 == 0:
        print("Epoch: {:04d} | train loss: {:0.6f} | val loss: {:0.6f}".format(epoch, losses[len(losses) - 1],
                                                                               val_losses[len(val_losses) - 1]))
# 0 -> 2 -> 4 in terms of layers. 0.weight, 0.bias etc
print(model.ev.float())
# TODO: Status update:
"""
I think I've generalized it better, but now it's harder to tell.
"""
| [
"noreply@github.com"
] | noreply@github.com |
fbad71338ab509e1dfd81e377efb259dc9287a74 | 4e092f6a59c0aad5cfe4235baac06f4f9823c62a | /0026. Remove Duplicates from Sorted Array.py | 3628990533211b662c2e9da54cc9ee0e44197a56 | [] | no_license | xinmiaoo/leetcode_Aug_3 | 72091fd7d23f98fb646ffb6f0b1a3068c930ea3f | 732d8aa126dd5149013a4ce084f37ee8bc123ac7 | refs/heads/master | 2020-07-12T04:34:24.506629 | 2019-08-10T21:54:50 | 2019-08-10T21:54:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums)<1:
return
if len(nums)==1:
return
i=1
while i<len(nums):
if nums[i]==nums[i-1]:
del nums[i]
else:
i+=1
return
| [
"noreply@github.com"
] | noreply@github.com |
06b43121f55062f22988a5b9411f16234b2dd4c9 | a527ab5e3c2cf34e92117d657eda52bcc76e24f8 | /gender.py | d5e7b764b1601b921cb0f6f51492e26b2ef874a0 | [] | no_license | pankajdahilkar/python_codes | 2e4246aa9b725b28f3cccb4c7a4c7094927b8b5a | 56fda0257fb4769da59f2b5c81536f9a5b9ee2db | refs/heads/master | 2021-05-25T17:16:55.765443 | 2020-04-13T07:21:02 | 2020-04-13T07:21:02 | 253,838,761 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | import csv
# Ask for a name and report whether it appears anywhere in Female.csv.
name = input("Enter your name ")
found = False
with open("Female.csv", 'r', encoding='utf-8') as f:
    reader = csv.reader(f)
    for row in reader:
        # A name counts as female if any field of any row matches exactly.
        if name in row:
            found = True
            break
# Print a single verdict.  (The previous for/else printed "not found" once
# per non-matching row, and kept scanning the file after a match.)
if found:
    print(name, "is girl")
else:
    print("not found")
| [
"pankajmdahilkar@gmail.com"
] | pankajmdahilkar@gmail.com |
45a17b83e5063af268f032dd425b660dcf74ebf6 | ae67cd669cb733f0ed7b4b02c1320757c91ebd9f | /lut/__init__.py | ff33cffbf0d228820ebf83cb62a25232e9a5b6ea | [] | no_license | anguelos/lut | bef878485a6ce31195593a504f0a819032e878be | 6898fefc748cbbc323415e96e22c1711dfcce1e1 | refs/heads/master | 2016-09-14T01:29:15.626110 | 2016-05-06T16:50:22 | 2016-05-06T16:50:22 | 58,217,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | import core
import train
import models
import view
import loss
import layers
import metric
import ds
# Force-refresh every submodule so edits are picked up when the package is
# re-imported during interactive development.
# NOTE(review): the bare reload() builtin and the implicit relative imports
# below are Python 2 only; under Python 3 this module will not import.
core=reload(core)
models=reload(models)
view=reload(view)
loss=reload(loss)
layers=reload(layers)
metric=reload(metric)
ds=reload(ds)
train=reload(train)
# Flatten each submodule's public names into the package namespace.
from core import *
from train import *
from models import *
from view import *
from ds import *
from loss import *
from layers import *
from metric import *
| [
"anguelos.nicolaou@gmail.com"
] | anguelos.nicolaou@gmail.com |
f4430e22cc2f6c99418d9e381141e4def5bbadbe | 07504838d12c6328da093dce3726e8ed096cecdb | /pylon/resources/properties/loadGroupName.py | 91317396187f0aba508194373e1dc407e7c35dc1 | [] | no_license | lcoppa/fiat-lux | 9caaa7f3105e692a149fdd384ec590676f06bf00 | 7c166bcc08768da67c241078b397570de159e240 | refs/heads/master | 2020-04-04T02:47:19.917668 | 2013-10-10T10:22:51 | 2013-10-10T10:22:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,416 | py | """loadGroupName userdefined property type, originally defined in resource
file set iot 90:00:00:05:00:00:00:00-1."""
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:15.
import pylon.resources.base
from pylon.resources.userdefined import userdefined
import pylon.resources.enumerations.char_encoding_t
class loadGroupName(pylon.resources.base.Structure):
    """loadGroupName userdefined property type. Text load group name. Name
    for a load group to be used by optional user interface applications;
    used to create an array of load group names."""

    def __init__(self):
        # Property key 10 in scope 1 (mirrored in _property_scope/_property_key).
        super().__init__(
            key=10,
            scope=1
        )
        # One-byte character-encoding selector for the name field.
        self.__encoding = pylon.resources.enumerations.char_encoding_t.char_encoding_t(
        )
        self._register(('encoding', self.__encoding))
        # Fixed-size buffer of 120 unsigned bytes holding the encoded name.
        self.__name = pylon.resources.base.Array(
            [
                pylon.resources.base.Scaled(
                    size=1,
                    signed=False,
                    minimum=0,
                    maximum=255
                ) for i in range(120)
            ]
        )
        self._register(('name', self.__name))
        # Default value: all 121 bytes zero (encoding 0, empty name).
        self._default_bytes = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        self._original_name = 'UCPTloadGroupName'
        self._property_scope, self._property_key = 1, 10
        self._definition = userdefined.add(self)

    def __set_encoding(self, v):
        self.__encoding._value = v

    # Read/write access to the underlying encoding value.
    encoding = property(
        lambda self: self.__encoding._value,
        __set_encoding,
        None,
        """."""
    )

    def __set_name(self, v):
        self.__name._value = v

    # Read/write access to the underlying 120-byte name buffer.
    name = property(
        lambda self: self.__name._value,
        __set_name,
        None,
        """."""
    )

    def __set(self, v):
        # Whole-structure assignment: copy both fields from another instance.
        if not isinstance(v, type(self)):
            raise TypeError(
                'Expected instance of {0}, got {1}'.format(
                    type(self),
                    type(v)
                )
            )
        self.__set_encoding(v.__encoding)
        self.__set_name(v.__name)

    _value = property(lambda self: self, __set)

    def __len__(self):
        """Return the length of the type, in bytes."""
        # 1 encoding byte + 120 name bytes.
        return 121
if __name__ == '__main__':
    # unit test code: constructing the property (which also registers it
    # with `userdefined`) must not raise.
    item = loadGroupName()
    pass
| [
"lcoppa@rocketmail.com"
] | lcoppa@rocketmail.com |
e9f5931b0f3981e13979a53f1112073389b40d5c | 106a93f16943bb978ed5e793d2b4295dd32c72e7 | /BiliLive/src/extension.py | 06e6892f0086a8bbfdb12e96362e6fb9a475aa44 | [
"MIT"
] | permissive | ml87124909/BiliLive | c69fc62db48533d340b5e57284755fdae5bd7790 | d022c10411c4055fffb409633fa1cc99184ffcce | refs/heads/master | 2022-03-04T00:20:42.577051 | 2019-07-17T14:40:31 | 2019-07-17T14:40:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,383 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/3/16 下午 11:15
# @Author : kamino
import os
import json
import requests
import random
import re
import time
import socket
from .database import DbLink
from .timer import Timer
from .auth import Auth
from .config import Config
from .error import Control
from .encrypt import Encrypt
class Extension(object):
    """Grab-bag of chat-command helpers for the live-room bot: daily sign-in
    tracking, message classification, text color control, and remote music
    player control over a local socket.
    """

    # Process-wide cache: 'sign' is today's [(uid, "HH:MM:SS"), ...] sign-ins,
    # 'user' maps uid -> display name.
    E = {'sign': [], 'user': {}}

    @staticmethod
    def GetWord():
        """Fetch one random word (row) from the `wordlist-2017` table."""
        dl = DbLink()
        # NOTE(review): SQL built with %-formatting; safe here only because the
        # value is a locally generated integer.
        data = dl.query("SELECT * FROM `wordlist-2017` WHERE `ID` = '%d';" % random.randint(1, 5536))
        return data[0]

    @staticmethod
    def GetYiyan():
        """Fetch a "hitokoto" quote; returns (text, source) or (error, 'ERROR')."""
        try:
            data = requests.get('https://v1.hitokoto.cn/', headers={
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063'}).content.decode()
            arr = json.loads(data)
            return (arr['hitokoto'], arr['from'])
        except Exception as e:
            return (str(e), 'ERROR')

    @staticmethod
    def SignedList():
        """Return today's sign-in rows from the database, ordered by time."""
        dl = DbLink()
        data = dl.query(
            "SELECT * FROM `sign` WHERE `date` = '%s' ORDER BY 'time' ASC;" % Timer.stamp2str(Timer.timestamp(),
                                                                                              '%Y-%m-%d'))
        return data

    @staticmethod
    def SignAdd(uid, uname):
        """Record a sign-in for *uid* and return a user-facing status message.

        Sign-ins before 04:00 are rejected; duplicates return the earlier
        rank.  New sign-ins are appended to the in-memory cache and the DB.
        """
        Extension.E['user'][uid] = uname
        if int(Timer.stamp2str(Timer.timestamp(), '%H')) < 4:
            return '请在4点之后打卡,要注意休息哦'
        # Already signed in today: report the original time and rank.
        for k, s in enumerate(Extension.E['sign']):
            if uid in s:
                return f'{uname[0:2]}*已于{str(s[1])[0:5]}打卡成功,排名{k + 1}'
        dl = DbLink()
        Extension.E['sign'].append((uid, Timer.stamp2str(Timer.timestamp(), '%H:%M:%S')))
        dl.insert("INSERT INTO `sign`(`uid`, `name`, `date`, `time`) VALUES ('%s', '%s', '%s', '%s');" % (
            uid, uname, Timer.stamp2str(Timer.timestamp(),
                                        '%Y-%m-%d'), Timer.stamp2str(Timer.timestamp(),
                                                                     '%H:%M:%S')))
        # Rank = number of sign-ins recorded for today (including this one).
        rk = dl.query(
            "SELECT COUNT(*) FROM `sign` WHERE `date` = '%s' ORDER BY 'time' ASC;" % Timer.stamp2str(Timer.timestamp(),
                                                                                                     '%Y-%m-%d'))[0][0]
        return f'{uname[0:2]}*打卡成功,今日排名{rk}'

    @staticmethod
    def SignRank():
        """Return up to the first five sign-ins of today as display strings."""
        if int(Timer.stamp2str(Timer.timestamp(), '%H')) < 4:
            Extension.E['sign'].clear()
            return ['请在4点之后打卡,要注意休息哦']
        # Cold cache: rebuild today's list from the database.
        if len(Extension.E['sign']) == 0:
            for sn in Extension.SignedList():
                Extension.E['sign'].append((sn[1], sn[4]))
        msg = []
        rank = Extension.E['sign']
        if len(rank) == 0:
            return ['暂无(可能会有1分钟延迟)']
        elif len(rank) < 5:
            max = len(rank)  # NOTE(review): shadows the builtin `max`
        else:
            max = 5
        for i in range(max):
            # Resolve missing display names lazily via the auth service.
            try:
                uname = Extension.E['user'][rank[i][0]]
            except KeyError:
                Extension.E['user'][rank[i][0]] = Auth.get_uname(rank[i][0])
                uname = Extension.E['user'][rank[i][0]]
            msg.append(f'No{i + 1} {uname} {rank[i][1]}')
        return msg

    @staticmethod
    def IsSign(text=None):
        """Return True when *text* looks like a sign-in command."""
        if re.search(r'打(.*)卡', text) == None and re.search(r'签(.*)到', text) == None:
            return False
        return True

    @staticmethod
    def HasKey(text, key):
        """Return True when regex *key* matches anywhere in *text*."""
        if re.search(key, text) == None:
            return False
        return True

    @staticmethod
    def ChgColor(text=None):
        """Parse a "color <name>" command and store the RGBA preset; returns
        True on success, False on no match or unknown color name."""
        r = re.search(r'color (.*)', text)
        if r == None:
            return False
        color = r.group(1)
        # (999, 999, 999, 999) is the sentinel for "random"; (255, 117, 0, 0)
        # restores the default.
        rgb = {
            'random': (999, 999, 999, 999),
            'reset': (255, 117, 0, 0),
            'red': (255, 0, 0, 0),
            'green': (0, 255, 0, 0),
            'blue': (0, 0, 0, 255)
        }
        try:
            Config.set('color', rgb[color])
            return True
        except KeyError:
            return False

    @staticmethod
    def ForbidBot(*args):
        """Mute the bot (minutes argument currently ignored)."""
        Config.set('forbid', False)

    @staticmethod
    def AutoReboot(*args):
        """Force a restart every hour, forever (meant to run in its own thread)."""
        while True:
            time.sleep(60 * 60)
            print("AUTO RESTART AT %s" % Timer.stamp2str(Timer.timestamp(), '%H:%M:%S'))
            Control.force_exit()

    @staticmethod
    def MusicCtrlCore(arr):
        """
        Send a JSON command to the music server and return its 'data' payload.
        TODO: make this asynchronous.
        :param arr: JSON-serializable command, e.g. {"action": "next"}
        :return: decoded 'data' field, or None on any failure/timeout
        """
        try:
            # Transport is chosen by config: unix-domain socket or TCP.
            if Config.get('bgmserver') == 'unix':
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                sock.settimeout(10)
                sock.connect(Config.get('unixpath'))
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(10)
                sock.connect((Config.get('tcphost'), int(Config.get('tcpport'))))
            sock.send(json.dumps(arr).encode('utf-8'))
            recv = json.loads(sock.recv(10240).decode('utf-8'))
            if recv['data'] is False:
                raise Exception('执行失败')
            return recv['data']
        except Exception as e:
            print(e)
            return None

    @staticmethod
    def MusicNext():
        """
        Skip to the next track.
        :return: user-facing status message
        """
        recv = Extension.MusicCtrlCore({"action": "next"})
        if recv is None:
            return '切歌失败'
        else:
            return '即将播放 %s' % recv['name']

    @staticmethod
    def MusicPlaying():
        """
        What is playing now (chat-message form).
        :return: user-facing status message
        """
        recv = Extension.MusicCtrlCore({"action": "playing"})
        if recv is None:
            return '不知道呢'
        else:
            return '当前播放 %s' % recv['name']

    @staticmethod
    def MusicPlayingShow():
        """
        Current track name for on-screen display (no message wrapping).
        :return: track name or 'Unknown'
        """
        recv = Extension.MusicCtrlCore({"action": "playing"})
        if recv is None:
            return 'Unknown'
        else:
            return recv['name']

    @staticmethod
    def MusicWillplay():
        """
        What plays next.
        :return: user-facing status message
        """
        # NOTE(review): queries "playing", not a "next"/queue action — the
        # reply is the current track, not the upcoming one; confirm intent.
        recv = Extension.MusicCtrlCore({"action": "playing"})
        if recv is None:
            return '不知道呢'
        else:
            return '下一首 %s' % recv['name']

    @staticmethod
    def IsAddMusic(text):
        """
        Decide whether *text* is a song request ("点歌 <id>").
        :param text: raw chat message
        :return: NetEase song URL on success, otherwise False
        """
        mat = re.search(r'点歌(.*)', text)
        if mat is None:
            return False
        # NOTE(review): group(0) always contains '点歌', so this default-song
        # branch can never trigger; group(1) was probably intended.
        if mat.group(0) == '':
            return 'https://music.163.com/#/song?id=156811'
        mid = mat.group(1).strip()
        try:
            int(mid)  # only accept a pure numeric song id
            return 'https://music.163.com/#/song?id=%s' % mid
        except ValueError:
            return False

    @staticmethod
    def MusicAdd(url):
        """
        Queue a song.
        :param url: NetEase Cloud Music track link
        :return: user-facing status message
        """
        recv = Extension.MusicCtrlCore({"action": "add", "url": url})
        if recv is None:
            return '失败啦'
        else:
            return '成功 %s' % recv['name']

    @staticmethod
    def HelloKamino(message='hello kamino'):
        """
        Ping the maintainer via the WeChat notify endpoint.
        :return: user-facing status message
        """
        # NOTE(review): `finally: return` always wins, so '失败啦' is returned
        # even when the request succeeds and '稍等片刻' was reached; it also
        # swallows any exception from requests/json. Almost certainly a bug.
        try:
            response = requests.get('https://api.isdut.cn/notice/wx?text=livenotice&desp=%s' % message)
            print(response.content)
            if json.loads(response.content.decode('utf-8'))['status'] == True:
                return '稍等片刻'
        finally:
            return '失败啦'

    @staticmethod
    def FuxiDays():
        """
        Days remaining until the configured 'end-time' (2020 exam countdown).
        :return: user-facing countdown message
        """
        return '距离2020考研剩余%d天!' % int((Timer.str2stamp(Config.get('end-time')) - Timer.timestamp()) / 86400 + 1)
| [
"lishichenjs@163.com"
] | lishichenjs@163.com |
b7faec9f841a1e905120e9fd48f0dc20eb80aacb | 90a83608759f16e587709ef04b3369b400deb200 | /blog/admin.py | cad8431ae3d541f7bab81489c6d21c906c87ede2 | [] | no_license | rafaelmeyer95/curso_django | dfc0311ecfb0cd391b388207741a59a6b6246818 | ac63c86e4d454c61d76ae0497af7dce4af37cb1d | refs/heads/master | 2023-07-27T10:43:10.765289 | 2021-09-12T02:29:27 | 2021-09-12T02:29:27 | 405,452,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | from django.contrib import admin
from .models import Post
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for blog Post objects."""
    list_display = ('titulo','autor','publicado','status')   # change-list columns
    list_filter = ('status','criado','publicado','autor')    # sidebar filters
    raw_id_fields = ('autor',)            # author picked via id widget, not a dropdown
    date_hierarchy = 'publicado'          # date drill-down by publication date
    search_fields = ('titulo','conteudo')
    prepopulated_fields = {'slug':('titulo',)}  # slug auto-filled from the title
# Register your models here.
| [
"rafael_meyer_@hotmail.com"
] | rafael_meyer_@hotmail.com |
16ea94b565c90c3077c4932cb3e4640dfbff2612 | d85de32efcb4a6f876972e08f684270b9897fa1b | /cDCGAN.py | cc81103f3ce63bef8d9f5ff5d75d6000151c7b23 | [] | no_license | saumyasinha/Logo_generation_GAN | e8f0273c826875853213c15c0df1da8aaac50522 | 219fb2981d19499da1747fb67bfd294ef5473eb0 | refs/heads/master | 2020-03-08T13:52:59.615392 | 2018-05-02T02:19:49 | 2018-05-02T02:19:49 | 128,169,799 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,830 | py | from __future__ import print_function, division
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.layers.convolutional import UpSampling2D, Conv2D, Conv2DTranspose
import matplotlib.pyplot as plt
import numpy as np
N_CLASSES=32  # number of conditional labels (icon clusters) fed to the GAN
class cDCGAN():
    """Conditional DCGAN for 32x32 RGB logo/icon images.

    The generator and discriminator are both conditioned on an integer class
    label via an Embedding that is element-wise multiplied with the other
    input (noise for the generator, flattened image for the discriminator).
    """

    def __init__(self):
        # Image geometry: 32x32 RGB, channels-last.
        self.img_rows = 32
        self.img_cols = 32
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.num_classes = N_CLASSES
        self.latent_dim = 100
        optimizer = Adam(0.0002, 0.5)
        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss=['binary_crossentropy'],
                                   optimizer=optimizer,
                                   metrics=['accuracy'])
        # Build and compile the generator
        self.generator = self.build_generator()
        self.generator.compile(loss=['binary_crossentropy'],
                               optimizer=optimizer)
        # The generator takes noise and the target label as input
        # and generates the corresponding digit of that label
        noise = Input(shape=(100,))
        label = Input(shape=(1,))
        img = self.generator([noise, label])
        print(img.shape)
        # For the combined model we will only train the generator
        self.discriminator.trainable = False
        # The discriminator takes generated image as input and determines validity
        # and the label of that image
        valid = self.discriminator([img, label])
        print(valid.shape)
        # The combined model (stacked generator and discriminator) takes
        # noise as input => generates images => determines validity
        self.combined = Model([noise, label], valid)
        self.combined.compile(loss=['binary_crossentropy'],
                              optimizer=optimizer)

    def build_generator(self):
        """Return the label-conditioned generator: (noise, label) -> 32x32x3 image.

        Upsampling path: 4x4x512 dense seed, then three stride-2 transposed
        convolutions (4 -> 8 -> 16 -> 32), tanh output in [-1, 1].
        """
        model = Sequential()
        model.add(Dense(4 * 4 * 512, input_dim=self.latent_dim))
        model.add(Reshape((4, 4, 512)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Conv2DTranspose(256, kernel_size=5, strides=2, padding='same'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Conv2DTranspose(128, kernel_size=5, strides=2, padding='same'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Conv2DTranspose(self.channels, kernel_size=5, strides=2, padding='same'))
        model.add(Activation("tanh"))
        model.summary()
        # Condition on the label: embed it to latent_dim and multiply with the noise.
        noise = Input(shape=(self.latent_dim,))
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))
        model_input = multiply([noise, label_embedding])
        img = model(model_input)
        print([noise, label][:5])
        return Model([noise, label], img)

    def build_discriminator(self):
        """Return the label-conditioned discriminator: (image, label) -> real/fake prob.

        The label embedding (sized to the flattened image) is multiplied with
        the flattened image, reshaped back, then classified by a conv stack.
        """
        model = Sequential()
        # model.add(Dense(512, input_dim=np.prod(self.img_shape)))
        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))
        model.summary()
        img = Input(shape=self.img_shape)
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes, np.prod(self.img_shape))(label))
        # print(label_embedding.shape)
        flat_img = Flatten()(img)
        model_input = multiply([flat_img, label_embedding])
        model_input=Reshape(self.img_shape)(model_input)
        # print(model_input.shape)
        validity = model(model_input)
        # print(validity.shape)
        # print([img, label][:3])
        return Model([img, label], validity)

    def train(self, epochs, batch_size=16, save_interval=5):
        """Run the alternating D/G training loop on the icon dataset.

        Each epoch trains the discriminator on a half batch of real and a
        half batch of generated images, then trains the generator (through
        the frozen discriminator) on a full batch; sample grids are saved
        every `save_interval` epochs.
        """
        # Load the dataset (hard-coded local .npy dumps).
        # X_train = np.load('C:\\Users\Shivendra\Desktop\GAN\icon_dataset.npy')
        # y_train = np.load('C:\\Users\Shivendra\Desktop\GAN\icon_color_label.npy')
        # print(X_train.shape)
        X_train = np.load('C:\\Users\Shivendra\Desktop\GAN\icon_dataset_from_hd5.npy')
        y_train = np.load('C:\\Users\Shivendra\Desktop\GAN\\rc_cluster_icon_dataset_from_hd5.npy')
        print(X_train.shape)
        print(y_train.shape)
        print(y_train[0])
        ## changing dim from (3,32,32) to (32,32,3)
        X_train = np.rollaxis(X_train, 3, 1)
        X_train = np.rollaxis(X_train, 3, 2)
        # Keep only as many images as there are labels.
        X_train = X_train[:y_train.shape[0]]
        print(X_train.shape)
        # Rescale -1 to 1 (matches the generator's tanh output range)
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        # X_train = np.expand_dims(X_train, axis=4)
        y_train = y_train.reshape(-1, 1)
        half_batch = int(batch_size / 2)
        for epoch in range(epochs):
            # ---------------------
            #  Train Discriminator
            # ---------------------
            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], half_batch)
            imgs, labels = X_train[idx], y_train[idx]
            noise = np.random.normal(0, 1, (half_batch, 100))
            # Generate a half batch of new images
            gen_imgs = self.generator.predict([noise, labels])
            # print(gen_imgs.shape)
            valid = np.ones((half_batch, 1))
            fake = np.zeros((half_batch, 1))
            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid)
            d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
            # ---------------------
            #  Train Generator
            # ---------------------
            noise = np.random.normal(0, 1, (batch_size, 100))
            valid = np.ones((batch_size, 1))
            # Generator wants discriminator to label the generated images as the intended
            # digits
            sampled_labels = np.random.randint(0, N_CLASSES, batch_size).reshape(-1, 1)
            # Train the generator
            g_loss = self.combined.train_on_batch([noise, sampled_labels], valid)
            # Plot the progress
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss))
            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                self.save_imgs(epoch)

    def save_imgs_COLOUR(self, epoch):
        """Save a 2x5 grid of generated colour samples (legacy; not called).

        NOTE(review): noise has r*c = 10 rows but sampled_labels has
        N_CLASSES = 32 rows — the two generator inputs disagree in batch
        size, so this would fail if invoked; confirm before use.
        """
        r, c = 2, 5
        noise = np.random.normal(0, 1, (r * c, 100))
        sampled_labels = np.arange(0, N_CLASSES).reshape(-1, 1)
        # Label index -> human-readable colour name for panel titles.
        colour_map = {0: 'red',
                      1: 'green',
                      2: 'blue',
                      3: 'yellow',
                      4: 'orange',
                      5: 'white',
                      6: 'black',
                      7: 'pink',
                      8: 'purple',
                      9: 'gray'}
        gen_imgs = self.generator.predict([noise, sampled_labels])
        # Rescale images 0 - 1 (from the tanh range [-1, 1])
        gen_imgs = 0.5 * gen_imgs + 0.5
        fig, axs = plt.subplots(r, c)
        fig.suptitle("DCGAN: Generated colours", fontsize=12)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :, :], cmap='jet')
                axs[i, j].set_title("%s " % colour_map[int(sampled_labels[cnt])])
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("C:\\Users\Shivendra\Desktop\GAN\GAN_HCML\images_cDCGAN\%d.png" % epoch)
        plt.close()

    def save_imgs(self, epoch):
        """Save an 8x4 grid with one generated sample per cluster label."""
        r, c = 8, 4
        # r*c == N_CLASSES, so every label 0..31 appears exactly once.
        noise = np.random.normal(0, 1, (r * c, 100))
        sampled_labels = np.arange(0, N_CLASSES).reshape(-1, 1)
        gen_imgs = self.generator.predict([noise, sampled_labels])
        # Rescale images 0 - 1 (from the tanh range [-1, 1])
        gen_imgs = 0.5 * gen_imgs + 0.5
        fig, axs = plt.subplots(r, c)
        fig.suptitle("CDCGAN: Generated clusters", fontsize=12)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :, :], cmap='jet')
                axs[i, j].set_title("Cluster:%s " % cnt)
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("C:\\Users\Shivendra\Desktop\GAN\GAN_HCML\images_CDCGAN_RC_clusters\%d.png" % epoch)
        plt.close()
if __name__ == '__main__':
    # Train the conditional DCGAN and snapshot sample grids every
    # `save_interval` epochs (via save_imgs).
    cdcgan = cDCGAN()
    cdcgan.train(epochs=50000, batch_size=16, save_interval=10000)
"saumya.somu09@gmail.com"
] | saumya.somu09@gmail.com |
e02fdfc77ed8ff75c967cc103270bbf078cc1a93 | dca131a24069267482c0cbfb84fb0dd5e0ad0c94 | /python/40-49/problem48.py | df6784dc9e350e097f2ab7f5b5f867ae681110a8 | [] | no_license | jpzgoku/projectEuler | 23d7567a6836875634b5ac30df2baafaa1cead3e | d156ae1f48b4f8dcc3f0eda2ab7119ac0ae6410f | refs/heads/master | 2021-01-13T04:09:06.671692 | 2018-09-19T04:20:54 | 2018-09-19T04:20:54 | 77,886,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | def self_powers(max):
return [x ** x for x in range(1, max + 1)]
if __name__ == '__main__':
    digit_string = str(sum(self_powers(1000)))
    # Project Euler 48 asks only for the last ten digits of the sum.
    print(digit_string[-10:])
| [
"jpzgoku@hotmail.com"
] | jpzgoku@hotmail.com |
1afefbe7797fabd4b8f9a7ab4164ea1bf07b73e6 | b13c95cb06434a985f9da51575f1eb3fa484cb0c | /helloworld/main.py | 9320a568b423d22b5eaab013607325ea89c83177 | [] | no_license | shandre-github/minikube | aa34294d6355ade16e0155bd5caef6a946840e69 | 94cfe856bf202e90464c2d85ccabe1ce673dbeb2 | refs/heads/master | 2023-03-13T15:11:39.785525 | 2021-02-25T04:53:19 | 2021-02-25T04:53:19 | 341,305,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | import os
from flask import Flask

app = Flask(__name__)


@app.route('/')
def index():
    """Return a greeting containing the pod name.

    MY_POD_NAME is presumably injected into the container environment
    (e.g. via the Kubernetes downward API) -- confirm in the deployment
    manifest. If it is unset the greeting shows 'None'.
    """
    name = os.environ.get('MY_POD_NAME')
    return 'Hello World from {name}'.format(name=name)


if __name__ == '__main__':
    # Fix: guard the development server behind __main__ so importing this
    # module (e.g. from a WSGI server such as gunicorn) does not also
    # start a second server as a side effect of the import.
    app.run(host='0.0.0.0', port=8080)
"shpnaruto@gmail.com"
] | shpnaruto@gmail.com |
1080efe864c0987d212ee7dc9f93c2eb3b2aac02 | ced90a261ca425d86581ff301c21ce08e005c067 | /tranx/datasets/django/dataset.py | 6e4e64b95bf293feb1a5c1198b55baed0eb6137f | [] | no_license | gaoliujie2016/tranx-1 | 0a9727d9f5bf96ea0874b2c723045d734fa824d6 | f9e4de91f678284f2935e2f9c4cdf71a32d8deca | refs/heads/master | 2022-03-08T14:49:42.148163 | 2019-08-27T18:15:21 | 2019-08-27T18:15:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,743 | py | # coding=utf-8
from __future__ import print_function
import torch
import re
import pickle
import ast
import astor
import nltk
import sys
import numpy as np
from asdl.lang.py.py_asdl_helper import python_ast_to_asdl_ast, asdl_ast_to_python_ast
from asdl.lang.py.py_transition_system import PythonTransitionSystem
from asdl.hypothesis import *
from asdl.lang.py.py_utils import tokenize_code
from components.action_info import ActionInfo, get_action_infos
# Regexes recognizing Python snippet prefixes that cannot be parsed on their
# own (sub-statement fragments); used by Django.canonicalize_code to wrap
# them into a syntactically complete module.
p_elif = re.compile(r'^elif\s?')
p_else = re.compile(r'^else\s?')
p_try = re.compile(r'^try\s?')
p_except = re.compile(r'^except\s?')
p_finally = re.compile(r'^finally\s?')
p_decorator = re.compile(r'^@.*')
# Matches a single- or double-quoted string literal, tolerating escaped
# quotes inside (the negative lookbehind skips \" and \').
QUOTED_STRING_RE = re.compile(r"(?P<quote>['\"])(?P<string>.*?)(?<!\\)(?P=quote)")


def replace_string_ast_nodes(py_ast, str_map):
    # Rewrite every string-literal node in `py_ast` whose value appears in
    # `str_map`, replacing it with the mapped placeholder string.
    for node in ast.walk(py_ast):
        if isinstance(node, ast.Str):
            str_val = node.s
            if str_val in str_map:
                node.s = str_map[str_val]
            else:
                # handle cases like `\n\t` in string literals
                # NOTE(review): str.decode('string_escape') is Python-2-only;
                # under Python 3 this branch raises AttributeError.
                for key, val in str_map.items():
                    str_literal_decoded = key.decode('string_escape')
                    if str_literal_decoded == str_val:
                        node.s = val
class Django(object):
    @staticmethod
    def canonicalize_code(code):
        """Pad a code fragment so that `ast.parse` accepts it.

        Django one-liners may be bare elif/else/except/finally clauses,
        decorators, or headers ending in ':'; each case is wrapped with the
        minimal dummy context needed to form a complete statement.
        """
        if p_elif.match(code):
            code = 'if True: pass\n' + code
        if p_else.match(code):
            code = 'if True: pass\n' + code
        # NOTE(review): the `try` branch APPENDS a dummy handler after the
        # code, while the `except`/`finally` branches PREPEND a `try:`.
        # The asymmetry looks deliberate (a bare `try:` needs a following
        # handler to parse) but is worth confirming.
        if p_try.match(code):
            code = code + 'pass\nexcept: pass'
        elif p_except.match(code):
            code = 'try: pass\n' + code
        elif p_finally.match(code):
            code = 'try: pass\n' + code
        if p_decorator.match(code):
            code = code + '\ndef dummy(): pass'
        if code[-1] == ':':
            code = code + 'pass'
        return code
    @staticmethod
    def canonicalize_str_nodes(py_ast, str_map):
        """Replace string literals in `py_ast` with their placeholder ids
        from `str_map`.

        NOTE(review): this mirrors the module-level
        replace_string_ast_nodes almost verbatim -- consider consolidating.
        """
        for node in ast.walk(py_ast):
            if isinstance(node, ast.Str):
                str_val = node.s
                if str_val in str_map:
                    node.s = str_map[str_val]
                else:
                    # handle cases like `\n\t` in string literals
                    # NOTE(review): Python-2-only str.decode('string_escape').
                    for str_literal, slot_id in str_map.items():
                        str_literal_decoded = str_literal.decode('string_escape')
                        if str_literal_decoded == str_val:
                            node.s = slot_id
    @staticmethod
    def canonicalize_query(query):
        """Canonicalize a natural-language query.

        Quoted string literals are replaced with `_STR:i_` placeholders
        (deduplicated, '%s' excluded), the query is tokenized with nltk,
        and dotted names like foo.bar are additionally expanded into a
        bracketed token run.

        Returns (canonical_query, str_map) where str_map maps each
        original string literal to its placeholder.
        """
        str_count = 0
        str_map = dict()
        matches = QUOTED_STRING_RE.findall(query)
        # de-duplicate
        cur_replaced_strs = set()
        for match in matches:
            # If one or more groups are present in the pattern,
            # it returns a list of groups
            quote = match[0]
            str_literal = match[1]
            quoted_str_literal = quote + str_literal + quote
            if str_literal in cur_replaced_strs:
                # Literal seen before (possibly with the other quote style):
                # reuse its existing slot id.
                query = query.replace(quoted_str_literal, str_map[str_literal])
                continue
            # FIXME: substitute the ' % s ' with
            if str_literal in ['%s']:
                continue
            str_repr = '_STR:%d_' % str_count
            str_map[str_literal] = str_repr
            query = query.replace(quoted_str_literal, str_repr)
            str_count += 1
            cur_replaced_strs.add(str_literal)
        # tokenize
        query_tokens = nltk.word_tokenize(query)
        new_query_tokens = []
        # break up function calls like foo.bar.func: the original token is
        # kept and followed by '[' foo . bar . func ']'.
        for token in query_tokens:
            new_query_tokens.append(token)
            i = token.find('.')
            if 0 < i < len(token) - 1:
                new_tokens = ['['] + token.replace('.', ' . ') .split(' ') + [']']
                new_query_tokens.extend(new_tokens)
        query = ' '.join(new_query_tokens)
        # Undo tokenization damage around '%s' placeholders.
        query = query.replace('\' % s \'', '%s').replace('\" %s \"', '%s')
        return query, str_map
    @staticmethod
    def canonicalize_example(query, code):
        """Canonicalize one (query, code) pair.

        The query's string literals become `_STR:i_` placeholders; the same
        placeholders are substituted into the parsed code AST, which is then
        re-rendered via astor so the surface form is normalized.

        Returns (query_tokens, canonical_code, str_map).
        """
        canonical_query, str_map = Django.canonicalize_query(query)
        query_tokens = canonical_query.split(' ')
        canonical_code = Django.canonicalize_code(code)
        ast_tree = ast.parse(canonical_code)
        Django.canonicalize_str_nodes(ast_tree, str_map)
        canonical_code = astor.to_source(ast_tree)
        # sanity check
        # decanonical_code = Django.decanonicalize_code(canonical_code, str_map)
        # decanonical_code_tokens = tokenize_code(decanonical_code)
        # raw_code_tokens = tokenize_code(code)
        # if decanonical_code_tokens != raw_code_tokens:
        #     pass
        # try:
        #     ast_tree = ast.parse(canonical_code).body[0]
        # except:
        #     print('error!')
        #     canonical_code = Django.canonicalize_code(code)
        # gold_ast_tree = ast.parse(canonical_code).body[0]
        # str_map = {}
        # parse_tree = python_ast_to_asdl_ast(gold_ast_tree, grammar)
        # gold_source = astor.to_source(gold_ast_tree)
        # ast_tree = asdl_ast_to_python_ast(parse_tree, grammar)
        # source = astor.to_source(ast_tree)
        # assert gold_source == source, 'sanity check fails: gold=[%s], actual=[%s]' % (gold_source, source)
        #
        # # action check
        # parser = PythonTransitionSystem(grammar)
        # actions = parser.get_actions(parse_tree)
        #
        # hyp = Hypothesis()
        # for action in actions:
        #     assert action.__class__ in parser.get_valid_continuation_types(hyp)
        #     if isinstance(action, ApplyRuleAction):
        #         assert action in parser.get_valid_continuations(hyp)
        #     hyp.apply_action(action)
        #
        # src_from_hyp = astor.to_source(asdl_ast_to_python_ast(hyp.tree, grammar))
        # assert src_from_hyp == gold_source
        return query_tokens, canonical_code, str_map
    @staticmethod
    def parse_django_dataset(annot_file, code_file, asdl_file_path, max_query_len=70, vocab_freq_cutoff=10):
        """Parse the paired annotation/code corpus into Example objects.

        First pass: canonicalize each pair, convert the code to an ASDL AST
        and its action sequence, and sanity-check that replaying the actions
        reconstructs the gold source. Second pass: build Example objects,
        split them into train/dev/test by index, and build the source,
        primitive and code-token vocabularies from the training split.

        Returns ((train_examples, dev_examples, test_examples), vocab).
        """
        asdl_text = open(asdl_file_path).read()
        grammar = ASDLGrammar.from_text(asdl_text)
        transition_system = PythonTransitionSystem(grammar)
        loaded_examples = []
        from components.vocab import Vocab, VocabEntry
        from components.dataset import Example
        for idx, (src_query, tgt_code) in enumerate(zip(open(annot_file), open(code_file))):
            src_query = src_query.strip()
            tgt_code = tgt_code.strip()
            src_query_tokens, tgt_canonical_code, str_map = Django.canonicalize_example(src_query, tgt_code)
            python_ast = ast.parse(tgt_canonical_code).body[0]
            gold_source = astor.to_source(python_ast).strip()
            tgt_ast = python_ast_to_asdl_ast(python_ast, grammar)
            tgt_actions = transition_system.get_actions(tgt_ast)
            # print('+' * 60)
            # print('Example: %d' % idx)
            # print('Source: %s' % ' '.join(src_query_tokens))
            # if str_map:
            #     print('Original String Map:')
            #     for str_literal, str_repr in str_map.items():
            #         print('\t%s: %s' % (str_literal, str_repr))
            # print('Code:\n%s' % gold_source)
            # print('Actions:')
            # sanity check: replaying the action sequence must rebuild the
            # gold source exactly.
            hyp = Hypothesis()
            for t, action in enumerate(tgt_actions):
                assert action.__class__ in transition_system.get_valid_continuation_types(hyp)
                if isinstance(action, ApplyRuleAction):
                    assert action.production in transition_system.get_valid_continuating_productions(hyp)
                p_t = -1
                f_t = None
                if hyp.frontier_node:
                    p_t = hyp.frontier_node.created_time
                    f_t = hyp.frontier_field.field.__repr__(plain=True)
                # NOTE(review): this per-action print (and the '+' * 60 one
                # below) emits a large amount of debug output per example.
                print('\t[%d] %s, frontier field: %s, parent: %d' % (t, action, f_t, p_t))
                hyp = hyp.clone_and_apply_action(action)
            assert hyp.frontier_node is None and hyp.frontier_field is None
            src_from_hyp = astor.to_source(asdl_ast_to_python_ast(hyp.tree, grammar)).strip()
            assert src_from_hyp == gold_source
            print('+' * 60)
            loaded_examples.append({'src_query_tokens' : src_query_tokens,
                                    'tgt_canonical_code': gold_source,
                                    'tgt_ast' : tgt_ast,
                                    'tgt_actions' : tgt_actions,
                                    'raw_code' : tgt_code, 'str_map': str_map})
            # print('first pass, processed %d' % idx, file=sys.stderr)
        train_examples = []
        dev_examples = []
        test_examples = []
        action_len = []
        for idx, e in enumerate(loaded_examples):
            src_query_tokens = e['src_query_tokens'][:max_query_len]
            tgt_actions = e['tgt_actions']
            tgt_action_infos = get_action_infos(src_query_tokens, tgt_actions)
            example = Example(idx=idx,
                              src_sent=src_query_tokens,
                              tgt_actions=tgt_action_infos,
                              tgt_code=e['tgt_canonical_code'],
                              tgt_ast=e['tgt_ast'],
                              meta={'raw_code': e['raw_code'], 'str_map': e['str_map']})
            # print('second pass, processed %d' % idx, file=sys.stderr)
            action_len.append(len(tgt_action_infos))
            # train, valid, test split (fixed index boundaries)
            if 0 <= idx < 16000:
                train_examples.append(example)
            elif 16000 <= idx < 17000:
                dev_examples.append(example)
            else:
                test_examples.append(example)
        print('Max action len: %d' % max(action_len), file=sys.stderr)
        print('Avg action len: %d' % np.average(action_len), file=sys.stderr)
        print('Actions larger than 100: %d' % len(list(filter(lambda x: x > 100, action_len))), file=sys.stderr)
        src_vocab = VocabEntry.from_corpus([e.src_sent for e in train_examples], size=5000, freq_cutoff=vocab_freq_cutoff)
        # NOTE(review): `map(...)` here is relied on as a list (Python 2);
        # under Python 3 these would be single-use iterators.
        primitive_tokens = [map(lambda a: a.action.token,
                                filter(lambda a: isinstance(a.action, GenTokenAction), e.tgt_actions))
                            for e in train_examples]
        primitive_vocab = VocabEntry.from_corpus(primitive_tokens, size=5000, freq_cutoff=vocab_freq_cutoff)
        assert '_STR:0_' in primitive_vocab
        # generate vocabulary for the code tokens!
        code_tokens = [tokenize_code(e.tgt_code, mode='decoder') for e in train_examples]
        code_vocab = VocabEntry.from_corpus(code_tokens, size=5000, freq_cutoff=vocab_freq_cutoff)
        vocab = Vocab(source=src_vocab, primitive=primitive_vocab, code=code_vocab)
        print('generated vocabulary %s' % repr(vocab), file=sys.stderr)
        return (train_examples, dev_examples, test_examples), vocab
    @staticmethod
    def process_django_dataset():
        """End-to-end preprocessing entry point: parse the full corpus and
        pickle the train/dev/test splits plus the vocabulary."""
        vocab_freq_cutoff = 15  # TODO: find the best cutoff threshold
        annot_file = 'data/django/all.anno'
        code_file = 'data/django/all.code'
        (train, dev, test), vocab = Django.parse_django_dataset(annot_file, code_file,
                                                                'asdl/lang/py/py_asdl.txt',
                                                                vocab_freq_cutoff=vocab_freq_cutoff)
        # NOTE(review): text-mode 'w' for pickle output is a Python-2 idiom;
        # under Python 3 these opens would need mode 'wb'.
        pickle.dump(train, open('data/django/train.bin', 'w'))
        pickle.dump(dev, open('data/django/dev.bin', 'w'))
        pickle.dump(test, open('data/django/test.bin', 'w'))
        pickle.dump(vocab, open('data/django/vocab.freq%d.bin' % vocab_freq_cutoff, 'w'))
    @staticmethod
    def run():
        """Dataset self-check: for every example, verify that the ASDL
        action sequence replays into exactly the gold source, both through
        clone-and-apply and through in-place apply."""
        asdl_text = open('asdl/lang/py/py_asdl.txt').read()
        grammar = ASDLGrammar.from_text(asdl_text)
        annot_file = 'data/django/all.anno'
        code_file = 'data/django/all.code'
        transition_system = PythonTransitionSystem(grammar)
        for idx, (src_query, tgt_code) in enumerate(zip(open(annot_file), open(code_file))):
            src_query = src_query.strip()
            tgt_code = tgt_code.strip()
            query_tokens, tgt_canonical_code, str_map = Django.canonicalize_example(src_query, tgt_code)
            python_ast = ast.parse(tgt_canonical_code).body[0]
            gold_source = astor.to_source(python_ast)
            tgt_ast = python_ast_to_asdl_ast(python_ast, grammar)
            tgt_actions = transition_system.get_actions(tgt_ast)
            # sanity check: hyp is rebuilt functionally, hyp2 mutated in place;
            # both must yield equal (but distinct) trees.
            hyp = Hypothesis()
            hyp2 = Hypothesis()
            for action in tgt_actions:
                assert action.__class__ in transition_system.get_valid_continuation_types(hyp)
                if isinstance(action, ApplyRuleAction):
                    assert action.production in transition_system.get_valid_continuating_productions(hyp)
                hyp = hyp.clone_and_apply_action(action)
                hyp2.apply_action(action)
            src_from_hyp = astor.to_source(asdl_ast_to_python_ast(hyp.tree, grammar))
            assert src_from_hyp == gold_source
            assert hyp.tree == hyp2.tree and hyp.tree is not hyp2.tree
            print(idx)
    @staticmethod
    def canonicalize_raw_django_oneliner(code):
        """Normalize a raw Django one-liner to astor-style surface form:
        wrap unparseable fragments, parse, then pretty-print."""
        # use the astor-style code
        code = Django.canonicalize_code(code)
        py_ast = ast.parse(code).body[0]
        code = astor.to_source(py_ast).strip()
        return code
def generate_vocab_for_paraphrase_model(vocab_path, save_path):
    """Build a paraphrase-model vocabulary from an existing pickled Vocab:
    ten <unk_i> slots followed by all source-side and code-side words."""
    from components.vocab import VocabEntry, Vocab
    vocab = pickle.load(open(vocab_path))
    para_vocab = VocabEntry()
    for i in range(0, 10):
        para_vocab.add('<unk_%d>' % i)
    for word in vocab.source.word2id:
        para_vocab.add(word)
    for word in vocab.code.word2id:
        para_vocab.add(word)
    # NOTE(review): text-mode 'w' is a Python-2 pickle idiom (would need
    # 'wb' under Python 3).
    pickle.dump(para_vocab, open(save_path, 'w'))
if __name__ == '__main__':
    # Entry point: run the full dataset preprocessing. The surrounding
    # commented-out calls are ad-hoc experiments kept for reference.
    # Django.run()
    # f1 = Field('hahah', ASDLPrimitiveType('123'), 'single')
    # rf1 = RealizedField(f1, value=123)
    #
    # # print(f1 == rf1)
    # a = {f1: 1}
    # print(a[rf1])
    Django.process_django_dataset()
    # generate_vocab_for_paraphrase_model('data/django/vocab.freq10.bin', 'data/django/vocab.para.freq10.bin')
    # py_ast = ast.parse("""sorted(asf, reverse='k' 'k', k='re' % sdf)""")
    # canonicalize_py_ast(py_ast)
    # for node in ast.walk(py_ast):
    #     if isinstance(node, ast.Str):
    #         print(node.s)
    # print(astor.to_source(py_ast))
| [
"alex.dinu07@gmail.com"
] | alex.dinu07@gmail.com |
def read_file_list(filename):
    """Print every line of *filename* prefixed with "- ".

    For a file containing the lines "Fido", "Whiskey" and "Dr. Sniffle"
    this prints:

        - Fido
        - Whiskey
        - Dr. Sniffle

    Raises OSError if the file cannot be opened.
    """
    with open(filename) as handle:
        for raw_line in handle:
            # Drop the trailing newline (and any surrounding whitespace)
            # before printing; print's default separator supplies "- ".
            print("-", raw_line.strip())
"hannahsylee@gmail.com"
] | hannahsylee@gmail.com |
c72344f39a2b47d401f7e46eee77380b114764d6 | 32a3a7522546df5f18effcaec43b66115c1cd2c1 | /alpha/migrations/0001_initial.py | deee8f56a15d69afa30df48f53d2484a0ee593bc | [] | no_license | seian/youask | 4b90804a289d3b7d21c9fb9b7b7736279845b95b | 490c8f730e32462eae6a7f69af25fed10c92aba9 | refs/heads/master | 2016-08-09T08:17:19.168489 | 2016-02-17T05:58:46 | 2016-02-17T05:58:46 | 50,502,910 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,849 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-17 04:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.9.1) creating the core
    # tables: celebrity info/career, members, replies and threads. Avoid
    # hand-editing generated operations; add new migrations instead.
    #
    # NOTE(review): the DateTimeField(auto_now=True) columns below update
    # on every save; for creation timestamps auto_now_add=True is the
    # typical choice -- confirm intent.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Celeb_info',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.IntegerField()),
                ('name', models.CharField(max_length=100)),
                ('birth', models.DateTimeField()),
                ('group', models.CharField(max_length=500)),
                ('job', models.CharField(max_length=500)),
                ('school', models.CharField(max_length=500)),
            ],
        ),
        migrations.CreateModel(
            name='Celeb_info_career',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.IntegerField()),
                ('_from', models.DateTimeField()),
                ('_to', models.DateTimeField()),
                ('award', models.CharField(max_length=500)),
            ],
        ),
        migrations.CreateModel(
            name='Members',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=200)),
                ('nick_name', models.CharField(max_length=50)),
                ('password', models.CharField(max_length=512)),
                ('signup_date', models.DateTimeField(auto_now=True)),
                ('contents', models.TextField(max_length=300)),
            ],
        ),
        migrations.CreateModel(
            name='Replies',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.IntegerField()),
                ('thread', models.IntegerField()),
                ('content', models.CharField(max_length=500)),
                ('preference', models.IntegerField()),
                ('post_date', models.DateTimeField(auto_now=True)),
                ('parent_id', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Thread',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('owner', models.IntegerField()),
                ('topic', models.CharField(max_length=500)),
                ('title', models.CharField(max_length=500)),
                ('post_date', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
]
| [
"wnsdud1861@gmail.com"
] | wnsdud1861@gmail.com |
6ecde04930e4c4d2d909383a6d9ae3c2c8476230 | c089d51e00ace0e2d2c1c08cc7278ea4a43ff8f9 | /classifiers/keras.py | 4ac55c4f41d6fadb0db78e8d3ac6e6246d879ec9 | [] | no_license | salman-kha3/ActiveLearning | 84f088aac802c59edba94c205b4666bfa8e7b15f | 3d6c6c120788ea2fd338cff5d3f5b88b5b8901ec | refs/heads/master | 2021-01-11T20:35:46.977142 | 2017-04-02T12:58:01 | 2017-04-02T12:58:01 | 79,150,978 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | import keras
from libact.base.interfaces import ContinuousModel
import numpy as np
class KerasClassifier(ContinuousModel):
    # Thin adapter exposing a Keras Sequential model through libact's
    # ContinuousModel interface so it can drive active-learning query
    # strategies. Constructor/add/compile arguments pass straight through
    # to the underlying keras model.

    def __init__(self, *args, **kwargs):
        self.model = keras.models.Sequential(*args, **kwargs)

    def train(self, dataset, *args, **kwargs):
        # libact Dataset -> (X, y) via format_sklearn(), then delegate to fit.
        return self.model.fit(*(dataset.format_sklearn()+args), **kwargs)

    def predict(self, feature, *args, **kwargs):
        return self.model.predict(feature, *args, **kwargs)

    def score(self, *args, **kwargs):
        return self.model.evaluate(*args, **kwargs)

    def predict_real(self, feature, *args, **kwargs):
        # Real-valued per-class scores: decision-function values when the
        # wrapped model provides them, class probabilities otherwise.
        # NOTE(review): keras Sequential typically exposes predict_proba
        # (older API) rather than decision_function -- confirm which branch
        # actually runs with the keras version in use.
        if hasattr(self.model, "decision_function"):
            dvalue = self.model.decision_function(feature, *args, **kwargs)
        else:
            dvalue = self.model.predict_proba(feature, *args, **kwargs)
        # [:, 1]
        if len(np.shape(dvalue)) == 1:  # n_classes == 2
            # Binary case: expand the single score into (-score, score) columns.
            return np.vstack((-dvalue, dvalue)).T
        else:
            return dvalue

    def add(self, *args, **kwargs):
        self.model.add(*args, **kwargs)

    def compile(self, *args, **kwargs):
        self.model.compile(*args, **kwargs)
| [
"salman.khatri3@gmail.com"
] | salman.khatri3@gmail.com |
be4bc8669b12545f0c578c87d72131ebfc8489d0 | 947273c16f8984a20cd002b99b52facd6e63e43b | /server/authentication/urls.py | dacfd5c43349691a7bc454b922558db58c2608aa | [] | no_license | ecuaappgye/App | 8e3b50b4f7a8b9c50876d24343781e8f53a51bbc | 2df7be6fd206d012f6a83acd0aa0cb75cf6d5937 | refs/heads/master | 2023-07-05T00:48:24.341021 | 2021-07-31T17:02:12 | 2021-07-31T17:02:12 | 385,267,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | from django.urls import include, path
from .apis import (UserEmailChange, UserGetApi, UserLoginApi, UserLogoutApi,
UserPasswordChange, UserPasswordReset,
UserPasswordResetCheck, UserRegisterApi,
UserRegisterVerifyApi, UserRegisterVerifyCheckApi,
UserUpdateApi)
# Authentication endpoints: registration + verification, session login/
# logout, password reset/change, e-mail change and user lookup.
# NOTE(review): the two password_reset* routes have no name= argument,
# unlike their siblings -- they cannot be reversed by name.
authentication_urls = [
    path('register/', UserRegisterApi.as_view(), name='register'),
    path('register/verify/<int:user_id>/', UserRegisterVerifyApi.as_view(), name='register_verify'),
    path('register/verify_check/<int:user_id>/', UserRegisterVerifyCheckApi.as_view(), name='register_verify_check'),
    path('login/', UserLoginApi.as_view(), name='login'),
    path('logout/', UserLogoutApi.as_view(), name='logout'),
    path('password_reset/', UserPasswordReset.as_view()),
    path('password_reset_check/', UserPasswordResetCheck.as_view()),
    path('password_change/<int:user_id>/', UserPasswordChange.as_view(), name='password_change'),
    path('email_change/<int:user_id>/', UserEmailChange.as_view(), name='email_change'),
    path('get/<int:user_id>/', UserGetApi.as_view(), name='get'),
]

# Driver-profile endpoints.
drivers_urls =[
    path('update/<int:user_id>/', UserUpdateApi.as_view(), name='update')
]

# Mount both groups under namespaced prefixes ('auth', 'driver').
urlpatterns =[
    path('auth/', include((authentication_urls, 'auth'))),
    path('driver/', include((drivers_urls, 'driver')))
]
| [
"italobarzola18@gmail.com"
] | italobarzola18@gmail.com |
595a9e74a588b9a31577ba1c84a3e2bd2e99a3bc | e4c798246339e765f04424d727106e80e810f47c | /Medium/iNo008.py | 7c70fbb6da2f54341b2bef3bbcc9b1e6fae85c2f | [] | no_license | kikihiter/LeetCode | 3a61dc4ee3223d634632e30b97c30a73e5bbe253 | 62b5ae50e3b42ae7a5a002efa98af5ed0740a37f | refs/heads/master | 2021-05-26T08:05:00.126775 | 2019-05-21T09:18:37 | 2019-05-21T09:18:37 | 127,999,978 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
str = str.lstrip()
posNum = True
if str == "":
return 0
if str[0]=='-':
posNum = False
str = str[1:]
elif str[0]=='+':
str = str[1:]
try:
int(str[0])
except:
return 0
rStr = ""
for i in str:
try:
int(i)
except:
break
rStr = rStr + i
rStr = rStr.lstrip('0')
if rStr == "":
return 0
if posNum == False:
return max(-int(rStr),-2147483648)
print rStr
return min(int(rStr),2147483647)
| [
"noreply@github.com"
] | noreply@github.com |
78196c0216eb2c3169434d91e959b6edf1d91d1a | 44d9cdc0ca026036c314b8530a7eeda1fb3ee21c | /src/JZ/JZ14-II.py | ea9dde6589c4ad917732d8c88169b2ff6c251334 | [] | no_license | Sibyl233/LeetCode | 525a2104a4f96e0e701fa175dfad6d02d6cf40f3 | 3e20e54bb11f7a56d8fa8189a0a1a1461f81eb8a | refs/heads/master | 2021-09-06T14:52:31.718922 | 2021-08-20T15:51:37 | 2021-08-20T15:51:37 | 234,050,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | """解法:找规律
- 时间复杂度:O(logN)。为二分求余法复杂度。
- 空间复杂度:O(1)
"""
class Solution:
    def cuttingRope(self, n: int) -> int:
        """Cut a rope of integer length n (n >= 2) into at least two
        integer pieces and return the maximum product of the pieces,
        modulo 1000000007 (JZ14-II).

        Optimal strategy: use as many 3s as possible, but never leave a
        remainder of 1 (prefer 2*2 over 3*1).
        """
        if n <= 3:
            # A cut is mandatory, so 2 -> 1*1 = 1 and 3 -> 1*2 = 2.
            return n - 1
        p = 1000000007
        # Improvement: the built-in three-argument pow performs modular
        # exponentiation directly, replacing the hand-rolled
        # binary-exponentiation loop with identical results.
        rem = pow(3, n // 3 - 1, p)  # 3^(n//3 - 1) mod p
        remainder = n % 3
        if remainder == 0:
            return (rem * 3) % p  # = 3^(n//3) % p
        if remainder == 1:
            return (rem * 4) % p  # = 3^(n//3 - 1) * 4 % p
        return (rem * 6) % p      # = 3^(n//3 - 1) * 2 * 3 % p
if __name__=="__main__":
    n = 10
    # Expected output: 36 (10 = 3 + 3 + 4, product 3*3*4).
    print(Solution().cuttingRope(n)) # 36
| [
"780598113@qq.com"
] | 780598113@qq.com |
f1b79ea5db5f487fdf2cfc2259c01ffb58418ed7 | 39c032082a86120d06c6daa33abe81fd228077bb | /tsne-test.py | c86c780cdfe556efdb8c869957eca1457810f9a9 | [] | no_license | briantimar/nn-test1 | 4b3365743f31b6bcd9aa3565a55c1239ccb81312 | 980b29b3909c6b9539a06e771f3fafbc413a2a14 | refs/heads/master | 2020-03-19T15:20:28.541618 | 2018-06-14T18:28:12 | 2018-06-14T18:28:12 | 136,667,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,180 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 14 00:13:18 2018
@author: btimar
Visualizing ground states?
"""
import sys
import numpy as np
import tensorflow as tf
sys.path.append("/Users/btimar/Documents/ryd-theory-code/python_code")
from ryd_base import make_1d_TFI_spin
from tools import get_sent
from quspin.operators import hamiltonian
from tsne import tsne
def z(i, basis):
    # Single-site z operator on site i, built as a quspin hamiltonian
    # with coupling strength 1.0 in the given basis.
    static = [['z', [[1.0, i]]]]
    return hamiltonian(static, [], basis=basis)
def zz(i, j, basis):
    # Two-site zz correlator operator between sites i and j (quspin
    # hamiltonian, real-valued).
    static = [['zz', [[1.0, i, j]]]]
    return hamiltonian(static, [], basis=basis, dtype=np.float64)
def make_labels(gvals):
    """Return an integer array with 1 where the entry of *gvals* is < 1
    and 0 elsewhere."""
    return np.asarray(gvals < 1, dtype=int)
def get_features(gvals, basis):
    """For each coupling value g in *gvals*, build the 1D TFI Hamiltonian
    and compute its lowest eigenvector (k=1, which='SA'), returning the
    states stacked row-wise with shape (len(gvals), basis.Ns)."""
    N = len(gvals)
    s = np.empty((N, basis.Ns))
    for i in range(N):
        h = make_1d_TFI_spin(1, gvals[i], basis, dtype=np.float64)
        # 'SA' = smallest-algebraic eigenvalue, i.e. the ground state.
        _, psi0 = h.eigsh(k=1, which='SA')
        s[i, :] = psi0.reshape(basis.Ns)
    return s
# ---- Exploratory script body (no __main__ guard) ----
from quspin.basis import spin_basis_1d
L=8
# Symmetry-reduced basis (zero momentum, even parity) and the full basis.
basis = spin_basis_1d(L, kblock=0, pblock=1)
basis_full = spin_basis_1d(L)
proj=basis.get_proj(np.float64)  # projector: symmetry sector -> full Hilbert space
N=500
gvals = np.linspace(0, 2.0, N)
states = get_features(gvals, basis)  # NOTE(review): computed but never used below
labels = make_labels(gvals)
# NOTE(review): `features` is referenced here but only assigned further
# down; as written this line raises NameError (perhaps `states` was
# intended here).
psi_full = np.asarray(proj.todense().dot( features.transpose()))
zzops = [zz(0, i, basis_full) for i in range(1,L//2)]
features = np.empty((N, len(zzops)))
for i in range(len(zzops)):
    features[:, i] = zzops[i].expt_value(psi_full)
from tools import overlap
overlaps = [np.abs(overlap(features[i, :], features[-1, :]))**2 for i in range(N-1)]
# NOTE(review): `zz1op` is never defined in this file -- NameError here too.
zz1 = zz1op.expt_value(psi_full)
# tSNE params
no_dims = 2
# dimensionality of raw data
d = features.shape[1]
initial_dims = d
perplexity = 30.0
print("Passing to tsne")
y = tsne(features, no_dims=no_dims, initial_dims=initial_dims, perplexity=perplexity)
import matplotlib.pyplot as plt
from EDIO import save
fig, ax=plt.subplots()
plt.scatter(y[:, 0], y[:, 1], c=labels)
#save(fig, "20180613/tsne-tfi-symm-wfs-L={0}".format(L),which='mac')
# Split the embedded points into two clusters by position in the t-SNE plane.
blockA = (y[:, 1]>0)*(y[:, 0]<15)
blockB = np.logical_not(blockA)
plt.plot(gvals[blockA], zz1[blockA], label='A')
plt.plot(gvals[blockB], zz1[blockB], 'rx',label='B')
"timarbrian@gmail.com"
] | timarbrian@gmail.com |
c009051241a83019b6b7b8d33d001752d3ebe5bb | 428dd6e4ab9ee4916664f13a25f04ae424769d47 | /pwd.py | 162233077798636c35d2030fa2e855c4d0cb7694 | [] | no_license | licmnn/pwd | 16e8ae1113f8a7bed8d16bdb4e989983c3e41f50 | 82921059804619e28f777691cf4d9c46e528deaa | refs/heads/master | 2022-07-10T12:50:05.914564 | 2020-05-19T15:20:21 | 2020-05-19T15:20:21 | 265,280,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | #password = 'a123456'
#x = 3
#while x > 0:
# pw = input('请输入密码: ')
# if pw == password:
# print('登入成功') # quit
# break
# elif x != 0:
# x = x - 1
# print('密码错误! 还有%d次机会!' % x)
# else:
# pirnt('')
'''
pw = input('请输入您的密码: ')
x = 2
print('密码错误! 还有%d次机会 ' % x)
while x > 0 :
if pw != 'a123456' and x > 0:
pw = input('请输入您的密码: ')
x = x - 1
print('密码错误! 还有%d次机会 ' % x)
elif x == 0:
print('密码错误!灭有机会了')
elif pw == 'a123456':
print('登入成功')
'''
# Allow up to three password attempts against a hard-coded password;
# report remaining attempts after each failure and lock out afterwards.
# (Prompt/messages are user-facing Chinese strings and are left as-is.)
password = 'a123456'
x = 3  # remaining attempts
while x > 0:
    x = x -1  # one attempt consumed up front
    pw = input('请输入密码: ')  # prompt: "enter password"
    if pw == password:
        print('登入成功') # "login successful" -- exit the retry loop
        break
    else:
        # x = x -1
        print('密码错误!')  # "wrong password"
        if x > 0:
            print('还有%d次机会!' % x)  # "%d attempts remaining"
        else:
            print('没有机会了,账号锁定!')  # "no attempts left, account locked"
"licmn@live.com"
] | licmn@live.com |
77f78c54499c89158741dcfdec3c99815d78fd7f | 48399403b64b9d8e52ea573c0cbefb941f33e540 | /server.py | 62775f3592e3fea539d03b16d9b96c25a5d4ac78 | [] | no_license | Henry-Aybar/counter | 97c54c9a3060fb0249ee432ec89163ae796c014e | 8bb089134d502bd334d785751352086340561862 | refs/heads/master | 2023-08-03T09:41:43.420948 | 2021-09-14T20:50:30 | 2021-09-14T20:50:30 | 406,517,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | from flask import Flask, render_template, request, redirect, session
app = Flask(__name__)
# Secret key signs the client-side session cookie.
app.secret_key = 'Im just Super Sayin!'


@app.route('/')
def index():
    # Track page visits in the session: 'visit' starts at 1 on the first
    # request, 'count' starts at 0.
    # NOTE(review): two parallel counters differing only by their initial
    # value look redundant -- confirm which one index.html actually uses.
    if 'visit' in session:
        session['visit'] += 1
    else:
        session['visit'] = 1
    if 'count' in session:
        session['count'] += 1
    else:
        session['count'] = 0
    return render_template("index.html")


@app.route('/clear')
def clear_session():
    # Reset all counters by wiping the session, then return to the index.
    session.clear()
    return redirect('/')


if __name__=="__main__":
    app.run(debug=True)
"aybar.henry.usmc@gmail.com"
] | aybar.henry.usmc@gmail.com |
92b948fe97b26eb0b10d0f37347f317f1618052f | 47aada5f50fdb6dfe0c8b34ff30de93a32797dca | /tpfa/boundary_conditions.py | 760274d702f7f588864f85db98cb8d975fd5a9bf | [
"MIT"
] | permissive | Filipe-Cumaru/hello-world-1 | 17a7e11ac9f3efa0026c3471538cf4f28323e8ef | b12137beebbf1a4bd87d02583b609d8c68307474 | refs/heads/master | 2020-04-24T23:59:01.077919 | 2019-02-24T02:40:58 | 2019-02-24T02:40:58 | 172,363,231 | 0 | 0 | MIT | 2019-02-24T16:47:05 | 2019-02-24T16:47:05 | null | UTF-8 | Python | false | false | 779 | py | import numpy as np
from scipy.sparse import csr_matrix, lil_matrix
class BoundaryConditions():
def __init__(self, num_elements, nx, ny, coef):
self.coef = coef
self.num_elements = num_elements
self.nx = nx
self.ny = ny
self.coef, self.q = self.pressao_prescrita()
def pressao_prescrita(self):
self.q = lil_matrix((self.num_elements, 1), dtype=np.float_)
self.coef[0:self.nx*self.ny] = 0
self.q [0:self.nx*self.ny] = 500
self.coef[self.num_elements-(self.nx*self.ny):self.num_elements] = 0
for r in range(self.nx*self.ny):
self.coef[r,r] = 1
self.coef[r+self.num_elements-(self.nx*self.ny),r+self.num_elements-(self.nx*self.ny)] = 1
return self.coef, self.q
| [
"renatattavares@hotmail.com"
] | renatattavares@hotmail.com |
f4dbe6287f26b3505bbd41c6c3a493e894db9f0e | 43e36f639e69bb3c0ace7b89eee8571d946da1c8 | /scripts/ball_gripping_test.py | 28d81f812fe81d24a1a294c322afcb8d91a80c66 | [] | no_license | mikolak/test_package | 309a07e78b322a0fe378ac595200b473cba0a391 | c9978a440bcdeed54839aeeb1e80795060cc1b5e | refs/heads/master | 2016-09-03T07:24:23.361858 | 2014-09-08T13:36:32 | 2014-09-08T13:36:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,630 | py | #!/usr/bin/env python
import rospy
import tf
import actionlib
import math
from controller_manager_msgs.srv import *
from std_msgs.msg import *
from diagnostic_msgs.msg import *
from geometry_msgs.msg import *
from trajectory_msgs.msg import *
from control_msgs.msg import *
from cartesian_trajectory_msgs.msg import *
from force_control_msgs.msg import *
from tf.transformations import *
import PyKDL
import tf_conversions.posemath as pm
def getDownOrientedQuaternion():
    """Return the quaternion for a 180-degree rotation about the -Y axis,
    i.e. the end-effector pointing straight down."""
    rotation = math.pi * 1  # 180 degrees -> tool oriented vertically down
    axis_x, axis_y, axis_z = 0.0, -1, 0
    half = 0.5 * rotation
    s = math.sin(half)
    # Standard axis-angle -> quaternion conversion: (axis*sin(a/2), cos(a/2)).
    return Quaternion(s * axis_x, s * axis_y, s * axis_z, math.cos(half))
if __name__ == '__main__':
    # Pick-and-place demo for the IRp-6 robot: grasp a ball, move it
    # sideways, release it and return to the synchronisation posture.
    # Python 2 / ROS script: switches controllers via controller_manager
    # and sends joint / Cartesian / gripper trajectories as actions.
    rospy.init_node('simple_trajectory_test')
    rospy.wait_for_service('/controller_manager/switch_controller')
    conManSwitch = rospy.ServiceProxy('/controller_manager/switch_controller', SwitchController)
    #------------------------------------------------
    # Joint-space motion ("Stawy")
    #------------------------------------------------
    # Enable the joint spline generator and move to the starting posture.
    conManSwitch(['Irp6pmSplineTrajectoryGeneratorJoint'], [], True)
    client = actionlib.SimpleActionClient('/irp6p_arm/spline_trajectory_action_joint', FollowJointTrajectoryAction)
    client.wait_for_server()
    print 'Inicjacja postawy'
    goal = FollowJointTrajectoryGoal()
    goal.trajectory.joint_names = ['joint1', 'joint2', 'joint3', 'joint4', 'joint5', 'joint6']
    goal.trajectory.points.append(JointTrajectoryPoint([0, -0.5 * math.pi, 0, 0, 1.45 * math.pi, -0.5 * math.pi], [], [], [], rospy.Duration(6.0)))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)
    client.send_goal(goal)
    client.wait_for_result()
    command_result = client.get_result()
    #=====================================================
    # Switch to Cartesian pose interpolation and go to the start pose.
    conManSwitch(['Irp6pmPoseInt'], ['Irp6pmSplineTrajectoryGeneratorJoint'], True)
    pose_client = actionlib.SimpleActionClient('/irp6p_arm/pose_trajectory', CartesianTrajectoryAction)
    pose_client.wait_for_server()
    print 'Ustawienie pozycji poczatkowej'
    goal = CartesianTrajectoryGoal()
    quaternion = getDownOrientedQuaternion()
    point = Point(0.85, 0, 1.20)
    goal.trajectory.points.append(CartesianTrajectoryPoint(rospy.Duration(10.0), Pose(point, quaternion), Twist()))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)
    pose_client.send_goal(goal)
    pose_client.wait_for_result()
    command_result = pose_client.get_result()
    #====================================================
    # Approach the pick-up position (lower above the object).
    conManSwitch(['Irp6pmPoseInt'], [], True)
    pose_client = actionlib.SimpleActionClient('/irp6p_arm/pose_trajectory', CartesianTrajectoryAction)
    pose_client.wait_for_server()
    print 'Podejscie do podjecia'
    goal = CartesianTrajectoryGoal()
    quaternion = getDownOrientedQuaternion()
    point = Point(0.9, 0, 0.95)
    goal.trajectory.points.append(CartesianTrajectoryPoint(rospy.Duration(10.0), Pose(point, quaternion), Twist()))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)
    pose_client.send_goal(goal)
    pose_client.wait_for_result()
    command_result = pose_client.get_result()
    rospy.sleep(5.0)
    #========================================================
    # Close the two-finger gripper (motor-space trajectory) to grasp.
    conManSwitch(['Irp6ptfgSplineTrajectoryGeneratorMotor'], ['Irp6pmPoseInt'], True)
    motor_client = actionlib.SimpleActionClient('/irp6p_tfg/spline_trajectory_action_motor', FollowJointTrajectoryAction)
    motor_client.wait_for_server()
    print 'Chwyt'
    goal = FollowJointTrajectoryGoal()
    goal.trajectory.joint_names = ['joint1']
    goal.trajectory.points.append(JointTrajectoryPoint([1000.0], [0.0], [], [], rospy.Duration(3.0)))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)
    motor_client.send_goal(goal)
    motor_client.wait_for_result()
    command_result = motor_client.get_result()
    #=======================================================
    # Lift back to the initial pose with the object grasped.
    conManSwitch(['Irp6pmPoseInt'], ['Irp6ptfgSplineTrajectoryGeneratorMotor'], True)
    pose_client = actionlib.SimpleActionClient('/irp6p_arm/pose_trajectory', CartesianTrajectoryAction)
    pose_client.wait_for_server()
    print 'Powrot do pozycji poczatkowej'
    goal = CartesianTrajectoryGoal()
    quaternion = getDownOrientedQuaternion()
    point = Point(0.85, 0, 1.20)
    goal.trajectory.points.append(CartesianTrajectoryPoint(rospy.Duration(15.0), Pose(point, quaternion), Twist()))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)
    pose_client.send_goal(goal)
    pose_client.wait_for_result()
    command_result = pose_client.get_result()
    #========================================================
    # Move sideways and lower to the put-down position.
    conManSwitch(['Irp6pmPoseInt'], [], True)
    pose_client = actionlib.SimpleActionClient('/irp6p_arm/pose_trajectory', CartesianTrajectoryAction)
    pose_client.wait_for_server()
    print 'Podejscie do odlozenia'
    goal = CartesianTrajectoryGoal()
    quaternion = getDownOrientedQuaternion()
    point = Point(0.9, 0.3, 0.95)
    goal.trajectory.points.append(CartesianTrajectoryPoint(rospy.Duration(10.0), Pose(point, quaternion), Twist()))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)
    pose_client.send_goal(goal)
    pose_client.wait_for_result()
    command_result = pose_client.get_result()
    #===========================================
    # Open the gripper to release the object.
    conManSwitch(['Irp6ptfgSplineTrajectoryGeneratorMotor'], ['Irp6pmPoseInt'], True)
    motor_client = actionlib.SimpleActionClient('/irp6p_tfg/spline_trajectory_action_motor', FollowJointTrajectoryAction)
    motor_client.wait_for_server()
    print 'Wypuszczenie'
    goal = FollowJointTrajectoryGoal()
    goal.trajectory.joint_names = ['joint1']
    goal.trajectory.points.append(JointTrajectoryPoint([-1000.0], [0.0], [], [], rospy.Duration(3.0)))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)
    motor_client.send_goal(goal)
    motor_client.wait_for_result()
    command_result = motor_client.get_result()
    #============================================
    # Return to the initial pose via two Cartesian waypoints.
    conManSwitch(['Irp6pmPoseInt'], ['Irp6ptfgSplineTrajectoryGeneratorMotor'], True)
    pose_client = actionlib.SimpleActionClient('/irp6p_arm/pose_trajectory', CartesianTrajectoryAction)
    pose_client.wait_for_server()
    print 'Powrot do pozycji poczatkowej'
    goal = CartesianTrajectoryGoal()
    quaternion = getDownOrientedQuaternion()
    point1 = Point(0.9, 0.3, 1.20)
    point2 = Point(0.85, 0, 1.20)
    goal.trajectory.points.append(CartesianTrajectoryPoint(rospy.Duration(8.0), Pose(point1, quaternion), Twist()))
    goal.trajectory.points.append(CartesianTrajectoryPoint(rospy.Duration(16.0), Pose(point2, quaternion), Twist()))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)
    pose_client.send_goal(goal)
    pose_client.wait_for_result()
    command_result = pose_client.get_result()
    #================================================
    # Joint-space return to the synchronisation posture, then release
    # the last active controller.
    conManSwitch(['Irp6pmSplineTrajectoryGeneratorJoint'], ['Irp6pmPoseInt'], True)
    client = actionlib.SimpleActionClient('/irp6p_arm/spline_trajectory_action_joint', FollowJointTrajectoryAction)
    client.wait_for_server()
    print 'Powrot do pozycji synchronizacji'
    goal = FollowJointTrajectoryGoal()
    goal.trajectory.joint_names = ['joint1', 'joint2', 'joint3', 'joint4', 'joint5', 'joint6']
    goal.trajectory.points.append(JointTrajectoryPoint([-0.10087151336609543, -1.5417429815634993, 0.019743230015841898, 1.1331041783656084, 3.658011557435151, -2.7351279214366393], [], [], [], rospy.Duration(10.0)))
    goal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.2)
    client.send_goal(goal)
    client.wait_for_result()
    command_result = client.get_result()
    conManSwitch([], ['Irp6pmSplineTrajectoryGeneratorJoint'], True)
    print 'Skonczylem!'
| [
"mikolak.k@gmail.com"
] | mikolak.k@gmail.com |
d0e5a0905a4356ef92ac7aaf23d6954bde4d27c3 | 6a2d708c290d5fc8b4eb6d2b6d52186d6ae57406 | /Chef Race (SRTF) Final/srtf.py | 5bbf4bbf634474a5b9be024dfb09b3ce1e213ed3 | [] | no_license | ac-marlon/ChefRaceUD | 64080c1e575234f03113c16d9c017c4f42f55010 | b3c8312af7b6b8e3e6fc0a457169358e4bd1d62b | refs/heads/master | 2021-08-26T01:09:25.324512 | 2021-08-18T15:01:24 | 2021-08-18T15:01:24 | 108,355,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,609 | py | import cola
import time
from procesos import *
import recursos as rs
import queue
import threading
import numpy as np
import pygame
from pygame.sprite import Sprite
from pygame.locals import *
import util
import sys, pygame, util
from receta import Receta
from recursos import CuchillosIma
from recursos import LicuadoraIma
from recursos import HornoIma
from pizarra import Pizarra
# Module-level pygame window: 900x712 display surface shared by all
# sprites and drawing code below (created at import time).
size = width, height = 900, 712
screen = pygame.display.set_mode(size)
class Procesador(threading.Thread):
    """SRTF (Shortest Remaining Time First) processor simulation.

    Each instance is a thread that consumes processes from an input
    queue.Queue and schedules them across four internal queues (project
    class ``cola.Cola``): lis (ready), blo (blocked on a resource),
    sus (suspended/preempted) and ter (finished).
    """

    def __init__(self, idProcesador, *args):
        threading.Thread.__init__(self)
        self.idProcesador = idProcesador
        # Currently running process (None when the CPU is idle).
        self.proceso = None
        self.lis = cola.Cola()   # ready queue
        self.ter = cola.Cola()   # terminated processes
        self.blo = cola.Cola()   # blocked (resource busy)
        self.sus = cola.Cola()   # suspended (preempted by a shorter job)
        # args is expected to hold the input queue.Queue passed to run().
        self._args = args
        self.uso = True
        # Minimum number of scheduler iterations before the thread may
        # exit even with everything empty (gives the UI time to feed work).
        self.minIter = 50

    def __str__(self):
        return str(self.idProcesador)

    def run(self):
        # Keep running the scheduler loop until usarProcesador clears uso.
        while self.uso:
            self.usarProcesador(*self._args)

    def usarProcesador(self, q):
        """Main scheduler loop: admit, dispatch, preempt and retire processes.

        q: queue.Queue feeding new processes from the UI thread.
        """
        # Loop while there is any work anywhere or the warm-up budget remains.
        while not self.proceso == None or not q.empty() or not self.lis.es_vacia() or not self.sus.es_vacia() or not self.blo.es_vacia() or self.minIter > 0:
            time.sleep(2)  # one scheduler "tick" every 2 seconds
            self.minIter -= 1
            # Admit a newly arrived process into the ready queue.
            if not q.empty(): self.asignar(q.get())
            # Keep the ready queue ordered (SRTF: shortest remaining first).
            self.lis.ordenar()
            if not self.lis.es_vacia() and self.proceso == None:
                # CPU idle: dispatch the head of the ready queue if its
                # resource is free, otherwise block it.
                posible = self.lis.desencolar()
                if posible.recurso.libre:
                    self.ocupado = True
                    self.proceso = posible
                    self.proceso.recurso.utilizar()
                    self.proceso.estado = 3
                else:
                    posible.bloquear()
                    self.blo.encolar(posible)
            elif not self.lis.es_vacia() and not self.proceso == None:
                # CPU busy: preempt if the candidate has a shorter
                # remaining time AND its resource is free (SRTF rule).
                posible = self.lis.desencolar()
                if self.proceso.t > posible.t and posible.recurso.libre:
                    self.proceso.suspender()
                    self.sus.encolar(self.proceso)
                    self.proceso = posible
                    self.proceso.recurso.utilizar()
                else:
                    self.lis.encolar(posible)
            # Age the waiting processes and re-check the auxiliary queues.
            self.contarColaBlo()
            self.contarColaLis()
            self.revisarColaSus()
            self.revisarColaBlo()
            if not self.proceso == None:
                # Run the current process for one tick; retire it when done.
                self.proceso.procesar()
                if self.proceso.t == 0:
                    self.proceso.recurso.liberar()
                    print("\nterminando proceso", self.proceso, "en el procesador", self, ",sus", self.proceso.sus, ",lis", self.proceso.lis, ",blo", self.proceso.blo, ",zona critica", self.proceso.zc)
                    self.proceso.estado = 4
                    self.ter.encolar(self.proceso)
                    self.proceso = None
                    q.task_done()
        # No more work: dump the completed-task list and stop the thread.
        print("termino el procesador", self, "lista de tareas completadas en este procesador:")
        for i in range(self.ter.tam):
            print(self.ter.desencolar())
        self.uso = False

    def revisarColaSus(self):
        """Decrement each suspended process' resume timer; re-admit at 0."""
        tam = self.sus.tam
        for i in range(tam):
            n = self.sus.desencolar()
            n.tr -= 1
            n.sus += 1
            if n.tr == 0:
                self.asignar(n)
                print("\nse saco el proceso", n, "de la cola de suspendidos y entro a la cola de listo")
            else:
                self.sus.encolar(n)

    def revisarColaBlo(self):
        """Move blocked processes back to ready once their resource frees up."""
        for i in range(self.blo.tam):
            posible = self.blo.desencolar()
            if posible.recurso.libre:
                self.asignar(posible)
                print("\nse saco el proceso", posible, " de la cola de bloqueados y entro en la cola de listos")
            else:
                self.blo.encolar(posible)

    def contarColaLis(self):
        """Accounting: bump the ticks-spent-ready counter of every ready process."""
        tam = self.lis.tam
        for i in range(tam):
            n = self.lis.desencolar()
            n.lis += 1
            self.lis.encolar(n)

    def contarColaBlo(self):
        """Accounting: bump the ticks-spent-blocked counter of every blocked process."""
        # NOTE(review): reads self.blo.tam on every loop test while the queue
        # is being rotated — relies on the size staying constant; confirm.
        tam = self.blo.tam
        for i in range(self.blo.tam):
            n = self.blo.desencolar()
            n.blo += 1
            self.blo.encolar(n)

    def asignar(self, proceso):
        """Put a process into the ready queue (estado 0 = ready)."""
        proceso.estado = 0
        self.lis.encolar(proceso)
class cliente:
    """Game controller for the "Chef Race" SRTF demo.

    Owns three Chef threads (processors), their input queues, the shared
    kitchen resources (oven, knives, blender) and the whole pygame UI:
    intro screen, sound, sprite animation and mouse-event handling.
    """

    def __init__(self):
        # Per-recipe-type sequence counters (used as process ids).
        self.numPo = 0
        self.numMa = 0
        self.numEn = 0
        # Shared resources: index 0 = oven, 1 = knives, 2 = blender.
        self.recursos = [rs.Horno(), rs.Cuchillos(), rs.Licuadora()]
        # One input queue per chef; colaProcesadores is created but unused.
        self.cola1 = queue.Queue()
        self.cola2 = queue.Queue()
        self.cola3 = queue.Queue()
        self.colaProcesadores = queue.Queue()
        self.procesador1 = Chef((width-900, height), 1, self.cola1)
        self.procesador2 = Chef((width-700, height), 2, self.cola2)
        self.procesador3 = Chef((width-500, height), 3, self.cola3)
        pygame.init()
        pygame.mixer.init()
        self.fondo = pygame.image.load("imagenes/cocina.png")
        self.intro = pygame.image.load("imagenes/intro.png")
        self.fondorect = self.fondo.get_rect()
        self.introrect = self.intro.get_rect()
        pygame.display.set_caption( "Chef Race (Universidad Distrital)" )
        self.pizarra = pygame.image.load("imagenes/pizarra.png")
        self.sInicio = util.cargar_sonido('sonidos/inicio.wav')
        self.sHorno = util.cargar_sonido('sonidos/horno.wav')
        self.sCuchillo = util.cargar_sonido('sonidos/cuchillo.wav')
        self.sLicuadora = util.cargar_sonido('sonidos/licuadora.wav')
        self.sPrincipal = util.cargar_sonido('sonidos/principal.wav')
        # One status board per chef, plus the three recipe-button sprites.
        self.pizarra1 = Pizarra((width-900, height))
        self.pizarra2 = Pizarra((width-700, height))
        self.pizarra3 = Pizarra((width-500, height))
        self.receta1 = Receta((width, height))
        self.receta2 = Receta((width+200, height))
        self.receta3 = Receta((width+400, height))
        self.comida1 = PolloConPapas(000, self.recursos[0], size)
        self.comida2 = Ensalada(111, self.recursos[1], size)
        self.comida3 = Malteada(222, self.recursos[2], size)
        self.listaChefs = [self.procesador1, self.procesador2, self.procesador3]
        self.listaPizarras = [self.pizarra1, self.pizarra2, self.pizarra3]
        self.listaRecetas = [self.receta1, self.receta2, self.receta3]
        self.listaComida = [self.comida1, self.comida2, self.comida3]
        self.cuchillos = CuchillosIma(size)
        self.licuadora = LicuadoraIma(size)
        self.horno = HornoIma(size)
        self.reloj = pygame.time.Clock()
        self.fuente1 = pygame.font.Font(None, 70)
        self.fuente2 = pygame.font.Font(None, 25)
        self.textoBienvenida = self.fuente1.render("Bienvenido a Chef Race UD", 1, (255,255,255))
        self.textoAutor1 = self.fuente2.render("Marlon Arias", 1, (0,0,0))
        self.textoAutor2 = self.fuente2.render("David Amado", 1, (0,0,0))
        self.textoAutor3 = self.fuente2.render("Realizado por:", 1, (0,0,0))

    def iniciar(self):
        """Show the intro screen, start the chef threads and the UI threads,
        then block until all queues are drained and threads finish."""
        self.sInicio.play()
        # Intro splash: redraw once per second for 3 seconds.
        aux = 3
        while aux > 0:
            screen.blit(self.intro, self.introrect)
            screen.blit(self.textoAutor1, (width-170, height-680))
            screen.blit(self.textoAutor2, (width-170, height-660))
            screen.blit(self.textoAutor3, (width-170, height-700))
            screen.blit(self.textoBienvenida, ((width-880, (height/2)+30)))
            pygame.display.update()
            time.sleep(1)
            aux = aux-1
        self.sPrincipal.play(1)
        # Start the three processor threads and the two UI helper threads.
        self.procesador1.start()
        self.procesador2.start()
        self.procesador3.start()
        self.hiloAnimacion = threading.Thread(name='Animacion', target=self.pintar)
        self.hiloEventos = threading.Thread(name='Animacion', target=self.capturarEventos)
        #self.hiloEventos.daemon=True
        self.hiloEventos.start()
        self.hiloAnimacion.daemon = True
        self.hiloAnimacion.start()
        # Wait for every submitted task to be marked done, then join the UI.
        self.cola1.join()
        self.cola2.join()
        self.cola3.join()
        self.hiloAnimacion.join()
        self.hiloEventos.join()

    def capturarEventos(self):
        """Event loop: map mouse clicks on the recipe buttons to new
        processes on one of the three chefs (left/middle/right button
        selects chef 1/2/3; the button's screen region selects the dish).
        """
        # NOTE(review): each event iterates every (x, y) pair of a button
        # region and constructs a new process per pair, incrementing the id
        # counters even when the click does not match — looks unintended;
        # confirm against the desired behaviour.
        while self.procesador1.uso or self.procesador2.uso or self.procesador3.uso:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
                if event.type == pygame.MOUSEBUTTONDOWN:
                    print("Evento ratonBtn capturado")
                    # Region (700-760, 282-342): blender button -> Malteada.
                    for x in range(700, 760):
                        for y in range(282, 342):
                            proceso = Malteada(self.numMa, self.recursos[2], size)
                            self.numMa += 1
                            estado = "trabajandoLicuadora1"
                            if event.button == 1 and event.pos == (x, y):
                                self.cola1.put(proceso)
                                self.procesador1.estado = estado
                                self.pizarra1.arregloRecetas.append(proceso)
                                print("pico el click izq")
                            elif event.button == 2 and event.pos == (x, y):
                                self.cola2.put(proceso)
                                self.procesador2.estado = estado
                                self.pizarra2.arregloRecetas.append(proceso)
                                print("pico el click cent")
                            elif event.button == 3 and event.pos == (x, y):
                                self.cola3.put(proceso)
                                self.procesador3.estado = estado
                                self.pizarra3.arregloRecetas.append(proceso)
                                print("pico el click der")
                    # Region (700-760, 27-87): oven button -> PolloConPapas.
                    for x in range(700, 760):
                        for y in range(27, 87):
                            proceso = PolloConPapas(self.numPo, self.recursos[0], size)
                            self.numPo += 1
                            estado = "trabajandoHorno1"
                            if event.button == 1 and event.pos == (x, y):
                                self.cola1.put(proceso)
                                self.procesador1.estado = estado
                                self.pizarra1.arregloRecetas.append(proceso)
                                print("pico el click izq")
                            elif event.button == 2 and event.pos == (x, y):
                                self.cola2.put(proceso)
                                self.procesador2.estado = estado
                                self.pizarra2.arregloRecetas.append(proceso)
                                print("pico el click cent")
                            elif event.button == 3 and event.pos == (x, y):
                                self.cola3.put(proceso)
                                self.procesador3.estado = estado
                                self.pizarra3.arregloRecetas.append(proceso)
                                print("pico el click der")
                    # Region (700-750, 137-197): knives button -> Ensalada.
                    for x in range(700, 750):
                        for y in range(137, 197):
                            proceso = Ensalada(self.numEn, self.recursos[1], size)
                            self.numEn += 1
                            estado = "trabajandoCuchillo1"
                            if event.button == 1 and event.pos == (x, y):
                                self.cola1.put(proceso)
                                self.procesador1.estado = estado
                                self.pizarra1.arregloRecetas.append(proceso)
                                print("pico el click izq")
                            elif event.button == 2 and event.pos == (x, y):
                                self.cola2.put(proceso)
                                self.procesador2.estado = estado
                                self.pizarra2.arregloRecetas.append(proceso)
                                print("pico el click cent")
                            elif event.button == 3 and event.pos == (x, y):
                                self.cola3.put(proceso)
                                self.procesador3.estado = estado
                                self.pizarra3.arregloRecetas.append(proceso)
                                print("pico el click der")

    def pintar(self):
        """Render loop: redraw chefs, boards, pending recipe icons,
        recipe buttons, food and appliances while any chef is active."""
        while self.procesador1.uso or self.procesador2.uso or self.procesador3.uso:
            self.reloj.tick(3)  # cap the animation at ~3 FPS
            for elemento in self.listaChefs:
                elemento.update()
            time.sleep(0.5)
            screen.blit(self.fondo, self.fondorect)
            for elemento in self.listaChefs:
                screen.blit(elemento.image, elemento.rect)
            for elemento in self.listaPizarras:
                screen.blit(elemento.image, elemento.rect)
                # Draw each queued recipe with the sprite matching its state
                # (0 ready / 1 blocked / 2 suspended / 3 running / 4 done).
                # NOTE(review): removing from arregloRecetas while iterating
                # it can skip the following element — confirm intended.
                for i in elemento.arregloRecetas:
                    if elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado == 0:
                        screen.blit(i.iml, (elemento.rect[0]+30, elemento.rect[1]+elemento.arregloRecetas.index(i)*60+10))
                    elif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado == 1:
                        screen.blit(i.imb, (elemento.rect[0]+30, elemento.rect[1]+elemento.arregloRecetas.index(i)*60+10))
                    elif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado == 2:
                        screen.blit(i.ims, (elemento.rect[0]+30, elemento.rect[1]+elemento.arregloRecetas.index(i)*60+10))
                    elif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado == 3:
                        screen.blit(i.ime, (elemento.rect[0]+30, elemento.rect[1]+elemento.arregloRecetas.index(i)*60+10))
                    elif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado == 4:
                        elemento.arregloRecetas.remove(i)
            for elemento in self.listaRecetas:
                screen.blit(elemento.image, elemento.rect)
            for elemento in self.listaComida:
                screen.blit(elemento.iml, elemento.rect)
            screen.blit(self.cuchillos.image, self.cuchillos.rect)
            screen.blit(self.licuadora.image, self.licuadora.rect)
            screen.blit(self.horno.image, self.horno.rect)
            pygame.display.update()

    def crearProceso(self, nProcesos):
        """Enqueue nProcesos randomly generated dishes on random chefs."""
        for i in range(nProcesos):
            self.asignar_pedido_aleatorio()

    def asignar_pedido_aleatorio(self):
        """Create one random dish (pollo/ensalada/malteada) and put it on a
        randomly chosen chef's queue, updating that chef's sprite state."""
        aleatorio1 = np.random.randint(3)  # which dish
        aleatorio2 = np.random.randint(3)  # which chef
        if aleatorio1 == 0:
            proceso = PolloConPapas(self.numPo, self.recursos[0], size)
            self.numPo += 1
            estado = "trabajandoHorno1"
        elif aleatorio1 == 1:
            proceso = Ensalada(self.numEn, self.recursos[1], size)
            self.numEn += 1
            estado = "trabajandoCuchillo1"
        else:
            proceso = Malteada(self.numMa, self.recursos[2], size)
            self.numMa += 1
            estado = "trabajandoLicuadora1"
        if aleatorio2 == 0:
            self.cola1.put(proceso)
            self.procesador1.estado = estado
        elif aleatorio2 == 1:
            self.cola2.put(proceso)
            self.procesador2.estado = estado
        else:
            self.cola3.put(proceso)
            self.procesador3.estado = estado
class Chef(Sprite, Procesador):
    """A processor with a pygame sprite: a chef whose image alternates
    between two frames of the animation matching the resource (knives,
    oven or blender) of the process currently running on it."""

    def __init__(self, cont_size, idProcesador, *args):
        Sprite.__init__(self)
        Procesador.__init__(self, idProcesador, *args)
        self.cont_size = cont_size
        # estado names the current animation; each "working" pose has two
        # frames (…1 / …2) that update() toggles between.
        self.estados = ["espera", "trabajandoCuchillo1", "trabajandoCuchillo2",
                        "trabajandoHorno1", "trabajandoHorno2",
                        "trabajandoLicuadora1", "trabajandoLicuadora2"]
        self.estado = self.estados[0]
        self.imagenes = [util.cargar_imagen('imagenes/chef.png'),
                         util.cargar_imagen('imagenes/chefCuchi.png'),
                         util.cargar_imagen('imagenes/chefCuchi2.png'),
                         util.cargar_imagen('imagenes/chefHorno.png'),
                         util.cargar_imagen('imagenes/chefHorno2.png'),
                         util.cargar_imagen('imagenes/chefLicu.png'),
                         util.cargar_imagen('imagenes/chefLicu2.png')]
        self.image = self.imagenes[0]
        self.rect = self.image.get_rect()
        # Place the sprite at the column given by cont_size, 250 px above
        # the bottom edge.
        self.rect.move_ip(cont_size[0], cont_size[1]-250)

    def update(self):
        #animacion sprite
        # Pick the frame pair from the running process' resource name and
        # toggle between the two frames on each call; idle frame otherwise.
        if self.proceso == None:
            self.image = self.imagenes[0]
            print("el procesador", self, "no tiene proceso")
        else:
            if self.proceso.recurso.nombre == "Cuchillos":
                if self.estado == self.estados[1]:
                    self.image = self.imagenes[1]
                    self.estado = self.estados[2]
                else:
                    self.image = self.imagenes[2]
                    self.estado = self.estados[1]
            elif self.proceso.recurso.nombre == "Horno":
                if self.estado == self.estados[3]:
                    self.image = self.imagenes[3]
                    self.estado = self.estados[4]
                else:
                    self.image = self.imagenes[4]
                    self.estado = self.estados[3]
            else:
                if self.estado == self.estados[5]:
                    self.image = self.imagenes[5]
                    self.estado = self.estados[6]
                else:
                    self.image = self.imagenes[6]
                    self.estado = self.estados[5]
# Script entry point: build the game controller and run it.
# NOTE(review): rebinding the name `cliente` shadows the class — harmless
# here since nothing below needs the class again.
cliente = cliente()
cliente.iniciar()
| [
"noreply@github.com"
] | noreply@github.com |
0c8795e3c608547bd7a3663df3bd69eeabac19fa | 1978ed0ffd9264e0b598e82966acd53add379fce | /BK_Scripts/shapes.py | cdc2e922bed34aca1b93307f0e70aaebe4d1941e | [
"MIT"
] | permissive | bkvignesh/manim | 70d30c22d601eccf67108bca52317bfe3e2b4a7b | a4dace8f85113b1605235531f5de8f5d408ac823 | refs/heads/master | 2023-01-22T23:39:57.324803 | 2020-10-12T13:11:21 | 2020-10-12T13:11:21 | 295,242,598 | 0 | 0 | NOASSERTION | 2020-09-13T21:41:33 | 2020-09-13T21:41:32 | null | UTF-8 | Python | false | false | 1,112 | py | from manimlib.imports import *
from math import cos, sin, pi
import numpy as np
class Shapes(Scene):
    """Manim scene: a circle morphs through squares, rings and an ellipse,
    finishing with eight yellow pointer lines radiating from the origin."""

    def construct(self):
        # Base mobjects used throughout the animation.
        yellow_circle = Circle(color=YELLOW)
        blue_square = Square(color=DARK_BLUE)
        blue_square.surround(yellow_circle)
        red_rect = Rectangle(height=2, width=3, color=RED)
        thin_ring = Annulus(inner_radius=.2, outer_radius=1, color=BLUE)
        wide_ring = Annulus(inner_radius=.6, outer_radius=1, color=BLUE)
        thin_ring_copy = Annulus(inner_radius=.2, outer_radius=1, color=BLUE)
        oval = Ellipse(width=5, height=3, color=DARK_BLUE)
        # Eight unit-length lines, one every 45 degrees.
        spokes = [
            Line(ORIGIN,
                 np.array([cos(pi / 180 * 360 / 8 * k),
                           sin(pi / 180 * 360 / 8 * k), 0]),
                 color=YELLOW)
            for k in range(8)
        ]
        # Animation sequence (identical order to the storyboard).
        self.add(yellow_circle)
        self.play(FadeIn(blue_square))
        self.play(Transform(blue_square, red_rect))
        self.play(FadeOut(yellow_circle), FadeIn(thin_ring))
        self.play(Transform(thin_ring, wide_ring))
        self.play(Transform(wide_ring, thin_ring_copy))
        self.play(FadeOut(blue_square), GrowFromCenter(oval),
                  Transform(wide_ring, thin_ring))
        self.add(*spokes)
        self.wait(2)
| [
"vigneshbk42@gmail.com"
] | vigneshbk42@gmail.com |
c78584df8800ee65609ce9f1b1ef2ac873ef1205 | e75c4f0b2c69620e84d180d52ca94f9f03a29e1f | /account/account_move_line.py | 63ebf436ba160d9c0132e354d64ac617909f64d7 | [] | no_license | Tecvemar/openerp-addons-6.0 | 1af14214c557a9a1525812d837fe1528d0f6029a | 357956e64b4c5189b799065cbbd028d8a99e0c74 | refs/heads/master | 2021-01-21T05:32:46.649690 | 2017-08-31T11:57:48 | 2017-08-31T11:57:48 | 101,927,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71,198 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from operator import itemgetter
import netsvc
from osv import fields, osv
from tools.translate import _
import decimal_precision as dp
import tools
class account_move_line(osv.osv):
_name = "account.move.line"
_description = "Journal Items"
    def _query_get(self, cr, uid, obj='l', context=None):
        """Build the SQL WHERE fragment used by accounting reports to
        filter journal items (``obj`` is the table alias, default 'l').

        Filtering criteria come from the context: company_id, fiscalyear,
        state, date_from/date_to, period_from/period_to or periods,
        journal_ids and chart_account_id.  ``initial_bal`` in the context
        switches date/period conditions to "everything strictly before the
        requested range" to compute opening balances.

        NOTE(review): the clause is assembled by string concatenation of
        context values (dates, state) — safe only as long as those values
        come from trusted server-side wizards; confirm before reuse.
        """
        fiscalyear_obj = self.pool.get('account.fiscalyear')
        fiscalperiod_obj = self.pool.get('account.period')
        account_obj = self.pool.get('account.account')
        fiscalyear_ids = []
        if context is None:
            context = {}
        initial_bal = context.get('initial_bal', False)
        company_clause = " "
        if context.get('company_id', False):
            company_clause = " AND " +obj+".company_id = %s" % context.get('company_id', False)
        if not context.get('fiscalyear', False):
            if context.get('all_fiscalyear', False):
                #this option is needed by the aged balance report because otherwise, if we search only the draft ones, an open invoice of a closed fiscalyear won't be displayed
                fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])
            else:
                fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
        else:
            #for initial balance as well as for normal query, we check only the selected FY because the best practice is to generate the FY opening entries
            fiscalyear_ids = [context['fiscalyear']]
        # '0' keeps the IN (...) clause syntactically valid when empty.
        fiscalyear_clause = (','.join([str(x) for x in fiscalyear_ids])) or '0'
        state = context.get('state', False)
        where_move_state = ''
        where_move_lines_by_date = ''
        if context.get('date_from', False) and context.get('date_to', False):
            if initial_bal:
                # Opening balance: only moves strictly before date_from.
                where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date < '" +context['date_from']+"')"
            else:
                where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date >= '" +context['date_from']+"' AND date <= '"+context['date_to']+"')"
        if state:
            if state.lower() not in ['all']:
                where_move_state= " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE account_move.state = '"+state+"')"
        if context.get('period_from', False) and context.get('period_to', False) and not context.get('periods', False):
            if initial_bal:
                # Opening balance: expand to every period from the first
                # period of the company up to period_from.
                period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id
                first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id)], order='date_start', limit=1)[0]
                context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period, context['period_from'])
            else:
                context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])
        if context.get('periods', False):
            if initial_bal:
                query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
                period_ids = fiscalperiod_obj.search(cr, uid, [('id', 'in', context['periods'])], order='date_start', limit=1)
                if period_ids and period_ids[0]:
                    first_period = fiscalperiod_obj.browse(cr, uid, period_ids[0], context=context)
                    # Find the old periods where date start of those periods less then Start period
                    periods = fiscalperiod_obj.search(cr, uid, [('date_start', '<', first_period.date_start)])
                    periods = ','.join([str(x) for x in periods])
                    if periods:
                        query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) %s %s" % (fiscalyear_clause, periods, where_move_state, where_move_lines_by_date)
            else:
                ids = ','.join([str(x) for x in context['periods']])
                query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) %s %s" % (fiscalyear_clause, ids, where_move_state, where_move_lines_by_date)
        else:
            query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
        if context.get('journal_ids', False):
            query += ' AND '+obj+'.journal_id IN (%s)' % ','.join(map(str, context['journal_ids']))
        if context.get('chart_account_id', False):
            # Restrict to the selected chart of accounts and all its children.
            child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)
            query += ' AND '+obj+'.account_id IN (%s)' % ','.join(map(str, child_ids))
        query += company_clause
        return query
    def _amount_residual(self, cr, uid, ids, field_names, args, context=None):
        """
        This function returns the residual amount on a receivable or payable account.move.line.
        By default, it returns an amount in the currency of this journal entry (maybe different
        of the company currency), but if you pass 'residual_in_company_currency' = True in the
        context then the returned amount will be in company currency.
        """
        res = {}
        if context is None:
            context = {}
        cur_obj = self.pool.get('res.currency')
        for move_line in self.browse(cr, uid, ids, context=context):
            res[move_line.id] = {
                'amount_residual': 0.0,
                'amount_residual_currency': 0.0,
            }
            # Fully reconciled lines have no residual; skip them.
            if move_line.reconcile_id:
                continue
            if not move_line.account_id.type in ('payable', 'receivable'):
                #this function does not suport to be used on move lines not related to payable or receivable accounts
                continue
            # Start from the line's own amount; sign records the direction
            # (debit vs credit) so the residual keeps the original sign.
            if move_line.currency_id:
                move_line_total = move_line.amount_currency
                sign = move_line.amount_currency < 0 and -1 or 1
            else:
                move_line_total = move_line.debit - move_line.credit
                sign = (move_line.debit - move_line.credit) < 0 and -1 or 1
            line_total_in_company_currency = move_line.debit - move_line.credit
            context_unreconciled = context.copy()
            if move_line.reconcile_partial_id:
                # Subtract every partial payment matched against this line.
                for payment_line in move_line.reconcile_partial_id.line_partial_ids:
                    if payment_line.id == move_line.id:
                        continue
                    if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:
                        move_line_total += payment_line.amount_currency
                    else:
                        if move_line.currency_id:
                            # Convert the payment into the line's currency at
                            # the payment date.
                            context_unreconciled.update({'date': payment_line.date})
                            amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)
                            move_line_total += amount_in_foreign_currency
                        else:
                            move_line_total += (payment_line.debit - payment_line.credit)
                    line_total_in_company_currency += (payment_line.debit - payment_line.credit)
            result = move_line_total
            # Round the foreign-currency residual with the currency's precision.
            res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)
            res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency
        return res
def default_get(self, cr, uid, fields, context=None):
data = self._default_get(cr, uid, fields, context=context)
for f in data.keys():
if f not in fields:
del data[f]
return data
    def create_analytic_lines(self, cr, uid, ids, context=None):
        """Create one account.analytic.line for each move line in ``ids``
        that carries an analytic account.

        Raises an error if the move line's journal has no analytic journal
        configured.  The analytic amount is credit - debit (analytic sign
        convention is opposite to the general ledger's).
        """
        acc_ana_line_obj = self.pool.get('account.analytic.line')
        for obj_line in self.browse(cr, uid, ids, context=context):
            if obj_line.analytic_account_id:
                if not obj_line.journal_id.analytic_journal_id:
                    raise osv.except_osv(_('No Analytic Journal !'),_("You have to define an analytic journal on the '%s' journal!") % (obj_line.journal_id.name, ))
                amt = (obj_line.credit or 0.0) - (obj_line.debit or 0.0)
                vals_lines = {
                    'name': obj_line.name,
                    'date': obj_line.date,
                    'account_id': obj_line.analytic_account_id.id,
                    'unit_amount': obj_line.quantity,
                    'product_id': obj_line.product_id and obj_line.product_id.id or False,
                    'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,
                    'amount': amt,
                    'general_account_id': obj_line.account_id.id,
                    'journal_id': obj_line.journal_id.analytic_journal_id.id,
                    'ref': obj_line.ref,
                    'move_id': obj_line.id,
                    'user_id': uid
                }
                acc_ana_line_obj.create(cr, uid, vals_lines)
        return True
def _default_get_move_form_hook(self, cursor, user, data):
'''Called in the end of default_get method for manual entry in account_move form'''
if data.has_key('analytic_account_id'):
del(data['analytic_account_id'])
if data.has_key('account_tax_id'):
del(data['account_tax_id'])
return data
    def convert_to_period(self, cr, uid, context=None):
        """Normalize context['period_id']: the client may send the period's
        display name (a string) instead of its database id, so resolve it
        through a name search and store the first match back in context.

        Returns the (possibly updated) context dict.
        """
        if context is None:
            context = {}
        period_obj = self.pool.get('account.period')
        #check if the period_id changed in the context from client side
        if context.get('period_id', False):
            period_id = context.get('period_id')
            if type(period_id) == str:
                # NOTE(review): ids[0] raises IndexError when no period
                # matches the given name — confirm callers guarantee a match.
                ids = period_obj.search(cr, uid, [('name', 'ilike', period_id)])
                context.update({
                    'period_id': ids[0]
                })
        return context
    def _default_get(self, cr, uid, fields, context=None):
        """Compute default values for a new journal item.

        Three situations are handled, in order:
          1. manual encoding from the account.move form (context['lines']):
             mirror the pending lines and propose a counterpart that
             balances the move;
          2. 'move_id' not requested: return the plain defaults;
          3. journal/period encoding: attach to the last draft move of the
             same journal/period and propose a balancing counterpart
             (account mapped through the fiscal position, taxes deducted,
             amount converted to the account currency).
        """
        if context is None:
            context = {}
        if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
            context['journal_id'] = context.get('search_default_journal_id')
        account_obj = self.pool.get('account.account')
        period_obj = self.pool.get('account.period')
        journal_obj = self.pool.get('account.journal')
        move_obj = self.pool.get('account.move')
        tax_obj = self.pool.get('account.tax')
        fiscal_pos_obj = self.pool.get('account.fiscal.position')
        partner_obj = self.pool.get('res.partner')
        currency_obj = self.pool.get('res.currency')
        context = self.convert_to_period(cr, uid, context)
        # Compute simple values
        data = super(account_move_line, self).default_get(cr, uid, fields, context=context)
        # Starts: Manual entry from account.move form
        if context.get('lines',[]):
            total_new = 0.00
            for line_record in context['lines']:
                # context['lines'] may hold ids (saved lines) or
                # (0, 0, vals) one2many command tuples (unsaved lines)
                if not isinstance(line_record, (tuple, list)):
                    line_record_detail = self.read(cr, uid, line_record, ['analytic_account_id','debit','credit','name','reconcile_id','tax_code_id','tax_amount','account_id','ref','currency_id','date_maturity','amount_currency','partner_id', 'reconcile_partial_id'])
                else:
                    line_record_detail = line_record[2]
                total_new += (line_record_detail['debit'] or 0.00)- (line_record_detail['credit'] or 0.00)
                for item in line_record_detail.keys():
                    data[item] = line_record_detail[item]
            if context['journal']:
                journal_data = journal_obj.browse(cr, uid, context['journal'], context=context)
                # NOTE(review): both branches below are identical; the
                # journal type currently does not change the chosen account.
                if journal_data.type == 'purchase':
                    if total_new > 0:
                        account = journal_data.default_credit_account_id
                    else:
                        account = journal_data.default_debit_account_id
                else:
                    if total_new > 0:
                        account = journal_data.default_credit_account_id
                    else:
                        account = journal_data.default_debit_account_id
                if account and ((not fields) or ('debit' in fields) or ('credit' in fields)) and 'partner_id' in data and (data['partner_id']):
                    part = partner_obj.browse(cr, uid, data['partner_id'], context=context)
                    account = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, account.id)
                    account = account_obj.browse(cr, uid, account, context=context)
                    data['account_id'] = account.id
            # propose the amount that balances the lines entered so far
            s = -total_new
            data['debit'] = s > 0 and s or 0.0
            data['credit'] = s < 0 and -s or 0.0
            data = self._default_get_move_form_hook(cr, uid, data)
            return data
        # Ends: Manual entry from account.move form
        if not 'move_id' in fields: #we are not in manual entry
            return data
        # Compute the current move
        move_id = False
        partner_id = False
        if context.get('journal_id', False) and context.get('period_id', False):
            if 'move_id' in fields:
                # reuse the last draft move of the same journal/period
                cr.execute('SELECT move_id \
                    FROM \
                        account_move_line \
                    WHERE \
                        journal_id = %s and period_id = %s AND create_uid = %s AND state = %s \
                    ORDER BY id DESC limit 1',
                    (context['journal_id'], context['period_id'], uid, 'draft'))
                res = cr.fetchone()
                move_id = (res and res[0]) or False
                if not move_id:
                    return data
                else:
                    data['move_id'] = move_id
            if 'date' in fields:
                cr.execute('SELECT date \
                    FROM \
                        account_move_line \
                    WHERE \
                        journal_id = %s AND period_id = %s AND create_uid = %s \
                    ORDER BY id DESC',
                    (context['journal_id'], context['period_id'], uid))
                res = cr.fetchone()
                if res:
                    data['date'] = res[0]
                else:
                    period = period_obj.browse(cr, uid, context['period_id'],
                            context=context)
                    data['date'] = period.date_start
        if not move_id:
            return data
        total = 0
        ref_id = False
        move = move_obj.browse(cr, uid, move_id, context=context)
        if 'name' in fields:
            data.setdefault('name', move.line_id[-1].name)
        acc1 = False
        for l in move.line_id:
            acc1 = l.account_id
            partner_id = partner_id or l.partner_id.id
            ref_id = ref_id or l.ref
            total += (l.debit or 0.0) - (l.credit or 0.0)
        if 'ref' in fields:
            data['ref'] = ref_id
        if 'partner_id' in fields:
            data['partner_id'] = partner_id
        # NOTE(review): as above, both branches are identical today.
        if move.journal_id.type == 'purchase':
            if total > 0:
                account = move.journal_id.default_credit_account_id
            else:
                account = move.journal_id.default_debit_account_id
        else:
            if total > 0:
                account = move.journal_id.default_credit_account_id
            else:
                account = move.journal_id.default_debit_account_id
        part = partner_id and partner_obj.browse(cr, uid, partner_id) or False
        # part = False is acceptable for fiscal position.
        account = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, account.id)
        if account:
            account = account_obj.browse(cr, uid, account, context=context)
        if account and ((not fields) or ('debit' in fields) or ('credit' in fields)):
            data['account_id'] = account.id
            # Propose the price VAT excluded, the VAT will be added when confirming line
            if account.tax_ids:
                taxes = fiscal_pos_obj.map_tax(cr, uid, part and part.property_account_position or False, account.tax_ids)
                tax = tax_obj.browse(cr, uid, taxes)
                for t in tax_obj.compute_inv(cr, uid, tax, total, 1):
                    total -= t['amount']
            s = -total
            data['debit'] = s > 0 and s or 0.0
            data['credit'] = s < 0 and -s or 0.0
        if account and account.currency_id:
            data['currency_id'] = account.currency_id.id
            acc = account
            if s>0:
                acc = acc1
            compute_ctx = context.copy()
            compute_ctx.update({
                    'res.currency.compute.account': acc,
                    'res.currency.compute.account_invert': True,
                })
            v = currency_obj.compute(cr, uid, account.company_id.currency_id.id, data['currency_id'], s, context=compute_ctx)
            data['amount_currency'] = v
        return data
def on_create_write(self, cr, uid, id, context=None):
if not id:
return []
ml = self.browse(cr, uid, id, context=context)
return map(lambda x: x.id, ml.move_id.line_id)
    def _balance(self, cr, uid, ids, name, arg, context=None):
        """Function field: cumulative balance per line.

        For each requested line, sums debit-credit of all lines of the same
        account with a lower or equal id, filtered by _query_get() (fiscal
        year / period restrictions carried by the context).
        """
        if context is None:
            context = {}
        c = context.copy()
        # 'initital_bal' (sic) is the exact key _query_get() tests for;
        # the typo is shared module-wide, so it must not be "fixed" here.
        c['initital_bal'] = True
        sql = """SELECT l2.id, SUM(l1.debit-l1.credit)
                    FROM account_move_line l1, account_move_line l2
                    WHERE l2.account_id = l1.account_id
                      AND l1.id <= l2.id
                      AND l2.id IN %s AND """ + \
                self._query_get(cr, uid, obj='l1', context=c) + \
                " GROUP BY l2.id"
        cr.execute(sql, [tuple(ids)])
        return dict(cr.fetchall())
def _invoice(self, cursor, user, ids, name, arg, context=None):
invoice_obj = self.pool.get('account.invoice')
res = {}
for line_id in ids:
res[line_id] = False
cursor.execute('SELECT l.id, i.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' \
'AND l.id IN %s',
(tuple(ids),))
invoice_ids = []
for line_id, invoice_id in cursor.fetchall():
res[line_id] = invoice_id
invoice_ids.append(invoice_id)
invoice_names = {False: ''}
for invoice_id, name in invoice_obj.name_get(cursor, user, invoice_ids, context=context):
invoice_names[invoice_id] = name
for line_id in res.keys():
invoice_id = res[line_id]
res[line_id] = (invoice_id, invoice_names[invoice_id])
return res
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for line in self.browse(cr, uid, ids, context=context):
if line.ref:
result.append((line.id, (line.move_id.name or '')+' ('+line.ref+')'))
else:
result.append((line.id, line.move_id.name))
return result
    def _balance_search(self, cursor, user, obj, name, args, domain=None, context=None):
        """fnct_search for the 'balance' function field.

        Translates a domain on 'balance' into a list of matching line ids.
        NOTE(review): the HAVING clause is built by string interpolation of
        the domain operator and value (x[1], x[2]) -- a SQL injection risk
        if the domain ever carries user-controlled strings; consider
        whitelisting operators and parameterizing the value.
        NOTE(review): grouping by id makes SUM(debit-credit) a per-row
        value, so this effectively filters on abs(debit-credit) per line --
        confirm this is the intended semantics before changing it.
        """
        if context is None:
            context = {}
        if not args:
            return []
        where = ' AND '.join(map(lambda x: '(abs(sum(debit-credit))'+x[1]+str(x[2])+')',args))
        cursor.execute('SELECT id, SUM(debit-credit) FROM account_move_line \
                     GROUP BY id, debit, credit having '+where)
        res = cursor.fetchall()
        if not res:
            return [('id', '=', '0')]
        return [('id', 'in', [x[0] for x in res])]
    def _invoice_search(self, cursor, user, obj, name, args, context=None):
        """fnct_search for the 'invoice' function field.

        Rewrites the incoming domain *in place*: dotted sub-fields
        ('invoice.xxx') and name searches are first resolved to invoice id
        lists, then a SQL WHERE clause on account_invoice is built and the
        matching move line ids returned as an 'id in (...)' domain.
        """
        if not args:
            return []
        invoice_obj = self.pool.get('account.invoice')
        i = 0
        while i < len(args):
            fargs = args[i][0].split('.', 1)
            # 'invoice.field' -> resolve the sub-domain on account.invoice
            if len(fargs) > 1:
                args[i] = (fargs[0], 'in', invoice_obj.search(cursor, user,
                    [(fargs[1], args[i][1], args[i][2])]))
                i += 1
                continue
            # plain string value -> resolve via name_search
            if isinstance(args[i][2], basestring):
                res_ids = invoice_obj.name_search(cursor, user, args[i][2], [],
                        args[i][1])
                args[i] = (args[i][0], 'in', [x[0] for x in res_ids])
            i += 1
        qu1, qu2 = [], []
        for x in args:
            if x[1] != 'in':
                if (x[2] is False) and (x[1] == '='):
                    qu1.append('(i.id IS NULL)')
                elif (x[2] is False) and (x[1] == '<>' or x[1] == '!='):
                    qu1.append('(i.id IS NOT NULL)')
                else:
                    qu1.append('(i.id %s %s)' % (x[1], '%s'))
                    qu2.append(x[2])
            elif x[1] == 'in':
                if len(x[2]) > 0:
                    qu1.append('(i.id IN (%s))' % (','.join(['%s'] * len(x[2]))))
                    qu2 += x[2]
                else:
                    qu1.append(' (False)')
        if qu1:
            qu1 = ' AND' + ' AND'.join(qu1)
        else:
            qu1 = ''
        cursor.execute('SELECT l.id ' \
                'FROM account_move_line l, account_invoice i ' \
                'WHERE l.move_id = i.move_id ' + qu1, qu2)
        res = cursor.fetchall()
        if not res:
            return [('id', '=', '0')]
        return [('id', 'in', [x[0] for x in res])]
def _get_move_lines(self, cr, uid, ids, context=None):
result = []
for move in self.pool.get('account.move').browse(cr, uid, ids, context=context):
for line in move.line_id:
result.append(line.id)
return result
_columns = {
'name': fields.char('Name', size=64, required=True),
'quantity': fields.float('Quantity', digits=(16,2), help="The optional quantity expressed by this line, eg: number of product sold. The quantity is not a legal requirement but is very useful for some reports."),
'product_uom_id': fields.many2one('product.uom', 'UoM'),
'product_id': fields.many2one('product.product', 'Product'),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade", domain=[('type','<>','view'), ('type', '<>', 'closed')], select=2),
'move_id': fields.many2one('account.move', 'Move', ondelete="cascade", help="The move of this entry line.", select=2, required=True),
'narration': fields.related('move_id','narration', type='text', relation='account.move', string='Narration'),
'ref': fields.related('move_id', 'ref', string='Reference', type='char', size=64, store=True),
'statement_id': fields.many2one('account.bank.statement', 'Statement', help="The bank statement used for bank reconciliation", select=1),
'reconcile_id': fields.many2one('account.move.reconcile', 'Reconcile', readonly=True, ondelete='set null', select=2),
'reconcile_partial_id': fields.many2one('account.move.reconcile', 'Partial Reconcile', readonly=True, ondelete='set null', select=2),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency if it is a multi-currency entry.", digits_compute=dp.get_precision('Account')),
'amount_residual_currency': fields.function(_amount_residual, method=True, string='Residual Amount', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in its currency (maybe different of the company currency)."),
'amount_residual': fields.function(_amount_residual, method=True, string='Residual Amount', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in the company currency."),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
'period_id': fields.many2one('account.period', 'Period', required=True, select=2),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, select=1),
'blocked': fields.boolean('Litigation', help="You can check this box to mark this journal item as a litigation with the associated partner"),
'partner_id': fields.many2one('res.partner', 'Partner', select=1, ondelete='restrict'),
'date_maturity': fields.date('Due date', select=True ,help="This field is used for payable and receivable journal entries. You can put the limit date for the payment of this line."),
'date': fields.related('move_id','date', string='Effective date', type='date', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['date'], 20)
}),
'date_created': fields.date('Creation date', select=True),
'analytic_lines': fields.one2many('account.analytic.line', 'move_id', 'Analytic lines'),
'centralisation': fields.selection([('normal','Normal'),('credit','Credit Centralisation'),('debit','Debit Centralisation'),('currency','Currency Adjustment')], 'Centralisation', size=8),
'balance': fields.function(_balance, fnct_search=_balance_search, method=True, string='Balance'),
'state': fields.selection([('draft','Unbalanced'), ('valid','Valid')], 'State', readonly=True,
help='When new move line is created the state will be \'Draft\'.\n* When all the payments are done it will be in \'Valid\' state.'),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Account', help="The Account can either be a base tax code or a tax code account."),
'tax_amount': fields.float('Tax/Base Amount', digits_compute=dp.get_precision('Account'), select=True, help="If the Tax account is a tax code account, this field will contain the taxed amount.If the tax account is base tax code, "\
"this field will contain the basic amount(without tax)."),
'invoice': fields.function(_invoice, method=True, string='Invoice',
type='many2one', relation='account.invoice', fnct_search=_invoice_search),
'account_tax_id':fields.many2one('account.tax', 'Tax'),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
#TODO: remove this
#'amount_taxed':fields.float("Taxed Amount", digits_compute=dp.get_precision('Account')),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
def _get_date(self, cr, uid, context=None):
if context is None:
context or {}
period_obj = self.pool.get('account.period')
dt = time.strftime('%Y-%m-%d')
if ('journal_id' in context) and ('period_id' in context):
cr.execute('SELECT date FROM account_move_line ' \
'WHERE journal_id = %s AND period_id = %s ' \
'ORDER BY id DESC limit 1',
(context['journal_id'], context['period_id']))
res = cr.fetchone()
if res:
dt = res[0]
else:
period = period_obj.browse(cr, uid, context['period_id'], context=context)
dt = period.date_start
return dt
def _get_currency(self, cr, uid, context=None):
if context is None:
context = {}
if not context.get('journal_id', False):
return False
cur = self.pool.get('account.journal').browse(cr, uid, context['journal_id']).currency
return cur and cur.id or False
_defaults = {
'blocked': False,
'centralisation': 'normal',
'date': _get_date,
'date_created': lambda *a: time.strftime('%Y-%m-%d'),
'state': 'draft',
'currency_id': _get_currency,
'journal_id': lambda self, cr, uid, c: c.get('journal_id', c.get('journal',False)),
'account_id': lambda self, cr, uid, c: c.get('account_id', False),
'period_id': lambda self, cr, uid, c: c.get('period_id', False),
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.move.line', context=c)
}
_order = "date desc, id desc"
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in accounting entry !'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in accounting entry !'),
]
def _auto_init(self, cr, context=None):
super(account_move_line, self)._auto_init(cr, context=context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'account_move_line_journal_id_period_id_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_journal_id_period_id_index ON account_move_line (journal_id, period_id)')
def _check_no_view(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'view':
return False
return True
def _check_no_closed(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'closed':
return False
return True
def _check_company_id(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.company_id != l.account_id.company_id or l.company_id != l.period_id.company_id:
return False
return True
def _check_date(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.journal_id.allow_date:
if not time.strptime(line.date[:10],'%Y-%m-%d') >= time.strptime(line.period_id.date_start, '%Y-%m-%d') or not time.strptime(line.date[:10], '%Y-%m-%d') <= time.strptime(line.period_id.date_stop, '%Y-%m-%d'):
return False
return True
_constraints = [
(_check_no_view, 'You can not create move line on view account.', ['account_id']),
(_check_no_closed, 'You can not create move line on closed account.', ['account_id']),
(_check_company_id, 'Company must be same for its related account and period.',['company_id'] ),
(_check_date, 'The date of your Journal Entry is not in the defined period!', ['date']),
]
#TODO: ONCHANGE_ACCOUNT_ID: set account_tax_id
def onchange_currency(self, cr, uid, ids, account_id, amount, currency_id, date=False, journal=False, context=None):
if context is None:
context = {}
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
currency_obj = self.pool.get('res.currency')
if (not currency_id) or (not account_id):
return {}
result = {}
acc = account_obj.browse(cr, uid, account_id, context=context)
if (amount>0) and journal:
x = journal_obj.browse(cr, uid, journal).default_credit_account_id
if x: acc = x
context.update({
'date': date,
'res.currency.compute.account': acc,
})
v = currency_obj.compute(cr, uid, currency_id, acc.company_id.currency_id.id, amount, context=context)
result['value'] = {
'debit': v > 0 and v or 0.0,
'credit': v < 0 and -v or 0.0
}
return result
    def onchange_partner_id(self, cr, uid, ids, move_id, partner_id, account_id=None, debit=0, credit=0, date=False, journal=False):
        """When the partner changes, propose a maturity date (from the
        partner's payment term) and, if no account was chosen yet, the
        partner's payable/receivable account depending on the journal type
        (mapped through the fiscal position)."""
        partner_obj = self.pool.get('res.partner')
        payment_term_obj = self.pool.get('account.payment.term')
        journal_obj = self.pool.get('account.journal')
        fiscal_pos_obj = self.pool.get('account.fiscal.position')
        val = {}
        val['date_maturity'] = False
        if not partner_id:
            return {'value':val}
        if not date:
            date = datetime.now().strftime('%Y-%m-%d')
        part = partner_obj.browse(cr, uid, partner_id)
        # payment term computed on a nominal 100.0: only the resulting
        # date of the first installment is used, not the amount
        if part.property_payment_term:
            res = payment_term_obj.compute(cr, uid, part.property_payment_term.id, 100, date)
            if res:
                val['date_maturity'] = res[0][0]
        if not account_id:
            id1 = part.property_account_payable.id
            id2 = part.property_account_receivable.id
            if journal:
                jt = journal_obj.browse(cr, uid, journal).type
                #FIXME: Bank and cash journal are such a journal we can not assume a account based on this 2 journals
                # Bank and cash journal can have a payment or receipt transaction, and in both type partner account
                # will not be same id payment then payable, and if receipt then receivable
                #if jt in ('sale', 'purchase_refund', 'bank', 'cash'):
                if jt in ('sale', 'purchase_refund'):
                    val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
                elif jt in ('purchase', 'sale_refund', 'expense', 'bank', 'cash'):
                    val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
                # cascade the account change to also propose a default tax
                if val.get('account_id', False):
                    d = self.onchange_account_id(cr, uid, ids, val['account_id'])
                    val.update(d['value'])
        return {'value':val}
def onchange_account_id(self, cr, uid, ids, account_id=False, partner_id=False):
account_obj = self.pool.get('account.account')
partner_obj = self.pool.get('res.partner')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
if account_id:
res = account_obj.browse(cr, uid, account_id)
tax_ids = res.tax_ids
if tax_ids and partner_id:
part = partner_obj.browse(cr, uid, partner_id)
tax_id = fiscal_pos_obj.map_tax(cr, uid, part and part.property_account_position or False, tax_ids)[0]
else:
tax_id = tax_ids and tax_ids[0].id or False
val['account_tax_id'] = tax_id
return {'value': val}
#
# type: the type if reconciliation (no logic behind this field, for info)
#
# writeoff; entry generated for the difference between the lines
#
    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Search override.

        Injects period restrictions carried by the context ('fiscalyear',
        'periods') into the domain, and, for the manual reconciliation
        wizard ('next_partner_only'), restricts the result to one partner.

        NOTE(review): `partner[0]` is applied both when partner comes from
        get_next_partner_only() (a sql row tuple) and when it comes from
        context['partner_id'] -- confirm callers always pass the latter in
        an indexable form, not a bare id.
        """
        if context is None:
            context = {}
        if context and context.get('fiscalyear', False):
            periods = self.pool.get('account.fiscalyear').browse(cr, uid, context.get('fiscalyear'), context=context).period_ids
            period_ids = [period.id for period in periods]
            args.append(('period_id', 'in', period_ids))
        if context and context.get('periods', False):
            args.append(('period_id', 'in', context.get('periods')))
        if context and context.get('next_partner_only', False):
            if not context.get('partner_id', False):
                partner = self.get_next_partner_only(cr, uid, offset, context)
            else:
                partner = context.get('partner_id', False)
            if not partner:
                return []
            args.append(('partner_id', '=', partner[0]))
        return super(account_move_line, self).search(cr, uid, args, offset, limit, order, context, count)
    def get_next_partner_only(self, cr, uid, offset=0, context=None):
        """Return the next partner having both unreconciled debit and credit
        lines on reconcilable accounts, ordered by oldest reconciliation
        date first.

        :return: a one-element row tuple (partner_id,) or None
        """
        cr.execute(
             """
             SELECT p.id
             FROM res_partner p
             RIGHT JOIN (
                 SELECT l.partner_id AS partner_id, SUM(l.debit) AS debit, SUM(l.credit) AS credit
                 FROM account_move_line l
                 LEFT JOIN account_account a ON (a.id = l.account_id)
                 LEFT JOIN res_partner p ON (l.partner_id = p.id)
                 WHERE a.reconcile IS TRUE
                 AND l.reconcile_id IS NULL
                 AND (p.last_reconciliation_date IS NULL OR l.date > p.last_reconciliation_date)
                 AND l.state <> 'draft'
                 GROUP BY l.partner_id
             ) AS s ON (p.id = s.partner_id)
             WHERE debit > 0 AND credit > 0
             ORDER BY p.last_reconciliation_date LIMIT 1 OFFSET %s""", (offset, )
        )
        return cr.fetchone()
    def reconcile_partial(self, cr, uid, ids, type='auto', context=None):
        """Partially reconcile the given lines.

        Lines already belonging to a partial reconciliation drag their
        siblings in ('merges'); the others are added as-is ('unmerge').
        If the combined balance is zero, a full reconciliation is made
        instead; otherwise a new account.move.reconcile holding the partial
        links is created.
        """
        move_rec_obj = self.pool.get('account.move.reconcile')
        merges = []
        unmerge = []
        total = 0.0
        merges_rec = []
        company_list = []
        if context is None:
            context = {}
        # all lines must belong to the same company
        for line in self.browse(cr, uid, ids, context=context):
            if company_list and not line.company_id.id in company_list:
                raise osv.except_osv(_('Warning !'), _('To reconcile the entries company should be the same for all entries'))
            company_list.append(line.company_id.id)
        for line in self.browse(cr, uid, ids, context=context):
            company_currency_id = line.company_id.currency_id
            if line.reconcile_id:
                raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s), Move '%s' is already reconciled!") % (line.name, line.id, line.move_id.name))
            if line.reconcile_partial_id:
                # pull in the other lines of the existing partial reconcile
                for line2 in line.reconcile_partial_id.line_partial_ids:
                    if not line2.reconcile_id:
                        if line2.id not in merges:
                            merges.append(line2.id)
                        total += (line2.debit or 0.0) - (line2.credit or 0.0)
                merges_rec.append(line.reconcile_partial_id.id)
            else:
                unmerge.append(line.id)
                total += (line.debit or 0.0) - (line.credit or 0.0)
        # balanced set: promote to a full reconciliation
        if self.pool.get('res.currency').is_zero(cr, uid, company_currency_id, total):
            res = self.reconcile(cr, uid, merges+unmerge, context=context)
            return res
        # marking the lines as reconciled does not change their validity, so there is no need
        # to revalidate their moves completely.
        reconcile_context = dict(context, novalidate=True)
        r_id = move_rec_obj.create(cr, uid, {
            'type': type,
            'line_partial_ids': map(lambda x: (4,x,False), merges+unmerge)
        }, context=reconcile_context)
        move_rec_obj.reconcile_partial_check(cr, uid, [r_id] + merges_rec, context=reconcile_context)
        return True
    def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
        """Fully reconcile the given move lines.

        Validates that all lines share one company and one reconcilable
        account, creates a write-off move for any residual amount (using
        writeoff_acc_id / writeoff_period_id / writeoff_journal_id), then
        creates the account.move.reconcile record linking all lines and
        triggers their workflow.

        :return: id of the created account.move.reconcile
        """
        account_obj = self.pool.get('account.account')
        move_obj = self.pool.get('account.move')
        move_rec_obj = self.pool.get('account.move.reconcile')
        partner_obj = self.pool.get('res.partner')
        currency_obj = self.pool.get('res.currency')
        lines = self.browse(cr, uid, ids, context=context)
        unrec_lines = filter(lambda x: not x['reconcile_id'], lines)
        credit = debit = 0.0
        currency = 0.0
        account_id = False
        partner_id = False
        if context is None:
            context = {}
        # all lines must belong to the same company
        company_list = []
        for line in self.browse(cr, uid, ids, context=context):
            if company_list and not line.company_id.id in company_list:
                raise osv.except_osv(_('Warning !'), _('To reconcile the entries company should be the same for all entries'))
            company_list.append(line.company_id.id)
        # accumulate totals over the not-yet-reconciled lines
        for line in unrec_lines:
            if line.state <> 'valid':
                raise osv.except_osv(_('Error'),
                        _('Entry "%s" is not valid !') % line.name)
            credit += line['credit']
            debit += line['debit']
            currency += line['amount_currency'] or 0.0
            account_id = line['account_id']['id']
            partner_id = (line['partner_id'] and line['partner_id']['id']) or False
        writeoff = debit - credit
        # if 'date_p' is in the context, use it as the write-off date
        if context.has_key('date_p') and context['date_p']:
            date=context['date_p']
        else:
            date = time.strftime('%Y-%m-%d')
        cr.execute('SELECT account_id, reconcile_id '\
                   'FROM account_move_line '\
                   'WHERE id IN %s '\
                   'GROUP BY account_id,reconcile_id',
                   (tuple(ids), ))
        r = cr.fetchall()
        #TODO: move this check to a constraint in the account_move_reconcile object
        if (len(r) != 1) and not context.get('fy_closing', False):
            raise osv.except_osv(_('Error'), _('Entries are not of the same account or already reconciled ! '))
        if not unrec_lines:
            raise osv.except_osv(_('Error'), _('Entry is already reconciled'))
        account = account_obj.browse(cr, uid, account_id, context=context)
        if not context.get('fy_closing', False) and not account.reconcile:
            raise osv.except_osv(_('Error'), _('The account is not defined to be reconciled !'))
        if r[0][1] != None:
            raise osv.except_osv(_('Error'), _('Some entries are already reconciled !'))
        # a residual in company or foreign currency requires a write-off move
        if (not currency_obj.is_zero(cr, uid, account.company_id.currency_id, writeoff)) or \
           (not context.get('fy_closing', False) and account.currency_id and (not currency_obj.is_zero(cr, uid, account.currency_id, currency))):
            if not writeoff_acc_id:
                raise osv.except_osv(_('Warning'), _('You have to provide an account for the write off entry !'))
            if writeoff > 0:
                debit = writeoff
                credit = 0.0
                self_credit = writeoff
                self_debit = 0.0
            else:
                debit = 0.0
                credit = -writeoff
                self_credit = 0.0
                self_debit = -writeoff
            # If comment exist in context, take it
            if 'comment' in context and context['comment']:
                libelle = context['comment']
            else:
                libelle = _('Write-Off')
            cur_obj = self.pool.get('res.currency')
            cur_id = False
            amount_currency_writeoff = 0.0
            # compute the write-off amount in the reconciliation currency
            if context.get('company_currency_id',False) != context.get('currency_id',False):
                cur_id = context.get('currency_id',False)
                for line in unrec_lines:
                    if line.currency_id and line.currency_id.id == context.get('currency_id',False):
                        amount_currency_writeoff += line.amount_currency
                    else:
                        tmp_amount = cur_obj.compute(cr, uid, line.account_id.company_id.currency_id.id, context.get('currency_id',False), abs(line.debit-line.credit), context={'date': line.date})
                        amount_currency_writeoff += (line.debit > 0) and tmp_amount or -tmp_amount
            # two balancing lines: one on the reconciled account, one on
            # the write-off account
            writeoff_lines = [
                (0, 0, {
                    'name': libelle,
                    'debit': self_debit,
                    'credit': self_credit,
                    'account_id': account_id,
                    'date': date,
                    'partner_id': partner_id,
                    'currency_id': cur_id or (account.currency_id.id or False),
                    'amount_currency': amount_currency_writeoff and -1 * amount_currency_writeoff or (account.currency_id.id and -1 * currency or 0.0)
                }),
                (0, 0, {
                    'name': libelle,
                    'debit': debit,
                    'credit': credit,
                    'account_id': writeoff_acc_id,
                    'analytic_account_id': context.get('analytic_id', False),
                    'date': date,
                    'partner_id': partner_id,
                    'currency_id': cur_id or (account.currency_id.id or False),
                    'amount_currency': amount_currency_writeoff and amount_currency_writeoff or (account.currency_id.id and currency or 0.0)
                })
            ]
            writeoff_move_id = move_obj.create(cr, uid, {
                'period_id': writeoff_period_id,
                'journal_id': writeoff_journal_id,
                'date':date,
                'state': 'draft',
                'line_id': writeoff_lines
            })
            # the write-off line on the reconciled account joins the reconciliation
            writeoff_line_ids = self.search(cr, uid, [('move_id', '=', writeoff_move_id), ('account_id', '=', account_id)])
            if account_id == writeoff_acc_id:
                writeoff_line_ids = [writeoff_line_ids[1]]
            ids += writeoff_line_ids
        # marking the lines as reconciled does not change their validity, so there is no need
        # to revalidate their moves completely.
        reconcile_context = dict(context, novalidate=True)
        r_id = move_rec_obj.create(cr, uid, {
            'type': type,
            'line_id': map(lambda x: (4, x, False), ids),
            'line_partial_ids': map(lambda x: (3, x, False), ids)
        }, context=reconcile_context)
        wf_service = netsvc.LocalService("workflow")
        # the id of the move.reconcile is written in the move.line (self) by the create method above
        # because of the way the line_id are defined: (4, x, False)
        for id in ids:
            wf_service.trg_trigger(uid, 'account.move.line', id, cr)
        if lines and lines[0]:
            partner_id = lines[0].partner_id and lines[0].partner_id.id or False
            if partner_id and context and context.get('stop_reconcile', False):
                partner_obj.write(cr, uid, [partner_id], {'last_reconciliation_date': time.strftime('%Y-%m-%d %H:%M:%S')})
        return r_id
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
context = self.convert_to_period(cr, user, context=context)
if context.get('account_id', False):
cr.execute('SELECT code FROM account_account WHERE id = %s', (context['account_id'], ))
res = cr.fetchone()
if res:
res = _('Entries: ')+ (res[0] or '')
return res
if (not context.get('journal_id', False)) or (not context.get('period_id', False)):
return False
cr.execute('SELECT code FROM account_journal WHERE id = %s', (context['journal_id'], ))
j = cr.fetchone()[0] or ''
cr.execute('SELECT code FROM account_period WHERE id = %s', (context['period_id'], ))
p = cr.fetchone()[0] or ''
if j or p:
return j + (p and (':' + p) or '')
return False
def onchange_date(self, cr, user, ids, date, context=None):
"""
Returns a dict that contains new values and context
@param cr: A database cursor
@param user: ID of the user currently logged in
@param date: latest value from user input for field date
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
res = {}
if context is None:
context = {}
period_pool = self.pool.get('account.period')
pids = period_pool.search(cr, user, [('date_start','<=',date), ('date_stop','>=',date)])
if pids:
res.update({
'period_id':pids[0]
})
context.update({
'period_id':pids[0]
})
return {
'value':res,
'context':context,
}
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Customize the standard view lookup.

        For non-tree views: strip the toolbar from forms and restrict (and
        extend with refund journals) the journal selection in search views.
        For tree views: build the editable journal-entry grid dynamically
        from the columns configured on each journal's view, hiding each
        column unless the active journal declares it.
        """
        journal_pool = self.pool.get('account.journal')
        if context is None:
            context = {}
        result = super(account_move_line, self).fields_view_get(cr, uid, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
        if view_type != 'tree':
            #Remove the toolbar from the form view
            if view_type == 'form':
                if result.get('toolbar', False):
                    result['toolbar']['action'] = []
            #Restrict the list of journal view in search view
            if view_type == 'search' and result['fields'].get('journal_id', False):
                result['fields']['journal_id']['selection'] = journal_pool.name_search(cr, uid, '', [], context=context)
                ctx = context.copy()
                #we add the refunds journal in the selection field of journal
                if context.get('journal_type', False) == 'sale':
                    ctx.update({'journal_type': 'sale_refund'})
                    result['fields']['journal_id']['selection'] += journal_pool.name_search(cr, uid, '', [], context=ctx)
                elif context.get('journal_type', False) == 'purchase':
                    ctx.update({'journal_type': 'purchase_refund'})
                    result['fields']['journal_id']['selection'] += journal_pool.name_search(cr, uid, '', [], context=ctx)
            return result
        if context.get('view_mode', False):
            return result
        # fld: (field name, sequence, label) tuples; fields: field -> list of
        # journal ids that display it; flds: flat list of field names.
        fld = []
        fields = {}
        flds = []
        title = _("Accounting Entries") #self.view_header_get(cr, uid, view_id, view_type, context)
        xml = '''<?xml version="1.0"?>\n<tree string="%s" editable="top" refresh="5" on_write="on_create_write" colors="red:state==\'draft\';black:state==\'valid\'">\n\t''' % (title)
        ids = journal_pool.search(cr, uid, [])
        journals = journal_pool.browse(cr, uid, ids, context=context)
        all_journal = [None]
        common_fields = {}
        total = len(journals)
        # Collect the union of all columns configured on the journals' views,
        # counting in how many journals each column appears.
        for journal in journals:
            all_journal.append(journal.id)
            for field in journal.view_id.columns_id:
                if not field.field in fields:
                    fields[field.field] = [journal.id]
                    fld.append((field.field, field.sequence, field.name))
                    flds.append(field.field)
                    common_fields[field.field] = 1
                else:
                    fields.get(field.field).append(journal.id)
                    common_fields[field.field] = common_fields[field.field] + 1
        # Period and journal columns are always present.
        fld.append(('period_id', 3, _('Period')))
        fld.append(('journal_id', 10, _('Journal')))
        flds.append('period_id')
        flds.append('journal_id')
        fields['period_id'] = all_journal
        fields['journal_id'] = all_journal
        fld = sorted(fld, key=itemgetter(1))
        widths = {
            'statement_id': 50,
            'state': 60,
            'tax_code_id': 50,
            'move_id': 40,
        }
        for field_it in fld:
            field = field_it[0]
            if common_fields.get(field) == total:
                fields.get(field).append(None)
            # if field=='state':
            #     state = 'colors="red:state==\'draft\'"'
            attrs = []
            if field == 'debit':
                attrs.append('sum = "%s"' % _("Total debit"))
            elif field == 'credit':
                attrs.append('sum = "%s"' % _("Total credit"))
            elif field == 'move_id':
                attrs.append('required = "False"')
            elif field == 'account_tax_id':
                attrs.append('domain="[(\'parent_id\', \'=\' ,False)]"')
                attrs.append("context=\"{'journal_id': journal_id}\"")
            # NOTE(review): ``journal`` here is the loop variable leaked from
            # the journal loop above (the last browsed journal) — confirm
            # this is the intended condition.
            elif field == 'account_id' and journal.id:
                attrs.append('domain="[(\'journal_id\', \'=\', journal_id),(\'type\',\'<>\',\'view\'), (\'type\',\'<>\',\'closed\')]" on_change="onchange_account_id(account_id, partner_id)"')
            elif field == 'partner_id':
                attrs.append('on_change="onchange_partner_id(move_id, partner_id, account_id, debit, credit, date, journal_id)"')
            elif field == 'journal_id':
                attrs.append("context=\"{'journal_id': journal_id}\"")
            elif field == 'statement_id':
                attrs.append("domain=\"[('state', '!=', 'confirm'),('journal_id.type', '=', 'bank')]\"")
            elif field == 'date':
                attrs.append('on_change="onchange_date(date)"')
            elif field == 'analytic_account_id':
                attrs.append('''groups="analytic.group_analytic_accounting"''') # Currently it is not working due to framework problem may be ..
            if field in ('amount_currency', 'currency_id'):
                attrs.append('on_change="onchange_currency(account_id, amount_currency, currency_id, date, journal_id)"')
            attrs.append('''attrs="{'readonly': [('state', '=', 'valid')]}"''')
            if field in widths:
                attrs.append('width="'+str(widths[field])+'"')
            # Hide period/journal columns when fixed by context; hide any
            # other column unless the active journal declares it.
            if field in ('journal_id',):
                attrs.append("invisible=\"context.get('journal_id', False)\"")
            elif field in ('period_id',):
                attrs.append("invisible=\"context.get('period_id', False)\"")
            else:
                attrs.append("invisible=\"context.get('visible_id') not in %s\"" % (fields.get(field)))
            xml += '''<field name="%s" %s/>\n''' % (field,' '.join(attrs))
        xml += '''</tree>'''
        result['arch'] = xml
        result['fields'] = self.fields_get(cr, uid, flds, context)
        return result
    def _check_moves(self, cr, uid, context=None):
        """Return the oldest move of the journal/period in context (for
        centralisation), raising if that move is already confirmed.

        Requires ``journal_id`` and ``period_id`` in *context*.
        :return: (id, state, name) tuple of the first move, or None.
        """
        # use the first move ever created for this journal and period
        if context is None:
            context = {}
        cr.execute('SELECT id, state, name FROM account_move WHERE journal_id = %s AND period_id = %s ORDER BY id limit 1', (context['journal_id'],context['period_id']))
        res = cr.fetchone()
        if res:
            if res[1] != 'draft':
                raise osv.except_osv(_('UserError'),
                        _('The account move (%s) for centralisation ' \
                            'has been confirmed!') % res[2])
        return res
def _remove_move_reconcile(self, cr, uid, move_ids=[], context=None):
# Function remove move rencocile ids related with moves
obj_move_line = self.pool.get('account.move.line')
obj_move_rec = self.pool.get('account.move.reconcile')
unlink_ids = []
if not move_ids:
return True
recs = obj_move_line.read(cr, uid, move_ids, ['reconcile_id', 'reconcile_partial_id'])
full_recs = filter(lambda x: x['reconcile_id'], recs)
rec_ids = [rec['reconcile_id'][0] for rec in full_recs]
part_recs = filter(lambda x: x['reconcile_partial_id'], recs)
part_rec_ids = [rec['reconcile_partial_id'][0] for rec in part_recs]
unlink_ids += rec_ids
unlink_ids += part_rec_ids
if unlink_ids:
obj_move_rec.unlink(cr, uid, unlink_ids)
return True
    def unlink(self, cr, uid, ids, context=None, check=True):
        """Delete move lines one by one (after _update_check), then
        revalidate the affected moves when *check* is set.

        :return: result of the last super().unlink call (False if ids empty)
        """
        if context is None:
            context={}
        move_obj = self.pool.get('account.move')
        self._update_check(cr, uid, ids, context)
        result = False
        move_ids = set()
        for line in self.browse(cr, uid, ids, context=context):
            move_ids.add(line.move_id.id)
            # NOTE(review): this mutates the caller's context dict in place
            # (journal_id/period_id leak back to the caller) — confirm that
            # callers tolerate this side effect.
            context['journal_id'] = line.journal_id.id
            context['period_id'] = line.period_id.id
            result = super(account_move_line, self).unlink(cr, uid, [line.id], context=context)
        move_ids = list(move_ids)
        if check and move_ids:
            move_obj.validate(cr, uid, move_ids, context=context)
        return result
    def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
        """Write on move lines with accounting safeguards.

        Forbids changing the tax in place and writing an inactive account;
        optionally runs _update_check when accounting-relevant fields are
        touched; defers the 'date' change to the parent move; checks
        centralisation per line; revalidates the parent moves when *check*.
        """
        if context is None:
            context={}
        move_obj = self.pool.get('account.move')
        account_obj = self.pool.get('account.account')
        journal_obj = self.pool.get('account.journal')
        if vals.get('account_tax_id', False):
            raise osv.except_osv(_('Unable to change tax !'), _('You can not change the tax, you should remove and recreate lines !'))
        if ('account_id' in vals):
            #~ if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
            account_tmp = account_obj.browse(cr, uid, vals['account_id'], context=context)
            if account_tmp and not account_tmp.active:
                raise osv.except_osv(_('Bad account!'), _('You can not use an inactive account!'))
        if update_check:
            # Only run the modification guard when an accounting-relevant
            # field is being written.
            #~ if ('account_id' in vals) or ('journal_id' in vals) or ('period_id' in vals) or ('move_id' in vals) or ('debit' in vals) or ('credit' in vals) or ('date' in vals):
            if list(set(vals.keys()) & set([
                'period_id', 'date', 'account_id', 'move_id', 'tax_code_id',
                'debit', 'credit', 'currency_id', 'amount_currency',
                'analytic_account_id', 'partner_id', 'journal_id',
                'company_id', 'tax_code_id'])):
                self._update_check(cr, uid, ids, context)
        # The date is removed from vals and applied to the parent move below.
        todo_date = None
        if vals.get('date', False):
            todo_date = vals['date']
            del vals['date']
        for line in self.browse(cr, uid, ids, context=context):
            ctx = context.copy()
            if ('journal_id' not in ctx):
                if line.move_id:
                    ctx['journal_id'] = line.move_id.journal_id.id
                else:
                    ctx['journal_id'] = line.journal_id.id
            if ('period_id' not in ctx):
                if line.move_id:
                    ctx['period_id'] = line.move_id.period_id.id
                else:
                    ctx['period_id'] = line.period_id.id
            #Check for centralisation
            journal = journal_obj.browse(cr, uid, ctx['journal_id'], context=ctx)
            if journal.centralisation:
                self._check_moves(cr, uid, context=ctx)
        result = super(account_move_line, self).write(cr, uid, ids, vals, context)
        if check:
            # Revalidate each parent move once, pushing the deferred date.
            done = []
            for line in self.browse(cr, uid, ids):
                if line.move_id.id not in done:
                    done.append(line.move_id.id)
                    move_obj.validate(cr, uid, [line.move_id.id], context)
                    if todo_date:
                        move_obj.write(cr, uid, [line.move_id.id], {'date': todo_date}, context=context)
        return result
def _update_journal_check(self, cr, uid, journal_id, period_id, context=None):
journal_obj = self.pool.get('account.journal')
period_obj = self.pool.get('account.period')
jour_period_obj = self.pool.get('account.journal.period')
cr.execute('SELECT state FROM account_journal_period WHERE journal_id = %s AND period_id = %s', (journal_id, period_id))
result = cr.fetchall()
for (state,) in result:
if state == 'done':
raise osv.except_osv(_('Error !'), _('You can not add/modify entries in a closed journal.'))
if not result:
journal = journal_obj.browse(cr, uid, journal_id, context=context)
period = period_obj.browse(cr, uid, period_id, context=context)
jour_period_obj.create(cr, uid, {
'name': (journal.code or journal.name)+':'+(period.name or ''),
'journal_id': journal.id,
'period_id': period.id
})
return True
def _update_check(self, cr, uid, ids, context=None):
done = {}
for line in self.browse(cr, uid, ids, context=context):
if line.move_id.state <> 'draft' and (not line.journal_id.entry_posted):
raise osv.except_osv(_('Error !'), _('You can not do this modification on a confirmed entry ! Please note that you can just change some non important fields !'))
if line.reconcile_id:
raise osv.except_osv(_('Error !'), _('You can not do this modification on a reconciled entry ! Please note that you can just change some non important fields !'))
t = (line.journal_id.id, line.period_id.id)
if t not in done:
self._update_journal_check(cr, uid, line.journal_id.id, line.period_id.id, context)
done[t] = True
return True
    def create(self, cr, uid, vals, context=None, check=True):
        """Create a move line with all accounting side effects.

        Resolves the company from the move; rejects inactive accounts;
        ensures the journal period is open; creates (or reuses, for
        centralised journals) the parent move; enforces journal
        type/account controls; converts to the account's secondary
        currency; generates analytic lines; creates the tax base/VAT
        counterpart lines; finally validates (and optionally posts) the
        move.
        """
        account_obj = self.pool.get('account.account')
        tax_obj = self.pool.get('account.tax')
        move_obj = self.pool.get('account.move')
        cur_obj = self.pool.get('res.currency')
        journal_obj = self.pool.get('account.journal')
        if context is None:
            context = {}
        # Inherit the company from the parent move when one is given.
        if vals.get('move_id', False):
            company_id = self.pool.get('account.move').read(cr, uid, vals['move_id'], ['company_id']).get('company_id', False)
            if company_id:
                vals['company_id'] = company_id[0]
        if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
            raise osv.except_osv(_('Bad account!'), _('You can not use an inactive account!'))
        # Resolve journal/period into the context (from vals or the move).
        if 'journal_id' in vals:
            context['journal_id'] = vals['journal_id']
        if 'period_id' in vals:
            context['period_id'] = vals['period_id']
        if ('journal_id' not in context) and ('move_id' in vals) and vals['move_id']:
            m = move_obj.browse(cr, uid, vals['move_id'])
            context['journal_id'] = m.journal_id.id
            context['period_id'] = m.period_id.id
        self._update_journal_check(cr, uid, context['journal_id'], context['period_id'], context)
        move_id = vals.get('move_id', False)
        journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
        if not move_id:
            if journal.centralisation:
                #Check for centralisation
                res = self._check_moves(cr, uid, context)
                if res:
                    vals['move_id'] = res[0]
            if not vals.get('move_id', False):
                if journal.sequence_id:
                    #name = self.pool.get('ir.sequence').get_id(cr, uid, journal.sequence_id.id)
                    v = {
                        'date': vals.get('date', time.strftime('%Y-%m-%d')),
                        'period_id': context['period_id'],
                        'journal_id': context['journal_id']
                    }
                    if vals.get('ref', ''):
                        v.update({'ref': vals['ref']})
                    move_id = move_obj.create(cr, uid, v, context)
                    vals['move_id'] = move_id
                else:
                    raise osv.except_osv(_('No piece number !'), _('Can not create an automatic sequence for this piece !\n\nPut a sequence in the journal definition for automatic numbering or create a sequence manually for this piece.'))
        # Journal controls: the account must match the journal's allowed
        # account types or explicit account list (when any are configured).
        ok = not (journal.type_control_ids or journal.account_control_ids)
        if ('account_id' in vals):
            account = account_obj.browse(cr, uid, vals['account_id'], context=context)
            if journal.type_control_ids:
                type = account.user_type
                for t in journal.type_control_ids:
                    if type.code == t.code:
                        ok = True
                        break
            if journal.account_control_ids and not ok:
                for a in journal.account_control_ids:
                    if a.id == vals['account_id']:
                        ok = True
                        break
            # Automatically convert in the account's secondary currency if there is one and
            # the provided values were not already multi-currency
            if account.currency_id and 'amount_currency' not in vals and account.currency_id.id != account.company_id.currency_id.id:
                vals['currency_id'] = account.currency_id.id
                ctx = {}
                if 'date' in vals:
                    ctx['date'] = vals['date']
                vals['amount_currency'] = cur_obj.compute(cr, uid, account.company_id.currency_id.id,
                        account.currency_id.id, vals.get('debit', 0.0)-vals.get('credit', 0.0), context=ctx)
        if not ok:
            raise osv.except_osv(_('Bad account !'), _('You can not use this general account in this journal !'))
        # Mirror the line into the analytic journal when one is configured.
        if vals.get('analytic_account_id',False):
            if journal.analytic_journal_id:
                vals['analytic_lines'] = [(0,0, {
                        'name': vals['name'],
                        'date': vals.get('date', time.strftime('%Y-%m-%d')),
                        'account_id': vals.get('analytic_account_id', False),
                        'unit_amount': vals.get('quantity', 1.0),
                        'amount': vals.get('debit', 0.0) or vals.get('credit', 0.0),
                        'general_account_id': vals.get('account_id', False),
                        'journal_id': journal.analytic_journal_id.id,
                        'ref': vals.get('ref', False),
                        'user_id': uid
                })]
        # NOTE(review): super(osv.osv, ...) skips the immediate parent in
        # the MRO — confirm this shortcut is intentional.
        result = super(osv.osv, self).create(cr, uid, vals, context=context)
        # CREATE Taxes
        if vals.get('account_tax_id', False):
            tax_id = tax_obj.browse(cr, uid, vals['account_tax_id'])
            total = vals['debit'] - vals['credit']
            # Refund journals use the refund-side tax codes/accounts/signs.
            if journal.refund_journal:
                base_code = 'ref_base_code_id'
                tax_code = 'ref_tax_code_id'
                account_id = 'account_paid_id'
                base_sign = 'ref_base_sign'
                tax_sign = 'ref_tax_sign'
            else:
                base_code = 'base_code_id'
                tax_code = 'tax_code_id'
                account_id = 'account_collected_id'
                base_sign = 'base_sign'
                tax_sign = 'tax_sign'
            tmp_cnt = 0
            for tax in tax_obj.compute_all(cr, uid, [tax_id], total, 1.00).get('taxes'):
                #create the base movement
                # The first tax annotates this very line with the base code;
                # subsequent taxes get their own zero-amount base line.
                if tmp_cnt == 0:
                    if tax[base_code]:
                        tmp_cnt += 1
                        self.write(cr, uid,[result], {
                            'tax_code_id': tax[base_code],
                            'tax_amount': tax[base_sign] * abs(total)
                        })
                else:
                    data = {
                        'move_id': vals['move_id'],
                        'journal_id': vals['journal_id'],
                        'period_id': vals['period_id'],
                        'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
                        'date': vals['date'],
                        'partner_id': vals.get('partner_id',False),
                        'ref': vals.get('ref',False),
                        'account_tax_id': False,
                        'tax_code_id': tax[base_code],
                        'tax_amount': tax[base_sign] * abs(total),
                        'account_id': vals['account_id'],
                        'credit': 0.0,
                        'debit': 0.0,
                    }
                    if data['tax_code_id']:
                        self.create(cr, uid, data, context)
                #create the VAT movement
                data = {
                    'move_id': vals['move_id'],
                    'journal_id': vals['journal_id'],
                    'period_id': vals['period_id'],
                    'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
                    'date': vals['date'],
                    'partner_id': vals.get('partner_id',False),
                    'ref': vals.get('ref',False),
                    'account_tax_id': False,
                    'tax_code_id': tax[tax_code],
                    'tax_amount': tax[tax_sign] * abs(tax['amount']),
                    'account_id': tax[account_id] or vals['account_id'],
                    'credit': tax['amount']<0 and -tax['amount'] or 0.0,
                    'debit': tax['amount']>0 and tax['amount'] or 0.0,
                }
                if data['tax_code_id']:
                    self.create(cr, uid, data, context)
            del vals['account_tax_id']
        # Validate the move; when the journal auto-posts, also post it.
        if check and ((not context.get('no_store_function')) or journal.entry_posted):
            tmp = move_obj.validate(cr, uid, [vals['move_id']], context)
            if journal.entry_posted and tmp:
                move_obj.button_validate(cr, uid, [vals['move_id']], context)
        return result
# Instantiating the class registers the model with the ORM
# (pre-v7 OpenERP API convention).
account_move_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"juanvmarquezl@gmail.com"
] | juanvmarquezl@gmail.com |
acc0e879ddff0fddff0f1c17854d03eba9823789 | be8fe594fae8a4fb66c0cbc1f0e8462891eabb1b | /examsystemapp/api/external.py | ff92d1a6411c523b029c830ee33dfb926c6e41f2 | [] | no_license | ITfyMe/ExamSytemPython | a30cccc1ba4ef832666b23109a772209fcbcea8c | da7506ae9607d69c97744bdc08ac1113fc86237a | refs/heads/master | 2023-06-12T11:48:20.514142 | 2021-07-12T13:16:06 | 2021-07-12T13:16:06 | 369,427,947 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | """
Created By : Nikesh
Created On :
Reviewed By :
Reviewed On :
Version :
"""
from django.http import HttpRequest
from examsystemapp.api.base_controller import BaseController
from examsystemapp.utils.helpers.request_helper import RequestHelper
from django.conf import settings
class External(BaseController):
    """Controller exposing endpoints that proxy calls to external APIs."""
    def __init__(self, request: HttpRequest):
        BaseController.__init__(self, request)
    def check_session(self, request):
        # Placeholder — not implemented yet.
        pass
    def masters(self, request: HttpRequest):
        # Forward the request to the service at settings.MASTER_BASE_URL and
        # return its JSON payload unchanged.
        json_data = RequestHelper().call_ext_api(request, settings.MASTER_BASE_URL)
        return self.send_response_raw_json(json_data)
| [
"nikesh.kedlaya@gmail.com"
] | nikesh.kedlaya@gmail.com |
b4fde878db7746ff13c0a3dedbfbca0736d7249c | 34c91527966ecf29d8adf46becbf3af5032dc3a6 | /basic/armstrongnumber.py | 9552f47dc63cdff9b0b8cf0745e63ea4f047d5bf | [] | no_license | mjohnkennykumar/csipythonprograms | 0a23040a484812b3176b614bf0e18f685c74a375 | 750cdaeda15fe73328902c3de567d6083dfa7a77 | refs/heads/master | 2021-01-21T14:32:37.913617 | 2017-06-24T12:54:03 | 2017-06-24T12:54:03 | 95,297,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | # -*- coding: utf-8 -*-
# Python program to check if the number provided by the user is an Armstrong number or not
# An Armstrong (narcissistic) number equals the sum of its digits each
# raised to the power of the digit count.  The original code always used
# the cube, which is only correct for 3-digit numbers; using the digit
# count generalizes the check.  The builtin name ``sum`` is no longer
# shadowed.
num = int(input("Enter a number: "))
# number of digits = exponent for each digit
order = len(str(num))
# find the sum of each digit raised to the digit count
total = 0
temp = num
while temp > 0:
    digit = temp % 10
    total += digit ** order
    temp //= 10
# display the result
if num == total:
    print(num,"is an Armstrong number")
else:
    print(num,"is not an Armstrong number")
| [
"Samuel Henry"
] | Samuel Henry |
5d5fd3a0fc2112964ec1c90c48196dcdc7468bd7 | d93b337a73a9bc6f6c104cc6eea9ca8d60ef3577 | /stripeAPI/cargo.py | 407cca194cdbced169d8ec5a77e78e9b275ed8e4 | [] | no_license | erivera23/market | 0532c5aefe32bd0102fa18fe6b224af289c4388f | 73bead776f233bd1a79d34122daeff74b2b3ce18 | refs/heads/master | 2022-12-13T00:21:38.163936 | 2020-01-18T22:52:34 | 2020-01-18T22:52:34 | 234,808,623 | 1 | 0 | null | 2022-11-22T04:57:29 | 2020-01-18T22:51:03 | Python | UTF-8 | Python | false | false | 478 | py | from . import stripe
def create_cargo(orden):
    """Create a Stripe charge for *orden* (order).

    Returns the Charge object, or None when the order lacks a billing
    profile or a Stripe customer id.
    """
    if orden.billing_profile and orden.user and orden.user.customer_id:
        cargo = stripe.Charge.create(
            # Stripe expects the amount as an integer in the smallest
            # currency unit (cents for USD).  The original
            # ``int(orden.total) * 100`` truncated the total to whole
            # dollars BEFORE scaling, silently dropping the cents.
            amount=int(round(orden.total * 100)),
            currency='USD',
            description=orden.descripcion,
            customer=orden.user.customer_id,
            source=orden.billing_profile.card_id,
            metadata={
                'orden_id': orden.id
            }
        )
        return cargo
"riveraefrain5@gmail.com"
] | riveraefrain5@gmail.com |
abbef862cd5933de0ed6f118c4196a4adeb7ccc2 | 376dbd781ff32bb5c1fa64b8b2fc2cb7bfeb62bb | /main.py | 46a4a3510a8ea612c0975a28bd3c2053cae6dceb | [] | no_license | ahaggart/image-shatter | db00662c85c6f8b698008757fc38e9b42d733d91 | cca1a392bc3445b117e413bcdb8fd473de82ebfe | refs/heads/master | 2020-05-26T21:55:49.783242 | 2019-05-24T11:03:48 | 2019-05-24T11:03:48 | 188,388,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import argparse
import cv2 as cv
from blobs import caluculate_colorspace_distances, grow_blobs
from polygons import find_edges, order_edges
from colorize import color_blobs, color_edges, color_ordered
def main(config):
    """Read ``img/<config.file>``, segment it into color blobs, trace and
    order the blob edges, render them, and write ``out/<config.file>``.

    :param config: argparse namespace with a ``file`` attribute.
    """
    image = cv.imread('img/' + config.file)
    # NOTE(review): cv.imread returns shape (rows, cols, ...) i.e.
    # (height, width) — these names look swapped; confirm what the
    # downstream helpers expect.
    width, height = image.shape[:2]
    print("Calculating colorspace distances...")
    xdist, ydist = caluculate_colorspace_distances(image)
    print("Growing blobs...")
    blobs = grow_blobs(width, height, xdist, ydist)
    print("> Num blobs: {}".format(blobs.index))
    edges = find_edges(blobs)
    ordered = order_edges(edges)
    print("Coloring blobs...")
    # Alternative renderings kept for experimentation:
    # colors = color_blobs(image, blobs)
    # colors = color_edges(edges, width, height)
    colors = color_ordered(ordered, width, height)
    print("Writing image...")
    cv.imwrite('out/' + config.file, colors)
if __name__ == "__main__":
    # CLI entry point: the single positional argument names the image
    # inside img/ to process.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "file",
        help="the path to the file within the img/ directory",
    )
    main(parser.parse_args())
| [
"alex.haggart@gmail.com"
] | alex.haggart@gmail.com |
9b0b34eb1b3a8edde6ec56f298ac3c0a5bbe79f7 | cb0bad5bd717fef43674ffd2f76a5fe922cf0896 | /eurito_daps/flaskblog.py | 74bf352313b346746653461bbfeb148f1c63290a | [] | no_license | porter22/porter22.github.io | 2b3dbe6f8c6ffcfb3f4ff374e12f769f1ef06b51 | ad15c20fa3f8490642cd831da82eb448c694c746 | refs/heads/master | 2021-06-05T20:46:47.862065 | 2020-03-15T17:27:09 | 2020-03-15T17:27:09 | 122,872,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,061 | py | from flask import Flask, render_template, url_for
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__) # the Flask application instance
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db' #/// means a path relative to the current directory
db = SQLAlchemy(app) # SQLAlchemy database handle bound to the app
class User(db.Model):
    """A registered blog user; one user can author many posts."""
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): unlike email, username allows NULL — presumably it
    # should also be nullable=False; confirm before relying on it.
    username = db.Column(db.String(20), unique=True)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # profile picture filename; falls back to the bundled default image
    image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    # presumably stores a password hash (60 chars fits bcrypt) — confirm
    password = db.Column(db.String(60), nullable=False)
    posts = db.relationship('Post', backref='author', lazy=True) # one author can have multiple posts, but a post can have only one author: one to many relationship
    def __repr__(self): #how object is printed
        return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
    """A blog post, linked to its author via ``user_id``."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    # NOTE(review): "data_posted" looks like a typo for "date_posted";
    # renaming would change the DB schema and __repr__, so it is only
    # flagged here.
    data_posted = db.Column(db.DateTime, nullable=False, default = datetime.utcnow )
    content = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    def __repr__(self): #how object is printed
        return f"Post('{self.title}', '{self.data_posted}')"
# Hard-coded demo posts rendered by the home page (placeholder data; the
# Post model above is not queried by the routes in this file).
posts = [
    {
        'author': 'Corey Schafer',
        'title': 'Blog Post 1',
        'content': 'First post content',
        'date_posted': 'April 20, 2018'
    },
    {
        'author': 'Jane Doe',
        'title': 'Blog Post 2',
        'content': 'Second post content',
        'date_posted': 'April 21, 2018'
    }
]
@app.route("/")
@app.route("/home")
def home():
    # Render the home page with the module-level demo posts.
    return render_template('home.html', posts = posts)
@app.route("/about")
def about():
    # Static about page; only the page title is passed to the template.
    return render_template('about.html', title = 'About')
# Run the development server with the auto-reloader/debugger enabled so
# code changes do not require a manual restart.
if __name__ == '__main__':
    app.run(debug=True)
| [
"idrissov22@gmail.com"
] | idrissov22@gmail.com |
3bf09fa4f79c4ab4f60f4fdf8d3c23e04214b598 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /F5ycABGyZtghMpYjr_16.py | ce3767d6e7a4448df5ed169a0465448836b9b5c5 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py |
def max_num(n1, n2):
    """Return the larger of *n1* and *n2* (returns *n2* on a tie).

    The manual if/else reimplemented the builtin ``max``; delegate to it.
    """
    return max(n1, n2)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
fb12aa0f0a717a88fe55aac5ace8b0cda17fdc54 | ce39286b958c01cbbd68ecffc33d3724c8c130f1 | /club/app/migrations/0026_paquete_inscrito_horas_consumidas.py | 166d62f8b0c29460f78bc02d7a841840c518c74e | [] | no_license | Rob866/club | eb26c3085bb37908226bfed306d98538eed64dff | f2e58ca773a4e461a9905c0898c1f11bd5e94099 | refs/heads/master | 2022-12-16T00:07:32.423945 | 2020-01-09T04:38:09 | 2020-01-09T04:38:09 | 216,317,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | # Generated by Django 2.2.6 on 2019-10-15 20:29
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the ``horas_consumidas`` (consumed
    # hours) duration counter to ``paquete_inscrito``, defaulting to a
    # zero timedelta.
    dependencies = [
        ('app', '0025_auto_20191015_1240'),
    ]
    operations = [
        migrations.AddField(
            model_name='paquete_inscrito',
            name='horas_consumidas',
            field=models.DurationField(default=datetime.timedelta(0)),
        ),
    ]
| [
"juanrob_10@hotmail.com"
] | juanrob_10@hotmail.com |
e944a493701484b85f0930ed5c5c716253ed6a9b | 8227d4cf270ffc45ed9c0c5f94b04f51187751c4 | /srezy.py | eb59c4eaa583d3111c0c3b1040beb6c6948da6cd | [] | no_license | NosevichOleksandr/firstrepository | ced760563a039af88dd1a5588ba0553e5e0cdef6 | 70652118bcdac48f7b638fe2d896d587f79115d1 | refs/heads/master | 2023-04-02T02:46:50.723173 | 2021-04-17T13:34:37 | 2021-04-17T13:34:37 | 356,614,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | print('hello world')
a = input('write your ... something: ')
# For an even-length input, print the first half followed by the second
# half reversed (a "mirror" of the string around its midpoint).
if len(a) % 2 == 0:
    b = a[:len(a)//2]  # first half
    c = a[len(a)//2:][::-1]  # second half, reversed
    print(b + c)
else:
    # Runtime message kept verbatim (Russian: "you entered it wrong").
    print('ты неправильно ввел')
| [
"bpxnastalgia@gmail.com"
] | bpxnastalgia@gmail.com |
50157256f9b323f313890c0165fa4fe159337357 | 8cce087dfd5c623c2f763f073c1f390a21838f0e | /projects/the/test.py | 8b5458899ae7d4a6053ff37dca0868ce16e83cdb | [
"Unlicense"
] | permissive | quinn-dougherty/python-on-nix | b2ae42761bccf7b3766999b27a4674310e276fd8 | 910d3f6554acd4a4ef0425ebccd31104dccb283c | refs/heads/main | 2023-08-23T11:57:55.988175 | 2021-09-24T05:55:00 | 2021-09-24T05:55:00 | 414,799,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11 | py | import the
| [
"kamadorueda@gmail.com"
] | kamadorueda@gmail.com |
7329c993e5cfe2cf131a107a9c946a0937892cb4 | 098ac9ecdaa67b717182c2aeca2a9d60833e88e7 | /opentcweb/settings/prod.py | fd788fb20c554254729032aeabf64156243e772a | [
"MIT"
] | permissive | cahya-wirawan/opentc-web | c8e758835d129cf7edb6f9dbf640632c2aa9ff2f | fa74c49f3f2b1a74624deca912f7da87afdc7e1b | refs/heads/master | 2021-01-19T19:13:44.629858 | 2018-01-21T13:21:32 | 2018-01-21T13:21:32 | 88,406,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from __future__ import absolute_import
from .base import *
# Production overrides: settings here shadow the values pulled in from
# base via the star import above.
DEBUG = False  # never expose Django debug pages in production
#...
| [
"cahya.wirawan@gmail.com"
] | cahya.wirawan@gmail.com |
ab6f49788e9c9b703b8119182f349d2b181ec92c | f907f8ce3b8c3b203e5bb9d3be012bea51efd85f | /kaki.py | 2c2c28e9db71f7ea3b53c39e8cf861cadb925d35 | [] | no_license | KohsukeKubota/Atcoder-practice | 3b4b986395551443f957d1818d6f9a0bf6132e90 | 52554a2649445c2760fc3982e722854fed5b8ab1 | refs/heads/master | 2020-08-26T15:17:29.344402 | 2019-10-26T11:14:24 | 2019-10-26T11:14:24 | 217,052,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | S = [input() for _ in range(12)]
# Count how many of the 12 input lines (read into S above) contain the
# letter 'r'.  The original built a throwaway set() per line just to do a
# membership test that ``in`` already performs on strings, and kept a
# manual counter; a single generator expression with ``sum`` does both.
cnt = sum(1 for s in S if 'r' in s)
print(cnt)
| [
"kohsuke@KohsukeKubotas-MacBook-Air.local"
] | kohsuke@KohsukeKubotas-MacBook-Air.local |
edb363be7d18412f48d26946d0a265a266919f9e | 9d43b8a3b53001f25a347fd96e5c49538b0c509a | /mxshop/apps/trade/views.py | 30e854b8ad252b98ccda10e6bfe8ca3d67cb173a | [] | no_license | w8833531/mxfresh | b81b7e4223536c6bedb049009386015935d33987 | 46b83fafdae8450491344c531de81a45ab5d8aae | refs/heads/master | 2021-04-09T15:53:50.829921 | 2018-08-08T01:41:14 | 2018-08-08T01:41:14 | 125,793,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,746 | py | import random, time
from datetime import datetime
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework import status
from rest_framework import permissions
from rest_framework import authentication
from rest_framework import mixins
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from utils.permissions import IsOwnerOrReadOnly
from utils.alipay import AliPay
from .serializers import ShopCartSerializer,ShopCartDetailSerializer, OrderSerializer, OrderDetailSerializer
from .models import ShoppingCart, OrderInfo, OrderGoods
from mxshop.settings import appid, private_key_path, alipay_pub_key_path, alipay_notify_url, alipay_return_url
# Create your views here.
class ShoppingCartViewset(viewsets.ModelViewSet):
    """
    Shopping cart API (JWT or session authenticated, owner-only).

    list:
        Return the current user's cart items.
    create:
        Add an item to the cart.
    delete:
        Remove an item from the cart.
    update:
        Update a cart item.
    """
    authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
    # Cart entries are addressed by the goods id, not the cart row id.
    lookup_field = "goods_id"
    # override get_serializer_class method, if list return DetailSerializer
    def get_serializer_class(self, *args, **kwargs):
        if self.action == 'list':
            return ShopCartDetailSerializer
        else:
            return ShopCartSerializer
    def get_queryset(self):
        # Users only ever see their own cart.
        return ShoppingCart.objects.filter(user=self.request.user)
class OrderViewset(mixins.ListModelMixin, mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.DestroyModelMixin, viewsets.GenericViewSet):
    """
    Order management API.

    List:
        Return the current user's orders.
    Delete:
        Delete an order (restocking unpaid goods).
    Create:
        Create an order from the shopping cart.
    Retrieve:
        Return one order with its details.
    """
    authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
    serializer_class = OrderSerializer
    def get_queryset(self):
        # Users only ever see their own orders.
        return OrderInfo.objects.filter(user=self.request.user)
    def get_serializer_class(self):
        if self.action == 'retrieve':
            return OrderDetailSerializer
        else:
            return OrderSerializer
    # Generate the order number: current timestamp + user id + 2 random digits.
    def generate_order_sn(self):
        random_int = random.Random()
        order_sn = "{time_str}{userid}{random_str}".format(time_str=time.strftime('%Y%m%d%H%M%S'),
            userid=self.request.user.id, random_str=random_int.randint(10, 99))
        return order_sn
    # Override perform_create to set order_sn and move cart contents into the order.
    def perform_create(self, serializer):
        """
        On order creation: attach the cart goods to the order, deduct
        their stock, and empty the shopping cart.
        """
        # Save the current user's order (with a generated order number).
        order = serializer.save(order_sn=self.generate_order_sn())
        # Fetch every item currently in the user's shopping cart.
        shop_carts = ShoppingCart.objects.filter(user=self.request.user)
        # Move each cart item into the order, deduct stock, clear the cart.
        for shop_cart in shop_carts:
            # Build the order-goods record.
            order_goods = OrderGoods()
            # Copy the goods and quantity from the cart entry.
            order_goods.goods = shop_cart.goods
            order_goods.goods_num = shop_cart.nums
            # Deduct the ordered quantity from stock.
            order_goods.goods.goods_num -= order_goods.goods_num
            order_goods.goods.save()
            # Attach to the order and persist.
            order_goods.order = order
            order_goods.save()
            # Remove the entry from the cart.
            shop_cart.delete()
        return order
    # Override perform_destroy to restore stock for unpaid orders.
    def perform_destroy(self, instance):
        if instance.pay_status != "TRADE_SUCCESS":
            # The order was never paid: put every ordered quantity back
            # into stock before deleting.
            order_goods = OrderGoods.objects.filter(order=instance.id)
            for order_good in order_goods:
                order_good.goods.goods_num += order_good.goods_num
                order_good.goods.save()
        instance.delete()
class AliPayViewset(APIView):
    """Alipay payment callbacks: GET handles the browser return_url,
    POST handles the asynchronous notify_url."""
    def get(self, request):
        """
        Handle the Alipay return_url redirect.
        :param request:
        :return: Response("success") when the signature verifies.
        """
        # Copy the query parameters and pull out the signature to verify.
        processed_dict = {}
        for key, value in request.GET.items():
            processed_dict[key] = value
        sign = processed_dict.pop("sign", None)
        alipay = AliPay(
            appid=appid,
            app_notify_url=alipay_notify_url,
            app_private_key_path=private_key_path,
            alipay_public_key_path=alipay_pub_key_path,  # Alipay's public key, used to verify messages coming back FROM Alipay (not your own public key)
            debug=True,  # defaults to False
            return_url=alipay_return_url,
        )
        verify_re = alipay.verify(processed_dict, sign)
        # NOTE(review): when verification fails this returns None, which
        # DRF treats as an error — confirm whether a failure response is
        # intended here.
        if verify_re is True:
            # Order updates are handled in the notify (POST) handler; this
            # earlier inline version is kept for reference:
            # order_sn = processed_dict.get('out_trade_no', None)
            # trade_no = processed_dict.get('trade_no', None)
            # trade_status = processed_dict.get('trade_status', None)
            # existed_orders = OrderInfo.objects.filter(order_sn=order_sn)
            # for existed_order in existed_orders:
            #     existed_order.pay_status = trade_status
            #     existed_order.trade_no = trade_no
            #     existed_order.pay_time = datetime.now()
            #     existed_order.save()
            return Response("success")
    def post(self, request):
        """
        Handle the Alipay notify_url (server-to-server) callback.
        :param request:
        :return: Response("success") when the signature verifies.
        """
        processed_dict = {}
        for key, value in request.POST.items():
            processed_dict[key] = value
            print(key, value)
        sign = processed_dict.pop("sign", None)
        alipay = AliPay(
            appid=appid,
            app_notify_url=alipay_notify_url,
            app_private_key_path=private_key_path,
            alipay_public_key_path=alipay_pub_key_path,  # Alipay's public key, used to verify messages coming back FROM Alipay (not your own public key)
            debug=True,  # defaults to False
            return_url=alipay_return_url,
        )
        verify_re = alipay.verify(processed_dict, sign)
        if verify_re is True:
            # Record the trade outcome on every order matching the order number.
            order_sn = processed_dict.get('out_trade_no', None)
            trade_no = processed_dict.get('trade_no', None)
            trade_status = processed_dict.get('trade_status', None)
            existed_orders = OrderInfo.objects.filter(order_sn=order_sn)
            for existed_order in existed_orders:
                existed_order.pay_status = trade_status
                # On successful payment, bump the sold counter of every
                # goods in the order.  (This need not be real-time; a
                # background job would be a better place for it.)
                if existed_order.pay_status == "TRADE_SUCCESS":
                    order_goods = existed_order.goods.all()
                    for order_good in order_goods:
                        order_good.goods.sold_num += order_good.goods_num
                        order_good.goods.save()
                existed_order.trade_no = trade_no
                existed_order.pay_time = datetime.now()
                existed_order.save()
            return Response("success")
"w8833531@hotmail.com"
] | w8833531@hotmail.com |
a6d1b05a1ca185859368e58727850feec6b840f5 | 209f0d778a673884cf56b83e9bde392f712f84aa | /.venv/bin/chardetect | aa41d066f5dfbc7d9dc51d3bdcff15c0287360f8 | [] | no_license | obiorbitalstar/chess-board | f65e5706405e4a420dfc843b5f18c39a1f16cc7d | 0bb61022fd49a6361813d894f28bcd99c07766aa | refs/heads/Master | 2022-12-18T01:56:50.652476 | 2020-08-30T13:43:38 | 2020-08-30T13:43:38 | 291,475,486 | 0 | 0 | null | 2020-08-30T13:43:39 | 2020-08-30T13:27:49 | Python | UTF-8 | Python | false | false | 264 | #!/home/orphues/codefellows/401/chess-board/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
# Console-script shim: normalize argv[0] (strip setuptools' -script.pyw/.exe
# suffix) and delegate to chardet's CLI entry point, propagating its exit code.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"obiorbitalstar@gmail.com"
] | obiorbitalstar@gmail.com | |
6722bcef452c085f75a486160d1e49a88934b6c6 | d92a1eb61863aa0dba7df6a8e787f243715effb2 | /urls.py | 0da45212c15d635f432c6b8adbd8c6e3b4f2a0a7 | [] | no_license | Engaginglab/scoreit | 91a7cc4b610556b45355bdd32c4c608dc2993edd | a54a073e6aded904b14738867c6b9dfa31e744f0 | refs/heads/master | 2021-01-10T19:58:05.162232 | 2012-09-22T14:34:48 | 2012-09-22T14:34:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | from django.contrib import admin
from django.conf.urls.defaults import *
# Auto-discover admin.py modules in installed apps so their models register.
admin.autodiscover()

urlpatterns = patterns('',
    (r'^auth/', include('auth.urls')),  # authentication endpoints
    (r'^handball/', include('handball.urls')),  # handball app endpoints
    (r'^admin/', include(admin.site.urls))  # Django admin site
)
| [
"martin@maklesoft.com"
] | martin@maklesoft.com |
7be265dac32863a3cb746a50679f132f3cfc6705 | 7d17375998378125fa63b1cf8673b8387d99324f | /core/migrations/0006_auto_20200517_0929.py | 07187c5d3c0bad2459578712f836c9dcc72447db | [] | no_license | priyanka1698/Late-checker | c2f96993340f09c55bd9eac2261cc760c409eae9 | ff1c4615b3abefd65fa789fcd1e684322ddfc9de | refs/heads/master | 2022-08-19T15:22:00.692770 | 2020-05-20T11:45:35 | 2020-05-20T11:45:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py | # Generated by Django 3.0.5 on 2020-05-17 09:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename Station to Gate (plus dependent fields), replace Log's
    image/fare columns with entry/exit status fields, and add a Timing model
    holding a start/end time window."""

    dependencies = [
        ('core', '0005_delete_image'),
    ]

    operations = [
        # New Timing model: a simple start/end time-of-day window.
        migrations.CreateModel(
            name='Timing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start', models.TimeField()),
                ('end', models.TimeField()),
            ],
        ),
        # Station becomes Gate; rename the fields that referenced it.
        migrations.RenameModel(
            old_name='Station',
            new_name='Gate',
        ),
        migrations.RenameField(
            model_name='gate',
            old_name='station_no',
            new_name='no',
        ),
        migrations.RenameField(
            model_name='log',
            old_name='entry_station',
            new_name='entry_gate',
        ),
        migrations.RenameField(
            model_name='log',
            old_name='exit_station',
            new_name='exit_gate',
        ),
        # Drop the image and fare columns that are no longer used.
        migrations.RemoveField(
            model_name='log',
            name='entry_image',
        ),
        migrations.RemoveField(
            model_name='log',
            name='exit_image',
        ),
        migrations.RemoveField(
            model_name='log',
            name='fare',
        ),
        # Track lateness per log entry/exit instead.
        migrations.AddField(
            model_name='log',
            name='entry_status',
            field=models.CharField(choices=[('late', 'late'), ('early', 'early'), ('on time', 'on time'), ('in office', 'in office')], default='on time', max_length=100),
        ),
        migrations.AddField(
            model_name='log',
            name='exit_status',
            field=models.CharField(choices=[('late', 'late'), ('early', 'early'), ('on time', 'on time'), ('in office', 'in office')], default='on time', max_length=100),
        ),
    ]
| [
"sanyam19092000@gmail.com"
] | sanyam19092000@gmail.com |
d5e72b406d12cc64d1490e7ba89e8b441f0c7d4e | bf511b123d05788e45b17270dc3e651a82a7ee58 | /taskmate/todolist_app/migrations/0001_initial.py | 2f3cc31b47612b0ee866de917ddd0d5a8fd0076f | [] | no_license | joseluis-gc/Django-TodoApp | 36a7350064a66dd917c1383be1eee923f3ffd20e | 2e3f82ba0fe08f76fb0d98bec2bde2717684818e | refs/heads/master | 2023-06-09T18:42:46.580395 | 2021-06-24T05:31:50 | 2021-06-24T05:31:50 | 377,983,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # Generated by Django 3.1.5 on 2021-06-18 00:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the TaskList model (task text + done flag)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='TaskList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('task', models.CharField(max_length=300)),
                ('done', models.BooleanField(default=False)),
            ],
        ),
    ]
| [
"joseluisgomezcecegna@gmail.com"
] | joseluisgomezcecegna@gmail.com |
42cd98f60f8637e2f8b57280dee6eeb14f3eac98 | bb4dc40ec0b62e5d2fc3ce1234013aebd4e648d5 | /src/modules/customised/payroll/hra/__init__.py | 708a454f4468ac2e8c826538ed0f9f59fab6f7cf | [] | no_license | kakamble-aiims/work | ba6cbaf4c525ff7bc28d0a407f16c829d0c35983 | cd392bf0e80d71c4742568e9c1dd5e5211da56a9 | refs/heads/master | 2022-04-02T14:45:58.515014 | 2019-12-31T14:00:51 | 2019-12-31T14:00:51 | 199,015,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from trytond.pool import Pool
from .hra import *
def register():
    """Register this module's models with the Tryton pool.

    Called by Tryton when the 'hra' module is activated.
    """
    Pool.register(
        HRA_Allowance,
        module='hra', type_='model')
"kakamble.aiims@gmail.com"
] | kakamble.aiims@gmail.com |
90208cdc6c60917016382d39760b8a3bb14ff4d3 | 8d34c0d29b69028fb1da01499fdac19f9762a8e1 | /lib/python2.7/sunlight/service.py | cf069d357b54bd46632e3a1b8417e69f90c7f8b1 | [] | no_license | politicrowd/politicrowd | 87a98689724c288b5400acd0de6fb7fae7e058d4 | e62c89d234a59f6c7d56fae2c8af6074f426c265 | refs/heads/master | 2021-01-20T00:55:48.083054 | 2013-07-30T00:14:08 | 2013-07-30T00:14:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,259 | py | # Copyright (c) Sunlight Labs, 2012 under the terms and conditions
# of the LICENSE file.
"""
.. module:: sunlight.service
:synopsis: Sunlight API Superclass
Base service class. All API classes (such as say -
:class:`sunlight.services.openstates.OpenStates`) inherit from this.
"""
import sys
import sunlight.config
import sunlight.errors
if sys.version_info[0] >= 3:
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.error import HTTPError
else:
from urllib import urlencode
from urllib2 import urlopen
from urllib2 import HTTPError
class Service:
    """
    Base class for all the API implementations, as well as a bunch of common
    code on how to actually fetch text over the network.
    """

    def get(self, top_level_object, **kwargs):
        """
        Get some data from the network - this is where we actually fetch
        something and make a request.

        .. warning:: Be sure that API_KEY was set before calling this method.
            This will throw a :class:`sunlight.errors.NoAPIKeyException` if
            the API_KEY is not set.

        args:
            ``top_level_object`` (str): Thing to query for (such as say,
            "bills" for OpenStates )

        kwargs:
            These arguments will be passed to the underlying API implementation
            to help create a query. Validation will happen down below, and
            on a per-API level.

        :raises sunlight.errors.NoAPIKeyException: if no API key is configured
        :raises sunlight.errors.BadRequestException: on an HTTP error response
        """
        if not sunlight.config.API_KEY:
            raise sunlight.errors.NoAPIKeyException(
                "Warning: Missing API Key. please visit " + sunlight.config.API_SIGNUP_PAGE +
                " to register for a key.")

        # Subclasses build the concrete query URL from the object name,
        # the API key and any extra filter arguments.
        url = self._get_url(top_level_object, sunlight.config.API_KEY,
                            **kwargs)
        try:
            r = urlopen(url)
            return_data = r.read().decode('utf8')
            # Subclasses decode the raw payload (e.g. parse JSON).
            return self._decode_response(return_data)
        except HTTPError as e:
            # Wrap HTTP errors in a library exception carrying the url,
            # status code and server-provided message for callers.
            message = e.read()
            code = e.getcode()

            ex = sunlight.errors.BadRequestException("Error (%s) -- %s" % (
                code, message
            ))
            ex.url = e.geturl()
            ex.message = message
            ex.code = code
            raise ex
| [
"paul@politicrowd.com"
] | paul@politicrowd.com |
46b52fe8e5c60205d2161d38dc9193d19d105f9e | cba90cdd06eced813be6ad80e6295587223c4600 | /betfairlightweight/endpoints/navigation.py | 8795b7d2b4a2e08e79350a3a78ae3dd5e1c20f13 | [
"MIT"
] | permissive | mberk/betfair | 1a22528b881e02567626dbe7e8c4f0197809c38e | 6b064a68c8d2afceda81b70d74b6a0ee9601f228 | refs/heads/master | 2023-03-07T02:33:06.443407 | 2022-08-16T08:06:10 | 2022-08-16T08:06:10 | 192,976,576 | 0 | 1 | MIT | 2023-03-01T12:03:37 | 2019-06-20T19:28:23 | Python | UTF-8 | Python | false | false | 1,510 | py | import requests
from ..exceptions import APIError, InvalidResponse
from ..utils import check_status_code
from .baseendpoint import BaseEndpoint
from ..compat import json
class Navigation(BaseEndpoint):
    """
    Navigation operations.
    """

    def list_navigation(self, session: requests.Session = None) -> dict:
        """
        This Navigation Data for Applications service allows the retrieval of the
        full Betfair market navigation menu from a compressed file.

        :param requests.session session: Requests session object

        :rtype: json
        """
        return self.request(session=session)

    # NOTE: annotation corrected from `(dict, float)` -- this method returns
    # only the decoded JSON payload (see the final return statement).
    def request(
        self, method: str = None, params: dict = None, session: requests.Session = None
    ) -> dict:
        """Perform the HTTP GET against the navigation URI and decode JSON.

        :raises APIError: on any connection or transport failure
        :raises InvalidResponse: when the body is not valid JSON
        """
        session = session or self.client.session
        try:
            response = session.get(
                self.url,
                headers=self.client.request_headers,
                timeout=(self.connect_timeout, self.read_timeout),
            )
        except requests.ConnectionError as e:
            raise APIError(None, method, params, e)
        except Exception as e:
            # Any other transport failure is surfaced uniformly as APIError.
            raise APIError(None, method, params, e)
        check_status_code(response)
        try:
            response_json = json.loads(response.content.decode("utf-8"))
        except ValueError:
            raise InvalidResponse(response.text)
        return response_json

    @property
    def url(self) -> str:
        # Navigation uses its own endpoint URI, not the betting API base.
        return self.client.navigation_uri
| [
"paulingliam@gmail.com"
] | paulingliam@gmail.com |
dd8ff876cdff51683095b93c5c1e9985b5a29584 | 9732da539d940904cf09b4164a307cb1a58fbb35 | /superhero/ability_and_armor.py | bb0e7c0ea30847095581385d460942d5d2e5ad75 | [] | no_license | makhmudislamov/fun_python_exercises | f3c7557fa6ed400ee196252a84ad7b6b23b913f1 | 21ab89540fb5f4f04dbdb80f361bf4febd694c11 | refs/heads/master | 2020-05-26T05:42:20.115833 | 2019-10-17T03:28:57 | 2019-10-17T03:28:57 | 188,125,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | from random import randint
class Ability:
    """A named attack whose remaining power shrinks each time it is used."""

    def __init__(self, name, max_damage):
        """Record the ability's display name and its remaining attack power."""
        self.name = name
        self.max_damage = max_damage

    def __str__(self):
        return f'This ability is {self.name}'

    def ability_attack(self):
        """Roll a random damage value in [0, max_damage], deduct it from the
        remaining power pool, and return it."""
        dealt = randint(0, self.max_damage)
        self.max_damage -= dealt
        return dealt
class Weapon(Ability):
    # A Weapon is an Ability whose attacks never roll below half power.
    # NOTE: unlike Ability.ability_attack, this does not deplete max_damage.
    def ability_attack(self):
        """ This method returns a random value
        between one half to the full attack power of the weapon.
        """
        return randint(self.max_damage // 2, self.max_damage)
class Armor():
    """Defensive gear that blocks a random amount of incoming damage."""

    def __init__(self, name, max_block):
        """Store the armor's name and its maximum block strength."""
        self.name = name
        self.max_block = max_block

    def block(self):
        """Return a random block value between 0 and max_block inclusive."""
        return randint(0, self.max_block)
# if __name__ == "__main__":
# pass
| [
"sunnatovichvv@gmail.com"
] | sunnatovichvv@gmail.com |
0cd44d57984873ff2dd3d1e25064b5bc50b880c1 | 23fdc97552b7e55b8c5e5eca3046ee95eb29d43c | /leetcode/leetcode_111.py | db8f1ff94b73fffc72e5a91a64d84064cc74e9ef | [] | no_license | qiqimaochiyu/tutorial-python | b999ef63026d05045a33225b1fbe2a5919422961 | a0d50946859798a642aaacdc31fa97cc015ae615 | refs/heads/master | 2018-10-04T18:12:18.017065 | 2018-06-26T09:23:50 | 2018-06-26T09:23:50 | 90,226,235 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def minDepth(self, root):
        """
        Return the number of nodes on the shortest root-to-leaf path.

        :type root: TreeNode
        :rtype: int
        """
        if root is None:
            return 0
        left, right = root.left, root.right
        if left is None or right is None:
            # A missing child contributes 0, so the depth comes from the
            # existing side (plus this node); a node with one child is not
            # a leaf and must recurse into its only subtree.
            return self.minDepth(left) + self.minDepth(right) + 1
        return min(self.minDepth(left), self.minDepth(right)) + 1
| [
"noreply@github.com"
] | noreply@github.com |
8b579bbef3904192079b88f2eac49f003e9d4eb1 | 7ad7eb81d1dc26edd3b958a5dbbc7d61f19b0640 | /test/test_mode_type.py | 4c28945ba899c6fcde6a31a07364acf1687527cc | [
"MIT"
] | permissive | camptocamp/quickpac-client | a1b6e7164e2ad362c0c6d819a31ace9f6774f926 | 761c08bdc3846c724adbc99b589d2db460a6bcdc | refs/heads/master | 2023-07-16T18:46:48.167193 | 2021-09-02T08:56:46 | 2021-09-02T09:12:46 | 401,749,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | # coding: utf-8
"""
Quickpac API
Here you will find all public interfaces to the Quickpac system. # noqa: E501
OpenAPI spec version: v1.00
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import quickpac
from quickpac.models.mode_type import ModeType # noqa: E501
from quickpac.rest import ApiException
class TestModeType(unittest.TestCase):
    """ModeType unit test stubs"""

    def setUp(self):
        # No fixtures needed for this generated stub.
        pass

    def tearDown(self):
        pass

    def testModeType(self):
        """Test ModeType"""
        # FIXME: construct object with mandatory attributes with example values
        # model = quickpac.models.mode_type.ModeType()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"stephane.mangin@camptocamp.com"
] | stephane.mangin@camptocamp.com |
1c61b1085a41baf65935cafa23107b1498cffadb | 56c23dfabac2b6cdcecfc20f6cc1e53d0041fd7a | /stanCode projects/find_DNA_complement/complement.py | 555df5d7a6b415aa24d181d33ca43b2eb429b0f0 | [
"MIT"
] | permissive | rogerchang910/stanCode-projects | 174144bcb67b5e141f7acc320e5a14027134bc2a | 92beb09b23a40c09a093dfad80837d97a90c8e3a | refs/heads/main | 2022-12-30T10:11:23.402634 | 2020-10-19T07:57:46 | 2020-10-19T07:57:46 | 303,656,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | """
File: complement.py
Name: Roger(Yu-Ming) Chang
----------------------------
This program uses string manipulation to
tackle a real world problem - finding the
complement strand of a DNA sequence.
The program asks users for a DNA sequence as
a python string that is case-insensitive.
Your job is to output the complement of it.
"""
def main():
    """
    The program will output the complement of a DNA sequence users input.
    """
    dna = input_dna()  # prompt until a valid strand is entered
    complement = build_complement(dna)  # compute the Watson-Crick complement
    print('The complement of ' + str(dna) + ' is ' + str(complement))
def input_dna():
    """
    Repeatedly prompt the user for a DNA strand until the input contains
    only the bases A, T, C and G (case-insensitive).

    :return: str, the validated DNA sequence in upper case.
    """
    while True:
        dna = input('Please give me a DNA strand and I\'ll find the complement: ')
        dna = dna.upper()
        # A strand is valid when every character is one of the four bases.
        # (Replaces the original counter loop, which included a no-op
        # `wrong += 0` branch.)
        if all(ch in 'ATCG' for ch in dna):
            return dna
        print('The input format is not correct.')
def build_complement(base):
    """
    :param base: str, the DNA sequence users input.
    :return: str, the complement of the entered DNA sequence.
    """
    # Map each base to its Watson-Crick partner; any character outside
    # A/T/C/G is silently dropped, matching the original behavior.
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(pairs[ch] for ch in base if ch in pairs)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
7d6c817fe544b5cc80a68b8c685ce92faf0c9ef5 | a9d6a3b0fe418e4e5cc131ebc05f9b56c0e4543e | /chapter11-django/site02/site02/settings.py | 1ba07484b03cf34c8252583125bc6c301d4cb224 | [] | no_license | Kianqunki/Python_CorePythonApplicationsProgramming | 34a36ba64bdc303814de507c4fcfc3c81ff88b5f | 77263c1fde0d02aade180f7e73d2cdee1d170d58 | refs/heads/master | 2021-05-07T02:41:44.567088 | 2014-10-27T17:43:51 | 2014-10-27T17:43:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,596 | py | """
Django settings for site02 project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y97upk5xk__c@j95sw4v-pf&#i45ir$cm6-ya)byzikor7+2sv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'approver',
'poster'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'site02.urls'
WSGI_APPLICATION = 'site02.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'TweetApprover.db'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# from this point on MY consts
# SECURITY NOTE(review): the credentials and API secrets below are hard-coded
# and committed to source control. They should be loaded from environment
# variables or a secrets store, and the exposed keys/tokens rotated.
TWEET_APPROVER_EMAIL = 'georstef@gmail.com'

# Outgoing mail server configuration used for notification emails.
EMAIL_HOST = 'smtp.mydomain.com'
EMAIL_HOST_USER = 'username'
EMAIL_HOST_PASSWORD = 'password'
DEFAULT_FROM_EMAIL = 'username@mydomain.com'
SERVER_EMAIL = 'username@mydomain.com'

# Twitter API OAuth credentials for posting approved tweets.
TWITTER_CONSUMER_KEY = 'DeH9TfrfeV7UeRgK3OSGA'
TWITTER_CONSUMER_SECRET = 'sZGBB28VZcrRfcZvexYydj2Pc2uWW307kP8l7T7yiQo'
TWITTER_OAUTH_TOKEN = '2334856880-zYwvSu8kS7cGfH67lQ64vulTUbY7zxhc39bpnlG'
TWITTER_OAUTH_TOKEN_SECRET = 'RTQ7pzSytCIPsASCkA0Z5rubpHSWbvjvYR3c3hb9QhC3M'
| [
"georstef@gmail.com"
] | georstef@gmail.com |
9c762d2633df105988229e84fc9d96bc46b0cd65 | e5b72785a1a191ca8ed62dee6048f865caa61fe3 | /gestao/urls.py | c9f849a26b749eccbcb0f922bcf1c04f55871d3d | [] | no_license | MarToxAk/v5 | 344409f98ea0098b272c4e3bc2d21e7c9bbcfe65 | daa76fe9f2346805cf99b62d8f687d8f751defbc | refs/heads/master | 2020-11-27T01:54:06.684511 | 2019-12-20T12:45:43 | 2019-12-20T12:45:43 | 229,263,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | """gestao URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
# NOTE(review): two URLconfs are mounted at '' (comparador and teste); the
# first matching pattern wins, so teste routes shadowed by comparador never
# resolve -- verify this is intended.
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin
    path('', include('comparador.urls')),  # comparison app (site root)
    path('', include('teste.urls')),  # test app (also mounted at root)
    path('pousada/', include('pousada.urls')),  # lodging app
    path('cotacao/', include('cotacao.urls')),  # quotation app
    path('chat2/', include('chatbot.urls'), name='chatbot'),  # chatbot app
]
| [
"junior.ilha@hotmail.com.br"
] | junior.ilha@hotmail.com.br |
890ebb99722d62fb6f06cab56b621f579b3449d8 | 6044e804dc994cb342dee73124e9216d552f6096 | /first_test.py | 4b5fe2ac95c876216633f6dacbfe65f4347e5645 | [] | no_license | jjpikoov/jjblog | 8fc02a84fa45e2d92dcdc2def55b49969b361b2f | fb89e2c6b737075c14ba25f3c3331717ae565b2c | refs/heads/master | 2021-01-10T09:10:51.089560 | 2016-02-17T16:40:53 | 2016-02-17T16:40:53 | 49,321,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | import os
import main
import unittest
import tempfile
class JJblogTestCase(unittest.TestCase):
    """Integration tests for the jjblog Flask app (admin login/logout flow)."""

    def setUp(self):
        # Point the app at a fresh temporary SQLite database file and create
        # a test client; init_db() builds the schema.
        self.db_fd, main.app.config['DATABASE'] = tempfile.mkstemp()
        self.app = main.app.test_client()
        main.database.init_db()

    def tearDown(self):
        # Close and remove the temporary database file.
        os.close(self.db_fd)
        os.unlink(main.app.config['DATABASE'])

    def login(self, username, password):
        """POST credentials to /admin and follow redirects."""
        return self.app.post('/admin', data=dict(
            username=username,
            password=password), follow_redirects=True)

    def logout(self):
        """GET /admin/logout and follow redirects."""
        return self.app.get('/admin/logout', follow_redirects=True)

    def test_login_logout(self):
        # Logs in with a deliberately wrong username, then logs out; the
        # response assertion is still commented out (stub-level test).
        rv = self.login('aadmin', 'admin')
        # assert 'Failed' in rv.data
        print(rv.data)
        rv = self.logout()
if __name__ == '__main__':
unittest.main()
| [
"jjpikoov@gmail.com"
] | jjpikoov@gmail.com |
eaed7c034ea788beec33f6c9b31938383407cb09 | 7c98bee73fdd64a8fa08dc3a94e520b175ac190e | /bin/rst2latex.py | 311af3f8a994b04f7ad5f9f586bc0e1fc7cd2fcd | [] | no_license | Barry-Chen-Intersective/Proj-Api-Test | 97015eccb4b6d1652ca8809cbb8ddbe1132008c0 | cc707770578bd35a155f506198ad765d91926daa | refs/heads/master | 2021-07-21T04:45:09.329331 | 2018-10-23T01:25:02 | 2018-10-23T01:25:02 | 135,973,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | #!/Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
# Best-effort: switch to the user's default locale so docutils handles
# locale-dependent text correctly; ignore failures (e.g. unsupported locale).
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # Was a bare `except:`, which would also swallow SystemExit and
    # KeyboardInterrupt; Exception is wide enough for locale failures.
    pass

from docutils.core import publish_cmdline

description = ('Generates LaTeX documents from standalone reStructuredText '
               'sources. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout).  See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')

# Run the docutils front-end with the LaTeX writer.
publish_cmdline(writer_name='latex', description=description)
| [
"barry@intersective.com"
] | barry@intersective.com |
505e01d16c4946a2cc61a71edd7d0ee2504ca6d6 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/network/v20171001/get_virtual_network_gateway_bgp_peer_status.py | ce971110c0cb3c1a127751e2520bf66c4337635f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,690 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayBgpPeerStatusResult',
'AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult',
'get_virtual_network_gateway_bgp_peer_status',
]
@pulumi.output_type
class GetVirtualNetworkGatewayBgpPeerStatusResult:
    """
    Response for list BGP peer status API service call
    """
    def __init__(__self__, value=None):
        # `value` holds the list of BGP peer status entries returned by Azure.
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.BgpPeerStatusResponseResult']]:
        """
        List of BGP peers
        """
        return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(GetVirtualNetworkGatewayBgpPeerStatusResult):
    # Awaitable wrapper: lets callers `await` the invoke result while still
    # exposing the same attributes synchronously (the `if False: yield`
    # makes __await__ a generator that immediately returns the result).
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetVirtualNetworkGatewayBgpPeerStatusResult(
            value=self.value)
def get_virtual_network_gateway_bgp_peer_status(peer: Optional[str] = None,
                                                resource_group_name: Optional[str] = None,
                                                virtual_network_gateway_name: Optional[str] = None,
                                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult:
    """
    Response for list BGP peer status API service call


    :param str peer: The IP address of the peer to retrieve the status of.
    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    """
    # Marshal arguments into the provider's expected camelCase keys.
    __args__ = dict()
    __args__['peer'] = peer
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's pinned provider version when none was given.
        opts.version = _utilities.get_version()
    # Invoke the Azure-native provider function and coerce the result type.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20171001:getVirtualNetworkGatewayBgpPeerStatus', __args__, opts=opts, typ=GetVirtualNetworkGatewayBgpPeerStatusResult).value

    return AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(
        value=__ret__.value)
| [
"noreply@github.com"
] | noreply@github.com |
60d320dea98839269dab10b44bf3d83b288fe2b7 | 42b38dd5fe75148a5727760847fcea5597f9d52f | /user_auth/vendors/top/api/rest/__init__.py | 503bd7d8a0e83539629ab85ca8dad1ed8382e3e8 | [] | no_license | naitianliu/hwserver | 9d24c2ea405a6dcfafe7aa38e42a768e496608e6 | 06ddcb114cd4c1b4d8b647998b4b4637789d6b43 | refs/heads/master | 2022-12-14T10:59:43.509971 | 2016-12-18T15:38:29 | 2016-12-18T15:38:29 | 61,244,155 | 0 | 0 | null | 2022-12-07T23:39:07 | 2016-06-15T22:03:04 | Python | UTF-8 | Python | false | false | 1,968 | py | from user_auth.vendors.top.api.rest.TopIpoutGetRequest import TopIpoutGetRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcFlowChargeProvinceRequest import AlibabaAliqinFcFlowChargeProvinceRequest
from user_auth.vendors.top.api.rest.HttpdnsGetRequest import HttpdnsGetRequest
from user_auth.vendors.top.api.rest.TopSecretGetRequest import TopSecretGetRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcFlowQueryRequest import AlibabaAliqinFcFlowQueryRequest
from user_auth.vendors.top.api.rest.KfcKeywordSearchRequest import KfcKeywordSearchRequest
from user_auth.vendors.top.api.rest.TopatsTaskDeleteRequest import TopatsTaskDeleteRequest
from user_auth.vendors.top.api.rest.TimeGetRequest import TimeGetRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcSmsNumSendRequest import AlibabaAliqinFcSmsNumSendRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcTtsNumSinglecallRequest import AlibabaAliqinFcTtsNumSinglecallRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcSmsNumQueryRequest import AlibabaAliqinFcSmsNumQueryRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcFlowChargeRequest import AlibabaAliqinFcFlowChargeRequest
from user_auth.vendors.top.api.rest.TopatsResultGetRequest import TopatsResultGetRequest
from user_auth.vendors.top.api.rest.AreasGetRequest import AreasGetRequest
from user_auth.vendors.top.api.rest.TopAuthTokenCreateRequest import TopAuthTokenCreateRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcFlowGradeRequest import AlibabaAliqinFcFlowGradeRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcVoiceNumDoublecallRequest import AlibabaAliqinFcVoiceNumDoublecallRequest
from user_auth.vendors.top.api.rest.AlibabaAliqinFcVoiceNumSinglecallRequest import AlibabaAliqinFcVoiceNumSinglecallRequest
from user_auth.vendors.top.api.rest.TopAuthTokenRefreshRequest import TopAuthTokenRefreshRequest
from user_auth.vendors.top.api.rest.AppipGetRequest import AppipGetRequest
| [
"naitianliu@gmail.com"
] | naitianliu@gmail.com |
748a3810da0b0659890ef170abef1ea0d6d32b5f | 5961726d2e0d84c4ced32e5cd072c3c0c07153cb | /smart_schedule/line/handlers/__init__.py | 48b1b8553fed5e192692650955bf0185450019e4 | [] | no_license | macinjoke/smart_schedule | 46bc68d712646ffb45dcf1e8bd9d140d7a9fb84f | 605c39f2d465cb8e56bedc941109f3b716608efa | refs/heads/master | 2021-03-19T15:53:35.886128 | 2018-01-13T08:22:50 | 2018-01-13T08:22:50 | 76,947,986 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,214 | py | from datetime import datetime
import flask
import urllib
import hashlib
import re
from linebot.models import TextSendMessage
from linebot import LineBotApi
from smart_schedule.settings import (
line_env, web_env, hash_env
)
line_bot_api = LineBotApi(line_env['channel_access_token'])
# TODO 以降の関数たちはどこにあるべきか、リファクタリングの余地が無いか考える
def reply_google_auth_message(event):
    """Reply to a LINE event with a Google OAuth link for this talk.

    The talk id (user/group/room) concatenated with a server-side seed is
    hashed with MD5 and appended to the auth URL so the OAuth callback can
    verify which talk requested authentication.

    :param event: LINE webhook event whose source identifies the talk
    :raises Exception: if the event source is not user/group/room
    """
    auth_url = flask.url_for('oauth2')
    if event.source.type == 'user':
        talk_id = event.source.user_id
    elif event.source.type == 'group':
        talk_id = event.source.group_id
    elif event.source.type == 'room':
        talk_id = event.source.room_id
    else:
        raise Exception('invalid `event.source`')
    m = hashlib.md5()
    m.update(talk_id.encode('utf-8'))
    m.update(hash_env['seed'].encode('utf-8'))
    params = urllib.parse.urlencode({'talk_id': talk_id, 'hash': m.hexdigest()})
    url = '{}{}?{}'.format(web_env['host'], auth_url, params)
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text='このリンクから認証を行ってください\n{}'.format(url))
    )
def reply_refresh_error_message(event):
    """Reply (in Japanese) with recovery instructions for a credential
    refresh error, which occurs when the same Google account was
    authenticated in multiple talks."""
    reply_text = '''認証情報の更新エラーが発生しました。同じGoogleアカウントで複数の\
認証を行っている場合にこの不具合が発生します。このトークでSmart Scheduleを使用したい場合\
は以下のいずれかを行った後で認証しなおしてください。
1. 同じアカウントで認証しているトークでlogoutコマンドを行う(オススメ)
2. 下記URLから手動でSmart Scheduleの認証を解除する\
https://myaccount.google.com/u/1/permissions'''
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text=reply_text)
    )
def reply_invalid_credential_error_message(event):
    """Reply (in Japanese) telling the user their stored Google credential
    is invalid and that they must re-authenticate."""
    reply_text = '''無効な認証情報です。同じGoogleアカウントで複数の認証を行っている\
場合にこの不具合が発生します。認証をやりなおしてください。'''
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text=reply_text)
    )
def generate_message_from_events(events, reply_text):
    """Append a human-readable (Japanese) listing of Google Calendar events
    to ``reply_text`` and return it.

    Timed events (RFC3339 dateTime with +09:00 offset) are rendered as
    "YYYY年MM月DD日(曜) HH時MM分" start/end pairs; all-day events (plain
    dates) are rendered as "YYYY年MM月DD日 終日".

    :param events: list of Google Calendar event dicts ('summary',
        'start'/'end' with either 'dateTime' or 'date')
    :param reply_text: header text to prepend
    :return: str, the assembled message

    BUG FIX: the time format previously used %S (seconds) for the 分
    (minutes) field; it now correctly uses %M.
    """
    day_of_week_strs = ["月", "火", "水", "木", "金", "土", "日"]
    for e in events:
        summary = e['summary']
        start = e['start'].get('dateTime', e['start'].get('date'))
        if re.match(r'\d+[-]\d+[-]\d+[T]\d+[:]\d+[:]\d+[+]\d+[:]\d+', start):
            start_datetime = datetime.strptime(start, '%Y-%m-%dT%H:%M:%S+09:00')
            day_of_week = day_of_week_strs[start_datetime.weekday()]
            start = start_datetime.strftime(
                '%Y年%m月%d日({}) %H時%M分'.format(day_of_week)
            )
            end = e['end'].get('dateTime', e['end'].get('date'))
            end_datetime = datetime.strptime(end, '%Y-%m-%dT%H:%M:%S+09:00')
            day_of_week = day_of_week_strs[end_datetime.weekday()]
            end = end_datetime.strftime(
                '%Y年%m月%d日({}) %H時%M分'.format(day_of_week)
            )
            reply_text += '\n\n{}\n{}\n |\n{}\n\n---------------------------'.format(summary,
                                                                                     start,
                                                                                     end)
        else:
            # All-day event: only a date is available.
            start_datetime = datetime.strptime(start, '%Y-%m-%d')
            start = start_datetime.strftime('%Y年%m月%d日')
            end = '終日'
            reply_text += '\n\n{}\n{} {}\n\n---------------------------'.format(summary,
                                                                                start,
                                                                                end)
    return reply_text
from .join_event_handler import JoinEventHandler
from .leave_event_handler import LeaveEventHandler
from .message_event_handler import MessageEventHandler
from .postback_event_handler import PostBackEventHandler
from .unfollow_event_handler import UnfollowEventHandler
| [
"shunji.makino@gmail.com"
] | shunji.makino@gmail.com |
9bd919b284a2108b62fb412c5d961bcb422c8d89 | a66460a46611483dfbdc94c7996893f427e60d97 | /ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/f5/bigip_iapp_template.py | 4437352d228d92f1318fbf343532623181c1e425 | [
"GPL-3.0-only",
"MIT"
] | permissive | otus-devops-2019-02/yyashkin_infra | 06b57807dde26f94f501828c07503d6bf1d70816 | 0cd0c003884155ac922e3e301305ac202de7028c | refs/heads/master | 2020-04-29T02:42:22.056724 | 2019-05-15T16:24:35 | 2019-05-15T16:24:35 | 175,780,718 | 0 | 0 | MIT | 2019-05-15T16:24:36 | 2019-03-15T08:37:35 | HCL | UTF-8 | Python | false | false | 15,691 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_iapp_template
short_description: Manages TCL iApp templates on a BIG-IP
description:
- Manages TCL iApp templates on a BIG-IP. This module will allow you to
deploy iApp templates to the BIG-IP and manage their lifecycle. The
conventional way to use this module is to import new iApps as needed
or by extracting the contents of the iApp archive that is provided at
downloads.f5.com and then importing all the iApps with this module.
This module can also update existing iApps provided that the source
of the iApp changed while the name stayed the same. Note however that
this module will not reconfigure any services that may have been
created using the C(bigip_iapp_service) module. iApps are normally
not updated in production. Instead, new versions are deployed and then
existing services are changed to consume that new template. As such,
the ability to update templates in-place requires the C(force) option
to be used.
version_added: 2.4
options:
force:
description:
- Specifies whether or not to force the uploading of an iApp. When
C(yes), will force update the iApp even if there are iApp services
using it. This will not update the running service though. Use
C(bigip_iapp_service) to do that. When C(no), will update the iApp
only if there are no iApp services using the template.
type: bool
name:
description:
- The name of the iApp template that you want to delete. This option
is only available when specifying a C(state) of C(absent) and is
provided as a way to delete templates that you may no longer have
the source of.
content:
description:
- Sets the contents of an iApp template directly to the specified
value. This is for simple values, but can be used with lookup
plugins for anything complex or with formatting. C(content) must
be provided when creating new templates.
state:
description:
- Whether the iApp template should exist or not.
default: present
choices:
- present
- absent
partition:
description:
- Device partition to manage resources on.
default: Common
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Add the iApp contained in template iapp.tmpl
bigip_iapp_template:
content: "{{ lookup('template', 'iapp.tmpl') }}"
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Update a template in place
bigip_iapp_template:
content: "{{ lookup('template', 'iapp-new.tmpl') }}"
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Update a template in place that has existing services created from it.
bigip_iapp_template:
content: "{{ lookup('template', 'iapp-new.tmpl') }}"
force: yes
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import re
import uuid
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import fq_name
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from f5.utils.iapp_parser import NonextantTemplateNameException
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import fq_name
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from f5.utils.iapp_parser import NonextantTemplateNameException
except ImportError:
HAS_F5SDK = False
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Parameters(AnsibleF5Parameters):
    """Parameter adapter for the bigip_iapp_template module.

    Translates between module arguments and the REST representation of an
    iApp template, and normalizes the template source text so checksum
    comparisons against templates already on the device work correctly.
    """

    api_attributes = []
    returnables = []

    @property
    def name(self):
        """Return the template name, from ``name`` or parsed from ``content``.

        :raises F5ModuleError: if ``content`` is supplied but contains no
            ``sys application template`` declaration to take the name from.
        """
        if self._values['name']:
            return self._values['name']
        if self._values['content']:
            try:
                name = self._get_template_name()
                return name
            except NonextantTemplateNameException:
                raise F5ModuleError(
                    "No template name was found in the template"
                )
        return None

    @property
    def content(self):
        """Return the template source with a normalized template name."""
        if self._values['content'] is None:
            return None
        result = self._squash_template_name_prefix()
        result = self._replace_template_name(result)
        return result

    @property
    def checksum(self):
        """Return the checksum BIG-IP generated for the template, if any."""
        return self._values['tmplChecksum']

    def to_return(self):
        """Return the subset of parameters reported back to the user."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            # Reporting changed values is best-effort; never fail the module
            # run because a returnable could not be computed.
            pass
        return result

    def _squash_template_name_prefix(self):
        """Removes the template name prefix

        The IappParser in the SDK treats the partition prefix as part of
        the iApp's name. This method removes that partition from the name
        in the iApp so that comparisons can be done properly and entries
        can be created properly when using REST.

        :return string
        """
        pattern = r'sys\s+application\s+template\s+/Common/'
        replace = 'sys application template '
        return re.sub(pattern, replace, self._values['content'])

    def _replace_template_name(self, template):
        """Replaces template name at runtime

        To allow us to do the switch-a-roo with temporary templates and
        checksum comparisons, we need to take the template provided to us
        and change its name to a temporary value so that BIG-IP will create
        a clone for us.

        :return string
        """
        pattern = r'sys\s+application\s+template\s+[^ ]+'
        if self._values['name']:
            name = self._values['name']
        else:
            name = self._get_template_name()
        replace = 'sys application template {0}'.format(fq_name(self.partition, name))
        return re.sub(pattern, replace, template)

    def _get_template_name(self):
        # There is a bug in the iApp parser in the F5 SDK that prevents us from
        # using it in all cases to get the name of an iApp. So we'll use this
        # pattern for now and file a bug with the F5 SDK
        pattern = r'sys\s+application\s+template\s+(?P<path>\/[^\{}"\'*?|#]+\/)?(?P<name>[^\{}"\'*?|#]+)'
        matches = re.search(pattern, self._values['content'])
        # Bug fix: ``re.search`` returns None when nothing matches.  The old
        # code called ``matches.group(...)`` unconditionally and only caught
        # IndexError (which a present named group never raises), so a template
        # without a declaration crashed with AttributeError instead of
        # reporting a missing template name.
        if matches is None:
            raise NonextantTemplateNameException
        result = matches.group('name').strip()
        if result:
            return result
        raise NonextantTemplateNameException
class ModuleManager(object):
    """Orchestrates create/update/delete of an iApp template on the device."""
    def __init__(self, *args, **kwargs):
        """Store the AnsibleModule and REST client supplied by main()."""
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.have = None
        self.want = Parameters(params=self.module.params)
        self.changes = Parameters()
    def exec_module(self):
        """Dispatch on the requested state and return the module result dict."""
        result = dict()
        changed = False
        state = self.want.state
        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            # Normalize REST-layer errors into the module's own exception type.
            raise F5ModuleError(str(e))
        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result
    def present(self):
        """Ensure the template exists; return True if anything changed."""
        if self.exists():
            return self.update()
        else:
            return self.create()
    def update(self):
        """Re-load the template on the device when its content changed.

        Honors check mode and, unless ``force`` is set, refuses to touch a
        template that existing iApp services were built from.
        """
        self.have = self.read_current_from_device()
        if not self.templates_differ():
            return False
        if not self.want.force and self.template_in_use():
            return False
        if self.module.check_mode:
            return True
        self._remove_iapp_checksum()
        # The same process used for creating (load) can be used for updating
        self.create_on_device()
        self._generate_template_checksum_on_device()
        return True
    def template_in_use(self):
        """Return True if any deployed application service uses this template."""
        collection = self.client.api.tm.sys.application.services.get_collection()
        fullname = '/{0}/{1}'.format(self.want.partition, self.want.name)
        for resource in collection:
            if resource.template == fullname:
                return True
        return False
    def read_current_from_device(self):
        """Fetch the on-device template, with a freshly generated checksum."""
        self._generate_template_checksum_on_device()
        resource = self.client.api.tm.sys.application.templates.template.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = resource.attrs
        return Parameters(params=result)
    def absent(self):
        """Ensure the template does not exist; return True if it was removed."""
        changed = False
        if self.exists():
            changed = self.remove()
        return changed
    def exists(self):
        """Return True if the template is present on the device."""
        result = self.client.api.tm.sys.application.templates.template.exists(
            name=self.want.name,
            partition=self.want.partition
        )
        return result
    def _remove_iapp_checksum(self):
        """Removes the iApp tmplChecksum
        This is required for updating in place or else the load command will
        fail with a "AppTemplate ... content does not match the checksum"
        error.
        :return:
        """
        resource = self.client.api.tm.sys.application.templates.template.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.modify(tmplChecksum=None)
    def templates_differ(self):
        # BIG-IP can generate checksums of iApps, but the iApp needs to be
        # on the box to do this. Additionally, the checksum is MD5, but it
        # is not an MD5 of the entire content of the template. Instead, it
        # is a hash of some portion of the template that is unknown to me.
        #
        # The code below is responsible for uploading the provided template
        # under a unique name and creating a checksum for it so that that
        # checksum can be compared to the one of the existing template.
        #
        # Using this method we can compare the checksums of the existing
        # iApp and the iApp that the user is providing to the module.
        backup = self.want.name
        # Override whatever name may have been provided so that we can
        # temporarily create a new template to test checksums with
        self.want.update({
            'name': 'ansible-{0}'.format(str(uuid.uuid4()))
        })
        # Create and remove temporary template
        temp = self._get_temporary_template()
        # Set the template name back to what it was originally so that
        # any future operations only happen on the real template.
        self.want.update({
            'name': backup
        })
        if temp.checksum != self.have.checksum:
            return True
        return False
    def _get_temporary_template(self):
        """Upload the wanted template under a temp name and return its Parameters."""
        self.create_on_device()
        temp = self.read_current_from_device()
        self.remove_from_device()
        return temp
    def _generate_template_checksum_on_device(self):
        """Ask tmsh to (re)compute the template's checksum on the device."""
        generate = 'tmsh generate sys application template {0} checksum'.format(
            self.want.name
        )
        self.client.api.tm.util.bash.exec_cmd(
            'run',
            utilCmdArgs='-c "{0}"'.format(generate)
        )
    def create(self):
        """Load the template onto the device; return True on success."""
        if self.module.check_mode:
            return True
        self.create_on_device()
        if self.exists():
            return True
        else:
            raise F5ModuleError("Failed to create the iApp template")
    def create_on_device(self):
        """Upload the template source and tmsh-load it, surfacing tmsh errors."""
        remote_path = "/var/config/rest/downloads/{0}".format(self.want.name)
        load_command = 'tmsh load sys application template {0}'.format(remote_path)
        template = StringIO(self.want.content)
        upload = self.client.api.shared.file_transfer.uploads
        upload.upload_stringio(template, self.want.name)
        output = self.client.api.tm.util.bash.exec_cmd(
            'run',
            utilCmdArgs='-c "{0}"'.format(load_command)
        )
        # tmsh reports failures on stdout rather than via an exception, so
        # scan its output for error markers.
        if hasattr(output, 'commandResult'):
            result = output.commandResult
            if 'Syntax Error' in result:
                raise F5ModuleError(output.commandResult)
            if 'ERROR' in result:
                raise F5ModuleError(output.commandResult)
    def remove(self):
        """Delete the template; return True once it is gone."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the iApp template")
        return True
    def remove_from_device(self):
        """Issue the REST delete for the template."""
        resource = self.client.api.tm.sys.application.templates.template.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.delete()
class ArgumentSpec(object):
    """Declares the options accepted by the bigip_iapp_template module."""

    def __init__(self):
        self.supports_check_mode = True
        module_specific = {
            'name': dict(),
            'state': dict(
                default='present',
                choices=['present', 'absent']
            ),
            'force': dict(
                type='bool'
            ),
            'content': dict(),
            'partition': dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
        }
        # Start from the shared F5 option set, then layer this module's
        # options on top of it.
        combined = dict(f5_argument_spec)
        combined.update(module_specific)
        self.argument_spec = combined
def main():
    """Module entry point: parse arguments, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    if not HAS_F5SDK:
        module.fail_json(msg="The python f5-sdk module is required")

    client = None
    try:
        client = F5Client(**module.params)
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        module.exit_json(**results)
    except F5ModuleError as e:
        # Bug fix: the previous code unconditionally referenced ``client``
        # here, so if ``F5Client(**module.params)`` itself raised, the
        # handler died with a NameError instead of reporting the real error.
        if client is not None:
            cleanup_tokens(client)
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
| [
"theyashkins@gmail.com"
] | theyashkins@gmail.com |
571b5e21a17bb0386eb30bd81b021035a58c3802 | 5b56d0ec345d19c3e9c17764cdfa4ef8180f25e0 | /2020-01-python/api.py | fd5f9add8cd66d0c4436d45b28fc09d9b3c73da0 | [] | no_license | suzuki-hoge/warikan | 6e6d5f814fe4a9130b61a416f495326c316e2a8c | d47c32338421d4c6c88022a7d64a478e79708835 | refs/heads/master | 2020-12-04T08:54:07.960635 | 2020-02-07T03:29:52 | 2020-02-07T10:09:56 | 231,702,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,406 | py | from bottle import route, get, post, put, request, response, hook, run
import functools
import json

import db
import party
def handle(f):
    """Decorator for API handlers: wrap the return value in a JSON envelope.

    On success returns ``{'status': 'ok'}`` plus a ``result`` key when the
    handler returned a value; on failure returns ``{'status': 'ng'}`` with
    the error message.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            result = f(*args, **kwargs)
            return {'status': 'ok', 'result': result} if result is not None else {'status': 'ok'}
        except Exception as e:
            # Bug fixes: catching Exception (not BaseException) lets
            # KeyboardInterrupt/SystemExit propagate, and ``str(e)`` works
            # for every exception class, unlike the deprecated ``e.message``
            # attribute which many exceptions do not define.
            return {'status': 'ng', 'error': str(e)}
    return wrapper
@hook('after_request')
def allow_cors():
    """Attach CORS headers to every response so browser clients can call us."""
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': 'Content-Type',
        'Access-Control-Allow-Methods': 'GET, POST, PUT, OPTIONS',
    }
    for header, value in cors_headers.items():
        response.headers[header] = value
@route('<any:path>', method = 'OPTIONS')
def options(**kwargs):
    """Answer CORS preflight requests for any path with an empty JSON body."""
    return dict()
@get('/party/<partyName>')
@handle
def find(partyName):
    """Return the party identified by *partyName* as a JSON-friendly dict."""
    found = db.read(partyName)
    participants = [
        {'participantName': member.participantName,
         'participantType': member.participantType,
         'paymentSection': member.paymentSection}
        for member in found.participants
    ]
    return {'partyName': found.partyName,
            'partyHoldAt': found.partyHoldAt,
            'participants': participants,
            'billingAmount': found.billingAmount,
            'adjustingUnitAmount': found.adjustingUnitAmount}
@post('/party/plan')
@handle
def plan():
    """Create a new party from the posted JSON payload and persist it."""
    payload = request.json
    planned = party.Party.plan(
        payload.get('partyName'),
        payload.get('partyHoldAt'),
        payload.get('secretaryName'),
        payload.get('paymentSection'),
        payload.get('billingAmount'),
        payload.get('adjustingUnitAmount'),
    )
    db.write(planned)
@put('/party/<partyName>')
@handle
def add(partyName):
    """Add a non-secretary participant to the party named *partyName*."""
    payload = request.json
    current = db.read(partyName)
    participant = party.Participant(
        payload.get('participantName'), 'NotSec', payload.get('paymentSection')
    )
    db.write(current.add(participant))
@put('/party/<partyName>/remove')
@handle
def remove(partyName):
    """Remove the named participant from the party and persist the result."""
    # NOTE(review): unlike the other PUT handlers this reads
    # ``request.params`` (query/form data) rather than ``request.json`` --
    # presumably intentional, but confirm against the client code.
    p = request.params
    found = db.read(partyName)
    updated = found.remove(p.participantName)
    db.write(updated)
@put('/party/<partyName>/change')
@handle
def change(partyName):
    """Change the party's adjusting unit amount to the posted value."""
    payload = request.json
    current = db.read(partyName)
    db.write(current.change(payload.get('adjustingUnitAmount')))
@get('/party/<partyName>/demand')
@handle
def demand(partyName):
    """Return each participant's name and payment amount for the party."""
    found = db.read(partyName)
    return [
        {'participantName': name, 'paymentAmount': str(amount)}
        for name, amount in found.demand()
    ]
# Start bottle's built-in development server (blocking call).
run(host = 'localhost', port = 9000)
| [
"user.ryo@gmail.com"
] | user.ryo@gmail.com |
414f254965e2e32371576293af109dfc8fe4d3a5 | a88f90d3aa9eb9fa7bd88458d3b78e1a7a6c3477 | /svplot/jointgrids.py | 3654cfa66dafd821320fc16c36b9de591c2ff3c0 | [
"MIT"
] | permissive | msto/svplot | a43c13ae2ae66b21b8b3176d1b722c9d88118769 | 2e16a7936328079d444bdd1edd8ab93fbbf49dde | refs/heads/master | 2021-01-13T08:14:58.545980 | 2017-03-08T17:14:17 | 2017-03-08T17:14:17 | 72,221,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,851 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2016 Matthew Stone <mstone5@mgh.harvard.edu>
# Distributed under terms of the MIT license.
"""
Modification of Michael Waskom's JointGrid implementation in Seaborn.
Supports multiple JointGrids in single figure
"""
import numpy as np
import pandas as pd
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import seaborn as sns
class JointGrid(sns.JointGrid):
    """Grid for drawing a bivariate plot with marginal univariate plots."""
    def __init__(self, x, y, data=None, gs=None, ratio=5, space=.2,
                 dropna=True, xlim=None, ylim=None):
        """Set up the grid of subplots.
        Parameters
        ----------
        x, y : strings or vectors
            Data or names of variables in ``data``.
        data : DataFrame, optional
            DataFrame when ``x`` and ``y`` are variable names.
        gs : matplotlib GridSpec or SubplotSpec-derived grid, optional
            Grid to place the joint and marginal axes into; a fresh
            ``(ratio + 1) x (ratio + 1)`` GridSpec is created when omitted.
            This is what allows several JointGrids to share one figure.
        ratio : numeric
            Ratio of joint axes size to marginal axes height.
        space : numeric, optional
            Space between the joint and marginal axes
        dropna : bool, optional
            If True, remove observations that are missing from `x` and `y`.
        {x, y}lim : two-tuples, optional
            Axis limits to set before plotting.
        See Also
        --------
        jointplot : High-level interface for drawing bivariate plots with
                    several different default plot kinds.
        """
        # NOTE(review): this override intentionally does not call
        # ``super().__init__`` and never sets ``self.fig`` -- confirm no
        # inherited seaborn method that relies on those attributes is used.
        # Set up the subplot grid
        if gs is None:
            gs = gridspec.GridSpec(ratio + 1, ratio + 1,
                                   hspace=space, wspace=space)
        ax_joint = plt.subplot(gs[1:, :-1])
        ax_marg_x = plt.subplot(gs[0, :-1], sharex=ax_joint)
        ax_marg_y = plt.subplot(gs[1:, -1], sharey=ax_joint)
        self.ax_joint = ax_joint
        self.ax_marg_x = ax_marg_x
        self.ax_marg_y = ax_marg_y
        # Turn off tick visibility for the measure axis on the marginal plots
        plt.setp(ax_marg_x.get_xticklabels(), visible=False)
        plt.setp(ax_marg_y.get_yticklabels(), visible=False)
        # Turn off the ticks on the density axis for the marginal plots
        plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)
        plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)
        plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)
        plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)
        plt.setp(ax_marg_x.get_yticklabels(), visible=False)
        plt.setp(ax_marg_y.get_xticklabels(), visible=False)
        ax_marg_x.yaxis.grid(False)
        ax_marg_y.xaxis.grid(False)
        # Possibly extract the variables from a DataFrame
        if data is not None:
            if x in data:
                x = data[x]
            if y in data:
                y = data[y]
        # Possibly drop NA
        if dropna:
            not_na = pd.notnull(x) & pd.notnull(y)
            x = x[not_na]
            y = y[not_na]
        # Find the names of the variables
        if hasattr(x, "name"):
            xlabel = x.name
            ax_joint.set_xlabel(xlabel)
        if hasattr(y, "name"):
            ylabel = y.name
            ax_joint.set_ylabel(ylabel)
        # Convert the x and y data to arrays for plotting
        self.x = np.asarray(x)
        self.y = np.asarray(y)
        if xlim is not None:
            ax_joint.set_xlim(xlim)
        if ylim is not None:
            ax_joint.set_ylim(ylim)
class JointGrids:
    """A figure containing a grid of JointGrid panels, faceted by row/column."""
    def __init__(self, data, x, y,
                 col=None, col_order=None,
                 row=None, row_order=None,
                 panel_size=8, ratio=5):
        # row=None, row_order=None,
        # col=None, col_order=None,
        # hue=None, hue_order=None):
        """
        Borrowed heavily from seaborn FacetGrid
        Arguments
        ---------
        panel_size : int, optional
            Height/width of each constituent JointGrid
        ratio : int, optional
            Ratio of joint to marginal axis size
        """
        # NOTE(review): if both ``row`` and ``col`` are None, the loops
        # below never run and ``self.grids[0, 0]`` is left as None -- a
        # single un-faceted panel is never created.  Confirm callers always
        # pass at least one facet variable.
        if row is None:
            row_names = []
        else:
            row_names = sns.utils.categorical_order(data[row], row_order)
        if col is None:
            col_names = []
        else:
            col_names = sns.utils.categorical_order(data[col], col_order)
        # if col is not None and col_order is None:
        # col_order = data[col].drop_duplicates().sort_values()
        # n_cols = len(col_order)
        n_cols = 1 if col is None else len(col_names)
        n_rows = 1 if row is None else len(row_names)
        self.fig = plt.figure(figsize=(n_cols * panel_size,
                                       n_rows * panel_size))
        self.gs = gridspec.GridSpec(n_rows, n_cols)
        self.grids = np.empty((n_rows, n_cols), dtype=object)
        if len(row_names) > 0 and len(col_names) > 0:
            # Facet on both row and column variables: one panel per cell.
            for i, row_val in enumerate(row_names):
                for j, col_val in enumerate(col_names):
                    subdata = data.loc[(data[col] == col_val) &
                                       (data[row] == row_val)]
                    ss = self.gs[i, j]
                    gs = gridspec.GridSpecFromSubplotSpec(ratio + 1, ratio + 1,
                                                          subplot_spec=ss)
                    grid = JointGrid(x, y, data=subdata, gs=gs)
                    self.grids[i, j] = grid
        else:
            # Facet on a single variable (rows take precedence).
            if len(row_names) > 0:
                facets = row_names
                facet = row
                grids = self.grids[:, 0]
            else:
                facets = col_names
                facet = col
                grids = self.grids[0]
            for i, val in enumerate(facets):
                subdata = data.loc[data[facet] == val]
                gs = gridspec.GridSpecFromSubplotSpec(ratio + 1, ratio + 1,
                                                      subplot_spec=self.gs[i])
                grid = JointGrid(x, y, data=subdata, gs=gs)
                grids[i] = grid
    def set_xlims(self, xmin, xmax):
        """Apply the same x-axis limits to every panel's joint axes."""
        for grid in self.grids.flat:
            grid.ax_joint.set_xlim(xmin, xmax)
    def set_ylims(self, ymin, ymax):
        """Apply the same y-axis limits to every panel's joint axes."""
        for grid in self.grids.flat:
            grid.ax_joint.set_ylim(ymin, ymax)
    def set_lims(self, xmin, xmax):
        """Apply identical limits to both axes of every panel's joint axes."""
        for grid in self.grids.flat:
            grid.ax_joint.set_xlim(xmin, xmax)
            grid.ax_joint.set_ylim(xmin, xmax)
    def plot_joint(self, func, **kwargs):
        """Draw the bivariate plot ``func`` on every panel's joint axes."""
        for grid in self.grids.flat:
            grid.plot_joint(func, **kwargs)
    def plot_marginals(self, func, **kwargs):
        """Draw the univariate plot ``func`` on every panel's marginal axes."""
        for grid in self.grids.flat:
            grid.plot_marginals(func, **kwargs)
| [
"matthew.stone12@gmail.com"
] | matthew.stone12@gmail.com |
a4354d06907b766c2c8e2f23546b79efe0959e4f | 06322e962c80f4c25838318e7d805ae88f0299e5 | /lengths.py | f6546177e6a717d960717d0a920b2e6122347ee7 | [
"BSD-2-Clause"
] | permissive | unixpickle/uno-ai | 6d4ec187e0c158c15cd4240ccf7e894cb599e071 | 3124afc8fa6b0cbcced95ef03ed9672cdb4f35a7 | refs/heads/master | 2020-04-21T10:20:07.310885 | 2019-08-06T15:27:45 | 2019-08-06T15:27:45 | 169,482,953 | 22 | 4 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | """
Measure the lengths of random games.
"""
import random
from uno_ai.game import Game
def main():
    """Play random 4-player games forever, printing each game's move count."""
    while True:
        game = Game(4)
        moves = 0
        while game.winner() is None:
            chosen = random.choice(game.options())
            game.act(chosen)
            moves += 1
        print(moves)


if __name__ == '__main__':
    main()
main()
| [
"unixpickle@gmail.com"
] | unixpickle@gmail.com |
b642ce9125bc51b5a9f9d0ae69199d2d0bd1bf63 | 2e8ff2eb86f34ce2fc330766906b48ffc8df0dab | /tensorflow_probability/python/experimental/inference_gym/targets/__init__.py | a5ba67a6a9b68bf31372bf5990405fe49fbdf663 | [
"Apache-2.0"
] | permissive | wataruhashimoto52/probability | 9613f9a3cc685ff1a20643c4a05a48f9cf0fe1ae | 12e3f256544eadea6e863868da825614f4423eb0 | refs/heads/master | 2021-07-16T18:44:25.970036 | 2020-06-14T02:48:29 | 2020-06-14T02:51:59 | 146,873,495 | 0 | 0 | Apache-2.0 | 2018-08-31T09:51:20 | 2018-08-31T09:51:20 | null | UTF-8 | Python | false | false | 2,223 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Targets package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.experimental.inference_gym.targets.banana import Banana
from tensorflow_probability.python.experimental.inference_gym.targets.bayesian_model import BayesianModel
from tensorflow_probability.python.experimental.inference_gym.targets.ill_conditioned_gaussian import IllConditionedGaussian
from tensorflow_probability.python.experimental.inference_gym.targets.item_response_theory import ItemResponseTheory
from tensorflow_probability.python.experimental.inference_gym.targets.item_response_theory import SyntheticItemResponseTheory
from tensorflow_probability.python.experimental.inference_gym.targets.logistic_regression import GermanCreditNumericLogisticRegression
from tensorflow_probability.python.experimental.inference_gym.targets.logistic_regression import LogisticRegression
from tensorflow_probability.python.experimental.inference_gym.targets.model import Model
from tensorflow_probability.python.experimental.inference_gym.targets.sparse_logistic_regression import GermanCreditNumericSparseLogisticRegression
from tensorflow_probability.python.experimental.inference_gym.targets.sparse_logistic_regression import SparseLogisticRegression
__all__ = [
'Banana',
'BayesianModel',
'GermanCreditNumericLogisticRegression',
'GermanCreditNumericSparseLogisticRegression',
'IllConditionedGaussian',
'ItemResponseTheory',
'LogisticRegression',
'Model',
'SparseLogisticRegression',
'SyntheticItemResponseTheory',
]
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
def factorial(n):
    """Return n! computed recursively.

    Handles 0! == 1 and rejects negative input.  The original base case was
    ``n == 1`` only, so factorial(0) (and any negative n) recursed forever
    until the interpreter's recursion limit was hit.
    """
    if n < 0:
        raise ValueError("factorial is undefined for negative numbers")
    if n <= 1:
        return 1
    return n * factorial(n - 1)
if __name__ == "__main__":
print """Welcome to the FACTORIAL CALCULATOR\n \nTo use the calculator, enter the number for which you want the factorial of, then press enter. In order to quit, type 'q' or 'quit'"""
while True:
x = raw_input("==> ")
if x.lower() == "q" or x.lower() == "quit":
print "Thank you for using this program"
break
else:
try:
print factorial(int(x))
except:
print "Please enter input again" | [
"w.ogaday@gmail.com"
] | w.ogaday@gmail.com |
15c2472e0bb613f8974faf6aebc33081848cd35c | f493d8c49fa2c34cb7bb6bc055ae478a6bfb068c | /bike_sharing.py | bd0f8aad95de5553ec75ef08b46807d338f1cdef | [] | no_license | yangshiyu89/bike_sharing | e69e64bd8c2f7982bfb0d84eadb777a553d96699 | bfdd57fb2eb525297901c49879e926e2bb338760 | refs/heads/master | 2021-01-21T11:30:09.827920 | 2017-03-01T16:14:44 | 2017-03-01T16:14:44 | 83,567,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,126 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 1 17:37:44 2017
@author: yangshiyu89
"""
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
# Define dl net
def Neural_Net(train_features, train_targets, val_features, val_targets, test_features):
    """Train a one-hidden-layer regression network and predict test targets.

    Parameters
    ----------
    train_features, train_targets : 2-D arrays used for optimization.
    val_features, val_targets : 2-D arrays used only to report validation loss.
    test_features : 2-D array to run the trained network on.

    Returns
    -------
    Array of shape (len(test_features), train_targets.shape[1]) holding the
    trained network's predictions.
    """
    features = tf.placeholder(tf.float32, shape=[None, train_features.shape[1]])
    targets = tf.placeholder(tf.float32, shape=[None, train_targets.shape[1]])
    # One hidden layer of 25 ReLU units, linear output layer.
    W_1 = tf.Variable(tf.truncated_normal(shape=[train_features.shape[1], 25], dtype=tf.float32, stddev=0.001))
    b_1 = tf.Variable(tf.zeros(shape=[25], dtype=tf.float32))
    W_2 = tf.Variable(tf.truncated_normal(shape=[25, train_targets.shape[1]], dtype=tf.float32, stddev=0.001))
    b_2 = tf.Variable(tf.zeros(shape=[train_targets.shape[1]], dtype=tf.float32))
    layer = tf.add(tf.matmul(features, W_1), b_1)
    layer = tf.nn.relu(layer)
    predict = tf.add(tf.matmul(layer, W_2), b_2)
    loss = tf.reduce_mean(tf.pow(targets - predict, 2))
    optimizer = tf.train.AdamOptimizer(learning_rate = 0.1).minimize(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(1001):
            sess.run(optimizer, feed_dict={features:train_features, targets:train_targets})
            if epoch%10 == 0:
                # Bug fix: this loss is computed on the *training* set but was
                # previously labeled "cost_test" in the printed output.
                cost_train = sess.run(loss, feed_dict={features:train_features, targets:train_targets})
                cost_val = sess.run(loss, feed_dict={features:val_features, targets:val_targets})
                print("epoch {:4d}; cost_train: {:.4f}; cost_val: {:.4f}".format(epoch, cost_train, cost_val))
        predict_targets = sess.run(predict, feed_dict={features:test_features})
    return predict_targets
if __name__ == "__main__":
    # Load and prepare the data
    data_path = "Bike-Sharing-Dataset/hour.csv"
    rides = pd.read_csv(data_path)
    # Dummy variables
    dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
    for each in dummy_fields:
        dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
        rides = pd.concat([rides, dummies], axis=1)
    fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
                      'weekday', 'atemp', 'mnth', 'workingday', 'hr']
    data = rides.drop(fields_to_drop, axis=1)
    # Scaling target variables
    quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
    # Store scalings in a dictionary so we can convert back later
    scaled_features = {}
    for each in quant_features:
        mean, std = data[each].mean(), data[each].std()
        scaled_features[each] = [mean, std]
        data.loc[:, each] = (data[each] - mean)/std
    # Splitting the data into training, testing, and validation sets
    # Save the last 21 days
    test_data = data[-21*24:]
    data = data[:-21*24]
    # Separate the data into features and targets
    target_fields = ['cnt', 'casual', 'registered']
    features, targets = data.drop(target_fields, axis=1), data[target_fields]
    test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
    # Hold out the last 60 days of the remaining data as a validation set
    train_features, train_targets = features[:-60*24], targets[:-60*24]
    val_features, val_targets = features[-60*24:], targets[-60*24:]
    # NOTE(review): Series indexing with [:, np.newaxis] relies on
    # behavior removed from modern pandas -- use .values[:, None] there.
    predict_targets = Neural_Net(train_features, train_targets['cnt'][:, np.newaxis], val_features, val_targets['cnt'][:, np.newaxis], test_features)
    # Check the prediction
    fig, ax = plt.subplots(figsize=(8,4))
    mean, std = scaled_features['cnt']
    # Un-scale the predictions back to ride counts.
    predictions = predict_targets*std + mean
    ax.plot(predictions[:], label='Prediction')
    ax.plot((test_targets['cnt']*std + mean).values, label='Data')
    ax.set_xlim(right=len(predictions))
    ax.legend()
    # NOTE(review): DataFrame.ix is deprecated/removed in modern pandas;
    # .loc would be the replacement here.
    dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
    dates = dates.apply(lambda d: d.strftime('%b %d'))
    ax.set_xticks(np.arange(len(dates))[12::24])
    _ = ax.set_xticklabels(dates[12::24], rotation=45)
| [
"noreply@github.com"
] | noreply@github.com |
c7a3468c7cae4eb4836690dd475d98f13f9a6ac2 | f854ef28002a3931a8d8b8d0b9cc691b8a449db3 | /home-assistant/custom_components/hacs/helpers/classes/manifest.py | c0e43b9ba3f570e1740dbe3c9e52024391ae5891 | [
"MIT"
] | permissive | Burningstone91/smart-home-setup | 030cdaa13d05fb19a82b28ea455614d3276522ab | c2f34cc8b8243bc6ce620b3f03e3e44ff28150ca | refs/heads/master | 2023-02-23T06:25:04.476657 | 2022-02-26T16:05:02 | 2022-02-26T16:05:02 | 239,319,680 | 421 | 36 | MIT | 2023-02-08T01:16:54 | 2020-02-09T14:39:06 | JavaScript | UTF-8 | Python | false | false | 1,156 | py | """
Manifest handling of a repository.
https://hacs.xyz/docs/publish/start#hacsjson
"""
from typing import List
import attr
from custom_components.hacs.exceptions import HacsException
@attr.s(auto_attribs=True)
class HacsManifest:
    """HacsManifest class."""
    # Display name of the repository (hacs.json "name").
    name: str = None
    # True when the installable content lives in the repository root.
    content_in_root: bool = False
    # True when releases are distributed as zip assets.
    zip_release: bool = False
    # Explicit single file to install, when set.
    filename: str = None
    # Raw decoded hacs.json contents this object was built from.
    # NOTE(review): mutable class-level defaults ({} / []) under attrs --
    # confirm attrs wraps these in factories; otherwise instances share them.
    manifest: dict = {}
    # Minimum HACS version required by the repository.
    hacs: str = None
    hide_default_branch: bool = False
    # Integration domains provided (hacs.json "domains").
    domains: List[str] = []
    # Country codes the repository targets; normalized to a list in from_dict.
    country: List[str] = []
    # Minimum Home Assistant version required.
    homeassistant: str = None
    persistent_directory: str = None
    iot_class: str = None
    render_readme: bool = False
    @staticmethod
    def from_dict(manifest: dict):
        """Set attributes from dicts."""
        # Guard against repositories that ship no hacs.json payload.
        if manifest is None:
            raise HacsException("Missing manifest data")
        manifest_data = HacsManifest()
        manifest_data.manifest = manifest
        # "country" may be a single string in hacs.json; normalize to a list.
        if country := manifest.get("country"):
            if isinstance(country, str):
                manifest["country"] = [country]
        # Copy every key verbatim onto the instance (unknown keys included).
        for key in manifest:
            setattr(manifest_data, key, manifest[key])
        return manifest_data
| [
"dimitri.steiner.gl@gmail.com"
] | dimitri.steiner.gl@gmail.com |
eee5f7823e7fce0bab38226e4c40fa15cbb05802 | 7c8fe9cf38de89dba5ed7afa9558739f037f9cc7 | /01-webtron/webtron/webtron.py | efce6211b9a1a257a1922e78b58141c9b4c10396 | [] | no_license | inigokintana/automating-AWS-python | 94b373dba4902414b37da00d4afe97f97007c1d6 | 4ee5ce245d03f4c339feee5c745b6b1fac982cf7 | refs/heads/master | 2022-02-17T02:34:17.097839 | 2019-08-01T18:16:55 | 2019-08-01T18:16:55 | 162,336,994 | 0 | 0 | null | 2022-01-21T19:45:27 | 2018-12-18T19:37:31 | Python | UTF-8 | Python | false | false | 1,511 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Webotron: Deploy websites with aws.
Webotron automates the process of deploying static websites to AWS.
- Configure AWS S3 buckets
- Create them
- Set them up for static website hosting
- Deploy local files to them
- Configure DNS with AWS Route 53
- Configure a Content Delivery Network and SSL with AWS CloudFront
"""
import boto3
import click
from bucket import BucketManager
session = boto3.Session(profile_name='pythonAutomation')
bucket_manager = BucketManager(session)
@click.group()
def cli():
    """Webotron deploys websites to AWS."""
    # Group entry point only; the subcommands below do the actual work.
    pass
@cli.command('list-buckets')
def list_buckets():
    """List all s3 buckets."""
    # Print one bucket resource per line.
    for s3_bucket in bucket_manager.all_buckets():
        print(s3_bucket)
@cli.command('list-bucket-objects')
@click.argument('bucket')
def list_bucket_objects(bucket):
    """List objects in an s3 bucket."""
    # Print one object resource per line.
    for s3_object in bucket_manager.all_objects(bucket):
        print(s3_object)
@cli.command('setup-bucket')
@click.argument('bucket')
def setup_bucket(bucket):
    """Create and configure S3 bucket."""
    # Create (or fetch) the bucket, then make it publicly readable and
    # enable static-website hosting.  The trailing bare ``return`` the
    # original ended with was redundant and has been dropped.
    s3_bucket = bucket_manager.init_bucket(bucket)
    bucket_manager.set_policy(s3_bucket)
    bucket_manager.configure_website(s3_bucket)
@cli.command('sync')
@click.argument('pathname', type=click.Path(exists=True))
@click.argument('bucket')
def sync(pathname, bucket):
    """Sync contents of PATHNAME to BUCKET."""
    # Recursion and upload details are delegated to BucketManager.sync.
    bucket_manager.sync(pathname, bucket)
if __name__ == '__main__':
    # Hand control to the click command group when run as a script.
    cli()
| [
"inigokintana@gmail.com"
] | inigokintana@gmail.com |
01df404873ee9e3bba62ab69c2e05d7863ae98c4 | 2ce0c770b6ebf1122cfe2cc02b943101172920f4 | /wwt_data_formats/tests/test_wtml_tree.py | 56668db83d32b8c0c1913e626cf661c4e392067c | [
"MIT"
] | permissive | WorldWideTelescope/wwt_data_formats | 48269945ab835706f75fbf56801c5f19c38c1930 | 8f3a977b87d36c5a903e3bf63ff2ea89547447bb | refs/heads/master | 2022-10-31T02:02:51.003406 | 2022-10-25T19:49:38 | 2022-10-25T19:49:38 | 225,955,212 | 2 | 4 | MIT | 2023-08-18T00:18:54 | 2019-12-04T20:54:27 | Python | UTF-8 | Python | false | false | 833 | py | # -*- mode: python; coding: utf-8 -*-
# Copyright 2019-2020 the .NET Foundation
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import os.path
from .. import cli
from . import tempdir
def test_cli(tempdir):
    "Simple smoke test to see if it runs at all."
    # The tree commands read/write the working directory, so run inside the
    # temp dir fixture and always restore the previous cwd afterwards.
    prev_dir = os.getcwd()
    try:
        os.chdir(tempdir)
        # NOTE: requires network access - downloads the builtin image sets.
        cli.entrypoint(
            [
                "tree",
                "fetch",
                "https://web.wwtassets.org/engine/assets/builtin-image-sets.wtml",
            ]
        )
        # Exercise the read-only tree subcommands against the fetched data.
        cli.entrypoint(["tree", "summarize"])
        cli.entrypoint(["tree", "print-image-urls"])
        cli.entrypoint(["tree", "print-dem-urls"])
    finally:
        # Windows can't remove the temp tree unless we chdir out of it.
        os.chdir(prev_dir)
| [
"peter@newton.cx"
] | peter@newton.cx |
6dbdb7e147eb6c53ea0f4d1c2a060d11bbd6dfd2 | 3f6c3ac0800f5915ba1d0a26e7534ac5771145db | /src/software/simulated_tests/er_force_simulator.py | 0553ffb6a5f185f43d24937bb939534a77f40e1a | [
"LGPL-3.0-only"
] | permissive | LiCody/Software | 10b02612ab93b90b423cb0be94a5aa721f059095 | 8105c5f90d5d3b3d4ffa275a10b6fd4e81a4520b | refs/heads/master | 2023-07-09T09:04:30.463000 | 2022-04-13T04:32:30 | 2022-04-13T04:32:30 | 211,555,649 | 0 | 0 | MIT | 2019-10-09T03:30:30 | 2019-09-28T20:07:17 | C | UTF-8 | Python | false | false | 7,447 | py | from subprocess import Popen
from proto.import_all_protos import *
from software.networking.threaded_unix_listener import ThreadedUnixListener
from software.networking.threaded_unix_sender import ThreadedUnixSender
from software.py_constants import *
class ErForceSimulator(object):

    """Owns a standalone er-force simulator process and the unix senders and
    listeners used to exchange protobuf messages with it."""

    def __init__(self, runtime_dir="/tmp/tbots"):
        """Runs our standalone er-force simulator binary and sets up the unix
        sockets to communicate with it

        :param runtime_dir: The unix path to run everything

        """
        # inputs to er_force_simulator_main
        self.sim_tick_sender = ThreadedUnixSender(runtime_dir + SIMULATION_TICK_PATH)
        self.world_state_sender = ThreadedUnixSender(runtime_dir + WORLD_STATE_PATH)
        self.blue_world_sender = ThreadedUnixSender(runtime_dir + BLUE_WORLD_PATH)
        self.yellow_world_sender = ThreadedUnixSender(runtime_dir + YELLOW_WORLD_PATH)
        self.blue_primitive_set_sender = ThreadedUnixSender(
            runtime_dir + BLUE_PRIMITIVE_SET
        )
        self.yellow_primitive_set_sender = ThreadedUnixSender(
            runtime_dir + YELLOW_PRIMITIVE_SET
        )

        # outputs from er_force_sim_main
        self.ssl_wrapper_listener = ThreadedUnixListener(
            runtime_dir + SSL_WRAPPER_PACKET_PATH, SSL_WrapperPacket
        )
        self.blue_robot_status_listener = ThreadedUnixListener(
            runtime_dir + BLUE_ROBOT_STATUS_PATH, RobotStatus
        )
        self.yellow_robot_status_listener = ThreadedUnixListener(
            runtime_dir + YELLOW_ROBOT_STATUS_PATH, RobotStatus,
        )

        # Cached WorldState; mutated in-place by the setup_* helpers and
        # re-sent whole on every change.
        self.world_state = WorldState()
        self.simulator_process = Popen(["software/er_force_simulator_main"])

    def __setup_robots(self, robot_locations, team_colour):
        """Initializes the world from a list of robot locations

        :param robot_locations: A list of robot locations (index is robot id)
        :param team_colour: The color (either "blue" or "yellow")

        """
        if "blue" in team_colour:
            robot_map = self.world_state.blue_robots
        else:
            robot_map = self.world_state.yellow_robots

        # Robots start stationary, facing orientation 0, at the given spots.
        for robot_id, robot_location in enumerate(robot_locations):
            robot_map[robot_id].CopyFrom(
                RobotState(
                    global_position=Point(
                        x_meters=robot_location.x(), y_meters=robot_location.y()
                    ),
                    global_orientation=Angle(radians=0),
                    global_velocity=Vector(x_component_meters=0, y_component_meters=0),
                    global_angular_velocity=AngularVelocity(radians_per_second=0),
                )
            )

        self.setup_world(self.world_state)

    def setup_blue_robots(self, robot_locations):
        """Initializes the world from a list of robot locations

        :param robot_locations: A list of robot locations (index is robot id)

        """
        self.__setup_robots(robot_locations, "blue")

    def setup_yellow_robots(self, robot_locations):
        """Initializes the world from a list of robot locations

        :param robot_locations: A list of robot locations (index is robot id)

        """
        self.__setup_robots(robot_locations, "yellow")

    def setup_ball(self, ball_position, ball_velocity, distance_from_ground=0):
        """Setup the ball with the x, y coordinates in meters

        :param ball_position: A tuple with the x,y coordinates
        :param ball_velocity: A tuple with the x,y velocity components
        :param distance_from_ground: How high up to start the ball

        """
        self.world_state.ball_state.CopyFrom(
            BallState(
                global_position=Point(
                    x_meters=ball_position.x(), y_meters=ball_position.y(),
                ),
                global_velocity=Vector(
                    x_component_meters=ball_velocity.x(),
                    y_component_meters=ball_velocity.y(),
                ),
                distance_from_ground=distance_from_ground,
            )
        )
        self.setup_world(self.world_state)

    def setup_world(self, world_state):
        """Pass in a world_state proto directly to setup the simulator

        :param world_state: The world state to initialize with

        """
        self.world_state_sender.send(world_state)

    def __get_sensor_proto(self, ssl_wrapper, robot_status_listener):
        """Helper function to create a sensor proto

        :param ssl_wrapper: The ssl_wrapper packet to put in the sensor proto
        :param robot_status_listener: The robot status listener (blue or yellow)
        :returns: A sensor proto with all queued robot statuses drained from
                  the listener

        """
        sensor_proto = SensorProto()

        if ssl_wrapper:
            sensor_proto.ssl_vision_msg.CopyFrom(ssl_wrapper)

        # Drain every queued RobotStatus message from the listener.
        robot_status = robot_status_listener.get_most_recent_message()
        packets = []
        while robot_status is not None:
            packets.append(robot_status)
            robot_status = robot_status_listener.get_most_recent_message()

        sensor_proto.robot_status_msgs.extend(packets)
        return sensor_proto

    def get_blue_sensor_proto(self, ssl_wrapper):
        """Returns the blue sensor proto

        :param ssl_wrapper: The wrapper to pack in the sensor proto

        """
        return self.__get_sensor_proto(ssl_wrapper, self.blue_robot_status_listener)

    def get_yellow_sensor_proto(self, ssl_wrapper):
        """Returns the yellow sensor proto

        :param ssl_wrapper: The wrapper to pack in the sensor proto

        """
        return self.__get_sensor_proto(ssl_wrapper, self.yellow_robot_status_listener)

    def get_ssl_wrapper_packet(self, block=False):
        """Get wrapper packet

        :param block: If true, block until we receive a packet
        :return: SSL_WrapperPacket

        """
        return self.ssl_wrapper_listener.get_most_recent_message(block)

    def tick(self, duration_ms):
        """Tick the simulator with the given duration

        :param duration_ms: The duration to step the sim

        """
        tick = SimulatorTick()
        tick.milliseconds = duration_ms
        self.sim_tick_sender.send(tick)

    def send_blue_primitive_set_and_world(self, world, primitive_set):
        """Blue primitive set and world

        :param world: The world msg to send
        :param primitive_set: The primitive set to send

        """
        self.blue_world_sender.send(world)
        self.blue_primitive_set_sender.send(primitive_set)

    def send_yellow_primitive_set_and_world(self, world, primitive_set):
        """Yellow primitive set and world

        :param world: The world msg to send
        :param primitive_set: The primitive set to send

        """
        self.yellow_world_sender.send(world)
        self.yellow_primitive_set_sender.send(primitive_set)

    def stop(self):
        """Stop all listeners and senders.

        Bug fix: this was declared ``def stop():`` (no ``self``) while the
        body referenced ``self``, so calling ``simulator.stop()`` always
        raised TypeError.

        NOTE(review): the spawned simulator_process is not terminated here;
        confirm whether callers expect stop() to also kill the binary.
        """
        for unix_socket in [
            self.sim_tick_sender,
            self.world_state_sender,
            self.blue_world_sender,
            self.yellow_world_sender,
            self.blue_primitive_set_sender,
            self.yellow_primitive_set_sender,
            self.ssl_wrapper_listener,
            self.blue_robot_status_listener,
            self.yellow_robot_status_listener,
        ]:
            unix_socket.force_stop()
| [
"noreply@github.com"
] | noreply@github.com |
59d3d0e0e07d6c1d41095d2ceff9107905522820 | 32fafd0c16351743360f00b3ca2ecac7488acef2 | /example3/populate_orders.py | fb42ae27516644797345c0949f942f4ff8a509c8 | [] | no_license | unix-way-project/postgresql | 128de649100d0c068cf2cef4a1f0abf48abeed88 | 631ddf194f0f799e97abb37f0b3c9b4f5f4b99a9 | refs/heads/master | 2023-03-17T03:59:40.660690 | 2021-03-14T20:19:39 | 2021-03-14T20:19:39 | 338,542,910 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | py |
import psycopg2
import psycopg2.extras
import faker
import random
def add_order(connection):
    """Insert one random order and debit the buyer's balance.

    Picks a random user and a random item, inserts a row into ``orders``,
    and subtracts the item price from the user's balance - all in one
    transaction.  On any database error the transaction is rolled back and
    the failure is logged.

    :param connection: an open psycopg2 connection
    """
    # Pre-initialise so the error handler below can safely reference these
    # even when the failure happens before they are assigned; the original
    # code raised NameError in that case, masking the real database error.
    user_pid = item_pid = item_price = order_pid = None
    cursor = None
    try:
        cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)

        # Choose random user
        cursor.execute('''
            SELECT user_pid
            FROM users
            ORDER BY random() DESC
            LIMIT 1;
        ''')
        user = dict(cursor.fetchone())
        user_pid = user['user_pid']

        # Choose random item
        cursor.execute('''
            SELECT item_pid, item_price
            FROM items
            ORDER BY random() DESC
            LIMIT 1;
        ''')
        item = dict(cursor.fetchone())
        item_pid = item['item_pid']
        item_price = item['item_price']

        # Record the order (parameterized query - safe from SQL injection).
        cursor.execute('''
            INSERT INTO orders(user_pid, item_pid, order_price)
            VALUES(%s, %s, %s)
            RETURNING order_pid
        ''', (user_pid, item_pid, item_price))
        order = dict(cursor.fetchone())
        order_pid = order['order_pid']

        # Debit the buyer's balance by the item price.
        cursor.execute('''
            UPDATE users
            SET user_balance = user_balance - %s
            WHERE user_pid = %s
        ''', (item_price, user_pid))
    except (Exception, psycopg2.DatabaseError) as error:
        connection.rollback()
        # Guard: cursor creation itself may have been what failed.
        if cursor is not None:
            cursor.close()
        print("[ ERROR ] User: %s failed to order item %s for price: %s" % (
            user_pid,
            item_pid,
            item_price
        ))
        print(str(error))
        return

    connection.commit()
    cursor.close()
    print("[ ORDER ] User: %s ordered item %s for price: %s, order number: %s" % (
        user_pid,
        item_pid,
        item_price,
        order_pid
    ))
def connect():
    """Connect to the demo PostgreSQL database and create 100000 random
    orders by calling :func:`add_order` repeatedly."""
    print('Connecting to the PostgreSQL database...')
    # NOTE(review): credentials are hard-coded; acceptable for this lab
    # exercise, but real code should read them from environment/config.
    connection = psycopg2.connect(
        host="192.168.122.51",
        database="unixway1",
        user="unixway1user",
        password="password1"
    )
    for _ in range(100000):
        add_order(connection=connection)
if __name__ == '__main__':
    # Kick off the bulk order-population run when executed as a script.
    connect()
| [
"jackalsh@gmail.com"
] | jackalsh@gmail.com |
7343fb8defbea9a314d6f3be0e874c35f13e8940 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/dlmmin002/question3.py | 7a33ac4f48f3eddf6202f2094e5bd3b2da9e4fde | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | #personal spam message
#nolwazi dlamini
#3 march 2014
# Course exercise: read a name, surname, amount and country, then print a
# personalised "inheritance scam" spam letter.
name = input("Enter first name: \n")
surname = input("Enter last name: \n")
# NOTE(review): eval() on raw user input executes arbitrary expressions;
# kept for identical behaviour, but float(input(...)) would be safer.
money = eval(input("Enter sum of money in USD: \n"))
country = input("Enter country name: \n")

print(f"\nDearest {name}")
print("It is with a heavy heart that I inform you of the death of my father,")
print(f"General Fayk {surname}, your long lost relative from Mapsfostol.")
print(f"My father left the sum of {money}USD for us, your distant cousins. ")
print(f"Unfortunately, we cannot access the money as it is in a bank in {country}.")
print("I desperately need your assistance to access this money.")
print(f"I will even pay you generously, 30% of the amount - {money*0.3}USD,")
print("for your help. Please get in touch with me at this email address asap.")
print("Yours sincerely")
print(f"Frank {surname}")
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
d0bcf451fb15a4a5e6e3ce2d28b88e1a6043437e | 3bbaf5a5dd1d19f207c01f14e06a58f6faf9a1f2 | /helpers.py | 8735343af6dcaf46167a733d832f002d37073425 | [
"MIT"
] | permissive | kelly4strength/hmchallenge | f06e1a5288a73ba523e39e5cfbf0d36c33148478 | 2195d8fa2d9acdad7a088b1422a7fb5aa3b06b28 | refs/heads/master | 2021-01-19T10:45:55.459291 | 2017-02-25T05:42:16 | 2017-02-25T05:42:16 | 82,221,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | """helper functions"""
# from model import word, user_guess, partial_word
# import unnecessary
# def show_correct_guess_letter(word, user_guess, partial_word):
# """function to show current guess letter if it is in the word"""
# for i in range(len(word)):
# if user_guess != word[i]:
# partial_word = partial_word + "_ "
# else:
# partial_word = partial_word + word[i]
# return partial_word
def generate_partial_word(word, correct_guess_list):
    """Return *word* with every unguessed letter masked as an underscore.

    Each letter of ``word`` that appears in ``correct_guess_list`` is kept;
    every other letter becomes ``"_"``.

    Fix: the previous inner loop appended the letter once per matching
    entry, so a duplicate guess in the list duplicated letters in the
    output.  A membership test emits each letter exactly once.
    """
    return "".join(
        letter if letter in correct_guess_list else "_"
        for letter in word
    )
#if there is no match to word[i] then add a underscore for that index
# only append underscore after all matches are determined
| [
"kellyhoffer@Kellys-MacBook-Pro-2.local"
] | kellyhoffer@Kellys-MacBook-Pro-2.local |
d24f43978d7bc3dc1bd471970dceef5f2bbfb976 | 8d402df39c18eba7e1c86c762f205c944357c5df | /setup/brython/make_file_system.py | b94bae446d191d813018f82085a1dbda517fd43c | [
"BSD-3-Clause"
] | permissive | brython-dev/brython | 87cc023e25550dec9ce459ba68774189f33712b6 | b33958bff0e8c7a280babc30232dc389a2500a7a | refs/heads/master | 2023-09-04T04:49:29.156209 | 2023-09-01T06:36:08 | 2023-09-01T06:36:08 | 24,046,239 | 6,569 | 625 | BSD-3-Clause | 2023-07-05T06:13:32 | 2014-09-15T06:58:21 | Python | UTF-8 | Python | false | false | 2,113 | py | import json
import os
import stat
import sys
import binascii
def make(vfs_name, prefix=None):
    """Build a Brython Virtual File System bundle from the current directory.

    Invoked via ``python -m brython --make_file_system <vfs_name> <prefix>``.

    Walks the current directory tree and writes ``<vfs_name>.vfs.js`` into
    it: a Javascript file calling ``__BRYTHON__.add_files`` with a mapping
    from relative file paths to ``{content, ctime, mtime}`` records, where
    ``content`` is the base64-encoded file body.  If *prefix* is given it
    is prepended (slash-separated) to every key.  Previously generated
    ``*.vfs.js`` files are skipped.  Note: Python files stored this way are
    data only - programs cannot import them; use ``--modules`` for that.
    """
    root_dir = os.getcwd()
    out_name = f"{vfs_name}.vfs.js"
    virtual_dir = prefix.split("/") if prefix else []
    print("virtual dir", virtual_dir)

    entries = {}
    for dirpath, dirnames, filenames in os.walk(root_dir):
        # Path components of the current directory relative to the root.
        if dirpath == root_dir:
            rel_parts = []
        else:
            rel_parts = dirpath[len(root_dir) + len(os.sep):].split(os.sep)
        for filename in filenames:
            if filename.endswith(".vfs.js"):
                # Never bundle previously generated VFS files.
                continue
            key = "/".join(virtual_dir + rel_parts + [filename])
            with open(os.path.join(dirpath, filename), "rb") as f:
                # File content is base64-encoded
                encoded = binascii.b2a_base64(f.read()).decode('ascii')
                st = os.fstat(f.fileno())
            entries[key] = {
                "content": encoded,
                "ctime": st.st_ctime,
                "mtime": st.st_mtime
            }

    print(list(entries))
    with open(out_name, "w", encoding="utf-8") as out:
        out.write("__BRYTHON__.add_files(")
        json.dump(entries, out, indent=4)
        out.write(")")
"quentel.pierre@orange.fr"
] | quentel.pierre@orange.fr |
ae137b1b1b702ea94707b85faf4024ec372f1832 | 83d36e8795b19d537fab32c4ced52359561a6b3b | /ingredients/apps.py | b0fea569eed9b6ec9258bfa94c37a231c4b4fcd0 | [] | no_license | vubon/django-graphql | b1325ebc31136d19b5ca5b5fd85c6fea98972e6c | 9586b5b5098dfeb25aa26521b24bc6c3beb333bc | refs/heads/master | 2020-04-14T08:02:04.957523 | 2019-12-05T05:02:46 | 2019-12-05T05:02:46 | 163,727,935 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from django.apps import AppConfig
class IngrdientsConfig(AppConfig):
name = 'ingredients'
| [
"vubon.roy@gmail.com"
] | vubon.roy@gmail.com |
f71d7ca732dbb65b50967732e07d0777f557e075 | b0a5efbd01e9614392be8eaea595f7f8efda9bfb | /lung.py | d499312a7bcfc052b19f3dd28bfb66694d3bce76 | [] | no_license | Ismail-w/cov19_imgprocess | 4ffc117a26063d3114077e703381653a5db13116 | e707f4574f9c41ab4e58cfa8bcecc5e8d1274f65 | refs/heads/main | 2023-03-31T09:17:09.974067 | 2021-04-08T10:23:12 | 2021-04-08T10:23:12 | 355,859,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py |
import cv2
import numpy as np
# Lung X-ray demo: show the raw image, a resized copy, grayscale,
# binary threshold, Canny edges, and finally the detected contours.
img = cv2.imread('Covid1.png')
# cv2.imread returns None for a missing/unreadable file; fail with a clear
# message instead of an AttributeError on img.size below.
if img is None:
    raise SystemExit("Could not read 'Covid1.png'")
print("Image Properties")
print("- Number of Pixels: " + str(img.size))
print("- Shape/Dimensions: " + str(img.shape))
cv2.imshow('org', img)
cv2.waitKey(0)

# (Removed an unused cv2.split(img) call - blue/green/red were never used.)
resized_image = cv2.resize(img, (200, 200))
print("Image Properties")
print("- Number of Pixels: " + str(resized_image.size))
print("- Shape/Dimensions: " + str(resized_image.shape))
cv2.imshow('res', resized_image)
cv2.waitKey(0)

img_gray = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
cv2.imshow('grayscale', img_gray)
cv2.waitKey(0)

# Binarize at intensity 125; the unused retval is discarded.
_, threshold = cv2.threshold(img_gray, 125, 255, cv2.THRESH_BINARY)
cv2.imshow('threshold', threshold)
cv2.waitKey(0)

edged = cv2.Canny(img_gray, 100, 200)
cv2.imshow('Edge', edged)
cv2.waitKey(0)

# NOTE(review): findContours returns 2 values on OpenCV 4 (3 on OpenCV 3);
# this unpacking assumes OpenCV 4 - confirm the installed version.
contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
print("Number of Contours found = " + str(len(contours)))
cv2.drawContours(resized_image, contours, -1, (0, 255, 0), 3)
cv2.imshow('Contours', resized_image)
cv2.waitKey(0)
cv2.destroyAllWindows()  # release display windows when the demo finishes
| [
"noreply@github.com"
] | noreply@github.com |
53d2e5d291801ab5cf03ead215d5c4ba7b43273e | 947fa6a4a6155ffce0038b11f4d743603418ad68 | /.c9/metadata/environment/fb_post_learning/fb_post_learning/settings/base_aws_s3.py | 50a8801a8acf4d0f51a64b61ae58285d2bc56de6 | [] | no_license | bharathi151/bharathi_diyyala | bd75e10639d7d22b332d5ce677e7799402dc4984 | 99f8657d010c790a0e4e4c9d6b57f81814784eb0 | refs/heads/master | 2022-11-21T12:43:48.401239 | 2020-07-23T09:05:52 | 2020-07-23T09:05:52 | 281,903,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | {"filter":false,"title":"base_aws_s3.py","tooltip":"/fb_post_learning/fb_post_learning/settings/base_aws_s3.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":0,"column":0},"end":{"row":0,"column":0},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1589610124498,"hash":"94324fee64bfb279ca1b0e507e1414c07b06fab6"} | [
"bharathi151273@gmail.com"
] | bharathi151273@gmail.com |
21571df9c1f56860a1f9f82333fa6ad56924aaa8 | 51c255de526c7f0b6a0f8b232a184ba69128e7af | /02_QUICKVIEW_hand_made_quick_view_classification/02_1_classification_basic_perceptron.py | 6283dbafd55f24ccb14981fdf16675031075f88f | [] | no_license | jerrychen44/python_machine_learning_sr | eea7e2b873763c15b8582af05dfcd9b20c589123 | bcf5fc0188d4aa956af46496c4498a2bb42bdc3d | refs/heads/master | 2021-01-11T04:16:58.669737 | 2016-11-05T07:46:55 | 2016-11-05T07:46:55 | 71,191,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,400 | py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Directory containing this script; the iris CSV lives in ./source below it.
filepath=os.path.dirname(os.path.realpath(__file__))#root, where this script file is.
source_folder='source'
print(filepath)
data_csv_path=filepath+'/'+source_folder+'/iris.csv'
################################
#implement the perceptron class
###############################
class Perceptron(object):
    """Rosenblatt perceptron for binary (-1/+1) classification.

    Parameters
    ----------
    eta : float
        Learning rate (between 0.0 and 1.0).
    n_iter : int
        Number of passes (epochs) over the training dataset.

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; ``w_[0]`` is the bias unit.
    errors_ : list
        Number of misclassified samples in each epoch.
    """

    def __init__(self, eta=0.01, n_iter=10):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Learn the weights from training data.

        Parameters
        ----------
        X : {array-like}, shape = [n_samples, n_features]
            Training vectors.
        y : array-like, shape = [n_samples]
            Target labels (-1 or +1).

        Returns
        -------
        self : object
        """
        # One extra weight slot for the bias term at index 0.
        self.w_ = np.zeros(1 + X.shape[1])
        self.errors_ = []

        for _ in range(self.n_iter):
            misclassified = 0
            for sample, label in zip(X, y):
                # delta is 0 for a correct prediction, +-2*eta otherwise.
                delta = self.eta * (label - self.predict(sample))
                if delta != 0.0:
                    self.w_[0] += delta
                    self.w_[1:] += delta * sample
                    misclassified += 1
            self.errors_.append(misclassified)
        return self

    def net_input(self, X):
        """Calculate the net input (weighted sum plus bias)."""
        return self.w_[0] + np.dot(X, self.w_[1:])

    def predict(self, X):
        """Return the class label (+1 or -1) after the unit step."""
        return np.where(self.net_input(X) < 0.0, -1, 1)
#####################
#loading data set
# ref: https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data
# Attribute Information:
# 1. sepal length in cm
# 2. sepal width in cm
# 3. petal length in cm
# 4. petal width in cm
# 5. class:
# -- Iris Setosa
# -- Iris Versicolour
# -- Iris Virginica
##########################
def read_csv_pd():
    """Load the iris data set (headerless CSV) and return it as a DataFrame.

    Echoes the last rows and the shape so the run log shows what was loaded.
    """
    frame = pd.read_csv(data_csv_path, header=None)
    print(frame.tail())
    print(frame.shape)
    return frame
def plot_2d_data(df):
    """Scatter-plot the first two iris classes and return (X, y).

    X holds columns 0 and 2 of the first 100 rows; y is encoded as
    -1 (Iris-setosa) / +1 (Iris-versicolor).
    """
    # The first 100 rows cover exactly the setosa and versicolor samples.
    labels = df.iloc[0:100, 4].values
    y = np.where(labels == 'Iris-setosa', -1, 1)
    X = df.iloc[0:100, [0, 2]].values

    setosa, versicolor = X[:50], X[50:100]
    plt.scatter(setosa[:, 0], setosa[:, 1],
                color='red', marker='o', label='setosa')
    plt.scatter(versicolor[:, 0], versicolor[:, 1],
                color='blue', marker='x', label='versicolor')
    # NOTE(review): axis labels look swapped relative to the columns plotted
    # (X[:, 0] is sepal length) - kept as-is to preserve existing output.
    plt.xlabel('petal length [cm]')
    plt.ylabel('sepal length [cm]')
    plt.legend(loc='upper left')
    plt.tight_layout()
    plt.show()
    return X, y
##############
#Training the perceptron model
############
def train_perceptron_model(X, y):
    """Fit a Perceptron on (X, y), plot its error history, and return it."""
    model = Perceptron(eta=0.1, n_iter=10)
    model.fit(X, y)

    # Misclassification count per epoch; the model has converged once the
    # curve hits zero.
    epochs = range(1, len(model.errors_) + 1)
    plt.plot(epochs, model.errors_, marker='o')
    plt.xlabel('Epochs')
    plt.ylabel('Number of misclassifications')
    plt.tight_layout()
    plt.show()

    return model
#A function for plotting decision regions
def plot_decision_regions(X, y, classifier, resolution=0.02):
    """Draw the classifier's decision surface plus the labelled samples.

    X is a 2-feature matrix, y the labels, resolution the grid step.
    """
    from matplotlib.colors import ListedColormap

    # setup marker generator and color map (one color/marker per class)
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # plot the decision surface: predict every point of a dense grid that
    # extends one unit beyond the data range in each direction
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # plot class samples, colored with the same map as the surface
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=cmap(idx),
                    marker=markers[idx], label=cl)
def plot_decision_plan(ppn, X, y):
    """Show the trained perceptron's decision regions over the data."""
    plot_decision_regions(X, y, classifier=ppn)
    # NOTE(review): these labels mirror plot_2d_data and look swapped
    # relative to the plotted columns - kept to preserve existing output.
    plt.xlabel('sepal length [cm]')
    plt.ylabel('petal length [cm]')
    plt.legend(loc='upper left')
    plt.tight_layout()
    plt.show()
    return 0
def main():
    """Run the demo end-to-end: load data, plot it, train, plot regions."""
    data_df=read_csv_pd()
    X,y=plot_2d_data(data_df)
    ppn=train_perceptron_model(X,y)
    plot_decision_plan(ppn,X,y)
    return 0

# Executed immediately on import/run (no __main__ guard in the original).
main()
| [
"jerrychen040@gmail.com"
] | jerrychen040@gmail.com |
fa3510c04357a2e5a1420c7e718ab0f2cde76df7 | f9aecf1d54f9919f48b523ce7e68397a13de4db6 | /cgi-bin/XmlToJson.py | 4757bbb9790b7b26fb990dd842f120c092ae8217 | [] | no_license | zestroly/www | 2ae2dc2aa3de5f47cf03e310e071049ff7790f04 | a7e15dac6e0494dcdf298edda8e1e3e2c81ae3c8 | refs/heads/master | 2021-01-18T03:53:05.678155 | 2017-06-12T01:04:14 | 2017-06-12T01:04:14 | 85,780,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | #!/usr/bin/python3.6
from xml.dom.minidom import parse
import xml.dom.minidom
import os
import sys,json
# CGI entry point: argv[1] carries a JSON settings document from the web UI.
json_str=sys.argv[1]
#print(json_str)
#print(type(json_str))
json_dict=json.loads(json_str)

# Convert the JSON settings into the <Param><camera>... XML the device
# tools expect.  Keys are processed in order; a 'data' key ends the loop.
xmlstr = ""
for key in json_dict:
    if(key == 'data'):
        break;
    xmlstr +="<Param ErrorCode=\"0\">"
    xmlstr += "<camera>"
    xmlstr += "<" + key + ">"
    for i in json_dict[key]:
        xmlstr += "<" + i + " "
        for j in json_dict[key][i]:
            # NOTE(review): attribute values are not XML-escaped; quotes or
            # '<' in the JSON values would break the generated document.
            xmlstr +=" " + j + "=\"" +json_dict[key][i][j] + "\""
        xmlstr += "/>"
    xmlstr += "</" + key + ">"
    xmlstr += "</camera>"
    xmlstr += "</Param>"

# Push the settings to the device, then read back the resulting config.
f=open('/tmp/temp.xml', 'w')
f.write(xmlstr);
f.close();
os.system("/home/root/bin/XmlDevice set /tmp/temp.xml > /dev/null")
os.system("/home/root/bin/XiXmlDevice get /home/root/config/camera.xml > /dev/null")
DOMTree = xml.dom.minidom.parse("/home/root/config/camera.xml")
root=DOMTree.documentElement
cameraNodes=root.getElementsByTagName('camera')
def getAttrbute(node):
    """Serialise a DOM element's attributes as a JSON object fragment.

    :param node: an xml.dom.minidom element
    :returns: e.g. ``{"width":"640","height":"480"}`` (``{}`` when the
              element has no attributes)

    Fix: each key/value now goes through json.dumps, so quotes and
    backslashes in attribute values are escaped; the previous manual
    quoting produced malformed JSON for such values.  Output for plain
    values is unchanged.
    """
    pairs = []
    for key in node.attributes.keys():
        pairs.append(json.dumps(key) + ":" + json.dumps(node.attributes[key].value))
    return "{" + ",".join(pairs) + "}"
def metaNode(cells, str):
    """Append a JSON fragment '"<node name>":{...}' for *cells* onto *str*.

    Each non-text child element contributes '"<child name>":<attributes>'
    via getAttrbute.

    NOTE(review): the parameter 'str' shadows the builtin, and the comma
    test below counts whitespace text nodes too, so separator placement
    depends on the XML file's formatting - verify against real output.
    """
    str +="\""+cells.nodeName+"\"" + ":"
    str+='{'
    i=0
    for cell in cells.childNodes:
        i=i+1
        if cell.nodeType == 3:
            # Skip text/whitespace nodes (TEXT_NODE == 3).
            continue
        str += "\""+cell.nodeName+"\"" + ":"
        str += getAttrbute(cell)
        if i < (cells.childNodes.length-1):
            str +=","
    str += "}"
    return str
# Re-serialise the fetched camera XML back to JSON and print it for the
# web client.  NOTE(review): 'str' shadows the builtin, and the comma test
# counts whitespace text nodes, so separators depend on file formatting.
str="{"
for camerchild in cameraNodes:
    k=0
    for cell in camerchild.childNodes:
        k=k+1
        if cell.nodeType == 3:
            # Skip text/whitespace nodes (TEXT_NODE == 3).
            continue
        str = metaNode(cell,str)
        if k < (camerchild.childNodes.length-1):
            str += ","
str+="}"
print (str)
| [
"zestroly@126.com"
] | zestroly@126.com |
c0331309bf2f0dab4193eca0be842d750c839dc8 | c03b615ca32a191672be6ed8d5de1624db9409b0 | /p10.py | 8f59c18d38846c9708889c19091d76c70cae52c3 | [] | no_license | Hemangi3598/chap-8_p10 | b7b768120c8fbaf85e01b8aa699223a66ce75f8b | 8736c342b578100b4f0fada1f84edf6a511e753b | refs/heads/main | 2023-08-07T08:22:48.537950 | 2021-09-19T07:02:29 | 2021-09-19T07:02:29 | 408,056,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | # waopp to add students and their rno in class
class student:
def __init__(self, rno, name):
self.rno = rno
self.name = name
def show(self):
print("rno = ", self.rno)
print("name = ",self.name)
data = []
while True:
op = int(input(" 1 add, 2 view and 3 exit"))
if op == 1:
rno = int(input("enter rno "))
name = input("enter name ")
s = student(rno, name)
data.append(s)
elif op == 2:
for d in data:
d.show()
elif op == 3:
break
else:
print("invalid option ") | [
"noreply@github.com"
] | noreply@github.com |
6ddcba986a72f513f2c34d92c5c53b6cd277e169 | b309e6a809cb722f0ee9c42f6fcfacbd9495ad43 | /KMP.py | 96fb793e047549f5d87c349b2d7d49be285f16c9 | [] | no_license | leesen934/leetcode_practices | 0bee0bef00e7459bd4ecbb5a2f98436c270889ad | c93f15bee2ee2eea2e6f276c4907280d110c0467 | refs/heads/master | 2020-03-28T14:24:31.362300 | 2018-09-13T08:19:47 | 2018-09-13T08:19:47 | 148,484,508 | 0 | 0 | null | 2018-09-12T13:26:23 | 2018-09-12T13:26:22 | null | UTF-8 | Python | false | false | 1,029 | py | def getNext(p):
j = 0
k = -1 # next[j]的值(也就是k)表示,当P[j] != T[i]时,j指针的下一步移动位置。
next_p = [-1] * len(p)
while j < len(p) - 1:
print("p[k]: " + p[k] + ", p[j]: " + p[j])
if k == -1 or p[k] == p[j]:
j += 1
k += 1
if p[j] == p[k]: # 当两个字符相等时要跳过
next_p[j] = next_p[k]
else:
next_p[j] = k
else:
k = next_p[k]
print(next_p)
return next_p
def KMP(s, p):
i = 0 # 主串位置
j = 0 # 模式串位置
next_p =getNext(p)
while i < len(s) and j < len(p):
if j == -1 or s[i] == p[j]: # 当j为-1时,要移动的是i,当然j也要归0
i += 1
j += 1
else:
j = next_p[j]
if j == len(p):
return i - j
else:
return -1
if __name__ == "__main__":
s = "abcabcabcabcabxabc"
p = "abcabx"
p = "abbcabcaabbcaa"
print(KMP(s, p)) | [
"lichunchn0516@gmail.com"
] | lichunchn0516@gmail.com |
ede10fd47f66d7aee777757eb88519c3ff63a7ee | e7de3d7139e73589e2172384fd114ce0c3e3655c | /test_template.py | e7fcca2adac6e7bd26f9fd3df3517004ef23fd62 | [
"BSD-3-Clause"
] | permissive | jnieuwen/python-default-requirements | 5384d08c50c844e43999409191d1536cb4558164 | f04df6da2b8de0ff20e6c24cea7769383254a552 | refs/heads/master | 2021-04-27T04:21:33.206701 | 2020-06-23T13:47:02 | 2020-06-23T13:47:02 | 122,730,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | # Set up the paths.
import os
import sys
sys.path.append(os.path.abspath('.'))
import themodule
def test_hallo():
assert themodule.hallo() == "hallo"
def test_nohallo():
assert themodule.hallo() != "blaat"
| [
"jeroen.van.nieuwenhuizen@jeroen.se"
] | jeroen.van.nieuwenhuizen@jeroen.se |
84bc2ed6db473e44610fce5decafd84089166c40 | 6e3396980eeee1d8d55e4afbc6148711e9e9a342 | /SPOJ/py/INTEST.py | 63e9b4121738ad4488c4a37461310bf941213675 | [] | no_license | arunpatala/scala-learn | c20ca717899d1752ddb20dfbe4f6839217ad3ac8 | b9e52aec74d360a18af99e841c6b598f2b0165b6 | refs/heads/master | 2021-01-22T23:43:26.608433 | 2015-07-31T17:30:54 | 2015-07-31T17:30:54 | 38,426,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | a = str.split(raw_input())
N = int(a[0])
K = int(a[1])
ret = 0;
for i in range(0,N):
if(int(raw_input())%K==0):
ret = ret + 1
print ret
| [
"arunpatala@gmail.com"
] | arunpatala@gmail.com |
8239bdcbcbb37d192f3f3cff9af9527dbcdac038 | f66dfa2fc9bfbcc97259eb17e8b54a80727e1ce6 | /migrations/versions/061243403ebc_.py | 5a6c6f619b346ba996dd641501b2ef2f28fe2271 | [] | no_license | tam876/info3180-lab5 | 4ec8ea45e4ca513bd8cd6cf163d579b5ffba1ffa | 2fa6a57dd409788e52a1bee69c0048e843e478b7 | refs/heads/master | 2021-01-26T08:47:04.867508 | 2020-02-29T00:21:47 | 2020-02-29T00:21:47 | 243,389,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | """empty message
Revision ID: 061243403ebc
Revises:
Create Date: 2020-02-28 20:57:43.760065
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '061243403ebc'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user_profiles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('first_name', sa.String(length=80), nullable=True),
sa.Column('last_name', sa.String(length=80), nullable=True),
sa.Column('username', sa.String(length=80), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('user_profiles')
# ### end Alembic commands ###
| [
"noreply@github.com"
] | noreply@github.com |
c8ef83211988cefbe18916ab9fd7f4531c57ab0d | f9a4e1c39d722daab7de1f7a5ce6c2634fa53845 | /xorGame.py | efd55f4fec19c6b1a3bb13926c7f6b0e05d94837 | [] | no_license | EugenenZhou/leetcode | b26e4198729dd9c42dccb4cdbaa952d9c50086e1 | 03a0316ac317ae48adf2d05be62d536e1b5f2620 | refs/heads/master | 2020-06-17T16:29:54.746645 | 2019-09-21T05:36:23 | 2019-09-21T05:36:23 | 195,977,242 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | # 一个黑板上写着一个非负整数数组 nums[i] 。
# 小红和小明轮流从黑板上擦掉一个数字,小红先手。
# 如果擦除一个数字后,剩余的所有数字按位异或运算得出的结果等于 0 的话,当前玩家游戏失败。
# (另外,如果只剩一个数字,按位异或运算得到它本身;如果无数字剩余,按位异或运算结果为0。)
# 换种说法就是,轮到某个玩家时,如果当前黑板上所有数字按位异或运算结果等于 0,这个玩家获胜。
# 假设两个玩家每步都使用最优解,当且仅当小红获胜时返回 true。
######################################################################
def xorGame(nums):
    """Return True when the first player (Xiao Hong) is guaranteed to win.

    With optimal play the first player wins exactly when the XOR of all
    numbers on the board is already 0 at the start, or when the board holds
    an even count of numbers.

    Args:
        nums: sequence of non-negative integers written on the blackboard.

    Returns:
        bool: True when the first player wins with optimal play.
    """
    # Fold all values together with XOR.  (Local was previously named `re`,
    # which shadowed the stdlib `re` module; also removed a dead `pass`.)
    xor_all = 0
    for value in nums:
        xor_all ^= value
    return xor_all == 0 or len(nums) % 2 == 0
######################################################################
# Xiao Hong (the first player) is guaranteed to win when the XOR of the board
# is already 0 on her turn, or when the count of numbers is even; only
# otherwise can Xiao Ming win.
nums = [1,1,2,3]
result = xorGame(nums)
| [
"735159373@qq.com"
] | 735159373@qq.com |
c72500ece3f030d2acbe56879e08b891d53d15f9 | 408491958cece161e3f7b27d10926b5cd80b4b14 | /Python/Itertools/Compress the String/CompressTheString.py | 9d710345eef369aa0def85dc24211ea56380c842 | [] | no_license | Snoblomma/HackerRank | 279212173fbe0024ecb5e34fdbacc1c01faad7f5 | 1547913ada66d13fd59b06bc2781911c0895fbbf | refs/heads/master | 2021-07-11T09:16:43.323607 | 2021-03-23T22:52:11 | 2021-03-23T22:52:11 | 70,170,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | from itertools import groupby
# HackerRank "Compress the String": for every run of equal digits in the
# input line, emit a (run_length, digit) tuple; tuples are space-separated
# on a single output line.
w = list(input())
k = []
for digit, run in groupby(w):
    k.append((len(list(run)), int(digit)))
print(" ".join(str(pair) for pair in k))
| [
"noreply@github.com"
] | noreply@github.com |
73b124d0407d683b320b426ec9edb5b9f2c86f27 | 351687b2f40e8fe063c546993fb0eaefb58604f6 | /cov/example_cov_estimator_l1_nolam.py | 0118785b04c13dac6581bc07c7c93a7aebc68d0f | [] | no_license | eduff/NI_code | 79cdb22b154070d4b0e8873df810ac535d18c600 | 7b5a984e6931b8fb586f50ab390285bf6b36e785 | refs/heads/master | 2021-01-01T19:10:39.753985 | 2015-02-09T22:33:03 | 2015-02-09T22:33:03 | 9,242,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,517 | py | # Author: Gael Varoquaux
# Copyright: INRIA
import pylab as pl
import numpy as np
from scipy import linalg
from covariance.generate_data import generate_standard_sparse_mvn
from covariance.cov_estimator_l1 import CovEstimatorL1CV
################################################################################
# Simulation parameters: 30 samples from a 20-dimensional sparse multivariate
# normal, with a fixed seed for reproducibility.
N_SAMPLES = 30
DIM = 20
prng = np.random.RandomState(10)
x, true_prec = generate_standard_sparse_mvn(N_SAMPLES, DIM, prng=prng)
# Empirical (maximum-likelihood) covariance of the sample.
emp_cov = np.dot(x.T, x)/N_SAMPLES
true_cov = linalg.inv(true_prec)
# Fit the cross-validated L1-penalised covariance estimator; the selected
# penalty is exposed on the best model -- presumably chosen by CV over a
# grid of lambdas (TODO confirm against CovEstimatorL1CV).
model = CovEstimatorL1CV()
model.fit(x)
l1 = model.best_model.l1
if 1:
    prec_ = model.precision
    cov_ = linalg.inv(prec_)
    #gap, pobj, dobj = model.dual_gap(emp_cov, with_obj=True)
    #print "Dual gap : %s" % gap
    #print "Criterion : %s" % pobj
    #print "Dual criterion : %s" % dobj
###############################################################################
# Visualize
# Top row: covariance matrices (true, empirical, L1 estimate).
# Bottom row: precision matrices, with the zero/sparsity pattern overlaid
# in gray via a masked array.
# Symmetric color limits around 0 so the diverging colormap is centered.
vmin = min(true_cov.min(), emp_cov.min(), cov_.min())
vmax = max(true_cov.max(), emp_cov.max(), cov_.max())
vmax = max(-vmin, vmax)
pl.figure()
pl.subplot(2, 3, 1)
pl.imshow(true_cov, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=pl.cm.RdBu_r)
pl.axis('off')
pl.title('True (simulated) covariance', fontsize=10)
pl.subplot(2, 3, 2)
pl.imshow(emp_cov, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=pl.cm.RdBu_r)
pl.axis('off')
pl.title('sample covariance', fontsize=10)
pl.subplot(2, 3, 3)
pl.imshow(cov_, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=pl.cm.RdBu_r)
pl.axis('off')
pl.title('L1 covariance estimate \n for lambda=%s' % l1, fontsize=10)
# Recompute symmetric limits for the precision-matrix row.
vmin = min(true_prec.min(), prec_.min())
vmax = max(true_prec.max(), prec_.max())
vmax = max(-vmin, vmax)
pl.subplot(2, 3, 4)
pl.imshow(true_prec, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=pl.cm.RdBu_r)
pl.imshow(np.ma.masked_array(np.ones_like(true_prec), true_prec!=0), cmap=pl.cm.gray, interpolation='nearest', vmin=0, vmax=2)
pl.axis('off')
pl.title('True (simulated) precision', fontsize=10)
pl.subplot(2, 3, 5)
pl.imshow(linalg.inv(emp_cov), interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=pl.cm.RdBu_r)
pl.axis('off')
pl.title('Empirical precision', fontsize=10)
pl.subplot(2, 3, 6)
pl.imshow(prec_, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=pl.cm.RdBu_r)
# Entries with |value| <= 1e-2 are treated as zeros of the L1 estimate.
pl.imshow(np.ma.masked_array(np.ones_like(true_prec), np.abs(prec_)>1e-2), cmap=pl.cm.gray, interpolation='nearest', vmin=0, vmax=2)
pl.axis('off')
pl.title('L1 precision estimate \n for lambda=%s' % l1, fontsize=10)
pl.show()
| [
"eduff@fmrib.ox.ac.uk"
] | eduff@fmrib.ox.ac.uk |
8c67bf15d9d2de2bc24154779e9a522ad89693e5 | c83ba2b21c72fa119fecb1b094fcf7a9745b626c | /CodeFiles/Metrics_L2.py | e0e921f54fcc22481bc26c70c02ba9f53dc28d3f | [] | no_license | amoghgaikwad/Click-Through-Prediction-Rate | 8074d84309e4ff824eafe531a6f62280db091113 | 0dd369a927929e4fa45d092e38cd7756276e9cb2 | refs/heads/master | 2021-01-22T20:50:39.342028 | 2017-03-18T02:58:16 | 2017-03-18T02:58:16 | 85,371,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,043 | py | from pyspark.sql import Row
from pyspark.ml.feature import OneHotEncoder, StringIndexer
from pyspark.ml.feature import VectorAssembler
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
from pyspark.ml import Pipeline
from pyspark.mllib.classification import LogisticRegressionWithSGD, LogisticRegressionModel
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.evaluation import BinaryClassificationMetrics
import os, tempfile
# Scratch directory used to persist the trained model (model3.save below).
path = tempfile.mkdtemp()
#sc = spark.sparkContext
conf = SparkConf().setAppName("Assignment 3")
sc = SparkContext(conf=conf)
sqlCtx = SQLContext(sc)
###### Part 1- Preparing the data #######
# NOTE: this script uses Python 2 idioms (`iteritems` below, tuple-unpacking
# lambdas and `11L` literals later on) and must run under Python 2.
#To count the number of distinct values in each column
distnct_values = {}
# Load a text file and convert each line to a Row.
lines = sc.textFile("test_2.csv")
#extract header
header = lines.first()
lines = lines.filter(lambda row : row != header)
parts = lines.map(lambda l: l.split(","))
# Columns p[0] and p[2] are skipped -- presumably the row id and the hour
# timestamp of the CTR dataset; TODO confirm against the raw CSV schema.
data_T = parts.map(lambda p: Row(click=(p[1]), C1=p[3], banner_pos = p[4], site_id=p[5], site_domain=p[6],
site_category=p[7], app_id=p[8], app_domain=p[9], app_category=p[10], device_id=p[11], device_ip=p[12], device_model=p[13], device_type=p[14],
device_conn_type=p[15], C14=p[16], C15=p[17], C16=p[18], C17=p[19], C18=p[20], C19=p[21], C20=p[22], C21=p[23] ))
#create the dataframe
df = sqlCtx.createDataFrame(data_T)
#selecting all the categorical columns and checking the distinct values in each
col_names= ['C1','site_category','app_category','device_type','C14','C15','C16','C17','C18','C19','C20','C21']
for i in col_names: ##col_names contains names of cols that contains categorical data
    distinctValues = df.select(i).distinct().rdd.map(lambda r: r[0]).count()
    distnct_values[i] = distinctValues
#delete columns which have more than 100 distinct values in them
# (one-hot encoding such high-cardinality columns would blow up the feature
# vector; only the surviving columns are indexed/encoded below)
for key, value in distnct_values.iteritems() :
    if int(value) >100:
        df = df.drop(str(key))
#drop the columns which have NA values in it.
df = df.na.replace('', 'NA', 'C1')
df = df.dropna()
#stringIndexer on all the categorical columns
c1I = StringIndexer(inputCol="C1", outputCol="iC1", handleInvalid="skip")
c15I = StringIndexer(inputCol="C15", outputCol="iC15", handleInvalid="skip")
c16I = StringIndexer(inputCol="C16", outputCol="iC16", handleInvalid="skip")
c18I = StringIndexer(inputCol="C18", outputCol="iC18", handleInvalid="skip")
c19I = StringIndexer(inputCol="C19", outputCol="iC19", handleInvalid="skip")
c21I = StringIndexer(inputCol="C21", outputCol="iC21", handleInvalid="skip")
appcatI = StringIndexer(inputCol="app_category", outputCol="i_app_category", handleInvalid="skip")
devtypeI = StringIndexer(inputCol="device_type", outputCol="i_device_type", handleInvalid="skip")
sitecatI = StringIndexer(inputCol="site_category", outputCol="i_site_category", handleInvalid="skip")
#OneHotEncoder applied after the stringIndexer to form binary vector for each column
c1E = OneHotEncoder(inputCol="iC1", outputCol="C1Vector")
c15E = OneHotEncoder(inputCol="iC15", outputCol="C15Vector")
c16E = OneHotEncoder(inputCol="iC16", outputCol="C16Vector")
c18E = OneHotEncoder(inputCol="iC18", outputCol="C18Vector")
c19E = OneHotEncoder(inputCol="iC19", outputCol="C19Vector")
c21E = OneHotEncoder(inputCol="iC21", outputCol="C21Vector")
appcatE = OneHotEncoder(inputCol="i_app_category", outputCol="i_app_category_Vector")
devtypeE = OneHotEncoder(inputCol="i_device_type", outputCol="i_device_type_Vector")
sitecatE = OneHotEncoder(inputCol="i_site_category", outputCol="i_site_category_Vector")
#Vector assembler
# Collect all one-hot vectors into the single `features` column MLlib expects.
fAssembler = VectorAssembler(
inputCols=["C1Vector", "C15Vector", "C16Vector", "C18Vector", "C19Vector", "C21Vector", "i_app_category_Vector", "i_device_type_Vector", "i_site_category_Vector"],
outputCol="features")
#pipeline to sum up all the stringIndexers and OneHotEncoders and VectorAssemebler
data_P = Pipeline(stages=[c1I, c15I, c16I, c18I, c19I, c21I, appcatI, devtypeI, sitecatI,
c1E, c15E, c16E, c18E, c19E, c21E, appcatE, devtypeE, sitecatE, fAssembler])
model = data_P.fit(df)
data_t = model.transform(df)
###### Part 1 ends here #####
# Making the labelpoints to train the data with LR
# (label = click, features = the assembled one-hot vector from Part 1)
parsedData=data_t.select('click', 'features').rdd.map(lambda row: LabeledPoint(float(row.click),Vectors.dense((row.features).toArray())))
# split the dataset (fixed seed so the split is reproducible)
training,test = parsedData.randomSplit([0.6, 0.4], seed=11L)
training.cache()
##### PART 4 ######
# Retrain your model using an L2 Regularization method.
# Tune the cost parameter by dividing your training set into a training and a validation set.
# Output the value of the best choice. Test the new model with the provided test set. Output the accuracy, FPR, and AUC
# Carve a validation split out of the training data for tuning regParam.
training_l2, validation_l2 = training.randomSplit([0.8, 0.2], seed=11L)
training_l2.cache()
model1 = LogisticRegressionWithSGD.train(training_l2, step=0.1, miniBatchFraction=0.1, regType='l2', regParam=0.01)
model2 = LogisticRegressionWithSGD.train(training_l2, step=0.1, miniBatchFraction=0.1, regType='l2', regParam=1.0)
model3 = LogisticRegressionWithSGD.train(training_l2, step=0.1, miniBatchFraction=0.1, regType='l2', regParam=0.9)
# Persist the third model; it is reloaded below as the "best" model.
model3.save(sc, path)
# 1- Testing the Accuracy by changing the regParam(cost Parameter)
print("Model 1:")
labelsAndPreds1 = validation_l2.map(lambda p: (float(model1.predict(p.features)), p.label))
Accuracy = labelsAndPreds1.filter(lambda (v, p): v == p).count() / float(validation_l2.count())
print("Accuracy 1 = " + str(Accuracy))
print("Model 2:")
labelsAndPreds2 = validation_l2.map(lambda p: (float(model2.predict(p.features)), p.label))
Accuracy = labelsAndPreds2.filter(lambda (v, p): v == p).count() / float(validation_l2.count())
print("Accuracy 2 = " + str(Accuracy))
print("Model 3:")
labelsAndPreds3 = validation_l2.map(lambda p: (float(model3.predict(p.features)), p.label))
Accuracy = labelsAndPreds3.filter(lambda (v, p): v == p).count() / float(validation_l2.count())
print("Accuracy 3 = " + str(Accuracy))
# After running all the three models, the accuracy was slightly higher in the 3rd model - Accuracy = 0.831453634085
# So using the 3rd model to evaluate the test set:
print("Test Data Metrics on Best model:")
model_best = LogisticRegressionModel.load(sc,path)
# 1- Accuracy
labelsAndPredsT = test.map(lambda p: (float(model_best.predict(p.features)), p.label))
Accuracy = labelsAndPredsT.filter(lambda (v, p): v == p).count() / float(test.count())
print("Accuracy = " + str(Accuracy))
# 2- To Find the FPR
fpr_of_data= labelsAndPredsT.filter(lambda (v, p): v==1 and p==0).count()
fpr= fpr_of_data/(fpr_of_data+labelsAndPredsT.filter(lambda (v, p): v==0 and p==0).count())
print("FPR = " + str(fpr))
#Clears the threshold so that predict will output raw prediction scores, which will be used for AU -ROC
model_best.clearThreshold()
# Instantiate metrics object
metrics = BinaryClassificationMetrics(labelsAndPredsT)
# Area under ROC curve
print("Area under ROC = %s" % metrics.areaUnderROC) | [
"amogh.gk@gmail.com"
] | amogh.gk@gmail.com |
626a8f3efe3c44a285bd894dcf720fe9a98984aa | 8ccc0846442ea595015ec772c62d14695d745859 | /Backtesting/strategy/base.py | 7daa9847b42b20ed695cc39f44941ccb586a9169 | [] | no_license | linkenghong/Backtesting | 67c804446b3687a75064c8dc20713f69c11c56d7 | 29509e9e7262410275a92e42407cd8df334ecdad | refs/heads/master | 2020-06-05T02:57:30.582940 | 2019-09-08T05:20:43 | 2019-09-08T05:20:43 | 192,289,777 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,122 | py | from abc import ABCMeta, abstractmethod
class AbstractStrategy(object):
    """
    AbstractStrategy is an abstract base class providing an interface for
    all subsequent (inherited) strategy handling objects.

    The goal of a (derived) Strategy object is to generate Signal
    objects for particular symbols based on the inputs of ticks
    generated from a PriceHandler (derived) object.

    This is designed to work both with historic and live data as
    the Strategy object is agnostic to data location.
    """
    # NOTE(review): `__metaclass__` is the Python 2 mechanism; under Python 3
    # it is ignored, so abstractness is not enforced there.  Switching to
    # `metaclass=ABCMeta` would change instantiation behavior -- confirm the
    # target interpreter before changing it.
    __metaclass__ = ABCMeta

    @abstractmethod
    def calculate_signals(self, event):
        """
        Provides the mechanisms to calculate the list of signals.
        """
        raise NotImplementedError("Should implement calculate_signals()")

    def set_portfolio(self, portfolio_handler):
        """Attach the portfolio handler used to look up current positions."""
        self.portfolio_handler = portfolio_handler

    def get_symbol_position(self, symbol):
        """Return a snapshot dict of the current position for ``symbol``.

        All numeric fields default to 0 when no portfolio handler has been
        attached yet, or when the symbol has no open position.
        """
        key = ["symbol", "quantity", "unavailable_quantity",
               "available_quantity", "price", "total_commission",
               "avg_price", "market_value"]
        position_dict = {k: 0 for k in key}
        position_dict["symbol"] = symbol
        try:
            position = self.portfolio_handler.portfolio.positions[symbol]
        except (AttributeError, KeyError):
            # No portfolio attached yet, or no open position for this symbol:
            # fall through and return the zeroed defaults.  (Was a bare
            # `except:`, which silently swallowed every error.)
            pass
        else:
            position_dict["quantity"] = position.quantity
            position_dict["unavailable_quantity"] = position.unavailable_quantity
            position_dict["available_quantity"] = position.available_quantity
            position_dict["price"] = position.price
            position_dict["total_commission"] = position.total_commission
            position_dict["avg_price"] = position.avg_price
            position_dict["market_value"] = position.market_value
        return position_dict
class Strategies(AbstractStrategy):
    """Composite strategy: fans each incoming event out to every child strategy."""

    def __init__(self, *strategies):
        # Keep the child strategies in the order they were supplied.
        self._lst_strategies = strategies

    def calculate_signals(self, event):
        """Delegate signal calculation to each wrapped strategy in turn."""
        for child_strategy in self._lst_strategies:
            child_strategy.calculate_signals(event)
"345852974@qq.com"
] | 345852974@qq.com |
c72299e7afa25673891cb364c768c19408325154 | f79267b09f4fee621c7aaaa02ab2eef4f59e0dcf | /ag/sorting/graph.py | b2a6a847d623fb418ea65eca15d0c3a908ef7eb9 | [
"MIT"
] | permissive | justyre/jus | 07503972ff4933117f39fe91818c9b63dcfcbb17 | 1339c010ac4499c253061d2cce5e638ec06062bd | refs/heads/master | 2023-07-16T11:54:43.875953 | 2021-08-03T07:49:36 | 2021-09-06T02:26:13 | 389,536,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,359 | py | # Licensed under MIT License.
# See LICENSE in the project root for license information.
"""Several graph algorithms."""
# With DFS, we can classify the edges in a directed or undirected graph:
#
# 1. A tree edge is (u,v) if v was first discovered by exploring edge (u,v). All the edges in the depth-first forest G(parent) (aka the predecessor subgraph) are tree edges (but the reverse does not always hold - not all tree edges are in G(pa)).
# 2. A back edge is (u,v) connecting u to an ancestor v in a depth-first tree. For
# directed graphs, we consider self-loops to be back edges.
# 3. A forward edge is a non-tree edge (u,v) connecting u to a descendant v.
# 4. All other edges that is not any of the above kind are called cross edges.
# 5. For undirected graphs, we classify the edge as the first type in the list
# above; and we classify the edge according to whichever of (u,v) or (v,u) the DFS
# encounters first.
#
# When we first explore an edge (u,v), if:
# a) v.color = WHITE: then this is a tree edge.
# b) v.color = GRAY: then this is a back edge, since the gray vertices always form
# a linear chain of descendants corresponding to the stack of active _dfs_visit()
# invocations. Exploration always proceeds from the deepest (latest) gray vertex,
# so an edge that reaches another gray vertex must have reached an ancestor.
# c) v.color = BLACK (only possible for a directed graph): then this is a forward
# or cross edge. When u.grayed_time < v.grayed_time, it is a forward edge; if >,
# it is a cross edge.
#
# According to CLRS Theorem 22.10, for an undirected graph, every edge is either a
# tree edge or a back edge (ie there are no forward or cross edges).
# Hence, for an undirected graph, it has a cycle if and only if DFS finds a back
# edge.
from typing import Sequence, Tuple
import enum
class Color(enum.Enum):
    """Vertex visitation state used by the graph traversals.

    WHITE = undiscovered, GRAY = discovered but not yet finished,
    BLACK = finished (all neighbors explored).
    """
    WHITE = enum.auto()
    GRAY = enum.auto()
    BLACK = enum.auto()
class Graph:
    """Graph represented using adjacency lists. The default is undirected graph."""
    def __init__(
        self, num_vertices: int, edges: Sequence[Tuple], is_directed: bool = False
    ) -> None:
        """Build the adjacency list for `num_vertices` vertices from `edges`.

        `edges` must support len() (a sequence, not a generator); for an
        undirected graph each edge is stored in both endpoints' lists.
        """
        # Adjacency list (ie list of all neighbors) for all vertices
        self.adjlist = [[] for _ in range(num_vertices)]
        for v1, v2 in edges:
            # `edges` is a list of tuples of vertex values like (v1, v2).
            # We want to store the edge info as neighbors for each vertex, so that
            # we will have an adjacency list
            self.adjlist[v1].append(v2)
            if not is_directed:
                self.adjlist[v2].append(v1)
        # Per-vertex visitation state (a list of Color; WHITE = undiscovered).
        self.color: list = [Color.WHITE] * num_vertices
        # For source vertex and all undiscovered vertices, their parents are None
        self.parent = [None] * num_vertices
        # Distance (ie total num of edges) from source to the vertex
        self.distance = [None] * num_vertices
        # The next attrs are for DFS to store the time when a vertex turns gray/black
        self.timestamp = 0
        self.grayed_time = [None] * num_vertices
        self.blackened_time = [None] * num_vertices
        # Mark the cycle index number of a vertex; `None` if it belongs to no cycle
        self.cycle_mark = [None] * max(num_vertices, len(edges))
        # Total number of cycles in the graph
        self.num_cycles = 0
    def __repr__(self) -> str:
        """Representation showing neighbors of each vertex, one per line."""
        return "\n".join([f"{i}: {neighbors}" for (i, neighbors) in enumerate(self.adjlist)])
    def __str__(self) -> str:
        """Same as __repr__."""
        return self.__repr__()
    def adjacency_matrix(self) -> list:
        """Return the adjacency matrix (list of rows; 1 marks an edge i->j)."""
        adjmat = [[0] * len(self.adjlist) for _ in range(len(self.adjlist))]
        for i, neighbors in enumerate(self.adjlist):
            for j in neighbors:
                adjmat[i][j] = 1
        return adjmat
    def breadth_first_search(self, source: int) -> list:
        """Breadth-first search (BFS) of a graph from vertex `source`, cf CLRS 22.2.

        Side effects: rewrites self.color, self.parent and self.distance.
        Returns the traversal order as a list of vertices.
        """
        # Time complexity: O(num_vertices + num_edges), aka O(V+E)
        # Note: This initialization is a must, since other methods may change defaults
        self.color = [Color.WHITE] * len(self.adjlist)
        # For source vertex and all undiscovered vertices, their parents are None
        self.parent = [None] * len(self.adjlist)
        # Distance (ie total num of edges) from source to the vertex
        self.distance = [None] * len(self.adjlist)
        # Source is discovered, but not all its neighbors are discovered, so gray
        self.color[source] = Color.GRAY
        self.distance[source] = 0
        queue = []  # Use-and-discard FIFO queue
        traversal = []  # Record the BFS traversal route
        queue.append(source)
        traversal.append(source)
        while queue:
            # We use queue as FIFO here.  NOTE(review): list.pop(0) is O(n);
            # collections.deque.popleft() would be O(1).
            u = queue.pop(0)
            for v in self.adjlist[u]:
                if self.color[v] == Color.WHITE:
                    # White means undiscovered, so discover it
                    self.color[v] = Color.GRAY
                    self.distance[v] = self.distance[u] + 1
                    self.parent[v] = u
                    queue.append(v)
                    traversal.append(v)
            # When u's adjlist is exhausted, turn u to black
            self.color[u] = Color.BLACK
        return traversal
    def shortest_path(self, source: int, vertex: int) -> list:
        """Return the shortest path from vertex `source` to `vertex`.

        Note
        ----
        The length of the shortest path (when one exists) is trivial:
        `len(returned list) - 1`.
        NOTE(review): the recursion re-runs the full BFS at every step, so
        the real cost is O(path_length * (V + E)), not the O(path) claimed.
        """
        # Time complexity: O(num of vertices in the path)
        # First, we need to compute all vertices' parents using b_f_s()
        _ = self.breadth_first_search(source)
        if vertex == source:
            return [source]
        elif self.parent[vertex] is None:
            print(f"No path from {source} to {vertex} exists.")
            return []
        else:
            return self.shortest_path(source, self.parent[vertex]) + [vertex]
    def breadth_first_search_jovian(self, source: int) -> list:
        """Breadth-first search (BFS) traversal of a graph from vertex `source`.

        Variant using a visited-flag array instead of colors; the growing
        `queue` list doubles as the traversal record.
        """
        # Time complexity: O(num_vertices + num_edges), aka O(V+E)
        visited = [False] * len(self.adjlist)
        queue = []  # same as `traversal` in the above breadth_first_search()
        # Label root (ie source) as visited
        visited[source] = True
        queue.append(source)
        i = 0
        while i < len(queue):
            for v in self.adjlist[queue[i]]:
                # v is a neighbor of queue[i] (starting from queue[0]=source)
                if not visited[v]:
                    visited[v] = True
                    queue.append(v)
            i += 1
        return queue
    def depth_first_search(self) -> tuple:
        """Depth-first search (DFS) of a graph, cf CLRS 22.3.

        Returns (predsubg, grayed_time, blackened_time) where predsubg holds,
        per DFS-forest root, the list of tree edges found from that root.
        """
        # Time complexity: Theta(V + E)
        # Note: This initialization is a must, since other methods may change defaults
        self.color = [Color.WHITE] * len(self.adjlist)
        # For source vertex and all undiscovered vertices, their parents are None
        self.parent = [None] * len(self.adjlist)
        self.timestamp = 0
        # `predsubg` is the predecessor subgraph: G(parent) = (V, E(parent)), where
        # E(parent) = {(v.pa, v): v in G.V and v.pa is not None}.
        # Note: Depending on the tree structure, predsubg may not include ALL edges of
        # the original graph. But we are sure that predsubg does not include duplicate
        # edges, and does not have any edges that are not present in the original graph.
        predsubg = [None] * len(self.adjlist)
        for vertex in range(len(self.adjlist)):
            if self.color[vertex] == Color.WHITE:
                # Every time _dfs_visit(vertex) is called, `vertex` becomes the root of
                # a new tree in the depth-first forest
                predsubg[vertex] = self._dfs_visit(vertex)
        return predsubg, self.grayed_time, self.blackened_time
    def _dfs_visit(self, vertex: int) -> list:
        # Visit all neighbors of `vertex` using DFS approach; returns the
        # tree edges discovered in this subtree.
        traversal = []
        # White `vertex` is discovered, so it turns gray
        self.timestamp += 1
        self.grayed_time[vertex] = self.timestamp
        self.color[vertex] = Color.GRAY
        for v in self.adjlist[vertex]:
            # Edge (vertex, v) is being explored by the DFS
            if self.color[v] == Color.WHITE:
                self.parent[v] = vertex
                traversal += [(vertex, v)] + self._dfs_visit(v)
            elif self.color[v] == Color.GRAY:
                # TODO: For an undirected graph, this means (u,v) is a back edge, which means there is a cycle
                # print('cyc', traversal + [(v, self.parent[v])])
                pass
        # When all neighbors of `vertex` have been exhausted, it turns black
        self.color[vertex] = Color.BLACK
        self.timestamp += 1
        self.blackened_time[vertex] = self.timestamp
        return traversal
    def depth_first_search_jovian(self, source: int) -> list:
        """Depth-first search (DFS) traversal of a graph from vertex `source`.

        Iterative stack-based variant; visitation order can differ from the
        recursive depth_first_search().
        """
        # DFS is more memory efficient than BFS, since you can backtrack sooner.
        visited = [False] * len(self.adjlist)
        queue = []
        stack = [source]
        while stack:
            v = stack.pop()
            if not visited[v]:
                visited[v] = True
                queue.append(v)
                for neighbor in self.adjlist[v]:
                    # Push (ie append) all neighbors of v into stack for next loop
                    stack.append(neighbor)
        return queue
    def is_cyclic(self) -> bool:
        """Check if the graph has any cycles (undirected-graph logic)."""
        visited = [False] * len(self.adjlist)
        for vertex in range(len(self.adjlist)):
            if not visited[vertex] and self._is_subgraph_cyclic(vertex, visited, -1):
                return True
        return False
    def _is_subgraph_cyclic(self, v: int, visited: Sequence, parent: int) -> bool:
        # Detect cycles in the subgraph reachable from vertex `v`.
        visited[v] = True
        for neighbor in self.adjlist[v]:
            if not visited[neighbor]:
                # If neighbor is not visited, then recurse on it
                if self._is_subgraph_cyclic(neighbor, visited, v):
                    return True
            elif parent != neighbor:
                # If neighbor has been visited and is not the parent of v,
                # then there is a cycle
                return True
        return False
    def dfs_cycle(self, u: int, p: int) -> None:
        """Mark the vertices with different numbers for different cycles.

        Call with p=None to (re)initialize state and start from root `u`;
        results land in self.cycle_mark and self.num_cycles.
        """
        if p is None:
            # This initialization is a must, since other methods may change defaults
            self.color = [Color.WHITE] * len(self.adjlist)
            # For source vertex and all undiscovered vertices, their parents are None
            self.parent = [None] * len(self.adjlist)
            # Store total number of cycles found; also used as current cycle's index num
            self.num_cycles = 0
        if self.color[u] == Color.GRAY:
            # A vertex that is discovered but not finished.
            # For an undirected graph, this means we have discovered a back edge, which
            # means there is a cycle. So we backtrack based on parents to find whole cyc
            self.num_cycles += 1
            current = p
            self.cycle_mark[current] = self.num_cycles
            while current != u:
                # Backtrack the parent of current, until the cycle is exhausted
                current = self.parent[current]
                self.cycle_mark[current] = self.num_cycles
        elif self.color[u] == Color.WHITE:
            # Set p to be u's parent, and mark u as (first) discovered
            self.parent[u] = p
            self.color[u] = Color.GRAY
            for v in self.adjlist[u]:
                # Edge (u, v) is being explored by the DFS
                if v != self.parent[u]:
                    self.dfs_cycle(v, u)
            # Now u is finished
            self.color[u] = Color.BLACK
    def print_cycles(self, edges: Sequence[Tuple]) -> None:
        """Print and return the cycles in the graph.

        NOTE(review): `edges` is unused here (cycle detection starts from
        vertex 0 via dfs_cycle), and the per-vertex print below looks like
        leftover debug output -- confirm before removing either.
        """
        self.dfs_cycle(0, None)
        cycles = [[] for _ in range(self.num_cycles + 1)]
        for i in range(len(self.adjlist)):
            if self.cycle_mark[i] is not None:
                print(i, self.cycle_mark, cycles)
                cycles[self.cycle_mark[i]].append(i)
        for i in range(1, self.num_cycles + 1):
            print(f"Cycle #{i}:", *cycles[i])
        print()
        return cycles[1:]
##########################################
### Driver code
# Undirected graph with several overlapping cycles.
edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 0), (1, 4), (1, 3)]
g1 = Graph(5, edges)
print(g1)
print('Adjacent matrix:', g1.adjacency_matrix())
print('BFS:', g1.breadth_first_search(3))
print('BFS jovian:', g1.breadth_first_search_jovian(3))
print('Shortest path:', g1.shortest_path(2, 4))
print('DFS:', g1.depth_first_search())
print('DFS jovian:', g1.depth_first_search_jovian(0))
print('Has cycles:', g1.is_cyclic())
print(g1.print_cycles(edges))
# Has a small cycle
edges = [(0, 1), (0, 3), (1, 2), (2, 0), (3, 4)]
g = Graph(5, edges)
print(g)
print(g.depth_first_search())
print(g.is_cyclic())
print("Cycles: ", g.print_cycles(edges))
# Has a big cycle
edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)]
g = Graph(5, edges)
print(g)
print(g.depth_first_search())
print(g.is_cyclic())
print("Cycles: ", g.print_cycles(edges))
# Directed graph, including a self-loop at vertex 5.
edges = [(0, 1), (0, 3), (3, 1), (1, 4), (4, 3), (2, 4), (2, 5), (5, 5)]
g = Graph(6, edges, is_directed=True)
print(g)
print(g.depth_first_search())
print("Cycles: ", g.print_cycles(edges))
# Directed forest (two disconnected trees).
edges = [(0, 1), (1, 2), (2, 3), (1, 4), (5, 6), (5, 7)]
g = Graph(8, edges, is_directed=True)
print(g)
print(g.depth_first_search()) | [
"1762873+justyre@users.noreply.github.com"
] | 1762873+justyre@users.noreply.github.com |
619f32ab28b0c9805f69cf283ef37f4d1facbada | 2051155a91c262ec951b42cfd6eff52a8ad65707 | /prepare_submission_20180430.py | b60f36c44633830a71f9e6ed20521921cda16014 | [] | no_license | detrout/C1_mouse_limb_combined | 607af5402eebcc79aed68f6c9966d217fb79ee54 | 44d604bf86588bd89db227b20dac589253820bca | refs/heads/master | 2021-07-10T04:50:33.089606 | 2020-06-19T22:39:01 | 2020-06-19T22:39:01 | 152,499,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,426 | py | #!/usr/bin/python3
from argparse import ArgumentParser
import os
import collections
from lxml.html import fromstring
import json
import re
import requests
import glob
import pandas
from urllib.parse import urljoin
from rdflib import Graph, Literal, URIRef
from generate_combined_transcript_C1 import (
paper_433_experiment_files,
ASOF_RUN17_experiment_files
)
from woldrnaseq.models import load_experiments
from htsworkflow.util.opener import autoopen
from htsworkflow.util.rdfns import (
libraryOntology,
RDF,
RDFS,
)
from htsworkflow.util.rdfhelp import (
dump_model,
)
# 20031-20038 are good on flowcell HF7NTBCX2
# 20026-20030 are mixed on flowcell HF7NTBCX2
def main(cmdline=None):
    """Drive the submission build: read the driver spreadsheet, select the
    experiments to submit, and write the alias/barcode/flowcell-metadata
    TSVs plus a condor file that merges the split fastqs.

    :param cmdline: optional argument list for parse_args (None = sys.argv).
    """
    parser = ArgumentParser()
    parser.add_argument('--first-tranche', default=False, action='store_true',
                        help='Use just the first tranche as experiment list')
    parser.add_argument('--name', required=True, help='submission name')
    parser.add_argument('-s', '--sheet', default=0, help='Sheet to use')
    parser.add_argument('--header', default=None, help="header row")
    parser.add_argument('filename', nargs=1, help='driver spreadsheet')
    args = parser.parse_args(cmdline)
    # Where the raw sequencing runfolders are published.
    root_fastq_url = 'http://jumpgate.caltech.edu/runfolders/volvox02/'
    desplit = os.path.expanduser('~/proj/htsworkflow/htsworkflow/pipelines/desplit_fastq.py')
    header = int(args.header) if args.header is not None else None
    data = read_spreadsheet(args.filename[0], args.sheet, header)
    print(data.shape)
    if args.first_tranche:
        experiment_file_list = paper_433_experiment_files.split('\n')
    else:
        experiment_file_list = ASOF_RUN17_experiment_files.split('\n')
    experiment_files = [ os.path.expanduser(x.strip()) for x in experiment_file_list]
    experiments = load_experiments(experiment_files)
    # Normalize replicate names by stripping alignment/cleanup suffixes.
    experiments['replicates'] = experiments['replicates'].apply(lambda l: [x.replace('_mm10', '').replace('_clean', '') for x in l])
    current_experiments = find_experiments_to_submit(experiments, data)
    aliases_tsv = '{}-aliases.tsv'.format(args.name)
    make_library_aliases(current_experiments, aliases_tsv)
    submission_fastqs_tsv = '{}-fastqs.tsv'.format(args.name)
    # Finding fastqs over HTTP is slow; reuse the cached TSV when present.
    if not os.path.exists(submission_fastqs_tsv):
        fastq_urls = find_all_fastqs(root_fastq_url, current_experiments, submission_fastqs_tsv)
    fastq_urls = pandas.read_csv(submission_fastqs_tsv, sep='\t')
    barcodes_tsv = '{}-barcodes.tsv'.format(args.name)
    make_library_barcodes(fastq_urls, barcodes_tsv)
    metadata_tsv = '{}-flowcell-details.tsv'.format(args.name)
    # make_metadata is presumably defined later in this file -- TODO confirm.
    metadata = make_metadata(fastq_urls, root_fastq_url, metadata_tsv)
    merge_file = '{}-merge-fastqs.condor'.format(args.name)
    make_desplit_condor(fastq_urls, metadata, desplit, root_fastq_url, merge_file)
def read_spreadsheet(filename, sheet, header=None):
    """Load the driver spreadsheet (.xlsx or .ods) into a DataFrame.

    :param filename: path to an .xlsx or .ods spreadsheet.
    :param sheet: sheet name or 0-based index to load.
    :param header: row number to use as the column header, or None.
    :raises ValueError: if the filename extension is not recognized.
    """
    if filename.endswith('xlsx'):
        # BUG FIX: previously read a hard-coded spreadsheet path instead of
        # `filename`, and passed the invalid `sheet=` keyword (pandas expects
        # `sheet_name=`).
        data = pandas.read_excel(filename, sheet_name=sheet, header=header)
    elif filename.endswith('ods'):
        from pandasodf import ODFReader
        book = ODFReader(filename)
        data = book.parse(sheet, header=header)
    else:
        # Previously fell through to an UnboundLocalError on `data`.
        raise ValueError('Unsupported spreadsheet format: {}'.format(filename))
    return data
def find_all_fastqs(root_fastq_url, experiments, output_file):
    """Get urls to the raw fastq files for all our replicates.

    For every replicate record yielded by find_replicate_flowcells(), looks
    up its fastq urls on each flowcell, adds the fluidigm barcode/location
    parsed from the first fastq, writes everything to `output_file` as TSV,
    and returns the DataFrame.  Records spanning multiple flowcells are also
    dumped to multiple_flowcells.tsv with a warning.
    """
    # Runfolder and parse_fluidigm are presumably defined elsewhere in this
    # file (outside this excerpt) -- TODO confirm.
    runfolder = Runfolder(root_fastq_url)
    records = []
    multi = []
    for record in find_replicate_flowcells(experiments):
        fastqs = []
        for flowcell in record['flowcells']:
            fastqs.extend(list(runfolder.find_fastqs(flowcell, record['library_id'])))
        record['fastq_urls'] = fastqs
        # Barcode/location are read from the first fastq's fluidigm metadata.
        fluidigm_fields = parse_fluidigm(urljoin(root_fastq_url, fastqs[0]))
        record['barcode'] = fluidigm_fields['barcode']
        record['location'] = fluidigm_fields['location']
        records.append(record)
        if len(record['flowcells']) > 1:
            multi.append(record)
    df = pandas.DataFrame(records)
    df.to_csv(output_file, sep='\t', index=False)
    if len(multi) > 0:
        print('Warning, runs on multiple flowcells check multiple_flowcells.tsv')
        pandas.DataFrame(multi).to_csv('multiple_flowcells.tsv', sep='\t')
    return df
def find_replicate_flowcells(experiments):
    """Yield one record per replicate library with the flowcell ids it ran on.

    Library metadata is fetched as RDFa from the htsworkflow LIMS (felcat)
    into an rdflib graph, cached across replicates within this call, and the
    flowcell ids are extracted with a SPARQL query.
    """
    model = Graph()
    for i, row in experiments.iterrows():
        for extended_id in row.replicates:
            # Replicate ids look like <library>_<plate location>[_suffix...].
            library_id, location, *_ = extended_id.split('_')
            extended_id = library_id + '_' + location
            uri = URIRef('https://felcat.caltech.edu/library/{}/'.format(library_id))
            s = (uri, RDF['type'], libraryOntology['Library'])
            # Only fetch a library's RDFa page once per call.
            if s not in model:
                model.parse(source=uri, format='rdfa')
            flowcells = model.query("""PREFIX libns: <http://jumpgate.caltech.edu/wiki/LibraryOntology#>
            select distinct ?flowcell_id
            where {
            ?library a libns:Library ;
            libns:has_lane ?lane .
            ?lane libns:flowcell ?flowcell .
            ?flowcell libns:flowcell_id ?flowcell_id .
            }
            """, initBindings={'library': uri})
            yield {'experiment': row.name,
                   'library_id': extended_id,
                   'flowcells': sorted([x[0].value for x in flowcells])
            }
def find_experiments_to_submit(experiments, submission_table):
    """Filter the experiment table to rows with at least one requested replicate.

    The first column of `submission_table` lists the replicate ids to submit.
    Returns a DataFrame indexed by experiment name whose replicates column
    holds only the requested ids; requested ids matching no experiment are
    reported on stdout.
    """
    requested = set(submission_table[submission_table.columns[0]])
    unmatched = set(requested)
    selected = []
    for _, row in experiments.iterrows():
        replicate_set = set(row.replicates)
        overlap = requested & replicate_set
        unmatched -= replicate_set
        if overlap:
            selected.append({
                'name': row.name,
                'analysis_dir': row.analysis_dir,
                'replicates': list(overlap),
            })
    print('Not found:', len(unmatched), sorted(unmatched))
    result = pandas.DataFrame(selected)
    result.set_index('name', inplace=True)
    return result
def find_seans_fastqs(experiments):
    """Collect the fastq files in each experiment's analysis directory.

    :Parameters:
        - experiments: (pandas.DataFrame) rows with ``analysis_dir`` and a
          ``replicates`` list of library-id filename prefixes.

    :Returns:
        dict mapping experiment index -> list of matching fastq.gz paths.

    Raises AssertionError if a library id matches no files.
    """
    # Bug fix: ``filesets`` was previously referenced without ever being
    # defined, so the first call raised NameError. It is now initialized
    # here and returned to the caller.
    filesets = {}
    for i, row in experiments.iterrows():
        for library_id in row.replicates:
            pattern = os.path.join(row.analysis_dir, library_id + '*.fastq.gz')
            files = glob.glob(pattern)
            assert len(files) > 0
            filesets.setdefault(i, []).extend(files)
    return filesets
    #make_desplit_condor(filesets)
def make_library_aliases(experiments, aliases_tsv):
    """Write a TSV mapping each experiment name to its DCC library aliases.

    Each output line is ``<experiment>\t<alias,alias,...>`` with aliases of
    the form ``barbara-wold:<library_id>``, both keys and aliases sorted.
    """
    alias_map = {}
    for _, row in experiments.iterrows():
        for library_id in row.replicates:
            alias_map.setdefault(row.name, []).append('barbara-wold:{}'.format(library_id))
    with open(aliases_tsv, 'wt') as outstream:
        for experiment in sorted(alias_map):
            joined = ','.join(sorted(alias_map[experiment]))
            outstream.write(experiment + '\t' + joined + os.linesep)
def make_library_barcodes(experiments, barcode_tsv):
    """Write a TSV mapping each experiment to its plate/barcode records.

    :Parameters:
        - experiments: (pandas.DataFrame) rows with ``library_id`` formatted
          ``<plate>_<well>[_...]``, plus ``barcode`` and ``experiment``.
        - barcode_tsv: (filename) output path; each line is
          ``<experiment>\t<json list of records>``.
    """
    def sorted_plate_key(row):
        # Order records by plate then well so the JSON output is stable.
        return row['plate_id'] + '_' + row['plate_location']
    barcodes = {}
    for i, row in experiments.iterrows():
        plate_id, location, *_ = row.library_id.split('_')
        record = {'barcode': row.barcode, 'plate_id': plate_id, 'plate_location': location}
        barcodes.setdefault(row.experiment, []).append(record)
    with open(barcode_tsv, 'wt') as outstream:
        for key in sorted(barcodes):
            outstream.write(key)
            outstream.write('\t')
            outstream.write(json.dumps(sorted(barcodes[key], key=sorted_plate_key)))
            outstream.write(os.linesep)
def make_desplit_condor(experiments, metadata, desplit_cmd, root_url, condor_file):
    """Make condor file to build merged fastqs
    :Parameters:
        - experiments: (pandas.DataFrame) Experiments and their fastq urls
          from find_all_fastqs()
        - metadata: (pandas.DataFrame) metadata details about each fastq
        - desplit_cmd: (filename) Path to the desplit_fastq.py file from htsworkflow
        - condor_file: (filename) target to write condor file
    :Returns:
        True if all the merged fastqs exists, otherwise False

    NOTE(review): ``experiments`` and ``root_url`` are not used in this body;
    grouping is driven entirely by ``metadata``. Confirm before removing.
    NOTE(review): the return value actually means "nothing left to merge"
    (True) vs. "condor file written" (False) — verify against callers.
    """
    # Condor job header; $(process) expands per queued job.
    header = """universe=vanilla
executable=/usr/bin/python3
error=log/desplit_fastq.$(process).out
output=log/desplit_fastq.$(process).out
log=log/desplit_fastq.log
environment="PYTHONPATH=/woldlab/loxcyc/home/diane/proj/htsworkflow"
requirements=(MACHINE != "wold-clst-3.woldlab") && (MACHINE != "wold-clst-4.woldlab")
"""
    # Group every fastq url under the merged output file for its experiment.
    experiment_fastqs = {}
    for i, row in metadata.iterrows():
        output_name = row.experiment + '.fastq.gz'
        experiment_fastqs.setdefault(output_name, []).append(row.fastq_url)
    # chunk all fastqs by experiment
    body = []
    for output_name in experiment_fastqs:
        print(output_name)
        fastq_urls = experiment_fastqs[output_name]
        body.extend(['arguments="{} --gzip -o {} -s 0:50 {}"'.format(desplit_cmd, output_name, ' '.join(sorted(fastq_urls))),
                     'queue',
                     ''])
    if len(body) > 0:
        with open(condor_file, 'wt') as outstream:
            outstream.write(header)
            outstream.write(os.linesep.join(body))
        return False
    else:
        return True
def make_metadata(experiments, root_fastq_url, filename):
    """Expand each experiment's fastq urls into one metadata row per fastq
    and write the sorted table to ``filename`` as a TSV.

    :Parameters:
        - experiments: (pandas.DataFrame) rows with ``experiment`` and a
          ``fastq_urls`` column holding the *string* repr of a url list
          (as read back from a previously written TSV).
        - root_fastq_url: base url fastq paths are resolved against.
        - filename: output TSV path.

    :Returns:
        the metadata pandas.DataFrame that was written.
    """
    model = Graph()  # NOTE(review): appears unused here — confirm before removing.
    metadata = []
    for i, row in experiments.iterrows():
        # Strip the outer "[...]" of the stringified list, split on ", ",
        # drop each element's surrounding quotes, then resolve the url.
        fastq_urls = [ urljoin(root_fastq_url, x[1:-1]) for x in row.fastq_urls[1:-1].split(', ')]
        for fastq_url in fastq_urls:
            fastq_data = parse_fluidigm(fastq_url)
            metadata.append({
                'experiment': row.experiment,
                'fastq_url': fastq_url,
                # Sequencer is hard-coded to jumpgate machine 8.
                'machine': 'http://jumpgate.caltech.edu/sequencer/8',
                'flowcell': fastq_data['flowcell_id'],
                'lane': fastq_data['lane_number'],
                'barcode': fastq_data['barcode'],
                'read_length': fastq_data['read_length']
            })
    metadata = sorted(metadata, key=lambda row: (row['experiment'], row['flowcell'], row['barcode']))
    df = pandas.DataFrame(metadata, columns=['experiment', 'fastq_url', 'machine', 'flowcell', 'lane', 'barcode', 'read_length'])
    print(df.head())
    df.to_csv(filename, sep='\t', index=False)
    return df
fluidigm_fields = ['library_id', 'location', 'barcode', 'lane_number', 'read']
def parse_fluidigm(pathname):
    """Parse library metadata out of a fluidigm-style fastq filename.

    Filenames look like ``<library>_<well>_<barcode>_L00<lane>_R<read>...``.
    Returns the matched name fields plus ``flowcell_id``/``barcode`` from
    the first fastq header line and the length of the first sequence line,
    or None when the filename does not match the pattern.
    """
    path, name = os.path.split(pathname)
    p = r'(?P<library_id>[0-9]{5})_'\
        '(?P<location>[A-H][0-9]{1,2})_'\
        '(?P<barcode>[AGCT-]+)_'\
        'L00(?P<lane_number>[1-8])_'\
        'R(?P<read>[1-3])'
    match = re.match(p, name)
    if match is not None:
        fields = { k: match.group(k) for k in fluidigm_fields }
        with autoopen(pathname, 'rt') as stream:
            fields.update(parse_fastq_header(stream.readline()))
            seq = stream.readline()
            # NOTE(review): readline() keeps the trailing newline, so this
            # read length is off by one — confirm whether that is intended.
            fields['read_length'] = len(seq)
        return fields
def parse_fastq_header(header):
    """Extract the flowcell id and barcode from an Illumina fastq header.

    The header is ``@<instrument>:<run>:<flowcell>:... <read>:<filter>:<control>:<barcode>``;
    the two halves are separated by a single space.
    """
    read_name, annotations = header.strip().split(' ')
    id_parts = read_name.split(':')
    annotation_parts = annotations.split(':')
    return {
        'flowcell_id': id_parts[2],
        'barcode': annotation_parts[3],
    }
class Runfolder:
    """Crawler for an Apache-style directory listing of sequencing
    runfolders; resolves a flowcell + library id down to fastq files."""

    def __init__(self, root_url):
        self.root_url = root_url
        # cache: relative url -> list of entry names on that index page
        self.pages = {}

    def load_index(self, url=''):
        """Fetch and cache the directory listing at ``url`` (relative)."""
        absolute_url = urljoin(self.root_url, url)
        response = requests.get(absolute_url)
        if response.status_code != 200:
            raise RuntimeError('Unable to access {}. Status {}'.format(absolute_url, response.status_code))
        tree = fromstring(response.content)
        rows = tree.xpath('*/table/tr/td/a')
        if len(rows) == 0:
            raise RuntimeError('{} is not a directory'.format(absolute_url))
        if rows[0].text == 'Parent Directory':
            # drop the ".." entry Apache lists first
            rows.pop(0)
        self.pages[url] = [ x.text for x in rows ]

    def find_flowcell(self, flowcell):
        """Return the runfolder name containing ``flowcell``, or None."""
        root = ''
        if root not in self.pages:
            self.load_index(root)
        for name in self.pages[root]:
            if flowcell in name:
                return name

    def _find_unaligned(self, url):
        """Return the Unaligned*/ subdirectory url of a runfolder."""
        if url not in self.pages:
            self.load_index(url)
        for name in self.pages[url]:
            for unaligned in ['Unaligned.dualIndex/', 'Unaligned/']:
                if unaligned == name:
                    return url + name
        raise RuntimeError('Unable to find index in {}'.format(url))

    def _find_extended_id(self, url, extended_id):
        """Return the first entry under ``url`` containing the id, or None."""
        if url not in self.pages:
            self.load_index(url)
        for name in self.pages[url]:
            if extended_id in name:
                return url + name

    def find_fastqs(self, flowcell, extended_id):
        """Yield fastq.gz paths for ``extended_id`` on ``flowcell``."""
        runfolder = self.find_flowcell(flowcell)
        assert runfolder is not None
        unaligned = self._find_unaligned(runfolder)
        assert unaligned is not None
        # Listings nest project/sample directories that both carry the id.
        project = self._find_extended_id(unaligned, extended_id)
        sample = self._find_extended_id(project, extended_id)
        if sample not in self.pages:
            self.load_index(sample)
        for name in self.pages[sample]:
            if 'fastq.gz' in name:
                yield sample + name
if __name__ == '__main__':
main()
| [
"diane@ghic.org"
] | diane@ghic.org |
2428e97f642a009fcaf507f939507bb69f6dffab | 916f9880e97016fa9501c52df2cdb10ad89488ec | /Sample.py | 738afcb91b8f3940e95dec5810f61580661be9c7 | [] | no_license | jamunagithub/Sample | 61a027f7fd4d86e3224caa5a24b8bab6affd1a89 | 1539c78553d4fbab2880c7ecec169350ccf7468e | refs/heads/master | 2023-05-15T08:51:56.202120 | 2021-06-12T09:45:40 | 2021-06-12T09:45:40 | 376,249,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | print ("This is the sample GIT code")
| [
"jamunamec@gmail.com"
] | jamunamec@gmail.com |
bb72ca08e04c6993447c5ba8e25163b95a1d07f8 | cf1636cd2108ae86c5df5bfc1ae9448e3fd9dbf7 | /nash_test.py | 6e61f59897bf12985704bd9184416d09b3079de8 | [] | no_license | ryanpig/MultiagentSystem-FindNE | 991d5db6950eef11b41b400627f9b05b3710759b | 470b5f838d4b9296a91824024cbef412881abcf9 | refs/heads/master | 2021-04-12T04:22:24.828274 | 2019-02-08T10:41:49 | 2019-02-08T10:41:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,923 | py | import nash
import numpy as np
import datetime
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy.stats import norm
# Algorithms
def non_duplicate_print(eqs):
    """Print each distinct equilibrium exactly once.

    ``eqs`` yields (row_strategy, col_strategy) pairs of numpy arrays; a
    pair is a duplicate when both arrays match an already-printed pair
    element-wise.
    """
    seen_rows = []
    seen_cols = []
    for eq in eqs:
        is_dup = any(np.all(eq[0] == r) and np.all(eq[1] == c)
                     for r, c in zip(seen_rows, seen_cols))
        if not is_dup:
            seen_rows.append(eq[0])
            seen_cols.append(eq[1])
            print(eq)
def find_nash_support_enum(utilA , utilB):
    """Print the unique Nash equilibria found by support enumeration.

    utilA / utilB are the row and column players' payoff matrices.
    """
    random_game = nash.Game(utilA, utilB)
    eqs = random_game.support_enumeration()
    non_duplicate_print(eqs)
def find_nash_lemke_howson(utilA , utilB):
    """Find one Nash equilibrium with the Lemke-Howson algorithm.

    Prints the equilibrium strategies and returns ``a[2]``.
    NOTE(review): stock nashpy's lemke_howson returns only a 2-tuple of
    strategies; indexing ``a[2]`` (used elsewhere as a step count) implies
    a patched ``nash`` library — confirm which fork is installed.
    """
    random_game = nash.Game(utilA, utilB)
    a = random_game.lemke_howson(initial_dropped_label=0)
    print("NE", a[0],a[1])
    #print("row", a[3])
    #print("col", a[4])
    return a[2]
    #for eq in random_game.lemke_howson_enumeration():
    #    print(eq)
    #eqs = random_game.lemke_howson_enumeration()
    #non_duplicate_print(eqs)
def find_nash_vertex_enum(utilA , utilB):
    """Print every Nash equilibrium found by vertex enumeration."""
    random_game = nash.Game(utilA, utilB)
    for eq in random_game.vertex_enumeration():
        print(eq)
# generate m x n 2-player game
def generate_game(m = 2, n = 2, utility_max = 10, utility_min = -10):
    """Generate a random two-player m x n game.

    Payoffs are drawn uniformly from [utility_min, utility_max) and cast
    to int8 (truncating toward zero), one matrix per player. Both matrices
    are printed and returned as (row_payoffs, col_payoffs).
    """
    print("Rows:", m, "Cols:", n, "Max:", utility_max, "Min:", utility_min)
    payoffs = []
    for label in ("row", "col"):
        matrix = np.asarray(
            np.random.uniform(utility_min, utility_max, size=(m, n)),
            dtype=np.int8)
        print("utility of {} player".format(label))
        print(matrix)
        payoffs.append(matrix)
    return payoffs[0], payoffs[1]
# generate a single m x n game
def gen_single_game(m, n):
    """Run all three NE algorithms on one game and time them; optionally
    gather Lemke-Howson step-count distributions over many random games.

    NOTE(review): when ``flag_use_customized_game`` is set, the ``m``/``n``
    arguments are effectively ignored, and the hard-coded ``a``/``b``
    below override whatever was loaded or generated — confirm intent.
    """
    # Configuration
    flag_steps_distribusion = False
    flag_single_game = True
    flag_use_customized_game = True
    flag_save_game = False
    #outfileA = 'game_A_prison.npy'
    #outfileB = 'game_B_prison.npy'
    #outfileA = 'game_A_rock.npy'
    #outfileB = 'game_B_rock.npy'
    #outfileA = 'game_A.npy'
    #outfileB = 'game_B.npy'
    outfileA = 'game_A_53.npy'
    outfileB = 'game_B_53.npy'
    # outfileA = 'game_A_12.npy'
    # outfileB = 'game_B_12.npy'
    if flag_single_game == True:
        # Generate a new game or use customized game.
        if flag_use_customized_game == True:
            a = np.load(outfileA)
            b = np.load(outfileB)
            print(a)
            print(b)
        else:
            a, b = generate_game(m=m, n=n)
            if flag_save_game == True:
                # Save
                np.save(outfileA, a)
                np.save(outfileB, b)
        # NOTE(review): this unconditionally replaces the game chosen above.
        a = [[3,1,5],[2,2,4]]
        b = [[2,1,0],[2,3,1]]
        t1 = datetime.datetime.now()
        print("Support Enumeration")
        find_nash_support_enum(a, b)
        print("Lemke Howson")
        find_nash_lemke_howson(a, b)
        print("Vertex Enumeration")
        find_nash_vertex_enum(a, b)
        t2 = datetime.datetime.now()
        diff = t1 - t2
        tdiff_sec = abs(diff.total_seconds())
        print("Time cost for finding NE:", tdiff_sec)
    # Cal steps distribution
    if flag_steps_distribusion == True:
        count = 0
        arrs = []
        counts = []
        # Game sizes 5x5 .. 8x8, 300 random games each.
        for k in range(5, 9, 1):
            counts.clear()
            for i in range(300):
                a, b = generate_game(m=k, n=k)
                # Using customized game
                # a = [[0,-1,1],[1,0,-1],[-1,1,0]]
                # b = [[0,1,-1],[-1,0,1],[1,-1,0]]
                # a = [[3,0],[0,2]]
                # b = [[2,0],[0,3]]
                # Loading existing game
                # outfileA = 'game_A.npy'
                # outfileB = 'game_B.npy'
                # a = np.load(outfileA)
                # b = np.load(outfileB)
                # Run
                count = find_nash_lemke_howson(a, b)
                # Save
                # np.save(outfileA, a)
                # np.save(outfileB, b)
                print(count)
                # Discard outliers above 40 steps.
                if count <= 40:
                    counts.append(count)
            arrs.append(counts.copy())
            title = "Random size:" + str(k) + " x " + str(k)
            # plot_histogram(counts,title)
        plot_four_hist(arrs[0], arrs[1], arrs[2], arrs[3])
def plot_histogram(arrs, title):
    """Show a histogram of Lemke-Howson step counts.

    :Parameters:
        - arrs: sequence of step counts, one value per solved game.
        - title: plot title string.
    """
    # Bug fix: the axis labels were swapped. plt.hist puts the measured
    # values (steps) on the x axis and their frequencies on the y axis.
    plt.xlabel('Steps to find a NE')
    plt.ylabel('counts')
    plt.title(title)
    plt.hist(arrs, bins=20)
    plt.show()
def plot_four_hist(arr1, arr2, arr3, arr4):
    """Draw four step-count histograms (game sizes 5x5..8x8) in a 2x2
    grid, annotating each panel with the fitted normal's mu and sigma."""
    fig, grid = plt.subplots(2, 2, sharex='col', sharey='row')
    axes = [grid[0][0], grid[0][1], grid[1][0], grid[1][1]]
    datasets = [arr1, arr2, arr3, arr4]
    # Fit a normal to each dataset and pre-format the parameters.
    stats = []
    for data in datasets:
        mu, sigma = norm.fit(data)
        stats.append((format(mu, '.2f'), format(sigma, '.2f')))
    for ax, data in zip(axes, datasets):
        ax.hist(data, rwidth=0.8, color='sandybrown')
    plt.title("Find a NE in different Size in 300 rounds")
    for ax in axes:
        ax.set_ylabel("Counts")
    for ax, size, (mu, sigma) in zip(axes, (5, 6, 7, 8), stats):
        ax.set_title("Game Size {0} x {0} , u:{1}, s:{2}".format(size, mu, sigma))
    plt.show()
def plot_cal_time(arrs):
    """Plot running time vs. number of actions for the three NE algorithms.

    :Parameters:
        - arrs: list of three time series (support enumeration,
          Lemke-Howson, vertex enumeration), one entry per game size
          starting at 2 actions.

    NOTE(review): no plt.show() here — the figure only appears if the
    caller shows or saves it. Confirm that is intended.
    """
    # debug
    print(arrs[0])
    print(arrs[1])
    print(arrs[2])
    #
    len1 = len(arrs[0])
    # x axis: game sizes 2 .. len1+1
    x = range(2, len1+2)
    plt.title('Finding NE in a symmetric game')
    plt.xlabel('The number of actions')
    plt.ylabel('Running Time in second')
    #plt.xlim(xmin=2)
    a0, = plt.plot(x, arrs[0], 'r')
    a1, = plt.plot(x, arrs[1], 'g')
    a2, = plt.plot(x, arrs[2], 'b')
    plt.legend((a0, a1, a2), ('Support Enum','LH','Vertex Enum'))
    plt.tight_layout()
def gen_multi_games(m, n):
    """Time each NE algorithm on random symmetric games from 2x2 up to
    max(m, n) square, then plot the three timing curves.

    NOTE(review): a fresh random game is generated for every (algorithm,
    size) pair, so the three algorithms are timed on *different* games —
    confirm whether a shared game per size was intended.
    """
    max1 = max(m, n)
    len_algorithms = 3
    arr_tdiffs = []
    # Loop all algorithm
    for ind in range(len_algorithms):
        arr_tdiff = []
        for i in range(2, max1+1, 1):
            a, b = generate_game(m=i, n=i)
            # time start for the various algorithms
            t1 = datetime.datetime.now()
            # pick up one algorithm
            if ind == 0:
                find_nash_support_enum(a, b)
            elif ind == 1:
                find_nash_lemke_howson(a, b)
            elif ind == 2:
                find_nash_vertex_enum(a, b)
            t2 = datetime.datetime.now()
            # time end
            diff = t1 - t2
            tdiff_sec = abs(diff.total_seconds())
            arr_tdiff.append(tdiff_sec)
            print("Time cost for finding NE:", tdiff_sec)
        arr_tdiffs.append(arr_tdiff)
    # plotting
    plot_cal_time(arr_tdiffs)
# main -> find_nash -> random_game.support_enumeration()
# loop symmetric games from (2,2) to (10,10) actions
#gen_multi_games(12, 12)
# single game test
gen_single_game(5, 3)
| [
"ryanpig@gmail.com"
] | ryanpig@gmail.com |
b024aad18dfb436560ff9893287a0262f4a1f76d | d36546287721db2e97e0a4323e143163a14ce0b1 | /2016/19/an_elephant_named_joseph.py | 11737177b14154baaf7b8b0d01877cfaf5f14c2c | [
"Unlicense"
] | permissive | GeoffRiley/AdventOfCode | ca258edee05ad7a4b6e6db2e59b83e8879b48af0 | 567df9cb5645bc6cf4c22063a84a621039069311 | refs/heads/master | 2023-01-12T03:42:11.099541 | 2022-12-25T17:16:20 | 2022-12-25T17:16:20 | 225,139,440 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | from collections import deque
def an_elephant_named_joseph(inp, part1=True):
    """Solve Advent of Code 2016 day 19: which elf keeps all the presents.

    part1=True: each elf steals from the next elf in the circle
    (the classic Josephus problem with k=2).
    part1=False: each elf steals from the elf directly across the circle.
    """
    if part1:
        circle = deque(range(1, inp + 1))
        # Skip the current elf, then eliminate the neighbour they rob.
        while len(circle) > 1:
            circle.rotate(-1)
            circle.popleft()
        return circle[0]
    # Part 2: keep the circle split into two halves so the elf "across"
    # is always at an end of one deque — O(1) per elimination.
    left = deque(range(1, inp + 1))
    right = deque()
    while len(left) > len(right):
        right.append(left.pop())
    remaining = inp
    while remaining > 1:
        right.pop()                       # the elf across loses everything
        right.appendleft(left.popleft())  # current elf crosses the midpoint
        if len(right) - len(left) > 1:    # rebalance the two halves
            left.append(right.pop())
        remaining -= 1
    return right[0]
if __name__ == '__main__':
elf_count = 3_004_953
print(f'Day 19, part 1: {an_elephant_named_joseph(elf_count)}')
print(f'Day 19, part 2: {an_elephant_named_joseph(elf_count, False)}')
# Day 19, part 1: 1815603
# Day 19, part 2: 1410630
| [
"geoffr@adaso.com"
] | geoffr@adaso.com |
b20893d703e00928bbb7b86e4af0fa4b71d78cca | e569c41ec81382630693d3bc0a163c06a6a23d52 | /PythonProgramming/ICP3/Source/3.py | a3abe312b2325a5978f48b2d5217e9b574de48a9 | [] | no_license | Sravanthi-Gogadi/PythonDeeplearningCourse | 20a074763283c3bdbcbc3846576509c5e7a733e9 | 037e94f19362635dd6911cdbd70f60830ec51f5c | refs/heads/master | 2020-03-19T08:21:18.901163 | 2018-07-28T04:39:00 | 2018-07-28T04:39:00 | 136,197,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | from bs4 import BeautifulSoup
import urllib.request
import pandas as pd
import csv
# using tabulate to display the pandas dataframe
from tabulate import tabulate
# Read the web url into a variable
url = "https://en.wikipedia.org/wiki/List_of_state_and_union_territory_capitals_in_India"
# use urllib to open the url
res = urllib.request.urlopen(url)
plain_text = res
# Use beautiful soup to get the content of webpage
soup = BeautifulSoup(plain_text, "html.parser")
# Print the title of the web page
print(soup.find('title').string)
# Print all the anchor tags in the webpage
result_list = soup.findAll('a')
# Print the text of href
for i in result_list:
link = i.get('href')
print(link)
# Read the table from webpage
result_table = soup.findAll('table', {'class': 'wikitable sortable plainrowheaders'})
for tr in result_table:
table_data = tr.findAll('td')
table_head = tr.findAll('th')
# Print td and th
print(table_data, table_head)
# To display the list of union territories
table = soup.find_all('table')[1]
# using pandas object read the table and assign header
df = pd.read_html(str(table),header=0)
# display the output
print( tabulate(df[0], headers='keys', tablefmt='psql') )
| [
"sravanthigogadi@gmail.com"
] | sravanthigogadi@gmail.com |
08028b087d65af74817e4362ee88f3cb8f285acb | ece03546e09b3880222598a6b3955281341283ae | /Scrapy/DouyuPicture/DouyuPicture/settings.py | df93d3fce09547f77e38fb6afd1e5729ab471bba | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | haochen95/python_tutorial | e24bdf603183793680233499adcc94c10d7e90da | ec02114a40b6c42fc54a5504b79d56f4ed1abef8 | refs/heads/master | 2020-04-18T03:26:19.331623 | 2019-02-26T04:47:14 | 2019-02-26T04:47:14 | 167,198,727 | 0 | 3 | Apache-2.0 | 2019-01-28T07:20:59 | 2019-01-23T14:42:05 | Jupyter Notebook | UTF-8 | Python | false | false | 3,337 | py | # -*- coding: utf-8 -*-
# Scrapy settings for DouyuPicture project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'DouyuPicture'
SPIDER_MODULES = ['DouyuPicture.spiders']
NEWSPIDER_MODULE = 'DouyuPicture.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'DouyuPicture (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98',
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'DouyuPicture.middlewares.DouyupictureSpiderMiddleware': 543,
# }
IMAGE_STORE = "C:/Users/haoch/Desktop/Programming/Python/Scrapy_project01/Image/"
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'DouyuPicture.middlewares.DouyupictureDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'DouyuPicture.pipelines.DouyupicturePipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"haochen273@gmail.com"
] | haochen273@gmail.com |
1d13b98d948da4230d205362cbabd0696af80cf6 | 2c0e1786044c2818be20062a1c8f75990c61ae26 | /argparse/5_conflicting_options.py | 55541fc040e8ba860bdcc2e766342e231fbcbf87 | [] | no_license | jukim-greenventory/python-practice | 51802bf354c36049f41f539778f576d7e9560305 | 4a1bb17c08204edf6954196c29f6e40a88274ef6 | refs/heads/master | 2023-05-31T21:15:37.770161 | 2021-06-10T12:38:57 | 2021-06-10T12:38:57 | 375,691,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | """
Let’s introduce a third one, add_mutually_exclusive_group().
It allows for us to specify options that conflict with each other.
Let’s also change the rest of the program so that the new functionality makes more sense:
we’ll introduce the --quiet option, which will be the opposite of the --verbose one:
"""
import argparse
# Build the CLI: x**y with a choice of output verbosity.
parser = argparse.ArgumentParser(description="calculate X to the power of Y")
# --verbose and --quiet conflict, so they go in a mutually exclusive
# group: argparse rejects a command line that passes both.
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true")
group.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("x", type=int, help="the base")
parser.add_argument("y", type=int, help="the exponent")
args = parser.parse_args()
answer = args.x ** args.y
print("Running '{}'".format(__file__))
# Pick the output format based on the chosen verbosity level.
if args.quiet:
    print(answer)
elif args.verbose:
    print("{} to the power {} equals {}".format(args.x, args.y, answer))
else:
    print("{}^{} == {}".format(args.x, args.y, answer))
| [
"junseok.kim@greenventory.de"
] | junseok.kim@greenventory.de |
f3df497c0894663eb52e0d21dc7c21eb0ae41a48 | 63f61f5a8fab6dd89b557666317b3cdc2a27e5af | /partyDataDownload.py | 40340068dabadd97f8ed0a347cbf6ee125baf78d | [] | no_license | Shan-Herald-Agency-for-News/MMElection2020_Scripting | 55d048235d903fafd69355528dc6df419ec27f39 | ea079f64803a49d08599eac2db6d3924f76746c3 | refs/heads/main | 2023-01-23T02:54:19.052822 | 2020-11-23T10:55:54 | 2020-11-23T10:55:54 | 306,220,889 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,866 | py | import json
import requests
import os
import shutil
partyInfoFile = "shanRegionParties.json"
def flag_imageDownload():
    """Download every party's flag image into party/flag/.

    Reads the party list JSON from the module-level ``partyInfoFile`` and
    saves each flag as ``party/flag/<name_english>_<id>.jpg``.
    """
    with open(partyInfoFile) as jFile:
        data = json.load(jFile)
    for d in data['data']:
        attr = d['attributes']
        image_url = attr['flag_image']
        # Bug fix: the old code built the name with ``filename.join([...])``
        # and only reset ``filename`` on success, so after any failed
        # download the stale value became the join *separator* and every
        # later filename was corrupted. Plain concatenation avoids that.
        filename = attr['name_english'] + "_" + d['id'] + ".jpg"
        filepath = os.path.join("party/flag", filename)
        r = requests.get(image_url, stream=True)
        if r.status_code == 200:
            # Stream the raw body straight to disk without re-encoding.
            r.raw.decode_content = True
            with open(filepath, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
            print("Image successfully Downloaded: ", filename)
        else:
            print("Image Couldn\'t be retreived")
def policy_download():
    """Download every party's policy PDF into party/policy/.

    Reads the party list JSON from the module-level ``partyInfoFile`` and
    saves each policy as ``party/policy/<name_english>_<id>.pdf``.
    """
    with open(partyInfoFile) as jFile:
        data = json.load(jFile)
    for d in data['data']:
        attr = d['attributes']
        file_url = attr['policy']
        # Bug fix: ``filename.join([...])`` reused the previous filename as
        # the join separator whenever a download failed (reset happened only
        # on the success path), corrupting every subsequent filename.
        filename = attr['name_english'] + "_" + d['id'] + ".pdf"
        filepath = os.path.join("party/policy", filename)
        r = requests.get(file_url, stream=True)
        if r.status_code == 200:
            # Stream the raw body straight to disk without re-encoding.
            r.raw.decode_content = True
            with open(filepath, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
            print("Image successfully Downloaded: ", filename)
        else:
            print("Image Couldn\'t be retreived")
partyInfoFile = "shanRegionParties.json"
def seal_imageDownload():
    """Download every party's seal image into party/seal/.

    Reads the party list JSON from the module-level ``partyInfoFile`` and
    saves each seal as ``party/seal/<name_english>_<id>.jpg``.
    """
    with open(partyInfoFile) as jFile:
        data = json.load(jFile)
    for d in data['data']:
        attr = d['attributes']
        image_url = attr['seal_image']
        # Bug fix: ``filename.join([...])`` reused the previous filename as
        # the join separator whenever a download failed (reset happened only
        # on the success path), corrupting every subsequent filename.
        filename = attr['name_english'] + "_" + d['id'] + ".jpg"
        filepath = os.path.join("party/seal", filename)
        r = requests.get(image_url, stream=True)
        if r.status_code == 200:
            # Stream the raw body straight to disk without re-encoding.
            r.raw.decode_content = True
            with open(filepath, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
            print("Image successfully Downloaded: ", filename)
        else:
            print("Image Couldn\'t be retreived")
# seal_imageDownload()
# flag_imageDownload()
# policy_download()
| [
"noernova666@gmail.com"
] | noernova666@gmail.com |
b0de702648187305cb55de4c208e37e3f49e804d | abd7a71f62eb1f85dd8725e2c5ed5f2ff9f43fbf | /tuples_comparing.py | 7a45e1b83fd00b5cffdd9c9883bc405af5b5b1c2 | [] | no_license | danielrhunt/python | 3f97df39f01ac9ef58d0c8d8aaf47a2c6abaeec8 | 6a5e5057b18dc55e317ca3027440765b409b1a43 | refs/heads/master | 2020-04-21T02:51:04.719043 | 2019-03-12T20:22:46 | 2019-03-12T20:22:46 | 169,267,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | '''COMPARING TUPLES'''
'''comparison operators work on tuples, just like other sequences
Python starts by comparing the first element in each sequence
if they are equal, it goes on to the next element, and then on again, and again, until it finds elements that differ
subsequent elements are not considered (even if they are really big)'''
'''the SORT FUNCTION works the same way: it sorts primarily by first element, but in case of a tie, it moves onto the next element until the tie is broken
this feature lends itself to a pattern called DSU: DECORATE, SORT, and UNDECORATE'''
example = "but soft what light in yonder window breaks"
print(example)
print(type(example))
words = example.split()  # split the sentence into a list of words (the DSU input)
tt = list()  # create empty list
for word in words:
    # DECORATE: build a list of tuples, each word preceded by its length
    tt.append((len(word), word))  # add length of word, and then the word
print(tt)  # prints list in original order, each word preceded by its length
print(type(tt))
# SORT: compares the first element (length) first, and only considers the second element to break ties
# reverse = True tells sort method to go in decreasing order
tt.sort(reverse = True)  # reverse number sort (i.e. starts at highest)
print(tt)  # prints list in number sorted order (highest to lowest)
print(type(tt))
# UNDECORATE: traverse the list of tuples and build a list of words in descending order of length
# equal-length words end up in reverse alphabetical order (the tie-break above, reversed)
res = list()  # create another empty list
for length, word in tt:  # use two mnemonically named iteration variables
    res.append(word)  # append the words to res list
print(res)  # should just print the words
print(type(res))
| [
"noreply@github.com"
] | noreply@github.com |
63c131575e15d03798d9cea07eca0474a3a6be3f | 4434118abceaad2388fb8b2e989154fea0e6c07e | /verletPygame.py | cb336f18cdd2b5bf2697ec79de23145d88a00782 | [] | no_license | Kelloggs/verletPygame | bc599803efa34ab0cbecf48be4f17c9545a91c35 | 146789e413313fa022e618f9184cf1270b7df682 | refs/heads/master | 2021-01-06T20:41:51.519263 | 2012-05-29T11:11:49 | 2012-05-29T11:11:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,857 | py | """
Verlet integration scheme for a deformable object using a mesh-based
Mass Spring System. This has just been written to test pygame.
Require: numpy, pygame
TODO:
- add less naive collision handling and response between objects
Author: Jens Cornelis
"""
from numpy import array, linalg, cross, dot
import math
import pygame
import sys
import time
#globalsb
world_size = 1000,700
world_rect = pygame.Rect(0,0, world_size[0], world_size[1])
num_iterations = 10 #iteration for constraint relaxation
pickedParticle = None
mousePosition = 0,0
frames = 25
drawVelocities = True
paused = False
class Material:
    """Physical parameters shared by all particles of one deformable body."""
    def __init__(self, stiffness=0.3, friction=0.1):
        # stiffness: fraction of constraint error corrected per relaxation
        # pass, in (0, 1]; friction: damping applied on boundary contact.
        self.stiffness = stiffness
        self.friction = friction
class MSSObject:
    """A deformable body: a mesh of particles connected by distance
    constraints (springs), drawn onto a pygame surface."""

    def __init__(self, vertices, indexedSprings, screen, material):
        self.screen = screen
        self.particles = []
        self.material = material
        # set up particles, one per mesh vertex
        for vertex in vertices:
            self.particles.append(Particle(vertex, screen, self))
        # default total mass: one unit per particle
        self.mass = len(self.particles)
        # set up springs/constraints from (index_a, index_b) pairs
        self.constraints = []
        for spring in indexedSprings:
            self.constraints.append(Constraint(self.particles[spring[0]], self.particles[spring[1]]))
        # initial draw
        self.draw()

    def setMass(self, mass):
        """Set the total body mass, distributed evenly over all particles."""
        self.mass = mass
        partialMass = mass / float(len(self.particles))
        for particle in self.particles:
            particle.mass = partialMass

    def get_rect(self):
        """Return the axis-aligned bounding rectangle of all particles."""
        # Start min/max at the opposite world extremes and tighten.
        x_min, y_min = world_size[0], world_size[1]
        x_max, y_max = 0, 0
        for particle in self.particles:
            if particle.x[0] > x_max:
                x_max = particle.x[0]
            if particle.x[0] < x_min:
                x_min = particle.x[0]
            if particle.x[1] > y_max:
                y_max = particle.x[1]
            if particle.x[1] < y_min:
                y_min = particle.x[1]
        return pygame.Rect(x_min, y_min, x_max - x_min, y_max - y_min)

    def update(self):
        """Refresh every particle's bounding box after integration."""
        for particle in self.particles:
            particle.update()

    def draw(self):
        """Draw the spring edges, then the particle sprites on top."""
        for constraint in self.constraints:
            pos1 = (constraint.p1.x[0], constraint.p1.x[1])
            pos2 = (constraint.p2.x[0], constraint.p2.x[1])
            pygame.draw.aaline(self.screen, (0,0,255), pos1, pos2)
        for particle in self.particles:
            particle.draw()
class DeformableSphere(MSSObject):
    """Mass-spring body with a spherical collision proxy (center+radius).

    NOTE(review): ``center`` is never updated in this class as the body
    deforms or moves — the collision proxy can drift away from the actual
    particle positions. Confirm it is maintained elsewhere.
    """
    def __init__(self, center, radius, vertices, indexedSprings, screen, material=Material()):
        MSSObject.__init__(self, vertices, indexedSprings, screen, material)
        self.radius = radius
        self.center = center

    def movePointOut(self, point):
        """Return the translation pushing ``point`` out of this body, or
        (0,0) when the point is outside.

        The body's extent along the center->point direction is taken as
        the largest projection of any particle onto that direction.
        NOTE(review): divides by |point - center| — a point exactly at the
        center would raise a ZeroDivisionError; confirm callers avoid it.
        """
        direction = point - self.center
        directionLength = linalg.norm(direction)
        normalizedDirection = direction/directionLength
        distMax = 0
        for particle in self.particles:
            tmp = dot(normalizedDirection, particle.x - self.center)
            if tmp > distMax:
                distMax = tmp
        if linalg.norm(direction) < distMax:
            # Scale the offset so the point lands on the (rest) radius.
            diff = (self.radius - directionLength)/directionLength
            return direction*diff
        else:
            return (0,0)
class DeformableCube(MSSObject):
    """Deformable cube placeholder — construction is not implemented yet.

    Instantiating raises NotImplementedError after the base mesh setup.
    """
    def __init__(self, vertices, indexedSprings, screen, material=None):
        # Bug fix: the original raised ``Error(...)``, an undefined name, so
        # the intended "not implemented" signal surfaced as a NameError.
        # Also avoid the shared mutable default Material() instance.
        if material is None:
            material = Material()
        MSSObject.__init__(self, vertices, indexedSprings, screen, material)
        raise NotImplementedError("Not yet implemented")

    def movePointOut(self, point):
        # No cube geometry yet: never displaces a point.
        return (0,0)
class Constraint:
    """Distance spring between two particles.

    The rest length is fixed at construction time from the particles'
    initial separation; the solver reads p1/p2 positions each iteration.
    """
    def __init__(self, p1, p2):
        self.p1, self.p2 = p1, p2
        separation = p1.x - p2.x
        self.restlength = linalg.norm(separation)
class Particle:
    """Point mass rendered as a sprite.

    State is current position ``x`` and previous position ``oldx`` — the
    pair implicitly encodes velocity for Verlet integration.
    """
    def __init__(self, x, screen, parentObject, mass = 1.0):
        #set up physical quantities
        self.x = x
        # NOTE(review): oldx aliases the *same* array object as x until the
        # first verlet step rebinds it — confirm this is intended.
        self.oldx = x
        self.force = array([0., 0.])
        self.mass = mass
        # Sprite is loaded from disk once per particle.
        self.image = pygame.image.load("sphere.png")
        self.picked = False
        self.parentObject = parentObject
        self.velocity = array([0,0])
        #set bounding volume and position (rect is [left, top, w, h])
        self.bv = self.image.get_rect()
        self.radius = self.bv[2]/2.0
        self.bv[0] = self.x[0] - self.radius
        self.bv[1] = self.x[1] - self.radius
        #initial drawing
        self.screen = screen
        self.draw()

    def draw(self):
        """Blit the sprite; optionally overlay the velocity vector."""
        self.screen.blit(self.image, self.bv)
        if drawVelocities:
            # Red line from the particle along 10% of its velocity.
            pygame.draw.aaline(self.screen, (255,0,0), self.x, self.x + 0.1*self.velocity)

    def update(self):
        """Re-anchor the bounding box to the current position."""
        self.bv[0] = self.x[0] - self.radius
        self.bv[1] = self.x[1] - self.radius
def computeForces(objects):
    """Reset every particle's force accumulator to gravity alone.

    Gravity is 9.81 scaled by 100 to convert to pixel units, acting in
    +y (screen-down).
    """
    for body in objects:
        for p in body.particles:
            p.force = array([0.0, p.mass * 9.81 * 100.0])
def computeFriction(objects):
    """Damp tangential motion of particles pressing against a world wall.

    Verlet trick: rewriting ``oldx`` changes the implied velocity. The
    damping scales with penetration depth and the body's friction
    coefficient (assumes depth*friction stays below 1 — TODO confirm).
    """
    for obj in objects:
        for particle in obj.particles:
            friction = particle.parentObject.material.friction
            # right wall: damp vertical (tangential) motion
            if not particle.x[0] < (world_size[0] - particle.radius):
                delta = particle.x[1] - particle.oldx[1]
                depth = math.fabs(world_size[0] - particle.radius - particle.x[0])
                particle.oldx[1] = particle.x[1] - depth*friction*delta
            # left wall
            if not (particle.x[0] > particle.radius):
                delta = particle.x[1] - particle.oldx[1]
                depth = math.fabs(particle.radius - particle.x[0])
                particle.oldx[1] = particle.x[1] - depth*friction*delta
            # floor: damp horizontal motion
            if not particle.x[1] < (world_size[1] - particle.radius):
                delta = particle.x[0] - particle.oldx[0]
                depth = math.fabs(world_size[1] - particle.radius - particle.x[1])
                particle.oldx[0] = particle.x[0] - depth*friction*delta
            # ceiling
            if not (particle.x[1] > particle.radius):
                delta = particle.x[0] - particle.oldx[0]
                depth = math.fabs(particle.radius - particle.x[1])
                particle.oldx[0] = particle.x[0] - depth*friction*delta
def verlet(h, objects):
    """Advance all particles one time step of size h (seconds) with
    Verlet integration.

    Position update: x' = x + (x - x_old) + a*h^2. Afterwards the
    velocity is reconstructed as a central difference of positions and
    stored on the particle (used only for drawing).
    """
    for body in objects:
        for p in body.particles:
            prev = p.oldx
            # snapshot the pre-step position; it becomes oldx afterwards
            snapshot = array([p.x[0], p.x[1]])
            accel = p.force / p.mass
            p.x += snapshot - prev + accel * h * h
            # central difference: oldx still holds the position two steps back
            p.velocity = (p.x - p.oldx) / (2.0 * h)
            p.oldx = snapshot
def satisfyConstraints(objects):
    """Iteratively project particle positions back onto all constraints.

    Runs num_iterations relaxation passes. Each pass, per object:
    clamp particles to the world rectangle, enforce distance
    constraints scaled by material stiffness, drag the mouse-picked
    particle to the cursor, and push particles out of other objects.
    Operates in place on particle positions; reads the module globals
    num_iterations, world_size, pickedParticle and mousePosition.
    """
    for val in range(num_iterations):
        for obj in objects:
            #check and solve world collisions: clamp each coordinate so the
            #particle's bounding circle stays inside the world rectangle
            for particle in obj.particles:
                particle.x[0] = min(max(particle.x[0], particle.radius), world_size[0] - particle.radius)
                particle.x[1] = min(max(particle.x[1], particle.radius), world_size[1] - particle.radius)
            #solve constraints deformable object: move both endpoints half the
            #error each, scaled by stiffness
            for constraint in obj.constraints:
                p1 = constraint.p1
                p2 = constraint.p2
                delta = p2.x - p1.x
                # NOTE(review): if both endpoints coincide, deltalength is 0
                # and the next division raises/produces NaN — confirm the
                # solver can never collapse a constraint to zero length.
                deltalength = linalg.norm(delta)
                diff = (deltalength - constraint.restlength)/deltalength
                #make material stiffness linear to solver iterations and apply to
                #particle positions (so the effective stiffness after
                #num_iterations passes matches the material value)
                k = 1 - (1 - obj.material.stiffness)**(1.0/float(num_iterations))
                p1.x += delta*0.5*diff*k
                p2.x -= delta*0.5*diff*k
            #constraint for picked particle to act on mouse action: snap the
            #picked particle exactly onto the cursor position
            #(runs once per object per pass; redundant repeats are harmless)
            if pickedParticle:
                delta = pickedParticle.x - mousePosition
                deltalength = linalg.norm(delta)
                if deltalength > 0:
                    diff = (0 - deltalength)/deltalength
                    pickedParticle.x += delta*diff
            #inter-object collision: push any particle that landed inside
            #another object's bounding rect back out of it
            for particle in obj.particles:
                for obj2 in objects:
                    if obj == obj2:
                        continue
                    else:
                        if obj2.get_rect().collidepoint(particle.x):
                            particle.x += obj2.movePointOut(particle.x)
def create2DBall(screen, center, radius, particles, material):
    """Convenience method to generate a ball mesh.

    Builds one center particle plus `particles` points evenly spaced on
    the circle of the given radius, connected by rim edges and by
    spokes from the center, and wraps them in a DeformableSphere.
    """
    pts = [center]
    step = 360. / particles
    for i in range(particles):
        theta = (i * step * math.pi) / 180.
        pts.append(array([center[0] + radius * math.cos(theta),
                          center[1] + radius * math.sin(theta)]))
    last = len(pts) - 1
    edges = []
    # rim edge to the next point plus a spoke to the center, for each
    # perimeter point except the last
    for i in range(1, last):
        edges.append((i, i + 1))
        edges.append((0, i))
    # close the rim and add the final spoke
    edges.append((1, last))
    edges.append((0, last))
    return DeformableSphere(center, radius, pts, edges, screen, material)
def create2DCube(screen, rect, material):
    """Convenience method to generate a 4-particle quad mesh.

    rect is (x, y, width, height); the four corners are linked along
    the edges and across both diagonals so the quad resists shearing.
    """
    x, y = rect[0], rect[1]
    w, h = rect[2], rect[3]
    corners = [array([x, y]),
               array([x, y + h]),
               array([x + w, y]),
               array([x + w, y + h])]
    links = (0, 1), (1, 2), (2, 3), (3, 0), (0, 2), (1, 3)
    return DeformableCube(corners, links, screen, material)
def main():
    """Set up pygame, build the demo objects and run the simulation loop.

    Handles mouse picking (drag a particle), 'p' to pause and 's' to
    save a screenshot. Each frame: apply forces and friction, take one
    Verlet step, relax constraints, then redraw everything.
    """
    global pickedParticle, mousePosition, mouseClickPosition, paused
    #initialization of pygame and window
    pygame.init()
    screen = pygame.display.set_mode(world_size)
    #setting up objects
    objects = []
    mat1 = Material(0.9)
    mat2 = Material(0.6)
    sphere = create2DBall(screen, array([300., 300.]), 100., 8, mat2)
    objects.append(sphere)
    # sphere2 = create2DBall(screen, array([100., 100.]), 80., 7, mat1)
    # objects.append(sphere2)
    clock = pygame.time.Clock()
    #main simulation loop
    while True:
        #set clock of pygame to predefined frames for equal timesteps
        clock.tick(frames)
        for event in pygame.event.get():
            #stop the program if user wants us to
            if event.type == pygame.QUIT:
                sys.exit()
            #flag particle as picked if user clicked on it
            if event.type == pygame.MOUSEBUTTONDOWN:
                for obj in objects:
                    for particle in obj.particles:
                        if particle.bv.collidepoint(event.pos):
                            particle.picked = True
                            pickedParticle = particle
            if event.type == pygame.MOUSEMOTION:
                mousePosition = event.pos
            if event.type == pygame.MOUSEBUTTONUP:
                #release the dragged particle, if any
                if pickedParticle:
                    pickedParticle.picked = False
                    pickedParticle = None
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_p:
                    paused = not paused
                if event.key == pygame.K_s:
                    pygame.image.save(screen, "screenshot" + str(time.time()) + ".jpg")
        #while paused, keep pumping events but skip physics and drawing
        if paused:
            continue
        #clear screen with background color
        screen.fill((255,255,255))
        #compute external forces
        computeForces(objects)
        computeFriction(objects)
        #compute timestep according to frame set py pygame.clock
        # NOTE(review): clock.tick(frames) targets `frames` fps, so the real
        # step is 1.0/frames seconds; frames/1000. only matches that when
        # frames is close to sqrt(1000) (~31.6). Presumably a deliberate
        # tuning constant — confirm before changing.
        h=frames/1000.
        #do integration step and satisfy constraints
        verlet(h, objects)
        satisfyConstraints(objects)
        #update and draw particles
        for obj in objects:
            obj.update()
            obj.draw()
        #make everything visible
        pygame.display.flip()
#########################################
# Script entry point: start the simulation when run directly.
if __name__ == '__main__':
    main()
######################################### | [
"mail@jenscornelis.de"
] | mail@jenscornelis.de |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.