blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
27ee28679d9eb1bffb2f1e431d2bc7787ec15af2 | bb37226fe4f918cec1b5d3b9f3c2abd227c740fb | /library/setup.py | da6190df6b671fac3be9afc2c85b4235220dd3c5 | [
"BSD-2-Clause"
] | permissive | piecafe/rpi_ws281x-python | e942f44b77cb9c61f10a4777b1140463020b5ab5 | 7c7513aec0aa60dd2d8c3d8fdcb2e8bba6fa3ef6 | refs/heads/master | 2021-01-23T07:16:31.825159 | 2017-08-18T12:54:47 | 2017-08-18T12:54:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | #!/usr/bin/env python
# Python wrapper for the rpi_ws281x library.
# Authors:
# Phil Howard (phil@pimoroni.com)
# Tony DiCola (tony@tonydicola.com)
from setuptools import setup, find_packages, Extension
from setuptools.command.build_py import build_py
import subprocess
class CustomInstallCommand(build_py):
    """Customized build_py command that compiles the native ws281x library.

    Runs ``make`` before the regular ``build_py`` step so the shared
    library is available when the C extension is linked.
    """
    def run(self):
        print("Compiling ws281x library...")
        proc = subprocess.Popen(["make"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # communicate() waits for make to finish and drains both pipes;
        # the original only read stderr and never checked the exit status,
        # so a failed compile produced a silently broken install.
        stdout, stderr = proc.communicate()
        print(stderr.decode("utf-8", "replace"))
        if proc.returncode != 0:
            raise RuntimeError("make failed with exit code %d" % proc.returncode)
        build_py.run(self)
# Package metadata and native-extension definition.  The C source
# (rpi_ws281x_wrap.c) is SWIG-generated and links against the libws2811
# archive that the Makefile places in lib-built/.
setup(name = 'rpi_ws281x',
      version = '3.0.1',
      author = 'Jeremy Garff <jer@jers.net>, Phil Howard <phil@pimoroni.com>',
      author_email = 'jer@jers.net',
      description = 'Userspace Raspberry Pi PWM/PCM/SPI library for SK6812 and WS281X LEDs.',
      license = 'MIT',
      url = 'https://github.com/pimoroni/rpi_ws281x-python/',
      # Hook the Makefile-driven native build into the normal build_py step.
      cmdclass = {'build_py':CustomInstallCommand},
      packages = ['neopixel', 'rpi_ws281x'],
      ext_modules = [Extension('_rpi_ws281x',
                               sources=['rpi_ws281x_wrap.c'],
                               include_dirs=['lib/'],
                               library_dirs=['lib-built/'],
                               libraries=['ws2811'])])
| [
"phil@gadgetoid.com"
] | phil@gadgetoid.com |
2b694d9643bf16ae008b20db0368e0915d9f8158 | 0674b9d8a34036a6bbe2052e1cae0eee9a44554b | /Baekjoon/2941.py | ee51dc09161855679d9cb3d0955c5af4efebcde3 | [] | no_license | timebird7/Solve_Problem | 02fb54e90844a42dc69a78afb02cc10a87eda71c | 2d54b6ecbe3edf9895fd8303cbca99b3f50f68f3 | refs/heads/master | 2020-04-14T23:37:15.354476 | 2019-04-15T14:32:41 | 2019-04-15T14:32:41 | 164,208,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py |
# Baekjoon 2941: count the letters of a word written with the Croatian
# alphabet, where each multi-character token below is one letter.

def count_croatian_letters(word):
    """Return the number of Croatian-alphabet letters in *word*.

    Each multi-character letter is collapsed to a single digit first.
    The order matters: 'dz=' must be handled before 'z=' and 'd-'.
    """
    # str.replace already replaces every occurrence, so one call per token
    # suffices -- the original `while s.find(...) >= 0` loops were redundant.
    for digit, token in enumerate(
            ('c=', 'c-', 'dz=', 'd-', 'lj', 'nj', 's=', 'z='), start=1):
        word = word.replace(token, str(digit))
    return len(word)


if __name__ == '__main__':
    print(count_croatian_letters(input()))
| [
"timebird7@gmail.com"
] | timebird7@gmail.com |
fc3fdfec5df7869e6e4c00786c0cb1712f5f7405 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p2DJ/New/program/pyquil/startPyquil191.py | 5d80d7ba626c297f6554e4295cdaa0a9f0341404 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | # qubit number=2
# total number=9
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
# NOTE(review): this connection object is never used below
# (execution goes through get_qc instead) -- candidate for removal.
conn = QVMConnection()

def make_circuit() -> Program:
    """Build the fixed 2-qubit test circuit.

    The sequence contains pairs of adjacent SWAP(1,0) gates that cancel
    each other; the trailing comments preserve the generator's original
    gate indices.
    """
    prog = Program()  # circuit begin
    prog += H(0)  # number=1
    prog += SWAP(1,0)  # number=2
    prog += SWAP(1,0)  # number=3
    prog += X(1)  # number=5
    prog += RX(-2.73004401596953,1)  # number=6
    prog += Z(1)  # number=4
    prog += SWAP(1,0)  # number=7
    prog += SWAP(1,0)  # number=8
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Tally measurement outcomes.

    Args:
        bitstrings: iterable of bitstring labels (e.g. '01').

    Returns:
        dict mapping each distinct bitstring to its occurrence count.
    """
    d = {}
    for l in bitstrings:
        # dict.get collapses the original if/else "is the key new?" branches.
        d[l] = d.get(l, 0) + 1
    return d
if __name__ == '__main__':
    prog = make_circuit()
    # NOTE(review): a 2-qubit circuit is run on a '1q-qvm' backend --
    # confirm the intended QVM topology.
    qvm = get_qc('1q-qvm')

    results = qvm.run_and_measure(prog,1024)
    # Transpose per-qubit sample arrays into per-shot bitstrings.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # Persist the outcome histogram next to the other experiment CSVs.
    writefile = open("../data/startPyquil191.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
be9f2bb0defc1f2082a178d6bb37135cf836224c | 5b71e2952f34dd3bb20148874d952fee06d31857 | /app/mf/crud/migrations/0107_auto_20210208_2328.py | 37f44934ff8cd007fd1f0c5693956ab187222266 | [] | no_license | isela1998/facebook | a937917cddb9ef043dd6014efc44d59d034102b1 | a0f2f146eb602b45c951995a5cb44409426250c5 | refs/heads/master | 2023-07-18T02:14:50.293774 | 2021-08-28T03:26:06 | 2021-08-28T03:26:06 | 400,613,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | # Generated by Django 3.1.1 on 2021-02-09 03:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; widens/relabels the 'provider' field on
    # CancelledInvoices (max_length 255, Spanish verbose name).

    dependencies = [
        ('crud', '0106_auto_20210208_1919'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cancelledinvoices',
            name='provider',
            field=models.CharField(max_length=255, verbose_name='Proveedor/Cliente'),
        ),
    ]
| [
"infantefernandezisela@gmail.com"
] | infantefernandezisela@gmail.com |
e46899246ef5d5ccc4e48374f590c0591205b6dc | 567e89b21aca23db5f14032889fdd1cb7c7700f7 | /Ia de morpion 1.py | 85df14cede381f6300d00965b3de944a959dfe20 | [] | no_license | MarcPartensky/Python-2018 | 7ab83d42eb28b34bed88fc6fb77892e62094dd8d | 27d2a57a6b6d6cdaa883fd2ce55e1c5eefd13ccc | refs/heads/master | 2020-04-17T13:12:41.448439 | 2019-01-19T23:55:05 | 2019-01-19T23:55:05 | 166,605,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,487 | py | import math
import numpy
# NOTE(review): this block is broken as written and cannot run:
#   - numpy is imported as `numpy` above but used here as `np` (NameError);
#   - np.zeros([9][8][7]...) indexes into the list [9] instead of passing a
#     shape tuple such as (9, 8, 7, ...);
#   - `testwin` is called below but only defined further down the file, so
#     module execution fails before reaching the definition;
#   - `Enjeux[0, testwin(Grille)]` indexes a plain list with a tuple
#     (TypeError) -- Enjeux would need to be a 2-D array;
#   - `Avantage` is read before it is ever assigned.
M=[0]*9
Grille = [0]*9
# Table intended to hold an evaluated advantage for every move sequence.
Liste = np.zeros([9][8][7][6][5][4][3][2][1])
# Payoff pairs per ply: [value when no win, value when a win is detected].
Enjeux = [[9,-5],
          [-9,8],
          [7,-4],
          [-8,6],
          [5,-3],
          [-7,4],
          [3,-2],
          [-6,2],
          [1,-1]]
# Enumerate every move ordering (9! sequences), accumulating a weighted
# advantage whose weight halves at each deeper ply (the /2**k factors).
for a in range(0,9):
    M[0]=a
    Avantage=Avantage+Enjeux[0,testwin(Grille)]/2**0
    for b in range(0,8):
        M[1]=b
        Avantage=Avantage+Enjeux[1,testwin(Grille)]/2**1
        for c in range(0,7):
            M[2]=c
            Avantage=Avantage+Enjeux[2,testwin(Grille)]/2**2
            for d in range(0,6):
                M[3]=d
                Avantage=Avantage+Enjeux[3,testwin(Grille)]/2**3
                for e in range(0,5):
                    M[4]=e
                    Avantage=Avantage+Enjeux[4,testwin(Grille)]/2**4
                    for f in range(0,4):
                        M[5]=f
                        Avantage=Avantage+Enjeux[5,testwin(Grille)]/2**5
                        for g in range(0,3):
                            M[6]=g
                            Avantage=Avantage+Enjeux[6,testwin(Grille)]/2**6
                            for h in range(0,2):
                                M[7]=h
                                Avantage=Avantage+Enjeux[7,testwin(Grille)]/2**7
                                # Only one slot remains for the final move.
                                i=1
                                M[8]=i
                                Avantage=Avantage+Enjeux[8,testwin(Grille)]/2**8
                                # NOTE(review): lists are not valid indices;
                                # presumably Liste[tuple(M)] was intended.
                                Liste[M]=Avantage
Mouvements=[0]*9
Avantage=0
def testwin(grille):
if (grille[0]==grille[1]) and (grille[0]==grille[2]) and (grille[0]!=0):
return 1
elif (grille[3]==grille[4]) and (grille[3]==grille[5]) and (grille[3]!=0):
return 1
elif (grille[6]==grille[7]) and (grille[6]==grille[8]) and (grille[6]!=0):
return 1
elif (grille[0]==grille[3]) and (grille[0]==grille[6]) and (grille[0]!=0):
return 1
elif (grille[1]==grille[4]) and (grille[1]==grille[7]) and (grille[1]!=0):
return 1
elif (grille[2]==grille[5]) and (grille[2]==grille[8]) and (grille[2]!=0):
return 1
elif (grille[0]==grille[4]) and (grille[0]==grille[8]) and (grille[0]!=0):
return 1
elif (grille[2]==grille[4]) and (grille[2]==grille[6]) and (grille[2]!=0):
return 1
else
return 0
| [
"marc.partensky@gmail.com"
] | marc.partensky@gmail.com |
1c34d14ba96c3625da9c663dd7e84a221f2a03a4 | 025660ec946f46cb7458abb12ce32fb4a2e437bb | /event/arguments/loss.py | 60b776d0dcc85bde9d1d031e11f8f8068f8c034e | [
"Apache-2.0"
] | permissive | edvisees/DDSemantics | af22070a239ac227694b87218db5e9c2021ac57b | 9044e4afa6f9d6d7504de028633295f30679278d | refs/heads/master | 2022-12-01T10:50:32.221836 | 2020-06-18T04:29:44 | 2020-06-18T04:29:44 | 283,858,855 | 0 | 0 | Apache-2.0 | 2020-07-30T19:19:55 | 2020-07-30T19:19:55 | null | UTF-8 | Python | false | false | 224 | py | import torch
def cross_entropy(y_hat, y):
    """Binary cross-entropy for a single example.

    NOTE(review): the branch tests ``y_hat`` as if it were the gold label
    and treats ``y`` as the predicted probability -- confirm the intended
    argument order against the callers before renaming anything.
    """
    # Debug prints removed: they flooded stdout on every loss evaluation.
    if y_hat == 1:
        return -torch.log(y)
    else:
        return -torch.log(1 - y)
def hinge(y_hat, y):
    """Hinge loss ``max(0, 1 - y_hat * y)`` for a label y in {-1, +1}.

    The original called ``np.max`` although numpy was never imported in
    this module (NameError), and ``np.max(0, ...)`` passes the loss value
    as the *axis* argument anyway; the builtin ``max`` implements the
    intended scalar formula.
    """
    return max(0, 1 - y_hat * y)
| [
"hunterhector@gmail.com"
] | hunterhector@gmail.com |
33f283df6d6a2614a08df515391e4bc8357670c2 | 9c5b9e4197bfdcf74cec45dcca47d9caa3317a4b | /main.py | 06e1a299d1af620fd21b58c3165b1705a3f4d14e | [] | no_license | rhuidean/terrain_data | 614c1a8f5420538b74e2e72bc20e19ccce7e565f | 596b2e3993d7b445c8115cd316349a026ad63dae | refs/heads/master | 2021-01-25T06:36:11.245301 | 2017-06-07T05:21:27 | 2017-06-07T05:21:27 | 93,593,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | #!/usr/bin/python
""" Complete the code in ClassifyNB.py with the sklearn
Naive Bayes classifier to classify the terrain data.
The objective of this exercise is to recreate the decision
boundary found in the lesson video, and make a plot that
visually shows the decision boundary """
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture, output_image
from classify_nb import classify
import numpy as np
import pylab as pl
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow" points mixed
### in together--separate them so we can give them different colors in the scatterplot,
### and visually identify them
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
# You will need to complete this function imported from the ClassifyNB script.
# Be sure to change to that code tab to complete this quiz.
clf = classify(features_train, labels_train)
### draw the decision boundary with the text points overlaid
prettyPicture(clf, features_test, labels_test)
output_image("test.png", "png", open("test.png", "rb").read())
| [
"rhuidean06@gmail.com"
] | rhuidean06@gmail.com |
6572a45e896e65f747dd399131d451027c8a0f8e | bfb113c3076f5b0570953583e7a2321c774d73ea | /Classes/class_example106.py | fb14ae80d41b7c5e58d8fbd7205723b8647d23bc | [] | no_license | gsudarshan1990/Training_Projects | 82c48d5492cb4be94db09ee5c66142c370794e1c | 2b7edfafc4e448bd558c034044570496ca68bf2d | refs/heads/master | 2022-12-10T15:56:17.535096 | 2020-09-04T06:02:31 | 2020-09-04T06:02:31 | 279,103,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | #This python program is about static method
class Rectangle:
    def area(length, breadth):
        """Return the area of a length x breadth rectangle."""
        return length*breadth

# Older spelling: wrap an existing function in staticmethod() after the
# class body -- equivalent to using the @staticmethod decorator.
Rectangle.area=staticmethod(Rectangle.area)
print(Rectangle.area(10,12))

class Square:
    def area(side):
        """Return the area of a square with the given side."""
        return side**2
    # staticmethod() applied inside the class body; same effect.
    area=staticmethod(area)

print(Square.area(7))
"sudarshan2009@live.in"
] | sudarshan2009@live.in |
5ec2bd27d104ad261ca02ea4c6eb402c72836eb5 | e1c7c25c22c2f854aa8e3d8f6fffdf80a0b4dfbf | /CodeForces/CodeForces_Solution_In_Python/watermelon_problem.py | e4682dc0816385ec70cb341507d02ceba1752e7f | [] | no_license | roshan13ghimire/Competitive_Programming | efc85f9fe6fa46edff96931ca3a1cca78628918b | 0c238a391c6acee8763968ef298b765c133b7111 | refs/heads/master | 2023-04-15T16:35:07.711491 | 2021-04-12T03:00:05 | 2021-04-12T03:00:05 | 273,682,360 | 4 | 1 | null | 2020-08-05T02:11:53 | 2020-06-20T09:59:57 | Python | UTF-8 | Python | false | false | 132 | py | #watermelon_problem
# "Watermelon": can n kilos be split into two even, positive parts?
# Possible iff n is even and n > 2 (each part must be at least 2).
# The original special-cased n == 2 with an abrupt exit(); folding that
# into the condition prints the same answers without terminating early.
n = int(input())
if n % 2 == 0 and n > 2:
    print("YES")
else:
    print("NO")
| [
"noreply@github.com"
] | roshan13ghimire.noreply@github.com |
523b04f22ef940fae42d54f7acd0945edab44cd0 | 26c8a9bda50bb2ea9d44529d803477e788d102a2 | /MODEL1302180002/model.py | a2c7fdcded362373ccd9ab2e6a9483cf516de075 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | biomodels/MODEL1302180002 | 20a378031da3750921062cd69752e11eb9ff6645 | fd9c884345a84dcf4c75d3db87f27520d6c3853f | refs/heads/master | 2020-05-31T02:54:36.073367 | 2014-10-16T05:49:30 | 2014-10-16T05:49:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | import os
# Resolve the SBML file relative to this script's own directory so the
# model loads regardless of the current working directory.
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1302180002.xml')
with open(sbmlFilePath,'r') as f:
    sbmlString = f.read()
def module_exists(module_name):
    """Return True if *module_name* can be imported, False otherwise."""
    import importlib
    try:
        # importlib.import_module is the documented replacement for
        # calling __import__ directly.
        importlib.import_module(module_name)
    except ImportError:
        return False
    else:
        return True
# Parse the model eagerly, but only when python-libsbml is installed.
if module_exists('libsbml'):
    import libsbml
    sbml = libsbml.readSBMLFromString(sbmlString)
"stanleygu@gmail.com"
] | stanleygu@gmail.com |
14280dd87f4eec14382e48e5062801018048ead1 | 99deab5f52fd7262a26de9aa5d0163bfa738590f | /python/leetcode/geometry/593_valid_square.py | 10db741d5cf9151696a35d715fbce4d92f80f27b | [] | no_license | zchen0211/topcoder | e47fc07c928b83138e27fd6681b373ce499480b0 | 4d73e4c1f2017828ff2d36058819988146356abe | refs/heads/master | 2022-01-17T16:54:35.871026 | 2019-05-08T19:26:23 | 2019-05-13T05:19:46 | 84,052,683 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | """
593. Valid Square (Medium)
Given the coordinates of four points in 2D space, return whether the four points could construct a square.
The coordinate (x,y) of a point is represented by an integer array with two integers.
Example:
Input: p1 = [0,0], p2 = [1,1], p3 = [1,0], p4 = [0,1]
Output: True
Note:
All the input integers are in the range [-10000, 10000].
A valid square has four equal sides with positive length and four equal angles (90-degree angles).
Input points have no order.
"""
import collections
class Solution(object):
    def validSquare(self, p1, p2, p3, p4):
        """
        :type p1: List[int]
        :type p2: List[int]
        :type p3: List[int]
        :type p4: List[int]
        :rtype: bool

        A non-degenerate square yields, among its six pairwise squared
        distances, four equal sides followed by two equal diagonals with
        diagonal == 2 * side and a strictly positive side length.
        """
        corners = (p1, p2, p3, p4)
        pairwise = sorted(
            self.distance(corners[i], corners[j])
            for i in range(4)
            for j in range(i + 1, 4)
        )
        side, diagonal = pairwise[0], pairwise[-1]
        if (side > 0
                and pairwise[:4] == [side] * 4
                and pairwise[4:] == [diagonal] * 2
                and diagonal == 2 * side):
            return True
        return False

    def distance(self, p1, p2):
        """Squared Euclidean distance between points p1 and p2."""
        (x1, y1), (x2, y2) = p1, p2
        return (x1 - x2) ** 2 + (y1 - y2) ** 2
if __name__ == '__main__':
    a = Solution()
    # print() call form; the original Python 2 `print a.validSquare(...)`
    # statement is a SyntaxError on Python 3.  Dead commented-out test
    # input removed.
    print(a.validSquare([0,0],[-1,0],[1,0],[0,1]))
| [
"chenzhuoyuan07@gmail.com"
] | chenzhuoyuan07@gmail.com |
b7ab08da2f79c2419645422a6de099e4cd1df741 | 72880d033c9948098291efebf934255635f8c6ea | /pythonexamples/constructor2.py | 0dec20edf9b0368a505ee3cb68138df8219450b0 | [] | no_license | manutdmohit/mypythonexamples | 729347aec300bda01f629224337c84d5838a71f2 | b189c201d07b1a345478699bbb3852c02eb96ce5 | refs/heads/master | 2023-04-18T01:55:22.026867 | 2021-05-13T05:59:09 | 2021-05-13T05:59:09 | 366,946,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | class Student:
class Student:
    def __init__(self, name, rollno, marks):
        """Store the student's name, roll number and marks.

        The original defined ``__int__`` (a typo for ``__init__``), so
        ``Student('Ram', 101, 90)`` raised TypeError instead of
        initialising the instance.
        """
        print('Creating instance variables and performing initialization...')
        self.name = name
        self.rollno = rollno
        self.marks = marks


s1 = Student('Ram', 101, 90)
s2 = Student('Sita', 102, 95)
# The original used dots instead of commas (s1.name.s1.rollno), which
# raised AttributeError at runtime.
print(s1.name, s1.rollno, s1.marks)
print(s2.name, s2.rollno, s2.marks)
| [
"saudmohit@gmail.com"
] | saudmohit@gmail.com |
070e6ab9841df7311809ebd17f01a2e542e6a9bb | 7a4ed01a40e8d79126b26f5e8fca43c8e61e78fd | /Geeky Shows/Advance Pyhton/220.Module[34]/PythonProject/Example-16.py | e95b5e1423d89cfc81cdc3ab590dfb301de598a5 | [] | no_license | satyam-seth-learnings/python_learning | 5a7f75bb613dcd7fedc31a1567a434039b9417f8 | 7e76c03e94f5c314dcf1bfae6f26b4a8a6e658da | refs/heads/main | 2023-08-25T14:08:11.423875 | 2021-10-09T13:00:49 | 2021-10-09T13:00:49 | 333,840,032 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | # Example-16.py <---- Main Module
from thired import Myclass,Myschool
from fourth import Mycollege

# Exercise one method from each class imported from the sibling modules.
c=Myclass()
c.name()
s=Myschool()
s.show()
cl=Mycollege()
cl.disp()
"satyam1998.1998@gmail.com"
] | satyam1998.1998@gmail.com |
e2f30408e62e31ec33316a8fdad1c5f5e6477b7c | b95e71dcc1b42ebf3459ee57bd0119c618a79796 | /HashTable/Easy/811.py | e34b3be698d497e6d4f22a44b9eb0185ba053eed | [] | no_license | anton-dovnar/LeetCode | e47eece7de28d76b0c3b997d4dacb4f151a839b5 | 6ed9e1bd4a0b48e343e1dd8adaebac26a3bc2ed7 | refs/heads/master | 2023-06-29T07:21:06.335041 | 2021-07-31T15:08:05 | 2021-07-31T15:08:05 | 361,205,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | """
Subdomain Visit Count
"""
from collections import deque
class Solution:
    def subdomainVisits(self, cpdomains):
        """Aggregate visit counts for every domain and parent domain.

        Args:
            cpdomains: list of "count domain" strings, e.g.
                "9001 discuss.leetcode.com".

        Returns:
            list of "count domain" strings, one per distinct (sub)domain.

        The original annotated the signature with ``List[str]`` without
        importing ``typing.List``, which raised NameError when the class
        body executed; types are documented here instead.
        """
        hash_table = {}
        for record in cpdomains:
            count, domain = record.split()
            count = int(count)  # convert once instead of once per suffix
            labels = domain.split(".")
            # Every suffix of the label list is a (sub)domain to credit.
            for start in range(len(labels)):
                subdomain = ".".join(labels[start:])
                hash_table[subdomain] = hash_table.get(subdomain, 0) + count
        return [f"{value} {key}" for key, value in hash_table.items()]
| [
"fode4cun@gmail.com"
] | fode4cun@gmail.com |
9d72d1fe8cfc2b68d11f5698576f8ab92bb82e06 | d66fd976d66632267551467c3df9b2fbfb8be1cd | /Interview01/100 python传参还是传址.py | ab4c8e34f9f1c32e2d62671c0823d23c97d79687 | [] | no_license | bobopython/PythonInterviewQuestions | d8f580932f36bd85432aaafd5c00924183bac16a | 9f38a9368bbca32d071062d59748518c0c4f0d09 | refs/heads/master | 2020-06-07T06:37:25.038426 | 2019-02-22T16:46:00 | 2019-02-22T16:46:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | # _*_ coding: utf-8 _*_
__author__ = 'jevoly'
__date__ = '2019/2/16 0016 下午 11:29'
"""
Function arguments in Python are passed by object reference.
For immutable types (numbers, strings, tuples) an in-place operator
rebinds the local name, so the caller's variable is unaffected;
for mutable types (lists, dicts) the operation mutates the shared
object, so the caller observes the change.
"""


def selfAdd(a):
    """Apply ``a += a``: rebinds for immutables, mutates for mutables."""
    a += a


# An int is unchanged by the call...
a_int = 1
print(a_int)
selfAdd(a_int)
print(a_int)

# ...but a list doubles in place.
a_list = [1, 2]
print(a_list)
selfAdd(a_list)
print(a_list)
| [
"jevoly@163.com"
] | jevoly@163.com |
a76f71b1b0a7bbd3e2980596c7741c93c2f0397d | 6f4ee285871ee52ea4c1143d54581ead795bca87 | /example/asr/preprocess_aihub.py | 966a54cb9783dff1ab5f8ed68a23ec6dd026ed95 | [
"MIT"
] | permissive | rosinality/imputer-pytorch | f3773074ddec615c8eaffd1b89a67402790aa3cc | 7ff8f73dcd7bd62a98c5b8a126946c5fe381d895 | refs/heads/master | 2022-05-24T16:30:05.588670 | 2020-05-03T00:36:28 | 2020-05-03T00:36:28 | 257,501,256 | 52 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,933 | py | import argparse
import io
import re
import os
import string
import wave
import pickle
from multiprocessing import Pool
from functools import partial
import lmdb
import librosa
import torch
from tqdm import tqdm
from audio import FilterbankFeature
from text import collapse_whitespace
# '(spelled)/(pronounced)' alternation markers in AIHub transcripts.
re_pronunciation = re.compile(r'\((.*?)\)\/\((.*?)\)')
# Noise tags such as 'b/', 'l/', 'o/', 'n/'.  NOTE(review): exact tag
# semantics follow the AIHub transcription guideline -- confirm before
# extending this pattern.
re_noise = re.compile(r'b\/|l\/|o\/|n\/')
# Maps every ASCII punctuation character to a space.
table_punctuation = str.maketrans(string.punctuation, ' ' * len(string.punctuation))

# Raw PCM format of the speech corpus: 16-bit mono at 16 kHz.
PCM_CHANNELS = 1
PCM_BIT_DEPTH = 16
PCM_SAMPLING_RATE = 16000

# Number of reserved vocabulary ids before real characters begin.
N_META_CHAR = 3
def use_pronunciation(text):
    """Replace '(spelled)/(pronounced)' pairs with the pronounced form."""
    return re_pronunciation.sub(r'\2', text)


def remove_noise(text):
    """Blank out transcript noise tags such as 'b/' or 'n/'."""
    return re_noise.sub(' ', text)


def remove_punctuation(text):
    """Turn every ASCII punctuation character into a space."""
    return text.translate(table_punctuation)


def process_text(text):
    """Normalize a transcript: keep pronunciations, drop noise tags and
    punctuation, then collapse whitespace runs and trim the ends."""
    return collapse_whitespace(
        remove_punctuation(remove_noise(use_pronunciation(text)))
    ).strip()
def load_pcm(filename):
    """Load a headerless PCM file and return a float waveform array.

    The raw samples are wrapped in an in-memory WAV container so librosa
    can decode them with the corpus format (16-bit mono, 16 kHz).
    """
    with open(filename, 'rb') as f:
        pcmdata = f.read()

    wav_write = io.BytesIO()
    # Context manager guarantees the writer is closed, which finalizes the
    # RIFF header; the original never closed the Wave_write object.
    with wave.open(wav_write, 'wb') as wav:
        wav.setparams(
            (PCM_CHANNELS, PCM_BIT_DEPTH // 8, PCM_SAMPLING_RATE, 0, 'NONE', 'NONE')
        )
        wav.writeframes(pcmdata)
    wav_write.seek(0)

    wav, _ = librosa.load(wav_write, sr=PCM_SAMPLING_RATE)

    return wav
def load_text(filename):
    """Read a transcript file, which the corpus encodes as CP949."""
    with open(filename, encoding='cp949') as transcript:
        return transcript.read()
def process_worker(filename, root):
    """Convert one utterance (a .pcm/.txt pair under *root*) into a record.

    Returns (mel_features, normalized_text, filename).

    NOTE(review): relies on the module-global ``wav_feature`` that is only
    created inside the ``__main__`` guard -- this works with fork-based
    multiprocessing on Linux but would fail under the 'spawn' start method.
    """
    file = os.path.join(root, filename)

    wav = load_pcm(file + '.pcm')
    text = load_text(file + '.txt')

    # Batch dimension added for the filterbank transform, then dropped
    # implicitly by downstream consumers of the record.
    wav_feat = wav_feature(torch.from_numpy(wav).unsqueeze(0), PCM_SAMPLING_RATE)
    text_feat = process_text(text)
    record = (wav_feat, text_feat, filename)

    return record
if __name__ == '__main__':
    # Keep BLAS single-threaded; parallelism comes from the process pool.
    os.environ['OMP_NUM_THREADS'] = '1'

    parser = argparse.ArgumentParser()
    parser.add_argument('--n_mels', type=int, default=80)
    parser.add_argument('--path', type=str, required=True)
    parser.add_argument('output', metavar='OUTPUT')

    args = parser.parse_args()

    speech_files = []

    wav_feature = FilterbankFeature(args.n_mels)

    # Leaf directories hold paired .pcm/.txt files sharing a basename;
    # collect each basename once, relative to the corpus root.
    for dirpath, dirs, files in os.walk(args.path):
        if len(dirs) == 0:
            speech_keys = set()

            for file in files:
                speech_keys.add(os.path.splitext(file)[0])

            speech_keys = list(sorted(speech_keys))
            relpath = os.path.relpath(dirpath, args.path)

            for key in speech_keys:
                speech_files.append(os.path.join(relpath, key))

    vocab = {}

    worker = partial(process_worker, root=args.path)

    with Pool(processes=8) as pool, lmdb.open(
        args.output, map_size=1024 ** 4, readahead=False
    ) as env:
        pbar = tqdm(pool.imap(worker, speech_files), total=len(speech_files))

        mel_lengths = []
        text_lengths = []

        for i, record in enumerate(pbar):
            # Serialize each (mel, text, filename) record with torch.save
            # and store it under its running index as the LMDB key.
            record_buffer = io.BytesIO()
            torch.save(record, record_buffer)

            with env.begin(write=True) as txn:
                txn.put(str(i).encode('utf-8'), record_buffer.getvalue())

            # Grow the character vocabulary; ids start after the meta chars.
            for char in record[1]:
                if char not in vocab:
                    vocab[char] = len(vocab) + N_META_CHAR

            mel_lengths.append(record[0].shape[0])
            text_lengths.append(len(record[1]))

            pbar.set_description(record[2])

        # Dataset-level metadata stored under fixed keys for the loader.
        with env.begin(write=True) as txn:
            txn.put(b'length', str(len(speech_files)).encode('utf-8'))
            txn.put(
                b'meta',
                pickle.dumps(
                    {
                        'sr': PCM_SAMPLING_RATE,
                        'channels': PCM_CHANNELS,
                        'bit_depth': PCM_BIT_DEPTH,
                        'vocab': vocab,
                        'mel_lengths': mel_lengths,
                        'text_lengths': text_lengths,
                    }
                ),
            )
| [
"kim.seonghyeon@snu.ac.kr"
] | kim.seonghyeon@snu.ac.kr |
3f25f208b39b728d049043a65f1698f402063b63 | d4dda2e2992ca16b8fe628e417f8a4243af0ed4a | /step10_plottingutilities/nexpectedeventsDbkg0.5.py | 694952c3137cf9cfe4b69eb75d783cd8b5acd850 | [] | no_license | hroskes/anomalouscouplings | 01f46c0d38f5332c58538b0bdea373704cf06fcc | 391eb7fbd52d8605b09ca2e461b1789e019b1da0 | refs/heads/production | 2021-11-24T22:37:48.932830 | 2021-10-29T18:38:54 | 2021-10-29T18:38:54 | 60,651,233 | 0 | 2 | null | 2017-01-24T14:20:56 | 2016-06-07T22:37:23 | Python | UTF-8 | Python | false | false | 1,170 | py | #!/usr/bin/env python
import os
import ROOT
from helperstuff import config
from helperstuff.enums import analyses, channels, categories
from projections import Projections
# Exactly one production may be configured for combination here.
assert len(config.productionsforcombine) == 1
production = config.productionsforcombine[0]

class NExpected(Projections):
    """Reads back a saved stack plot and sums the expected event yields."""

    @property
    def nexpected(self):
        """Sum of integrals of the signal-colored histograms on the saved
        canvas for the first discriminant.

        NOTE(review): line colors distinguish signal from background
        (1, 6, 2, kOrange+6, kViolet+7 are counted; kCyan, kGreen+3, 4 are
        skipped) -- this must stay in sync with the plotting code.
        """
        disc = self.discriminants[0]
        rootfile = os.path.join(self.saveasdir, disc.name+".root")
        f = ROOT.TFile(rootfile)
        c = f.c1
        # Primitive [1] of the canvas is the THStack drawn by projections.
        hstack = c.GetListOfPrimitives()[1]
        total = 0
        for h in hstack.GetHists():
            if h.GetLineColor() in (1, 6, 2, ROOT.kOrange+6, ROOT.kViolet+7): total += h.Integral()
            elif h.GetLineColor() in (ROOT.kCyan, ROOT.kGreen+3, 4): pass
            else: assert False
        return total
if __name__ == "__main__":
    # Python 2 print statements (file shebang targets python2).
    # One summed yield per analysis: first over the full range, then over
    # the signal-enriched selection.
    for analysis in analyses:
        print sum(NExpected(production, channel, category, "rescalemixtures", analysis, "fullrange").nexpected for channel in channels for category in categories),
    print
    for analysis in analyses:
        print sum(NExpected(production, channel, category, "rescalemixtures", analysis, "enrich").nexpected for channel in channels for category in categories),
| [
"jroskes1@jhu.edu"
] | jroskes1@jhu.edu |
7e18b441ca5e77bb4d99946436324fdc717db390 | c8836eac0f6a20a6d3056fc3651c9daf5ce0572d | /test_hu/test_cadastro.py | dac1ceb79c100055f70f3db34a7f50d2195c4645 | [] | no_license | rafaelcoelho/code_interview_training | cb44eb6701b6902a28eaf1c0025aea4921dfccb4 | fa7484487bf1a2fa9fb4a4abe135c6114fd98bf8 | refs/heads/master | 2021-01-18T20:05:59.486916 | 2020-11-27T00:00:56 | 2020-11-27T00:00:56 | 100,544,717 | 0 | 0 | null | 2017-08-17T00:36:50 | 2017-08-17T00:36:49 | null | UTF-8 | Python | false | false | 3,447 | py | """
Utilizando o conceito de dicionários, faça uma ficha de cadastro de 4 funcionários utilizando a seguinte
estrutura:
Chave: Nome Dados: Idade, email, setor, salario
Inclua os funcionários:
Joao Pereira, 25, joao.pereira@hurb.com, marketing, 1950
Maria Silva, 23, maria.silva@hurb.com, comercial, 2300
Pedro Peixoto, 32, pedro.peixoto@hurb.com, operacao, 2625
Luiza Almeida, 28, luiza.almeida@hurb.com, atendimento, 2120
Faça um programa que retorna o nome, email e setor de todos os funcionários com mais de 25 anos.
"""
import unittest
from typing import List, Tuple, Union
def filtrar_maiores_de_25(colaboradores):
    """Return (nome, email, setor) tuples for collaborators older than 25.

    Each item of *colaboradores* is ``(nome, [idade, email, setor, ...])``.
    """
    # A comprehension replaces the original append-in-a-loop via adicionar().
    return [
        (nome, email, setor)
        for nome, (idade, email, setor, *_) in colaboradores
        if idade > 25
    ]
def adicionar(email, nome, resultado, setor):
    """Append the (nome, email, setor) tuple to *resultado* in place."""
    resultado.append((nome, email, setor))
def extrair_nome_salario(colaborador):
    """Return the (nome, salario) pair of a (nome, dados) collaborator entry.

    The salary is the last element of the personal-data list.
    """
    nome = colaborador[0]
    salario = colaborador[1][-1]
    return nome, salario
def extrair_salario(tpl):
    """Key function: the salary is the last element of the tuple."""
    return tpl[-1]
def top_colaborador(colaboradores: List[Tuple[str, List[Union[str, int]]]]) -> Tuple[str, int]:
    """Return the (name, salary) pair of the best-paid collaborator.

    Ex:
    >>> colaboradores = [
    ...     ('Joao Pereira', [25, 'joao.pereira@hurb.com', 'marketing', 1950]),
    ...     ('Pedro Peixoto', [32, 'pedro.peixoto@hurb.com', 'operacao', 2625]),
    ... ]
    >>> top_colaborador(colaboradores)
    ('Pedro Peixoto', 2625)

    :param colaboradores: list of (nome, dados) collaborator entries
    :return: tuple of name and salary
    """
    pares = (extrair_nome_salario(colaborador) for colaborador in colaboradores)
    return max(pares, key=extrair_salario)
class TesteColaborabores(unittest.TestCase):
    """Unit tests for the filtering and top-salary helpers."""

    def test_filtragem_colabores(self) -> None:
        # 'Pedro Peixoto' appears twice on purpose: the filter keeps
        # duplicates (len == 3) while the set comparison collapses them.
        colaboradores = [
            ('Joao Pereira', [25, 'joao.pereira@hurb.com', 'marketing', 1950]),
            ('Maria Silva', [23, 'maria.silva@hurb.com', 'comercial', 2300]),
            ('Pedro Peixoto', [32, 'pedro.peixoto@hurb.com', 'operacao', 2625]),
            ('Pedro Peixoto', [32, 'pedro.peixoto@hurb.com', 'operacao', 2625]),
            ('Luiza Almeida', [28, 'luiza.almeida@hurb.com', 'atendimento', 2120]),
        ]

        resultado = filtrar_maiores_de_25(colaboradores)
        self.assertEqual(3, len(resultado))
        self.assertSetEqual(
            {
                ('Pedro Peixoto', 'pedro.peixoto@hurb.com', 'operacao'),
                ('Luiza Almeida', 'luiza.almeida@hurb.com', 'atendimento'),
            },
            set(resultado)
        )

    def test_salario_maximo(self) -> None:
        colaboradores = [
            ('Joao Pereira', [25, 'joao.pereira@hurb.com', 'marketing', 1950]),
            ('Maria Silva', [23, 'maria.silva@hurb.com', 'comercial', 2300]),
            ('Pedro Peixoto', [32, 'pedro.peixoto@hurb.com', 'operacao', 2625]),
            ('Luiza Almeida', [28, 'luiza.almeida@hurb.com', 'atendimento', 2120]),
            ('Zoraide', [28, 'luiza.almeida@hurb.com', 'atendimento', 2120]),
        ]
        resultado = top_colaborador(colaboradores)
        self.assertEqual(('Pedro Peixoto', 2625), resultado)
| [
"renzon@gmail.com"
] | renzon@gmail.com |
7410a60093d30f7f552fe587527803a22d586e60 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/scrabble-score/4f89ec23c2c3437681d68f5c9bf4d3d9.py | ee4dbb86106d575505a2c999db6b7e251a7494d5 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 382 | py | def score(phrase):
def score(phrase):
    """Return the Scrabble score of *phrase*, case-insensitively.

    Leading/trailing whitespace is trimmed before scoring.
    """
    letter_values = {
        "a": 1, "b": 3, "c": 3, "d": 2, "e": 1,
        "f": 4, "g": 2, "h": 4, "i": 1, "j": 8,
        "k": 5, "l": 1, "m": 3, "n": 1, "o": 1,
        "p": 3, "q": 10, "r": 1, "s": 1, "t": 1,
        "u": 1, "v": 4, "w": 4, "x": 8, "y": 4,
        "z": 10,
    }
    return sum(letter_values[letter] for letter in phrase.strip().lower())
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
1d945a7c1bf6726a99ca1c625db31858855c0347 | 5ba0e63ff6d94efbd28ed425bb6074e74d3148a0 | /app.py | c9fe431141f706fd0d4bbe760be77e916c60c677 | [] | no_license | SmartPracticeschool/SBSPS-Challenge-4160-Sentiment-Analysis | ad25e307f38828dd219e26a2bcbb7250a2cf023e | b448d93e8464bf5af5bd70cacf631ed124d96a18 | refs/heads/master | 2022-11-20T08:31:50.487634 | 2020-07-29T11:23:24 | 2020-07-29T11:23:24 | 272,616,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,313 | py | from flask import Flask, render_template,request, jsonify
import tweepy
from textblob import TextBlob
app = Flask(__name__)
# NOTE(review): DEBUG must be disabled before any real deployment.
app.config["DEBUG"] = True

# SECURITY: Twitter API credentials are hard-coded in source.  They should
# be revoked and loaded from environment variables or a secrets store.
cons_key="zJQyiylJYFuutBTOzomm2ZgDc"
cons_sec="gPXYZSZ7jVqjTOIG48p4CYzs7fx9fmaWHFPnmSMp4DF10Bla3D"
acc_token="1269151933559762945-wZYKQZRbSRaTuDkxW29PnLVaTUJmea"
acc_sec="uGyK2OpmhiCyE20b7D0b26adNOosmdDT0FPmtCsLjHqqt"

# Authenticate once at import time and reuse the client for all requests.
auth = tweepy.OAuthHandler(cons_key,cons_sec)
auth.set_access_token(acc_token,acc_sec)
api = tweepy.API(auth)
@app.route('/')
def hello_world():
    """Render the landing page containing the keyword search form."""
    return render_template('home.html')
@app.route('/results',methods=['GET', 'POST'])
def show_result():
    """Search recent tweets for the submitted keyword and render a
    sentiment breakdown (positive / negative / neutral counts).

    NOTE(review): a plain GET request skips the whole body and returns
    None, which Flask reports as a 500 -- the form should be re-rendered
    (or the request redirected) in that case.
    """
    if request.method=='POST':
        result = request.form['keyword']
        neutral, positive, negative = 0,0,0
        tweetData = {}
        id = 0  # NOTE(review): shadows the builtin id()
        tweets = api.search(q=result,count = 100,rpp = 1500, _id="23424848")
        for tweet in tweets:
            # TextBlob polarity lies in [-1, 1]; exactly 0 counts as neutral.
            blob = TextBlob(tweet.text)
            polarity = blob.sentiment.polarity
            if polarity == 0:
                tweetData[id] = {
                    'text': tweet.text,
                    'polarity': round(polarity, 2),
                }
                neutral += 1
            elif polarity > 0:
                tweetData[id] = {
                    'text': tweet.text,
                    'polarity': round(polarity, 2),
                }
                positive += 1
            elif polarity < 0:
                tweetData[id] = {
                    'text': tweet.text,
                    'polarity': round(polarity, 2),
                }
                negative += 1
            id += 1
        # Overall verdict; ties between negative and neutral fall through
        # to the 'neutral' branch.
        if(positive>negative) and (positive>neutral):
            outcome = 'positive'
            msg = "Outcome: Over the analysis the result falls on a positive edge. :)"
        elif(negative> neutral):
            outcome = 'negative'
            msg = "Outcome: Over the analysis the result falls on the negative edge. :("
        else:
            outcome = 'neutral'
            msg = "Outcome: Over the analysis, the results are claimed to be neutral. :| "
        values = [positive, negative, neutral]
        labels = ["positive", "negative", "neutral"]
        return render_template('result.html', msg=msg, labels=labels, values=values, keyword=result, outcome=outcome, tweetData=tweetData)
app.run() | [
"noreply@github.com"
] | SmartPracticeschool.noreply@github.com |
d31b596c5a1b60f72386ba04c19e591e3b4d2eca | 7ef2308e51d1d5700fbd092177ee15e2a03ebdd8 | /WorkLean/Python/Scrapy/testCrawler3_0/testCrawler3_0/items.py | 6672b1af9e9bd24d5a6602896aef9d975422922e | [] | no_license | STAWZW/STAWZW1.0 | 741002eb35c2883e5739fee8d14ff430e9622c01 | a835ac27aba17f968116e321bd201b26c9fb3578 | refs/heads/master | 2020-07-21T20:21:59.753992 | 2019-09-26T09:21:28 | 2019-09-26T09:21:28 | 206,965,347 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class Testcrawler30Item(scrapy.Item):
    """Item container for fields scraped by the testCrawler3_0 project.

    Currently empty scaffold generated by `scrapy startproject`.
    """
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| [
"1223868042@qq.com"
] | 1223868042@qq.com |
9eaa568ed36cd7532c669f74ff9bdcb0cae51b8e | bbefcbb5fdaa571b1bd674269a51d855eadbb2cb | /ticketing/migrations/0037_auto_20200207_1606.py | a6ef78cdee50e70b0274b2594896c195a3920f96 | [] | no_license | fadhlimulyana20/event_organizer | 8a0e87e71ca24f9ca82af5c90b39be1e61e7867d | 4995d529a533d0a6b855e42283c2aaf441fa5a9c | refs/heads/master | 2020-12-13T17:11:49.562499 | 2020-03-01T22:19:29 | 2020-03-01T22:19:29 | 234,480,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # Generated by Django 2.2.7 on 2020-02-07 09:06
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration (makemigrations 2020-02-07).

    Alters the defaults of Event.due_registration and EventPayment.due_date.
    Note the datetime literals: the model defaults used `now()`-relative
    values, so makemigrations baked the timestamps of generation time into
    this file. Harmless for schema state, but the frozen values are not
    meaningful dates.
    """

    dependencies = [
        ('ticketing', '0036_auto_20200206_1445'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='due_registration',
            field=models.DateField(default=datetime.datetime(2020, 2, 4, 16, 6, 8, 775718)),
        ),
        migrations.AlterField(
            model_name='eventpayment',
            name='due_date',
            field=models.DateTimeField(default=datetime.datetime(2020, 2, 9, 9, 6, 8, 779718, tzinfo=utc)),
        ),
    ]
| [
"baba.fadhli@gmail.com"
] | baba.fadhli@gmail.com |
140d5a49d274013ddeace0c911a004f3725a8cd0 | 3432efd194137e1d0cb05656eb547c9992229f02 | /web_test/nineteen cookie/cookie.py | d0991972774ab8c00cf93a1c3cace275a13d7ecc | [] | no_license | zhanganxia/other_code | 31747d7689ae1e91fcf3f9f758df130246e7d495 | 8d09d9d0b6d6a1a9b8755487f926ac6fafd761fa | refs/heads/master | 2021-09-04T02:22:38.632685 | 2018-01-14T15:37:14 | 2018-01-14T15:37:14 | 107,007,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | #coding=utf-8
from selenium import webdriver
import time
driver = webdriver.Chrome()
driver.get("http://www.youdao.com")
#获得cookies信息
cookie=driver.get_cookies()
#将获得cookies的信息打印
print cookie
driver.quit() | [
"kk@kk.rhel.cc"
] | kk@kk.rhel.cc |
5926a539dc685e3b284ebafa5e13af890c809432 | 5a281cb78335e06c631181720546f6876005d4e5 | /openstack-cyborg-2.0.0/cyborg/accelerator/drivers/gpu/utils.py | 15a926e52a1cf7a63768cf2955192eb569ff9f26 | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 4,150 | py | # Copyright 2018 Beijing Lenovo Software Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utils for GPU driver.
"""
from oslo_log import log as logging
from oslo_serialization import jsonutils
import re
import subprocess
from cyborg.objects.driver_objects import driver_deployable, driver_device, \
driver_attach_handle, driver_controlpath_id
from cyborg.common import constants
LOG = logging.getLogger(__name__)
GPU_FLAGS = ["VGA compatible controller", "3D controller"]
GPU_INFO_PATTERN = re.compile("(?P<devices>[0-9]{4}:[0-9]{2}:[0-9]{2}\.[0-9]) "
"(?P<controller>.*) [\[].*]: (?P<name>.*) .*"
"[\[](?P<vendor_id>[0-9a-fA-F]"
"{4}):(?P<product_id>[0-9a-fA-F]{4})].*")
# NOTE(wangzhh): The implementation of current release doesn't support virtual
# GPU.
def discover_vendors():
    """Return the set of PCI vendor ids of GPU devices on this host.

    Runs ``sudo lspci -nnn -D`` once and filters the output in Python
    instead of piping a string-built command through ``grep`` with
    ``shell=True`` — this resolves the previous FIXME about shell
    injection. ``universal_newlines=True`` yields text lines so the
    (str) regex matches on Python 3 as well.
    """
    p = subprocess.Popen(["sudo", "lspci", "-nnn", "-D"],
                         stdout=subprocess.PIPE,
                         universal_newlines=True)
    p.wait()
    vendors = set()
    for line in p.stdout.readlines():
        # Keep only GPU-like functions (VGA compatible / 3D controller),
        # same substring test the old `grep -E` performed.
        if not any(flag in line for flag in GPU_FLAGS):
            continue
        m = GPU_INFO_PATTERN.match(line)
        if m:
            vendors.add(m.groupdict().get("vendor_id"))
    return vendors
def discover_gpus(vender_id=None):
    """Return DriverDevice objects for GPUs found via ``lspci``.

    :param vender_id: optional PCI vendor id; when given, only lines
        containing that id are considered (same semantics as the old
        ``| grep <id>``).

    As in discover_vendors(), the shell pipeline was replaced with an
    argument-list Popen plus in-Python filtering to remove the
    shell-injection FIXME, and text mode keeps the regex working on py3.
    """
    p = subprocess.Popen(["sudo", "lspci", "-nnn", "-D"],
                         stdout=subprocess.PIPE,
                         universal_newlines=True)
    p.wait()
    gpu_list = []
    for line in p.stdout.readlines():
        if not any(flag in line for flag in GPU_FLAGS):
            continue
        if vender_id and vender_id not in line:
            continue
        m = GPU_INFO_PATTERN.match(line)
        if m:
            gpu_list.append(_generate_driver_device(m.groupdict()))
    return gpu_list
def _generate_driver_device(gpu):
    """Build a DriverDevice from one parsed lspci match-group dict."""
    driver_device_obj = driver_device.DriverDevice()
    driver_device_obj.vendor = gpu["vendor_id"]
    # lspci parsing never sets 'model'; the placeholder marks it as unknown.
    driver_device_obj.model = gpu.get('model', 'miss model info')
    std_board_info = {'product_id': gpu.get('product_id', None),
                      'controller': gpu.get('controller', None)}
    # std_board_info is stored serialized (JSON string), not as a dict.
    driver_device_obj.std_board_info = jsonutils.dumps(std_board_info)
    driver_device_obj.type = constants.DEVICE_GPU
    driver_device_obj.controlpath_id = _generate_controlpath_id(gpu)
    driver_device_obj.deployable_list = _generate_dep_list(gpu)
    return driver_device_obj
def _generate_controlpath_id(gpu):
    """Control-path handle: the GPU is addressed by its PCI address."""
    driver_cpid = driver_controlpath_id.DriverControlPathID()
    driver_cpid.cpid_type = "PCI"
    # gpu["devices"] is the PCI address captured by GPU_INFO_PATTERN.
    driver_cpid.cpid_info = gpu["devices"]
    return driver_cpid
def _generate_dep_list(gpu):
    """Return a single-element list with the deployable for this GPU."""
    dep_list = []
    driver_dep = driver_deployable.DriverDeployable()
    driver_dep.attach_handle_list = []
    # NOTE(wangzhh): The name of deployable should be unique, its format is
    # under disscussion, may looks like
    # <ComputeNodeName>_<NumaNodeName>_<CyborgName>_<NumInHost>, now simply
    # named <Device_name>_<Device_address>
    driver_dep.name = gpu.get('name', '') + '_' + gpu["devices"]
    # The whole physical GPU is exposed as one accelerator (no vGPU support
    # in this release, per the module-level NOTE).
    driver_dep.num_accelerators = 1
    driver_dep.attach_handle_list = \
        [_generate_attach_handle(gpu)]
    dep_list.append(driver_dep)
    return dep_list
def _generate_attach_handle(gpu):
    """Attach handle for the GPU: PCI attach by device address, initially free."""
    driver_ah = driver_attach_handle.DriverAttachHandle()
    driver_ah.attach_type = "PCI"
    # Freshly discovered devices start unassigned.
    driver_ah.in_use = False
    driver_ah.attach_info = gpu["devices"]
    return driver_ah
| [
"Wayne Gong@minbgong-winvm.cisco.com"
] | Wayne Gong@minbgong-winvm.cisco.com |
5b8c473a88965f160d27bbdbf6da45b8cdebd7e6 | f8e778d31a83fdbacb0c498f6c71aa2c48f2d000 | /scenarios/card_hold_show/executable.py | 2631ad59e96fdaab9c937f252adb2b5251b792f3 | [
"MIT"
] | permissive | bsmartt13/balanced-python | a254c695032a80bb1919b8e3fc5cb4c28b4ef44b | c5e192f9547f7b251486cf78d98933410a31daca | refs/heads/master | 2020-12-27T15:01:01.237398 | 2014-03-11T00:00:22 | 2014-03-11T00:06:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | import balanced
# Configure the Balanced SDK with a test-mode API secret ("ak-test-..." keys
# are sandbox-only, safe to commit) before fetching the card hold below.
balanced.configure('ak-test-1kvvievk0Qqw5wQPsrlM9g7wQwNe62cyc')
card_hold = balanced.CardHold.fetch('/card_holds/HL2bT9uMRkTZkfSPmA2pBD9S') | [
"ben@unfiniti.com"
] | ben@unfiniti.com |
4202d20e3558a69f15aca8363391280f1b305552 | bdc10ba57424040129cc72ad018ff26bc8bca66a | /ConfigDefinitions/UserConfigs/SMHTT_2018_AntiIso_Config_Deep/WZ2L2QConfig.py | 202bcd9c8f5a2cfb9428c372ab52265bd277a253 | [] | no_license | aloeliger/Jesterworks | 61e0ac38ca325fefbbd8ccedaa8eb02d8a76ebbe | 96a22bac4ce20b91aba5884eb0e5667fcea3bc9a | refs/heads/master | 2021-06-09T15:39:06.976110 | 2021-04-23T11:25:06 | 2021-04-23T11:25:06 | 157,698,363 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | from ConfigDefinitions.JesterworksConfigurations import JesterworksConfiguration as Config
from ConfigDefinitions.BranchAdditions.UserDefinedCollections.SMHTT_2018_MC_Collection import MCCollection as BranchCollection
from ConfigDefinitions.BranchAdditions.UserDefinedCollections.Differential_2018_Collection import DifferentialCollection as PostfixCollection
from ConfigDefinitions.CuttingDefinitions.UserCutConfigs.SMHTT2018Cuts_MC_NoEmbeddedOverlap_wDeep import SMHTT2018Cuts as CutConfig
from ConfigDefinitions.EndActionDefinitions.UserConfigs.GrabHistograms import HistogramGrabber as HistogramGrabber
# Jesterworks skim configuration: SMHTT 2018 mu-tau anti-isolated region
# with DeepTau IDs, for the WZ -> 2l 2q background sample.
DataConfig = Config()
# Input ntuples (svfitted differential mu-tau 2018 production).
DataConfig.Path = "/data/ccaillol/differentialmt2018_svfitted_3aug/"
DataConfig.Files = ["WZ2L2Q.root"]
DataConfig.InputTreeName = "mutau_tree"
DataConfig.SampleName = "WZ2L2Q"
# Output location for selected events.
DataConfig.OutputPath = "/data/aloeliger/SMHTT_Selected_2018_AntiIso_Deep/"
DataConfig.OutputFile = "WZ2L2Q.root"
DataConfig.OutputTreeName = "mt_Selected"
# Pluggable pieces: branch additions, selection cuts, and end-of-job action.
DataConfig.BranchCollection = BranchCollection
DataConfig.PostfixBranchCollection = PostfixCollection
DataConfig.CutConfig = CutConfig
DataConfig.EndAction = HistogramGrabber
| [
"aloelige@cern.ch"
] | aloelige@cern.ch |
13229a7fa36de42831537b95efa97c775f649005 | edf125be37a40caeb14c7fe32bd9f7511cf0ce9b | /09-manipulating-dataFrames-with-pandas/1-extracting-and-transforming-data/indexing_and_column_rearrangement.py | 745bece0cdcb2ca968e8a8950bf5140557d4b32e | [] | no_license | vedpbharti/Datacamp | 1d3d2ca0722a3a19733e91fa054f64e0c3b7114a | b6d019efebe1b46765f19212ba2d8ebb9d90de57 | refs/heads/master | 2020-04-05T05:47:28.528088 | 2019-02-10T22:34:00 | 2019-02-10T22:34:00 | 156,610,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | '''Indexing and column rearrangement
There are circumstances in which it's useful to modify the order of your DataFrame columns. We do that now by extracting just two columns from the Pennsylvania election results DataFrame.
Your job is to read the CSV file and set the index to 'county'. You'll then assign a new DataFrame by selecting the list of columns ['winner', 'total', 'voters']. The CSV file is provided to you in the variable filename.
Instructions
100 XP
Import pandas as pd.
Read in filename using pd.read_csv() and set the index to 'county' by specifying the index_col parameter.
Create a separate DataFrame results with the columns ['winner', 'total', 'voters'].
Print the output using results.head(). This has been done for you, so hit 'Submit Answer' to see the new DataFrame!'''
# Import pandas
import pandas as pd
# Read in filename and set the index: election
election = pd.read_csv(filename, index_col='county')
# Create a separate dataframe with the columns ['winner', 'total', 'voters']: results
results = election[['winner', 'total', 'voters']]
# Print the output of results.head()
print(results.head())
| [
"ved.bhartig@gmail.com"
] | ved.bhartig@gmail.com |
f1771c403450a958403c400739cb654460474052 | bb2a7aacab41acb8e804d823c98b9b4dd3267f0c | /modules/vector_distance.py | 382c89c12b6bc1894a47194efedb0bf9387fc871 | [
"MIT"
] | permissive | nicolasying/WordNet-Embeddings | 7f9f5c57f534d5ea1db956a9e2d7f0dd178e4998 | a6a5782dca97376e487df41fb83542729f284197 | refs/heads/master | 2020-04-21T16:58:48.668802 | 2019-06-11T14:10:10 | 2019-06-11T14:10:10 | 169,720,827 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,003 | py | # coding=utf-8
#! /usr/bin/env python3.4
"""
MIT License
Copyright (c) 2018 NLX-Group
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This code calculates the cosine similarity between two given vectors
Chakaveh.saedi@di.fc.ul.pt
"""
import math
from modules.input_output import *
from scipy.spatial import distance
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
def word_similarity(wrd1, wrd2, for_WSD, from_emb_file):
    """Print cosine similarity for every sense pair of two words.

    Thin wrapper around vec_extractor(); all output goes to stdout and
    the function returns None.
    """
    vec_extractor(wrd1, wrd2, for_WSD, from_emb_file)
def vec_extractor(wrd1, wrd2, for_WSD, from_emb_file):
    """Locate embedding vectors for two words and print their similarities.

    from_emb_file == "auto": load the saved embeddings matrix and word list
    via array_loader(). Otherwise, parse a text embeddings file under
    data/output/<from_emb_file> (word followed by space-separated floats).
    for_WSD strips "_offset..." sense suffixes so all senses of a surface
    word are collected. Ambiguous words contribute one vector per sense
    and every cross-pair is printed.
    """
    if from_emb_file == "auto":
        final_vec = array_loader("embeddings_matrix")
        word_list = array_loader("word_list")
        """
        all_words =[]
        for itm in word_list:
            all_words.append(itm.split("\t")[0])
        """
        if for_WSD:
            # Drop the "_offset<id>" sense marker to match on the lemma.
            all_words = [itm.split("_offset")[0].replace("\n","") for itm in word_list]
        else:
            all_words = word_list
        all_words = np.array(all_words)
        # Indices of every sense entry matching each query word.
        indx1 = np.where(all_words == wrd1)[0]
        indx2 = np.where(all_words == wrd2)[0]
        com_wrd1 = [word_list[itm].split("\t")[0] for itm in indx1]
        com_wrd2 = [word_list[itm].split("\t")[0] for itm in indx2]
    else:
        indx1 = []
        indx2 = []
        com_wrd1 = []
        com_wrd2 = []
        final_vec = []
        indx = 0
        path = os.getcwd() + '/data/output/' + from_emb_file
        with open(path) as infile:
            for line in infile:
                if for_WSD:
                    # Match lines whose token is "<word>_offset..." exactly.
                    if line[0:len(wrd1)] == wrd1 and line[len(wrd1):len(wrd1)+7] == "_offset":
                        temp = line[line.index(" ")+1:].replace(" \n","").replace("\n","").replace("'","").split(" ")
                        temp = [float(i) for i in temp]
                        final_vec.append(temp)
                        indx1.append(indx)
                        com_wrd1.append(line.split(" ")[0])
                        indx += 1
                    if line[0:len(wrd2)] == wrd2 and line[len(wrd2):len(wrd2)+7] == "_offset":
                        temp = line[line.index(" ")+1:].replace(" \n","").replace("\n","").replace("'","").split(" ")
                        temp = [float(i) for i in temp]
                        final_vec.append(temp)
                        indx2.append(indx)
                        com_wrd2.append(line.split(" ")[0])
                        indx += 1
                else:
                    # Exact-token match: the word must be followed by a space.
                    if line[0:len(wrd1)] == wrd1 and line[len(wrd1):len(wrd1) + 1] == " ":
                        temp = line[line.index(" ") + 1:].replace(" \n", "").replace("\n", "").replace("'", "").split(" ")
                        temp = [float(i) for i in temp]
                        final_vec.append(temp)
                        indx1.append(indx)
                        com_wrd1.append(line.split(" ")[0])
                        indx += 1
                    if line[0:len(wrd2)] == wrd2 and line[len(wrd2):len(wrd2) + 1] == " ":
                        temp = line[line.index(" ") + 1:].replace(" \n", "").replace("\n", "").replace("'", "").split(" ")
                        temp = [float(i) for i in temp]
                        final_vec.append(temp)
                        indx2.append(indx)
                        com_wrd2.append(line.split(" ")[0])
                        indx += 1
        final_vec = np.array(final_vec)
    if len(indx1) > 1 :
        print(' "%s" is ambiguous with "%d" senses' % (wrd1, len(indx1)))
    if len(indx2) > 1:
        print(' "%s" is ambiguous with "%d" senses' % (wrd2, len(indx2)))
    if len(indx1) == 0 or len(indx2) == 0:
        print(' Cannot find both "%s" and "%s" in current word list' % (wrd1, wrd2))
    else:
        # Print similarity for the cross product of all found senses.
        for i in range(len(indx1)):
            for j in range(len(indx2)):
                v1 = final_vec[indx1[i]]
                v2 = final_vec[indx2[j]]
                print(' Cosine similarity between "%s" and "%s": %f' % (com_wrd1[i],com_wrd2[j], cosine_sim(v1, v2, "auto")))
def cosine_sim(v1, v2, mode):
    """Cosine similarity between two vectors.

    In "auto" mode both arguments are numpy arrays and sklearn's
    cosine_similarity does the work (returns a 1x1 array). In any other
    mode, v1 is a sequence of numbers and v2 a sequence of numeric
    strings, possibly containing "" filler tokens that are stepped over.
    """
    if mode == "auto":
        return cosine_similarity(v1.reshape(1, -1), v2.reshape(1, -1))
    # Manual mode: accumulate the dot product and both squared norms in
    # a single pass over v1.
    dot = 0
    norm_sq_a = 0
    norm_sq_b = 0
    j = 0
    for x in v1:
        # Skip at most one empty token in v2 per element of v1.
        if v2[j] == "":
            j += 1
        y = float(v2[j])
        j += 1
        norm_sq_a += x * x
        norm_sq_b += y * y
        dot += x * y
    denom = math.sqrt(norm_sq_a * norm_sq_b)
    if denom == 0:
        # Tiny epsilon instead of zero so callers may safely divide.
        return 0.00000001
    return dot / denom
def element_product(v1, v2):
    "compute elementwise product of v1 to v2: (v11 dot v21) (v12 dot v22) ..."
    # NOTE(review): the str.replace below implies v2 arrives as a string,
    # but the comprehension then iterates it character by character, so
    # float(a) only works for single-digit tokens. Presumably callers pass
    # an already-split sequence of numeric strings — verify at call sites.
    if v2[0] == " ":
        v2 = v2.replace(" ","",1)
    synsVec = [float(a) for a in v2]
    vector1 = np.array(v1)
    vector2 = np.array(synsVec)
    # Elementwise (Hadamard) product via numpy broadcasting.
    return(vector1 * vector2)
| [
"ruben.branco@outlook.pt"
] | ruben.branco@outlook.pt |
4896c1d2ea4c5fa667528ba15bcffab3b2030178 | 295d37dcad3ad6f2cf71ded944eb1a86b3404e6b | /firstsite/learn/views.py | 9093870d9a45add351fddacba712e9b8c63be4a4 | [] | no_license | guwenfeng/Django | c47f9cd9d7d58b75867665a9574fc5d84235a970 | 86a3af2c50f85de0fe57d74224ac145e735aa960 | refs/heads/master | 2021-04-03T01:43:54.115874 | 2018-03-09T08:50:15 | 2018-03-09T08:50:15 | 124,513,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,395 | py | #coding=utf-8
from django.shortcuts import render,render_to_response,redirect
from django.http import HttpResponse,HttpResponseRedirect,request
from django.template import RequestContext
from django import forms
from .models import ResUser
from django.db.models import Q
# Form definitions
class UserForm(forms.Form):
    """Registration form: username, password, email and phone number."""
    username = forms.CharField(label='用户名',max_length=100)
    password = forms.CharField(label='密码',widget=forms.PasswordInput(),max_length=16)
    email = forms.EmailField(label='邮箱')
    phone = forms.CharField(label='电话',max_length=13)
def web_login(request):
    """Render the login page."""
    return render(request, 'login.html')
# Registration
def regist(request):
    """Render the registration page."""
    return render(request, 'registration.html')
#注册
def regist_create(request):
    """Handle the registration form: validate and persist a new ResUser.

    On a valid POST, creates the user and redirects to the login page;
    otherwise renders login.html with the (possibly bound) form.
    """
    if request.method == 'POST':
        uf = UserForm(request.POST)
        if uf.is_valid():
            # Pull validated form data.
            username = uf.cleaned_data['username']
            password = uf.cleaned_data['password']
            email = uf.cleaned_data['email']
            phone = uf.cleaned_data['phone']
            # Fix: removed the debug print of (username, password) — it
            # leaked plaintext credentials into the server logs.
            # NOTE(review): the password is still stored unhashed; consider
            # django.contrib.auth.hashers before create().
            ResUser.objects.create(username= username,password=password,email= email,phone=phone)
            return redirect('/web/login')
    else:
        uf = UserForm()
    return render_to_response('login.html',{'uf':uf}, context_instance=RequestContext(request))
#登陆
def login(request):
    """Authenticate against ResUser by username/email/phone plus password.

    Success sets a 'username' cookie (1 hour) and redirects to /index;
    any failure redirects back to the login page.
    """
    if request.method == 'POST':
        # Read the submitted credentials.
        data = request.POST
        username = data['username']
        password = data['password']
        # Fix 1: removed the debug print of (username, password) — it leaked
        # plaintext credentials into the server logs.
        # Fix 2: .get() raises DoesNotExist / MultipleObjectsReturned on a
        # failed or ambiguous lookup (an unhandled 500); .filter().first()
        # returns None instead so a bad login falls through to the redirect.
        user = ResUser.objects.filter(
            Q(username__exact=username) | Q(email__exact=username) | Q(phone__exact=username),
            password__exact=password).first()
        if user:
            # Success: go to the index page.
            response = HttpResponseRedirect('/index')
            # Remember the user in a browser cookie for 3600 seconds.
            response.set_cookie('username', username, 3600)
            return response
    return redirect('/web/login')
# Log out
def logout(request):
    """Clear the 'username' cookie and return to the login page."""
    response = HttpResponseRedirect('/web/login')
    # Remove the username saved in the browser cookie.
    response.delete_cookie('username')
return response | [
"guwenfengvip@163.com"
] | guwenfengvip@163.com |
ce1f1e15dd9fb2cc99a49b1ed54484e2d655871c | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py | 625f14a52e1f2f4aa3eb2678b58649f3107f1ead | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 3,531 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ConversationAnalysisClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for ConversationAnalysisClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param endpoint: Supported Cognitive Services endpoint (e.g.,
     https://:code:`<resource-name>`.cognitiveservices.azure.com). Required.
    :type endpoint: str
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :keyword api_version: Api Version. Default value is "2022-05-15-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs: Any) -> None:
        super(ConversationAnalysisClientConfiguration, self).__init__(**kwargs)
        api_version = kwargs.pop("api_version", "2022-05-15-preview")  # type: str

        # Auto-generated guard style: fail fast on missing required args.
        if endpoint is None:
            raise ValueError("Parameter 'endpoint' must not be None.")
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")

        self.endpoint = endpoint
        self.credential = credential
        self.api_version = api_version
        # Default AAD scope for Cognitive Services token requests.
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "ai-language-conversations/{}".format(VERSION))
        self._configure(**kwargs)

    def _configure(self, **kwargs: Any) -> None:
        # Each pipeline policy may be overridden via kwargs; otherwise the
        # azure-core default implementation is used.
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get("authentication_policy")
        # Build a bearer-token policy from the credential unless one was given.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
| [
"noreply@github.com"
] | test-repo-billy.noreply@github.com |
487f69e10372f0cca2d9dc1ca97d415961b90453 | a248ebfced3e5892d763c8fff1f5e5ebd0ffb02e | /src/shortener/migrations/0005_auto_20180813_1614.py | aafc0011e8f31d9f752193ba370df885856a28ff | [] | no_license | gauravsaxena1997/URLshortener | 78b9a33ae1640ae27759085f47e72605ae8f0b94 | d1350760dc0436c9a2eea0a549f030e6f4f734f7 | refs/heads/master | 2020-03-26T06:05:45.229523 | 2018-08-15T17:56:04 | 2018-08-15T17:56:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-08-13 16:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shortener', '0004_auto_20180813_1448'),
]
operations = [
migrations.AddField(
model_name='url',
name='active',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='url',
name='shortcode',
field=models.CharField(blank=True, max_length=15, unique=True),
),
]
| [
"gauravsaxena.cs@gmail.com"
] | gauravsaxena.cs@gmail.com |
513164c5b81b86296b7c191d7adb745634d4d6b1 | 3e306d0ec56608259e36c9fe28c95ab5bd58147c | /keras/layers/activation/leaky_relu.py | bc563705cd88ddff65155e02ca7ada49e1c43903 | [
"Apache-2.0"
] | permissive | Alan-love/keras | 8012319eb3f88bfb3806e9df913f62b442701137 | 6c392b5ad96fb47a05019e6dda42d2af1f1ec08e | refs/heads/master | 2023-08-22T17:44:36.217261 | 2022-03-29T23:06:19 | 2022-03-29T23:06:50 | 209,978,278 | 0 | 0 | Apache-2.0 | 2022-03-31T03:09:20 | 2019-09-21T12:05:44 | Python | UTF-8 | Python | false | false | 2,538 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Leaky version of a Rectified Linear Unit activation layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras import backend
from keras.engine.base_layer import Layer
from keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.LeakyReLU')
class LeakyReLU(Layer):
  """Leaky version of a Rectified Linear Unit.

  It allows a small gradient when the unit is not active:

  ```
    f(x) = alpha * x if x < 0
    f(x) = x if x >= 0
  ```

  Usage:

  >>> layer = tf.keras.layers.LeakyReLU()
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [-0.9, -0.3, 0.0, 2.0]
  >>> layer = tf.keras.layers.LeakyReLU(alpha=0.1)
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [-0.3, -0.1, 0.0, 2.0]

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the batch axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    alpha: Float >= 0. Negative slope coefficient. Default to 0.3.
  """

  def __init__(self, alpha=0.3, **kwargs):
    super(LeakyReLU, self).__init__(**kwargs)
    if alpha is None:
      raise ValueError(
          'The alpha value of a Leaky ReLU layer cannot be None, '
          f'Expecting a float. Received: {alpha}')
    # Elementwise activation: masks pass through unchanged.
    self.supports_masking = True
    # Store alpha in the backend float dtype so backend.relu gets a scalar
    # of the configured precision.
    self.alpha = backend.cast_to_floatx(alpha)

  def call(self, inputs):
    return backend.relu(inputs, alpha=self.alpha)

  def get_config(self):
    config = {'alpha': float(self.alpha)}
    base_config = super(LeakyReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    # Elementwise op: output shape equals input shape.
    return input_shape
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
22917a83efbde931e2a785846a9a92c36e59b834 | 704976ea552111c6a5af9cd7cb62b9d9abaf3996 | /pypy/module/test_lib_pypy/ctypes_tests/conftest.py | fdc368945bef163758eaf608e7187c32b4663584 | [
"BSD-3-Clause"
] | permissive | mesalock-linux/mesapy | 4f02c5819ce7f2f6e249d34840f1aa097577645d | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | refs/heads/mesapy2.7 | 2023-08-16T21:33:02.239581 | 2019-08-13T10:29:43 | 2019-08-13T18:06:45 | 136,080,721 | 396 | 33 | NOASSERTION | 2020-04-01T03:05:18 | 2018-06-04T20:45:17 | Python | UTF-8 | Python | false | false | 3,400 | py | import py
import pytest
import sys
import os
def pytest_ignore_collect(path):
    """Skip collecting these ctypes tests entirely unless running on PyPy."""
    running_on_pypy = '__pypy__' in sys.builtin_module_names
    if not running_on_pypy:
        return True
# XXX: copied from pypy/tool/cpyext/extbuild.py
# Shared-library extension for the current platform ('dll' on Windows).
so_ext = 'so' if os.name != 'nt' else 'dll'
def _build(cfilenames, outputfilename, compile_extra, link_extra,
           include_dirs, libraries, library_dirs):
    """Compile the given C files and link them into one shared object.

    Low-level helper for c_compile(); uses distutils' compiler machinery
    and compiles each file from inside its own directory.
    """
    try:
        # monkeypatch distutils for some versions of msvc compiler
        import setuptools
    except ImportError:
        # XXX if this fails and is required,
        # we must call pypy -mensurepip after translation
        pass
    from distutils.ccompiler import new_compiler
    from distutils import sysconfig

    # XXX for Darwin running old versions of CPython 2.7.x
    sysconfig.get_config_vars()

    compiler = new_compiler(force=1)
    sysconfig.customize_compiler(compiler)  # XXX
    objects = []
    for cfile in cfilenames:
        cfile = py.path.local(cfile)
        # Compile from within the source directory so relative outputs land
        # next to the source; always chdir back even on failure.
        old = cfile.dirpath().chdir()
        try:
            res = compiler.compile([cfile.basename],
                include_dirs=include_dirs, extra_preargs=compile_extra)
            assert len(res) == 1
            cobjfile = py.path.local(res[0])
            assert cobjfile.check()
            objects.append(str(cobjfile))
        finally:
            old.chdir()
    compiler.link_shared_object(
        objects, str(outputfilename),
        libraries=libraries,
        extra_preargs=link_extra,
        library_dirs=library_dirs)
def c_compile(cfilenames, outputfilename,
              compile_extra=None, link_extra=None,
              include_dirs=None, libraries=None, library_dirs=None):
    """Compile C sources into a shared library and return its py.path.

    Adds platform-specific flags (MSVC /DEBUG, Fink/Darwinports search
    paths on macOS) and restores any environment variables distutils
    mutated during the build.
    """
    compile_extra = compile_extra or []
    link_extra = link_extra or []
    include_dirs = include_dirs or []
    libraries = libraries or []
    library_dirs = library_dirs or []
    if sys.platform == 'win32':
        link_extra = link_extra + ['/DEBUG']  # generate .pdb file
    if sys.platform == 'darwin':
        # support Fink & Darwinports
        for s in ('/sw/', '/opt/local/'):
            if (s + 'include' not in include_dirs
                    and os.path.exists(s + 'include')):
                include_dirs.append(s + 'include')
            if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'):
                library_dirs.append(s + 'lib')
    # Force the platform shared-library extension computed at module level.
    outputfilename = py.path.local(outputfilename).new(ext=so_ext)
    saved_environ = os.environ.copy()
    try:
        _build(
            cfilenames, outputfilename,
            compile_extra, link_extra,
            include_dirs, libraries, library_dirs)
    finally:
        # workaround for a distutils bugs where some env vars can
        # become longer and longer every time it is used
        for key, value in saved_environ.items():
            if os.environ.get(key) != value:
                os.environ[key] = value
    return outputfilename
# end copy
def compile_so_file():
    """Build the _ctypes_test helper extension into a pytest temp dir.

    Returns the py.path of the produced shared object; the win32 build
    additionally links against oleaut32 (used by the COM-related tests).
    """
    udir = pytest.ensuretemp('_ctypes_test')
    cfile = py.path.local(__file__).dirpath().join("_ctypes_test.c")
    if sys.platform == 'win32':
        libraries = ['oleaut32']
    else:
        libraries = []
    return c_compile([cfile], str(udir / '_ctypes_test'), libraries=libraries)
# we need to run after the "tmpdir" plugin which installs pytest.ensuretemp
@pytest.mark.trylast
def pytest_configure(config):
    """Compile the test extension once per session; tests read `sofile`."""
    global sofile
    sofile = compile_so_file()
| [
"mssun@mesalock-linux.org"
] | mssun@mesalock-linux.org |
409fdc1ded1a89799c4f089c66d3b135a74ab98c | 6a34b039ededb2e1dcdc07c6976475654ca0ae0a | /code_all/day19/review01.py | 8a5cedfdc9e72fce003c67cf1aa52efc1de9219f | [
"MIT"
] | permissive | testcg/python | 57c62671ab1aad18205c1dee4457b55009cef098 | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | refs/heads/main | 2023-07-09T13:19:24.740751 | 2021-08-11T09:25:20 | 2021-08-11T09:25:20 | 394,932,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,791 | py | """
总复习 - Python 核心
一、自动化内存管理机制
1. 引用计数:
每个对象记录被变量绑定(引用)的数量,
当为0时被销毁。
缺点-循环引用:
两个垃圾互相引用,但是计数不为0
2. 标记清除
全盘扫描,标记不再使用的数据
缺点-速度慢
3. 分代回收
程序运行时,将内存分为小0、中1、大2三块.
每次创建新数据,一定在0代分配空间.
如果内存告急,触发标记清除
再将有用的数据升代,清空上一代
内存优化:尽少产生垃圾、对象池、配置垃圾回收器参数
二、跳转语句
三、函数参数
实际参数:与形参进行对应
位置实参:按顺序
函数名(数据1,数据2)
序列实参:拆
函数名(*序列)
关键字实参:按名字
函数名(形参名1 = 数据1,形参名2 = 数据2)
字典实参:拆
函数名(**字典)
形式参数
位置形参:必填
def 函数名(形参名1,形参名2)
默认形参:可选
def 函数名(形参名1=默认值,形参名2=默认值)
星号元组形参:收集位置实参
def 函数名(*args)
双星号字典形参:收集关键字实参
def 函数名(**kwargs)
命名关键字形参:必须是关键字实参
def 函数名(*args,形参名)
def 函数名(形参名,*,形参名)
"""
# Demo: keyword arguments map to positional parameters by name.
def func01(p1,p2):
    pass
func01(p1 =1 ,p2 =2)
list01 = [] # refcount += 1
list02 = list01 # refcount += 1
del list01 # refcount -= 1
list02 = [] # refcount -= 1
list01 = []
list02 = []
list01.append(list02)
list02.append(list01)
del list01, list02 # circular reference: both are garbage but counts stay > 0
# Concatenating strings in a loop keeps producing garbage
# str_result = ""
# for i in range(10):
#     str_result += str(i)
# print(str_result)
# Appending to a list then joining produces no garbage
result = []
for i in range(10):
    result.append(str(i))
print("".join(result))
# Object pool (interning)
# On every new value, first check whether an equal value exists in the pool:
# if it does, reuse its address; otherwise create a new object
data01 = ["10.1234",]
data02 = ["10.1234",]
print(id(data01))
print(id(data02))
# Jump statements
def func01():
    while True:
        while True:
            break # 1. break: exit the innermost loop
        continue # 2. continue: skip to the next iteration
        return "单个数据" # 3. return: leave the function with one value
def func02():
    yield "多个数据" # 4. yield: leave temporarily, producing many values
def func03():
raise Exception("错误信息") # 5. 不断上翻 | [
"cheng@eaglesoftware.cn"
] | cheng@eaglesoftware.cn |
bc19c45df8d46e53fc84287d52e1a50a79d9f27f | 08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc | /src/mnistk/networks/resnetstyle_22.py | 93fa67898bf4b568a507a10d0985d17c6c533350 | [] | no_license | ahgamut/mnistk | 58dadffad204602d425b18549e9b3d245dbf5486 | 19a661185e6d82996624fc6fcc03de7ad9213eb0 | refs/heads/master | 2021-11-04T07:36:07.394100 | 2021-10-27T18:37:12 | 2021-10-27T18:37:12 | 227,103,881 | 2 | 1 | null | 2020-02-19T22:07:24 | 2019-12-10T11:33:09 | Python | UTF-8 | Python | false | false | 1,077 | py | # -*- coding: utf-8 -*-
"""
resnetstyle_22.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
from torchvision.models.resnet import BasicBlock
class ResNetStyle_22(nn.Module):
    """Small CNN for 28x28 single-channel (MNIST-style) inputs.

    Pipeline: 13x13 conv (1 -> 43 ch, 28x28 -> 16x16), one ResNet
    ``BasicBlock``, 9x9 conv (43 -> 45 ch, 16x16 -> 8x8), flatten to
    45 * 8 * 8 = 2880 features, bias-free linear layer to 10 classes,
    then log-softmax over the class dimension.
    """

    def __init__(self):
        super(ResNetStyle_22, self).__init__()
        # Attribute names f0..f4 are preserved so saved state dicts match.
        # Conv2d defaults (stride 1, no padding, dilation 1, groups 1,
        # bias=True, zero padding mode) are relied on instead of spelled out.
        self.f0 = nn.Conv2d(1, 43, kernel_size=(13, 13))
        self.f1 = BasicBlock(inplanes=43, planes=43)
        self.f2 = nn.Conv2d(43, 45, kernel_size=(9, 9))
        self.f3 = nn.Linear(2880, 10, bias=False)
        self.f4 = nn.LogSoftmax(dim=1)

    def forward(self, *inputs):
        t = inputs[0]
        t = t.view(t.shape[0], 1, 28, 28)
        t = self.f2(self.f1(self.f0(t)))
        t = t.view(t.shape[0], 2880)
        return self.f4(self.f3(t))
| [
"41098605+ahgamut@users.noreply.github.com"
] | 41098605+ahgamut@users.noreply.github.com |
390a58d4b13f9ee7281e3f5ab74fa8d5329df6c7 | 519aa4942b6eb6663811dd2a050f498c8d3e0f95 | /Python 2.X/ZERO/GUI/Backup/Backup 1.0.pyw | 6ba365f5534e462ebd99d91fcad5f01a8c949418 | [] | no_license | jacobbridges/my-chaos | 2b5aab5dcac703b268f03efb07fc54e9d4984f29 | 45837fc39f99b5f7f69919ed2f6732e6b7bec936 | refs/heads/master | 2020-05-20T03:21:32.747460 | 2016-08-13T02:12:25 | 2016-08-13T02:12:25 | 29,456,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,008 | pyw | import os
import sys
import Tkinter
import tkFileDialog
import tkMessageBox
def main():
    """Build the Backup GUI (source/destination pickers plus Okay and Exit
    buttons) and enter the Tk main loop. Widgets used by the button
    handlers are published as module globals."""
    global master, source_dialog, destination_dialog, source_entry, destination_entry
    # Create the main window.
    master = Tkinter.Tk()
    master.title('Backup 1.0')
    master.resizable(False, False)
    # Create the file dialogs.
    options = {'mustexist': True, 'parent': master, 'title': 'Please choose a source directory and then select OK.'}
    if os.name == 'nt':
        # On Windows, start browsing from the user-profile area when present.
        if os.path.exists('C:\\Documents and Settings'):
            options['initialdir'] = 'C:\\Documents and Settings'
        elif os.path.exists('C:\\'):
            options['initialdir'] = 'C:\\'
    source_dialog = tkFileDialog.Directory(master, **options)
    # Reuse the same options for the destination dialog, retitled.
    options['title'] = options['title'].replace('source', 'destination')
    destination_dialog = tkFileDialog.Directory(master, **options)
    # Create widgets.
    source = Tkinter.LabelFrame(master, text='Source')
    source_entry = Tkinter.Entry(source, width=30)
    source_button = Tkinter.Button(source, text='Browse ...', command=browse_source)
    destination = Tkinter.LabelFrame(master, text='Destination')
    destination_entry = Tkinter.Entry(destination, width=30)
    destination_button = Tkinter.Button(destination, text='Browse ...', command=browse_destination)
    okay_button = Tkinter.Button(master, text='Okay', command=okay)
    exit_button = Tkinter.Button(master, text='Exit', command=terminate)
    # Create bindings so Return triggers the focused button as well.
    source_button.bind('<Return>', browse_source)
    destination_button.bind('<Return>', browse_destination)
    okay_button.bind('<Return>', okay)
    exit_button.bind('<Return>', terminate)
    # Display widgets.
    source_entry.grid(row=0, column=0, padx=5, pady=5)
    source_button.grid(row=0, column=1, padx=5, pady=5)
    destination_entry.grid(row=0, column=0, padx=5, pady=5)
    destination_button.grid(row=0, column=1, padx=5, pady=5)
    source.grid(row=0, column=0, padx=5, pady=5, columnspan=2)
    destination.grid(row=1, column=0, padx=5, pady=5, columnspan=2)
    okay_button.grid(row=2, column=0, padx=5, pady=5, sticky='news')
    exit_button.grid(row=2, column=1, padx=5, pady=5, sticky='news')
    # Execute the main loop.
    master.mainloop()
def browse_source(event=None):
    """Handler for the source Browse button: pick a directory and show it."""
    chosen = source_dialog.show()
    if not chosen:
        return
    # Replace whatever was typed with the normalized chosen path.
    source_entry.delete(0, Tkinter.END)
    source_entry.insert(0, os.path.realpath(chosen))
def browse_destination(event=None):
    """Handler for the destination Browse button: pick a directory and show it."""
    chosen = destination_dialog.show()
    if not chosen:
        return
    # Replace whatever was typed with the normalized chosen path.
    destination_entry.delete(0, Tkinter.END)
    destination_entry.insert(0, os.path.realpath(chosen))
def okay(event=None):
    """Validate both paths, then run the backup with the window hidden.

    Each failed check pops a warning and aborts; checks run in the same
    order as before (source first, destination second).
    """
    source = source_entry.get()
    if not os.path.exists(source):
        tkMessageBox.showwarning(title='Warning', message='The source does not exist.')
        return
    if not os.path.isdir(source):
        tkMessageBox.showwarning(title='Warning', message='The source is not a directory.')
        return
    destination = destination_entry.get()
    if not os.path.exists(destination):
        tkMessageBox.showwarning(title='Warning', message='The destination does not exist.')
        return
    if not os.path.isdir(destination):
        tkMessageBox.showwarning(title='Warning', message='The destination is not a directory.')
        return
    # Hide the window while copying; restore it afterwards either way.
    master.withdraw()
    try:
        backup(source, destination)
    except:
        tkMessageBox.showerror(title='Error', message='The backup could not be completed.')
    master.deiconify()
def backup(source, destination, errors=None):
    """Recursively copy the contents of *source* into *destination*.

    Directories are recreated and regular files copied byte-for-byte.
    Failures are collected per path pair instead of aborting; at the end
    of the top-level call any failures are written to error.log next to
    the script.

    source/destination -- existing directory paths.
    errors -- internal accumulator used by recursive calls; leave as None.
    """
    # Check for recursion level: only the root call owns the error list.
    if errors is None:
        errors = list()
        root = True
    else:
        root = False
    # Copy all directories and files from source to destination.
    for name in os.listdir(source):
        source_name = os.path.join(source, name)
        destination_name = os.path.join(destination, name)
        try:
            if os.path.isdir(source_name):
                os.mkdir(destination_name)
                backup(source_name, destination_name, errors)
            elif os.path.isfile(source_name):
                # Context managers close both handles even on error; the
                # destination handle previously leaked (file(...).write()).
                with open(source_name, 'rb') as binary:
                    with open(destination_name, 'wb') as copy:
                        copy.write(binary.read())
        except Exception:
            # Best-effort copy: record the failing pair and keep going.
            # (Exception, not bare except, so Ctrl-C still interrupts.)
            errors.append('%s\n%s' % (source_name, destination_name))
    # Write an error log if needed.
    if root and errors:
        with open(os.path.join(os.path.dirname(sys.argv[0]), 'error.log'), 'w') as log:
            log.write('\n\n'.join(errors))
def terminate(event=None):
    """Handler for the Exit button / Return key: stop the Tk main loop."""
    # Terminate the program.
    master.quit()
# Launch the GUI only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| [
"him@jacobandkate143.com"
] | him@jacobandkate143.com |
7609647aad7ffa3b5252aa5069b284baa0eb4d7f | 86813bf514f3e0257f92207f40a68443f08ee44b | /0378 有序矩阵中第K小的元素/0378 有序矩阵中第K小的元素.py | c74fd3302ed9d2435bde0f0195ed617c845e2b4e | [] | no_license | Aurora-yuan/Leetcode_Python3 | 4ce56679b48862c87addc8cd870cdd525c9d926c | 720bb530850febc2aa67a56a7a0b3a85ab37f415 | refs/heads/master | 2021-07-12T13:23:19.399155 | 2020-10-21T03:14:36 | 2020-10-21T03:14:36 | 212,998,500 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | #label: 二分查找 difficulty: medium
"""
解题思路:
看到这种有序(或者部分有序)的数组,一般考虑使用二分查找进行优化。
开始时,左指针指向矩阵中最小元素,右指针指向矩阵中最大元素(注意:指针代表的是元素值,而不是位置),
计算矩阵中小于等于左右指针中间值的元素个数c,然后通过比较c与k的值,进行左指针或者右指针的移动。重复上述过程直到l >= r.
"""
class Solution:
    def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
        """Return the k-th smallest value in a row/column-sorted matrix.

        Binary-search on the *value range* rather than on positions: for a
        candidate value, count entries <= it via per-row bisection, then
        tighten [lo, hi] until it collapses onto the answer.
        """
        lo, hi = matrix[0][0], matrix[-1][-1]
        while lo < hi:
            candidate = lo + (hi - lo) // 2
            # How many matrix elements are <= candidate (rows are sorted).
            not_greater = 0
            for row in matrix:
                not_greater += bisect.bisect_right(row, candidate)
            if not_greater < k:
                lo = candidate + 1
            else:
                hi = candidate
        return lo
| [
"noreply@github.com"
] | Aurora-yuan.noreply@github.com |
fd1bf15834426fd092f37b7667afda9bd7e54df2 | d4ea1f9747799bf503523b86b8b5ee29bab65eff | /gyun/cli/iaas_client/actions/s2/enable_s2_shared_targets.py | eab03da093de332254257c250d9d7fe7bc891cbd | [
"Apache-2.0"
] | permissive | gyun-gome/gyun-cli | 88b5493d90a19c5bf56a1bba4bf301d1b4a3156d | 275b6664335e2ef21a01a48f8c06d6a89dd63467 | refs/heads/master | 2021-06-28T13:53:01.300135 | 2017-09-13T04:44:01 | 2017-09-13T04:44:01 | 103,353,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,814 | py | # encoding: utf-8
# =========================================================================
# ©2017-2018 北京国美云服科技有限公司
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from gyun.cli.misc.utils import explode_array
from gyun.cli.iaas_client.actions.base import BaseAction
class EnableS2SharedTargetsAction(BaseAction):
    """CLI action that enables a batch of S2 shared targets by ID."""

    action = 'EnableS2SharedTargets'
    command = 'enable-s2-shared-targets'
    usage = '%(prog)s -s <shared_targets> [-f <conf_file>]'

    @classmethod
    def add_ext_arguments(cls, parser):
        """Register the action-specific ``--shared-targets`` option."""
        parser.add_argument(
            "-s", "--shared-targets",
            dest="shared_targets",
            action="store",
            type=str,
            default=None,
            help="the IDs of shared targets you want to enable.")

    @classmethod
    def build_directive(cls, options):
        """Build the request body, or return None when input is missing."""
        if not hasattr(options, 'shared_targets'):
            print("error: [shared_targets] should be specified.")
            return None
        return {"shared_targets": explode_array(options.shared_targets)}
| [
"lvyonggang@gomeholdings.com"
] | lvyonggang@gomeholdings.com |
80e2f36e6bc596fd3d476900e6c9fe46833f12de | caaf1b0754db1e676c37a6f1e58f19183754e654 | /sdk/tables/azure-data-tables/azure/data/tables/_entity.py | aa1ccae843a4eeb457a282f8013be5b4f718ea84 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | rdomenzain/azure-sdk-for-python | 45dfb39121a0abda048c22e7309733a56259f525 | 58984255aeb904346b6958c5ba742749a2cc7d1b | refs/heads/master | 2023-07-07T06:53:12.967120 | 2023-07-04T16:27:37 | 2023-07-04T16:27:37 | 258,050,134 | 0 | 0 | MIT | 2020-04-23T00:12:14 | 2020-04-23T00:12:13 | null | UTF-8 | Python | false | false | 2,551 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from enum import Enum
from typing import Any, Dict, Union, NamedTuple
from azure.core import CaseInsensitiveEnumMeta
class TableEntity(dict):
    """
    An Entity dictionary with additional metadata
    """
    # Class-level default. NOTE(review): this dict is shared by every instance
    # until an instance-level ``_metadata`` is assigned elsewhere — presumably
    # the deserializer sets one per entity; confirm before mutating it.
    _metadata = {}  # type: Dict[str, Any]
    @property
    def metadata(self) -> Dict[str, Any]:
        """Metadata (e.g. service-side bookkeeping) kept alongside the entity.

        :return: the entity's metadata dictionary
        :rtype: Dict[str, Any]
        """
        return self._metadata
# The CaseInsensitiveEnumMeta metaclass makes member lookup case-insensitive.
class EdmType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """
    Used by :class:`~.EntityProperty` to represent the type of the entity property
    to be stored by the Table service.
    """
    BINARY = "Edm.Binary"
    """ Represents byte data. This type will be inferred for Python bytes. """
    INT64 = "Edm.Int64"
    """ Represents a number between -(2^31) and 2^31. Must be specified or numbers will default to INT32. """
    GUID = "Edm.Guid"
    """ Represents a GUID. This type will be inferred for uuid.UUID. """
    DATETIME = "Edm.DateTime"
    """ Represents a date. This type will be inferred for Python datetime objects. """
    STRING = "Edm.String"
    """ Represents a string. This type will be inferred for Python strings. """
    INT32 = "Edm.Int32"
    """ Represents a number between -(2^15) and 2^15. This is the default type for Python numbers. """
    DOUBLE = "Edm.Double"
    """ Represents a double. This type will be inferred for Python floating point numbers. """
    BOOLEAN = "Edm.Boolean"
    """ Represents a boolean. This type will be inferred for Python booleans. """
""" Represents a boolean. This type will be inferred for Python booleans. """
EntityProperty = NamedTuple("EntityProperty", [("value", Any), ("edm_type", Union[str, EdmType])])
"""
An entity property. Used to explicitly set :class:`~EdmType` when necessary.
Values which require explicit typing are GUID, INT64, and BINARY. Other EdmTypes
may be explicitly create as EntityProperty objects but need not be. For example,
the below with both create STRING typed properties on the entity::
entity = TableEntity()
entity.a = 'b'
entity.x = EntityProperty('y', EdmType.STRING)
:param value:
:type value: Any
:param edm_type: Type of the value
:type edm_type: str or :class:`~azure.data.tables.EdmType`
"""
| [
"noreply@github.com"
] | rdomenzain.noreply@github.com |
e12bb3c1b2e7f6ba0856850ff98c3e2c05ab5f88 | 95a2924dfbed2b07587c9c6516df4ac248f2586c | /Data Visualization/Bokeh/Streaming Real Time Data/random-generator.py | fdd31efee2d80d3eb98b68af91a68676a18d6868 | [] | no_license | souviksaha97/Data-Science-Lab | 49f50ef80dff3bcfed6f26d735707ec485a393ae | d18681460f51e83252d5b6a491b997a3600c7715 | refs/heads/master | 2020-12-18T19:50:00.327882 | 2019-06-24T17:41:20 | 2019-06-24T17:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | """
Created on Sat Feb 9 2019
@author: Nodar Okroshiashvili
"""
# Serve Random Number Generation in Bokeh Server
#import libraries
from bokeh.io import curdoc
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from random import randrange
# Create the figure with fixed axis ranges so streamed points stay in view.
f = figure(x_range=(0,11),y_range=(0,11)) # Set range for axes
# Create the (initially empty) ColumnDataSource backing both glyphs.
source=ColumnDataSource(data=dict(x=[],y=[]))
# Create glyphs that render from the shared source.
f.circle(x='x',
         y='y',
         size=10,
         fill_color='olive',
         line_color='brown',
         source=source)
f.line(x='x', y='y', source=source)
# Periodic callback: stream one random point, keeping at most 20 on screen.
def update():
    new_data=dict(x=[randrange(1,10)],y=[randrange(1,10)])
    source.stream(new_data,rollover=20)
    #print(source.data)
# Add the figure to the current Bokeh server document.
curdoc().add_root(f)
# Configure the callback to fire once per second.
curdoc().add_periodic_callback(update,1000) # callback every 1000 milliseconds
| [
"n.okroshiashvili@gmail.com"
] | n.okroshiashvili@gmail.com |
ce45e39c5d8bac1037969802999ef4d0ac487163 | e3d969e2c9e4b57f4f7d58af5e44a00aa8fb15d3 | /0886 Possible Bipartition.py | efd3c30cb3226e421b6cc27ccfa5dbea2357067a | [
"MIT"
] | permissive | kevin-fang/leetcode | 2744ff01e791db6f60edf946ef71451fae92ef6f | 3958f888b30bb3e29916880ecec49b3870a0bea3 | refs/heads/master | 2022-12-15T07:50:01.056016 | 2020-09-10T03:47:53 | 2020-09-10T03:47:53 | 294,296,037 | 3 | 0 | MIT | 2020-09-10T03:47:39 | 2020-09-10T03:47:38 | null | UTF-8 | Python | false | false | 951 | py | class Solution:
def possibleBipartition(self, N: int, dislikes: List[List[int]]) -> bool:
neighbors = defaultdict(set)
for a,b in dislikes:
neighbors[a].add(b)
neighbors[b].add(a)
colors = defaultdict(int)
for i in range(1,N+1):
if colors[i] == 0:
colors[i] = 1
bfs = deque([i])
while bfs:
for j in range(len(bfs)):
cur = bfs.popleft()
for neighbor in neighbors[cur]:
if colors[neighbor] == colors[cur]:
return False
if colors[neighbor] == 0:
colors[neighbor] = 3-colors[cur]
bfs.append(neighbor)
return True
| [
"mdabedincs@gmail.com"
] | mdabedincs@gmail.com |
d5563aac9230b36f5b80103075450f10c7274578 | 2de2437bbf480f6518554bcb204106dd37262023 | /office365/runtime/serviceOperationQuery.py | 54dddc29343498b339aa8a62c19dcbc2079b3d14 | [
"MIT"
] | permissive | stardust85/Office365-REST-Python-Client | 386e5bba16cdee1472b7e23d405a4bf9b6f5e73a | cd369c607c7d137a000734e9c5e8f03ae3e3c603 | refs/heads/master | 2022-09-29T19:44:02.166438 | 2020-06-03T23:12:40 | 2020-06-03T23:12:40 | 269,356,313 | 0 | 0 | MIT | 2020-06-04T12:41:03 | 2020-06-04T12:41:02 | null | UTF-8 | Python | false | false | 974 | py | from office365.runtime.client_query import ClientQuery
from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation
class ServiceOperationQuery(ClientQuery):
    """A client query that targets a service operation (method) on a resource."""

    def __init__(self, binding_type, method_name=None, method_params=None, parameter_type=None,
                 parameter_name=None, return_type=None):
        """
        :type method_name: str or None
        :type method_params: list or dict or None
        """
        super(ServiceOperationQuery, self).__init__(binding_type, parameter_type, parameter_name, return_type)
        self.static = False
        self._method_name = method_name
        self._method_params = method_params

    @property
    def method_url(self):
        """URL fragment addressing this operation, e.g. ``name(params)``."""
        path = ResourcePathServiceOperation(self.method_name, self.method_parameters)
        return path.to_url()

    @property
    def method_name(self):
        """Name of the service operation."""
        return self._method_name

    @property
    def method_parameters(self):
        """Parameters passed to the service operation."""
        return self._method_params
| [
"Ajilon80!"
] | Ajilon80! |
6ea6c324e5ab3ebb985b2079358f585f8734be93 | 76e7feaea74beb9d337885dcaa3ee59e26d9db70 | /basics/nn2.py | 6f9cbd8aea264996b27941a924761f4a52324c18 | [] | no_license | sayantansatpati/dlf | 8f9bec134212a6608f2b6854c120253677c71959 | ce8b083f31cd1b4f67ea3718cbbad5cac1eff1f4 | refs/heads/master | 2021-01-11T15:47:02.118653 | 2017-11-14T21:04:19 | 2017-11-14T21:04:19 | 79,931,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | import numpy as np
# Debug helper: print an array's ndim/shape and its first five rows.
# (Python 2 file: print is a statement here.)
def disp(a,msg):
    print '\n### {0}'.format(msg)
    print 'nDim: {0}, shape: {1}'.format(np.ndim(a), a.shape)
    print a[:5]
# Toy 2-layer network trained on an XOR-style target with sigmoid activations.
X = np.array([ [0,0,1],[0,1,1],[1,0,1],[1,1,1] ])
disp(X,"X")
y = np.array([[0,1,1,0]]).T
disp(y,"y")
# Random weight init, uniformly in [-1, 1).
syn0 = 2*np.random.random((3,4)) - 1
disp(syn0,"syn0")
syn1 = 2*np.random.random((4,1)) - 1
disp(syn1,"syn1")
for j in xrange(1000):
    # Forward pass: l1 = sigmoid(X @ syn0); l2 = sigmoid(l1 @ syn1).
    l1 = 1/(1+np.exp(-(np.dot(X,syn0))))
    l2 = 1/(1+np.exp(-(np.dot(l1,syn1))))
    # Backward pass: delta = error * sigmoid derivative, propagated to layer 1.
    l2_delta = (y - l2)*(l2*(1-l2))
    l1_delta = l2_delta.dot(syn1.T) * (l1 * (1-l1))
    # Weight update (no explicit learning rate).
    syn1 += l1.T.dot(l2_delta)
    syn0 += X.T.dot(l1_delta)
"sayantan.satpati.sfbay@gmail.com"
] | sayantan.satpati.sfbay@gmail.com |
f6cb4b6acd7359ae58644de8ce42a06ca40d370e | ce722f35f63d7e7af3e9890cbea50b05d32c34c7 | /crawler/dspider/spiders/hkexSituationSpider.py | f5c24ebe06cbdf7bfa80ffba929a9fa2839a4b2a | [] | no_license | tfangz888/smart_deal_tool | bc6645047e2c3ff36af0baed62e31d1c6cec4a15 | 0f0e4edfec582e93146b30273621a28c36a5d6ca | refs/heads/master | 2020-05-17T03:12:16.720526 | 2019-04-23T14:11:10 | 2019-04-23T14:11:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,409 | py | #coding=utf-8
import json
import scrapy
from datetime import datetime, timedelta
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError
from dspider.myspider import BasicSpider
from dspider.items import HkexTradeOverviewItem, HkexTradeTopTenItem
class HkexSpider(BasicSpider):
    """Scrapy spider for HKEX Stock Connect daily statistics.

    Fetches the daily-stat JS payload for each recent day and yields one
    trade-overview item plus ten top-ten items for each of the four market
    sections (SSE north/south, SZSE north/south).
    """
    name = 'hkexSpider'
    custom_settings = {
        'ITEM_PIPELINES': {
            'dspider.pipelines.DspiderPipeline': 2
        }
    }
    def start_requests(self):
        """Generate one request per day for roughly the last ten days."""
        matching_url = "https://sc.hkex.com.hk/TuniS/www.hkex.com.hk/chi/csm/DailyStat/data_tab_daily_{}c.js"
        end_date = datetime.now().strftime('%Y.%m.%d')
        start_date = self.get_nday_ago(end_date, 10, dformat = '%Y.%m.%d')
        while start_date <= end_date: # adjust this loop to control the crawled date range
            start_date = self.get_tomorrow_date(sdate = start_date)
            url = matching_url.format(start_date.replace('.', ''))
            yield scrapy.Request(url=url, callback=self.parse, errback=self.errback_httpbin, dont_filter=True)
    def parse(self, response):
        """Parse the payload: text after '=' is a JSON array whose four
        entries are, in order, SSE north, SSE south, SZSE north, SZSE south."""
        try:
            jsonstr = response.text.split("=")[1]
            data = json.loads(jsonstr)
            sse_northbond = data[0]
            sse_northbond_overview_item = self.parseTradeOverviewItem(sse_northbond, "sse", "north")
            yield sse_northbond_overview_item
            sse_northbond_top_ten_items = self.parseTradeTopTenItem(sse_northbond, "sse", "north")
            for i in range(len(sse_northbond_top_ten_items)):
                yield sse_northbond_top_ten_items[i]
            sse_southbond = data[1]
            sse_southbond_overview_item = self.parseTradeOverviewItem(sse_southbond, "sse", "south")
            yield sse_southbond_overview_item
            sse_southbond_top_ten_items = self.parseTradeTopTenItem(sse_southbond, "sse", "south")
            for i in range(len(sse_southbond_top_ten_items)):
                yield sse_southbond_top_ten_items[i]
            szse_northbond = data[2]
            szse_northbond_overview_item = self.parseTradeOverviewItem(szse_northbond, "szse", "north")
            yield szse_northbond_overview_item
            szse_northbond_top_ten_items = self.parseTradeTopTenItem(szse_northbond, "szse", "north")
            for i in range(len(szse_northbond_top_ten_items)):
                yield szse_northbond_top_ten_items[i]
            szse_southbond = data[3]
            szse_southbond_overview_item = self.parseTradeOverviewItem(szse_southbond, "szse", "south")
            yield szse_southbond_overview_item
            szse_southbond_top_ten_items = self.parseTradeTopTenItem(szse_southbond, "szse", "south")
            for i in range(len(szse_southbond_top_ten_items)):
                yield szse_southbond_top_ten_items[i]
        except Exception as e:
            print(e)
    def parseTradeOverviewItem(self, need_parse_data, market, direction):
        """Build a HkexTradeOverviewItem from one market section's first table."""
        trade_overview_tr = need_parse_data["content"][0]["table"]["tr"]
        item = HkexTradeOverviewItem()
        item['market'] = market
        item['direction'] = direction
        item['date'] = need_parse_data["date"]
        item['total_turnover'] = trade_overview_tr[0]["td"][0][0]
        item['buy_turnover'] = trade_overview_tr[1]["td"][0][0]
        item['sell_turnover'] = trade_overview_tr[2]["td"][0][0]
        item['total_trade_count'] = trade_overview_tr[3]["td"][0][0]
        item['buy_trade_count'] = trade_overview_tr[4]["td"][0][0]
        item['sell_trade_count'] = trade_overview_tr[5]["td"][0][0]
        if need_parse_data["market"] == "SSE Northbound" or need_parse_data["market"] == "SZSE Northbound":
            # Daily quota balance and its usage ratio (Northbound only).
            item['dqb'] = trade_overview_tr[6]["td"][0][0]
            item['dqb_ratio'] = trade_overview_tr[7]["td"][0][0]
        else:
            item['dqb'] = None
            item['dqb_ratio'] = None
        return item
    def parseTradeTopTenItem(self, need_parse_data, market, direction):
        """Build the ten HkexTradeTopTenItem rows from the section's second table."""
        items = []
        trade_top_ten_tr = need_parse_data["content"][1]["table"]["tr"]
        for i in range(10):
            item = HkexTradeTopTenItem()
            item['market'] = market
            item['direction'] = direction
            item['date'] = need_parse_data["date"]
            item['rank'] = trade_top_ten_tr[i]["td"][0][0]
            item['code'] = trade_top_ten_tr[i]["td"][0][1]
            item['name'] = trade_top_ten_tr[i]["td"][0][2].strip()
            item['buy_turnover'] = trade_top_ten_tr[i]["td"][0][3]
            item['sell_turnover'] = trade_top_ten_tr[i]["td"][0][4]
            item['total_turnover'] = trade_top_ten_tr[i]["td"][0][5]
            items.append(item)
        return items
    def errback_httpbin(self, failure):
        """Request error hook; currently distinguishes failure types silently."""
        # log all errback failures, in case you want to do something special for some errors, you may need the failure's type
        #print(repr(failure))
        if failure.check(HttpError):
            response = failure.value.response
            #print('HttpError on %s', response.url)
        elif failure.check(DNSLookupError):
            request = failure.request
            #print('DNSLookupError on %s', request.url)
        elif failure.check(TimeoutError):
            request = failure.request
            #print('TimeoutError on %s', request.url)
"hellobiek@gmail.com"
] | hellobiek@gmail.com |
24050d06ae4154d502447a8ccfe7d229fa83e08b | 2286b880df34e1bfabe79b3605de287040404560 | /04-04/todolist/mahasiswa/views.py | 7259eaec7ed0098020a0f8469da5bc93517f17b6 | [] | no_license | iklimah27/praxis-academy-2 | e5d8b08807980d6fd8ff6ab73caa6ea18083c7f8 | 925853b520c9a8d7a87d8980d7fedfa604d3b4c8 | refs/heads/master | 2022-12-25T01:54:45.572190 | 2020-10-15T07:22:06 | 2020-10-15T07:22:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py | from django.shortcuts import render
from django.shortcuts import render, redirect
from . import models, forms
def index(req):
    """List every Mhs record on the index page."""
    records = models.Mhs.objects.all()
    context = {'data': records}
    return render(req, 'mahasiswa/index.html', context)
def new(req):
    """Show the creation form; persist and redirect on a valid POST."""
    if req.POST:
        bound = forms.MhsForm(req.POST)
        if bound.is_valid():
            bound.save()
            return redirect('/mahasiswa/')
        # Invalid submission: re-render with the bound form and its errors.
        return render(req, 'mahasiswa/new.html', {'form': bound})
    return render(req, 'mahasiswa/new.html', {'form': forms.MhsForm()})
def detail(req, id):
    """Render the detail page for one Mhs record (None when not found)."""
    record = models.Mhs.objects.filter(pk=id).first()
    return render(req, 'mahasiswa/detail.html', {'data': record})
def delete(req, id):
    """Remove the record with the given primary key, then return to the list."""
    queryset = models.Mhs.objects.filter(pk=id)
    queryset.delete()
    return redirect('/mahasiswa/')
def update(req, id):
    """On POST overwrite the record's fields; otherwise show the edit form."""
    queryset = models.Mhs.objects.filter(pk=id)
    if req.POST:
        queryset.update(nama=req.POST['nama'], nim=req.POST['nim'],
                        status=req.POST['status'], telp=req.POST['telp'],
                        alamat=req.POST['alamat'])
        return redirect('/mahasiswa/')
    return render(req, 'mahasiswa/update.html', {'data': queryset.first()})
"hatami391998@gmail.com"
] | hatami391998@gmail.com |
0b25adda0ddd3b445d9fee6ae58a58302663df91 | f8457b044305b5ef4944ab649c44268e9a32a0bc | /data/ship/color_gen.py | 7a85a179f64da38b63c1d1d1d76d5e6716e7f723 | [
"MIT"
] | permissive | PcloD/PixelShipGenerator | 163d2b2d7d1f9eea2d316c2d7d9c29d1c84d581a | 33e4fa004890f388cd679bbbb6837bcc05465bbe | refs/heads/master | 2020-05-05T08:19:39.968725 | 2016-12-19T00:45:10 | 2016-12-19T00:45:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py | from random import *
class ColorPalette(object):
    """A base RGB color plus three derived shades (Python 2: note the
    tuple-unpacking parameters ``(r, g, b)`` below)."""
    def __init__(self, num, variance=30):
        # num: external identifier; variance: max per-channel perturbation.
        self.num = num
        self.variance = variance
        self.base = self.set_random_color()
        self.palette = self.set_palette()
    def set_palette(self):
        # Keys 1-4: base, random variation, lighter shade, darker shade.
        palette = {
            1: self.base,
            2: self.vary_color(self.base),
            3: self.lighten_color(self.base),
            4: self.darken_color(self.base),
        }
        return palette
    @staticmethod
    def verify_color(col):
        # Clamp each channel to the valid 0-255 range.
        verified = []
        for v in col:
            if v > 255:
                v = 255
            if v < 0:
                v = 0
            verified.append(v)
        return tuple(verified)
    def vary_color(self, (r, g, b)):
        # Shift each channel by a random amount in [-variance, variance].
        r_var = randint(-self.variance, self.variance)
        g_var = randint(-self.variance, self.variance)
        b_var = randint(-self.variance, self.variance)
        new = r + r_var, g + g_var, b + b_var
        return self.verify_color(new)
    def lighten_color(self, (r, g, b)):
        # Only non-negative shifts, so the result is never darker.
        r_var = randint(0, self.variance)
        g_var = randint(0, self.variance)
        b_var = randint(0, self.variance)
        new = r + r_var, g + g_var, b + b_var
        return self.verify_color(new)
    def darken_color(self, (r, g, b)):
        # Only non-positive shifts, so the result is never lighter.
        r_var = randint(-self.variance, 0)
        g_var = randint(-self.variance, 0)
        b_var = randint(-self.variance, 0)
        new = r + r_var, g + g_var, b + b_var
        return self.verify_color(new)
    def set_random_color(self):
        # Uniformly random RGB triple.
        r = randint(0, 255)
        g = randint(0, 255)
        b = randint(0, 255)
        return r, g, b
    def get_color(self):
        # NOTE(review): returns a random palette *key* (an int 1-4), not a
        # color tuple; looks like it should be self.palette[choice(...)].
        # Confirm what callers expect before changing.
        return choice(self.palette.keys())
"marzecsean@gmail.com"
] | marzecsean@gmail.com |
8d07dbb7d243048f69e7a6b17948b087345934c2 | 7e0cdabf1e7514fb0e3d53548eaadd7be85ae5e6 | /configs/helmet/merge/faster_rcnn_mobilenetv2_64_fpn_2x.py | a668d159c1d3ba2cfd6199b16e82ba9a061d865f | [
"Apache-2.0"
] | permissive | fanqie03/mmdetection.bak | ce2697f3a0ca5603d923856fbdc8b7bb32066939 | 0bc0ea591b5725468f83f9f48630a1e3ad599303 | refs/heads/master | 2023-05-12T02:44:11.209749 | 2020-03-09T01:58:39 | 2020-03-09T01:58:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,419 | py | # model settings
# Faster R-CNN with a MobileNetV2 backbone and a slim 64-channel FPN,
# detecting num_classes=3 (2 foreground classes + background).
model = dict(
    type='FasterRCNN',
    pretrained='torchvision://mobilenet_v2',
    backbone=dict(
        type='MobileNetV2',
        out_indices=(3, 6, 13, 18)),
    neck=dict(
        type='FPN',
        in_channels=[24, 32, 96, 1280],
        out_channels=64,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=64,
        feat_channels=64,
        anchor_scales=[8],
        anchor_ratios=[1.0],
        anchor_strides=[4, 8, 16, 32, 64],
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), # sample_num: RoIAlign sampling points
        out_channels=64,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='SharedFCBBoxHead',
        num_fcs=2,
        in_channels=64,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=3,
        target_means=[0., 0., 0., 0.],
        target_stds=[0.1, 0.1, 0.2, 0.2],
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
# (IoU assigners/samplers for RPN and RCNN stages, plus proposal NMS.)
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=64,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        pos_weight=-1,
        debug=False))
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=1000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.05, nms=dict(type='nms', iou_thr=0.3), max_per_img=100) # smaller iou_thr suppresses more overlapping boxes
    # soft-nms is also supported for rcnn testing
    # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
    )
# classes = ['']
# ImageNet mean/std normalization used by both pipelines below.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    # dict(type='PhotoMetricDistortion',),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# dataset settings
dataset_type = 'HelmetMergeDataset'
data_root = '/datasets/HelmetMerge/'
data = dict(
    imgs_per_gpu=4,
    workers_per_gpu=4,
    train=dict(type=dataset_type,
               data_root=data_root,
               ann_file=data_root + 'ImageSets/Main/trainval.txt',
               img_prefix=data_root,
               pipeline=train_pipeline,
               use_ignore=True),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=data_root + 'ImageSets/Main/test.txt',
        img_prefix=data_root,
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=data_root + 'ImageSets/Main/test.txt',
        img_prefix=data_root,
        pipeline=test_pipeline),)
# optimizer
optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[16, 22])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = __file__.replace('configs', 'work_dirs').rstrip('.py')
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"1242733702@qq.com"
] | 1242733702@qq.com |
c5e37668b4e5631218e5d75f9260e597329993b2 | 7296c1214741a8cd3e2b70c90de6784d9fa53dba | /Assignments/Assignment_2/score.py | 7fd043b850098a87a8580534f7d2b08d979708c8 | [] | no_license | msaad1311/MLOps | 949912c5417db5c08ce69df46867c3e84b90f810 | 484f2124cd84472c7971d428982507b9215a400f | refs/heads/main | 2023-03-29T12:13:38.286713 | 2021-03-30T14:01:57 | 2021-03-30T14:01:57 | 349,442,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,788 | py | import preprocessing_functions as pf
import config
# =========== scoring pipeline =========
# impute categorical variables
def predict(data):
    """Scoring pipeline: apply the saved preprocessing steps to *data* and
    return model predictions.

    Mirrors the training pipeline: cabin-letter extraction, NA imputation,
    rare-label grouping, one-hot encoding, dummy-column alignment, scaling,
    then prediction with the persisted model.
    """
    # extract first letter from cabin
    data['cabin']=pf.extract_cabin_letter(data,'cabin')
    # impute NA categorical
    data[config.CATEGORICAL_VARS]=pf.impute_na(data[config.CATEGORICAL_VARS],'Missing')
    # impute NA numerical
    data[config.NUMERICAL_TO_IMPUTE]=pf.impute_na(data[config.NUMERICAL_TO_IMPUTE],'Numerical')
    # Group rare labels using the frequent-label lists captured at training time.
    for var in config.CATEGORICAL_VARS:
        data[var] = pf.remove_rare_labels(data, var,config.FREQUENT_LABELS[var])
    # encode variables
    data = pf.encode_categorical(data,config.CATEGORICAL_VARS)
    print(data.shape)
    # check all dummies were added (align columns with the training matrix)
    data = pf.check_dummy_variables(data,config.DUMMY_VARIABLES)
    print(data.shape)
    # scale variables with the persisted scaler
    data = pf.scale_features(data,config.OUTPUT_SCALER_PATH)
    # make predictions with the persisted model
    predictions = pf.predict(data,config.OUTPUT_MODEL_PATH)
    return predictions
# ======================================
# small test that scripts are working ok
# Smoke test: run the full scoring pipeline on a held-out split.
if __name__ == '__main__':
    from sklearn.metrics import accuracy_score
    import warnings
    warnings.simplefilter(action='ignore')
    # Load data
    data = pf.load_data(config.PATH_TO_DATASET)
    X_train, X_test, y_train, y_test = pf.divide_train_test(data,
                                                        config.TARGET)
    pred = predict(X_test)
    # evaluate
    # if your code reproduces the notebook, your output should be:
    # test accuracy: 0.6832
    print('test accuracy: {}'.format(accuracy_score(y_test, pred)))
    print()
"50722220+msaad1311@users.noreply.github.com"
] | 50722220+msaad1311@users.noreply.github.com |
7dc978fbff839ff4c2fc09ba79dd84913b12d20a | fbcac9ff8664e857e1f7d5409b170a23f432763f | /tests/test_util.py | 4466050d91c1fb68eb4969ac37cebed560b4c6ea | [] | no_license | claraj/iss_slack_bot | 289d4af3cf108e888db3170048d7aa491984f759 | 8e36e81d8d571dfceb7e40ede44ca720fc699e50 | refs/heads/master | 2021-09-06T08:59:07.966377 | 2018-02-04T17:01:53 | 2018-02-04T17:01:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,471 | py | from unittest import TestCase
from utils import in_future
from datetime import datetime, timedelta
import time
# Testing functions unrelated to GAE features
class TestInFuture(TestCase):
    """Unit tests for utils.in_future covering past, present and future times."""

    def setUp(self):
        self._base = datetime.today()
        self._ten = timedelta(seconds=10)
        self._forty = timedelta(seconds=40)

    @staticmethod
    def _as_timestamp(moment):
        """Convert a datetime to a POSIX timestamp (works on py2 and py3)."""
        return time.mktime(moment.timetuple())

    def test_in_future(self):
        # A time 10 seconds ahead counts as "in the future" -> True.
        stamp = self._as_timestamp(self._base + self._ten)
        self.assertTrue(in_future(stamp))

    def test_in_future_now(self):
        # The current moment is not "in the future" -> False.
        self.assertFalse(in_future(self._as_timestamp(self._base)))

    def test_in_future_beyond_min(self):
        # 10 seconds ahead with a 5-second minimum margin -> True.
        stamp = self._as_timestamp(self._base + self._ten)
        self.assertTrue(in_future(stamp, 5))

    def test_in_future_at_min(self):
        # Exactly at the minimum margin is not beyond it -> False.
        stamp = self._as_timestamp(self._base + self._ten)
        self.assertFalse(in_future(stamp, 10))

    def test_in_future_in_past(self):
        # A past time is never "in the future" -> False.
        stamp = self._as_timestamp(self._base - self._ten)
        self.assertFalse(in_future(stamp))

    def test_in_future_in_past_negative_min(self):
        # 40 seconds ago still clears a -60 second threshold -> True.
        stamp = self._as_timestamp(self._base - self._forty)
        self.assertTrue(in_future(stamp, -60))

    def test_in_future_in_past_beyond_negative_min(self):
        # 40 seconds ago does not clear a -10 second threshold -> False.
        stamp = self._as_timestamp(self._base - self._forty)
        self.assertFalse(in_future(stamp, -10))
| [
"clara@clara.com"
] | clara@clara.com |
753f1bc6189e0ee7037082baa53044333b792640 | 275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc | /test/test_pos_api.py | 8c2788266c7397b2d42f5f30041879ecad09d054 | [] | no_license | cascadiarc/cyclos-python-client | 8029ce07174f2fe92350a92dda9a60976b2bb6c2 | a2e22a30e22944587293d51be2b8268bce808d70 | refs/heads/main | 2023-04-03T16:52:01.618444 | 2021-04-04T00:00:52 | 2021-04-04T00:00:52 | 354,419,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | # coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.pos_api import POSApi # noqa: E501
from swagger_client.rest import ApiException
class TestPOSApi(unittest.TestCase):
    """POSApi unit test stubs.

    Generated by swagger-codegen; every test body is an empty placeholder
    waiting for real assertions against the POS endpoints.
    """
    def setUp(self):
        # Fresh API client instance for each test.
        self.api = swagger_client.api.pos_api.POSApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_calculate_receive_payment_installments(self):
        """Test case for calculate_receive_payment_installments
        Calculates the default installments for a scheduled payment  # noqa: E501
        """
        pass
    def test_data_for_receive_payment(self):
        """Test case for data_for_receive_payment
        Returns configuration data for receiving a payment (POS)  # noqa: E501
        """
        pass
    def test_preview_receive_payment(self):
        """Test case for preview_receive_payment
        Previews a POS payment before receiving it  # noqa: E501
        """
        pass
    def test_receive_payment(self):
        """Test case for receive_payment
        Receives a payment (POS)  # noqa: E501
        """
        pass
    def test_receive_payment_otp(self):
        """Test case for receive_payment_otp
        Generates a new One-Time-Password (OTP) for a pos payment  # noqa: E501
        """
        pass
# Allow running this stub module directly.
if __name__ == '__main__':
    unittest.main()
| [
"dan@leftcoastfs.com"
] | dan@leftcoastfs.com |
b49cd004b6a2b08e9956b2115e38c5393bc16c91 | 9919439783a3d9ec7a4435e50e0225ea1d6f2b69 | /django_rest_json_api_example/models.py | f5f2ab812b4f5594b6975c8ef4dc23d0ff0dd16c | [] | no_license | newcontext-oss/django-rest-json-api | 19c2e5210c59d02eee88afb3061761f02f4037d6 | 107ef896397d93715d9f3eed34fcb6f14d5893b9 | refs/heads/master | 2021-01-15T20:27:51.771682 | 2017-10-02T18:41:28 | 2017-10-02T18:41:28 | 99,850,109 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | """
Django ORM models for the JSON API examples.
"""
import uuid
from django.db import models
class Person(models.Model):
    """
    JSON API example person model.
    """
    # Pass the callable itself (not uuid.uuid4()): Django then evaluates it
    # once per saved row. Calling it here would compute a single UUID at
    # import time and hand the same default to every Person.
    uuid = models.UUIDField(default=uuid.uuid4)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
class Article(models.Model):
    """
    JSON API example article model.
    """
    # Callable default: a fresh UUID per instance, instead of one shared
    # value computed once at import time (uuid.uuid4() was being called).
    uuid = models.UUIDField(default=uuid.uuid4)
    title = models.CharField(max_length=255)
    description = models.CharField(max_length=255, blank=True)
    author = models.ForeignKey(Person, null=True, blank=True)
class Comment(models.Model):
    """
    JSON API example comment model.
    """
    class Meta:
        ordering = ["uuid"]
    # Callable default: a fresh UUID per instance, instead of one shared
    # value computed once at import time (uuid.uuid4() was being called).
    uuid = models.UUIDField(default=uuid.uuid4)
    body = models.TextField()
    article = models.ForeignKey(Article, blank=False, related_name='comments')
    author = models.ForeignKey(Person, blank=False)
| [
"me@rpatterson.net"
] | me@rpatterson.net |
5ceec9be34f8bee06d3162cec9aacb9cb4578b59 | eb9af63f5874345c03b567a944e2cb67ec8995d5 | /leetcode/binarySearch/findminimumValueinSortedRotatedWIthDuplicate.py | 2731d45cce9d2ad7025bb28d97bc1d079678bb36 | [] | no_license | ziqingW/pythonPlayground | 262fc143c7997fb9f9a9b148359c4d2c7de84fc7 | 3aab1747a1e6a77de808073e8735f89704940496 | refs/heads/master | 2021-01-25T13:41:56.494266 | 2019-03-10T02:48:04 | 2019-03-10T02:48:04 | 123,607,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | # Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
# (i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).
# Find the minimum element.
# The array may contain duplicates.
class Solution:
    def findMin(self, nums):
        """Return the minimum of a rotated sorted list that may contain
        duplicates (LeetCode 154).

        Binary search against the right endpoint. When nums[mid] equals
        nums[right] the minimum could be on either side, so the right
        endpoint is discarded one element at a time; dropping a duplicate
        of nums[right] can never remove the only copy of the minimum.
        O(log n) typical, O(n) worst case (e.g. all-equal input) — the
        same worst case the previous set-scanning version had, without
        its per-step O(n) set builds.

        :type nums: List[int]
        :rtype: int
        """
        left, right = 0, len(nums) - 1
        while left < right:
            mid = (left + right) // 2
            if nums[mid] > nums[right]:
                # Rotation point (and minimum) lies strictly right of mid.
                left = mid + 1
            elif nums[mid] < nums[right]:
                # Minimum is at mid or to its left.
                right = mid
            else:
                # nums[mid] == nums[right]: shrink safely by one.
                right -= 1
        return nums[left]
| [
"redtaq@hotmail.com"
] | redtaq@hotmail.com |
2c1513fb42947cf5ca374844cd41d2a4d4ae5bfd | ad13583673551857615498b9605d9dcab63bb2c3 | /output/models/ms_data/attribute_group/attg_d018_xsd/__init__.py | 624bfe9285086da05657bad0b2b20d96f60e002a | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 141 | py | from output.models.ms_data.attribute_group.attg_d018_xsd.attg_d018 import (
AttgRef,
Doc,
)
# Public re-exports of this generated package.
__all__ = [
    "AttgRef",
    "Doc",
]
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
2aba281c8d4b6dee7d96c653a117739563d1e6bd | 89ba6569e82bfe1abbb85f58c3a264240ef5b68f | /Scripts/calc_SNA_Data_Eurasia_CDRSCE.py | 375ff378a7d20bed12ffd1dcee10ec43332aa0ea | [
"MIT"
] | permissive | muskanmahajan37/AMIP_Simu | b3792c24f2f82749ac4d9df48a11bb46d2b82236 | 6370626fe81baf5c2280dab95fdab08a873f3a84 | refs/heads/master | 2022-02-08T08:09:08.575967 | 2019-07-25T22:29:34 | 2019-07-25T22:29:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,914 | py | """
Script calculates Eurasian snow area index for October-November using data
from the Rutgers Global Snow Lab data
Notes
-----
Author : Zachary Labe
Date : 25 July 2019
"""
### Import modules
import datetime
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as sts
import scipy.signal as SS
### Define directories
directoryfigure = '/home/zlabe/Desktop/'
directoryoutput = '/home/zlabe/Documents/Research/AMIP/Data/'
### Define time (timestamp strings used only for console logging)
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Calculating Snow Cover Area Index - %s----' % titletime)
#### Allot time series (index years 1979-2015; data file runs through 2018)
year1 = 1979
year2 = 2015
years = np.arange(year1,year2+1,1)
yearsdata = np.arange(year1,2018+1,1)
m = 12 # number of months
### Read in all months of data (columns: year, month, snow-cover extent)
yearsdata,months,data = np.genfromtxt(directoryoutput + \
                                      'CDR_SCE_Eurasia_Monthly.txt',unpack=True,
                                      usecols=[0,1,2])
### Reshape flat monthly records into a [year, month] grid
yearssort = np.reshape(yearsdata,(yearsdata.shape[0]//m,m))
monthsort = np.reshape(months,(months.shape[0]//m,m))
datasortq = np.reshape(data,(data.shape[0]//m,m))
### Change units from km^2 to 10^6 km^2
datasort = datasortq/1e6
### Calculate October-November index (1979-2015)
### (columns 9:11 are October and November, 0-indexed months)
octnov = np.nanmean(datasort[:years.shape[0],9:11],axis=1)
octnovdt = SS.detrend(octnov,type='linear')
### Calculate October index (1979-2015)
octonly = datasort[:years.shape[0],9:10].squeeze()
octonlydt = SS.detrend(octonly,type='linear')
### Save both indices (Oct-Nov), raw and linearly detrended
np.savetxt(directoryoutput + 'SNA_Eurasia_ON_CDRSCE.txt',
           np.vstack([years,octnov]).transpose(),delimiter=',',fmt='%3.1f',
           footer='\n Snow cover index calculated from' \
           'CDR SCE record in Global Snow Lab by \n' \
           'Rutgers',newline='\n\n')
np.savetxt(directoryoutput + 'SNA_Eurasia_ON_CDRSCE_DETRENDED.txt',
           np.vstack([years,octnovdt]).transpose(),delimiter=',',fmt='%3.1f',
           footer='\n Snow cover index calculated from' \
           'CDR SCE record in Global Snow Lab by \n' \
           'Rutgers',newline='\n\n')
### Save October-only index (raw)
np.savetxt(directoryoutput + 'SNA_Eurasia_O_CDRSCE.txt',
           np.vstack([years,octonly]).transpose(),delimiter=',',fmt='%3.1f',
           footer='\n Snow cover index calculated from' \
           'CDR SCE record in Global Snow Lab by \n' \
           'Rutgers',newline='\n\n')
np.savetxt(directoryoutput + 'SNA_Eurasia_O_CDRSCE_DETRENDED.txt',
np.vstack([years,octonlydt]).transpose(),delimiter=',',fmt='%3.1f',
footer='\n Snow cover index calculated from' \
'CDR SCE record in Global Snow Lab by \n' \
'Rutgers',newline='\n\n') | [
"zlabe@uci.edu"
] | zlabe@uci.edu |
9d57190c339f4d7c406c41b3ab2a49e9f86c568c | 26e3d85a3b61219e13f794289ff2b70baa248f14 | /material/frontend/views/detail.py | 0bad092ff2ef47b543351b523af9a399ee2695e5 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | viewflow/django-material | ac8dd10daa8352440845c767b07cafc7f7d09216 | 31b1ce5f6fecc10ba4c9babe4219fb7be97dbf93 | refs/heads/master | 2023-08-15T23:32:58.330321 | 2023-04-12T06:12:07 | 2023-04-12T06:12:40 | 29,337,344 | 2,818 | 570 | BSD-3-Clause | 2023-03-04T02:28:50 | 2015-01-16T07:17:33 | CSS | UTF-8 | Python | false | false | 5,692 | py | from __future__ import unicode_literals
from django.contrib.auth import get_permission_codename
from django.core.exceptions import PermissionDenied, ValidationError
from django.db import models
from django.http import Http404
from django.urls import reverse
from django.views import generic
class DetailModelView(generic.DetailView):
    """Thin wrapper for `generic.DetailView`.

    Integrates with an optional `viewset` for queryset and permission
    lookups, and renders a read-only field/value listing of the object.
    """

    viewset = None

    def get_queryset(self):
        """Return the list of items for this view.

        If view have no explicit `self.queryset`, tries too lookup to
        `viewflow.get_queryset`
        """
        if self.queryset is None and self.viewset is not None:
            if hasattr(self.viewset, 'get_queryset'):
                return self.viewset.get_queryset(self.request)
        return super(DetailModelView, self).get_queryset()

    def get_object_data(self):
        """List of object fields to display.

        Choice fields values are expanded to readable choice label.
        """
        for field in self.object._meta.fields:
            # Skip auto primary keys and other auto-created fields.
            if isinstance(field, models.AutoField):
                continue
            elif field.auto_created:
                continue
            else:
                # Prefer get_<field>_display() so choices render as labels.
                choice_display_attr = "get_{}_display".format(field.name)
                if hasattr(self.object, choice_display_attr):
                    value = getattr(self.object, choice_display_attr)()
                else:
                    value = getattr(self.object, field.name)

                if value is not None:
                    yield (field.verbose_name.title(), value)

    def _has_default_permission(self, request, action, obj):
        """Check the default django `<app>.<action>_<model>` permission.

        Tries the model-wide permission first, then the object-level
        permission for auth backends that support per-object checks.
        """
        opts = self.model._meta
        codename = get_permission_codename(action, opts)
        perm = '{}.{}'.format(opts.app_label, codename)
        if request.user.has_perm(perm):
            return True
        return request.user.has_perm(perm, obj=obj)

    def has_view_permission(self, request, obj):
        """Object view permission check.

        If view had a `viewset`, the `viewset.has_view_permission` used.
        """
        if self.viewset is not None:
            return self.viewset.has_view_permission(request, obj)

        # default lookup for the django permission
        if self._has_default_permission(request, 'view', obj):
            return True
        # Users allowed to change the object may implicitly view it.
        return self.has_change_permission(request, obj=obj)

    def has_change_permission(self, request, obj):
        """Object change permission check.

        If view had a `viewset`, the `viewset.has_change_permission` used.

        If true, view will show `Change` link to the Change view.
        """
        if self.viewset is not None:
            return self.viewset.has_change_permission(request, obj)

        # default lookup for the django permission
        return self._has_default_permission(request, 'change', obj)

    def has_delete_permission(self, request, obj):
        """Object delete permission check.

        If true, view will show `Delete` link to the Delete view.
        """
        if self.viewset is not None:
            return self.viewset.has_delete_permission(request, obj)

        # default lookup for the django permission
        return self._has_default_permission(request, 'delete', obj)

    def get_object(self):
        """Retrieve the object.

        Check object view permission at the same time.
        """
        queryset = self.get_queryset()
        model = queryset.model

        # Coerce the URL pk to the model's pk type; a malformed value is a
        # 404, not a 500.
        pk = self.kwargs.get(self.pk_url_kwarg)
        if pk is not None:
            try:
                self.kwargs[self.pk_url_kwarg] = model._meta.pk.to_python(pk)
            except (ValidationError, ValueError):
                raise Http404

        obj = super(DetailModelView, self).get_object()

        if not self.has_view_permission(self.request, obj):
            raise PermissionDenied

        return obj

    def get_context_data(self, **kwargs):
        """Additional context data for detail view.

        :keyword object_data: List of fields and values of the object
        :keyword change_url: Link to the change view
        :keyword delete_url: Link to the delete view
        """
        opts = self.model._meta

        kwargs['object_data'] = self.get_object_data()
        if self.has_change_permission(self.request, self.object):
            kwargs['change_url'] = reverse(
                '{}:{}_change'.format(opts.app_label, opts.model_name),
                args=[self.object.pk])
        if self.has_delete_permission(self.request, self.object):
            kwargs['delete_url'] = reverse(
                '{}:{}_delete'.format(opts.app_label, opts.model_name),
                args=[self.object.pk])

        return super(DetailModelView, self).get_context_data(**kwargs)

    def get_template_names(self):
        """
        List of templates for the view.

        If no `self.template_name` defined, returns::

            [<app_label>/<model_label>_detail.html
             'material/frontend/views/detail.html']
        """
        if self.template_name is None:
            opts = self.model._meta
            return [
                '{}/{}{}.html'.format(
                    opts.app_label,
                    opts.model_name,
                    self.template_name_suffix),
                'material/frontend/views/detail.html',
            ]
        return [self.template_name]
| [
"kmmbvnr@gmail.com"
] | kmmbvnr@gmail.com |
f0177181db3771cd7db55d41e5558003f6dee2c1 | 478de38a95c2729ee2ef8c77b1c5a81f23aedb59 | /Programming-Algorithm/Factorial Trailing Zeroes.py | b0bbec923fa02afbe6ed300d334715b8a0dfae98 | [] | no_license | shuzhancnjx/leetcode- | 0c711f720ef653ddff2af3af697a453122c28403 | 12093c92ef33707ad8ccdd59ad040c04cad1ee3b | refs/heads/master | 2020-12-24T16:32:25.976747 | 2016-03-03T15:36:44 | 2016-03-03T15:36:44 | 37,101,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 17:00:01 2015
@author: ZSHU
"""
"""
a simple algorithm based on the wiki.
"""
class Solution(object):
    def trailingZeroes(self, n):
        """Count the trailing zeroes of n! (Legendre's formula).

        Trailing zeroes come from factors of 10 = 2 * 5, and factors of 5
        are the scarce ones, so the answer is sum(n // 5**i) while
        5**i <= n. Rewritten with `while` and floor division so it runs
        identically on Python 2 and Python 3 (the original relied on
        `xrange` and integer `/`, which are Python 2 only).

        :type n: int
        :rtype: int
        """
        res = 0
        power = 5
        while power <= n:
            res += n // power
            power *= 5
        return res
"zshu@ets.org"
] | zshu@ets.org |
bc7cd024c8017f602b40f68ae8f58135a8d780cf | df1306cdc82ccbe730d77d78740004abc10bb492 | /src/main/python/config.py | d0b40c696caac30b756c79c5e59af50081cb0bc7 | [] | no_license | richburdon/flask-demo | cc656c3c750977c8ee2a312554fda146d83919d3 | 16f346f77af7824807e8e30ed9c92ceab55cfa82 | refs/heads/master | 2020-03-27T23:17:30.220307 | 2015-08-02T18:07:10 | 2015-08-02T18:07:10 | 38,577,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | #
# Copyright 2015 Alien Laboratories, Inc.
#
import flask
import flask.views
from injector import Key, Module, inject, singleton
# Injector binding key under which the configuration mapping is registered.
CONFIG = Key('configuration')
@singleton
@inject(app=flask.Flask)
class ConfigModule(Module):
    """Injector module that binds the static application configuration."""

    def configure(self, binder):
        """Register the configuration mapping under the CONFIG key."""
        settings = {
            'app': {'name': 'Demo'},
            'client': {'debug': True},
        }
        binder.bind(CONFIG, settings)
| [
"EMAIL"
] | EMAIL |
1890b3225e9fecb70d999a6c4e8c5668902b71fc | ce196aba0adde47ea2767eae1d7983a1ef548bb8 | /txtFile_覆盖式生成文件3_姓名生成.py | 81646a132236fad56879debd3ae1c06fee1131f6 | [] | no_license | xiang-daode/Python3_codes | 5d2639ffd5d65065b98d029e79b8f3608a37cf0b | 06c64f85ce2c299aef7f9311e9473e0203a05b09 | refs/heads/main | 2023-08-30T14:59:55.123128 | 2021-11-03T05:12:24 | 2021-11-03T05:12:24 | 333,632,892 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | # txtFile_覆盖式生成文件:
import random
# Output file; rewritten from scratch on every run.
f = "myTest.txt"
a =range(200)
with open(f,"w") as file: # "w" mode truncates (overwrites) the file on each run
    for m in a:
        # Two random offsets into the CJK Unified Ideographs block starting
        # at U+4E00 (20902 code points) to build a random two-character name.
        rnd1=int(20902*(random.random()))
        rnd2=int(20902*(random.random()))
        file.write(str(m)+":\t 项"+chr(0x4e00+rnd1)+chr(0x4e00+rnd2)+"\n")
| [
"noreply@github.com"
] | xiang-daode.noreply@github.com |
9d2c2c7b6917bdaaba2e101d20bd9c5cd9fd32c0 | f58e6240965d2d3148e124dcbdcd617df879bb84 | /tensorflow_datasets/core/community/load.py | 2718ecb5588a9c29696143b81ddbd0feec48c6cd | [
"Apache-2.0"
] | permissive | suvarnak/datasets | b3f5913cece5c3fe41ec0dde6401a6f37bfd9303 | 3a46548d0c8c83b2256e5abeb483137bd549a4c1 | refs/heads/master | 2022-09-27T03:38:20.430405 | 2022-07-22T15:21:33 | 2022-07-22T15:27:07 | 176,061,377 | 0 | 0 | Apache-2.0 | 2019-03-17T05:45:33 | 2019-03-17T05:45:32 | null | UTF-8 | Python | false | false | 2,465 | py | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils to load community datasets."""
import importlib
import sys
from typing import Type
from tensorflow_datasets.core import dataset_builder
from tensorflow_datasets.core import registered
from tensorflow_datasets.core.community import huggingface_wrapper
def builder_cls_from_module(
    module_name: str,) -> Type[dataset_builder.DatasetBuilder]:
  """Imports the module and extract the `tfds.core.DatasetBuilder`.

  Args:
    module_name: Dataset module to import containing the dataset definition
      (e.g. `tensorflow_datasets.image.mnist.mnist`)

  Returns:
    The extracted tfds.core.DatasetBuilder builder class.

  Raises:
    ValueError: if the module does not define exactly one DatasetBuilder.
  """
  if module_name not in sys.modules:  # Only import if not already imported.
    # Module can be created during execution, so call invalidate_caches() to
    # make sure the new module is noticed by the import system.
    importlib.invalidate_caches()

    # Executing the module will register the datasets in _MODULE_TO_DATASETS.
    with registered.skip_registration(),\
         huggingface_wrapper.mock_huggingface_import():
      importlib.import_module(module_name)
      # TODO(tfds): For community-installed modules, we should raise cleaner
      # error if there is additional missing dependency. E.g. Parsing all
      # import statements. Or wrap this `importlib.import_module` within a
      # `with lazy_imports():` context manager ?

  builder_classes = registered._MODULE_TO_DATASETS.get(module_name, [])  # pylint: disable=protected-access
  if len(builder_classes) != 1:
    raise ValueError(
        f'Could not load DatasetBuilder from: {module_name}. '
        'Make sure the module only contains a single `DatasetBuilder`.\n'
        'If no dataset is detected, make sure that all abstractmethods are '
        'implemented.\n'
        f'Detected builders: {builder_classes}')
  return builder_classes[0]
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
415268d2f1f79abf144496915ce9c4b774eb9e79 | 147e022b38e05fb2f6967aa4d5e50816221f8bf5 | /matches/middleware.py | e4dbf0afe8f40b927f4991ce1b29b5f4ad73d44c | [] | no_license | Shirhussain/Meet-your-match | a542f0fdcab573ba70740bfbd8d2bb6c2603bdc9 | fd9a8beabe7288aca6fae07f1a7bc0b68c0223a8 | refs/heads/main | 2023-02-25T20:30:22.647930 | 2021-02-04T08:02:05 | 2021-02-04T08:02:05 | 332,492,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,316 | py | from django.contrib import messages
from django.urls import reverse
from django.conf import settings
from django.shortcuts import HttpResponseRedirect
# Resolve the subscription-gated URL names once, at import time.
# NOTE(review): module-level reverse() assumes the URLconf is fully loaded
# when this middleware module is imported -- confirm import order if this
# ever raises on startup.
URLS = [reverse(url) for url in settings.SUBSCRIPTION_REQUIRED_URLS]
class CheckMembership:
    """Django middleware gating subscription-required URLs.

    "Regular" members requesting a URL listed in ``URLS`` are redirected
    to the home page with an upgrade prompt; everyone else passes through.
    """
    def __init__(self, get_response):
        self.get_response = get_response
        # One-time configuration and initialization.
    def __call__(self, request):
        # Code to be executed for each request before
        # the view (and later middleware) are called.
        response = self.get_response(request)
        # Code to be executed for each request/response after
        # the view is called.
        return response
    def process_view(self, request, view_func, *view_args, **view_kwargs):
        # Runs just before Django calls the view; returning a response here
        # short-circuits the view, returning None lets the request continue.
        if request.user.is_authenticated:
            # messages.success(request, "user is logged in ")
            if request.path in URLS:
                # NOTE(review): assumes every authenticated user has a
                # one-to-one `userrole` related object -- confirm, else this
                # raises RelatedObjectDoesNotExist.
                role = request.user.userrole
                if str(role) == "Regular":
                    messages.success(request, f"you need to upgrade your membership plan to see that, your rol is: {role}")
                    return HttpResponseRedirect(reverse("home"))
        else:
            # messages.error(request, "user is not logged in")
# messages.error(request, "user is not logged in")
print("not logged in horraaaaaaaaaaaaaaaaaaaaaa") | [
"sh.danishyar@gmail.com"
] | sh.danishyar@gmail.com |
c8aff798a0c522cfd15ef26a74275128a8d4fc8a | e298bf40ae88c2bd8e0a07f3e92f3e08a92edcc6 | /keystoneauth1/fixture/keystoneauth_betamax.py | 724d217d4b7bddbb6cda5bc1c31935af4f359787 | [] | no_license | KevinKaiQian/polar-bear | 46a814c746246394f76505846166673a049f12f2 | 61d4e0ccd7328a6aa543af3b75e5f7fedf98bf8e | refs/heads/master | 2022-04-29T02:15:35.536039 | 2021-05-19T12:33:07 | 2021-05-19T12:33:07 | 172,068,536 | 2 | 0 | null | 2022-03-29T21:56:51 | 2019-02-22T13:11:58 | Python | UTF-8 | Python | false | false | 2,976 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A fixture to wrap the session constructor for use with Betamax."""
from functools import partial
import betamax
import fixtures
import mock
import requests
from keystoneauth1.fixture import hooks
from keystoneauth1.fixture import serializer as yaml_serializer
from keystoneauth1 import session
class BetamaxFixture(fixtures.Fixture):
    """Fixture that patches keystoneauth session construction with Betamax.

    While the fixture is active, sessions built via keystoneauth1.session
    record/replay their HTTP interactions through a Betamax cassette
    instead of hitting the network.
    """
    def __init__(self, cassette_name, cassette_library_dir=None,
                 serializer=None, record=False,
                 pre_record_hook=hooks.pre_record_hook):
        # cassette_name: name of the Betamax cassette to use.
        # cassette_library_dir: directory holding the cassette files.
        # serializer: cassette serializer; defaults to YamlJsonSerializer.
        # record: Betamax record mode ('once', 'all', 'new_episodes'), or
        #   falsy for playback-only.
        # pre_record_hook: callback applied to interactions before recording.
        self.cassette_library_dir = cassette_library_dir
        self.record = record
        self.cassette_name = cassette_name
        if not serializer:
            serializer = yaml_serializer.YamlJsonSerializer
        self.serializer = serializer
        betamax.Betamax.register_serializer(serializer)
        self.pre_record_hook = pre_record_hook
    def setUp(self):
        super(BetamaxFixture, self).setUp()
        # Swap keystoneauth's session factory for the Betamax-wrapping one
        # for the lifetime of this fixture.
        self.mockpatch = mock.patch.object(
            session, '_construct_session',
            partial(_construct_session_with_betamax, self))
        self.mockpatch.start()
        # Unpatch during cleanup
        self.addCleanup(self.mockpatch.stop)
def _construct_session_with_betamax(fixture, session_obj=None):
    """Build a requests session wrapped by the fixture's Betamax recorder.

    Drop-in replacement for keystoneauth1.session._construct_session: it
    mirrors that function's behaviour and additionally starts a Betamax
    cassette so all HTTP traffic is recorded/replayed.
    """
    # NOTE(morganfainberg): This function should contain the logic of
    # keystoneauth1.session._construct_session as it replaces the
    # _construct_session function to apply betamax magic to the requests
    # session object.
    if not session_obj:
        session_obj = requests.Session()
    # Use TCPKeepAliveAdapter to fix bug 1323862
    for scheme in list(session_obj.adapters.keys()):
        session_obj.mount(scheme, session.TCPKeepAliveAdapter())
    with betamax.Betamax.configure() as config:
        config.before_record(callback=fixture.pre_record_hook)
    fixture.recorder = betamax.Betamax(
        session_obj, cassette_library_dir=fixture.cassette_library_dir)
    # Map fixture settings onto Betamax cassette options; the default is
    # playback-only ('none') with the library's default serializer.
    record = 'none'
    serializer = None
    if fixture.record in ['once', 'all', 'new_episodes']:
        record = fixture.record
    if fixture.serializer:
        serializer = fixture.serializer.name
    fixture.recorder.use_cassette(fixture.cassette_name,
                                  serialize_with=serializer,
                                  record=record)
    fixture.recorder.start()
    # Stop the recorder when the fixture is cleaned up.
    fixture.addCleanup(fixture.recorder.stop)
    return session_obj
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
84751391d9a0d6fb8e19ee5ccd599e6882cd26df | 40e7156576ad93db1f0dcab62ec3bb6042576166 | /Termux/python/Mergeall/test/ziptools/zip-extract.py | dc8a5fd4d5ae165ebb55f3e6dd9fdcb43e83d452 | [] | no_license | WeilerWebServices/Gists | c5d12093d620abc8152e8e8214a2000832969421 | 506fae2f3f9568ecd73ba373f35ac5fda054520e | refs/heads/master | 2023-02-17T18:00:07.721504 | 2021-01-11T02:19:44 | 2021-01-11T02:19:44 | 272,584,650 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,823 | py | #!/usr/bin/python
"""
=============================================================================
Command-line ziptools wrapper and client (for Python 3.X or 2.X).
Extract a zip file, with:
<python> zip-extract.py [zipfile [unzipto] [-nofixlinks]]
Where:
"zipfile" is the pathname of an existing zipfile (a ".zip" is appended to
the end of this if missing)
"unzipto" is the pathname of a possibly-existing folder where all unzipped
items will be stored (the default is ".", the current working directory)
"-nofixlinks", if used, prevents symbolic-link path separators from being
adjusted for the local platform (else they are, to make links portable)
Arguments are input at console prompts if not listed on the command line.
The script's output lists for each item both zipfile (from) and extracted
(to) name, the latter after a "=>" on a new line.
<python> is your platform's optional Python identifier string. It may be
"python", "python3", or an alias on Unix; and "python", "py -3", or "py"
on Windows. It can also be omitted on Windows (to use a default), and on
Unix given executable permission for this script (e.g., post "chmod +x").
Some frozen app/executable packages may also omit <python>; see your docs.
The "unzipto" folder is created automatically if needed, but is cleaned
of its contents before the extract only if using interactive-prompts
mode here and cleaning is confirmed. Neither the base extract function
nor non-interactive mode here do any such cleaning. Remove the unzipto
folder's contents manually if needed before running this script.
Caution: cleaning may not make sense for ".", the current working dir.
This case is verified with prompts in interactive mode only, but that
is the only context in which auto-cleaning occurs.
Examples:
python zip-extract.py # input args
python zip-extract.py tests.zip # unzip to '.'
python zip-extract.py download.zip dirpath # unzip to other dir
python zip-extract.py dev.zip . -nofixlinks # don't adjust links
ABOUT LINKS AND OTHER FILE TYPES:
For symbolic links to both files and dirs, the ziptools package either
zips links themselves (by default), or the items they refer to (upon
request); this extract simply recreates whatever was added to the zip.
FIFOs and other exotica are never zipped or unzipped.
To make links more portable, path separators in link paths are automatically
agjusted for the hosting platform by default (e.g., '/' becomes '\' on
Windows); use "-nofixlinks" (which can appear anywhere on the command line)
to suppress this if you are unzipping on one platform for use on another.
See ziptools.py's main docstring for more details.
ABOUT TARGET PATHS:
For extracts, the Python zipfile module underlying this script discards
any special syntax in the archive's item names, including leading slashes,
Windows drive and UNC network names, and ".." up-references. The local
symlink adder parrots the same behavior.
Hence, paths that were either absolute, rooted in a drive or network, or
parent-relative at zip time become relative to (and are created in) the
"unzipto" path here. Items zipped as "dir0", "/dir1", "C:\dir2", and
"..\dir3" are extracted to "dir0", "dir1", "dir2", and "dir3" in "unzipto".
Technically, zipfile's write() removes leading slashes and drive and
network names (they won't be in the zipfile), and its extract() used
here removes everything special, including "..". Other zip tools may
store anything in a zipfile, and may or may not be as forgiving about
"..", but the -create and -extract scripts here work as a team.
Note that all top-level items in the zipfile are extracted as top-level
items in the "unzipto" folder. A zipfile that contains just files will
not create nested folders in "unzipto"; a zipfile with folders will.
ABOUT LARGE FILES:
Python's zipfile - and hence ziptools - handles files > ZIP64's 2G
size cutoff, both for zipping and unzipping. UNIX "unzip" may not.
See zip-create.py for more details.
CAVEAT: extracts here may not preserve UNIX permissions due to a Python
zipfile bug; see extractzipfile() in ziptools/ziptools.py for more details.
See zip-create.py for usage details on the zip-creation companion script.
See ziptools/ziptools.py's docstring for more on this script's utility.
=============================================================================
"""
# Gather run parameters: command-line mode when a zipfile argument is given,
# interactive prompt mode otherwise.
import ziptools, sys, os
if sys.version[0] == '2':
    input = raw_input # py 2.X compatibility
if len(sys.argv) >= 2: # 2 = script zipfile...
    interactive = False
    nofixlinks = False
    if '-nofixlinks' in sys.argv: # anywhere in argv
        nofixlinks = True
        sys.argv.remove('-nofixlinks')
    assert len(sys.argv) >= 2, 'Too few arguments'
    zipfrom = sys.argv[1]
    # tolerate a missing ".zip" extension on the archive name
    zipfrom += '' if zipfrom[-4:].lower() == '.zip' else '.zip'
    unzipto = '.' if len(sys.argv) == 2 else sys.argv[2]
else:
    interactive = True
    zipfrom = input('Zip file to extract? ')
    zipfrom += '' if zipfrom[-4:].lower() == '.zip' else '.zip'
    unzipto = input('Folder to extract in (use . for here) ? ') or '.'
    nofixlinks = input('Do not localize symlinks (y=yes)? ').lower() == 'y'
    verify = input("About to UNZIP\n"
                   "\t%s,\n"
                   "\tto %s,\n"
                   "\t%socalizing any links\n"
                   "Confirm with 'y'? "
                   % (zipfrom, unzipto, 'not l' if nofixlinks else 'l'))
    if verify.lower() != 'y':
        input('Run cancelled.')
        sys.exit(0)
if not os.path.exists(unzipto):
    # no need to create here: zipfile.extract() does os.makedirs(unzipto)
    pass
else:
    # in interactive mode, offer to clean target folder (ziptools doesn't);
    # removing only items to be written requires scanning the zipfile: pass;
    if (interactive and
            input('Clean target folder first (yes=y)? ').lower() == 'y'):
        # okay, but really?
        if (unzipto in ['.', os.getcwd()] and
            input('Target = "." cwd - really clean (yes=y)? ').lower() != 'y'):
            # a very bad thing to do silently!
            pass
        else:
            # proceed with cleaning: files and links removed directly,
            # directories removed recursively via ziptools.tryrmtree
            for item in os.listdir(unzipto):
                itempath = os.path.join(unzipto, item)
                if os.path.isfile(itempath) or os.path.islink(itempath):
                    os.remove(ziptools.FWP(itempath))
                elif os.path.isdir(itempath):
                    ziptools.tryrmtree(itempath)
# the zip bit
ziptools.extractzipfile(zipfrom, unzipto, nofixlinks)
if interactive and sys.platform.startswith('win'):
    input('Press Enter to exit.') # stay up if clicked
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
73397cdefaa3f889a395f6445034301d0a731cbc | bde6ed092b7b29703737e11c5a5ff90934af3d74 | /AtCoder/ABC/128/a.py | a9866e7e31e1d163626daae9c34501fc2c277bec | [] | no_license | takecian/ProgrammingStudyLog | 2ab7ea601e0996b3fa502b81ec141bc3772442b6 | 94485d131c0cc9842f1f4799da2d861dbf09b12a | refs/heads/master | 2023-04-28T16:56:18.943574 | 2023-04-18T06:34:58 | 2023-04-18T06:34:58 | 128,525,713 | 4 | 0 | null | 2022-12-09T06:15:19 | 2018-04-07T12:21:29 | Python | UTF-8 | Python | false | false | 219 | py | #
import itertools
from collections import Counter
from collections import defaultdict
import bisect
def main():
    """Read two integers "A P" from stdin and print (A * 3 + P) // 2."""
    a, p = (int(token) for token in input().split())
    print((a * 3 + p) // 2)


if __name__ == '__main__':
    main()
| [
"takecian@gmail.com"
] | takecian@gmail.com |
c863d3bd856aea04083da417daf952d5564beed7 | 0bf5e600ac0bd860919d4e43e8ccd55057a9e899 | /Python Files/sunni_keydown.py | 2787b80c178a36b079a878127a86adb701c19a9d | [] | no_license | AndyDeany/Sunni | 3f4dc0d9ffdec864c5340d5fbc45c18f470c5165 | 3dbe0f0f8f25c1c3cff024ffadf1a2ca76cbadd0 | refs/heads/master | 2020-12-25T14:14:01.610017 | 2016-10-04T12:48:06 | 2016-10-04T12:48:06 | 66,398,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,508 | py | keys = pygame.key.get_pressed()
# Each *_held name below caches one entry of the pygame key-state snapshot
# taken above; a flag is truthy while that key is physically held down.
# Miscellaneous
backspace_held = keys[8]
tab_held = keys[9]
enter_held = keys[13]
pausebreak_held = keys[19]
escape_held = keys[27]
space_held = keys[32]
apostrophe_held = keys[39]
comma_held = keys[44]
minus_held = keys[45]
fullstop_held = keys[46]
forwardslash_held = keys[47]
# Numbers across the top
zero_held = keys[48]
one_held = keys[49]
two_held = keys[50]
three_held = keys[51]
four_held = keys[52]
five_held = keys[53]
six_held = keys[54]
seven_held = keys[55]
eight_held = keys[56]
nine_held = keys[57]
# Miscellaneous
semicolon_held = keys[59]
backslash_held = keys[60]
equals_held = keys[61]
opensquarebracket_held = keys[91]
sharp_held = keys[92]
closesquarebracket_held = keys[93]
backtick_held = keys[96]
# Alphabet
a_held = keys[97]
b_held = keys[98]
c_held = keys[99]
d_held = keys[100]
e_held = keys[101]
f_held = keys[102]
g_held = keys[103]
h_held = keys[104]
i_held = keys[105]
j_held = keys[106]
k_held = keys[107]
l_held = keys[108]
m_held = keys[109]
n_held = keys[110]
o_held = keys[111]
p_held = keys[112]
q_held = keys[113]
r_held = keys[114]
s_held = keys[115]
t_held = keys[116]
u_held = keys[117]
v_held = keys[118]
w_held = keys[119]
x_held = keys[120]
y_held = keys[121]
z_held = keys[122]
# Miscellaneous
delete_held = keys[127]
# Numpad
numpad0_held = keys[256]
numpad1_held = keys[257]
numpad2_held = keys[258]
numpad3_held = keys[259]
numpad4_held = keys[260]
numpad5_held = keys[261]
numpad6_held = keys[262]
numpad7_held = keys[263]
numpad8_held = keys[264]
numpad9_held = keys[265]
numpaddivide_held = keys[267]
numpadmultiply_held = keys[268]
numpadminus_held = keys[269]
numpadplus_held = keys[270]
numpadenter_held = keys[271]
# Arrow keys
uparrow_held = keys[273]
downarrow_held = keys[274]
rightarrow_held = keys[275]
leftarrow_held = keys[276]
# Miscellaneous
insert_held = keys[277]
home_held = keys[278]
end_held = keys[279]
pageup_held = keys[280]
pagedown_held = keys[281]
# F keys
f1_held = keys[282]
f2_held = keys[283]
f3_held = keys[284]
f4_held = keys[285]
f5_held = keys[286]
f6_held = keys[287]
f7_held = keys[288]
f8_held = keys[289]
f9_held = keys[290]
f10_held = keys[291]
f11_held = keys[292]
f12_held = keys[293]
# Key modifiers
# NOTE(review): numlock/capslock read whether the lock *key* is currently
# held, not the toggle state of the lock -- confirm that is the intent.
numlock = keys[300]
capslock = keys[301]
scrolllock_held = keys[302]
rightshift_held = keys[303]
leftshift_held = keys[304]
shift_held = rightshift_held or leftshift_held
rightcontrol_held = keys[305]
leftcontrol_held = keys[306]
altgrammar_held = keys[307]
alt_held = keys[308]
leftwindows_held = keys[311] #} these might be
rightwindows_held = keys[312] #} pointless (windows keys)
menubutton_held = keys[319]
# Calculating the number of keys pressed (for typing)
if accepting_text:
    # Total held keys, minus modifier/lock keys, approximates how many
    # text-producing keys are currently down.
    keys_pressed = sum(keys)
    modifier_flags = (
        numlock,
        capslock,
        scrolllock_held,
        rightshift_held,
        leftshift_held,
        rightcontrol_held,
        leftcontrol_held,
        alt_held,
        leftwindows_held,
        rightwindows_held,
        menubutton_held,
    )
    keys_pressed -= sum(1 for flag in modifier_flags if flag)
    if altgrammar_held:
        # AltGr also registers a left-Ctrl press on some layouts: left-Ctrl
        # was already subtracted above in that case, so only discount the
        # AltGr key itself; otherwise discount both keys it reports.
        keys_pressed -= 1 if leftcontrol_held else 2
| [
"oneandydean@hotmail.com"
] | oneandydean@hotmail.com |
c93f0d01307038fa67436b4424d1db481e3e53c9 | 69bf192eb08f2c49093d2b63e5ef16a5b4028848 | /ved/vedomosti/MainApp/migrations/0006_auto_20160721_2321.py | b687bb0c52eb40b21ffd1d1998613a87a5bb35f6 | [] | no_license | jsay-api/vedomosti | c7aac45684142f428e3ffb6cb29aff9d77a999e3 | 5255d44386afbe06965b79c50547dcb80a59029f | refs/heads/master | 2021-01-17T18:09:02.543349 | 2016-07-21T20:32:34 | 2016-07-21T20:32:34 | 63,365,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-21 20:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make AssetsBeneficiaries.rel_date optional in forms (blank=True)."""
    dependencies = [
        ('MainApp', '0005_auto_20160721_2319'),
    ]
    operations = [
        migrations.AlterField(
            model_name='assetsbeneficiaries',
            name='rel_date',
            field=models.DateField(blank=True, verbose_name='дата актуальности'),
        ),
    ]
| [
"julia.sayapina@me.com"
] | julia.sayapina@me.com |
4ded9ca09dbfbb9b99301b5899ee6c07d0abcb31 | 81407be1385564308db7193634a2bb050b4f822e | /library/lib_study/112_netdata_binascii.py | 842c2712049f85a402b204d0cd455f742cd69bb0 | [
"MIT"
] | permissive | gottaegbert/penter | 6db4f7d82c143af1209b4259ba32145aba7d6bd3 | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | refs/heads/master | 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 | MIT | 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null | UTF-8 | Python | false | false | 435 | py | # 二进制和 ASCII 码互转 https://docs.python.org/zh-cn/3/library/binascii.html
"""
binascii 模块包含很多在二进制和二进制表示的各种ASCII码之间转换的方法。
通常情况不会直接使用这些函数,而是使用像 uu , base64 ,或 binhex 这样的封装模块。
为了执行效率高,binascii 模块含有许多用 C 写的低级函数,这些底层函数被一些高级模块所使用。
""" | [
"350840291@qq.com"
] | 350840291@qq.com |
21ae1362833a5c039555dc1eb6113024b53fed68 | bd4535b2ff5fc80234eed709f46da53b9ab260cf | /Packs/OSQuery/Scripts/OSQueryBasicQuery/OSQueryBasicQuery.py | 6efabf7dae8ae8298e237cf0d2e9bdbfe70d5657 | [
"MIT"
] | permissive | vibhuabharadwaj/content | 0641284c862668b577e82e32e2daecdb9fabb39a | 518da763814fefce538379560282ff8c2ce661b9 | refs/heads/master | 2023-03-07T21:36:31.768989 | 2022-09-28T15:50:46 | 2022-09-28T15:50:46 | 202,795,410 | 1 | 0 | MIT | 2023-03-06T17:25:01 | 2019-08-16T20:30:23 | Python | UTF-8 | Python | false | false | 1,188 | py | import json
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
# ssh command to run, json format, param = query to execute
COMMAND = 'osqueryi --json "{0}"'


def main():
    """Run the requested osquery query on each target system via RemoteExec.

    Successful results are rendered as markdown-table entries; failures are
    collected separately and reported after the successes.
    """
    systems = argToList(demisto.args().get('system'))
    query = demisto.args().get('query')
    ok_entries = []
    error_entries = []
    if query and systems:
        for system in systems:
            temp_res = demisto.executeCommand(
                "RemoteExec", {'cmd': COMMAND.format(str(query)), 'system': system})
            entry = temp_res[0]
            if isError(entry):
                temp_res_contents = entry['Contents']
                error_entries.append({
                    "Type": entryTypes["error"],
                    "ContentsFormat": formats["text"],
                    "Contents": f'An Error occurred on remote system:\"{system}\". Error={temp_res_contents}.',
                })
            else:
                data = json.loads(entry['Contents'])
                ok_entries.append({
                    'ContentsFormat': formats['markdown'],
                    'Type': entryTypes['note'],
                    "Contents": tblToMd("{0} results:".format(system), data),
                })
    demisto.results(ok_entries + error_entries)


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| [
"noreply@github.com"
] | vibhuabharadwaj.noreply@github.com |
5f9ff53c21a585394ade8a312d386efe615fa801 | def2fee9dd1476bb4d782178bffa7d5d34fbbd13 | /nb_autoimports/__init__.py | 83a3fd5f2332cd4681a1fe5abd646602a188e248 | [
"MIT"
] | permissive | sshh12/nb_autoimports | 0f6298e7d11434bb5a8ca98f44e9460129ccbf4c | 693c73b07af1882f97c957a0813db42926433978 | refs/heads/main | 2023-05-24T08:15:48.298366 | 2021-06-08T23:18:12 | 2021-06-08T23:18:12 | 374,137,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from .auto_import import AutoImporter
def load_ipython_extension(ip):
    """IPython extension entry point: attach an AutoImporter to *ip*."""
    # TODO: unload function
    importer = AutoImporter(ip)
    ip.events.register("post_run_cell", importer.on_post_run_cell)
"shrivu1122@gmail.com"
] | shrivu1122@gmail.com |
10ff6d612cda6c48feb8e0551c17ebcc01eadad7 | bd86f45ec9355cf1b76c25307d77c85ff98d30a8 | /lib/common/color.py | 1edf3cccfedc2eb0e95268f309e00c535b7eac49 | [
"MIT"
] | permissive | WhySoGeeky/DroidPot | fd39abe490117283f992d80f317574f47809de8d | 7c3d9e975dae3835e2ccf42c425d65b26466e82a | refs/heads/master | 2021-07-02T12:47:16.269514 | 2015-11-03T17:49:41 | 2015-11-03T17:49:41 | 45,484,292 | 6 | 0 | MIT | 2021-06-10T17:59:45 | 2015-11-03T17:44:48 | Python | UTF-8 | Python | false | false | 615 | py | __author__ = 'RongShun'
import os
import sys
def color(text, color_code):
    """Wrap *text* in the ANSI SGR escape sequence for *color_code*.

    Windows consoles without an xterm-style TERM variable do not render
    ANSI escapes, so the text is returned untouched there.
    """
    on_plain_windows_console = (
        sys.platform == "win32" and os.getenv("TERM") != "xterm"
    )
    if on_plain_windows_console:
        return text
    return "\x1b[%dm%s\x1b[0m" % (color_code, text)
def green(text):
    """Return *text* wrapped in the green ANSI color (SGR 32)."""
    return color(text, 32)
def yellow(text):
    """Return *text* wrapped in the yellow ANSI color (SGR 33)."""
    return color(text, 33)
def white(text):
    """Return *text* wrapped in the white ANSI color (SGR 37)."""
    return color(text, 37)
def bold(text):
    """Return *text* with the ANSI bold attribute (SGR 1)."""
    return color(text, 1)
def black(text):
    """Return *text* wrapped in the black ANSI color (SGR 30)."""
    return color(text, 30)
def red(text):
    """Return *text* wrapped in the red ANSI color (SGR 31)."""
    return color(text, 31)
def blue(text):
    """Return *text* wrapped in the blue ANSI color (SGR 34)."""
    return color(text, 34)
def magenta(text):
    """Return *text* wrapped in the magenta ANSI color (SGR 35)."""
    return color(text, 35)
def cyan(text):
    """Return *text* wrapped in the cyan ANSI color (SGR 36)."""
    return color(text, 36)
| [
"phobiatrs@gmail.com"
] | phobiatrs@gmail.com |
43634a4192a6b1de1987f2c7343b04f81c9ab576 | b62ba918b0b96e682d811aa79d0f34ffa50e784c | /shop/catalog/models.py | 0e6804f4c1d6e88a043bf6d32137738765467f99 | [] | no_license | MaksimLion/django-furniture-shop | 8ee3edb584cf670c7893c7b836037b97aefafcb2 | 14a5160b29a06947fab7aae7dda15829c1dcf23f | refs/heads/master | 2020-04-24T22:38:26.514663 | 2019-03-10T09:37:06 | 2019-03-10T09:37:06 | 172,318,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | from django.db import models
class Furniture(models.Model):
    """Furniture catalog category: a titled entry with a photo and a type."""
    # (stored value, human-readable label) pairs for the ``option`` field.
    TYPES = (
        ('kitchens','КУХНИ'),
        ('showcases','ВИТРИНЫ'),
        ('wardrobes','ШКАФЫ-КУПЕ'),
        ('offices','МЕБЕЛЬ ДЛЯ ОФИСА'),
        ('hallways','ПРИХОЖИЕ'),
        ('lounges','ГОСТИНЫЕ'),
        ('child','ДЕТСКИЕ'),
        ('closets','ГАРДЕРОБНЫЕ'),
        ('others','КРОВАТИ КОМОДЫ ТУМБЫ'),
    )
    # Display name of the entry.
    title = models.CharField(max_length=20, verbose_name="Название")
    # Optional image; files land under MEDIA_ROOT/categories/.
    photo = models.ImageField(blank=True, verbose_name="Фото", upload_to="categories/")
    # One of TYPES (stored value, max 20 chars).
    option = models.CharField(max_length=20, choices=TYPES, verbose_name="Категория")
    class Meta:
        verbose_name = "Мебель"
        verbose_name_plural = "Мебель"
    def __str__(self):
        return self.title
| [
"maxim226356@mail.ru"
] | maxim226356@mail.ru |
08df8cd4acefaf74f1039287b5260de31247f5da | ec61946a176935044d08cf1244d2185f2460df32 | /pyleecan/Methods/Slot/SlotM11/get_surface_active.py | 42e164a216dc443c271c333537ee71a505c481e6 | [
"Apache-2.0"
] | permissive | Lunreth/pyleecan | d3974a144cb8a6c332339ab0426f1630b7516fc9 | 1faedde4b24acc6361fa1fdd4e980eaec4ca3a62 | refs/heads/master | 2023-06-07T01:46:32.453763 | 2021-07-01T21:29:51 | 2021-07-01T21:29:51 | 383,880,732 | 1 | 0 | Apache-2.0 | 2021-07-07T17:47:01 | 2021-07-07T17:47:01 | null | UTF-8 | Python | false | false | 1,691 | py | # -*- coding: utf-8 -*-
from numpy import linspace, zeros
from ....Classes.Arc1 import Arc1
from ....Classes.Segment import Segment
from ....Classes.SurfLine import SurfLine
def get_surface_active(self, alpha=0, delta=0):
    """Build the surface enclosing the magnet active area of the slot.

    Parameters
    ----------
    self : SlotM11
        A SlotM11 object
    alpha : float
        rotation applied to the resulting surface (Default value = 0) [rad]
    delta : complex
        translation applied to the resulting surface (Default value = 0)

    Returns
    -------
    surf_act: Surface
        Surface corresponding to the Active Area
    """
    lam_name = self.get_name_lam()
    Rbo = self.get_Rbo()
    point_dict = self._comp_point_coordinate()
    ZM1 = point_dict["ZM1"]
    ZM2 = point_dict["ZM2"]
    ZM3 = point_dict["ZM3"]
    ZM4 = point_dict["ZM4"]

    # The two arc radii only differ with the direction the slot opens in.
    if self.is_outwards():
        inner_radius = Rbo + self.H0 - self.Hmag
        outer_radius = -Rbo - self.H0
    else:
        inner_radius = Rbo - self.H0 + self.Hmag
        outer_radius = -Rbo + self.H0

    curve_list = [
        Segment(ZM1, ZM2),
        Arc1(ZM2, ZM3, inner_radius, is_trigo_direction=True),
        Segment(ZM3, ZM4),
        Arc1(ZM4, ZM1, outer_radius, is_trigo_direction=False),
    ]

    Zmid = (abs(ZM1) + abs(ZM3)) / 2
    surface = SurfLine(
        line_list=curve_list, label="Wind_" + lam_name + "_R0_T0_S0", point_ref=Zmid
    )
    # Rotate/translate in place before returning.
    surface.rotate(alpha)
    surface.translate(delta)
    return surface
| [
"pierre.bonneel@gmail.com"
] | pierre.bonneel@gmail.com |
af004c090784e8fe7a38327a0699f3f1bee2b802 | 179d8aae260d20443e6e87613cff55d42587bc16 | /examples/x2oneflow/pytorch2oneflow/nodes/test_reduction.py | 34c6292c3b266174d612648e742d5a682ce50c30 | [] | no_license | 666DZY666/oneflow_convert_tools | 3b1f9d6ebaf154d7218236c332c6f9613b89a860 | bb38c52954facbfe977e09c7e4706b7563a7b50c | refs/heads/main | 2023-06-04T10:16:08.786531 | 2021-06-24T08:38:24 | 2021-06-24T08:38:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
from torch import nn
from oneflow_onnx.x2oneflow.util import load_pytorch_module_and_check
def test_reduce_mean():
    # Full reduction: mean over every element of the input tensor.
    class Net(nn.Module):
        def forward(self, x):
            return torch.mean(x)
    # Presumably converts the module to OneFlow and checks it against the
    # PyTorch original -- see oneflow_onnx.x2oneflow.util.
    load_pytorch_module_and_check(Net)
def test_reduce_mean_axis():
    # Reduction along a single axis (dim=2).
    class Net(nn.Module):
        def forward(self, x):
            return torch.mean(x, dim=2)
    load_pytorch_module_and_check(Net)
def test_reduce_mean_axis_keepdim():
    # Axis reduction keeping the reduced dimension as size 1.
    class Net(nn.Module):
        def forward(self, x):
            return torch.mean(x, dim=3, keepdim=True)
    load_pytorch_module_and_check(Net)
| [
"1182563586@qq.com"
] | 1182563586@qq.com |
276d5cb7c2f08b0c622a30d2a0ad6d9e5ebab54b | 55ceefc747e19cdf853e329dba06723a44a42623 | /_CodeTopics/LeetCode/801-1000/000954/WA--000954.py3 | 12cb53000239ceb19cbc5b6fee2b54a6673848bf | [] | no_license | BIAOXYZ/variousCodes | 6c04f3e257dbf87cbe73c98c72aaa384fc033690 | ee59b82125f100970c842d5e1245287c484d6649 | refs/heads/master | 2023-09-04T10:01:31.998311 | 2023-08-26T19:44:39 | 2023-08-26T19:44:39 | 152,967,312 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py3 | class Solution:
def canReorderDoubled(self, arr: List[int]) -> bool:
if arr.count(0) & 1 or sum(elem < 0 for elem in arr) & 1:
return False
arr = filter(lambda x : x != 0, arr)
arr = map(abs, arr)
ctr = Counter(arr)
deleted = defaultdict(int)
keys = list(ctr.keys())
keys.sort()
for key in keys:
# 说明 key 已经做为 pair 中较大的那个,被之前更小的匹配完了,所以直接跳过。
# 比如,[2,4,6,12] 中的 4 和 12 都是这种情况。
if ctr[key] == deleted[key]:
continue
doubleKey = 2 * key
# 对于 [2,4,4,8],当 for 循环到 4 时,此时可用的 4 的数目应该是一个,因为
# 虽然一共有两个 4,但是已经有一个和前面的 2 匹配用掉了。
numOfKeyLeft = ctr[key] - deleted[key]
if ctr[doubleKey] < numOfKeyLeft:
return False
else:
deleted[doubleKey] += numOfKeyLeft
return True
"""
https://leetcode-cn.com/submissions/detail/292900128/
101 / 102 个通过测试用例
状态:解答错误
输入:
[-3,-4,2,6]
输出:
true
预期结果:
false
"""
| [
"noreply@github.com"
] | BIAOXYZ.noreply@github.com |
7bff1f662a66130a50f13bd63bbeb32866d217a1 | 92187fc72f613751e9d215bc1db8fe1bba4b83bc | /src/home/migrations/0013_car.py | 9c675273b9a0d56370b376e831e68bb22653c9bf | [] | no_license | bhubs-python/istehar | 33cb0abeeb4c8da1f1721eee5f0f380c086f4a66 | 3e1679f1b1b17f6e21aff4923d1d5dbcf687fc66 | refs/heads/master | 2021-09-10T16:26:31.464986 | 2018-03-29T08:06:15 | 2018-03-29T08:06:15 | 123,160,263 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-11 10:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``car`` model (all descriptive fields optional)."""
    dependencies = [
        ('home', '0012_audiomp3'),
    ]
    operations = [
        migrations.CreateModel(
            name='car',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('model_year', models.DateTimeField(blank=True, null=True)),
                ('registration_year', models.DateTimeField(blank=True, null=True)),
                ('transmission', models.CharField(blank=True, max_length=100, null=True)),
                ('body_type', models.CharField(blank=True, max_length=255, null=True)),
                ('fuel_type', models.CharField(blank=True, max_length=255, null=True)),
                ('engine_capacity', models.FloatField(blank=True, null=True)),
                ('kilometer_run', models.FloatField(blank=True, null=True)),
            ],
        ),
    ]
| [
"mubarak117136@gmail.com"
] | mubarak117136@gmail.com |
9c0ecdb04410180dded57b66e4abaa2e72494082 | 50910ddbbbf57cdbf4d40a404fc2672e8a4b340a | /application.py | e6c3db90918404f80393c32bf61cf2ea20c5f923 | [] | no_license | aes95/cs50-web-2018-x-projects-1 | c5ea9255ae1a986b3ab7c9bf166267afdfea49c9 | 3463c48c46a2a1a849c21653a37058c54d660c96 | refs/heads/master | 2020-04-16T11:02:22.218936 | 2019-01-14T00:14:00 | 2019-01-14T00:14:00 | 165,521,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,961 | py | import os, requests, xml.etree.ElementTree
from flask import Flask, session, render_template, request, jsonify
from flask_session import Session
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
app = Flask(__name__)
# Check for environment variable
if not os.getenv("DATABASE_URL"):
    raise RuntimeError("DATABASE_URL is not set")
# Configure session to use filesystem
# (server-side sessions stored on disk instead of signed cookies)
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Set up database
# scoped_session hands each request/thread its own SQLAlchemy session
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
@app.route("/")
def index():
if not session.get('logged_in'):
return render_template('login.html')
return render_template("search.html")
@app.route("/login", methods=["GET", "POST"])
def login():
if request.method == "POST":
email = request.form.get('email')
password = request.form.get('password')
x = db.execute('SELECT password FROM users WHERE email = :email', {'email': email}).fetchone()
if x == None or x['password'] != password:
return 'Incorrect username or password. Please try again.'
else:
session['logged_in'] = True
session['email'] = email
return index()
if request.method == "GET":
return render_template("login.html")
@app.route("/logout")
def logout():
session['logged_in']=False
session['email'] = None
return index()
@app.route("/register", methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
@app.route("/confirmation", methods=["POST", "GET"])
def confirmation():
pwd = request.form.get('psw')
email = request.form.get('email')
email_check = db.execute("SELECT email FROM users WHERE email = :email",{'email':email}).fetchone()
if email_check != None:
return f"Your email {email} already has an account associated with it. Please log in <a href='/login'> here <a>"
db.execute("INSERT INTO users (email, password) VALUES(:email,:pwd)",{"email":email, "pwd":pwd})
db.commit()
return "You have successfuly registered! Find books <a href='/'> here </a>"
@app.route("/<string:isbn>", methods=["POST", "GET"])
def book(isbn):
if not session.get('logged_in'):
return render_template('login.html')
book_data = db.execute("SELECT * FROM books WHERE isbn=:isbn",{'isbn':isbn}).fetchone()
if book_data == None:
return "Book not found. Please try again <a href='/search'>Here</a>"
title = book_data['title']
author = book_data['author']
year = book_data['year']
res = requests.get("https://www.goodreads.com/book/review_counts.json", params={"key":"EOquiAwYzuZQkS4FGKIQ", "isbns":isbn}).json()
goodreads_avg = res['books'][0]['average_rating']
goodreads_count = res['books'][0]['ratings_count']
reviews = db.execute("SELECT * FROM reviews WHERE isbn=:isbn",{'isbn':isbn}).fetchall()
return render_template("book.html", title=title, author= author, year=year, isbn=isbn, rating=goodreads_avg, count=goodreads_count, reviews=reviews)
@app.route("/search", methods=["POST", "GET"])
def search():
search = f"%{request.form.get('search')}%"
results = db.execute("SELECT * FROM books WHERE title LIKE :search OR author LIKE :search OR isbn LIKE :search",{'search':search}).fetchall()
return render_template('search.html', results=results)
@app.route("/submit", methods=["POST"])
def submit():
email = session['email']
email_check = db.execute("SELECT email FROM reviews WHERE email = :email",{'email':email}).fetchone()
if email_check != None:
return f"Your email {email} has already submitted a review for this book. Please review other books <a href='/search'> here <a>"
isbn = request.form.get('isbn')
print(isbn)
rating = request.form.get('rating')
review = request.form.get('review')
db.execute("INSERT INTO reviews (email, isbn, rating, review) VALUES (:email, :isbn, :rating, :review)", {'email':email, 'isbn':isbn, 'rating':rating, 'review':review})
db.commit()
return index()
@app.route("/api/<string:isbn>")
def api(isbn):
book_data = db.execute("SELECT * FROM books WHERE isbn=:isbn",{'isbn':isbn}).fetchone()
title = book_data['title']
author = book_data['author']
year = book_data['year']
isbn = isbn
review_count = db.execute("SELECT COUNT(*) FROM reviews WHERE isbn=:isbn",{'isbn':isbn}).fetchone()[0]
average_score = db.execute("SELECT AVG(reviews.rating) FROM reviews WHERE isbn=:isbn",{'isbn':isbn}).fetchone()[0]
average_score = round(float(average_score),2)
dic = {"title": title, "author":author, "year": year,"isbn":isbn, "review_count":review_count, "average_score": average_score }
print(dic)
return jsonify(dic) | [
"you@example.com"
] | you@example.com |
8c90d597c7ceb9a5f6d6cf86f71da32121e3b905 | d0a84d97aaa8dcc2dff4a6b33ce98dee6d474496 | /com.CheckProofing/2020/Test_w_45_HolidayDeals_T1_Actives/test_w45_CCcheck.py | a9f1f29bcb011c9ba984670d8e1baea2714e1275 | [] | no_license | ahmed-test001/python | 21a27248c4571a13c0ed4dccab256aede1beea3a | eab59b9a54fae1a51fbc18c391599eb3b0e28b3d | refs/heads/master | 2023-03-10T21:00:54.634028 | 2021-02-27T05:31:58 | 2021-02-27T05:31:58 | 342,778,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,821 | py | import time
import unittest
import sys
import os
import logging
import warnings
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PageClass.UrlSegmentPage import URLSegemntPage
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from PageClass.ComputingPage import ComputingPage
from PageClass.MobileAccessoriesPage import MobileAccessoriesPage
from PageClass.TVHomeTheaterPage import TVHomeTheaterPage
from PageClass.SmartPhonePage import SmartPhonePage
from PageClass.HomeAppliancePage import HomeAppliancePage
from PageClass.TabletPage import TabletPage
from Utility_Files import ReadConfig
from Utility_Files.HTMLTestRunner import stdout_redirector
# Module logger wired to HTMLTestRunner's stdout redirector, presumably so
# log output is captured into the generated HTML report -- confirm.
# NOTE(review): the format string has no separators between asctime,
# levelname and message.
logger=logging.getLogger(__name__)
out_hdlr=logging.StreamHandler(stdout_redirector)
out_hdlr.setFormatter(logging.Formatter('%(asctime)s%(levelname)s%(message)s'))
out_hdlr.setLevel(logging.INFO)
logger.addHandler(out_hdlr)
logger.setLevel(logging.INFO)
class HTMLPage_W_45_CCTest(unittest.TestCase):
    """Open every proof URL from UniqueList_2.txt and click through its page flow.

    Fixes to the routing logic in test_Proofs():
    * the category checks were independent ``if`` statements, so a URL
      containing "MB_TABLET" (or "MB_WEAR") also matched the plain "MB"
      branch and ran two page flows; the checks are now an ``elif`` chain
      with the most specific tokens tested first;
    * the trailing ``else`` was attached to the innermost ``if`` and printed
      "Not able to RUN" for every CC URL that was not CE_COMPUTER; it now
      fires only for URLs that are neither DD nor CC.
    (A large commented-out duplicate test was removed.)
    """

    method1 = ""
    driver = None
    url_list = []
    method_list_in_Url = []

    # NOTE(review): setUp/tearDown are decorated @classmethod, so
    # self.driver is really a class attribute; kept as-is for compatibility.
    @classmethod
    def setUp(self):
        option = webdriver.ChromeOptions()
        option.add_experimental_option('excludeSwitches', ['enable-logging'])
        self.driver = webdriver.Chrome(executable_path=ReadConfig.readconfigData('paths', 'chromedriver1'), options=option)
        warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning)
        self.wait = WebDriverWait(self.driver, 10)

    @classmethod
    def tearDown(self):
        self.driver.quit()

    def test_Proofs(self):
        with open('../TextFolder_Unique_URL/UniqueList_2.txt')as f:
            urls = f.read().splitlines()
        for url in urls:
            if not url:
                continue  # skip blank lines in the URL list
            if "DD" in url:
                print("Select DD")
                self.driver.get(url)
                MB_smartphone = SmartPhonePage(self.driver)
                MB_smartphone.get_SMARTPHONE_ShopAll()
                # MB_smartphone.get_Module1_link()
            elif "CC" in url:
                self.driver.get(url)
                # Most specific tokens first: "MB_TABLET"/"MB_WEAR" also
                # contain "MB", so the plain "MB" branch must come last.
                if "MB_TABLET" in url:
                    print("Select MB_TABLET")
                    MB_tablet = TabletPage(self.driver)
                    MB_tablet.get_Tablet_ShopAll()
                    MB_tablet.get_Module1_link()
                elif "MB_WEAR" in url:
                    print("Select MB_WEAR")
                    MB_Wear = MobileAccessoriesPage(self.driver)
                    MB_Wear.get_MobileAccessories_ShopAll()
                    MB_Wear.get_Module4_link()
                elif "MB" in url:
                    print("Select CC")
                    MB_smartphone = SmartPhonePage(self.driver)
                    MB_smartphone.get_SMARTPHONE_ShopAll()
                    MB_smartphone.get_Module4_link()
                elif "HA" in url:
                    print("Select HA")
                    HA_homeappliance = HomeAppliancePage(self.driver)
                    HA_homeappliance.get_HomeAppliance_ShopAll()
                    HA_homeappliance.get_Module4_link()
                elif "TV" in url:
                    print("Select TV")
                    TV_HomeTheater = TVHomeTheaterPage(self.driver)
                    TV_HomeTheater.get_TVHomeTheater_ShopAll()
                    TV_HomeTheater.get_Module4_link()
                elif "CE_COMPUTER" in url:
                    print("Select CE_COMPUTER")
                    CE_Computer = ComputingPage(self.driver)
                    CE_Computer.get_Computing_ShopAll()
                    CE_Computer.get_Module4_link()
            else:
                print("Not able to RUN")
if __name__ == '__main__':
unittest.main() | [
"ahmedu.ferdous@gmail.com"
] | ahmedu.ferdous@gmail.com |
65752b9d1bb71f04389f6f784306953871c845e6 | 51a6413af4995a221bb7cf9bce20a00acedcff9d | /snakify-problems-python/ALL/10_09_polyglotes.py | 2fb598e6e525c544190b13d43f6f67e7916ff2fe | [] | no_license | famaxth/Way-to-Coding | 4dff099de31c1a5870cf72a2aaaab74fdcbfed36 | bcb2048898edf418b059ec506eb5ad1507889cfb | refs/heads/main | 2023-08-15T19:34:16.510571 | 2021-10-08T04:53:42 | 2021-10-08T04:53:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | n = int(input()) # количество учеников
lang_nums = [0] * n # number of languages each student knows
langs = [] # per-student set of language names
for i in range(n):
    lang_nums[i] = int(input())
    l = set()
    for j in range(lang_nums[i]):
        l.add(input())
    langs.append(l)
# languages known by at least one student (union) and by all (intersection)
uni = set.union(*langs)
inter = set.intersection(*langs)
print(len(inter), '\n'.join(sorted(inter)), len(uni), '\n'.join(sorted(uni)), sep='\n')
# developers solution
# students = [{input() for j in range(int(input()))} for i in range(int(input()))]
# known_by_everyone, known_by_someone = set.intersection(*students), set.union(*students)
# print(len(known_by_everyone), *sorted(known_by_everyone), sep='\n')
# print(len(known_by_someone), *sorted(known_by_someone), sep='\n')
| [
"jobin25051999@gmail.com"
] | jobin25051999@gmail.com |
566c733da6a9ca4011ffcaa3a1e35a486b6be7df | 09cead98874a64d55b9e5c84b369d3523c890442 | /py200727_python2/day33_py208024/module_2.py | 4a804c01cc95026ad482d2195c3fca507f0ab18f | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | """
module 2
from...import
from...import statement
"""
# from...import pulls a single name from a project module
from py200727_python2.day33_py208024.myfunc import add
print(add(1,2))
# import math
from math import sqrt
result = sqrt(4)
print(result)
# wildcard import: binds every public math name in this namespace
from math import *
print(gcd(12,4))
print(ceil(3.4))
print(fabs(5.1))
| [
"lada314@gmail.com"
] | lada314@gmail.com |
2d3085eafbaea0f942754274f2975d8c59f460f2 | ff487fe5f2956bac2f80ee8f515a17f4fad4bd07 | /apps/alerts/urls.py | d032c1db6e513c613eead5b8c9970cbafe511ef7 | [] | no_license | 101t/django-lms | 8c11c28321675c52a82f5111912e58ed4edf221f | 4bee87f299c588b8ad0145bff3b82a51f89b4cac | refs/heads/master | 2021-11-24T22:44:39.401464 | 2021-11-16T20:45:33 | 2021-11-16T20:45:33 | 219,135,709 | 1 | 2 | null | 2021-11-16T06:49:56 | 2019-11-02T10:07:43 | HTML | UTF-8 | Python | false | false | 287 | py | from django.urls import path
from django.contrib.auth.decorators import login_required
from .views import AlertList, acknowledge
# Namespace used when reversing, e.g. {% url 'alerts:list' %}.
app_name = "alerts"
urlpatterns = [
    # The alert list requires an authenticated user; acknowledge does not.
    path('', login_required(AlertList.as_view()), name='list'),
    path('acknowledge/', acknowledge, name='acknowledge'),
]
| [
"tarek.it.eng@gmail.com"
] | tarek.it.eng@gmail.com |
a0126bed627d15a32c8e0da5723ce62e338341f9 | de56ee2369d36c93ad802f0359f3274b9a3f0a25 | /photos/utils.py | c2c4b34843b95f366d0f71718aee276495d84c97 | [] | no_license | Anubhav722/asynchronous-celery-tasks | bdfd485b6c6b2777a4712ad64ebabf347e717654 | a21f055e8e524db662d21f60dac2f8daab075f63 | refs/heads/master | 2021-01-23T00:45:41.631402 | 2017-05-31T10:47:44 | 2017-05-31T10:47:44 | 92,840,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | import json
import requests
from photos.models import Photo
from django.conf import settings
def get_latest_flickr_image():
    """Return the newest entry from the Flickr public photo feed."""
    feed_url = settings.FLICKR_JSON_FEED_URL
    response = requests.get(feed_url)
    # Flickr escapes single quotes as \' in its feed, which is not valid
    # JSON, so undo that before parsing.
    cleaned = response.text.replace("\\'", "'")
    feed = json.loads(cleaned)
    return feed['items'][0]
def save_latest_flickr_image():
    """
    We get the lastest image and save it to flickr model
    """
    latest = get_latest_flickr_image()
    # Each flickr item is assumed to have a unique link, so an existing row
    # with the same link means this image was already stored.
    if not Photo.objects.filter(link=latest['link']).exists():
        photo = Photo(
            title=latest['title'],
            link=latest['link'],
            image_url=latest['media']['m'],
            description=latest['description'],
        )
        photo.save()
"anubhavs286@gmail.com"
] | anubhavs286@gmail.com |
55b79e0d9a0de22080a98b43b205b927f983600f | 4ff8676136167cdd81d7a983272102fff86360e8 | /python/面试题 08.06. 汉诺塔问题.py | 5b98a8435f155bdd5b6ff5e032c3e17d3a9fbe93 | [] | no_license | geniuscynic/leetcode | 0ec256af2377d19fee22ce736462a7e95e3f4e67 | 379a8f27f8213951ee8be41bd56598036995d267 | refs/heads/master | 2023-07-19T07:22:20.001770 | 2021-09-07T14:50:40 | 2021-09-07T14:50:40 | 297,277,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | import sys
from collections import defaultdict
from collections import Counter
class Solution:
    # Kept for interface compatibility (unused by the algorithm itself).
    dicts = {}

    def helper(self, n, A, B, C):
        """Recursively move the top n disks from peg A to peg C via peg B.

        Precondition: n >= 1 (the n == 1 base case pops one disk).
        """
        if n == 1:
            C.append(A.pop())
            return
        # Move n-1 disks out of the way, move the largest, then move the
        # n-1 disks on top of it.
        self.helper(n - 1, A, C, B)
        C.append(A.pop())
        self.helper(n - 1, B, A, C)

    def hanota(self, A, B, C):
        """Move every disk from A onto C (in place), using B as spare.

        BUG FIX: the original called helper(len(A), ...) unconditionally,
        so an empty A skipped the n == 1 base case and recursed forever.
        """
        if A:
            self.helper(len(A), A, B, C)
if __name__ == "__main__":
    # Quick manual check: move an 11-disk tower from the first peg to the third.
    solver = Solution()
    source = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    spare = []
    target = []
    m = [1, 2, 3, 4]
    nums2 = [1, 2, 3]
    n = 3
    result = solver.hanota(source, spare, target)
    # print(solver.ls)
    print(source, spare, target)
"350810375@qq.com"
] | 350810375@qq.com |
aa69c7a051939ec0e565da4a832fe5aa529aee8d | 8b0d9eb0c04426f544e34726981643dbe7b91bdc | /TestBotDeploy/Bot/settings.py | 6e1ec557579fbb81e8b85387857ed5cf04e4e321 | [] | no_license | VicGjb/bot | 555777661115c3ebf33169ed9d5f61a8f2a1bbbb | ad3cecbec46f5b78dd97e9a4d04d527bed853e14 | refs/heads/master | 2023-03-04T07:51:38.049016 | 2021-02-14T19:08:10 | 2021-02-14T19:08:10 | 324,197,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,306 | py | """
Django settings for Bot project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any real deployment.
SECRET_KEY = '8yd7qz&cu35z!^qh_o6zzdk*u-%tmtbi#*bf-$i2(rq&f8wi@2'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

ALLOWED_HOSTS = ['104.236.40.45']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'Bot.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'Bot.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# BUG FIX: the original "else:" branch contained only stray closing braces,
# which was a SyntaxError (its production database settings had evidently
# been deleted). Until real production credentials are configured, both
# branches fall back to the bundled sqlite database.
if DEBUG:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': BASE_DIR / 'db.sqlite3',
        }
    }
else:
    # TODO: replace with the real production database configuration.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': BASE_DIR / 'db.sqlite3',
        }
    }

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# BUG FIX: the original used os.path.join without importing os, and the last
# line carried stray backticks; both fixed here.
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static_in_env')]
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media_root')
"="
] | = |
39f7c29ea95996c14613d1e200fbe93a42a90aa3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_braces.py | f13585bba0e3d0198f64068b01917659abec0f3c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
#calss header
class _BRACES():
def __init__(self,):
self.name = "BRACES"
self.definitions = brace
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['brace']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4fee162a9707bcfbb449862ff68c9713ae67654a | f0316e656767cf505b32c83eef4df13bb9f6b60c | /LeetCode/Python/Medium/1476_subrectangle_queries.py | aac76bf6f8f0c4ead16e60f36d62d2a45052f2d8 | [] | no_license | AkshdeepSharma/Classroom | 70ec46b35fab5fc4a9d2eac430659d7dafba93da | 4e55799466c101c736de6c7e07d716ff147deb83 | refs/heads/master | 2022-06-13T18:14:03.236503 | 2022-05-17T20:16:28 | 2022-05-17T20:16:28 | 94,828,359 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | class SubrectangleQueries:
    def __init__(self, rectangle: List[List[int]]):
        # Keep a reference to the caller's grid (no copy): updates made through
        # this object are visible to anyone else holding the same list-of-lists.
        self.rectangle = rectangle
def updateSubrectangle(self, row1: int, col1: int, row2: int, col2: int, newValue: int) -> None:
for i in range(row1, row2 + 1):
for j in range(col1, col2 + 1):
self.rectangle[i][j] = newValue
def getValue(self, row: int, col: int) -> int:
return self.rectangle[row][col]
# Your SubrectangleQueries object will be instantiated and called as such:
# obj = SubrectangleQueries(rectangle)
# obj.updateSubrectangle(row1,col1,row2,col2,newValue)
# param_2 = obj.getValue(row,col)
| [
"akshdeep.sharma1@gmail.com"
] | akshdeep.sharma1@gmail.com |
c3452dd8aecdacf3bd2f698102e86b7017748a11 | df541a802b2dfa89d3aab14af627358dc7c76e6e | /APP自动化/App/StoneUIFramework/public/setting/系统设置/退出/_Logout.py | 74aa1a7a16757b9144ada4802842241e26290418 | [] | no_license | gupan2018/PyAutomation | de966aff91f750c7207c9d3f3dfb488698492342 | 230aebe3eca5799c621673afb647d35a175c74f1 | refs/heads/master | 2021-09-07T19:44:20.710574 | 2017-12-22T15:58:23 | 2017-12-22T15:58:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py | __author__ = 'Administrator'
#登出确认页
from StoneUIFramework.public.setting.系统设置._Syssetting import _Syssetting
import logging
class _Logout(_Syssetting):
    """Page object for the logout confirmation dialog (Settings > System > Logout)."""
    def Syssetting_logout_confirm(self):
        # Locate the "confirm" button of the logout dialog; on failure, log
        # and fail the test with the original (Chinese) assertion message.
        try:
            confirm_button = self.driver.find_element_by_id("android:id/button1")
        except Exception:
            logging.info("Syssetting_logout_confirm:error@@!!!!!!!")
            assert False,\
                "点击设置-系统安全-退出-确定失败"
        return confirm_button
    def Syssetting_logout_cancel(self):
        # Locate the "cancel" button of the logout dialog; same failure
        # handling as the confirm lookup above.
        try:
            cancel_button = self.driver.find_element_by_id("android:id/button2")
        except Exception:
            logging.info("Syssetting_logout_cancel:error@@!!!!!!!")
            assert False,\
                "点击设置-系统安全-退出-取消"
        return cancel_button
"610077670@qq.com"
] | 610077670@qq.com |
87ccf88abce864ab8fdb05be5339c0883908d290 | ab621c65fc91f5194c4032d68e750efaa5f85682 | /l10n_th_account_tax_detail/__openerp__.py | bd15f29f96dcd2ad04cab3560b4c476f85ba1f8a | [] | no_license | pabi2/pb2_addons | a1ca010002849b125dd89bd3d60a54cd9b9cdeef | e8c21082c187f4639373b29a7a0905d069d770f2 | refs/heads/master | 2021-06-04T19:38:53.048882 | 2020-11-25T03:18:24 | 2020-11-25T03:18:24 | 95,765,121 | 6 | 15 | null | 2022-10-06T04:28:27 | 2017-06-29T10:08:49 | Python | UTF-8 | Python | false | false | 754 | py | # -*- coding: utf-8 -*-
# Odoo/OpenERP addon manifest: a bare dict literal evaluated by the addon loader.
{
    'name': "Invoice Tax Detail",
    'summary': "Allow editing tax table in detail",
    'author': "Ecosoft",
    'website': "http://ecosoft.co.th",
    'category': 'Account',
    'version': '0.1.0',
    # Modules that must be installed before this one.
    'depends': [
        'account',
        'l10n_th_account',
        'account_invoice_check_tax_lines_hook',
    ],
    # Data/view files loaded (in order) on install or upgrade.
    'data': [
        'data/config_data.xml',
        'security/ir.model.access.csv',
        'wizard/account_tax_detail_view.xml',
        'views/account_view.xml',
        'views/account_invoice_view.xml',
        'views/account_voucher_view.xml',
        'views/account_config.xml',
    ],
    # No demo data shipped with this addon.
    'demo': [
    ],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"kittiu@gmail.com"
] | kittiu@gmail.com |
644fc1bf3f6b8d07653d023d8f31d01f65603566 | 02f0b44446d0ae1456db790866f8c184dc9b2652 | /trunk/convertdialog.py | 2d3c32c6b4d323b8028e177c1d5ac090a9334565 | [] | no_license | BGCX261/zoomtc-svn-to-git | 23d26688429d5d0db5937c630ecb6243438c3721 | 8937a6a53bb58c1457e6f93af619070929a4839a | refs/heads/master | 2021-01-23T12:11:25.771636 | 2015-08-25T15:20:29 | 2015-08-25T15:20:29 | 41,591,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,708 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*
import os
import sys
from PIL import Image
from PyQt4 import QtCore, QtGui
import zoomtc_rc
from ui_convertdialog import Ui_convertDialog
# all supported image formats list here
# (compared against the lowercased extension from os.path.splitext in
# _processFile; '.jpeg' and '.jpg' are both listed on purpose)
EXTS = ('.bmp', '.im', '.msp', '.pcx', '.ppm',
        '.spider', '.tiff', '.xbm', '.xv', '.jpg', '.jpeg', '.gif', '.png',)
class ConvertDialog(QtGui.QDialog, Ui_convertDialog):
    """Main dialog of the zoomtc batch image zoomer (Python 2 / PyQt4).

    Lets the user pick a picture directory (browse or drag & drop) and a zoom
    rate, then resizes every supported image into a 'zoomtc_out' subdirectory.
    UI strings switch to Chinese when the system locale is China.
    """
    def __init__(self, parent=None, initDir= '.', initRate = 0.055):
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        # English version UI messages
        self.messages = {
            'selectSrc': u'Please select picture source dir',
            'outDir': u'zoomtc_out',
            'progressFormat': u'processing: %d / %d',
            'cancel': u'Cancel',
            'processing': u'Processing...',
            'dirLabel': u'Picture Dir:',
            'rateLabel': u'Zoom Rate:',
            'helpLabel': u"<p>1.Drag & Drop picture directory on `Picture Dir'.<br/>"
                         u"2.Set `Zoom Rate' as R,the zoomed size will be SIZE*R.<br/>"
                         u"3.Zoomed pictures stored in 'zoomtc_out' under the same directory.</p>",
            'dirButton': u'Browser...',
            'convertButton': u'Zoom',
            'windowTitle': u'Zoomtc, a picture batch zoom tool',
            'criticalTitle': u'Something is Wrong',
            'criticalInfo': u'Check the zoom rate and picture format.\nPlease try again.',
        }
        # If system locale is Chinese, then we define Chinese version UI messages
        loc = QtCore.QLocale.system()
        if loc.country()==loc.China:
            self.messages = {
                'selectSrc': u'请选择图片源目录',
                'outDir': u'缩放输出目录',
                'progressFormat': u'进度: %d / %d',
                'cancel': u'取消',
                'processing': u"正在处理图片……",
                'dirLabel': u'图片源目录:',
                'rateLabel': u'缩放比例:',
                'helpLabel': u'<p>1.拖放图片目录到"图片源目录"<br/>'
                             u'2.设置"缩放比例"为R, 缩放后尺寸为"原尺寸*R"<br/>'
                             u'3.缩放后的文件保存在原图片目录下的“缩放输出目录"中</p>',
                'dirButton': u"浏览...",
                'convertButton': u"缩放",
                'windowTitle': u'Zoomtc, 图片批量缩放工具',
                'criticalTitle': u'错误',
                'criticalInfo': u'请检查是否正确设置了缩放比例.',
            }
        # set the UI, English or Chinese according to the system locale
        self.dirLabel.setText(self.messages['dirLabel'])
        self.rateLabel.setText(self.messages['rateLabel'])
        self.helpLabel.setText(self.messages['helpLabel'])
        self.dirButton.setText(self.messages['dirButton'])
        self.convertButton.setText(self.messages['convertButton'])
        self.setWindowTitle(self.messages['windowTitle'])
        self.setWindowIcon(QtGui.QIcon(":/logo.ico"))
        # enable Drag & Drop
        # (drops are accepted on the dialog itself, not on the line edits)
        self.dirLineEdit.setAcceptDrops(False)
        self.rateLineEdit.setAcceptDrops(False)
        self.setAcceptDrops(True)
        # Wire the two buttons to their handlers (old-style PyQt4 connect).
        self.connect(self.dirButton, QtCore.SIGNAL("clicked()"),
            self.getDir)
        self.connect(self.convertButton, QtCore.SIGNAL("clicked()"),
            self.doConvert)
        self.cwd = os.path.abspath(initDir)
        self.dirLineEdit.setText(self.cwd)
        self.rate = float(initRate)
        self.rateLineEdit.setText("%.3f"%round(self.rate, 3))
    def dragEnterEvent(self, event):
        # Accept drags that carry file/directory URLs.
        if event.mimeData().hasFormat("text/uri-list"):
            event.acceptProposedAction()
    def dropEvent(self, event):
        # Use the first dropped URL as the new source directory.
        urls = event.mimeData().urls()
        if not urls:
            return
        fileName = urls[0].toLocalFile()
        if not fileName:
            return
        self.dirLineEdit.setText(fileName)
    # save rate value when closing
    def closeEvent(self, event):
        rate = float(self.rateLineEdit.text())
        settings = QtCore.QSettings(u"ctootc", u"zoomtc")
        settings.setValue("rate", QtCore.QVariant(rate))
    def getDir(self):
        # Let the user browse for a directory, starting from the last one.
        dirName = QtGui.QFileDialog.getExistingDirectory(self, self.messages['selectSrc'],
            self.cwd)
        if dirName:
            self.dirLineEdit.setText(dirName)
            #self.cwd = os.path.basename(dirName)
    # process one image file
    def _processFile(self, fileName, rate, progressDialog):
        # NOTE(review): this changes the process-wide working directory as a
        # side effect; confirm nothing else depends on the cwd while running.
        print 'process on:', fileName
        path = os.path.dirname(fileName)
        os.chdir(path)
        outdir = os.path.join(path, self.messages['outDir'])
        print 'outdir', outdir
        name = os.path.basename(fileName)
        print 'name', name
        # Advance the progress dialog even for files that are later skipped.
        self.processValue += 1
        progressDialog.setValue(self.processValue)
        progressDialog.setLabelText(self.messages['progressFormat'] % (self.processValue, self.processTotal))
        QtGui.qApp.processEvents()
        if progressDialog.wasCanceled():
            return
        n,ext = os.path.splitext(name)
        if ext.lower() in EXTS:
            im = Image.open(fileName)
            (w,h) = im.size
            iout = im.resize((int(w*rate),int(h*rate)), Image.ANTIALIAS)
            print 'outname', os.path.join(outdir, name)
            if not os.path.exists(outdir):
                os.mkdir(outdir)
            iout.save(os.path.join(outdir, name))
    # process all image files under this directories
    def _processDir(self, path, rate, progressDialog):
        # Recurses into subdirectories; files are handled by _processFile.
        print 'process on:', path
        os.chdir(path)
        outdir = os.path.join(path, self.messages['outDir'])
        print 'outdir', outdir
        for name in os.listdir(path):
            print 'name', name
            fullname = os.path.join(path, name)
            if os.path.isdir(fullname):
                self._processDir(fullname, rate, progressDialog)
            else:
                self._processFile(fullname, rate, progressDialog)
    # count image files need to be processed, we need this number to initialize ProgressDialog
    def _totalfiles(self, path):
        # Counts every non-directory entry (not just supported image files),
        # matching how _processFile advances the progress counter.
        if os.path.isdir(path):
            total = 0
            for name in os.listdir(path):
                fullname = os.path.join(path, name)
                if os.path.isdir(fullname):
                    total += self._totalfiles(fullname)
                else:
                    total += 1
            return total
        else:
            return 1
    def doConvert(self):
        # Run the conversion for the current dir/rate, showing a progress
        # dialog; any failure pops up the localized error box instead.
        try:
            rate = float(self.rateLineEdit.text())
            path = unicode(self.dirLineEdit.text())
            progressDialog = QtGui.QProgressDialog(self)
            progressDialog.setCancelButtonText(self.messages['cancel'])
            self.processTotal = self._totalfiles(path)
            progressDialog.setRange(0, self.processTotal)
            progressDialog.setWindowTitle(self.messages['processing'])
            self.processValue = 0
            if os.path.isdir(path):
                self._processDir(path, rate, progressDialog)
            else:
                self._processFile(path, rate, progressDialog)
            progressDialog.close()
        except:
            QtGui.QMessageBox.critical(self, self.messages['criticalTitle'], self.messages['criticalInfo'])
            return
if __name__ == "__main__":
    # Restore the last-used zoom rate (defaulting to 0.5) and show the dialog.
    application = QtGui.QApplication(sys.argv)
    stored = QtCore.QSettings(u"ctootc", u"zoomtc")
    saved_rate = stored.value("rate", QtCore.QVariant(0.5)).toDouble()[0]
    dialog = ConvertDialog(initRate=saved_rate)
    dialog.show()
    sys.exit(application.exec_())
| [
"you@example.com"
] | you@example.com |
6ffb0c65163ec92ac6eb7e8bbb2eeee963366b3e | 04ad466db13a382cc679d9562e515d57b54c47e6 | /scripts/maxent_priors.py | a69077dddf5e3009d6b707e0e4792d6890804360 | [
"MIT"
] | permissive | shivaditya-meduri/pyprobml | d9423463ae7b352c52f3d005fbf33ee66d366971 | 9dbe0c95f4ec061b98bf32fa3ac1deafe2e0c04d | refs/heads/master | 2023-04-12T13:09:45.572071 | 2021-05-07T18:22:02 | 2021-05-07T18:22:02 | 356,659,290 | 1 | 0 | MIT | 2021-04-11T05:04:38 | 2021-04-10T18:07:31 | null | UTF-8 | Python | false | false | 1,105 | py |
# jeffreys prior for bernoulli using 2 paramterizatiobs
# fig 1.10 of 'Bayeysian Modeling and Computation'
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from scipy import stats
from scipy.stats import entropy
from scipy.optimize import minimize
# Number of categories in the discrete distribution.
C = 10
xs = np.arange(1, C + 1)

# Constraint building blocks: every scenario requires the probabilities to
# sum to one; two scenarios add a moment/mass constraint on top.
normalization = {"type": "eq", "fun": lambda x: np.sum(x) - 1}
mean_is_1p5 = {"type": "eq", "fun": lambda x: 1.5 - np.sum(x * xs)}
mass_on_3_4 = {"type": "eq", "fun": lambda x: np.sum(x[[2, 3]]) - 0.8}
cons = [
    [normalization],
    [normalization, mean_is_1p5],
    [normalization, mass_on_3_4],
]

max_ent = []
names = ['unconstrained', 'mean of 1.5', 'p(3,4)=0.8']
for label, constraint_set in zip(names, cons):
    # Maximum entropy == minimizing the negated entropy under the constraints.
    solution = minimize(
        lambda x: -entropy(x),
        x0=[1 / C] * C,
        bounds=[(0., 1.)] * C,
        constraints=constraint_set,
    )['x']
    max_ent.append(entropy(solution))
    plt.plot(xs, solution, 'o--', lw=2.5, label=label)

plt.xlabel(r"$\theta$")
plt.ylabel(r"$p(\theta)$")
plt.legend()
pml.savefig("maxent_priors.pdf", dpi=300)
"murphyk@gmail.com"
] | murphyk@gmail.com |
b56e012cd852d9dee8afd57b3ff0a9a240fe1828 | 185bea7d9c7dc9288b021e0c27f24e087f668109 | /AI/Project/Sudoku/eliminate_q5.py | 472e64c213c8524b8d5000fa04d1d1df0dd7b613 | [] | no_license | JasonVann/Udacity | 3430fa86d9863dd0c4657b525243a91154f4b57f | 9584bf90e8f1f9d4faf0973c7c36325f367558e7 | refs/heads/master | 2021-01-12T09:38:53.896396 | 2017-09-05T03:51:47 | 2017-09-05T03:51:47 | 76,217,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | from utils import *
def eliminate(values):
    """Eliminate values from peers of each box with a single value.

    Go through all the boxes, and whenever there is a box with a single value,
    eliminate this value from the set of values of all its peers.

    Args:
        values: Sudoku in dictionary form.
    Returns:
        Resulting Sudoku in dictionary form after eliminating values (the
        dict is mutated in place and also returned).
    """
    # Snapshot the already-solved boxes before mutating `values`. The
    # original deep-copied the entire dict; a snapshot of the solved items
    # is sufficient because the string values are immutable. The commented
    # debug code and the unused `changed` flag were removed as well.
    solved = [(box, digit) for box, digit in values.items() if len(digit) == 1]
    for box, digit in solved:
        # `peers` comes from `utils` (star import at the top of the file).
        for peer in peers[box]:
            # str.replace is a no-op when `digit` is absent, so no
            # membership check is needed.
            values[peer] = values[peer].replace(digit, '')
    return values
| [
"jasonvanet@gmail.com"
] | jasonvanet@gmail.com |
f7afb969c707a49319ea9771a712ab4977d48f95 | 642f50ad3a7abc63032815847fe9a49246b03506 | /setup.py | ae3cdb64db3b8ddeab1b8efb9189075e6969709a | [
"MIT"
] | permissive | circuitpython/CircuitPython_Org_DisplayIO_Annotation | f76557c616498480c2f14c6423150f6eb6aa2709 | df982a2f65d8dfe77759905820f5e27aead425a7 | refs/heads/main | 2023-06-13T07:31:35.924175 | 2021-07-05T15:06:11 | 2021-07-05T15:06:11 | 358,929,751 | 0 | 2 | MIT | 2021-07-05T15:06:11 | 2021-04-17T16:34:49 | Python | UTF-8 | Python | false | false | 2,336 | py | # SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries
# SPDX-FileCopyrightText: Copyright (c) 2021 Kevin Matocha for circuitpython
#
# SPDX-License-Identifier: MIT
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Absolute path of the directory containing this setup.py.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
# (note: `open` here is codecs.open, imported above, not the builtin).
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
    long_description = f.read()
setup(
    # Community Bundle Information
    name="circuitpython-displayio-annotation",
    # Version is derived from git tags via setuptools_scm.
    use_scm_version=True,
    setup_requires=["setuptools_scm"],
    description="A CircuitPython DisplayIO widget for annotating other widgets or freeform positions.",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    # The project's main homepage.
    url="https://github.com/circuitpython/CircuitPython_Org_DisplayIO_Annotation.git",
    # Author details
    author="CircuitPython Organization",
    author_email="",
    install_requires=[
        "Adafruit-Blinka",
        "adafruit-circuitpython-display-text",
        "adafruit-circuitpython-display_shapes",
        "adafruit-circuitpython-displayio-layout",
    ],
    # Choose your license
    license="MIT",
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Libraries",
        "Topic :: System :: Hardware",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
    ],
    # What does your project relate to?
    keywords="adafruit blinka circuitpython micropython displayio_annotation displayio widget "
    "graphics",
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    # TODO: IF LIBRARY FILES ARE A PACKAGE FOLDER,
    # CHANGE `py_modules=['...']` TO `packages=['...']`
    py_modules=["displayio_annotation"],
)
| [
"foamyguy@gmail.com"
] | foamyguy@gmail.com |
5a27684254e81e031f857cf3f929151d09307a8b | ad553dd718a8df51dabc9ba636040da740db57cf | /.history/app_20181208180113.py | 5eb0e0ff5a594f01d11592f60c728896dcfaa4e9 | [] | no_license | NergisAktug/E-Commerce-PythonWithFlask-Sqlite3 | 8e67f12c28b11a7a30d13788f8dc991f80ac7696 | 69ff4433aa7ae52ef854d5e25472dbd67fd59106 | refs/heads/main | 2023-01-01T14:03:40.897592 | 2020-10-19T20:36:19 | 2020-10-19T20:36:19 | 300,379,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,256 | py | import datetime
from flask import Flask,flash, request, render_template_string, render_template
from flask import Flask, url_for, render_template, request, redirect, session, escape, render_template_string
from flask_babelex import Babel
from flask_sqlalchemy import SQLAlchemy
from flask_user import current_user, login_required, roles_required
from sqlalchemy.sql import table, column, select
from sqlalchemy import MetaData, create_engine
from flask_user import login_required, roles_required, UserManager, UserMixin
class ConfigClass(object):
    """Flask / Flask-User / SQLAlchemy configuration for the e-commerce app.

    NOTE(review): real mail credentials are committed here in plain text;
    they should be rotated and loaded from environment variables instead.
    """
    # Flask secret key (the value itself warns it is not production-safe).
    SECRET_KEY = 'This is an INSECURE secret!! DO NOT use this in production!!'
    # Local sqlite database used by SQLAlchemy.
    SQLALCHEMY_DATABASE_URI = 'sqlite:///eticaret.sqlite'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Outgoing mail: Gmail over SSL (port 465).
    MAIL_SERVER = 'smtp.gmail.com'
    MAIL_PORT = 465
    MAIL_USE_SSL = True
    MAIL_USE_TLS = False
    MAIL_USERNAME = 'nergis.aktug2014@gmail.com'
    MAIL_PASSWORD = '05383896877'
    MAIL_DEFAULT_SENDER = '"MyApp" <xyz@gmail.com>'
    # Flask-User: log in with email, no separate username.
    USER_ENABLE_EMAIL = True
    USER_ENABLE_USERNAME = False
    USER_EMAIL_SENDER_EMAIL = "noreply@example.com"
def create_app():
    """Application factory: builds the Flask app, ORM models and routes.

    The SQLAlchemy models are declared inside the factory so they bind to
    the `db` instance created here; tables are created immediately.
    """
    app = Flask(__name__)
    app.config.from_object(__name__ + '.ConfigClass')
    db = SQLAlchemy(app)
    # User account (Turkish: Kullanici = user, sifre = password).
    # NOTE(review): passwords are stored in plain text — hash them.
    class Kullanici(db.Model):
        __tablename__ = 'Kullanici'
        id = db.Column(db.Integer, primary_key=True)
        email = db.Column(db.String(80), unique=True)
        sifre = db.Column(db.String(80))
        rolId = db.Column(db.Integer, db.ForeignKey('rol.rolId', ondelete='CASCADE'))
        active = db.Column('is_active', db.Boolean(), nullable=False, server_default='1')
        def __init__(self, email, sifre):
            self.email = email
            self.sifre = sifre
            self.rolId = 0
    # Role lookup table (rol = role).
    class Roller(db.Model):
        __tablename__ = 'rol'
        rolId = db.Column(db.Integer, primary_key=True)
        rolisim = db.Column(db.String(80))
    # Product (urun = product: name, image, price, brand FK).
    class urunler(db.Model):
        __tablename__ = 'urunler'
        urun_id = db.Column(db.Integer, primary_key=True)
        urunismi = db.Column(db.String(80))
        urunresmi = db.Column(db.String(80))
        urunFiyati = db.Column(db.Integer)
        markaId = db.Column(db.Integer(), db.ForeignKey('markalar.markaId', ondelete='CASCADE'))
        def __init__(self, urunismi, urunresmi, urunFiyati,markaId):
            self.urunismi =urunismi
            self.urunresmi = urunresmi
            self.urunFiyati = urunFiyati
            self.markaId=markaId
    # Brand (marka = brand).
    class markalar(db.Model):
        __tablename__ = 'markalar'
        markaId = db.Column(db.Integer, primary_key=True)
        markaadi = db.Column(db.String(80))
        marka_modeli = db.Column(db.String(80))
        def __init__(self, markaadi, marka_modeli):
            self.markaadi = markaadi
            self.marka_modeli = marka_modeli
    # Customer profile linked to a user account (musteri = customer).
    class musteri(db.Model):
        __tablename__ = 'musteri'
        musteriId = db.Column(db.Integer, primary_key=True)
        musteriadi = db.Column(db.String(80))
        musterisoyadi = db.Column(db.String(80))
        mail = db.Column(db.String(80), unique=True)
        telefon = db.Column(db.Integer)
        sifre = db.Column(db.String(80))
        il = db.Column(db.String(80))
        ilce = db.Column(db.String(80))
        kullaniciId = db.Column(db.Integer(), db.ForeignKey('Kullanici.id', ondelete='CASCADE'))
        def __init__(self, musteriadi, musterisoyadi, mail, telefon, sifre, il, ilce, kullaniciId):
            self.musteriadi = musteriadi
            self.musterisoyadi = musterisoyadi
            self.mail = mail
            self.telefon = telefon
            self.sifre = sifre
            self.il = il
            self.ilce = ilce
            self.kullaniciId = kullaniciId
    # Order (siparis = order) linking customer and product.
    class siparis(db.Model):
        __tablename__ = 'siparis'
        siparisId = db.Column(db.Integer, primary_key=True)
        musteriId = db.Column(db.Integer(), db.ForeignKey('musteri.musteriId', ondelete='CASCADE'))
        urunId = db.Column(db.Integer(), db.ForeignKey('urunler.urun_id', ondelete='CASCADE'))
        siparisno = db.Column(db.Integer)
        siparisTarihi = db.Column(db.Integer)
        odemeId = db.Column(db.Integer())
        def __init__(self, musteriId, urunId, siparisno, siparisTarihi, odemeId):
            self.musteriId = musteriId
            self.urunId = urunId
            self.siparisno = siparisno
            self.siparisTarihi = siparisTarihi
            self.odemeId = odemeId
    db.create_all()
    # Home page.
    @app.route('/')
    def anasayfa():
        return render_template('index.html')
    # Registration (kayit = register). POST creates the user, GET shows the form.
    @app.route('/kayit', methods=['GET', 'POST'])
    def kayit():
        if request.method == 'POST':
            mail = request.form['email']
            parola = request.form['sifre']
            yeniKullanici = Kullanici(email=mail, sifre=parola)
            db.session.add(yeniKullanici)
            db.session.commit()
            # NOTE(review): a freshly constructed object is never None, so
            # this condition is always true; it presumably intended to check
            # that the insert succeeded.
            if yeniKullanici is not None:
                mesaj = "Kayıt Başarıyla Sağlanmıştır."
                return render_template("index.html", mesaj=mesaj)
        else:
            return render_template('kayit.html')
    @app.route('/admin')
    def admin():
        return render_template("admin.html")
    # Member login form (uye = member).
    @app.route('/uye', methods=['GET', 'POST'])
    def uye():
        return render_template("uyeGirisi.html")
    # Login (giris = login): hard-coded admin credentials, otherwise member login.
    @app.route('/giris', methods=['GET', 'POST'])
    def giris():
        hata=None
        if request.method=='POST':
            if request.form['email']!='admin@example.com' or request.form['sifre']!='admin':
                # NOTE(review): filter_by() returns a Query object, which is
                # never None — this branch is always taken. It presumably
                # should call .first() and check the result.
                if Kullanici.query.filter_by(email=request.form['email'],sifre=request.form['sifre']) is not None:
                    session['uye_giris']=True
                    return redirect(url_for('anasayfa'))
                else:
                    hata='hatalı giris yaptınız'
            else:
                flash('giriş başarılı')
                session['admin_giris']=True
                return redirect(url_for('admin'))
        return render_template('uyeGiris.html',hata=hata)
    # Logout (cikis = logout): drop both session flags.
    @app.route('/cikis')
    def cikis():
        session.pop('admin_giris',None)
        session.pop('uye_giris',None)
        return render_template("index.html")
    # Product admin: list products (urunEkle = add product).
    @app.route('/urunEkle')
    def urunEkle():
        tumVeri=urunler.query.all()
        return render_template("urunEkle.html",tumVeri=tumVeri)
    # Insert a new product from the submitted form.
    @app.route('/urunEklemeYap',methods=['POST'])
    def urunEklemeYap():
        urunismi=request.form['urunismi']
        urunResmi=request.form['urunresmi']
        urunFiyati=request.form['fiyati']
        markaId=request.form['markaId']
        yeniUrun=urunler(urunismi=urunismi,urunresmi=urunResmi,urunFiyati=urunFiyati,markaId=markaId)
        db.session.add(yeniUrun)
        db.session.commit()
        return redirect(url_for("urunEkle"))
    # Delete a product by id (sil = delete).
    @app.route("/sil/<string:id>")
    def sil(id):
        urun=urunler.query.filter_by(urun_id=id).first()
        db.session.delete(urun)
        db.session.commit()
        return redirect(url_for("urunEkle"))
    @app.route('/Markalar')
    def Markalar():
        return render_template("marka.html")
    return app
if __name__ == '__main__':
    # Build the application via the factory and serve it locally.
    flask_app = create_app()
    flask_app.run(host='127.0.0.1', port=5000, debug=True)
"nergis.aktug2014@gmail.com"
] | nergis.aktug2014@gmail.com |
820ef2144d3dd85072afa1fb2bb8df44a8a3087d | 006ff11fd8cfd5406c6f4318f1bafa1542095f2a | /FastSimulation/L1CaloTriggerProducer/test/test_cfg.py | 817cecbd05903876f5b300c0f4b38be6b0da5ac0 | [] | permissive | amkalsi/cmssw | 8ac5f481c7d7263741b5015381473811c59ac3b1 | ad0f69098dfbe449ca0570fbcf6fcebd6acc1154 | refs/heads/CMSSW_7_4_X | 2021-01-19T16:18:22.857382 | 2016-08-09T16:40:50 | 2016-08-09T16:40:50 | 262,608,661 | 0 | 0 | Apache-2.0 | 2020-05-09T16:10:07 | 2020-05-09T16:10:07 | null | UTF-8 | Python | false | false | 1,347 | py | import FWCore.ParameterSet.Config as cms
# CMSSW job definition: a process named "L1" running the FastSim L1 calo trigger.
process = cms.Process("L1")
# Producer plus the geometry/topology EventSetup records it needs.
process.load("FastSimulation.L1CaloTriggerProducer.fastl1calosim_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.CaloEventSetup.CaloGeometry_cfi")
process.load("Geometry.CaloEventSetup.CaloTopology_cfi")
# Input: a single RelVal ZTT sample file.
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('/store/relval/2008/6/6/RelVal-RelValZTT-1212543891-STARTUP-2nd-02/0000/40FA3C45-E533-DD11-9B17-000423D98C20.root')
)
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1000)
)
# Output: drop everything except the produced L1Extra collections.
process.Out = cms.OutputModule("PoolOutputModule",
    outputCommands = cms.untracked.vstring('drop *',
        'keep l1extraL1JetParticles_*_*_*',
        'keep l1extraL1EmParticles_*_*_*',
        'keep l1extraL1MuonParticles_*_*_*',
        'keep l1extraL1EtMissParticle_*_*_*',
        'keep l1extraL1ParticleMaps_*_*_*'),
    fileName = cms.untracked.string('test.root')
)
# Calo-tower constituents map needed by the producer.
process.CaloTowerConstituentsMapBuilder = cms.ESProducer("CaloTowerConstituentsMapBuilder",
    MapFile = cms.untracked.string('Geometry/CaloTopology/data/CaloTowerEEGeometric.map.gz')
)
process.p = cms.Path(process.fastL1CaloSim)
process.e = cms.EndPath(process.Out)
# NOTE(review): fileName was already 'test.root' in the OutputModule above;
# this reassignment is redundant but harmless.
process.Out.fileName = 'test.root'
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
e8940800e8c4feb508acd637a11a9f70573c15ce | a1ad2715e306fd4e7eaeda5348e00e1a363e7884 | /leetcode/hashmap.py | 17f3ac037e825715ccd3106d80f804bebe69a9a4 | [] | no_license | MayankMaheshwar/DS-and-Algo-solving | cef54a800b3e8a070a707f97b4f30fccaa17d5c6 | ac6ea8f880920242a55d40c747368d68cb6f7534 | refs/heads/master | 2022-12-07T07:55:08.380505 | 2022-12-05T09:32:14 | 2022-12-05T09:32:14 | 237,103,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | class Solution:
# @param A : list of integers
# @param B : list of integers
# @param C : list of integers
# @return a list of integers
def solve(self, A, B, C):
Hash1, Hash2, Hash3 = set(A), set(B), set(C)
res = set()
for i in Hash1:
if i in Hash2 or i in Hash3:
res.add(i)
for j in Hash2:
if j in Hash3:
res.add(j)
return sorted(list(res))
| [
"mayank.maheshwari625@gmail.com"
] | mayank.maheshwari625@gmail.com |
00ad4f40b3123901479230db421ad0cc1ba9fb83 | 3ae937aec30f413dc87a1a6398ea6ef95b90f58a | /Estrutura-Decisao/ex10.py | 372887472117d1248bf3d2232b86466610e03f97 | [] | no_license | lucas-sigma/Python-Brasil-Resposta-Exercicios | 74f53c2531bea03fb65fa9addf8106450edb5d5e | af2df5b7d8d013ca176817af5f7bfa08ba5e33d1 | refs/heads/master | 2020-03-24T22:53:36.317235 | 2018-11-23T19:43:24 | 2018-11-23T19:43:24 | 143,107,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # Faça um Programa que pergunte em que turno você estuda. Peça para digitar M-matutino ou V-Vespertino ou N- Noturno. Imprima a mensagem "Bom Dia!", "Boa Tarde!" ou "Boa Noite!" ou "Valor Inválido!", conforme o caso.
# Ask which shift the user studies in (M/V/N) and print the matching
# greeting; any other answer is rejected as invalid.
print('M - Matutino | V - Vespertino | N - Noturno')
turno = input('Digite o turno em que você estuda: ')
shift = turno.upper()
if shift == 'M':
    print('Bom Dia!')
elif shift == 'V':
    print('Boa Tarde!')
elif shift == 'N':
    print('Boa Noite!')
else:
    print('Valor Inválido!')
"lucasn.sigma@gmail.com"
] | lucasn.sigma@gmail.com |
92f8adefb2a72e066420f7f170628fd3531a481a | 8ae2dc044d056cb9a4b0cd25fbaed977288ba926 | /Python_OOP/car.py | 016554f5f0206353f5ff091f7fdd5e1875e00d1a | [] | no_license | februarypython/Alex_Green | 17a3c79d4e1eb751e6b5d76b2ab036506ba43a12 | ad72d761c33708d80200ee896e1d145fd74009c1 | refs/heads/master | 2021-04-30T05:06:31.439798 | 2018-04-03T20:54:02 | 2018-04-03T20:54:02 | 121,407,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | # Create a class called Car. In the__init__(), allow the user to specify the following attributes: price, speed, fuel, mileage.
# If the price is greater than 10,000, set the tax to be 15%. Otherwise, set the tax to be 12%.
# Create six different instances of the class Car. In the class have a method called display_all() that returns all the information
# about the car as a string. In your __init__(), call this display_all() method to display information about the car once the
# attributes have been defined.
# Car with price/speed/fuel/mileage that prints its full details
# (including its tax bracket) immediately on construction via displayall().
# NOTE: Python 2 code (print statements).
class Car(object):
    def __init__(self, price, speed, fuel, mileage):
        # price > 10000 puts the car in the 15% tax bracket, otherwise 12%
        # (see displayall below).
        self.price = price
        self.speed = speed      # top speed, printed in mph
        self.fuel = fuel        # free-form fuel-level description
        self.mileage = mileage  # fuel economy, printed in mpg
        self.displayall()       # side effect: print the summary at construction time
    def displayall(self):
        # Prints (does not return) a human-readable summary of this car.
        print "Price:", self.price
        print "Speed:", self.speed, "mph"
        print "Fuel:", self.fuel
        print "Mileage:", self.mileage, "mpg"
        if self.price > 10000:
            print "Tax: 0.15"
        else:
            print "Tax: 0.12"
        print "---------------"
# Six sample cars; each one prints its details as it is constructed,
# because Car.__init__ calls displayall().
car1 = Car(11000, 100, "full", 25)
car2 = Car(8000, 75, "empty", 15)
car3 = Car(14000, 85, "mostly full", 7000)
car4 = Car(200, 25, "none", 8)
car5 = Car(55000, 200, "full", 15)
car6 = Car(6500, 112, "mostly empty", 28)
| [
"alexkenta@gmail.com"
] | alexkenta@gmail.com |
e7f6ee71d68187d1c702e84d43bfe2bc4768f18f | 67e817ca139ca039bd9eee5b1b789e5510119e83 | /Linked_List/Swap_Nodes_in_Pairs.py | c36d59cada641e25326ec7b2bb48a3989fbbf1e3 | [] | no_license | dstch/my_leetcode | 0dc41e7a2526c2d85b6b9b6602ac53f7a6ba9273 | 48a8c77e81cd49a75278551048028c492ec62994 | refs/heads/master | 2021-07-25T21:30:41.705258 | 2021-06-06T08:58:29 | 2021-06-06T08:58:29 | 164,360,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: dstch
@license: (C) Copyright 2013-2019, Regulus Tech.
@contact: dstch@163.com
@file: Swap_Nodes_in_Pairs.py
@time: 2019/1/15 22:10
@desc: Given a linked list, swap every two adjacent nodes and return its head.
Example:
Given 1->2->3->4, you should return the list as 2->1->4->3.
"""
# Definition for singly-linked list.
class ListNode:
    """Node of a singly linked list."""
    def __init__(self, x):
        self.val = x      # payload value
        self.next = None  # successor node; None marks the tail
class Solution:
    def swapPairs(self, head):
        """
        :type head: ListNode
        :rtype: ListNode

        Swap every two adjacent nodes and return the new head,
        e.g. 1->2->3->4 becomes 2->1->4->3.
        """
        # Recursively swap the leading pair, then splice in the already
        # swapped remainder. Lists of length 0 or 1 are returned unchanged.
        if head is None or head.next is None:
            return head
        partner = head.next
        head.next = self.swapPairs(partner.next)
        partner.next = head
        return partner
if __name__ == '__main__':
    # Build the list 1 -> 2 -> 3 -> 4 and swap adjacent pairs.
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(3)
    head.next.next.next = ListNode(4)
    s = Solution()
    s.swapPairs(head)  # returned head (2 -> 1 -> 4 -> 3) is discarded here
    # expected order: [2, 1, 4, 3]
| [
"dstch@163.com"
] | dstch@163.com |
166322ebabb0c976ace6f2e0fbdbfd220d2d019c | 7d2c27662499f2c594c6f706c0d774955cd97ec9 | /tensorpack/dataflow/imgaug/paste.py | 73a44523297d5cd85063a71158e91e7d97d6b1d5 | [
"Apache-2.0"
] | permissive | RyannnXU/tensorpack | 8ce0d5166719879a6a947ec253170751f7f45c30 | b335a7baa00f578a5229315a3c8841efba602dcd | refs/heads/master | 2021-06-09T18:23:38.410559 | 2017-01-03T16:44:20 | 2017-01-03T16:44:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,035 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: paste.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
from .base import ImageAugmentor
from abc import abstractmethod
import numpy as np
__all__ = ['CenterPaste', 'BackgroundFiller', 'ConstantBackgroundFiller',
'RandomPaste']
class BackgroundFiller(object):
    """ Base class for all BackgroundFiller"""

    def fill(self, background_shape, img):
        """
        Return a proper background image of background_shape, given img

        :param background_shape: a shape of [h, w]
        :param img: an image
        :returns: a background image
        """
        return self._fill(background_shape, img)

    @abstractmethod
    def _fill(self, background_shape, img):
        # Subclasses implement the actual fill strategy.
        # NOTE(review): the class does not use ABCMeta as its metaclass, so
        # @abstractmethod is not enforced at instantiation time here -- it
        # serves as documentation only.
        pass
class ConstantBackgroundFiller(BackgroundFiller):
    """ Fill the background with a constant value """

    def __init__(self, value):
        """
        :param value: the constant used to fill the background.
        """
        self.value = value

    def _fill(self, background_shape, img):
        # Color images (ndim == 3) get a 3-channel canvas;
        # grayscale images (ndim == 2) get a single-channel one.
        assert img.ndim in [3, 2]
        canvas_shape = background_shape + (3,) if img.ndim == 3 else background_shape
        return np.zeros(canvas_shape) + self.value
class CenterPaste(ImageAugmentor):
    """
    Paste the image onto the center of a background canvas.
    """

    def __init__(self, background_shape, background_filler=None):
        """
        :param background_shape: shape [h, w] of the background canvas; must be
            strictly larger than the image in both dimensions.
        :param background_filler: a `BackgroundFiller` instance. Default to zero-filler.
        """
        if background_filler is None:
            background_filler = ConstantBackgroundFiller(0)
        # ImageAugmentor helper: stores the ctor arguments as attributes,
        # e.g. self.background_shape, self.background_filler.
        self._init(locals())

    def _augment(self, img, _):
        # Paste img at the center of a freshly filled canvas.
        img_shape = img.shape[:2]
        assert self.background_shape[0] > img_shape[0] and self.background_shape[1] > img_shape[1]

        background = self.background_filler.fill(
            self.background_shape, img)
        # Top-left corner so the image is centered on the canvas.
        y0 = int((self.background_shape[0] - img_shape[0]) * 0.5)
        x0 = int((self.background_shape[1] - img_shape[1]) * 0.5)
        background[y0:y0 + img_shape[0], x0:x0 + img_shape[1]] = img
        return background

    def _fprop_coord(self, coord, param):
        # Coordinate propagation is not implemented for this augmentor.
        raise NotImplementedError()
class RandomPaste(CenterPaste):
    """
    Randomly paste the image onto a background canvas
    """

    def _get_augment_params(self, img):
        # Draw a random top-left corner (x0, y0) such that the image fits
        # entirely inside the background canvas.
        img_shape = img.shape[:2]
        assert self.background_shape[0] > img_shape[0] and self.background_shape[1] > img_shape[1]

        y0 = self._rand_range(self.background_shape[0] - img_shape[0])
        x0 = self._rand_range(self.background_shape[1] - img_shape[1])
        return int(x0), int(y0)

    def _augment(self, img, loc):
        # loc is the (x0, y0) produced by _get_augment_params above.
        x0, y0 = loc
        img_shape = img.shape[:2]
        background = self.background_filler.fill(
            self.background_shape, img)
        background[y0:y0 + img_shape[0], x0:x0 + img_shape[1]] = img
        return background
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
c53c7a9ab52018aad680ab9e0754c63891fb0dea | 5a95daadcdf2eb4a9957be43d8231cd12615bda6 | /pipeline/metric_fuc.py | a956061df328cf72cd447b7c422fcca60b80b07a | [] | no_license | fendaq/cail2018_repo | 0fe7126ca052f57782aae6ce3863e6bad6833093 | 750c3846a678402220c8badd0c377deda277db6a | refs/heads/master | 2020-03-21T19:32:58.539058 | 2018-06-21T12:01:55 | 2018-06-21T12:01:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,623 | py | import mmap
import numpy as np
from keras.callbacks import Callback
from sklearn.metrics import f1_score
from tqdm import tqdm
from config import *
import os
def predict2half(predictions):
    """Binarize sigmoid outputs with a hard 0.5 threshold.

    Entries strictly greater than 0.5 become 1.0, everything else 0.0;
    the result is a float array of the same shape as the input.
    """
    return (predictions > 0.5).astype(np.float64)
def predict2tag(predictions):
    """Binarize multi-label scores, forcing at least one positive per row.

    Scores > 0.5 become 1; if a row has no score above 0.5, its maximal
    entries are promoted to 1. All remaining values are zeroed. The input
    array is left unmodified; a thresholded copy is returned.
    """
    tagged = np.array(predictions, copy=True)
    for row in tagged:
        row[row > 0.5] = 1
        if row.max() < 1:
            # no confident label in this row -> keep its best-scoring entries
            row[row == row.max()] = 1
    tagged[tagged < 1] = 0
    return tagged
class F1ScoreCallback(Callback):
    """Keras callback reporting macro/micro F1 after every epoch.

    Writes (macro_f1 + micro_f1) / 2 for the validation set into
    logs['avg_f1_score_val'] so checkpointing / early stopping can key on
    it, and optionally reports the same scores on an extra test set.
    """

    def __init__(self, predict_batch_size=1024, include_on_batch=False,data_test=None):
        super(F1ScoreCallback, self).__init__()
        self.predict_batch_size = predict_batch_size
        self.include_on_batch = include_on_batch  # also emit the metric key per batch
        self.data_test=data_test;  # optional (x_test, y_test) pair

    def on_batch_begin(self, batch, logs={}):
        pass

    def on_train_begin(self, logs={}):
        # Register the metric name so Keras displays/records it.
        if not ('avg_f1_score_val' in self.params['metrics']):
            self.params['metrics'].append('avg_f1_score_val')

    def on_batch_end(self, batch, logs={}):
        if (self.include_on_batch):
            # Placeholder only -- the real score is computed once per epoch.
            logs['avg_f1_score_val'] = float('-inf')

    def on_epoch_end(self, epoch, logs={}):
        logs['avg_f1_score_val'] = float('-inf')
        if (self.validation_data):
            # validation_data[0] is the model input, [1] the multi-hot labels.
            predict = self.model.predict(self.validation_data[0],
                                         batch_size=self.predict_batch_size)
            # NOTE(review): validation uses a plain 0.5 threshold (predict2half)
            # while the test set below uses predict2tag, which forces at least
            # one label per row -- confirm this asymmetry is intended.
            y_predict = predict2half(predict)
            f1 = f1_score(self.validation_data[1], y_predict, average='macro')
            print("macro f1_score %.4f " % f1)
            f2 = f1_score(self.validation_data[1], y_predict, average='micro')
            print("micro f1_score %.4f " % f2)
            avgf1 = (f1 + f2) / 2
            # print("avg_f1_score %.4f " % (avgf1))
            logs['avg_f1_score_val'] = avgf1
        if(self.data_test):
            predict = self.model.predict(self.data_test[0],
                                         batch_size=self.predict_batch_size)
            y_predict = predict2tag(predict)
            f1 = f1_score(self.data_test[1], y_predict, average='macro')
            print("test macro f1_score %.4f " % f1)
            f2 = f1_score(self.data_test[1], y_predict, average='micro')
            print("test micro f1_score %.4f " % f2)
            avgf1 = (f1 + f2) / 2
            print("test avg_f1_score %.4f " % (avgf1))
            logs['avgf1_test'] = avgf1
def get_num_lines(file_path):
    """Return the number of lines in *file_path*.

    Uses a read-only mmap so huge embedding files are counted without
    loading them into memory. Fixes of the original version: the file
    handle and the map are now always closed, and the file is opened
    read-only ("r" + ACCESS_READ) instead of "r+", so read-only files work.
    """
    lines = 0
    with open(file_path, "r") as fp:
        buf = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
        try:
            # readline() returns b'' only at EOF, so this counts every line,
            # including a final line without a trailing newline.
            while buf.readline():
                lines += 1
        finally:
            buf.close()
    return lines
def get_embedding_matrix(word_index, Emed_path, Embed_npy):
    """Build (and cache) the embedding matrix for a tokenizer's word index.

    :param word_index: dict mapping word -> integer index (from the tokenizer)
    :param Emed_path: path of a text embedding file ("word v1 v2 ... vN" lines)
    :param Embed_npy: path of the .npy cache; returned directly if it exists
    :returns: ndarray of shape (MAX_FEATURES, embedding_dims)

    MAX_FEATURES and embedding_dims come from ``config`` (module-level import).
    """
    # Fast path: reuse a previously saved matrix.
    if (os.path.exists(Embed_npy)):
        return np.load(Embed_npy)
    print('Indexing word vectors')
    embeddings_index = {}
    file_line = get_num_lines(Emed_path)
    print('lines ', file_line)
    with open(Emed_path, encoding='utf-8') as f:
        for line in tqdm(f, total=file_line):
            values = line.split()
            # Skip malformed lines with fewer floats than embedding_dims.
            if (len(values) < embedding_dims):
                print(values)
                continue
            # Multi-token words: everything before the trailing floats is the word.
            word = ' '.join(values[:-embedding_dims])
            coefs = np.asarray(values[-embedding_dims:], dtype='float32')
            embeddings_index[word] = coefs
    f.close()
    print('Total %s word vectors.' % len(embeddings_index))
    print('Preparing embedding matrix')
    nb_words = MAX_FEATURES  # min(MAX_FEATURES, len(word_index))
    all_embs = np.stack(embeddings_index.values())
    print(all_embs.shape)
    emb_mean, emb_std = all_embs.mean(), all_embs.std()
    # Words missing from the embedding file keep a random vector drawn to
    # match the empirical mean/std of the known vectors.
    embedding_matrix = np.random.normal(loc=emb_mean, scale=emb_std, size=(nb_words, embedding_dims))
    # embedding_matrix = np.zeros((nb_words, embedding_dims))
    count = 0
    for word, i in tqdm(word_index.items()):
        if i >= MAX_FEATURES:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
            count += 1
    np.save(Embed_npy, embedding_matrix)
    print('Null word embeddings: %d' % (nb_words - count))
    print('not Null word embeddings: %d' % count)
    print('embedding_matrix shape', embedding_matrix.shape)
    # print('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))
    return embedding_matrix
def judger(label_true, y_predict):
    """Average imprisonment-term score (CAIL-2018 style) over all samples.

    :param label_true: (l1, l2, l3) arrays of shape (n, 1): true term in
        years, true death-penalty flag, true life-imprisonment flag.
    :param y_predict: (p1, p2, p3) arrays of the same shapes: predicted term
        and the two predicted flag probabilities.
    :returns: mean per-sample score in [0, 1] (0.0 for empty input).

    Side effect (unchanged from before): p2 and p3 are thresholded at 0.5
    in place.
    """
    l1, l2, l3 = label_true
    p1, p2, p3 = y_predict
    p2[p2 > 0.5] = 1
    p2[p2 < 0.5] = 0
    p3[p3 > 0.5] = 1
    p3[p3 < 0.5] = 0
    # BUG FIX: the loop (and the final average) previously used
    # len(y_predict), which is always 3 (the number of output heads), so
    # only the first three samples were ever scored. Score every sample.
    n_samples = len(p1)
    if n_samples == 0:
        return 0.0
    result = 0
    for i in range(n_samples):
        yp = round(p1[i][0])  # predicted term, rounded to whole years
        dp = p2[i][0]         # predicted death-penalty flag (0/1)
        lp = p3[i][0]         # predicted life-imprisonment flag (0/1)
        yt = l1[i][0]
        dt = l2[i][0]
        lt = l3[i][0]
        sc = 0
        if dt == 1:
            # Death penalty: full credit only if predicted as such.
            if dp == 1:
                sc = 1
        elif lt == 1:
            # Life imprisonment: full credit only if predicted as such.
            if lp == 1:
                sc = 1
        else:
            # Fixed-term: partial credit by log-distance between terms.
            v = abs(np.log(yt + 1) - np.log(yp + 1))
            if v <= 0.2:
                sc = 1
            elif v <= 0.4:
                sc = 0.8
            elif v <= 0.6:
                sc = 0.6
            elif v <= 0.8:
                sc = 0.4
            elif v <= 1.0:
                sc = 0.2
            else:
                sc = 0
        result += sc
    return result / n_samples
class ImprisonCallback(Callback):
    """Keras callback scoring imprisonment predictions after each epoch.

    Runs `judger` on the three validation outputs (term, death flag, life
    flag) and stores the score in logs['avg_f1_score_val'].
    NOTE(review): the key name is reused from F1ScoreCallback even though the
    value is a judger score, not an F1 -- confirm downstream consumers
    (checkpointing, early stopping) expect this key.
    """

    def __init__(self, predict_batch_size=1024, include_on_batch=False):
        super(ImprisonCallback, self).__init__()
        self.predict_batch_size = predict_batch_size
        self.include_on_batch = include_on_batch  # also emit the metric key per batch

    def on_batch_begin(self, batch, logs={}):
        pass

    def on_train_begin(self, logs={}):
        # Register the metric name so Keras displays/records it.
        if not ('avg_f1_score_val' in self.params['metrics']):
            self.params['metrics'].append('avg_f1_score_val')

    def on_batch_end(self, batch, logs={}):
        if (self.include_on_batch):
            # Placeholder only -- the real score is computed once per epoch.
            logs['avg_f1_score_val'] = float('-inf')

    def on_epoch_end(self, epoch, logs={}):
        logs['avg_f1_score_val'] = float('-inf')
        if (self.validation_data):
            y_predict = self.model.predict(self.validation_data[0],
                                           batch_size=self.predict_batch_size)
            # validation_data[1..3] are the three label arrays (term, death, life).
            label = self.validation_data[1], self.validation_data[2], self.validation_data[3]
            logs['avg_f1_score_val'] = judger(label, y_predict)
| [
"chongjiu.jin@tinno.com"
] | chongjiu.jin@tinno.com |
2c3be747e324140bce05946aeaa349adbce9a3a5 | 539e4522a3a47b0234a3972b633ca8d55f0c71ec | /data2csv | 40a8ad623e8b66de0133d90014f58c748f1d6b60 | [
"MIT"
] | permissive | charnley/data2csv | a060c2fa4d5a239e67dd95050bc73b13f6853563 | ac8b5516b0932f444203d17a270217d827633288 | refs/heads/master | 2020-07-03T21:50:51.715289 | 2018-06-15T20:43:45 | 2018-06-15T20:43:45 | 74,230,997 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,688 | #!/usr/bin/env python
from __future__ import print_function
import ConfigParser
import sys
import os
import subprocess
import re
import numpy as np
def eprint(*args, **kwargs):
    """print() that writes to stderr, keeping stdout clean for the CSV output."""
    print(*args, file=sys.stderr, **kwargs)
def get_floats(shell_cmd):
    """ Return all the floats for each line

    Runs *shell_cmd* and returns a list of lists: one inner list per output
    line that contained at least one float. Returns the sentinel string
    'nan' when the command produced no stdout at all (callers test this
    with isinstance(..., basestring) -- Python 2 code).
    """
    # SECURITY NOTE(review): shell_cmd is executed with shell=True, so shell
    # metacharacters in structure names / ini commands are interpreted.
    # Acceptable for a trusted local tool; do not feed untrusted input.
    FNULL = open(os.devnull, 'w')
    string = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE, stderr=FNULL).communicate()[0]
    string = string.split('\n')
    # Signed decimals with an optional exponent tail, e.g. -1.23, 4.5e-6.
    regex = r'[\-]*\d+\.\d+[eE\-]*\d*'

    if string == ['']:
        # No stdout: report on stderr and return the 'nan' sentinel.
        eprint('nan:', shell_cmd)
        return 'nan'

    floats = []

    for line in string:
        numbers = re.findall(regex, line)
        if len(numbers) > 0:
            floats.append([])
            for number in numbers:
                try:
                    number = float(number)
                except ValueError:
                    # Regex matched something float() rejects (e.g. '1.2e-');
                    # store NaN instead of aborting the whole scan.
                    number = float("nan")
                floats[-1].append(number)
            # floats.append([float(number) for number in numbers])

    return floats
def config_section_map(section, Config):
    """Return ``{option: value}`` for one section of a ConfigParser.

    :param section: name of the section to read
    :param Config: a ConfigParser-like object (needs .options() and .get())
    :returns: dict of option name -> string value; options whose read raised
        are reported on stderr and stored as None.
    """
    dict1 = {}
    options = Config.options(section)
    for option in options:
        try:
            dict1[option] = Config.get(section, option)
            if dict1[option] == -1:
                # NOTE(review): DebugPrint is not defined anywhere in this
                # file, and ConfigParser.get returns strings (never -1), so
                # this branch is unreachable in practice.
                DebugPrint("skip: %s" % option)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Only real errors are handled now.
            eprint("exception on %s!" % option)
            dict1[option] = None
    return dict1
if __name__ == '__main__':
    # CLI: data2csv <ini file> <file listing structure names>
    # For every section in the ini file, run its grep/cmd on each structure's
    # output file and print a CSV table (rows: structures, cols: sections)
    # to stdout. Python 2 script (ConfigParser, basestring).

    args = sys.argv[1:]

    usage = """
get_data <ini structure> <list of molecules>

"""

    if len(args) < 2:
        eprint(usage)
        quit()

    ini_file = args[0]
    structures_file = args[1]

    config = ConfigParser.ConfigParser()
    config.read(ini_file)
    sections = config.sections()

    # One structure name per line; strip the trailing newline.
    structures = []
    f = open(structures_file)
    for line in f:
        structures.append(line.replace("\n", ""))
    f.close()

    # data[structure] collects one value per ini section, in section order.
    data = {}
    for structure in structures:
        data[structure] = []

    # TODO structures

    for i, section in enumerate(sections):

        secdic = config_section_map(section, config)

        # A section provides either a 'grep' pattern or a full 'cmd'
        # template; 'cmd' (checked second) wins when both are present.
        try:
            grep_cmd = secdic['grep']
        except KeyError:
            grep_cmd = ""

        try:
            cmd_cmd = secdic['cmd']
        except KeyError:
            cmd_cmd = ""

        folder = secdic['folder']
        if folder[-1] != "/":
            folder += "/"

        extension = secdic['extension']
        unit = secdic['unit']
        # Which float to pick from the command output: row first, then column.
        indexcol = int(secdic['indexcol'])
        indexrow = int(secdic['indexrow'])

        if grep_cmd != "":
            cmd = grep_cmd + " " + "{:}" + "." + extension

        if cmd_cmd != "":
            cmd = cmd_cmd
            cmd = cmd.replace("{:}", "{:}."+extension)

        for structure in structures:
            floats = get_floats(cmd.format(folder+structure))

            # get_floats returns the sentinel string 'nan' when the command
            # produced no output at all (see get_floats).
            if isinstance(floats, basestring):
                value = "nan"
            else:
                try:
                    value = floats[indexrow][indexcol]
                except IndexError:
                    eprint("nan:", cmd)
                    eprint(floats)
                    eprint()
                    value = "nan"
                    # exit('Wrong row,col index in ini file. Does not match output.')

            data[structure].append(value)

        # change the name of section
        if unit != "kcal/mol":
            sections[i] += " [" + unit + "]"

    # Header row: "molecule, <section 1>, <section 2>, ...".
    strprint = "{0}"
    for x in range(len(sections)):
        strprint += ", {"+str(x+1)+"}"

    print(strprint.format("molecule", *sections))

    for structure in structures:
        print(strprint.format(structure, *data[structure]))
"jimmy@charnley.dk"
] | jimmy@charnley.dk | |
f41309b669d164c057a8575be6894c0ae30aa544 | eb4f61315e8f0b139d0af3a95c59a2907845ebfd | /7-8(am)/corePython/control stmtss/cakediscount.py | 177bcc1cf85eb01053c25aba35836b2d73d33abf | [] | no_license | vamsikrishna6668/python-core | c1d368792fa6f89bf51ae690d20b45cb5ae0fb98 | b66ad0f6ad0deffbc350d5a7996f23220940e187 | refs/heads/master | 2020-04-02T10:00:39.641928 | 2018-10-23T12:19:14 | 2018-10-23T12:19:14 | 154,320,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | veg_cake=int(input('No of Veg cakes:'))
nonveg_cake=int(input('No of nonVeg cakes:'))
vegcake_cost=300
nonvegcake_cost=350
vegcake_discount=(veg_cake*vegcake_cost)*0.05
nonvegcake_discount=(nonveg_cake*nonvegcake_cost)*0.04
total_vegcake_cost=(veg_cake*vegcake_cost)-vegcake_discount
total_nonvegcake_cost=(nonveg_cake*nonvegcake_cost)-nonvegcake_discount
if total_vegcake_cost>=600 and total_nonvegcake_cost>=700:
print('The no of items of veg cake:',veg_cake)
print('The discount got for a veg cake:',vegcake_discount)
print('The total amount for the veg cake cost:',total_vegcake_cost)
print('The no of items of non-veg cake cost',nonveg_cake)
print('The discount for the non-veg cake cost',nonvegcake_discount)
print('The total amount for the non-veg cake cost:',total_nonvegcake_cost)
else:
print('The no of items of a veg cake:',veg_cake)
print('The total cost for a veg cake:',total_vegcake_cost)
print('The no of items of a non -veg cake cost:',nonveg_cake)
print('The total cost for a non veg cake:',total_nonvegcake_cost)
print('The Welcome for ordering in uber eats visit once again') | [
"vamsikrishna6668@gmail.com"
] | vamsikrishna6668@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.