Columns (name: type, observed range or class count):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 3 to 288)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 112)
- license_type: string (2 classes)
- repo_name: string (length 5 to 115)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (684 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
- github_id: int64 (4.92k to 681M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
- gha_language: string (147 classes)
- src_encoding: string (25 classes)
- language: string (1 class)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (128 to 12.7k)
- extension: string (142 classes)
- content: string (length 128 to 8.19k)
- authors: list (length 1)
- author_id: string (length 1 to 132)
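Each record below pairs what appear to be Software Heritage-style identifiers (blob, directory, snapshot, revision, visit date) and GitHub activity metadata with the raw source file itself in `content`. As a rough sketch of how rows with this schema could be consumed, assuming the collection is published through the Hugging Face `datasets` library (the dataset id below is a placeholder, not taken from this page):

```python
from datasets import load_dataset

# Placeholder dataset id; substitute the real identifier for this collection.
ds = load_dataset("org/python-source-subset", split="train", streaming=True)

for row in ds:
    # Repository and file metadata, plus the file contents themselves.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first part of the source file
    break
```

The sample records follow, one column value at a time in the order listed above, with `|` separating fields.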
6f2d0035b27900c2f7fc843139b6b74c96b2bf51
|
ae2c2bdf3aba6b5bd0753778be1991f968e689ec
|
/dotbiz/migrations/0031_uploadedfile_file_name.py
|
86e1abecbb628ab31cbfd47120129aac6079d12d
|
[] |
no_license
|
bintangx1902/clone_biz
|
7cd5ce56073ebd7627c93bb1608de10177a33dfd
|
80ee7b04aaff167001f5cbd9307bd3f487d3919c
|
refs/heads/main
| 2023-08-24T10:53:18.590655
| 2021-10-25T06:32:28
| 2021-10-25T06:32:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
# Generated by Django 3.1.4 on 2020-12-30 15:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dotbiz', '0030_uploadedfile'),
]
operations = [
migrations.AddField(
model_name='uploadedfile',
name='file_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
[
"60544749+bintangx1902@users.noreply.github.com"
] |
60544749+bintangx1902@users.noreply.github.com
|
6bbddf9120b6877356513aa49b8ece59f6e330f0
|
f48778b953c76cfb0039735094eb2246d6e56177
|
/graphs/enhanced_qos/experiment_rssi/plot_association.py
|
a8ac4e20efc2706cfee98114ffb05ca5c8729634
|
[] |
no_license
|
phisolani/wifi_monitoring
|
36826c52a00cf79d71f93c4acd0dc8b6d56ff934
|
06c62e625d395e4f179b9fef95318a746fee1786
|
refs/heads/master
| 2021-06-07T02:53:59.832752
| 2021-04-09T09:36:37
| 2021-04-09T09:36:37
| 142,306,383
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
#!/usr/bin/env python
__author__ = "Daniel Kulenkamp"
__copyright__ = "Copyright 2020, QoS-aware WiFi Slicing"
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Daniel Kulenkamp"
__email__ = "dkulenka@asu.edu"
__status__ = "Prototype"
" Python script for testing association plot"
from graphs.enhanced_qos.association_graph import *
make_association_graph(
experiment_path='workload/experiment_2/gomez/association/',
filenames=['ap_1_association', 'ap_2_association', 'ap_3_association']
)
|
[
"pedroisolani@gmail.com"
] |
pedroisolani@gmail.com
|
da866e6ea6f4eb4518bbc5ec3a0d9a1479af131e
|
c3c8b7779381c37a97c7176947f175a1168b6149
|
/instrument/generate_port_agent_config.py
|
95920fcfc744e6aff2b2d762f9af8f11f88e0f0a
|
[] |
no_license
|
ronkyo/ooi-tools
|
9f7e51dc6fbc8fc5d77d91d3523f4342e957f8e1
|
36034ae3f950ff7c585363c6cc07f8a51549185d
|
refs/heads/master
| 2021-01-21T03:54:24.522565
| 2015-04-27T23:22:23
| 2015-04-28T20:52:01
| 25,649,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,645
|
py
|
#!/usr/bin/env python
"""
generate_port_agent_config.py
Generate port agent configuration files and a supervisord file from CSV input
Usage:
generate_port_agent_config.py <path> <name> <csv_file>
"""
import shutil
import jinja2
import os
import docopt
from csv import DictReader
loader = jinja2.FileSystemLoader(searchpath="templates")
env = jinja2.Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)
pa_template = env.get_template('pa_config.jinja')
super_template = env.get_template('supervisord.jinja')
def prep_dir(path, name):
configdir = os.path.join(path, 'configs')
configfile = os.path.join(path, '%s.conf' % name)
if os.path.exists(path):
if not os.path.isdir(path):
raise Exception('path matches existing file, looking for directory')
if os.path.exists(configdir):
shutil.rmtree(configdir)
if os.path.exists(configfile):
os.remove(configfile)
os.makedirs(configdir)
def create_pa_dict(csv_file):
pa_dict = {}
with open(csv_file) as fh:
for row in DictReader(fh):
name = create_pa_name(row)
if name is not None:
pa_dict[name] = row
return pa_dict
def create_pa_name(pa_dict):
if any([pa_dict['instrument'] == '', pa_dict['refdes'] == '']):
return None
return '%s_%s' % (pa_dict['instrument'], pa_dict['refdes'])
def create_pa_config(pa_dict):
return pa_template.render(**pa_dict)
def create_pa_configs(pa_dict):
pa_configs = {}
for name in pa_dict:
config = create_pa_config(pa_dict[name])
pa_configs[name] = config
return pa_configs
def create_supervisord_config(name, pa_dict):
groups = {}
for name in pa_dict:
each = pa_dict[name]
group = each['group']
groups.setdefault(group, []).append(name)
return super_template.render(name=name, groups=groups)
def write(path, name, supervisord_config, pa_configs):
with open(os.path.join(path, '%s.conf' % name), 'wb') as fh:
fh.write(supervisord_config)
for pa_name in pa_configs:
with open(os.path.join(path, 'configs', '%s.conf' % pa_name), 'wb') as fh:
fh.write(pa_configs[pa_name])
def main():
options = docopt.docopt(__doc__)
path = options['<path>']
name = options['<name>']
csv_file = options['<csv_file>']
prep_dir(path, name)
pa_dict = create_pa_dict(csv_file)
pa_configs = create_pa_configs(pa_dict)
supervisord_config = create_supervisord_config(name, pa_dict)
write(path, name, supervisord_config, pa_configs)
if __name__ == '__main__':
main()
|
[
"petercable@gmail.com"
] |
petercable@gmail.com
|
5b545d841152235d7e1f5f4957a3721084e203b0
|
8d6f73af697f56834e8cef88876be78a360190d9
|
/flaskblog/users/forms.py
|
73112b0e9cadba5151b9b28e081593b41fb3d9b2
|
[] |
no_license
|
Sukhrobjon/flask-blog
|
7a837f3454263f69935d88cc3c73858a2848c22c
|
357b311efd32f3537fce9f84ecbadd0835aac524
|
refs/heads/master
| 2020-04-18T10:49:58.829788
| 2019-02-25T23:28:14
| 2019-02-25T23:28:14
| 167,480,195
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,654
|
py
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flask_login import current_user
from flaskblog.models import User
class RegistrationForm(FlaskForm):
username = StringField('Username',
validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_username(self, username):
'''
        Raise ValidationError if username already exists in the database
'''
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError(
'This username is taken! Please choose different one')
def validate_email(self, email):
'''
Raise ValidationError if email exists in database already
'''
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError(
'This email is taken! Please choose different one')
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
class UpdateAccountForm(FlaskForm):
username = StringField('Username',
validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email', validators=[DataRequired(), Email()])
picture = FileField('Update profile picture', validators=[
FileAllowed(['jpg', 'png'])])
submit = SubmitField('Update Account')
def validate_username(self, username):
'''
        Raise ValidationError if username already exists in the database
'''
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError(
'This username is taken! Please choose different one')
def validate_email(self, email):
'''
        Raise ValidationError if email already exists in the database
'''
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError(
'This email is taken! Please choose different one')
class RequestResetForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
'''
        Raise ValidationError if email already exists in the database
'''
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError(
'There is no account associated with this email. You must register first.')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
|
[
"sgolibbo@mail.ccsf.edu"
] |
sgolibbo@mail.ccsf.edu
|
bd65812cfdd6ab3cd0e5706f3851f79bf3fe9fda
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02803/s706884747.py
|
68d1a7bdccac1d1136b3fdc1c6c24530314347e6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,575
|
py
|
import numpy as np
H, W = map(int, input().split())
map_lis = [input() for i in range(H)]
ans = 0
def prob(x,y,h,w):
if 0<=x<h and 0<=y<w:
return True
else:
return False
def count(x,y,lis):
if not lis[x][y]:
return True
else:
return False
def maze(x,y,lis):
if lis[x][y] == ".":
return True
else:
return False
for i in range(H):
for j in range(W):
k = 0
count_lis = np.zeros((H,W))
num_lis = [[] for i in range(1000)]
if maze(i,j,map_lis):
count_lis[i][j] = 1
num_lis[0].append([i,j])
while True:
for l in num_lis[k]:
if prob(l[0]-1,l[1],H,W) and count(l[0]-1,l[1],count_lis) and maze(l[0]-1,l[1],map_lis):
num_lis[k+1].append([l[0]-1,l[1]])
count_lis[l[0]-1][l[1]] = 1
if prob(l[0],l[1]-1,H,W) and count(l[0],l[1]-1,count_lis) and maze(l[0],l[1]-1,map_lis):
num_lis[k+1].append([l[0],l[1]-1])
count_lis[l[0]][l[1]-1] = 1
if prob(l[0]+1,l[1],H,W) and count(l[0]+1,l[1],count_lis) and maze(l[0]+1,l[1],map_lis):
num_lis[k+1].append([l[0]+1,l[1]])
count_lis[l[0]+1][l[1]] = 1
if prob(l[0],l[1]+1,H,W) and count(l[0],l[1]+1,count_lis) and maze(l[0],l[1]+1,map_lis):
num_lis[k+1].append([l[0],l[1]+1])
count_lis[l[0]][l[1]+1] = 1
new_ans = 0
for m in num_lis[1:]:
if m != []:
new_ans += 1
if m == []:
break
ans = max(ans, new_ans)
k += 1
if num_lis[k] == []:
break
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
098e437846ce6912a30c7c1604db09a0b6f8e608
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/87/usersdata/190/59803/submittedfiles/contido.py
|
665f253469da7a4df1c7e7b19b10513a1ca38f72
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# -*- coding: utf-8 -*-
n=int(input('digite o numero de elementos de a:'))
m=int(input('digite o numero de elementos de b:'))
a=[]
b=[]
for i in range(1,n+1,1):
valor=float(input('digite o valor:'))
a.append(valor)
print(a)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
7f66de5ad7a64720048a36fbb2fe4e9553f4d762
|
cb57a9ea4622b94207d12ea90eab9dd5b13e9e29
|
/lc/python/0404_sum_of_left_leaves.py
|
8933e5d4e7cc50241ba77f57829793df8661ba7e
|
[] |
no_license
|
boknowswiki/mytraning
|
b59585e1e255a7a47c2b28bf2e591aef4af2f09a
|
5e2f6ceacf5dec8260ce87e9a5f4e28e86ceba7a
|
refs/heads/master
| 2023-08-16T03:28:51.881848
| 2023-08-10T04:28:54
| 2023-08-10T04:28:54
| 124,834,433
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,263
|
py
|
# binary tree and dfs
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def sumOfLeftLeaves(self, root: Optional[TreeNode]) -> int:
if not root:
return 0
self.ret = 0
self.helper(root.left, True)
self.helper(root.right, False)
return self.ret
def helper(self, node, is_left):
if not node:
return
if is_left and node.left is None and node.right is None:
self.ret += node.val
return
self.helper(node.left, True)
self.helper(node.right, False)
return
# binary tree and bfs
# time O(n)
# space O(n)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def sumOfLeftLeaves(self, root: Optional[TreeNode]) -> int:
ret = 0
if not root:
return ret
q = collections.deque([root])
while q:
cur = q.popleft()
if cur.left and cur.left.left is None and cur.left.right is None:
ret += cur.left.val
if cur.left:
q.append(cur.left)
if cur.right:
q.append(cur.right)
return ret
# binary tree and dfs
# time O(n)
# space O(1)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def sumOfLeftLeaves(self, root: Optional[TreeNode]) -> int:
self.ret = 0
self.helper(root, False)
return self.ret
def helper(self, node, is_left):
if not node:
return
if node.left == None and node.right == None and is_left:
self.ret += node.val
self.helper(node.left, True)
self.helper(node.right, False)
return
|
[
"noreply@github.com"
] |
boknowswiki.noreply@github.com
|
a5693d14d44e83141c8b81ec18c896a2cc1e4a0d
|
5842d17a6c85f7e135609fc9e9951978ad42e6a5
|
/app/migrations/0015_userprofile_tz.py
|
cfecfa463d489995a488b33d945bc7b61d423689
|
[] |
no_license
|
projectdata8thsem/lctva
|
ca95f56b8f3ceaffe6ef724fddcde2f3edb37e30
|
71522cd65451cc52f65eed2033f4ae2bac52040f
|
refs/heads/master
| 2020-04-07T06:03:45.380412
| 2016-10-11T20:05:34
| 2016-10-11T20:05:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-07 03:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0014_auto_20160102_1636'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='tz',
field=models.CharField(blank=True, max_length=100),
),
]
|
[
"jtaddei@gmail.com"
] |
jtaddei@gmail.com
|
e647def5e76581edc66f86e284a7583b0c68c1e2
|
c32809922bbdb8bef698f9979999187a30823d8f
|
/setup.py
|
a5da1976c5aad387385a747d3276c2692a26520b
|
[
"MIT"
] |
permissive
|
mattdennewitz/pat
|
6f98cc19ed7d647dac27ffee6d434fac0be63107
|
782ebf29a75ab9b35d45fc11d2eb7a50fa2f611b
|
refs/heads/master
| 2020-04-01T18:45:35.721013
| 2016-07-27T05:35:20
| 2016-07-27T05:35:20
| 64,279,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
import pip.download
from pip.req import parse_requirements
reqs_txt = os.path.join(os.path.dirname(__file__), 'requirements.txt')
pip_reqs = parse_requirements(reqs_txt, session=pip.download.PipSession())
pip_reqs = [str(obj.req) for obj in pip_reqs]
setup(
name = 'pat',
version = '0.1.0',
description = (
'CLI for testing Xpath and CSS queries on HTML documents'
),
author = 'Matt Dennewitz',
author_email = 'mattdennewitz@gmail.com',
url = 'https://github.com/mattdennewitz/pat',
install_requires = pip_reqs,
include_package_data=True,
packages = find_packages(),
scripts = [
'bin/pat'
],
)
|
[
"mattdennewitz@gmail.com"
] |
mattdennewitz@gmail.com
|
0182379e6cb385585af008abaae27b6f33a00cb1
|
cbd2f3db68ec311e0d40f5281111bc3a8f200115
|
/modelo/bookings/api/urls.py
|
ef95c12cefbd96832bd4ab68b6f9dfb1c5b4e6b3
|
[] |
no_license
|
CoutinhoElias/fullcalendar
|
fceff2dfae07508f02ceabf0247bac2da4528224
|
4553828cd1dd20f15a139bf535060b9cee27d3f1
|
refs/heads/master
| 2020-03-09T20:28:53.020188
| 2018-04-25T20:35:37
| 2018-04-25T20:35:37
| 128,986,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
from django.conf.urls import url
from django.contrib import admin
from modelo.bookings.api.serializers import BookingListFeriadoSerializer
app_name = 'bookings-api'
from .views import (
PostListAPIView,
PostListFeriadoAPIView,
PostDetailAPIView,
PostDeleteAPIView,
PostUpdateAPIView,
PostCreateAPIView,
PostList2APIView, PostUpdate2APIView)
urlpatterns = [
url(r'^$', PostListAPIView.as_view(), name='listapi'),
url(r'^(?P<pk>\d+)/$',PostDetailAPIView.as_view(),name='detail'),
url(r'^(?P<pk>\d+)/delete/$',PostDeleteAPIView.as_view(),name='delete'),
url(r'^(?P<pk>\d+)/edit/$',PostUpdateAPIView.as_view(),name='update'),
url(r'^create/$', PostCreateAPIView.as_view(), name='create'),
url(r'^feriado/$', PostListFeriadoAPIView.as_view(), name='feriado'),
#url(r'^(?P<slug>[\W-]+)/$',PostDetailAPIView.as_view(),name='detail')
url(r'^list/$', PostList2APIView.as_view(), name='listapi2'),
url(r'^(?P<pk>\d+)/edita/$', PostUpdate2APIView.as_view(), name='update2'),
]
|
[
"coutinho.elias@gmail.com"
] |
coutinho.elias@gmail.com
|
0c7322d41077b64b7c875f3f2f1d39845ebd37f3
|
d93159d0784fc489a5066d3ee592e6c9563b228b
|
/Calibration/EcalAlCaRecoProducers/python/ALCARECOEcalESAlign_Output_cff.py
|
4dd5cc22d3f4dbedd90a21eaf5229cad92c545c6
|
[] |
permissive
|
simonecid/cmssw
|
86396e31d41a003a179690f8c322e82e250e33b2
|
2559fdc9545b2c7e337f5113b231025106dd22ab
|
refs/heads/CAallInOne_81X
| 2021-08-15T23:25:02.901905
| 2016-09-13T08:10:20
| 2016-09-13T08:53:42
| 176,462,898
| 0
| 1
|
Apache-2.0
| 2019-03-19T08:30:28
| 2019-03-19T08:30:24
| null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
import FWCore.ParameterSet.Config as cms
OutALCARECOEcalESAlign_noDrop = cms.PSet(
# put this if you have a filter
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('pathALCARECOEcalESAlign')
),
outputCommands = cms.untracked.vstring(
'keep ESDCCHeaderBlocksSorted_ecalPreshowerDigis_*_*',
'keep ESDigiCollection_ecalPreshowerDigis_*_*',
'keep ESKCHIPBlocksSorted_ecalPreshowerDigis_*_*',
'keep SiPixelClusteredmNewDetSetVector_ecalAlCaESAlignTrackReducer_*_*',
'keep SiStripClusteredmNewDetSetVector_ecalAlCaESAlignTrackReducer_*_*',
'keep TrackingRecHitsOwned_ecalAlCaESAlignTrackReducer_*_*',
'keep recoTrackExtras_ecalAlCaESAlignTrackReducer_*_*',
'keep recoTracks_ecalAlCaESAlignTrackReducer_*_*',
'keep recoBeamSpot_offlineBeamSpot_*_*'
)
)
import copy
OutALCARECOEcalESAlign=copy.deepcopy(OutALCARECOEcalESAlign_noDrop)
OutALCARECOEcalESAlign.outputCommands.insert(0,"drop *")
|
[
"marco.musich@cern.ch"
] |
marco.musich@cern.ch
|
0f277c60902e8603255da3dbd08294738084831d
|
4fbd844113ec9d8c526d5f186274b40ad5502aa3
|
/algorithms/python3/minimum_area_rectangle_ii.py
|
708813731bf76371e270627e1c38eb0d4c3a71c5
|
[] |
no_license
|
capric8416/leetcode
|
51f9bdc3fa26b010e8a1e8203a7e1bcd70ace9e1
|
503b2e303b10a455be9596c31975ee7973819a3c
|
refs/heads/master
| 2022-07-16T21:41:07.492706
| 2020-04-22T06:18:16
| 2020-04-22T06:18:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given a set of points in the xy-plane, determine the minimum area of any rectangle formed from these points, with sides not necessarily parallel to the x and y axes.
If there isn't any rectangle, return 0.
Example 1:
Input: [[1,2],[2,1],[1,0],[0,1]]
Output: 2.00000
Explanation: The minimum area rectangle occurs at [1,2],[2,1],[1,0],[0,1], with an area of 2.
Example 2:
Input: [[0,1],[2,1],[1,1],[1,0],[2,0]]
Output: 1.00000
Explanation: The minimum area rectangle occurs at [1,0],[1,1],[2,1],[2,0], with an area of 1.
Example 3:
Input: [[0,3],[1,2],[3,1],[1,3],[2,1]]
Output: 0
Explanation: There is no possible rectangle to form from these points.
Example 4:
Input: [[3,1],[1,1],[0,1],[2,1],[3,3],[3,2],[0,2],[2,3]]
Output: 2.00000
Explanation: The minimum area rectangle occurs at [2,1],[2,3],[3,3],[3,1], with an area of 2.
Note:
1 <= points.length <= 50
0 <= points[i][0] <= 40000
0 <= points[i][1] <= 40000
All points are distinct.
Answers within 10^-5 of the actual value will be accepted as correct.
"""
""" ==================== body ==================== """
class Solution:
def minAreaFreeRect(self, points):
"""
:type points: List[List[int]]
:rtype: float
"""
""" ==================== body ==================== """
|
[
"capric8416@gmail.com"
] |
capric8416@gmail.com
|
b90f6e7917af6e75b2d7465f1ec61af936e8ad06
|
a08f5a2a75dcbe8e6871759d6b7a89e0e2f3519e
|
/pypodman/pypodman/lib/actions/rmi_action.py
|
7c3d0bd795b1c9a13a592f15020303f704fb5c37
|
[
"Apache-2.0"
] |
permissive
|
4383/python-podman
|
99a7d8906e13811059743feba005a7550ff9f604
|
94a46127cb0db2b6187186788a941ec72af476dd
|
refs/heads/master
| 2020-04-22T10:47:05.035526
| 2019-01-14T14:30:35
| 2019-01-14T14:30:35
| 170,317,428
| 0
| 0
|
Apache-2.0
| 2019-02-12T12:52:13
| 2019-02-12T12:52:12
| null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
"""Remote client command for deleting images."""
import sys
import podman
from pypodman.lib import AbstractActionBase
class Rmi(AbstractActionBase):
"""Class for removing images from storage."""
@classmethod
def subparser(cls, parent):
"""Add Rmi command to parent parser."""
parser = parent.add_parser('rmi', help='delete image(s)')
parser.add_flag(
'--force',
'-f',
help='force delete of image(s) and associated containers.')
parser.add_argument('targets', nargs='+', help='image id(s) to delete')
parser.set_defaults(class_=cls, method='remove')
def remove(self):
"""Remove image(s)."""
for ident in self._args.targets:
try:
img = self.client.images.get(ident)
img.remove(self._args.force)
print(ident)
except podman.ImageNotFound as e:
sys.stdout.flush()
print(
'Image {} not found.'.format(e.name),
file=sys.stderr,
flush=True)
except podman.ErrorOccurred as e:
sys.stdout.flush()
print(
'{}'.format(e.reason).capitalize(),
file=sys.stderr,
flush=True)
|
[
"jhonce@redhat.com"
] |
jhonce@redhat.com
|
5bedb6acfb11c0720e53b72916dc4b2a3fe2290b
|
1843fd5ccb4377240e664acd21ba5a9369eca2ab
|
/bluebottle/utils/exchange_rates.py
|
5a13a2e89fbde1ff2d96b0f6451e16a83362c4b3
|
[
"BSD-2-Clause"
] |
permissive
|
raux/bluebottle
|
ba2e576cebcb6835065004c410b22bd8a6b9ee29
|
49d92b5deb289c1539f99122abc20f845577b879
|
refs/heads/master
| 2020-03-27T03:20:11.465491
| 2018-08-23T13:09:25
| 2018-08-23T13:09:25
| 145,854,614
| 0
| 0
| null | 2018-08-23T13:05:00
| 2018-08-23T13:04:59
| null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
from djmoney_rates.utils import convert_money
def convert(money, currency):
""" Convert money object `money` to `currency`."""
return convert_money(money.amount, money.currency, currency)
|
[
"ernst@onepercentclub.com"
] |
ernst@onepercentclub.com
|
36b6bc2a275ccfba0a769e03a047842003034ffa
|
8d1d1e7677e1a18c00fb295971211d4e29d10896
|
/vocal_synthesis/experiments/22feb_testing_b2_rmsprop_rectify.py
|
5bbb31566fa7981f6bc49ff20056e209698369f4
|
[] |
no_license
|
christopher-beckham/ift6266h16
|
8296d1529f6ce3e209af371283f816a4c6d63ea9
|
f141fb0a320c20c2c7b43b46f06b1c68cde183f0
|
refs/heads/master
| 2021-01-10T13:38:40.733180
| 2016-04-17T02:22:52
| 2016-04-17T02:22:52
| 49,399,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
import numpy as np
from scipy.io import wavfile
import os
import sys
sys.path.append( os.pardir )
import cPickle as pickle
from lasagne.updates import *
import rnn_experiment as experiment
if __name__ == "__main__":
# e.g. 1000_60sec.pkl
in_pkl = sys.argv[1]
out_pkl = sys.argv[2]
with open(in_pkl) as f:
dat = pickle.load(f)
X_train, X_valid, X_test = dat[0]
sys.stderr.write("X_train shape = %s\n" % str(X_train.shape))
sys.stderr.write("X_valid shape = %s\n" % str(X_valid.shape))
sys.stderr.write("X_test shape = %s\n" % str(X_test.shape))
args = dict()
args["seed"] = 0
args["batch_size"] = 16
args["learning_rate"] = 0.01
args["momentum"] = 0.9
args["num_epochs"] = 5000
args["X_train"] = X_train
args["X_valid"] = X_valid
args["X_test"] = X_test
#args["update_method"] = nesterov_momentum
args["update_method"] = adagrad
args["config"] = "../configurations/19feb_testing_e2_relu.py"
model = experiment.train(args)
sys.stderr.write( "writing to file: %s\n" % (out_pkl) )
with open(out_pkl, "wb") as f:
pickle.dump(model, f, pickle.HIGHEST_PROTOCOL)
|
[
"chrispy645@gmail.com"
] |
chrispy645@gmail.com
|
b665f56e5dff437feeb254d063d46896215f3942
|
bad686ba27539a3d3286418cc3ebf2aa80ae4958
|
/src/kits/maker-pi-rp2040-robots/time-of-flight-display-test.py
|
f633d990bc842f9f8617dc32d4679a845184fb29
|
[] |
no_license
|
AaryaBatchu/micropython
|
f0a31b579b3a998586f26b92036875c93588eca7
|
aef7d33937352e9ab6f9615bfc5bf9aa1a9bee57
|
refs/heads/main
| 2023-08-19T13:33:15.006432
| 2021-10-23T19:06:26
| 2021-10-23T19:06:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
# Test program for VL53L0X
import time
from machine import Pin, I2C
from ssd1306 import SSD1306_I2C
import VL53L0X
I2C0_SDA_PIN = 0
I2C0_SCL_PIN = 1
I2C1_SDA_PIN = 2
I2C1_SCL_PIN = 3
i2c0=I2C(0,sda=Pin(I2C0_SDA_PIN), scl=Pin(I2C0_SCL_PIN))
i2c1=I2C(1,sda=Pin(I2C1_SDA_PIN), scl=Pin(I2C1_SCL_PIN), freq=400000)
oled = SSD1306_I2C(128, 64, i2c0)
tof = VL53L0X.VL53L0X(i2c1)
tof.start()
while True:
tof.read()
print(tof.read())
oled.fill(0)
oled.text("CoderDojo Robot", 0, 0)
oled.text("P1:", 0, 20)
oled.text(str(tof.read()), 40, 20)
oled.show()
time.sleep(0.05)
# tof.stop()
|
[
"dan.mccreary@gmail.com"
] |
dan.mccreary@gmail.com
|
105c0808de6a0d4f0fca7819c1b26ac7d1fd97cf
|
1cd83cf06ff7ae750ab419ac867d475d38342388
|
/play-with-numbers/lychrel.py
|
ee4e3ef1b0281f78eebe6e5db760bed2cd2e7ead
|
[
"MIT"
] |
permissive
|
AnuragAnalog/maths
|
28e402a980075e0201a699528387a7ca223737e8
|
b8fd5398dd8fce657ba5c3c0170b5e493eb9d341
|
refs/heads/master
| 2022-10-31T14:50:52.114560
| 2022-10-15T19:19:36
| 2022-10-15T19:19:36
| 146,905,452
| 0
| 3
|
MIT
| 2022-10-15T19:19:37
| 2018-08-31T14:52:56
|
C
|
UTF-8
|
Python
| false
| false
| 1,074
|
py
|
#!/usr/bin/python3.6
""" If we take 47, reverse and add, 47 + 74 = 121, which is palindromic.
It is thought that some numbers, like 196, never produce a palindrome.
A number that never forms a palindrome through the reverse and add process
is called a Lychrel number. Due to the theoretical nature of these numbers,
and for the purpose of this problem, we shall assume that a number is Lychrel
until proven otherwise. In addition you are given that for every number below
ten-thousand, it will either (i) become a palindrome in less than fifty
iterations, or, (ii) no one, with all the computing power that exists, has
managed so far to map it to a palindrome. """
def ispalindrome(s):
return s[:] == s[::-1]
def lychrel(n):
tot = n
for i in range(50):
temp = list(str(tot))
temp.reverse()
tot += int("".join(temp))
if ispalindrome(str(tot)):
print(f"Given {n} is not a lychrel number")
return None
print(f"Given {n} is a lychrel number")
n = int(input("Enter the number: "))
lychrel(n)
|
[
"anurag.peddi1998@gmail.com"
] |
anurag.peddi1998@gmail.com
|
d0d2170beaa496a695545f31c255a9f64e877438
|
3c38febb2f7a42fb72b543eb23caa9f46580f06b
|
/Uva Problems/Python/713.py
|
e95e6f573511da20421cca9b7150a6881a4d1807
|
[] |
no_license
|
joyonto51/Programming_Practice
|
479b8e84862e35f29a3904bd3556edb6532efb4b
|
1d6ff08cf63011cfde6b73988bc1686a12e70a79
|
refs/heads/master
| 2020-03-09T05:34:39.502942
| 2018-10-01T14:34:17
| 2018-10-01T14:34:17
| 128,617,218
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
T = int(input())
for i in range(T):
a,b=map(str,input().split())
x = int(a[::-1])
y = int(b[::-1])
w = str(x+y)
z = int(w[::-1])
print("{}".format(z))
|
[
"meghla1609"
] |
meghla1609
|
91225add6aa7e75a6cef7c47459470a6ff209b39
|
58d2b60989a83142286273c1d3bdd66a77842c76
|
/Wind/Train/RunConfig.py
|
0220ddff4b85e2aa74b7fa6c181234275d0f4756
|
[] |
no_license
|
castorgit/wind_code
|
8193c7439eb459e58a55b8ff107df6032f77ad7d
|
6f25302e9ba581f769d22d47b2c86aa2d60de393
|
refs/heads/master
| 2022-09-24T13:14:59.280976
| 2020-06-06T17:13:58
| 2020-06-06T17:13:58
| 270,047,772
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,846
|
py
|
"""
.. module:: RunConfig
RunConfig
*************
:Description: RunConfig
Object to store information from the flags passed to the script for training
:Authors: HPAI-BSC
:Version:
:Created on: 06/07/2018 8:09
"""
__author__ = 'HPAI-BSC'
class RunConfig:
"""Class Runconfig
    Stores information from the script flags that is not in the experiment's configuration file
"""
## Implementation to use for RNN
impl = 1
## Activates TF verbose output and other information
verbose = False
## Generate output for tensorboard
tboard = False
## Keep the model with best validation accuracy
best = True
## Early stopping
early = True
## Multi GPU training
multi = False
## Get experiment configuration using the proxy
proxy = False
## Save the final model
save = False
## Get the data from the remote server
remote = False
## Print info of dataset and model at the end of training
info = False
## Not yet used
log = None
def __init__(self, impl=1, verbose=False, tboard=False, best=True, early=True, multi=False, proxy=False, save=False,
remote=False, info=False, log=False):
""" Constructor
Stores the parameters in the object attributes
:param impl:
:param verbose:
:param tboard:
:param best:
:param early:
:param multi:
:param proxy:
:param save:
:param remote:
:param info:
:param log:
"""
self.impl = impl
self.verbose = verbose
self.tboard = tboard
self.best = best
self.early = early
self.multi = multi
self.proxy = proxy
self.save = save
self.remote = remote
self.info = info
self.log = log
|
[
"30628981+castorgit@users.noreply.github.com"
] |
30628981+castorgit@users.noreply.github.com
|
927db656a17a58d4953f22fbdc57fd16d812b724
|
27ff7fec0ae3f29f58089a2acab0aa3bc4e6e1f7
|
/RIDE-python3/utest/controller/test_tcuk_copy.py
|
4d4c8ab42b7c9cad385b512f4c7fb6b0a38d324f
|
[
"Apache-2.0"
] |
permissive
|
zhangsong1417/xx
|
01435d6057364991b649c1acc00b36ab13debe5a
|
c40cfdede194daf3bdf91b36c1936150577128b9
|
refs/heads/master
| 2020-04-06T14:06:23.011363
| 2019-07-09T02:38:02
| 2019-07-09T02:38:02
| 157,528,207
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
import time
import unittest
from robotide.robotapi import TestCaseFile
from robotide.controller.filecontrollers import TestCaseFileController
from resources import COMPLEX_SUITE_PATH
from nose.tools import assert_equal, assert_true
class TestCaseAndUserKeywordCopyingTest(unittest.TestCase):
controller = TestCaseFileController(
TestCaseFile(source=COMPLEX_SUITE_PATH).populate())
def test_test_case_copy(self):
test = self.controller.tests[0]
copy = test.copy('New Name')
assert_equal(copy.name, 'New Name')
for orig, copied in zip(test.settings, copy.settings):
assert_equal(orig.value, copied.value)
assert_true(copied is not orig)
assert_equal(test.steps, copy.steps)
assert_true(test.steps is not copy.steps)
def test_keyword_copy(self):
test = self.controller.keywords[0]
copy = test.copy('New Name')
assert_equal(copy.name, 'New Name')
for orig, copied in zip(test.settings, copy.settings):
assert_equal(orig.value, copied.value)
assert_true(copied is not orig)
assert_equal(test.steps, copy.steps)
assert_true(test.steps is not copy.steps)
def test_test_copy_performance(self):
self._run_copy_test(self.controller.tests[0])
def test_keyword_copy_performance(self):
self._run_copy_test(self.controller.keywords[0])
def _run_copy_test(self, item):
self._test_copy(item, 10)
self._test_copy(item, 200)
def _test_copy(self, item, count):
start_time = time.time()
for i in range(0, count):
item.copy(str(i))
self.assertTrue(time.time() < (start_time + 2),
"Copy operation takes too long time")
if __name__ == '__main__':
unittest.main()
|
[
"44634576+shuiling21@users.noreply.github.com"
] |
44634576+shuiling21@users.noreply.github.com
|
e09bfea83d8f83c4c3596a4e1384efbdb19d67e1
|
f5a1fcc57ba6d1bdd946158d269f861747023977
|
/google_problems/problem_19.py
|
ba9c6737ede40571563275798458611485c593dd
|
[
"MIT"
] |
permissive
|
younes38/Daily-Coding-Problem
|
64f8c4137b86fc44d6202585f009a3a5e05f4eac
|
ba2b48fbd4d86b2130a396b9d464f2395b9983b6
|
refs/heads/master
| 2021-07-19T01:41:20.665277
| 2019-11-30T21:00:50
| 2019-11-30T21:00:50
| 225,072,874
| 2
| 0
|
MIT
| 2019-11-30T21:38:13
| 2019-11-30T21:38:13
| null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
"""This problem was asked by Google.
Implement an LRU (Least Recently Used) cache. It should be able to
be initialized with a cache size n, and contain the following methods:
• set(key, value): sets key to value. If there are already n items in the
cache and we are adding a new item, then it should also
remove the least recently used item.
• get(key): gets the value at key. If no such key exists, return null.
Each operation should run in O(1) time.
"""
|
[
"mxcsyounes@gmail.com"
] |
mxcsyounes@gmail.com
|
8ceb1e40fd1074a3ca829be6b58a977dfba353b3
|
828e58fff5f3779b6fddbc029f332345adb97c4f
|
/yoohalal/apps/dashboard/promotions/app.py
|
9a86e30efe48de93a9f59df745206195afa7b01e
|
[] |
no_license
|
sofyandamha/Django-Oscar-Marketplace-yoohalal
|
5c33ac77834dcbc199d5f39971b0f527e9627167
|
8bc1f11b1fe301f1dc5f9f9edfd6c24402effeb3
|
refs/heads/master
| 2020-03-27T20:52:37.031477
| 2018-09-02T16:03:43
| 2018-09-02T16:03:43
| 147,099,611
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,564
|
py
|
from django.conf.urls import url
from oscar.apps.dashboard.promotions.app import PromotionsDashboardApplication as CorePromotionsDashboardApplication
from oscar.core.loading import get_class
from apps.promotions.conf import PROMOTION_CLASSES
class PromotionsDashboardApplication(CorePromotionsDashboardApplication):
# Dynamically set the CRUD views for all promotion classes
view_names = (
('create_%s_view', 'Create%sView'),
('update_%s_view', 'Update%sView'),
('delete_%s_view', 'Delete%sView')
)
for klass in PROMOTION_CLASSES:
if klass.classname() == 'categoryproductlist':
for attr_name, view_name in view_names:
full_attr_name = attr_name % klass.classname()
full_view_name = view_name % klass.__name__
view = get_class('apps.dashboard.promotions.views', full_view_name)
locals()[full_attr_name] = view
def get_urls(self):
urls = super(PromotionsDashboardApplication, self).get_urls()
for klass in PROMOTION_CLASSES:
if klass.classname() == 'categoryproductlist':
code = klass.classname()
urls += [
url(r'create/%s/' % code,
getattr(self, 'create_%s_view' % code).as_view(),
name='promotion-create-%s' % code),
url(r'^update/(?P<ptype>%s)/(?P<pk>\d+)/$' % code,
getattr(self, 'update_%s_view' % code).as_view(),
name='promotion-update'),
url(r'^delete/(?P<ptype>%s)/(?P<pk>\d+)/$' % code,
getattr(self, 'delete_%s_view' % code).as_view(),
name='promotion-delete')]
return self.post_process_urls(urls)
application = PromotionsDashboardApplication()
|
[
"sofyandamha@gmail.com"
] |
sofyandamha@gmail.com
|
e0ed0dfefb138ca3c8fd3e937468c210583a4899
|
ace717292aec2cbff637da1455e265b2c4f5894b
|
/azdev/operations/tests/test_break_change.py
|
6a81837caf7c11c4c8bb9ba49d0e3b8e9f1cb7b8
|
[
"MIT"
] |
permissive
|
Azure/azure-cli-dev-tools
|
b62b936327c1129d970e7b836b7f926e7c277cce
|
d4e9490071a5161c3f663e41a99f16b7a091feea
|
refs/heads/dev
| 2023-08-24T12:36:36.386669
| 2023-08-10T05:57:09
| 2023-08-10T05:57:09
| 158,465,443
| 78
| 106
|
MIT
| 2023-09-14T02:55:18
| 2018-11-20T23:46:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,621
|
py
|
# -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
import unittest
import os
from azdev.operations.command_change import export_command_meta, cmp_command_meta
from azdev.operations.command_change.util import get_command_tree
class MyTestCase(unittest.TestCase):
def test_cmd_meta_generation(self):
if os.path.exists("./jsons/az_monitor_meta.json"):
os.remove("./jsons/az_monitor_meta.json")
module_list = ["monitor"]
export_command_meta(modules=module_list, meta_output_path="./jsons/")
self.assertTrue(os.path.exists("./jsons/az_monitor_meta.json"), "new monitor meta generation failed")
def test_parse_cmd_tree(self):
cmd_name = "monitor log-profiles create"
ret = get_command_tree(cmd_name)
self.assertTrue(ret["is_group"], "group parse failed")
self.assertFalse(ret["sub_info"]["sub_info"]["is_group"], "group parse failed")
self.assertTrue(ret["sub_info"]["sub_info"]["cmd_name"] == "monitor log-profiles create", "group parse failed")
def test_diff_meta(self):
if not os.path.exists("./jsons/az_monitor_meta_before.json") \
or not os.path.exists("./jsons/az_monitor_meta_after.json"):
return
result = cmp_command_meta(base_meta_file="./jsons/az_monitor_meta_before.json",
diff_meta_file="./jsons/az_monitor_meta_after.json",
output_type="text")
target_message = [
"please confirm cmd `monitor private-link-scope scoped-resource show` removed",
"sub group `monitor private-link-scope private-endpoint-connection cust` removed",
]
for mes in target_message:
found = False
for line in result:
if line.find(mes) > -1:
found = True
break
self.assertTrue(found, "target message not found")
ignored_message = [
"updated property `is_aaz` from `False` to `True`"
]
for mes in ignored_message:
ignored = True
for line in result:
if line.find(mes) > -1:
ignored = False
break
self.assertTrue(ignored, "ignored message found")
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
989ffac9bbb35c98fef0e0e921d47fbd26eac1d9
|
4e02d5b0b1b0739553fd40bbbdfb0d02c9830350
|
/0279_Perfect_Squares.py
|
a4f7a1558f81bae7ae146a21635bc67ee099f516
|
[] |
no_license
|
bingli8802/leetcode
|
b039ab6af62f0c8992463393f561caafd21056e6
|
a509b383a42f54313970168d9faa11f088f18708
|
refs/heads/master
| 2023-03-29T03:11:45.801090
| 2021-03-23T22:55:16
| 2021-03-23T22:55:16
| 279,321,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,738
|
py
|
class Solution(object):
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
dp = [0] * (n+1)
for i in range(1, n+1):
dp[i] = i
j = 1
while i - j*j >= 0:
dp[i] = min(dp[i], dp[i-j*j] + 1)
j += 1
return dp[n]
    # Similar idea to the first solution, but easier to follow and also faster
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
# lst is to store all the perfect squares that are smaller or equal to n
        # lst holds every perfect square that is less than or equal to n
lst = [i*i for i in range(1,n) if i*i <= n]
        print(lst)
# create another list dp to store perfect squares that a number need from 1 to n
        # dp[num] stores the minimum count of perfect squares for each num from 1 to n
dp = [0] * (n+1)
for num in range(1,n+1):
            # every number num can be written as num copies of 1,
            # so start with min_num = num as the worst case
            # e.g. 4 can be split into four 1's, giving min_num = 4
min_num = num
# a temporary list is to store perfect squares that are smaller or equal to current number
            # temporary list of perfect squares <= the current number, e.g. [1, 4] for 4
tmp_lst = [c for c in lst if c <= num]
for j in tmp_lst:
                # look up in dp the count for num-j and add one square, e.g. dp[4-4] + 1 = 1
perfectSquares = dp[num-j] + 1
if perfectSquares < min_num:
min_num = perfectSquares
dp[num] = min_num
return dp[n]
|
[
"noreply@github.com"
] |
bingli8802.noreply@github.com
|
b85bd0e003e0af4800ebefdd62eaedba1d847289
|
83cbb5554f488f78b9cc8fddda5749e4a77cd9d7
|
/corona_chan/app_celery.py
|
d2f74a0925bf8a3b6fdd875f3bcee3262aaa42f5
|
[
"WTFPL"
] |
permissive
|
dem4ply/corona_chan
|
5273beef3bb859314a39bf6948d983db1f9ead38
|
e422a6ddd2ea7628d83f9f7eb09dc8b39984971b
|
refs/heads/master
| 2022-01-21T17:34:56.458016
| 2020-03-28T08:12:40
| 2020-03-28T08:12:40
| 252,572,781
| 0
| 0
|
WTFPL
| 2022-01-06T22:43:34
| 2020-04-02T21:53:46
|
Python
|
UTF-8
|
Python
| false
| false
| 398
|
py
|
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
__all__ = [ 'corona_chan_task' ]
os.environ.setdefault( 'DJANGO_SETTINGS_MODULE', 'corona_chan.settings' )
corona_chan_task = Celery( 'corona_chan' )
corona_chan_task.config_from_object( 'django.conf:settings' )
corona_chan_task.autodiscover_tasks( lambda: settings.INSTALLED_APPS )
|
[
"dem4ply@gmail.com"
] |
dem4ply@gmail.com
|
d9a61d0432914bf06dd735f4f255e00fbb172535
|
6a3db7061752a7dc7466f3965e1d7aef116b2adc
|
/core/manager/threadmanager.py
|
0a24f226af42726d5611b3b10dedc9d19d0f664f
|
[
"BSD-2-Clause"
] |
permissive
|
bwbj/imagepy
|
f3b68b320a82bbb3e0641539a244046e9dc6fa73
|
e2ee389f1003fc297a541968dcfd7fd883b91e01
|
refs/heads/master
| 2021-01-25T09:25:45.077798
| 2017-06-04T16:02:19
| 2017-06-04T16:02:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 14 23:24:32 2017
@author: yxl
"""
import numpy as np, os
import IPy
class ThreadManager:
threads = []
|
[
"498264240@qq.com"
] |
498264240@qq.com
|
c64c92384af399fffd97ca2e1811f3c53183049a
|
ad20495c8df427211dba51c93c507365f9fce319
|
/tilejetserver/source/models.py
|
99a2588f534876baed6957523db4f391849ae42f
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
tilejet/tilejet-server
|
779398257c65138c906f3989c63e029dfe45587e
|
7bd0caa18cde98a8fd80aeea6e06bbe8aa2fa1be
|
refs/heads/master
| 2021-01-10T02:41:23.553939
| 2015-12-06T07:18:56
| 2015-12-06T07:19:59
| 43,448,267
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,709
|
py
|
import datetime
import logging
import os
import io
import sys
import uuid
from base64 import b64encode
from optparse import make_option
import json
import argparse
import time
import os
import subprocess
import binascii
import re
from django.db import models
from django.db.models import signals
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from tilejetserver.utils import TYPE_TMS, TYPE_TMS_FLIPPED, TYPE_BING, TYPE_WMS, TYPE_CHOICES, IMAGE_EXTENSION_CHOICES
def parse_url(url):
if (url is None) or len(url) == 0:
return None
index = url.rfind('/')
if index != (len(url)-1):
url += '/'
return url
class TileOrigin(models.Model):
TYPE_CHOICES = [
(TYPE_TMS, _("TMS")),
(TYPE_TMS_FLIPPED, _("TMS - Flipped")),
(TYPE_BING, _("Bing")),
(TYPE_WMS, _("WMS"))
]
name = models.CharField(max_length=100)
description = models.CharField(max_length=400, help_text=_('Human-readable description of the services provided by this tile origin.'))
type = models.IntegerField(choices=TYPE_CHOICES, default=TYPE_TMS)
cacheable = models.BooleanField(default=True, help_text=_('If true, tiles from the origin might be cached given other constraints. If false, tiles from the origin will never be cached.'))
multiple = models.BooleanField(default=True, help_text=_('If true, make sure to include {slug} in the url to be replaced by each source.'))
auto = models.BooleanField(default=True, help_text=_('Should the proxy automatically create tile sources for this origin?'))
url = models.CharField(max_length=400, help_text=_('Used to generate url for new tilesource. For example, http://c.tile.openstreetmap.org/{z}/{x}/{y}.png.'))
extensions = models.CharField(max_length=400,null=True,blank=True)
pattern = models.CharField(max_length=400,null=True,blank=True)
auth = models.CharField(max_length=400, blank=True, null=True, help_text=_('Authentication or access token. Dynamically replaced in downstream sources by replacing {auth}.'))
def __unicode__(self):
return self.name
class Meta:
ordering = ("name","type")
verbose_name_plural = _("Tile Origins")
def type_title(self):
return unicode([v for i, v in enumerate(TYPE_CHOICES) if v[0] == self.type][0][1]);
#def match(self, url):
# match = None
# # If matches primary pattern, then check secondary patterns/filters.
# if self.pattern:
# match = re.match(self.pattern, url, re.M|re.I)
#patterns = TileOriginPattern.objects.filter(origin__pk=self.pk)
#for pattern in patterns:
# match = pattern.match(url)
# if match:
# break
return match
class TileOriginPattern(models.Model):
origin = models.ForeignKey(TileOrigin,null=True,blank=True,help_text=_('The origin.'))
includes = models.CharField(max_length=400,null=True,blank=True)
excludes = models.CharField(max_length=400,null=True,blank=True)
def __unicode__(self):
return self.origin.name + " - "+str(self.pk)
class Meta:
ordering = ("origin", "includes", "excludes")
verbose_name_plural = _("Tile Origin Patterns")
def match(self,url):
#print "matching includes: "+str(self.includes)
#print "matching excludes: "+str(self.excludes)
#print "matching url: "+str(url)
match = None
if self.includes:
match = re.match(self.includes, url, re.M|re.I)
if self.excludes:
if re.match(self.excludes, url, re.M|re.I):
match = None
#print "match: "+str(match)
return match
class TileSource(models.Model):
name = models.CharField(max_length=100)
description = models.CharField(max_length=400, null=True, blank=True, help_text=_('Human-readable description of this tile source.'))
type = models.IntegerField(choices=TYPE_CHOICES, default=TYPE_TMS)
auto = models.BooleanField(default=True, help_text=_('Was the tile source created automatically by the proxy or manually by a user?'))
cacheable = models.BooleanField(default=True, help_text=_('If true, tiles from this source might be cached given other constraints. If false, tiles from this source will never be cached.'))
origin = models.ForeignKey(TileOrigin,null=True,blank=True,help_text=_('The Tile Origin, if there is one.'))
url = models.CharField(max_length=400, help_text=_('Standard Tile URL. If applicable, replace {slug} from origin. For example, http://c.tile.openstreetmap.org/{z}/{x}/{y}.{ext}. If url includes {auth}, it is dynamically replaced with the relevant auth token stored with origin.'))
#extensions = models.CharField(max_length=400,null=True,blank=True,choices=IMAGE_EXTENSION_CHOICES)
extensions = models.CharField(max_length=400,null=True,blank=True)
pattern = models.CharField(max_length=400,null=True,blank=True)
extents = models.CharField(max_length=800,blank=True,null=True)
minZoom = models.IntegerField(default=0,null=True,blank=True)
maxZoom = models.IntegerField(default=None,null=True,blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ("name",)
verbose_name_plural = _("Tile Sources")
@property
def tileservices(self):
return self.tileservice_set
def type_title(self):
return unicode([v for i, v in enumerate(TYPE_CHOICES) if v[0] == self.type][0][1]);
|
[
"pjdufour.dev@gmail.com"
] |
pjdufour.dev@gmail.com
|
847223e52750a1c83489ec6b1a24eef605dc399e
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/detection/centernet2/projects/CenterNet2/centernet/modeling/layers/deform_conv.py
|
1f769c8cc406ad284f290a81f443686934da2c31
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,579
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import torch
from torch import nn
from detectron2.layers import Conv2d
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class DFConv2d(nn.Module):
"""Deformable convolutional layer"""
def __init__(
self,
in_channels,
out_channels,
with_modulated_dcn=True,
kernel_size=3,
stride=1,
groups=1,
dilation=1,
deformable_groups=1,
bias=False,
padding=None
):
super(DFConv2d, self).__init__()
if isinstance(kernel_size, (list, tuple)):
assert isinstance(stride, (list, tuple))
assert isinstance(dilation, (list, tuple))
assert len(kernel_size) == 2
assert len(stride) == 2
assert len(dilation) == 2
padding = (
dilation[0] * (kernel_size[0] - 1) // 2,
dilation[1] * (kernel_size[1] - 1) // 2
)
offset_base_channels = kernel_size[0] * kernel_size[1]
else:
padding = dilation * (kernel_size - 1) // 2
offset_base_channels = kernel_size * kernel_size
if with_modulated_dcn:
from detectron2.layers.deform_conv import ModulatedDeformConv
offset_channels = offset_base_channels * 3 # default: 27
conv_block = ModulatedDeformConv
else:
from detectron2.layers.deform_conv import DeformConv
offset_channels = offset_base_channels * 2 # default: 18
conv_block = DeformConv
self.offset = Conv2d(
in_channels,
deformable_groups * offset_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=1,
dilation=dilation
)
nn.init.constant_(self.offset.weight, 0)
nn.init.constant_(self.offset.bias, 0)
'''
for l in [self.offset, ]:
nn.init.kaiming_uniform_(l.weight, a=1)
torch.nn.init.constant_(l.bias, 0.)
'''
self.conv = conv_block(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
deformable_groups=deformable_groups,
bias=bias
)
self.with_modulated_dcn = with_modulated_dcn
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.offset_split = offset_base_channels * deformable_groups * 2
def forward(self, x, return_offset=False):
if x.numel() > 0:
if not self.with_modulated_dcn:
offset_mask = self.offset(x)
x = self.conv(x, offset_mask)
else:
offset_mask = self.offset(x)
offset = offset_mask[:, :self.offset_split, :, :]
mask = offset_mask[:, self.offset_split:, :, :].sigmoid()
x = self.conv(x, offset, mask)
if return_offset:
return x, offset_mask
return x
# get output shape
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // d + 1
for i, p, di, k, d in zip(
x.shape[-2:],
self.padding,
self.dilation,
self.kernel_size,
self.stride
)
]
output_shape = [x.shape[0], self.conv.weight.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
|
[
"hanjun20@huawei.com"
] |
hanjun20@huawei.com
|
5e3edb681874b53ffa1b0407e4f3f44e9c9564ce
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-gaussdb/huaweicloudsdkgaussdb/v3/model/mysql_flavors_info.py
|
f645f35059fe903ce4c4505600efa70fabee5fdc
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,889
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class MysqlFlavorsInfo:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'vcpus': 'str',
'ram': 'str',
'type': 'str',
'id': 'str',
'spec_code': 'str',
'version_name': 'str',
'instance_mode': 'str',
'az_status': 'dict(str, str)'
}
attribute_map = {
'vcpus': 'vcpus',
'ram': 'ram',
'type': 'type',
'id': 'id',
'spec_code': 'spec_code',
'version_name': 'version_name',
'instance_mode': 'instance_mode',
'az_status': 'az_status'
}
def __init__(self, vcpus=None, ram=None, type=None, id=None, spec_code=None, version_name=None, instance_mode=None, az_status=None):
"""MysqlFlavorsInfo - a model defined in huaweicloud sdk"""
self._vcpus = None
self._ram = None
self._type = None
self._id = None
self._spec_code = None
self._version_name = None
self._instance_mode = None
self._az_status = None
self.discriminator = None
self.vcpus = vcpus
self.ram = ram
self.type = type
self.id = id
self.spec_code = spec_code
self.version_name = version_name
self.instance_mode = instance_mode
self.az_status = az_status
@property
def vcpus(self):
"""Gets the vcpus of this MysqlFlavorsInfo.
        CPU size. For example: 1 means 1U.
:return: The vcpus of this MysqlFlavorsInfo.
:rtype: str
"""
return self._vcpus
@vcpus.setter
def vcpus(self, vcpus):
"""Sets the vcpus of this MysqlFlavorsInfo.
        CPU size. For example: 1 means 1U.
:param vcpus: The vcpus of this MysqlFlavorsInfo.
:type: str
"""
self._vcpus = vcpus
@property
def ram(self):
"""Gets the ram of this MysqlFlavorsInfo.
        Memory size, in GB.
:return: The ram of this MysqlFlavorsInfo.
:rtype: str
"""
return self._ram
@ram.setter
def ram(self, ram):
"""Sets the ram of this MysqlFlavorsInfo.
        Memory size, in GB.
:param ram: The ram of this MysqlFlavorsInfo.
:type: str
"""
self._ram = ram
@property
def type(self):
"""Gets the type of this MysqlFlavorsInfo.
        Flavor type. Valid values are arm and x86.
:return: The type of this MysqlFlavorsInfo.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this MysqlFlavorsInfo.
        Flavor type. Valid values are arm and x86.
:param type: The type of this MysqlFlavorsInfo.
:type: str
"""
self._type = type
@property
def id(self):
"""Gets the id of this MysqlFlavorsInfo.
        Flavor ID; this field is unique.
:return: The id of this MysqlFlavorsInfo.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this MysqlFlavorsInfo.
        Flavor ID; this field is unique.
:param id: The id of this MysqlFlavorsInfo.
:type: str
"""
self._id = id
@property
def spec_code(self):
"""Gets the spec_code of this MysqlFlavorsInfo.
        Resource flavor code, the same as the flavor_ref specified at creation. For example: gaussdb.mysql.xlarge.x86.4.
:return: The spec_code of this MysqlFlavorsInfo.
:rtype: str
"""
return self._spec_code
@spec_code.setter
def spec_code(self, spec_code):
"""Sets the spec_code of this MysqlFlavorsInfo.
        Resource flavor code, the same as the flavor_ref specified at creation. For example: gaussdb.mysql.xlarge.x86.4.
:param spec_code: The spec_code of this MysqlFlavorsInfo.
:type: str
"""
self._spec_code = spec_code
@property
def version_name(self):
"""Gets the version_name of this MysqlFlavorsInfo.
        Database version number.
:return: The version_name of this MysqlFlavorsInfo.
:rtype: str
"""
return self._version_name
@version_name.setter
def version_name(self, version_name):
"""Sets the version_name of this MysqlFlavorsInfo.
        Database version number.
:param version_name: The version_name of this MysqlFlavorsInfo.
:type: str
"""
self._version_name = version_name
@property
def instance_mode(self):
"""Gets the instance_mode of this MysqlFlavorsInfo.
        Instance type. Currently only Cluster is supported.
:return: The instance_mode of this MysqlFlavorsInfo.
:rtype: str
"""
return self._instance_mode
@instance_mode.setter
def instance_mode(self, instance_mode):
"""Sets the instance_mode of this MysqlFlavorsInfo.
        Instance type. Currently only Cluster is supported.
:param instance_mode: The instance_mode of this MysqlFlavorsInfo.
:type: str
"""
self._instance_mode = instance_mode
@property
def az_status(self):
"""Gets the az_status of this MysqlFlavorsInfo.
        Status of the flavor in each AZ. Possible values: normal (on sale), unsupported (flavor not yet supported), sellout (sold out).
:return: The az_status of this MysqlFlavorsInfo.
:rtype: dict(str, str)
"""
return self._az_status
@az_status.setter
def az_status(self, az_status):
"""Sets the az_status of this MysqlFlavorsInfo.
        Status of the flavor in each AZ. Possible values: normal (on sale), unsupported (flavor not yet supported), sellout (sold out).
:param az_status: The az_status of this MysqlFlavorsInfo.
:type: dict(str, str)
"""
self._az_status = az_status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MysqlFlavorsInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
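# Illustrative usage only (not part of the SDK source): build a flavor record
# with made-up values and read back a couple of its properties.
if __name__ == "__main__":
    _example = MysqlFlavorsInfo(vcpus="4", ram="16", type="x86", id="example-id",
                                spec_code="gaussdb.mysql.xlarge.x86.4",
                                version_name="8.0", instance_mode="Cluster",
                                az_status={"az1": "normal"})
    print(_example.spec_code, _example.az_status)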
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
f98b586ac029d55f77d430b036cbbe0642135da8
|
0660cfca0799685969f75b4082455a5608e23bc4
|
/TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular/preproc.py
|
52240c1143495c764743a4c98d2bd58e20e4257a
|
[] |
no_license
|
resemble-ai/DeepLearningExamples
|
c88dc66930bc2c4627fd187fc0034e783e6244d3
|
2d555548b698e4fc207965b7121f525c37e0401c
|
refs/heads/master
| 2023-04-12T17:23:45.349726
| 2021-04-26T13:00:10
| 2021-04-26T13:00:10
| 362,784,409
| 4
| 3
| null | 2021-04-29T10:51:42
| 2021-04-29T10:51:41
| null |
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
os.environ['TF_MEMORY_ALLOCATION'] = "0.0"
from data.outbrain.nvtabular.utils.converter import nvt_to_tfrecords
from data.outbrain.nvtabular.utils.workflow import execute_pipeline
from data.outbrain.nvtabular.utils.arguments import parse_args
from data.outbrain.nvtabular.utils.setup import create_config
def is_empty(path):
return not os.path.exists(path) or (not os.path.isfile(path) and not os.listdir(path))
def main():
args = parse_args()
config = create_config(args)
if is_empty(args.metadata_path):
logging.warning('Creating new stats data into {}'.format(config['stats_file']))
execute_pipeline(config)
else:
        logging.warning(f'Directory is not empty {args.metadata_path}')
logging.warning('Skipping NVTabular preprocessing')
if os.path.exists(config['output_train_folder']) and os.path.exists(config['output_valid_folder']):
if is_empty(config['tfrecords_path']):
logging.warning('Executing NVTabular parquets to TFRecords conversion')
nvt_to_tfrecords(config)
else:
logging.warning(f"Directory is not empty {config['tfrecords_path']}")
logging.warning('Skipping TFrecords conversion')
else:
logging.warning(f'Train and validation dataset not found in {args.metadata_path}')
if __name__ == '__main__':
main()
|
[
"kkudrynski@nvidia.com"
] |
kkudrynski@nvidia.com
|
8319f4003ac2e4e7bc59b13fa6bded86492c5bf2
|
d89cbdfbb67e46d43a05edb2abeb5e1deedfd3a2
|
/functions/shake_shake_function.py
|
2b38e2c551c100ed70044885d915227a673d2921
|
[
"MIT"
] |
permissive
|
minhtannguyen/pytorch_shake_shake
|
c9a6fb520ba2201c1ccb022767cbba963225bbae
|
d7f245d8d8b9e81a6020aadb438ffeae6d5593c2
|
refs/heads/master
| 2020-03-27T14:22:09.646020
| 2018-09-01T02:00:25
| 2018-09-01T02:00:25
| 146,658,651
| 0
| 0
|
MIT
| 2018-08-29T21:07:24
| 2018-08-29T21:07:24
| null |
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
# coding: utf-8
import torch
from torch.autograd import Function
class ShakeFunction(Function):
@staticmethod
def forward(ctx, x1, x2, alpha, beta):
ctx.save_for_backward(x1, x2, alpha, beta)
y = x1 * alpha + x2 * (1 - alpha)
return y
@staticmethod
def backward(ctx, grad_output):
x1, x2, alpha, beta = ctx.saved_variables
grad_x1 = grad_x2 = grad_alpha = grad_beta = None
if ctx.needs_input_grad[0]:
grad_x1 = grad_output * beta
if ctx.needs_input_grad[1]:
grad_x2 = grad_output * (1 - beta)
return grad_x1, grad_x2, grad_alpha, grad_beta
shake_function = ShakeFunction.apply
def get_alpha_beta(batch_size, shake_config, is_cuda):
forward_shake, backward_shake, shake_image = shake_config
if forward_shake and not shake_image:
alpha = torch.rand(1)
elif forward_shake and shake_image:
alpha = torch.rand(batch_size).view(batch_size, 1, 1, 1)
else:
alpha = torch.FloatTensor([0.5])
if backward_shake and not shake_image:
beta = torch.rand(1)
elif backward_shake and shake_image:
beta = torch.rand(batch_size).view(batch_size, 1, 1, 1)
else:
beta = torch.FloatTensor([0.5])
if is_cuda:
alpha = alpha.cuda()
beta = beta.cuda()
return alpha, beta
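# Illustrative sketch (not part of the original file): how shake_function and
# get_alpha_beta are typically combined on the outputs of two residual branches.
# The tensor shapes and the shake_config tuple below are assumptions for the demo.
if __name__ == '__main__':
    x1 = torch.randn(8, 16, 32, 32)
    x2 = torch.randn(8, 16, 32, 32)
    shake_config = (True, True, True)  # (forward_shake, backward_shake, shake_image)
    alpha, beta = get_alpha_beta(x1.size(0), shake_config, x1.is_cuda)
    y = shake_function(x1, x2, alpha, beta)
    print(y.shape)  # torch.Size([8, 16, 32, 32])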
|
[
"hysts@users.noreply.github.com"
] |
hysts@users.noreply.github.com
|
94c31b6f766395c271029a4a360588b51ce03a06
|
bc40c23403c1926e5bb81d3f6b6be567ccee7a70
|
/ecommerce/user_model/models.py
|
049da65fb7f8b02f6947ab77d96e101120f2e8e3
|
[] |
no_license
|
hrupesh/ecommerce
|
e1e0abf624ad3222bcf57548f051a3197ae899e5
|
2fc645e0e43f1920d787612e8f5b47d5990c793e
|
refs/heads/master
| 2020-04-20T01:28:35.977058
| 2019-01-31T14:51:20
| 2019-01-31T14:51:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import models
import hashlib
# Create your models here.
class register_model(models.Model):
firstname = models.CharField(max_length=250, help_text='Required')
lastname = models.CharField(max_length=250, help_text='Required')
username = models.CharField(max_length=250, help_text='Required')
email = models.EmailField(max_length=250, help_text='Required')
contact_no = models.IntegerField(null=True)
    slug = models.SlugField(unique=True, null=True)
password = models.CharField(max_length=100)
timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
updated = models.DateTimeField(auto_now=True, auto_now_add=False)
    verified = models.BooleanField(default=False)
    is_active = models.BooleanField(default=False)
email_confirmed = models.BooleanField(default=False)
USERNAME_FIELD = 'username'
REQUIRED_FIELD = ['firstname','lastname']
'''def get_absolute_url(self):
return reverse('details', kwargs={'pk':self.pk})'''
def __str__(self):
return self.email
|
[
"maheshwarishivam2604@gmail.com"
] |
maheshwarishivam2604@gmail.com
|
db865ebb216e0b6efcc8947e4b2809b42095bcd7
|
07c3034f7b6ef88e08430b8c908613ea0091f1b6
|
/Labs/Lab 7.py
|
540d228ec6bf38673cdb8a3ea31a04a997b03f8a
|
[] |
no_license
|
HelalChow/Data-Structures
|
494b8fabcdf1cac20db78055547ce4160ad6a018
|
3b3401cbd23e01b2d7d95dfc3b95451ca179cee9
|
refs/heads/master
| 2021-10-19T00:24:23.589224
| 2019-02-15T22:34:05
| 2019-02-15T22:34:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,395
|
py
|
#Question 1
def powers_of_two(num):
for i in range(1,num+1):
yield 2**i
#Question 2
def decimal_to_binary(num):
    # recursively build the binary string for a non-negative integer
    if num <= 1:
        return str(num)
    return decimal_to_binary(num // 2) + str(num % 2)
#Question 3
def partition(lst):
low = 1
high = len(lst)-1
pivot = lst[0]
while low <= high:
if lst[low]>pivot>lst[high]:
lst[low],lst[high]=lst[high],lst[low]
low+=1
high-=1
elif lst[low]<pivot>lst[high]:
low+=1
elif lst[low]>pivot<lst[high]:
high-=1
else:
low+=1
high-=1
lst[low-1],lst[0]=lst[0],lst[low-1]
lst = [54, 26, 93, 17, 77, 31, 44, 55, 20]
partition(lst)
print(lst)
class MyString():
def __init__(self,str_input):
self.str = str_input
def __len__(self):
return len(self.str)
    def __iter__(self):
        for i in range(len(self)):
            yield self.str[i]
    def __repr__(self):
        return self.str
    def __getitem__(self, ind):
        if ind >= len(self):
            raise IndexError("Index is out of range")
        return self.str[ind]
    def __add__(self, other):
        # concatenate the underlying strings instead of recursing forever
        return MyString(self.str + str(other))
    def __radd__(self, other):
        return MyString(str(other) + self.str)
    def upper(self):
        return MyString(self.str.upper())
|
[
"noreply@github.com"
] |
HelalChow.noreply@github.com
|
cf2d48e9ab4da0ece9b9ee4ddd631919aa2261e1
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2790/60720/237340.py
|
ea1258231b7d219c2f8c047b47fb50bd69d1f997
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
list=input().split()
size1=int(list[0])
size2=int(list[1])
list1=input().split()
list2=input().split()
for i in range(size1):
list1[i]=int(list1[i])
for i in range(size2):
list2[i]=int(list2[i])
count=0
for i in range(size2-1):
for j in range(size1):
if list1[j]<=list2[i]:
count=count+1
print(count,end=' ')
count=0
for j in range(size1):
if list1[j]<=list2[size2-1]:
count=count+1
print(count)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
e8d4272f4fa006bd29293b9b21adeec8080c6d47
|
3b15dc211cb6c034f4d843b1bbc540f1699182f7
|
/爬虫/多线程爬取.py
|
5fb5a954f28cf987da7a0401528b87219b49f4f9
|
[] |
no_license
|
Hanlen520/-
|
eea27d5a1272272a2f47f9070f285e44300ab0d2
|
308d3401ff368cd11f4aeac7949d57c3136f2b6e
|
refs/heads/master
| 2023-03-15T19:18:25.168665
| 2020-07-05T14:39:41
| 2020-07-05T14:39:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,074
|
py
|
import requests
import re
import threading
import os
num = 1
def get_html_text_one(url, headers):
request = requests.get(url, headers=headers)
if request.status_code == 200:
return request.text
    return None  # non-200 status: no HTML to return
def get_images_one(html, headers):
urls = re.findall('><a href="(.*?)" alt=".*?".*?"><img src=.*? alt="" data-src=.*? data-nclazyload="true"></a>',html)
for url in urls:
file_name = url.split('/')[-1]
global num
        print('Downloading image {}'.format(num))
request = requests.get(url, headers=headers)
with open('你与星河,皆可收藏' + '/' + file_name, 'wb') as f:
f.write(request.content)
num += 1
print("=======================================================================================================")
def get_images_two():
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36'
}
dir_name = '妹子自拍'
if not os.path.exists(dir_name):
os.mkdir(dir_name)
global num
for i in range(1, 3):
urls = 'https://www.mzitu.com/jiepai/comment-page-{}/#comments'.format(i)
request = requests.get(urls, headers=headers)
html = request.text
urs = re.findall('<p><img class=".*?" src=".*?" data-original="(.*?)" width="640" height="auto"/></p>', html)
for uls in urs:
file_name = uls.split('/')[-1]
            print('Downloading image {}'.format(num))
request_a = requests.get(uls, headers=headers)
with open(dir_name + '/' + file_name, 'wb') as f:
f.write(request_a.content)
num += 1
def get_images_two_three():
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36'
}
dir_name = '妹子自拍'
if not os.path.exists(dir_name):
os.mkdir(dir_name)
global num
for i in range(4, 6):
urls = 'https://www.mzitu.com/jiepai/comment-page-{}/#comments'.format(i)
request = requests.get(urls, headers=headers)
html = request.text
urs = re.findall('<p><img class=".*?" src=".*?" data-original="(.*?)" width="640" height="auto"/></p>', html)
for uls in urs:
file_name = uls.split('/')[-1]
            print('Downloading image {}'.format(num))
request_a = requests.get(uls, headers=headers)
with open(dir_name + '/' + file_name, 'wb') as f:
f.write(request_a.content)
num += 1
def main():
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36'
}
url = 'https://www.vmgirls.com/3545.html'
html = get_html_text_one(url, headers)
get_images_one(html, headers)
def main2():
get_images_two()
def main3():
get_images_two_three()
if __name__ == '__main__':
main()
|
[
"admiwj@outlook.com"
] |
admiwj@outlook.com
|
9d822d260cb327e22781dc7abc5ad41d19df961c
|
01c2254dcd0547058e66dd0ca0ecd9e2941e3cb9
|
/billingstack/openstack/common/jsonutils.py
|
e8ab2d559752d171eeb769e1538b0bc0a47621dc
|
[
"Apache-2.0"
] |
permissive
|
lanve/billingstack
|
8ac3c52eca8f1d2dc46753d043ff09b55f536fcf
|
dfeb22992d503e1d407a55b7cf1a7658386fefa3
|
refs/heads/master
| 2021-01-22T15:22:08.731143
| 2014-10-31T10:21:35
| 2014-10-31T10:21:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,458
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
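# Illustrative example (not part of the original module): dumps() falls back to
# to_primitive(), so values such as datetimes survive serialization, e.g.
#
#     >>> from billingstack.openstack.common import jsonutils
#     >>> import datetime
#     >>> jsonutils.dumps({'when': datetime.datetime(2014, 1, 1)})
#
# which renders the datetime via timeutils.strtime() before JSON encoding.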
import datetime
import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jd): xmlrpclib is not shipped with Python 3
xmlrpclib = None
import six
from billingstack.openstack.common import gettextutils
from billingstack.openstack.common import importutils
from billingstack.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in value.iteritems())
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
|
[
"endre.karlson@gmail.com"
] |
endre.karlson@gmail.com
|
ab19e7a013264bab618abc32f5c3b27d2161ed9d
|
3ef70fe63acaa665e2b163f30f1abd0a592231c1
|
/stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/html5lib/_utils.py
|
0703afb38b13bf56998d43aa542dcea9839d8132
|
[
"MIT"
] |
permissive
|
wistbean/learn_python3_spider
|
14914b63691ac032955ba1adc29ad64976d80e15
|
40861791ec4ed3bbd14b07875af25cc740f76920
|
refs/heads/master
| 2023-08-16T05:42:27.208302
| 2023-03-30T17:03:58
| 2023-03-30T17:03:58
| 179,152,420
| 14,403
| 3,556
|
MIT
| 2022-05-20T14:08:34
| 2019-04-02T20:19:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,015
|
py
|
from __future__ import absolute_import, division, unicode_literals
from types import ModuleType
from pip._vendor.six import text_type
try:
import xml.etree.cElementTree as default_etree
except ImportError:
import xml.etree.ElementTree as default_etree
__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair",
"surrogatePairToCodepoint", "moduleFactoryFactory",
"supports_lone_surrogates"]
# Platforms not supporting lone surrogates (\uD800-\uDFFF) should be
# caught by the below test. In general this would be any platform
# using UTF-16 as its encoding of unicode strings, such as
# Jython. This is because UTF-16 itself is based on the use of such
# surrogates, and there is no mechanism to further escape such
# escapes.
try:
_x = eval('"\\uD800"') # pylint:disable=eval-used
if not isinstance(_x, text_type):
# We need this with u"" because of http://bugs.jython.org/issue2039
_x = eval('u"\\uD800"') # pylint:disable=eval-used
assert isinstance(_x, text_type)
except: # pylint:disable=bare-except
supports_lone_surrogates = False
else:
supports_lone_surrogates = True
class MethodDispatcher(dict):
"""Dict with 2 special properties:
On initiation, keys that are lists, sets or tuples are converted to
multiple keys so accessing any one of the items in the original
list-like object returns the matching value
md = MethodDispatcher({("foo", "bar"):"baz"})
md["foo"] == "baz"
A default value which can be set through the default attribute.
"""
def __init__(self, items=()):
# Using _dictEntries instead of directly assigning to self is about
# twice as fast. Please do careful performance testing before changing
# anything here.
_dictEntries = []
for name, value in items:
if isinstance(name, (list, tuple, frozenset, set)):
for item in name:
_dictEntries.append((item, value))
else:
_dictEntries.append((name, value))
dict.__init__(self, _dictEntries)
assert len(self) == len(_dictEntries)
self.default = None
def __getitem__(self, key):
return dict.get(self, key, self.default)
# Some utility functions to deal with weirdness around UCS2 vs UCS4
# python builds
def isSurrogatePair(data):
return (len(data) == 2 and
ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)
def surrogatePairToCodepoint(data):
char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
(ord(data[1]) - 0xDC00))
return char_val
# Module Factory Factory (no, this isn't Java, I know)
# Here to stop this being duplicated all over the place.
def moduleFactoryFactory(factory):
moduleCache = {}
def moduleFactory(baseModule, *args, **kwargs):
if isinstance(ModuleType.__name__, type("")):
name = "_%s_factory" % baseModule.__name__
else:
name = b"_%s_factory" % baseModule.__name__
kwargs_tuple = tuple(kwargs.items())
try:
return moduleCache[name][args][kwargs_tuple]
except KeyError:
mod = ModuleType(name)
objs = factory(baseModule, *args, **kwargs)
mod.__dict__.update(objs)
if "name" not in moduleCache:
moduleCache[name] = {}
if "args" not in moduleCache[name]:
moduleCache[name][args] = {}
if "kwargs" not in moduleCache[name][args]:
moduleCache[name][args][kwargs_tuple] = {}
moduleCache[name][args][kwargs_tuple] = mod
return mod
return moduleFactory
def memoize(func):
cache = {}
def wrapped(*args, **kwargs):
key = (tuple(args), tuple(kwargs.items()))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return wrapped
|
[
"354142480@qq.com"
] |
354142480@qq.com
|
ec5e14ca7ee91e5a11c352f1f40d86319a8f3aca
|
6e8f2e28479566dbaa338300b2d61f784ff83f97
|
/.history/code/live_20210421141743.py
|
fc17bb0a93cda5b645c4b46e5443cb671d1f180d
|
[] |
no_license
|
eeng5/CV-final-project
|
55a7d736f75602858233ebc380c4e1d67ab2b866
|
580e28819560b86f6974959efb1d31ef138198fc
|
refs/heads/main
| 2023-04-09T21:28:21.531293
| 2021-04-21T19:57:22
| 2021-04-21T19:57:22
| 352,703,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,640
|
py
|
import os
import cv2
import sys
import numpy as np
from models import SimpleModel
from preprocess import Datasets
import hyperparameters as hp
import tensorflow as tf
from skimage.transform import resize
from PIL import Image, ImageFont, ImageDraw
from scipy.spatial import distance as dist
from imutils import face_utils
from imutils.video import VideoStream
import fastai
import fastai.vision
import imutils
import argparse
import time
import dlib
""" This file is a live video emotion detection application. To run simply activate the virtual environment in the code dirrectory via:
$ source cs14_30/bin/activate
Then run teh below command in the virtual environment:
$ python3 live.py
"""
class LiveApp:
    def __init__(self, data_path):
        # keep a reference to the data location used by this app
        self.data_path = data_path
    def doLive(self):
weights_str = "/Users/elizabethwang/Desktop/CS1430/CV-final-project/code/checkpoints/simple_model/041321-113618/your.weights.e015-acc0.6121.h5"
os.chdir(sys.path[0])
model = SimpleModel()
model(tf.keras.Input(shape=(hp.img_size, hp.img_size,3)))
model.load_weights(weights_str, by_name=False)
model.compile(
optimizer=model.optimizer,
loss=model.loss_fn,
metrics=["sparse_categorical_accuracy"],
)
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
vs = VideoStream(src=0).start()
start = time.perf_counter()
data = []
time_value = 0
out = cv2.VideoWriter(
"liveoutput.avi", cv2.VideoWriter_fourcc("M", "J", "P", "G"), 10, (450, 253)
)
while True:
frame = vs.read()
frame = imutils.resize(frame, width=450)
gray = frame
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_coord = face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(48, 48))
for coords in face_coord:
X, Y, w, h = coords
H, W, _ = frame.shape
X_1, X_2 = (max(0, X - int(w)), min(X + int(1.3 * w), W))
Y_1, Y_2 = (max(0, Y - int(0.1 * h)), min(Y + int(1.3 * h), H))
img_cp = gray[Y_1:Y_1+48, X_1:X_1+48].copy()
img_mod = createPixelArray(img_cp)
img_mod = np.expand_dims(img_mod, 0)
prediction = model.predict(img_mod)
p = np.argmax(prediction)
caption = ''
if (p == 0):
caption = 'Angry'
elif (p == 1):
caption = 'Disgust'
elif (p == 2):
caption = 'Fear'
elif (p == 3):
caption = 'Happy'
elif (p == 4):
caption = 'Sad'
elif (p == 5):
caption = 'Surprise'
elif (p == 6):
caption = 'Neutral'
cv2.rectangle(
img=frame,
pt1=(X_1, Y_1),
pt2=(X_2, Y_2),
color=(128, 128, 0),
thickness=2,
)
cv2.putText(
frame,
caption,
(10, frame.shape[0] - 25),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(225, 255, 255),
2,)
cv2.imshow("frame", frame)
out.write(frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
vs.stop()
out.release()
cv2.destroyAllWindows()
|
[
"natalie_rshaidat@brown.edu"
] |
natalie_rshaidat@brown.edu
|
803ff5ac5b0cdf521eddb1c05b2ba9f5a0625e88
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2518/60667/257400.py
|
5a34f96a54c8f08657b63d8da8e6099b6338fb0a
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
import math
houses = list(map(int, input().split(',')))
heaters = list(map(int, input().split(',')))
dist = []
for i in range(len(houses)):
dist.append(len(houses))
for heater in heaters:
for i in range(len(houses)):
dist[i] = min(dist[i], math.fabs(houses[i] - heater))
print(int(max(dist)))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
3b36ccf527d7f2266c29d77220345506c88dfb84
|
4a8bfa3407aa98a04ede3162f85467b1b5012fe7
|
/aiogram/api/types/input_venue_message_content.py
|
ae89bf991e07eb8b472c705371a17a215d91e73c
|
[] |
no_license
|
aiogram/tg-codegen
|
07ec80814eec46f464d2490fd27b7b6b27257f1b
|
ba3c2f893591d45dda418dd16e0646e260afdf14
|
refs/heads/master
| 2022-12-09T10:44:10.781570
| 2021-11-07T23:33:25
| 2021-11-07T23:33:25
| 218,523,371
| 24
| 5
| null | 2022-12-08T08:47:43
| 2019-10-30T12:33:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,278
|
py
|
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from .input_message_content import InputMessageContent
if TYPE_CHECKING:
pass
class InputVenueMessageContent(InputMessageContent):
"""
Represents the `content <https://core.telegram.org/bots/api#inputmessagecontent>`_ of a venue message to be sent as the result of an inline query.
Source: https://core.telegram.org/bots/api#inputvenuemessagecontent
"""
latitude: float
"""Latitude of the venue in degrees"""
longitude: float
"""Longitude of the venue in degrees"""
title: str
"""Name of the venue"""
address: str
"""Address of the venue"""
foursquare_id: Optional[str] = None
"""*Optional*. Foursquare identifier of the venue, if known"""
foursquare_type: Optional[str] = None
"""*Optional*. Foursquare type of the venue, if known. (For example, 'arts_entertainment/default', 'arts_entertainment/aquarium' or 'food/icecream'.)"""
google_place_id: Optional[str] = None
"""*Optional*. Google Places identifier of the venue"""
google_place_type: Optional[str] = None
"""*Optional*. Google Places type of the venue. (See `supported types <https://developers.google.com/places/web-service/supported_types>`_.)"""
|
[
"jroot.junior@gmail.com"
] |
jroot.junior@gmail.com
|
2aebcd46bf5395a3b680783cacda6f9c826dd5e0
|
ac085e82a957da4e59beefe52fae06e8e0077e09
|
/gen3va/endpoints/api/download_api.py
|
9979505563b22fad1ad79b4ccb1bdbb559c60975
|
[] |
no_license
|
MaayanLab/gen3va
|
9cb670f971690c911a09f633c49c2e8a35d69afc
|
b08755993633f5bfc710be088da327813b8df1fc
|
refs/heads/master
| 2022-05-13T13:13:35.151752
| 2022-04-15T15:04:13
| 2022-04-15T15:04:13
| 67,866,413
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,028
|
py
|
"""Manages downloads.
"""
import os
import zipfile
import StringIO
from flask import Blueprint, Response
from substrate import Tag
from gen3va import database
from gen3va.config import Config
download_api = Blueprint('download_api',
__name__,
url_prefix='%s/download' % Config.BASE_URL)
DOWNLOAD_DIR = '%s/static/downloads' % Config.SERVER_FILE_ROOT
@download_api.route('/<tag_name>', methods=['GET'])
def test(tag_name):
"""Returns a zipped directory with one plain text file for each signature.
"""
tag = database.get(Tag, tag_name, 'name')
# Write the contents of the signatures to file and get the filenames.
filenames = _get_signature_files(tag)
# Folder name in ZIP archive which contains the above files
zip_subdir = tag.name
zip_filename = '%s.zip' % zip_subdir
# Open StringIO to grab in-memory ZIP contents
s = StringIO.StringIO()
# The zip compressor
zf = zipfile.ZipFile(s, 'w')
for fpath in filenames:
# Calculate path for file in zip
fdir, fname = os.path.split(fpath)
zip_path = os.path.join(zip_subdir, fname)
# Add file, at correct path
zf.write(fpath, zip_path)
zf.close()
# Grab ZIP file from in-memory, make response with correct MIME-type
resp = Response(s.getvalue(), mimetype='application/x-zip-compressed')
resp.headers['Content-Disposition'] = 'attachment; filename=%s' % zip_filename
# Remove files from disc
for f in filenames:
os.remove(f)
return resp
def _get_signature_files(tag):
"""Returns a list of filenames where each file has the contents of a gene
signature.
"""
filenames = []
for idx, sig in enumerate(tag.report.gene_signatures):
fname = _write_signature_to_file(idx, sig)
filenames.append(fname)
return filenames
def _write_signature_to_file(idx, gene_signature):
"""Returns the name of a file with the contents of a gene signature.
"""
name = gene_signature.name.replace('/', '')
path = '%s/%s_%s.txt' % (DOWNLOAD_DIR, idx, name)
with open(path, 'w+') as f:
rm = gene_signature.required_metadata
_write_metadata(f, 'diff_exp_method', rm.diff_exp_method)
_write_metadata(f, 'ttest_correction_method', rm.ttest_correction_method)
_write_metadata(f, 'cutoff', rm.cutoff)
_write_metadata(f, 'threshold', rm.threshold)
for om in gene_signature.filtered_optional_metadata:
_write_metadata(f, om.name, om.value)
f.write('!end_metadata\n\n')
for rg in gene_signature.combined_genes:
line = '%s\t%s\n' % (rg.gene.name, rg.value)
f.write(line)
return path
def _write_metadata(f, key, value):
"""Writes metadata key-value pair to file, encoding to UTF-8.
"""
try:
line = '!%s\t%s\n' % (key, value)
line = line.encode('utf-8')
f.write(line)
except UnicodeEncodeError as e:
print(e)
|
[
"ggundersen@gmail.com"
] |
ggundersen@gmail.com
|
8f367326fc506820a4dc0f6116e2f02a0c3a3d1e
|
9d84fa1d80c4248ad304f4b1ac32cb6adfa7a0f1
|
/mk_dti_report.py
|
d07b0c8c0884dd676b8f1e4a0fc95068c4a97fd6
|
[
"MIT"
] |
permissive
|
poldrack/dtiqa
|
6a504848af736eb2c585a337568c41a11812c0b6
|
3b44e3dbec4cace8fb4bb2fcdf43e4f64a649898
|
refs/heads/master
| 2021-01-10T21:01:33.025637
| 2015-07-13T21:20:31
| 2015-07-13T21:20:31
| 39,037,793
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
"""
make report for QA using reportlab module
"""
from reportlab.pdfgen import canvas
import numpy as N
import time
import os
def mk_dti_report(infile,dtidir,datavars):
#imgsnr,meansfnr,spikes,badvols):
timestamp=time.strftime('%B %d, %Y: %H:%M:%S')
report_header=[]
report_header.append('QA Report: %s'%timestamp)
report_header.append('directory: %s'%os.path.dirname(infile))
report_header.append('filename: %s'%os.path.basename(infile))
report_header.append('Mean SNR: %f'%N.mean(datavars['imgsnr']))
badvols=['%d'%i for i in datavars['badvols']]
report_header.append('# potentially bad gradients: %d (%s)'%(len(datavars['badvols']),' '.join(badvols)))
c = canvas.Canvas(os.path.join(dtidir,"QA_report.pdf"))
yloc=820
stepsize=16
for line in report_header:
c.drawString(10,yloc,line)
yloc=yloc-stepsize
timeseries_to_draw=['snr.png','fd.png','interleavecorr.png','slicecorr.png']
tsfiles=[os.path.join(dtidir,t) for t in timeseries_to_draw]
ts_img_size=[467,140]
yloc=yloc-ts_img_size[1]
for imfile in tsfiles:
c.drawImage(imfile, 45,yloc,width=ts_img_size[0],height=ts_img_size[1])
yloc=yloc-ts_img_size[1]
c.showPage()
# yloc=650
# c.drawImage(os.path.join(qadir,'spike.png'),20,yloc,width=500,height=133)
yloc=330
images_to_draw=['FA.png','worst_gradient.png']
imfiles=[os.path.join(dtidir,t) for t in images_to_draw]
c.drawImage(imfiles[0],0,yloc,width=300,height=300)
c.drawImage(imfiles[1],300,yloc,width=300,height=300)
# yloc=20
# c.drawImage(imfiles[2],0,yloc,width=325,height=325)
c.save()
|
[
"poldrack@gmail.com"
] |
poldrack@gmail.com
|
5bd5a62e7c57b3658b7b1fd7f54e5fffc4ca9fc9
|
a14ec6e367e6a471bfc74c066fb958ef585bc269
|
/2019/04/common.py
|
89c29d75ecf7bdfd829c92b56ca0d45e14d2a616
|
[] |
no_license
|
jimhendy/AoC
|
90641814ed431f46a8500ff0f022c6c957567563
|
a1727f88bc2e6f739d65902dce188377966b3fb4
|
refs/heads/master
| 2023-09-02T14:48:39.860352
| 2023-08-28T08:09:19
| 2023-08-28T08:09:19
| 225,152,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
import numpy as np
import pandas as pd
def possibles(inputs):
Min, Max = (int(i) for i in inputs.split("-"))
return pd.DataFrame([list(str(i)) for i in np.arange(Min, Max)]).astype(int)
def diffs(possibles):
return possibles.diff(axis=1).fillna(0)
def counts(possibles):
counts = {
i: possibles.eq(i).sum(axis=1) for i in np.unique(possibles.values.ravel())
}
return pd.concat(counts, axis=1, sort=False)
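# Illustrative usage (assumes the puzzle input is a "MIN-MAX" range string):
if __name__ == "__main__":
    digits = possibles("123450-123460")
    print(diffs(digits))
    print(counts(digits))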
|
[
"jimhendy88@gmail.com"
] |
jimhendy88@gmail.com
|
7e5988d2abc9de3328191dcf4a48925e1114b43a
|
1dbc955c3d717476fa75a48cc87a05e2eceb0002
|
/easy/min_depth.py
|
bc408df46fa538cc996dbb3442ff9a4ebdd9d9e3
|
[] |
no_license
|
gregorysimpson13/leetcode
|
e68eaee2ba38a1edff119eda1ccdeacc0c400d26
|
ae88b9f9979a5643497cb2dfeb90d19a1bcdb137
|
refs/heads/master
| 2023-03-31T23:58:58.940234
| 2021-04-11T14:37:31
| 2021-04-11T14:37:31
| 258,632,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
# 111. Minimum Depth of Binary Tree - EASY
# https://leetcode.com/problems/minimum-depth-of-binary-tree/submissions/
# Given a binary tree, find its minimum depth.
# The minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node.
# Note: A leaf is a node with no children.
# Example:
# Given binary tree [3,9,20,null,null,15,7],
# 3
# / \
# 9 20
# / \
# 15 7
# return its minimum depth = 2.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
def minDepth(self, root: TreeNode) -> int:
if root == None: return 0
def getDepth(node, depth=1):
if node == None:
return float('inf')
if node.left == None and node.right == None:
return depth
return min(getDepth(node.left, depth+1), getDepth(node.right, depth+1))
return getDepth(root, 1)
|
[
"gregorysimpson13@gmail.com"
] |
gregorysimpson13@gmail.com
|
2d0c20dbdb1d0b69c0e130f7c0b4fb5917f56aed
|
e2bbbf67487fddfd4648c3f37d84849efee5bfaa
|
/backend/hazardest/game/tests/test_trick.py
|
644cfa0ee7485c64b02fe2197757d46af891215b
|
[] |
no_license
|
JordanSlaman/hazardest
|
2a6f813e86c58fdee2a4b6cde0b9634c2750b57a
|
07d055d9cc8423f5fc45fc2992f8a8798d054cf2
|
refs/heads/master
| 2022-09-01T17:35:22.414359
| 2022-08-07T22:47:23
| 2022-08-07T22:47:23
| 98,753,092
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
from django.test import TestCase
from ..models.game import Game
from ..models.hand import Hand
from ..models.trick import Trick
from ..utils.create_cards import create_cards
from .fixtures import create_game_with_players
class TrickModelTests(TestCase):
def setUp(self):
create_cards()
self.test_game = create_game_with_players()
self.test_game.start_game()
# trump turn stuff
self.test_hand = self.test_game.active_hand
self.trick = Trick.objects.create(hand=self.test_hand)
# self.trick.save()
def test_play_card(self):
player = self.test_hand.active_player
card = player.cards.last()
self.trick.play_card(player=player, card=card)
x = 3
# assert player does not have card.
# Assert card in trick
# card winning?
# def test_deals_5_cards(self):
# game = Game.objects.get(pk=1)
#
# a = game.player_set.get(user__username='alice')
# new_hand = Hand(game=game, dealer=a)
# new_hand.deal()
#
# self.assertIs(a.cards.count(), 5)
|
[
"jordan.slaman@gmail.com"
] |
jordan.slaman@gmail.com
|
2623b29fb1f656b795b705e7ca7f4f8ed1255c7b
|
0e25dc15ae9efce8bfd716d4d2041da07767968b
|
/qbench/benchmarks/QLib/OPENQL_converted/benstein_vazirani_41b_secret_16.py
|
fc9be6f9ad7d194f78cb38cb05d15e7415b54a28
|
[] |
no_license
|
alxhotel/crossbar-bench
|
f608fc0062b4f8a5162ec33d61c0204aaf27b6ff
|
3bf7536e7697d29c3089b0ba564ba22d39698b88
|
refs/heads/master
| 2021-07-13T16:06:50.085838
| 2020-10-04T23:39:05
| 2020-10-04T23:39:05
| 213,409,122
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,090
|
py
|
from openql import openql as ql
import os
import argparse
def circuit(config_file, new_scheduler='yes', scheduler='ASAP', uniform_sched= 'no', sched_commute = 'yes', mapper='base', moves='no', maptiebreak='random', initial_placement='no', output_dir_name='test_output', optimize='no', measurement=True, log_level='LOG_WARNING'):
curdir = os.path.dirname(__file__)
output_dir = os.path.join(curdir, output_dir_name)
ql.set_option('output_dir', output_dir)
ql.set_option('optimize', optimize)
ql.set_option('scheduler', scheduler)
ql.set_option('scheduler_uniform', uniform_sched)
ql.set_option('mapper', mapper)
ql.set_option('initialplace', initial_placement)
ql.set_option('log_level', log_level)
ql.set_option('scheduler_post179', new_scheduler)
ql.set_option('scheduler_commute', sched_commute)
ql.set_option('mapusemoves', moves)
ql.set_option('maptiebreak', maptiebreak)
config_fn = os.path.join(curdir, config_file)
# platform = ql.Platform('platform_none', config_fn)
platform = ql.Platform('starmon', config_fn)
sweep_points = [1,2]
num_circuits = 1
num_qubits = 43
p = ql.Program('benstein_vazirani_41b_secret_16', platform, num_qubits)
p.set_sweep_points(sweep_points, num_circuits)
k = ql.Kernel('benstein_vazirani_41b_secret_16', platform, num_qubits)
k.gate('prepz',[41])
k.gate('x',[41])
k.gate('h',[0])
k.gate('h',[1])
k.gate('h',[2])
k.gate('h',[3])
k.gate('h',[4])
k.gate('h',[5])
k.gate('h',[6])
k.gate('h',[7])
k.gate('h',[8])
k.gate('h',[9])
k.gate('h',[10])
k.gate('h',[11])
k.gate('h',[12])
k.gate('h',[13])
k.gate('h',[14])
k.gate('h',[15])
k.gate('h',[16])
k.gate('h',[17])
k.gate('h',[18])
k.gate('h',[19])
k.gate('h',[20])
k.gate('h',[21])
k.gate('h',[22])
k.gate('h',[23])
k.gate('h',[24])
k.gate('h',[25])
k.gate('h',[26])
k.gate('h',[27])
k.gate('h',[28])
k.gate('h',[29])
k.gate('h',[30])
k.gate('h',[31])
k.gate('h',[32])
k.gate('h',[33])
k.gate('h',[34])
k.gate('h',[35])
k.gate('h',[36])
k.gate('h',[37])
k.gate('h',[38])
k.gate('h',[39])
k.gate('h',[40])
k.gate('h',[41])
k.gate('cnot',[4,41])
k.gate('h',[0])
k.gate('h',[1])
k.gate('h',[2])
k.gate('h',[3])
k.gate('h',[4])
k.gate('h',[5])
k.gate('h',[6])
k.gate('h',[7])
k.gate('h',[8])
k.gate('h',[9])
k.gate('h',[10])
k.gate('h',[11])
k.gate('h',[12])
k.gate('h',[13])
k.gate('h',[14])
k.gate('h',[15])
k.gate('h',[16])
k.gate('h',[17])
k.gate('h',[18])
k.gate('h',[19])
k.gate('h',[20])
k.gate('h',[21])
k.gate('h',[22])
k.gate('h',[23])
k.gate('h',[24])
k.gate('h',[25])
k.gate('h',[26])
k.gate('h',[27])
k.gate('h',[28])
k.gate('h',[29])
k.gate('h',[30])
k.gate('h',[31])
k.gate('h',[32])
k.gate('h',[33])
k.gate('h',[34])
k.gate('h',[35])
k.gate('h',[36])
k.gate('h',[37])
k.gate('h',[38])
k.gate('h',[39])
k.gate('h',[40])
k.gate('h',[41])
if measurement:
for q in range(num_qubits):
k.gate('measure', [q])
p.add_kernel(k)
p.compile()
ql.set_option('mapper', 'no')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='OpenQL compilation of a Quantum Algorithm')
parser.add_argument('config_file', help='Path to the OpenQL configuration file to compile this algorithm')
parser.add_argument('--new_scheduler', nargs='?', default='yes', help='Scheduler defined by Hans')
parser.add_argument('--scheduler', nargs='?', default='ASAP', help='Scheduler specification (ASAP (default), ALAP, ...)')
    parser.add_argument('--uniform_sched', nargs='?', default='no', help='Uniform scheduler activation (yes or no)')
parser.add_argument('--sched_commute', nargs='?', default='yes', help='Permits two-qubit gates to be commutable')
parser.add_argument('--mapper', nargs='?', default='base', help='Mapper specification (base, minextend, minextendrc)')
    parser.add_argument('--moves', nargs='?', default='no', help='Allow the use of moves')
parser.add_argument('--maptiebreak', nargs='?', default='random', help='')
parser.add_argument('--initial_placement', nargs='?', default='no', help='Initial placement specification (yes or no)')
parser.add_argument('--out_dir', nargs='?', default='test_output', help='Folder name to store the compilation')
parser.add_argument('--measurement', nargs='?', default=True, help='Add measurement to all the qubits in the end of the algorithm')
args = parser.parse_args()
try:
circuit(args.config_file, args.new_scheduler, args.scheduler, args.uniform_sched, args.sched_commute, args.mapper, args.moves, args.maptiebreak, args.initial_placement, args.out_dir)
except TypeError:
        print('\nCompiled, but some gate is not defined in the configuration file. \nThe gate will be invoked as is.')
raise
|
[
"alxmorais8@msn.com"
] |
alxmorais8@msn.com
|
a5c147f2b13a9262b93ad5d48796d82c3012edfb
|
04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4
|
/Lib/objc/StoreKit.py
|
4b919a9b01a65ef35f5ae3e2e78d36703567db64
|
[
"MIT"
] |
permissive
|
ColdGrub1384/Pyto
|
64e2a593957fd640907f0e4698d430ea7754a73e
|
7557485a733dd7e17ba0366b92794931bdb39975
|
refs/heads/main
| 2023-08-01T03:48:35.694832
| 2022-07-20T14:38:45
| 2022-07-20T14:38:45
| 148,944,721
| 884
| 157
|
MIT
| 2023-02-26T21:34:04
| 2018-09-15T22:29:07
|
C
|
UTF-8
|
Python
| false
| false
| 5,635
|
py
|
"""
Classes from the 'StoreKit' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
SKCloudServiceController = _Class("SKCloudServiceController")
SKOverlay = _Class("SKOverlay")
_SKStoreProductActivityAnimationController = _Class(
"_SKStoreProductActivityAnimationController"
)
SKAccountPageSpecifierProvider = _Class("SKAccountPageSpecifierProvider")
SKPurchaseIntent = _Class("SKPurchaseIntent")
SKPaymentQueueClient = _Class("SKPaymentQueueClient")
SKPaymentDiscount = _Class("SKPaymentDiscount")
SKPaymentDiscountInternal = _Class("SKPaymentDiscountInternal")
SKStorePageRequest = _Class("SKStorePageRequest")
SKStoreReviewController = _Class("SKStoreReviewController")
SKServiceProxy = _Class("SKServiceProxy")
SKInvocationQueueProxy = _Class("SKInvocationQueueProxy")
SKInternalProductStorePromotionController = _Class(
"SKInternalProductStorePromotionController"
)
SKStorefront = _Class("SKStorefront")
SKCloudServiceSetupExtension = _Class("SKCloudServiceSetupExtension")
SKXPCConnection = _Class("SKXPCConnection")
SKCloudServiceSetupConfiguration = _Class("SKCloudServiceSetupConfiguration")
SKScrollDetector = _Class("SKScrollDetector")
SKWeakContainer = _Class("SKWeakContainer")
SKEntitlementChecker = _Class("SKEntitlementChecker")
SKAdNetwork = _Class("SKAdNetwork")
SKArcadeService = _Class("SKArcadeService")
SKProductStorePromotionController = _Class("SKProductStorePromotionController")
SKPrivacyController = _Class("SKPrivacyController")
SKOverlayConfiguration = _Class("SKOverlayConfiguration")
SKOverlayAppClipConfiguration = _Class("SKOverlayAppClipConfiguration")
SKOverlayAppConfiguration = _Class("SKOverlayAppConfiguration")
SKURLParserBagContract = _Class("SKURLParserBagContract")
SKProductSubscriptionPeriod = _Class("SKProductSubscriptionPeriod")
SKProductSubscriptionPeriodInternal = _Class("SKProductSubscriptionPeriodInternal")
SKPaymentTransactionInternal = _Class("SKPaymentTransactionInternal")
SKPaymentTransaction = _Class("SKPaymentTransaction")
SKRemoteDismissingTransition = _Class("SKRemoteDismissingTransition")
SKInGameAnalytics = _Class("SKInGameAnalytics")
SKOverlayTransitionContext = _Class("SKOverlayTransitionContext")
SKPaymentInternal = _Class("SKPaymentInternal")
SKPayment = _Class("SKPayment")
SKMutablePayment = _Class("SKMutablePayment")
SKCloudServiceSetupReloadContext = _Class("SKCloudServiceSetupReloadContext")
SKDownloadChangeset = _Class("SKDownloadChangeset")
SKDownload = _Class("SKDownload")
SKDownloadInternal = _Class("SKDownloadInternal")
SKProductDiscount = _Class("SKProductDiscount")
SKProductDiscountInternal = _Class("SKProductDiscountInternal")
SKProductInternal = _Class("SKProductInternal")
SKProduct = _Class("SKProduct")
SKProductsResponseInternal = _Class("SKProductsResponseInternal")
SKProductsResponse = _Class("SKProductsResponse")
SKProductsRequestInternal = _Class("SKProductsRequestInternal")
SKRequestInternal = _Class("SKRequestInternal")
SKRequest = _Class("SKRequest")
SKInstallSheetStatusUpdateRequest = _Class("SKInstallSheetStatusUpdateRequest")
SKPromotedIAPGetInfoInternalRequest = _Class("SKPromotedIAPGetInfoInternalRequest")
SKHandleInvalidReceiptRequest = _Class("SKHandleInvalidReceiptRequest")
SKReceiptRefreshRequest = _Class("SKReceiptRefreshRequest")
SKPromotedIAPSetOrderRequest = _Class("SKPromotedIAPSetOrderRequest")
SKPromotedIAPGetVisibilityRequest = _Class("SKPromotedIAPGetVisibilityRequest")
SKPromotedIAPSetVisibilityRequest = _Class("SKPromotedIAPSetVisibilityRequest")
SKPromotedIAPGetOrderRequest = _Class("SKPromotedIAPGetOrderRequest")
SKProductsRequest = _Class("SKProductsRequest")
SKWeakReference = _Class("SKWeakReference")
SKDefaultsManager = _Class("SKDefaultsManager")
SKPaymentQueueInternal = _Class("SKPaymentQueueInternal")
SKPaymentQueue = _Class("SKPaymentQueue")
SKSpecifierWithSubtitleCell = _Class("SKSpecifierWithSubtitleCell")
SKStoreReviewPresentationWindow = _Class("SKStoreReviewPresentationWindow")
SKStarRatingControl = _Class("SKStarRatingControl")
SKStorePageViewController = _Class("SKStorePageViewController")
SKStoreProductActivityViewController = _Class("SKStoreProductActivityViewController")
SKComposeReviewViewController = _Class("SKComposeReviewViewController")
SKCloudServiceSetupViewController = _Class("SKCloudServiceSetupViewController")
SKProductPageExtension = _Class("SKProductPageExtension")
SKStoreProductViewController = _Class("SKStoreProductViewController")
SKTermsPageViewController = _Class("SKTermsPageViewController")
SKAccountPageViewController = _Class("SKAccountPageViewController")
SKStoreReviewViewController = _Class("SKStoreReviewViewController")
SKArcadeSubscribeViewController = _Class("SKArcadeSubscribeViewController")
SKStoreExtension = _Class("SKStoreExtension")
SKRemoteProductActivityViewController = _Class("SKRemoteProductActivityViewController")
SKRemoteStorePageViewController = _Class("SKRemoteStorePageViewController")
SKRemoteComposeReviewViewController = _Class("SKRemoteComposeReviewViewController")
SKRemoteReviewViewController = _Class("SKRemoteReviewViewController")
SKRemoteProductViewController = _Class("SKRemoteProductViewController")
SKStoreRemoteViewController = _Class("SKStoreRemoteViewController")
SKCloudServiceSetupRemoteViewController = _Class(
"SKCloudServiceSetupRemoteViewController"
)
SKRemoteAccountPageViewController = _Class("SKRemoteAccountPageViewController")
SKStarRatingAlertController = _Class("SKStarRatingAlertController")
|
[
"adrilabbelol@gmail.com"
] |
adrilabbelol@gmail.com
|
23a9af8c3a7546c53ad2b85b5f514b566b53d151
|
c36679186f669c6e3bd1c106c96d4a17be1f5ab1
|
/Practice_Telusko/121.py
|
ddb01801081aec49788165b80cbbbe017000e1b4
|
[] |
no_license
|
touhiduzzaman-tuhin/python-code-university-life
|
60a3d671b200a6f5222c6d176c13c5f20f013509
|
6d2e3d90d430faa5c83fe79e7fb1ebe516994762
|
refs/heads/master
| 2023-03-22T15:18:10.636203
| 2021-03-06T18:52:04
| 2021-03-06T18:52:04
| 332,467,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
x = int(input("Enter A Number : "))
if x == 0:
print("Zero Number")
elif x > 0:
print("Positive Number")
if x % 2 == 0:
print("Even Number")
else:
print("Odd Number")
else:
print("Negative Number")
|
[
"touhiduzzamantuhin95@gmail.com"
] |
touhiduzzamantuhin95@gmail.com
|
f0457a3c6eba2170502ff89f45eef7cd3dcb14d9
|
36bfa8c212270b3c1eaab77210a525f0bbef6874
|
/podoc/ast/__init__.py
|
2bb4bd02683ba6fae13666a16f9937b333aa96eb
|
[
"BSD-3-Clause"
] |
permissive
|
podoc/podoc
|
ce7b22571251ae90b56d272eff0277ec6090ea75
|
1868e7f82a521b1722dca528802acedf9010b11a
|
refs/heads/master
| 2021-01-17T15:18:12.158127
| 2018-02-12T15:16:10
| 2018-02-12T15:16:10
| 41,724,522
| 54
| 9
|
BSD-3-Clause
| 2018-02-04T15:08:13
| 2015-09-01T07:44:50
|
Python
|
UTF-8
|
Python
| false
| false
| 338
|
py
|
# -*- coding: utf-8 -*-
# flake8: noqa
"""JSON plugin."""
#-------------------------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------------------------
from ._ast import ASTNode, ASTPlugin, PandocPlugin, ast_from_pandoc
|
[
"cyrille.rossant@gmail.com"
] |
cyrille.rossant@gmail.com
|
6f337a988d6c798586c741832f986ede08989829
|
a964f0f3f93a84d5195042d3c1bb2288e8b62161
|
/muddery/server/dao/event_data.py
|
3bec35efc460fb681d575e820864148aa197845a
|
[
"BSD-3-Clause"
] |
permissive
|
nobodxbodon/muddery
|
474433791b75d2f2130e6b758fb3126e2d56230b
|
4b4c6c0dc5cc237a5df012a05ed260fad1a793a7
|
refs/heads/master
| 2023-06-19T19:28:39.252340
| 2021-07-14T15:07:47
| 2021-07-14T15:07:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
"""
Query and deal common tables.
"""
from muddery.server.dao.base_query import BaseQuery
from muddery.server.dao.worlddata import WorldData
class EventData(BaseQuery):
"""
Object's event data.
"""
table_name = "event_data"
@classmethod
def get_object_event(cls, object_key):
"""
Get object's event.
"""
return WorldData.get_table_data(cls.table_name, trigger_obj=object_key)
|
[
"luyijun999@gmail.com"
] |
luyijun999@gmail.com
|
2192bb616634a4295e7ef0cd9808e3bbab101988
|
e629795e54c7f0bf79c9128adc8bc9154bebfb19
|
/dynamic_programming/leetcode/python/leet_code_1289.py
|
4b2f08063b0352438dfb7adf0f552bae3817f7f6
|
[] |
no_license
|
sm2774us/leetcode_hackerrank_codesignal_practice
|
1aff675b8b3e6b58e3bb2f81c46c8646da50589f
|
bdc2407f391a8bc08e3a119227c384f3e15bb693
|
refs/heads/main
| 2023-07-19T21:59:31.638152
| 2021-08-25T12:29:59
| 2021-08-25T12:29:59
| 392,862,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
from typing import List
class Solution:
    def minFallingPathSum(self, A: List[List[int]]) -> int:
rows = len(A)
cols = len(A[0])
prv_row_min1 = prv_row_min2 = 0
prev_pos1 = -1
for i in range(rows):
curr_row_min1 = curr_row_min2 = float('inf')
for j in range(cols):
if prev_pos1 != j:
min_val = prv_row_min1
else:
min_val = prv_row_min2
if min_val + A[i][j] < curr_row_min1:
curr_row_min2 = curr_row_min1
curr_row_min1 = min_val + A[i][j]
curr_pos = j
else:
curr_row_min2 = min(curr_row_min2, min_val+A[i][j])
prv_row_min1, prv_row_min2 = curr_row_min1, curr_row_min2
prev_pos1 = curr_pos
return prv_row_min1
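# Illustrative check (not part of the original file), using the known example
# for LeetCode 1289: picking 1, 5 and 7 from non-adjacent columns gives 13.
if __name__ == "__main__":
    grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    print(Solution().minFallingPathSum(grid))  # expected: 13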
|
[
"sm2774us@gmail.com"
] |
sm2774us@gmail.com
|
bdb032f9ec951da8035b9e788b89c0624320bc26
|
e1243b212be599a801e8d1fb5fd00e0ab0db974d
|
/models/EDSR_freq.py
|
fa798b278b49185c4ded544307b278cd3d6168e4
|
[] |
no_license
|
hyungminr/PyTorch_SISR
|
d17358ca94ccf7a82223a3b383b7529f1bfff0cd
|
5aa22c3f68d262eb7ff80618cb3a75a7b488dacc
|
refs/heads/master
| 2023-04-17T04:15:58.591604
| 2021-04-27T01:02:20
| 2021-04-27T01:02:20
| 328,836,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,452
|
py
|
from models.EDSR import EDSR
from models.EDSR import MeanShift
import math
import torch.nn as nn
import torch
class EDSR_freq(nn.Module):
def __init__(self, scale=2, num_feats=64, kernel=3, padding=1, bias=True):
super().__init__()
self.model_image = EDSR(scale=scale, num_feats=32)
self.model_high = EDSR(scale=scale, num_feats=24)
self.model_low = EDSR(scale=scale, num_feats= 8)
layers = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
layers += [nn.Conv2d(in_channels=num_feats, out_channels=num_feats*4, kernel_size=kernel, padding=padding, bias=bias)]
layers += [nn.PixelShuffle(2)]
layers += [nn.Conv2d(in_channels=num_feats, out_channels=3, kernel_size=kernel, padding=padding, bias=bias)]
self.tail = nn.Sequential(*layers)
self.add_mean = MeanShift(mode='add')
def forward(self, img, high, low):
sr_image, deep_image = self.model_image(img)
sr_high, deep_high = self.model_high(high)
sr_low, deep_low = self.model_low(low)
deep = torch.cat((deep_image[0] + deep_image[-1],
deep_high[0] + deep_high[-1],
deep_low[0] + deep_low[-1]), dim=1)
x_up = self.tail(deep)
out = self.add_mean(x_up)
return out, [sr_image, sr_high, sr_low]
|
[
"raingold1347@snu.ac.kr"
] |
raingold1347@snu.ac.kr
|
a9ee77da3ed775af437905f8a77e31c68934d890
|
b47c136e077f5100478338280495193a8ab81801
|
/Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/motorkit_dc_test.py
|
acee534bc1f2a7feb18935e5d4caa5d3f8df88a2
|
[
"Apache-2.0"
] |
permissive
|
IanSMoyes/SpiderPi
|
22cd8747cc389f674cc8d95f32b4d86f9b7b2d8e
|
cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1
|
refs/heads/master
| 2023-03-20T22:30:23.362137
| 2021-03-12T17:37:33
| 2021-03-12T17:37:33
| 339,555,949
| 16
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
from adafruit_motorkit import MotorKit
kit = MotorKit(i2c=board.I2C())
kit.motor1.throttle = 0
while True:
print("Forward!")
kit.motor1.throttle = 0.5
time.sleep(1)
print("Speed up...")
for i in range(0, 101):
speed = i * 0.01
kit.motor1.throttle = speed
time.sleep(0.01)
print("Slow down...")
for i in range(100, -1, -1):
speed = i * 0.01
kit.motor1.throttle = speed
time.sleep(0.01)
print("Backward!")
kit.motor1.throttle = -0.5
time.sleep(1)
print("Speed up...")
for i in range(0, -101, -1):
speed = i * 0.01
kit.motor1.throttle = speed
time.sleep(0.01)
print("Slow down...")
for i in range(-100, 1):
speed = i * 0.01
kit.motor1.throttle = speed
time.sleep(0.01)
print("Stop!")
kit.motor1.throttle = 0
time.sleep(1)
|
[
"ians.moyes@gmail.com"
] |
ians.moyes@gmail.com
|
75d822e59f72d9e13076734236f48c4e1dafc564
|
12112cdb7efa8a7026925f3eea9bf096b098ed3b
|
/yy_api/dao/address.py
|
6517613dac58761ab644934790528a0fc6197e79
|
[] |
no_license
|
smallstrong0/Earth
|
c377f43216515b21191778c8a529c1c073bd8954
|
c81c2e0e0ea18af62ef8f81b2ea84d2cc0faed8d
|
refs/heads/master
| 2021-05-16T00:08:12.047520
| 2019-07-27T01:39:48
| 2019-07-27T01:39:48
| 106,982,264
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 877
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import core.mongo as db_core
import tool.c_utils
import tool.t_utils
collection = db_core.DbUtils().db.address
"""
字段规则
ctime
mtime
address_id 地址唯一id
user_id 用户ID全局唯一 UUID生成
name 收货人姓名
phone 电话
qq qq号
we_chat 微信号
address 地址
place 具体门牌号
"""
def create(dic={}):
dic['ctime'] = tool.t_utils.get_ts()
dic['mtime'] = tool.t_utils.get_ts()
code = collection.insert(dic)
if code:
return dic
else:
return None
def update(old={}, new={}):
return True if collection.update(old, {'$set': new}) else False
def select(where={}):
cursor = collection.find(where)
if cursor.count() > 0:
return list(cursor)
else:
return []
def delete(field={}):
return True if collection.remove(field) else False
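# Illustrative usage (not part of the original module); requires a reachable
# MongoDB instance and uses made-up values that follow the field rules above.
if __name__ == '__main__':
    create({'address_id': 'a-001', 'user_id': 'u-001', 'name': 'Alice',
            'phone': '12345678901', 'address': 'Example Road 1', 'place': 'Room 101'})
    print(select({'user_id': 'u-001'}))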
|
[
"393019766@qq.com"
] |
393019766@qq.com
|
212fd46f2e5f7911e6424e3b7c2bc3491bab7a3b
|
f8b9e5de8823ff810ec445b6fa6d0e34f7b6319f
|
/Django/Users_project/apps/Users_app/migrations/0001_initial.py
|
01e46249e25ed13984f49c592311b5448c9212d9
|
[] |
no_license
|
amalfushi/Python
|
6c042443a8aeae15fc96a41a692abdbea05db863
|
067c2cef722457e884833f77baf9f44f45a4a165
|
refs/heads/master
| 2021-01-24T04:08:21.278071
| 2018-02-26T06:25:59
| 2018-02-26T06:25:59
| 122,923,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 879
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-18 14:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('email_address', models.CharField(max_length=255)),
('age', models.IntegerField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"dustin.p.schroeder@gmail.com"
] |
dustin.p.schroeder@gmail.com
|
2f7a6efeb9a5d3c242058dcae19f31ce8c0190c8
|
b4f5055ab8c61098a66cfb10095d835acbb373b7
|
/dataaccess/basedao.py
|
3075a4cdbe3bb3d571997f4822803471207d9fc9
|
[] |
no_license
|
shmyhero/option
|
f9f01f98144e5f58023ddacfd133647a019b63a2
|
436027c8b33a7bc25cebcf16daa6962eb079c220
|
refs/heads/master
| 2021-03-16T09:03:04.229299
| 2017-11-29T08:15:02
| 2017-11-29T08:15:02
| 111,675,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,013
|
py
|
import traceback
import datetime
import mysql.connector
from utils.logger import Logger
from common.pathmgr import PathMgr
from common.configmgr import ConfigMgr
class BaseDAO(object):
def __init__(self):
self.logger = Logger(self.__class__.__name__, PathMgr.get_log_path())
@staticmethod
def get_connection():
db_config = ConfigMgr.get_db_config()
return mysql.connector.connect(host=db_config['host'], user=db_config['user'], password=db_config['password'], database=db_config['database'])
@staticmethod
def python_value_to_sql_value(val):
if val is not None:
if type(val) is float:
return '{:.5f}'.format(val)
else:
return str(val)
else:
return 'null'
@staticmethod
def mysql_format(template, *args):
mysql_args = map(BaseDAO.python_value_to_sql_value, args)
return template.format(*mysql_args)
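    # For example (values chosen purely for illustration):
    #   BaseDAO.mysql_format("insert into t (a, b) values ({}, {})", 0.1, None)
    #   returns "insert into t (a, b) values (0.10000, null)"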
def select(self, query, cursor=None):
#self.logger.info('query:%s' % query)
conn = None
if cursor is None:
conn = BaseDAO.get_connection()
cursor = conn.cursor()
try:
cursor.execute(query)
rows = cursor.fetchall()
return rows
except Exception as e:
error_message = "Query:{}, error message: {}, Stack Trace: {}".format(query, str(e), traceback.format_exc())
self.logger.exception(error_message)
finally:
if conn:
conn.close()
def execute_query(self, query, cursor=None):
#self.logger.info('query:%s' % query)
conn = None
if cursor is None:
conn = BaseDAO.get_connection()
cursor = conn.cursor()
try:
cursor.execute(query)
if conn:
conn.commit()
except mysql.connector.IntegrityError:
pass
except Exception as e:
error_message = "Query:{}, error message: {}, Stack Trace: {}".format(query, str(e), traceback.format_exc())
self.logger.exception(error_message)
finally:
if conn:
conn.close()
def execute_query_list(self, query_list):
conn = BaseDAO.get_connection()
cursor = conn.cursor()
try:
query_for_log_exception = None
for query in query_list:
#self.logger.info('query:%s' % query)
query_for_log_exception = query
cursor.execute(query)
conn.commit()
except Exception as e:
error_message = "Query:{}, error message: {}, Stack Trace: {}".format(query_for_log_exception, str(e), traceback.format_exc())
self.logger.exception(error_message)
finally:
conn.close()
if __name__ == '__main__':
#print BaseDAO.mysql_format('insert into table (field1, field2) values ({}, {})', None, None)
    print(BaseDAO.python_value_to_sql_value(0.0))
|
[
"elwin.luo@tradehero.mobi"
] |
elwin.luo@tradehero.mobi
|
610705888444eecd4a25200c9dd46b8ac12f5337
|
6eb207074705bacb36457d713e1dc06555192380
|
/plot_ppi_blockage_map.py
|
ff52f40610dd5525e69260d3983ba9dba9563685
|
[] |
no_license
|
ritvje/lidar-xband-article-scripts
|
22574adb29d2645fab31003ae8cd654363f6cb0a
|
ad6ec0997b09609c494316e2ae285296ffdde0eb
|
refs/heads/main
| 2023-04-07T01:31:49.337350
| 2023-02-07T07:55:01
| 2023-02-07T07:55:01
| 491,413,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,484
|
py
|
"""Plot a 2-panel figure of radar and lidar availability PPIs.
Looks for files in directory `inpath` called
- xband_obs_pct_{startdate}_{enddate}_pct.txt
- xband_obs_pct_{startdate}_{enddate}_range.txt
- xband_obs_pct_{startdate}_{enddate}_azimuth.txt
- lidar_obs_pct_{startdate}_{enddate}_pct.txt
- lidar_obs_pct_{startdate}_{enddate}_range.txt
- lidar_obs_pct_{startdate}_{enddate}_azimuth.txt
Author: Jenna Ritvanen <jenna.ritvanen@fmi.fi>
"""
import os
import sys
import argparse
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib as mlt
mlt.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import cartopy.crs as ccrs
plt.style.use("./presentation.mplstyle")
from radar_plotting import plotting
import contextily as ctx
from pathlib import Path
centerpoint = (24.87608, 60.28233)
airport_aws = (24.95675, 60.32670)
COPYRIGHT_TEXT = "Map tiles by Stamen Design, under CC BY 3.0. \nMap data by OpenStreetMap, under ODbL."
@mlt.ticker.FuncFormatter
def m2km_formatter(x, pos):
return f"{x / 1000:.0f}"
if __name__ == "__main__":
argparser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
argparser.add_argument("startdate", type=str, help="the startdate (YYYYmm)")
argparser.add_argument("enddate", type=str, help="the enddate (YYYYmm)")
argparser.add_argument(
"inpath", type=str, help="Path where the input files are located"
)
argparser.add_argument(
"--ext",
type=str,
default="png",
choices=["pdf", "png"],
help="Output plot file format.",
)
argparser.add_argument("--outpath", type=str, default=".", help="Output path")
argparser.add_argument(
"--maxdist",
type=float,
default=15,
help="Maximum distance plotted in figures in km",
)
argparser.add_argument(
"--dpi", type=int, default=300, help="Dots per inch in figure"
)
args = argparser.parse_args()
outpath = Path(args.outpath)
outpath.mkdir(parents=True, exist_ok=True)
inpath = Path(args.inpath).resolve()
startdate = datetime.strptime(args.startdate, "%Y%m")
enddate = (
datetime.strptime(args.enddate, "%Y%m") + pd.offsets.MonthEnd(0)
).to_pydatetime()
pct_xband = np.loadtxt(
inpath / f"xband_obs_pct_{startdate:%Y%m%d}_{enddate:%Y%m%d}_pct.txt"
)
xband_rr = np.loadtxt(
inpath / f"xband_obs_pct_{startdate:%Y%m%d}_{enddate:%Y%m%d}_range.txt"
)
xband_az = np.loadtxt(
inpath / f"xband_obs_pct_{startdate:%Y%m%d}_{enddate:%Y%m%d}_azimuth.txt"
)
pct_lidar = np.loadtxt(
inpath / f"lidar_obs_pct_{startdate:%Y%m%d}_{enddate:%Y%m%d}_pct.txt"
)
lidar_rr = np.loadtxt(
inpath / f"lidar_obs_pct_{startdate:%Y%m%d}_{enddate:%Y%m%d}_range.txt"
)
lidar_az = np.loadtxt(
inpath / f"lidar_obs_pct_{startdate:%Y%m%d}_{enddate:%Y%m%d}_azimuth.txt"
)
outfn = os.path.join(outpath, f"meas_pct_map.{args.ext}")
cbar_ax_kws = {
"width": "5%", # width = 5% of parent_bbox width
"height": "100%",
"loc": "lower left",
"bbox_to_anchor": (1.01, 0.0, 1, 1),
"borderpad": 0,
}
fig = plt.figure(figsize=(12, 10))
ax_lidar, fig, aeqd, ext = plotting.axes_with_background_map(
centerpoint, 15, 10, fig=fig, no_map=True, map="toner-line", ncols=2, index=1
)
ctx.add_basemap(
ax_lidar, crs=aeqd, zorder=9, zoom=11, source=ctx.providers.Stamen.TonerLite
)
p = plotting.plot_ppi(
ax_lidar,
pct_lidar,
lidar_az,
lidar_rr,
rasterized=True,
vmin=0,
vmax=1,
cmap="viridis",
zorder=100,
alpha=0.7,
linewidth=0,
antialiased=True,
edgecolor="none",
)
ax_lidar.scatter(
*airport_aws,
s=75,
transform=ccrs.PlateCarree(),
zorder=110,
label="Helsinki Airport",
marker="X",
color="k",
)
ax_radar, fig, aeqd, ext = plotting.axes_with_background_map(
centerpoint,
15,
10,
fig=fig,
no_map=True,
map="toner-line",
sharey=None,
ncols=2,
index=2,
)
ctx.add_basemap(
ax_radar, crs=aeqd, zorder=9, zoom=11, source=ctx.providers.Stamen.TonerLite
)
p = plotting.plot_ppi(
ax_radar,
pct_xband,
xband_az,
xband_rr,
rasterized=True,
vmin=0,
vmax=1,
cmap="viridis",
zorder=100,
alpha=0.7,
linewidth=0,
antialiased=True,
edgecolor="none",
)
ax_radar.scatter(
*airport_aws,
s=75,
transform=ccrs.PlateCarree(),
zorder=110,
label="Helsinki Airport",
marker="X",
color="k",
)
cax = inset_axes(ax_radar, bbox_transform=ax_radar.transAxes, **cbar_ax_kws)
cbar = plt.colorbar(p, orientation="vertical", cax=cax, ax=None)
cbar.set_label("Fraction", weight="bold")
cbar.ax.tick_params(labelsize=12)
for ax, title in zip([ax_lidar, ax_radar], ["(a) Lidar", "(b) X-band radar"]):
plotting.set_ticks_km(
ax,
[
-args.maxdist * 1e3,
args.maxdist * 1e3,
-args.maxdist * 1e3,
args.maxdist * 1e3,
],
16,
16,
)
# x-axis
ax.set_xlabel("Distance from site [km]", weight="bold", size="medium")
ax.set_title(title, y=-0.15, size="large")
ax.xaxis.set_major_formatter(m2km_formatter)
# y-axis
ax.set_ylabel("Distance from site [km]", weight="bold", size="medium")
ax.yaxis.set_major_formatter(m2km_formatter)
ax.set_xlim([-args.maxdist * 1e3, args.maxdist * 1e3])
ax.set_ylim([-args.maxdist * 1e3, args.maxdist * 1e3])
ax.tick_params(axis="both", which="major", labelsize="small")
ax.set_aspect(1)
ax.text(
0.75, 0.01, COPYRIGHT_TEXT, fontsize=4, zorder=100, transform=ax.transAxes
)
ax_radar.set_yticks([])
ax_radar.set_yticklabels([])
ax_radar.set_ylabel("")
fig.savefig(outfn, dpi=args.dpi, bbox_inches="tight")
|
[
"jenna.ritvanen@fmi.fi"
] |
jenna.ritvanen@fmi.fi
|
59c5e626e9a7066e4b46f04bc311d2d0bb6b243e
|
13f78c34e80a52442d72e0aa609666163233e7e0
|
/Other/ICPC Live Archive/7526/input_gen.py
|
e02dee7416064f5e4eaed6e55e6ef4f49cc2d0f0
|
[] |
no_license
|
Giantpizzahead/comp-programming
|
0d16babe49064aee525d78a70641ca154927af20
|
232a19fdd06ecef7be845c92db38772240a33e41
|
refs/heads/master
| 2023-08-17T20:23:28.693280
| 2023-08-11T22:18:26
| 2023-08-11T22:18:26
| 252,904,746
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
import random
def genTestCase(out):
ROWS = random.randint(200, 300)
COLS = random.randint(200, 300)
SPARE_ROWS = random.randint(0, int(ROWS / 5))
SPARE_DIODES = random.randint(0, int(ROWS * COLS / 7))
out.write("{} {} {} {}\n".format(ROWS, COLS, SPARE_ROWS, SPARE_DIODES))
ZERO_DENSITY = random.randint(0, 10)
choice_arr = ['0' for _ in range(ZERO_DENSITY)]
choice_arr.append('1')
for i in range(ROWS):
out.write(' '.join([random.choice(choice_arr) for _ in range(COLS)]))
out.write('\n')
with open("input.txt", 'w') as out:
for i in range(30):
genTestCase(out)
out.write("0 0 0 0\n")
|
[
"43867185+Giantpizzahead@users.noreply.github.com"
] |
43867185+Giantpizzahead@users.noreply.github.com
|
cb7b18e29e9988f4058c89a85ff27762dd7ea458
|
eb1a2e24ecdbe9be8a6aac153fe1980b19dcbe4a
|
/sameline.py
|
db7711b6a73bb522e178afdfcfdba7a0d80d7476
|
[] |
no_license
|
sushmithasushi/playerlevel
|
1e7363e63bd75deba79b96a659db7736c93ed6a2
|
612ea4483a5c5d7c3c3a8564e0b7ce0df08a686a
|
refs/heads/master
| 2020-06-19T07:03:06.457910
| 2019-07-24T04:28:31
| 2019-07-24T04:28:31
| 196,608,802
| 0
| 0
| null | 2019-07-12T16:08:51
| 2019-07-12T16:08:51
| null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
num1=input().split()
n2=input().split()
n3=input().split()
if(num1[0]==n2[0]==n3[0] or num1[1]==n2[1]==n3[1] or (num1[0]==num1[1] and n2[0]==n2[1] and n3[0]==n3[1])):
print('yes')
else:
print('no')
|
[
"noreply@github.com"
] |
sushmithasushi.noreply@github.com
|
7de4e0db9696917074e2cac47616c5de30351365
|
1f33bcbd545b82bc64e7f7b5ef2052798b02b279
|
/Quiz:Program1/quiz.py
|
f90e3289eeff403a20c20b0baeae41159bfc7583
|
[] |
no_license
|
jlevy44/LearningScripts
|
107369798605641a24a5a29c9f8f249d5396e0b6
|
fe7ce46a995cd6b7e40099a4b2db558a549de446
|
refs/heads/master
| 2021-01-01T19:19:11.849540
| 2017-09-27T00:19:54
| 2017-09-27T00:19:54
| 98,566,879
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
def is_true(i):
if i==2:
return True
else:
return False
print(is_true(2))
def lalala(i):
return lambda y: i**2
print(lalala(5))
print('alpha' > 'beta')
d='hello'
for p in d: print(p)
print(d[-2:2])
def add(x,y):
print x+y
add(2,6)
def make_scalar(c):
return lambda n: c*(n)
tripleAdd=make_scalar(3)
print tripleAdd(2)
|
[
"joshualevy44@berkeley.edu"
] |
joshualevy44@berkeley.edu
|
21120623aaae1cf3c80e160d93a4a917c30ebcfc
|
ece0d321e48f182832252b23db1df0c21b78f20c
|
/engine/2.80/scripts/templates_py/ui_previews_dynamic_enum.py
|
39a3750bfee6404d1a45d3c448143682e5ad1b80
|
[
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"PSF-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-2-Clause",
"Unlicense"
] |
permissive
|
byteinc/Phasor
|
47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9
|
f7d23a489c2b4bcc3c1961ac955926484ff8b8d9
|
refs/heads/master
| 2022-10-25T17:05:01.585032
| 2019-03-16T19:24:22
| 2019-03-16T19:24:22
| 175,723,233
| 3
| 1
|
Unlicense
| 2022-10-21T07:02:37
| 2019-03-15T00:58:08
|
Python
|
UTF-8
|
Python
| false
| false
| 4,092
|
py
|
# This sample script demonstrates a dynamic EnumProperty with custom icons.
# The EnumProperty is populated dynamically with thumbnails of the contents of
# a chosen directory in 'enum_previews_from_directory_items'.
# Then, the same enum is displayed with different interfaces. Note that the
# generated icon previews do not have Blender IDs, which means that they can
# not be used with UILayout templates that require IDs,
# such as template_list and template_ID_preview.
#
# Other use cases:
# - make a fixed list of enum_items instead of calculating them in a function
# - generate isolated thumbnails to use as custom icons in buttons
# and menu items
#
# For custom icons, see the template "ui_previews_custom_icon.py".
#
# For distributable scripts, it is recommended to place the icons inside the
# script directory and access it relative to the py script file for portability:
#
# os.path.join(os.path.dirname(__file__), "images")
import os
import bpy
def enum_previews_from_directory_items(self, context):
"""EnumProperty callback"""
enum_items = []
if context is None:
return enum_items
wm = context.window_manager
directory = wm.my_previews_dir
# Get the preview collection (defined in register func).
pcoll = preview_collections["main"]
if directory == pcoll.my_previews_dir:
return pcoll.my_previews
print("Scanning directory: %s" % directory)
if directory and os.path.exists(directory):
# Scan the directory for png files
image_paths = []
for fn in os.listdir(directory):
if fn.lower().endswith(".png"):
image_paths.append(fn)
for i, name in enumerate(image_paths):
# generates a thumbnail preview for a file.
filepath = os.path.join(directory, name)
thumb = pcoll.load(filepath, filepath, 'IMAGE')
enum_items.append((name, name, "", thumb.icon_id, i))
pcoll.my_previews = enum_items
pcoll.my_previews_dir = directory
return pcoll.my_previews
class PreviewsExamplePanel(bpy.types.Panel):
"""Creates a Panel in the Object properties window"""
bl_label = "Previews Example Panel"
bl_idname = "OBJECT_PT_previews"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "object"
def draw(self, context):
layout = self.layout
wm = context.window_manager
row = layout.row()
row.prop(wm, "my_previews_dir")
row = layout.row()
row.template_icon_view(wm, "my_previews")
row = layout.row()
row.prop(wm, "my_previews")
# We can store multiple preview collections here,
# however in this example we only store "main"
preview_collections = {}
def register():
from bpy.types import WindowManager
from bpy.props import (
StringProperty,
EnumProperty,
)
WindowManager.my_previews_dir = StringProperty(
name="Folder Path",
subtype='DIR_PATH',
default=""
)
WindowManager.my_previews = EnumProperty(
items=enum_previews_from_directory_items,
)
# Note that preview collections returned by bpy.utils.previews
# are regular Python objects - you can use them to store custom data.
#
# This is especially useful here, since:
# - It avoids us regenerating the whole enum over and over.
# - It can store enum_items' strings
# (remember you have to keep those strings somewhere in py,
# else they get freed and Blender references invalid memory!).
import bpy.utils.previews
pcoll = bpy.utils.previews.new()
pcoll.my_previews_dir = ""
pcoll.my_previews = ()
preview_collections["main"] = pcoll
bpy.utils.register_class(PreviewsExamplePanel)
def unregister():
from bpy.types import WindowManager
del WindowManager.my_previews
for pcoll in preview_collections.values():
bpy.utils.previews.remove(pcoll)
preview_collections.clear()
bpy.utils.unregister_class(PreviewsExamplePanel)
if __name__ == "__main__":
register()
|
[
"admin@irradiate.net"
] |
admin@irradiate.net
|
3f5aafbcf9931ad72e65de3c2d356b6fddbf2328
|
55e11b61b3230f7442dd2c8f2c16754ad50dcaf6
|
/code/SLFN/training.py
|
801ed20b9e22b128eae974493e8cffee9b990256
|
[] |
no_license
|
manuwhs/B-ADSNs
|
f70f534ebf1389f2fe51c4d46978ca1c24bbb671
|
6fe0c8456f71dbeb72b172baccdbf98caaa86d7c
|
refs/heads/master
| 2021-04-28T03:06:26.873816
| 2018-04-18T21:54:08
| 2018-04-18T21:54:08
| 122,131,782
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,058
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 01:31:58 2015
@author: montoya
"""
import numpy as np
from sklearn import cross_validation
import paramClasses as paC
from time import time # For the random seed
def train (self):
# Adapt the labels so that they are correct (-1 or 0 and transform multivariate if needed)
self.Ytrain = paC.adapt_labels(self.Ytrain, mode = self.outMode)
self.Yval = paC.adapt_labels(self.Yval, mode = self.outMode )
if (self.Xtest != []): # If there is a test dataset.
self.Ytest = paC.adapt_labels(self.Ytest, mode = self.outMode )
# Values that will be stored in Exec_list for later processing
self.TrError = np.zeros((self.Nruns,1))
self.ValError = np.zeros((self.Nruns,1))
self.TstError = np.zeros((self.Nruns,1))
for r in range (self.Nruns):
self.train_CV(r = r)
# print self.TrError[r], self.ValError[r] , self.TstError[r]
def train_once (self):
# print "train_once"
# print self.D
# print self
self.init_Weights() # Initialize
# Check the training algorithm and pass it with its parameters.
# D is the dehenfasis vector, distribution of samples probabilities.
if (self.trainingAlg.trAlg == "ELM"):
self.ELM_train(self.trainingAlg.param)
if (self.trainingAlg.trAlg == "BP"):
self.BP_train(self.trainingAlg.param)
if (self.trainingAlg.trAlg == "BMBP"):
self.BMBP_train(self.trainingAlg.param)
if (self.trainingAlg.trAlg == "ELMT"):
self.ELMT_train(self.trainingAlg.param)
if (self.trainingAlg.trAlg == "LDAT"):
self.LDAT_train(self.trainingAlg.param)
if (self.trainingAlg.trAlg == "LDA"):
self.LDA_train(self.trainingAlg.param)
def train_CV (self, r):
# Trains the learner CV times using cross validation
total_Xtrain = self.Xtrain
total_Ytrain = self.Ytrain
## Get the random seed and use it
if (self.InitRandomSeed == -1): # If no seed is specified
self.RandomSeed[r] = int((time()%1 * 100000))
np.random.seed(self.RandomSeed[r])
else:
self.RandomSeed[r] = self.InitRandomSeed
np.random.seed(self.RandomSeed[r])
TrError = 0;
ValError = 0;
TstError = 0;
# print "train_CV"
# print self.CV
# print self.D
if (self.CV == 1):
# If the validation is performed with just the training set
# Then the validation set is the original self.Xval.
""" Why you may ask ?? """
# In other aggregate solutions, like Boosting, the CV is done
# over the whole structure, not layer by layer. In this cases,
# the CV of the SLFN will be 1 always and its the Boosting "train"
# the one in charge for changing the Validation set and training set.
self.train_once()
TrError += self.score(self.Xtrain, self.Ytrain)
ValError += self.score(self.Xval, self.Yval)
if (self.Xtest != []): # If there is a test dataset.
TstError += self.score(self.Xtest, self.Ytest)
if (self.CV > 1):
stkfold = cross_validation.StratifiedKFold(total_Ytrain.ravel(), n_folds = self.CV)
for train_index, val_index in stkfold:
# print train_index
self.set_Train(total_Xtrain[train_index],total_Ytrain[train_index])
self.set_Val(total_Xtrain[val_index],total_Ytrain[val_index])
self.train_once()
TrError += self.score(self.Xtrain, self.Ytrain)
ValError += self.score(self.Xval, self.Yval)
if (self.Xtest != []): # If there is a test dataset.
TstError += self.score(self.Xtest, self.Ytest)
self.TrError[r] = TrError / self.CV
self.ValError[r] = ValError / self.CV
self.TstError[r] = TstError / self.CV
self.Xtrain = total_Xtrain # Restore the original Xtrain
self.Ytrain = total_Ytrain
|
[
"manuwhs@bitbucket.org"
] |
manuwhs@bitbucket.org
|
d1ad790afa900acbcfab2baee1841aa7e9952ce7
|
4926535b3e0d2fe8a80bf231f72347abff6dcee4
|
/experiments/spatial.py
|
31164bbbb7f0f2a8b3630e61e75e76ba06247078
|
[] |
no_license
|
yangarbiter/blindguess
|
1f2a34b0eb30fc53ab9be5c01fa1c184b8eb2ff9
|
86e49749ba62132a263dfea3743513b23895b03e
|
refs/heads/master
| 2021-01-16T10:17:51.582682
| 2020-02-25T18:43:31
| 2020-02-25T18:43:31
| 243,076,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,862
|
py
|
import os
import logging
import torch
from bistiming import Stopwatch
from mkdir_p import mkdir_p
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from .utils import set_random_seed, load_model
from lolip.utils import estimate_local_lip_v2
from lolip.variables import get_file_name
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.WARNING, datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
def run_spatial(auto_var):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
_ = set_random_seed(auto_var)
#norm = auto_var.get_var("norm")
trnX, trny, tstX, tsty = auto_var.get_var("dataset")
lbl_enc = OneHotEncoder(categories=[np.sort(np.unique(trny))], sparse=False).fit(trny.reshape(-1, 1))
auto_var.set_intermidiate_variable("lbl_enc", lbl_enc)
n_classes = len(np.unique(trny))
n_channels = trnX.shape[-1]
result = {}
#multigpu = True if len(trnX) > 90000 and torch.cuda.device_count() > 1 else False
multigpu = False
    model = None
    try:
        model_path, model = load_model(
            auto_var, trnX, trny, tstX, tsty, n_channels, model_dir="./models/experiment01", device=device)
        model.model.to(device)
        result['model_path'] = model_path
    except Exception:
        # `model = None` above keeps this cleanup safe when load_model raises before binding `model`.
        del model
        logger.info("Model not trained yet, retrain the model")
mkdir_p("./models/experiment01")
result['model_path'] = os.path.join(
'./models/experiment01', get_file_name(auto_var) + '-ep%04d.pt')
result['model_path'] = result['model_path'].replace(
auto_var.get_variable_name("attack"), "pgd")
model = auto_var.get_var("model", trnX=trnX, trny=trny, multigpu=multigpu,
n_channels=n_channels, device=device)
model.tst_ds = (tstX, tsty)
with Stopwatch("Fitting Model", logger=logger):
history = model.fit(trnX, trny)
model.save(result['model_path'])
result['model_path'] = result['model_path'] % model.epochs
result['history'] = history
result['trn_acc'] = (model.predict(trnX) == trny).mean()
result['tst_acc'] = (model.predict(tstX) == tsty).mean()
print(f"train acc: {result['trn_acc']}")
print(f"test acc: {result['tst_acc']}")
attack_model = auto_var.get_var("attack", model=model, n_classes=n_classes)
with Stopwatch("Attacking Train", logger=logger):
adv_trnX = attack_model.perturb(trnX, trny)
with Stopwatch("Attacking Test", logger=logger):
adv_tstX = attack_model.perturb(tstX, tsty)
result['adv_trn_acc'] = (model.predict(adv_trnX) == trny).mean()
result['adv_tst_acc'] = (model.predict(adv_tstX) == tsty).mean()
print(f"adv trn acc: {result['adv_trn_acc']}")
print(f"adv tst acc: {result['adv_tst_acc']}")
print(result)
return result
|
[
"yangarbiter@gmail.com"
] |
yangarbiter@gmail.com
|
eda928275e1b9cc5238c2f7b986b78ac46b584e3
|
313203cd01705e08bc8967246bfeacb7fa5cd6c9
|
/Untitled Folder 2/newpython/strings.py
|
f45458de85baa2979b96b808132d94e5001b35d1
|
[] |
no_license
|
Nitesh101/Nitesh_old_backup
|
17d9c8e8f9694c0ef4d995de7cbb694080523b22
|
a0b0263e204f2f46c51e6f20990024396eb0ccb7
|
refs/heads/master
| 2020-03-19T20:51:51.587343
| 2018-09-21T09:42:11
| 2018-09-21T09:42:11
| 136,919,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,262
|
py
|
#!/usr/bin/python
"""
str = 'Votarytech Learning Center'
print str.replace("Center","1")
print str.find("L")
print str.update('4','8')
print str[0:8],str[10::]
print str[1:20]
"""
list = ["this","is","python"]
print "To find index of list : "
print list.index("is")
list = ["this","is","python"]
print "To remove element from list : "
list.remove("is")
print list
list = ["this","is","python"]
print "To add elements in a list: "
list = list+["for"]
print list
list = ["this","is","python"]
print "To add element in end of list: "
list.pop()
print list
list = ["this","is","python"]
print "To add elment in specific index: "
list.insert(2,2009)
print list
list = ["this","is","python"]
print "delete a specific element in list: "
del list[1];
print list
list = ["this","is","python"]
print "To print element in multiple times : "
list = ["this"] * 4
print list
list = ["this","is","python"]
list.reverse()
print list
list = [5,6,6,7,8,1]
list.sort()
print list
list = [12,'nitesh','python',345]
list1 = [2009,'language']
list.extend(list1)
print "Extended list: ",list
dict = {}
dict['one'] = "this is one"
dict[3] = "this is two"
dict = {'sep':'sales','code':876,'name':'nitesh','dep':'it'}
print dict
a = (1,2,3,"nitesh")
print list(a)
|
[
"m.veeranitesh@gmail.com"
] |
m.veeranitesh@gmail.com
|
2b690281a2c912dd8a40b26f3e3d47ba473e10c7
|
28691ec55ebce9ec7045d12ea9675932ce12d671
|
/py2rhino-project/branches/sandbox2/py2rhino/_make/data/parser_out/utility/sleep.py
|
747d42de5841701c0bad6df9d9e9bcd690ec7cdc
|
[] |
no_license
|
ianclarksmith/design-automation
|
1e71315193effc0c18b4a8b41300bda6f41a3f09
|
e27cc028fe582395f4a62f06697137867bb0fc33
|
refs/heads/master
| 2020-04-22T22:28:39.385395
| 2009-10-26T02:48:37
| 2009-10-26T02:48:37
| 37,266,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
sleep = {
"input_folder_name": "Utility_Methods",
"input_file_name": "Sleep",
"output_package_name": "utility",
"output_module_name": "sleep",
"doc_html": """
Suspends the execution of a running script for the specified interval.
""",
"syntax_html": {
0: ("lngMilliseconds"),
},
"params_html": {
0: {
"name": "lngMilliseconds",
"py_name": "milliseconds",
"opt_or_req": "Required",
"type": "Number",
"name_prefix": "lng",
"name_main": "Milliseconds",
"doc": """
The duration in milliseconds.
"""
},
},
"returns_html": {
0: {
"type": "null",
"doc": "If successful, or on error."
},
},
"id_com": 248,
"params_com": {
0: {
"name": "vaTime",
"opt_or_req": "Required",
"type": "tagVARIANT",
},
},
"returns_com": "tagVARIANT",
}
|
[
"patrick.ht.janssen@d56020b2-6ac5-11de-89a9-0b20f3e2dceb"
] |
patrick.ht.janssen@d56020b2-6ac5-11de-89a9-0b20f3e2dceb
|
809efa968327ec2b765eba788b2c0355bfbae597
|
07c5656f004b6a444e22ff7b4c3b6802d027f759
|
/week_3/class_0228/learn_import.py
|
fa67da2fb5f09d290d59e56d72ed750a06f0a45b
|
[] |
no_license
|
EuniceHu/python15_api_test
|
de2a0f0bec8057edb27c8d1f82a438da3e9c105c
|
1313e56ddfa67a2490e703a1a5ef4a6967565849
|
refs/heads/master
| 2020-05-20T13:30:41.686327
| 2019-05-14T11:00:52
| 2019-05-14T11:00:52
| 185,599,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# -*- coding: utf-8 -*-
"""
@author:小胡
@file: learn_import.py
@time: 2019/03/02
"""
# Import the module_import module
# import week_3.class_0228.module_import
# print(week_3.class_0228.module_import.name)
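# The same idea without the package prefix would look like this (assuming a sibling
# module_import.py that defines a module-level variable `name`):
#   import module_import
#   print(module_import.name)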
|
[
"hongdh1122@163.com"
] |
hongdh1122@163.com
|
4b0b8b40b54edefa3452bf0836965eec78601e79
|
0c9e8b42a1e0a6f010a6c38489c7c96b3b783991
|
/photo/tests.py
|
57ff49f4b1663ad73d11612a2994277554b8212c
|
[
"MIT"
] |
permissive
|
Derrick-Nyongesa/Photo-Gallery
|
4690ff3bd427415236fd5147188e2f452b87d487
|
bbd8774bf7e8d1f9f32aa9e02d12af20e8cb0e70
|
refs/heads/main
| 2023-05-02T20:54:58.462766
| 2021-05-17T07:05:06
| 2021-05-17T07:05:06
| 366,949,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,422
|
py
|
from django.test import TestCase
from .models import Image,Category,Location
# Create your tests here.
class TestImage(TestCase):
def setUp(self):
self.location = Location(name='Nairobi')
self.location.save_location()
self.category = Category(name='food')
self.category.save_category()
self.image_test = Image(id=1, name='image', description='this is a test image', location=self.location,
category=self.category)
def test_instance(self):
self.assertTrue(isinstance(self.image_test, Image))
def test_save_image(self):
self.image_test.save_image()
after = Image.objects.all()
self.assertTrue(len(after) > 0)
def test_delete_image(self):
self.image_test.delete_image()
images = Image.objects.all()
self.assertTrue(len(images) == 0)
def test_update_image(self):
self.image_test.save_image()
self.image_test.update_image(self.image_test.id, 'photos/test.jpg')
changed_img = Image.objects.filter(image='photos/test.jpg')
self.assertTrue(len(changed_img) > 0)
def test_get_image_by_id(self):
found_image = self.image_test.get_image_by_id(self.image_test.id)
image = Image.objects.filter(id=self.image_test.id)
self.assertTrue(found_image, image)
def test_search_image_by_location(self):
self.image_test.save_image()
found_images = self.image_test.filter_by_location(location='Nairobi')
self.assertTrue(len(found_images) == 1)
def test_search_image_by_category(self):
category = 'food'
found_img = self.image_test.search_by_category(category)
self.assertTrue(len(found_img) > 1)
def tearDown(self):
Image.objects.all().delete()
Location.objects.all().delete()
Category.objects.all().delete()
class TestLocation(TestCase):
def setUp(self):
self.location = Location(name='Nairobi')
self.location.save_location()
def test_instance(self):
self.assertTrue(isinstance(self.location, Location))
def test_save_location(self):
self.location.save_location()
locations = Location.get_locations()
self.assertTrue(len(locations) > 0)
def test_get_locations(self):
self.location.save_location()
locations = Location.get_locations()
self.assertTrue(len(locations) > 1)
def test_update_location(self):
new_location = 'America'
self.location.update_location(self.location.id, new_location)
changed_location = Location.objects.filter(name='America')
self.assertTrue(len(changed_location) > 0)
def test_delete_location(self):
self.location.delete_location()
location = Location.objects.all()
self.assertTrue(len(location) == 0)
class CategoryTestClass(TestCase):
def setUp(self):
self.category = Category(name='home')
self.category.save_category()
def test_instance(self):
self.assertTrue(isinstance(self.category, Category))
def test_save_category(self):
self.category.save_category()
categories = Category.objects.all()
self.assertTrue(len(categories) > 0)
def test_delete_category(self):
self.category.delete_category()
category = Category.objects.all()
self.assertTrue(len(category) == 0)
|
[
"nyongesaderrick@gmail.com"
] |
nyongesaderrick@gmail.com
|
64a6de091d8defdd83efab73dcacb5a3d0e4d9a2
|
e50244b666bc6af028beb3c1a83de70e7c61edd0
|
/L1/L1.py
|
e70b76e021489eff37ea41e71b021b4c15c88804
|
[] |
no_license
|
justaleaf/ML-2020
|
21e9b3026b1dd25d0727e9868e026eca7a3301c8
|
234c2a5e16dcc2d1f71483f57ec3c806cd6fdff0
|
refs/heads/main
| 2023-01-24T12:32:25.070338
| 2020-11-05T11:57:05
| 2020-11-05T11:57:05
| 305,983,518
| 0
| 0
| null | 2020-10-21T10:08:33
| 2020-10-21T10:08:32
| null |
UTF-8
|
Python
| false
| false
| 5,384
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
from tabulate import tabulate
from IPython.core.debugger import set_trace
from IPython.display import display
from matplotlib import pyplot as plt
from sklearn import preprocessing
# In[2]:
df = pd.read_csv('../data/heart_failure_clinical_records_dataset.csv')
df = df.drop(columns=['anaemia','diabetes','high_blood_pressure','sex','smoking','time','DEATH_EVENT'])
display(df)
# In[3]:
fig, axes = plt.subplots(2, 3, figsize=(12, 6))
n_bins = 20
axes[0, 0].hist(df['age'].values, bins = n_bins)
axes[0, 0].set_title('age')
axes[0, 1].hist(df['creatinine_phosphokinase'].values, bins = n_bins)
axes[0, 1].set_title('creatinine_phosphokinase')
axes[0, 2].hist(df['ejection_fraction'].values, bins = n_bins)
axes[0, 2].set_title('ejection_fraction')
axes[1, 0].hist(df['platelets'].values, bins = n_bins)
axes[1, 0].set_title('platelets')
axes[1, 1].hist(df['serum_creatinine'].values, bins = n_bins)
axes[1, 1].set_title('serum_creatinine')
axes[1, 2].hist(df['serum_sodium'].values, bins = n_bins)
axes[1, 2].set_title('serum_sodium')
fig.tight_layout()
plt.savefig('./img/hist-1.png')
plt.show()
# In[4]:
data = df.to_numpy(dtype='float')
# In[5]:
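# The scaler below is fitted on the first 150 rows only, so the transformed full
# dataset is not exactly zero-mean / unit-variance; the table built in cell In[8]
# contrasts this with a StandardScaler fitted on the complete data.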
scaler = preprocessing.StandardScaler().fit(data[:150,:])
data_scaled = scaler.transform(data)
# In[6]:
TITLES = ['age', 'creatinine_phosphokinase', 'ejection_fraction', 'platelets', 'serum_creatinine', 'serum_sodium']
def plot_data(data_scaled):
fig, axes = plt.subplots(2, 3, figsize=(12, 6))
ax_order = [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]
for i, ax_ind in enumerate(ax_order):
axes[ax_ind].hist(data_scaled[:,i], bins = n_bins)
axes[ax_ind].set_title(TITLES[i])
fig.tight_layout()
return fig
plot_data(data_scaled)
plt.savefig('./img/hist-2.png')
plt.show()
# In[7]:
def calc_metrics(data):
mean = [np.mean(col) for col in data.T]
std = [np.std(col) for col in data.T]
return mean, std
calc_metrics(data)
# In[8]:
def shorten(s):
if len(s) < 10:
return s
return s[:10] + '...'
mean_src, std_src = calc_metrics(data)
mean_sc, std_sc = calc_metrics(data_scaled)
scaler2 = preprocessing.StandardScaler()
data_scaled2 = scaler2.fit_transform(data)
mean_sc2, std_sc2 = calc_metrics(data_scaled2)
plot_data(data_scaled2)
plt.savefig('./img/hist-3.png')
plt.show()
header = ['Признак', *[shorten(t) for t in TITLES]]
table = [
['Среднее (исх.)', *mean_src],
['Среднее (стандарт. 150)', *mean_sc],
['Среднее (стандарт. 150 scaler)', *scaler.mean_],
['Среднее (стандарт. полн.)', *mean_sc2],
['Среднее (стандарт. полн. scaler)', *scaler2.mean_],
['СКО (исх)', *std_src],
['СКО (стандарт. 150)', *std_sc],
['СКО (стандарт. 150 scaler)', *[np.sqrt(v) for v in scaler.var_]],
['СКО (стандарт. полн.)', *std_sc2],
['СКО (стандарт. полн. scaler)', *[np.sqrt(v) for v in scaler2.var_]]
]
latex_t1 = tabulate(table, headers=header, tablefmt='latex_booktabs', floatfmt=".4f")
with open('./output/t1.tex', 'w') as f:
f.write(latex_t1)
# In[9]:
min_max_scaler = preprocessing.MinMaxScaler()
min_max_data = min_max_scaler.fit_transform(data)
plot_data(min_max_data)
plt.savefig('./img/hist-min-max.png')
plt.show()
# In[10]:
header = ['Признак', 'Минимум', 'Максимум']
table = [
(title, min_, max_)
for title, min_, max_ in zip(TITLES, min_max_scaler.data_min_, min_max_scaler.data_max_)
]
latex_t2 = tabulate(table, headers=header, tablefmt='latex_booktabs')
with open('./output/t2.tex', 'w') as f:
f.write(latex_t2)
# In[11]:
max_abs_data = preprocessing.MaxAbsScaler().fit_transform(data)
robust_data = preprocessing.RobustScaler().fit_transform(data)
plot_data(max_abs_data)
plt.savefig('./img/hist-max-abs.png')
plt.show()
plot_data(robust_data)
plt.savefig('./img/hist-robust.png')
plt.show()
# In[12]:
def fit_5_10(data):
data = data.copy()
for col in range(data.shape[1]):
min_, max_ = np.min(data[:, col]), np.max(data[:, col])
data[:, col] = [(x - min_) / (max_ - min_) * 15 - 5 for x in data[:, col]]
return data
data_5_10 = fit_5_10(data)
plot_data(data_5_10)
plt.savefig('./img/hist-5-10.png')
plt.show()
# In[13]:
quantile_transformer = preprocessing.QuantileTransformer(n_quantiles=100, random_state=0)
quantile_data = quantile_transformer.fit_transform(data)
plot_data(quantile_data)
plt.savefig('./img/hist-quantile.png')
plt.show()
# In[14]:
quantile_normal_transformer = preprocessing.QuantileTransformer(n_quantiles=100, random_state=0, output_distribution='normal')
quantile_normal_data = quantile_normal_transformer.fit_transform(data)
plot_data(quantile_normal_data)
plt.savefig('./img/hist-quantile-normal.png')
plt.show()
# In[15]:
power_transformer = preprocessing.PowerTransformer()
power_data = power_transformer.fit_transform(data)
plot_data(power_data)
plt.savefig('./img/hist-power.png')
plt.show()
# In[16]:
est = preprocessing.KBinsDiscretizer(n_bins=[3, 4, 3, 10, 2, 4], encode='ordinal')
disc_data = est.fit_transform(data)
plot_data(disc_data)
plt.savefig('./img/hist-disc.png')
plt.show()
# In[ ]:
|
[
"thexcloud@gmail.com"
] |
thexcloud@gmail.com
|
a27316b1c3a96c08649b135d42376f0518fe896a
|
73e147e1d49656fafba5d4bf84df5ded2c4dca73
|
/team_9/cocos/test/test_layer_rotate.py
|
686cddc35f9dbf85bfe18864a85263644c578451
|
[
"LGPL-2.1-only",
"CC-BY-NC-4.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-SA-2.0",
"BSD-3-Clause"
] |
permissive
|
Donnyvdm/dojo19
|
2278747366c57bfc80eb9ee28ca617ec0a79bae3
|
3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400
|
refs/heads/master
| 2020-07-26T12:22:15.882800
| 2019-09-15T20:34:36
| 2019-09-15T20:34:36
| 208,642,183
| 1
| 0
|
BSD-3-Clause
| 2019-09-15T18:57:53
| 2019-09-15T18:57:52
| null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "t 0.1, s, t 0.5, s, t 1.5, s, t 2.1, s, q"
tags = "Layer, RotateBy"
import cocos
from cocos.director import director
from cocos.actions import RotateBy
from cocos.layer import *
def main():
director.init()
main_scene = cocos.scene.Scene()
test_layer = ColorLayer(64,64,64,255)
test_layer.scale = 0.75
main_scene.add( test_layer )
test_layer.do( RotateBy( 360, 2 ) )
director.run (main_scene)
if __name__ == '__main__':
main()
|
[
"a.campello@wellcome.ac.uk"
] |
a.campello@wellcome.ac.uk
|
2dac7ea615bd751bd0c4315c3a059a77790feec3
|
4bc2af514877135a222826b2c5ac48632045f2fa
|
/jenkins/update_mysql.py
|
c05b16d304495222a0c459566afbcffa9cb12ba7
|
[] |
no_license
|
18734865664/python
|
1853481ac1dcd515f691cfc11557f76fbbb083de
|
25bc355ddb2abefc5a3736fb99e6345138ebbefc
|
refs/heads/master
| 2020-03-17T09:37:57.469741
| 2018-06-28T08:41:37
| 2018-06-28T08:41:37
| 133,482,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,007
|
py
|
#! /usr/local/python3/bin/python3
# encoding: utf-8
import pymysql
import sys
sys.path.insert(0, '/data/nfs/python/learn_python/jenkins/')
from get_file_list import getFileList
from get_args import getJenkinsArgs
import get_args
class update_mysql():
def __init__(self):
self.mysql_host = "10.100.137.179"
self.mysql_user = "root"
self.mysql_pass = "123123"
self.mysql_port = "3306"
self.mysql_dbname = "jenkins_info"
def create_job_args_table(self):
        # Instantiate the MySQL connection
db = pymysql.connect(host = self.mysql_host, user = self.mysql_user, passwd = self.mysql_pass, port = int(self.mysql_port))
        # Create a cursor object
cursor = db.cursor()
        # Create the jenkins_info database if it does not exist
try:
sql = "create database if not exists {};".format((self.mysql_dbname))
cursor.execute(sql)
except:
print("库已存在")
        # Create the table if it does not exist
sql1 = "create table if not exists jenkins_info.{}( \
`job_name` VARCHAR(100) NOT NULL, \
`job_name_row` VARCHAR(100) NOT NULL, \
`branch_parents` VARCHAR(1000) NOT NULL DEFAULT 'NULL', \
`ftp_path` VARCHAR(100) NOT NULL DEFAULT 'NULL', \
`mvn_args`VARCHAR(50) NOT NULL DEFAULT '\[\"prod\"\]', \
`subitems_name` VARCHAR(200) NOT NULL DEFAULT 'NULL' \
);".format(("job_args"))
cursor.execute(sql1)
        # Get the list of jobs
job_name_file_obj = getFileList("/data/nfs/jenkins/jobs/").get_file_list()
        # Get the parameter list for each job
for job_name in job_name_file_obj:
job_workspace_file = '/data/nfs/jenkins/jobs/' + job_name
job_config_file = job_workspace_file + "/config.xml"
obj = getJenkinsArgs(job_config_file, job_name)
if __name__ == "__main__":
obj = update_mysql()
obj.create_job_args_table()
|
[
"you@example.com"
] |
you@example.com
|
98d038b91f190b29f651a48eb083b8182feee660
|
c1bd12405d244c5924a4b069286cd9baf2c63895
|
/azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/training/models/tag.py
|
b1eef24ccd54891a5c4e373cc43537a5efa21229
|
[
"MIT"
] |
permissive
|
lmazuel/azure-sdk-for-python
|
972708ad5902778004680b142874582a284a8a7c
|
b40e0e36cc00a82b7f8ca2fa599b1928240c98b5
|
refs/heads/master
| 2022-08-16T02:32:14.070707
| 2018-03-29T17:16:15
| 2018-03-29T17:16:15
| 21,287,134
| 1
| 3
|
MIT
| 2019-10-25T15:56:00
| 2014-06-27T19:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Tag(Model):
"""Represents a Tag.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Gets the Tag ID
:vartype id: str
:param name: Gets or sets the name of the tag
:type name: str
:param description: Gets or sets the description of the tag
:type description: str
:ivar image_count: Gets the number of images with this tag
:vartype image_count: int
"""
_validation = {
'id': {'readonly': True},
'image_count': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'Id', 'type': 'str'},
'name': {'key': 'Name', 'type': 'str'},
'description': {'key': 'Description', 'type': 'str'},
'image_count': {'key': 'ImageCount', 'type': 'int'},
}
def __init__(self, name=None, description=None):
super(Tag, self).__init__()
self.id = None
self.name = name
self.description = description
self.image_count = None
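# A small illustrative sketch (the tag name and description are made up): only the
# writable fields are set at construction time, while `id` and `image_count` remain
# None until the service populates them in a response.
#
#   tag = Tag(name="defect", description="images showing surface defects")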
|
[
"laurent.mazuel@gmail.com"
] |
laurent.mazuel@gmail.com
|
b5595d5d7348a72d9b7433a750d364865b2ef7b7
|
2ae420ff508e7e6799dbd0b9e0c71be96ef2ced9
|
/pyre_extensions/__init__.py
|
c84f0ed282dd1e45df78c74712bcd532e404169e
|
[
"MIT"
] |
permissive
|
njayinthehouse/pyre-check
|
5c8ab3ee2048ad395652d2079c5dcbcee288fbfc
|
14872ab61ffef3fe61490c4cf0e098954157a5ac
|
refs/heads/master
| 2020-05-31T17:23:56.392622
| 2019-06-05T05:06:58
| 2019-06-05T05:10:37
| 186,618,634
| 0
| 0
| null | 2019-05-14T12:35:46
| 2019-05-14T12:35:46
| null |
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
def ParameterSpecification(name):
"""This kind of type variable captures callable parameter specifications
(known as argspecs in the runtime and inspect library) instead of types,
allowing the typing of decorators which transform the return type of the
given callable.
For example:
from typing import TypeVar, Callable, List
from pyre_extensions import ParameterSpecification
Tparams = ParameterSpecification("Tparams")
Treturn = TypeVar("Treturn")
def unwrap(
        f: Callable[Tparams, List[Treturn]],
) -> Callable[Tparams, Treturn]: ...
@unwrap
def foo(x: int, y: str, z: bool = False) -> List[int]:
return [1, 2, 3]
decorates foo into a callable that returns int, but still has the same
parameters, including their names and whether they are required.
The empty list is required for backwards compatibility with the runtime
implementation for callables, which requires the first argument to be
a list of types
"""
return []
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
d4c6454dd5d3cfb9f950a4eb640378ff7e298582
|
16197783050cb044729f8a4073f6a137658cf831
|
/lesson23___/lesson20/iter_class.py
|
68463ffc0b1a07c822ad9e774bf4970d94b2e3f0
|
[] |
no_license
|
AlexseyPivovarov/python_scripts
|
04c053d531e1d36266e82b8b9dc75161a0bcdcf9
|
99d849c834c647cf669e55f5b8f32d984a288091
|
refs/heads/master
| 2020-04-07T02:26:31.520861
| 2018-11-17T11:21:02
| 2018-11-17T11:21:02
| 157,976,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
class Foo:
a=1
b=2
def __init__(self,a,b):
self.a = a
self.b = b
def __iter__(self):
yield self.a
yield self.b
# return (item for item in (self.a,self.b,))
def generator(self):
return iter(self)
class FooFoo(Foo):
def __init__(self,*args):
super().__init__(args[0],args[1])
self.d = args[2]
def __iter__(self):
yield from super().__iter__()
yield self.d
childFoo = Foo(7,10)
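# Foo.__iter__ makes instances unpackable, so *childFoo expands to (7, 10) below.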
foofoo = FooFoo(*childFoo,5)
for item in foofoo:
print(item)
|
[
"a.peevovarov@gmail.com"
] |
a.peevovarov@gmail.com
|
3972ca7ba5760c2af411c970167427094f32caf0
|
51e6720d5bc219df3ce9bc899fe04ca71d6e86af
|
/Python/POO I/Cinema.py
|
0140e3000714059ddb4dc507936c847a7d7b3a57
|
[] |
no_license
|
ThiagoCComelli/POO
|
7466d8272ccc742400d55603e70a9b3be16e80a1
|
2f878d2d2674e11ea584f6c23b94bd27dea26d65
|
refs/heads/master
| 2020-06-03T15:29:04.704709
| 2020-01-10T01:39:04
| 2020-01-10T01:39:04
| 191,628,147
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,866
|
py
|
# -*- coding: utf-8 -*-
class Cinema():
def __init__(self,nome,endereco):
self.__nome = nome
self.__endereco = endereco
self.__funcionarios = []
self.__salas = []
self.__filmes = []
self.__sessoes = []
def getNome(self):
return self.__nome
def getEndereco(self):
return self.__endereco
def getFuncionarios(self):
return self.__funcionarios
def getSalas(self):
return self.__salas
    def getFilmes(self):
        return self.__filmes
def getSessaoMaisCara(self):
maior = 0
nome = 0
for i in self.__sessoes:
if i.getPreco() > maior:
maior = i.getPreco()
nome = i.getLocal()
return 'Sessao mais cara(preco): R$ {} Local(sala): {}'.format(str(maior),str(nome))
def getFilmeComedia(self):
filmes = 'Filmes de Comédia no Cinema: '
for i in self.__filmes:
if i.getGenero() == 'comedia':
filmes += i.getNome()
filmes += ' | '
return filmes
def getGeneroMaisFrequente(self):
lista = []
for i in self.__sessoes:
a = i.getFilmee()
lista.append(a.getGenero())
lista1 = set(lista)
genero = ''
count = 0
for i in lista1:
if lista.count(i) > count:
genero = i
count = lista.count(i)
return 'Genero Mais Frequente Nas Sessoes: {}'.format(genero)
def getMaiorLucro(self):
maior = 0
for i in self.__sessoes:
preco = i.getPreco()
sala = i.getLocall()
cap = sala.getCap()
tot = cap*preco
if tot>maior:
maior = tot
return 'Maior Lucro Possivel Sera de: R$ {}'.format(maior)
def setSessao(self,x):
self.__sessoes.append(x)
def setSala(self,x):
self.__salas.append(x)
def setFilme(self,x):
self.__filmes.append(x)
def setFuncionario(self,x):
self.__funcionarios.append(x)
class Pessoa():
def __init__(self,nome):
self.__nome = nome
def getNome(self):
return self.__nome
class Diretor(Pessoa):
def __init__(self,nome):
super().__init__(nome)
    def getNome(self):
        # Delegate to Pessoa: name mangling would make self.__nome resolve to the
        # never-set _Diretor__nome attribute here.
        return super().getNome()
class Funcionario(Pessoa):
def __init__(self,nome,idade,salario,sexo):
super().__init__(nome)
self.__idade = idade
self.__salario = salario
self.__sexo = sexo
    def getNome(self):
        # Delegate to Pessoa for the same name-mangling reason as in Diretor.
        return super().getNome()
def getIdade(self):
return self.__idade
def getSalario(self):
return self.__salario
def getSexo(self):
return self.__sexo
class Sala():
def __init__(self,id,cap):
self.__id = id
self.__cap = cap
def getId(self):
return self.__id
def getCap(self):
return self.__cap
class Filme():
def __init__(self,nome,lancamento,diretor,genero,duracao):
self.__nome = nome
self.__lancamento = lancamento
self.__diretor = diretor
self.__genero = genero
self.__duracao = duracao
def getNome(self):
return self.__nome
def getLancamento(self):
return self.__lancamento
def getDiretor(self):
return self.__diretor
def getGenero(self):
return self.__genero
def getDuracao(self):
return self.__duracao
class Sessao():
def __init__(self,inicio,preco):
self.__inicio = inicio
self.__preco = preco
self.__local = ''
self.__filme = ''
def getIncio(self):
return self.__inicio
def getLocal(self):
return self.__local.getId()
def getLocall(self):
return self.__local
def getPreco(self):
return self.__preco
def getFilme(self):
return self.__filme.getNome()
def getFilmee(self):
return self.__filme
def setFilme(self,x):
self.__filme = x
def setLocal(self,x):
self.__local = x
def __repr__(self):
return 'Incio: {}\nPreco: {}\nLocal(sala): {}\nFilme: {}'.format(self.getIncio(),self.getPreco(),self.getLocal(),self.getFilme())
cine0 = Cinema('cine0','88130-000')
func0 = Funcionario('thiago',19,1900,'masculino')
dire0 = Diretor('lorenzo')
film0 = Filme('film0','08/2019',dire0,'comedia',130)
film1 = Filme('film1','08/2012',dire0,'acao',120)
film2 = Filme('film2','08/2011',dire0,'comedia',90)
film3 = Filme('film3','08/2014',dire0,'terror',200)
film4 = Filme('film4','08/2015',dire0,'suspense',100)
film5 = Filme('film5','08/2016',dire0,'comedia',45)
sala0 = Sala(0,100)
sala1 = Sala(1,101)
sala2 = Sala(2,102)
sala3 = Sala(3,103)
sala4 = Sala(4,104)
sala5 = Sala(5,105)
sess0 = Sessao('28/04/3010 - 13:30',12)
sess1 = Sessao('28/04/3011 - 13:30',20)
sess2 = Sessao('28/04/3012 - 13:30',18)
sess3 = Sessao('28/04/3013 - 13:30',121)
sess4 = Sessao('28/04/3014 - 13:30',32)
sess5 = Sessao('28/04/3015 - 13:30',122)
cine0.setFuncionario(func0)
sess0.setFilme(film0)
sess1.setFilme(film1)
sess2.setFilme(film2)
sess3.setFilme(film3)
sess4.setFilme(film4)
sess5.setFilme(film5)
sess0.setLocal(sala0)
sess1.setLocal(sala1)
sess2.setLocal(sala2)
sess3.setLocal(sala3)
sess4.setLocal(sala4)
sess5.setLocal(sala5)
cine0.setFilme(film0)
cine0.setFilme(film1)
cine0.setFilme(film2)
cine0.setFilme(film3)
cine0.setFilme(film4)
cine0.setFilme(film5)
cine0.setSessao(sess0)
cine0.setSessao(sess1)
cine0.setSessao(sess2)
cine0.setSessao(sess3)
cine0.setSessao(sess4)
cine0.setSessao(sess5)
cine0.setSala(sala0)
cine0.setSala(sala1)
cine0.setSala(sala2)
cine0.setSala(sala3)
cine0.setSala(sala4)
cine0.setSala(sala5)
print(cine0.getSessaoMaisCara())
print()
print(cine0.getFilmeComedia())
print()
print(cine0.getGeneroMaisFrequente())
print()
print(cine0.getMaiorLucro())
|
[
"thiago.comelli@outlook.com"
] |
thiago.comelli@outlook.com
|
429ac7ba8eb6de79c8e5918dfb728409774bf846
|
25f16d9e3416e186f677e425d7c3c19fb1b6b76a
|
/qt5_exercises/concurrent/bad_example_1.py
|
fccb43f8a4882055a4ed050ebd0b638cfb424564
|
[] |
no_license
|
amisaka/pyqt5Samples
|
662b33255f9fd3522a8600e23b1f5e742eef769e
|
40f295232a726b4d67cc3124dcf6ac46a2efe9c8
|
refs/heads/master
| 2023-08-27T18:11:58.162821
| 2021-11-03T14:06:10
| 2021-11-03T14:06:10
| 419,336,692
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
import sys
import time
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (
QApplication,
QLabel,
QMainWindow,
QPushButton,
QVBoxLayout,
QWidget,
)
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.counter = 0
layout = QVBoxLayout()
self.l = QLabel("Start")
b = QPushButton("DANGER!")
b.pressed.connect(self.oh_no)
layout.addWidget(self.l)
layout.addWidget(b)
w = QWidget()
w.setLayout(layout)
self.setCentralWidget(w)
self.show()
self.timer = QTimer()
self.timer.setInterval(1000)
self.timer.timeout.connect(self.recurring_timer)
self.timer.start()
def oh_no(self):
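        # Sleeping on the GUI thread blocks the Qt event loop for five seconds, so the
        # window freezes and the QTimer-driven counter stops updating until this returns.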
time.sleep(5)
def recurring_timer(self):
self.counter += 1
self.l.setText("Counter: %d" % self.counter)
app = QApplication(sys.argv)
window = MainWindow()
app.exec_()
|
[
"amisaka@gmail.com"
] |
amisaka@gmail.com
|
4d1b9324b39491d5d9bc18a2121746ef329ae04d
|
b34641367c20afd688050976339ebae3ca7220a0
|
/somemart_auth/somemart_auth/bin/django-admin.py
|
53e30b0ed682707f5bb3c5c3d3a6699aba4d164c
|
[] |
no_license
|
DrShiz/learning4
|
cee6e46f733badbe17ab410e5a89b2106e493d2e
|
85fa372d3bdccf7da487d13d147c1b639ddceefd
|
refs/heads/master
| 2023-03-06T00:20:00.048331
| 2021-02-16T16:39:35
| 2021-02-16T16:39:35
| 339,447,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
#!/Users/terekhovas/PycharmProjects/learning4/somemart_auth/somemart_auth/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"shizdryeah@gmail.com"
] |
shizdryeah@gmail.com
|
ffe7dd88fd8bc92e657aaa2d716937bfe5577bb7
|
016200d5593feb15bf7737389586bd161398a09c
|
/Database/venv/bin/rstpep2html.py
|
d3334da4c02e7abe3f073230dad54c21f6a5896b
|
[] |
no_license
|
MarcPartensky/Python-2019
|
d74e41710c9b48887e141ef5a8251f5e5d06026d
|
1b29680292fdc48af25ae45ce0e9572b8c31427d
|
refs/heads/master
| 2021-07-07T18:46:49.708387
| 2020-08-11T19:49:01
| 2020-08-11T19:49:01
| 166,604,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
#!/Users/marcpartensky/Programs/Python/Repository-2019/Database/venv/bin/python
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML from reStructuredText-format PEP files. '
+ default_description)
publish_cmdline(reader_name='pep', writer_name='pep_html',
description=description)
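# Example command line (a sketch; the PEP source and output file names are hypothetical):
#   python rstpep2html.py pep-9999.txt pep-9999.html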
|
[
"marc.partensky@gmail.com"
] |
marc.partensky@gmail.com
|
c4f18d847f19ca495879aa38a6784637e2cdc09e
|
f34298735dbaee1a56da7dcc477f2734c6a1a305
|
/src/tandlr/emails/views.py
|
7e5be2847adb90189418ba321dbc33b1554a340c
|
[
"Apache-2.0"
] |
permissive
|
shrmoud/schoolapp
|
6f8e71b68cf42b6d2ac54acb42ed0a4664d5aaa9
|
7349ce18f56658d67daedf5e1abb352b5c15a029
|
refs/heads/master
| 2021-03-24T13:34:44.385285
| 2017-06-11T21:37:53
| 2017-06-11T21:37:53
| 94,031,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.template import TemplateDoesNotExist
from django.views.generic.base import TemplateView
from tandlr.scheduled_classes.models import Class
class StaticEmailView(TemplateView):
"""
View to render static email templates for development.
example:
tandlr.local/email-preview/registration/confirmation_email.html
"""
def get_template_names(self):
return 'email/%s' % self.kwargs['page']
def get(self, request, page):
try:
return self.render_to_response(self.get_context_data())
except TemplateDoesNotExist:
raise Http404
def get_context_data(self, **kwargs):
context = super(StaticEmailView, self).get_context_data(**kwargs)
if self.request.GET.get('session_id'):
context['booking'] = get_object_or_404(
Class,
id=self.request.GET.get('session_id')
)
return context
|
[
"smoudgal@hawk.iit.edu"
] |
smoudgal@hawk.iit.edu
|
80d8b80562719182cb64b44da2035610d8f622f3
|
86206b05a6e0a425ba5401de50b8645bddf77780
|
/Oper Python/SFDC Oper/Training Scripts/Selenium/Oper1/Sandbox/XLLib.py
|
c3f4c611f5f1340cccab5f36f85d28d7b2f13bba
|
[] |
no_license
|
QuestTestAutomation/PersistentDesktop1python
|
2e626ea16ce0fd4c697b156fdc2f9b3ca85bbd7b
|
ece25957edb6f87b2777b261b31914d22ebd99ad
|
refs/heads/master
| 2021-03-10T21:55:25.450872
| 2020-03-27T09:45:14
| 2020-03-27T09:45:14
| 246,488,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,491
|
py
|
import openpyxl
import os
import time
from openpyxl.utils import get_column_letter
from openpyxl import Workbook
from openpyxl.compat import range
def Create_Workbook(file,sheetname):
sheetexists = 0
if os.path.isfile(file):
mywb = openpyxl.load_workbook(file)
print("XL file exists")
else:
print("XL file does not exists")
mywb = openpyxl.Workbook()
time.sleep(5)
sheet = mywb.active
sheet.title = sheetname
mywb.save(file)
time.sleep(5)
def Create_Worksheets(file,sheetnames):
sheetexists = 0
i = 0
if os.path.isfile(file):
mywb = openpyxl.load_workbook(file)
print("XL file exists")
for sheetname in sheetnames:
mywb.create_sheet(index= i, title= sheetname)
mywb.save(file)
time.sleep(3)
else:
print("XL file does not exists")
mywb = openpyxl.Workbook()
time.sleep(10)
def print_XL_cell_values(file,sheetname,irow,icolumn):
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
for row in range(1, mysheet.max_row):
for col in range(1, mysheet.max_column):
            if mysheet.cell(column=col, row=row).value is not None:
print mysheet.cell(column=col, row=row).value
def get_XL_column_letter(file,sheetname,columnvalue):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
for col in range(1, mysheet.max_column):
if (str(mysheet.cell(column=col, row=1).value)).upper() == (str(columnvalue)).upper():
id = format(get_column_letter(col))
break
return id
def get_XL_column_index(file,sheetname,columnvalue):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
print "max column is " + str(mysheet.max_column)
for col in range(1, mysheet.max_column):
# print "***************"
# print str((mysheet.cell(column=col, row=1).value)).upper()
# print (str(columnvalue)).upper()
# print (str(mysheet.cell(column=col, row=1).value)).upper() == (str(columnvalue)).upper()
if (str(mysheet.cell(column=col, row=1).value)).upper() == (str(columnvalue)).upper():
id = col
break
return id
def get_XL_cell_value(file,sheetname,irow,icolumn):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
id = mysheet.cell(column=icolumn, row=irow).value
return id
def get_XL_cell_value_using_column_header(file,sheetname,irow,columnheader):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
icolumn = get_XL_column_index(file,sheetname,columnheader)
print "The Column is : " + str(icolumn)
id = mysheet.cell(column=int(icolumn), row=irow).value
id = str(id).strip()
return id
def set_XL_cell_value(file,sheetname,irow,icolumn,cellvalue):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
mysheet.cell(column=icolumn, row=irow).value = cellvalue
mywb.save(file)
time.sleep(5)
def set_XL_cell_value_using_column_header(file,sheetname,irow,columnheader,cellvalue):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
icolumn = get_XL_column_index(file,sheetname,columnheader)
mysheet.cell(column=int(icolumn), row=irow).value = cellvalue
mywb.save(file)
time.sleep(5)
def create_XL_header_lists(file,sheetname,lists):
col = 1
Create_Workbook(file, sheetname)
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
for list in lists :
mysheet.cell(column=int(col), row=1).value = list
col = int(col) + int(1)
mywb.save(file)
time.sleep(5)
def add_XL_header_column(file,sheetname,columnheader):
Create_Workbook(file, sheetname)
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
col = int(mysheet.max_column) + int(1)
mysheet.cell(column=int(col), row=1).value = columnheader
mywb.save(file)
time.sleep(5)
def copy_XL_workbook(sourcefile,targetfile):
if os.path.isfile(sourcefile):
sourcewb = openpyxl.load_workbook(sourcefile)
wslists = sourcewb.sheetnames
for wslist in wslists:
print "sheet name is : " + str(wslist)
if os.path.isfile(targetfile):
mywb = openpyxl.load_workbook(targetfile)
print "XL file exists"
else:
print "XL file does not exists"
mywb = openpyxl.Workbook(targetfile)
# mywb.save(targetfile)
# time.sleep(5)
sourcews = sourcewb.get_sheet_by_name(str(wslist))
myws = sourcewb.create_sheet(index=int(sourcewb.get_index(sourcews)), title= str(wslist))
# Myws = mywb.active
myws = sourcewb.copy_worksheet(sourcews)
mywb.save(targetfile)
time.sleep(5)
def get_XL_row_count(file,sheetname):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
id = mysheet.max_row
return id
def get_XL_column_count(file,sheetname):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
id = mysheet.max_column
return id
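# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# Assuming a workbook path 'demo.xlsx' and a sheet named 'Data':
# Create_Workbook('demo.xlsx', 'Data')
# set_XL_cell_value('demo.xlsx', 'Data', irow=1, icolumn=1, cellvalue='Name')
# print get_XL_cell_value('demo.xlsx', 'Data', irow=1, icolumn=1)   # -> 'Name'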
|
[
"45171766+QuestTestAutomation@users.noreply.github.com"
] |
45171766+QuestTestAutomation@users.noreply.github.com
|
a34584dbef4b37a0544599bc74d308186d99c177
|
b7138d31e13920ad1bf6a82ff062a6f512c48983
|
/cal_test.py
|
a9aa6c440b822b666232f96a284da723bc4b737a
|
[] |
no_license
|
aaqqxx/ZA_cal
|
bcb6863a1aa882ed34fb18a4070ecb316568a3f1
|
2b604d2f1d472666d03c0a63a3c18f5710650a2e
|
refs/heads/master
| 2020-05-04T14:03:49.846304
| 2019-04-03T01:05:04
| 2019-04-03T01:05:04
| 179,183,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
# coding:utf-8
__author__ = 'XingHua'
"""
Grating-scale (encoder) position vs. optical output position.
The reference point is the mirror in front of d1; see the 90 nm ZA drawing.
Compute the optical positions d1, d2, d3, d5 from the grating-scale positions in the Excel sheet.
Compute the grating-scale position from the optical position.
"""
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
import pandas as pd
import argparse
import sys
import math
def quadratic(a, b, c):
if not isinstance(a, (int, float)):
raise TypeError('a is not a number')
if not isinstance(b, (int, float)):
raise TypeError('b is not a number')
if not isinstance(c, (int, float)):
raise TypeError('c is not a number')
derta = b * b - 4 * a * c
if a == 0:
if b == 0:
if c == 0:
                return 'The equation is satisfied by all real numbers'
            else:
                return 'The equation has no roots'
        else:
            x1 = -c / b
            x2 = x1
            return x1, x2
    else:
        if derta < 0:
            return 'The equation has no roots'
else:
x1 = (-b + math.sqrt(derta)) / (2 * a)
x2 = (-b - math.sqrt(derta)) / (2 * a)
return x1, x2
print(quadratic(2, 3, 1))
print(quadratic(1, 3, -4))
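# Expected output of the two calls above (editor's note):
# 2x^2 + 3x + 1 = 0  -> (-0.5, -1.0)
# x^2  + 3x - 4 = 0  -> (1.0, -4.0)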
|
[
"aaqqxx1910@gmail.com"
] |
aaqqxx1910@gmail.com
|
0f47c9b1f5dd09360682347e38d451c90365764d
|
3bb57eb1f7c1c0aced487e7ce88f3cb84d979054
|
/semeval/corpora/semeval/test_tags.py
|
9ba72eace0ee9a1f33302f8a869d091beb0940c0
|
[] |
no_license
|
ghpaetzold/phd-backup
|
e100cd0bbef82644dacc73a8d1c6b757b2203f71
|
6f5eee43e34baa796efb16db0bc8562243a049b6
|
refs/heads/master
| 2020-12-24T16:41:21.490426
| 2016-04-23T14:50:07
| 2016-04-23T14:50:07
| 37,981,094
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
f1 = open('semeval_test_clean.txt')
f2 = open('tagged_sents_semeval_test.txt')
c = 0
for line1 in f1:
line2 = f2.readline()
tokens = line1.strip().split('\t')[0].strip().split(' ')
tags = line2.strip().split(' ')
if len(tokens)!=len(tags):
print('Tokens: ' + str(tokens))
print('Tags: ' + str(tags))
print('')
c += 1
f1.close()
f2.close()
print(str(c))
|
[
"ghpaetzold@outlook.com"
] |
ghpaetzold@outlook.com
|
7130805026446d24d544f10d895736cf095e094e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02937/s210990064.py
|
f15af763a1739d3abe1ae11028c005fee5325924
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
from bisect import bisect_left
import sys
s = input()
t = input()
d = {char:[] for char in 'abcdefghijklmnopqrstuvwxyz'}
for i in range(len(s)):
d[s[i]].append(i)
cnt = 0
index = 0
for char in t:
if not d[char]:
print(-1)
sys.exit()
i = bisect_left(d[char], index)
if i == len(d[char]):
cnt += 1
index = d[char][0]
else:
index = d[char][i]
index += 1
ans = cnt * len(s) + index
print(ans)
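# Worked example (editor's note): with s='ab' and t='aab',
# 'aab' first appears as a subsequence after 4 characters of s repeated
# ('abab' -> positions 0, 2, 3), so the script prints 4.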
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d8947f54da103676026570125e67410bffb7919f
|
99d7765da35926279c4a4fd7313d55908786f4b8
|
/1/7/17256/17256.py
|
d960171dbed858b264d23746c09f153f0475dfbb
|
[
"MIT"
] |
permissive
|
chr0m3/boj-codes
|
b8294c5d4d10a5af25b5276427bccd74d0866ef5
|
d71d0a22d0a3ae62c225f382442461275f56fe8f
|
refs/heads/master
| 2021-08-16T15:24:57.733088
| 2021-03-22T13:13:10
| 2021-03-22T13:13:10
| 91,523,558
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
ax, ay, az = map(int, input().split())
cx, cy, cz = map(int, input().split())
bx = cx - az
by = int(cy / ay)
bz = cz - ax
print(f'{bx} {by} {bz}')
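# Example (editor's note): with input lines "1 2 3" and "4 6 9",
# the script prints "1 3 8" (bx = 4 - 3, by = 6 // 2, bz = 9 - 1).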
|
[
"chr0m3@users.noreply.github.com"
] |
chr0m3@users.noreply.github.com
|
bcfda7e647bf3de983700535dd5d34d12deef7d1
|
a01fb7bb8e8738a3170083d84bc3fcfd40e7e44f
|
/python3/core/meta/define_class.py
|
f2363c09f30ec08eda430d475519793a0f9d812f
|
[] |
no_license
|
jk983294/CommonScript
|
f07acf603611b4691b176aa4a02791ef7d4d9370
|
774bcbbae9c146f37312c771c9e867fb93a0c452
|
refs/heads/master
| 2023-08-21T17:50:19.036159
| 2023-08-16T00:22:03
| 2023-08-16T00:22:03
| 42,732,160
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
import types
# Example of making a class manually from parts
# Methods
def __init__(self, name, shares, price):
self.name = name
self.shares = shares
self.price = price
def cost(self):
return self.shares * self.price
cls_dict = {
'__init__': __init__,
'cost': cost,
}
# Make a class
Stock = types.new_class('Stock', (), {}, lambda ns: ns.update(cls_dict))
Stock.__module__ = __name__
s = Stock('ACME', 50, 91.1)
print(s.cost()) # 4555.0
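# For reference (editor's note): the dynamically built class behaves like a
# conventionally defined one, i.e. roughly equivalent to
#
# class Stock:
#     def __init__(self, name, shares, price): ...
#     def cost(self): ...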
|
[
"jk983294@gmail.com"
] |
jk983294@gmail.com
|
38fea3cb4a84ad737b0db2e9f0e3d3bd11cf0e52
|
0343de40021f8dd72fb9a6cb31b5d2f24ccd7971
|
/utilities/wake_models_mean/array_efficiency_openMDAO.py
|
fc3e2db61bcf729ad72f210df061bd8098d2014a
|
[] |
no_license
|
sebasanper/WINDOW_dev
|
47ae9252e6fadb2a3b1a0aae3383681a7955f4ea
|
3c6437a777f2fc3be1dfd3d53b5d2ed25281c55c
|
refs/heads/master
| 2021-01-01T19:45:02.555727
| 2018-05-21T20:27:56
| 2018-05-21T20:27:56
| 98,670,270
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,855
|
py
|
from numpy import array
from openmdao.api import Component, Group, Problem
from farm_energy.layout import read_layout
from power_models import power_v90 as power
from site_conditions.wind_conditions.windrose import read_windrose
from wake_models import jensen_1angle, ainslie_1angle, larsen_1angle
class JensenWindRose(Component):
def __init__(self):
super(JensenWindRose, self).__init__()
self.add_param('layout_x', shape=(9,))
self.add_param('layout_y', shape=(9,))
self.add_param('windrose_direction', shape=(4,))
self.add_param('windrose_speed', shape=(4,))
self.add_param('windrose_probability', shape=(4,))
self.add_output('array_efficiency', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
layout_x = params['layout_x']
layout_y = params['layout_y']
wind_direction = params['windrose_direction']
wind_speed = params['windrose_speed']
wind_frequency = params['windrose_probability']
efficiency = []
profit = []
summation = 0.0
nt = len(layout_y)
P = []
U = []
efficiency_proportion = []
for wind in range(len(wind_direction)):
U0 = wind_speed[wind] # Free stream wind speed
angle = wind_direction[wind]
# angle2 = - 270.0 - angle # To read windroses where N is 0 and E is 90
U.append(jensen_1angle(layout_x, layout_y, U0, angle, rotor_radius=40.0, k=0.04))
P.append([power(u) for u in U[-1]])
# Farm efficiency
profit.append(sum(P[-1]))
efficiency.append(profit[-1] * 100.0 / (float(nt) * max(P[-1]))) # same as using U0
efficiency_proportion.append(efficiency[-1] * wind_frequency[wind] / 100.0)
summation += efficiency_proportion[wind]
# print profit
# print efficiency
# print efficiency_proportion
# print U
# print P
unknowns['array_efficiency'] = summation
class AinslieWindRose(Component):
def __init__(self):
super(AinslieWindRose, self).__init__()
self.add_param('layout_x', shape=(9,))
self.add_param('layout_y', shape=(9,))
self.add_param('windrose_direction', shape=(4,))
self.add_param('windrose_speed', shape=(4,))
self.add_param('windrose_probability', shape=(4,))
self.add_output('array_efficiency', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
layout_x = params['layout_x']
layout_y = params['layout_y']
wind_direction = params['windrose_direction']
wind_speed = params['windrose_speed']
wind_frequency = params['windrose_probability']
efficiency = []
profit = []
summation = 0.0
nt = len(layout_y)
P = []
U = []
efficiency_proportion = []
for wind in range(len(wind_direction)):
U0 = wind_speed[wind] # Free stream wind speed
angle = wind_direction[wind]
# angle2 = - 270.0 - angle # To read windroses where N is 0 and E is 90
U.append(ainslie_1angle(layout_x, layout_y, U0, angle, rotor_radius=40.0, TI=0.08))
P.append([power(u) for u in U[-1]])
# Farm efficiency
profit.append(sum(P[-1]))
efficiency.append(profit[-1] * 100.0 / (float(nt) * max(P[-1]))) # same as using U0
efficiency_proportion.append(efficiency[-1] * wind_frequency[wind] / 100.0)
summation += efficiency_proportion[wind]
# print profit
# print efficiency
# print efficiency_proportion
# print U
# print P
unknowns['array_efficiency'] = summation
class LarsenWindRose(Component):
def __init__(self):
super(LarsenWindRose, self).__init__()
self.add_param('layout_x', shape=(9,))
self.add_param('layout_y', shape=(9,))
self.add_param('windrose_direction', shape=(4,))
self.add_param('windrose_speed', shape=(4,))
self.add_param('windrose_probability', shape=(4,))
self.add_output('array_efficiency', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
layout_x = params['layout_x']
layout_y = params['layout_y']
wind_direction = params['windrose_direction']
wind_speed = params['windrose_speed']
wind_frequency = params['windrose_probability']
efficiency = []
profit = []
summation = 0.0
nt = len(layout_y)
P = []
U = []
efficiency_proportion = []
for wind in range(len(wind_direction)):
U0 = wind_speed[wind] # Free stream wind speed
angle = wind_direction[wind]
# angle2 = - 270.0 - angle # To read windroses where N is 0 and E is 90
U.append(larsen_1angle(layout_x, layout_y, U0, angle, rotor_radius=40.0, hub_height=100.0, TI=0.08))
P.append([power(u) for u in U[-1]])
# Farm efficiency
profit.append(sum(P[-1]))
efficiency.append(profit[-1] * 100.0 / (float(nt) * max(P[-1]))) # same as using U0
efficiency_proportion.append(efficiency[-1] * wind_frequency[wind] / 100.0)
summation += efficiency_proportion[wind]
# print profit
# print efficiency
# print efficiency_proportion
# print U
# print P
unknowns['array_efficiency'] = summation
if __name__ == '__main__':
layout_x, layout_y = read_layout('coordinates.dat')
windrose_direction, windrose_speed, windrose_probability = read_windrose('windrose.dat')
root = Group()
root.add('jensen', JensenWindRose())
root.add('ainslie', AinslieWindRose())
root.add('larsen', LarsenWindRose())
prob = Problem(root)
prob.setup()
prob['jensen.layout_x'] = prob['ainslie.layout_x'] = prob['larsen.layout_x'] = array(layout_x)
prob['jensen.layout_y'] = prob['ainslie.layout_y'] = prob['larsen.layout_y'] = array(layout_y)
prob['jensen.windrose_direction'] = prob['ainslie.windrose_direction'] = prob['larsen.windrose_direction'] = array(windrose_direction)
prob['jensen.windrose_speed'] = prob['ainslie.windrose_speed'] = prob['larsen.windrose_speed'] = array(windrose_speed)
prob['jensen.windrose_probability'] = prob['ainslie.windrose_probability'] = prob['larsen.windrose_probability'] = array(windrose_probability)
prob.run()
efficiency_jensen = prob['jensen.array_efficiency']
efficiency_ainslie = prob['ainslie.array_efficiency']
efficiency_larsen = prob['larsen.array_efficiency']
print 'Jensen'
print efficiency_jensen
print
print 'Ainslie'
print efficiency_ainslie
print
print 'Larsen'
print efficiency_larsen
|
[
"s.sanchezperezmoreno@tudelft.nl"
] |
s.sanchezperezmoreno@tudelft.nl
|
356538f816b7e65cf077f0dc5839f9ca201bca9f
|
edbb63696580638af0084ee318d2c9bc9e8c7e79
|
/linkf.py
|
f95261c46757de6bb39993f4c951b15bf01b7640
|
[] |
no_license
|
awaddell77/Scrapers
|
fef34e34b8e039f4992497cae75135cdb57b2581
|
0a36fb2c99f2d7b90533834b29c0ba8f27c13a85
|
refs/heads/master
| 2020-05-21T13:44:06.524855
| 2020-03-16T23:00:45
| 2020-03-16T23:00:45
| 62,753,048
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
import re


def linkF(s,n, base = 0, attrs= 0, default = '"'):#x is the item, n = tag takes link tag (MUST BE STRING) and extracts the link
l =[]
ln = ''
x = s
    if type(attrs) == str:
        # when attrs is a pattern string, strip that pattern instead of the default '<a'
        x = re.sub(attrs, '', x)
    elif attrs != 0:
        x = re.sub('<a','', x)#strips the tag from the string, helps in certain situations where the location of the link changes in between elements
ln_s = x.split(default)
for i in range(0, len(ln_s)):
if ln_s[i] == n or ln_s[i] == ' ' + n:
if ln_s[i+1] != 'javascript:void(0);':
                ln = ln_s[i+1] #ln is the link (still needs to be joined with the base URL)
if base == 0 and not ln or ln is None:
return ""
else:
ln = base + ln #MAJOR WORKAROUND!!!! IN THE FUTURE THS SHOULD CALL A FUNCTION THAT FINDS THE BASE
return ln
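# Illustrative call (editor's addition, not part of the original file); traced by hand:
# linkF('<a href="/about">About</a>', 'href=', base='https://example.com', attrs=1)
# -> 'https://example.com/about'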
|
[
"waddell.andrew@gmail.com"
] |
waddell.andrew@gmail.com
|
8f9fac60b55576c80814a327c149e2c7951c05eb
|
1b3addbc9473b6ffb999665601470ccc4d1153b0
|
/libs/magic.py
|
53a91fd47331bd4941f6ce36bfa0ea6caa24c08f
|
[] |
no_license
|
weijia/approot
|
e1f712fa92c4c3200210eb95d251d890295769ba
|
15fac5b31a4d619d1bdede3d1131f5e6e57cd43b
|
refs/heads/master
| 2020-04-15T13:15:01.956721
| 2014-08-26T14:02:17
| 2014-08-26T14:02:17
| 11,049,975
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,745
|
py
|
"""
magic is a wrapper around the libmagic file identification library.
See README for more information.
Usage:
>>> import magic
>>> magic.from_file("testdata/test.pdf")
'PDF document, version 1.2'
>>> magic.from_file("testdata/test.pdf", mime=True)
'application/pdf'
>>> magic.from_buffer(open("testdata/test.pdf").read(1024))
'PDF document, version 1.2'
>>>
"""
import os.path
import ctypes
import ctypes.util
from ctypes import c_char_p, c_int, c_size_t, c_void_p
class MagicException(Exception): pass
class Magic:
"""
Magic is a wrapper around the libmagic C library.
"""
def __init__(self, mime=False, magic_file=None, mime_encoding=False):
"""
Create a new libmagic wrapper.
mime - if True, mimetypes are returned instead of textual descriptions
mime_encoding - if True, codec is returned
magic_file - use a mime database other than the system default
"""
flags = MAGIC_NONE
if mime:
flags |= MAGIC_MIME
elif mime_encoding:
flags |= MAGIC_MIME_ENCODING
self.cookie = magic_open(flags)
magic_load(self.cookie, magic_file)
def from_buffer(self, buf):
"""
Identify the contents of `buf`
"""
return magic_buffer(self.cookie, buf)
def from_file(self, filename):
"""
Identify the contents of file `filename`
raises IOError if the file does not exist
"""
if not os.path.exists(filename):
raise IOError("File does not exist: " + filename)
return magic_file(self.cookie, filename)
def __del__(self):
if self.cookie:
magic_close(self.cookie)
self.cookie = None
_magic_mime = None
_magic = None
def _get_magic_mime():
global _magic_mime
if not _magic_mime:
_magic_mime = Magic(mime=True)
return _magic_mime
def _get_magic():
global _magic
if not _magic:
_magic = Magic()
return _magic
def _get_magic_type(mime):
if mime:
return _get_magic_mime()
else:
return _get_magic()
def from_file(filename, mime=False):
m = _get_magic_type(mime)
return m.from_file(filename)
def from_buffer(buffer, mime=False):
m = _get_magic_type(mime)
return m.from_buffer(buffer)
libmagic = None
# Let's try to find magic or magic1
dll = ctypes.util.find_library('magic') or ctypes.util.find_library('magic1')
# This is necessary because find_library returns None if it doesn't find the library
if dll:
libmagic = ctypes.CDLL(dll)
if not libmagic or not libmagic._name:
import sys
platform_to_lib = {'darwin': '/opt/local/lib/libmagic.dylib',
'win32': 'magic1.dll'}
if sys.platform in platform_to_lib:
try:
libmagic = ctypes.CDLL(platform_to_lib[sys.platform])
except OSError:
pass
if not libmagic or not libmagic._name:
# It is better to raise an ImportError since we are importing magic module
raise ImportError('failed to find libmagic. Check your installation')
magic_t = ctypes.c_void_p
def errorcheck(result, func, args):
err = magic_error(args[0])
if err is not None:
raise MagicException(err)
else:
return result
magic_open = libmagic.magic_open
magic_open.restype = magic_t
magic_open.argtypes = [c_int]
magic_close = libmagic.magic_close
magic_close.restype = None
magic_close.argtypes = [magic_t]
magic_error = libmagic.magic_error
magic_error.restype = c_char_p
magic_error.argtypes = [magic_t]
magic_errno = libmagic.magic_errno
magic_errno.restype = c_int
magic_errno.argtypes = [magic_t]
magic_file = libmagic.magic_file
magic_file.restype = c_char_p
magic_file.argtypes = [magic_t, c_char_p]
magic_file.errcheck = errorcheck
_magic_buffer = libmagic.magic_buffer
_magic_buffer.restype = c_char_p
_magic_buffer.argtypes = [magic_t, c_void_p, c_size_t]
_magic_buffer.errcheck = errorcheck
def magic_buffer(cookie, buf):
return _magic_buffer(cookie, buf, len(buf))
magic_load = libmagic.magic_load
magic_load.restype = c_int
magic_load.argtypes = [magic_t, c_char_p]
magic_load.errcheck = errorcheck
magic_setflags = libmagic.magic_setflags
magic_setflags.restype = c_int
magic_setflags.argtypes = [magic_t, c_int]
magic_check = libmagic.magic_check
magic_check.restype = c_int
magic_check.argtypes = [magic_t, c_char_p]
magic_compile = libmagic.magic_compile
magic_compile.restype = c_int
magic_compile.argtypes = [magic_t, c_char_p]
MAGIC_NONE = 0x000000 # No flags
MAGIC_DEBUG = 0x000001 # Turn on debugging
MAGIC_SYMLINK = 0x000002 # Follow symlinks
MAGIC_COMPRESS = 0x000004 # Check inside compressed files
MAGIC_DEVICES = 0x000008 # Look at the contents of devices
MAGIC_MIME = 0x000010 # Return a mime string
MAGIC_MIME_ENCODING = 0x000400 # Return the MIME encoding
MAGIC_CONTINUE = 0x000020 # Return all matches
MAGIC_CHECK = 0x000040 # Print warnings to stderr
MAGIC_PRESERVE_ATIME = 0x000080 # Restore access time on exit
MAGIC_RAW = 0x000100 # Don't translate unprintable chars
MAGIC_ERROR = 0x000200 # Handle ENOENT etc as real errors
MAGIC_NO_CHECK_COMPRESS = 0x001000 # Don't check for compressed files
MAGIC_NO_CHECK_TAR = 0x002000 # Don't check for tar files
MAGIC_NO_CHECK_SOFT = 0x004000 # Don't check magic entries
MAGIC_NO_CHECK_APPTYPE = 0x008000 # Don't check application type
MAGIC_NO_CHECK_ELF = 0x010000 # Don't check for elf details
MAGIC_NO_CHECK_ASCII = 0x020000 # Don't check for ascii files
MAGIC_NO_CHECK_TROFF = 0x040000 # Don't check ascii/troff
MAGIC_NO_CHECK_FORTRAN = 0x080000 # Don't check ascii/fortran
MAGIC_NO_CHECK_TOKENS = 0x100000 # Don't check ascii/tokens
|
[
"richardwangwang@gmail.com"
] |
richardwangwang@gmail.com
|
1d4c22ebff1083d8a40dc2c2f66e9ac41fe68e8e
|
40ca9c74e41047b4f057923894f664653c2317c8
|
/002_celery/django/sth/views.py
|
e25e6e38ffa47e47f99b2dd7b42f50441edad371
|
[
"MIT"
] |
permissive
|
MeNsaaH/Task-Queues-Django
|
1207856fd382baa63f018b60e5c6b79fc0c52b31
|
73bebc9b755e2943455939817325aa63a1aa7f62
|
refs/heads/master
| 2022-11-21T09:44:57.856364
| 2018-10-11T11:16:50
| 2018-10-11T11:16:50
| 152,408,206
| 0
| 0
|
MIT
| 2022-11-10T12:56:38
| 2018-10-10T10:49:40
|
Python
|
UTF-8
|
Python
| false
| false
| 323
|
py
|
from django.http import HttpResponse
from sth.tasks import some_task
# Create your views here.
def some_view(request):
result = some_task.delay()
# Other Methods on Celery AsyncResult
# result.ready()
# result.get(timeout=1)
# result.traceback
    return HttpResponse('some task has been started')
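# Editor's sketch (assumption -- sth/tasks.py is not shown in this file):
# a minimal matching task definition could look like
#
# from celery import shared_task
#
# @shared_task
# def some_task():
#     return 'done'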
|
[
"="
] |
=
|
906d663716faf84d5efa71adc0baced6ba2e9174
|
5ecdbc6e79c408ed45327ffeae11eae91c4b8ce6
|
/ilusiones_sales/models/contract_number.py
|
f898d4746618b0bb0ccf7fbf890269a839a2dc93
|
[] |
no_license
|
rosalesdc/ejemplos_addons
|
c6ee5cf9a10935b38165eca84c07a84d4d9c2538
|
8c3e74a3d0145b74cb8288772763e88f39979e6f
|
refs/heads/master
| 2022-12-22T07:21:16.238052
| 2020-09-25T06:18:34
| 2020-09-25T06:18:34
| 298,184,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class ContractNumber(models.Model):
_name = 'contract.number'
_description = "Numero de conrato"
name = fields.Char(string='Numero de serie', required=True)
date = fields.Date(string="Fecha del contrato")
|
[
"rosales9146@gmail.com"
] |
rosales9146@gmail.com
|
b101bda0005c03561d755e2270489d08ff015499
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/layout/scene/zaxis/_categoryarraysrc.py
|
b38055e7ec20ace3ec74c3b976ac52a75b5fd63d
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
import _plotly_utils.basevalidators
class CategoryarraysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="categoryarraysrc", parent_name="layout.scene.zaxis", **kwargs
):
super(CategoryarraysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
1f84c1a411609c8c5b853c69c2baa18edb4f027e
|
05148c0ea223cfc7ed9d16234ab3e6bb40885e9d
|
/Packages/matplotlib-2.2.2/lib/mpl_examples/pyplots/whats_new_98_4_fill_between.py
|
ed4b7f4ac7d55386bb003fc91a8e1dfea59642c1
|
[
"MIT"
] |
permissive
|
NightKirie/NCKU_NLP_2018_industry3
|
9ee226e194287fd9088429f87c58c874e050a8b3
|
23ac13644b140587e23cfeffb114c7c6f46f17a2
|
refs/heads/master
| 2021-06-05T05:33:09.510647
| 2018-07-05T10:19:47
| 2018-07-05T10:19:47
| 133,680,341
| 1
| 4
|
MIT
| 2020-05-20T16:29:54
| 2018-05-16T14:43:38
|
Python
|
UTF-8
|
Python
| false
| false
| 456
|
py
|
"""
=============================
Whats New 0.98.4 Fill Between
=============================
"""
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0.0, 2, 0.01)
y1 = np.sin(2*np.pi*x)
y2 = 1.2*np.sin(4*np.pi*x)
fig, ax = plt.subplots()
ax.plot(x, y1, x, y2, color='black')
ax.fill_between(x, y1, y2, where=y2>y1, facecolor='green')
ax.fill_between(x, y1, y2, where=y2<=y1, facecolor='red')
ax.set_title('fill between where')
plt.show()
|
[
"qwer55113322@gmail.com"
] |
qwer55113322@gmail.com
|
19dd60844691611bae70a8df07c9fc9cd60b0efb
|
23f534a67f2d58ea556885b93584590f57f47123
|
/src/tutorials/fluentpython/concurrency_futures/Standardflags.py
|
319a5ec9b067726a606f19bac52621071e8c93a1
|
[] |
no_license
|
manas-mukherjee/MLTools
|
2cb3e735d4967f4c60c08739c86e07224977a182
|
eb18e6d1c6a8900ed47332a7dfb1ceaccc789deb
|
refs/heads/master
| 2022-12-07T22:46:13.419343
| 2020-03-10T09:46:03
| 2020-03-10T09:46:03
| 98,766,117
| 2
| 0
| null | 2022-11-22T01:27:34
| 2017-07-30T00:36:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 976
|
py
|
import os
import time
import sys
import requests
POP20_CC = ('CN IN US ID BR PK NG BD RU JP '
'MX PH VN ET EG DE IR TR CD FR').split()
BASE_URL = 'http://flupy.org/data/flags'
DEST_DIR = 'downloads/'
def save_flag(img, filename):
path = os.path.join(DEST_DIR, filename)
with open(path, 'wb') as fp:
fp.write(img)
def get_flag(cc):
url = '{}/{cc}/{cc}.gif'.format(BASE_URL, cc=cc.lower())
resp = requests.get(url)
return resp.content
def show(text):
print(text, end=' ')
sys.stdout.flush()
def download_many(cc_list):
for cc in sorted(cc_list):
image = get_flag(cc)
show(cc)
save_flag(image, cc.lower() + '.gif')
return len(cc_list)
def main(download_many):
t0 = time.time()
count = download_many(POP20_CC)
elapsed = time.time() - t0
msg = '\n{} flags downloaded in {:.2f}s'
print(msg.format(count, elapsed))
if __name__ == '__main__':
main(download_many)
|
[
"mmanas19@gmail.com"
] |
mmanas19@gmail.com
|
432e56502ab6f191dd680864cf5673b011f9a73b
|
f6f632bee57875e76e1a2aa713fdbe9f25e18d66
|
/python/_1001_1500/1434_number-of-ways-to-wear-different-hats-to-each-other.py
|
6e4134899e9fb8ff1f3ae283f20327e4315b3a62
|
[] |
no_license
|
Wang-Yann/LeetCodeMe
|
b50ee60beeeb3661869bb948bef4fbe21fc6d904
|
44765a7d89423b7ec2c159f70b1a6f6e446523c2
|
refs/heads/master
| 2023-08-07T05:31:23.428240
| 2021-09-30T15:33:53
| 2021-09-30T15:33:53
| 253,497,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,244
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Rock Wayne
# @Created : 2020-06-19 08:00:00
# @Last Modified : 2020-06-19 08:00:00
# @Mail : lostlorder@gmail.com
# @Version : alpha-1.0
"""
#  There are n people and 40 different kinds of hats, labeled from 1 to 40.
#
#  You are given a list of lists of integers called hats, where hats[i] is the list
#  of all hats the i-th person likes.
#
#  Assign each person a hat they like so that no two people wear the same hat,
#  and return the number of possible assignments.
#
#  Since the answer may be very large, return it modulo 10^9 + 7.
#
#
#  Example 1:
#
#  Input: hats = [[3,4],[4,5],[5]]
#  Output: 1
#  Explanation: There is only one way to assign hats under the given constraints.
#  The first person takes hat 3, the second takes hat 4, and the last one takes hat 5.
#
#  Example 2:
#
#  Input: hats = [[3,5,1],[3,5]]
#  Output: 4
#  Explanation: There are 4 ways to assign the hats:
#  (3,5), (5,3), (1,3) and (1,5)
#
#  Example 3:
#
#  Input: hats = [[1,2,3,4],[1,2,3,4],[1,2,3,4],[1,2,3,4]]
#  Output: 24
#  Explanation: Each person can choose any hat labeled 1 to 4.
#  The number of permutations of (1,2,3,4) is 24.
#
#  Example 4:
#
#  Input: hats = [[1,2,3],[2,3,5,6],[1,3,7,9],[1,8,9],[2,5,7]]
#  Output: 111
#
#
#  Constraints:
#
#  n == hats.length
#  1 <= n <= 10
#  1 <= hats[i].length <= 40
#  1 <= hats[i][j] <= 40
#  hats[i] contains a list of distinct integers.
#
#  Related Topics: Bit Manipulation, Dynamic Programming
"""
import functools
from typing import List
import pytest
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def numberWays(self, hats: List[List[int]]) -> int:
        # total number of people
        MOD = 10 ** 9 + 7
        N = len(hats)
        @functools.lru_cache(None)
        def dp(cur, pos):
            # cur is the hat currently being offered for selection
            # pos is a bitmask recording which people already wear a hat
            # If everyone already wears a hat, this counts as one valid assignment
            if pos == (1 << N) - 1:
                return 1
            # If not everyone has a hat yet and there are no hats left, return 0
            if cur > 40:
                return 0
            # Option 1: nobody wears this hat; move on to the next one, i.e. dp(cur+1, pos)
            res = dp(cur + 1, pos)
            # Option 2: someone wears this hat
            for i in range(N):
                # Find a person who likes this hat and does not wear one yet (their bit in pos is 0)
                if cur in hats[i] and not pos & (1 << i):
                    # Give this person the hat (set their bit) and continue with the next hat
                    res += dp(cur + 1, pos + (1 << i))
return res % MOD
return dp(0, 0)
# leetcode submit region end(Prohibit modification and deletion)
class Solution1:
def numberWays(self, hats: List[List[int]]) -> int:
""" 状压DP"""
MOD = 10 ** 9 + 7
HAT_SIZE = 40
N = len(hats)
hat_to_people = [[] for _ in range(HAT_SIZE)]
for i in range(N):
for h in hats[i]:
hat_to_people[h - 1].append(i)
dp = [0] * (1 << N)
dp[0] = 1
for people in hat_to_people:
for mask in range(len(dp) - 1, -1, -1):
for p in people:
if mask & (1 << p):
continue
dp[mask | (1 << p)] += dp[mask]
dp[mask | (1 << p)] %= MOD
# print(dp)
return dp[-1]
@pytest.mark.parametrize("kw,expected", [
[dict(hats=[[3, 4], [4, 5], [5]]), 1],
[dict(hats=[[3, 5, 1], [3, 5]]), 4],
[dict(hats=[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]), 24],
[dict(hats=[[1, 2, 3], [2, 3, 5, 6], [1, 3, 7, 9], [1, 8, 9], [2, 5, 7]]), 111],
])
def test_solutions(kw, expected):
assert Solution().numberWays(**kw) == expected
assert Solution1().numberWays(**kw) == expected
if __name__ == '__main__':
pytest.main(["-q", "--color=yes", "--capture=no", __file__])
|
[
"rock@get.com.mm"
] |
rock@get.com.mm
|
c26155b2c52ac0f93816f82789a0620fa8778e43
|
30319ef38df7cdd57a0e31d32009dfaaa6803ce2
|
/zeijemol/views/startup.py
|
c2f4684c1e5c34e34f66fd847260a1c3cc6652bf
|
[] |
no_license
|
neurospin/zeijemol
|
9a064562387604bfbd4cd619687719d99074736c
|
d023e6f7907b73810c4496687d63dead5836138c
|
refs/heads/master
| 2020-04-11T03:38:09.852328
| 2017-09-12T11:44:46
| 2017-09-12T11:44:46
| 50,499,760
| 1
| 4
| null | 2017-08-31T13:21:45
| 2016-01-27T10:26:44
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 7,706
|
py
|
##########################################################################
# NSAp - Copyright (C) CEA, 2013
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
from __future__ import division
import os
import collections
# Cubicweb import
from cubicweb import _
from cubicweb.web.views.startup import IndexView
from cubicweb.web.httpcache import NoHTTPCacheManager
from cubicweb.view import View
from cubicweb.predicates import authenticated_user
from cubicweb.predicates import match_user_groups
class ZEIJEMOLNotRaterIndexView(IndexView):
""" Class that defines the index view.
"""
__regid__ = "index"
__select__ = authenticated_user() & ~match_user_groups(
"managers", "moderators")
title = _("Index")
def call(self, **kwargs):
""" Create the loggedin 'index' page of our site.
"""
# Format template
template = self._cw.vreg.template_env.get_template("startup.logged.jinja2")
html = template.render(
header_url=self._cw.data_url("creative/img/neurospin.jpg"),
moderator=False,
waves_progress={})
self.w(html)
class ZEIJEMOLRaterIndexView(IndexView):
""" Class that defines the index view.
"""
__regid__ = "index"
__select__ = authenticated_user() & match_user_groups(
"managers", "moderators")
title = _("Index")
http_cache_manager = NoHTTPCacheManager
def call(self, **kwargs):
""" Create the loggedin 'index' page of our site.
"""
# Get information to display a summary table with one progress bar
# for each wave
rset = self._cw.execute(
"Any S, W, N, C Where S is SnapSet, S wave W, W name N, "
"W category C")
struct = {}
for index, row in enumerate(rset):
wave_name = row[2]
category = row[3]
struct.setdefault(category, {}).setdefault(wave_name, []).append(
rset.get_entity(index, 0))
waves_progress = {}
for category, wave_struct in struct.items():
for wave_name, snapset in wave_struct.items():
nb_of_snapset= len(snapset)
nb_rates = 0
for entity in snapset:
scores = [
e for e in entity.scores
if e.scored_by[0].login == self._cw.session.login]
if len(scores) == 1:
nb_rates += 1
elif len(scores) > 1:
raise Exception(
"We expect one score per user for one snap.")
waves_progress.setdefault(category, []).append(
(wave_name, int(nb_rates / nb_of_snapset * 100)))
# Format template
template = self._cw.vreg.template_env.get_template("startup.logged.jinja2")
html = template.render(
header_url=self._cw.data_url("creative/img/neurospin.jpg"),
moderator=True,
waves_progress=waves_progress)
self.w(html)
class ZEIJEMOLIndexView(IndexView):
""" Class that defines the index view.
"""
__regid__ = "index"
__select__ = ~authenticated_user()
title = _("Index")
templatable = False
def call(self, **kwargs):
""" Create the anonymous 'index' page of our site.
"""
# Get additional resources links
css = []
for path in ("creative/vendor/bootstrap/css/bootstrap.min.css",
"creative/vendor/font-awesome/css/font-awesome.min.css",
"creative/vendor/magnific-popup/magnific-popup.css",
"creative/css/creative.css"):
css.append(self._cw.data_url(path))
js = []
for path in ("creative/vendor/jquery/jquery.min.js",
"creative/vendor/bootstrap/js/bootstrap.min.js",
"creative/vendor/scrollreveal/scrollreveal.min.js",
"creative/vendor/magnific-popup/jquery.magnific-popup.min.js",
"creative/js/creative.js"):
js.append(self._cw.data_url(path))
# Format template
template = self._cw.vreg.template_env.get_template("startup.jinja2")
html = template.render(
header_url=self._cw.data_url("creative/img/neurospin.jpg"),
login_url=self._cw.build_url(
"login", __message=u"Please login with your account."),
contact_email=self._cw.vreg.config.get(
"administrator-emails", "noreply@cea.fr"),
css_url=css,
js_url=js)
self.w(html)
class PieChart(View):
""" Create a pie chart representing the user rates with HighCharts.
"""
__regid__ = "pie-highcharts"
paginable = False
div_id = "pie-highcharts"
def call(self, data, title, container_id=0,
highcharts_js="https://code.highcharts.com/highcharts.js",
exporting_js="https://code.highcharts.com/modules/exporting.js"):
""" Method that will create a pie chart from the user rates.
Parameters
----------
data: dict
a dictionnary with title as keys and occurence (in percent) as
values.
title: str
a title for the chart.
container_id: int
an identifier for the chart container.
"""
# Add js resources
self._cw.add_js(highcharts_js, localfile=False)
self._cw.add_js(exporting_js, localfile=False)
# Create the highcharts string representation of the data
sdata = '['
for key, value in data.items():
sdata += '["{0}", {1}], '.format(key, value)
sdata += ']'
# Generate the script
# > headers
self.w(u'<script type="text/javascript">')
self.w(u'$(function () {{ $("#hc_container_{0}").highcharts({{'.format(
container_id))
# > configure credit
self.w(u'credits : {enabled : false}, ')
# > configure chart
self.w(u'chart: {plotBackgroundColor: null, plotBorderWidth: 1, '
'plotShadow: false}, ')
# > configure title
self.w(u'title: {{text: "{0}"}}, '.format(title))
# > configure tooltip
self.w(u'tooltip: {pointFormat: "{series.name}: '
'<b>{point.percentage:.1f}%</b>" }, ')
# > configure plot
self.w(u'plotOptions: {')
self.w(u'pie: {allowPointSelect: true, cursor: "pointer", '
'dataLabels: { enabled: true, format: "<b>{point.name}</b>: '
'{point.percentage:.1f} %", style: {color: (Highcharts.theme '
'&& Highcharts.theme.contrastTextColor) || "black"}}}')
self.w(u'}, ')
# > configure series
self.w(u'series: [{{type: "pie", name: "Rate", '
'data: {0}}}] '.format(sdata))
# > close headers
self.w(u'}); ')
self.w(u'}); ')
self.w(u'</script>')
# Add a container in the body to display the pie chart
self.w(u'<div id="hc_container_{0}" class="hc_container">'
'</div>'.format(container_id))
def registration_callback(vreg):
#vreg.register_and_replace(SnapIndexView, IndexView)
vreg.register_and_replace(ZEIJEMOLIndexView, IndexView)
vreg.register(ZEIJEMOLRaterIndexView)
vreg.register(ZEIJEMOLNotRaterIndexView)
vreg.register(PieChart)
|
[
"antoine.grigis@cea.fr"
] |
antoine.grigis@cea.fr
|
d65628556268853325cd60b2ae9d5b468ddd607c
|
f9acdde88dbb70a2844e058f6c53c016fc8407c1
|
/lfc/utils/middleware.py
|
73fd4101ae5c997956b80590d97c5c105c676f5e
|
[] |
no_license
|
yzl11/django-lfc
|
536daccae82351af66f3894c38c8f2702691af75
|
75c900d672b4d36705fb8fa4833c446bbb78efea
|
refs/heads/master
| 2021-01-15T13:14:37.192773
| 2015-05-03T15:03:12
| 2015-05-03T15:03:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
# python imports
import logging
# permissions imports
from permissions.exceptions import Unauthorized
# lfc imports
import lfc.utils
# Load logger
logger = logging.getLogger(__name__)
class LFCMiddleware:
"""LFC specific middleware.
"""
def process_exception(self, request, exception):
"""Catches Unauthorized exceptions to display the login form.
"""
if isinstance(exception, Unauthorized):
logger.info(u"Unauthorized: %s" % exception.message)
return lfc.utils.login_form(next=request.META.get("PATH_INFO"))
|
[
"kai.diefenbach@iqpp.de"
] |
kai.diefenbach@iqpp.de
|
171445b5eb900e95286b527051d51c2668e76e2d
|
35e11785421b1831fb19fef8fb03c804fafbad44
|
/tests/common_app.py
|
4ec8a40249e2c75286b1b661d9dc93b0ad666831
|
[
"MIT"
] |
permissive
|
bwisgood/FRF
|
fff4e264ceeb7397da58564aaca6be6d66c29430
|
c520cb98bd2fca5f29964bcc475d84c011d43954
|
refs/heads/master
| 2023-01-10T19:17:01.396435
| 2019-08-26T11:23:37
| 2019-08-26T11:23:37
| 155,576,545
| 3
| 0
|
MIT
| 2022-12-27T15:36:47
| 2018-10-31T15:03:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,447
|
py
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from v3.wrapper import FlaskRestFramework
from v3.views import GetView, PostView, PutView, RetrieveView, DeleteView
from v3.serializer import Serializer
db = SQLAlchemy()
pwd = os.environ.get("FRF_MYSQL_PASSWORD") or ""
def config():
global db
app = Flask(__name__)
class Config(object):
        # Database configuration
SQLALCHEMY_DATABASE_URI = r'mysql+pymysql://root:{}@127.0.0.1:3306/test'.format(pwd)
SQLALCHEMY_TRACK_MODIFICATIONS = False
engine = create_engine(SQLALCHEMY_DATABASE_URI)
Base = declarative_base(engine)
TESTING = True
app.config.from_object(Config)
db.init_app(app)
frf = FlaskRestFramework()
frf.init_app(app)
return app
def test_without_db():
# db = SQLAlchemy()
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URL"] = r'mysql+pymysql://root:{}@127.0.0.1:3306/test'.format(pwd)
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config['TESTING'] = True
# db.init_app(app)
frf = FlaskRestFramework()
frf.init_app(app)
return app
class Person(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30))
gender = db.Column(db.String(30))
class PersonSerializer(Serializer):
model_class = Person
logical_delete = None
class PersonView(GetView):
serializer = PersonSerializer
look_up = ("name",)
class PersonPostView(PostView):
serializer = PersonSerializer
class PersonPutView(PutView):
serializer = PersonSerializer
class PersonDeleteView(DeleteView):
serializer = PersonSerializer
from v3.mixins import AllMethodMixin, ReadOnlyMixin
class PersonRetrieveView(AllMethodMixin):
serializer = PersonSerializer
app = config()
# app.add_url_rule('/persons', view_func=PersonView.as_view("person_view"))
# app.add_url_rule('/persons', view_func=PersonPostView.as_view("person_view_post"))
# app.add_url_rule('/persons', view_func=PersonPutView.as_view("person_view_put"))
# app.add_url_rule('/persons', view_func=PersonDeleteView.as_view("person_view_delete"))
app.add_url_rule('/persons', view_func=PersonRetrieveView.as_view("person_view_re"))
if __name__ == '__main__':
ap = app.test_client()
ap.post()
ap.get()
app.run(debug=True)
|
[
"857464370@qq.com"
] |
857464370@qq.com
|
1c67e00090b8ef676b62937c7101be1ca71afa72
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/kxrhqiE5so3AMXWS7_5.py
|
854f9fb64387e018368202a96adcf4e5dab2b54d
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
def get_number_of_apples(n, p):
p = int(p[:-1])/10
ans = int(n * (10-p)/10)
return ans if ans > 0 else "The children didn't get any apples"
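# Example (editor's note): get_number_of_apples(10, "30%") returns 7,
# i.e. 10 apples minus a 30% share.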
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
7cd01337d6972f928bcb5624b0c3eda164203938
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_55/558.py
|
30c1531227e7121c4e261664bebf108bef953b37
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 982
|
py
|
#!/usr/bin/python
def precompute(capacity, groups, people):
packing = [[0, 0],]*groups
pointer = 0
for i in range(groups):
pointer = i
total = 0
while (total + people[i]) <= capacity:
total += people[i]
i += 1
if i >= groups:
i = 0
if i == pointer:
break
packing[pointer] = [total, i]
return packing
def coaster(rides, capacity, groups, people):
pointer = 0
income = 0
if groups < 1:
return income
packing = precompute(capacity, groups, people)
while rides > 0:
if packing[pointer][0] == 0:
return income
income += packing[pointer][0]
pointer = packing[pointer][1]
rides -= 1
return income
def rl():
return sys.stdin.readline().strip()
import sys
cases = int(rl())
for case in range(1, cases+1):
rides, cap, groups = [int(x) for x in rl().split(' ')]
people = [int(x) for x in rl().split(' ')]
if len(people) != groups:
raise Exception('Wrong input')
print 'Case #%s: %s' % (case, coaster(rides, cap, groups, people))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
a529f6fb1e0b2d70e2d519ca9bf9cc233dec5c97
|
10c459a49cbc8ee2dc3bc2a8353c48b5a96f0c1d
|
/spider/day05/01.threading_斗图网.py
|
069c486f5ef724f2c44ff447e8feb6a0e9c1c0c7
|
[] |
no_license
|
alinzel/Demo
|
1a5d0e4596ab4c91d7b580da694b852495c4ddcc
|
cc22bbcdbd77190014e9c26e963abd7a9f4f0829
|
refs/heads/master
| 2020-03-10T22:26:30.247695
| 2018-04-15T15:37:28
| 2018-04-15T15:37:28
| 129,619,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
# -*- coding: utf-8 -*-
# @Time    : 18-3-2 6:15 PM
# @Author : Zwl
# @Email : 944951481@qq.com
# @File : 01.threading_斗图网.py
# @Software: PyCharm
import threading
import requests
from bs4 import BeautifulSoup
import time
import os
# TODO: set up the request data
base_url = 'https://www.doutula.com/photo/list/?page='
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'
}
# Container for the page URLs
url_list = []
# Build the URLs for the first three pages
for i in range(1, 4):
    url = base_url + str(i)
    # Append the assembled link to url_list
    url_list.append(url)
# Container for the image URLs
img_url_list = []
# Initialise the lock
glock = threading.Lock()
# TODO: producer thread that requests and parses the pages
class Producer(threading.Thread):
    # Override run(); start() executes this method
    def run(self):
        print('Current producer thread: %s' % threading.current_thread())
        # Loop while there are still page URLs to process
        while len(url_list) > 0:
            # TODO: lock while one thread takes a link
            glock.acquire()
            # Pop a link from url_list (removes it from the end and returns it)
            request_url = url_list.pop()
            # TODO: release the lock once the link is taken, so other threads can proceed
            glock.release()
            # Request the link and get the response
            page = requests.get(request_url, headers=header)
            # Get the page source from the response
            html = page.text
            # Parse the page
            parse_html = BeautifulSoup(html, 'lxml')
            # Select the image tags and their URLs
            img_urls = parse_html.select('.img-responsive.lazy.image_dta')
            # Lock before touching the shared list
            glock.acquire()
            # Iterate over the image tags found
            for img_url_item in img_urls:
                # Extract the image link
                img_url = img_url_item.attrs['data-original']
                # If the link does not start with http, prepend the scheme
                if not img_url.startswith('http'):
                    img_url = 'http:' + img_url
                    img_url_list.append(img_url)
                else:
                    img_url_list.append(img_url)
            # Release the lock
            glock.release()
# TODO: consumer thread that writes the images to local disk
class Consumer(threading.Thread):
    # Override the parent class method
    def run(self):
        print('Current consumer thread: %s' % threading.current_thread())
        # TODO: sleep two seconds so the consumer waits for the producer, since the list is empty at the start
        time.sleep(2)
        # Loop while img_url_list still has data
        while len(img_url_list) > 0:
            glock.acquire()
            img_url = img_url_list.pop()
            glock.release()
            # Request the image link and get the image content
            img = requests.get(img_url, headers=header).content
            # Directory where the images are stored
            dire = os.getcwd() + '/images/'
            # Create the directory if it does not exist
            if not os.path.exists(dire):
                os.mkdir('images')
            # Build the image name
            img_name = img_url[-14:-4]
            # Build the image path
            path = dire + img_name
            # Open the path
            with open(path, 'wb') as f:
                # Write the data
                f.write(img)
if __name__ == '__main__':
for i in range(1, 3):
Producer().start()
for i in range(1, 3):
Consumer().start()
|
[
"944951481@qq.com"
] |
944951481@qq.com
|
875345586355a9355bce7e92772c02104b0791a4
|
3faf4b9fb76145b2326446bc6bc190a5712b3b62
|
/Algorithms/0143 Reorder List.py
|
4af27de2eeddeed16f4dcef88dcca1c8ce33e0ab
|
[] |
no_license
|
cravo123/LeetCode
|
b93c18f3e4ca01ea55f4fdebceca76ccf664e55e
|
4c1288c99f78823c7c3bac0ceedd532e64af1258
|
refs/heads/master
| 2021-07-12T11:10:26.987657
| 2020-06-02T12:24:29
| 2020-06-02T12:24:29
| 152,670,206
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# Solution 1, fast and slow pointers, reverse linked list techniques
class Solution:
def cut_half(self, node):
slow = fast = node
prev = None
while fast and fast.next:
fast = fast.next.next
prev = slow
slow = slow.next
# make sure to null first half tail
prev.next = None
prev, curr = None, slow
while curr:
tmp = curr.next
curr.next = prev
prev, curr = curr, tmp
return node, prev
def reorderList(self, head: ListNode) -> None:
"""
Do not return anything, modify head in-place instead.
"""
if head is None or head.next is None:
return head
# cut in half
p, q = self.cut_half(head)
# paste half lists
dummy = curr = ListNode(0)
while p or q:
if p:
curr.next = p
curr = curr.next
p = p.next
if q:
curr.next = q
curr = curr.next
q = q.next
return dummy.next
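# Worked example (editor's note): 1->2->3->4 is reordered to 1->4->2->3;
# cut_half splits the list into 1->2 and the reversed second half 4->3,
# which are then interleaved.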
|
[
"cc3630@columbia.edu"
] |
cc3630@columbia.edu
|