blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9cbf92bcd66291c9f8ba2ec9fc26d46465e85b7c
|
149d7c1312b8fdd04be2a1550302853599891276
|
/shadowsocks/utils.py
|
31c427a5c753d8049d356d274717948a0ebe4095
|
[
"Apache-2.0"
] |
permissive
|
jaylon-valoroso/sss-valor
|
750c44b4236150e7fb145a60b73b622cafc6c9db
|
7997696fbf8b11051caaa78fbe3ab7adb65ed3db
|
refs/heads/master
| 2020-03-24T20:27:51.613507
| 2018-08-01T07:49:02
| 2018-08-01T07:49:02
| 142,979,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,246
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import os
import logging
def encode(s):
    """Render *s* as a hex dump.

    Each character becomes two lowercase hex digits; values are separated
    by single spaces, a newline is emitted after every 16th value, and the
    whole result starts with a newline.
    """
    pieces = ['\n']
    for pos, ch in enumerate(s, start=1):
        token = hex(ord(ch)).replace('0x', '')
        if len(token) == 1:
            token = '0' + token
        pieces.append(token)
        # Break the line after every 16th value, otherwise a space.
        pieces.append('\n' if pos % 16 == 0 else ' ')
    return ''.join(pieces)
def decode(s):
    """Inverse of encode(): drop whitespace separators and convert each
    pair of hex digits back into the corresponding character.

    A trailing unpaired digit is ignored, matching the original behavior.
    """
    digits = s.replace(' ', '').replace('\n', '')
    return ''.join(
        chr(int(digits[i:i + 2], 16))
        for i in range(0, len(digits) - 1, 2)
    )
|
[
"jaylon.cheng@valorosoltd.com"
] |
jaylon.cheng@valorosoltd.com
|
88515e3139b985720606d7cb5eea30c1eadba8ea
|
5cf4d4c29f4e7848a0eb3f6f542e7b28abbd1814
|
/ramulator-NVMain/MemControl/BEAR/SConscript
|
c3981c6484cb5b40bcfc6f5fe935031d6adda22e
|
[] |
no_license
|
xiaosanxian/ramulator-NVMain
|
d2cd34c974ff546f2d513c2e9c66840cdbefb5a5
|
dca6bdf6407b81c7e1b708f245a7a9742d7ec248
|
refs/heads/master
| 2021-06-14T11:14:44.842935
| 2017-03-31T03:23:27
| 2017-03-31T03:23:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,985
|
# Copyright (c) 2012-2013, The Microsystems Design Labratory (MDL)
# Department of Computer Science and Engineering, The Pennsylvania State University
# All rights reserved.
#
# This source code is part of NVMain - A cycle accurate timing, bit accurate
# energy simulator for both volatile (e.g., DRAM) and non-volatile memory
# (e.g., PCRAM). The source code is free and you can redistribute and/or
# modify it by providing that the following conditions are met:
#
# 1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author list:
# Matt Poremba ( Email: mrp5060 at psu dot edu
# Website: http://www.cse.psu.edu/~poremba/ )
# SCons build hooks: Import('*') pulls in the construction environment
# ('env') and the NVMain helper functions used below.
Import('*')

# Assume that this is a gem5 extras build if this is set.
if 'TARGET_ISA' in env and env['TARGET_ISA'] == 'no':
    Return()

# NOTE(review): original indentation was lost; layout reconstructed to match
# the usual NVMain sub-SConscript pattern -- confirm against sibling files.
if 'NVMAIN_BUILD' in env:
    NVMainSourceType('src', 'Backend Source')

NVMainSource('BEAR.cpp')
|
[
"1902552434@qq.com"
] |
1902552434@qq.com
|
|
f0dc05a9822af19ce717dfe2a66cfb5333836437
|
1da58342267e4e1a1200fd1fa2e5483f0bcde56c
|
/Rezolvari-Laboratoare/Laborator_5/Problema_4.py
|
2720f1bf2b73f42fc5d124109ddf49b4eb83daa4
|
[] |
no_license
|
FlorescuMiruna/PA-Labs-Python
|
1738432769d66a8a8d5f9f9b95eeedbd138e2e41
|
1454a037377c99e13f4564d6e46fdc02a2f0cdde
|
refs/heads/main
| 2023-03-25T10:55:48.303240
| 2021-03-20T13:21:27
| 2021-03-20T13:21:27
| 349,728,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
# Greedy coin change: read the coin denominations and the target sum from
# "bani.txt", then write the payment breakdown to "plata.txt".
# FIX: the original bound `moede = monede.reverse()` -- reverse() mutates in
# place and returns None, so that assignment was a dead (and misspelled)
# binding. Files are now closed via context managers.
with open("bani.txt") as f:
    monede = [int(x) for x in f.readline().split()]
    # Greedy needs the largest denomination first; the input is ascending.
    monede.reverse()
    suma = int(f.readline())

sir = str(suma) + " = "
indice = 0
plata = []
# Take as many of the current (largest remaining) coin as fit, then move on.
# NOTE(review): assumes the denominations can always reach the sum exactly
# (e.g. a 1-unit coin exists); otherwise `indice` runs past the end.
while suma != 0:
    moneda = monede[indice]
    nr = 0
    while suma - monede[indice] >= 0:
        suma -= monede[indice]
        nr += 1
    if nr != 0:
        plata.append((moneda, nr))
    indice += 1

print(plata)
for x in plata:
    sir += str(x[0]) + "*" + str(x[1]) + " + "

# Drop the trailing "+ " left by the loop above (output format unchanged).
with open("plata.txt", "w") as g:
    g.write(sir[:-2])
|
[
"noreply@github.com"
] |
FlorescuMiruna.noreply@github.com
|
99e182cd90fbca4d0b157953b735f008f9d6a1d8
|
8be4b123cb12dfa852857d9df756c74365980168
|
/douban/douban/spiders/minyao.py
|
69c7ae9a945d7f2ae92b0abfa85664d30acc0175
|
[] |
no_license
|
tongwenfeng/scrapy
|
56dc4e38aabeb3ed4445493e361cb3db51b0c1c9
|
351f832ae06735801403d885b73da26151a5e6b9
|
refs/heads/master
| 2020-03-28T18:54:34.669396
| 2014-03-20T03:31:53
| 2014-03-20T03:31:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,576
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re
import json
import pymongo
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor as sle
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from douban.items import DoubanItem
from douban.download import download
class spider(CrawlSpider):
name='m'
#allowed_domains = ["http://music.douban.com"]
start_urls=["http://music.douban.com/"]
is_start=True
def parse(self,response):
hxs = HtmlXPathSelector(response)
items = []
newurls = hxs.xpath("//a/@href").extract()
rules = [
#Rule(sle(allow=("subject/\d+/?$")),callback='parse2'),
#Rule(sle(allow=("/tag/[^/]+/?$",)),callback='parse2'),
#Rule(sle(allow=(".*douban.*",)),follow=True),
]
#print url
validurls =[]
item=DoubanItem()
for url in newurls:
print url
if True:
validurls.append(url)
regex_site=re.compile(r'.*site.*')
#m = re.match(r".site.*",url)
flag_site = regex_site.findall(url)
if flag_site:
#print url,m
item['singerurl']=url
item['singer']=' '
yield Request(item['singerurl'],meta={'item':item},callback=self.parse_item)
newurls.remove(url)
regex_tag = re.compile(r'http:music.douban.com/tag/.*')
flag_tag = regex_tag.findall(url)
if flag_tag:
yield Request(url,callback=)
n = re.compile(r'http:.*douban.*')
t = n.findall(url)
if t:
yield Request(url,callback=self.parse)
#items.extend([self.make_request_from_url(url).replace(callback=self.parse) for url in validurls])
def parse2(self,response):
hxs = HtmlXPathSelector
items = []
urls = hxs.select("//a/@href").extract()
#print urls
def parse_item(self,response):
hxs=HtmlXPathSelector(response)
item=response.meta['item']
#print item
items=[]
song=[]
try:
song=hxs.re("\[\{\"name\".*\]")
except Exception as e:
print e
for s in song:
record=json.loads(s)
for b in record:
#print '------------------------------'
#print b['name'],b['rawUrl']
dou = DoubanItem()
dou['singer']=item['singer']
dou['singerurl']=item['singerurl']
dou['name'] = b['name']
dou['url'] = b['rawUrl']
#download(item['url'],item['name'])
items.append(dou)
print items
#return items
def parse_site(self,response):
hxs = HtmlXPathSelector(response)
items=[]
for a in hxs.select("//div[@class=\"photoin\"]//a"):
try:
if re.match(".*",a.select("@href").extract()[0]) and re.match(".*",a.select("img//@alt").extract()[0]):
print a.select("@href").extract()[0],a.select("img//@alt").extract()[0]
item=DoubanItem()
item['singer']=a.select("img//@alt").extract()[0]
item['singerurl']=a.select("@href").extract()[0]
yield Request(item['singerurl'],meta={'item':item},callback=self.parse_item)
except Exception as e:
pass
|
[
"iie@ubuntu.ubuntu-domain"
] |
iie@ubuntu.ubuntu-domain
|
70d959fa5f6f46528ac3299453bc740ddffdd677
|
5ead41be5c8086fcdd68e5ce0f397eab0ac4e5f1
|
/CSCI-University of Minnesota Work/UMN-1133/Labs/selectionSort.py
|
1c3262dded447a4071aa784bf49683faae92899e
|
[] |
no_license
|
jkelly37/Jack-kelly-portfolio
|
5ddc1dc510950b5f60c2db4bbf83f6ba66fba743
|
d77bc68382d319cb5cc251704e4b78528fe48cc3
|
refs/heads/master
| 2020-08-03T00:12:38.010069
| 2019-09-28T20:57:57
| 2019-09-28T20:57:57
| 211,557,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
def main():
    """Read a run of digits from stdin, selection-sort them, print the list.

    NOTE(review): the prompt says "integers with spaces" but every character
    is converted individually after stripping spaces, so only single-digit
    values actually work -- preserved as-is.
    """
    intstring = input("enter list of integers with spaces inbetween:").replace(" ", "")
    # FIX: build the list directly instead of pre-sizing and index-filling.
    int_list = [int(ch) for ch in intstring]
    # Selection sort: find the minimum of the unsorted tail and swap it in.
    # FIX: renamed `min` -> `min_idx`; the original shadowed the builtin.
    for i in range(len(int_list)):
        min_idx = i
        for j in range(i + 1, len(int_list)):
            if int_list[min_idx] > int_list[j]:
                min_idx = j
        int_list[i], int_list[min_idx] = int_list[min_idx], int_list[i]
    print(int_list)


if __name__ == '__main__':
    main()
|
[
"kell2425@umn.edu"
] |
kell2425@umn.edu
|
1a0ee41c37a828d819fde744e632bb8c99a5d50b
|
eb762d14de67a355442ff81b6c4ba82e4c8ea847
|
/app/services/slack/tests/main/conftest.py
|
7285b7cef6311fc7684a56ecac2ef5fe88be2d6a
|
[] |
no_license
|
klapcsik/aws-sns-to-slack
|
d1074f47a1b093ff1065121355fa3551016b5909
|
c1f8e9360d34225029d140631cf90cc10dbfc91c
|
refs/heads/main
| 2023-06-26T09:03:24.619711
| 2021-07-13T10:51:34
| 2021-07-13T10:51:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
import os
def pytest_sessionstart(session):
    """
    Called after the Session object has been created and
    before performing collection and entering the run test loop.

    Blanks out SLACK_HOOK_URL so code under test never resolves a real
    Slack webhook during the run.
    """
    os.environ.update(SLACK_HOOK_URL='')
|
[
"tomerlevi1983@gmail.com"
] |
tomerlevi1983@gmail.com
|
2608ee5e6792179976939c1be55b154390218004
|
b22205aa21ac51c7b14dfaab556eea1f8902a922
|
/bin/foamEnsightSeries.py
|
01d2f508c95d5486890b9bdce863d8a4e78b81d5
|
[] |
no_license
|
ewquon/pylib
|
a1c6a64a0127c5078e19f190ec252ccd00b5035e
|
c34afb2a13fc0075f95a43bac99219b25b3984a2
|
refs/heads/master
| 2023-07-12T11:32:31.671093
| 2023-06-21T15:59:15
| 2023-06-21T15:59:15
| 41,262,844
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,037
|
py
|
#!/usr/bin/env python
#
# Create symbolic links for specified directories to form a single Ensight .case
# file with the complete time series
#
from __future__ import print_function
import sys
import os
import glob
import numpy as np
# HARD CODE INPUTS FOR NOW
# Filename pieces appended to the case prefix; {:05d} numbers the series.
prefixMeshFile = '_U.mesh'
prefixSolnFile = '_U.000.U'
prefixSolnStr = '_U.*****.U'
prefixNewFile = '_U.{:05d}.U'

# argv[1]: the postprocessing directory containing one subdirectory per time.
if len(sys.argv) > 1:
    postdir = sys.argv[1].rstrip(os.sep)
else:
    sys.exit('Specify array postprocessing directory with structure: arraySampleDir/timeDir/*.case')
if not os.path.isdir(postdir):
    sys.exit('Not a valid directory: '+postdir)

# argv[2] (optional): case-name prefix; defaults to the directory name.
if len(sys.argv) > 2:
    prefix = sys.argv[2]
else:
    prefix = os.path.split(postdir)[1]

casefile = prefix + '_series.case'
# Refuse to clobber an existing case file.
if os.path.isfile(casefile):
    raise IOError('Case file '+casefile+' exists')

meshFile = prefix + prefixMeshFile
solnFile = prefix + prefixSolnFile
solnStr = prefix + prefixSolnStr
newFile = prefix + prefixNewFile

# All symlinks are collected in a sibling '<postdir>_series' directory.
outdir = postdir + '_series'
if os.path.isdir(outdir):
    print('Warning: time series directory',outdir,'already exists')
else:
    os.makedirs(outdir)

# read available time directories (assume equally spaced)
# Only subdirectories whose name parses as a float count as time dirs.
timeList = []
tdirList = []
for d in glob.glob(os.path.join(postdir,'*')):
    if os.path.isdir(d):
        tdir = os.path.split(d)[-1]
        try:
            tval = float(tdir)
        except ValueError:
            continue
        timeList.append(tval)
        tdirList.append(os.path.abspath(d))
# Sort times ascending and apply the same order to the directory list.
timeArray = np.array(timeList)
timeSortOrder = np.argsort(timeArray)
timeArray = timeArray[timeSortOrder]
tdirList = [ tdirList[i] for i in timeSortOrder ]
#for t,tdir in zip(timeArray,tdirList):
#    print('t=',t,':',tdir)

Ntimes = len(tdirList)
# Sanity check: Ensight's fixed start/increment assumes uniform spacing.
timeDiff = np.diff(timeArray)
if not np.min(timeDiff) == np.max(timeDiff):
    print('Warning: sampling intervals are variable?')

# create symlinks
# One mesh link (taken from the first time dir) plus one numbered solution
# link per time step.
havemesh = False
for itime in range(Ntimes):
    if not havemesh:
        os.symlink(
            os.path.join(tdirList[itime],meshFile),
            os.path.join(outdir,meshFile)
        )
        havemesh = True
    srcfile = os.path.join(tdirList[itime],solnFile)
    if not os.path.isfile(srcfile):
        raise IOError('Source file not found: '+srcfile)
    tgtfile = os.path.join(outdir,newFile.format(itime))
    print(tgtfile,'-->',srcfile)
    os.symlink(srcfile,tgtfile)

# create case file for time series
caseTemplate="""FORMAT
type: ensight gold
GEOMETRY
model: 1 {meshFile:s}
VARIABLE
vector per node: 1 U {solnStr:s}
TIME
time set: 1
number of steps: {Ntimes:d}
filename start number: 0
filename increment: 1
time values:
"""
targetMesh = os.path.join(outdir,meshFile)
targetSoln = os.path.join(outdir,solnStr)
# Write the header, then one time value per line.
with open(casefile,'w') as f:
    f.write(
        caseTemplate.format(meshFile=targetMesh,solnStr=targetSoln,Ntimes=Ntimes)
    )
    for t in timeArray:
        f.write('{:.5g}\n'.format(t))
|
[
"eliot.quon@nrel.gov"
] |
eliot.quon@nrel.gov
|
8804b3bdb95212c5305df70bde327009eef115c8
|
5ebfced62f59052560c6adf89bfd2f249877cc75
|
/webcomics/comments/forms.py
|
de875c024ad559e011a4e79c4027de7216f51378
|
[] |
no_license
|
lumenwrites/webcomics
|
537c9bd0337ebd087dacdee7b72797b658481f8c
|
34200eaf19021147c561bf140a685e398156589e
|
refs/heads/master
| 2021-06-10T17:12:50.317113
| 2017-02-19T09:28:57
| 2017-02-19T09:28:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
from django.forms import ModelForm
from django import forms
from .models import Comment
class CommentForm(ModelForm):
    """Form for posting a Comment; only the 'body' field is user-editable."""
    class Meta:
        model = Comment
        fields = ['body']
|
[
"raymestalez@gmail.com"
] |
raymestalez@gmail.com
|
9ed0e03a0c054036f454866ea42cdc8cc1220f3a
|
df42a3fbc9e6f5702d0da7fc9e859e9ff163a456
|
/petstagram/accounts/forms.py
|
c12d1f25bfa151fe89dc93334a5785a45a3e71fa
|
[] |
no_license
|
RadkaValkova/Petstagram-application
|
c4ef007f42329d4587b814d4a898a1fbfe4c8450
|
1e930d7fdbe9fa0a0d7a9b4e002bcab7edcfe40c
|
refs/heads/main
| 2023-06-18T19:23:46.845318
| 2021-07-20T16:24:53
| 2021-07-20T16:24:53
| 386,993,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 826
|
py
|
from django import forms
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.core.exceptions import ValidationError
UserModel = get_user_model()
class LoginForm(forms.Form):
    """Email/password login form that authenticates during validation.

    On success the authenticated user is stashed on self.user and returned
    by save(); on failure validation raises.
    """
    user = None
    email = forms.EmailField(
    )
    password = forms.CharField(
        widget=forms.PasswordInput(),
    )

    def clean_password(self):
        # FIX: use .get() -- if the email field failed its own validation,
        # cleaned_data has no 'email' key and the original raised KeyError.
        self.user = authenticate(
            email=self.cleaned_data.get('email'),
            password=self.cleaned_data['password'],
        )
        if not self.user:
            raise ValidationError('Email and/or password incorrect')
        # BUG FIX: a clean_<field> method must return the cleaned value;
        # the original returned None, silently blanking
        # cleaned_data['password'].
        return self.cleaned_data['password']

    def save(self):
        # Return the user resolved during validation (None if never cleaned).
        return self.user
class RegisterForm(UserCreationForm):
    """Sign-up form for the project's custom user model; collects only the
    email (password handling is inherited from UserCreationForm)."""
    class Meta:
        model = UserModel
        fields = ('email',)
|
[
"76747070+RadkaValkova@users.noreply.github.com"
] |
76747070+RadkaValkova@users.noreply.github.com
|
7a4e4ebbcd27cd8101a911cc5ab7a990485e1878
|
17f0874efc711c11108b110519d9912804b08a11
|
/tf3d/layers/sparse_voxel_hourglass.py
|
63911f34d73c856bf2211f556eb010309571ff6f
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
HuiBinR/google-research
|
d7b2ea82cfda230084cf1c4adddab1452b4a87a0
|
02f86fec9c8d343f645299c7be78533355608d51
|
refs/heads/master
| 2023-01-28T11:36:28.441552
| 2020-12-03T19:42:45
| 2020-12-03T19:50:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,489
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a 3D Sparse Voxel HourGlass model."""
import gin
import gin.tf
import tensorflow as tf
from tf3d.layers import sparse_voxel_unet
@gin.configurable
class SparseConvHourGlass(tf.keras.layers.Layer):
  """3D UNet sparse voxel network."""

  def __init__(self,
               num_stacked_networks=1,
               task_names_to_num_output_channels=None,
               task_names_to_use_relu_last_conv=None,
               task_names_to_use_batch_norm_in_last_layer=None,
               conv_filter_size=3,
               encoder_dimensions=((32, 64), (64, 128), (128, 256)),
               bottleneck_dimensions=(256, 256),
               decoder_dimensions=((256, 256), (128, 128), (64, 64)),
               dropout_prob=0.0,
               use_batch_norm=True,
               network_pooling_segment_func=tf.math.unsorted_segment_max,
               normalize_sparse_conv=True):
    """3D UNet sparse voxel network.

    Args:
      num_stacked_networks: Number of stacked networks that build the
        hour-glass structure.
      task_names_to_num_output_channels: A dictionary containing the mapping
        between task names to number of prediction channels for each task.
      task_names_to_use_relu_last_conv: A dictionary containing the mapping
        between task names to whether relu should be applied at the last
        convolution or not. If None, by default relu will not be applied.
      task_names_to_use_batch_norm_in_last_layer: A dictionary containing the
        mapping between task names to whether batch norm is applied to the
        last convolution of the tasks.
      conv_filter_size: The 3d convolution filter size. Currently the 3d
        convolution op is optimized for a filter size of 3.
      encoder_dimensions: A tuple of tuples, where each nested tuple is a list
        of ints describing the output feature dimensionality of each 3x3x3
        convolution. After every nested tuple we do a 2x2x2 3D Max Pooling.
      bottleneck_dimensions: A tuple of ints describing the output feature
        dimensionality of each 3x3x3 convolution in the middle of the network,
        which is after we have finished downsampling but before upsampling.
      decoder_dimensions: A tuple of tuples, where each nested tuple is a list
        of ints describing the output feature dimensionality of each 3x3x3
        convolution. Before every new nested tuple we do a 2x2x2 upsampling
        operation, and then concatenate encoder features in a UNet fashion.
      dropout_prob: A float indicating the probability of dropout.
      use_batch_norm: Whether to use batch normalization or not.
      network_pooling_segment_func: Function used to pool voxel features in
        the network.
      normalize_sparse_conv: If True, applies normalization to 3d sparse
        convs.

    Returns:
      A dictionary containing a predicted tensor per task. The predicted
      tensors are of size [batch_size, num_voxels, num_task_channels].

    Raises:
      ValueError: If task_names_to_num_output_channels is None.
      ValueError: If the encoder and decoder have a different number of
        downsampling/upsampling levels.
    """
    super().__init__()
    if task_names_to_num_output_channels is None:
      raise ValueError('task_names_to_num_output_channels cannot be None')
    if len(encoder_dimensions) != len(decoder_dimensions):
      raise ValueError(
          'The number of encoder and decoder blocks should be equal')
    # Default every task head to "no relu after the final conv".
    if task_names_to_use_relu_last_conv is None:
      task_names_to_use_relu_last_conv = {}
      for key in sorted(task_names_to_num_output_channels):
        task_names_to_use_relu_last_conv[key] = False
    # Default every task head to "no batch norm in the final layer".
    if task_names_to_use_batch_norm_in_last_layer is None:
      task_names_to_use_batch_norm_in_last_layer = {}
      for key in sorted(task_names_to_num_output_channels):
        task_names_to_use_batch_norm_in_last_layer[key] = False
    self.num_stacked_networks = num_stacked_networks
    # Expected inputs: per-voxel features (float), voxel xyz indices (int),
    # and the number of valid voxels per batch element (int).
    self.input_spec = [
        tf.keras.layers.InputSpec(shape=(None, None, None), dtype=tf.float32),
        tf.keras.layers.InputSpec(shape=(None, None, 3), dtype=tf.int32),
        tf.keras.layers.InputSpec(shape=(None,), dtype=tf.int32)
    ]
    self.networks = []
    decoder_dimensions_last = decoder_dimensions[-1][-1]
    for i in range(num_stacked_networks):
      if i == num_stacked_networks - 1:
        # The last UNet in the stack predicts the real task outputs.
        task_channels = task_names_to_num_output_channels
        task_relu = task_names_to_use_relu_last_conv
        task_batch_norm = task_names_to_use_batch_norm_in_last_layer
      else:
        # Intermediate UNets emit an 'intermediate_output' feature map that
        # feeds the next UNet in the stack.
        task_channels = {'intermediate_output': decoder_dimensions_last}
        task_relu = {'intermediate_output': True}
        task_batch_norm = {'intermediate_output': use_batch_norm}
      self.networks.append(
          sparse_voxel_unet.SparseConvUNet(
              task_names_to_num_output_channels=task_channels,
              task_names_to_use_relu_last_conv=task_relu,
              task_names_to_use_batch_norm_in_last_layer=task_batch_norm,
              conv_filter_size=conv_filter_size,
              encoder_dimensions=encoder_dimensions,
              bottleneck_dimensions=bottleneck_dimensions,
              decoder_dimensions=decoder_dimensions,
              dropout_prob=dropout_prob,
              use_batch_norm=use_batch_norm,
              network_pooling_segment_func=network_pooling_segment_func,
              normalize_sparse_conv=normalize_sparse_conv))

  def call(self, inputs, training=True):
    """Runs the stacked UNets; returns the final network's output dict."""
    voxel_features, voxel_xyz_indices, num_valid_voxels = inputs
    for i in range(self.num_stacked_networks):
      outputs = self.networks[i](
          inputs=[voxel_features, voxel_xyz_indices, num_valid_voxels],
          training=training)
      if i != self.num_stacked_networks - 1:
        # Chain the stack: intermediate features become the next input.
        voxel_features = outputs['intermediate_output']
    return outputs
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
2ae99a0805b0ac0482001efbc1e402cc7d2beee8
|
982324b6228f82ed01951b52df5b2813909f4cad
|
/athlete_sort/athlete_sort.py
|
3485a8890b173195bed7486b59aaa4ab63654c64
|
[] |
no_license
|
ndina/Hackerrank-Python
|
5f14bae9f570978a860d489bc556f54a111e81d6
|
e8e9ea62f8cb04cf6997b126d66eafede8d6a645
|
refs/heads/master
| 2020-05-31T10:36:34.158818
| 2019-06-09T23:46:45
| 2019-06-09T23:46:45
| 190,244,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
# HackerRank "Athlete Sort": first line holds N (rows) and M (columns),
# then N rows of integers, then the column index K to sort by.
header = [int(tok) for tok in input().split()]
rows = [[int(tok) for tok in input().split()] for _ in range(header[0])]
sort_column = int(input())
# sorted() is stable, so ties keep their input order.
for row in sorted(rows, key=lambda r: r[sort_column]):
    print(*row)
#30pt
|
[
"dbom12360@gmail.com"
] |
dbom12360@gmail.com
|
b71cf5e234217b3fdabdeaea5c732b0fb43cfd03
|
a1e00921dba2a1ebb572407e935a3c884e9ea83a
|
/scratch/day0_scratch.py
|
47a54cc1ebd215a6ee703b97f6d1f568612f119a
|
[
"MIT"
] |
permissive
|
notsambeck/civicu-machine-learning
|
bef2a876714b3d7ba27fa0de2af432330183adaa
|
591f241ed420f26292cae0241a38e07604ce7ef3
|
refs/heads/master
| 2021-07-23T01:21:25.536000
| 2017-11-03T01:17:56
| 2017-11-03T01:17:56
| 104,957,122
| 0
| 0
| null | 2017-09-27T01:35:22
| 2017-09-27T01:35:22
| null |
UTF-8
|
Python
| false
| false
| 3,319
|
py
|
''' assignment for Thursday:
use height and weight as independent variables to predict gender
(inluding logistic regression function for genders)
'''
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression, LogisticRegression
# Predict weight from height (per-gender linear models), then gender from
# height+weight (logistic regression), then weight from all features.
df = pd.read_csv('lessons/shared-resources/heights_weights_genders.csv')

# df.plot.scatter(x='Height', y='Weight')
# plt.show()

# gender masks, male=1
df = df.sort_values('Height')
# FIX: pd.np was deprecated in pandas 1.0 and removed in 2.0 -- use numpy
# directly (imported at the top of the file).
df['log_height'] = np.log(df.Height)
female = df['Gender'] == 'Female'
male = df['Gender'] == 'Male'
df.Gender = male.astype(int)  # set male to 1, female to 0

# Features: height and log-height; one linear model per gender.
X = df[['Height', 'log_height']].values.reshape(-1, 2)
X_m = df[male][['Height', 'log_height']].values.reshape(-1, 2)
y_m = df[male].Weight
df['predicted_weight_as_f'] = 0
df['predicted_weight_as_m'] = 0

lr_m = LinearRegression()
lr_m.fit(X_m, y_m)
df.predicted_weight_as_m = lr_m.predict(X)

X_f = df[female][['Height', 'log_height']].values.reshape(-1, 2)
y_f = df[female].Weight
lr_f = LinearRegression()
lr_f.fit(X_f, y_f)
df.predicted_weight_as_f = lr_f.predict(X)

# plot female / male
ax = df[female].plot(kind='scatter', x='Height', y='Weight', c='red',
                     alpha=.2)
ax = df[male].plot(ax=ax, kind='scatter', x='Height', y='Weight', c='blue',
                   alpha=.1)
# plot lines of best fit
df.plot(ax=ax, kind='line', x='Height', y='predicted_weight_as_f', c='gray')
df.plot(ax=ax, kind='line', x='Height', y='predicted_weight_as_m', c='k')

# logistic regression: predict genders
df = df.sort_values('Height')
logistic = LogisticRegression()
# use height and weight
# FIX: sized to the actual frame instead of the hard-coded 10000 rows.
X = np.zeros((len(df), 2))
X[:, 0] = df.Height.values
X[:, 1] = df.Weight.values
x = df[['Height', 'Weight']].values.reshape(-1, 2)
print(X.shape)
print(X[len(X) // 2])
# use weight only
# X = (df.Height.values * df.Weight.values).reshape(-1, 1)
logistic = logistic.fit(X, df.Gender)
df['logreg_predicted_gender'] = logistic.predict(X)
df['prob'] = logistic.predict_proba(X)[:, 1]

print('rms error:')
print(((df.prob - df.Gender) ** 2).mean() ** .5)
print('accuracy:')
print(logistic.score(X, df.Gender))

predicted_male = df.logreg_predicted_gender.astype(bool)
ax = df[predicted_male].plot.scatter(ax=ax, x='Height', y='Weight',
                                     s=.5, c='black', alpha=.5)
ax = df[~predicted_male].plot.scatter(ax=ax, x='Height', y='Weight',
                                      s=.5, c='gray', alpha=.5)

print('\n ### multivariate regression: predict weight ### \n')


def pred_weight_dumb(height, gender):
    """Per-gender weight prediction using the single-gender linear models."""
    if gender == 'Male':
        return lr_m.predict(height)
    else:
        return lr_f.predict(height)


predict_weight = LinearRegression().fit(df[['Height', 'Gender', 'log_height']]
                                        .values.reshape(-1, 3),
                                        df.Weight)
df['e_mlvr'] = predict_weight.predict(df[['Height', 'Gender', 'log_height']]
                                      .values.reshape(-1, 3)) - df.Weight
e_rms = ((df.e_mlvr ** 2).mean()) ** .5
print('rms error: {}'.format(e_rms))
df.hist('e_mlvr', bins=20)
plt.show()
|
[
"thesamuelbeck@gmail.com"
] |
thesamuelbeck@gmail.com
|
4b1cf36c4b7da5c518e8e2275a09cc77dc696c63
|
4d220dfc5482f822ae08410e0f92e9335a1f22c1
|
/leetcode_china/company/demo4.py
|
65e736b4f5dddfb98b5afe30d9d33b53ddeb69f3
|
[] |
no_license
|
DaiJitao/algorithm
|
8539441cc2d70abdd0de025c16ba2a197b770db0
|
2a4b1f8fe54308ef2818c4baf0284bc00ff382a9
|
refs/heads/master
| 2023-05-26T03:44:07.701094
| 2023-05-23T08:53:46
| 2023-05-23T08:53:46
| 209,712,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
from typing import List, Optional
"""
对一个有n个元素的数组,求最大的连续子数组的和,并求其开始、结束下标。
数组的元素必然有正数也有负数才有意义,如果全是正数,那最大的子数组就是本身;如果全部为负数,应该是一个最大的元素。
"""
def demo(nums):
    """Return (start, end) indices of the maximum-sum contiguous subarray.

    Kadane's algorithm. Preserves the original quirks: returns None for a
    None input, the single element itself (not a tuple) for a length-1
    list, and prints the best sum as a side effect.
    """
    if nums is None or len(nums) == 1:
        return None if nums is None else nums[0]
    n = len(nums)
    temp = 0
    maxp = 0
    startIndex = 0
    endIndex = 0
    ts = 0
    for i in range(n):
        temp += nums[i]
        if temp < 0:
            # A negative running sum can never help; restart after i.
            ts = i + 1
            temp = 0
        else:
            if temp > maxp:
                startIndex = ts
                endIndex = i
                maxp = temp
    # BUG FIX: with an all-negative input the loop never updates maxp and
    # the function wrongly reported (0, 0). Per the module docstring, the
    # answer should then be the single largest element.
    if nums and maxp == 0 and all(x < 0 for x in nums):
        best = max(range(n), key=lambda idx: nums[idx])
        startIndex = endIndex = best
        maxp = nums[best]
    print(maxp)
    return startIndex, endIndex


if __name__ == '__main__':
    nums = [4, -10, 9, 7, -2, 8, 6, -1, 11]
    print(demo(nums))
|
[
"976185561@qq.com"
] |
976185561@qq.com
|
dadcb76a3882141c9c96fdca9eba067550e22da8
|
55678fed60529ff04c5559fe909a1175223c21d5
|
/median.py
|
7a5482a2059f687c26bcd5bb5722e3f9f2c7cf2f
|
[] |
no_license
|
MrIndia9211/C-104
|
999cba81f0492c6bf37e45f95dbe40f4154139c4
|
9970ed6236a78f8b8a4ac00eac9d85fccc2c8a59
|
refs/heads/main
| 2023-08-15T15:46:01.047223
| 2021-09-29T10:26:05
| 2021-09-29T10:26:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
import csv
# Compute the median of column 1 (weights) from height-weight.csv.
with open('height-weight.csv', newline='') as f:
    rows = list(csv.reader(f))
rows.pop(0)  # discard the header row

new_data = sorted(float(row[1]) for row in rows)
n = len(new_data)
mid = n // 2
if n % 2 == 0:
    # Even count: average the two middle values.
    median = (new_data[mid] + new_data[mid - 1]) / 2
else:
    median = new_data[mid]
print('median:-', median)
|
[
"noreply@github.com"
] |
MrIndia9211.noreply@github.com
|
341ecc567eb26f00c94c4a007afaf4024e338d91
|
c6e7bcdf0d111666a9f3e63b66c11d6f2f4013dc
|
/run.py
|
2c66e36a88edcce200a00850d209b1aec02d9205
|
[] |
no_license
|
samo1petar/segmentator
|
75c54c26be4a3b830583ff7d2d7a0f257a13a5df
|
5c65c620bb56aa6e3f57617415b6710f0c4cebe9
|
refs/heads/master
| 2023-05-26T18:56:13.767391
| 2022-06-26T19:16:25
| 2022-06-26T19:16:25
| 297,342,236
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
import argparse
from params import Definition, params
# from lib.execution.eval import eval
from lib.execution.train import train
def run():
    """CLI entry point: parse --mode/--no_images and dispatch to training
    or evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--mode',
        dest = 'mode',
        type = str,
        default = 'train',
        help = 'Choose between train and test',
    )
    parser.add_argument(
        '--no_images',
        # store_false: passing --no_images sets args.no_images to False.
        dest = 'no_images',
        action = 'store_false',
        help = 'In eval mode, wrong images won\'t be saved',
    )
    args = parser.parse_args()
    if args.mode == 'train':
        # Model/reader/loss/optimizer come from params.Definition; scalar
        # hyper-parameters from the params dict.
        train(
            model = Definition.model,
            loader = Definition.reader,
            loss_object = Definition.loss,
            optimizer = Definition.optimizer,
            print_every_iter = params['print_every_iter'],
            eval_every_iter = params['eval_every_iter'],
            max_iter = params['max_iter'],
            clip_gradients = params['clip_gradients'],
            results_dir = params['results_dir'],
            name = params['name'],
        )
    elif args.mode in ['test', 'eval']:
        # NOTE(review): "from lib.execution.eval import eval" is commented
        # out at the top of this file, so `eval` here resolves to the
        # builtin and this branch raises TypeError -- re-enable that import
        # before using test/eval mode.
        eval(
            model = Definition.model,
            loader = Definition.reader,
            results_dir = params['results_dir'],
            save_images = args.no_images,
        )


if __name__ == '__main__':
    run()
|
[
"pero356@gmail.com"
] |
pero356@gmail.com
|
5e30f00d471b8de960a34c14da5f963b3033d2a4
|
bc24adb2c6c0fd4f540b76552bbe0c61a3f496ce
|
/django_project/blog/urls.py
|
e1d81b61e872db726c07e7e23a4d369f42282f95
|
[] |
no_license
|
stefank-29/BlogApp-Django
|
86e95343aae315a7ec3d9329941847ccadcdaec3
|
1f220d3f0ad3f21c6e2c31de3496438ebe43f071
|
refs/heads/master
| 2023-05-23T00:24:33.154694
| 2021-06-10T17:27:21
| 2021-06-10T17:27:21
| 321,454,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
from django.urls import path
from .views import BlogListView, BlogDetailView, BlogCreateView, BlogUpdateView, BlogDeleteView, MyBlogsListView
from . import views
urlpatterns = [
path('', BlogListView.as_view(), name='blog-home'),
path('blogs/', BlogListView.as_view(), name='blog-blogs'),
path('myblogs/', MyBlogsListView.as_view(), name='blog-myblogs'),
path('blog/<int:pk>/', BlogDetailView.as_view(), name='blog-detail'),
path('add/', BlogCreateView.as_view(), name='blog-create'),
path('blog/<int:pk>/update/', BlogUpdateView.as_view(), name='blog-update'),
path('blog/<int:pk>/delete/', BlogDeleteView.as_view(), name='blog-delete'),
path('reviews/<int:pk>/', views.addReview, name='blog-comment'),
]
|
[
"stefankaraferovic@gmail.com"
] |
stefankaraferovic@gmail.com
|
b02e8ecc999de4688439cb589adba563cbf01b41
|
f51bf78ebea69a0b125a68b427aa7b30b3d4549c
|
/crawl_web_completed.py
|
55c7fdb98cbff2207b88037be0df0242a8f1a162
|
[] |
no_license
|
ariakerstein/Python-Udacity
|
9b8449599be2d6e0b232f73b08a34db7a2f614ac
|
b182072a2734b83e6562be0ff7188ba6e99a737e
|
refs/heads/master
| 2020-06-30T04:13:36.514688
| 2015-01-11T18:35:03
| 2015-01-11T18:35:03
| 29,083,141
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
def crawl_web(seed):
tocrawl = [seed]
crawled = []
index = []
while tocrawl:
page = tocrawl.pop()
if page not in crawled:
add_page_to_index(index, page, content)
union(tocrawl,get_all_links(content))
crawled.append(page)
return index
|
[
"ariakerstein@gmail.com"
] |
ariakerstein@gmail.com
|
7f6c1d3c8cd6dd1ea150b48f282f0596da323d94
|
d5572ae1dace6ffad6c7aed6152cc329869a89b3
|
/similar_recommend/output.py
|
0a53aeb3f777a176584345791be7dba79c78e80b
|
[] |
no_license
|
pxl-a/position_recommend
|
26125d73a62513bcb8da1a4049f5f2acdd1a8e89
|
33b4c1223c9193b263aaf1e9a108d5e66d992680
|
refs/heads/master
| 2022-03-08T05:03:47.165391
| 2019-03-07T12:45:58
| 2019-03-07T12:45:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
from mysql import mysql
from similar_recommend import similar_recommend
import re
import jieba
import logging
from gensim import corpora, models, similarities
# sql = mysql()
#str = ""
# results = sql .work_experience_require("3-5年")
# results1 = sql.edu_background_require("博士",results)
#
# for r in results1:
# print(r)
# describe = sql.get_describe("3-5年","本科",8000)
# for value in describe.keys():
# print(value)
sr = similar_recommend("3-5年","本科",8000,"计算机 大学 相关 专业 毕业 年 及 以上 asp net c# 开发 经验")
for i in sr.sim_sort:
print(i[0])
|
[
"email@example.com"
] |
email@example.com
|
3543dd7f06a8d0e28deb512c2d25605c0c3ff766
|
c0239d75a8199ec84ad683f945c21785c1b59386
|
/dingtalk/api/rest/OapiServiceActivateSuiteRequest.py
|
eae5a257d0d9438a61cc25e1207617c492821574
|
[] |
no_license
|
luss613/oauth_dingtalk
|
9f253a75ce914c577dbabfb84e97fd883e80e04b
|
1e2554642d2b16c642a031670d08efa4a74e8252
|
refs/heads/master
| 2023-04-23T01:16:33.450821
| 2020-06-18T08:22:57
| 2020-06-18T08:22:57
| 264,966,287
| 1
| 1
| null | 2020-06-18T08:31:24
| 2020-05-18T14:33:25
|
Python
|
UTF-8
|
Python
| false
| false
| 397
|
py
|
'''
Created by auto_sdk on 2018.07.25
'''
from dingtalk.api.base import RestApi
class OapiServiceActivateSuiteRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.auth_corpid = None
self.permanent_code = None
self.suite_key = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.service.activate_suite'
|
[
"paul.lu@belstar.com.cn"
] |
paul.lu@belstar.com.cn
|
e94b263df58a78ccd663c7420fea1827788a8a8a
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4354/codes/1650_869.py
|
0e425860e6f08d42daa86b93734dc4dfbb96f754
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
# Teste seu código aos poucos.
# Não teste tudo no final, pois fica mais difícil de identificar erros.
# Use as mensagens de erro para corrigir seu código.
preco = float(input("digite o preco: "))
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
fc75db4a0b8955e7775a7532d067773d13501111
|
c27b02ece1348d2f504baeb1b9c15fc3fc693bd8
|
/src/models/championship.py
|
fd0b22b446bd5d322a8f50c5912241df5be604b7
|
[] |
no_license
|
jfly/cubingusa-org
|
60b7119b6f52d2ad6732f83dc2d123cb3277c616
|
f843cae7a30ec5f39d93519f965a3a18b352107f
|
refs/heads/master
| 2021-09-02T05:45:01.251015
| 2017-12-30T20:51:03
| 2017-12-30T20:51:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
from google.appengine.ext import ndb
from src.models.region import Region
from src.models.state import State
from src.models.wca.competition import Competition
class Championship(ndb.Model):
national_championship = ndb.BooleanProperty()
region = ndb.KeyProperty(kind=Region)
state = ndb.KeyProperty(kind=State)
competition = ndb.KeyProperty(kind=Competition)
@staticmethod
def NationalsId(year):
return str(year)
@staticmethod
def RegionalsId(year, region):
return '%s_%d' % (region.key.id(), year)
@staticmethod
def StateChampionshipId(year, state):
return '%s_%d' % (state.key.id(), year)
|
[
"timbreynolds@gmail.com"
] |
timbreynolds@gmail.com
|
7ba855193a81c0a99787d5bdf4a164e27fb6267e
|
5fe8ecd17245782838c836560139380de3b8f947
|
/cf/practice/471b.py
|
a270ff07ef90698fe565c226d2ae2fc98583801c
|
[] |
no_license
|
pyramidsnail/practice
|
2ac48ec6fac45c39b3eb52b08910daca1a6fccec
|
de33ed00c74574941a27fc0495fa9fe8b0a79b67
|
refs/heads/master
| 2020-04-06T04:00:36.038416
| 2015-02-05T15:04:30
| 2015-02-05T15:04:30
| 15,498,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
import math
n = int(raw_input())
h = [int(x) for x in raw_input().split()]
total = 1
for a in set(h):
total *= math.factorial(h.count(a))
if total<3:
print 'NO'
exit()
task = sorted((h[i],i) for i in xrange(n))
para = []
for i in xrange(n-1):
if task[i][0]==task[i+1][0]:
para.append(i)
print 'YES'
print ' '.join([str(x[1]+1) for x in task])
tmp = task[para[0]]
task[para[0]]=task[para[0]+1]
task[para[0]+1]=tmp
print ' '.join([str(x[1]+1) for x in task])
tmp = task[para[1]]
task[para[1]]=task[para[1]+1]
task[para[1]+1]=tmp
print ' '.join([str(x[1]+1) for x in task])
|
[
"wangli@tbi-hm01.inet.dkfz-heidelberg.de"
] |
wangli@tbi-hm01.inet.dkfz-heidelberg.de
|
446a073387f469521c65d83a2a44564de2ecdf9f
|
f8757ad3e70d9a85cdb70d5059b06fcfaa2f23db
|
/parser/parser_oggetti&distrutti_2.1.py
|
e50bb03a55ad2da055ca4d2a9b1c70e97d62e409
|
[] |
no_license
|
LG965/1LifeCampaign
|
63282f292a840593b8ccce3aeece641b5959ddf9
|
27da72cc8ae2899ff398747d7637fc337c0f5368
|
refs/heads/master
| 2023-05-03T00:55:59.226366
| 2021-05-09T17:36:28
| 2021-05-09T17:36:28
| 362,393,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,627
|
py
|
# legge il log di missione e restituisce tre file txt contenenti tutti gli oggetti.txt e gli oggetti_distrutti.txt
import os.path
import re
print("#################################################################################################################\n")
print("Inserisci il nome del file es: missionReport(2021-04-23_16-52-50)[0].txt\n")
print("ATTENZIONE! Se i tre file oggetti_distrutti.txt, oggetti_Dx.txt e oggetti_Sx.txt esistono, verranno sovrascritti!\n")
global lineaoggettiSx
missionReport = input()
if os.path.isfile(missionReport):
# inserire il nome del report missione (log)
# al posto di "missionReport(2021-04-23_16-52-50)[0].txt" qui sotto
# missionReport = "missionReport(2021-04-23_16-52-50)[0].txt"
#################### oggetti_Sx ########################################
log = open(missionReport)
oggettiSx = open("oggetti_Sx.txt", "w")
for linea_oggetti in log:
linea_oggetti = linea_oggetti.rstrip()
# T:25389 AType:12 ID:182272 TYPE:arf_dugouts_3[25011,1] COUNTRY:201 NAME:< Lin N 5 Rif Dug < PID:-1 POS(188777.2344,142.5266,178376.0156)
# if oggettoSx := re.findall('AType:12(.*)< PID', linea_oggetti):
if oggettoSx := re.findall('AType:12(.*) TYPE:(.*) COUNTRY:(.*) NAME:(.*)< PID', linea_oggetti):
#print(oggettoSx)
#lineaOggettiSxLista = [oggettoSx]
for lineaSx in oggettoSx:
lineaOggettiSxLista = []
oggettiSx.write(lineaSx[0] + ' ' + lineaSx[1] + ' ' + lineaSx[3] + "\n")
#lineaoggettiSxLista[lineaSx + "\n"]
#global lineaOggettiSx
# lineaSx[0] + ' ' + lineaSx[1] + ' ' + lineaSx[3] + "\n"
#lineaOggettiSx = lineaSx[0] + ' ' + lineaSx[1] + ' ' + lineaSx[3] + "\n"
#lineaOggettiSxLista.append(oggettoSx)
# print(lineaOggettiSx)
lineaOggettiSxLista.append(lineaSx)
print(len(lineaOggettiSxLista))
oggettiSx.close()
log.close()
#################### oggetti_Dx ########################################
log = open(missionReport)
oggettiDx = open("oggetti_Dx.txt", "w")
for linea_oggetti in log:
linea_oggetti = linea_oggetti.rstrip()
if oggettoDx := re.findall('AType:12(.*)> PID', linea_oggetti):
#print(oggettoDx)
for lineaDx in oggettoDx:
oggettiDx.write(lineaDx + "\n")
oggettiDx.close()
log.close()
#################### oggetti_distrutti ########################################
log = open(missionReport)
oggetti_distrutti = open("oggetti_distrutti.txt", "w")
for linea_distrutti in log:
linea_distrutti = linea_distrutti.rstrip()
# T:25378 AType:3 AID:8193 TID:182272 POS(188777.2344,142.5266,178376.0156)
# if oggetto_distrutto := re.findall('AType:3(.*)POS', linea_distrutti):
if oggetto_distrutto := re.findall('AType:3 A(.*) T(.*)POS', linea_distrutti):
#print(oggetto_distrutto)
for lineaDis in oggetto_distrutto:
#if lineaDis == re.escape('AID:-1 TID:(.*)'):
if lineaDis[0] != 'ID:-1':
# print(lineaDis)
# else:
oggetti_distrutti.write(lineaDis[1] + "\n")
oggetti_distrutti.close()
log.close()
print(lineaOggettiSxLista)
print("\nFile oggetti_distrutti.txt, oggetti_Dx.txt e oggetti_Sx.txt creati!")
else:
print("\nIl file mission inserito non esiste! Programma terminato.")
input("\nPremi un tasto per chiudere...")
|
[
"ellegi965@gmail.com"
] |
ellegi965@gmail.com
|
31423afd7843125e573616a7df9b3fa38287ee1f
|
b782e5ae244003c2c3539353147ffb8d8c2363e1
|
/lib/gazpacho/uimstate.py
|
ce1a6f8039b6c432df85eff19cb5ede3ce320da2
|
[] |
no_license
|
dsaran/packagehelper
|
5e75c364b0c571737fcd5cfc6349891513378c55
|
516784f6891a99a92285314797dc12b721bceba0
|
refs/heads/master
| 2020-05-07T15:45:03.023492
| 2013-06-20T19:27:20
| 2013-06-20T19:27:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,529
|
py
|
import gettext
import gtk
from gazpacho.app.bars import bar_manager
from gazpacho.clipboard import clipboard
from gazpacho.commandmanager import command_manager
from gazpacho.constants import STOCK_SIZEGROUP
from gazpacho.placeholder import Placeholder
from gazpacho.sizegroupeditor import add_sizegroup_gadgets
from gazpacho.widget import Gadget
from gazpacho.signalhandlers import SignalHandlerStorage
_ = lambda msg: gettext.dgettext('gazpacho', msg)
class UIMState(object):
"""
The UIMState controls the state of the UIManager. It's responsible
for enabling and disabling action as well as add and remove ui
definitions.
The UIMState is responsible for the basic functionallity such as
Save, Close, Undo and Redo actions. It should be extended and
customized to controll other actions as well.
It is possible to have more than one UIMState but only one should
be enabled at one time.
"""
def __init__(self):
"""
Initialize the uim state.
"""
self._enabled = False
self._project = None
# Project dependent handlers
self.handlers = SignalHandlerStorage()
self._merge_ids = []
#
# Public methods
#
def enable(self):
"""
Enable the state object.
"""
self._enabled = True
self.update_uim()
self.update_state()
def disable(self):
"""
Disable the state object.
"""
self._enabled = False
self.update_uim()
self.update_state()
def set_project(self, project):
"""
Set the current project.
@param project: the new project
@type project: L{gazpacho.project.Project}
"""
# Disconnect handlers
self.handlers.disconnect_all()
self._project = project
self.update_state()
if not self._project:
return
# Connect handlers
self.handlers.connect(project, 'project_changed',
self._on_project_changed)
self.handlers.connect(project.undo_stack, 'changed',
self._on_undo_stack_changed)
def update_state(self):
"""
Make sure that the state of all actions are up to date.
"""
group = bar_manager.get_group('ContextActions')
group.set_sensitive(self._project is not None)
if not self._enabled:
return
if not self._project:
self._set_undo_redo_labels()
return
self._update_save_action(self._project)
self._update_undo_redo_actions(self._project.undo_stack)
def merge_ui(self, ui_string):
"""
Merge a ui definition. It's currently only possibe to merge
one ui definition, if you want to merge another you have to
remove the first.
@param ui_string: the ui definition
@type ui_string: str
"""
merge_id = bar_manager.add_ui_from_string(ui_string)
self._merge_ids.append(merge_id)
def remove_ui(self):
"""
Remove ui that has previously been merged. If nothing has been
merge nothing will happen.
"""
for merge_id in self._merge_ids:
bar_manager.remove_ui(merge_id)
self._merge_ids = []
#
# Private methods
#
def update_uim(self):
"""
Update the UIManager. This will add or remove ui definitions
as well as action groups. Note that this isn't really part of
the public API but should be overridden by the subclasses.
"""
raise NotImplementedError(
"This method has to be overridden by subclasses")
def _update_undo_redo_actions(self, undo_stack):
"""
Update the undo/redo actions.
@param undo_stack: the undo/redo stack
@type undo_stack: L{gazpacho.project.UndoRedoStack}
"""
undo_info = redo_info = None
if self._project:
undo_info = undo_stack.get_undo_info()
redo_info = undo_stack.get_redo_info()
bar_manager.set_action_prop('Undo', sensitive=undo_info is not None)
bar_manager.set_action_prop('Redo', sensitive=redo_info is not None)
self._set_undo_redo_labels(undo_info, redo_info)
def _set_undo_redo_labels(self, undo_text=None, redo_text=None):
"""
Set the undo and redo labels. If no text has been specified a
default label will be used to indicate that there are no undo
or redo available.
@param undo_text: the text that describes the undo action
@type undo_text: str
@param redo_text: the text that describes the redo action
@type redo_text: str
"""
if undo_text is None:
undo_text = _('Nothing')
if redo_text is None:
redo_text = _('Nothing')
bar_manager.set_action_prop('Undo',
label=_('_Undo: %s') % undo_text,
short_label=_('_Undo'))
bar_manager.set_action_prop('Redo',
label=_('_Redo: %s') % redo_text,
short_label=_('_Redo'))
def _update_save_action(self, project):
"""
Update the save action.
@param project: the current project
@type project: L{gazpacho.project.Project}
"""
bar_manager.set_action_prop('Save', sensitive=self._project.changed)
#
# Signal handlers
#
def _on_project_changed(self, project):
"""
Callback for the project's changed signal that is emitted when
the state of the project has changed.
@param project: the project that has changed
@type project: L{gazpacho.project.Project}
"""
if self._enabled:
self._update_save_action(project)
def _on_undo_stack_changed(self, undo_stack):
"""
Callback for the undo/redo stack's changed signal.
@param undo_stack: the undo/redo stack
@type undo_stack: L{gazpacho.project.UndoRedoStack}
"""
if self._enabled:
self._update_undo_redo_actions(undo_stack)
ACTION_UI_STRING = """
<ui>
<menubar name="MainMenu">
<menu action="ObjectMenu">
<menuitem action="AddAction"/>
<menuitem action="AddActionGroup"/>
<menuitem action="EditAction"/>
</menu>
</menubar>
<toolbar name="MainToolbar">
<toolitem action="AddAction"/>
</toolbar>
</ui>
"""
ACTION_POPUP_UI_STRING = """
<ui>
<popup name="ActionPopup">
<menuitem action="AddAction"/>
<menuitem action="AddActionGroup"/>
<menuitem action="EditAction"/>
<menuitem action="Delete"/>
</popup>
</ui>
"""
class ActionUIMState(UIMState):
"""
The ActionUIMState is responsible for actions that has to do with
the GActionView, i.e. adding, removing and editing actions and
action groups.
"""
def __init__(self, view):
"""
Initialize the action uim state.
@param view: the action view
@type view: L{gazpacho.actioneditor.GActionsView}
"""
UIMState.__init__(self)
self._view = view
self._action_group = self._create_action_group()
# Connect handlers (the normal way)
view.connect('selection-changed', self._on_view_selection_changed)
#
# Public methods
#
def update_state(self):
"""
Make sure that the state of all actions are up to date.
"""
UIMState.update_state(self)
self._action_group.set_sensitive(self._project is not None)
if not (self._project and self._enabled):
return
self._update_action_actions()
#
# Private methods
#
def update_uim(self):
"""
Update the UIManager. This will add or remove ui definitions
as well as action groups.
"""
if self._enabled:
if not bar_manager.has_group(self._action_group.get_name()):
bar_manager.add_action_group(self._action_group)
self.merge_ui(ACTION_UI_STRING)
self.merge_ui(ACTION_POPUP_UI_STRING)
else:
self.remove_ui()
group_name = self._action_group.get_name()
if bar_manager.has_group(group_name):
bar_manager.remove_action_group(group_name)
def _create_action_group(self):
"""
Create an action group for the action and action group
specific actions.
@return: the action group
@rtype: gtk.ActionGroup
"""
group = gtk.ActionGroup('ActionActions')
group.add_actions((('Delete', gtk.STOCK_DELETE, None, '<control>D',
_('Delete'), self._delete_cb),))
action = gtk.Action('AddAction',
_('_Add Action...'),
_('Add an action'),
gtk.STOCK_ADD)
action.connect('activate', self._add_action_cb)
action.set_property('short-label', _('Action'))
group.add_action(action)
action = gtk.Action('AddActionGroup',
_('Add Action _Group...'),
_('Add an action group'),
gtk.STOCK_ADD)
action.connect('activate', self._add_action_group_cb)
action.set_property('short-label', _('Action Group'))
group.add_action(action)
action = gtk.Action('EditAction',
_('_Edit...'),
_('Edit selected action or action group'),
gtk.STOCK_EDIT)
action.connect('activate', self._edit_action_cb)
action.set_property('short-label', _('Edit'))
group.add_action(action)
return group
def _update_action_actions(self):
"""
Update the action and action group actions.
"""
if not self._project:
return
bar_manager.set_action_prop('AddActionGroup', sensitive=True)
if (self._view.get_selected_action() or
self._view.get_selected_action_group()):
sensitive = True
else:
sensitive = False
bar_manager.set_action_props(('AddAction', 'Delete', 'EditAction'),
sensitive=sensitive)
#
# Signal handlers
#
def _on_view_selection_changed(self, actionview, item):
"""
Callback that is called when the selection in the action view
has changed.
@param actionview: the action view
@type actionview: L{gazpacho.actioneditor.GActionsView}
@param item: the selected gaction or gaction group
@type item: L{gazpacho.gaction.GActionGroup} or
L{gazpacho.gaction.GAction}
"""
if self._enabled:
self._update_action_actions()
#
# Action callbacks
#
def _delete_cb(self, action):
gaction = self._view.get_selected_action()
if gaction is not None:
self._view.remove_action(gaction)
def _add_action_cb(self, action):
gaction = self._view.get_selected_action()
self._view.add_action(gaction)
def _add_action_group_cb(self, action):
self._view.add_action_group()
def _edit_action_cb(self, action):
gaction = self._view.get_selected_action()
if gaction is not None:
self._view.edit_action(gaction)
def _delete_action_cb(self, action):
gaction = self._view.get_selected_action()
if gaction is not None:
self._view.remove_action(gaction)
SIZEGROUP_POPUP_UI_STRING = """
<ui>
<popup name="SizeGroupPopup">
<menuitem action="Delete"/>
</popup>
</ui>
"""
class SizeGroupUIMState(UIMState):
"""
The SizeGroupUIMState is responsible for actions that has to do
with the SizeGroupView, i.e. manipulating sizegroups and their
widgets.
"""
def __init__(self, view):
"""
Initialize the sizegroup uim state.
@param view: the sizegroup view
@type view: L{gazpacho.sizegroupeditor.SizeGroupView}
"""
UIMState.__init__(self)
self._view = view
self._action_group = self._create_sizegroup_action_group()
# Connect handlers (the normal way)
view.connect('selection-changed', self._on_view_selection_changed)
#
# Public methods
#
def update_state(self):
"""
Make sure that the state of all actions are up to date.
"""
UIMState.update_state(self)
self._action_group.set_sensitive(self._project is not None)
if not (self._project and self._enabled):
return
enabled = self._view.has_selected()
self._update_sizegroup_actions(enabled)
#
# Private methods
#
def update_uim(self):
"""
Update the UIManager. This will add or remove ui definitions
as well as action groups.
"""
if self._enabled:
if not bar_manager.has_group(self._action_group.get_name()):
bar_manager.add_action_group(self._action_group)
self.merge_ui(SIZEGROUP_POPUP_UI_STRING)
else:
self.remove_ui()
group_name = self._action_group.get_name()
if bar_manager.has_group(group_name):
bar_manager.remove_action_group(group_name)
def _create_sizegroup_action_group(self):
"""
Create an action group for the sizegroup specific actions.
@return: the action group
@rtype: gtk.ActionGroup
"""
group = gtk.ActionGroup('Edit sizegroup actions')
group.add_actions((('Delete', gtk.STOCK_DELETE, None, '<control>D',
_('Delete'), self._delete_cb),))
return group
def _update_sizegroup_actions(self, enabled):
"""
Update the actions related to sizegroup manipulation.
@param enabled: whether the actions should be enabled or not
@type enabled: bool
"""
bar_manager.set_action_prop('Delete', sensitive=enabled)
#
# Signal handlers
#
def _on_view_selection_changed(self, view, item):
"""
Callback that is called when the selected item in the
sizegroup view has changed.
@param view: the sizegroup view
@type view: L{gazpacho.sizegroupeditor.SizeGroupView}
@param item: the selected widget or sizegroup
@type item: L{gazpacho.sizegroup.GSizeGroup} or
L{gazpacho.widget.Gadget}
"""
self._update_sizegroup_actions(bool(item))
#
# Action callbacks
#
def _delete_cb(self, action):
self._view.remove_selected_item()
WIDGET_UI_STRING = """
<ui>
<menubar name="MainMenu">
<menu action="ObjectMenu">
<menuitem action="AddSizeGroupWidgets"/>
</menu>
</menubar>
<toolbar name="MainToolbar">
<toolitem action="AddSizeGroupWidgets"/>
</toolbar>
</ui>
"""
class WidgetUIMState(UIMState):
"""
The WidgetUIMState is responsible for actions that has to do with
widgets, i.e. adding, removing, cut and paste and so on.
"""
def __init__(self):
"""
Initialize the widget uim state.
"""
UIMState.__init__(self)
self._action_group = self._create_widget_action_group()
# Connect handlers (the normal way)
clipboard.connect('selection_changed',
self._on_clipboard_selection_changed)
#
# Public methods
#
def set_project(self, project):
"""
Set the current project.
@param project: the new project
@type project: L{gazpacho.project.Project}
"""
UIMState.set_project(self, project)
if not project:
return
self.handlers.connect(project.selection, 'selection_changed',
self._on_project_selection_changed)
def update_state(self):
"""
Make sure that the state of all actions are up to date.
"""
UIMState.update_state(self)
self._action_group.set_sensitive(self._project is not None)
if not (self._project and self._enabled):
return
selection = self._project.selection
self._update_edit_actions(selection)
self._update_sizegroup_actions(selection)
#
# Private methods
#
def update_uim(self):
"""
Update the UIManager. This will add or remove ui definitions
as well as action groups.
"""
if self._enabled:
if not bar_manager.has_group(self._action_group.get_name()):
bar_manager.add_action_group(self._action_group)
self.merge_ui(WIDGET_UI_STRING)
else:
self.remove_ui()
group_name = self._action_group.get_name()
if bar_manager.has_group(group_name):
bar_manager.remove_action_group(group_name)
def _create_widget_action_group(self):
"""
Create an action group for the widget specific actions.
@return: the action group
@rtype: gtk.ActionGroup
"""
group = gtk.ActionGroup('WidgetActions')
# We override the default cut, copy, paste and delete
group.add_actions(
(('Cut', gtk.STOCK_CUT, None, None, _('Cut'), self._cut_cb),
('Copy', gtk.STOCK_COPY, None, None, _('Copy'), self._copy_cb),
('Paste', gtk.STOCK_PASTE, None, None, _('Paste'),
self._paste_cb),
('Delete', gtk.STOCK_DELETE, None, '<control>D', _('Delete'),
self._delete_cb)
)
)
action = gtk.Action('AddSizeGroupWidgets',
_('Add Size Group _Widgets'),
_('Add the selected widgets to a size group'),
STOCK_SIZEGROUP)
action.connect('activate', self._add_sizegroup_gadgets_cb)
action.set_property('short-label', _('Group'))
group.add_action(action)
return group
def _update_paste_action(self, selection, clipboard_item):
"""
Update the paste action.
@param selection: the selected widgets
@type selection: list
@param clipboard_item: the selected item on the clipboard
@type clipboard_item: L{gazpacho.clipboard.ClipboardItem}
"""
sensitive = False
if clipboard_item:
# We can always paste a toplevel
if clipboard_item.is_toplevel:
sensitive = True
# otherwise we need a placeholder
elif (len(selection) == 1
and isinstance(selection[0], Placeholder)):
sensitive = True
bar_manager.set_action_prop('Paste', sensitive=sensitive)
def _update_edit_actions(self, selection):
"""
Update the actions in the edit group.
@param selection: the selected widgets
@type selection: list
"""
if len(selection) == 1:
widget = selection[0]
# Placeholders cannot be cut or copied but sometimes deleted
if isinstance(widget, Placeholder):
bar_manager.set_action_props(('Copy', 'Cut'),
sensitive=False)
bar_manager.set_action_prop('Delete',
sensitive=widget.is_deletable())
else:
bar_manager.set_action_props(('Copy', 'Cut', 'Delete'),
sensitive=True)
else:
bar_manager.set_action_props(('Copy', 'Cut', 'Delete'),
sensitive=False)
# Unless the widget is toplevel it can only be pasted on a placeholder
item = clipboard.get_selected_item()
self._update_paste_action(selection, item)
def _update_sizegroup_actions(self, selection):
"""
Update the sizegroup actions.
@param selection: the selected widgets
@type selection: list
"""
sensitive = True
if not selection:
sensitive = False
for widget in selection:
if isinstance(widget, (gtk.Window, Placeholder)):
sensitive = False
bar_manager.set_action_prop('AddSizeGroupWidgets', sensitive=sensitive)
#
# Signal handlers
#
def _on_clipboard_selection_changed(self, clipboard, item):
"""
Callback for the clipboard's selection-changed signal.
@param clipboard: the clipboard
@type clipboard: L{gazpacho.clipboard.Clipboard}
@param item: the newly selected item
@type item: L{gazpacho.clipboard.ClipboardItem}
"""
if not self._enabled:
return
selection = self._project.selection
self._update_paste_action(selection, item)
def _on_project_selection_changed(self, project):
"""
Callback for when the selected widget in this project has changed.
@param project: the current project
@type project: L{gazpacho.project.Project}
"""
if not self._enabled:
return
selection = self._project.selection
self._update_edit_actions(selection)
self._update_sizegroup_actions(selection)
#
# Action callbacks
#
def _cut_cb(self, action):
gadget = Gadget.from_widget(self._project.selection[0])
command_manager.cut(gadget)
def _copy_cb(self, action):
gadget = Gadget.from_widget(self._project.selection[0])
command_manager.copy(gadget)
def _paste_cb(self, action):
placeholder = None
if len(self._project.selection) == 1:
placeholder = self._project.selection[0]
command_manager.paste(placeholder, self._project)
def _delete_cb(self, action):
command_manager.delete_selection(self._project)
def _add_sizegroup_gadgets_cb(self, action):
"""
Callback that will add the selected widgets to a sizegroup
specified by the user.
@param action: the action that triggered the callback
@type action: L{gtk.Action}
"""
add_sizegroup_gadgets(self._project)
|
[
"danielsaran@gmail.com"
] |
danielsaran@gmail.com
|
54761c345cc041913fea12ff29f09b94c6270005
|
dd7c7f19d1599669dd39af3e9b6e2bd198ad4ed4
|
/backend/blockchain/blockchain.py
|
4961a46770cdda2d7aa31e97554aa30c9cf49596
|
[] |
no_license
|
komfysach/komfykrypto-pychain
|
46fe47d337ba6aff804f6c810cebeb94e15578d4
|
119c3978b819d8def705f7d35074b9334c596770
|
refs/heads/main
| 2023-05-06T13:30:40.344383
| 2021-05-24T09:30:21
| 2021-05-24T09:30:21
| 370,708,574
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,453
|
py
|
from backend.wallet.wallet import Wallet
from backend.config import MINING_REWARD_INPUT
from backend.wallet.transactions import Transaction
from backend.blockchain.block import Block
class Blockchain:
"""
Blockchain: a public ledger of transactions
Implemented as a list of blocks - data sets of transactions
"""
def __init__(self):
self.chain = [Block.genesis()]
def add_block(self, data):
self.chain.append(Block.mine_block(self.chain[-1], data))
def __repr__(self):
return f'Blockchain: {self.chain}'
def replace_chain(self, chain):
"""
Replace the local chain with the incoming one if the following applies:
- The incoming chain is longer than the local one.
- The incoming chain is formatted properly.
"""
if len(chain) <= len(self.chain):
raise Exception('Cannot replace. The incoming chain must be longer.')
try:
Blockchain.is_valid_chain(chain)
except Exception as e:
raise Exception(f'Cannot replace. The incoming chain is invalid: {e}')
self.chain = chain
def to_json(self):
"""
Serialise the blockchain into a list of blocks.
"""
return list(map(lambda block: block.to_json(), self.chain))
@staticmethod
def from_json(chain_json):
"""
Deserialize a list of serialized blocks into a Blockchain instance.
The result will contain a chain list of Block instances.
"""
blockchain = Blockchain()
blockchain.chain = list(
map(lambda block_json: Block.from_json(block_json), chain_json)
)
return blockchain
@staticmethod
def is_valid_chain(chain):
"""
Validate the incoming chain.
Enforce the following rules in the blockchain:
- the chain must start with the genesis block
- blocks must be formatted correctly
"""
if chain[0] != Block.genesis():
raise Exception('The genesis block must be valid')
for i in range(1, len(chain)):
block = chain[i]
last_block = chain[i-1]
Block.is_valid_block(last_block, block)
Blockchain.is_valid_transaction_chain(chain)
@staticmethod
def is_valid_transaction_chain(chain):
"""
Enforce the rules of a chain composed of blocks of transactions.
- Each transaction must only appear once in the chain.
- There can only be one mining reward per block.
- Each transaction must be valid.
"""
transaction_ids = set()
for i in range(len(chain)):
block = chain[i]
has_mining_reward = False
for transaction_json in block.data:
transaction = Transaction.from_json(transaction_json)
if transaction.id in transaction_ids:
raise Exception(f'Transaction {transaction.id} is not unique')
transaction_ids.add(transaction.id)
if transaction.input == MINING_REWARD_INPUT:
if has_mining_reward:
raise Exception(
'There can only be one mining reward per block.' \
f'Check block with hash: {block.hash}'
)
has_mining_reward = True
else:
historic_blockchain = Blockchain()
historic_blockchain.chain = chain[0:i]
historic_balance = Wallet.calculate_balance(
historic_blockchain,
transaction.input['address']
)
if historic_balance != transaction.input['amount']:
raise Exception(f'Transaction {transaction.id} has an invalid input amount')
Transaction.is_valid_transaction(transaction)
def main():
blockchain = Blockchain()
blockchain.add_block('one')
blockchain.add_block('two')
print(blockchain)
print(f'blockchain.py ___name__: {__name__}')
if __name__ == '__main__':
main()
|
[
"sach@getkomfy.net"
] |
sach@getkomfy.net
|
a2806a04afe0d4b965ba15eb778367f7a0bf8841
|
49296c69348c743f234807ff6390687079b6b5d9
|
/client/server_lib/omero_model_Line_ice.py
|
174821e4ec0c8ea8913b5a645fd4f9e0041c9605
|
[] |
no_license
|
crs4/omero.biobank-docker
|
2cb4b00f37115916d5b140cbdaf24c12bd8be9ef
|
e332a6eccad44384cd6a3a12e6da17eb89a6cd96
|
refs/heads/master
| 2023-09-02T04:36:21.401597
| 2014-12-07T17:34:27
| 2014-12-07T17:34:27
| 26,125,831
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,746
|
py
|
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
#
# Ice version 3.4.2
#
# <auto-generated>
#
# Generated from file `Line.ice'
#
# Warning: do not edit this file.
#
# </auto-generated>
#
import Ice, IcePy, __builtin__
import omero_model_IObject_ice
import omero_RTypes_ice
import omero_System_ice
import omero_Collections_ice
import omero_model_Shape_ice
# Included module omero
_M_omero = Ice.openModule('omero')
# Included module omero.model
_M_omero.model = Ice.openModule('omero.model')
# Included module Ice
_M_Ice = Ice.openModule('Ice')
# Included module omero.sys
_M_omero.sys = Ice.openModule('omero.sys')
# Included module omero.api
_M_omero.api = Ice.openModule('omero.api')
# Start of module omero
__name__ = 'omero'
# Start of module omero.model
__name__ = 'omero.model'
if not _M_omero.model.__dict__.has_key('Roi'):
_M_omero.model._t_Roi = IcePy.declareClass('::omero::model::Roi')
_M_omero.model._t_RoiPrx = IcePy.declareProxy('::omero::model::Roi')
if not _M_omero.model.__dict__.has_key('Details'):
_M_omero.model._t_Details = IcePy.declareClass('::omero::model::Details')
_M_omero.model._t_DetailsPrx = IcePy.declareProxy('::omero::model::Details')
if not _M_omero.model.__dict__.has_key('Line'):
_M_omero.model.Line = Ice.createTempClass()
class Line(_M_omero.model.Shape):
    # Abstract Ice servant for ::omero::model::Line, generated from Line.ice
    # ("do not edit" per the file header).  A Line is a Shape with two
    # endpoints (x1, y1)-(x2, y2) and an optional text label; field values
    # are omero RTypes (RDouble / RString) and may be None.
    def __init__(self, _id=None, _details=None, _loaded=False, _version=None, _theZ=None, _theT=None, _theC=None, _roi=None, _locked=None, _g=None, _transform=None, _vectorEffect=None, _visibility=None, _fillColor=None, _fillRule=None, _strokeColor=None, _strokeDashArray=None, _strokeDashOffset=None, _strokeLineCap=None, _strokeLineJoin=None, _strokeMiterLimit=None, _strokeWidth=None, _fontFamily=None, _fontSize=None, _fontStretch=None, _fontStyle=None, _fontVariant=None, _fontWeight=None, _x1=None, _y1=None, _x2=None, _y2=None, _textValue=None):
        # Abstract class guard: only concrete subclasses may be instantiated.
        if __builtin__.type(self) == _M_omero.model.Line:
            raise RuntimeError('omero.model.Line is an abstract class')
        # Shape.__init__ takes every common field; Line adds only five slots.
        _M_omero.model.Shape.__init__(self, _id, _details, _loaded, _version, _theZ, _theT, _theC, _roi, _locked, _g, _transform, _vectorEffect, _visibility, _fillColor, _fillRule, _strokeColor, _strokeDashArray, _strokeDashOffset, _strokeLineCap, _strokeLineJoin, _strokeMiterLimit, _strokeWidth, _fontFamily, _fontSize, _fontStretch, _fontStyle, _fontVariant, _fontWeight)
        self._x1 = _x1
        self._y1 = _y1
        self._x2 = _x2
        self._y2 = _y2
        self._textValue = _textValue

    # --- standard Ice type-introspection hooks -------------------------------
    def ice_ids(self, current=None):
        return ('::Ice::Object', '::omero::model::IObject', '::omero::model::Line', '::omero::model::Shape')

    def ice_id(self, current=None):
        return '::omero::model::Line'

    def ice_staticId():
        return '::omero::model::Line'
    ice_staticId = staticmethod(ice_staticId)

    # --- accessor stubs, implemented by concrete server-side subclasses ------
    def getX1(self, current=None):
        pass

    def setX1(self, theX1, current=None):
        pass

    def getY1(self, current=None):
        pass

    def setY1(self, theY1, current=None):
        pass

    def getX2(self, current=None):
        pass

    def setX2(self, theX2, current=None):
        pass

    def getY2(self, current=None):
        pass

    def setY2(self, theY2, current=None):
        pass

    def getTextValue(self, current=None):
        pass

    def setTextValue(self, theTextValue, current=None):
        pass

    def __str__(self):
        # Ice-provided pretty-printer driven by the type metadata.
        return IcePy.stringify(self, _M_omero.model._t_Line)
    __repr__ = __str__
_M_omero.model.LinePrx = Ice.createTempClass()
class LinePrx(_M_omero.model.ShapePrx):
    # Generated client-side proxy for ::omero::model::Line.  For each remote
    # accessor there are three variants: a synchronous call, a begin_* AMI
    # starter and a matching end_* result collector.
    def getX1(self, _ctx=None):
        return _M_omero.model.Line._op_getX1.invoke(self, ((), _ctx))

    def begin_getX1(self, _response=None, _ex=None, _sent=None, _ctx=None):
        return _M_omero.model.Line._op_getX1.begin(self, ((), _response, _ex, _sent, _ctx))

    def end_getX1(self, _r):
        return _M_omero.model.Line._op_getX1.end(self, _r)

    def setX1(self, theX1, _ctx=None):
        return _M_omero.model.Line._op_setX1.invoke(self, ((theX1, ), _ctx))

    def begin_setX1(self, theX1, _response=None, _ex=None, _sent=None, _ctx=None):
        return _M_omero.model.Line._op_setX1.begin(self, ((theX1, ), _response, _ex, _sent, _ctx))

    def end_setX1(self, _r):
        return _M_omero.model.Line._op_setX1.end(self, _r)

    def getY1(self, _ctx=None):
        return _M_omero.model.Line._op_getY1.invoke(self, ((), _ctx))

    def begin_getY1(self, _response=None, _ex=None, _sent=None, _ctx=None):
        return _M_omero.model.Line._op_getY1.begin(self, ((), _response, _ex, _sent, _ctx))

    def end_getY1(self, _r):
        return _M_omero.model.Line._op_getY1.end(self, _r)

    def setY1(self, theY1, _ctx=None):
        return _M_omero.model.Line._op_setY1.invoke(self, ((theY1, ), _ctx))

    def begin_setY1(self, theY1, _response=None, _ex=None, _sent=None, _ctx=None):
        return _M_omero.model.Line._op_setY1.begin(self, ((theY1, ), _response, _ex, _sent, _ctx))

    def end_setY1(self, _r):
        return _M_omero.model.Line._op_setY1.end(self, _r)

    def getX2(self, _ctx=None):
        return _M_omero.model.Line._op_getX2.invoke(self, ((), _ctx))

    def begin_getX2(self, _response=None, _ex=None, _sent=None, _ctx=None):
        return _M_omero.model.Line._op_getX2.begin(self, ((), _response, _ex, _sent, _ctx))

    def end_getX2(self, _r):
        return _M_omero.model.Line._op_getX2.end(self, _r)

    def setX2(self, theX2, _ctx=None):
        return _M_omero.model.Line._op_setX2.invoke(self, ((theX2, ), _ctx))

    def begin_setX2(self, theX2, _response=None, _ex=None, _sent=None, _ctx=None):
        return _M_omero.model.Line._op_setX2.begin(self, ((theX2, ), _response, _ex, _sent, _ctx))

    def end_setX2(self, _r):
        return _M_omero.model.Line._op_setX2.end(self, _r)

    def getY2(self, _ctx=None):
        return _M_omero.model.Line._op_getY2.invoke(self, ((), _ctx))

    def begin_getY2(self, _response=None, _ex=None, _sent=None, _ctx=None):
        return _M_omero.model.Line._op_getY2.begin(self, ((), _response, _ex, _sent, _ctx))

    def end_getY2(self, _r):
        return _M_omero.model.Line._op_getY2.end(self, _r)

    def setY2(self, theY2, _ctx=None):
        return _M_omero.model.Line._op_setY2.invoke(self, ((theY2, ), _ctx))

    def begin_setY2(self, theY2, _response=None, _ex=None, _sent=None, _ctx=None):
        return _M_omero.model.Line._op_setY2.begin(self, ((theY2, ), _response, _ex, _sent, _ctx))

    def end_setY2(self, _r):
        return _M_omero.model.Line._op_setY2.end(self, _r)

    def getTextValue(self, _ctx=None):
        return _M_omero.model.Line._op_getTextValue.invoke(self, ((), _ctx))

    def begin_getTextValue(self, _response=None, _ex=None, _sent=None, _ctx=None):
        return _M_omero.model.Line._op_getTextValue.begin(self, ((), _response, _ex, _sent, _ctx))

    def end_getTextValue(self, _r):
        return _M_omero.model.Line._op_getTextValue.end(self, _r)

    def setTextValue(self, theTextValue, _ctx=None):
        return _M_omero.model.Line._op_setTextValue.invoke(self, ((theTextValue, ), _ctx))

    def begin_setTextValue(self, theTextValue, _response=None, _ex=None, _sent=None, _ctx=None):
        return _M_omero.model.Line._op_setTextValue.begin(self, ((theTextValue, ), _response, _ex, _sent, _ctx))

    def end_setTextValue(self, _r):
        return _M_omero.model.Line._op_setTextValue.end(self, _r)

    # Narrowing helpers: checkedCast verifies the type remotely,
    # uncheckedCast narrows without a server round-trip.
    def checkedCast(proxy, facetOrCtx=None, _ctx=None):
        return _M_omero.model.LinePrx.ice_checkedCast(proxy, '::omero::model::Line', facetOrCtx, _ctx)
    checkedCast = staticmethod(checkedCast)

    def uncheckedCast(proxy, facet=None):
        return _M_omero.model.LinePrx.ice_uncheckedCast(proxy, facet)
    uncheckedCast = staticmethod(uncheckedCast)
_M_omero.model._t_LinePrx = IcePy.defineProxy('::omero::model::Line', LinePrx)
_M_omero.model._t_Line = IcePy.declareClass('::omero::model::Line')
_M_omero.model._t_Line = IcePy.defineClass('::omero::model::Line', Line, (), True, _M_omero.model._t_Shape, (), (
('_x1', (), _M_omero._t_RDouble),
('_y1', (), _M_omero._t_RDouble),
('_x2', (), _M_omero._t_RDouble),
('_y2', (), _M_omero._t_RDouble),
('_textValue', (), _M_omero._t_RString)
))
Line._ice_type = _M_omero.model._t_Line
Line._op_getX1 = IcePy.Operation('getX1', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RDouble, ())
Line._op_setX1 = IcePy.Operation('setX1', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RDouble),), (), None, ())
Line._op_getY1 = IcePy.Operation('getY1', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RDouble, ())
Line._op_setY1 = IcePy.Operation('setY1', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RDouble),), (), None, ())
Line._op_getX2 = IcePy.Operation('getX2', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RDouble, ())
Line._op_setX2 = IcePy.Operation('setX2', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RDouble),), (), None, ())
Line._op_getY2 = IcePy.Operation('getY2', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RDouble, ())
Line._op_setY2 = IcePy.Operation('setY2', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RDouble),), (), None, ())
Line._op_getTextValue = IcePy.Operation('getTextValue', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RString, ())
Line._op_setTextValue = IcePy.Operation('setTextValue', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RString),), (), None, ())
_M_omero.model.Line = Line
del Line
_M_omero.model.LinePrx = LinePrx
del LinePrx
# End of module omero.model
__name__ = 'omero'
# End of module omero
|
[
"gmauro@crs4.it"
] |
gmauro@crs4.it
|
15ea9a03ffec118e4fc925a147ce04b0a8945cfa
|
a9a16c414d7370b2ca6442b8125d6b6f9b3d6556
|
/chapter_16_Exam_Statistics/08_Standard_Deviation.py
|
118eb2847f27737167f15d03010d0d0c460c5020
|
[] |
no_license
|
dennisnderitu254/CodeCademy-Py
|
99f1cb9fa011f1586d543650c5001de17f04b8b2
|
758067dc53fdb442ab18dd922dacd13cc8846ebb
|
refs/heads/master
| 2021-07-12T10:01:58.854222
| 2017-10-12T20:03:56
| 2017-10-12T20:03:56
| 106,739,488
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
grades = [100, 100, 90, 40, 80, 100, 85, 70, 90, 65, 90, 85, 50.5]
def print_grades(grades):
    # Print each grade on its own line (Python 2 print statement).
    for grade in grades:
        print grade
def grades_sum(grades):
    """Return the sum of all grades (0 for an empty list)."""
    return sum(grades)
def grades_average(grades):
    """Mean grade as a float (true division even on Python 2).

    Raises ZeroDivisionError for an empty list, as the original did.
    """
    # Inlines the grades_sum helper; float() replaces the `* 1.0` trick.
    return float(sum(grades)) / len(grades)
def grades_variance(grades, average):
    """Population variance of the grades around the supplied mean."""
    # The 0.0 seed keeps the accumulator a float, matching the original's
    # `variance = 0.0` (true division on Python 2 as well).
    squared_diffs = ((grade - average) ** 2 for grade in grades)
    return sum(squared_diffs, 0.0) / len(grades)
def grades_std_deviation(variance):
    """Standard deviation: the square root of the variance."""
    return pow(variance, 0.5)
print grades_std_deviation(grades_variance(grades, grades_average(grades)))
|
[
"dknderitu@gmail.com"
] |
dknderitu@gmail.com
|
2b37b81ae2ee016f14e0faf33e65a896113b51bb
|
5a91ebb01e3eab6e9851789e15a37731b2d8c051
|
/DjangoPracticeProject-master/mysite_2/article/migrations/0004_comment.py
|
83dc6de72baaf6be2c9e5e6d605e70fe7471ee83
|
[] |
no_license
|
jianzonggaoshou/python_book_code
|
463a16918ea1699fb5bd29c5c8e822ca6a74f894
|
f31ab1a031c7eace2c29f5ced93f070e513d27d2
|
refs/heads/master
| 2021-06-17T02:47:20.206413
| 2019-07-07T03:23:44
| 2019-07-07T03:23:44
| 191,194,290
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 892
|
py
|
# Generated by Django 2.2 on 2018-12-13 06:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Comment model (per-article
    comments with author name, body and creation timestamp)."""

    dependencies = [
        ('article', '0003_auto_20181213_1335'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('commentator', models.CharField(max_length=90)),
                ('body', models.TextField()),
                # Set once on insert, never updated.
                ('created', models.DateTimeField(auto_now_add=True)),
                # Deleting an article cascades to its comments; reverse
                # accessor is article.comments.
                ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='article.ArticlePost')),
            ],
            options={
                # Newest comments first by default.
                'ordering': ('-created',),
            },
        ),
    ]
|
[
"xuzhen0913@163.com"
] |
xuzhen0913@163.com
|
98b926ea45f3575cfe9161a65a10d6b73fb0c7c1
|
3376f2dfa3be8bad53f1fa4d36de94bbc997765a
|
/sgt_test.py
|
c3336f423c84f676d1404dfe99b1858d8edf9b7c
|
[] |
no_license
|
mvlvrd/smoothing
|
59f1aff9567a105194cdb653c7fe6972c9af0c6b
|
211c05f2623a54633da1efdff75aa263036804f7
|
refs/heads/master
| 2020-03-28T21:10:13.235574
| 2018-06-02T14:46:31
| 2018-09-17T14:02:41
| 129,372,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
from smoothing import simple_good_turing
def _isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def test_file(fileName):
    """Run simple_good_turing on a frequency file and check normalization.

    Each line of *fileName* holds "r n_r": a count and the number of word
    types with that count.  Synthetic integer "words" are generated so that
    exactly n_r of them carry count r.

    Raises AssertionError when the smoothed distribution does not sum to 1.
    """
    in_dict = {}
    word = 0
    with open(fileName, "r") as inFile:
        for line in inFile:
            r, n_r = [int(_) for _ in line.split()]
            for _ in range(n_r):
                in_dict[word] = r
                word += 1
    res = simple_good_turing(in_dict)
    # FIX: dict.itervalues() exists only on Python 2; values() works on both.
    assert _isclose(sum(res.values()), 1.), "norm_gt_dict is not properly normalized."
if __name__=="__main__":
test_file("chinese.txt")
test_file("prosody.txt")
|
[
"manolo.valverde@gmail.com"
] |
manolo.valverde@gmail.com
|
f5fbef25f2ef941a32b9b89b6c2cfa9eb46bb066
|
ba699e4fa14cc8ef62db21647df7c8dce66c03e9
|
/tests/scheduler/instructions/test_repeat.py
|
930bcba26aafb02cfd9b94998941fcf771c73d10
|
[] |
no_license
|
mattconway1984/pluto
|
d089da44fbea4b62ac21cbc74f0464292033ff07
|
3752360a37fc168995c5906a2a92a2c8182e8bfd
|
refs/heads/master
| 2020-06-13T13:46:56.453916
| 2020-03-11T11:07:29
| 2020-03-11T11:07:29
| 161,771,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,335
|
py
|
"""
Unit tests for the Repeat instruction
"""
#pylint:disable=missing-docstring
#pylint: disable=no-self-use
from unittest import TestCase
from unittest.mock import MagicMock
from time import sleep
from time import time as timenow
from fake_instruction import FakeInstruction, SillyError
from pluto.exceptions.exceptions import LogicError
from pluto.scheduler.instruction import PlutoInstructionRunner
from pluto.scheduler.instructions.repeat import (
RepeatForever,
RepeatTimes,
RepeatFor)
class TestRepeatForeverInstruction(TestCase):
    """
    Test suite to test the RepeatForever instruction
    """

    def test_repeat_forever_repeated_instruction_raises_error(self):
        """
        When the repeated instructions run method raises an error, the
        run method of the repeat instruction must not squash that error.
        """
        fake_instruction = FakeInstruction(exception=SillyError())
        instruction = RepeatForever(fake_instruction)
        with self.assertRaises(SillyError):
            instruction.run(MagicMock())
        # The error must escape on the very first iteration.
        self.assertEqual(fake_instruction.run_count, 1)

    def test_repeat_forever_every_time_is_too_short(self):
        """
        When the repeated instructions run method takes a longer time to
        execute than the @repeat_every argument passed to the repeat
        instruction, an error is expected
        """
        repeat_every = 0.1
        # Blocks for 0.5s -- deliberately longer than repeat_every.
        fake_instruction = FakeInstruction(blocking_time=0.5)
        instruction = RepeatForever(fake_instruction, repeat_every=repeat_every)
        with self.assertRaises(LogicError):
            # FIX: pass a MagicMock *instance*; the class object was being
            # passed here, unlike every other call site in this file.
            instruction.run(MagicMock())

    def test_repeat_forever_then_stop(self):
        """
        RepeatForever can only be stopped by calling the stop() method of the
        instruction. This tests that the instruction is repeated as expected
        until the stop() method is called (number of repeats is unimportant).
        """
        stop_after = 0.1
        min_expected_repeats = 10
        fake_instruction = FakeInstruction()
        instruction = RepeatForever(fake_instruction)
        runner = PlutoInstructionRunner(MagicMock(), instruction)
        runner.start()
        sleep(stop_after)
        runner.stop()
        self.assertGreater(fake_instruction.run_count, min_expected_repeats)

    def test_repeat_forever_every_tenth_of_a_second(self):
        """
        RepeatForever has a @repeat_every argument, which specifies how
        frequently the repeated instruction should be repeated. Therefore,
        when a RepeatForever is run for 1 second, repeating every 0.1 seconds,
        the repeated instruction should be executed exactly 10 times.
        """
        repeat_every = 0.1
        repeat_for = 1
        expected_repeats = 10
        fake_instruction = FakeInstruction()
        instruction = RepeatForever(fake_instruction, repeat_every=repeat_every)
        runner = PlutoInstructionRunner(MagicMock(), instruction)
        runner.start()
        sleep(repeat_for)
        runner.stop()
        self.assertEqual(fake_instruction.run_count, expected_repeats)

    def test_forever_times_stop_whilst_waiting_for_every_timer(self):
        """
        When RepeatForever is repeating an instruction every n seconds, there
        should be a period of grace when the instruction has finished executing
        and the RepeatForever is waiting to repeat again. In this instance, if
        the stop() API of the RepeatForever instruction is invoked, the
        instruction should tear down almost immediately.
        """
        repeat_every = 5
        expected_duration = 0.25
        fake_instruction = FakeInstruction()
        instruction = RepeatForever(fake_instruction, repeat_every=repeat_every)
        runner = PlutoInstructionRunner(MagicMock(), instruction)
        start = timenow()
        runner.start()
        sleep(expected_duration)
        runner.stop()
        duration = timenow() - start
        # stop() must not wait out the remaining ~4.75s of the every-timer.
        self.assertAlmostEqual(duration, expected_duration, 1)
class TestRepeatTimesInstruction(TestCase):
    """
    Test suite to test the RepeatTimes instruction
    """

    def test_repeat_times_repeated_instruction_raises_error(self):
        """
        When the repeated instructions run method raises an error, the
        run method of the repeat instruction must not squash that error.
        """
        instruction = RepeatTimes(FakeInstruction(exception=SillyError()), 10)
        with self.assertRaises(SillyError):
            instruction.run(MagicMock())

    def test_repeat_times_every_time_is_too_short(self):
        """
        When the repeated instructions run method takes a longer time to
        execute than the @repeat_every argument passed to the repeat
        instruction, an error is expected
        """
        repeat_every = 0.1
        # Blocks for 0.5s -- deliberately longer than repeat_every.
        fake_instruction = FakeInstruction(blocking_time=0.5)
        instruction = RepeatTimes(fake_instruction, 10, repeat_every)
        with self.assertRaises(LogicError):
            # FIX: pass a MagicMock *instance*; the class object was being
            # passed here, unlike every other call site in this file.
            instruction.run(MagicMock())

    def test_repeat_times_stop_early(self):
        """
        RepeatTimes can be stopped by calling the stop() method of the
        instruction. This tests that the instruction is repeated as expected
        until the stop() method is called (number of repeats is unimportant).
        """
        stop_after = 0.1
        requested_iterations = 99999999
        min_expected_repeats = 10
        fake_instruction = FakeInstruction()
        instruction = RepeatTimes(fake_instruction, requested_iterations)
        runner = PlutoInstructionRunner(MagicMock(), instruction)
        runner.start()
        sleep(stop_after)
        runner.stop()
        self.assertGreater(fake_instruction.run_count, min_expected_repeats)

    def test_repeat_ten_times(self):
        """
        RepeatTimes has a @iterations argument, which specifies how
        many repeat iterations to perform. This ensures the correct number
        of iterations are completed.
        """
        requested_iterations = 10
        fake_instruction = FakeInstruction()
        instruction = RepeatTimes(fake_instruction, requested_iterations)
        runner = PlutoInstructionRunner(MagicMock(), instruction)
        runner.start()
        runner.wait()
        self.assertEqual(fake_instruction.run_count, requested_iterations)

    def test_repeat_ten_times_every_tenth_of_a_second(self):
        """
        RepeatTimes has a @repeat_every argument, which specifies how
        frequently the repeated instruction should be repeated. Therefore,
        when a RepeatTimes is run for 1 second, repeating every 0.1 seconds,
        the repeated instruction should be executed exactly 10 times.
        """
        repeat_every = 0.1
        requested_iterations = 10
        expected_duration = 1
        fake_instruction = FakeInstruction()
        instruction = RepeatTimes(fake_instruction, requested_iterations, repeat_every=repeat_every)
        runner = PlutoInstructionRunner(MagicMock(), instruction)
        start = timenow()
        runner.start()
        runner.wait()
        duration = timenow() - start
        self.assertEqual(fake_instruction.run_count, requested_iterations)
        self.assertEqual(int(duration), expected_duration)

    def test_repeat_times_stop_whilst_waiting_for_every_timer(self):
        """
        When RepeatTimes is repeating an instruction every n seconds, there
        should be a period of grace when the instruction has finished executing
        and the RepeatTimes is waiting to repeat again. In this instance, if
        the stop() API of the RepeatTimes instruction is invoked, the
        instruction should tear down almost immediately.
        """
        requested_iterations = 10
        repeat_every = 5
        expected_duration = 0.25
        fake_instruction = FakeInstruction()
        instruction = RepeatTimes(fake_instruction, requested_iterations, repeat_every=repeat_every)
        runner = PlutoInstructionRunner(MagicMock(), instruction)
        start = timenow()
        runner.start()
        sleep(expected_duration)
        runner.stop()
        duration = timenow() - start
        # stop() must not wait out the remaining ~4.75s of the every-timer.
        self.assertAlmostEqual(duration, expected_duration, 1)
class TestRepeatFor(TestCase):
    """
    Test suite to test the RepeatFor instruction

    (Class renamed from the typo 'TestRepeatForr'; unittest discovery is
    unaffected since the 'Test' prefix is preserved.)
    """

    def test_repeat_for_repeated_instruction_raises_error(self):
        """
        When the repeated instructions run method raises an error, the
        run method of the repeat instruction must not squash that error.
        """
        instruction = RepeatFor(FakeInstruction(exception=SillyError()), 10)
        with self.assertRaises(SillyError):
            instruction.run(MagicMock())

    def test_repeat_for_every_time_is_too_short(self):
        """
        When the repeated instructions run method takes a longer time to
        execute than the @repeat_every argument passed to the repeat
        instruction, an error is expected
        """
        repeat_every = 0.1
        # Blocks for 0.5s -- deliberately longer than repeat_every.
        fake_instruction = FakeInstruction(blocking_time=0.5)
        instruction = RepeatFor(fake_instruction, 10, repeat_every)
        with self.assertRaises(LogicError):
            # FIX: pass a MagicMock *instance*; the class object was being
            # passed here, unlike every other call site in this file.
            instruction.run(MagicMock())

    def test_repeat_for_stop_early(self):
        """
        RepeatFor can be stopped by calling the stop() method of the
        instruction. This tests that the instruction is repeated as expected
        until the stop() method is called (number of repeats is unimportant).
        """
        stop_after = 0.1
        requested_seconds = 100
        min_expected_repeats = 10
        fake_instruction = FakeInstruction()
        instruction = RepeatFor(fake_instruction, requested_seconds)
        runner = PlutoInstructionRunner(MagicMock(), instruction)
        runner.start()
        sleep(stop_after)
        runner.stop()
        self.assertGreater(fake_instruction.run_count, min_expected_repeats)

    def test_repeat_one_second(self):
        """
        RepeatFor has a @seconds argument, which specifies how long to run
        the repeat for. This ensures the instruction was repeated for the
        requested seconds.
        """
        requested_seconds = 0.10
        fake_instruction = FakeInstruction()
        instruction = RepeatFor(fake_instruction, requested_seconds)
        runner = PlutoInstructionRunner(MagicMock(), instruction)
        start = timenow()
        runner.start()
        runner.wait()
        duration = timenow() - start
        self.assertEqual(round(duration, 1), round(requested_seconds, 1))

    def test_repeat_for_half_a_second_every_tenth_of_a_second(self):
        """
        RepeatFor has a @repeat_every argument, which specifies how
        frequently the repeated instruction should be repeated. Therefore,
        when a RepeatFor is run for 0.5 seconds, repeating every 0.1 seconds,
        the repeated instruction should be executed exactly 5 times.
        """
        repeat_every = 0.1
        requested_seconds = 0.50
        expected_repeats = 5
        fake_instruction = FakeInstruction()
        instruction = RepeatFor(fake_instruction, requested_seconds, repeat_every=repeat_every)
        runner = PlutoInstructionRunner(MagicMock(), instruction)
        runner.start()
        runner.wait()
        self.assertEqual(fake_instruction.run_count, expected_repeats)

    def test_repeat_for_stop_whilst_waiting_for_every_timer(self):
        """
        When RepeatFor is repeating an instruction every n seconds, there
        should be a period of grace when the instruction has finished executing
        and the RepeatFor is waiting to repeat again. In this instance, if
        the stop() API of the RepeatFor instruction is invoked, the
        instruction should tear down almost immediately.
        """
        requested_seconds = 10
        repeat_every = 5
        expected_duration = 0.25
        fake_instruction = FakeInstruction()
        instruction = RepeatFor(fake_instruction, requested_seconds, repeat_every=repeat_every)
        runner = PlutoInstructionRunner(MagicMock(), instruction)
        start = timenow()
        runner.start()
        sleep(expected_duration)
        runner.stop()
        duration = timenow() - start
        # stop() must not wait out the remaining ~4.75s of the every-timer.
        self.assertAlmostEqual(duration, expected_duration, 1)
|
[
"matthew.conway@owlstone.co.uk"
] |
matthew.conway@owlstone.co.uk
|
ab66a7260154732cfdde6a27b5af3b653c0cda52
|
fcfb225ccaaef5e6312cad278320de1b19aa1dfd
|
/samples/client/petstore/python/test/test_category.py
|
793fbdf41b053abb4bfd2a8c388bfc8afba22a8d
|
[
"Apache-2.0"
] |
permissive
|
PrinceMerluza/swagger-codegen
|
2346df19c64ef358374c37036c67b930fa0e4b81
|
45c14614f261d020b6b1c10e8620a9f48a72700f
|
refs/heads/master
| 2023-09-01T20:06:21.448932
| 2021-05-11T08:31:59
| 2021-05-11T08:31:59
| 355,461,876
| 1
| 0
|
NOASSERTION
| 2023-09-13T17:10:02
| 2021-04-07T08:04:06
| null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.category import Category
class TestCategory(unittest.TestCase):
    """ Category unit test stubs (generated by swagger-codegen). """

    def setUp(self):
        # No fixtures needed for a pure model-construction test.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testCategory(self):
        """
        Test Category
        """
        # Smoke test: constructing the generated model must not raise.
        model = swagger_client.models.category.Category()
if __name__ == '__main__':
unittest.main()
|
[
"scott.rw93@gmail.com"
] |
scott.rw93@gmail.com
|
0d64a0f3deba73b241e73e5880aff279501f72f7
|
202d1d737233eb1aa37cee07d76fc1804df1252d
|
/models/vgg.py
|
4c29d3bd4a9701c9614e9bad978f6d82b992a2f3
|
[
"MIT"
] |
permissive
|
bityangke/2D-kinectics
|
d116688e243f6da2fd839aaf118b0b84d202de5e
|
2ac4241b05ba34d3e182ea7b451a1a7a520a6028
|
refs/heads/master
| 2021-05-06T01:34:04.779115
| 2017-12-13T13:56:00
| 2017-12-13T13:56:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,513
|
py
|
"""
Copyright (c) 2017, Gurkirt Singh
This code and is available
under the terms of MIT License provided in LICENSE.
Please retain this notice and LICENSE if you use
this file (or any portion of it) in your project.
---------------------------------------------------------
Purpose of this script is to creat VGG16 network and define its forward pass
"""
import torch, pdb
import torch.nn as nn
# Adapted from
# https://github.com/amdegroot/ssd.pytorch/blob/master/ssd.py
def vggconv(in_channels, batch_norm=False):
    """Build the VGG-16 convolutional trunk as a flat list of nn.Modules.

    In the config, integers are 3x3 conv output widths, 'M' is a plain 2x2
    max-pool and 'C' is a ceil-mode pool.  The stack is finished with the
    SSD-style tail: a stride-1 pool5 plus dilated conv6 and 1x1 conv7.
    """
    cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M', 512, 512, 512]
    layers = []
    channels = in_channels
    for item in cfg:
        if item == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif item == 'C':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        else:
            layers.append(nn.Conv2d(channels, item, kernel_size=3, padding=1))
            if batch_norm:
                layers.append(nn.BatchNorm2d(item))
            layers.append(nn.ReLU(inplace=True))
            channels = item
    # SSD tail: keep spatial size with pool5, then the dilated fc6/fc7 pair.
    layers.append(nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
    layers.append(nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
    layers.append(nn.ReLU(inplace=True))
    layers.append(nn.Conv2d(1024, 1024, kernel_size=1))
    layers.append(nn.ReLU(inplace=True))
    return layers
def vggnet(pretrained=False, num_classes = 1000, global_models_dir = '', num_channels=3):
    """Build a VGG16 classifier.

    Args:
        pretrained (bool): if True, load SSD's reduced-fc VGG weights for the
            convolutional trunk from ``global_models_dir``.
        num_classes (int): size of the final classification layer.
        global_models_dir (str): directory holding 'vgg16_reducedfc.pth'.
        num_channels (int): number of input image channels.

    Returns:
        VGG16: the assembled network.
    """
    # FIX: the non-pretrained branch previously called vggconv() with no
    # arguments, which raised TypeError (in_channels is required) and also
    # ignored num_channels.  Both branches now build the trunk identically.
    model = VGG16(vggconv(num_channels), num_classes)
    if pretrained:
        conv_path = global_models_dir + '/vgg16_reducedfc.pth'
        print('=> From: ', conv_path)
        print('MODEL TYPE is STD')
        # Only the convolutional trunk is pretrained; the classifier head
        # always starts from fresh initialisation.
        conv_dict = torch.load(conv_path)
        model.conv_base.load_state_dict(conv_dict)
    return model
class VGG16(nn.Module):
    """VGG-16 trunk (built by vggconv) + extra downsampling + an MLP head."""

    def __init__(self, convs, num_classes):
        super(VGG16, self).__init__()
        self.num_classes = num_classes
        # ModuleList registers the trunk's parameters; layers are applied
        # manually in forward() since ModuleList has no forward of its own.
        self.conv_base = nn.ModuleList(convs)
        # Extra stage: halve spatially, then a stride-3 conv down to 3x3.
        self.extra_layers = nn.ModuleList([nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
            nn.Conv2d(1024, 1024, kernel_size=3, stride=3), nn.ReLU(inplace=True)])
        ms = 3  # spatial size of the flattened feature map (ms x ms)
        self.classifier = nn.Sequential(nn.Dropout(0.6),
                                        nn.Linear(1024*ms*ms, 4096),
                                        nn.ReLU(True),
                                        nn.Dropout(0.6),
                                        nn.Linear(4096, num_classes)
                                        )

    def forward(self, x):
        # Apply each trunk layer in order.
        for k in range(len(self.conv_base)):
            x = self.conv_base[k](x)
        # Original author's note put the trunk output at 19x19 spatially --
        # depends on the input resolution; TODO confirm against the caller.
        for k in range(len(self.extra_layers)):
            x = self.extra_layers[k](x)
        # Flatten (N, 1024, 3, 3) -> (N, 1024*3*3) for the classifier.
        x = x.view(x.size(0),-1)
        return self.classifier(x)
|
[
"guru094@gmail.com"
] |
guru094@gmail.com
|
06b808e1cc46921c2b2f8f31cc8fc08b130c7b6e
|
9720714621636c2ef30d99427d3e99cad591d7fb
|
/Trabajos/deletrador.py
|
14d059250c8a953b567cd3923c08d37cd6f674f5
|
[] |
no_license
|
diegonavarrosanz1/1evaluacion
|
dbacabbdd887f8b62dfccc962bcd2393000ccb5b
|
285465da8b27f71a75867349f36ed600c74e529a
|
refs/heads/master
| 2021-08-20T05:59:26.183350
| 2017-11-28T10:51:36
| 2017-11-28T10:51:36
| 111,536,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
def deletrador():
nombre= raw_input("dime una palabra")
print "la palabra tiene", len(nombre), "letras"
vocales=u
for cont in range (0,len(nombre),1):
if nombre[cont]=='a'or nombre[cont]=='e'or nombre[cont]=='i'or nombre[cont]=='o'or nombre[cont]=='u':
vocales= vocales+1
print "de las cuales", vocales, "son vocales y ", len(nombre) - vocales, "consonante"
deletrador()
|
[
"noreply@github.com"
] |
diegonavarrosanz1.noreply@github.com
|
62578354b1389fd4459c0cc9d9e9b69928c37080
|
1f42df2bc6f7f6a67e4253b200af779602d9f83d
|
/mymap/serializers.py
|
9e0ec8b79384c6fa0efccc104b3c22a30bbe26d8
|
[] |
no_license
|
JingshiPeter/B512
|
812edae7395608b125d26c8c73565ca1fb98503c
|
86a97ad3307f30aa32cb16a530bdc7278f7b71a7
|
refs/heads/master
| 2016-09-15T12:45:18.159134
| 2015-12-01T11:59:45
| 2015-12-01T11:59:45
| 44,842,515
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
from rest_framework import serializers
from django.contrib.auth.models import User
from mymap.models import Customer, Order
class CustomerSerializer(serializers.HyperlinkedModelSerializer):
    """Customer with read-only owner info and hyperlinks to its orders."""
    # Owner fields are sourced from the related auth user and cannot be
    # set by API clients.
    owner= serializers.ReadOnlyField(source='owner.username')
    owner_id= serializers.ReadOnlyField(source='owner.id')
    # Writable hyperlinked list of the customer's orders.
    orders = serializers.HyperlinkedRelatedField(queryset=Order.objects.all(), view_name='order-detail', many=True)

    class Meta:
        model = Customer
        fields = ('url','owner_id','id','owner','name', 'streetnumber', 'streetname',
            'city', 'state', 'zipcode','orders')
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Auth user plus hyperlinks to the customers they own."""
    customers = serializers.HyperlinkedRelatedField(queryset=Customer.objects.all(), view_name='customer-detail', many=True)

    class Meta:
        model = User
        fields = ('url', 'username', 'customers')
class OrderSerializer(serializers.HyperlinkedModelSerializer):
    """Order exposing a read-only denormalised view of its customer and
    two delivery time windows (start/end pairs)."""
    # Read-only: an order's customer cannot be reassigned via the API.
    customer = serializers.ReadOnlyField(source='customer.name')
    customer_id = serializers.ReadOnlyField(source='customer.id')

    class Meta:
        model = Order
        fields = ('url','customer_id','customer','date','quantity','timewindows1','timewindowe1','timewindows2','timewindowe2')
|
[
"hanzi@utexas.edu"
] |
hanzi@utexas.edu
|
b4dce8f7776b27512b6d77626d6b3c06b1e117aa
|
d2b3af2914bcf5fa4c9b6a2a97af99c8dab5380c
|
/dqclibs/libs/libcint/testsuite/test_3c2e.py
|
0a8bd0578e87d0044f2adb9e6833e299b6a4c50a
|
[
"GPL-1.0-or-later",
"GPL-3.0-only",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
diffqc/dqclibs
|
291044a96077b8b936bec584d9e19a7cb677c43f
|
6b0b2311217129f7d65190cab74557d01f974560
|
refs/heads/main
| 2023-07-15T16:39:13.051478
| 2023-06-27T14:50:35
| 2023-06-27T14:50:35
| 374,606,696
| 0
| 2
|
Apache-2.0
| 2023-06-27T14:50:37
| 2021-06-07T09:22:16
|
C
|
UTF-8
|
Python
| false
| false
| 9,957
|
py
|
#!/usr/bin/env python
# $Id$
# -*- coding: utf-8
from __future__ import print_function
'''
test libcint
'''
__author__ = "Qiming Sun <osirpt.sun@gmail.com>"
import sys
import os
import ctypes
import numpy
_cint = numpy.ctypeslib.load_library('libcint', '.')
PTR_LIGHT_SPEED = 0
PTR_COMMON_ORIG = 1
PTR_SHIELDING_ORIG = 4
PTR_RINV_ORIG = 4
PTR_RINV_ZETA = 7
PTR_ENV_START = 20
CHARGE_OF = 0
PTR_COORD = 1
NUC_MOD_OF = 2
PTR_ZETA = 3
RAD_GRIDS = 4
ANG_GRIDS = 5
ATM_SLOTS = 6
ATOM_OF = 0
ANG_OF = 1
NPRIM_OF = 2
NCTR_OF = 3
KAPPA_OF = 4
PTR_EXP = 5
PTR_COEFF = 6
BAS_SLOTS = 8
natm = 4
nbas = 0
atm = numpy.zeros((natm+1,ATM_SLOTS), dtype=numpy.int32)
bas = numpy.zeros((1000,BAS_SLOTS), dtype=numpy.int32)
env = numpy.zeros(10000)
off = PTR_ENV_START
for i in range(natm):
atm[i, CHARGE_OF] = (i+1)*2
atm[i, PTR_COORD] = off
env[off+0] = .2 * (i+1)
env[off+1] = .3 + (i+1) * .5
env[off+2] = .1 - (i+1) * .5
off += 3
off0 = off
# basis with kappa > 0
nh = 0
bas[nh,ATOM_OF ] = 0
bas[nh,ANG_OF ] = 1
bas[nh,KAPPA_OF] = 1
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP] = off
env[off+0] = 1
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1
off += 2
nh += 1
bas[nh,ATOM_OF ] = 1
bas[nh,ANG_OF ] = 2
bas[nh,KAPPA_OF] = 2
bas[nh,NPRIM_OF] = 2
bas[nh,NCTR_OF ] = 2
bas[nh,PTR_EXP] = off
env[off+0] = 5
env[off+1] = 3
bas[nh,PTR_COEFF] = off + 2
env[off+2] = 1
env[off+3] = 2
env[off+4] = 4
env[off+5] = 1
off += 6
nh += 1
bas[nh,ATOM_OF ] = 2
bas[nh,ANG_OF ] = 3
bas[nh,KAPPA_OF] = 3
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP ] = off
env[off+0] = 1
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1
off += 2
nh += 1
bas[nh,ATOM_OF ] = 3
bas[nh,ANG_OF ] = 4
bas[nh,KAPPA_OF] = 4
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP ] = off
env[off+0] = .5
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1.
off = off + 2
nh += 1
nbas = nh
# basis with kappa < 0
n = off - off0
for i in range(n):
env[off+i] = env[off0+i]
for i in range(nh):
bas[i+nh,ATOM_OF ] = bas[i,ATOM_OF ]
bas[i+nh,ANG_OF ] = bas[i,ANG_OF ] - 1
bas[i+nh,KAPPA_OF] =-bas[i,KAPPA_OF]
bas[i+nh,NPRIM_OF] = bas[i,NPRIM_OF]
bas[i+nh,NCTR_OF ] = bas[i,NCTR_OF ]
bas[i+nh,PTR_EXP ] = bas[i,PTR_EXP ] + n
bas[i+nh,PTR_COEFF]= bas[i,PTR_COEFF] + n
env[bas[i+nh,PTR_COEFF]] /= 2 * env[bas[i,PTR_EXP]]
env[bas[5,PTR_COEFF]+0] = env[bas[1,PTR_COEFF]+0] / (2 * env[bas[1,PTR_EXP]+0])
env[bas[5,PTR_COEFF]+1] = env[bas[1,PTR_COEFF]+1] / (2 * env[bas[1,PTR_EXP]+1])
env[bas[5,PTR_COEFF]+2] = env[bas[1,PTR_COEFF]+2] / (2 * env[bas[1,PTR_EXP]+0])
env[bas[5,PTR_COEFF]+3] = env[bas[1,PTR_COEFF]+3] / (2 * env[bas[1,PTR_EXP]+1])
nfitid = nbas*2
off += n
bas[nfitid,ATOM_OF ] = 0
bas[nfitid,ANG_OF ] = 0
bas[nfitid,KAPPA_OF] = 0
bas[nfitid,NPRIM_OF] = 1
bas[nfitid,NCTR_OF ] = 1
bas[nfitid,PTR_EXP ] = off
env[off+0] = 0
off += 1
bas[nfitid,PTR_COEFF] = off
env[off+0] = 2 * numpy.sqrt(numpy.pi)
nfitid1 = nbas*2 + 1
off += n
bas[nfitid1,ATOM_OF ] = 0
bas[nfitid1,ANG_OF ] = 0
bas[nfitid1,KAPPA_OF] = 0
bas[nfitid1,NPRIM_OF] = 1
bas[nfitid1,NCTR_OF ] = 1
bas[nfitid1,PTR_EXP ] = off
env[off+0] = 0
off += 1
bas[nfitid1,PTR_COEFF] = off
env[off+0] = 2 * numpy.sqrt(numpy.pi)
natm = ctypes.c_int(natm)
nbas = ctypes.c_int(nbas)
c_atm = atm.ctypes.data_as(ctypes.c_void_p)
c_bas = bas.ctypes.data_as(ctypes.c_void_p)
c_env = env.ctypes.data_as(ctypes.c_void_p)
opt = ctypes.POINTER(ctypes.c_void_p)()
_cint.CINTlen_spinor.restype = ctypes.c_int
def close(v1, vref, count, place):
return round(abs(v1-vref)/count, place) == 0
def test_int3c2e_sph(name, fnref, vref, dim, place):
intor = getattr(_cint, name)
intoref = getattr(_cint, fnref)
intor.restype = ctypes.c_void_p
op = numpy.empty(1000000*dim)
pop = op.ctypes.data_as(ctypes.c_void_p)
opref = numpy.empty(1000000*dim)
pref = opref.ctypes.data_as(ctypes.c_void_p)
v1 = 0
cnt = 0
for k in range(nbas.value):
l = nfitid
bas[l,ATOM_OF] = bas[k,ATOM_OF]
for j in range(nbas.value):
for i in range(nbas.value):
di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]
dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
nd = di*dj*dk*dim
shls = (ctypes.c_int * 4)(i, j, k, l)
intoref(pref, shls, c_atm, natm, c_bas, nbas, c_env, opt)
intor(pop, shls, c_atm, natm, c_bas, nbas, c_env, opt)
if not numpy.allclose(opref[:nd], op[:nd]):
print('Fail:', name, i,j,k)
v1 += abs(numpy.array(op[:nd])).sum()
cnt += nd
if close(v1, vref, cnt, place):
print("pass: ", name)
else:
print("* FAIL: ", name, ". err:", '%.16g' % abs(v1-vref), "/", vref)
def sf2spinor(mat, i, j, bas):
import pyscf.symm.cg
import scipy.linalg
assert(mat.ndim == 3)
l1 = bas[i,ANG_OF]
l2 = bas[j,ANG_OF]
d1 = bas[i,NCTR_OF]
d2 = bas[j,NCTR_OF]
u1a, u1b = pyscf.gto.mole.sph2spinor_l(l1)
u2a, u2b = pyscf.gto.mole.sph2spinor_l(l2)
u1a = scipy.linalg.block_diag(*((u1a,)*d1))
u1b = scipy.linalg.block_diag(*((u1b,)*d1))
u2a = scipy.linalg.block_diag(*((u2a,)*d2))
u2b = scipy.linalg.block_diag(*((u2b,)*d2))
u1 = numpy.vstack((u1a,u1b))
u2 = numpy.vstack((u2a,u2b))
m, n, k = mat.shape
matab = numpy.zeros((m*2,n*2,k))
matab[:m,:n,:] = matab[m:,n:,:] = mat
zmat = numpy.einsum('pjk,pi->ijk', matab, u1.conj())
zmat = numpy.einsum('ipk,pj->ijk', zmat, u2)
return zmat
def test_int3c2e_spinor(name, fnref, vref, dim, place):
abas = bas.copy()
abas[:,KAPPA_OF] = 0
c_bas = abas.ctypes.data_as(ctypes.c_void_p)
intor = getattr(_cint, name)
intoref = getattr(_cint, fnref)
intor.restype = ctypes.c_void_p
v1 = 0
cnt = 0
for k in range(nbas.value):
l = nfitid
for j in range(nbas.value):
for i in range(nbas.value):
di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]
dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
shls = (ctypes.c_int * 4)(i, j, k, l)
opref = numpy.empty((di,dj,dk,dim), order='F')
intoref(opref.ctypes.data_as(ctypes.c_void_p), shls,
c_atm, natm, c_bas, nbas, c_env, opt)
zmat = sf2spinor(opref[:,:,:,0], i, j, bas)
di = (bas[i,ANG_OF] * 4 + 2) * bas[i,NCTR_OF]
dj = (bas[j,ANG_OF] * 4 + 2) * bas[j,NCTR_OF]
dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
op = numpy.empty((di,dj,dk,dim), order='F', dtype=numpy.complex)
intor(op.ctypes.data_as(ctypes.c_void_p), shls,
c_atm, natm, c_bas, nbas, c_env, opt)
if not numpy.allclose(zmat, op[:,:,:,0]):
print('Fail:', name, i,j,k)
v1 += abs(numpy.array(op)).sum()
cnt += op.size
if close(v1, vref, cnt, place):
print("pass: ", name)
else:
print("* FAIL: ", name, ". err:", '%.16g' % abs(v1-vref), "/", vref)
def test_int2c_sph(name, fnref, vref, dim, place):
intor = getattr(_cint, name)
intoref = getattr(_cint, fnref)
intor.restype = ctypes.c_void_p
op = numpy.empty(1000000*dim)
pop = op.ctypes.data_as(ctypes.c_void_p)
opref = numpy.empty(1000000*dim)
pref = opref.ctypes.data_as(ctypes.c_void_p)
v1 = 0
cnt = 0
for k in range(nbas.value):
for i in range(nbas.value):
j = nfitid1
bas[j,ATOM_OF] = bas[i,ATOM_OF]
di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
nd = di*dk*dim
shls = (ctypes.c_int * 3)(i, j, k)
intoref(pref, shls, c_atm, natm, c_bas, nbas, c_env, opt)
shls = (ctypes.c_int * 2)(i, k)
intor(pop, shls, c_atm, natm, c_bas, nbas, c_env, opt)
if not numpy.allclose(opref[:nd], op[:nd]):
print('Fail:', name, i,k)
v1 += abs(numpy.array(op[:nd])).sum()
cnt += nd
if close(v1, vref, cnt, place):
print("pass: ", name)
else:
print("* FAIL: ", name, ". err:", '%.16g' % abs(v1-vref), "/", vref)
if __name__ == "__main__":
if "--high-prec" in sys.argv:
def close(v1, vref, count, place):
return round(abs(v1-vref), place) == 0
for f in (('cint3c2e_sph', 'cint2e_sph', 1586.350797432699, 1, 10),
('cint3c2e_ip1_sph', 'cint2e_ip1_sph', 2242.052249267909, 3, 10),
('cint3c2e_ip2_sph', 'cint2e_ip2_sph', 1970.982483860059, 3, 10),
):
test_int3c2e_sph(*f)
if "--quick" not in sys.argv:
for f in (('cint3c2e', 'cint3c2e_sph', 4412.363002831547, 1, 10),
):
test_int3c2e_spinor(*f)
# for f in (('cint2c2e_sph', 'cint2e_sph', 782.3104849606677, 1, 10),
# ('cint2c2e_ip1_sph', 'cint2e_ip1_sph', 394.6515972715189, 3, 10),
# ('cint2c2e_ip2_sph', 'cint2e_ip2_sph', 394.6515972715189, 3, 10),
# ):
# test_int2c2e_sph(*f)
for f in (('cint2c2e_sph', 'cint3c2e_sph', 782.3104849606677, 1, 10),
('cint2c2e_ip1_sph', 'cint3c2e_ip1_sph', 394.6515972715189, 3, 10),
('cint2c2e_ip2_sph', 'cint3c2e_ip2_sph', 394.6515972715189, 3, 10),
('cint1e_ovlp_sph', 'cint3c1e_sph', 288.739411257669, 1, 10),
#('cint1e_kin_sph'*2.0, 'cint3c1e_p2_sph', 1662.148571297274, 1, 10),
('cint1e_r2_origj_sph', 'cint3c1e_r2_origk_sph', 1467.040217557744, 1, 10),
):
test_int2c_sph(*f)
|
[
"firman.kasim@gmail.com"
] |
firman.kasim@gmail.com
|
0707541654c4db1973b921facf3cb8fe979f8f29
|
9fc80c5a9521c0b95dfbd96f8d0ee7dfe9959a5b
|
/hermes.py
|
0ada278e8587efd9258aefa43e3ea5b7aee24455
|
[
"MIT"
] |
permissive
|
vihanchaudhry/hermes
|
e444a6b33971a5381ee51b077d4dbb57bbe00c67
|
87ffd6867a0a4ab64738fee68ab2e8cd35d61428
|
refs/heads/master
| 2021-01-18T09:55:57.188526
| 2016-03-05T06:39:41
| 2016-03-05T06:39:41
| 52,680,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
from run import app
from utils import geo
from utils import lyft
# get ArcGIS API token
arcgis_token = geo.get_token()
# geocode address - get address from frontend
locations = geo.geocode(arcgis_token, "Empire State Building")
# get 2-legged lyft token
lyft_public_token = lyft.get_public_token()
# get cost and ETA
eta = lyft.get_eta(lyft_public_token, locations)
# get 3-legged lyft token
# request lyft
app.run(debug=True)
|
[
"vihanchaudhry@gmail.com"
] |
vihanchaudhry@gmail.com
|
eb57ae2ffaf8eead62cb3066e056d86e4847ff16
|
09605a517c51f9538c6b6132e1fc95a57944ba8c
|
/src/flask_rest/models.py
|
0ae62c222d84446a05939dbcef22c77e1c003995
|
[] |
no_license
|
alex-pro27/flask-rest
|
d27272c0eae527da7470e8b43b98ef828c1265f3
|
711ed2b3600a10a2b37f2e5867a61fc10908f462
|
refs/heads/master
| 2020-04-18T08:47:52.854494
| 2019-01-24T17:37:07
| 2019-01-24T17:37:07
| 167,333,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,424
|
py
|
import binascii
import datetime
import os
import mongoengine
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import cached_property
class Role(mongoengine.Document):
name = mongoengine.IntField(required=True, unique=True)
permissions = mongoengine.ListField()
meta = {'allow_inheritance': True}
class User(mongoengine.Document):
firstname = mongoengine.StringField()
lastname = mongoengine.StringField()
login = mongoengine.StringField(unique=True)
passwd = mongoengine.StringField(required=True)
reg_date = mongoengine.DateTimeField(default=datetime.datetime.now)
role = mongoengine.ListField(mongoengine.ReferenceField(Role))
active = mongoengine.BooleanField(default=True)
meta = {'allow_inheritance': True}
@cached_property
def token(self):
token = Token.objects(user_id=self.id).first()
if not token:
token = Token.objects.create(user=self)
return token
@property
def full_name(self):
return "{0} {1}".format(self.lastname, self.firstname)
def set_passwd(self, passwd):
self.passwd = generate_password_hash(passwd)
def check_passwd(self, passwd):
return check_password_hash(self.passwd, passwd)
class TokenManager(mongoengine.QuerySet):
def create(self, **kwargs):
assert kwargs.get("user"), "Do not specify the user parameter"
Token.objects(user_id=kwargs["user"].id).delete()
def gen_token_key():
token_key = Token.token_hex()
if Token.objects(token_key=token_key).first():
return gen_token_key()
return token_key
token = Token(user_id=kwargs["user"].id, token_key=gen_token_key())
token.save()
return token
class Token(mongoengine.Document):
DEFAULT_ENTROPY = 32
token_key = mongoengine.StringField(unique=True, required=True)
user_id = mongoengine.ObjectIdField()
meta = {'strict': False, 'queryset_class': TokenManager}
@cached_property
def user(self):
return User.objects(id=self.user_id).first()
@staticmethod
def token_bytes(nbytes=None):
if nbytes is None:
nbytes = Token.DEFAULT_ENTROPY
return os.urandom(nbytes)
@staticmethod
def token_hex(nbytes=None):
return binascii.hexlify(Token.token_bytes(nbytes)).decode('ascii')
|
[
"al.pro@mail.ru"
] |
al.pro@mail.ru
|
87ab42ad572197a04432b2035cd17f730119665d
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4312/codes/1745_3089.py
|
aeb925668ea927a90f7ef556434e6c5f25b40379
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
x = int(input("Insira um numero: "))
soma = 0
x = x
while(x != 0):
soma = soma + x
x = int(input("insira um numero: "))
print(soma)
if(soma > 0):
soma = "Direita"
elif(soma == 0):
soma = "Inicial"
else:
soma = "Esquerda"
print(soma)
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
918f199c07f0dc30ea9455812cb769708d264449
|
febe91c2fd51d990657a9d7c15ac267bd9488076
|
/ejercicio2.py
|
0654df64598f68071ac14a201e9a869feb36714f
|
[] |
no_license
|
jeancarlos96/Examen-Bimestral-Grupo-1
|
cc8b9097b7a4924076bdc90e2d6646175d6ffb22
|
51951cd103afd281023c8f4339658f59b16021d8
|
refs/heads/master
| 2021-01-17T11:40:19.445635
| 2016-06-09T15:02:35
| 2016-06-09T15:02:35
| 60,771,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
#Ejercicio 2
import math
def multiplos():
x= int(input("Ingrese el numero del cual desea conocer los multiplos: "))
cont= x
for i in range(x,1000):
if (i%x==0):
cont=x+i
print(cont)
else: i=x
archi=open('ejercicio2.txt','w')
archi.close()
archi=open('ejercicio2.txt','a')
archi.write("Los multiplos de " +str(x) +" son: " +str(cont))
archi.close()
multiplos()
|
[
"michu.arias17@hotmail.es"
] |
michu.arias17@hotmail.es
|
27bed4b3b9d5ab8a62d085ec97357fdbee16a0be
|
d1ddb9e9e75d42986eba239550364cff3d8f5203
|
/google-cloud-sdk/lib/surface/functions/logs/read.py
|
453e5d6d9593c450adddcbdd3b842106e95fa20d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bopopescu/searchparty
|
8ecd702af0d610a7ad3a8df9c4d448f76f46c450
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
refs/heads/master
| 2022-11-19T14:44:55.421926
| 2017-07-28T14:55:43
| 2017-07-28T14:55:43
| 282,495,798
| 0
| 0
|
Apache-2.0
| 2020-07-25T17:48:53
| 2020-07-25T17:48:52
| null |
UTF-8
|
Python
| false
| false
| 5,713
|
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions get-logs' command."""
from googlecloudsdk.api_lib.functions import util
from googlecloudsdk.api_lib.logging import common as logging_common
from googlecloudsdk.api_lib.logging import util as logging_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.functions import flags
from googlecloudsdk.core import properties
class GetLogs(base.ListCommand):
"""Show logs produced by functions.
Display log entries produced by all functions running in a region, or by a
single function if it is specified through a command argument. By default,
when no extra flags are specified, the most recent 20 log entries
are displayed.
"""
SEVERITIES = ['DEBUG', 'INFO', 'ERROR']
@staticmethod
def Args(parser):
"""Register flags for this command."""
flags.AddRegionFlag(parser)
base.LIMIT_FLAG.RemoveFromParser(parser)
parser.add_argument(
'name', nargs='?',
help=('Name of the function which logs are to be displayed. If no name '
'is specified, logs from all functions are displayed.'))
parser.add_argument(
'--execution-id',
help=('Execution ID for which logs are to be displayed.'))
parser.add_argument(
'--start-time', required=False, type=arg_parsers.Datetime.Parse,
help=('Return only log entries which timestamps are not earlier than '
'the specified time. The timestamp must be in RFC3339 UTC "Zulu" '
'format. If --start-time is specified, the command returns '
'--limit earliest log entries which appeared after '
'--start-time.'))
parser.add_argument(
'--end-time', required=False, type=arg_parsers.Datetime.Parse,
help=('Return only log entries which timestamps are not later than '
'the specified time. The timestamp must be in RFC3339 UTC "Zulu" '
'format. If --end-time is specified but --start-time is not, the '
'command returns --limit latest log entries which appeared '
'before --end-time.'))
parser.add_argument(
'--limit', required=False, type=arg_parsers.BoundedInt(1, 1000),
default=20,
help=('Number of log entries to be fetched; must not be greater than '
'1000.'))
parser.add_argument(
'--min-log-level', choices=GetLogs.SEVERITIES,
help='Minimum level of logs to be fetched.')
@util.CatchHTTPErrorRaiseHTTPException
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
A generator of objects representing log entries.
"""
if not args.IsSpecified('format'):
args.format = self._Format(args)
return self._Run(args)
def _Run(self, args):
region = properties.VALUES.functions.region.Get()
log_filter = ['resource.type="cloud_function"',
'resource.labels.region="%s"' % region,
'logName:"cloud-functions"']
if args.name:
log_filter.append('resource.labels.function_name="%s"' % args.name)
if args.execution_id:
log_filter.append('labels.execution_id="%s"' % args.execution_id)
if args.min_log_level:
log_filter.append('severity>=%s' % args.min_log_level)
if args.start_time:
order = 'ASC'
log_filter.append(
'timestamp>="%s"' % logging_util.FormatTimestamp(args.start_time))
else:
order = 'DESC'
if args.end_time:
log_filter.append(
'timestamp<="%s"' % logging_util.FormatTimestamp(args.end_time))
log_filter = ' '.join(log_filter)
# TODO(b/36057251): Consider using paging for listing more than 1000 log
# entries. However, reversing the order of received latest N entries before
# a specified timestamp would be problematic with paging.
entries = logging_common.FetchLogs(
log_filter, order_by=order, limit=args.limit)
if order == 'DESC':
entries = reversed(list(entries)) # Force generator expansion with list.
for entry in entries:
row = {'log': entry.textPayload}
if entry.severity:
severity = str(entry.severity)
if severity in GetLogs.SEVERITIES:
# Use short form (first letter) for expected severities.
row['level'] = severity[0]
else:
# Print full form of unexpected severities.
row['level'] = severity
if entry.resource and entry.resource.labels:
for label in entry.resource.labels.additionalProperties:
if label.key == 'function_name':
row['name'] = label.value
if entry.labels:
for label in entry.labels.additionalProperties:
if label.key == 'execution_id':
row['execution_id'] = label.value
if entry.timestamp:
row['time_utc'] = util.FormatTimestamp(entry.timestamp)
yield row
def _Format(self, args):
return 'table(level,name,execution_id,time_utc,log)'
|
[
"vinvivo@users.noreply.github.com"
] |
vinvivo@users.noreply.github.com
|
bc89c6ed17eebba6e4b047a95db871b13a6b6d58
|
b4a39144b697ec14804ebf1fa686a72b7175d012
|
/venv/lib/python3.9/site-packages/consolekit/_types.py
|
9e82da0df2c87f42ec48b3dfb22e639dda174de1
|
[] |
no_license
|
felipe1297/networkPacketGetter
|
4993aacf840db7fef7c474b78da8b875c6a4ad46
|
64c071d6d53576e0fa6ed0fe544bbc4811546991
|
refs/heads/master
| 2023-04-09T13:50:23.722774
| 2021-04-13T04:52:31
| 2021-04-13T04:52:31
| 357,424,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,507
|
py
|
#!/usr/bin/env python3
#
# _types.py
#
# Copyright © 2020 Dominic Davis-Foster <dominic@davis-foster.co.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# stdlib
from typing import Any, Callable, Optional, Tuple, Union
# 3rd party
from click import Context, Parameter, ParamType
_ConvertibleType = Union[
type,
ParamType,
Tuple[Union[type, ParamType], ...],
Callable[[str], Any],
Callable[[Optional[str]], Any]
]
Callback = Callable[[Context, Parameter, str], Any]
|
[
"48300568+felipe1297@users.noreply.github.com"
] |
48300568+felipe1297@users.noreply.github.com
|
7b3dcba50763bb0d726e643e12be3630fd5d272c
|
0ad217b3be6b7c63ba1d8890f74f30a81f7234e0
|
/meta_stra_framwork/src/params_1.py
|
d0dc5a405b1c1040bc4fba6a4095baad95e24451
|
[] |
no_license
|
fagan2888/big
|
c1bf2cd01d405674482850f9ff7ad18d160c02cb
|
c0cc8c57b90f4b1da2d023abeea4c42f06cf242e
|
refs/heads/master
| 2022-11-17T21:45:23.436124
| 2020-07-11T14:15:09
| 2020-07-11T14:15:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,386
|
py
|
import sys,os
from collections import defaultdict
import tushare as ts
import pandas as pd
def get_code_list():
code_list = ts.get_hs300s()['code']
for i in range(len(code_list)):
if(str(code_list[i])[0]=='6'):
code_list[i] = str(code_list[i]) + '.XSHG'
else:
code_list[i] = str(code_list[i]) + '.XSHE'
return code_list.tolist()
def get_all_code_list():
all_code_list = pd.read_excel(now_file+'/all_code.xlsx',index_col = 0).values
return [x[0] for x in all_code_list.tolist()]
def get_st_code():
st = pd.read_excel('/Users/wode/Desktop/学校/系统方案备份/sig_inter.xlsx',index_col = 0)
return st[st['5day_fre']>0.5].index.tolist()
now_file = os.path.abspath(os.path.dirname(__file__))
up_file = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(up_file)
PARAMS = defaultdict(
begin_date = 20150101,#信号计算开始日期
code_list = ['000001.XSHE','002739.XSHE'],#,'600000.XSHG','002384.XSHE','300684.XSHE','002533.XSHE','002335.XSHE','002048.XSHE','002402.XSHE','300207.XSHE','002139.XSHE','002508.XSHE','300166.XSHE','300383.XSHE','300308.XSHE','300136.XSHE','002183.XSHE','300166.XSHE'],
#code_list = get_all_code_list(),#
#code_list = get_code_list(),#信号计算的股票池
get_code_data = True,#False#是否重新获得原始数据
#get_code_data = False,#是否重新获得原始数据
HS_code = '999999.XSHG',#'399300.XSHE',#信号中的大盘信号代码
signal_lf = [1,1,1,1,1], #分别对应下面五种信号的生命周期,阈值,交叉,趋势,比较,计数
#_signal_save_path = up_file+'/result/mul/', #信号结果储存地址
_index_save_path = up_file+'/index/',
# 信号的表达式,第一个为做多买入信号,第二个为做多卖出信号
# 信号的构建方法为
# 阈值型信号 指标名+#+阈值+#+方向(1为大于,0为小于)+&thre,如果为大盘信号,则在最后加上&HS
# 交叉型信号 指标1名+#+指标2名+#+方向(1为金叉,0为死叉)+&cross,如果为大盘信号,则在最后加上&HS
# 趋势型信号 指标1名+#+趋势延续天数+#+方向(1为上涨,0为下跌)+&trend,如果为大盘信号,则在最后加上&HS
# 比较型信号 指标1名+#+指标2名+#+方向(1为指标1大于指标2,0为指标1小于指标2)+&diff,如果为大盘信号,则在最后加上&HS
# 计数型信号 其他信号+&+时间对标信号+&次数+&其他信号类型&+时间对标信号类型+&+times 注意
# 比如 high#close_EMA_20#1&close_EMA_20#close_EMA_50#0&1&cross&cross× 在写其他信号和时间对标信号时不写类型
# 如果不想要时间对标信号,可以写一个永远不会成立的时间对标信号,比如low#high#1&diff,这样
# 计数型信号就会计算100天内其他信号发生的次数
# 信号组合可以使用+和*进行或和且逻辑运算,指标名称可在index_24中查询
#_Expression = ['close_MA_5#close_MA_10#1&cross+close_MA_5#close_MA_30#1&cross+close#close_shift_1_multiply_1.005#1&diff+close#close_shift_1_multiply_1.01#1&diff+close#close_shift_1_multiply_1.02#1&diff','close_MA_5#3250#0&HS&thre'],
#_Expression = ['K#40#1&thre&HS+close#close_shift_1_multiply_1.02#1&diff', 'K#40#0&thre&HS*MACD#3#0&trend'],
#_Expression = ['close_EMA_7#close_EMA_15#1&diff*close_EMA_15#close_EMA_25#1&diff*close_EMA_15#2#1&trend*close_EMA_25#2#1&trend*MACD#0#1&thre*close#close_shift_4#1&diff*K#40#1&thre&HS','MACD#0#0&thre+K#40#0&thre&HS'],#+close#close_MA_10#0&cross'],
#_Expression = ['close_EMA_7#close_EMA_15#1&diff*close_EMA_15#close_EMA_25#1&diff*close#2#0&trend&HS*MACD#3#1&trend*MB#3#1&trend*K#40#1&thre&HS', 'close_EMA_7#close_EMA_15#0&diff*close#2#1&trend&HS*MACD#3#0&trend*MB#3#0&trend*K#40#0&thre&HS'],
#'MACD#0#0&thre*K#40#0&thre&HS*MACD#0#0&thre+MACD#0#0&thre*K#40#0&thre&HS*close_EMA_25#2#1&trend+MACD#0#0&thre*K#40#0&thre&HS*K#40#0&thre&HS'],
#_Expression = ['close#2#0&trend&HS*MACD#2#1&trend*MB#2#1&trend','close#2#1&trend&HS*MACD#2#0&trend*MB#2#0&trend'],
#_Expression = ['close_EMA_12#close_EMA_26#1&cross*RSI_6#RSI_12#1&cross*J#D#1&cross','close_EMA_12#close_EMA_26#1&cross+RSI_6#RSI_12#1&cross+J#D#1&cross'],
#=['close_EMA_20#close_EMA_50#1&diff*close_EMA_20#close_EMA_50#1&cross*high#close_EMA_20#0&close_EMA_20#close_EMA_50#1&3&cross&cross×+close_EMA_20#close_EMA_50#1&diff*close_EMA_20#close_EMA_50#1&cross*low#close_EMA_50#1&close_EMA_20#close_EMA_50#1&3&cross&cross×',
#'close_EMA_20#close_EMA_50#0&diff*close_EMA_20#close_EMA_50#0&cross*high#close_EMA_20#1&close_EMA_20#close_EMA_50#0&1&cross&cross×'],
#_Expression = ['K#80#1&HS&thre*close#close_shift_1#1&diff*K#90#1&thre+K#80#1&HS&thre*close#close_shift_1#1&diff*D_shift_1#10#0&thre+K#80#1&HS&thre*close#close_shift_1#1&diff*D_shift_1#80#1&thre+K#80#1&HS&thre*close_MA_10_shift_1#close_MA_5#1&diff*K#90#1&thre+K#80#1&HS&thre*close_MA_10_shift_1#close_MA_5#1&diff*D_shift_1#10#0&thre+K#80#1&HS&thre*close_MA_10_shift_1#close_MA_5#1&diff*D_shift_1#80#1&thre+K#80#1&HS&thre*RSI_12#20#1&thre*RSI_12#40#0&thre*K#90#1&thre+K#80#1&HS&thre*RSI_12#20#1&thre*RSI_12#40#0&thre*D_shift_1#10#0&thre+K#80#1&HS&thre*RSI_12#20#1&thre*RSI_12#40#0&thre*D_shift_1#80#1&thre+D_shift_1#80#1&HS&thre*close#close_shift_1#1&diff*K#90#1&thre+D_shift_1#80#1&HS&thre*close#close_shift_1#1&diff*D_shift_1#10#0&thre+D_shift_1#80#1&HS&thre*close#close_shift_1#1&diff*D_shift_1#80#1&thre+D_shift_1#80#1&HS&thre*close_MA_10_shift_1#close_MA_5#1&diff*K#90#1&thre+D_shift_1#80#1&HS&thre*close_MA_10_shift_1#close_MA_5#1&diff*D_shift_1#10#0&thre+D_shift_1#80#1&HS&thre*close_MA_10_shift_1#close_MA_5#1&diff*D_shift_1#80#1&thre+D_shift_1#80#1&HS&thre*RSI_12#20#1&thre*RSI_12#40#0&thre*K#90#1&thre+D_shift_1#80#1&HS&thre*RSI_12#20#1&thre*RSI_12#40#0&thre*D_shift_1#10#0&thre+D_shift_1#80#1&HS&thre*RSI_12#20#1&thre*RSI_12#40#0&thre*D_shift_1#80#1&thre+D_shift_1#90#1&HS&thre*close#close_shift_1#1&diff*K#90#1&thre+D_shift_1#90#1&HS&thre*close#close_shift_1#1&diff*D_shift_1#10#0&thre+D_shift_1#90#1&HS&thre*close#close_shift_1#1&diff*D_shift_1#80#1&thre+D_shift_1#90#1&HS&thre*close_MA_10_shift_1#close_MA_5#1&diff*K#90#1&thre+D_shift_1#90#1&HS&thre*close_MA_10_shift_1#close_MA_5#1&diff*D_shift_1#10#0&thre+D_shift_1#90#1&HS&thre*close_MA_10_shift_1#close_MA_5#1&diff*D_shift_1#80#1&thre+D_shift_1#90#1&HS&thre*RSI_12#20#1&thre*RSI_12#40#0&thre*K#90#1&thre+D_shift_1#90#1&HS&thre*RSI_12#20#1&thre*RSI_12#40#0&thre*D_shift_1#10#0&thre+D_shift_1#90#1&HS&thre*RSI_12#20#1&thre*RSI_12#40#0&thre*D_shift_1#80#1&thre', 
'close_MA_20#close#1&HS&diff*open#open_shift_1#1&HS&diff*D_shift_1#90#1&HS&thre+close_MA_20#close#1&HS&diff*open#open_shift_1#1&HS&diff*open#close#1&diff+close_MA_20#close#1&HS&diff*open#open_shift_1#1&HS&diff*K#10#0&thre+close_MA_20#close#1&HS&diff*D_shift_1#20#0&thre*D_shift_1#90#1&HS&thre+close_MA_20#close#1&HS&diff*D_shift_1#20#0&thre*open#close#1&diff+close_MA_20#close#1&HS&diff*D_shift_1#20#0&thre*K#10#0&thre+close_MA_20#close#1&HS&diff*D_shift_1#90#1&thre*D_shift_1#90#1&HS&thre+close_MA_20#close#1&HS&diff*D_shift_1#90#1&thre*open#close#1&diff+close_MA_20#close#1&HS&diff*D_shift_1#90#1&thre*K#10#0&thre+close_MA_20_shift_1#close_MA_20#1&HS&diff*open#open_shift_1#1&HS&diff*D_shift_1#90#1&HS&thre+close_MA_20_shift_1#close_MA_20#1&HS&diff*open#open_shift_1#1&HS&diff*open#close#1&diff+close_MA_20_shift_1#close_MA_20#1&HS&diff*open#open_shift_1#1&HS&diff*K#10#0&thre+close_MA_20_shift_1#close_MA_20#1&HS&diff*D_shift_1#20#0&thre*D_shift_1#90#1&HS&thre+close_MA_20_shift_1#close_MA_20#1&HS&diff*D_shift_1#20#0&thre*open#close#1&diff+close_MA_20_shift_1#close_MA_20#1&HS&diff*D_shift_1#20#0&thre*K#10#0&thre+close_MA_20_shift_1#close_MA_20#1&HS&diff*D_shift_1#90#1&thre*D_shift_1#90#1&HS&thre+close_MA_20_shift_1#close_MA_20#1&HS&diff*D_shift_1#90#1&thre*open#close#1&diff+close_MA_20_shift_1#close_MA_20#1&HS&diff*D_shift_1#90#1&thre*K#10#0&thre+D_shift_1#30#0&HS&thre*open#open_shift_1#1&HS&diff*D_shift_1#90#1&HS&thre+D_shift_1#30#0&HS&thre*open#open_shift_1#1&HS&diff*open#close#1&diff+D_shift_1#30#0&HS&thre*open#open_shift_1#1&HS&diff*K#10#0&thre+D_shift_1#30#0&HS&thre*D_shift_1#20#0&thre*D_shift_1#90#1&HS&thre+D_shift_1#30#0&HS&thre*D_shift_1#20#0&thre*open#close#1&diff+D_shift_1#30#0&HS&thre*D_shift_1#20#0&thre*K#10#0&thre+D_shift_1#30#0&HS&thre*D_shift_1#90#1&thre*D_shift_1#90#1&HS&thre+D_shift_1#30#0&HS&thre*D_shift_1#90#1&thre*open#close#1&diff+D_shift_1#30#0&HS&thre*D_shift_1#90#1&thre*K#10#0&thre'],
#armax最高
#_Expression = ['K#90#1&HS&thre*D_shift_1#70#1&thre+K#90#1&HS&thre*OBV#0#0&thre+K_shift_1#10#0&HS&thre*D_shift_1#70#1&thre+K_shift_1#10#0&HS&thre*OBV#0#0&thre+D_shift_1#10#0&HS&thre*D_shift_1#70#1&thre+D_shift_1#10#0&HS&thre*OBV#0#0&thre', 'close_MA_10_shift_1#close_MA_10#1&HS&diff*D_shift_1#80#1&HS&thre+close_MA_10_shift_1#close_MA_10#1&HS&diff*K_shift_1#90#1&thre+close_MA_10_shift_1#close_MA_10#1&HS&diff*OBV#0#0&thre+K_shift_1#20#0&HS&thre*D_shift_1#80#1&HS&thre+K_shift_1#20#0&HS&thre*K_shift_1#90#1&thre+K_shift_1#20#0&HS&thre*OBV#0#0&thre+D_shift_1#10#0&HS&thre*D_shift_1#80#1&HS&thre+D_shift_1#10#0&HS&thre*K_shift_1#90#1&thre+D_shift_1#10#0&HS&thre*OBV#0#0&thre'],
#armax最高简化
#_Expression = ['K#90#1&HS&thre+D_shift_1#10#0&HS&thre', 'close_MA_10_shift_1#close_MA_10#1&HS&diff*D_shift_1#80#1&HS&thre+close_MA_10_shift_1#close_MA_10#1&HS&diff*K_shift_1#90#1&thre+K_shift_1#20#0&HS&thre*D_shift_1#80#1&HS&thre+K_shift_1#20#0&HS&thre*K_shift_1#90#1&thre+D_shift_1#10#0&HS&thre*D_shift_1#80#1&HS&thre+D_shift_1#10#0&HS&thre*K_shift_1#90#1&thre'],
_Expression = ['K#90#1&HS&thre', 'close_MA_10_shift_1#close_MA_10#1&HS&diff*D_shift_1#80#1&HS&thre'],
#19最高
#_Expression = ['K#80#1&HS&thre*close_MA_10#close_MA_5_shift_1#1&diff*D#90#1&thre+K#80#1&HS&thre*close_MA_10#close_MA_5_shift_1#1&diff*D_shift_1#10#0&thre+K#80#1&HS&thre*close_MA_10#close_MA_5_shift_1#1&diff*RSI_12#80#1&thre*RSI_12#100#0&thre+K#80#1&HS&thre*close_MA_10_shift_1#close#1&diff*D#90#1&thre+K#80#1&HS&thre*close_MA_10_shift_1#close#1&diff*D_shift_1#10#0&thre+K#80#1&HS&thre*close_MA_10_shift_1#close#1&diff*RSI_12#80#1&thre*RSI_12#100#0&thre+K#80#1&HS&thre*OBV#OBV_shift_1#1&diff*D#90#1&thre+K#80#1&HS&thre*OBV#OBV_shift_1#1&diff*D_shift_1#10#0&thre+K#80#1&HS&thre*OBV#OBV_shift_1#1&diff*RSI_12#80#1&thre*RSI_12#100#0&thre+D_shift_1#80#1&HS&thre*close_MA_10#close_MA_5_shift_1#1&diff*D#90#1&thre+D_shift_1#80#1&HS&thre*close_MA_10#close_MA_5_shift_1#1&diff*D_shift_1#10#0&thre+D_shift_1#80#1&HS&thre*close_MA_10#close_MA_5_shift_1#1&diff*RSI_12#80#1&thre*RSI_12#100#0&thre+D_shift_1#80#1&HS&thre*close_MA_10_shift_1#close#1&diff*D#90#1&thre+D_shift_1#80#1&HS&thre*close_MA_10_shift_1#close#1&diff*D_shift_1#10#0&thre+D_shift_1#80#1&HS&thre*close_MA_10_shift_1#close#1&diff*RSI_12#80#1&thre*RSI_12#100#0&thre+D_shift_1#80#1&HS&thre*OBV#OBV_shift_1#1&diff*D#90#1&thre+D_shift_1#80#1&HS&thre*OBV#OBV_shift_1#1&diff*D_shift_1#10#0&thre+D_shift_1#80#1&HS&thre*OBV#OBV_shift_1#1&diff*RSI_12#80#1&thre*RSI_12#100#0&thre+D_shift_1#90#1&HS&thre*close_MA_10#close_MA_5_shift_1#1&diff*D#90#1&thre+D_shift_1#90#1&HS&thre*close_MA_10#close_MA_5_shift_1#1&diff*D_shift_1#10#0&thre+D_shift_1#90#1&HS&thre*close_MA_10#close_MA_5_shift_1#1&diff*RSI_12#80#1&thre*RSI_12#100#0&thre+D_shift_1#90#1&HS&thre*close_MA_10_shift_1#close#1&diff*D#90#1&thre+D_shift_1#90#1&HS&thre*close_MA_10_shift_1#close#1&diff*D_shift_1#10#0&thre+D_shift_1#90#1&HS&thre*close_MA_10_shift_1#close#1&diff*RSI_12#80#1&thre*RSI_12#100#0&thre+D_shift_1#90#1&HS&thre*OBV#OBV_shift_1#1&diff*D#90#1&thre+D_shift_1#90#1&HS&thre*OBV#OBV_shift_1#1&diff*D_shift_1#10#0&thre+D_shift_1#90#1&HS&thre*OBV#OBV_shift_1#1&diff*RSI_12#80#1&thre*RSI
_12#100#0&thre', 'close_MA_10_shift_1#close_MA_10#1&HS&diff*D_shift_1#80#1&HS&thre*D#70#1&HS&thre+close_MA_10_shift_1#close_MA_10#1&HS&diff*D_shift_1#80#1&HS&thre*close_MA_5#close#1&diff+close_MA_10_shift_1#close_MA_10#1&HS&diff*open#close_MA_5#1&diff*D#70#1&HS&thre+close_MA_10_shift_1#close_MA_10#1&HS&diff*open#close_MA_5#1&diff*close_MA_5#close#1&diff+close_MA_10_shift_1#close_MA_10#1&HS&diff*K#80#1&thre*D#70#1&HS&thre+close_MA_10_shift_1#close_MA_10#1&HS&diff*K#80#1&thre*close_MA_5#close#1&diff+close_MA_20#close#1&HS&diff*D_shift_1#80#1&HS&thre*D#70#1&HS&thre+close_MA_20#close#1&HS&diff*D_shift_1#80#1&HS&thre*close_MA_5#close#1&diff+close_MA_20#close#1&HS&diff*open#close_MA_5#1&diff*D#70#1&HS&thre+close_MA_20#close#1&HS&diff*open#close_MA_5#1&diff*close_MA_5#close#1&diff+close_MA_20#close#1&HS&diff*K#80#1&thre*D#70#1&HS&thre+close_MA_20#close#1&HS&diff*K#80#1&thre*close_MA_5#close#1&diff+close_MA_20_shift_1#close_shift_1#1&HS&diff*D_shift_1#80#1&HS&thre*D#70#1&HS&thre+close_MA_20_shift_1#close_shift_1#1&HS&diff*D_shift_1#80#1&HS&thre*close_MA_5#close#1&diff+close_MA_20_shift_1#close_shift_1#1&HS&diff*open#close_MA_5#1&diff*D#70#1&HS&thre+close_MA_20_shift_1#close_shift_1#1&HS&diff*open#close_MA_5#1&diff*close_MA_5#close#1&diff+close_MA_20_shift_1#close_shift_1#1&HS&diff*K#80#1&thre*D#70#1&HS&thre+close_MA_20_shift_1#close_shift_1#1&HS&diff*K#80#1&thre*close_MA_5#close#1&diff'],
# 回测参数,目前设定是第二天开盘买入,当天收盘卖出,每次以资金的三分之一操作,默认
# 是下跌1%时候止损卖出
# 可以在strategy.py中更改
# 更多参数设置可以百度搜索rqalpha
_optimal = True,
_config = {
"base":
{
"benchmark": "399300.XSHE", #基准
"margin_multiplier": 1.4, #
"start_date": "2015-01-01", #回测开始日期
"end_date": "2020-06-26", #回测结束日期
"frequency": "1d", #回测频率
"accounts":{
"stock": 10000000000, #回测本金
#"future": "~",
}
},
"extra":{
#"log_level" : "verbose",
#"log_level": "code:info",
"log_level": "warning",
#"log_level": "error",
},
"mod":{
"sys_analyser":{
"enabled": True,
"report": True,
#"plot": True,
"plot": False
},
"sys_simulation":{
"enabled": True,
"signal": True,
"slippage": 0.0005, #滑点
#"slippage": 0.0,
"matching_type": "current_bar",
"price_limit": False,
"volume_limit": False,
"commission-multiplier": 0,
},
},
}
)
|
[
"lishangzhe@wordemotion.com"
] |
lishangzhe@wordemotion.com
|
b806946c1c6cd57f9c8fd6e0f0ee5c97cdae8692
|
6965d39e13b608527fa5d2db28aa0c5b91fe3c8a
|
/examples/compressor/train_comp.py
|
04a64389edfd5a94169df8e110a071c2f273826b
|
[
"Apache-2.0"
] |
permissive
|
leoauri/auraloss
|
91b8d269879c1185988f2c37d5b5e2392c8b21a6
|
0e3362674ae1b53aa61c6a631fb4e6970c5683c1
|
refs/heads/main
| 2023-07-27T21:16:00.305450
| 2021-09-12T14:38:33
| 2021-09-12T14:38:33
| 405,451,607
| 0
| 0
|
Apache-2.0
| 2021-09-11T18:15:45
| 2021-09-11T18:15:45
| null |
UTF-8
|
Python
| false
| false
| 2,693
|
py
|
import os
import glob
import torch
import torchsummary
import pytorch_lightning as pl
from argparse import ArgumentParser
from tcn import TCNModel
from data import SignalTrainLA2ADataset
parser = ArgumentParser()
# add PROGRAM level args
parser.add_argument("--root_dir", type=str, default="./data")
parser.add_argument("--preload", type=bool, default=False)
parser.add_argument("--sample_rate", type=int, default=44100)
parser.add_argument("--shuffle", type=bool, default=False)
parser.add_argument("--train_subset", type=str, default="train")
parser.add_argument("--val_subset", type=str, default="val")
parser.add_argument("--train_length", type=int, default=32768)
parser.add_argument("--eval_length", type=int, default=32768)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--num_workers", type=int, default=0)
# add model specific args
parser = TCNModel.add_model_specific_args(parser)
# add all the available trainer options to argparse
parser = pl.Trainer.add_argparse_args(parser)
# parse them args
args = parser.parse_args()
# setup the dataloaders
train_dataset = SignalTrainLA2ADataset(
args.root_dir,
subset=args.train_subset,
half=True if args.precision == 16 else False,
preload=args.preload,
length=args.train_length,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
shuffle=args.shuffle,
batch_size=args.batch_size,
num_workers=args.num_workers,
)
val_dataset = SignalTrainLA2ADataset(
args.root_dir,
preload=args.preload,
half=True if args.precision == 16 else False,
subset=args.val_subset,
length=args.eval_length,
)
val_dataloader = torch.utils.data.DataLoader(
val_dataset, shuffle=False, batch_size=2, num_workers=args.num_workers
)
past_logs = sorted(glob.glob(os.path.join("lightning_logs", "*")))
if len(past_logs) > 0:
version = int(os.path.basename(past_logs[-1]).split("_")[-1]) + 1
else:
version = 0
# the losses we will test
if args.train_loss is None:
losses = ["l1", "logcosh", "esr+dc", "stft", "mrstft", "rrstft"]
else:
losses = [args.train_loss]
for loss_fn in losses:
print(f"training with {loss_fn}")
# init logger
logdir = os.path.join("lightning_logs", f"version_{version}", loss_fn)
print(logdir)
args.default_root_dir = logdir
# init the trainer and model
trainer = pl.Trainer.from_argparse_args(args)
print(trainer.default_root_dir)
# set the seed
pl.seed_everything(42)
dict_args = vars(args)
dict_args["nparams"] = 2
dict_args["train_loss"] = loss_fn
model = TCNModel(**dict_args)
# train!
trainer.fit(model, train_dataloader, val_dataloader)
|
[
"csteinmetz1@gmail.com"
] |
csteinmetz1@gmail.com
|
ca1df705d15891320e03b64b9fdbeac9ac2ae78f
|
c71f0640577457cfd541499279e8427a06be343b
|
/ittest.py
|
7de2a72fb7301bc63fed292187e8302df6eb2df1
|
[] |
no_license
|
HanChinese-LIURUI/Weixin_1.0
|
9b0e63d913e45a067803c8d28d36dfce08ab3311
|
6267bda94e0424b281f8526ca14018366ebb149b
|
refs/heads/master
| 2021-02-18T09:47:16.600726
| 2020-03-05T14:22:44
| 2020-03-05T14:22:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
import itchat
# 自动回复
@itchat.msg_register(itchat.content.TEXT)
def text_reply(msg):
return "东小东回复数据:" + msg["Text"]
# 登入
itchat.auto_login()
# 保持运行
itchat.run()
|
[
"1090339852@qq.com"
] |
1090339852@qq.com
|
a1358efa1de189154c790b436ed12842e2a89408
|
348db1509ec31496ff86ceb59cea497e76c18102
|
/the_wall/app_wall/urls.py
|
196f4729905410819145582c909322f4e3ce8fd7
|
[] |
no_license
|
D-pitz/python_django
|
70973c0d2d8cd6036da723b26bc389087594dbbf
|
66a5f3f944f59bdd6e62c4eb92c257a707b6a34b
|
refs/heads/main
| 2023-06-19T02:11:08.837634
| 2021-06-28T22:07:51
| 2021-06-28T22:07:51
| 381,124,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.the_wall),
path('message', views.post_message),
path('comment/post/<int:post_id>', views.post_comment),
path('destroy/message/<int:post_id>', views.destroy_message),
path('destroy/comment/<int:comment_id>', views.destroy_comment)
]
|
[
"80120174+D-pitz@users.noreply.github.com"
] |
80120174+D-pitz@users.noreply.github.com
|
b0a5d5218c38b5027071ca4e22ab890c410cd4f8
|
8ddcebf0b4cb3277acbbb1aba4b2961d166a220f
|
/filelen.py
|
0b19a4d40568790dd3952bde579ea5032dfe486d
|
[] |
no_license
|
ShwetaMuthiyan/shweta1
|
25d34cceb57bf3dc294ca5c1d67ff81b1da83ef1
|
1bea075fdd26b84e1d26a350dc1a82b562e2bd58
|
refs/heads/master
| 2020-04-28T15:17:41.000617
| 2019-05-13T08:57:47
| 2019-05-13T08:57:47
| 175,367,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
#!/usr/bin/python
import io
f=input("enter file name:")
fd= io.FileIO(f)
line=fd.readline()
maxline= fd.readline()
minline=maxline
maxline=fd.readline()
if maxline>minline:
print(maxline)
else:
print(minline)
while line!= b'':
print(line)
line=fd.readline()
print("maxline is:",maxline)
print("minline is:",minline)
|
[
"noreply@github.com"
] |
ShwetaMuthiyan.noreply@github.com
|
580f7ba9ed0f5454dc4f1d8e0dfc93b457eec68b
|
ef31cc9260ba23cbe3d986becccc87b6df368b0f
|
/forBreakout2.py
|
df9e8a471133339d22a3ffab334ed92c318eb608
|
[] |
no_license
|
Whulph/Introduction
|
e810a0a5ef25082c37a384bffda70cdc054e43c0
|
2add70ae805fdf7348a51cb8c6ed66aa65cb5a42
|
refs/heads/main
| 2023-05-27T23:09:45.671258
| 2021-06-21T20:07:48
| 2021-06-21T20:07:48
| 376,899,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 15:44:50 2021
@author: Tyler
"""
import random
for i in range(1000):
print(random.choices('TH'))
|
[
"tylerwolf2290@gmail.com"
] |
tylerwolf2290@gmail.com
|
b3d951b5a7e7e7a8a0901753cca8c90b55b906c5
|
2927dcbbb0723e8e76710040407c5561fb6b0121
|
/01024/linux/entro.py/entro_macosx.py
|
1703d338ca7dc3f955f50acae7e6d29296bdd364
|
[] |
no_license
|
cafferta10/hardcode
|
9583599825010c16d46e199aaff9b15834730c65
|
2033e906d3a7850f88dda431f15a70f0981729b9
|
refs/heads/master
| 2021-05-22T01:06:19.072005
| 2019-10-26T03:28:44
| 2019-10-26T03:28:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,871
|
py
|
#!/usr/bin/env python
import sys
try:
from OpenGL.GL import *
from OpenGL.GLUT import *
except ImportError:
print "It seems that you don't have OpenGL.GL/GLUT bindings. Please install PyOpenGL."
sys.exit(1)
from math import *
from time import *
from AppKit import NSSound
from threading import *
p=[]
t=l=q=i=j=0
R=[1]*3
f=22050
b=''
glutInit(0)
glutInitDisplayMode(2)
glutGameModeString("1024x768:32")
if not glutGameModeGet(GLUT_GAME_MODE_POSSIBLE):
print "Game mode not possible"
sys.exit(0)
glutEnterGameMode()
class P:
pass
def K(*a):
global f
f=0
glutLeaveGameMode()
sys.exit(0)
def D():
global l,t,q,i,j,R
t=time()-z
d=t-l
glClear(16384)
b=sin(t/1.5)*.6+sin(t*5)*.4
h=(sin(t*.77)+sin(t))*pi
x=cos(h)*b
y=sin(h)*b
r=[max(.5,min(1,R[0]+d*sin(t*4))),max(.5,min(1,R[1]+d*sin(t*7))),max(.5,min(1,R[2]+d*sin(t*9)))]
q+=d*150
n=int(q)
for c in range(n):
a=P()
h=c/q
a.x=i+h*(x-i)
a.y=j+h*(y-j)
a.U=sin(h*t*4)*.01
a.T=cos(h*t*7)*.01
a.s=30
a.r=R[0]+h*(r[0]-R[0])
a.g=R[1]+h*(r[1]-R[1])
a.b=R[2]+h*(r[2]-R[2])
a.a=1
p.append(a)
i=x
j=y
R=r
q-=n
for a in p:
a.x+=a.U*d
a.y+=a.T*d
a.a-=d/7
a.s+=d*4
glColor4f(a.r,a.g,a.b,a.a)
glPointSize(a.s)
glBegin(0)
glVertex2f(a.x,a.y)
glEnd()
del p[0:-1500]
l=t
glutSwapBuffers()
glutPostRedisplay()
glutDisplayFunc(D)
glutKeyboardFunc(K)
d=[]
for y in range(32):
for x in range(32):
s=sqrt((16-x)**2+(16-y)**2)
v=max(1-.0625*s,0)
d.append(1)
d.append(v)
glBindTexture(3553,glGenTextures(1))
glTexParameteri(3553,33169,1)
glTexImage2D(3553,0,6408,32,32,0,6410,5126,d)
glTexEnvi(34913,34914,1)
glEnable(3042)
glEnable(34913)
glEnable(3553)
glBlendFunc(770,771)
glMatrixMode(5889)
glOrtho(-1.2,1.2,-1.2,1.2,-1,1)
glMatrixMode(5888)
def k():
while f:
if not sound.isPlaying():
sound.play()
sleep(0.001)
h=1.059
s=(5,12,8,12,5,12,8,7,0,7,3,7,0,7,2,3)
o=['F','O','R','M',chr(0x0),chr(0x2),chr(0xB1),chr(0x4E),'A','I','F','C','F','V','E','R',chr(0),chr(0),chr(0),chr(4),chr(0xA2),chr(0x80),chr(0x51),chr(0x40),'C','O','M','M',chr(0),chr(0),chr(0),chr(38),chr(0),chr(1),chr(0x0),chr(0x1),chr(0x58),chr(0x80),chr(0x0),chr(16),chr(0x40),chr(0x0d),chr(0xac),chr(0x44),chr(0x0),chr(0x0),chr(0x0),chr(0x0),chr(0x0),chr(0x0),'N','O','N','E',chr(14),'n','o','t',' ','c','o','m','p','r','e','s','s','e','d',chr(0),'S','S','N','D',chr(0),chr(0x02),chr(0xB1),chr(0x08),chr(0),chr(0),chr(0),chr(0),chr(0),chr(0),chr(0),chr(0)]
for t in o: b+=t
o=chr(0)*5512
for n in s:
t=''
g=.125*h**n
for y in range(2756):
v=int(2e4*sin(g*y))
t+=chr((v&0xff00)>>8)+chr(v&0xff)
b+=t
b+=o
aifff = open('1kintro_macosx.aiff', 'wb')
aifff.write(b)
aifff.close()
sound = NSSound.alloc()
sound.initWithContentsOfFile_byReference_("1kintro_macosx.aiff", True)
try:
sound.setLoops_(True)
sound.play()
except AttributeError:
Thread(target=k).start()
z=time()
glutMainLoop()
|
[
"youngthug@youngthug.com"
] |
youngthug@youngthug.com
|
3f151497a953c9cf780d199b58d23a21c0d27cca
|
ffb05b145989e01da075e2a607fb291955251f46
|
/pypers/pep318/moduledec.py
|
ccaa73c25a8ebde3ed0eb1ad03a259d4ddc48a02
|
[] |
no_license
|
micheles/papers
|
a5e7f2fa0cf305cd3f8face7c7ecc0db70ce7cc7
|
be9070f8b7e8192b84a102444b1238266bdc55a0
|
refs/heads/master
| 2023-06-07T16:46:46.306040
| 2018-07-14T04:17:51
| 2018-07-14T04:17:51
| 32,264,461
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
err=file('err','w')
def printerr(*args):
"For debugging purposes"
for a in args: print >> err, a,
print >> err
def importmodule(name,dic):
"""Given a module name and a dictionary, executes the module in a copy
of the dictionary and returns a new module."""
already_imported=sys.modules.get(name)
if already_imported: return already_imported # do nothing
fname=name+'.py'
dic=dic.copy()
execfile(fname,dic)
mod=types.ModuleType(name)
for k,v in dic.iteritems():
setattr(mod,k,v)
sys.modules[name]=mod
mod.__name__=name
mod.__file__=fname
return mod
class ModuleDecorator(Decorator,types.ModuleType):
def __init__(self,mod): # non-cooperative
self.undecorated=mod
for k,v in mod.__dict__.iteritems():
setattr(self,k,v)
decorate(self)
def __str__(self):
return '<module %s[%s]>' % (self.mod.__name__,self.__class__.__name__)
class DecoratedModule(ModuleDecorator): # defined for readability
pass
def callModuleDecorator(dec,*args):
if issubclass(dec,ModuleDecorator):
nargs=len(args)
if nargs==1:
mod=args[0]
elif nargs==2:
name,glob=args # a different object for each module
glob['object']=ClassDecorator(object)
mod=importmodule(name,glob)
else:
raise TypeError('%s() takes 1 or 2 arguments' % dec.__name__)
return type.__call__(dec,mod)
|
[
"michele.simionato@gmail.com"
] |
michele.simionato@gmail.com
|
830ec9e09303acb6d7a5d9ff2f7333cca0f90993
|
f130e20fe2a24c0d3747a7454e79f669a90fb08c
|
/Project 1 Opencv_FaceRecog/Step3_FaceRecog.py
|
278d9728a3fbcc288950149398d890464492265d
|
[] |
no_license
|
Khoanguyen99/Tot-Nghiep
|
d23794643c25d4db175b2dee9dd1c6d2ab80faef
|
faf269564aa19b91441f075b8e38de9279f831c2
|
refs/heads/main
| 2023-04-15T22:17:48.404909
| 2021-05-04T12:45:57
| 2021-05-04T12:45:57
| 352,339,043
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,316
|
py
|
import cv2
import numpy as np
import os
import sqlite3
from PIL import Image
# Doi chieu hình ảnh nhận diện với Thư viện nhận diện khuôn mặt
#Khuôn mặt ở đâu trên cam
face_cascade = cv2.CascadeClassifier('C:\opencv\sources\data\haarcascades\haarcascade_frontalface_default.xml')
#Khuôn mặt đấy là ai
recognizer = cv2.face.LBPHFaceRecognizer_create()
#Đối chiếu xem khuôn mặt nhận diện trên camera có trong tập dữ liệu đã được học hay không và nếu có thì là ai
recognizer.read(r"E:\Tot-Nghiep\Project 1 Opencv_FaceRecog\recognizer\trainingData.yml")
#Lấy thông tin bằng id từ database sqlite3
def getProfile(id):
conn = sqlite3.connect(r"E:\Tot-Nghiep\Project 1 Opencv_FaceRecog\dataFace.db")
query = "SELECT * FROM People WHERE ID=" +str(id)
cursor = conn.execute(query)
#Tạo profile để lưu giá trị lấy được từ database về
profile = None
for row in cursor:
profile = row
conn.close()
return profile
#Nhận diện khuôn mặt ở camera và kết hợp với dữ liệu xem người này là ai?
# Nếu có thì show tên, nếu không thì show Unknow
cap = cv2.VideoCapture(0)
fontface = cv2.FONT_HERSHEY_SIMPLEX
while(True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray)
#vẽ hình vuông:
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x,y), (x+ w, y+ h), (0,255,0), 2)
roi_gray = gray[y: y+h, x: x+ w]
id,confidence = recognizer.predict(roi_gray) #Nhận diện trên khuôn mặt là ai, có thì trả về ID
if confidence< 40:
profile = getProfile(id)
if (profile != None):
cv2.putText(frame,"" +str(profile[1]), (x+10, y+h +30), fontface, 1, (0,255,0), 2)
else:
cv2.putText(frame,"Unknow", (x+10, y+h +30),fontface, 1, (0,0,255), 2)
cv2.imshow('image',frame)
#Sẽ không bị tắt liền khi mở lên, và chỉ bị ngắt khi dừng chương trình ở python hoặc nhấn "q"
#Will not turn off immediately unless you stop python or press "q"
if(cv2.waitKey(1) == ord('q')):
break;
cap.release()
cv2.destroyAllWindows()
|
[
"59419833+kingpantsu123@users.noreply.github.com"
] |
59419833+kingpantsu123@users.noreply.github.com
|
2e96e1276564e7f9b1df94b124a76859c1a9dacf
|
fa39a125126fc11fdf03405bef21fc25508629bd
|
/helpers.py
|
6651c2668fd68e0c6bad56aa32c7043e0f122e59
|
[] |
no_license
|
tenebrius1/finance
|
60ed4fd2696b936f9b0255c2a753bfb76fa4c0d6
|
6c6cf0bf4ae7a333f2623eead29617457e43fb45
|
refs/heads/master
| 2023-07-10T05:55:45.424323
| 2021-08-16T04:28:30
| 2021-08-16T04:28:30
| 228,144,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
import os
import requests
import urllib.parse
from flask import redirect, render_template, request, session
from functools import wraps
import pyEX as p
def apology(message, code=400):
"""Render message as an apology to user."""
def escape(s):
"""
Escape special characters.
https://github.com/jacebrowning/memegen#special-characters
"""
for old, new in [("-", "--"), (" ", "-"), ("_", "__"), ("?", "~q"),
("%", "~p"), ("#", "~h"), ("/", "~s"), ("\"", "''")]:
s = s.replace(old, new)
return s
return render_template("apology.html", top=code, bottom=escape(message)), code
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get("user_id") is None:
return redirect("/login")
return f(*args, **kwargs)
return decorated_function
def lookup(symbol):
"""Look up quote for symbol."""
# Contact API
try:
api_key = os.environ.get("API_KEY")
response = requests.get(f"https://cloud.iexapis.com/stable/stock/{symbol}/quote?token={api_key}")
response.raise_for_status()
except requests.RequestException:
return None
# Parse response
try:
quote = response.json()
return {
"name": quote["companyName"],
"price": float(quote["latestPrice"]),
"symbol": quote["symbol"]
}
except (KeyError, TypeError, ValueError):
return None
def usd(value):
"""Format value as USD."""
return f"${value:,.2f}"
|
[
"noreply@github.com"
] |
tenebrius1.noreply@github.com
|
29447c019f83150cc8687ebfea6b9b70a41a69d7
|
81f72a2972dde1268895f583bce16e99da02a9e3
|
/syntax_analysis/temp.py
|
eed8140a3607c5d1b8fc6bf952f74a096c60a5e3
|
[] |
no_license
|
hanayashiki/mars_compiler
|
976abdf274da0cb3ef3292daed6c3f874ada708c
|
e833400cdf66c77cb415690cfc91ec061ed49f14
|
refs/heads/master
| 2021-01-20T22:44:31.881427
| 2017-09-08T06:32:18
| 2017-09-08T06:32:18
| 101,821,029
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
from syntax_analysis.symbol import *
from syntax_analysis.defsutils import *
class Temps:
num = 0
def new_name(self):
self.num += 1
return "%"+str(self.num-1)
def new_temp(self, type_name):
tmp_id = Id()
temp_type = Type()
temp_type.value = type_name
temp_type.width = get_width(type_name)
tmp_id.type = temp_type
tmp_id.name = self.new_name()
return tmp_id
|
[
"494450105@qq.com"
] |
494450105@qq.com
|
3d8982230e8bdc4e17f11cbd343469360d470201
|
26aeb2bf74bed12c5226e72b63997530161b3d21
|
/model.py
|
2ed3b8357a79a6cc4cd152098cacba50c2835f23
|
[] |
no_license
|
zsb87/MyResNet
|
b9a77976546a8a5a1488d42f7b93d441c8e47815
|
533084825a309ed84d992ee5a611af00d4559afa
|
refs/heads/master
| 2020-03-19T15:33:19.886291
| 2018-06-09T18:57:09
| 2018-06-09T18:57:09
| 136,675,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,765
|
py
|
import utils
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from torch.autograd import Variable
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=10):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
# self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
# self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(256 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
# x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
# x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
print('model_urls')
print(model_urls['resnet18'])
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
[
"zhangshibo87@gmail.com"
] |
zhangshibo87@gmail.com
|
82281a71a622fc349f51c3b775734e0788b1413b
|
e0700dffa6299630a2fa275933beea8b223436c2
|
/classbased/first/forms.py
|
179f7f9bd91e4d323a16b2d00c8ea23d0100f811
|
[] |
no_license
|
kushalkombe/django_projects
|
e526a10de07c77c9f6c05e7c4fd711ddafc546c2
|
9ad5e241621ee2779e077e7a26e012137ce3f36e
|
refs/heads/master
| 2023-01-07T20:34:42.627526
| 2020-11-13T11:27:16
| 2020-11-13T11:27:16
| 283,565,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from django import forms
from first.models import Emp
class EmpForm(forms.ModelForm):
class Meta:
model = Emp
fields = '__all__'
|
[
"kushalkombe@gmail.com"
] |
kushalkombe@gmail.com
|
1851dbc045fb64fd79266d7da8ca836f31797c87
|
c29e15cc3cc6d98671e2ceaf4e11a6db45970790
|
/C/tests.py
|
fba961fa519ab7443cb7d61770198a76d1de261c
|
[] |
no_license
|
perminovs/yandex-contest-2019-10
|
c889e0387ac839e04d6fb91e4864c5641340c473
|
f86cc6cbcd2c80057f0675bad75acb46ac465a60
|
refs/heads/master
| 2020-08-28T10:59:40.025389
| 2019-11-24T18:39:54
| 2019-11-24T18:39:54
| 217,679,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,970
|
py
|
import pytest
from .solution import build_minimum_spanning_tree, convert_graph, solve
@pytest.mark.parametrize(
('graph', 'expected_tree'),
[
(
# (1)--10--(2)
# \ /
# 40 20
# \ /
# (3)
[
(10, 1, 2),
(20, 2, 3),
(40, 1, 3),
],
# (1)--10--(2)
# /
# 20
# /
# (3)
{
(10, 1, 2),
(20, 2, 3),
},
),
(
# (1)---10--(2)
# | \ |
# | \ 50
# | 20 |
# 15 \ |
# | \ |
# (3)--90---(4)
# \ /
# 70 18
# \ /
# (5)
[
(10, 1, 2),
(20, 1, 4),
(15, 1, 3),
(50, 2, 4),
(90, 4, 3),
(70, 5, 3),
(18, 5, 4),
],
# (1)---10--(2)
# | \
# | \
# | 20
# 15 \
# | \
# (3) (4)
# /
# 18
# /
# (5)
{
(10, 1, 2),
(20, 1, 4),
(15, 1, 3),
(18, 5, 4),
},
)
]
)
def test_build_minimum_spanning_tree(graph, expected_tree):
assert build_minimum_spanning_tree(graph) == expected_tree
@pytest.mark.parametrize(
('nodes', 'branches', 'expected'),
[
(
{1: 10},
[],
{
(10, -1, 1),
}
),
(
{1: 10, 2: 20},
[],
{
(10, -1, 1),
(20, -1, 2),
}
),
(
{1: 10, 2: 20},
[
(15, 1, 2),
],
{
(15, 1, 2),
(10, -1, 1),
(20, -1, 2),
}
),
]
)
def test_convert_graph(nodes, branches, expected):
assert convert_graph(nodes, branches) == expected
@pytest.mark.parametrize(
('nodes', 'branches', 'expected'),
[
(
{1: 10},
[],
10,
),
(
{1: 10, 2: 20},
[],
30,
),
(
{1: 11, 2: 29},
[
(17, 1, 2),
],
28,
),
(
{1: 10, 2: 20, 3: 30},
[
(10, 1, 2),
(25, 2, 3),
],
10 + 10 + 25,
)
],
)
def test_solve(nodes, branches, expected):
assert solve(nodes, branches) == expected
|
[
"perminov-s-i@yandex.ru"
] |
perminov-s-i@yandex.ru
|
f454c9a3b4832592cf16f765dfe612cc195d9739
|
87278e84fb4cd4b8dedd4a42cf3a51d48e749ec4
|
/ch04/sort2.py
|
061b1e80b0625e2c24d03590d1e6fe35152b103d
|
[] |
no_license
|
dykim822/Python
|
083a6fb9be51e6cb3725a73ea8184f813f572abc
|
f6bd67d14e3a3a16934650cff6234e9cbad9ebce
|
refs/heads/main
| 2023-07-09T23:50:05.059533
| 2021-07-28T05:07:35
| 2021-07-28T05:07:35
| 369,375,985
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
l1 = [2, 7, 3, 6]
# l1.sort(): list 데이터가 변경
l2 = sorted(l1) # 원본 데이터는 변경하지 않고 정렬된 결과를 l2에 저장
print("원본", l1)
print("오름차순", l2)
l3 = sorted(l1, reverse=True) # 원본 데이터는 변경x
print("원본", l1)
print("내림차순", l3)
# l1.reverse(): 원본 데이터 순서를 반대로 한다-> 데이터가 바뀐다
l1.sort(reverse=True) # 원본 데이터를 내림차순으로 변경-> 데이터가 바뀐다
print("원본 변경", l1)
|
[
"dykim822@gmail.com"
] |
dykim822@gmail.com
|
40ef10b9e7807ef116efeda069823fd6a235a2b2
|
06b25df867b9a4741b4ca803eceb254aa50758e9
|
/editor_api/database/output/waterbal.py
|
59483bc0d08b19050c91bc1643fd841409a0191d
|
[
"MIT"
] |
permissive
|
jphuart/swatplus-automatic-workflow
|
e5ceaa745096926176d9fc45042f836e628d0504
|
dd2eeb7f882eb2d4ab7e1e5265c10b9beb93ddc4
|
refs/heads/master
| 2023-08-15T02:47:40.742352
| 2021-10-05T14:57:19
| 2021-10-05T14:57:19
| 282,170,706
| 0
| 0
| null | 2020-07-24T08:52:25
| 2020-07-24T08:52:24
| null |
UTF-8
|
Python
| false
| false
| 1,612
|
py
|
from peewee import *
from .base import OutputBase
class Waterbal(OutputBase):
precip = DoubleField(null=True)
snofall = DoubleField(null=True)
snomlt = DoubleField(null=True)
surq_gen = DoubleField(null=True)
latq = DoubleField(null=True)
wateryld = DoubleField(null=True)
perc = DoubleField(null=True)
et = DoubleField(null=True)
tloss = DoubleField(null=True)
eplant = DoubleField(null=True)
esoil = DoubleField(null=True)
surq_cont = DoubleField(null=True)
cn = DoubleField(null=True)
sw = DoubleField(null=True)
sw_300 = DoubleField(null=True)
snopack = DoubleField(null=True)
pet = DoubleField(null=True)
qtile = DoubleField(null=True)
irr = DoubleField(null=True)
surq_runon = DoubleField(null=True)
latq_runon = DoubleField(null=True)
overbank = DoubleField(null=True)
surq_cha = DoubleField(null=True)
surq_res = DoubleField(null=True)
surq_ls = DoubleField(null=True)
latq_cha = DoubleField(null=True)
latq_res = DoubleField(null=True)
latq_ls = DoubleField(null=True)
# Concrete tables: one subclass of Waterbal per spatial unit (basin /
# landscape unit / HRU / lite HRU) and per timestep (day / mon / yr /
# annual average). No columns are added; peewee derives each table name
# from the class name.
class Basin_wb_day(Waterbal):
    pass
class Basin_wb_mon(Waterbal):
    pass
class Basin_wb_yr(Waterbal):
    pass
class Basin_wb_aa(Waterbal):
    pass
class Lsunit_wb_day(Waterbal):
    pass
class Lsunit_wb_mon(Waterbal):
    pass
class Lsunit_wb_yr(Waterbal):
    pass
class Lsunit_wb_aa(Waterbal):
    pass
class Hru_wb_day(Waterbal):
    pass
class Hru_wb_mon(Waterbal):
    pass
class Hru_wb_yr(Waterbal):
    pass
class Hru_wb_aa(Waterbal):
    pass
class Hru_lte_wb_day(Waterbal):
    pass
class Hru_lte_wb_mon(Waterbal):
    pass
class Hru_lte_wb_yr(Waterbal):
    pass
class Hru_lte_wb_aa(Waterbal):
    pass
|
[
"celray.chawanda@outlook.com"
] |
celray.chawanda@outlook.com
|
db6fd40b23b2542124d03db026c6380c71e726e7
|
82205dd92ccb49f644f7e42ac4b0e9d3c8c1936e
|
/pdfmix/_version.py
|
2070d51ebd7636f5aa284f97fce49fb2827ed12b
|
[
"BSD-3-Clause"
] |
permissive
|
st3107/PDFmix
|
056a59602fd0f8bc49956abb7bd88ca31af6c933
|
10afc66e7829c7d275eda0746395aa612de6d8d6
|
refs/heads/main
| 2023-07-28T03:16:21.928751
| 2021-09-13T23:31:19
| 2021-09-13T23:31:19
| 395,450,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,454
|
py
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information.

    Returns the raw git-archive substitution strings; when the file was not
    produced by 'git archive' they remain literal "$Format:...$" markers,
    which git_versions_from_keywords() detects and rejects.
    """
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes (VCS, style, tag_prefix, parentdir_prefix,
    versionfile_source, verbose) are assigned in get_config().
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440-post"
    cfg.tag_prefix = "v"
    cfg.parentdir_prefix = "None"
    cfg.versionfile_source = "pdfmix/_version.py"
    cfg.verbose = False
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
# Registries populated at import time: LONG_VERSION_PY holds per-VCS
# _version.py templates (unused in this embedded copy); HANDLERS maps
# vcs name -> method name -> handler function via register_vcs_handler.
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS.

    Registers the decorated function under HANDLERS[vcs][method] and
    returns it unchanged, so the function remains usable directly.
    """
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each candidate executable name in *commands* until one launches,
    then runs it with *args*. Returns (stdout, returncode) on launch;
    (None, returncode) if the process exited non-zero; (None, None) when no
    candidate could be started at all.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            # ENOENT: this candidate name doesn't exist; try the next one.
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    # for/else: the loop never hit 'break', i.e. no candidate launched.
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string; the version is whatever
    follows *parentdir_prefix* in the directory name. Up to two extra
    directory levels above *root* are searched as well. Raises
    NotThisMethod when no matching directory is found.
    """
    tried = []
    search_dir = root
    for _ in range(3):
        leaf = os.path.basename(search_dir)
        if leaf.startswith(parentdir_prefix):
            return {
                "version": leaf[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(search_dir)
        search_dir = os.path.dirname(search_dir)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* for the git_refnames / git_full / git_date
    assignment lines and returns whichever of the keys "refnames", "full"
    and "date" could be parsed. A missing or unreadable file yields a
    (possibly empty) partial dict rather than raising.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # 'with' guarantees the file handle is closed even if parsing
        # raises; the original open()/close() pair leaked it on error.
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    *keywords* is the dict produced by get_keywords()/git_get_keywords().
    Returns a version-info dict on success; raises NotThisMethod when the
    keywords are missing or still contain unexpanded "$Format" markers
    (i.e. this is not a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    # Take the lexicographically first matching tag.
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys long, short, error, dirty, date and
    (when a tag was found) closest-tag and distance, which the render_*
    helpers consume. Raises NotThisMethod when git is unavailable or
    *root* is not a git work tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a "+" if we don't already have one, else return a ".".

    PEP 440 local version labels start with "+" and separate further
    segments with ".". BUG FIX: git_pieces_from_vcs() stores
    ``pieces["closest-tag"] = None`` in the no-tag case, so
    ``pieces.get("closest-tag", "")`` could return None and make the
    ``in`` test raise TypeError; ``or ""`` normalizes both the missing-key
    and the explicit-None cases to an empty string.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged
    build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing has been tagged yet
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        return tag
    return tag + ".post.dev%d" % pieces["distance"]
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    dev = ".dev0" if pieces["dirty"] else ""
    if not tag:
        # exception #1: always includes the short hash, joined with "+g"
        return "0.post%d%s+g%s" % (pieces["distance"], dev, pieces["short"])
    if not (pieces["distance"] or pieces["dirty"]):
        # exactly on a clean tag: the tag alone is the version
        return tag
    out = tag + ".post%d" % pieces["distance"] + dev
    out += plus_or_dot(pieces)
    out += "g%s" % pieces["short"]
    return out
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        if not (pieces["distance"] or pieces["dirty"]):
            # exactly on a clean tag
            return tag
        out = tag + ".post%d" % pieces["distance"]
    else:
        # exception #1
        out = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        out += ".dev0"
    return out
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        out = pieces["short"]
    return out + "-dirty" if pieces["dirty"] else out
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render(pieces, style):
    """Render the given version pieces into the requested style.

    An upstream error short-circuits to an "unknown" version dict; an
    unrecognized *style* raises ValueError.
    """
    if pieces["error"]:
        # A failed extraction step recorded its reason; report it verbatim.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    # Dispatch table instead of an if/elif ladder.
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: expanded git-archive keywords, 'git describe' on the
    working tree, and the parent-directory name; falls back to "0+unknown"
    with an explanatory "error" entry.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: git-archive keyword expansion.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    # Locate the source-tree root by walking up from this file.
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly via 'git describe'.
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
|
[
"st3107@columbia.edu"
] |
st3107@columbia.edu
|
5e74db6a3fe9fc60ffb7ba48edbaa73cd7968d65
|
987a68b9c196f39ba1810a2261cd4a08c35416a3
|
/Tree/337-house-robber-iii.py
|
68fa0b80ac4156e7a87dd92d22f656440755db24
|
[] |
no_license
|
xizhang77/LeetCode
|
c26e4699fbe1f2d2c4706b2e5ee82131be066ee5
|
ce68f5af57f772185211f4e81952d0345a6d23cb
|
refs/heads/master
| 2021-06-05T15:33:22.318833
| 2019-11-19T06:53:24
| 2019-11-19T06:53:24
| 135,076,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
# -*- coding: utf-8 -*-
'''
https://leetcode.com/problems/house-robber-iii/
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Time and Space: O(n) where n is number if nodes
class Solution(object):
    """House Robber III: maximum sum of node values with no parent-child pair."""

    def dfs(self, root):
        """Return (best, best_without_root) for the subtree rooted at *root*.

        best              -- maximum loot obtainable in this subtree
        best_without_root -- maximum loot when *root* itself is skipped
        """
        if not root:
            return 0, 0
        left_best, left_skip = self.dfs(root.left)
        right_best, right_skip = self.dfs(root.right)
        skip_root = left_best + right_best
        take_root = root.val + left_skip + right_skip
        return max(take_root, skip_root), skip_root

    def rob(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        return self.dfs(root)[0]
|
[
"xizhang1@cs.stonybrook.edu"
] |
xizhang1@cs.stonybrook.edu
|
86035528d529d138acd929937c57cc6d6804c6cc
|
c671b96c8fa648d81575b97881c1377340d02b8f
|
/pk_keras/image_classifier.py
|
2a0b16c0a1575467d2da705cd3f7a04cd4136e62
|
[] |
no_license
|
qiuqi06/python
|
2e7e6e850f5178c3c015c2224678d1a47e803008
|
1b8d039471464bc7863fed395b2338fd36201c76
|
refs/heads/master
| 2022-12-06T00:00:29.936309
| 2020-12-04T10:30:48
| 2020-12-04T10:30:48
| 160,703,195
| 0
| 0
| null | 2022-11-21T22:27:26
| 2018-12-06T16:40:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import RMSprop
from keras.utils import np_utils

np.random.seed(1337)  # for reproducibility

# Load MNIST, flatten each 28x28 image to a 784-vector, scale to [0, 1].
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], -1) / 255
x_test = x_test.reshape(x_test.shape[0], -1) / 255
# One-hot encode the digit labels.
# BUG FIX: the training-set call used the misspelled keyword 'nb_class',
# which to_categorical rejects with a TypeError; both calls now use
# 'nb_classes', matching the test-set line and the old-Keras naming
# already used in this script (nb_epoch).
y_train = np_utils.to_categorical(y_train, nb_classes=10)
y_test = np_utils.to_categorical(y_test, nb_classes=10)

# Simple MLP: 784 -> 32 (ReLU) -> 10-way softmax.
model = Sequential([
    Dense(32, input_dim=784),
    Activation('relu'),
    Dense(10),
    Activation('softmax')
])

rmsprop = RMSprop(lr=0.0001, rho=0.9, epsilon=1e-08, decay=0)
model.compile(
    optimizer=rmsprop,
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

print('\nTraining----------------------------')
model.fit(x_train, y_train, nb_epoch=2, batch_size=32)

print('\ntesting-----------------')
loss, accuracy = model.evaluate(x_test, y_test)
print('loss', loss)
print('accuracy', accuracy)
|
[
"714106661@qq.com"
] |
714106661@qq.com
|
b4ef64857f5b4a0a131b001d1de4e655547d119b
|
fc0fbd4012da11c283d2087b55aea43bd0b68650
|
/Problems/Guessing game/task.py
|
1fc6c22a6213142cc71543124a95fd26883e0acd
|
[] |
no_license
|
cht-tsang/zookeeper
|
ad41e268f9401af51a858461e5b37692b47fe6d5
|
48ed0b766b3e5983c36a99a7a18628dd30f86972
|
refs/heads/master
| 2022-11-30T09:26:58.071229
| 2020-08-03T22:02:19
| 2020-08-03T22:02:19
| 274,208,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
# Secret product the player is trying to hit.
set_number = 6557

# Read the player's two factors from stdin and print True/False depending
# on whether their product equals the secret number.
first_factor = int(input())
second_factor = int(input())
print(first_factor * second_factor == set_number)
|
[
"cht.chihungtsang@gmail.com"
] |
cht.chihungtsang@gmail.com
|
612a7404d4bb9f8ae55e9bfa2df2d6da0b975d36
|
376ef0073d0ab38f6735462cf64ac723d07e6499
|
/apps/bank/CipherUtil.py
|
66bd41e401c24775fb2483349b887b096cff62b3
|
[] |
no_license
|
Nautilus1993/NetworkSecurity
|
f6074628fe6a595cc49a78bad95804d0062bcec1
|
359f49d718643a788b812c0bc2eb2c9759463910
|
refs/heads/master
| 2020-07-20T21:06:27.358554
| 2019-06-02T10:16:26
| 2019-06-02T10:16:26
| 73,742,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,624
|
py
|
'''
Created on Mar 29, 2014
@author: sethjn
'''
import sys
sys.path.append("../..")
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA, HMAC
from Crypto.Protocol.KDF import PBKDF2
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES
from playground.crypto import X509Certificate, Pkcs7Padding
class CIPHER_AES128_CBC(object):
    """AES-CBC encryption/decryption with PKCS#7 padding.

    NOTE(review): encrypter and decrypter start from the same IV but keep
    independent CBC chaining state, so decrypt() only matches encrypt()
    when the two streams are consumed in lockstep — confirm callers rely
    on that.
    """
    def __init__(self, key, iv):
        # Separate cipher objects for each direction (CBC state is stateful).
        self.encrypter = AES.new(key, IV=iv, mode=AES.MODE_CBC)
        self.decrypter = AES.new(key, IV=iv, mode=AES.MODE_CBC)
        self.block_size = AES.block_size
    def encrypt(self, data):
        # Pad to a whole number of AES blocks, then CBC-encrypt.
        paddedData = Pkcs7Padding(self.block_size).padData(data)
        return self.encrypter.encrypt(paddedData)
    def decrypt(self, data):
        # CBC-decrypt, then strip the PKCS#7 padding.
        paddedData = self.decrypter.decrypt(data)
        return Pkcs7Padding(self.block_size).unpadData(paddedData)
class MAC_HMAC_SHA1(object):
    """HMAC-SHA1 message authentication (20-byte tags)."""
    MAC_SIZE = 20
    def __init__(self, key):
        self.__key = key
    def mac(self, data):
        # Fresh HMAC object per message; the state is not reusable.
        mac = HMAC.new(self.__key, digestmod=SHA)
        mac.update(data)
        return mac.digest()
    def verifyMac(self, data, checkMac):
        # NOTE(review): '==' is not a constant-time comparison; a
        # timing-safe compare (hmac.compare_digest) would be preferable —
        # confirm the threat model before changing.
        mac = self.mac(data)
        return mac == checkMac
class RSA_SIGNATURE_MAC(object):
    """RSA PKCS#1 v1.5 signature used as a message 'MAC'.

    mac() signs SHA-1(data) with the private key; verifyMac() checks the
    signature against the public half. MAC_SIZE of 128 bytes corresponds
    to a 1024-bit RSA modulus.
    """
    MAC_SIZE = 128
    def __init__(self, key):
        # Sign with the private key; verify with the derived public key so
        # verification never touches private material.
        # NOTE(review): pycrypto's RSA key method is key.publickey()
        # (lowercase) — confirm this key type actually provides publicKey().
        self.signer = PKCS1_v1_5.new(key)
        self.verifier = PKCS1_v1_5.new(key.publicKey())
    def mac(self, data):
        # BUG FIX: PKCS1_v1_5.sign() expects the SHA hash *object* (it calls
        # .digest()/.oid itself); the original passed raw digest bytes,
        # which fails. DefaultSign() below shows the correct usage.
        return self.signer.sign(SHA.new(data))
    def verifyMac(self, data, checkMac):
        # BUG FIX: verification must go through the public-key verifier;
        # the original called self.signer.verify and left self.verifier
        # unused. Pass the hash object here as well.
        return self.verifier.verify(SHA.new(data), checkMac)
class EncryptThenMac(object):
    """Compose a cipher and a MAC in encrypt-then-MAC order.

    Wire format: ciphertext || mac(ciphertext). decrypt() returns None
    when the trailing MAC does not verify.
    """
    @staticmethod
    def CreateMode(encMode, macMode):
        # Partially apply the cipher/MAC classes; the result is a factory
        # taking (k_enc, iv, k_mac) and returning a ready instance.
        return lambda k_enc, iv, k_mac: EncryptThenMac(encMode, macMode, k_enc, iv, k_mac)
    def __init__(self, encMode, macMode, k_enc, iv, k_mac):
        self.encrypter = encMode(k_enc, iv)
        self.mac = macMode(k_mac)
    def encrypt(self, data):
        # MAC is computed over the ciphertext (encrypt-then-MAC).
        cipherText = self.encrypter.encrypt(data)
        return cipherText + self.mac.mac(cipherText)
    def decrypt(self, data):
        # Split off the trailing MAC, verify it, then decrypt the rest.
        cipherText, storedMac = data[:-self.mac.MAC_SIZE], data[-self.mac.MAC_SIZE:]
        if not self.mac.verifyMac(cipherText, storedMac):
            return None
        return self.encrypter.decrypt(cipherText)
# Ready-made encrypt-then-MAC constructions.
EncryptThenHmac = EncryptThenMac.CreateMode(CIPHER_AES128_CBC, MAC_HMAC_SHA1)
EncryptThenRsaSign = EncryptThenMac.CreateMode(CIPHER_AES128_CBC, RSA_SIGNATURE_MAC)
def DefaultSign(msg, rsaKey):
    # PKCS#1 v1.5 signature over SHA-1(msg); sign() consumes the hash object.
    return PKCS1_v1_5.new(rsaKey).sign(SHA.new(msg))
|
[
"2271275281@qq.com"
] |
2271275281@qq.com
|
514c26d2c69b256dc7f73c676859b0254a445a40
|
bd553d50a233c043878e4209e0376eab1eb7176f
|
/Examples/1Intro/DataTypes/3List/sortDemo2.py
|
033cfb4f8a05487e54503d8fb5c46fd34aedcabe
|
[] |
no_license
|
jainendrak/python-training
|
3610434926774bca9c711a5e6c675d3ce28893ef
|
336221aab9aaa9624ad0f5ad046c44195105249e
|
refs/heads/master
| 2020-05-27T13:59:17.656236
| 2019-05-27T06:21:46
| 2019-05-27T06:21:46
| 188,648,975
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
# Cricketers with their century counts.
li = [
    {'fname': 'Sachin', 'lname': 'Tendulkar', 'centuries': 100},
    {'fname': 'Rahul', 'lname': 'Dravid', 'centuries': 88},
    {'fname': 'Saurav', 'lname': 'Ganguly', 'centuries': 62},
]
# Sort the list in place by first name, descending (Z -> A).
li.sort(key=lambda player: player['fname'], reverse=True)
|
[
"jainendra.kumar@oracle.com"
] |
jainendra.kumar@oracle.com
|
9ae38f37629909e6511e8131e234e91163c22e51
|
d0bab7ad4ff9fbc54e0a64ca6a06873e888b0fc9
|
/src/libs/blueprint/python/py_src/mesh/examples/__init__.py
|
9036ffd31e8e7817976e6bebd7e5e4c086cc1fe4
|
[
"BSD-3-Clause"
] |
permissive
|
LLNL-Collaboration/conduit
|
4295469c73247b246be86c087397a99c9a760148
|
e80464ed6cd3706b39408d61499f8dcfcd41c2f3
|
refs/heads/master
| 2021-01-09T05:24:44.513812
| 2017-04-26T03:51:08
| 2017-04-26T03:51:08
| 80,760,783
| 2
| 1
| null | 2017-02-02T19:29:55
| 2017-02-02T19:29:55
| null |
UTF-8
|
Python
| false
| false
| 2,325
|
py
|
###############################################################################
# Copyright (c) 2014-2017, Lawrence Livermore National Security, LLC.
#
# Produced at the Lawrence Livermore National Laboratory
#
# LLNL-CODE-666778
#
# All rights reserved.
#
# This file is part of Conduit.
#
# For details, see: http://software.llnl.gov/conduit/.
#
# Please also read conduit/LICENSE
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the disclaimer below.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer (as noted below) in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the LLNS/LLNL nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,
# LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
###############################################################################
###############################################################################
# file: __init__.py
# Purpose: Main init for the conduit blueprint mesh examples module.
###############################################################################
from .conduit_blueprint_mesh_examples_python import *
|
[
"noreply@github.com"
] |
LLNL-Collaboration.noreply@github.com
|
fc4c169a3beac0967ff6a68a00b06432855ab674
|
46a12eb54a26cb00082f44b533d42dba35504482
|
/todo/models.py
|
4a5dafbe8485947b8a58f971cb5c6ba9a68145b4
|
[] |
no_license
|
Chrisss09/Django_todo
|
5cd14b0e75c112edc9efafafcb7604c9d67d84ff
|
83a616d40a284449f42e80f7c5b1cc341b66ef1d
|
refs/heads/master
| 2022-12-22T20:47:12.260440
| 2019-09-12T18:26:16
| 2019-09-12T18:26:16
| 207,083,922
| 0
| 0
| null | 2022-12-08T05:21:38
| 2019-09-08T08:33:32
|
Python
|
UTF-8
|
Python
| false
| false
| 258
|
py
|
from django.db import models
class Item(models.Model):
    """A single to-do list entry."""
    # Short label shown in the list; required (blank=False).
    name = models.CharField(max_length=30, blank=False)
    # Completion flag; new items start as not done.
    done = models.BooleanField(blank=False, default=False)
    # Creating user friendly headings
    def __str__(self):
        # Display the item's name (e.g. in the admin and shell).
        return self.name
|
[
"chris.fowler73@hotmail.co.uk"
] |
chris.fowler73@hotmail.co.uk
|
dbe107e88bb307cd80fc4a1dd6efb29c55c50dd0
|
6b8c5b03cf12797eb36c952f88d97ee33725b1ae
|
/Python/examples/EXAMPLES/db_postgres_basics.py
|
29391563512c44dd67c2f01384236f8fcaeb660f
|
[] |
no_license
|
a8ksh4/junk
|
f1b6100afb5873e0b4931cd233ccee12b975222c
|
4535ccc5d9bd82104d0f5d3799bb006b205cb370
|
refs/heads/master
| 2021-01-23T13:36:55.992346
| 2017-04-29T01:19:09
| 2017-04-29T01:19:09
| 14,162,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
#!/usr/bin/env python
# Python 2 demo: basic PostgreSQL queries with psycopg2.
import sys
import psycopg2
# Connect to the local 'jstrick' database.
# NOTE(review): credentials are hard-coded; acceptable only for a demo.
pgconn = psycopg2.connect(
    host="localhost",
    dbname="jstrick",
    user="scripts",
    password='scripts',
)
pgcursor = pgconn.cursor()
# select first name, last name from all presidents
pgcursor.execute('''
    select lname, fname
    from presidents
    ''')
print "{0} rows in result set\n".format(pgcursor.rowcount)
# Print at most the first 10 rows of the result set.
for row in pgcursor.fetchmany(10):
    print ' '.join(row)
print
# Parameterized query: psycopg2 substitutes %s safely (no SQL injection).
party = 'Whig'
pgcursor.execute('''
    select lname, fname
    from presidents
    where party = %s
    ''', (party,))
print pgcursor.fetchall()
pgconn.close()
|
[
"drnorris@DRNORRIS-MOBL.amr.corp.intel.com"
] |
drnorris@DRNORRIS-MOBL.amr.corp.intel.com
|
82f3fdc558f648a4cdba85bda400e50af17edc39
|
1f0690fd6780f44ba46076e958f56ba395dcbc20
|
/test/test_raw_shape_map.py
|
dc71bb7546d9a1fd64fd27f8b2838c45a900f988
|
[
"Apache-2.0"
] |
permissive
|
DaniFdezAlvarez/shexerp3
|
196ee4d20b4b92b64a71256571c06de3b4a5cf78
|
80c3bdaac856a88d53359f5996477994774d34e2
|
refs/heads/develop
| 2021-06-13T14:03:12.494569
| 2021-04-26T11:38:36
| 2021-04-26T11:38:36
| 187,663,813
| 3
| 2
|
Apache-2.0
| 2020-03-27T19:38:33
| 2019-05-20T15:07:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,183
|
py
|
import unittest
from shexer.shaper import Shaper
from test.const import G1, BASE_FILES, default_namespaces
from test.t_utils import file_vs_str_tunned_comparison
from shexer.consts import TURTLE
_BASE_DIR = BASE_FILES + "shape_map\\"
class TestRawShapeMap(unittest.TestCase):
    """Integration tests for Shaper's raw shape-map input.

    Each test feeds a different shape-map selector syntax (IRI node,
    prefixed node, FOCUS pattern, wildcard, SPARQL query, multiple items)
    to Shaper over graph G1 and compares the generated ShEx against a
    golden file under _BASE_DIR.
    """

    def _check_shape_map(self, shape_map, expected_file):
        # Shared boilerplate for every test: run Shaper over G1 with the
        # given raw shape map and compare against the golden ShEx file.
        shaper = Shaper(graph_file_input=G1,
                        namespaces_dict=default_namespaces(),
                        all_classes_mode=False,
                        input_format=TURTLE,
                        disable_comments=True,
                        shape_map_raw=shape_map
                        )
        str_result = shaper.shex_graph(string_output=True)
        self.assertTrue(
            file_vs_str_tunned_comparison(file_path=_BASE_DIR + expected_file,
                                          str_target=str_result))

    def test_node(self):
        # Absolute-IRI node selector.
        self._check_shape_map("<http://example.org/Jimmy>@<Person>",
                              "a_node.shex")

    def test_prefixed_node(self):
        # Prefixed-name selector; must match the absolute-IRI output.
        self._check_shape_map("ex:Jimmy@<Person>", "a_node.shex")

    def test_focus(self):
        # FOCUS triple-pattern selector on rdf:type.
        self._check_shape_map("{FOCUS a foaf:Person}@<Person>",
                              "focus_nodes.shex")

    def test_focus_wildcard(self):
        # FOCUS selector with a wildcard object.
        self._check_shape_map("{FOCUS foaf:name _}@<WithName>",
                              "focus_and_wildcard.shex")

    def test_sparql_selector(self):
        # SPARQL query selector; equivalent to the FOCUS rdf:type test.
        self._check_shape_map(
            "SPARQL \"select ?p where { ?p a foaf:Person }\"@<Person>",
            "focus_nodes.shex")

    def test_several_shapemap_items(self):
        # Two shape-map items separated by a newline.
        self._check_shape_map(
            "{FOCUS a foaf:Person}@<Person>\n{FOCUS a foaf:Document}@<Document>",
            "several_shm_items.shex")
|
[
"danifdezalvarez@gmail.com"
] |
danifdezalvarez@gmail.com
|
df2f8a2cddaf79d3a1bcaed210fca4b55266aba4
|
1ecd2e7bebfc9ffbb01c09c88517b98841166abd
|
/Group Assignment 2/Group Assignment 2.py
|
49d5021e059365828dfa7b514f8b1796ef9ff21b
|
[
"MIT"
] |
permissive
|
brandonlee503/Algorithms-Assignments
|
4a375c704646396c8303d501fafc4e7cfdc1c4c5
|
4dee06bf4a675511db2269fa2e2c794dbaf3fbcc
|
refs/heads/master
| 2021-01-20T06:31:28.184357
| 2015-12-01T06:06:31
| 2015-12-01T06:06:31
| 43,657,395
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,898
|
py
|
import numpy as np
import sys
# Initialize read/write files: input file named on the command line,
# output file fixed as best-output.txt.
try:
    filename = sys.argv[1]
    inFile = open(filename, "r")
    outFile = open("best-output.txt", "w+")
except:
    print("Error: Must provide valid filename as a command line argument")
    raise
# Parse the input: first two ints are the grid dimensions, the rest are
# the cell values, reshaped into a matrix with numCols columns.
# NOTE(review): numRows is read but unused — the reshape infers row count
# from -1; confirm input files are always consistent.
try:
    inArr = [int(x) for x in inFile.read().split()]
    numRows = inArr[0]
    numCols = inArr[1]
    arr = inArr[2:]
    arr = np.reshape(arr, (-1, numCols))
except:
    print("Error: Invalid array")
    raise
print(arr)
# Main algorithm, takes a 2D numpy array as an argument.
# (Python 2: uses print statements inside the function.)
def mostValuablePath(arr):
    """Return the best path value found and also write it to outFile.

    NOTE(review): the recursion enumerates every up/left path from each
    starting cell (exponential time, no memoization), and the two starting
    loops use range(0, arrWidth) / range(0, arrHeight), so the
    bottom-right corner cell is never used as a start — confirm intended.
    """
    arrHeight = arr.shape[0] - 1
    arrWidth = arr.shape[1] - 1
    intermediateResults = []
    intermediateResultsPath = []
    finalResults = []
    # Takes a "root" and treats every possible option as a tree branch,
    # returning the best path
    def bestPathFromRoot(x, y, curResult):
        intermediateResults.append(curResult)
        curResult = curResult + arr[y, x]
        intermediateResultsPath.append([curResult, [y, x]])
        if x > 0:
            print str(y) + "," + str(x) + ": " + str(arr[y,x])
            bestPathFromRoot(x-1, y, curResult)
        if y > 0:
            bestPathFromRoot(x, y-1, curResult)
        #print intermediateResults
        #print max(intermediateResults)
        print intermediateResultsPath
        return max(intermediateResults)
    # Iterate through bottom row
    for i in range(0, arrWidth):
        # Reset the accumulator for each new starting cell.
        intermediateResults = []
        res = bestPathFromRoot(i, arrHeight, 0)
        finalResults.append(res)
    # Iterate through right column
    for j in range(0, arrHeight):
        intermediateResults = []
        res = bestPathFromRoot(arrWidth, j, 0)
        finalResults.append(res)
    #print intermediateResultsPath[]
    print intermediateResults
    outFile.write(str(max(finalResults)))
    return max(finalResults)
print(mostValuablePath(arr))
|
[
"leebran@onid.oregonstate.edu"
] |
leebran@onid.oregonstate.edu
|
370aad777b941f210c80d12c9d3732f007826792
|
438d98c64fe30602c320370cda5d84b4fd88733c
|
/apps/lotracker/migrations/0002_lot_site_type.py
|
d96cd221b9d49eb13988d2421b8949d5a3b52031
|
[] |
no_license
|
alishermuxtarov/lotracker
|
5d550eaa2558abed1c9629e19ec630bdb2ce39d4
|
1dd6074c64e2742cbe70fd85f56597d12b611489
|
refs/heads/master
| 2023-07-25T10:57:18.393095
| 2021-09-01T15:23:25
| 2021-09-01T15:23:25
| 402,108,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
# Generated by Django 3.0.2 on 2020-01-28 22:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``site_type`` column to the ``Lot`` model.

    Auto-generated by Django.  ``site_type`` is an indexed small integer
    with choices dxarid (0) / exarid (1), defaulting to exarid (1) so
    existing rows migrate without prompting.
    """

    dependencies = [
        ('lotracker', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='lot',
            name='site_type',
            field=models.SmallIntegerField(choices=[(0, 'dxarid'), (1, 'exarid')], db_index=True, default=1),
        ),
    ]
|
[
"alishermuxtarov@gmail.com"
] |
alishermuxtarov@gmail.com
|
9364cc969fff6da2dac4ca1b658984f1e30657cf
|
f7cddc360ae090c16724df372042b17fc0480e53
|
/Bounce_Game.py
|
887349c2df9c7bab13e2ea290ed8048a65a16193
|
[] |
no_license
|
LRBeaver/Udemy_Game
|
ebccf5d7263d194ad692093400daf9442275ab1a
|
2cc55dde0bb08180dd1d2bca518281fbd394713f
|
refs/heads/master
| 2021-01-01T03:58:08.685661
| 2016-04-14T12:47:29
| 2016-04-14T12:47:29
| 56,236,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,520
|
py
|
__author__ = 'lyndsay.beaver'

from tkinter import *
import random
import time

# Main window for the "Bounce" game: fixed-size and always on top.
tk = Tk()
tk.title("Bounce")
tk.resizable(0, 0)  # window cannot be resized in either direction
tk.wm_attributes("-topmost", 1)  # keep the game window above all others
# Drawing surface; bd=0 / highlightthickness=0 remove the canvas border.
canvas = Canvas(tk, width=501, height=501, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
class Ball:
    """The bouncing ball: advances one step per frame, rebounds off the
    walls and the paddle, and flags game over at the bottom edge."""

    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        self.canvas.move(self.id, 245, 100)
        # Pick a random initial horizontal speed from [-3, 3].
        speeds = [-3, -2, -1, 0, 1, 2, 3]
        random.shuffle(speeds)
        self.x = speeds[0]
        self.y = -3  # start moving upward
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False

    def hit_paddle(self, pos):
        """Return True when the ball's box overlaps the paddle's box."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        overlaps_horizontally = pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]
        overlaps_vertically = paddle_pos[1] <= pos[3] <= paddle_pos[3]
        return overlaps_horizontally and overlaps_vertically

    def draw(self):
        """Move the ball one frame and resolve every collision."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)
        if pos[1] <= 0:  # bounced off the top wall
            self.y = 3
        if pos[3] >= self.canvas_height:  # reached the bottom: game over
            self.hit_bottom = True
            canvas.create_text(245, 100, text="Game Over")
        if pos[0] <= 0:  # left wall
            self.x = 3
        if pos[2] >= self.canvas_width:  # right wall
            self.x = -3
        if self.hit_paddle(pos):
            self.y = -3
class Paddle:
    """The player-controlled paddle; arrow keys set its horizontal speed."""

    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 300)
        self.x = 0  # current horizontal speed (px per frame)
        self.canvas_width = self.canvas.winfo_width()
        # Arrow keys steer the paddle left/right.
        self.canvas.bind_all('<KeyPress-Left>', self.turn_left)
        self.canvas.bind_all('<KeyPress-Right>', self.turn_right)

    def draw(self):
        """Advance one frame; halt when either screen edge is reached."""
        self.canvas.move(self.id, self.x, 0)
        pos = self.canvas.coords(self.id)
        touching_left = pos[0] <= 0
        touching_right = pos[2] >= self.canvas_width
        if touching_left or touching_right:
            self.x = 0

    def turn_left(self, evt):
        self.x = -2

    def turn_right(self, evt):
        self.x = 2
paddle = Paddle(canvas, 'blue')
ball = Ball(canvas, paddle, 'red')

# Main animation loop: roughly 100 frames per second.  Once the ball
# falls past the paddle (ball.hit_bottom) the scene stops updating but
# the window stays responsive.
while 1:
    if ball.hit_bottom == False:
        ball.draw()
        paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.01)
|
[
"lrbeaver@gmail.com"
] |
lrbeaver@gmail.com
|
2eae88c0931c5308aff462f6129a717f741e927d
|
7037e16b5ee5f2cdff8f759d4ffcbed4cad3d3f5
|
/files/openingFiles.py
|
79f08e13944655d60ee64f089d2dc2cce86ac572
|
[] |
no_license
|
michalkasiarz/automate-the-boring-stuff-with-python
|
1fe0d3af7c5e57746d2d37aa110a5f1bd45ecf30
|
8fdd4c6945f116c3889000f2ad7357cacdf6ed16
|
refs/heads/master
| 2021-05-17T00:19:31.454433
| 2020-04-03T12:38:12
| 2020-04-03T12:38:12
| 250,532,982
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
import os

# Demonstrates opening files for read, write and append.
# Fix: every file was opened and closed manually, which leaks the handle
# if any statement in between raises; `with` closes deterministically.
# NOTE(review): the absolute paths are hard-coded to one machine's home
# directory — parameterize before reusing this script elsewhere.

# opening files and reading the whole content at once
with open(r"C:\Users\micha\Documents\Training\Hello.txt") as helloFile:
    content = helloFile.read()
# printing file content to the console
print(content)

# readlines() method - returns a list of Strings (one per line)
with open(r"C:\Users\micha\Documents\Training\Hello.txt") as helloFile:
    print(helloFile.readlines())

# opening file in write mode ("w" truncates any existing content)
with open(r"C:\Users\micha\Documents\Training\HelloTwo.txt", "w") as helloFile:
    helloFile.write("Hi, man!\n")
    helloFile.write("Hi, man!\n")
    helloFile.write("Hi, man!\n")

# another example with write mode
with open("bacon.txt", "w") as baconFile:
    print("Saving data into " + str(os.getcwd()))
    baconFile.write("Bacon is not a vegetable.")

# append mode ("a" keeps existing content and writes at the end)
with open("bacon.txt", "a") as baconFile:
    print("Saving data into " + str(os.getcwd()))
    baconFile.write("Bacon is delicious!")
|
[
"michal.kasiarz@post.com"
] |
michal.kasiarz@post.com
|
d6eedd18bdb612e2388f9af8c6da6d95d9fdc575
|
b27cb13b6a543aa1d1727df455edd0224f31d837
|
/MinMaxPlayer.py
|
9793976447e604c474a6fe7636a0e80e3bb93959
|
[] |
no_license
|
esh4/TicTacToeRevisited
|
16cc74f5c72ea7fb88b2b6295ed2768a11231ae6
|
12a532b2fd89c69a7005b0f5b471a587864323be
|
refs/heads/master
| 2020-05-29T20:42:42.784486
| 2019-06-02T07:12:19
| 2019-06-02T07:12:19
| 189,360,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,206
|
py
|
from Player import Player
from inspect import getouterframes, currentframe
from GameBoard import GameBoard
class MinMaxPlayer(Player):
    """A tic-tac-toe player that chooses its move with minimax search."""

    def __init__(self, name):
        super().__init__(name)
        self.mark = 'O'  # this player is always 'O' (the maximizer)

    def take_turn(self, board):
        """Return {cell: mark} for the minimax-optimal move."""
        return {self.calculate_best_move(board): self.mark}

    def calculate_best_move(self, board: GameBoard):
        return self.score_move(board)[0]

    def score_move(self, board: GameBoard, my_team_index=0, recursion_level=0):
        """Return (move, score) for the side to move on `board`.

        Team index 0 ('O') maximizes the score, team index 1 ('X')
        minimizes it.  Wins closer to the root score higher: the
        recursion depth is subtracted from (or added to) +/-100.
        """
        teams = ['O', 'X']
        game_over, winner = board.is_game_over()
        if game_over:
            # Terminal position: there is no move, only a score.
            if winner == 'O':
                return None, 100 - recursion_level
            if winner == 'X':
                return None, -100 + recursion_level
            return None, 0  # draw

        # Non-terminal: score each child position recursively.
        move_scores = []
        for move in self.find_available_moves(board):
            child_board = board.insert_game_piece({move: teams[my_team_index]})
            opponent = abs(my_team_index - 1)
            score = self.score_move(child_board,
                                    my_team_index=opponent,
                                    recursion_level=recursion_level + 1)[1]
            move_scores.append((move, score))

        if recursion_level == 0:
            # Debug output for the top-level decision only.
            print(move_scores)
            print(move_scores[0])

        # max()/min() keep the first occurrence on ties, matching the
        # original strict >/< comparison loop.
        if my_team_index == 0:  # maximizing
            return max(move_scores, key=lambda pair: pair[1])
        if my_team_index == 1:  # minimizing
            return min(move_scores, key=lambda pair: pair[1])
        return move_scores[0]

    def find_available_moves(self, board: GameBoard):
        """Return every (row, col) cell that is still empty."""
        return [(row, col)
                for col in range(board.size[0])
                for row in range(board.size[0])
                if board.get_board()[row][col] == ' ']
|
[
"eshel.sinclair@gmail.com"
] |
eshel.sinclair@gmail.com
|
cc4a055133966cef99cd6c84d82bc1cddacd4b3b
|
0181618f60c66b124eb57d1b8fe5a63b69e28f67
|
/Conference/migrations/0008_auto_20210502_1131.py
|
f326e0f0a9be6e559becc1a83eeb5240411f7643
|
[] |
no_license
|
Ronaldrani21/Event
|
eeb52936ee04c25916885c8f20acf195cab87f60
|
fa55e7f3bd956b887cb172175d46eb782b0bc044
|
refs/heads/master
| 2023-05-04T14:05:20.725560
| 2021-05-24T16:39:46
| 2021-05-24T16:39:46
| 363,681,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
# Generated by Django 3.1.7 on 2021-05-02 18:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional Address, City and program fields to RegistrationForm.

    Auto-generated by Django.  All three columns allow blank values so
    existing rows migrate without requiring a default prompt.
    """

    dependencies = [
        ('Conference', '0007_auto_20210502_1116'),
    ]

    operations = [
        migrations.AddField(
            model_name='registrationform',
            name='Address',
            field=models.CharField(blank=True, max_length=400, null=True),
        ),
        migrations.AddField(
            model_name='registrationform',
            name='City',
            field=models.CharField(blank=True, max_length=600),
        ),
        migrations.AddField(
            model_name='registrationform',
            name='program',
            field=models.CharField(blank=True, max_length=400, null=True),
        ),
    ]
|
[
"ronaldrani72@gmail.com"
] |
ronaldrani72@gmail.com
|
2bfe56446043b1d834e53efbeb0f640a27694bd2
|
95d32a98d0715816fd763c6df356069d91d74f33
|
/088.py
|
e79d78627a877069876425669e8c9e792ba9fd75
|
[] |
no_license
|
jod35/coding-challenges
|
1f65d08d92c143004f44eafd4922ec0dcb652a1f
|
21cfa2853dac70055d2b20155e03dff1c235ee02
|
refs/heads/master
| 2022-12-14T22:31:37.344450
| 2020-09-18T19:47:51
| 2020-09-18T19:47:51
| 291,939,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
# Sales figures per salesperson, broken down by region (N/S/E/W).
sales_records = {
    "John": {"N": 3056, "S": 8465, "E": 2694, "W": 2964},
    "Tom": {"N": 4832, "S": 6786, "E": 4737, "W": 3612},
    "Anne": {"N": 3904, "S": 4802, "E": 5820, "W": 1859},
    "Fiona": {"N": 3904, "S": 3645, "E": 8821, "W": 2451}
}

# Fix: the prompts contained typos ("datavyou", "fornthis").
name = input("Enter a name whose data you would like to view: ")
print(sales_records[name])
region = input("Enter a region out of N,E,W,S: ")
print(sales_records[name][region])
option = input("Enter 'y' to change item in the column or 'n' to stop: ")
if option == "y":
    # NOTE(review): the replacement is stored as the raw input string,
    # not an int — any later numeric use would need int(value); confirm
    # whether that is intended before converting.
    value = input("Enter the new value for this column: ")
    sales_records[name][region] = value
    print(f"The new records are {sales_records[name]}")
|
[
"jodestrevin@gmail.com"
] |
jodestrevin@gmail.com
|
37199a230ddb408cf68ecd267319ed3c6880457e
|
12a18fd3ec8103a49de6dd0cc4cb4208a05092e5
|
/env/bin/easy_install
|
bb68c12434f288a721e62f002ba86993b691110d
|
[] |
no_license
|
LucasBarletta/FrontTemporario
|
8bbbc6f765eb8b1d32025718a0f14624fefdd046
|
ac4a5c9be09bcd169bf6d54d7692d88e2cc72804
|
refs/heads/master
| 2023-01-20T22:30:40.802508
| 2019-10-25T18:54:08
| 2019-10-25T18:54:08
| 217,597,121
| 0
| 0
| null | 2023-01-04T12:42:58
| 2019-10-25T18:56:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 279
|
#!/home/ubuntu/Downloads/Passa_pra_Frente-master/env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated setuptools console-script wrapper for ``easy_install``.
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    # Strip a trailing "-script.pyw"/".exe" suffix from argv[0] so the
    # command reports its own name consistently across platforms.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"lubarlettamunhoz@gmail.com"
] |
lubarlettamunhoz@gmail.com
|
|
fb6be4f95db9881ac7bf6895a4874398b79461a5
|
64ce1850c481d5ce3a7515d6220d18566f603998
|
/MegaRest/IProtocol.py
|
6b8d781443923690c9d607fe1e04e3ec1366a38f
|
[] |
no_license
|
mvleandro/desafiohu1
|
6b70a956f7a6ab055aad7c91eb5becffa238c571
|
bd94a43e3f90f0495864e1be65e8f4cdb31d4352
|
refs/heads/master
| 2020-12-29T00:56:12.104209
| 2015-08-03T00:54:25
| 2015-08-03T00:54:25
| 39,531,632
| 0
| 1
| null | 2017-11-11T17:05:53
| 2015-07-22T21:38:44
|
CSS
|
UTF-8
|
Python
| false
| false
| 805
|
py
|
__author__ = 'mvleandro'
from abc import ABCMeta, abstractmethod
class IProtocol(metaclass=ABCMeta):
    """Abstract base class for request/reply protocol handlers.

    Concrete subclasses must implement :meth:`request`, :meth:`reply`
    and :meth:`log`.  :meth:`run` drives one complete exchange and then
    closes the client connection.

    Fix: the original declared ``__metaclass__ = ABCMeta``, which is
    Python 2 syntax and is silently ignored on Python 3, so the
    ``@abstractmethod`` markers were never enforced.  Declaring
    ``metaclass=ABCMeta`` in the class header restores the intended
    abstract-class behaviour.
    """

    # Class-level defaults; overwritten per instance by config()/run().
    action = ''
    data = ''              # raw buffer received from the client
    client = tuple()       # peer address, e.g. a (host, port) pair
    connection = object()  # socket-like object exposing close()

    @abstractmethod
    def request(self):
        """Parse/handle the incoming request held in ``self.data``."""

    @abstractmethod
    def reply(self):
        """Send the response back over ``self.connection``."""

    @abstractmethod
    def log(self):
        """Record the exchange."""

    def __init__(self):
        super(IProtocol, self).__init__()

    def config(self, connection, client, buffer_data):
        """Store the connection, the peer address and the received buffer."""
        self.data = buffer_data
        self.client = client
        self.connection = connection

    def run(self, connection, client, buffer_data):
        """Handle one full exchange, then close the connection.

        Reuses config() instead of duplicating its three assignments.
        """
        self.config(connection, client, buffer_data)
        self.request()
        self.reply()
        self.connection.close()
|
[
"mvleandro@gmail.com"
] |
mvleandro@gmail.com
|
c48d40beb14d7f2e88db8a78df0dc2265ba00d1c
|
66fda6586a902f8043b1f5e9532699babc7b591a
|
/lib_openshift/models/v1beta1_horizontal_pod_autoscaler.py
|
a1482453514234a535ecb09168dbe187eec0dfbe
|
[
"Apache-2.0"
] |
permissive
|
chouseknecht/lib_openshift
|
86eff74b4659f05dfbab1f07d2d7f42b21e2252d
|
02b0e4348631e088e72a982a55c214b30a4ab9d9
|
refs/heads/master
| 2020-12-11T05:23:17.081794
| 2016-07-28T20:15:39
| 2016-07-28T20:15:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,794
|
py
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1HorizontalPodAutoscaler(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Review fixes applied: __eq__ now tolerates comparison with foreign
    types, and to_dict() uses dict.items() instead of six.iteritems()
    (equivalent on Python 3).
    """

    # REST operation descriptors consumed by the lib_openshift client layer.
    operations = [
        {
            'class': 'ApisExtensionsV1beta1',
            'type': 'create',
            'method': 'create_horizontalpodautoscaler',
            'namespaced': False
        },
        {
            'class': 'ApisExtensionsV1beta1',
            'type': 'create',
            'method': 'create_namespaced_horizontalpodautoscaler',
            'namespaced': True
        },
        {
            'class': 'ApisExtensionsV1beta1',
            'type': 'update',
            'method': 'replace_namespaced_horizontalpodautoscaler',
            'namespaced': True
        },
        {
            'class': 'ApisExtensionsV1beta1',
            'type': 'delete',
            'method': 'delete_namespaced_horizontalpodautoscaler',
            'namespaced': True
        },
        {
            'class': 'ApisExtensionsV1beta1',
            'type': 'read',
            'method': 'get_namespaced_horizontalpodautoscaler',
            'namespaced': True
        },
    ]

    def __init__(self, kind=None, api_version=None, metadata=None, spec=None, status=None):
        """
        V1beta1HorizontalPodAutoscaler - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Maps attribute name -> declared swagger type (used by to_dict).
        self.swagger_types = {
            'kind': 'str',
            'api_version': 'str',
            'metadata': 'V1ObjectMeta',
            'spec': 'V1beta1HorizontalPodAutoscalerSpec',
            'status': 'V1beta1HorizontalPodAutoscalerStatus'
        }

        # Maps attribute name -> JSON key in the API wire format.
        self.attribute_map = {
            'kind': 'kind',
            'api_version': 'apiVersion',
            'metadata': 'metadata',
            'spec': 'spec',
            'status': 'status'
        }

        self._kind = kind
        self._api_version = api_version
        self._metadata = metadata
        self._spec = spec
        self._status = status

    @property
    def kind(self):
        """
        Gets the kind of this V1beta1HorizontalPodAutoscaler.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds

        :return: The kind of this V1beta1HorizontalPodAutoscaler.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1beta1HorizontalPodAutoscaler.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds

        :param kind: The kind of this V1beta1HorizontalPodAutoscaler.
        :type: str
        """
        self._kind = kind

    @property
    def api_version(self):
        """
        Gets the api_version of this V1beta1HorizontalPodAutoscaler.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#resources

        :return: The api_version of this V1beta1HorizontalPodAutoscaler.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1beta1HorizontalPodAutoscaler.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#resources

        :param api_version: The api_version of this V1beta1HorizontalPodAutoscaler.
        :type: str
        """
        self._api_version = api_version

    @property
    def metadata(self):
        """
        Gets the metadata of this V1beta1HorizontalPodAutoscaler.
        Standard object metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata

        :return: The metadata of this V1beta1HorizontalPodAutoscaler.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1beta1HorizontalPodAutoscaler.
        Standard object metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata

        :param metadata: The metadata of this V1beta1HorizontalPodAutoscaler.
        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def spec(self):
        """
        Gets the spec of this V1beta1HorizontalPodAutoscaler.
        behaviour of autoscaler. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status.

        :return: The spec of this V1beta1HorizontalPodAutoscaler.
        :rtype: V1beta1HorizontalPodAutoscalerSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """
        Sets the spec of this V1beta1HorizontalPodAutoscaler.
        behaviour of autoscaler. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status.

        :param spec: The spec of this V1beta1HorizontalPodAutoscaler.
        :type: V1beta1HorizontalPodAutoscalerSpec
        """
        self._spec = spec

    @property
    def status(self):
        """
        Gets the status of this V1beta1HorizontalPodAutoscaler.
        current information about the autoscaler.

        :return: The status of this V1beta1HorizontalPodAutoscaler.
        :rtype: V1beta1HorizontalPodAutoscalerStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """
        Sets the status of this V1beta1HorizontalPodAutoscaler.
        current information about the autoscaler.

        :param status: The status of this V1beta1HorizontalPodAutoscaler.
        :type: V1beta1HorizontalPodAutoscalerStatus
        """
        self._status = status

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Fix: dict.items() replaces six.iteritems() -- identical
        # behaviour on Python 3, no third-party dependency needed here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Fix: the original accessed other.__dict__ unconditionally and
        # raised AttributeError when compared with objects that have no
        # __dict__ (ints, strings, None, ...).  Foreign types now simply
        # compare unequal.
        if not isinstance(other, V1beta1HorizontalPodAutoscaler):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"jdetiber@redhat.com"
] |
jdetiber@redhat.com
|
dd830a9bc53b903c706b7302302a1779a2f3eeec
|
ade271a57663f04ceff4d57e67c9315cd7069f9d
|
/migrations/versions/bd8d579d40d5_.py
|
8591e5f320dd8d3d0a906d537871d7866ba9babb
|
[] |
no_license
|
shreyaasridhar/TodoApplication
|
95290d0a56b62d9810253fa3ce000758dc245bc3
|
da2adf5063059510376df8517735d55d56f4b9bb
|
refs/heads/master
| 2022-06-17T23:48:07.333898
| 2020-05-10T16:26:29
| 2020-05-10T16:26:29
| 261,658,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
"""empty message
Revision ID: bd8d579d40d5
Revises:
Create Date: 2020-05-07 04:57:15.245093
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bd8d579d40d5'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``todos`` table (integer PK, non-null description)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('todos',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('description', sa.String(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``todos`` table, reversing ``upgrade()``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('todos')
    # ### end Alembic commands ###
|
[
"shreyanju@gmail.com"
] |
shreyanju@gmail.com
|
81422ecd3aa4b76f78a73d2d066ee2f67a5ea59e
|
12ddac230bba06fafb34e68924a40ff201423763
|
/ghost/GAME.py
|
6db710e6adebb0fda0c13ff0eed2532179d826eb
|
[
"MIT"
] |
permissive
|
HesselTjeerdsma/PacmanFInder
|
4fc4ca66614138b7599171b102250c5b54d93933
|
efb1fbb0a9d39626ab0fc46732ba21b260d942b6
|
refs/heads/master
| 2022-11-07T17:34:49.589109
| 2017-12-04T12:41:07
| 2017-12-04T12:41:07
| 110,368,241
| 0
| 1
|
MIT
| 2022-10-28T11:36:25
| 2017-11-11T18:07:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,512
|
py
|
from PACMAN import *
from SERVER_LIB import *
from POZYX import *
from time import sleep
import os
import json
import socket
import _thread
import pypozyx
if __name__ == "__main__":
    # Our player name
    GAME.set_name("BIERoT");
    #GAME.set_name(fish)
    # Start HTTP server
    Web = WebServer(ADDR,PORT);
    GAME.set_Web(Web)
    # Try to get serial com port -- the first detected serial device is
    # assumed to be the Pozyx tag.  On failure the game still starts,
    # just without positioning hardware.
    try:
        serial_port = get_serial_ports()[0].device
        #serial_port = "/dev/ttyACM0"
        remote_id = None
    except:
        print("No com port")
        serial_port = ""
        pass
    # Anchor data (necessary data for calibration)
    # The triple-quoted string below keeps two earlier anchor layouts
    # around for reference; it is a no-op expression at runtime.
    """
    anchors = [DeviceCoordinates(0x6847, 1, Coordinates(2, 2050, 2499)),
               DeviceCoordinates(0x6877, 1, Coordinates(2819, 22828, 2595)),
               DeviceCoordinates(0x6170, 1, Coordinates(13745, 2, 2621)),
               DeviceCoordinates(0x6169, 1, Coordinates(19923, 27836, 2655)),
               DeviceCoordinates(0x682d, 1, Coordinates(31750, 2010, 2636)),
               DeviceCoordinates(0x6147, 1, Coordinates(35166, 2080, 2658)),
               DeviceCoordinates(0x6823, 1, Coordinates(26342, 27810, 2628)),
               DeviceCoordinates(0x614b, 1, Coordinates(34639, 25070, 2692))]

    anchors = [DeviceCoordinates(0x6871, 1, Coordinates(11260, 3070, 4110)),
               DeviceCoordinates(0x6169, 1, Coordinates(11260, 11500, 4110)),
               DeviceCoordinates(0x6844, 1, Coordinates(34364, 2723, 4110)),
               DeviceCoordinates(0x6170, 1, Coordinates(34364, 10372, 4110))]
    """
    # Active anchor layout: four fixed anchors (coordinates presumably in
    # mm, as is Pozyx convention -- TODO confirm).
    anchors = [DeviceCoordinates(0x617E, 1, Coordinates(11260, 2723, 4110)),
               DeviceCoordinates(0x6169, 1, Coordinates(11260, 10172, 4110)),
               DeviceCoordinates(0x6844, 1, Coordinates(34364, 3070, 4110)),
               DeviceCoordinates(0x6170, 1, Coordinates(34364, 11300, 4110))]
               #DeviceCoordinates(0x6847, 1, Coordinates(10629,3918,2939))]

    algorithm = POZYX_POS_ALG_TRACKING # positioning algorithm to use
    dimension = POZYX_2_5D # positioning dimension
    height = 1000 # height of device, required in 2.5D positioning

    #Try connecting to the Pozyx
    try:
        pozyx_com = PozyxSerial(serial_port,debug_trace=True)
        pozyx = Pozyx_Obj(pozyx_com, anchors, algorithm, dimension, height)
        pozyx.setup()
        GAME.set_pozyx(pozyx)
    except Exception as e:
        # Positioning is optional: report the failure and run without it.
        print("Unable to reach Pozyx on\n")
        print(e)
        print("Starting without...")
        #raise SystemExit
    except:
        # Catch-all for non-Exception raises from the Pozyx library.
        print("Exception2")
        #raise SystemExit

    #Serve all POST requests in a thread
    _thread.start_new_thread(Web.run, ())
    #Register to Service using GAME object
    if(Web.register(GAME) == False):
        print("Failed to register")
        raise SystemExit
    GAME.run()
|
[
"md.tuinstra@gmail.com"
] |
md.tuinstra@gmail.com
|
9da52726d92eb4935ff30dec325e71fc4fd40381
|
0ab9e007c7dd11420e671785f584031f47f1504b
|
/assets/src/ba_data/python/ba/_analytics.py
|
a3c25e81b1f640ccbe32f352db98842cffc130c3
|
[
"MIT"
] |
permissive
|
Dliwk/ballistica
|
7872e1e8d0f3a2d92c9a6c211acade3961cbafdd
|
73b18e449838c19c87fb86147a253300836cfe89
|
refs/heads/master
| 2023-07-21T23:28:43.380564
| 2023-01-18T18:40:44
| 2023-01-18T18:40:44
| 254,022,422
| 0
| 0
|
MIT
| 2020-04-09T12:20:04
| 2020-04-08T07:58:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,120
|
py
|
# Released under the MIT License. See LICENSE for details.
#
"""Functionality related to analytics."""
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
if TYPE_CHECKING:
pass
def game_begin_analytics() -> None:
    """Update analytics events for the start of a game.

    Sets the analytics screen for the current session type and bumps
    the matching round-start counters, bucketed by human-player count.
    No-ops (returns early) if a session/activity is not cleanly
    available.
    """
    # pylint: disable=too-many-branches
    # pylint: disable=cyclic-import
    # Session classes are imported here, not at module top, to avoid
    # import cycles (hence the pylint disable above).
    from ba._dualteamsession import DualTeamSession
    from ba._freeforallsession import FreeForAllSession
    from ba._coopsession import CoopSession
    from ba._gameactivity import GameActivity

    activity = _ba.getactivity(False)
    session = _ba.getsession(False)

    # Fail gracefully if we didn't cleanly get a session and game activity.
    if not activity or not session or not isinstance(activity, GameActivity):
        return

    if isinstance(session, CoopSession):
        campaign = session.campaign
        assert campaign is not None
        _ba.set_analytics_screen(
            'Coop Game: '
            + campaign.name
            + ' '
            + campaign.getlevel(_ba.app.coop_session_args['level']).name
        )
        _ba.increment_analytics_count('Co-op round start')
        # Co-op buckets: exact counts 1-3, then "4+".
        if len(activity.players) == 1:
            _ba.increment_analytics_count('Co-op round start 1 human player')
        elif len(activity.players) == 2:
            _ba.increment_analytics_count('Co-op round start 2 human players')
        elif len(activity.players) == 3:
            _ba.increment_analytics_count('Co-op round start 3 human players')
        elif len(activity.players) >= 4:
            _ba.increment_analytics_count('Co-op round start 4+ human players')
    elif isinstance(session, DualTeamSession):
        _ba.set_analytics_screen('Teams Game: ' + activity.getname())
        _ba.increment_analytics_count('Teams round start')
        # Teams buckets: 1, exact counts 2-7, then "8+".
        if len(activity.players) == 1:
            _ba.increment_analytics_count('Teams round start 1 human player')
        elif 1 < len(activity.players) < 8:
            _ba.increment_analytics_count(
                'Teams round start '
                + str(len(activity.players))
                + ' human players'
            )
        elif len(activity.players) >= 8:
            _ba.increment_analytics_count('Teams round start 8+ human players')
    elif isinstance(session, FreeForAllSession):
        _ba.set_analytics_screen('FreeForAll Game: ' + activity.getname())
        _ba.increment_analytics_count('Free-for-all round start')
        # Free-for-all buckets mirror the Teams ones.
        if len(activity.players) == 1:
            _ba.increment_analytics_count(
                'Free-for-all round start 1 human player'
            )
        elif 1 < len(activity.players) < 8:
            _ba.increment_analytics_count(
                'Free-for-all round start '
                + str(len(activity.players))
                + ' human players'
            )
        elif len(activity.players) >= 8:
            _ba.increment_analytics_count(
                'Free-for-all round start 8+ human players'
            )

    # For some analytics tracking on the c layer.
    _ba.reset_game_activity_tracking()
|
[
"ericfroemling@gmail.com"
] |
ericfroemling@gmail.com
|
4b42170da5ff383b63f41803e63cb31e922a1d65
|
9f9082b2d84da1ade9952c829b8ec99e23db2b98
|
/server/fandogh/service/migrations/0002_auto_20180604_1712.py
|
c4663219ffab0aa5a8b0f31af7c44a5fec2bdaae
|
[
"MIT"
] |
permissive
|
RezaHaidari/fandogh
|
384c79fe7eb26e3a7e7f4bf4597e99fa90227921
|
6173ab9dee0e5b9756edf31149aad9af0e0d3564
|
refs/heads/master
| 2020-03-22T22:53:09.004039
| 2018-07-09T11:36:26
| 2018-07-09T11:36:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
# Generated by Django 2.0.4 on 2018-06-04 17:12
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the Service model (and its owner FK) from the service app."""

    dependencies = [
        ('service', '0001_initial'),
    ]

    operations = [
        # The FK field must be removed before the model itself is deleted.
        migrations.RemoveField(
            model_name='service',
            name='owner',
        ),
        migrations.DeleteModel(
            name='Service',
        ),
    ]
|
[
"soroosh.sarabadani@gmail.com"
] |
soroosh.sarabadani@gmail.com
|
e761f5af56824db03b23922f9187f9265a112bfd
|
772f8d041f460f7e720309e6f494898ca7f884b2
|
/chatbot/settings.py
|
926c2bccef6a10b78e2dd414aa72af541c159622
|
[] |
no_license
|
jhonathascesar232/chatbot-com-python-e-django
|
47be582f5336270f6940c80f3304664b3f021c7b
|
26a078ac5a69908f2cf30dfb456ee56c6ead2de8
|
refs/heads/master
| 2020-12-15T16:34:54.942463
| 2019-12-12T05:23:03
| 2019-12-12T05:23:03
| 235,180,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,167
|
py
|
"""
Django settings for chatbot project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v$3vov!+y7ve=p$vy^dp0#la+lkk58!*w$)_6$6te46iro+7(%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'capturas',
'home',
'perguntas',
'usuarios',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chatbot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chatbot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"jhonathascesar232@gmail.com"
] |
jhonathascesar232@gmail.com
|
e0b90753082c999c680e1f29dd17c9d662a4eb71
|
4472e40c53ca3e1df4f9e477a6268133309b7597
|
/src/ensae_teaching_cs/special/einstein_prolog.py
|
72ce4e82ae5d15698400a18b21625248df9deedc
|
[
"MIT"
] |
permissive
|
amoussoubaruch/ensae_teaching_cs
|
289729742608da064f07a79b10cf6cce48de1b51
|
313a6ccb8756dbaa4c52724839b69af8a5f4476e
|
refs/heads/master
| 2021-01-16T19:31:49.734583
| 2016-09-09T08:29:58
| 2016-09-09T08:29:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,691
|
py
|
#-*- coding: utf-8 -*-
"""
@file
@brief This programs solves `Einstein's riddle <http://en.wikipedia.org/wiki/Zebra_Puzzle>`_ ou en
Français `Intégramme <http://fr.wikipedia.org/wiki/Int%C3%A9gramme>`_. The algorithm is based
on logic and its `clause <http://en.wikipedia.org/wiki/Clause_(logic)>`_.
"""
import copy
#: definition of all possible values (French terms)
#: colors
ttcouleur = ["jaune", "bleu", "rouge", "blanc", "vert"]
#: nationalities
ttnationalite = ["danois", "norvegien", "anglais", "allemand", "suedois"]
#: drinks
ttboisson = ["eau", "the", "lait", "cafe", "biere"]
#: smoke brand
ttcigare = ["Dunhill", "Blend", "Pall Mall", "Prince", "Bluemaster"]
#: animal
ttanimal = ["chats", "cheval", "oiseaux", "poisson", "chiens"]
#: all possibles values
ensemble = [ttcouleur, ttnationalite, ttboisson, ttcigare, ttanimal]
def permutation(nb):
    """
    Compute all permutations of the set [[ 0, 1, ..., nb-1 ]].

    Example for 3:

    @code
    [[0, 1, 2], [0, 2, 1], [1, 0, 2],
     [1, 2, 0], [2, 0, 1], [2, 1, 0]]
    @endcode

    @param      nb      permutation over the set [[0..nb-1]]
    @return             list of all possible permutations, in
                        lexicographic order

    @warning This method can be very long if nb is high (>10).

    The original implementation enumerated all ``nb ** nb`` tuples with a
    hand-rolled odometer and filtered out duplicates; it is replaced by
    :func:`itertools.permutations`, which yields the same permutations in
    the same lexicographic order directly (and also handles ``nb == 0``,
    where the old code raised ``IndexError``).
    """
    from itertools import permutations
    return [list(p) for p in permutations(range(nb))]
class Rule:
    """
    This class defines a constraint of the problem
    or a clause (see http://en.wikipedia.org/wiki/Clause_(logic)).

    There are 5 different types of clauses described by Einstein's enigma,
    each of them implemented by a subclass:
    @ref cl RulePosition, @ref cl RuleEquivalence, @ref cl RuleVoisin,
    @ref cl RuleAvant, @ref cl RuleEnsemble.

    A clause is a list of pairs ``(house_position, (property, category))``.
    """

    def __init__(self):
        """
        constructor
        """
        #: name of the rule
        self.name = None
        #: set of properties the rule constrains
        self.set = None

    def genere(self):
        """
        generates all possible clauses (list of lists)
        (l [0][0] et l [0][1]) ou (l [1][0] et l [1][1]),
        a clause is a triplet of
        (person, (property, category) )
        """
        return None

    def __str__(self):
        """
        Returns a truncated, human-readable view of the rule and its clauses.

        BUG FIX: the original returned ``None`` when ``self.name`` was not
        set, which makes ``str(rule)`` raise ``TypeError`` (``__str__`` must
        return a string); an empty string is returned instead.
        """
        if self.name is not None:
            if "clauses" not in self.__dict__:
                # clauses not cached yet: generate them on the fly
                s = self.name + " \t: "
                a = self.genere()
                for al in a:
                    st = "\n ou " + str(al)
                    if len(st) > 260:
                        st = st[:260] + "..."
                    s += st
                    if len(s) > 1000:
                        break
                return s
            else:
                # use the cached clauses (set by Enigma.__init__)
                s = self.name + " \t: " + str(self.set)
                for al in self.clauses:
                    st = "\n ou " + str(al)
                    if len(st) > 260:
                        st = st[:260] + "..."
                    s += st
                    if len(s) > 1000:
                        break
                return s
        else:
            # unnamed rule: nothing meaningful to display
            return ""

    def combine(self, cl1, cl2):
        """
        combine two clauses, two cases :

        1. nothing in common or everything in common --> concatenation of clauses
        2. a position or a property in common --> null clause

        @param      cl1     clause 1 (list of ``(position, (property, category))``)
        @param      cl2     clause 2
        @return             the merged clause, or ``None`` when incompatible
        """
        # incompatibility checks
        for p1 in cl1:
            for p2 in cl2:
                if p1[1][0] == p2[1][0]:  # same property
                    if p1[0] != p2[0]:    # but different positions
                        return None
                if p1[0] == p2[0]:        # same person (house)
                    if p1[1][1] == p2[1][1] and p1[1][0] != p2[1][0]:
                        # same category but different properties
                        return None
        # compatible: merge while avoiding duplicate pairs
        r = copy.deepcopy(cl1)
        for c in cl2:
            if c not in r:
                r.append(c)
        return r

    def combine_cross_sets(self, set1, set2):
        """
        combines two sets of clauses, keeping only compatible combinations

        @param      set1        set of clauses 1
        @param      set2        set of clauses 2
        @return                 combination (possibly empty list)
        """
        # an empty side acts as the neutral element
        if len(set1) == 0:
            return copy.deepcopy(set2)
        if len(set2) == 0:
            return copy.deepcopy(set1)
        res = []
        for cl1 in set1:
            for cl2 in set2:
                r = self.combine(cl1, cl2)
                if r is not None:
                    res.append(r)
        return res
class RulePosition (Rule):
    """Clause stating that property *p1* occupies a fixed house position."""

    def __init__(self, p1, pos):
        """Store the constrained property and its imposed position."""
        self.set = [p1]
        self.name = "position"
        self.position = pos

    def genere(self):
        """Return the only possible clause: *p1* sits at ``self.position``."""
        prop = self.set[0]
        return [[(self.position, prop)]]
class RuleEquivalence (Rule):
    """Clause stating that properties *p1* and *p2* belong to the same house."""

    def __init__(self, p1, p2):
        """Store the two equivalent properties."""
        self.set = [p1, p2]
        self.name = "equivalence"

    def genere(self):
        """One clause per house: both properties placed at that house."""
        return [[(house, self.set[0]), (house, self.set[1])]
                for house in range(0, 5)]
class RuleVoisin (Rule):
    """Clause stating that properties *p1* and *p2* are in adjacent houses."""

    def __init__(self, p1, p2):
        """Store the two neighboring properties."""
        self.set = [p1, p2]
        self.name = "voisin"

    def genere(self):
        """For each adjacent pair of houses, emit both orderings of p1/p2."""
        clauses = []
        for house in range(0, 4):
            clauses.append([(house, self.set[0]), (house + 1, self.set[1])])
            clauses.append([(house + 1, self.set[0]), (house, self.set[1])])
        return clauses
class RuleAvant (Rule):
    """Clause stating that property *p1* is in a house strictly before *p2*."""

    def __init__(self, p1, p2):
        """Store the ordered pair of properties."""
        self.set = [p1, p2]
        self.name = "avant"

    def genere(self):
        """All pairs (i, j) with i < j: p1 at house i, p2 at house j."""
        return [[(i, self.set[0]), (j, self.set[1])]
                for j in range(1, 5)
                for i in range(0, j)]
class RuleEnsemble (Rule):
    """Clause stating that the five values of a category are a permutation
    over the five houses (each value appears exactly once)."""

    def __init__(self, set, categorie):
        """Tag every value of the category with its category index."""
        self.set = [(s, categorie) for s in set]
        self.name = "ensemble"

    def genere(self):
        """One clause per permutation: house *i* receives value ``perm[i]``."""
        clauses = []
        for perm in permutation(5):
            clauses.append([(house, self.set[perm[house]])
                            for house in range(0, len(perm))])
        return clauses
class Enigma:
    """
    Solves Einstein's riddle by a recursive depth-first exploration of
    compatible clauses.

    The puzzle is encoded as a list of Rule instances (``self.regle``);
    :meth:`solve` searches for a set of 25 clauses (5 houses x 5
    categories) consistent with every rule.
    """

    def __init__(self, display=True):
        """
        we describe the enigma using the classes we defined above

        @param      display     if True, use print to print some information
                                (NOTE(review): currently unused in this class)
        """
        self.regle = []
        # fixed positions: milk in the middle house, Norwegian in the first
        self.regle.append(RulePosition(self.find("lait"), 2))
        self.regle.append(RulePosition(self.find("norvegien"), 0))
        # equivalences: two properties sharing the same house
        self.regle.append(
            RuleEquivalence(
                self.find("Pall Mall"),
                self.find("oiseaux")))
        self.regle.append(
            RuleEquivalence(
                self.find("anglais"),
                self.find("rouge")))
        self.regle.append(
            RuleEquivalence(
                self.find("suedois"),
                self.find("chiens")))
        self.regle.append(
            RuleEquivalence(
                self.find("danois"),
                self.find("the")))
        self.regle.append(
            RuleEquivalence(
                self.find("vert"),
                self.find("cafe")))
        self.regle.append(
            RuleEquivalence(
                self.find("jaune"),
                self.find("Dunhill")))
        self.regle.append(
            RuleEquivalence(
                self.find("biere"),
                self.find("Bluemaster")))
        self.regle.append(
            RuleEquivalence(
                self.find("allemand"),
                self.find("Prince")))
        # neighborhood constraints
        self.regle.append(
            RuleVoisin(
                self.find("Dunhill"),
                self.find("cheval")))
        self.regle.append(
            RuleVoisin(
                self.find("norvegien"),
                self.find("bleu")))
        self.regle.append(RuleVoisin(self.find("Blend"), self.find("eau")))
        self.regle.append(RuleVoisin(self.find("Blend"), self.find("chats")))
        # ordering constraint: green house strictly before the white one
        self.regle.append(RuleAvant(self.find("vert"), self.find("blanc")))
        # each category is a permutation over the five houses
        self.regle.append(RuleEnsemble(ttcouleur, 0))
        self.regle.append(RuleEnsemble(ttnationalite, 1))
        self.regle.append(RuleEnsemble(ttboisson, 2))
        self.regle.append(RuleEnsemble(ttcigare, 3))
        self.regle.append(RuleEnsemble(ttanimal, 4))
        # pre-compute every rule's clause set once, and mark it unused
        for r in self.regle:
            r.clauses = r.genere()
            r.utilise = False
        self.count = 0

    def find(self, p):
        """
        finds a property in the different sets of values (colors,
        nationalities, ...)

        @param      p       property name (e.g. ``"lait"``)
        @return             tuple ``(p, category_index)`` or ``None`` when
                            the property is unknown
        """
        # `ensemble` is the module-level list of the five category lists
        for i in range(0, len(ensemble)):
            if p in ensemble[i]:
                return (p, i)
        return None

    def __str__(self):
        """
        usual: shows the rules while unsolved, or the 5x5 solution grid
        (both as plain text and as an HTML table) once solved
        """
        if "solution" not in self.__dict__ or self.solution is None or len(
                self.solution) == 0:
            if self.count > 0:
                s = "solution impossible apres " + \
                    str(self.count) + " iterations \n"
            else:
                s = ""
            for r in self.regle:
                s += str(r) + "\n"
            return s
        else:
            sr = ["solution, iteration " + str(self.count)]
            # matrix[house][category] = padded property name
            matrix = [list(" " * 5) for _ in range(0, 5)]
            for row in self.solution:
                i = row[0]
                j = row[1][1]
                s = row[1][0]
                matrix[i][j] = s + " " * (10 - len(s))
            for row in matrix:
                sr.append(", ".join(row))
            classic = "\n".join(sr[1:])
            html = classic.replace(",",
                                   "</td><tr>").replace("\n",
                                                        "</td></tr>\n<tr><td>")
            return sr[0] + "\n" + "\n".join([
                classic,
                "<table>",
                "<tr><td>" + html + "</td></tr>",
                "</table>"])

    def solve(self, solution=[], logf=print):
        """
        Solves the enigma by exploring in depth,
        the method is recursive

        @param      solution    [] empty at the beginning, recursively used then
        @param      logf        logging function, called every 10 iterations
        @return                 solution, or ``None`` when this branch is
                                incompatible

        NOTE(review): ``solution=[]`` is a mutable default argument; it is
        never mutated in place here so behavior is correct, but
        ``solution=None`` would be the safer idiom.
        """
        self.count += 1
        if self.count % 10 == 0:
            logf(
                "*",
                self.count,
                " - properties in place : ",
                len(solution) -
                1)
        if len(solution) == 25:
            # we know the solution must contain 25 clauses,
            # if we are here then the problem is solved unless some
            # incompatibility
            for r in self.regle:
                cl = r.combine_cross_sets([solution], r.clauses)
                if cl is None or len(cl) == 0:
                    # the candidate is incompatible with rule r
                    return None
            self.solution = solution
            return solution
        # we are looking for the rule which generates the least possible clauses
        # in order to reduce the number of possibilities as much as possible
        # the research could be represented as a tree, we avoid creating too
        # many paths
        best = None
        rule = None
        for r in self.regle:
            cl = r.combine_cross_sets([solution], r.clauses)
            if cl is None:
                # the candidate is incompatible with rule r
                return None
            # we check rule r is bringing back some results
            for c in cl:
                if len(c) > len(solution):
                    break
            else:
                cl = None  # rule r added nothing new
            if cl is not None and (best is None or len(best) > len(cl)):
                best = cl
                rule = r
        if best is None:
            # no rule can extend the candidate: dead end
            return None
        rule.utilise = True
        # we test all candidate clauses in depth-first order
        for c in best:
            r = self.solve(c, logf=logf)
            if r is not None:
                # we found the solution
                return r
        rule.utilise = False  # impossible
        return None
if __name__ == "__main__":
    # build the puzzle, show the rules, solve it, then show the solution grid
    en = Enigma()
    print(en)
    print("-----------------------------\n")
    en.solve()
    print("-----------------------------\n")
    print(en)
|
[
"xavier.dupre@ensae.fr"
] |
xavier.dupre@ensae.fr
|
9b605c3191e034f487afcc1a57fce6a43125702b
|
d93638facfff67c79e7f446973dcfdb3a041e096
|
/week2/hrwros_ws/build/ur_description/catkin_generated/pkg.installspace.context.pc.py
|
a0b8d15bec21f31dbd7731805100ca81d3fc2942
|
[] |
no_license
|
MarzanShuvo/ROS
|
9f5b10826530bec0b63c0485d6d8e91c295fb1d7
|
5ec53e56d7e2437a078ee1c40c369b9d79669559
|
refs/heads/master
| 2021-05-21T23:26:49.734472
| 2020-04-16T09:24:37
| 2020-04-16T09:24:37
| 252,854,763
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated catkin pkg-config context for the `ur_description`
# package — regenerate via the build system rather than editing by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur_description"
PROJECT_SPACE_DIR = "/home/marzan/hrwros_ws/install"
PROJECT_VERSION = "1.2.7"
|
[
"marzanalam3@gmail.com"
] |
marzanalam3@gmail.com
|
257d15ffc6e4081017ba0fe7004899c110edcd28
|
eea690c8eea02f9d5d1145f79159952d8187d7be
|
/Projet/sonde3.py
|
d92be1c70b7ba54af7e44077900bb75a0c2a849d
|
[] |
no_license
|
qLex7/Administration-systeme
|
3ce7d121842dd16769ef2ea5c1aabba2d814794c
|
6046f58c85a3f426f3aff7ffa217ba6e07b51c4e
|
refs/heads/master
| 2020-03-13T21:39:56.155971
| 2018-04-30T12:02:22
| 2018-04-30T12:02:22
| 131,301,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,246
|
py
|
#!/usr/bin/python
# -*-coding: Latin-1 -*-
import os, sys
import subprocess as sp
import time
def sonde3():
    """Collect basic system health metrics by shelling out to standard Unix
    tools and write them, one per line, to ``ressondee1.txt``.

    Collected values, in order: hostname, CPU usage (%), RAM usage (%),
    swap usage (%), disk usage (%), number of logged-in users, number of
    processes, full date, and hour of day.

    BUG FIX: the original ended with ``mon_fichier.close`` (missing call
    parentheses), so the output file was never explicitly closed; a
    ``with`` block now guarantees the handle is flushed and released.
    """
    data = list()
    # ---------------- machine name ----------------
    p = sp.Popen(["hostname"], stdout=sp.PIPE, stdin=sp.PIPE)
    data.append(p.stdout.readline())
    # ---------------- CPU used (%) ----------------
    cmd = "top -b -n1 | grep 'Cpu' | cut -d' ' -f11 | sed 's/,/./g'"
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=None, shell=True)
    cpuDispo = float(p.stdout.readline())
    cpuUtil = 100 - float(cpuDispo)
    data.append("%.2f" % cpuUtil)
    # ---------------- total memory ----------------
    cmd = "grep MemTotal /proc/meminfo | cut -d: -f2 | sed 's/kB//'g"
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=None, shell=True)
    MemTot = p.stdout.readline()
    # ---------------- free memory ----------------
    cmd = "grep MemFree /proc/meminfo | cut -d: -f2 | sed 's/kB//'g"
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=None, shell=True)
    MemDispo = p.stdout.readline()
    # ---------------- RAM used (%) ----------------
    MemFree = float(MemDispo) / float(MemTot) * 100
    MemUse = 100 - MemFree
    data.append("%.2f" % MemUse)
    # ---------------- total swap ----------------
    cmd = "grep SwapTotal /proc/meminfo | cut -d: -f2 | sed 's/kB//g' | sed 's/ //g'"
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=None, shell=True)
    SwapTot = p.stdout.readline()
    # ---------------- free swap ----------------
    cmd = "grep SwapFree /proc/meminfo | cut -d: -f2 | sed 's/kB//g' | sed 's/ //g'"
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=None, shell=True)
    SwapDispo = p.stdout.readline()
    # ---------------- swap used (%) ----------------
    SwapFree = float(SwapDispo) / float(SwapTot) * 100
    SwapUse = 100 - SwapFree
    data.append("%.2f" % SwapUse)
    # ---------------- disk usage (%) ----------------
    cmd = "df $PWD | awk '/[0-9]%/{print $(NF-1)}' | sed 's/%//g'"
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=None, shell=True)
    data.append(p.stdout.readline())
    # ---------------- number of connected users ----------------
    cmd = "who -q | cut -d: -f2 | tail -1 | sed 's/ //g'"
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=None, shell=True)
    data.append(p.stdout.readline())
    # ---------------- number of processes ----------------
    cmd = "ps -elf | wc -l"
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=None, shell=True)
    data.append(p.stdout.readline())
    # ---------------- date ----------------
    p = sp.Popen(["date", "+%Y-%m-%d:%H:%M:%S"], stdout=sp.PIPE, stdin=sp.PIPE)
    data.append(p.stdout.readline())
    # ---------------- hour ----------------
    p = sp.Popen(["date", "+%H"], stdout=sp.PIPE, stdin=sp.PIPE)
    data.append(p.stdout.readline())
    # write every collected value on its own line
    with open("ressondee1.txt", "w") as mon_fichier:
        for item in data:
            mon_fichier.write("%s\n" % item)
|
[
"noreply@github.com"
] |
qLex7.noreply@github.com
|
cb92f14781465954db7435d74a0554aab3ad2cee
|
6632037358152602fab2c79abde181369bfeeae9
|
/Python Projects 2020/HankeyAH7.py
|
3bf92b13a39dde7dfa550c884032a3187270378c
|
[] |
no_license
|
abbihankey/Software-Development-Portfolio
|
d6a1c0df2ac6dd8ab2a8f874d368966a73936782
|
6bafd65b0689a6f480e5142c49eaa21e7366289a
|
refs/heads/main
| 2023-02-27T03:39:32.299641
| 2021-01-26T23:24:49
| 2021-01-26T23:24:49
| 332,872,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,515
|
py
|
# Unit prices per spindle for each media type.
CD_RW_PRICE = 16.50
DVD_RW_PRICE = 21.75
def main():
    """Read customer orders from disks.txt and print an invoice report.

    Each record in the file is three lines: customer name, media code
    ('c'/'C' for CD-RW, 'd'/'D' for DVD-RW), and spindle count.
    """
    cd_customers_counter = 0
    dvd_customers_counter = 0
    total_payment_due = 0
    print("Customer Name \tCode\tSpindle\tPayment Due")
    print('')
    try:
        # NOTE(review): hard-coded absolute path — confirm this is intended
        infile = open ("/users/abbih/desktop/disks.txt",'r')
        customer_name = infile.readline()
        # an empty readline() result signals end of file
        while customer_name != '':
            customer_name = customer_name.rstrip('\n')
            print(customer_name, end='\t')
            code = infile.readline()
            code = code.rstrip('\n')
            print(code, end='\t')
            spindles = infile.readline()
            spindles = int(spindles)
            print(format(spindles, '3.0f'), end='\t')
            # price and per-type counters depend on the media code
            if code == "c" or code == "C":
                payment_due = spindles * CD_RW_PRICE
                cd_customers_counter += 1
            elif code == "d" or code == "D":
                payment_due = spindles * DVD_RW_PRICE
                dvd_customers_counter += 1
            else:
                # unrecognized code: zero charge, reported below
                payment_due = 0
            total_payment_due += payment_due
            if payment_due == 0:
                print('invalid code')
            else:
                print('$', format(payment_due, '7,.2f'))
            customer_name = infile.readline()
        infile.close()
        print('')
        print('Total customers that purchased CD-RWs: ', cd_customers_counter)
        # NOTE(review): this prints the DVD *customer* count, despite the label
        # saying "spindles sold" — confirm which value is intended
        print('Total number of DVD spindles sold: ',dvd_customers_counter)
        print('')
        print('Total amounts of payment due: ', end='')
        print('$', format(total_payment_due, ',.2f'), sep='')
    except IOError:
        print('an error occured trying to open or read disks.txt')
main()
|
[
"67207650+abbihankey@users.noreply.github.com"
] |
67207650+abbihankey@users.noreply.github.com
|
033bcfd7db1fedf6e783f877671237695994b969
|
8217a1afb688085e647194d6858cd8fd6a61fe6c
|
/apps/bq/__init__.py
|
bb57c0744c3eeb8a6679e9c31425b39a01324a90
|
[] |
no_license
|
subin-shrestha/serp
|
a351b6e6d2150cf4776e40bbe836b77faec20d8f
|
12c3afb89fcfdcbd12bcccb4d8800451d1ba90b3
|
refs/heads/master
| 2023-06-11T17:03:43.150090
| 2021-07-11T17:12:20
| 2021-07-11T17:12:20
| 384,876,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27
|
py
|
from .base import BigQuery
|
[
"developer.shrestha@gmail.com"
] |
developer.shrestha@gmail.com
|
3d32ebb00bb6348dc738fcf15c63cb0e9e5a4f2f
|
ef0a3d92fcf3c9594164dd0403060fb4b6f8906c
|
/public/DemoFilterStudy/python/flask_app.py
|
be79699151e99af4c607a9097dda8c12590b8815
|
[
"MIT"
] |
permissive
|
npmcdn-to-unpkg-bot/showroom
|
a009574d7e5348893cd7834242226b62a7541638
|
82b510bb5f0b90e3812c5cef74a8641f4c63ba09
|
refs/heads/master
| 2020-12-31T03:16:49.004089
| 2016-09-02T03:26:46
| 2016-09-02T03:26:46
| 67,319,361
| 0
| 0
| null | 2016-09-04T01:19:39
| 2016-09-04T01:19:39
| null |
UTF-8
|
Python
| false
| false
| 6,913
|
py
|
import flask, tempfile, json
import numpy as np
from numpy.polynomial import Polynomial
import CP
# import skrf as rf
def WriteString2TempFile(text):
    """Spill *text* into a spooled temporary file and rewind it, so the
    caller can immediately read the content back as if from a real file."""
    tmp = tempfile.SpooledTemporaryFile(max_size=10000000, mode='r')
    tmp.write(text)
    tmp.seek(0)
    return tmp
app = flask.Flask(__name__)
# NOTE(review): debug mode enabled — must be disabled for any production use
app.debug = True
@app.route('/', methods=['GET'])
def get_tasks():
    """Simple GET endpoint returning a fixed JSON payload (health check)."""
    return flask.jsonify({'freq': 100})
# The commented block below is an earlier prototype that fetched and parsed
# an s2p (Touchstone) file with scikit-rf; kept for reference.
# fileUrl = 'http://gongfan99.github.io/try.s2p'
# if fileUrl.split('.')[-1].lower() != 's2p':
# return 'It is not a s2p file'
# try:
# resource = urllib.request.urlopen(fileUrl)
# except:
# return 'Cannot retrieve file'
# content = resource.read().decode('utf-8')
# sFile = WriteString2TempFile(content)
# ntwk = rf.Network()
# try:
# ntwk.read_touchstone(sFile)
# except:
# return 's2p file format is not correct'
# sFile.close()
# originalFreq = ntwk.frequency.f;
# originalS21 = ntwk.s[::, 1, 0];
# originalS11 = ntwk.s[::, 0, 0];
# return flask.jsonify({'freq': originalFreq[:].tolist()})
@app.route('/<method>', methods=['POST'])
def get_task(method):
    """Dispatch a POST request to one of the numeric filter-design
    operations, selected by the URL path segment *method*.

    All branches exchange JSON bodies and delegate the heavy computation to
    the project-local ``CP`` module (presumably coupling-matrix / filter
    synthesis code — TODO confirm against CP's own documentation).
    Unknown methods yield HTTP 404.
    """
    if method == "try":
        # echo/debug endpoint: round-trips the JSON body with an extra key
        sFile = WriteString2TempFile("hello sam")
        fileContent = sFile.read()
        sFile.close()
        print(flask.request.is_json)
        print(method, fileContent)
        bodyJson = flask.request.get_json()
        bodyJson['pythonVal'] = 'back from python'
        print(json.dumps(bodyJson, separators = (',', ':')))
        return json.dumps(bodyJson, separators = (',', ':'))
        # ntwk = rf.Network(sFile);
        # originalFreq = ntwk.frequency.f;
        # originalS21 = ntwk.s[::, 1, 0];
        # originalS11 = ntwk.s[::, 0, 0];
    elif method == "SynthesizeFromTranZeros":
        # synthesize polynomial coefficients and a coupling matrix from the
        # requested transmission zeros
        reqJson = flask.request.get_json()
        # print(json.dumps(reqJson, separators = (',', ':')))
        N = reqJson['N']
        returnLoss= reqJson['returnLoss']
        # transmission zeros arrive as [re, im] pairs; rebuild complex values
        rootP = np.array([x[0] + 1j * x[1] for x in reqJson['rootP']])
        epsilon, coefP, coefF, rootE = CP.ChebyshevP2EF(rootP, N, returnLoss)
        coefE = Polynomial.fromroots(rootE).coef
        epsilonE = epsilon
        topology = np.array(reqJson['topology'])
        matrixMethod = 5
        targetMatrix, msg = CP.FPE2MComprehensive(epsilon, epsilonE, coefF, coefP, rootE, topology, refRootP = rootP, method = matrixMethod)
        # complex values are serialized as [re, im] pairs for the client
        resJson = {'epsilon': [epsilon.real, epsilon.imag], 'coefP': [[x.real, x.imag] for x in coefP], 'coefF': [[x.real, x.imag] for x in coefF], 'coefE': [[x.real, x.imag] for x in coefE], 'targetMatrix': targetMatrix.tolist(), 'message': msg}
        return json.dumps(resJson, separators = (',', ':'))
    elif method == "ExtractMatrix":
        # extract a coupling matrix from measured S-parameters
        reqJson = flask.request.get_json()
        # request snapshot for offline debugging
        np.save("tempData3", np.array([reqJson]))
        #print(json.dumps(reqJson, separators = (',', ':')))
        freq = np.array(reqJson['freq']) * 1e6
        # S-parameters arrive in dB + radians; convert to complex linear form
        S21_amp = 10 ** (np.array(reqJson['S21_db']) / 20)
        S21 = S21_amp * (np.cos(np.array(reqJson['S21_angRad'])) + 1j * np.sin(np.array(reqJson['S21_angRad'])))
        S11_amp = 10 ** (np.array(reqJson['S11_db']) / 20)
        S11 = S11_amp * (np.cos(np.array(reqJson['S11_angRad'])) + 1j * np.sin(np.array(reqJson['S11_angRad'])))
        N = reqJson['filterOrder']
        tranZeros = [x[0] + 1j * x[1] for x in reqJson['tranZeros']]
        numZeros = len(tranZeros)
        filterOrder = np.hstack((np.zeros((N, )), 2 * np.ones((numZeros, ))))
        # band edges from center frequency +/- half bandwidth (GHz -> Hz)
        w1 = (reqJson['centerFreq'] - reqJson['bandwidth'] / 2) * 1e9
        w2 = (reqJson['centerFreq'] + reqJson['bandwidth'] / 2) * 1e9
        # print(N, numZeros, filterOrder, w1, w2)
        startFreq = reqJson['captureStartFreqGHz'] * 1e9
        stopFreq = reqJson['captureStopFreqGHz'] * 1e9
        isSymmetric = reqJson['isSymmetric']
        extractMethod = 6
        fc = (w1 + w2) / 4
        epsilon, epsilonE, Qu, coefF, coefP, rootE, port1, port2 = CP.S2FP(freq, S21, S11, filterOrder, w1, w2, fc=fc, method=extractMethod, startFreq=startFreq, stopFreq=stopFreq, isSymmetric=isSymmetric)
        # infinite Q is clamped to a large finite value for JSON serialization
        if Qu == np.inf:
            Qu = 1e9
        # print(Qu)
        topology = np.array(reqJson['topology'])
        matrixMethod = 5
        extractedMatrix, msg = CP.FPE2MComprehensive(epsilon, epsilonE, coefF, coefP, rootE, topology, refRootP = tranZeros, method = matrixMethod)
        targetMatrix = np.array(reqJson['targetMatrix'])
        deviateMatrix = targetMatrix - extractedMatrix
        resJson = {'q': Qu, 'extractedMatrix': extractedMatrix.tolist(), 'deviateMatrix': deviateMatrix.tolist(), 'message': msg}
        return json.dumps(resJson, separators = (',', ':'))
    elif method == "SpaceMappingCalculate":
        # one Broyden-style update step of the space-mapping iteration
        reqJson = flask.request.get_json()
        np.save("tempData2", np.array([reqJson]))
        B = np.array(reqJson['B'], dtype = float)
        # print(np.around(B, 2))
        h = np.array(reqJson['h'], dtype = float)
        xc = np.array(reqJson['xc'], dtype = float)
        xc_star = np.array(reqJson['xc_star'], dtype = float)
        xf = np.array(reqJson['xf'], dtype = float)
        lowerLimit = np.array(reqJson['lowerLimit'], dtype = float)
        upperLimit = np.array(reqJson['upperLimit'], dtype = float)
        f = xc - xc_star
        # rank-one update of B, then solve for the next step h
        B += np.array(np.mat(f).T * np.mat(h)) / h.dot(h)
        h = np.linalg.solve(B, -f)
        xf_old = xf.copy()
        xf += h
        # clamp the new point into the allowed box
        xf = np.where(xf > lowerLimit, xf, lowerLimit)
        xf = np.where(xf < upperLimit, xf, upperLimit)
        h = xf - xf_old
        # convergence test on the residual norm
        if f.dot(f) < 4e-10 * len(xc):
            toStop = 1
        else:
            toStop = 0
        resJson = {'B': B.tolist(), 'h': h.tolist(), 'xf': xf.tolist(), 'f': f.tolist(), 'toStop': toStop}
        return json.dumps(resJson, separators = (',', ':'))
    elif method == "CoarseModelUpdate":
        # refit the coarse model mapping from dimensions to matrix entries
        reqJson = flask.request.get_json()
        np.save("tempData", np.array([reqJson]))
        # reqJson = np.load('tempData.npy')[0]
        dimension = np.array(reqJson['dimension'])
        extractedMatrix = np.array(reqJson['extractedMatrix'])
        topology = np.array(reqJson['topology'])
        isSymmetric = reqJson['isSymmetric']
        slopeM, invSlopeM, intepM = CP.CoarseModelUpdate(dimension, extractedMatrix, topology, isSymmetric)
        resJson = {'slopeM': slopeM.tolist(), 'invSlopeM': invSlopeM.tolist(), 'intepM': intepM.tolist()}
        return json.dumps(resJson, separators = (',', ':'))
    else:
        flask.abort(404)
@app.route('/shutdown', methods=['POST'])
def shutdown():
    """Stop the development server in response to a POST request.

    NOTE(review): relies on the ``werkzeug.server.shutdown`` environ hook,
    which only exists under the Werkzeug development server (and was removed
    in newer Werkzeug releases) — confirm before upgrading dependencies.
    """
    def shutdown_server():
        # hook injected by Werkzeug's dev server; absent under other servers
        func = flask.request.environ.get('werkzeug.server.shutdown')
        if func is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        func()
    shutdown_server()
    return 'Server shutting down...'
if __name__ == '__main__':
    # serve on port 4000; threaded so long numeric requests don't block others
    app.run(port=4000, threaded=True)
|
[
"gongfan99@hotmail.com"
] |
gongfan99@hotmail.com
|
24ca64c791a6363b0fb20221fb90310f5b2726a0
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/cpinfra/smmhbprofile.py
|
f5a14103adc0a12a62409586a352062bfa85ab3b
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381
| 2020-01-20T09:18:28
| 2020-01-20T09:18:28
| 235,065,676
| 0
| 0
| null | 2023-05-01T21:19:14
| 2020-01-20T09:36:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,962
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class SmmHBProfile(Mo):
    """
    Heartbeat-profile managed object for the cpinfra SMM service
    (auto-generated cobra model class — regenerate rather than hand-edit;
    see the file header).

    Exposes operational heartbeat state: timeout, allowed misses, lost-beat
    counter, and last-heartbeat timestamps.
    """
    # class-level metadata describing the MO's place in the model tree
    meta = ClassMeta("cobra.model.cpinfra.SmmHBProfile")
    meta.moClassName = "cpinfraSmmHBProfile"
    meta.rnFormat = "smmhbt"
    meta.category = MoCategory.REGULAR
    meta.label = "SmmHBProfile"
    meta.writeAccessMask = 0x401002001
    meta.readAccessMask = 0x401002001
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False
    meta.parentClasses.add("cobra.model.cpinfra.Inst")
    meta.superClasses.add("cobra.model.cpinfra.AHBeatProfile")
    meta.rnPrefixes = [
        ('smmhbt', False),
    ]
    # --- framework bookkeeping properties ---
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    # --- heartbeat operational state ---
    prop = PropMeta("str", "handshaked", "handshaked", 45702, PropCategory.REGULAR)
    prop.label = "Process has handshaked or not"
    prop.isOper = True
    prop.defaultValue = False
    prop.defaultValueStr = "no"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("handshaked", prop)
    prop = PropMeta("str", "hbEnabled", "hbEnabled", 45701, PropCategory.REGULAR)
    prop.label = "Heartbeat Enabled or Disabled"
    prop.isOper = True
    prop.defaultValue = False
    prop.defaultValueStr = "no"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("hbEnabled", prop)
    prop = PropMeta("str", "lastHbCheckedTs", "lastHbCheckedTs", 45733, PropCategory.REGULAR)
    prop.label = "Time of last HeartBeat Check"
    prop.isOper = True
    meta.props.add("lastHbCheckedTs", prop)
    prop = PropMeta("str", "lastHbTs", "lastHbTs", 45700, PropCategory.REGULAR)
    prop.label = "Time of last HeartBeat"
    prop.isOper = True
    meta.props.add("lastHbTs", prop)
    prop = PropMeta("str", "maxloss", "maxloss", 45698, PropCategory.REGULAR)
    prop.label = "Number of Heartbeat miss to consider service not responding"
    prop.isOper = True
    prop.defaultValue = 3
    prop.defaultValueStr = "3"
    meta.props.add("maxloss", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "numHbLost", "numHbLost", 45699, PropCategory.REGULAR)
    prop.label = "Number of HeartBeats lost"
    prop.isOper = True
    meta.props.add("numHbLost", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    prop = PropMeta("str", "timeout", "timeout", 45697, PropCategory.REGULAR)
    prop.label = "Heartbeat timeout in ms"
    prop.isOper = True
    prop.defaultValue = 5000
    prop.defaultValueStr = "5000"
    meta.props.add("timeout", prop)
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # this MO has no naming properties, hence the empty namingVals list
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"rrishike@cisco.com"
] |
rrishike@cisco.com
|
bb2a8f31ab415392935e863391ce83b2fb47d10b
|
fdf2ed095c39c6e8a0c6427208e27fd54ec78567
|
/20160311 - 027/config.py
|
ba43c9306c00bf8274410887be334c0c438de817
|
[] |
no_license
|
emithongle/AddressSegmentation
|
f23278248ce3075dfd652f6382ac3d6805bb36be
|
101d5b38fa3f72e60f269819e3b69232d511870d
|
refs/heads/master
| 2021-01-10T12:38:41.734617
| 2016-03-31T03:00:32
| 2016-03-31T03:00:32
| 52,435,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,381
|
py
|
__author__ = 'Thong_Le'
import json
import datetime
def readFile(file):
    """Return the list of lines of *file* (UTF-8), keeping line endings.

    @param      file    path of the text file to read
    @return             list of lines, each still ending with its newline

    BUG FIX: the original opened the file without ever closing it (resource
    leak); a context manager now guarantees the handle is released.
    """
    with open(file, encoding="utf-8") as infile:
        return [line for line in infile]
def loadJson(file):
    """Parse ``file + '.json'`` and return the decoded object, or ``None``
    when the file cannot be read or decoded.

    @param      file    path of the JSON file, without its ``.json`` suffix
    @return             decoded JSON object, or ``None`` on failure

    FIX: the original used a bare ``except:``, which also swallowed
    ``KeyboardInterrupt``/``SystemExit``; narrowed to the errors that can
    actually occur while reading or decoding (``json.JSONDecodeError`` is a
    ``ValueError`` subclass).
    """
    try:
        return json.loads(''.join(readFile(file + '.json')))
    except (OSError, ValueError):
        return None
class TimeManage():
    """Tiny mutable holder for a run timestamp shared across the modules."""

    def __init__(self):
        """Start with an empty timestamp string."""
        self.time = ''

    def setTime(self, _time):
        """Record *_time* as the current timestamp."""
        self.time = _time

    def getTime(self):
        """Return the most recently recorded timestamp."""
        return self.time
# shared timestamp holder used by the other modules
timeManage = TimeManage()
# ================ folder / file layout of the pipeline ================
folder_dictionary = '0. Dictionary'
files_dictionary = 'dictionary' # json
folder_data = '1. Data'
files_data = ['name.txt', 'address.txt', 'phone.txt']
folder_preprocessing = '2. Preprocessing'
files_preprocessed = ['preprocessed_name', 'preprocessed_address', 'preprocessed_phone'] # csv
folder_features = '3. Features'
files_features = ['name_features', 'address_features', 'phone_features'] # csv
files_traintest = ['training_data', 'testing_data']
folder_model = '4. Model'
file_model = None
folder_test = '5. Test'
file_full_address_test = 'testdata.txt'
file_term_test = 'termdata' # csv
folder_results = '6. Results'
file_segment_address_result = 'full_address_results.xlsx'
file_term_classify_result = 'term_results.xlsx'
file_model_result = 'test_model_results.xlsx'
folder_running_logs = 'running_logs'
file_log = 'logs.xlsx' # csv
# ==============================================
# number of executive run.py
nrun = 1
# NOTE(review): loadJson returns None on failure, in which case the
# subscript accesses below raise TypeError at import time — confirm the
# dictionary file is guaranteed to exist.
tmp = loadJson(folder_dictionary + '/' + files_dictionary)
nameTermSet = tmp['name-term-set']
addressTermSet = tmp['address-term-set']
phoneTermSet = tmp['phone-term-set']
asi = tmp['ascii']
unic = tmp['unicode']
upchars = tmp['upper-characters']
feature_func = 'feature'
preprocessing_func = 'preprocessing'
# Preprocessing switches
bpreprocessing = True
preprocessing_name = {'convert unicode to ascii': True, 'convert to lower': True,
                      'remove break line': True, 'remove space by space': True}
# Features extracted per term
feature_names = ['%digits', '%max_digit_skip_0', '%ascii', 'first_character_ascii', 'first_character_digit']
# Model hyper-parameters
model_type = 'Neuron Network'
model_config = {
    'layers': [(100, 'Sigmoid'), (3, 'Softmax')],
    'learning_rate': 0.01,
    'learning_rule': 'adagrad',
    'n_iter': 1000
}
# Testing Model
standard_data = True
nTesting = 10
|
[
"thong.le@emichat.com"
] |
thong.le@emichat.com
|
b46e4fb148f61daad9e93cfd7197d5964cefc117
|
8dd8f2221364d595608ba9b35bfb45de940f7f42
|
/hth/music/tests/test_admin.py
|
2a05fb9586a10f9d7ef53cab08df0df2baa433c2
|
[
"MIT"
] |
permissive
|
bhrutledge/jahhills.com
|
b7311a4cc2c9b7b640a71b70f009f42721f48b95
|
41446cd2c560bbc544798c424ef0ddc97bc2d2f5
|
refs/heads/master
| 2023-08-17T23:25:15.620046
| 2023-08-04T20:38:25
| 2023-08-04T20:38:25
| 22,381,151
| 1
| 1
|
MIT
| 2023-02-11T01:09:40
| 2014-07-29T13:40:46
|
Python
|
UTF-8
|
Python
| false
| false
| 11,361
|
py
|
import vcr
from hth.core.tests.utils import date_format, from_today
from hth.core.tests.selenium import AdminTestCase
from .factories import PublishedReleaseFactory
class ReleaseTestCase(AdminTestCase):
    """Selenium walk-through: create a release in the admin, verify it is
    hidden while unpublished, publish it, and verify it appears on the
    public music pages."""

    def test_can_create_release(self):
        # Ryan logs into the admin
        self.adminLogin()
        # He creates an unpublished release
        self.find_link('Releases').click()
        self.find_link('ADD RELEASE').click()
        self.find_name('title').send_keys('First release')
        # date is tomorrow, formatted by the shared test helpers
        self.find_name('date').send_keys(date_format(from_today(1)))
        self.find_name('cover_url').send_keys('http://localhost/cover.jpg')
        self.find_name('player_code').send_keys('<iframe></iframe>')
        self.find_name('description').send_keys('Release description')
        self.find_name('credits').send_keys('Release credits')
        self.find_name('_save').click()
        self.assertIn('First release', self.find_tag('body').text)
        # He verifies that it's not published
        self.get_url('/music')
        self.assertIn('Music', self.browser.title)
        self.assertNotIn('First release', self.find_tag('body').text)
        self.get_url('/music/first-release')
        self.assertNotIn('First release', self.browser.title)
        # He publishes the release
        self.get_url('/admin')
        self.find_link('Releases').click()
        self.find_link('First release').click()
        self.find_name('publish').click()
        self.find_name('_save').click()
        self.assertIn('First release', self.find_tag('body').text)
        # He verifies that it was published
        self.get_url('/music')
        self.find_link('First release').click()
        self.assertIn('First release', self.browser.title)
        # TODO: Test absence/presence of details?
# TODO: Test absence/presence of details?
class SongTestCase(AdminTestCase):
def setUp(self):
super().setUp()
PublishedReleaseFactory.create(title='First release',
slug='first-release')
def test_can_create_song(self):
# Ryan logs into the admin
self.adminLogin()
# He adds a published song
self.find_link('Songs').click()
self.find_link('ADD SONG').click()
self.find_name('title').send_keys('First song')
self.find_name('description').send_keys('Song description')
self.find_name('player_code').send_keys('<iframe></iframe>')
self.find_name('credits').send_keys('Song credits')
self.find_name('lyrics').send_keys('Song lyrics')
self.find_name('publish').click()
self.find_name('_save').click()
self.assertIn('First song', self.find_tag('body').text)
# He adds an unpublished song
self.find_link('ADD SONG').click()
self.find_name('title').send_keys('Second song')
self.find_name('_save').click()
self.assertIn('Second song', self.find_tag('body').text)
# He verifies that only the published song is on the site
self.get_url('/songs')
self.assertIn('Songs', self.browser.title)
self.assertNotIn('Second song', self.find_tag('body').text)
self.find_link('First song').click()
self.assertIn('First song', self.browser.title)
# He adds the songs to the release
self.get_url('/admin')
self.find_link('Songs').click()
self.find_link('First song').click()
self.find_select('release').select_by_visible_text('First release')
self.find_name('track').send_keys('1')
self.find_name('_save').click()
self.find_link('Second song').click()
self.find_select('release').select_by_visible_text('First release')
self.find_name('track').send_keys('2')
self.find_name('_save').click()
# He verifies that only the published song is shown on the release
self.get_url('/music/first-release')
self.assertIn('First song', self.find_tag('body').text)
self.assertNotIn('Second song', self.find_tag('body').text)
class VideoTestCase(AdminTestCase):
# TODO: Duplicated in .test_models.VideoAutofillTestCase
CASSETTE = 'hth/music/tests/fixtures/cassettes/vimeo.yaml'
SOURCE_URL = 'https://vimeo.com/126794989'
PREVIEW_URL = 'http://i.vimeocdn.com/video/517362144_640.jpg'
EMBED_CODE = ('<iframe src="http://player.vimeo.com/video/126794989"'
' seamless allowfullscreen></iframe>\n')
def setUp(self):
super().setUp()
PublishedReleaseFactory.create(title='First release',
slug='first-release')
def test_can_create_video(self):
# Ryan logs into the admin
self.adminLogin()
# He adds a published video
self.find_link('Videos').click()
self.find_link('ADD VIDEO').click()
self.find_name('title').send_keys('First video')
self.find_name('source_url').send_keys('http://localhost')
self.find_name('embed_code').send_keys('<iframe></iframe>')
self.find_name('preview_url').send_keys('http://localhost/jpg')
self.find_name('description').send_keys('Video description')
self.find_name('credits').send_keys('Video credits')
self.find_name('publish').click()
self.find_name('_save').click()
self.assertIn('First video', self.find_tag('body').text)
# He adds an unpublished video
self.find_link('ADD VIDEO').click()
self.find_name('title').send_keys('Second video')
self.find_name('_save').click()
self.assertIn('Second video', self.find_tag('body').text)
# He verifies that only the published video is on the site
self.get_url('/video')
self.assertNotIn('Second video', self.find_tag('body').text)
self.find_link('First video').click()
self.assertIn('First video', self.browser.title)
# He adds the videos to the release
self.get_url('/admin')
self.find_link('Videos').click()
self.find_link('First video').click()
self.find_select('release').select_by_visible_text('First release')
self.find_name('_save').click()
self.find_link('Second video').click()
self.find_select('release').select_by_visible_text('First release')
self.find_name('_save').click()
# He verifies that only the published video is shown on the release
self.get_url('/music/first-release')
self.assertIn('First video', self.find_tag('body').text)
self.assertNotIn('Second video', self.find_tag('body').text)
self.find_link('First video').click()
self.assertIn('First video', self.browser.title)
@vcr.use_cassette(CASSETTE)
def test_autofill_from_source(self):
# Ryan logs into the admin
self.adminLogin()
# He adds a published video, without preview_url and embed_code
self.find_link('Videos').click()
self.find_link('ADD VIDEO').click()
self.find_name('title').send_keys('First video')
self.find_name('source_url').send_keys(self.SOURCE_URL)
self.find_name('publish').click()
self.find_name('_continue').click()
# He verifies that the preview_url and embed_code have been filled
self.assertEqual(self.PREVIEW_URL,
self.find_name('preview_url').get_attribute('value'))
self.assertEqual(self.EMBED_CODE.strip(),
self.find_name('embed_code').text)
# He verifies that the published video is on the site
self.get_url('/video')
self.find_link('First video').click()
self.assertIn('First video', self.browser.title)
class PressTestCase(AdminTestCase):
def setUp(self):
super().setUp()
PublishedReleaseFactory.create(title='First release',
slug='first-release')
def test_can_create_quote(self):
# Ryan logs into the admin
self.adminLogin()
# He adds a published quote
self.find_link('Press').click()
self.find_link('ADD PRESS').click()
self.find_name('title').send_keys('First source')
self.find_name('source_url').send_keys('http://example.com')
self.find_name('date').send_keys(date_format(from_today(-30)))
self.find_name('body').send_keys('First quote')
self.find_name('publish').click()
self.find_name('_save').click()
self.assertIn('First source', self.find_tag('body').text)
# He adds an unpublished quote
self.find_link('ADD PRESS').click()
self.find_name('title').send_keys('Second source')
self.find_name('source_url').send_keys('http://foo.com')
self.find_name('date').send_keys(date_format(from_today(-30)))
self.find_name('_save').click()
self.assertIn('Second source', self.find_tag('body').text)
# He verifies that only the published quote is on the site
self.get_url('/press')
self.assertIn('Press', self.browser.title)
self.assertIn('First source', self.find_tag('body').text)
self.assertIn('First quote', self.find_tag('body').text)
self.assertNotIn('Second source', self.find_tag('body').text)
self.assertNotIn('Second quote', self.find_tag('body').text)
# He adds the quotes to the release
self.get_url('/admin')
self.find_link('Press').click()
self.find_link('First source').click()
self.find_select('release').select_by_visible_text('First release')
self.find_name('_save').click()
self.find_link('Second source').click()
self.find_select('release').select_by_visible_text('First release')
self.find_name('_save').click()
# He verifies that only the published quote is shown on the release
self.get_url('/music/first-release')
self.assertIn('First source', self.find_tag('body').text)
self.assertNotIn('Second source', self.find_tag('body').text)
def test_can_create_post(self):
# Ryan logs into the admin
self.adminLogin()
# He adds a published press post
self.find_link('Press').click()
self.find_link('ADD PRESS').click()
self.find_name('title').send_keys('Post title')
self.find_name('body').send_keys('Post body')
self.find_name('date').send_keys(date_format(from_today(-30)))
self.find_name('quote').click()
self.find_name('publish').click()
self.find_name('_save').click()
self.assertIn('Post title', self.find_tag('body').text)
# He verifies that the post is on the site
self.get_url('/press')
self.assertIn('Press', self.browser.title)
self.assertIn('Post title', self.find_tag('body').text)
self.assertIn('Post body', self.find_tag('body').text)
# He adds the post to the release
self.get_url('/admin')
self.find_link('Press').click()
self.find_link('Post title').click()
self.find_select('release').select_by_visible_text('First release')
self.find_name('_save').click()
# He verifies that post is shown on the release
self.get_url('/music/first-release')
self.assertIn('Post title', self.find_tag('body').text)
|
[
"bhrutledge@gmail.com"
] |
bhrutledge@gmail.com
|
de4412cd89477a7a2bece3e61b2cd12e8c48dd4c
|
e33561bdec2fe3dec476fc5b2ebcd3ae99cfddc3
|
/3_match_making/match_making.py
|
07d5f26ee5dd94bdf3b8d185bb0ad8abb6a0a877
|
[] |
no_license
|
gustavolio/algorithm_project_exercicies
|
b5b1ccfdcfdecff54befc29eb849ac1533048642
|
64baf4ab397170d6b77fb6965436263f181b7948
|
refs/heads/master
| 2023-05-14T15:05:16.680610
| 2021-06-04T19:56:08
| 2021-06-04T19:56:08
| 366,484,218
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
__author__ = "Gustavo Lima de Oliveira"
__copyright__ = "Copyright 2021"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "g.cdcomp@gmail.com"
import numpy as np
import sys
matrix = []
input_string = sys.stdin.read()
input_string = input_string.split("\n")
input_string = input_string[:-1]
for line in input_string:
new_line = []
for num in line.split(' '):
new_line.append(int(num))
matrix.append(new_line)
#input data iterator
reading_line = 0
#output string
output = ""
def get_maches(bach, spin):
bach.sort(reverse=True)
spin.sort(reverse=True)
i = abs(len(bach) - len(spin))
if i == 0:
i = len(bach)
bach = bach[i:]
spin = spin[i:]
if len(bach) != 0:
return "{} {}".format(i,min(bach)[0])
return "0"
n_cases = 0
while matrix[reading_line] != [0,0]:
n_cases += 1
#read num of bachelors and spinsters
n_bach = matrix[reading_line][0]
n_spin = matrix[reading_line][1]
reading_line += 1
#read bachelors and spinsters age
bach_age = matrix[reading_line: (reading_line + n_bach)]
reading_line += n_bach
spin_age = matrix[reading_line: (reading_line + n_spin)]
reading_line += n_spin
# print("Bach age: {}\nSpin age: {}\n".format(bach_age, spin_age))
# exit()
output += "Case {}: {}".format(n_cases, get_maches(bach_age, spin_age) + "\n")
output = output[:-1]
print(output)
|
[
"g.cdcomp@gmail.com"
] |
g.cdcomp@gmail.com
|
9c25eba5c4be4760d0bee0bd5f91c3e14c2723e6
|
7d19fc76ca4ab8dd65630888b2010737df40de03
|
/backend/pyramid/v1/www/www/app/core/session/providers/postgresql.py
|
245890275ee4a16e5f63dff5a33a2bf2739ce0c7
|
[] |
no_license
|
beowulf1416/mura
|
a75f1694c5b3e015bc1919a7ee5ed252b9331664
|
a56ed637428d39eb20fff67811a44a07cd064453
|
refs/heads/master
| 2020-04-16T14:36:45.378756
| 2019-03-03T14:36:24
| 2019-03-03T14:36:24
| 165,673,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,987
|
py
|
import logging
log = logging.getLogger(__name__)
from www.app.core.data.providers.postgresql import PostgreSQL
class SessionPostgreSQL(PostgreSQL):
def __init__(self, connection):
log.debug('SessionPostgreSQL::__init__()')
super().__init__(connection)
def session_create(self, session_id):
log.debug('SessionPostgreSQL::session_create()')
sql = 'select * from www.session_create(%s)'
connection = self.get_connection()
cn = connection['connection']
try:
c = cn.cursor()
c.execute(sql, (session_id, ))
cn.commit()
except Exception as e:
cn.rollback()
log.error(e)
def session_rehydrate(self, session_id):
log.debug('SessionPostgreSQL::session_rehydrate()')
sql = 'select * from www.session_get_vars(%s)'
connection = self.get_connection()
cn = connection['connection']
try:
c = cn.cursor()
c.execute(sql, (session_id, ))
return c.fetchall()
except Exception as e:
log.error(e)
def set_value(self, session_id, key, value):
log.debug('SessionPostgreSQL::set_value()')
sql = 'select * from www.session_set(%s,%s,%s)'
connection = self.get_connection()
cn = connection['connection']
try:
c = cn.cursor()
c.execute(sql, (session_id, key, value))
cn.commit()
except Exception as e:
cn.rollback()
log.error(e)
def session_clear(self, session_id):
log.debug('SessionPostgreSQL::session_clear()')
sql = 'select * from www.session_clear(%s)'
connection = self.get_connection()
cn = connection['connection']
try:
c = cn.cursor()
c.execute(sql, (session_id, ))
cn.commit()
except Exception as e:
cn.rollback()
log.error(e)
|
[
"beowulf1416@gmail.com"
] |
beowulf1416@gmail.com
|
08c0bf47856f23c7c5a4d9a6dd0c2054d7874622
|
21fb2e8c6d7a3a7d0fdfc14433e1ddc5968e010e
|
/vitor_django_admin_user_profile/mysite/core/migrations/0001_initial.py
|
255561ee4b2826a009877c6b7f17cb7603ec7d6b
|
[
"MIT"
] |
permissive
|
alicanakarin/django_projects
|
45d1703492622075347ce740738400bbbf1b02af
|
03ad88ddf83c632b7009494c6e53711495916d47
|
refs/heads/master
| 2023-01-23T07:39:10.250108
| 2020-11-30T12:07:32
| 2020-11-30T12:07:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-23 13:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location', models.CharField(blank=True, max_length=30)),
('birthdate', models.DateField(blank=True, null=True)),
('role', models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Student'), (2, 'Teacher'), (3, 'Supervisor')], null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'profile',
'verbose_name_plural': 'profiles',
},
),
]
|
[
"selcukakarin@novumare.com.tr"
] |
selcukakarin@novumare.com.tr
|
19fa3ca24c4c2105a2dca826a476a7cfd62e1408
|
8494d080f8523695d5fd96bc4dec493784b97088
|
/experiments/migrations/0001_initial.py
|
ff0f69e7783cb0e6cc8e0a2efd5ac5f01450f651
|
[] |
no_license
|
mumbihere/mrt
|
8c6be6a7e72332654b6cbec43546634ea03da97d
|
6c6b806661909c78fec92dff4c8b89937c4f2f2b
|
refs/heads/master
| 2021-09-10T18:46:57.274518
| 2018-03-30T23:49:34
| 2018-03-30T23:49:34
| 108,175,265
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,588
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-07 05:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='readings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(default=django.utils.timezone.now)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='experiments.Event')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('firstname', models.CharField(max_length=200)),
('secondname', models.CharField(max_length=200)),
],
),
migrations.AddField(
model_name='readings',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='experiments.User'),
),
]
|
[
"monicahwambugu@gmail.com"
] |
monicahwambugu@gmail.com
|
d23a62be77841be567bbf45f1f607f06ec29e2f6
|
4f9e30387653a61cdc7c02fa1731c02aaea8aa59
|
/CS370/HackerRank-Crossword-Puzzle/crossword_puzzle.py
|
f5a7c3f130ba0bc7758c4d18462340bb41ada8f9
|
[] |
no_license
|
jcafiero/Courses
|
03a09e9f32e156b37de6da2a0b052ded99ca4d07
|
c82bc4bbc5657643dabc3a01fadfd961c33ebf5e
|
refs/heads/master
| 2022-02-17T14:40:27.073191
| 2019-10-06T23:58:23
| 2019-10-06T23:58:23
| 103,605,315
| 1
| 1
| null | 2019-10-07T00:05:39
| 2017-09-15T02:36:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,191
|
py
|
# Catherine Javadian, Jennifer Cafiero, and Jordana Approvato
# We pledge our honor that we have abided by the Stevens Honor System
solution = []
spaces = []
blanks = []
class Space:
def __init__(self, start, length, vertical, taken):
self.start = start
self.length = length
self.vertical = vertical
self.taken = taken
def main():
solution, spaces = readGrid() #reads the input and saves grid to solution and spaces variables
words = set(input().split(';')) #breaks input separated by semicolons and stores in words variable
findSol(solution, spaces, words) #computes solution based on input grid and words
for row in solution:
print("".join(row))#prints solved grid solution line-by-line
def readGrid():
'''Reads the grid and finds the spaces in the rows and columns'''
rows = []
for i in range(10):
row = input()
rows.append(row)
row_spaces = countSpaces(rows)
cols = []
for x in range(10):
col = ""
for y in range(10):
col += rows[y][x]
cols.append(col)
col_spaces = countSpaces(cols, vertical = True)
return [list(row) for row in rows], row_spaces + col_spaces
def countSpaces(lines, vertical=False):
'''Takes in a line of the grid and counts the spaces, and stores them in an array.
The input vertical has the value True if the line is a column.'''
spaces = []
n = 0
for line in lines:
for j in whereAreSpaces(line):
index, length = j
if (vertical == True):
start = (index, n)
else:
start = (n, index)
spaces.append(Space(start, length, vertical, False))
n += 1
return spaces
def whereAreSpaces(line):
'''Finds the spaces in the line and appends a tuple of the index and length to spaces'''
spaces = []
for blanks in line.split("+"):
length = len(blanks)
if length > 1:
index = 0
while index < length:
index = line.find(blanks, index)
if line == -1:
break
spaces.append((index, length))#appends the tuple of index and length to spaces array
index += length
return spaces
def findSol(grid, spaces, words):
'''Finds the solution of the puzzle. If words is empty (len 0), returns
True to indicate that the puzzle is solved.'''
if len(words) == 0:
return True
for word in words:
for space in spaces:
if canAddWord(grid, space, word):
before = addWord(grid, space, word)
space.taken = True
new_words = set(words)
new_words.remove(word)
if findSol(grid, spaces, new_words):
return True
addWord(grid, space, before) #will revert the grid if the solution is incorrect
space.taken = False
#if it reaches this far, breaks out of the for loop, and does not yet have a solution, it is unsolvable
return False
def canAddWord(grid, space, word):
'''Returns true if that word can be added to that place in the grid and
returns false if it cannot'''
if space. or len(word) != space.length:
return False
row, col = space.start
for i in word:
if not (grid[row][col] == "-" or grid[row][col] == i):
return False
row, col = nextSpace(row, col, space.vertical)
return True
def addWord(grid, space, word):
'''adds the given word to the grid and returns the new grid'''
new_grid = []
row, col = space.start
for char in word:
new_grid.append(grid[row][col])
grid[row][col] = char
row, col = nextSpace(row, col, space.vertical)
return new_grid
def nextSpace(row, col, vertical):
'''Returns the position of the next space, whether its vertical or horizontal'''
if vertical is True:
return (row + 1), col
else:
return row, (col + 1)
main()
|
[
"noreply@github.com"
] |
jcafiero.noreply@github.com
|
4bb831825988c63924d86c83523c9a9c0d21813d
|
abc1a497c41ddd8669c8c41da18af65d08ca54e4
|
/AnalysisE/fit/Cs137/1ns/delay_spectra_temp/2exprecomb/fit.py
|
ae8a15093edd6f37f5e6c5c42797d72bf20226e6
|
[] |
no_license
|
gerakolt/direxeno_privet
|
fcef5e3b654720e277c48935acc168472dfd8ecc
|
75e88fb1ed44fce32fce02677f64106121259f6d
|
refs/heads/master
| 2022-12-20T22:01:30.825891
| 2020-10-04T06:01:07
| 2020-10-04T06:01:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,907
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import time
import os
from fun import Sim, q0_model, make_P, model_area, Sim2, make_3D
import sys
from scipy.optimize import minimize
from scipy.stats import poisson, binom
from scipy.special import erf as erf
import warnings
from minimize import minimize, make_ps
pmts=[0,1,4,7,8,14]
path='/home/gerak/Desktop/DireXeno/190803/pulser/DelayRecon/'
delay_hs=[]
names=[]
delays=[]
for i in range(len(pmts)-1):
for j in range(i+1, len(pmts)):
data=np.load(path+'delay_hist{}-{}.npz'.format(pmts[i], pmts[j]))
delays.append(data['x']-data['m'])
delay_hs.append(data['h'])
names.append('{}_{}'.format(pmts[i], pmts[j]))
T_CsB=1564825612162-1564824285761
T_BG=1564874707904-1564826183355
path='/home/gerak/Desktop/DireXeno/190803/Cs137B/EventRecon/'
data=np.load(path+'H.npz')
H=data['H']
G=data['G']
spectrum=data['spectrum']
spectra=data['spectra']
left=data['left']
right=data['right']
t=np.arange(200)
dt=t[1]-t[0]
# path='/home/gerak/Desktop/DireXeno/190803/BG/EventRecon/'
# data=np.load(path+'H.npz')
# H_BG=data['H']*T_CsB/T_BG
# G_BG=data['G']*T_CsB/T_BG
# spectrum_BG=data['spectrum']*T_CsB/T_BG
# spectra_BG=data['spectra']*T_CsB/T_BG
N=60*662
rec=np.recarray(1, dtype=[
('Q', 'f8', len(pmts)),
('T', 'f8', len(pmts)),
('St', 'f8', len(pmts)),
('F', 'f8', 1),
('Tf', 'f8', 1),
('Ts', 'f8', 1),
('R', 'f8', 1),
('b', 'f8', 1),
])
def rec_to_p(rec):
p=np.array([])
for name in rec.dtype.names:
p=np.append(p, np.array(rec[name][0]))
return p
def p_to_rec(p):
for i, name in enumerate(rec.dtype.names):
if np.shape(rec[name][0])==(len(pmts),):
rec[name][0]=p[i*len(pmts):(i+1)*len(pmts)]
else:
if name=='b':
rec[name][0]=p[-1]
elif name=='R':
rec[name][0]=p[-2]
elif name=='Ts':
rec[name][0]=p[-3]
elif name=='Tf':
rec[name][0]=p[-4]
elif name=='F':
rec[name][0]=p[-5]
else:
print('fuck')
sys.exit()
return rec
counter=0
PEs=np.arange(len(spectra[:,0]))
GPEs=np.arange(len(spectrum))
l_min=1e10
params=np.zeros(6)
def L(p):
rec=p_to_rec(p)
global counter, l_min, params
counter+=1
nams=['Q', 'Ts', 'T', 'F', 'Tf', 'Ts', 'R', 'b']
for name in nams:
if np.any(rec[name]<0):
return 1e10*(1-np.amin(rec[name]))
nams=['F', 'R']
for name in nams:
if np.any(rec[name]>1):
return 1e10*(np.amax(rec[name]))
if rec['Ts'][0]>100:
return 1e10*rec['Ts'][0]
if np.any(rec['St'][0]<0.5):
return 1e10*(1+np.abs(np.amin(rec['St'][0])))
if np.any(rec['T'][0]<10):
return 1e10*(10-np.amin(rec['T'][0]))
if np.any(rec['b'][0]<1):
return 1e10*(1/np.abs(rec['b'][0]))
l=0
m=make_3D(t, N, rec['F'][0], rec['Tf'][0], rec['Ts'][0], rec['R'][0], rec['b'][0], rec['Q'][0], rec['T'][0], rec['St'][0])
model=np.sum(H[:,0,0])*np.ravel(m)
data=np.ravel(H[:,:100,:])
if np.any(model<0):
# print('Model<0')
# sys.exit()
return 1e10*(1-np.amin(model))
l+=np.sum(data*np.log((model+1e-10)/(data+1e-10))+data-model)
data=np.ravel(spectra)
lmda=np.sum(np.matmul(np.transpose(m, (2,1,0)), np.arange(np.shape(m)[0]).reshape(np.shape(m)[0], 1))[:,:,0], axis=1)
I=np.arange(len(PEs)*len(lmda))
model=poisson.pmf(PEs[I//len(lmda)], lmda[I%len(lmda)]).reshape(len(PEs), len(lmda))
model=np.ravel(model/np.amax(model, axis=0)*np.amax(spectra, axis=0))
l+=np.sum(data*np.log((model+1e-10)/(data+1e-10))+data-model)
for i in range(len(pmts)-1):
for j in range(i+1, len(pmts)):
x=delays[names=='{}_{}'.format(pmts[i], pmts[j])]
data=delay_hs[names=='{}_{}'.format(pmts[i], pmts[j])]
rng=np.nonzero(np.logical_and(x>x[np.argmax(data)]-3, x<x[np.argmax(data)]+3))[0]
model=(x[1]-x[0])*np.exp(-0.5*(x[rng]-rec['T'][0,j]+rec['T'][0,i])**2/(rec['St'][0,i]**2+rec['St'][0,j]**2))/np.sqrt(2*np.pi*(rec['St'][0,i]**2+rec['St'][0,j]**2))
model=model/np.amax(model)*np.amax(data)
data=data[rng]
l+=np.sum(data*np.log((model+1e-10)/(data+1e-10))+data-model)
if -l<l_min:
l_min=-l
np.savez('best_p', p=p, l_min=l_min)
# print('$$$$$$$$$$$ NEW best p $$$$$$$$$$$$$$$$$$$$')
# if True:
# print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
# print('iteration=', int(counter/(len(p)+1)), 'fanc=',-l)
# print('--------------------------------')
# print(rec)
params=np.vstack((params, np.append(p[-5:], -l)))
np.savez('params', params=params)
return -l
rec[0]=([0.15031393, 0.10126694, 0.0980743, 0.15169108, 0.13757894, 0.27692865],
[42.6965627, 42.79534384, 42.98685503, 42.85373486, 42.54194199, 42.92884848],
[0.85148873, 0.82144334, 0.75498879, 0.84165176, 1.09559689, 0.82225653],
0.10919454, 1.65475904, 32.72410862, 0.51185353, 5.27711599)
# p=minimize(L, make_ps(rec_to_p(rec)))
# rec=p_to_rec(p.x)
m=make_3D(t, N, rec['F'][0], rec['Tf'][0], rec['Ts'][0], rec['R'][0], rec['b'][0], rec['Q'][0], rec['T'][0], rec['St'][0])
s, GS, GS_spectrum, Gtrp, Gsng, GRtrp, GRsng=Sim(t, N, rec['F'][0], rec['Tf'][0], rec['Ts'][0], rec['R'][0], rec['b'][0], rec['Q'][0], rec['T'][0], rec['St'][0])
fig, ax=plt.subplots(2,3)
for i in range(len(pmts)):
np.ravel(ax)[i].plot(t, np.sum(H[:,:,i].T*np.arange(np.shape(H)[0]), axis=1), 'ko', label='Data - PMT{}'.format(pmts[i]))
np.ravel(ax)[i].plot(t[:100], np.sum(H[:,0,i])*np.sum(m[:,:,i].T*np.arange(np.shape(m)[0]), axis=1), 'r.-', label='2 exp model', linewidth=3)
np.ravel(ax)[i].plot(t, np.sum(H[:,0,i])*np.sum(s[:,:,i].T*np.arange(np.shape(s)[0]), axis=1), 'g.-', label='2 exp sim', linewidth=3)
np.ravel(ax)[i].legend(fontsize=15)
np.ravel(ax)[i].set_xlabel('Time [ns]', fontsize='15')
fig.text(0.04, 0.5, r'$N_{events}\sum_n nH_{ni}$', va='center', rotation='vertical', fontsize=15)
fig, ax=plt.subplots(2,3)
lmda=np.sum(np.matmul(np.transpose(m, (2,1,0)), np.arange(np.shape(m)[0]).reshape(np.shape(m)[0], 1))[:,:,0], axis=1)
I=np.arange(len(PEs)*len(lmda))
model=poisson.pmf(PEs[I//len(lmda)], lmda[I%len(lmda)]).reshape(len(PEs), len(lmda))
model=model/np.amax(model, axis=0)*np.amax(spectra, axis=0)
for i in range(len(pmts)):
np.ravel(ax)[i].plot(PEs, spectra[:,i], 'ko', label='spectrum - PMT{}'.format(pmts[i]))
# np.ravel(ax)[i].step(PEs, spectra_BG[:,i], label='BG'.format(pmts[i]))
np.ravel(ax)[i].plot(PEs, model[:,i], 'r-.')
np.ravel(ax)[i].legend()
fig, ax=plt.subplots(3,5)
k=0
for i in range(len(pmts)-1):
for j in range(i+1, len(pmts)):
x=delays[names=='{}_{}'.format(pmts[i], pmts[j])]
data=delay_hs[names=='{}_{}'.format(pmts[i], pmts[j])]
rng=np.nonzero(np.logical_and(x>x[np.argmax(data)]-7, x<x[np.argmax(data)]+7))
model=(x[1]-x[0])*np.exp(-0.5*(x-rec['T'][0,j]+rec['T'][0,i])**2/(rec['St'][0,i]**2+rec['St'][0,j]**2))/np.sqrt(2*np.pi*(rec['St'][0,i]**2+rec['St'][0,j]**2))
model=model/np.amax(model)*np.amax(data)
np.ravel(ax)[k].step(x, data, label='Delays {}_{}'.format(pmts[i], pmts[j]))
np.ravel(ax)[k].plot(x[rng], model[rng], 'r-.')
np.ravel(ax)[k].set_xlabel('Delay [ns]', fontsize='15')
np.ravel(ax)[k].legend(fontsize=15)
k+=1
x=np.arange(200)
fig, (ax1, ax2)=plt.subplots(1,2)
ax1.plot(x, np.sum(G.T*np.arange(np.shape(G)[0]), axis=1), 'ko', label='Global Data')
ax1.plot(x, np.sum(G[:,0])*np.sum(GS.T*np.arange(np.shape(GS)[0]), axis=1), 'r-.', label='Global 2 exp sim')
ax2.step(GPEs, spectrum)
GS_spectrum=GS_spectrum/np.amax(GS_spectrum)*np.amax(spectrum)
ax2.plot(GPEs, GS_spectrum, 'r-.')
plt.show()
|
[
"gerakolt@gmail.com"
] |
gerakolt@gmail.com
|
cf78300fe5a02b86bd8f9b37a4c8f65fc3eeb741
|
8353773fb3d8a544e3f0011ca6b78b3d194d2ce6
|
/pics/urls.py
|
1241168509927bbfc698e38a92b455dc840fef37
|
[] |
no_license
|
skbhagat40/picApp
|
36e9dfcf7b5ec560a0217d861990757ebe7a1b66
|
de48c6362eee705a3d5238c13f0bf5120e9f4780
|
refs/heads/master
| 2022-12-12T04:59:45.137332
| 2019-05-05T04:37:36
| 2019-05-05T04:37:36
| 180,146,647
| 0
| 0
| null | 2022-11-22T03:13:51
| 2019-04-08T12:42:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,214
|
py
|
from django.conf.urls import url,include
from django.contrib import admin
from django.urls import path
import pics.views as views
app_name = 'pics'
urlpatterns = [
url(r'^$',views.index,name='home'),
path('login',views.login_function,name='login'),
path('register',views.register,name='register'),
path('logout',views.logout_view,name='logout'),
path('add_album',views.CreateAlbum.as_view(),name="add_album"),
path('delete_album/<int:pk>',views.DeleteAlbum.as_view(),name='delete_album'),
path('update_album/<int:pk>',views.UpdateAlbum.as_view(),name='update_album'),
path('all_albums',views.ListView.as_view(),name='all_albums'),
path('<int:pk>',views.AlbumDetail.as_view(),name="detail"),
path('add_photos/<int:album_id>', views.CreateAlbumPhotos.as_view(), name="add_album_photos"),
path('delete_photo/<int:pk>', views.DeleteAlbumPhotos.as_view(), name='delete_album_photos'),
path('update_photo/<int:pk>', views.UpdateAlbumPhotos.as_view(), name='update_album_photos'),
path('<int:album_id>/all_albums_photos', views.ListViewPhotos.as_view(), name='all_albums_photos'),
path('photos/<int:pk>', views.AlbumDetailPhotos.as_view(), name="detail_photos"),
]
|
[
"skbhagat40@gmail.com"
] |
skbhagat40@gmail.com
|
3632d5c1b6ba9dc034f506f109c23baf9c7f3278
|
385ee88b22895d939c63e14c7b05efb1cf2ff49d
|
/conibtoky.py
|
487fe13675a909c5d010f2c9304400fbbe8adcff
|
[] |
no_license
|
ibikimam/mosh
|
f197f16df549684efefb3b025036bcd25ab29268
|
b9e28da720ba341778eb9186a5ae44002174b285
|
refs/heads/master
| 2020-06-05T10:09:48.418309
| 2019-07-23T20:08:06
| 2019-07-23T20:08:06
| 192,404,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
""" weight = input('please type your weight \n')
w = int(weight)
unit = input('please select (L)Lbs or (K)Kg \n')
u = unit
if u == "l" or "L":
total_weight= w*0.45
print(f"your weight in Kgs is {total_weight}Kg")
elif u == "k" or "K":
total_weight= w*(1/0.45)
print(f"your weight in pounds is {total_weight}lbs")"""
weight=int(input('Please type in your weight\n'))
unit = input('please select (L)Lbs or (K)Kg \n')
if unit.upper() == "L":
total_weight= weight * 0.45
print(f"your weight in Kgs is {total_weight} Kg")
else:
total_weight= weight / e0.45
print(f"your weight in pounds is {total_weight} lbs")
|
[
"ibik.imam@gmail.com"
] |
ibik.imam@gmail.com
|
8509361ecb36488d762ab9ea47cef6800b8fbbc9
|
4d70309ad7b3618d70217f6a26fd0100920b8da7
|
/TutoDjango/admin.py
|
0319b52de16aff38f7df6d1d83f6b43c9ce7a5a4
|
[] |
no_license
|
loicowatta/TutoDjango2
|
d4e6aee5ef948e75045be6c2a5ca18dc4b5d0c3e
|
e803b6801eb6a58c24a3688006c5b8b6096ed3ae
|
refs/heads/master
| 2021-04-26T23:12:06.427404
| 2018-03-05T16:58:59
| 2018-03-05T16:58:59
| 123,946,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
'''
Created on 26 févr. 2018
@author: loico
'''
from django.contrib import admin
from TutoDjango.models import Faculty, Campus, Job, Cursus,\
Employee, Student, Message
admin.site.register(Faculty)
admin.site.register(Campus)
admin.site.register(Job)
admin.site.register(Cursus)
admin.site.register(Employee)
admin.site.register(Student)
admin.site.register(Message)
|
[
"'loic.owatta@wattys-conseil.com'"
] |
'loic.owatta@wattys-conseil.com'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.