blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3a0a7d49156d2dbabc1b8d385991c4e185076a72 | 9152cd7756d3304a89e58f7dda4451240689a141 | /day1.py | 27a804a75d0874c021ffc0527af77af91188946a | [] | no_license | acemaster/TensorFlow-Training | 1ce4668e4c7564c59afaab2ec983bdbe9c2a86a6 | d121c77f57795e0be7f1ab6269d1bc0b3c157bf9 | refs/heads/master | 2021-01-22T03:40:40.690918 | 2017-06-17T08:51:09 | 2017-06-17T08:51:09 | 92,394,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,871 | py | '''
Day 1: Tensor flow tutorial
============================
Following the google tensorflow tutorial I am dividing it into days
based on how I learnt
Things learnt day1:
Tensors
Constants
Placeholder
Variables
Sessions
Linear model
Gradient descent minimizer
'''
# TensorFlow 1.x tutorial script (day 1): constants, placeholders,
# variables, sessions, a linear model, and gradient-descent training.
import tensorflow as tf
import os
#Disable TF C++ build/log warnings before creating the session.
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
#Constant addition
node1 = tf.constant(3.0,tf.float32)
node2 = tf.constant(4.0)
sess = tf.Session()
print(sess.run([node1,node2]))
#Placeholder addition and other ops
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b
print("adder node 1: ", sess.run(adder_node,{a:1,b:3}))
print("adder node 2: ",sess.run(adder_node,{a:[1,5],b:[3,10]}))
adder_triple_node = adder_node * 3 #Try adder_node ** 3
print("adder triple node 1: ",sess.run(adder_triple_node,{a:1,b:3}))
print("adder triple node 2: ",sess.run(adder_triple_node,{a:[1,5],b:[3,10]}))
#Linear model: linear_model = W*x + b with trainable W and b
W = tf.Variable([0.3],tf.float32)
b = tf.Variable([-0.3],tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W*x + b
# TF1 variables must be explicitly initialized before first use.
init = tf.global_variables_initializer()
sess.run(init)
print("Linear model test: ",sess.run(linear_model,{x:[1,2,3,4]}))
#loss function: sum of squared errors against target y
y = tf.placeholder(tf.float32)
sq_delta = tf.square(linear_model-y)
loss = tf.reduce_sum(sq_delta)
print("Loss with input data [0,-1,-2,-3]: ",sess.run(loss,{x:[1,2,3,4],y:[0,-1,-2,-3]}))
#We fix W and b for making loss = 0 (W=-1, b=1 fits this data exactly)
fixW = tf.assign(W,[-1])
fixb = tf.assign(b,[1])
sess.run([fixW,fixb])
print("Loss with input data [0,-1,-2,-3]: ",sess.run(loss,{x:[1,2,3,4],y:[0,-1,-2,-3]}))
#Machine learning using gradient descent (learning rate 0.01)
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
# Re-initialize so training starts again from W=0.3, b=-0.3.
sess.run(init)
for i in range(1,1000):
	sess.run(train,{x:[1,2,3,4],y:[0,-1,-2,-3]})
print("Learnt values of W and b: ",sess.run([W,b]))
| [
"vivekhtc25@gmail.com"
] | vivekhtc25@gmail.com |
86d1b9c5a774be054d02efb9bf98b6f6643622e2 | c36679186f669c6e3bd1c106c96d4a17be1f5ab1 | /Practice_Anisul/262.py | 72b78abd73be85c6f7a18102f388e4d97351f95b | [] | no_license | touhiduzzaman-tuhin/python-code-university-life | 60a3d671b200a6f5222c6d176c13c5f20f013509 | 6d2e3d90d430faa5c83fe79e7fb1ebe516994762 | refs/heads/master | 2023-03-22T15:18:10.636203 | 2021-03-06T18:52:04 | 2021-03-06T18:52:04 | 332,467,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | class A:
def display(self):
print("A Class")
class B:
    """Second demo class; identifies itself on stdout."""

    def display(self):
        message = "B Class"
        print(message)
# Multiple inheritance demo: the MRO is C -> A -> B, so c.display()
# resolves to A's implementation (prints "A Class"); B's is shadowed.
class C(A, B):
    pass
c = C()
c.display() | [
"touhiduzzamantuhin95@gmail.com"
] | touhiduzzamantuhin95@gmail.com |
6d0e6203994419ca104557740b33c76da2ad5262 | a66f92f305956320b55c0a190506f57e3df45ac7 | /story/accounts/views.py | f3ea4b63cf138f320e02c045775454cfb282152f | [] | no_license | kopuskopecik/django-projects | a12380c37130cb20974c785a76a6f4a8fb18d238 | 4aceafaf5ff29761c3867c9f62c93c258d9c16ec | refs/heads/master | 2022-12-05T11:41:53.904378 | 2020-08-24T18:35:51 | 2020-08-24T18:35:51 | 289,997,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | from django.shortcuts import render, redirect
from django.views.generic import CreateView, ListView, UpdateView, TemplateView
from django.contrib.auth import login
from .forms import TeacherSignUpForm, StudentSignUpForm
from .models import User
class SignUpView(TemplateView):
    """Static signup landing page; simply renders accounts/signup.html."""
    template_name = 'accounts/signup.html'
class TeacherSignUpView(CreateView):
    """Teacher signup form; logs the new user in and redirects home."""

    model = User
    form_class = TeacherSignUpForm
    template_name = 'accounts/signup_form.html'

    def get_context_data(self, **kwargs):
        # Tell the shared signup template which role is being created.
        kwargs['user_type'] = 'teacher'
        context = super().get_context_data(**kwargs)
        return context

    def form_valid(self, form):
        new_user = form.save()
        login(self.request, new_user)
        return redirect('home')
class StudentSignUpView(CreateView):
    """Student signup form; logs the new user in, then opens their profile."""

    model = User
    form_class = StudentSignUpForm
    template_name = 'accounts/signup_form.html'

    def get_context_data(self, **kwargs):
        # Tell the shared signup template which role is being created.
        kwargs['user_type'] = 'student'
        context = super().get_context_data(**kwargs)
        return context

    def form_valid(self, form):
        new_user = form.save()
        login(self.request, new_user)
        return redirect('profiles:ogrenci_profil', ogrenci_id=new_user.id)
| [
"kopuskopecik@gmail.com"
] | kopuskopecik@gmail.com |
962762574d63ff6f19cf72b4cd75c54ee73ff333 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03957/s373576489.py | 59846919a20b2bd2bfee075a9d156666298a1fae | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | def main():
S = list(input())
flag = False
for s in S:
if s == 'C':
flag = True
else:
pass
if flag and s == 'F':
print('Yes')
break
else:
print('No')
# Entry-point guard: run main() only when executed as a script.
if __name__ == '__main__':
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ac9ee1ec2754f07e11fa2faf012ee9c3d6b29ea6 | a59ff1a95fff52e4849ae270390158fbcb716abb | /yowa/templates/39/lw_tz.py | 3731053b2f480d2cdcb260980b4bdb03fb8df371 | [] | no_license | cih315/iCrawler | 7e6090351fbcdb50078ee2cc3f32a1c1d7694252 | fe5e85b65cda00219d7864129d507fbb155f93a2 | refs/heads/master | 2021-01-22T07:23:14.716405 | 2012-07-09T08:53:34 | 2012-07-09T08:53:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,774 | py | #coding: utf-8
'''
Created on 2012-3-8
@author: joyce
'''
import re
import time
from pyquery import PyQuery
import Image
from yowa.templates.BaseTemplate import Base
from yowa.items import ContentItem
class Parser(Base):
    """Scraper for 39.net Q&A forum threads (site id '39_tz').

    NOTE(review): relies on Base providing self.hxs, self.html and
    self.getRealURI -- confirm against BaseTemplate.
    """
    name = '39_tz'
    def extract(self):
        """Build a ContentItem (title, stitched posts, author, release
        time, image URLs) from the thread page."""
        item = ContentItem()
        # Strip HTML comments before running the selectors.
        self.html = re.sub('<!--.*?-->', '', self.html)
        title=self.hxs.select("//div[@class='tbrig']/h1/text()").extract()
        content=self.hxs.select("//div[@class='tbrig']").extract()
        tz_content=[]
        count=0
        all_content=''
        # Collect the <p> nodes of every post block.
        for con in content:
            doc=PyQuery(con)
            tz_content.append(doc('p'))
        # Stitch posts together: the first entry is the original post,
        # the rest are numbered replies ("N 楼" = floor N) with separators.
        for t_c in tz_content:
            if count==0:
                all_content=all_content+t_c.__unicode__()
                all_content=all_content+"<p>================</p>"
                count=count+1
            else:
                all_content=all_content+"<P>"+str(count)+" 楼<P>"
                all_content=all_content+t_c.__unicode__()
                all_content=all_content+"<p>-----------------</p>"
                count=count+1
#        if not all_content:
#            content=self.hxs.select("//div[@class='tbrig']")[0].extract()
#            doc=PyQuery(content)
#            tz_content=doc('div.tbrig')
#            tz_content.remove('span')
#            tz_content=tz_content.__unicode__()
        tz_author=self.hxs.select("//div[@class='tblef']/span/a/text()").extract()
        release_time=self.hxs.select("//div[@class='tbrig']/span/b/text()").extract()
        # Keep only the '20xx...xx' date/time fragment of the first match.
        ob=re.compile(u'20\d\d.*\d\d')
        release_time=ob.findall(release_time[0])
        imgs = self.hxs.select("//div[@class='tbrig']")[0].select('./p/img/@src').extract()
        img_all = []
        # Drop .gif/.GIF images; resolve the rest via self.getRealURI.
        for img in imgs:
            if".gif" in img:
                continue
            if".GIF" in img:
                continue
            else:
                img_all.append(self.getRealURI(img))
        item['image_urls'] = img_all
        item['title'] = self.title = title[0]
        item['content'] = self.content = all_content
        item['release_time'] = release_time[0]
#        item['release_switch_time'] = self.release_switch_time = time.mktime(time.strptime(release_time[0],u'%Y-%m-%d %H:%M'))
        item['source'] = u"39健康问答"
        item['author'] = tz_author[0]
        item['pic_url'] = ''
        self.title = item['title']
        self.content = item['content']
        return item
    def isMatch(self, ):
        """Extraction succeeded iff both a title and content were found."""
        if len(self.title) > 0 and len(self.content) > 0:
            return True
        else:
            return False | [
"stef-hw@163.com"
] | stef-hw@163.com |
1e41cb1c6d8eee4cd78d0692ef11b9c157b548db | 219992b56f8e5cd8b47534d98417dd8ac795110b | /src/FastPass-Mobile/done/NewRealease_VOctober_OrderReason.py | c2aad9e5544d4320fcd0d7da640e2efac7be84c3 | [] | no_license | haohaixingyun/dig-python | 63844877de0acad04d07d7119e381b9bb4a97395 | 4e8c3e3cb1ba98f39d65095b4d3b09ba115e586b | refs/heads/master | 2021-01-13T08:45:59.669829 | 2016-10-26T05:54:07 | 2016-10-26T05:54:07 | 71,970,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,499 | py | # coding = utf - 8
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.select import Select
import unittest
import time,sys
import login,C_screenshots
import HTMLTestRunner
class FastPass_Mobile(unittest.TestCase):
    """Selenium UI test (Python 2): searches two known SAP sales orders in
    FastPass and asserts the expected order-type text appears, capturing a
    screenshot after every step."""
    def setUp(self):
        """Start a Chrome session and stash the common test fixtures."""
        self.driver =webdriver.Chrome()
        self.base_url = "http://sbybz2239.sby.ibm.com:19080/FastPassS2/"
        self.verificationErrors = []
        self.accept_next_alert = True
        self.wait = WebDriverWait(self.driver, 10) # timeout after 10 seconds
    def test_Case_NewRealease_VOctober_OrderReason(self):
        """Open the home page, log in, then run two sales-order searches
        (DEBIT and STANDARD orders) and verify the result pages."""
        print "Test case start:"
        print "\n"
        print "step1. open the home page"
        driver = self.driver
        wait = self.wait
        driver.get(self.base_url + "fastpass.html")
        driver.maximize_window()
        now_url = driver.current_url
        print now_url
        assert now_url == 'http://sbybz2239.sby.ibm.com:19080/FastPassS2/fastpass.html' ,"URL is not correct."
        C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRealease_VOctober_OrderReason_p1')
        ###capture screenshots
        print "\n"
        print "step2.login"
        login.login(self,'Sales orders')
        C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRealease_VOctober_OrderReason_p2')
        driver.find_element_by_name("submit").click()
        driver.implicitly_wait(10)
        print "\n"
        print "step3.Input 'SAP sales order number' field with '0052900555' and click 'Search'.."
        driver.find_element_by_id("sap_sales_ord_num").clear()
        driver.find_element_by_id("sap_sales_ord_num").send_keys("0052900555")
        time.sleep(1)
        driver.find_element_by_name("ibm-submit").submit()
        time.sleep(1)
        result = driver.title
        assert result == 'FastPass | Sales orders - Sales order information' ,"The page did not be opened correct"
        assert 'DEBIT' in driver.page_source ,"the data is not available"
        assert 'Order type:' in driver.page_source ,"the data is not available"
        C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRealease_VOctober_OrderReason_p3')
        time.sleep(3)
        print "\n"
        print "step4.Click the 'Sales orders' option from the navigation panel."
        driver.find_element_by_xpath("(//a[contains(text(),'Sales orders')])[1]").click()
        driver.implicitly_wait(10)
        time.sleep(3)
        result = driver.title
        assert result == 'FastPass | Sales order - Sales order search' ,"The page did not be opened correct"
        C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRealease_VOctober_OrderReason_p4')
        time.sleep(3)
        print "\n"
        print "step5.Input 'SAP sales order number' field with '0053218611' and click 'Search'.."
        driver.find_element_by_id("sap_sales_ord_num").clear()
        driver.find_element_by_id("sap_sales_ord_num").send_keys("0053218611")
        time.sleep(1)
        driver.find_element_by_name("ibm-submit").submit()
        time.sleep(1)
        result = driver.title
        assert result == 'FastPass | Sales orders - Sales order information' ,"The page did not be opened correct"
        assert 'STANDARD' in driver.page_source ,"the data is not available"
        assert 'Order type:' in driver.page_source ,"the data is not available"
        C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRealease_VOctober_OrderReason_p5')
        time.sleep(3)
        print "\n"
        print "Test Case end with successfully!"
    def tearDown(self):
        """Close the browser and fail if any verification errors were queued."""
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
# Script entry: run the single test via HTMLTestRunner and write a
# timestamped HTML report (Python 2 -- note the file() builtin below).
if __name__ == '__main__':
    now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
    testunit=unittest.TestSuite()
    testunit.addTest(FastPass_Mobile("test_Case_NewRealease_VOctober_OrderReason"))
    filename="C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\"+now+" FastPass_Test_Case_NewRealease_VOctober_OrderReason.html"
    fp=file(filename,'wb')
    runner = HTMLTestRunner.HTMLTestRunner(stream=fp,title='FastPass_Mobile Test Case',description='This is NewRealease_VOctober_OrderReason test case')
    runner.run(testunit)
"yunxinghai@hotmail.com"
] | yunxinghai@hotmail.com |
f75dc3c8ae9f08ddf33aebc98bdb2213508a2085 | 836f615cf2c2364688bd693e1b37c4f7861e4bd9 | /coding/python-master/ch10/exercise10.2.py | 07ec3b9043245d114007cdac23687da1b9e01703 | [] | no_license | yoSlick/Pythonista | 785c120bebbe52a00e76ff7b8c35d056256d4fb0 | 6bdf00d6d0c639b65dfb6755d835e3c88936999c | refs/heads/master | 2020-05-30T10:41:10.119259 | 2019-06-19T02:37:54 | 2019-06-19T02:37:54 | 189,678,146 | 1 | 0 | null | 2019-06-01T01:41:04 | 2019-06-01T01:41:04 | null | UTF-8 | Python | false | false | 890 | py | #Write a program to read through the mbox-short.txt and
#figure out the distribution by hour of the day for each of the messages.
#You can pull the hour out from the 'From ' line by finding the time and
#then splitting the string a second time using a colon.
#Once you have accumulated the counts for each hour
#print out the counts, sorted by hour as shown below.
fhand = open('mbox-short.txt')
my_dictionary = dict()
for line in fhand:
if line.startswith('From '):
at_position = line.find(':')
#print at_position
hour = line[at_position-2 : at_position]
#print hour
#timing.append(hour)
my_dictionary[hour] = my_dictionary.get(hour,0) + 1
#print my_dictionary #this is my key value dictionary
lst = list()
for key, val in my_dictionary.items():
lst.append((key, val))
lst.sort()
for key, val in lst:
print key, val
| [
"tdamdouni@users.noreply.github.com"
] | tdamdouni@users.noreply.github.com |
5c29c3079328d4dc0fbf764348fc38348c238dc0 | b609eba1236d6cd791d80f02beba6e8d82045c43 | /UnetForFashionMnistNew.py | 261408838e59ef1005ec5b4a4d3ad1e155f477f3 | [] | no_license | guanguanboy/fourier_optical_processing_linux | 83e39ca32dd69a592667cf563867b1a68c037486 | 2ec1facd6a5b511210dd3a184d4242392dbbdf80 | refs/heads/master | 2023-01-19T09:22:23.898666 | 2020-11-30T02:24:03 | 2020-11-30T02:24:03 | 317,089,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
# Down sampling module
def add_conv_stage(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=True):
    """Two (Conv2d -> ReLU) layers: dim_in -> dim_out, then dim_out -> dim_out.

    With the default kernel_size=3, stride=1, padding=1 the spatial size
    is preserved.
    """
    conv_kwargs = dict(kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
    layers = [
        nn.Conv2d(dim_in, dim_out, **conv_kwargs),
        nn.ReLU(),
        nn.Conv2d(dim_out, dim_out, **conv_kwargs),
        nn.ReLU(),
    ]
    return nn.Sequential(*layers)
# Up sampling module
def upsample(ch_coarse, ch_fine):
    """2x spatial upsampling: ConvTranspose2d(k=4, s=2, p=1, no bias) + ReLU."""
    deconv = nn.ConvTranspose2d(ch_coarse, ch_fine, 4, 2, 1, bias=False)
    return nn.Sequential(deconv, nn.ReLU())
class UNetForFashionMnistNew(nn.Module):
    """Small 3-level U-Net mapping in_channel -> out_channel feature maps.

    Spatial size is preserved end to end; the input H/W must be divisible
    by 4 (two MaxPool2d(2) stages in the encoder).
    """
    def __init__(self, in_channel, out_channel):
        super(UNetForFashionMnistNew, self).__init__()
        # Encoder: 32 -> 64 -> 128 channels.
        self.conv1 = add_conv_stage(in_channel, 32)
        self.conv2 = add_conv_stage(32, 64)
        self.conv3 = add_conv_stage(64, 128)
        # Decoder stages (inputs are upsample output concat skip connection).
        self.conv2m = add_conv_stage(128, 64)
        self.conv1m = add_conv_stage(64, 32)
        # Final 3x3 conv + ReLU projecting 32 -> out_channel.
        self.conv0 = nn.Sequential(
            nn.Conv2d(32, out_channel, 3, 1, 1),
            nn.ReLU()
        )
        self.max_pool = nn.MaxPool2d(2)
        self.upsample32 = upsample(128, 64)
        self.upsample21 = upsample(64, 32)
        # Weight initialization: Xavier for every conv weight, zeros for
        # biases. The original guarded the weight init with a second
        # `if m.bias is not None`, so the bias-free ConvTranspose2d layers
        # (created with bias=False) were never Xavier-initialized; it also
        # used the deprecated nn.init.xavier_uniform name.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        # Encode
        conv1_out = self.conv1(x)
        conv2_out = self.conv2(self.max_pool(conv1_out))
        conv3_out = self.conv3(self.max_pool(conv2_out))
        # Decode with skip connections (concatenate along the channel dim).
        conv3m_out_ = torch.cat((self.upsample32(conv3_out), conv2_out), 1)
        conv2m_out = self.conv2m(conv3m_out_)
        conv2m_out_ = torch.cat((self.upsample21(conv2m_out), conv1_out), 1)
        conv1m_out = self.conv1m(conv2m_out_)
        conv0_out = self.conv0(conv1m_out)
        return conv0_out
"517445163@qq.com"
] | 517445163@qq.com |
84d653c92dff233db4ea640727ed7f8276eb705c | f39528e9bad8cfa78b38fcbb7a5b430ac0c7a942 | /Heavy_Neutrino/HeavyNeutrino_trilepton_M-10_V-0.00108_tau_pre2017_massiveAndCKM_LO_pre2017.py | 6d8db571b0274dac6cf0d3396b3c78e13fc037cb | [] | no_license | isildakbora/EXO-MC-REQUESTS | c0e3eb3a49b516476d37aa464c47304df14bed1e | 8771e32bbec079de787f7e5f11407e9e7ebe35d8 | refs/heads/master | 2021-04-12T11:11:03.982564 | 2019-04-29T15:12:34 | 2019-04-29T15:12:34 | 126,622,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | import FWCore.ParameterSet.Config as cms
# LHE production step: run the MadGraph gridpack for the HeavyNeutrino
# trilepton sample (M=10 GeV, |V|=0.00108, tau coupling) for 1100 events.
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
    args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.4.2/exo_heavyNeutrino/displaced_pre2017/v1/HeavyNeutrino_trilepton_M-10_V-0.00108_tau_pre2017_massiveAndCKM_LO_slc6_amd64_gcc481_CMSSW_7_1_30_tarball.tar.xz'),
    nEvents = cms.untracked.uint32(1100),
    numberOfParameters = cms.uint32(1),
    outputFile = cms.string('cmsgrid_final.lhe'),
    scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
# Hadronization step: Pythia8 with common + CUEP8M1 tune settings at
# sqrt(s) = 13 TeV. (The re-import of cms below is redundant but harmless.)
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8HadronizerFilter",
    maxEventsToPrint = cms.untracked.int32(1),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    filterEfficiency = cms.untracked.double(1.0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    comEnergy = cms.double(13000.),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CUEP8M1Settings',
                                    )
    )
)
| [
"bora.isildak@cern.ch"
] | bora.isildak@cern.ch |
c0d8177ad9b7a9d907e515eee79a09ca87c07fd3 | c9d7d2d0c2e3170229c95fde8f651c59c7cdfd62 | /vocabBuilder.py | c4b2fd345812d3baef931288d932fed6d1bf1aef | [] | no_license | pratikaher88/AnomalyLegalDocuments | e16918476ba3a1fdb8715645aa19f4804727bfd5 | 5b2ee81d749d116ca33c8edf9b9edf1aa49da850 | refs/heads/master | 2020-03-13T13:54:50.314702 | 2019-10-20T19:48:32 | 2019-10-20T19:48:32 | 131,147,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,459 | py | import logging
import htmlParser
# import txtParser
import os.path
import sys
import gensim
import re
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import RegexpTokenizer
import nltk
from gensim import corpora, models
tokenizer_old = nltk.data.load('tokenizers/punkt/english.pickle')
tokenizer = RegexpTokenizer(r'\w+')
# def updateModel(model, fileNameList):
# for trainingTxt in fileNameList:
# newSentence = gensim.models.word2vec.LineSentence(trainingTxt)
# model.build_vocab(newSentence, update=True)
# model.train(newSentence)
extraStopWords = ['bitbucket', 'atlassian', 'github']
en_stop = get_stop_words('en')
p_stemmer = PorterStemmer()
def cleanupDoc(fileString):
    """Lowercase, tokenize and drop English stop words; return the
    remaining tokens joined by single spaces."""
    words = tokenizer.tokenize(fileString.lower())
    kept = (w for w in words if w not in en_stop)
    # Stemming is intentionally disabled (p_stemmer kept for reference).
    return ' '.join(kept)
def generateVocabulary(fname):
    """Concatenate every cleaned corpus file under ./corpora into fname.

    Fixes vs. the original: file handles are context-managed, and the
    output file itself is skipped while scanning ./corpora (it is written
    into that same directory, so a second run would otherwise read the
    vocabulary file back into itself).
    """
    out_name = os.path.basename(fname)
    with open(fname, 'w+') as f:
        f.write(' ')  # keep the original leading-space behavior
        for filename in os.listdir('./corpora'):
            if filename == out_name:
                continue  # never ingest the file we are writing
            with open('./corpora/' + filename, 'rb') as infile:
                infileContent = infile.read().decode('utf-8')
                f.write(cleanupDoc(infileContent))
def initVocabModel():
    """Regenerate the vocabulary file, then train and save a word2vec model.

    NOTE(review): written against an old gensim API (model.iter, and
    LineSentence.max_sentence_length passed as total_examples) -- newer
    gensim renamed/changed these; confirm the installed version.
    """
    generateVocabulary('./corpora/vocabulary.txt')
    model = gensim.models.Word2Vec()
    trainSentences = gensim.models.word2vec.LineSentence('./corpora/vocabulary.txt')
    model.build_vocab(trainSentences)
    model.train(trainSentences,total_examples=trainSentences.max_sentence_length, epochs=model.iter)
    model.save('word2vecModel')
r = re.compile('[A-Za-z]+')
deleteSet = set(['a', 'an', 'and', 'or', 'the'])
def tokenize(ct):
    """Split text into sentences, keeping lowercase alphabetic tokens and
    dropping the small article/conjunction stop set; one string per sentence."""
    cleaned = []
    for raw_sentence in tokenizer_old.tokenize(ct):
        words = r.findall(raw_sentence.lower())
        cleaned.append(" ".join(w for w in words if w not in deleteSet))
    return cleaned
def file2CorpusTxt(txtPrefix):
    """Convert raw crawled files for one site prefix into ./corpora text.

    atlassian/github dirs: parse each *.html file with htmlParser and
    write one tokenized sentence per line. apple dirs: files are already
    text and are copied through unchanged.
    """
    # Candidate input dirs: name contains the prefix, excluding '*Parsed*'.
    fileDirs = [t for t in os.listdir('.') if txtPrefix in t and 'Parsed' not in t]
    for fileDir in fileDirs:
        if 'atlassian' in fileDir or 'github' in fileDir :
            for fname in os.listdir('./'+fileDir):
                print(fname)
                if 'html' not in fname:
                    continue
                # Element [3] of extractTextInfo's result is the text list
                # consumed below; fname[:-5] strips the '.html' suffix.
                textList = htmlParser.extractTextInfo(fileDir, './'+fileDir+'/'+fname)[3]
                print(">>>> translated file name = ", './corpora/vocab-'+txtPrefix+"-"+fname[:-5] + '.txt')
                with open('./corpora/vocab-'+txtPrefix+"-"+fname[:-5] + '.txt', 'w+') as fVocab:
                    for div in textList:
                        for content in div:
                            sentences = tokenize(content)
                            for sentence in sentences:
                                fVocab.write(sentence + '\n')
        elif 'apple' in fileDir:
            # Plain-text source: copy verbatim (fname[:-4] strips '.txt').
            for fname in os.listdir('./'+fileDir):
                with open('./'+fileDir+'/'+fname) as f:
                    with open('./corpora/vocab-'+txtPrefix+'-'+fname[:-4], 'w+') as fVocab:
                        fVocab.write(f.read())
def loadModel():
    """Load the word2vec model previously saved by initVocabModel()."""
    return gensim.models.Word2Vec.load('word2vecModel')
# Script entry: rebuild the 'apple' corpus, then (re)train the model.
if __name__ == '__main__':
    # file2CorpusTxt('atlassian')
    # file2CorpusTxt('github')
    file2CorpusTxt('apple')
    initVocabModel()
| [
"pratikaher88@gmail.com"
] | pratikaher88@gmail.com |
43fbb6b95ddcee9af4832ae849d95f7f42f36356 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/3582.py | ce5335c253ac258b9dcb35b8b5553aa1919f49c2 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | def compRows(cards1, cards2):
matchCount = 0
lastCard = 0
for card1 in cards1:
for card2 in cards2:
if card1 == card2:
lastCard = card1
matchCount += 1
if matchCount == 1:
return lastCard
elif matchCount == 0:
return -1 # volunteer cheated
else:
return 0 # bad magician
# Code Jam 'Magic Trick' driver (Python 2): for each case, read the row the
# volunteer chose in two 4x4 card layouts, intersect the two rows via
# compRows(), then print the card, "Bad magician!" (>1 match, code 0) or
# "Volunteer cheated!" (no match, code -1).
fi = open('A-small-attempt0.in')
casesT = fi.readline()
casesResults = []
for i in range(0, int(casesT)):
    # read ans 1
    ans1 = int(fi.readline())
    # read arrangement
    cards1 = []
    for r in range(1, 5):
        line = fi.readline()
        if r == ans1:
            cards1 = line[:-1].split(" ")
    # read ans 2
    ans2 = int(fi.readline())
    # read arrangement
    cards2 = []
    for r in range(1, 5):
        line = fi.readline()
        if r == ans2:
            cards2 = line[:-1].split(" ")
    casesResults.append(compRows(cards1, cards2))
# Translate the sentinel codes into the required output strings.
for i in range(len(casesResults)):
    result = casesResults[i]
    if result == 0:
        mesg = "Bad magician!"
    elif result == -1:
        mesg = "Volunteer cheated!"
    else:
        mesg = result
    print "Case #{0}: {1}".format(i + 1, mesg)
fi.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
32d5662180549aeb633963e30351126c9b6ce443 | c90ddd0930894c565197b739cd76140a7151fffd | /HLTrigger/Configuration/python/HLT_75e33/modules/hltEle5WP70GsfDetaL1SeededFilter_cfi.py | 50321074853fec578d1d2a3914760fa2554e55b0 | [
"Apache-2.0"
] | permissive | p2l1pfp/cmssw | 9cc6b111ff1935e49f86ec3da9f9b84fb13bbcdf | 9f0a3a22fe451c25114134c30ac1f5c1261f3183 | refs/heads/L1PF_12_5_X | 2023-08-17T00:38:15.374760 | 2023-06-13T12:55:57 | 2023-06-13T12:55:57 | 127,881,751 | 6 | 1 | Apache-2.0 | 2023-09-05T13:54:59 | 2018-04-03T09:10:17 | C++ | UTF-8 | Python | false | false | 1,273 | py | import FWCore.ParameterSet.Config as cms
# HLT filter: eta-binned cut on the GSF-track DeltaEta(seed) variable for
# the Ele5 WP70 L1-seeded path; keeps candidates below the per-bin
# thresholds (lessThan=True), requiring at least one (ncandcut=1).
hltEle5WP70GsfDetaL1SeededFilter = cms.EDFilter("HLTEgammaGenericQuadraticEtaFilter",
    # Four |eta| bins: [0, 0.8), [0.8, 1.479), [1.479, 2.1), [2.1, ...).
    absEtaLowEdges = cms.vdouble(0.0, 0.8, 1.479, 2.1),
    candTag = cms.InputTag("hltEle5WP70GsfOneOEMinusOneOPL1SeededFilter"),
    # No rho (pileup) correction is applied.
    doRhoCorrection = cms.bool(False),
    effectiveAreas = cms.vdouble(0.0, 0.0, 0.0, 0.0),
    energyLowEdges = cms.vdouble(0.0),
    etaBoundaryEB12 = cms.double(0.8),
    etaBoundaryEE12 = cms.double(2.1),
    l1EGCand = cms.InputTag("hltEgammaCandidatesL1Seeded"),
    lessThan = cms.bool(True),
    ncandcut = cms.int32(1),
    rhoMax = cms.double(99999999.0),
    rhoScale = cms.double(1.0),
    rhoTag = cms.InputTag(""),
    saveTags = cms.bool(True),
    # The energy-dependent (E and E^2) terms are all zero, so only the
    # constant thrRegular thresholds below are active in each eta bin.
    thrOverE2EB1 = cms.vdouble(0.0),
    thrOverE2EB2 = cms.vdouble(0.0),
    thrOverE2EE1 = cms.vdouble(0.0),
    thrOverE2EE2 = cms.vdouble(0.0),
    thrOverEEB1 = cms.vdouble(0.0),
    thrOverEEB2 = cms.vdouble(0.0),
    thrOverEEE1 = cms.vdouble(0.0),
    thrOverEEE2 = cms.vdouble(0.0),
    thrRegularEB1 = cms.vdouble(0.003),
    thrRegularEB2 = cms.vdouble(0.009),
    thrRegularEE1 = cms.vdouble(0.003),
    thrRegularEE2 = cms.vdouble(0.003),
    useEt = cms.bool(False),
    varTag = cms.InputTag("hltEgammaGsfTrackVarsL1Seeded","DetaSeed")
)
| [
"Thiago.Tomei@cern.ch"
] | Thiago.Tomei@cern.ch |
94a1d36dc402473a2758e4be0eb077ec6c5ade7e | 82fce9aae9e855a73f4e92d750e6a8df2ef877a5 | /Lab/venv/lib/python3.8/site-packages/OpenGL/GLX/ARB/get_proc_address.py | fbffe62551e7d91a5f9b9ce6ecd8697ee28b0f9a | [] | no_license | BartoszRudnik/GK | 1294f7708902e867dacd7da591b9f2e741bfe9e5 | 6dc09184a3af07143b9729e42a6f62f13da50128 | refs/heads/main | 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | '''OpenGL extension ARB.get_proc_address
This module customises the behaviour of the
OpenGL.raw.GLX.ARB.get_proc_address to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/get_proc_address.txt
'''
from OpenGL.raw.GLX.ARB.get_proc_address import _EXTENSION_NAME
def glInitGetProcAddressARB():
    """Return boolean indicating whether this extension is available."""
    from OpenGL import extensions
    available = extensions.hasGLExtension(_EXTENSION_NAME)
    return available
### END AUTOGENERATED SECTION | [
"rudnik49@gmail.com"
] | rudnik49@gmail.com |
729beee0fb008b1f1986f7a370898bf55ef2c27b | 2bf56904829ab9d5e5aa49a50aeceaef620df643 | /tests/test_hdx_register.py | 2fb20883cf77d4c0348f2b39dca1ee08ebed4571 | [
"MIT"
] | permissive | OCHA-DAP/hdx-scraper-unosat-flood-portal | 501f53d43ead4fc46312fc46229c43c034787ed0 | 80b0bcd404993e4bd1dae442f794c9f86b6d5328 | refs/heads/master | 2021-12-25T06:00:02.327571 | 2021-12-22T20:07:36 | 2021-12-22T20:07:36 | 37,866,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# system
import os
import sys
dir = os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(dir, 'scripts'))
# testing
import mock
import unittest
from mock import patch
# program
import config.load as Confi
#
# Global variables.
#
TEST_DATA = 'test_flood_portal_output.json'
# class CheckConfigFile(unittest.TestCase):
# '''Unit tests for checking if the config file is organized correctly.'''
# ## Structural tests.
# def test_wrapper_database_function_works(self):
# assert DB.Main() != False
| [
"luiscape@gmail.com"
] | luiscape@gmail.com |
157837b3e9b25401d96629c518ce8a96ea8eb441 | f337bc5f179b25969ba73e7680ffb0a0616e3b97 | /python/algorithm-python/section2/주사위 게임/AA.py | 51b20d0bd745f7058c45949121855d12c298f63d | [] | no_license | raiders032/PS | 31771c5496a70f4730402698f743bbdc501e49a3 | 08e1384655975b868e80521167ec876b96fa01c8 | refs/heads/master | 2023-06-08T10:21:00.230154 | 2023-06-04T01:38:08 | 2023-06-04T01:38:08 | 349,925,005 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | def price(x1, x2, x3):
if(x1 == x2 == x3):
return 10000+x1*1000
if(x1 == x2 != x3):
return 1000 + x1 * 100
if(x1 != x2 == x3):
return 1000 + x2 * 100
else:
return max(x1, x2, x3) * 100
n = int(input())
max_n = 0
for i in range(n):
x1, x2, x3 = map(int, input().split())
max_n = max(max_n, price(x1, x2, x3))
print(max_n) | [
"nameks@naver.com"
] | nameks@naver.com |
8a544066eb5b1fe3f4cc3786f6ee45c15f54202a | a066878b45eb9f5515f83cfaca160412b5b5a386 | /2020/200920/clean_code.py | 31c1b283b1101cbb698cf96c174c657b22519233 | [] | no_license | navill/Python_TIL | 9fec2074a5ac5ca8d6472369e4d8da81bd0e798d | 28d5147c8ebe93f201027f22fa070903bf155cc8 | refs/heads/master | 2022-12-27T07:49:26.721236 | 2020-10-10T07:17:51 | 2020-10-10T07:17:51 | 295,936,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | # ------------ [orthogonality] -------------
class BaseTokenizer:
    """Iterable wrapper over a dash-separated string."""

    def __init__(self, str_token):
        self.str_token = str_token

    def __iter__(self):
        for piece in self.str_token.split('-'):
            yield piece
class UpperIterableMixin:
    """Mixin that upper-cases whatever the next class in the MRO yields."""

    def __iter__(self):
        return (token.upper() for token in super().__iter__())
# Composition via MRO: iterating a Tokenizer runs the mixin's __iter__,
# which wraps BaseTokenizer's dash-splitting iterator and upper-cases it.
class Tokenizer(UpperIterableMixin, BaseTokenizer):
    pass
# Demo: Tokenizer lazily upper-cases dash-separated tokens.
number_str = '1-2-3-4-5-6'
token = Tokenizer(number_str)
alphabet_str = 'a-b-c-d-e-f-g'
token2 = Tokenizer(alphabet_str)
print(list(token))
print(list(token2))
# map() is lazy and does not mutate its input list.
list1 = [1, 2, 3, 4, 5]
print(list(map(lambda x: x - 1, list1)))
print(list1)
# ----------------------------------
def calculate_price(base_price, tax, discount):
    """Apply the tax rate, then the discount rate, to a base price."""
    taxed = base_price * (1 + tax)
    return taxed * (1 - discount)
def show_price(price):
    """Render a price as a dollar string with two decimal places."""
    return "${:.2f}".format(price)
def str_final_price(base_price, tax, discount, fmt_function=str):
    """Compute the final price and render it with fmt_function (str by default)."""
    final_price = calculate_price(base_price, tax, discount)
    return fmt_function(final_price)
# Default formatter is str(); the second call formats via show_price.
print(str_final_price(1000, 0.2, 0.5))
print(str_final_price(1000, 0.2, 0.5, fmt_function=show_price))
# ------------ -------------
| [
"blue_jihoon@naver.com"
] | blue_jihoon@naver.com |
3c64f29e90c580af6d2149369335f23fa36c85ef | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/1959.py | 029c64e42d0ecfa4c5ecf1036abd49a7a6e6599f | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,529 | py | import math
# t = int(input()) # read a line with a single integer
# """ Use this for running a .in file on your pc.
file_x = open('C-small-1-attempt0.in', 'r').readlines()
t = int(file_x[0])
# """
output_file = open('stall.txt', 'a')
def stall_decide(n, k):
    """Simulate k people choosing among n stalls (Code Jam 'Bathroom Stalls').

    Occupied positions are kept in x_at, with sentinels 0 and n + 1 standing
    for the walls.  Each person takes the (left-of-)middle free stall of the
    leftmost largest gap.  Returns (max, min) of the last person's counts of
    free stalls to the nearest occupied position / wall on each side.
    """
    ls = 0
    rs = 0
    x_at = [0, n + 1]
    for i in range(k):
        # sizes of the gaps between consecutive occupied positions
        gaps = [b - a for a, b in zip(x_at[:-1], x_at[1:])]
        # leftmost largest gap, delimited by occupied positions left/right
        gap_idx = gaps.index(max(gaps))
        left, right = x_at[gap_idx], x_at[gap_idx + 1]
        # occupy the middle free stall (left of centre for even interiors)
        new_x = left + 1 + (right - 1 - (left + 1)) // 2
        x_at.append(new_x)
        x_at.sort()
        if i == k - 1:
            j = x_at.index(new_x)
            ls = new_x - 1 - x_at[j - 1]
            rs = x_at[j + 1] - 1 - new_x
    # BUG FIX: in the scraped source this if/else was indented inside the
    # loop body, so for k > 1 the function returned (0, 0) on the very first
    # iteration.  It must run only after the last person has been placed.
    if ls > rs:
        return ls, rs
    else:
        return rs, ls
# One case per subsequent line ("N K"); write "Case #i: y z" with y >= z.
for i in range(1, t + 1):
    # s, m = [int(s) for s in input().split(" ")]
    n, k = [(s1) for s1 in str(file_x[i]).split(" ")]
    y, z = stall_decide(int(n), int(k))
    # print("Case #{}: {} {}".format(i, y, z))
    output_file.write("Case #{}: {} {}\n".format(i, y, z))
output_file.close() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
6fbc98aa5f931e45b7ce106edb6eb5d918ec6fa5 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_270/ch165_2020_06_20_20_39_36_670494.py | 6f67445e7e22ca5805493fe91f33ecf1173791b7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | def mais_populoso(dic):
counter = 0
country = 0
sum = 1
newdic = {}
for k,v in dic.items():
sum = 0
for i in v.values():
sum += i
if sum >= counter:
country = k
counter = sum
return country | [
"you@example.com"
] | you@example.com |
ae8e764c5a271f2a1b5ad65846cb1de72a5c4563 | 9958cfa7417eae5b7f62a8ce447b719bc67a3285 | /tests/test_plot.py | 0557052c814f291082c1abf3f7a82361c6206609 | [
"Apache-2.0"
] | permissive | zx-sdu/PhaseMap | a89a6d5b4a477fd45634d16893754b7f30b5061f | 11fbff34c1fe18cf03e209be79197b7e2fe325f5 | refs/heads/master | 2020-09-06T06:58:18.674373 | 2019-10-29T09:26:14 | 2019-10-29T09:26:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | # -*- coding: utf-8 -*-
# © 2015-2018, ETH Zurich, Institut für Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""Tests for the plot functions."""
# pylint: disable=redefined-outer-name,unused-wildcard-import
import pytest
import matplotlib
matplotlib.use('Agg')
import phasemap as pm
from phases import phase3
from plottest_helpers import *
@pytest.mark.plot
@pytest.mark.parametrize('scale_val', [None, (-3, 3)])
@pytest.mark.parametrize('plot_fct', [pm.plot.boxes, pm.plot.points])
def test_plots(assert_image_equal, plot_fct, scale_val):
    """Smoke-test both plot styles (2 x 2 parametrized cases).

    Runs a tiny two-step phasemap calculation on the unit square and plots
    it with the given plot function and colour-scale setting.
    `assert_image_equal` is a fixture -- presumably provided by
    plottest_helpers and comparing against a stored reference image; confirm.
    """
    res = pm.run(
        phase3,
        limits=[(0, 1)] * 2,
        num_steps=2,
    )
    plot_fct(res, scale_val=scale_val)
    assert_image_equal()
| [
"greschd@gmx.ch"
] | greschd@gmx.ch |
0fd3af772c0ec555c3c7b8e9baee30e46da50afd | 09c87fe780df6d1f9eb33799ed516a0bbd7ab1e3 | /Research/joint/joint2019/fullscreen_bug.py | 23c75f0f6727e1f997dd26fb7bbd00bdd2634898 | [
"MIT"
] | permissive | abulka/pynsource | 8ad412b85dc1acaeb83d7d34af8cc033c6baba91 | 979436525c57fdaeaa832e960985e0406e123587 | refs/heads/master | 2023-04-13T12:58:02.911318 | 2023-04-11T09:56:32 | 2023-04-11T09:56:32 | 32,249,425 | 271 | 46 | null | 2022-10-10T04:36:57 | 2015-03-15T07:21:43 | Python | UTF-8 | Python | false | false | 3,640 | py | import wx
# import wx.lib.ogl as ogl
import wx.html2 as webview
import time
import random
class AppFrame(wx.Frame):
    """Demo frame for reproducing a fullscreen / wx.ClientDisplayRect() bug.

    Layout (top to bottom): a drawing panel, a large one-line status field
    with the draw time, and a multi-line log.  Every resize logs the client
    size and wx.ClientDisplayRect() so changes of the latter can be spotted;
    fullscreen is detected by comparing the client size to wx.DisplaySize().
    """

    def __init__(self):
        wx.Frame.__init__(self, None, -1, "Demo", size=(900, 800), style=wx.DEFAULT_FRAME_STYLE)
        sizer = wx.BoxSizer(wx.VERTICAL)
        # Remember the initial ClientDisplayRect so later changes can be flagged.
        self.original = wx.ClientDisplayRect()
        self.pnl = DrawPanel(self, self.draw_test)
        self.wv = wx.TextCtrl(self, style = wx.TE_MULTILINE)
        font1 = wx.Font(14, wx.MODERN, wx.NORMAL, wx.NORMAL, False, 'Consolas')
        self.wv.SetFont(font1)
        self.sp = wx.TextCtrl(self)
        font2 = wx.Font(28, wx.MODERN, wx.NORMAL, wx.NORMAL, False, 'Consolas')
        self.sp.SetFont(font2)
        sizer.Add(self.pnl, 2, wx.GROW)
        sizer.Add( self.sp, 1, wx.ALL|wx.EXPAND, 5 )
        sizer.Add(self.wv, 2, wx.GROW)
        self.SetSizer( sizer )
        self.Layout()
        self.wv.WriteText(f"DisplaySize() = {wx.DisplaySize()}\n--------\n")
        self.rectangles = None  # built lazily on the first resize event
        self.Bind(wx.EVT_SIZE, self.OnResizeFrame)
        self.Bind(wx.EVT_IDLE, self.idle)
        self.SetSizer(sizer)
        self.SetAutoLayout(1)
        self.Show(1)

    def idle(self, event):
        # Keep the panel repainting so the draw-time readout stays current.
        self.pnl.Refresh()

    def OnResizeFrame(self, event):
        # Log the current client size and flag any change of ClientDisplayRect.
        msg = f" <--- ClientDisplayRect() changed/broken?" if self.original != wx.ClientDisplayRect() else ""
        self.wv.WriteText(f"frame.GetClientSize() {self.GetClientSize()} "
                          f"wx.ClientDisplayRect() {wx.ClientDisplayRect()} {msg}\n"
                          )
        if self.GetClientSize() == wx.DisplaySize():
            print("FULLSCREEN!!!")
        # Make random rectangles (once, over at least a 600 x 400 area).
        if not self.rectangles:
            w = self.GetClientSize().width
            h = self.GetClientSize().height
            if w < 600: w = 600
            if h < 400: h = 400
            # BUG FIX: the arguments were passed as (w, h, 200) although the
            # signature is makeRandomRectangles(num, W, H), i.e. it created
            # `w` (600+) shapes inside an h x 200 area.  Create 200 shapes
            # inside the w x h client area instead.
            self.rectangles = makeRandomRectangles(200, w, h)
        event.Skip()

    def draw_test(self, dc):
        """Timed draw of all cached shapes; the time is shown in `self.sp`."""
        start = time.time()
        dc.SetPen( wx.Pen("BLACK",1) )
        dc.SetBrush( wx.Brush("RED") )
        # NOTE(review): the cached tuples are rectangles but are drawn as the
        # inscribed ellipses -- appears intentional for the speed test.
        dc.DrawEllipseList(self.rectangles)
        msg = "DrawTime: %0.2f seconds" % (time.time() - start)
        self.sp.SetValue(msg)
class DrawPanel(wx.Panel):
    """Panel that delegates all painting to the `drawFun` callback.

    `drawFun` receives a freshly cleared wx.PaintDC on every paint event.
    """
    def __init__(self, parent, drawFun, log=None):
        wx.Panel.__init__(self, parent, -1)
        self.SetBackgroundColour(wx.WHITE)
        self.log = log  # NOTE(review): stored but never read in this class
        self.drawFun = drawFun
        self.Bind(wx.EVT_PAINT, self.OnPaint)
    def OnPaint(self, evt):
        # Clear to the background colour first, then let the callback draw.
        dc = wx.PaintDC(self)
        dc.Clear()
        self.drawFun(dc)
def makeRandomRectangles(num, W, H):
    """Return `num` random (x, y, w, h) tuples that fit inside a W x H area.

    Each rectangle is between 10 px and half the area in either dimension,
    and is positioned so it never crosses the right or bottom edge.
    """
    rectangles = []
    for _ in range(num):
        width = random.randint(10, int(W / 2))
        height = random.randint(10, int(H / 2))
        left = random.randint(0, W - width)
        top = random.randint(0, H - height)
        rectangles.append((left, top, width, height))
    return rectangles
# Script entry point: build the frame (it shows itself) and run the event loop.
app = wx.App()
frame = AppFrame()
app.MainLoop()
app.Destroy()
| [
"abulka@gmail.com"
] | abulka@gmail.com |
7662adc09fe27f9c0fa161440fea86bd44660b8d | f62fd455e593a7ad203a5c268e23129473d968b6 | /ironic-7.0.2/ironic/api/middleware/parsable_error.py | b0de0530294456c0e4623e4a0543d29664a90ed6 | [
"Apache-2.0"
] | permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 3,886 | py | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Middleware to replace the plain text message body of an error
response with one formatted so the client can parse it.
Based on pecan.middleware.errordocument
"""
import json
from xml import etree as et
from oslo_log import log
import six
import webob
from ironic.common.i18n import _, _LE
LOG = log.getLogger(__name__)
class ParsableErrorMiddleware(object):
    """Replace error body with something the client can parse.

    WSGI middleware: 2xx/3xx responses pass through untouched.  For any
    other status the plain-text body is re-wrapped as XML or JSON
    (depending on the request's Accept header) and the Content-Type and
    Content-Length headers are rewritten to match the new body.
    """
    def __init__(self, app):
        # `app` is the wrapped WSGI application.
        self.app = app
    def __call__(self, environ, start_response):
        # Request for this state, modified by replace_start_response()
        # and used when an error is being reported.
        state = {}
        def replacement_start_response(status, headers, exc_info=None):
            """Overrides the default response to make errors parsable."""
            try:
                status_code = int(status.split(' ')[0])
                state['status_code'] = status_code
            except (ValueError, TypeError):  # pragma: nocover
                raise Exception(_(
                    'ErrorDocumentMiddleware received an invalid '
                    'status %s') % status)
            else:
                if (state['status_code'] // 100) not in (2, 3):
                    # Remove some headers so we can replace them later
                    # when we have the full error message and can
                    # compute the length.
                    headers = [(h, v)
                               for (h, v) in headers
                               if h not in ('Content-Length', 'Content-Type')
                               ]
                # Save the headers in case we need to modify them.
                state['headers'] = headers
                return start_response(status, headers, exc_info)
        # Run the wrapped app; replacement_start_response() records the
        # status code and headers in `state` as a side effect.
        app_iter = self.app(environ, replacement_start_response)
        if (state['status_code'] // 100) not in (2, 3):
            req = webob.Request(environ)
            # Anything that does not explicitly prefer XML takes the JSON branch.
            if (req.accept.best_match(['application/json', 'application/xml'])
                    == 'application/xml'):
                try:
                    # simple check xml is valid
                    body = [et.ElementTree.tostring(
                        et.ElementTree.fromstring('<error_message>'
                                                  + '\n'.join(app_iter)
                                                  + '</error_message>'))]
                except et.ElementTree.ParseError as err:
                    LOG.error(_LE('Error parsing HTTP response: %s'), err)
                    # Fall back to a body carrying just the status code.
                    body = ['<error_message>%s' % state['status_code']
                            + '</error_message>']
                state['headers'].append(('Content-Type', 'application/xml'))
            else:
                if six.PY3:
                    app_iter = [i.decode('utf-8') for i in app_iter]
                body = [json.dumps({'error_message': '\n'.join(app_iter)})]
                if six.PY3:
                    body = [item.encode('utf-8') for item in body]
                state['headers'].append(('Content-Type', 'application/json'))
            # Content-Type/Content-Length were stripped above; append the
            # values matching the rewritten body.
            state['headers'].append(('Content-Length', str(len(body[0]))))
        else:
            body = app_iter
        return body
| [
"gongwayne@hotmail.com"
] | gongwayne@hotmail.com |
1979b0cccbfe05fd8f90daeed1b35268e2b09a70 | a81e4398efa00ed4ea727296868511a76d6c2066 | /osc_bge/school/migrations/0033_auto_20181207_0551.py | a33b718e11a5540707938e9fc0092bfef112414b | [
"MIT"
] | permissive | jisuhan3201/osc-bge | de6fcb1f96447c576a111d1bd5fdb404be1cb39f | 125c441d23d7f1fdb2d9b8f42f859082e757e25a | refs/heads/master | 2021-11-21T09:05:06.199386 | 2019-03-03T13:37:31 | 2019-03-03T13:37:31 | 155,510,866 | 0 | 0 | MIT | 2021-09-08T00:50:55 | 2018-10-31T06:40:36 | HTML | UTF-8 | Python | false | false | 477 | py | # Generated by Django 2.0.9 on 2018-12-07 05:51
from django.db import migrations, models
import osc_bge.school.models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.0.9, see file header): alters School.image so
    # uploads go to the path computed by osc_bge.school.models.school_directory_path.
    dependencies = [
        ('school', '0032_auto_20181206_1448'),
    ]
    operations = [
        migrations.AlterField(
            model_name='school',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to=osc_bge.school.models.school_directory_path),
        ),
    ]
| [
"jisu.han3201@gmail.com"
] | jisu.han3201@gmail.com |
f17bca954281c3149d2929d37a2df93c0093b7ac | c1267fbec95318184e7388cddf9b7085f797d514 | /2023/02 February/db02222023.py | 2782dbf787f2f5e6fed236317ca85f45cb0d3b52 | [
"MIT"
] | permissive | vishrutkmr7/DailyPracticeProblemsDIP | 1aedfd2e173847bf22989a6b0ec550acebb2bd86 | 2c365f633a1e1bee281fbdc314969f03b17ac9ec | refs/heads/master | 2023-05-31T23:49:52.135349 | 2023-05-28T09:32:12 | 2023-05-28T09:32:12 | 199,596,248 | 10 | 4 | MIT | 2022-11-02T21:31:59 | 2019-07-30T07:12:46 | Python | UTF-8 | Python | false | false | 726 | py | """
Given an integer array, nums, return true if all values within nums occur a unique number of times. Otherwise, return false.
Ex: Given the following nums…
nums = [1, 3, 3, 2, 2, 2], return true (1 appears once, 3 appears twice, two appears 3 times).
Ex: Given the following nums…
nums = [4, 10], return false (both 4 and 10 occur once).
"""
from collections import Counter
class Solution:
    def uniqueOccurrences(self, nums: list[int]) -> bool:
        """Return True iff every value in `nums` occurs a distinct number of times."""
        occurrence_counts = list(Counter(nums).values())
        # If two values shared a count, the set would be smaller than the list.
        return len(occurrence_counts) == len(set(occurrence_counts))
# Test Cases
if __name__ == "__main__":
    # Expected output: True (counts 1, 2 and 3 are all distinct), then
    # False (both 4 and 10 occur exactly once).
    solution = Solution()
    print(solution.uniqueOccurrences([1, 3, 3, 2, 2, 2]))
    print(solution.uniqueOccurrences([4, 10]))
| [
"vishrutkmr7@gmail.com"
] | vishrutkmr7@gmail.com |
bc846afb7f268e37bd99ff314352b44e13040537 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/indicator/legendgrouptitle/_text.py | c7ea420e2f17a2e602bbb16324622e956c2aceb4 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 421 | py | import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="text", parent_name="indicator.legendgrouptitle", **kwargs
):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs
)
| [
"nicolas@plot.ly"
] | nicolas@plot.ly |
54374395da4a7afd877dbee7b8745c47328aa10c | 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | /home/pi/TP-IoT/simple_light_sensor.py | 858999cd38895bcc1656e4ebc699256a75294f60 | [
"Apache-2.0"
] | permissive | lupyuen/RaspberryPiImage | 65cebead6a480c772ed7f0c4d0d4e08572860f08 | 664e8a74b4628d710feab5582ef59b344b9ffddd | refs/heads/master | 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | #!/usr/bin/env python3
# Read the light sensor values.
# Based on GrovePi Example for using the Grove Button (http://www.seeedstudio.com/wiki/Grove_-_Button)
# Refer to https://www.raspberrypi.org/forums/viewtopic.php?f=28&t=107058&p=739228
import time
import RPi.GPIO as GPIO
# Set the pin numbering to the BCM (same as GPIO) numbering format.
GPIO.setmode(GPIO.BCM)
# Assumes that we connected the light sensor to GPIO pin 3.
lightSensor = 3
# We tell the system that the GPIO pin for the light sensor should
# be an input port, not output.
GPIO.setup(lightSensor, GPIO.IN)
# Give the setup a moment to settle before the first read.
time.sleep(1)
# Loop forever.
while True:
    try:
        # Read the light level and display it.
        # NOTE(review): GPIO.input returns a digital 0/1 here, not an
        # analog brightness value -- confirm that is the intent.
        lightLevel = GPIO.input(lightSensor)
        print("light level = {}".format(lightLevel))
        # Wait 1 second and repeat the loop.
        time.sleep(1)
    except IOError: # Print "Error" if communication error encountered
        print("Error")
| [
"lupyuen@gmail.com"
] | lupyuen@gmail.com |
7a60b90f1902966d66a768dc932e61031957b716 | f6c6085c34ac6e1b494ff039cd0173b47cc9c6c4 | /byceps/blueprints/monitoring/metrics/views.py | ea19b6d88c31cccec5be9307b6156fdfffab78ad | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | FakoorCo/byceps | 971042f598a12c00465b0c3a714f4ac7dbdc9d5b | 11290b154b83f5ac3a9530fb6cd752b3d7a3989b | refs/heads/main | 2023-01-23T18:53:59.930267 | 2020-11-30T01:09:20 | 2020-11-30T01:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | """
byceps.blueprints.monitoring.metrics
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Metrics export for `Prometheus <https://prometheus.io/>`_
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import Response
from ....services.metrics import service as metrics_service
from ....util.framework.blueprint import create_blueprint
blueprint = create_blueprint('metrics', __name__)
@blueprint.route('')
def metrics():
    """Return the collected metrics, serialized for Prometheus."""
    # Local renamed from `metrics`, which shadowed this view function's name.
    collected = metrics_service.collect_metrics()
    serialized = list(metrics_service.serialize(collected))
    return Response(serialized, status=200, mimetype='text/plain; version=0.0.4')
| [
"homework@nwsnet.de"
] | homework@nwsnet.de |
6e4d9d2645fe4adc232bdec18ebbe14e3e03f15c | 44cae9fd5fba12f65dd753d0913eeb552419ac0e | /estrutura_repeticao/ex6.py | dba52cb158a6ec00596efc330228be72df7b9b61 | [] | no_license | aluisq/Python | 60edffa477f90bde67a76cc9306a8a5f39b82a4d | 2c065a01ef0fb2099f89f440f5d5ae2658cad7da | refs/heads/master | 2021-06-22T15:19:48.290092 | 2021-06-14T23:32:40 | 2021-06-14T23:32:40 | 214,283,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | x = 1
soma = 0
# Read ten numbers (x counts 1..10) and accumulate their sum in `soma`;
# the average is printed after the loop.
while x <= 10:
    y = float(input("Digite um número: "))
    soma += y # soma = soma + y
    x += 1
print((soma/10)) | [
"noreply@github.com"
] | aluisq.noreply@github.com |
f5a686695b1b93e6cee223f9fe47545edddee19e | 250db406ad4a62e3d576e55b979bcfdc3407f226 | /Leetcode_250/Problem_167/my_solution.py | 999a42c06a015aa886c8e7eebc36232a9c15262d | [] | no_license | chenshanghao/Interview_preparation | 0830f0e461a2fe287b8ec24ae761974f50268767 | 4e7701d32990604c16ba18a8083c2108c0232306 | refs/heads/master | 2020-04-25T02:36:19.499364 | 2019-06-10T04:51:00 | 2019-06-10T04:51:00 | 172,446,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | class Solution:
def twoSum(self, numbers: List[int], target: int) -> List[int]:
left, right = 0, len(numbers) - 1
while(left < right):
if numbers[left] + numbers[right] == target:
return [left+1, right+1]
elif numbers[left] + numbers[right] < target:
left += 1
else:
right -= 1 | [
"21551021@zju.edu.cn"
] | 21551021@zju.edu.cn |
4b20767a240742d09a3fdeaea793c3095fe532f0 | 060c559cdfe39e3ff37bcc4f5113f30901a605d7 | /tf2onnx/tflite/Buffer.py | cf08050d4dcca36c13560a633dcf31b35a4d611c | [
"Apache-2.0"
] | permissive | onnx/tensorflow-onnx | 4402bc3416b1e191b122120aafcce5a5f396b160 | d5b7f39de66f3b4ff8731fd23b3f379ae731e601 | refs/heads/main | 2023-09-01T08:47:00.417130 | 2023-08-29T03:57:16 | 2023-08-29T03:57:16 | 125,098,252 | 2,068 | 454 | Apache-2.0 | 2023-08-29T03:57:18 | 2018-03-13T18:39:56 | Jupyter Notebook | UTF-8 | Python | false | false | 3,534 | py | # SPDX-License-Identifier: Apache-2.0
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Buffer(object):
    # Auto-generated FlatBuffers accessor for the tflite `Buffer` table.
    # Do not edit by hand; regenerate with the FlatBuffers compiler.
    __slots__ = ['_tab']
    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Dereference the root table offset and wrap the raw buffer.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Buffer()
        x.Init(buf, n + offset)
        return x
    @classmethod
    def GetRootAsBuffer(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def BufferBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"TFL3" is the TFLite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
    # Buffer
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
    # Buffer
    # Element j of the `data` byte vector (vtable slot 4); 0 if the field is absent.
    def Data(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
        return 0
    # Buffer
    def DataAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
        return 0
    # Buffer
    def DataLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
    # Buffer
    def DataIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0
    # Buffer
    # Scalar `offset` field (vtable slot 6); default 0.
    def Offset(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 0
    # Buffer
    # Scalar `size` field (vtable slot 8); default 0.
    def Size(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 0
# Builder helpers for serializing a `Buffer` table (generated code).  The
# `Buffer*`-prefixed variants are deprecated aliases kept for compatibility.
def Start(builder): builder.StartObject(3)
def BufferStart(builder):
    """This method is deprecated. Please switch to Start."""
    return Start(builder)
def AddData(builder, data): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0)
def BufferAddData(builder, data):
    """This method is deprecated. Please switch to AddData."""
    return AddData(builder, data)
def StartDataVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def BufferStartDataVector(builder, numElems):
    """This method is deprecated. Please switch to Start."""
    return StartDataVector(builder, numElems)
def AddOffset(builder, offset): builder.PrependUint64Slot(1, offset, 0)
def BufferAddOffset(builder, offset):
    """This method is deprecated. Please switch to AddOffset."""
    return AddOffset(builder, offset)
def AddSize(builder, size): builder.PrependUint64Slot(2, size, 0)
def BufferAddSize(builder, size):
    """This method is deprecated. Please switch to AddSize."""
    return AddSize(builder, size)
def End(builder): return builder.EndObject()
def BufferEnd(builder):
    """This method is deprecated. Please switch to End."""
return End(builder) | [
"noreply@github.com"
] | onnx.noreply@github.com |
2e3ff7fec9b0686d70efc6a77a46d16d13b5cc18 | 4b89a7de426fb53b999b5f3834404215a90817df | /pyobjc-framework-Cocoa/PyObjCTest/test_nsprogressindicator.py | 9a2d3ea691cfcc68cd805a342c9e39b483ba0041 | [
"MIT"
] | permissive | peeyush-tm/pyobjc | a1f3ec167482566ddc7c895cfa2aca436109cf66 | da488946f6cc67a83dcc26c04484ca4f10fabc82 | refs/heads/master | 2021-01-20T19:26:06.015044 | 2016-05-22T14:53:37 | 2016-05-22T14:53:37 | 60,502,688 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py |
from PyObjCTools.TestSupport import *
from AppKit import *
class TestNSProgressIndicator (TestCase):
    # Checks of the PyObjC bridge metadata for AppKit's NSProgressIndicator.
    def testConstants(self):
        # Preferred-thickness constants and the two style enum values.
        self.assertEqual(NSProgressIndicatorPreferredThickness, 14)
        self.assertEqual(NSProgressIndicatorPreferredSmallThickness, 10)
        self.assertEqual(NSProgressIndicatorPreferredLargeThickness, 18)
        self.assertEqual(NSProgressIndicatorPreferredAquaThickness, 12)
        self.assertEqual(NSProgressIndicatorBarStyle, 0)
        self.assertEqual(NSProgressIndicatorSpinningStyle, 1)
    def testMethods(self):
        # Getters must be bridged to return Python bools; argument 0 of each
        # setter must be bridged to accept a BOOL.
        self.assertResultIsBOOL(NSProgressIndicator.isIndeterminate)
        self.assertArgIsBOOL(NSProgressIndicator.setIndeterminate_, 0)
        self.assertResultIsBOOL(NSProgressIndicator.isBezeled)
        self.assertArgIsBOOL(NSProgressIndicator.setBezeled_, 0)
        self.assertResultIsBOOL(NSProgressIndicator.usesThreadedAnimation)
        self.assertArgIsBOOL(NSProgressIndicator.setUsesThreadedAnimation_, 0)
        self.assertResultIsBOOL(NSProgressIndicator.isDisplayedWhenStopped)
        self.assertArgIsBOOL(NSProgressIndicator.setDisplayedWhenStopped_, 0)
# Run the checks with PyObjCTools.TestSupport's main() when executed directly.
if __name__ == "__main__":
    main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
70161897d0b9b9e47caee51a09533707a689e503 | 72ec4755053c3ac1fb02e560ab404e76383a2b22 | /examples/boilerplates/base_test_case.py | 1c29fadc085b465373443644c953b7cff17af96a | [
"MIT"
] | permissive | Forsaj1/SeleniumBase | 7f541e5a09f81f1aaa3705df6b5eb9880970fe9f | a6db2e4866fa80f23738b1d9602915f4aefa50b1 | refs/heads/master | 2023-05-09T13:26:43.296339 | 2020-05-26T17:56:52 | 2020-05-26T17:56:52 | 267,113,203 | 0 | 0 | MIT | 2021-06-02T01:55:52 | 2020-05-26T17:52:19 | Python | UTF-8 | Python | false | false | 1,175 | py | '''
You can use this as a boilerplate for your test framework.
Define your customized library methods in a master class like this.
Then have all your test classes inherit it.
BaseTestCase will inherit SeleniumBase methods from BaseCase.
'''
from seleniumbase import BaseCase
class BaseTestCase(BaseCase):
    """Project-wide base class for SeleniumBase tests.

    Inherit every test class from this one so shared setup/teardown and
    reusable helper methods (login, example_method, ...) live in one place.
    """
    def setUp(self):
        super(BaseTestCase, self).setUp()
        # <<< Add custom setUp code for tests AFTER the super().setUp() >>>
    def tearDown(self):
        # Capture a screenshot first so failures are documented before cleanup.
        self.save_teardown_screenshot()
        # <<< Add custom tearDown code BEFORE the super().tearDown() >>>
        super(BaseTestCase, self).tearDown()
    def login(self):
        # <<< Placeholder. Add your code here. >>>
        # Reduce duplicate code in tests by having reusable methods like this.
        # If the UI changes, the fix can be applied in one place.
        pass
    def example_method(self):
        # <<< Placeholder. Add your code here. >>>
        pass
'''
# Now you can do something like this in your test files:
from base_test_case import BaseTestCase
class MyTests(BaseTestCase):
def test_example(self):
self.login()
self.example_method()
'''
| [
"mdmintz@gmail.com"
] | mdmintz@gmail.com |
60775ba2cc1e4c3070e83cc5f6e404321cc6fa5f | fc3f784c8d00f419b11cbde660fe68a91fb080ca | /algoritm/20상반기 코딩테스트/.삼성 코테 준비/치킨 배달/bj15686.py | 5bed919d25670c6652e34abd134e5eb85cdd67e0 | [] | no_license | choo0618/TIL | 09f09c89c8141ba75bf92657ac39978913703637 | 70437a58015aecee8f3d86e6bfd0aa8dc11b5447 | refs/heads/master | 2021-06-25T07:01:34.246642 | 2020-12-21T04:57:13 | 2020-12-21T04:57:13 | 163,782,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | import sys
# Baekjoon 15686 "Chicken Delivery": redirect stdin to the sample input file.
sys.stdin = open('bj15686.txt','r')
from itertools import combinations
# N = grid size, M = number of chicken shops to keep open.
N,M=map(int,input().split())
A=[[int(x)for x in input().split()]for y in range(N)]
# H: house cells (value 1), C: chicken-shop cells (value 2); h/c are counts.
C,H,c,h=[],[],0,0
for i in range(N):
    for j in range(N):
        if A[i][j]==1:H.append((i,j));h+=1
        elif A[i][j]==2:C.append((i,j));c+=1
# D[i][j] = Manhattan distance from shop i to house j.
D=[[0]*h for _ in range(c)]
for i,(cy,cx) in enumerate(C):
    for j,(hy,hx) in enumerate(H):
        D[i][j]=abs(cy-hy)+abs(cx-hx)
# Brute force: for every choice of M shops, sum each house's distance to its
# nearest chosen shop; R keeps the minimum total.
R=10**9
for comb in combinations(range(c),M):
    r=0
    for x in range(h):r+=min(D[y][x]for y in comb)
    R=min(R,r)
print(R)
| [
"choo0618@naver.com"
] | choo0618@naver.com |
c8e2cfc20d342ea1085c95b9f2ac6d9585334a5a | bd1db30fd3c593e8dc4f2e21de630668456ed28f | /educational-resources/algorithms/algo_practice/interactive-coding-challenges-master/bit_manipulation/get_next/test_get_next_largest.py | 0fd11e2c79164e4c6567d912069cdf7d9d05c838 | [
"MIT",
"GFDL-1.1-or-later",
"CC-BY-4.0",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | vicb1/miscellaneous-notes | c50d759729b4d0067b3c4cb51a69350db5a941b9 | eb63641a8156a4dcd78924b5d0f6f0618479ceaf | refs/heads/master | 2023-05-11T06:04:03.254582 | 2023-05-09T12:34:18 | 2023-05-09T12:34:18 | 227,648,115 | 1 | 0 | MIT | 2022-12-11T17:00:50 | 2019-12-12T16:20:38 | Python | UTF-8 | Python | false | false | 1,086 | py | from nose.tools import assert_equal, assert_raises
class TestBits(object):
    # nose-style checks for a `Bits` class that must be provided by the
    # exercise solution (it is not defined in this file).
    def test_get_next_largest(self):
        bits = Bits()
        # Invalid inputs (None, zero, negatives) must raise.
        assert_raises(Exception, bits.get_next_largest, None)
        assert_raises(Exception, bits.get_next_largest, 0)
        assert_raises(Exception, bits.get_next_largest, -1)
        # Next larger number with the same count of set bits.
        num = int('011010111', base=2)
        expected = int('011011011', base=2)
        assert_equal(bits.get_next_largest(num), expected)
        print('Success: test_get_next_largest')
    def test_get_next_smallest(self):
        bits = Bits()
        assert_raises(Exception, bits.get_next_smallest, None)
        assert_raises(Exception, bits.get_next_smallest, 0)
        assert_raises(Exception, bits.get_next_smallest, -1)
        # Next smaller number with the same count of set bits.
        num = int('011010111', base=2)
        expected = int('011001111', base=2)
        assert_equal(bits.get_next_smallest(num), expected)
        print('Success: test_get_next_smallest')
def main():
    # Run both checks directly, without a test runner.
    test = TestBits()
    test.test_get_next_largest()
    test.test_get_next_smallest()
if __name__ == '__main__':
main() | [
"vbajenaru@gmail.com"
] | vbajenaru@gmail.com |
52ac7f2bf1f6689da84abdecdb6651c987a2e21e | 60b85430f588f60899228dd0d179d44c0d9c96e8 | /estados_civiles/migrations/0001_initial.py | 62dff7db438966f2d6015fc9a451592b2d6dc8d6 | [] | no_license | vlmarcelo/Sistema_Ugel | c69afa5d333fd528fa8e8635e801d9427ecd9851 | e69e5678a18114e4a8972a5f05c1d1b4f187d282 | refs/heads/master | 2020-04-19T04:36:57.224195 | 2016-08-23T05:32:15 | 2016-08-23T05:32:15 | 66,334,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-07-12 05:01
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.9.4, see file header):
    # creates the EstadoCivil table with audit timestamp fields and an
    # IPv4/IPv6-validated address column.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='EstadoCivil',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(help_text='Escribir estado civil.', max_length=255, unique=True)),
                ('slug', models.SlugField(editable=False, max_length=255, unique=True)),
                ('fecha_registro', models.DateTimeField(auto_now_add=True)),
                ('fecha_ultima_actualizacion', models.DateTimeField(auto_now=True)),
                ('nombre_host', models.CharField(max_length=255)),
                ('direccion_ip', models.GenericIPAddressField(validators=[django.core.validators.validate_ipv46_address])),
            ],
            options={
                'ordering': ('nombre',),
                'db_table': 'Estado_Civil',
                'verbose_name': 'Estado Civil',
                'verbose_name_plural': 'Estados Civiles',
            },
        ),
    ]
| [
"victor.luis.marcelo@gmail.com"
] | victor.luis.marcelo@gmail.com |
2995b7deeef0681f5afe25c9a0a4c9fe1ba89ccd | a0c00de0b858afec162817eb18644e15a43aa132 | /nonwords/anchors.py | bb1dac632f9e521ad0465da39428dc7d6f09d1dc | [] | no_license | GiovanniCassani/discriminative_learning | 2babb522e6ef1f8045cf4920f120841cc143eead | 6336ae084317178c4552afedb6f2363fa464c3b2 | refs/heads/master | 2021-03-30T16:42:44.730328 | 2018-07-10T11:38:34 | 2018-07-10T11:38:34 | 117,959,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,524 | py | __author__ = 'GCassani'
import helpers as help
from collections import defaultdict
def map_anchors_to_idx(anchor_file):
    """Map line indices to anchor words.

    :param anchor_file: the path to the file containing the ordered list of
                        anchor words, with their category
    :return:            a dictionary mapping the index of each all-lowercase
                        line to its stripped content; other lines are skipped
                        but still consume an index
    """
    with open(anchor_file, "r") as f:
        entries = [(i, line.strip()) for i, line in enumerate(f)]
    return {i: word for i, word in entries if word.islower()}
########################################################################################################################
def map_nonwords_to_anchors(correlation_file, ids2anchors):
    """
    :param correlation_file: the path to the file storing correlations for each nonword
    :param ids2anchors: a dictionary mapping row indices to anchor words
    :return nonwords2anchors: a dictionary mapping each nonword to all anchors and the corresponding pairwise
                            correlation between the nonword and anchor semantic vector
    """
    ids2nonwords = help.map_indices_to_test_words(correlation_file)
    nonwords2anchors = defaultdict(dict)
    with open(correlation_file, "r") as f:
        # Rows of the tab-separated file correspond to anchors, columns to nonwords.
        for row_id, line in enumerate(f):
            words = line.strip().split('\t')
            for col_id, corr in enumerate(words):
                # NOTE(review): cells whose raw string equals a nonword label are
                # skipped -- presumably a header row/column guard, but the test is
                # on the cell's string value, not its position.  Verify.
                if corr not in ids2nonwords.values():
                    nonwords2anchors[ids2nonwords[col_id]][ids2anchors[row_id]] = float(corr)
    return nonwords2anchors
########################################################################################################################
def get_most_correlated_anchor(nonwords2anchors):
    """
    :param nonwords2anchors: a dictionary mapping each nonword to all anchors and the corresponding pairwise
                             correlation between the nonword and anchor semantic vector
    :return most_correlated: a dictionary mapping each nonword to the closest noun and the highest noun
                             correlation, and to the closest verb and the highest verb correlation
    """
    most_correlated = defaultdict(dict)
    for nonword in nonwords2anchors:
        best_noun, best_verb = "none", "none"
        highest_noun, highest_verb = 0, 0
        for anchor, corr in nonwords2anchors[nonword].items():
            # anchors look like 'word:N' or 'word:V'
            pos = anchor.split(':')[1]
            if pos == "N":
                if corr > highest_noun:
                    highest_noun = corr
                    best_noun = anchor
            else:
                # BUG FIX: the verb branch compared against highest_noun, so a
                # later, weaker verb could replace a stronger one whenever it
                # merely beat the best noun correlation.
                if corr > highest_verb:
                    highest_verb = corr
                    best_verb = anchor
        # NOTE(review): anchors with correlation <= 0 are never selected (the
        # 0 / 'none' defaults win) -- confirm that is intended.
        most_correlated[nonword] = {"closest noun": best_noun,
                                    "closest verb": best_verb,
                                    "corr noun": highest_noun,
                                    "corr verb": highest_verb}
    return most_correlated
########################################################################################################################
def write_correlations(nonwords2anchors, output_file, ids2anchors, cond="minimalist", table_format="long"):
    """
    Write the nonword-anchor correlations to a tab-separated summary file.

    :param nonwords2anchors: a dictionary mapping each nonword to all anchors and the corresponding pairwise
                            correlation between the nonword and anchor semantic vector
    :param output_file: the path where the summary will be written to
    :param ids2anchors: a dictionary mapping numerical indices to anchor words
    :param cond: a string indicating the input used for the experiment
    :param table_format: a string indicating how to print data to table, either 'long' or 'wide'. In the long
                        format, six columns are created, first the nonword followed by its intended pos tag,
                        then the condition, then the anchor word followed by its pos tag, then the correlation
                        between the nonword and the anchor. In the wide format, each anchor word is a different
                        column, with each nonword-anchor cell indicating the correlation between the generated
                        semantic vector for the nonword and the semantic vector for the anchor word. An extra
                        column indicates the condition.
    :raises ValueError: if table_format is neither 'long' nor 'wide'
    """
    # make sure to only use anchor words (and no headers)
    anchors = sorted(ids2anchors.values())
    with open(output_file, "w") as f:
        if table_format == "long":
            # One output row per (nonword, anchor) pair.
            f.write('\t'.join(["Nonword", "Target", "Condition", "Anchor", "Category", "Correlation"]))
            f.write('\n')
            for nonword in nonwords2anchors:
                # Nonwords are encoded as 'baseform|TAG'.
                baseform, tag = nonword.split("|")
                for anchor in sorted(nonwords2anchors[nonword]):
                    # Anchors are encoded as 'word:POS'.
                    word, pos = anchor.split(":")
                    corr = str(nonwords2anchors[nonword][anchor])
                    f.write('\t'.join([baseform, tag, cond, word, pos, corr]))
                    f.write('\n')
        elif table_format == "wide":
            # One output row per nonword, one column per anchor.
            # NOTE(review): the header columns come from sorted(ids2anchors.values()),
            # while each row sorts the nonword's own anchor keys -- the two line up
            # only if every nonword has an entry for every anchor; confirm upstream.
            f.write("\t".join(["Nonword", "Target", "Condition", "\t".join(anchors)]))
            f.write('\n')
            for nonword in nonwords2anchors:
                baseform, tag = nonword.split("|")
                correlations = []
                for anchor in sorted(nonwords2anchors[nonword]):
                    correlations.append(str(nonwords2anchors[nonword][anchor]))
                f.write('\t'.join([baseform, tag, cond, "\t".join(correlations)]))
                f.write('\n')
        else:
            raise ValueError("unrecognized format %s!" % table_format)
########################################################################################################################
def anchor_analysis(anchors_file, correlations_file, output_file, table_format="long", cond="minimalist"):
    """
    Run the full anchor pipeline: index anchors, collect correlations, write the table.

    :param anchors_file: the path to the file containing the ordered list of anchor words
    :param correlations_file: the path containing the correlations between each nonword and all anchor words
    :param output_file: the path to the file where the output is going to be written to
    :param cond: a string indicating the input used for the experiment
    :param table_format: a string indicating how to print data to table, either 'long' or 'wide'. In the long
                        format, six columns are created, first the nonword followed by its intended pos tag,
                        then the condition, then the anchor word followed by its pos tag, then the correlation
                        between the nonword and the anchor. In the wide format, each anchor word is a different
                        column, with each nonword-anchor cell indicating the correlation between the generated
                        semantic vector for the nonword and the semantic vector for the anchor word. An extra
                        column indicates the condition.
    """
    # map_anchors_to_idx is defined elsewhere in this module.
    ids2anchors = map_anchors_to_idx(anchors_file)
    nonwords2anchors = map_nonwords_to_anchors(correlations_file, ids2anchors)
    write_correlations(nonwords2anchors, output_file, ids2anchors, table_format=table_format, cond=cond)
| [
"cassani.giovanni@gmail.com"
] | cassani.giovanni@gmail.com |
ab00368f031db7efb2a6c7aedd7c904fb6612238 | 10ff8316d74c79c1c9463178984eb0be2846d7cf | /BIOE591/hw9/hw9.py | 21d863d4e916d79f16e975463d04fba9d752a866 | [] | no_license | tonychangmsu/IPython | 48776282dbcb565bd5dcc537298318d1bbc6ace1 | b601bdb10e996e9d4cce33459f3511bf34744c21 | refs/heads/master | 2016-09-05T20:52:10.558412 | 2014-11-26T22:58:36 | 2014-11-26T22:58:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,315 | py | #Title: BIOE 591 Homework 9
#Author: Tony Chang
#Abstract: Assignment 9 for ecological modeling covering land use and climate
#7. LAND USE AND CLIMATE
#
#Q: Suppose that 20% of the land area of Earth is deforested and the area subsequently desertifies. By about how much would Earth's average surface temperature change?
'''
Variables:
a: albedo of Earth (entire)
a = solar flux reflected from Earth to space/ solar flux incident on Earth
R_s: albedo of Earth (surface)
R_s = solar flux reflected from Earth's surface to the atmosphere/ solar flux incident on Earth's surface
R_a: albedo of Earth (atmosphere)
A_s: absorption of surface
A_a: absorption of atmosphere
T_a: transmission coefficient of atmosphere
T_a = 1 - R_a - A_a
a = R_a + (((1 - R_a -A_a)**2) * R_s)/(1 - (R_a * R_s))
f: total fraction of the incoming flux that is absorbed in the atmosphere
f = A_a * (1 + ((1 - R_a - A_a)*R_s)/(1 - R_a*R_s))
'''
a = 0.3
f = 86/343
#Direct measurements of the other variables
A_a = 0.23
R_s = 0.16
R_a = 0.26
R_s_forest = 0.15
R_s_desert = 0.25
#total surface area of the earth that is forest is 6%
F_s = 0.06
# Surface-albedo change from converting the forested fraction to desert.
delta_R_s = F_s*(R_s_desert-R_s_forest)
# 0.26 is presumably d(a)/d(R_s) evaluated from the albedo expression in the
# docstring above -- TODO confirm this sensitivity factor.
delta_a = 0.26*delta_R_s
a_new = a + delta_a
#using equations from Problem III.6
def T_n(n, T_o):
    """Surface temperature of an n-layer grey atmosphere: (n + 1)^(1/4) * T_o."""
    return T_o * (n + 1) ** 0.25
#determine a 2 level atmosphere energy balance
omega = 1372 #radiation from sun (W/m2)
sigma = 5.67e-8 #Stefan-Boltzman constant (J/m2 sec K4)
#a = 0.3 #Earth's albedo (%) ###(a=0.39 Hartmann 2005; Freedman and Kaufmann 2002)
a = a_new #albedo after the desertification adjustment computed above
n = 2 #number of layers represented in atmosphere
T_o = ((omega*(1-a))/(4*sigma))**(1/4) #effective radiating temperature from planetary energy balance
T_s = T_n(n, T_o)
print("The estimated mean surface temperature of Earth based on the initial model is: %0.1f K \n"%(T_s))
#adding more parameters to account for the overestimate.
#Considerations are: 1. energy absorption by atmosphere, 2. latent heat flux, 3. Narrow band allowed to penetrate atmosphere
F_w = 20 #portion of IR emitted from the surface that is radiated directly to space
#NOTE(review): the assignment two lines down overwrites F_s = 86 immediately; kept for reference only
F_s = 86 #portion of the solar flux absorbed in the atmosphere
F_s = 0.55*(omega/4) - a*(omega/4)
F_e = 80 #flux of latent heat leaving Earth's surface
F_c = 17 #flux of convective heat leaving Earth's surface
W = (a*(omega/4)) + (sigma*T_o**4) + F_w - (omega/4)
W = 0 #initially ignoring the waste heat
T_o_hat = (((omega*(1-a))/(4*sigma)) - (F_w/sigma))**(1/4)
T_1_hat = (((2*sigma*(T_o_hat**4)) - (0.5*F_e) - (0.7*F_s))/sigma)**(1/4)
T_s_hat = (((2*sigma*(T_1_hat**4)) - (sigma*(T_o_hat**4)) + F_w - F_c - (0.5*F_e) - (0.3*F_s))/sigma)**(1/4)
print(' T_o: %0.1f K \n T_1: %0.1f K \n T_s: %0.1f K \n' %(T_o_hat,T_1_hat,T_s_hat))
'''
Exercise 4: Climate is affected by the $CO_2$ content of the atmosphere (see Problem III.8). By roughly what percentage will the atmospheric concentration of $CO_2$ immediately increase if the deforestation and subsequent burning of the cleared vegetation occur so rapidly that ocean uptake of $CO_2$ can be ignored? See Appendix, Section XII.2; assume that only tropical and temperate forests are cut and that for every 3 $km^2$ of tropical deforestation, there is 1 $km^2$ of temperate deforestation.
'''
#unclear question statement, does this mean that deforestation equals the npp from the ocean? If so, can we just subtract ocean npp from the total respired carbon per year?
#total tropical forest area
A_trf = 24.5e12 #m^2
npp_trf_by_A = 0.83 #kg(C)/m^2/yr
npp_trf = A_trf * npp_trf_by_A #kg/yr
#total temperate forest area
A_tef = 12.0e12 #m^2
npp_tef_by_A = 0.56 #kg(C)/m^2/yr
npp_tef = A_tef * npp_tef_by_A
#total ocean area
A_ocean = 332e12 #m^2
npp_ocean_by_A = 0.057 #kg(C)/m^2/yr
npp_ocean = A_ocean * npp_ocean_by_A
#the question first requires solving how much tropical and temperate forest area would have to be deforested to equal the npp of the ocean, where tropical to temperate forest deforestation occurs at a 3:1 ratio
A_deforest = npp_ocean/(3 * npp_trf_by_A + npp_tef_by_A)
#solve for the total mass of plant burned
trop_bio_area = 18.8 #kg(c)/m2
trop_biomass = 3*A_deforest*trop_bio_area
temp_bio_area = 14.6 #kg(c)/m2
temp_biomass = A_deforest*temp_bio_area
total_biomass_lost = trop_biomass+temp_biomass
#total CO2 in atmosphere = 735e12 kg(C)
C_a = 735e12
#assume that the total water content in wood is roughly 40%, then there is only 60% dry carbon content
#if 1kg burning wood resulted in a 1.9 kg CO2 release?
CO2_burned_wood = total_biomass_lost * .6 * 1.9
percent_change = CO2_burned_wood/C_a
print('The total percent change of burning %0.1e kg(C) of deforested wood would result in a %0.1f%% increase in atmospheric CO2'%(total_biomass_lost,percent_change*100))
# Alternative NPP-based calculation kept for reference:
# import numpy as np
# area = np.array([24.5,12,12,8,15,9,8,18,24,14,2,2.5,332,.4,26.6,.6,1.4])
# npp = np.array([0.83,0.56,0.36,0.27,0.32,0.23,0.065,0.032,0.015,0.29,1.13,0.23,0.057,0.23,0.16,0.9,0.81])
# total_npp = np.sum(area*1e12*npp)
# area[0] = area[0]-(3*A_deforest/1e12)
# area[1] = area[1]-(A_deforest/1e12)
# new_total_npp = np.sum(area*1e12*npp)
# total_C_notsequestered = total_npp-new_total_npp #kg(C)
# #now determine the total amount of CO2 emitted per year
# #from appendix XIII.1
# C_emitted = (50+20+10+5.3+.2+.1+.1)*1e12
# new_C_emitted = C_emitted + total_C_notsequestered
#solution is 70, which would require an increase of 514.5e12 kg(c)% | [
"tony.chang@msu.montana.edu"
] | tony.chang@msu.montana.edu |
b3d22b54f6baf0b6d6cd78feb321a39bd6d10d4b | ee880f62a8ffc1b8544695d3bc1f4bcf809965ab | /calc.py | cebaee7bb9bc0dd01c7bab414e194344c330248f | [] | no_license | kosyachniy/twianalysis | 1ba3ba6319cbeedf4f19e83ff31f01ced8b26e54 | 514a0ebb7829a4abb340d499e1151b5c55f26f80 | refs/heads/master | 2021-01-01T16:01:29.477061 | 2017-07-29T18:31:25 | 2017-07-29T18:31:25 | 97,756,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | from func import *
# Design matrix: read the table, then overwrite the first column with 1s to
# act as the intercept term of an ordinary-least-squares fit.
x=numread('table', 1).T
for i in range(len(x[0])):
    x[0][i]=1
x=x.T
# Response vector: the first column of the same table.
y=numread('table', 1).T[0].T
# Normal equations: w = (X^T X)^-1 X^T y.
# NOTE(review): the name `re` shadows the stdlib re module here.
re=np.linalg.inv(x.T.dot(x)).dot(x.T)
w=re.dot(y)
# NOTE(review): `mas` and `ii` are not defined in this file -- presumably they
# come from the star-import of `func`; this line raises NameError otherwise.
np.savetxt('data/'+mas[ii][0]+'-weights.csv', w, delimiter=',')
"polozhev@mail.ru"
] | polozhev@mail.ru |
515e7e68f0132b22225050c44a721645b4871252 | d83748a6ee73f5a3a871d5509be0ec3d9ec40140 | /archive/asgi.py | 1cb5492eac41d450d1c28aaa87d101325fc5e9af | [] | no_license | cabilangan112/archive | 613839e6dd54d9b607d8605a2c60bb9595589e02 | 2e78cfa543a85c46ad3400b2ce6edeb3a98ef359 | refs/heads/main | 2023-06-28T00:57:17.403427 | 2021-05-10T08:32:48 | 2021-05-10T08:32:48 | 359,016,399 | 2 | 0 | null | 2021-05-10T08:32:38 | 2021-04-18T01:10:41 | CSS | UTF-8 | Python | false | false | 391 | py | """
ASGI config for archive project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project's settings module before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'archive.settings')
# Module-level ASGI callable that ASGI servers import and serve.
application = get_asgi_application()
| [
"jassencabilangan@gmail.com"
] | jassencabilangan@gmail.com |
6a17c8f11587895d920d018d437d5fa91f3906da | 1b8a99a4ff80da51dc81dd8354bf9bf1cbd25a8b | /2020/super_ugly_number.py | dffd537e08fc31b6573b450402b0e4f63ebc6653 | [] | no_license | eronekogin/leetcode | ea639eebe0cd70af9eb4cba59bc68f636d7b3e0c | edb870f83f0c4568cce0cacec04ee70cf6b545bf | refs/heads/master | 2023-08-16T10:35:57.164176 | 2023-08-14T11:25:33 | 2023-08-14T11:25:33 | 163,679,450 | 0 | 0 | null | 2021-09-09T12:04:44 | 2018-12-31T15:33:06 | Python | UTF-8 | Python | false | false | 1,536 | py | """
https://leetcode.com/problems/super-ugly-number/
"""
from typing import List
import heapq
class Solution:
    def nthSuperUglyNumber(self, n: int, primes: List[int]) -> int:
        """Return the n-th super ugly number via the multi-pointer DP.

        One pointer per prime tracks which already-found ugly number that
        prime should multiply next; each step takes the smallest candidate
        and advances every pointer that produced it, which also discards
        duplicates generated by different primes.
        """
        k = len(primes)
        pointers = [0] * k
        candidates = list(primes)
        sequence = [1] * n
        for pos in range(1, n):
            smallest = min(candidates)
            sequence[pos] = smallest
            for j in range(k):
                if candidates[j] == smallest:
                    pointers[j] += 1
                    candidates[j] = sequence[pointers[j]] * primes[j]
        return sequence[-1]

    def nthSuperUglyNumber2(self, n: int, primes: List[int]) -> int:
        """Heap-merge variant.

        Each prime contributes a lazy stream of (known ugly number) * prime;
        heapq.merge yields the union of those streams in ascending order, so
        every new non-duplicate value is the next super ugly number. The
        generators read from the growing `found` list, which keeps the
        streams supplied with fresh values.
        """
        found = [1]

        def stream(prime: int):
            for value in found:
                yield value * prime

        for candidate in heapq.merge(*(stream(p) for p in primes)):
            if len(found) == n:
                return found[-1]
            if candidate != found[-1]:  # skip duplicates from different primes
                found.append(candidate)
| [
"“mengyu.jiang@gmail.com"
] | “mengyu.jiang@gmail.com |
b5a7ced6f889b95efdfb45e4cdd555bb7de43a7f | dea5535a0f72ac365c71e3d07d76b4c697716ea7 | /ist2018_src/phase2_main.py | be7c2ca7dfca997dca4c8e9414022a743201f3d2 | [] | no_license | akondrahman/IaCExtraction | f88fd52302451b728292f47731a069a2f1252a40 | 0d6ceabb30e04c620032427198c0360631d4c68d | refs/heads/master | 2020-07-02T06:33:34.498940 | 2018-12-09T03:44:46 | 2018-12-09T03:44:46 | 67,666,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,663 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 7 12:05:36 2016
author: akond
"""
import warnings
import Utility , numpy as np , sklearn_models
# Phase-2 pipeline: load dataset -> select features -> log-transform -> 10-fold modeling.
print "Started at:", Utility.giveTimeStamp()
'''
Deprecating warnings will be suppressed
'''
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Full labeled dataset: one row per script, feature columns plus a label column.
dataset_file="/Users/akond/Documents/AkondOneDrive/OneDrive/IaC_Mining/Dataset/LOCKED_WIKIMEDIA_23_REPOS_DATASET.csv"
full_dataset_from_csv = Utility.getDatasetFromCSV(dataset_file)
full_rows, full_cols = np.shape(full_dataset_from_csv)
## we will skip the first column, as it has file names
feature_cols = full_cols - 2 ## the last couln is null, and have to skip bug count, so two colums to skip
all_features = full_dataset_from_csv[:, 1:feature_cols]
print "Glimpse at features (10th entry in dataset): \n", all_features[9]
print "-"*50
dataset_for_labels = Utility.getDatasetFromCSV(dataset_file, False)
label_cols = full_cols - 1
all_labels = dataset_for_labels[:, label_cols]
print "Glimpse at labels (10th entry in dataset):", all_labels[9]
print "-"*50
# Convert the raw label column to numeric values for the classifiers.
formatted_labels = Utility.assignNumericLabels(all_labels)
print "Glimpse at labels (10th entry in label list):", formatted_labels[9]
print "-"*50
### use randomized logi. regression to get the features
selected_indices_for_features = sklearn_models.getElgiibleFeatures(all_features, formatted_labels)
print "The selected indicies are: \n", selected_indices_for_features
print "The selected feature names: ", Utility.printFeatureName(selected_indices_for_features)
print "-"*50
### select the features based on feature indicies, and also perform log transformation
selected_features = Utility.createLogTransformedSelectedFeatures(all_features, selected_indices_for_features)
print "Selected (log-transformed) feature dataset size:", np.shape(selected_features)
print "Glimpse at (log-transformed) selected features(10th entry in label list): \n", selected_features[9]
print "-"*50
'''
thsi paper https://www.cs.utah.edu/~piyush/teaching/cross-validation-kohavi.pdf
with 6000+ citations says to use 10 fold validation , so will use
10 fold validation instaed of bootstrap
'''
fold2Use =10
'''
Single iteration zone : turn off 'performIterativeModeling()'
while running this
'''
# this method runs the classifiers once
sklearn_models.performModeling(selected_features, formatted_labels, fold2Use)
print "-"*50
'''
Multiple iteration zone : turn off 'performModeling()'
while running this
'''
# this method runs the classifiers 'iteration' number of times
#iteration=1000
#sklearn_models.performIterativeModeling(selected_features, formatted_labels, fold2Use, iteration)
#print "-"*50
| [
"akond.rahman.buet@gmail.com"
] | akond.rahman.buet@gmail.com |
5d59cc42c0d95e97b23f1caf639560ac18cc66bd | b299e95c1660aeb9933d83b15689d39e3af60456 | /www_douyin_com/utils/tools.py | 6d32dbcbb2af21d7b3d22d6a5409a0ac2e51a25d | [] | no_license | beckfun/spider_world | 537e700ec84e00efb18fbe4c78ace18f0fe54151 | 06a39872d7dfaf1421b19dbed78ad3c8139f0a19 | refs/heads/master | 2020-05-04T09:06:19.995407 | 2019-04-02T01:19:48 | 2019-04-02T01:19:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | #!/usr/bin/env python
# coding:utf-8
def params2str(params):
    """Serialize a mapping into a URL-style query string.

    :param params: mapping of parameter names to values; both are formatted with %s
    :return: "k1=v1&k2=v2&..." in the mapping's iteration order, or "" when empty
    """
    # Joining avoids the old build-then-strip approach, whose trailing
    # strip("&") also removed legitimate leading/trailing '&' characters
    # from the first key or last value.
    return "&".join("%s=%s" % (k, v) for k, v in params.items())
| [
"funblessu@gmail.com"
] | funblessu@gmail.com |
1ba782ee86f1df3a8010f878207ded50f85e67c2 | 53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61 | /.history/EMR/zhzd_3_20190604103715.py | 16d556fa417a0a4ee072efbef2262cbc1a3bf503 | [] | no_license | cyc19950621/python | 4add54894dc81187211aa8d45e5115903b69a182 | d184b83e73334a37d413306d3694e14a19580cb0 | refs/heads/master | 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
import pandas as pd
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHRzhzd')# collect the .txt EMR files under the input folder
ryzd=[]
# NOTE(review): ryzd is never appended to below -- looks like leftover state; confirm before removing.
for emrtxt in emrtxts:
    f = open(emrtxt,'r',errors="ignore")# ignore decode errors in the Chinese text
    emrpath = os.path.basename(emrtxt)
    emrpath = os.path.splitext(emrpath)[0]# file name without extension, reused as the output name
    # Capture the text that follows a leading list index such as "1、"
    pattern =r'\s*\d+、+\s?(.*)'
    c=re.compile(pattern)
    output=[]
    for line in f.readlines():
        line1=line.strip('\n')
        line2 = ''.join(line1)
        line2 = line2.strip( )
        line3=c.findall(line2)
        line3=''.join(line3)
        line4 = str(line3)
        out = line4
        # Strip laterality/acuteness modifiers (right/left/bilateral/acute...) from the diagnosis text
        out= re.sub(r'右侧|两侧|双侧|左侧|急性发作|急性|右|左|双','',out)
        out = re.sub(r'肺肺','肺',out)# collapse the duplicated "lung" character left by the removal above
        output.append(out)
    output=EMRdef.delre(output)
    output1='\n'.join(output)
    EMRdef.text_create(r'D:\DeepLearning ER\EHRzhzd2','.txt',emrpath,output1)
| [
"1044801968@qq.com"
] | 1044801968@qq.com |
0dd7e87e8f4a0f6d3085fe3240e400cd3d1f0260 | 28de04457e8ebcd1b34494db07bde8a3f25d8cf1 | /easy/single_number_136.py | e9dc867034f2281236ddaf7853dffefb3b4c5d37 | [] | no_license | YangXinNewlife/LeetCode | 1df4218eef6b81db81bf2f0548d0a18bc9a5d672 | 20d3d0aa325d79c716acfc75daef32f8d4f9f1ad | refs/heads/master | 2023-08-16T23:18:29.776539 | 2023-08-15T15:53:30 | 2023-08-15T15:53:30 | 70,552,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | # -*- coding:utf-8 -*-
__author__ = 'yangxin_ryan'
"""
Soultions:
性质一:
交换律 a ^ b = b ^ a,
性质二:
a ^ a = 0。
于是我们可以将所有元素依次做异或操作,相同元素异或结果为0,
因此最终剩下的元素就为Single Number。
异或操作符 a ^ b
"""
class SingleNumber(object):
    def singleNumber(self, nums) -> int:
        """
        Return the element that appears exactly once in *nums*.

        XOR is commutative/associative and a ^ a == 0, so folding the whole
        list with XOR cancels every paired value and leaves the singleton.
        This satisfies the problem's constraints: linear runtime, constant
        extra memory.
        :param nums: integers where every value occurs twice except one
        :return: the value that occurs only once (0 for an empty list)
        """
        accumulator = 0
        for value in nums:
            accumulator ^= value
        return accumulator
| [
"yangxin03@youxin.com"
] | yangxin03@youxin.com |
fbb79b8d1f98b623d37cf93e1a1d50a522970e61 | b3ac12dfbb8fa74500b406a0907337011d4aac72 | /goldcoin/cmds/wallet.py | 02d92a17468b8fd4b3525f5acefd62c393fb63ec | [
"Apache-2.0"
] | permissive | chia-os/goldcoin-blockchain | ab62add5396b7734c11d3c37c41776994489d5e7 | 5c294688dbbe995ae1d4422803f6fcf3e1cc6077 | refs/heads/main | 2023-08-11T23:58:53.617051 | 2021-09-12T15:33:26 | 2021-09-12T15:33:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,967 | py | from typing import Optional
import click
# Root Click group: the wallet subcommands below register themselves on it.
# (No docstring, since click would surface one as additional help text.)
@click.group("wallet", short_help="Manage your wallet")
def wallet_cmd() -> None:
    pass
@wallet_cmd.command("get_transaction", short_help="Get a transaction")
@click.option(
    "-wp",
    "--wallet-rpc-port",
    help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
    type=int,
    default=None,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("-tx", "--tx_id", help="transaction id to search for", type=str, required=True)
@click.option("--verbose", "-v", count=True, type=int)
def get_transaction_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int, tx_id: str, verbose: int) -> None:
    # Thin CLI wrapper: bundle the option values and delegate to the shared async
    # runner. Imports are kept local, matching the sibling commands in this file.
    extra_params = {"id": id, "tx_id": tx_id, "verbose": verbose}
    import asyncio

    from .wallet_funcs import execute_with_wallet, get_transaction

    asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_transaction))
@wallet_cmd.command("get_transactions", short_help="Get all transactions")
@click.option(
    "-wp",
    "--wallet-rpc-port",
    help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
    type=int,
    default=None,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option(
    "-o",
    "--offset",
    help="Skip transactions from the beginning of the list",
    type=int,
    default=0,
    show_default=True,
    required=True,
)
@click.option("--verbose", "-v", count=True, type=int)
def get_transactions_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int, offset: int, verbose: bool) -> None:
    # Thin CLI wrapper: collect options, delegate to the shared async runner.
    extra_params = {"id": id, "verbose": verbose, "offset": offset}
    import asyncio

    from .wallet_funcs import execute_with_wallet, get_transactions

    asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_transactions))
@wallet_cmd.command("send", short_help="Send goldcoin to another wallet")
@click.option(
    "-wp",
    "--wallet-rpc-port",
    help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
    type=int,
    default=None,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("-a", "--amount", help="How much goldcoin to send, in ozt", type=str, required=True)
@click.option(
    "-m",
    "--fee",
    help="Set the fees for the transaction, in ozt",
    type=str,
    default="0",
    show_default=True,
    required=True,
)
@click.option("-t", "--address", help="Address to send the ozt", type=str, required=True)
@click.option(
    "-o", "--override", help="Submits transaction without checking for unusual values", is_flag=True, default=False
)
def send_cmd(
    wallet_rpc_port: Optional[int], fingerprint: int, id: int, amount: str, fee: str, address: str, override: bool
) -> None:
    # Thin CLI wrapper: collect options, delegate to the shared async runner.
    extra_params = {"id": id, "amount": amount, "fee": fee, "address": address, "override": override}
    import asyncio

    from .wallet_funcs import execute_with_wallet, send

    asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, send))
@wallet_cmd.command("show", short_help="Show wallet information")
@click.option(
    "-wp",
    "--wallet-rpc-port",
    help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
    type=int,
    default=None,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
def show_cmd(wallet_rpc_port: Optional[int], fingerprint: int) -> None:
    # Thin CLI wrapper: no extra parameters, just print the balances.
    import asyncio

    from .wallet_funcs import execute_with_wallet, print_balances

    asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, {}, print_balances))
@wallet_cmd.command("get_address", short_help="Get a wallet receive address")
@click.option(
    "-wp",
    "--wallet-rpc-port",
    help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
    type=int,
    default=None,
)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
def get_address_cmd(wallet_rpc_port: Optional[int], id, fingerprint: int) -> None:
    # Thin CLI wrapper: collect options, delegate to the shared async runner.
    # NOTE(review): the parameter `id` shadows the builtin; click maps --id onto it.
    extra_params = {"id": id}
    import asyncio

    from .wallet_funcs import execute_with_wallet, get_address

    asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_address))
@wallet_cmd.command(
    "delete_unconfirmed_transactions", short_help="Deletes all unconfirmed transactions for this wallet ID"
)
@click.option(
    "-wp",
    "--wallet-rpc-port",
    help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
    type=int,
    default=None,
)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
def delete_unconfirmed_transactions_cmd(wallet_rpc_port: Optional[int], id, fingerprint: int) -> None:
    # Thin CLI wrapper: collect options, delegate to the shared async runner.
    extra_params = {"id": id}
    import asyncio

    from .wallet_funcs import execute_with_wallet, delete_unconfirmed_transactions

    asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, delete_unconfirmed_transactions))
| [
"faurepierre78@yahoo.com"
] | faurepierre78@yahoo.com |
1f1699b0713e442d8b77826932578fd4d79d92f9 | cfd69c9c9680c4cb5c562f8c23930eff77d15003 | /py/satchless/contrib/pricing/simpleqty/admin.py | f341102ed46302b3a834542ff75db14728d72233 | [] | no_license | tbarbugli/satchless_heroku_skeleton | 13f980f91447ddcb18ee914826102da6b91fb74a | 9ba864a1e9ce652bad19efc0e4b0649c9077fe67 | refs/heads/master | 2020-08-05T03:44:22.645671 | 2011-10-04T18:56:16 | 2011-10-04T18:56:16 | 2,500,419 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | from django import forms
from django.forms.models import BaseInlineFormSet
from django.contrib import admin
from satchless.forms.widgets import DecimalInput
from . import models
class PriceQtyOverrideForm(forms.ModelForm):
class Meta:
widgets = {
'min_qty': DecimalInput(),
'price': DecimalInput(min_decimal_places=2),
}
class PriceQtyOverrideInline(admin.TabularInline):
model = models.PriceQtyOverride
form = PriceQtyOverrideForm
class VariantOffsetForm(forms.ModelForm):
class Meta:
widgets = {
'price_offset': DecimalInput(min_decimal_places=2),
}
class VariantOffsetFormSet(BaseInlineFormSet):
def __init__(self, *args, **kwargs):
super(VariantOffsetFormSet, self).__init__(*args, **kwargs)
if self.instance and self.instance.pk:
variants = self.instance.product.get_subtype_instance().variants.all()
for form in self.forms:
form.fields['variant'].queryset = variants
class VariantPriceOffsetInline(admin.TabularInline):
model = models.VariantPriceOffset
form = VariantOffsetForm
formset = VariantOffsetFormSet
class ProductPriceForm(forms.ModelForm):
class Meta:
widgets = {
'price': DecimalInput(min_decimal_places=2),
}
class ProductPriceAdmin(admin.ModelAdmin):
inlines = [PriceQtyOverrideInline, VariantPriceOffsetInline]
form = ProductPriceForm
admin.site.register(models.ProductPrice, ProductPriceAdmin)
| [
"tbarbugli@gmail.com"
] | tbarbugli@gmail.com |
2250cf06f14d89978cd2cf49e2c110c88d35dff3 | 7f44a279773732b183963349d146a8dd9a195b88 | /wagtail_demo/wsgi.py | c96ce8460fc498102e3757c97358afa1f8f2e778 | [] | no_license | pseudobabble/cms-boilerplate | f138060e2f25721191289eb261185136ae9cf6bd | 3923a8ebe1541118c5551b0996557f241943831f | refs/heads/master | 2022-12-28T01:30:49.554898 | 2020-10-15T15:23:10 | 2020-10-15T15:23:10 | 283,308,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for wagtail_demo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wagtail_demo.settings.dev")
application = get_wsgi_application()
| [
"harryjohnson500@gmail.com"
] | harryjohnson500@gmail.com |
5dca2df226b83ea614a61e2e8e3b9b96f7840af3 | 1620e0af4a522db2bac16ef9c02ac5b5a4569d70 | /Ekeopara_Praise/Phase 2/FILE I & O/Day83 Tasks/Task2.py | cccd54d0f43a58e0be18fbffc56248623c8c718f | [
"MIT"
] | permissive | Ekeopara-Praise/python-challenge-solutions | cda07902c9ffc09ba770ae7776e5e01026406a05 | 068b67c05524b5c5a0d6084315eca3424c768421 | refs/heads/master | 2022-12-15T15:29:03.031583 | 2020-09-25T06:46:27 | 2020-09-25T06:46:27 | 263,758,530 | 2 | 0 | null | 2020-05-13T22:37:33 | 2020-05-13T22:37:32 | null | UTF-8 | Python | false | false | 221 | py | '''2. Write a Python program to remove newline characters from a file. '''
# Echo a file's lines without their trailing newline characters.
filename = 'examples/files/numbers.txt'
with open(filename, 'r') as fh:
    for line in fh:
        # strip only the trailing newline; print() re-adds exactly one
        line = line.rstrip("\n")
print(line) | [
"ekeoparapraise@gmail.com"
] | ekeoparapraise@gmail.com |
e6559be7c99dcb318adafcd91b933330fa02bd34 | 3e83fb81ed10f0202c2ad9a86b58f96a11c0ce81 | /weights/create_v2_no_top.py | 4bc685a496895f935e4af46a2efc0e841898bddc | [
"Apache-2.0"
] | permissive | Arith2/MobileNetworks | 7289a50d507f3830372f53503c9e12c11990601a | 8d9a44b2954cef2b8b63b7c87b7c3cafc6fd1e5c | refs/heads/master | 2020-04-15T02:03:17.390403 | 2018-10-05T03:44:22 | 2018-10-05T03:44:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import h5py
from shutil import copyfile
'''
Change the fn path here and then execute the script to create weight files
with the top (classification) layers -- the last six weight groups -- removed.
'''
base = "mobilenet_v2_"
alphas = ["1_0", "3_5", "5_0", "7_5"]
sizes = [96, 128, 160, 192, 224]
end_str = "_tf.h5"
# last 2 largest models
# alphas = ["1_3", "1_4"] # alphas = ["1_3", "1_4"]
# sizes = [224] # sizes = [224]
for alpha in alphas:
    for size in sizes:
        fn = base + alpha + "_" + str(size) + end_str
        print("Working on file : %s" % fn)
        new_fn = fn[:-3] + "_no_top.h5"
        copyfile(fn, new_fn)
        f = h5py.File(new_fn)
        # Keep everything but the last six layer groups (the classification head).
        layer_names_keep = f.attrs['layer_names'][:-6]
        layer_names_drop = f.attrs['layer_names'][-6:]
        # BUG FIX: this loop variable used to be `fn`, clobbering the weight-file
        # name, so the success message below printed a layer name instead.
        for layer_name in layer_names_drop:
            del f[layer_name]
        f.attrs['layer_names'] = layer_names_keep
        f.close()
        print("Created 'No-Top' Weights for %s" % fn)
| [
"titu1994@gmail.com"
] | titu1994@gmail.com |
5e4bd6d8201c67b24b01c9de94babd847f2b9fcb | adf9d2a857970fe9d6cec22392ede9f12df07fb2 | /StationID.py | ade74722f4e22e2fd3e2f30463de72800e1eaebd | [] | no_license | DrZedd42/TomBedroom | 6326014d6d2e149ea7b3fada35965c62b7650ee1 | 0b7720ff0e143ed0853f0bb9cd1544b0c6f4bf7e | refs/heads/main | 2023-08-16T19:27:34.318533 | 2021-09-28T21:31:33 | 2021-09-28T21:31:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,578 | py | #This code imports the necessary modules.
import os
from RandFunct import random_number
from RandFunct2 import random_number2
import datetime
from boto3 import Session
from botocore.exceptions import BotoCoreError, ClientError
from contextlib import closing
import os
import sys
import subprocess
from tempfile import gettempdir
import boto3
from subprocess import call
from TextGetter import GetWebText
right_now = datetime.datetime.now().isoformat()
list = []
for i in right_now:
if i.isnumeric():
list.append(i)
tim = ("".join(list))
srchstr = 'C:\\Users\\mysti\\Coding\\TomBedroom'
wordcon = GetWebText()
x1 = len(wordcon)
phrslst = []
# Build 12 random "stutter" phrases: each phrase is two random words, each
# prefixed by repetitions of its own first three letters.
for ctr in range(12):
    wdlst = []
    w1 = wordcon[random_number(x1)]
    w2 = wordcon[random_number(x1)]
    #w3 = wordcon[random_number(x1)]
    bstr = ""
    astr = w1[:3]
    # NOTE(review): only the final loop value of bstr survives, so this is
    # equivalent to bstr = astr * (random_number(8) - 1) -- presumably a
    # stutter prefix was intended; TODO confirm.
    for rep in range(random_number(8)):
        bstr = astr * rep
    w1a = ""
    w1a = bstr + w1
    wdlst.append(w1a)
    bstr = ""
    astr = w2[:3]
    for rep in range(random_number(8)):
        bstr = astr * rep
    w2a = ""
    w2a = bstr + w2
    wdlst.append(w2a)
    # (third-word variant left disabled by the author)
    spchstr = ""
    for elem in wdlst:
        spchstr += elem
    phrslst.append(spchstr)
print(phrslst)
print("")
# For each generated phrase: read AWS credentials, synthesize speech with
# Amazon Polly, and save the MP3 under a timestamped filename.
for citr in range(len(phrslst)):
    paragraph = ""
    speaktex = ""
    print("Cycle: " + str(citr+1))
    print("")
    # Fresh digits-only timestamp per cycle for the output filename.
    right_now = datetime.datetime.now().isoformat()
    list = []  # NOTE(review): shadows the builtin `list`
    for i in right_now:
        if i.isnumeric():
            list.append(i)
    tim = ("".join(list))
    accessKey = ""
    secretKey = ""
    # Credentials are hidden inside .m3u files at fixed character offsets.
    infile = open("asckey.m3u", "r")
    aline = infile.readline()
    # NOTE(review): if readline() ever raises, this loop never advances and
    # spins forever; the bare except also hides the real error.
    while aline:
        try:
            accessKey += (aline)
            aline = infile.readline()
        except:
            print("Text error-- passing over line.")
    accessKey = accessKey[20:41].strip()  # magic offsets -- assumes a fixed file layout
    infile.close()
    infile = open("datatex.m3u", "r")
    aline = infile.readline()
    while aline:
        try:
            secretKey += (aline)
            aline = infile.readline()
        except:
            print("Text error-- passing over line.")
    secretKey = secretKey[37:77].strip()  # magic offsets -- assumes a fixed file layout
    infile.close()
    polly = boto3.Session(
        aws_access_key_id= accessKey,
        aws_secret_access_key= secretKey,
        region_name='us-west-2').client('polly')
    #vox = random_number(2)
    # (alternate multi-voice lists were commented out here; only Matthew is
    # used -- the first assignment below is immediately overwritten.)
    voxlst = ['Carla', 'Emma', 'Raveena', 'Marlene', 'Mathieu', 'Nicole']
    voxlst = ['Matthew']
    voxch = random_number(len(voxlst))
    voxstr = voxlst[voxch]
    outaud = "GeneratedAudioMix_" + voxstr + "_" + tim + ".mp3"
    speaktex = phrslst[citr]
    try:
        # Request speech synthesis
        response = polly.synthesize_speech(Text=speaktex, OutputFormat="mp3",
                                           VoiceId=voxstr)
        # Closing the stream matters: Polly throttles on parallel connections.
        with closing(response["AudioStream"]) as stream:
            #output = os.path.join(gettempdir(), outaud)
            output = ("C:\\Users\\mysti\\Coding\\TomBedroom\\" + outaud)
            # Write the binary audio stream to disk.
            with open(output, "wb") as file:
                file.write(stream.read())
    except:
        # NOTE(review): bare except swallows Boto errors and write errors alike.
        print("")
        print("Error processing speech.")
        print("")
# NOTE(review): indentation was lost in this copy -- the three prints and the
# WakeUpTom call below are assumed to run once after all cycles; confirm.
print("")
print("Your spoken audio has been generated.")
print("")
call(["python", "WakeUpTom.py"])
| [
"mystifiedthomas@gmail.com"
] | mystifiedthomas@gmail.com |
a1f7d4ef77c26c4af789ed97971fbdfc94b76145 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5640146288377856_1/Python/joegunrok/a.py | b407e6f7744fde204125d42519c1b7eeebed844a | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | #!/usr/bin/python
import sys
import math
# Debug flag (not used in the visible code).
VERBOSE = True

# Alias for the input stream so tests could swap it in principle.
sin = sys.stdin


def line():
    """Read one line from the input stream, stripped of surrounding whitespace.

    PEP 8 (E731): a ``def`` is preferred over assigning a lambda to a name.
    """
    return sin.readline().strip()
class case(object):
    """Context manager that prints ``Case #N: result`` on exit (Code Jam output).

    The solver stores its answer in ``self.r`` before the ``with`` block ends.
    """
    def __init__(self, number):
        self.number = number  # 1-based case index
        self.r = ""           # result string, filled in by the solver
    def __enter__(self):
        return self
    def __exit__(self, *arg):
        # print(...) with a single pre-formatted argument behaves identically
        # under Python 2's print statement and Python 3's print function;
        # the original bare print statement was a SyntaxError on Python 3.
        print("Case #%s: %s" % (self.number, self.r))
def parse_result(out):
    """Return the next line of *out* with surrounding whitespace removed."""
    raw = out.readline()
    return raw.strip()
def main():
    """Read the number of cases, then parse and solve each one in order."""
    total = int(line())
    for index in range(1, total + 1):
        with case(index) as current:
            _run(current, **parse())
def parse():
    """Parse one input line into the case parameters R, C and W."""
    R, C, W = [int(tok) for tok in sin.readline().strip().split()]
    return {"R": R, "C": C, "W": W}
def _run(CASE, R=None, C=None, W=None):
r = 0
c = C
while c:
if c > W:
if c % W != 0:
c -= c % W
else:
c -= W
r += 1
elif R == 1:
r += W
c = 0
else:
r += 1
c = C
R -= 1
CASE.r = r
# Script entry point.
if __name__ == "__main__":
    main()
"eewestman@gmail.com"
] | eewestman@gmail.com |
05de536c710bd6a066305240b7f79a0c0711d295 | 1a1c14b7a9ffc9f3ef4a64ca52cb8c40ae9f7f4b | /app/orignal/forms.py | 5bca6277d300cc59c561c49d0242d263a672b80c | [] | no_license | boyl/mblog | 71008ba45cc0625427dcf15c88a447188467d41d | 5933111dca855f74248d3fec2857fb7c5afa81fd | refs/heads/master | 2022-12-10T22:00:16.507135 | 2018-06-28T06:49:57 | 2018-06-28T06:49:57 | 137,972,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,691 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, Length, ValidationError, Email, EqualTo
from app.models import User
from flask_babel import lazy_gettext as _l
class LoginForm(FlaskForm):
    """Sign-in form: username/password plus an optional remember-me flag."""
    username = StringField(_l('Username'), validators=[DataRequired()])
    password = PasswordField(_l('Password'), validators=[DataRequired()])
    remember_me = BooleanField(_l('Remember Me'))
    submit = SubmitField(_l('Sign In'))
class RegistrationForm(FlaskForm):
    """New-account form; enforces unique username and email via in-line validators."""
    username = StringField(_l('Username'), validators=[DataRequired()])
    email = StringField(_l('Email'), validators=[DataRequired(), Email()])
    password = PasswordField(_l('Password'), validators=[DataRequired()])
    password2 = PasswordField(_l('Repeat Password'), validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField(_l('Register'))

    def validate_username(self, username):
        """WTForms in-line validator: reject usernames already in the database."""
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError(_l('Please use a different username.'))

    def validate_email(self, email):
        """WTForms in-line validator: reject email addresses already in the database."""
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError(_l('Please use a different email address.'))
class EditProfileForm(FlaskForm):
    """Profile-editing form; rejects a changed username that is already taken."""
    username = StringField(_l('Username'), validators=[DataRequired()])
    about_me = TextAreaField(_l('About me'), validators=[Length(min=0, max=140)])
    submit = SubmitField(_l('Submit'))

    def __init__(self, original_username, *args, **kwargs):
        super(EditProfileForm, self).__init__(*args, **kwargs)
        # Remember the current username so keeping the same name passes validation.
        self.original_username = original_username

    def validate_username(self, username):
        """WTForms in-line validator: only check uniqueness when the name changed."""
        if username.data != self.original_username:
            # Use the passed-in field (same object as self.username) for
            # consistency with RegistrationForm.validate_username.
            user = User.query.filter_by(username=username.data).first()
            if user is not None:
                raise ValidationError(_l('Please use a different username.'))
class PostForm(FlaskForm):
    """Form for writing a post, limited to 140 characters."""
    post = TextAreaField(_l('Say something'), validators=[
        DataRequired(), Length(min=1, max=140)])
    submit = SubmitField(_l('Submit'))
class ResetPasswordRequestForm(FlaskForm):
    """Form asking for the email address to send a password-reset link to."""
    email = StringField(_l('Email'), validators=[DataRequired(), Email()])
    submit = SubmitField(_l('Request Password Reset'))
class ResetPasswordForm(FlaskForm):
    """Form for entering (and confirming) the new password after a reset link."""
    password = PasswordField(_l('Password'), validators=[DataRequired()])
    password2 = PasswordField(
        _l('Repeat Password'), validators=[DataRequired(), EqualTo('password')]
    )
    submit = SubmitField(_l('Request Password Reset'))
| [
"1228417956@qq.com"
] | 1228417956@qq.com |
aab417d01fa146f1f488588072981419ffa7afab | af165d72ac206153c9b16499e88f9c9f30a69c9a | /studies/mixture_feasibility/pure_mixture_optimisation/force_balance/expanded_set/h_mix_rho_x_rho_pure/run_server.py | 96d2549a98f9e4f2abbf7292eb776176199a7b9f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | openforcefield/nistdataselection | bc4b443ec3377e9f7f953bcf6535118423ff96ea | d797d597f4ff528a7219d58daa8ef6508d438b24 | refs/heads/master | 2020-06-12T01:05:22.757026 | 2020-05-18T22:44:10 | 2020-05-18T22:44:10 | 194,144,523 | 3 | 0 | MIT | 2020-05-19T02:10:56 | 2019-06-27T18:28:23 | Python | UTF-8 | Python | false | false | 1,513 | py | #!/usr/bin/env python3
import shutil
from os import path
from evaluator import unit
from evaluator.backends import QueueWorkerResources
from evaluator.backends.dask import DaskLSFBackend
from evaluator.server import EvaluatorServer
def main():
    """Start an EvaluatorServer backed by an adaptive Dask-LSF GPU cluster."""
    working_directory = "working_directory"

    # Start from a clean slate: drop any stale working data.
    if path.isdir(working_directory):
        shutil.rmtree(working_directory)

    # One thread + one CUDA GPU per worker; LSF wallclock capped just under 6 h.
    resources = QueueWorkerResources(
        number_of_threads=1,
        number_of_gpus=1,
        preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
        per_thread_memory_limit=5 * unit.gigabyte,
        wallclock_time_limit="05:59",
    )

    setup_commands = ["conda activate forcebalance", "module load cuda/10.1"]

    backend = DaskLSFBackend(
        minimum_number_of_workers=1,
        maximum_number_of_workers=50,
        resources_per_worker=resources,
        queue_name="gpuqueue",
        setup_script_commands=setup_commands,
        adaptive_interval="1000ms",
    )

    with backend:
        server = EvaluatorServer(
            calculation_backend=backend,
            working_directory=working_directory,
            port=8006,
        )
        # Block here, listening for estimation requests.
        server.start()
# Script entry point.
if __name__ == "__main__":
    main()
| [
"simon.boothroyd@colorado.edu"
] | simon.boothroyd@colorado.edu |
82278040284b728c089ccc80f7ad36819cc5084f | 7911da973079f325a515cd2ee66f7590a9f32e48 | /totspecial.py | d259fd3c65641d1e8470f9b47097cc036c6acbdc | [] | no_license | Ponkiruthika112/Guvi | 5d2ff3dcf55d6c52c0f09a1e577d8b11632c7a92 | 319e5b4dab5654fabc25ef15c1d528f76d833c15 | refs/heads/master | 2020-04-21T06:05:03.581658 | 2018-08-02T05:53:48 | 2018-08-02T05:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | str=raw_input('Enter your line: \n')
c=0
sym=",./<>?!@~`#$%^&*()_+-={}[]:;"
for x in str:
if x in sym:
c=c+1
print 'The total character in the string is:',c
| [
"noreply@github.com"
] | Ponkiruthika112.noreply@github.com |
3bf09d84ef3536cc663ba9aa1b6bc6f3e6323a37 | dc4eebdbb6f2167bbd4619538a5821a67533949d | /blaze/blaze/compute/air/passes.py | f090c0c62386baebeeaa4fec11418d013b65239a | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | amiit/Delivery-Optimization | 7e99b872bb077f2e9db2d5f72ba0878dd951879b | 5d6a34b166406b70ece41bc9368dc132915321e4 | refs/heads/master | 2021-01-20T16:04:38.880506 | 2014-05-11T03:26:20 | 2014-05-11T03:26:20 | 19,635,694 | 1 | 0 | null | 2014-05-11T03:26:20 | 2014-05-10T07:08:55 | Python | UTF-8 | Python | false | false | 869 | py | """
Passes that massage expression graphs into execution kernels.
"""
from __future__ import absolute_import, division, print_function
from functools import partial
from .prettyprint import verbose
from .frontend import (translate, partitioning, coercions, ckernel_impls,
ckernel_lift, allocation, ckernel_prepare,
ckernel_rewrite)
from ...io.sql.air import rewrite_sql
# Ordered list of compiler passes applied to the expression graph; each entry
# is a callable imported above. Order matters: translation and partitioning
# run first, kernel lowering and rewriting last.
passes = [
    translate,
    partitioning.annotate_all_kernels,
    partitioning.partition,
    partitioning.annotate_roots,
    # erasure, # TODO: erase shape from ops
    # cache, # TODO:
    coercions,
    # TODO: Make the below compile-time passes !
    ckernel_prepare.prepare_local_execution,
    ckernel_impls,
    allocation,
    ckernel_lift,
    ckernel_rewrite,
    rewrite_sql,
]

# Same pipeline, but every pass is wrapped to print verbose diagnostics.
debug_passes = [partial(verbose, p) for p in passes]
| [
"sudheesh1995@outlook.com"
] | sudheesh1995@outlook.com |
02a4e9e0d9088dd24f058dc02fdd1b4110a55e49 | ab4b2d14e3cc24190f909212ed6bc88f365efc89 | /models/DenseASPP/denseaspp.py | e26a190c68053148b950be9124e551171e36011a | [] | no_license | curtis18/NAIC_AI-RS | 0409e82767c59e08cf740475d04456ed199381a8 | c997203af6c5d31df9419be8cde8583b027ded71 | refs/heads/master | 2023-07-19T12:35:55.969698 | 2021-03-03T11:15:44 | 2021-03-03T11:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,000 | py | '''
[description]
DenseASPP
'''
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
from models.DenseASPP import resnet101
class _DenseASPPHead(nn.Module):
    """Segmentation head: DenseASPP feature fusion followed by a 1x1 classifier."""

    def __init__(self, in_channels, class_num, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
        super(_DenseASPPHead, self).__init__()
        self.dense_aspp_block = _DenseASPPBlock(in_channels, 256, 64, norm_layer, norm_kwargs)
        # The dense block concatenates 5 branches of 64 channels onto the input.
        self.block = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(in_channels + 5 * 64, class_num, 1)
        )

    def forward(self, x):
        fused = self.dense_aspp_block(x)
        return self.block(fused)
class _DenseASPPBlock(nn.Module):
    """Cascade of five atrous-conv branches with dense (concatenative) connections.

    Branch k sees the input plus the outputs of all previous branches, so its
    input width grows by ``inter_channels2`` per branch. Attribute names
    (aspp_3 ... aspp_24) are kept for state-dict compatibility.
    """

    _RATES = (3, 6, 12, 18, 24)

    def __init__(self, in_channels, inter_channels1, inter_channels2,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None):
        super(_DenseASPPBlock, self).__init__()
        for idx, rate in enumerate(self._RATES):
            branch = _DenseASPPConv(in_channels + inter_channels2 * idx,
                                    inter_channels1, inter_channels2, rate, 0.1,
                                    norm_layer, norm_kwargs)
            # setattr on nn.Module registers the submodule under this name.
            setattr(self, 'aspp_%d' % rate, branch)

    def forward(self, x):
        # Prepend each branch output to the running feature stack.
        for rate in self._RATES:
            branch = getattr(self, 'aspp_%d' % rate)
            x = torch.cat([branch(x), x], dim=1)
        return x
class _DenseASPPConv(nn.Sequential):
def __init__(self, in_channels, inter_channels, out_channels, atrous_rate,
drop_rate=0.1, norm_layer=nn.BatchNorm2d, norm_kwargs=None):
super(_DenseASPPConv, self).__init__()
self.add_module('conv1', nn.Conv2d(in_channels, inter_channels, 1)),
self.add_module('bn1', norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs))),
self.add_module('relu1', nn.ReLU(True)),
self.add_module('conv2', nn.Conv2d(inter_channels, out_channels, 3, dilation=atrous_rate, padding=atrous_rate)),
self.add_module('bn2', norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs))),
self.add_module('relu2', nn.ReLU(True)),
self.drop_rate = drop_rate
def forward(self, x):
features = super(_DenseASPPConv, self).forward(x)
if self.drop_rate > 0:
features = F.dropout(features, p=self.drop_rate, training=self.training)
return features
class DenseASPP(nn.Module):
    """Dilated ResNet-101 backbone + DenseASPP head, upsampled to the input size."""

    def __init__(self, class_num, bn_momentum=0.01):
        super(DenseASPP, self).__init__()
        # Dilation [1,1,1,2] keeps a larger feature map in the last stage.
        self.Resnet101 = resnet101.get_resnet101(dilation=[1, 1, 1, 2], bn_momentum=bn_momentum, is_fpn=False)
        self.head = _DenseASPPHead(2048, class_num=class_num, norm_layer=nn.BatchNorm2d)

    def forward(self, input):
        features = self.Resnet101(input)
        logits = self.head(features)
        # Bilinear upsampling back to the input's spatial resolution.
        return F.interpolate(logits, size=input.size()[2:4], mode='bilinear', align_corners=True)
return output
def main():
    """Smoke test: push one random batch through the network and print the output shape."""
    num_classes = 10
    in_batch, inchannel, in_h, in_w = 4, 3, 128, 128
    batch = torch.randn(in_batch, inchannel, in_h, in_w)
    model = DenseASPP(class_num=num_classes)
    print(model(batch).shape)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"530781348@qq.com"
] | 530781348@qq.com |
dc2de005a0ec0ccb7415c5e259146d730c62a845 | e01ecfd94d464fe7cde454a70c17cdd968caf6e7 | /booking/migrations/0013_auto_20201225_1053.py | d1f8dc4c69e78702ba7523821a6a4d1f2aaab9c9 | [] | no_license | dhita-irma/travel-booking | 21a241c48fb09707607fec96a8c6e25490a95819 | babaabd4ce081d8c1adb29b2f74e496bfa4a58b5 | refs/heads/master | 2023-06-23T00:36:06.444584 | 2023-06-18T17:29:41 | 2023-06-18T17:29:41 | 298,986,699 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # Generated by Django 3.1.1 on 2020-12-25 02:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Destination.image_url and make OrderItem.reservation_date a plain DateField."""

    dependencies = [
        ('booking', '0012_auto_20201121_1257'),
    ]

    operations = [
        migrations.AddField(
            model_name='destination',
            name='image_url',
            field=models.URLField(blank=True),
        ),
        migrations.AlterField(
            model_name='orderitem',
            name='reservation_date',
            field=models.DateField(),
        ),
    ]
| [
"dhita.irma@gmail.com"
] | dhita.irma@gmail.com |
25df0df1f1e84499316959ecee89e86faf8783df | e523652e0379f291f675e5cba4c1f667a3ac3b19 | /commands/at | 1515fb9b4cc8e97309e916ff808a928400a4d3b6 | [
"Apache-2.0"
] | permissive | sbp/saxo | 735bac23c8d214b85ca48c5c43bc12b1531ce137 | 27030c57ed565db1aafd801576555ae64893d637 | refs/heads/master | 2023-09-01T09:08:13.633734 | 2023-08-29T12:51:40 | 2023-08-29T12:51:40 | 9,411,794 | 25 | 13 | Apache-2.0 | 2021-06-19T15:09:44 | 2013-04-13T10:06:52 | Python | UTF-8 | Python | false | false | 1,992 | #!/usr/bin/env python3
# http://inamidst.com/saxo/
# Created by Sean B. Palmer
import calendar
import datetime
import re
import time
import saxo
r_arg = re.compile("(\d{2}):(\d{2})(?::(\d{2}))?(Z|[+-]\d{1,2})(?: (.*))?")
@saxo.pipe
def main(arg):
    """Schedule an IRC reminder at HH:MM(:SS) in the given UTC offset.

    Parses the argument with r_arg, converts the requested wall-clock time in
    the user's timezone to a UTC unix time (rolling over to tomorrow if the
    time has already passed), and registers a "msg" event with the saxo client.
    """
    if not arg:
        return "Set a reminder at a certain time. Must match " + r_arg.pattern
    match = r_arg.match(arg)
    if not match:
        return "Sorry, input must match " + r_arg.pattern

    # Validate each time component individually for a friendly error message.
    hour = int(match.group(1))
    if hour > 23:
        return "Sorry, hour must be between 0 and 23 inclusive"
    minute = int(match.group(2))
    if minute > 59:
        return "Sorry, minute must be between 0 and 59 inclusive"
    second = int(match.group(3) or 0)
    if second > 59:
        return "Sorry, second must be between 0 and 59 inclusive"
    tz = match.group(4)
    # "Z" means UTC; otherwise an integer hour offset.
    tz = 0 if (tz == "Z") else int(tz)
    if (tz > 12) or (tz < -12):
        return "Sorry, timezone must be between -12 and +12 inclusive"
    message = match.group(5)

    utcnow = datetime.datetime.utcnow()
    # print("utcnow", utcnow)
    # Shift "now" into the user's timezone, build the requested local time,
    # then shift back to UTC.
    delta = datetime.timedelta(minutes=tz * 60)
    now = utcnow + delta
    # print("now", now)
    base = datetime.datetime(
        year=now.year,
        month=now.month,
        day=now.day,
        hour=hour,
        minute=minute,
        second=second)
    # print("base", base)
    base = base - delta
    # print("base - delta", base)
    # If the requested time already passed today, schedule it for tomorrow.
    if base < utcnow:
        base += datetime.timedelta(minutes=24 * 60)
    unixtime = calendar.timegm(base.utctimetuple())

    nick = saxo.env("nick")
    sender = saxo.env("sender")
    if not (nick or sender):
        return "Sorry, couldn't set a reminder!"
    # The reminder text always addresses the requesting nick.
    if message:
        message = nick + ": " + message
    else:
        message = nick + "!"
    args = (unixtime, "msg", (sender, message))
    saxo.client("schedule", *args)
    when = time.strftime("%d %b %Y %H:%M:%S UTC", time.gmtime(unixtime))
    return "%s: Will remind at %s" % (nick, when)
| [
"sean@miscoranda.com"
] | sean@miscoranda.com | |
d7258f824aa1bf2a056737f6c0ce18f8df9267cf | 6017802a27c2170a81838faffe65a450081ef142 | /addons-own/fso_crm_facebook_leads/models/frst_zverzeichnis.py | 9dbeecf530db0efaf0119065976d41714d7f0a9a | [] | no_license | OpenAT/online | 5fe6f997ab6a37cfb90b7651a6d3cef81ff35e73 | de4a1fea0b4e60226fc51bd731388e88d2e25b3f | refs/heads/o8 | 2023-08-06T21:40:37.006251 | 2023-07-19T15:07:33 | 2023-07-19T15:07:33 | 50,013,003 | 5 | 9 | null | 2023-02-16T00:33:21 | 2016-01-20T07:34:28 | JavaScript | UTF-8 | Python | false | false | 959 | py | # -*- coding: utf-8 -*-
from openerp import models, fields
import logging
logger = logging.getLogger(__name__)
class FRSTzVerzeichnis(models.Model):
    """Extend frst.zverzeichnis with reverse links to Facebook lead forms and leads."""
    _inherit = "frst.zverzeichnis"

    # Reverse side of crm.facebook.form.frst_zverzeichnis_id.
    crm_fb_form_ids = fields.One2many(string="Facebook Lead Forms",
                                      comodel_name="crm.facebook.form", inverse_name='frst_zverzeichnis_id',
                                      readonly=True,
                                      help="Facebook Leads Forms that may use this CDS leave in the crm.lead "
                                           "creation process")

    # Reverse side of crm.lead.frst_zverzeichnis_id.
    crm_lead_ids = fields.One2many(string="Leads",
                                   comodel_name="crm.lead", inverse_name='frst_zverzeichnis_id',
                                   readonly=True,
                                   help="Facebook Leads Forms that may use this CDS leave in the crm.lead "
                                        "creation process")
"michaelkarrer81@gmail.com"
] | michaelkarrer81@gmail.com |
1fe4645a37a349b4d223c5833afa26a62e1b311b | 118304fd351cc9ec406bce835c9a1d3f55098208 | /relation_network/__init__.py | cad3ee713600f8a7b61a839b8979af714a7d0e91 | [] | no_license | hb-research/relation-network-tensorflow | 3ba73fef4b7d037ac00656d0422432d3d1bbcedd | 454dcf2ee3064b1c5c28d2969734d15457990dbb | refs/heads/master | 2021-05-08T14:14:40.409802 | 2018-01-30T11:55:22 | 2018-01-30T11:55:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,124 | py |
from hbconfig import Config
import tensorflow as tf
from .encoder import Encoder
from .relation import RN
class Graph:
    """Relation-network model graph (TF1-style): embed -> encode -> relate.

    Pipeline: look up word embeddings for the story sentences and the
    question, encode each with an RNN encoder, then score them with a
    Relation Network (RN) module. Hyperparameters come from hbconfig.Config.
    """

    def __init__(self, mode, dtype=tf.float32):
        self.mode = mode    # NOTE(review): stored but not read below -- presumably train/eval; confirm
        self.dtype = dtype

    def build(self,
              input=None,
              question=None):
        """Build the full graph and return the RN output logits.

        `input` is a batch of stories (batch x facts x tokens of word ids);
        `question` is a batch of token-id sequences -- inferred from the
        axis arguments below; TODO confirm shapes against the data pipeline.
        """
        # Count non-PAD tokens to get true sequence lengths.
        input_lengths = tf.reduce_sum(
            tf.to_int32(tf.not_equal(input, Config.data.PAD_ID)), axis=2,
            name="input_lengths")
        question_lengths = tf.reduce_sum(
            tf.to_int32(tf.not_equal(question, Config.data.PAD_ID)), axis=1,
            name="question_lengths")

        embedding_input, embedding_question = self._build_embed(input, question)
        facts, question = self._build_input_module(embedding_input, input_lengths,
                                                   embedding_question, question_lengths)
        output = self._build_relational_module(facts, question)
        return output

    def _build_embed(self, input, question):
        """Look up (non-trainable) word embeddings for stories and question."""
        with tf.variable_scope("embeddings", dtype=self.dtype) as scope:
            embedding = tf.get_variable(
                "word_embedding", [Config.data.vocab_size, Config.model.embed_dim],
                dtype=self.dtype, trainable=False)

            embedding_input = tf.nn.embedding_lookup(embedding, input)
            embedding_question = tf.nn.embedding_lookup(embedding, question)

            return embedding_input, embedding_question

    def _build_input_module(self, embedding_input, input_lengths,
                            embedding_question, question_lengths):
        """Encode each story sentence into a fact vector, and encode the question."""
        encoder = Encoder(
            encoder_type=Config.model.encoder_type,
            num_layers=Config.model.num_layers,
            cell_type=Config.model.cell_type,
            num_units=Config.model.num_units,
            dropout=Config.model.dropout)

        with tf.variable_scope("input-module"):
            facts = []
            with tf.variable_scope("facts", reuse=tf.AUTO_REUSE):
                # Unstack along the fact axis so each sentence is encoded
                # separately (weights shared via AUTO_REUSE).
                embedding_input_transpose = tf.transpose(embedding_input, [1, 0, 2, 3])
                embedding_sentences = tf.unstack(embedding_input_transpose, num=Config.data.max_fact_count)

                input_lengths_transpose = tf.transpose(input_lengths, [1, 0])
                sentence_lengths = tf.unstack(input_lengths_transpose, num=Config.data.max_fact_count)

                for embedding_sentence, sentence_length in zip(embedding_sentences, sentence_lengths):
                    _, fact = encoder.build(embedding_sentence, sentence_length, scope="fact-encoder")
                    facts.append(fact)

        # NOTE(review): scope name "input-module" is re-entered here -- was a
        # distinct scope (e.g. "question-module") intended? Confirm.
        with tf.variable_scope("input-module"):
            _, question = encoder.build(
                embedding_question, question_lengths, scope="question-encoder")

        return facts, question

    def _build_relational_module(self, facts, question):
        """Score (fact, question) combinations with the RN; f-net output size = vocab."""
        with tf.variable_scope("relational-network-module"):
            rn = RN(g_units=Config.model.g_units,
                    f_units=Config.model.f_units + [Config.data.vocab_size])
            return rn.build(facts, question)
| [
"humanbrain.djlee@gmail.com"
] | humanbrain.djlee@gmail.com |
5c1b70cc902783ee9d67ed92c671de5f7a05d505 | e1700995ea8302a374604d9f7da4f3b472dde62f | /future/wip_sound_create_ramp_amplitude_modulation.py | a7ddea16c4ebc3791fdb19468301b53079ec7126 | [
"Unlicense"
] | permissive | ofgulban/minimalist_psychopy_examples | 4e069b15a807941a86c0d3f21d3dbdfb0758f072 | 71864ca7f829f4846d1aa002754117565b6549ba | refs/heads/master | 2020-12-05T08:22:57.619189 | 2020-03-05T01:11:21 | 2020-03-05T01:11:21 | 66,953,025 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,396 | py | """CreateTones_MM.m Python port.
Notes
-----
I have converted an old matlab script to python and actually never really used
it afterwards. Therefore anyone using this script should test whether sounds
are created properly.
"""
import os
import numpy as np
from scipy.io import wavfile
# Parameters
out_dir = "/home/faruk/Git/minimalist_psychopy_examples/future/test"
samp_freq = 44100  # Sampling frequency
sound_dur = 800  # Duration of sounds in ms

# -----------------------------------------------------------------------------
s = int(samp_freq * sound_dur / 1000)  # Nr. samples for sound duration
time_vector = np.arange(1, s + 1) / float(samp_freq)

# Log-spaced base frequencies from 200 Hz to 10 kHz, with 6 kHz inserted.
# NOTE(review): np.math is deprecated in modern numpy -- math.log works the same.
nr_freq = 8
sound_freq_vector = 2 ** (np.linspace(np.math.log(200, 2),
                                      np.math.log(10000, 2), nr_freq))
sound_freq_vector = np.insert(sound_freq_vector, 6, 6000, axis=0)
# Jitter main tones
for i in range(0, len(sound_freq_vector)):
sourceFreq = sound_freq_vector[i*3]
for j in range(0, 1):
sound_freq_vector = np.insert(sound_freq_vector, i * 3 + j,
sourceFreq / (2 ** 0.1), axis=0)
sound_freq_vector = np.insert(sound_freq_vector, i * 3 + j + 2,
sourceFreq * (2 ** 0.1), axis=0)
sound_freq_vector = np.round(sound_freq_vector)
# -----------------------------------------------------------------------------
# Amplitude modulation
amp_mod_freq = 8 # Amplitude modulation frequency
amp_mod_depth = 1 # Amplitude modulation depth (strenght, amplitude)
ampl = 0.95
full_time_vector = np.array([])
for i, freq in enumerate(sound_freq_vector):
sound_i = ampl * np.sin(2 * np.pi * freq * time_vector)
# Add amplitude modulation
modulator = amp_mod_depth * np.sin(2 * np.pi * amp_mod_freq * time_vector)
sound_i = ((1 + modulator) * sound_i) / 2
full_time_vector = np.concatenate([full_time_vector, sound_i])
# Save output
out_path = os.path.join(out_dir, "TEST_{}.wav".format(str(i).zfill(2)))
wavfile.write(out_path, samp_freq, sound_i)
# =============================================================================
def ramp_sound(in_vector, samp_freq, ramp_time):
    """Return a copy of *in_vector* with linear on/off amplitude ramps applied.

    The first and last ``ramp_time`` milliseconds (at ``samp_freq`` Hz) fade
    linearly from 0 to 1 and from 1 to 0 respectively; the input is untouched.
    """
    total = len(in_vector)
    n_ramp = round(ramp_time * samp_freq / 1000)

    out = np.copy(in_vector)
    out[:n_ramp] = in_vector[:n_ramp] * np.linspace(0, 1, n_ramp)
    out[total - n_ramp:] = in_vector[total - n_ramp:] * np.linspace(1, 0, n_ramp)
    return out
# =============================================================================
# Preproc_sounds.mm implementation
# Parameters
energy = 200
ramp_time = 10

# Add ramp
# Slice each tone back out of the concatenated vector, ramp it, normalize its
# total energy to `energy`, remove DC offset, and write a "_ramped" wav.
for i in range(0, len(sound_freq_vector)):
    tmp = full_time_vector[i*s:i*s+s]
    tmp = ramp_sound(tmp, samp_freq, ramp_time)
    en_s = np.sum(tmp**2, axis=0)
    tmp = tmp * np.sqrt(energy / en_s)
    tmp = tmp - tmp.mean()
    out_name = 'TEST_{}_ramped.wav'.format(str(i).zfill(2))
    out_path = os.path.join(out_dir, out_name)
    wavfile.write(out_path, samp_freq, tmp)

print("Finished.")
| [
"farukgulban@gmail.com"
] | farukgulban@gmail.com |
09ccef026731618d7ae61d0260d5128d9a508915 | f31d229affd79347bd5216bd33ff28f11b5c7b01 | /apps/controllerx/devices/trust.py | 020b095882fbd745334f68e97d68abf761855de6 | [
"MIT"
] | permissive | cargial/controllerx | 911c24901da2d54afadffc372825e4e2d73b2646 | 9d48bbbe9537f58f7d2e3f61af7e329ceb20d030 | refs/heads/master | 2022-04-23T12:36:51.144086 | 2020-04-12T14:22:32 | 2020-04-12T14:22:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | from core import LightController, MediaPlayerController
from const import Light, MediaPlayer
class ZYCT202LightController(LightController):
    """
    This controller does not send hold action for on/off
    """

    def get_z2m_actions_mapping(self):
        # Map zigbee2mqtt remote events to light actions; "stop" ends a hold.
        return {
            "on": Light.ON,
            "up-press": Light.HOLD_BRIGHTNESS_UP,
            "down-press": Light.HOLD_BRIGHTNESS_DOWN,
            "off": Light.OFF,
            "stop": Light.RELEASE,
        }
class ZYCT202MediaPlayerController(MediaPlayerController):
    """Media-player mapping for the same remote; on/off both toggle play/pause."""

    def get_z2m_actions_mapping(self):
        # Map zigbee2mqtt remote events to media-player actions.
        return {
            "on": MediaPlayer.PLAY_PAUSE,
            "up-press": MediaPlayer.HOLD_VOLUME_UP,
            "down-press": MediaPlayer.HOLD_VOLUME_DOWN,
            "off": MediaPlayer.PLAY_PAUSE,
            "stop": MediaPlayer.RELEASE,
        }
| [
"xaviml.93@gmail.com"
] | xaviml.93@gmail.com |
8a4768832ac41a26f325621a9f9ac1b52353befa | 98e887138c810a403f6651ef420bc63b827acca2 | /py/bitwise_one.py | a05dcf7f7c97fbd41508aa97bffaee470927beb8 | [] | no_license | lraulin/algorithms | ebd1de5d84b451890d3f243acd5bc8cb8f764ae9 | c4b809d8db3be1dbebdbdb3a9f1721223558a41c | refs/heads/master | 2021-07-04T11:44:55.675002 | 2020-07-29T20:03:27 | 2020-07-29T20:03:27 | 130,787,500 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | """
Using the Python language, have the function BitwiseOne(strArr) take the array
of strings stored in strArr, which will only contain two strings of equal
length that represent binary numbers, and return a final binary string that
performed the bitwise OR operation on both strings. A bitwise OR operation
places a 0 in the new string where there are zeroes in both binary strings,
otherwise it places a 1 in that spot. For example: if strArr is ["1001",
"0100"] then your program should return the string "1101"
"""
def BitwiseOne(strArr):
    """Return the bitwise OR of two equal-length binary strings, as a binary string.

    The result is zero-padded back to the input length so positional zeroes
    survive: ["001", "010"] -> "011" (the original returned "11" because
    bin() drops leading zeros).
    """
    num0 = int(strArr[0], base=2)
    num1 = int(strArr[1], base=2)
    # bin() already returns a str; strip the "0b" prefix and restore leading zeros.
    return bin(num0 | num1)[2:].zfill(len(strArr[0]))


# Quick demo invocation (return value intentionally unused).
BitwiseOne(["100", "000"])
| [
"leeraulin@gmail.com"
] | leeraulin@gmail.com |
2ed33f202c38570746342c2857baf0e65865d4c7 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/335/usersdata/297/99446/submittedfiles/matriz1.py | 096abd14b917081b4be59c8e9d2f60fab8661719 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,053 | py | # -*- coding: utf-8 -*-
#entrada
import numpy as np
matriz=[]
m=int(input('digite o numero de linhas da matriz que voceh deseja recortar: '))
n=int(input('digite o numero de colunas da matriz que voceh deseja recortar: '))
for i in range(0,m,1):
linha=[]
for j in range(0,n,1):
linha.append(int(input('digite o valor do elemento da linha%d e da coluna%d desejada: '%((j+1),(i+1)))))
matriz.append(linha)
linhaszeradas=0
linhaszeradas2=0
colunaszeradas=0
colunaszeradas2=0
#corte superior
for i in range(0,m,1) :
y=int(sum(matriz[i]))
if y == 1 :
break
else :
linhaszeradas=linhaszeradas+1
for i in range(0,linhaszeradas,1):
del matriz[i]
#corte inferior
for i in range(m-linhaszeradas-1,0,-1) :
r=int(sum(matriz[i]))
if r == 1 :
break
else :
linhaszeradas2=linhaszeradas2+1
for i in range(linhaszeradas2,0,-1):
del matriz[i]
t=0
#corte direito
for i in range(0,m-linhaszeradas-linhaszeradas2,1):
for j in range(0,j,1) :
t=t+matriz[i][j]
if t == 1 :
break
else :
colunaszeradas=colunaszeradas+1
for j in range(0,n-colunaszeradas,1):
print(matriz[j][i])
del matriz[j][i]
print(colunaszeradas)
'''#corte esquerdo
for j in range(n-colunaszeradas-1,0,-1) :
s=int(sum(matriz[j]))
if s == 1 :
break
else :
colunaszeradas2=colunaszeradas2+1
for i in range(colunaszeradas2,0,-1):
del matriz[j]
#saida'''
print(matriz)
'''matriz=[]
m=int(input('digite o numero de linhas da matriz que voceh deseja recortar: '))
n=int(input('digite o numero de colunas da matriz que voceh deseja recortar: '))
for i in range(0,m,1):
linha=[]
for j in range(0,n,1):
linha.append(int(input('digite o valor do elemento da linha %d desejada: '%(j+1))))
matriz.append(linha)
indice_superior=m-1
indice_inferior=0
indice_superior=0
indice_superior=n-1
for i in range(0,m,1):
encontrou_na_linha = False
for j in range(0,n,1):
if matriz[i][j]==1 :'''
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f78bf7c8d0cc09bce8f088ef0286354e424de4b4 | 3b93f91703a36f8ec8fd1767e719f3e3523ab6f1 | /Amendments/scipts for graph construction/graph construction 0522/05.adjacency_matrix_and_transition_states copy.py | 4e9b605470ce4b461e1be6982bc20a269c9a4b0a | [] | no_license | SutirthaChakraborty/Real-Time-Music-Driven-Dancing-Robot | 66655b1ed1d000499096295587c9c902a636b688 | 98ab75e8ab199a56f1e80854a891fcf4425dd042 | refs/heads/master | 2023-02-13T01:57:56.185295 | 2021-01-04T19:12:33 | 2021-01-04T19:12:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,470 | py | import pickle
import numpy as np
# Build a transition-state adjacency matrix from previously pickled
# motion-primitive data (graph construction step 05).
# NOTE(review): relies on `import pickle` earlier in the file.
with open('Amendments/scipts for graph construction/graph construction 0522/00.primitive_compact.txt', 'rb') as f:
    primitive_compact = pickle.load(f)
#print(primitive_compact)
with open('Amendments/scipts for graph construction/graph construction 0522/03.stable_typical_sequences_symmetrical.txt', 'rb') as f:
    stable_typical_sequences = pickle.load(f)
#print(stable_typical_sequence_redundant_supress, len(stable_typical_sequence_redundant_supress))
with open('Amendments/scipts for graph construction/graph construction 0522/04.nx5_matrix_and_interface.txt', 'rb') as f:
    nx5_matrix = pickle.load(f)
    interface = pickle.load(f)
with open('Amendments/scipts for graph construction/graph construction 0522/03.symmetric_reference.txt', 'rb') as f:
    symmetric_reference = pickle.load(f)
#print(incidence_matrix, interface)
# Collect every state that appears inside a typical sequence; negative
# states are expanded through their symmetric originals — presumably
# negative ids denote mirrored states (TODO confirm against step 03).
states_in_typical_sequence = []
for sequence in stable_typical_sequences:
    for state in sequence:
        if state > 0:
            if not state in states_in_typical_sequence:
                states_in_typical_sequence.append(state)
        else:
            for original_state in symmetric_reference[state]:
                if not original_state in states_in_typical_sequence:
                    states_in_typical_sequence.append(original_state)
# First and last state of each typical sequence.
sequence_head = []
for sequence in stable_typical_sequences:
    sequence_head.append(sequence[0])
#print(states_in_typical_sequence, len(states_in_typical_sequence), sequence_head, len(sequence_head))
sequence_tail = []
for sequence in stable_typical_sequences:
    sequence_tail.append(sequence[-1])
# Transition states: primitives (plus negative tails) that are outside
# every typical sequence, or that start/end one.
transition_states = []
for state in list(set(primitive_compact + [state for state in sequence_tail if state < 0])):
    if state not in states_in_typical_sequence or state in sequence_head or state in sequence_tail:
        transition_states.append(state)
#print(sequence_head, sequence_tail)
# Map each transition state to its row/column index.
transition_state_idx_dict = {}
for idx, state in enumerate(transition_states):
    transition_state_idx_dict[state] = idx
#print(transition_state_idx_dict)
print(transition_states, len(transition_states))
# Entry 1: the successor (next primitive in primitive_compact, cyclic)
# of a transition state is itself a transition state and not a tail.
adjacency_matrix = np.zeros((len(transition_states), len(transition_states)))
for i in range(adjacency_matrix.shape[0]):
    if transition_states[i] > 0:
        next_options = [primitive_compact[(j+1)%len(primitive_compact)] for j in range(len(primitive_compact)) if primitive_compact[j] == transition_states[i]]
        for option in next_options:
            if (option in transition_state_idx_dict) and (option not in sequence_tail):
                adjacency_matrix[i][transition_state_idx_dict[option]] = 1
    if transition_states[i] < 0:
        # Negative states inherit the successors of their symmetric originals.
        for original_state in symmetric_reference[transition_states[i]]:
            next_options = [primitive_compact[(j+1)%len(primitive_compact)] for j in range(len(primitive_compact)) if primitive_compact[j] == original_state]
            for option in next_options:
                if (option in transition_state_idx_dict) and (option not in sequence_tail):
                    adjacency_matrix[i][transition_state_idx_dict[option]] = 1
#print(np.sum(adjacency_matrix, axis=1))
# Entry 2: any non-head/non-tail state may enter a sequence head ...
for state in sequence_head:
    for i in range(adjacency_matrix.shape[0]):
        if not (transition_states[i] in sequence_head or transition_states[i] in sequence_tail):
            #if not adjacency_matrix[i, transition_state_idx_dict[state]] == 1:
            adjacency_matrix[i, transition_state_idx_dict[state]] = 2
#print(np.sum(adjacency_matrix, axis=1))
# ... and any sequence tail may exit to a non-head/non-tail state,
# unless a direct (value 1) edge already exists.
for state in sequence_tail:
    for j in range(adjacency_matrix.shape[1]):
        if not (transition_states[j] in sequence_head or transition_states[j] in sequence_tail):
            if not adjacency_matrix[transition_state_idx_dict[state], j] == 1:
                adjacency_matrix[transition_state_idx_dict[state], j] = 2
#print(np.sum(adjacency_matrix, axis=1))
# Spot-check a single edge (hard-coded state ids 329 -> 107).
print(adjacency_matrix[transition_state_idx_dict[329]][transition_state_idx_dict[107]])
print(adjacency_matrix.shape)
#print(sequence_head)
#print(sequence_tail)
# Count non-head states that have no direct (value 1) outgoing edge.
count = 0
for i in range(adjacency_matrix.shape[0]):
    if not transition_states[i] in sequence_head:
        if np.sum(adjacency_matrix[i] == 1)<1:
            count += 1
print(count)
# Persist with protocol 2 so the file stays readable from Python 2.
with open('Amendments/scipts for graph construction/graph construction 0522/05.adjacency_matrix_and_transition_states.txt', 'wb') as f:
    pickle.dump(adjacency_matrix, f, protocol=2)
    pickle.dump(transition_states, f, protocol=2)
| [
"zhaojw1998@outlook.com"
] | zhaojw1998@outlook.com |
b7c5d02b0d0982e013d2d6a37f8f2dd106e39ea2 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/shared/gui_items/dossier/achievements/AimerAchievement.py | b728bb2f7a03a4015eaeba6e5ff92a16acfe48ca | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 869 | py | # 2017.02.03 21:52:23 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/gui_items/dossier/achievements/AimerAchievement.py
from dossiers2.ui.achievements import ACHIEVEMENT_BLOCK as _AB
from abstract import SeriesAchievement
from abstract.mixins import NoProgressBar
class AimerAchievement(NoProgressBar, SeriesAchievement):
    """Series achievement 'aimer' — a SINGLE-block series with no progress bar."""

    def __init__(self, dossier, value = None):
        # Delegate straight to the series base with the fixed achievement id.
        SeriesAchievement.__init__(self, 'aimer', _AB.SINGLE, dossier, value)

    def _getCounterRecordNames(self):
        # Both the current and the maximum series are read from the same
        # TOTAL-block record.
        record = (_AB.TOTAL, 'maxAimerSeries')
        return (record, record)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\shared\gui_items\dossier\achievements\AimerAchievement.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:52:23 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
c65aad38816966146e85c8f8693a8b5962ad6d14 | f40086079bdcb465da32bfc4c244d0a699a735e3 | /informatics/previous/sungjoonpark/series_04/uʍop_ǝpᴉsdn_ʎ˥ǝʇǝ˥dɯoɔ_ƃuᴉɥʇǝɯos_ɹoɟ_ʍou_pu∀.py | 97d0d32fc2576e42986f02901391b2ac601c95dc | [] | no_license | isk02206/python | e6dfc1e219ae3a51bde80fed75412bed98b3defe | b2fc6d1aa1155c0758883677eb2e37d9f92a4382 | refs/heads/master | 2022-12-06T15:14:55.264792 | 2020-09-02T01:02:11 | 2020-09-02T01:02:11 | 292,142,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | number_of_lines = int(input())
word_list = {"A":"∀", "B":"q", "C":"Ɔ", "D":"p", "E":"Ǝ", "F":"Ⅎ", "G":"פ", "H":"H", "I":"I", "J":"ſ", "K":"ʞ", "L":"˥", "M":"W", "N":"N", "O":"O", "P":"Ԁ", "Q":"Ό", "R":"ɹ", "S":"S", "T":"┴", "U":"∩", "V":"Λ", "W":"M", "X":"X", "Y":"⅄", "Z":"Z", "a":"ɐ", "b":"q", "c":"ɔ", "d":"p", "e":"ǝ", "f":"ɟ", "g":"ƃ", "h":"ɥ", "i":"ᴉ", "j":"ɾ", "k":"ʞ", "l":"˥", "m":"ɯ", "n":"u", "o":"o", "p":"d", "q":"b", "r":"ɹ", "s":"s", "t":"ʇ", "u":"n", "v":"ʌ", "w":"ʍ", "x":"x", "y":"ʎ", "z":"z", "0":"0", "1":"Ɩ", "2":"ᄅ", "3":"Ɛ", "4":"ㄣ", "5":"ϛ", "6":"9", "7":"ㄥ", "8":"8", "9":"6", "?":"¿", "!":"¡", ".":"˙", "-":"-", "(":")", ")":"(", "<":">", ">":"<", "%":"%", "$":"$", "'":",", ",":"'", ":":":", "_":"‾", " ":" " }
message = ""
for i in range(number_of_lines):
    previous_message = input()
    # Prefix each translated line with a newline; after the final
    # reversal these become line separators, and strip() drops the
    # leftover leading/trailing whitespace.
    message += "\n"
    for last_message in previous_message:
        # NOTE(review): last_message is a single character, so this inner
        # loop always runs exactly once (len == 1); kept as-is.
        for j in range(0, len(last_message)):
            # Raises KeyError for characters absent from word_list —
            # presumably the input is limited to mapped characters; confirm.
            list_of_words = word_list[last_message[j]]
            message += list_of_words
# Reverse the whole text so the flipped glyphs read upside down.
print(message[::-1].strip())
"67949037+isk02206@users.noreply.github.com"
] | 67949037+isk02206@users.noreply.github.com |
5fdff3610a155b00dd686d36586d03fc167dfc48 | e4aab0a71dc5c047d8b1576380b16364e03e7c0d | /core/__init__.py | d5b32c3d823943de5412b3ff571b5ca219f3ed34 | [
"Apache-2.0"
] | permissive | Joecastra/Watcher3 | 8ca66c44846030f0eb771d9d6ddeb9c37f637a4e | ce25d475f83ed36d6772f0cc35ef020d5e47c94b | refs/heads/master | 2021-01-19T11:05:55.454351 | 2017-04-10T20:17:24 | 2017-04-10T20:17:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | # Paths to local things
# Mutable module-level globals shared across the application.
# None/empty values here are placeholders filled in at startup —
# presumably by the launcher/config loader; confirm against callers.
PROG_PATH = None
CONF_FILE = 'config.cfg'
LOG_DIR = 'logs'
PLUGIN_DIR = 'plugins'
DB_FILE = 'watcher.sqlite'
THEME = 'Default'
# Paths to internet things
GIT_URL = 'https://github.com/nosmokingbandit/watcher3'
GIT_REPO = 'https://github.com/nosmokingbandit/watcher3.git'
GIT_API = 'https://api.github.com/repos/nosmokingbandit/watcher3'
# Server settings
SERVER_ADDRESS = None
SERVER_PORT = None
URL_BASE = ''
# Update info
UPDATE_STATUS = None
UPDATE_LAST_CHECKED = None
UPDATING = False
CURRENT_HASH = None
# Search Scheduler info
NEXT_SEARCH = None
# Store settings after write. Reduces reads from file.
CONFIG = None
# A list of notification data
NOTIFICATIONS = []
# Rate limiting
TMDB_TOKENS = 35
TMDB_LAST_FILL = None
# Global Media Constants (ordered best-to-worst quality)
RESOLUTIONS = ['BluRay-4K', 'BluRay-1080P', 'BluRay-720P',
               'WebDL-4K', 'WebDL-1080P', 'WebDL-720P',
               'WebRip-4K', 'WebRip-1080P', 'WebRip-720P',
               'DVD-SD',
               'Screener-1080P', 'Screener-720P',
               'Telesync-SD', 'CAM-SD']
| [
"nosmokingbandit@gmail.com"
] | nosmokingbandit@gmail.com |
ad1770d198e972bde83bdee3f5639cffa5f26e38 | 0ad9fea49857f16ed8ff3836b568a31d17c8d3c9 | /src/challenge_book/millionaire_apac2008.py | 0159c210c1a5c1f9777550cb47f83cf94083d1b1 | [] | no_license | nannany/python | 38ce9a105a7f94d76fbf95799664c1fd93d9de6e | 18c7b2e06fcea361ef1dab2e3f55fbc67c4486ae | refs/heads/master | 2020-05-23T09:05:13.780089 | 2019-03-21T05:39:48 | 2019-03-21T05:39:48 | 80,430,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | import copy
global M, P, X
def solve(m=None, p=None, x=None):
    """Return the maximum probability of finishing with $1,000,000.

    Classic "millionaire" betting DP (Code Jam APAC 2008): starting
    with ``x`` dollars and ``m`` betting rounds, each round any amount
    held may be wagered; a wager is won with probability ``p`` (the
    wager is doubled) or lost.  Money is tracked in quanta of
    1000000 / 2**m dollars, which is exact because only ``m`` doubling
    rounds remain.

    Parameters default to the module-level globals ``M``, ``P`` and
    ``X`` so the existing ``solve()`` call sites keep working.
    """
    if m is None:
        m = M
    if p is None:
        p = P
    if x is None:
        x = X
    n = 1 << m  # number of money quanta making up the $1M target
    # prev[l]: probability of reaching n quanta holding l quanta with
    # the current number of rounds remaining.
    prev = [0.0] * (n + 1)
    prev[n] = 1.0  # already at the target
    nxt = [0.0] * (n + 1)
    for _ in range(m):
        for l in range(n + 1):
            max_bet = min(l, n - l)  # cannot bet more than held, or than needed
            best = 0.0
            for j in range(max_bet + 1):
                best = max(best, p * prev[l + j] + (1 - p) * prev[l - j])
            nxt[l] = best
        # A shallow copy suffices for a flat list of floats
        # (the original used an unnecessary copy.deepcopy).
        prev = list(nxt)
    return prev[int(x * n / 1000000)]
if __name__ == '__main__':
    # Code Jam style driver: first line holds the number of cases; each
    # case line supplies "M P X" (rounds, win probability, starting
    # money), published to the module globals that solve() reads.
    N = int(input())
    for i in range(1, N + 1):
        inputs = input().split()
        M = int(inputs[0])
        P = float(inputs[1])
        X = int(inputs[2])
        ans = solve()
        # Output format mandated by the judge: "Case #k: p" with 6 decimals.
        print("Case #" + str(i) + ": " + ('%0.6f' % ans))
| [
"ymym1990ymym@gmail.com"
] | ymym1990ymym@gmail.com |
323f8614d6a0a848f8b2f9a49b7a28f69dcbb5af | c981cc8c8dfe05fc5a57915e9b4d8479a9a6bbf2 | /py/lvmspec/quicklook/qas.py | f3aed35436999b11b6613a261ecd2d85690ae5fb | [
"BSD-3-Clause"
] | permissive | sdss/lvmspec | af25dae6ae87ea1355aa8f9075e2e174e4599eb7 | befd6991537c4947fdf63ca262937f2bb845148f | refs/heads/master | 2021-06-25T02:13:27.038449 | 2019-10-28T20:35:38 | 2019-10-28T20:35:38 | 102,514,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,866 | py | from lvmspec.quicklook import qllogger
import collections
import collections.abc
from enum import Enum

import numpy as np

from lvmspec.quicklook import qlexceptions
class QASeverity(Enum):
    """Severity grades for QA results; a larger value is more severe."""
    ALARM=30
    WARNING=20
    NORMAL=0
class MonitoringAlg:
    """Simple base class for monitoring algorithms.

    Subclasses override :meth:`run`.  Calling the instance executes
    ``run`` and then, when the configuration supplies a REFERENCE value
    and RANGES thresholds, grades the result and stores a QA status in
    the returned ``METRICS`` dictionary.
    """

    def __init__(self, name, inptype, config, logger=None):
        """Remember name/config and set up logging.

        Args:
            name: algorithm name, also used as the logger name.
            inptype: an instance of the accepted input type; only its
                type is kept (see :meth:`is_compatible`).
            config: configuration dict; ``config['kwargs']`` is read
                when the instance is called.
            logger: optional externally supplied logger; a QLLogger is
                created when None.
        """
        if logger is None:
            self.m_log = qllogger.QLLogger().getlog(name)
        else:
            self.m_log = logger
        self.__inpType__ = type(inptype)
        self.name = name
        self.config = config
        self.__deviation = None
        self.m_log.debug("initializing Monitoring alg {}".format(name))

    def __call__(self, *args, **kwargs):
        """Run the algorithm and grade its result against the reference.

        Grading rules (from the original in-line notes):

        * ``metrics[reskey]`` is compared with ``params["REFERENCE"]``;
          the (element-wise) difference is kept as the deviation.
        * ``cargs["RANGES"]`` is a sorted list of
          ``((low, high), QASeverity)`` tuples — one list per result for
          multi-valued results.  Intervals should not overlap; the lower
          bound is inclusive, the upper bound exclusive.
        * The severity of the interval containing the deviation is
          written under the QA status key; a deviation outside every
          interval maps to QASeverity.ALARM.  Without REFERENCE/RANGES
          the status stays "UNKNOWN".
        """
        res = self.run(*args, **kwargs)
        res["QA_STATUS"] = "UNKNOWN"
        cargs = self.config['kwargs']
        params = cargs['param']
        # Make sure the result carries a METRICS dictionary.
        metrics = res["METRICS"] if 'METRICS' in res else None
        if metrics is None:
            metrics = {}
            res["METRICS"] = metrics
        # Key names may be overridden through the configuration.
        reskey = "RESULT"
        QARESULTKEY = "QA_STATUS"
        if "SAMI_QASTATUSKEY" in cargs:
            QARESULTKEY = cargs["SAMI_QASTATUSKEY"]
        if "SAMI_RESULTKEY" in cargs:
            reskey = cargs["SAMI_RESULTKEY"]
        if reskey in metrics and "REFERENCE" in params:
            current = metrics[reskey]
            old = params["REFERENCE"]
            # collections.abc.Sequence: the bare collections.Sequence
            # alias was removed in Python 3.10.
            currlist = isinstance(current, (np.ndarray, collections.abc.Sequence))
            oldlist = isinstance(old, (np.ndarray, collections.abc.Sequence))
            if currlist != oldlist:  # different types
                self.m_log.critical("QL {} : REFERENCE({}) and RESULT({}) are of different types!".format(self.name, type(old), type(current)))
            elif currlist:  # both are sequences
                if len(old) == len(current):
                    self.__deviation = [c - o for c, o in zip(current, old)]
                else:
                    self.m_log.critical("QL {} : REFERENCE({}) and RESULT({}) are of different length!".format(self.name, len(old), len(current)))
            else:  # both are scalars
                self.__deviation = current - old

        def findThr(d, t):
            # Severity of the last interval containing d (intervals are
            # expected not to overlap); ALARM when none matches.
            val = QASeverity.ALARM
            for l in t:
                if d >= l[0][0] and d < l[0][1]:
                    val = l[1]
            return val

        if self.__deviation is not None and "RANGES" in cargs:
            thr = cargs["RANGES"]
            metrics[QARESULTKEY] = "ERROR"
            # Multiple threshold lists => one per result.
            thrlist = isinstance(thr[0][0][0], (np.ndarray, collections.abc.Sequence))
            devlist = isinstance(self.__deviation, (np.ndarray, collections.abc.Sequence))
            if devlist != thrlist and len(thr) != 1:  # different types and thresholds are a list
                self.m_log.critical("QL {} : dimension of RANGES({}) and RESULTS({}) are incompatible! Check configuration RANGES={}, RESULTS={}".format(self.name, len(thr), len(self.__deviation),
                                    thr, current))
                return res
            else:  # they are of the same type
                if devlist:  # multiple results
                    if len(thr) == 1:  # check all results against the same thresholds
                        # NOTE(review): this branch stores QASeverity values
                        # while the other two store str() of them — looks
                        # inconsistent, but kept as-is to preserve behavior.
                        metrics[QARESULTKEY] = [findThr(d, thr) for d in self.__deviation]
                    else:  # each result has its own thresholds
                        metrics[QARESULTKEY] = [str(findThr(d, t)) for d, t in zip(self.__deviation, thr)]
                else:  # result is a scalar
                    metrics[QARESULTKEY] = str(findThr(self.__deviation, thr))
        return res

    def run(self, *argv, **kwargs):
        """Execute the algorithm; subclasses must override and return a dict."""
        pass

    def is_compatible(self, Type):
        """Return True when *Type* is an instance of the accepted input type."""
        return isinstance(Type, self.__inpType__)

    def check_reference(self):
        """Return the deviation from the reference computed by the last call.

        Bug fix: the original definition omitted ``self``, so calling
        ``instance.check_reference()`` always raised TypeError.
        """
        return self.__deviation

    def get_default_config(self):
        """ return a dictionary of 3-tuples,
        field 0 is the name of the parameter
        field 1 is the default value of the parameter
        field 2 is the comment for human readable format.
        Field 2 can be used for QLF to dynamically setup the display"""
        return None
| [
"havok2063@hotmail.com"
] | havok2063@hotmail.com |
629f4d1822ca707541fbb6ffbb6844a6460e9065 | b94ab99f9c1f8bbb99afd23e1bfcd2332060b4bd | /library/migrations/0008_auto_20170729_0959.py | 62f936fe2c1943ed9632dcad3e57927adb30d277 | [] | no_license | georgecai904/bookshelf | e54ccae00d4ee48e91ca1564a425ba4586b52d93 | 0002207dc8ca586ce1127d3ea98bb53102d043df | refs/heads/master | 2021-01-02T22:52:26.046535 | 2017-08-05T15:32:13 | 2017-08-05T15:32:13 | 99,409,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-29 09:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: enforces unique=True (with
    # max_length=30) on the ``name`` field of five library models.

    dependencies = [
        ('library', '0007_auto_20170729_0618'),
    ]

    operations = [
        migrations.AlterField(
            model_name='author',
            name='name',
            field=models.CharField(max_length=30, unique=True),
        ),
        migrations.AlterField(
            model_name='book',
            name='name',
            field=models.CharField(max_length=30, unique=True),
        ),
        migrations.AlterField(
            model_name='category',
            name='name',
            field=models.CharField(max_length=30, unique=True),
        ),
        migrations.AlterField(
            model_name='country',
            name='name',
            field=models.CharField(max_length=30, unique=True),
        ),
        migrations.AlterField(
            model_name='publisher',
            name='name',
            field=models.CharField(max_length=30, unique=True),
        ),
    ]
| [
"georgemail608@gmail.com"
] | georgemail608@gmail.com |
81a48e318e660d14a7e7a5655936741a57062220 | d93c91e904470b46e04a4eadb8c459f9c245bb5a | /navimumbai_scrape/housing/housing/settings.py | ec88861dd919057e3bed104da391338fceb7e399 | [] | no_license | nbourses/scrappers | 3de3cd8a5408349b0ac683846b9b7276156fb08a | cde168a914f83cd491dffe85ea24aa48f5840a08 | refs/heads/master | 2021-03-30T15:38:29.096213 | 2020-03-25T03:23:56 | 2020-03-25T03:23:56 | 63,677,541 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,174 | py | # -*- coding: utf-8 -*-
# Scrapy settings for housing project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Googlebot'

SPIDER_MODULES = ['housing.spiders']
NEWSPIDER_MODULE = 'housing.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# NOTE(review): this crawler instead masquerades as Google's crawler via
# both BOT_NAME and USER_AGENT, and disables robots.txt compliance below
# — intentional here, but worth flagging for operational/policy review.
USER_AGENT = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'housing.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'housing.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'housing.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"karanchudasama1@gmail.com"
] | karanchudasama1@gmail.com |
3676b7a2b7076a6753decc540ac302493b6dc54f | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/list_kms_by_tags_request_body.py | 0177eee342fde910dbb6073e69d721f6ad1caf20 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,539 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListKmsByTagsRequestBody:
    """Request body for listing KMS keys by tags — a model defined in
    the Huawei Cloud SDK (auto-generated).

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attributes masked as "****" by to_dict(); none for this model.
    sensitive_list = []

    openapi_types = {
        'limit': 'str',
        'offset': 'str',
        'action': 'str',
        'tags': 'list[Tag]',
        'matches': 'list[TagItem]',
        'sequence': 'str'
    }

    attribute_map = {
        'limit': 'limit',
        'offset': 'offset',
        'action': 'action',
        'tags': 'tags',
        'matches': 'matches',
        'sequence': 'sequence'
    }

    def __init__(self, limit=None, offset=None, action=None, tags=None, matches=None, sequence=None):
        """ListKmsByTagsRequestBody - a model defined in huaweicloud sdk.

        All parameters are optional; setters (with their validation, if
        any) are only invoked for values that are not None.
        """
        self._limit = None
        self._offset = None
        self._action = None
        self._tags = None
        self._matches = None
        self._sequence = None
        self.discriminator = None

        if limit is not None:
            self.limit = limit
        if offset is not None:
            self.offset = offset
        if action is not None:
            self.action = action
        if tags is not None:
            self.tags = tags
        if matches is not None:
            self.matches = matches
        if sequence is not None:
            self.sequence = sequence

    @property
    def limit(self):
        """Gets the limit of this ListKmsByTagsRequestBody.

        Number of records to query (not needed when ``action`` is
        "count"); defaults to "10" when ``action`` is "filter".
        Valid range: 1-1000.

        :return: The limit of this ListKmsByTagsRequestBody.
        :rtype: str
        """
        return self._limit

    @limit.setter
    def limit(self, limit):
        """Sets the limit of this ListKmsByTagsRequestBody.

        Number of records to query (not needed when ``action`` is
        "count"); defaults to "10" when ``action`` is "filter".
        Valid range: 1-1000.

        :param limit: The limit of this ListKmsByTagsRequestBody.
        :type: str
        """
        self._limit = limit

    @property
    def offset(self):
        """Gets the offset of this ListKmsByTagsRequestBody.

        Index position; the query starts from the record after
        ``offset``.  When querying the next page, pass the value
        returned by the previous page's response (not needed when
        ``action`` is "count").  Defaults to "0" when ``action`` is
        "filter".  Must be a number and must not be negative.

        :return: The offset of this ListKmsByTagsRequestBody.
        :rtype: str
        """
        return self._offset

    @offset.setter
    def offset(self, offset):
        """Sets the offset of this ListKmsByTagsRequestBody.

        Index position; the query starts from the record after
        ``offset``.  When querying the next page, pass the value
        returned by the previous page's response (not needed when
        ``action`` is "count").  Defaults to "0" when ``action`` is
        "filter".  Must be a number and must not be negative.

        :param offset: The offset of this ListKmsByTagsRequestBody.
        :type: str
        """
        self._offset = offset

    @property
    def action(self):
        """Gets the action of this ListKmsByTagsRequestBody.

        Operation identifier ("filter" or "count").
        - filter: filter the records.
        - count: return only the total number of records.

        :return: The action of this ListKmsByTagsRequestBody.
        :rtype: str
        """
        return self._action

    @action.setter
    def action(self, action):
        """Sets the action of this ListKmsByTagsRequestBody.

        Operation identifier ("filter" or "count").
        - filter: filter the records.
        - count: return only the total number of records.

        :param action: The action of this ListKmsByTagsRequestBody.
        :type: str
        """
        self._action = action

    @property
    def tags(self):
        """Gets the tags of this ListKmsByTagsRequestBody.

        Tag list: a collection of key/value pairs.
        - key: tag key; a key holds at most 10 entries, must be
          non-empty and unique, and values must be unique under the
          same key.  Maximum key length: 36 characters.
        - value: tag value; each value is at most 43 characters, and
          values are combined with AND semantics.

        :return: The tags of this ListKmsByTagsRequestBody.
        :rtype: list[Tag]
        """
        return self._tags

    @tags.setter
    def tags(self, tags):
        """Sets the tags of this ListKmsByTagsRequestBody.

        Tag list: a collection of key/value pairs.
        - key: tag key; a key holds at most 10 entries, must be
          non-empty and unique, and values must be unique under the
          same key.  Maximum key length: 36 characters.
        - value: tag value; each value is at most 43 characters, and
          values are combined with AND semantics.

        :param tags: The tags of this ListKmsByTagsRequestBody.
        :type: list[Tag]
        """
        self._tags = tags

    @property
    def matches(self):
        """Gets the matches of this ListKmsByTagsRequestBody.

        Search criteria.
        - key: the field to match, e.g. resource_name.
        - value: the value to match; at most 255 characters and must
          not be empty.

        :return: The matches of this ListKmsByTagsRequestBody.
        :rtype: list[TagItem]
        """
        return self._matches

    @matches.setter
    def matches(self, matches):
        """Sets the matches of this ListKmsByTagsRequestBody.

        Search criteria.
        - key: the field to match, e.g. resource_name.
        - value: the value to match; at most 255 characters and must
          not be empty.

        :param matches: The matches of this ListKmsByTagsRequestBody.
        :type: list[TagItem]
        """
        self._matches = matches

    @property
    def sequence(self):
        """Gets the sequence of this ListKmsByTagsRequestBody.

        36-byte request message sequence number,
        e.g. 919c82d4-8046-4722-9094-35c3c6524cff

        :return: The sequence of this ListKmsByTagsRequestBody.
        :rtype: str
        """
        return self._sequence

    @sequence.setter
    def sequence(self, sequence):
        """Sets the sequence of this ListKmsByTagsRequestBody.

        36-byte request message sequence number,
        e.g. 919c82d4-8046-4722-9094-35c3c6524cff

        :param sequence: The sequence of this ListKmsByTagsRequestBody.
        :type: str
        """
        self._sequence = sequence

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Sensitive attributes are masked in the dict output.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListKmsByTagsRequestBody):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
2e582e39cf683f6d822e5acc9fffc297193390df | f68732bc40a7a90c3a1082e4b3a4154518acafbb | /script/dbus/systemBus/power/016_getBatteryTimeToFull.py | 500baed85bfe43017b362bb1c100f2529922089a | [] | no_license | lizhouquan1017/dbus_demo | 94238a2307e44dabde9f4a4dd0cf8ec217260867 | af8442845e722b258a095e9a1afec9dddfb175bf | refs/heads/master | 2023-02-11T19:46:27.884936 | 2021-01-08T05:27:18 | 2021-01-08T05:27:18 | 327,162,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | # -*- coding: utf-8 -*-
# ****************************************************
# @Test Case ID: 016_getBatteryTimeToFull
# @Test Description: 电池充满时间,单位为秒
# @Test Condition:
# @Test Step: 1.读取电池充满时间值
# @Test Result: 1.检查读取成功;
# @Test Remark:
# @Author: ut000511
# *****************************************************
import pytest
from frame.base import OSBase
from aw.dbus.systemBus import power
class TestCase(OSBase):
    """DBus power test 016: read the battery time-to-full (in seconds)."""

    def setUp(self):
        # Precondition 1: none.
        self.Step("预制条件1:无")

    @pytest.mark.public
    def test_step(self):
        # Step 1: read the battery time-to-full value and check the read succeeds.
        self.Step("步骤1:读取电池充满时间值并检查读取成功")
        power.getBatteryTimeToFull()

    def tearDown(self):
        # Teardown: none.
        self.Step("收尾:无")
| [
"lizhouquan@uniontech.com"
] | lizhouquan@uniontech.com |
315e03e1041afec56a4b5bb6c6b03894ea88d1b0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/107/usersdata/181/51617/submittedfiles/questao3.py | a6ff0c7365b01676a4570d6fe271aa08d0dc7de6 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | # -*- coding: utf-8 -*-
# Twin-prime check: read p and q and print 'S' when they are twin
# primes (both prime and q == p + 2), otherwise 'N'.
# The original version did not parse (`if q=p+2:`), had a misaligned
# `else`, and never advanced `i` in its loop; this rewrite keeps the
# same input/output contract (same prompts, prints 'S' or 'N').
p=int(input('digite o valor p:'))
q=int(input('digite o valor q'))

def _eh_primo(n):
    # True when n is prime (trial division up to sqrt(n)).
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True

if _eh_primo(p) and _eh_primo(q) and q == p + 2:
    print('S')
else:
    print('N')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
5fed7de56e2f093f2a95588904d5a8cf257e807e | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=0.8_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=28/sched.py | 1cb68b6709451b85044bc86a04374702a2f590ac | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | -X FMLP -Q 0 -L 1 63 200
-X FMLP -Q 0 -L 1 52 150
-X FMLP -Q 0 -L 1 50 250
-X FMLP -Q 0 -L 1 44 200
-X FMLP -Q 1 -L 1 43 250
-X FMLP -Q 1 -L 1 38 125
-X FMLP -Q 1 -L 1 36 250
-X FMLP -Q 1 -L 1 23 175
-X FMLP -Q 2 -L 1 20 100
-X FMLP -Q 2 -L 1 20 125
-X FMLP -Q 2 -L 1 19 100
-X FMLP -Q 3 -L 1 18 175
-X FMLP -Q 3 -L 1 18 100
-X FMLP -Q 3 -L 1 17 175
16 175
15 150
6 150
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
3cd71ee69d7af85db210bc8100e4e902cefcc895 | 0ddcfcbfc3faa81c79e320c34c35a972dab86498 | /tests/test_string_compression.py | 768e016404081b7d7e38622a8e2449024a45b5fa | [] | no_license | IvanWoo/coding-interview-questions | 3311da45895ac4f3c394b22530079c79a9215a1c | 1312305b199b65a11804a000432ebe28d1fba87e | refs/heads/master | 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 | Python | UTF-8 | Python | false | false | 489 | py | import pytest
from puzzles.string_compression import compress
@pytest.mark.parametrize(
    "chars, expected",
    [
        # Runs of repeated characters collapse to char + count digits.
        (["a", "a", "b", "b", "c", "c", "c"], ["a", "2", "b", "2", "c", "3"]),
        # A single character gets no count appended.
        (["a"], ["a"]),
        # Counts of 10+ are written one digit per slot ("12" -> "1", "2").
        (
            ["a", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b"],
            ["a", "b", "1", "2"],
        ),
    ],
)
def test_compress(chars, expected):
    """compress() rewrites chars in place and returns the compressed length."""
    n = len(expected)
    assert compress(chars) == n
    assert chars[:n] == expected
"tyivanwu@gmail.com"
] | tyivanwu@gmail.com |
251e1378e3ce367434d296f1bc324cfae536b8cf | 989f011a784015e1a33c41362ab4ec06e92b3339 | /examples/05_control_structures/eem.py | 87385c464ae7dea8b819ea734351eba8af67edfd | [] | no_license | yevgeniy-voloshin/pyneng-online-jun-jul-2017 | b0be9df7d379e24b654172c1bc3f5cc0bdbbcd2f | 050e43d7f582528189005c1b7c34970352e968f1 | refs/heads/master | 2021-01-21T16:22:27.347769 | 2017-05-19T17:35:16 | 2017-05-19T17:35:16 | 91,885,650 | 1 | 0 | null | 2017-05-20T11:46:28 | 2017-05-20T11:46:28 | null | UTF-8 | Python | false | false | 179 | py | import sys
# Python 2 script: wrap each line of the file named on the command line
# in an "action NNNN cli command" statement, numbered from 0001 —
# presumably generating a Cisco EEM applet from a list of CLI commands
# (inferred from the file name; confirm).
config = sys.argv[1]

with open(config, 'r') as file:
    for (i, command) in enumerate(file, 1):
        print 'action %04d cli command "%s"' % (i, command.rstrip())
| [
"pyneng.course@gmail.com"
] | pyneng.course@gmail.com |
e371afb85090f500871bb82d8f8a6380bb7be83b | 625f2f86f2b2e07cb35204d9b3232427bf462a09 | /HIRun2017PP/trigger/hlt_Pythia8_EmEnrDijet30_pp_CUETP8M1_5020GeV/crabConfig.py | c1ca06602ccee55fe7fd7020cdbfd8e1920c98f0 | [] | no_license | ttrk/production | abb84c423a076fd9966276b7ed4350936c755e0b | f8a64c9c38de215802799365f0f7a99e1ee78276 | refs/heads/master | 2023-02-08T23:48:56.355141 | 2023-01-26T08:46:22 | 2023-01-26T08:46:22 | 52,877,406 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py | from WMCore.Configuration import Configuration
# CRAB (WMCore) job configuration for re-running the HLT on the
# Pythia8 EmEnrDijet30 pp 5.02 TeV sample with the fgBit forced off.
config = Configuration()
config.section_("General")
config.General.requestName = "hlt_Pythia8_EmEnrDijet30_pp_CUETP8M1_5020GeV_RefPP5TeV2017_V6_fgBitOff"
config.General.transferLogs = False
config.General.transferOutputs = True
config.section_("JobType")
config.JobType.pluginName = "Analysis"
# CMSSW configuration executed by each job.
config.JobType.psetName = "hlt.py"
config.JobType.maxMemoryMB = 2500 # request high memory machines, 2500 is the maximum guaranteed number.
config.JobType.maxJobRuntimeMin = 2800 # request longer runtime, ~47 hours. 2800 is the maximum guaranteed number.
# CMSSW_9_2_12_patch1
# HLT instructions : https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideGlobalHLT?rev=3077#Running_the_HLT_with_CMSSW_9_2_X
# hlt.py is the result of ./createHLT.sh + turn off process.dqmOutput
# turn off some L1 EG bits.
# disable the fgBit (set to 1) : https://github.com/cms-sw/cmssw/blob/CMSSW_9_2_X/L1Trigger/L1TCalorimeter/src/firmware/Stage2Layer2EGammaAlgorithmFirmwareImp1.cc#L231
# disable the shapeBit (set to 1) : https://github.com/cms-sw/cmssw/blob/CMSSW_9_2_X/L1Trigger/L1TCalorimeter/src/firmware/Stage2Layer2EGammaAlgorithmFirmwareImp1.cc#L233
config.section_("Data")
# Input is a user-published RAW dataset, hence DBS instance "phys03".
config.Data.inputDataset = "/Pythia8_EmEnrDijet30_pp_CUETP8M1_5020GeV/gsfs-RAW_20171002-f5c51db4d3712eae044fcbfa9ecb19c0/USER"
config.Data.inputDBS = "phys03"
config.Data.splitting = "FileBased"
config.Data.unitsPerJob = 10
# -1 = process the whole dataset.
config.Data.totalUnits = -1
config.Data.publication = False
config.JobType.outputFiles = ["openHLT.root"]
config.JobType.numCores = 4
config.Data.outputDatasetTag = "hlt_RefPP5TeV2017_V6_fgBitOff"
config.Data.outLFNDirBase = "/store/user/katatar/HIRun2017PP/"
config.section_("Site")
# Pin both storage and execution to T2_US_MIT.
config.Site.storageSite = "T2_US_MIT"
config.Site.whitelist = ["T2_US_MIT"]
#config.Site.ignoreGlobalBlacklist = True
config.section_("Debug")
config.Debug.extraJDL = ["+CMS_ALLOW_OVERFLOW=False"]
| [
"tatark@mit.edu"
] | tatark@mit.edu |
fbab4f01ddc98f6873c06610de03b2332cd6f849 | 2eff5a335be48682379a363a05441fed72d80dc5 | /vespa/pulse/auto_gui/basic_info.py | 504b9df6d595411a939e8e9f3c52551426b19733 | [
"BSD-3-Clause"
] | permissive | teddychao/vespa | 0d998b03b42daf95808d8115da1a2e2205629cd4 | 6d3e84a206ec427ac1304e70c7fadf817432956b | refs/heads/main | 2023-08-31T08:19:31.244055 | 2021-10-26T00:25:27 | 2021-10-26T00:25:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,652 | py | # -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.9.3 on Wed Sep 11 12:52:36 2019
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class PanelBasicInfo(wx.Panel):
    """wxGlade-generated panel with pulse metadata (name, UUID, creator,
    created date, comments), a machine-specs summary row, and global
    calculation parameters (resolution, bandwidth type, nucleus).

    NOTE: regions between "begin wxGlade" / "end wxGlade" markers are
    regenerated by the wxGlade designer -- prefer editing them there.
    """
    def __init__(self, *args, **kwds):
        # begin wxGlade: PanelBasicInfo.__init__
        kwds["style"] = kwds.get("style", 0)
        wx.Panel.__init__(self, *args, **kwds)
        self.label_18 = wx.StaticText(self, wx.ID_ANY, "Name:")
        self.TextName = wx.TextCtrl(self, wx.ID_ANY, "")
        self.label_19 = wx.StaticText(self, wx.ID_ANY, "UUID:")
        # Placeholder UUID text; expected to be replaced at runtime.
        self.StaticTextUUID = wx.StaticText(self, wx.ID_ANY, "aaaaaaaa-1111-bbbb-2222-cccccccccccc")
        self.label_20 = wx.StaticText(self, wx.ID_ANY, "Creator:")
        self.TextCreator = wx.TextCtrl(self, wx.ID_ANY, "")
        self.Label_22 = wx.StaticText(self, wx.ID_ANY, "Created:")
        self.StaticTextCreated = wx.StaticText(self, wx.ID_ANY, "01 January 1001", style=wx.ALIGN_CENTER)
        self.TextComment = wx.TextCtrl(self, wx.ID_ANY, "", style=wx.TE_MULTILINE | wx.TE_WORDWRAP)
        self.LabelMachineSpecsSummary = wx.StaticText(self, wx.ID_ANY, "SpecsNamePlaceholder")
        self.ButtonEditMachineSpecs = wx.Button(self, wx.ID_ANY, "Edit...")
        self.TextCalcResolution = wx.TextCtrl(self, wx.ID_ANY, "")
        self.label_3 = wx.StaticText(self, wx.ID_ANY, " steps")
        self.ChoiceBandwidthType = wx.Choice(self, wx.ID_ANY, choices=["FW at Half Height", "FW at Maximum", "FW at Minimum"])
        self.ChoiceGyromagneticNuclei = wx.Choice(self, wx.ID_ANY, choices=["1H", "13C", "17O", "19F", "23Na", "31P", "129Xe"])
        self.__set_properties()
        self.__do_layout()
        # Wire Enter-key and button events to the stub handlers below.
        self.Bind(wx.EVT_TEXT_ENTER, self.on_name, self.TextName)
        self.Bind(wx.EVT_TEXT_ENTER, self.on_investigator, self.TextCreator)
        self.Bind(wx.EVT_BUTTON, self.on_edit_machine_specs, self.ButtonEditMachineSpecs)
        # end wxGlade
    def __set_properties(self):
        # begin wxGlade: PanelBasicInfo.__set_properties
        self.TextName.SetMinSize((275, -1))
        self.TextComment.SetMinSize((100, 50))
        self.ChoiceBandwidthType.SetSelection(0)
        self.ChoiceGyromagneticNuclei.SetSelection(0)
        # end wxGlade
    def __do_layout(self):
        # begin wxGlade: PanelBasicInfo.__do_layout
        sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_22 = wx.BoxSizer(wx.VERTICAL)
        sizer_global_params = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, "Global Parameters"), wx.HORIZONTAL)
        grid_sizer_2 = wx.FlexGridSizer(3, 3, 5, 5)
        sizer_machine_specs = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, "Machine Specs"), wx.HORIZONTAL)
        sizer_1_copy = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, "Design Comments"), wx.HORIZONTAL)
        grid_sizer_1 = wx.FlexGridSizer(4, 2, 5, 5)
        grid_sizer_1.Add(self.label_18, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 0)
        grid_sizer_1.Add(self.TextName, 0, wx.ALIGN_BOTTOM | wx.EXPAND, 0)
        grid_sizer_1.Add(self.label_19, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 0)
        grid_sizer_1.Add(self.StaticTextUUID, 0, wx.ALIGN_CENTER_VERTICAL, 0)
        grid_sizer_1.Add(self.label_20, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 0)
        grid_sizer_1.Add(self.TextCreator, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 0)
        grid_sizer_1.Add(self.Label_22, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 0)
        grid_sizer_1.Add(self.StaticTextCreated, 0, wx.ALIGN_CENTER_VERTICAL, 0)
        sizer_22.Add(grid_sizer_1, 0, wx.BOTTOM | wx.EXPAND | wx.LEFT | wx.TOP, 6)
        sizer_1_copy.Add(self.TextComment, 1, wx.EXPAND | wx.LEFT, 2)
        sizer_22.Add(sizer_1_copy, 1, wx.BOTTOM | wx.EXPAND | wx.TOP, 6)
        sizer_machine_specs.Add(self.LabelMachineSpecsSummary, 0, wx.ALIGN_CENTER_VERTICAL, 0)
        sizer_machine_specs.Add(self.ButtonEditMachineSpecs, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 10)
        sizer_22.Add(sizer_machine_specs, 0, wx.ALL | wx.EXPAND, 4)
        label_1 = wx.StaticText(self, wx.ID_ANY, "Calculation Resolution: ")
        grid_sizer_2.Add(label_1, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT | wx.ALL, 2)
        grid_sizer_2.Add(self.TextCalcResolution, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 0)
        grid_sizer_2.Add(self.label_3, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 2)
        label_2 = wx.StaticText(self, wx.ID_ANY, "Bandwidth Type: ")
        grid_sizer_2.Add(label_2, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT | wx.ALL, 2)
        grid_sizer_2.Add(self.ChoiceBandwidthType, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 0)
        grid_sizer_2.Add((20, 20), 0, wx.ALIGN_CENTER_VERTICAL, 0)
        label_4 = wx.StaticText(self, wx.ID_ANY, "Gyromagnetic Nuclei: ")
        grid_sizer_2.Add(label_4, 0, wx.ALIGN_RIGHT, 0)
        grid_sizer_2.Add(self.ChoiceGyromagneticNuclei, 0, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 0)
        grid_sizer_2.Add((20, 20), 0, 0, 0)
        sizer_global_params.Add(grid_sizer_2, 1, 0, 0)
        sizer_22.Add(sizer_global_params, 0, wx.ALL | wx.EXPAND, 4)
        sizer_1.Add(sizer_22, 0, wx.ALL | wx.EXPAND, 4)
        sizer_1.Add((20, 20), 1, wx.EXPAND, 0)
        self.SetSizer(sizer_1)
        sizer_1.Fit(self)
        self.Layout()
        # end wxGlade
    # The three handlers below are wxGlade stubs: they only log and then
    # Skip() so the event keeps propagating; real logic is added elsewhere.
    def on_name(self, event):  # wxGlade: PanelBasicInfo.<event_handler>
        print("Event handler 'on_name' not implemented!")
        event.Skip()
    def on_investigator(self, event):  # wxGlade: PanelBasicInfo.<event_handler>
        print("Event handler 'on_investigator' not implemented!")
        event.Skip()
    def on_edit_machine_specs(self, event):  # wxGlade: PanelBasicInfo.<event_handler>
        print("Event handler 'on_edit_machine_specs' not implemented!")
        event.Skip()
# end of class PanelBasicInfo
class MyFrame2(wx.Frame):
    """wxGlade-generated top-level frame that hosts a single PanelBasicInfo.

    NOTE: regions between "begin wxGlade" / "end wxGlade" markers are
    regenerated by the wxGlade designer.
    """
    def __init__(self, *args, **kwds):
        # begin wxGlade: MyFrame2.__init__
        kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)
        self.PanelBasicInfo = PanelBasicInfo(self, wx.ID_ANY)
        self.__set_properties()
        self.__do_layout()
        # end wxGlade
    def __set_properties(self):
        # begin wxGlade: MyFrame2.__set_properties
        self.SetTitle("frame_3")
        # end wxGlade
    def __do_layout(self):
        # begin wxGlade: MyFrame2.__do_layout
        # The panel fills the whole frame.
        sizer_18 = wx.BoxSizer(wx.VERTICAL)
        sizer_18.Add(self.PanelBasicInfo, 1, wx.EXPAND, 0)
        self.SetSizer(sizer_18)
        sizer_18.Fit(self)
        self.Layout()
        # end wxGlade
# end of class MyFrame2
| [
"bsoher@briansoher.om"
] | bsoher@briansoher.om |
8a4c152a9aba8ac4fb54e47f7750930f9949ee4f | 27b4d1b7723845812111a0c6c659ef87c8da2755 | /数据结构和算法/栈和队列/01.py | 81c64a47b1be6f554346915349c8f601c803ed6b | [] | no_license | NAMEs/Python_Note | 59a6eff7b4287aaef04bd69fbd4af3faf56cccb4 | f560e00af37c4f22546abc4c2756e7037adcc40c | refs/heads/master | 2022-04-11T09:32:17.512962 | 2020-03-17T09:30:58 | 2020-03-17T09:30:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | """使用列表实现栈"""
class Stack(object):
    """A LIFO stack backed by a Python list (top of stack = end of list)."""

    def __init__(self):
        """Create an empty stack."""
        # Name-mangled attribute: not meant to be accessed from outside.
        self.__items = []

    def push(self, item):
        """Push *item* onto the top of the stack."""
        self.__items.append(item)

    def pop(self):
        """Remove and return the top element, or None if the stack is empty.

        Fix: the previous version discarded the popped value and raised
        IndexError on an empty stack; returning the element matches the
        usual stack contract, and None-on-empty mirrors peek().
        """
        if self.__items:
            return self.__items.pop()
        return None

    def peek(self):
        """Return the top element without removing it, or None when empty."""
        # Guard against an empty list so the index cannot raise.
        if self.__items:
            return self.__items[-1]
        return None

    def is_empty(self):
        """Return True when the stack holds no elements."""
        return not self.__items

    def size(self):
        """Return the number of elements on the stack."""
        return len(self.__items)
if __name__ == '__main__':
    # Quick manual exercise of the Stack class.
    s = Stack()
    s.push('one')
    s.push('two')
    s.push('three')
    print(s.size())   # 3 elements pushed
    print(s.peek())   # top is 'three'
    print("*" * 50)
    s.pop()
    print(s.size())   # 2 after the pop
print(s.peek()) | [
"1558255789@qq.com"
] | 1558255789@qq.com |
21098d5945a967a5ba915d18a33af1575af4d417 | 25c5f1deffd366f25eb8fc063f62cdb2218e8e9e | /app/views.py | cc8e69d2a1cff3382f98e20de2ac2f03efd57b4c | [
"MIT"
] | permissive | patrickbeeson/has-it-ever-been | f5ae3888dd198fee5876a6cdd79f96e77b5441ea | 68082c4fffafaf9b4488fa28465961d93d3dd85c | refs/heads/master | 2016-09-10T23:54:02.272713 | 2015-02-19T14:43:18 | 2015-02-19T14:43:18 | 31,019,153 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,730 | py | import os
import requests
from geopy.geocoders import Nominatim
from flask import Flask, render_template, flash
from . import app
from .forms import LocationForm
# Flask settings module is selected via the APP_SETTINGS env var.
app.config.from_object(os.environ['APP_SETTINGS'])
# Weather Underground credentials come from the Flask config.
WUNDERGROUND_BASE_URL = app.config['WUNDERGROUND_BASE_URL']
WUNDERGROUND_API_KEY = app.config['WUNDERGROUND_API_KEY']
# base urls
# Current-conditions endpoint; "<lat>,<lon>.json" is appended per request.
CONDITIONS_BASE_URL = '{}{}/conditions/q/'.format(
    WUNDERGROUND_BASE_URL,
    WUNDERGROUND_API_KEY
)
# Almanac endpoint: record high/low temperatures for a location.
ALMANAC_BASE_URL = '{}{}/almanac/q/'.format(
    WUNDERGROUND_BASE_URL,
    WUNDERGROUND_API_KEY
)
def geocode_location(location):
    """Geocode a free-form location string (e.g. a zip code) via Nominatim.

    Returns a geopy location object on success.  NOTE(review): on failure
    the exception is only printed and the *original string argument* is
    returned unchanged (the geocoder may also return None for no match),
    so callers that access ``.latitude`` will then raise -- confirm this
    fallback is intended.
    """
    try:
        geolocator = Nominatim()
        # Rebinds the parameter: after this, ``location`` is the geocoder result.
        location = geolocator.geocode(location)
    except Exception as e:
        print('There was a problem geocoding this address: {}'.format(e))
    return location
def get_current_temp(lat, lon):
    """Return the current temperature (whole degrees F) at the coordinates."""
    url = '{0}{1},{2}.json'.format(CONDITIONS_BASE_URL, lat, lon)
    payload = requests.get(url).json()
    # Weather Underground reports the Fahrenheit reading under temp_f.
    return int(payload['current_observation']['temp_f'])
def get_almanac_data(lat, lon):
    """Return record high/low temperatures (and the years set) for a location."""
    url = '{0}{1},{2}.json'.format(ALMANAC_BASE_URL, lat, lon)
    almanac = requests.get(url).json()['almanac']
    # Same keys the rest of the app reads; values are returned as the API
    # provides them (strings).
    return {
        'record_high': almanac['temp_high']['record']['F'],
        'record_low': almanac['temp_low']['record']['F'],
        'record_high_year': almanac['temp_high']['recordyear'],
        'record_low_year': almanac['temp_low']['recordyear'],
    }
@app.route('/', methods=['GET', 'POST'])
def home():
    """
    Homepage view.

    GET renders the location form; a valid POST geocodes the submitted
    location, fetches current and record temperatures, flashes a
    comparison message, and re-renders the page with the results.
    """
    form = LocationForm()
    if form.validate_on_submit():
        # 'hot' or (presumably) 'cold' -- TODO confirm against LocationForm.
        temp_choice = form.temp_choice.data
        location = geocode_location(form.location.data)
        lat = location.latitude
        lon = location.longitude
        print(lat, lon)
        current_temp = get_current_temp(lat, lon)
        almanac_data = get_almanac_data(lat, lon)
        # Record values arrive as strings; normalize to ints for arithmetic.
        record_high = int(almanac_data['record_high'])
        record_low = int(almanac_data['record_low'])
        record_high_year = int(almanac_data['record_high_year'])
        record_low_year = int(almanac_data['record_low_year'])
        # Pre-compute both signed differences for the messages below.
        temp_diff_high_above = current_temp - record_high
        temp_diff_high_below = record_high - current_temp
        temp_diff_low_above = current_temp - record_low
        temp_diff_low_below = record_low - current_temp
        if temp_choice == 'hot':
            if current_temp >= record_high:
                flash(
                    """It's never been this hot!
                    Currently, it's {} degrees, which is {} degrees above the
                    record of {}, set in {}.""".format(
                        current_temp,
                        temp_diff_high_above,
                        record_high,
                        record_high_year)
                )
            else:
                flash(
                    """It's been this hot before.
                    Currently, it's {} degrees, which is {} degrees below the
                    record of {}, set in {}.""".format(
                        current_temp,
                        temp_diff_high_below,
                        record_high,
                        record_high_year)
                )
        else:
            if current_temp <= record_low:
                flash(
                    """It's never been this cold before.
                    Currently, it's {} degrees, which is {} degrees below the
                    record of {}, set in {}.""".format(
                        current_temp,
                        temp_diff_low_below,
                        record_low,
                        record_low_year)
                )
            else:
                flash(
                    """It's been this cold before.
                    Currently, it's {} degrees, which is {} degrees above the
                    record of {}, set in {}.""".format(
                        current_temp,
                        temp_diff_low_above,
                        record_low,
                        record_low_year)
                )
        return render_template(
            'index.html',
            form=form,
            current_temp=current_temp,
            record_high=record_high,
            record_low=record_low
        )
    return render_template('index.html', form=form)
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page together with the matching status code."""
    return render_template('404.html'), 404
| [
"patrickbeeson@gmail.com"
] | patrickbeeson@gmail.com |
8e34857047e004f9363625d66d51110bd33f62f4 | e97e727972149063b3a1e56b38961d0f2f30ed95 | /test/test_invoice_resource.py | 44859b4f3da61041966f881e6c41b4df750ba0f9 | [] | no_license | knetikmedia/knetikcloud-python-client | f3a485f21c6f3e733a864194c9acf048943dece7 | 834a24415385c906732437970db105e1bc71bde4 | refs/heads/master | 2021-01-12T10:23:35.307479 | 2018-03-14T16:04:24 | 2018-03-14T16:04:24 | 76,418,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | # coding: utf-8
"""
Knetik Platform API Documentation latest
This is the spec for the Knetik API. Use this in conjunction with the documentation found at https://knetikcloud.com.
OpenAPI spec version: latest
Contact: support@knetik.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import knetik_cloud
from knetik_cloud.rest import ApiException
from knetik_cloud.models.invoice_resource import InvoiceResource
class TestInvoiceResource(unittest.TestCase):
    """Unit test stubs for the InvoiceResource model."""

    def setUp(self):
        # No fixtures are required for these stubs.
        pass

    def tearDown(self):
        # Nothing was allocated in setUp, so nothing to release.
        pass

    def testInvoiceResource(self):
        """Placeholder test for InvoiceResource."""
        # FIXME: construct object with mandatory attributes with example values
        # model = knetik_cloud.models.invoice_resource.InvoiceResource()
        pass
if __name__ == '__main__':
unittest.main()
| [
"shawn.stout@knetik.com"
] | shawn.stout@knetik.com |
1f0e2aef17b5c302c10d50c0ebfc0715076ffcc0 | 9d69d37c930821f4ebf265f3c1f214c2cc558502 | /calculate_bflops.py | 5a98c9378f34132f838e9850fc398a1379cc0d5b | [
"Apache-2.0"
] | permissive | mayanks888/mAP | 255b35e25384659dfaf97e6e3eec53bafb5bb3cc | 7e6a6c4b916223e737d30c76ebb11a75ed15d984 | refs/heads/master | 2023-01-29T11:05:21.099541 | 2020-12-13T20:54:44 | 2020-12-13T20:54:44 | 278,285,070 | 0 | 0 | Apache-2.0 | 2020-07-09T06:44:56 | 2020-07-09T06:44:55 | null | UTF-8 | Python | false | false | 335 | py | from torchvision.models import resnet18
from torchscope import scope
from torchprofile import profile_macs
import torch
from torchvision.models import resnet18  # NOTE(review): duplicate of the import on the first line
# Build a stock ResNet-18 and one dummy ImageNet-sized input batch.
model = resnet18()
inputs = torch.randn(1, 3, 224, 224)
# model = resnet18()
# scope(model, input_size=(3, 224, 224))
# Count multiply-accumulate operations for a single forward pass.
macs = profile_macs(model, inputs)
# Report in units of 10^9 MACs (GMACs; loosely "BFLOPs" per the file name).
print(macs/10**9)
| [
"mayank.sati@gwmidc.in"
] | mayank.sati@gwmidc.in |
eb24c3689c4d0677a31308afdf0026604e8fe3e4 | e17966d3831e9f99527fb44c50c77c90e08694de | /cmasher/__version__.py | 21d09d2c21a6dc7c8396e5271ac3d7421f787025 | [
"BSD-3-Clause"
] | permissive | ajdittmann/CMasher | 85e4f94e45f962c247d2f8e04a1e966b01b77248 | 1ad6a5eaa3b473ef5eb3d56dfd3ecb8e887f16cd | refs/heads/master | 2023-03-27T12:46:30.052679 | 2021-03-22T23:56:47 | 2021-03-22T23:56:47 | 277,986,917 | 0 | 0 | BSD-3-Clause | 2020-07-08T04:10:55 | 2020-07-08T04:10:54 | null | UTF-8 | Python | false | false | 194 | py | # -*- coding: utf-8 -*-
"""
CMasher Version
===============
Stores the different versions of the *CMasher* package.
"""
# %% VERSIONS
# Default/Latest/Current version
__version__ = '1.5.10'
| [
"ellert_vandervelden@outlook.com"
] | ellert_vandervelden@outlook.com |
3d06a66d5f6beb9502767f673cf7aa21a8a08c22 | 054043e4b151459235c63cca32fc54e16ad4d619 | /register/migrations/0001_initial.py | f5f43a1b970f5b9ad01324d6dd5321f5a1fefb4e | [] | no_license | venugopalgodavarthi/model-26-6-2021 | 82c19d67cff39979ddefdd1b65004d55a3ad9afe | b742218f95ec393832c17c3201171789bf0bb4d0 | refs/heads/main | 2023-06-01T09:41:44.132863 | 2021-06-26T13:09:15 | 2021-06-26T13:09:15 | 380,504,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | # Generated by Django 3.2.4 on 2021-06-24 11:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the register app: creates the student table."""
    # First migration of this app; no prior state to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='student',
            fields=[
                # studentid doubles as the primary key (no auto "id" column).
                ('studentid', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=20, null=True)),
                ('email', models.EmailField(max_length=30, null=True, unique=True)),
                ('phone', models.BigIntegerField()),
            ],
        ),
    ]
| [
"venugopalgodavarthi@gmail.com"
] | venugopalgodavarthi@gmail.com |
cc14fbc13731e05cccf742c316368597a01da85f | 91f4078045a57eaaafe0b172909d7041e829941c | /arjuna/interact/gui/dispatcher/driver/melement.py | 8675c2cde6286c26f89c26b18244d30d5402e30b | [
"Apache-2.0"
] | permissive | amiablea2/arjuna | 0d06d1dfb34309f4b6f39b17298f7acb6c3c48c9 | af74e0882216881ceca0a10f26442165ffc43287 | refs/heads/master | 2023-08-21T20:04:30.416303 | 2021-10-27T06:41:40 | 2021-10-27T06:41:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | # This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MultiElement:
    """Read-only wrapper over a sequence of located GUI elements."""
    def __init__(self, elements):
        # The element sequence is stored as-is; no copy is made.
        self.__elements = elements
    def get_size(self):
        """Return the number of wrapped elements."""
        return len(self.__elements)
def get_element_at_index(self, index):
return self.__elements[index] | [
"rahulverma81@gmail.com"
] | rahulverma81@gmail.com |
267c39e45ebd826ade1f9d2bbfb65815c58ef979 | de17634e6b149d5828c1c78f7f5f5e1f6c17c4d0 | /nnvm/tvm/topi/python/topi/cuda/reduction.py | 932f2aae30988d071f5448db84d0ce46584e99b9 | [
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | starimpact/mxnet_v1.0.0 | e135cc9e4c2711314d03cf1281a72b755f53144e | fcd6f7398ef811c3f8b01e7c9c16fb25c8d202bd | refs/heads/bv1.0.0 | 2022-11-10T09:09:11.966942 | 2018-07-13T04:59:30 | 2018-07-13T04:59:30 | 120,399,107 | 8 | 4 | Apache-2.0 | 2022-11-02T20:24:32 | 2018-02-06T03:54:35 | C++ | UTF-8 | Python | false | false | 4,553 | py | # pylint: disable=invalid-name,unused-variable,too-many-locals,len-as-condition
"""Schedule for reduce operators"""
from __future__ import absolute_import as _abs
import tvm
from .. import tag
from .. import generic
def _schedule_reduce(op, sch, is_idx_reduce=False):
    """Schedule one reduce op (or an index reduce such as argmax/argmin).

    All reduce axes are fused, split by the thread count and rfactored
    across threadIdx.x; when the output keeps spatial axes they are
    distributed over blockIdx.x / threadIdx.y.  Mutates and returns *sch*.
    """
    if is_idx_reduce:
        # For index reduces, the actual reduction is the op feeding this one.
        data_out = op.input_tensors[0]
    else:
        # NOTE(review): data_in is assigned but never used afterwards.
        data_in = op.input_tensors[0]
        data_out = op.output(0)
    assert len(sch[data_out].op.reduce_axis) > 0, "reduce_axis must be bigger than zero!"
    if len(sch[data_out].op.axis) > 0:
        # Output keeps spatial dimensions: one partial reduction per element.
        all_reduce = False
        num_thread = 32
        target = tvm.target.current_target()
        if target and target.target_name == "opencl":
            # without it, CL_INVALID_WORK_GROUP_SIZE occured when running test_topi_reduce.py
            # don't know why
            num_thread = 16
        block_x = tvm.thread_axis("blockIdx.x")
        thread_x = tvm.thread_axis((0, num_thread), "threadIdx.x")
        thread_y = tvm.thread_axis((0, num_thread), "threadIdx.y")
    else:
        # Reduction to a scalar: use as many threads as the target allows.
        all_reduce = True
        num_thread = tvm.target.current_target(allow_none=False).max_num_threads
        thread_x = tvm.thread_axis((0, num_thread), "threadIdx.x")
    # Fuse and refactor the reduce axis
    fused_reduce = sch[data_out].fuse(*[sch[data_out].op.reduce_axis[i]
                                        for i in range(len(sch[data_out].op.reduce_axis))])
    ko, ki = sch[data_out].split(fused_reduce, factor=num_thread)
    if is_idx_reduce:
        # rfactor of an index reduce yields (index, value) tensors.
        data_out_rf, _ = sch.rfactor(data_out, ki)
    else:
        data_out_rf = sch.rfactor(data_out, ki)
    tx = sch[data_out].op.reduce_axis[0]
    sch[data_out].bind(tx, thread_x)
    # Compute the rfactored partial reductions inside the bound thread axis.
    sch[data_out_rf].compute_at(sch[data_out], tx)
    if is_idx_reduce:
        real_output = op.output(0)
        temp_idx_input = data_out.op.output(0)
        temp_val_input = data_out.op.output(1)
    else:
        real_output = data_out
    if not all_reduce:
        # Fuse and split the axis
        fused_outer = sch[real_output].fuse(*[sch[real_output].op.axis[i]
                                              for i in range(len(sch[real_output].op.axis))])
        bx, outer_in = sch[real_output].split(fused_outer, factor=num_thread)
        # Bind the axes to threads and blocks
        sch[real_output].bind(outer_in, thread_y)
        sch[real_output].bind(bx, block_x)
        if is_idx_reduce:
            sch[temp_idx_input].compute_at(sch[real_output], outer_in)
            sch[temp_val_input].compute_at(sch[real_output], outer_in)
    else:
        if is_idx_reduce:
            sch[temp_idx_input].compute_at(sch[real_output],
                                           sch[real_output].op.axis[0])
            sch[temp_val_input].compute_at(sch[real_output],
                                           sch[real_output].op.axis[0])
        # Only the thread with threadIdx.x == 0 stores the final scalar.
        sch[real_output].set_store_predicate(thread_x.equal(0))
    return sch
@generic.schedule_reduce.register(["cuda", "gpu"])
def schedule_reduce(outs):
    """Schedule for inject->reduce->bcast ops.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of reduce in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
    sch = tvm.create_schedule([x.op for x in outs])
    def traverse_before_reduce(operator):
        # Inline every injective producer that feeds the reduction.
        if isinstance(operator, tvm.tensor.PlaceholderOp):
            return
        elif tag.is_injective(operator.tag):
            sch[operator].compute_inline()
            for tensor in operator.input_tensors:
                traverse_before_reduce(tensor.op)
        else:
            raise RuntimeError("Unsupported operator: %s" % operator.tag)
    def traverse_after_reduce(operator):
        # Dispatch on the op tag; elementwise ops *after* the reduce are
        # explicitly unsupported here.
        if tag.is_broadcast(operator.tag):
            raise RuntimeError("Not yet support ewise after reduce")
        elif operator.tag == 'comm_reduce':
            _schedule_reduce(operator, sch, is_idx_reduce=False)
            for tensor in operator.input_tensors:
                traverse_before_reduce(tensor.op)
        elif operator.tag == 'comm_reduce_idx':
            # Index reduce: the producers sit one op further upstream.
            _schedule_reduce(operator, sch, is_idx_reduce=True)
            for tensor in operator.input_tensors[0].op.input_tensors:
                traverse_before_reduce(tensor.op)
        else:
            raise RuntimeError("Unsupported operator: %s" % operator.tag)
    traverse_after_reduce(outs[0].op)
    return sch
| [
"mingzhang@deepglint.com"
] | mingzhang@deepglint.com |
5083a3578474d87dfd88ab17800d45c3fb795027 | c492c405f0535cb4eada74d9099b395f8e9701c3 | /demo/views.py | 632317c9aed9ecab66fb78ab81ca23bd2486876c | [] | no_license | Spirovanni/PyTut | a6a0713dcd100bbd35af21022e5b95f0894badf0 | 51489b7550ad8b4a70548de268624f806f827dc4 | refs/heads/master | 2020-09-16T15:39:55.081384 | 2019-11-29T03:39:42 | 2019-11-29T03:39:42 | 223,816,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from rest_framework import viewsets
from .serializers import BookSerializer
from .models import Book
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
class BookViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints (list/retrieve/create/update/delete) for Book."""
    serializer_class = BookSerializer
    queryset = Book.objects.all()
    # Clients authenticate via DRF token auth and must be logged in.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
| [
"blackshieldsx@gmail.com"
] | blackshieldsx@gmail.com |
0a1c16f58a6f77dcffb925a339c0b7ba09f6c60d | 472578974401c83509d81ea4d832fc3fd821f295 | /python资料/day8.10/day08/demo03.py | edfb98720234e86ad0b52226fac53d5e3ba8c0b1 | [
"MIT"
] | permissive | why1679158278/python-stu | f038ec89e9c3c7cc80dc0ff83b76e7c3078e279e | 0d95451f17e1d583d460b3698047dbe1a6910703 | refs/heads/master | 2023-01-05T04:34:56.128363 | 2020-11-06T09:05:16 | 2020-11-06T09:05:16 | 298,263,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | """
返回值
函数的结果
def 函数名():
....
return 数据
变量 = 函数名()
"""
# Currency-conversion helper: US dollars -> Chinese yuan (renminbi).
def usd_to_cny(usd, rate=6.99):
    """
    Convert US dollars to Chinese yuan.

    :param usd: amount in US dollars
    :param rate: USD->CNY exchange rate; defaults to 6.99, the value that
        was previously hard-coded, so all existing calls are unchanged
    :return: the equivalent amount in yuan
    """
    # Plain multiplication by the (now configurable) exchange rate.
    cny = usd * rate
    return cny
# Capture the returned yuan amount and display it.
result = usd_to_cny(5)
print(result)
| [
"1679158278@qq.com"
] | 1679158278@qq.com |
f90199df94ec7861984ba2c0f35500a518d153f9 | 89888a3a314cf0511a3a6b6b1570d869c02751ba | /Libs/read_test_case.py | 80c9675296c2f7df3fa3b3829fc4396ea5271fa1 | [] | no_license | Shyam-Personal/framework_task | 15b751d91de4a236a78f53deb40b06d457a73abb | d11157d93ef1265f89aa0992f920929f1cf9f9d3 | refs/heads/master | 2021-04-12T12:24:52.867416 | 2018-03-23T18:44:44 | 2018-03-23T18:44:44 | 126,526,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | import json
import os
class read_test_case(object):
    """Load a JSON test-case file and read values from it."""
    def __init__(self, path, file):
        """
        :param path: directory that contains the "testcases" folder
        :param file: test-case file name inside <path>/testcases/
        """
        self.file = file
        self.path = path
    def tc_dict(self):
        """Parse the test-case file and return its contents as a dict."""
        # Use a context manager so the handle is always closed (the
        # previous version opened the file without ever closing it).
        with open(os.path.join(self.path, "testcases", self.file)) as fh:
            return json.load(fh)
    def key_value(self, d1, key):
        """Return d1[key], or '' when the key is missing."""
        # dict.get expresses the old try/except KeyError fallback directly.
        return d1.get(key, "")
if __name__ == "__main__":
    # Fix: the constructor requires BOTH a base directory and a file name;
    # the old call read_test_case("test1.tc") raised TypeError (missing
    # the 'file' argument).  Use the script's own directory as the base.
    obj = read_test_case(os.path.dirname(os.path.abspath(__file__)), "test1.tc")
    d1 = obj.tc_dict()
print(obj.key_value(d1, "name")) | [
"shyamdeshmukh1@gmail.com"
] | shyamdeshmukh1@gmail.com |
1fec5ed74a5a72d21571f08241b32bb706e5577a | 72e77e4de5c9b7eb19974cf7ece1e09fb5bdeda9 | /grpc/demo/grpc_product/greeter_client.py | f601ecc18372798cbaa6310b8b20fd6e15898612 | [] | no_license | fei090620/learning_record | 966dc08e7da5958f63a34af99d597fc609e94b6a | 1dfeeb2044a101a4427277988e178b9f11b57c42 | refs/heads/master | 2020-04-01T20:32:50.339577 | 2018-03-26T08:58:15 | 2018-03-26T08:58:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,308 | py | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter client."""
from __future__ import print_function
import grpc
import sys
import helloworld_pb2
import helloworld_pb2_grpc
def run():
    """Exercise the TLS-secured Greeter service: one normal call, one with a
    deliberately tiny deadline, and one with a name the server rejects."""
    # read in certificate
    with open('server.crt') as f:
        trusted_certs = f.read().encode()
    # channel = grpc.insecure_channel('localhost:50051')
    # create credentials
    credentials = grpc.ssl_channel_credentials(root_certificates=trusted_certs)
    channel = grpc.secure_channel('localhost:50051', credentials)
    try:
        # Block until the channel connects, giving up after 10 seconds.
        grpc.channel_ready_future(channel).result(timeout=10)
    except grpc.FutureTimeoutError:
        sys.exit('Error connecting to server')
    else:
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        # 1) Normal request/response round trip.
        try:
            response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
        except grpc.RpcError as e:
            print('Greeter client failed with {0}: {1}'.format(e.code(), e.details()))
        else:
            print("Greeter client received: " + response.message)
        # timeout
        # 2) Sub-microsecond deadline: expected to exceed the deadline and
        #    surface as an RpcError.
        try:
            response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'), timeout=0.0000001)
        except grpc.RpcError as e:
            print('Greeter client failed with {0}: {1}'.format(e.code(), e.details()))
        else:
            print("Greeter client received: " + response.message)
        # error
        # 3) name='error' -- presumably the demo server raises for this
        #    input; TODO confirm against the greeter_server implementation.
        try:
            response = stub.SayHello(helloworld_pb2.HelloRequest(name='error'))
        except grpc.RpcError as e:
            print('Greeter client failed with {0}: {1}'.format(e.code(), e.details()))
        else:
            print("Greeter client received: " + response.message)
if __name__ == '__main__':
run()
| [
"bestrenxs@gmail.com"
] | bestrenxs@gmail.com |
a2ff7f7f5884f9e66092d17bd72c55aa4c224e5a | 008ca163e1d59a0f6c2895c2b8ec1e1c42f6b2e8 | /programacionconcodigo/lab4/obsoleto/pruebas_a_work.py | 52af7779409ba3628d6bb0fab0771218d96c2853 | [] | no_license | hortegab/backup_pruebas_gnuradio | d7c21a627e4e29a1a04b763e6f81a6c838533d60 | 1fefca379dbd24ca66253ab5623f6c95fc1598b5 | refs/heads/master | 2023-08-03T05:47:03.130312 | 2021-09-10T15:18:32 | 2021-09-10T15:18:32 | 181,668,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | import numpy as np
# Define N=8 samples for two incoming signals.
# If this works for real-valued signals it will also work for complex ones,
# so only the real-valued case is exercised here.
senal0=np.array([1.,2.,3.,4.,5.,6.,7.,8.])
senal1=np.array([2.,2.,2.,2.,2.,2.,2.,2.])
# Incoming signals actually arrive in matrix form (one row per stream).
in_sig= np.array([senal0,senal1])
# The output signal is also a matrix, even when it has only one row.
out_sig=np.array([[0.,0.,0.,0.,0.,0.,0.,0.]])
# Code under test; no "self" is used because this is not a class
# (presumably mimicking a GNU Radio block's work() -- confirm upstream).
escala=0.5
def work(input_items, output_items):
    # Writes (in0 + in1) * escala into the output row, in place.
    in0 = input_items[0]
    in1 = input_items[1]
    out0 = output_items[0]
    out0[:]=(in0+in1)*escala
    return len(out0)
# Finally, exercise the function (Python 2 print statements).
d=work(in_sig,out_sig)
print "senal en la entrada 1: ", in_sig[0]
print "senal en la entrada 2: ", in_sig[1]
print "senal en la salida: ", out_sig[0] | [
"comdiguis@saber.uis.edu.co"
] | comdiguis@saber.uis.edu.co |
b21e067038dc1a2d46c9219966c0c1225487096e | d3cbf02ebab6a3748cceaf56757b0ec7390921ce | /cta_fof/migrations/0008_holding_trade_market.py | 3bc46ec9e25c88442b9327a6e3103500d099eda6 | [] | no_license | rexcorp01/inv | 69d6ec96c1f9206b3ae14b6b13bd3123e13ed3a6 | 99462cea1f8b027bc9e38d79a99e9194d1e72548 | refs/heads/master | 2023-09-05T11:46:48.804587 | 2021-11-04T06:34:16 | 2021-11-04T06:34:16 | 426,082,304 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # Generated by Django 3.2.3 on 2021-06-04 19:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cta_fof', '0007_holding_trade_market'),
]
operations = [
]
| [
"31942935+PengchuanC@users.noreply.github.com"
] | 31942935+PengchuanC@users.noreply.github.com |
cc160b1b0478446ba0daec4a0fe9e63453df3d96 | 056bbd43d6ef47d1f7da89a7e49c0853911cc221 | /ABC/ABC140-B.py | 5d7c1a905b93b72314bc9a7ea7cc8b140f98cc42 | [] | no_license | Haruka0522/AtCoder | d7c2e29d9a5ded2cf60bb4fba49fc0d2e14e0edb | 67c5bc188e581379e401c7c0a8a81be74a6e231d | refs/heads/master | 2023-07-28T09:56:51.273614 | 2020-09-19T13:18:01 | 2020-09-19T13:18:01 | 185,110,789 | 0 | 0 | null | 2023-07-06T21:57:23 | 2019-05-06T02:35:03 | Python | UTF-8 | Python | false | false | 303 | py | N = int(input())
# Read the three integer arrays; values in A are 1-based indices into B and C.
A_list = list(map(int,input().split()))
B_list = list(map(int,input().split()))
C_list = list(map(int,input().split()))
ans = 0
for i in range(N):
    # Base score for the i-th item.
    ans += B_list[A_list[i]-1]
    if i < N-1:
        # Bonus C[A[i]-1] when the next value is exactly A[i]+1.
        if A_list[i]+1==A_list[i+1]:
            ans += C_list[A_list[i]-1]
print(ans)
| [
"harukapc1@gmail.com"
] | harukapc1@gmail.com |
a67ba5fe1e037e2d8c74ec71062fd983dcb70c91 | 0dcf78e319956f2cb2327c5cb47bd6d65e59a51b | /Python3/Array/CoinChange2/test_518.py | 6f52aa1c858782fa749a50af7e7f7c192a94306b | [] | no_license | daviddwlee84/LeetCode | 70edd09a64a6f61492aa06d927e1ec3ab6a8fbc6 | da1774fd07b7326e66d9478b3d2619e0499ac2b7 | refs/heads/master | 2023-05-11T03:16:32.568625 | 2023-05-08T05:11:57 | 2023-05-09T05:11:57 | 134,676,851 | 14 | 4 | null | 2018-05-29T14:50:22 | 2018-05-24T07:18:31 | Python | UTF-8 | Python | false | false | 422 | py | from Naive518 import Solution as naive
from DP518 import Solution as DP
# (amount, coins, expected combination count) triples shared by both tests.
testcase = [
    (5, [1, 2, 5], 4),
    (3, [2], 0),
    (10, [10], 1),
    (0, [], 1),
    # (500, [3, 5, 7, 8, 9, 10, 11], 87)
]
def test_naive():
    # Run every case against the brute-force implementation.
    for amount, coins, ans in testcase:
        assert naive().change(amount, coins) == ans
def test_DP():
    # Same cases against the dynamic-programming implementation.
    for amount, coins, ans in testcase:
        assert DP().change(amount, coins) == ans
| [
"daviddwlee84@gmail.com"
] | daviddwlee84@gmail.com |
9115b5c288687fec4c86da3869e4d1a2783962e3 | e7b7505c084e2c2608cbda472bc193d4a0153248 | /LeetcodeNew/Tree/LC_958_Check_Completeness_of_a_Binary_Tree.py | d04c51987f2442e2b5b1c449ddcd3c2509873dc2 | [] | no_license | Taoge123/OptimizedLeetcode | 8e5c1cd07904dfce1248bc3e3f960d2f48057a5d | 3e50f6a936b98ad75c47d7c1719e69163c648235 | refs/heads/master | 2023-02-27T21:13:40.450089 | 2023-02-07T04:11:09 | 2023-02-07T04:11:09 | 170,044,224 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,987 | py |
"""
Given a binary tree, determine if it is a complete binary tree.
Definition of a complete binary tree from Wikipedia:
In a complete binary tree every level, except possibly the last,
is completely filled, and all nodes in the last level are as far left as possible.
It can have between 1 and 2h nodes inclusive at the last level h.
Example 1:
Input: [1,2,3,4,5,6]
Output: true
Explanation: Every level before the last is full (ie. levels with node-values {1} and {2, 3}),
and all nodes in the last level ({4, 5, 6}) are as far left as possible.
Example 2:
Input: [1,2,3,4,5,null,7]
Output: false
Explanation: The node with value 7 isn't as far left as possible.
"""
"""
Use BFS to do a level order traversal,
add childrens to the bfs queue,
until we met the first empty node.
For a complete binary tree,
there should not be any node after we met an empty one.
"""
import collections
class SolutionLee:
    def isCompleteTree(self, root):
        """Level-order scan: complete iff nothing non-null follows the first null.

        Children (including nulls) are appended until a null reaches the
        scan position; the tree is complete when every remaining entry
        from that position onward is also null.
        """
        seen = [root]
        index = 0
        while seen[index]:
            node = seen[index]
            seen.extend((node.left, node.right))
            index += 1
        # Only nulls may remain once the first null is reached.
        return not any(seen[index:])
"""
Traverse the tree one level at a time, adding both children of each node to the queue as we go.
Keep a count of how many non-null nodes are left in the queue.
The first time we encounter a null node popped from the queue,
if the counter shows nothing left, the tree is complete - otherwise not.
"""
class Solution2:
    def isCompleteTree(self, root):
        """Return True iff the binary tree rooted at `root` is complete.

        Tracks how many non-null nodes are still waiting in the queue;
        when the first null is dequeued, the tree is complete exactly
        when no non-null node remains behind it. An empty tree counts
        as complete.
        """
        remaining = 1 if root else 0
        # deque.popleft() is O(1); the previous list.pop(0) was O(n) per
        # pop, making the whole traversal accidentally quadratic.
        queue = collections.deque([root])
        while queue:
            node = queue.popleft()
            if not node:
                return remaining == 0
            remaining -= 1
            queue.append(node.left)
            queue.append(node.right)
            if node.left:
                remaining += 1
            if node.right:
                remaining += 1
        return True
class Solution3:
    def isCompleteTree(self, root):
        """Return True iff the tree is complete.

        NOTE: this variant treats an empty tree as *not* complete
        (returns False), unlike the other solutions in this module.
        """
        if not root:
            return False
        queue = collections.deque([root])
        # Expand nodes in level order until a null reaches the queue front.
        while queue[0]:
            current = queue.popleft()
            queue.append(current.left)
            queue.append(current.right)
        # Complete iff only nulls remain after the first null.
        return not any(queue)
class SolutionOfficial:
    def isCompleteTree(self, root):
        """Label each slot with its heap index (root=1, children 2v and 2v+1);
        the tree is complete iff the last assigned label equals the slot count.
        """
        labelled = [(root, 1)]
        cursor = 0
        while cursor < len(labelled):
            node, code = labelled[cursor]
            cursor += 1
            if node:
                labelled.append((node.left, 2 * code))
                labelled.append((node.right, 2 * code + 1))
        last_code = labelled[-1][1]
        return last_code == len(labelled)
class Solution:
    def isCompleteTree(self, root):
        """Return True iff no non-null node follows a null in level order."""
        if not root:
            return True
        queue = collections.deque([root])
        gap_seen = False
        while queue:
            node = queue.popleft()
            if not node:
                gap_seen = True
            elif gap_seen:
                # A real node after a hole: the last level is not left-packed.
                return False
            else:
                queue.append(node.left)
                queue.append(node.right)
        return True
| [
"taocheng984@gmail.com"
] | taocheng984@gmail.com |
7eac3f128300bda11262a4c11f5c619d83e8e1a7 | 9ff036f0cb2c5a9850af69181d016f5ca11ab63d | /week-03/day-3/envelpoe_star.py | a263f191630fd903dfd5fb21b7cb7c86b7779640 | [] | no_license | balintnem3th/balintnem3th | bd48f8aa7e79c718049f50e52bdd6be9ccf7b251 | 58bf033314466d96ed5b81b3642f04149fc463b0 | refs/heads/master | 2021-05-11T08:06:20.545375 | 2018-03-25T23:58:32 | 2018-03-25T23:58:32 | 118,041,790 | 0 | 0 | null | 2018-01-18T21:42:43 | 2018-01-18T21:42:43 | null | UTF-8 | Python | false | false | 815 | py | from tkinter import *
# Build the Tk main window and a 300x300 canvas that all drawing uses.
root = Tk()
canvas = Canvas(root, width='300', height='300')
canvas.pack()
# reproduce this:
# [https://github.com/greenfox-academy/teaching-materials/blob/master/workshop/drawing/envelope-star/r2.png]
def q1_eye_lash(z1, z2, z3, z4):
    """Draw one green line segment from (z1, z2) to (z3, z4) on the global canvas."""
    canvas.create_line(z1, z2, z3, z4, fill="green")
# Four fans of "eyelash" lines building the envelope star, one per region.
# Each fan anchors one endpoint on the horizontal midline (y = 150) and
# slides the other endpoint along an edge of the 300x300 canvas.
# NOTE(review): z3 = 150-x/20 moves the second endpoint's x by less than
# one pixel over the whole fan -- possibly a typo for a larger step;
# verify intent against the reference image linked above.
for x in range(15):
    z1 = x*10
    z2 = 150
    z3 = 150-x/20
    z4 = 150-x*10
    q1_eye_lash(z1,z2,z3,z4)
# Second fan: start point slides left from the centre, end slides up.
for x in range(15):
    z1 = 150-x*10
    z2 = 150
    z3 = 150+x/20
    z4 = 300-x*10
    q1_eye_lash(z1,z2,z3,z4)
# Third fan: mirrored to the right-hand half of the midline.
for x in range(15):
    z1 = 150+x*10
    z2 = 150
    z3 = 150-x/20
    z4 = 300-x*10
    q1_eye_lash(z1,z2,z3,z4)
# Fourth fan: 16 steps so the start point reaches all the way to x = 0.
for x in range(16):
    z1 = 300-x*10
    z2 = 150
    z3 = 150-x/20
    z4 = 150-x*10
    q1_eye_lash(z1,z2,z3,z4)
root.mainloop() | [
"balint.nem3th@gmail.com"
] | balint.nem3th@gmail.com |
f20d30ef45d86d65e29d9861c69fcf41644f89ff | e7d65f8773a8c736fc9e41e843d7da6da5cc2e0b | /examples/example_plot_intact.py | 2f4007416734befcc832cc26144b67cef097ef02 | [
"BSD-3-Clause"
] | permissive | hanbei969/Py3plex | 768e86b16ca00044fcb4188e01edf32c332c8a2a | 1ef3e0e6d468d24bd6e6aec3bd68f20b9d9686bb | refs/heads/master | 2021-01-03T18:19:24.049457 | 2020-02-12T16:51:14 | 2020-02-12T16:51:14 | 240,188,307 | 1 | 0 | BSD-3-Clause | 2020-02-13T05:57:16 | 2020-02-13T05:57:16 | null | UTF-8 | Python | false | false | 2,408 | py | ### simple plot of a larger file
from py3plex.visualization.multilayer import *
from py3plex.visualization.colors import all_color_names,colors_default
from py3plex.core import multinet

## string layout for larger network -----------------------------------
# Load the IntAct interaction network (gpickle) and add dummy layers so
# the single-layer graph fits the multilayer API.
multilayer_network = multinet.multi_layer_network().load_network("../datasets/intact02.gpickle",input_type="gpickle",directed=False).add_dummy_layers()
multilayer_network.basic_stats()

## use embedding to first initialize the nodes..
from py3plex.wrappers import train_node2vec_embedding
from py3plex.visualization.embedding_visualization import embedding_visualization,embedding_tools
from py3plex.algorithms.community_detection import community_wrapper as cw
from collections import Counter

## call a specific n2v compiled binary
train_node2vec_embedding.call_node2vec_binary("../datasets/IntactEdgelistedges.txt","../datasets/test_embedding.emb",binary="../bin/node2vec",weighted=False)

## preprocess and check embedding -- for speed, install parallel tsne from https://github.com/DmitryUlyanov/Multicore-TSNE, py3plex knows how to use it.
multilayer_network.load_embedding("../datasets/test_embedding.emb")
output_positions = embedding_tools.get_2d_coordinates_tsne(multilayer_network,output_format="pos_dict")

## custom layouts are part of the custom coordinate option
layout_parameters = {"iterations":200}
layout_parameters['pos'] = output_positions ## assign parameters
network_colors, graph = multilayer_network.get_layers(style="hairball")
partition = cw.louvain_communities(multilayer_network)

## select top n communities by size
top_n = 10
# FIX: the previous code took the first `top_n` Counter keys in insertion
# order, which are NOT the largest communities; most_common() actually
# sorts communities by member count, matching the comment above.
partition_counts = Counter(partition.values())
top_n_communities = [community for community, _ in partition_counts.most_common(top_n)]

## assign node colors: one distinct color per top community, black otherwise
color_mappings = dict(zip(top_n_communities,[x for x in colors_default if x != "black"][0:top_n]))
network_colors = [color_mappings[partition[x]] if partition[x] in top_n_communities else "black" for x in multilayer_network.get_nodes()]

f = plt.figure()
# gravity=0.2,strongGravityMode=False,barnesHutTheta=1.2,edgeWeightInfluence=1,scalingRatio=2.0
hairball_plot(graph,network_colors,layout_algorithm="custom_coordinates",layout_parameters=layout_parameters,nodesize=0.02,alpha_channel=0.30,edge_width=0.001,scale_by_size=False)
f.savefig("../datasets/intact.png", bbox_inches='tight',dpi=300)
f.savefig("../datasets/intact.pdf", bbox_inches='tight')
| [
"skrljblaz@gmail.com"
] | skrljblaz@gmail.com |
bc7d05d91be45b9c55a3ff1ba2deff1a32941e7b | e21210c8279f4a3d98c68b87c1c1e2f961bf7991 | /src/analyze/logistic_regression.py | 2b6f226d7590e78efb6ce469d0958665bea2ff39 | [
"MIT"
] | permissive | jiangqn/TextVAE | 1a14830a123597ef710ee9024bdd18166a69d2c1 | e10df55ad145df78467b664889c8d84fa53a8e21 | refs/heads/master | 2023-02-24T03:39:06.720455 | 2021-02-02T07:14:27 | 2021-02-02T07:14:27 | 284,409,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | import torch
from torch import nn
from torch.utils.data.dataloader import DataLoader
from src.analyze.dataset import ClassificationDataset
class LogisticRegression(nn.Module):
    """Multinomial logistic-regression head over a latent representation."""

    def __init__(self, latent_size: int, output_size: int) -> None:
        super(LogisticRegression, self).__init__()
        # Single affine map from the latent space to class logits.
        self.linear = nn.Linear(in_features=latent_size, out_features=output_size)

    def forward(self, latent_variable: torch.Tensor) -> torch.Tensor:
        """
        :param latent_variable: torch.FloatTensor (batch_size, latent_size)
        :return logit: torch.FloatTensor (batch_size, output_size)
        """
        return self.linear(latent_variable)

    def get_prediction(self, latent_variable: torch.Tensor) -> torch.Tensor:
        """
        :param latent_variable: torch.FloatTensor (batch_size, latent_size)
        :return prediction: torch.LongTensor (batch_size,)
        """
        # Delegate to forward() so the logit computation lives in one place.
        logit = self.forward(latent_variable)
        return logit.argmax(dim=-1)

    def get_probability(self, latent_variable: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        :param latent_variable: torch.FloatTensor (batch_size, latent_size)
        :param target: torch.LongTensor (batch_size,)
        :return prob: torch.FloatTensor (batch_size,)
        """
        logit = self.forward(latent_variable)
        prob = torch.softmax(logit, dim=-1)
        # Select each row's probability of its own target class.
        return torch.gather(prob, 1, target.unsqueeze(-1)).squeeze(-1)
"1935448858@qq.com"
] | 1935448858@qq.com |
9978e29b6034b06932854040298227c4df1dd411 | 93154611a2da78612c275929db12fca51a699d6b | /scripts/geodata/countries/constants.py | 0f2923aa91c5f98f3e83e465ac441c07b61e8798 | [
"MIT",
"CC-BY-4.0"
] | permissive | rinigus/deb-libpostal | d61e5b2fd03b16b9e86d1b91dc8b8b8909fb19d3 | 0229f9e2d23d3c422899a5253fb66ece0cb62399 | refs/heads/master | 2022-06-29T21:35:41.002845 | 2022-06-22T20:06:04 | 2022-06-22T20:06:04 | 213,193,268 | 0 | 0 | MIT | 2022-06-22T20:06:05 | 2019-10-06T15:28:44 | C | UTF-8 | Python | false | false | 5,794 | py |
class Countries(object):
    """Lowercase ISO 3166-1 alpha-2 country codes keyed by country name.

    Includes dependent territories and special areas; two convenience
    groupings (former Soviet Union, CJK region) are defined at the bottom.
    """

    AFGHANISTAN = 'af'
    ALAND_ISLANDS = 'ax'
    ALBANIA = 'al'
    ALGERIA = 'dz'
    AMERICAN_SAMOA = 'as'
    ANDORRA = 'ad'
    ANGOLA = 'ao'
    ANGUILLA = 'ai'
    ANTARCTICA = 'aq'
    ANTIGUA_AND_BARBUDA = 'ag'
    ARGENTINA = 'ar'
    ARMENIA = 'am'
    ARUBA = 'aw'
    AUSTRALIA = 'au'
    AUSTRIA = 'at'
    AZERBAIJAN = 'az'
    BAHAMAS = 'bs'
    BAHRAIN = 'bh'
    BANGLADESH = 'bd'
    BARBADOS = 'bb'
    BELARUS = 'by'
    BELGIUM = 'be'
    BELIZE = 'bz'
    BENIN = 'bj'
    BERMUDA = 'bm'
    BHUTAN = 'bt'
    BOLIVIA = 'bo'
    BONAIRE = 'bq'
    # Fixed: was 'bq' (copy of BONAIRE); ISO 3166-1 alpha-2 for Bosnia
    # and Herzegovina is 'ba'.
    BOSNIA_AND_HERZEGOVINA = 'ba'
    BOTSWANA = 'bw'
    BOUVET_ISLAND = 'bv'
    BRAZIL = 'br'
    BRITISH_INDIAN_OCEAN_TERRITORY = 'io'
    BRITISH_VIRGIN_ISLANDS = 'vg'
    BRUNEI_DARUSSALAM = 'bn'
    BULGARIA = 'bg'
    BURKINA_FASO = 'bf'
    BURUNDI = 'bi'
    CAMBODIA = 'kh'
    CAMEROON = 'cm'
    CANADA = 'ca'
    CAPE_VERDE = 'cv'
    CAYMAN_ISLANDS = 'ky'
    CENTRAL_AFRICAN_REPUBLIC = 'cf'
    CHAD = 'td'
    CHILE = 'cl'
    CHINA = 'cn'
    CHRISTMAS_ISLAND = 'cx'
    COCOS_KEELING_ISLANDS = 'cc'
    COLOMBIA = 'co'
    COMOROS = 'km'
    COOK_ISLANDS = 'ck'
    COSTA_RICA = 'cr'
    COTE_DIVOIRE = 'ci'
    CROATIA = 'hr'
    CUBA = 'cu'
    CURACAO = 'cw'
    CYPRUS = 'cy'
    CZECH_REPUBLIC = 'cz'
    DENMARK = 'dk'
    DEMOCRATIC_REPUBLIC_OF_THE_CONGO = 'cd'
    DJIBOUTI = 'dj'
    DOMINICA = 'dm'
    DOMINICAN_REPUBLIC = 'do'
    ECUADOR = 'ec'
    EGYPT = 'eg'
    EL_SALVADOR = 'sv'
    EQUATORIAL_GUINEA = 'gq'
    ERITREA = 'er'
    ESTONIA = 'ee'
    ETHIOPIA = 'et'
    FALKLAND_ISLANDS_MALVINAS = 'fk'
    FAROE_ISLANDS = 'fo'
    FEDERATED_STATES_OF_MICRONESIA = 'fm'
    FIJI = 'fj'
    FINLAND = 'fi'
    FRANCE = 'fr'
    FRENCH_GUIANA = 'gf'
    FRENCH_POLYNESIA = 'pf'
    FRENCH_SOUTHERN_TERRITORIES = 'tf'
    GABON = 'ga'
    GAMBIA = 'gm'
    GEORGIA = 'ge'
    GERMANY = 'de'
    GHANA = 'gh'
    GIBRALTAR = 'gi'
    GREECE = 'gr'
    GREENLAND = 'gl'
    GRENADA = 'gd'
    GUADELOUPE = 'gp'
    GUAM = 'gu'
    GUATEMALA = 'gt'
    GUERNSEY = 'gg'
    GUINEA = 'gn'
    GUINEA_BISSAU = 'gw'
    GUYANA = 'gy'
    HAITI = 'ht'
    HEARD_ISLAND_AND_MCDONALD_ISLANDS = 'hm'
    HONDURAS = 'hn'
    HONG_KONG = 'hk'
    HUNGARY = 'hu'
    ICELAND = 'is'
    INDIA = 'in'
    INDONESIA = 'id'
    IRAN = 'ir'
    IRAQ = 'iq'
    IRELAND = 'ie'
    ISLE_OF_MAN = 'im'
    ISRAEL = 'il'
    ITALY = 'it'
    JAMAICA = 'jm'
    JAPAN = 'jp'
    JERSEY = 'je'
    JORDAN = 'jo'
    KAZAKHSTAN = 'kz'
    KENYA = 'ke'
    KIRIBATI = 'ki'
    KUWAIT = 'kw'
    KYRGYZSTAN = 'kg'
    LAOS = 'la'
    LATVIA = 'lv'
    LEBANON = 'lb'
    LESOTHO = 'ls'
    LIBERIA = 'lr'
    LIBYA = 'ly'
    LIECHTENSTEIN = 'li'
    LITHUANIA = 'lt'
    LUXEMBOURG = 'lu'
    MACAO = 'mo'
    MACEDONIA = 'mk'
    MADAGASCAR = 'mg'
    MALAWI = 'mw'
    MALAYSIA = 'my'
    MALDIVES = 'mv'
    MALI = 'ml'
    MALTA = 'mt'
    MARSHALL_ISLANDS = 'mh'
    MARTINIQUE = 'mq'
    MAURITANIA = 'mr'
    MAURITIUS = 'mu'
    MAYOTTE = 'yt'
    MEXICO = 'mx'
    MOLDOVA = 'md'
    MONACO = 'mc'
    MONGOLIA = 'mn'
    MONTENEGRO = 'me'
    MONTSERRAT = 'ms'
    MOROCCO = 'ma'
    MOZAMBIQUE = 'mz'
    MYANMAR = 'mm'
    NAMIBIA = 'na'
    NAURU = 'nr'
    NEPAL = 'np'
    NETHERLANDS = 'nl'
    NEW_CALEDONIA = 'nc'
    NEW_ZEALAND = 'nz'
    NICARAGUA = 'ni'
    NIGER = 'ne'
    NIGERIA = 'ng'
    NIUE = 'nu'
    NORFOLK_ISLAND = 'nf'
    NORTH_KOREA = 'kp'
    NORTHERN_MARIANA_ISLANDS = 'mp'
    NORWAY = 'no'
    OMAN = 'om'
    PAKISTAN = 'pk'
    PALAU = 'pw'
    PALESTINE = 'ps'
    PANAMA = 'pa'
    PAPUA_NEW_GUINEA = 'pg'
    PARAGUAY = 'py'
    PERU = 'pe'
    PHILIPPINES = 'ph'
    PITCAIRN_ISLANDS = 'pn'
    POLAND = 'pl'
    PORTUGAL = 'pt'
    PUERTO_RICO = 'pr'
    QATAR = 'qa'
    REPUBLIC_OF_CONGO = 'cg'
    REUNION = 're'
    ROMANIA = 'ro'
    RUSSIA = 'ru'
    RWANDA = 'rw'
    SAINT_BARTHELEMY = 'bl'
    SAINT_HELENA_ASCENSION_AND_TRISTAN_DA_CUNHA = 'sh'
    SAINT_KITTS_AND_NEVIS = 'kn'
    SAINT_LUCIA = 'lc'
    SAINT_MARTIN = 'mf'
    SAINT_PIERRE_AND_MIQUELON = 'pm'
    SAINT_VINCENT_AND_THE_GRENADINES = 'vc'
    SAMOA = 'ws'
    SAN_MARINO = 'sm'
    SAO_TOME_AND_PRINCIPE = 'st'
    SAUDI_ARABIA = 'sa'
    SENEGAL = 'sn'
    SERBIA = 'rs'
    SEYCHELLES = 'sc'
    SIERRA_LEONE = 'sl'
    SINGAPORE = 'sg'
    SINT_MAARTEN = 'sx'
    SLOVAKIA = 'sk'
    SLOVENIA = 'si'
    SOLOMON_ISLANDS = 'sb'
    SOMALIA = 'so'
    SOUTH_AFRICA = 'za'
    SOUTH_GEORGIA_AND_THE_SOUTH_SANDWICH_ISLANDS = 'gs'
    SOUTH_KOREA = 'kr'
    SOUTH_SUDAN = 'ss'
    SPAIN = 'es'
    SRI_LANKA = 'lk'
    SUDAN = 'sd'
    SURINAME = 'sr'
    SVALBARD_AND_JAN_MAYEN = 'sj'
    SWAZILAND = 'sz'
    SWEDEN = 'se'
    SWITZERLAND = 'ch'
    SYRIA = 'sy'
    TAIWAN = 'tw'
    TAJIKISTAN = 'tj'
    TANZANIA = 'tz'
    THAILAND = 'th'
    TIMOR_LESTE = 'tl'
    TOGO = 'tg'
    TOKELAU = 'tk'
    TONGA = 'to'
    TRINIDAD_AND_TOBAGO = 'tt'
    TUNISIA = 'tn'
    TURKEY = 'tr'
    TURKMENISTAN = 'tm'
    TURKS_AND_CAICOS_ISLANDS = 'tc'
    TUVALU = 'tv'
    UGANDA = 'ug'
    UKRAINE = 'ua'
    UNITED_ARAB_EMIRATES = 'ae'
    UNITED_KINGDOM = 'gb'
    UNITED_STATES = 'us'
    UNITED_STATES_MINOR_OUTLYING_ISLANDS = 'um'
    URUGUAY = 'uy'
    US_VIRGIN_ISLANDS = 'vi'
    UZBEKISTAN = 'uz'
    VANUATU = 'vu'
    VATICAN = 'va'
    VENEZUELA = 've'
    VIETNAM = 'vn'
    WALLIS_AND_FUTUNA = 'wf'
    WESTERN_SAHARA = 'eh'
    YEMEN = 'ye'
    ZAMBIA = 'zm'
    ZIMBABWE = 'zw'

    # The 15 post-Soviet states.
    FORMER_SOVIET_UNION_COUNTRIES = {RUSSIA, UKRAINE, BELARUS, KAZAKHSTAN, AZERBAIJAN, KYRGYZSTAN, GEORGIA, UZBEKISTAN, ARMENIA, TAJIKISTAN, MOLDOVA, TURKMENISTAN, LATVIA, LITHUANIA, ESTONIA}

    # Chinese/Japanese/Korean-script region (incl. Taiwan, Hong Kong, Macao).
    CJK_COUNTRIES = {CHINA, JAPAN, SOUTH_KOREA, TAIWAN, HONG_KONG, MACAO}
| [
"albarrentine@gmail.com"
] | albarrentine@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.