| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3 to 288) | content_id (string, len 40) | detected_licenses (list, len 0 to 112) | license_type (string, 2 classes) | repo_name (string, len 5 to 115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 to 12.7k) | extension (string, 142 classes) | content (string, len 128 to 8.19k) | authors (list, len 1) | author_id (string, len 1 to 132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ab874f54709718eb18eb3c5f718ae9204a92281a
|
b4bb9a937e0904db89c6496389f49ae555258fc5
|
/apps/messages.py
|
b446216ae5a1105af91ba51c24960a4feb5e9fa3
|
[] |
no_license
|
vitoralves/python-api
|
3e1f5f77ba61e0df2770c9d24240b46ee9c37449
|
125172ee7906392c49884f8e8fdf21bc9aa60c2c
|
refs/heads/master
| 2020-05-24T04:21:48.857073
| 2019-05-22T16:19:52
| 2019-05-22T16:19:52
| 187,090,895
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
MSG_FIELD_REQUIRED = 'Campo obrigatório.'
MSG_INVALID_DATA = 'Ocorreu um erro nos campos informados.'
MSG_DOES_NOT_EXIST = 'Este(a) {} não existe.'
MSG_EXCEPTION = 'Ocorreu um erro no servidor. Contate o administrador.'
MSG_ALREADY_EXISTS = 'Já existe um(a) {} com estes dados.'
MSG_NO_DATA = "Os dados não podem ser nulos."
MSG_PASSWORD_WRONG = "As senhas não conferem."
MSG_RESOURCE_CREATED = "{} criado com sucesso."
MSG_RESOURCE_FETCHED_PAGINATED = 'Lista os/as {} paginados(as).'
MSG_RESOURCE_FETCHED = '{} retornado(a).'
MSG_RESOURCE_UPDATED = '{} atualizado(a).'
MSG_RESOURCE_DELETED = '{} deletado(a).'
MSG_TOKEN_CREATED = 'Token criado.'
MSG_INVALID_CREDENTIALS = 'As credenciais estão inválidas para log in.'
MSG_TOKEN_EXPIRED = 'Token expirou.'
MSG_PERMISSION_DENIED = 'Permissão negada.'
|
[
"="
] |
=
|
fe31ab89f3e3accf47cecdd7b82dfdfe1dc82ed0
|
66e6360325b781ed0791868765f1fd8a6303726f
|
/TB2009/WorkDirectory/5161 Profile Check/Profile_108541.py
|
2258d75bb8e0e950291c863f0631348a9989fb97
|
[] |
no_license
|
alintulu/FHead2011PhysicsProject
|
c969639b212d569198d8fce2f424ce866dcfa881
|
2568633d349810574354ad61b0abab24a40e510e
|
refs/heads/master
| 2022-04-28T14:19:30.534282
| 2020-04-23T17:17:32
| 2020-04-23T17:17:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,101
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("ProfileCleanedMIP")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("HcalTBSource",
fileNames = cms.untracked.vstring("file:/tmp/chenyi/HTB_108541.root"),
streams = cms.untracked.vstring('Chunk699', 'HCAL_Trigger', 'HCAL_SlowData', 'HCAL_QADCTDC', 'HCAL_DCC021')
)
process.tbunpack = cms.EDFilter("HcalTBObjectUnpacker",
#IncludeUnmatchedHits = cms.untracked.bool(False),
HcalTriggerFED = cms.untracked.int32(1),
HcalVLSBFED = cms.untracked.int32(699),
HcalTDCFED = cms.untracked.int32(8),
HcalQADCFED = cms.untracked.int32(8),
HcalSlowDataFED = cms.untracked.int32(3),
ConfigurationFile = cms.untracked.string('configQADCTDC_TB2009.txt')
)
process.vlsbinfo = cms.EDProducer("VLSBInformationProducer",
minSample = cms.untracked.uint32(0),
maxSample = cms.untracked.uint32(31),
baselineSamples = cms.untracked.uint32(2),
useMotherBoard0 = cms.untracked.bool(True),
useMotherBoard1 = cms.untracked.bool(True),
useMotherBoard2 = cms.untracked.bool(False),
useMotherBoard3 = cms.untracked.bool(True),
usePedestalMean = cms.untracked.bool(True),
pedestalMean = cms.untracked.string('PedestalMean_108541.txt'),
mip = cms.untracked.string('SecondaryMIP.txt'),
roughmip = cms.untracked.string('PercentageCorrectedGeV.txt'),
secondaryShift = cms.untracked.string("PercentageCorrectedGeV_SecondaryShift.txt"),
beamEnergy = cms.untracked.double(150),
adcMap = cms.untracked.string('FinalAdcMapping_All.txt'),
lowestSampleSubtraction = cms.untracked.bool(True),
numberOfSamplesForSubtraction = cms.untracked.int32(16),
numberOfSamplesToSkip = cms.untracked.int32(16)
)
process.averagecharge = cms.EDAnalyzer("FillAverageChargeLayerAnalyzer",
output = cms.untracked.string("TotalEnergy_108541.root"),
textOutput = cms.untracked.bool(True),
interpolate = cms.untracked.bool(False) # interpolate for missing channels by averaging neighboring channels
)
process.averagecharge_interpolated = cms.EDAnalyzer("FillAverageChargeLayerAnalyzer",
output = cms.untracked.string("TotalEnergy_Interpolated_108541.root"),
textOutput = cms.untracked.bool(True),
interpolate = cms.untracked.bool(True)
)
process.filladc = cms.EDAnalyzer("FillAdcDistributionAnalyzer",
invert = cms.untracked.bool(False),
highdef = cms.untracked.bool(True),
divideMIP = cms.untracked.bool(False),
baselineSubtraction = cms.untracked.bool(True),
output = cms.untracked.string("AdcDistribution_108541.root")
)
process.ABCcut = cms.EDFilter("SingleTowerParticleFilter")
process.MessageLogger = cms.Service("MessageLogger",
default = cms.untracked.PSet(
reportEvery = cms.untracked.int32(239)
)
)
process.muonveto = cms.EDFilter("MuonVetoFilter")
process.p = cms.Path(
process.tbunpack *
process.ABCcut *
process.vlsbinfo *
process.muonveto *
process.averagecharge
# process.averagecharge_interpolated *
# process.filladc
)
|
[
"yichen@positron01.hep.caltech.edu"
] |
yichen@positron01.hep.caltech.edu
|
43a44eb94d4c3cdc0eb12a66ca6aeb7e6f8ab7c6
|
49253f12cea4b2ec1df4d68876c3c330fec3f52b
|
/001_数据结构相关/001_set集合_交集_并集_差集_对称差集.py
|
f1a21ae3614ae074ef531ce11370b4832eeadf37
|
[] |
no_license
|
FelixZFB/Python_development_skills_summary
|
b2877652a5396936a28d5c65fb407df201ffa158
|
998679496de8385bda34734f83d927a7d340876a
|
refs/heads/master
| 2020-06-09T16:58:51.242686
| 2020-02-27T07:02:48
| 2020-02-27T07:02:48
| 193,472,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
# -*- coding:utf-8 -*-
#
# Sets support a series of standard operations, including union, intersection,
# difference and symmetric difference, for example:
# a = t | s # union of t and s
# b = t & s # intersection of t and s
# c = t - s # difference (items in t but not in s)
# d = t ^ s # symmetric difference (items in t or s, but not in both)
a = [1, 5, 10, 15, 10]
b = [1, 5, 10, 9, 12]
# a set drops duplicate elements
print(set(a))
print("*" * 50)
# union
c1 = set(a) | set(b)
print(c1)
# intersection
c2 = set(a) & set(b)
print(c2)
# difference
c3 = set(a) - set(b)
print(c3)
c3 = set(b) - set(a)
print(c3)
# symmetric difference
c4 = set(a) ^ set(b)
print(c4)
|
[
"18200116656@qq.com"
] |
18200116656@qq.com
|
81baef8090682ce775be599e4786806b1672e33f
|
8a7abed7c441600a66bf2ef9135ff3a367ac0eb2
|
/website/goals/migrations/0001_initial.py
|
00da811bea795906390ec6595dd4df58f5432e91
|
[] |
no_license
|
mrooney/mikesgoals
|
094d30160817879243b7539df5a3759d19583edc
|
dd0b0aee7ce20d43852cf694bc1ecb5af23dde94
|
refs/heads/master
| 2023-04-09T16:10:16.008923
| 2022-07-07T17:33:00
| 2022-07-07T17:33:00
| 4,474,379
| 2
| 0
| null | 2023-03-31T14:38:43
| 2012-05-28T20:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Goal'
db.create_table('goals_goal', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
('frequency', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('goals', ['Goal'])
def backwards(self, orm):
# Deleting model 'Goal'
db.delete_table('goals_goal')
models = {
'goals.goal': {
'Meta': {'object_name': 'Goal'},
'frequency': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['goals']
|
[
"mrooney.github@rowk.com"
] |
mrooney.github@rowk.com
|
6009fe56a5b567eb3751301b21273398f872f28d
|
b13c57843cb8886c6f5d630ca099ad9130b26f25
|
/python/장고/first.py
|
94cc29b0619ffd718f3cfc6ee9a510900562b741
|
[] |
no_license
|
schw240/07.27-12.1_CLOUD
|
6b563318f7208b843a13634a1cf46206197d6dfc
|
8b4dc2d31e5d2ba96bde143116aba3ba0dad7a49
|
refs/heads/master
| 2023-03-25T15:44:03.555567
| 2021-03-30T02:09:32
| 2021-03-30T02:09:32
| 282,791,349
| 4
| 0
| null | 2021-03-19T15:00:00
| 2020-07-27T04:10:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import datetime
class HelloHandler(BaseHTTPRequestHandler):
def do_GET(self):
print(self.path)
if self.path == '/my':
self.send_response(200)
self.end_headers()
self.wfile.write("MyPage!".encode('utf-8'))
elif self.path == '/portfolio':
self.send_response(200)
self.end_headers()
self.wfile.write("Portfolio!".encode('utf-8'))
elif self.path == '/':
html = f"""
<html>
<head>
<title>나의홈페이지</title>
</head>
<body>
<h1>안녕하세요~ 저의 웹사이트에 오신걸 환영합니다.</h1>
<h2>{datetime.datetime.now()}</h2>
</body>
</html>
"""
self.send_response(200)
self.send_header("content-type", "text/html; charset=UTF-8")
self.end_headers()
self.wfile.write(html.encode('utf-8'))
else:
self.send_response(404)
self.end_headers()
self.wfile.write("404".encode('utf-8'))
if __name__== '__main__':
server = HTTPServer(('', 8888), HelloHandler)
print("Start Server")
server.serve_forever()
|
[
"schw240@gmail.com"
] |
schw240@gmail.com
|
ad6cc0a08e8ba3d2ad47ab45d0395df6b071594b
|
006341ca12525aa0979d6101600e78c4bd9532ab
|
/CMS/Zope-3.2.1/Dependencies/zope.app-Zope-3.2.1/zope.app/i18n/filters.py
|
2807d5ca114aaa6b7749be72ef8b4ab16fdbd8fe
|
[
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] |
permissive
|
germanfriday/code-examples-sandbox
|
d0f29e20a3eed1f8430d06441ac2d33bac5e4253
|
4c538584703754c956ca66392fdcecf0a0ca2314
|
refs/heads/main
| 2023-05-30T22:21:57.918503
| 2021-06-15T15:06:47
| 2021-06-15T15:06:47
| 377,200,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,811
|
py
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Translation Domain Message Export and Import Filters
$Id: filters.py 38178 2005-08-30 21:50:19Z mj $
"""
__docformat__ = 'restructuredtext'
import time, re
from types import StringTypes
from zope.interface import implements
from zope.i18n.interfaces import IMessageExportFilter, IMessageImportFilter
from zope.app.i18n.interfaces import ILocalTranslationDomain
class ParseError(Exception):
def __init__(self, state, lineno):
Exception.__init__(self, state, lineno)
self.state = state
self.lineno = lineno
def __str__(self):
return "state %s, line %s" % (self.state, self.lineno)
class GettextExportFilter(object):
implements(IMessageExportFilter)
__used_for__ = ILocalTranslationDomain
def __init__(self, domain):
self.domain = domain
def exportMessages(self, languages):
'See IMessageExportFilter'
domain = self.domain.domain
if isinstance(languages, StringTypes):
language = languages
elif len(languages) == 1:
language = languages[0]
else:
raise TypeError(
'Only one language at a time is supported for gettext export.')
dt = time.time()
dt = time.localtime(dt)
dt = time.strftime('%Y/%m/%d %H:%M', dt)
output = _file_header %(dt, language.encode('UTF-8'),
domain.encode('UTF-8'))
for msgid in self.domain.getMessageIds():
msgstr = self.domain.translate(msgid, target_language=language)
msgstr = msgstr.encode('UTF-8')
msgid = msgid.encode('UTF-8')
output += _msg_template %(msgid, msgstr)
return output
class GettextImportFilter(object):
implements(IMessageImportFilter)
__used_for__ = ILocalTranslationDomain
def __init__(self, domain):
self.domain = domain
def importMessages(self, languages, file):
'See IMessageImportFilter'
if isinstance(languages, StringTypes):
language = languages
elif len(languages) == 1:
language = languages[0]
else:
raise TypeError(
'Only one language at a time is supported for gettext export.')
result = parseGetText(file.readlines())[3]
headers = parserHeaders(''.join(result[('',)][1]))
del result[('',)]
charset = extractCharset(headers['content-type'])
for msg in result.items():
msgid = unicode(''.join(msg[0]), charset)
msgid = msgid.replace('\\n', '\n')
msgstr = unicode(''.join(msg[1][1]), charset)
msgstr = msgstr.replace('\\n', '\n')
self.domain.addMessage(msgid, msgstr, language)
def extractCharset(header):
charset = header.split('charset=')[-1]
return charset.lower()
def parserHeaders(headers_text):
headers = {}
for line in headers_text.split('\\n'):
name = line.split(':')[0]
value = ''.join(line.split(':')[1:])
headers[name.lower()] = value
return headers
def parseGetText(content):
# The regular expressions
com = re.compile('^#.*')
msgid = re.compile(r'^ *msgid *"(.*?[^\\]*)"')
msgstr = re.compile(r'^ *msgstr *"(.*?[^\\]*)"')
re_str = re.compile(r'^ *"(.*?[^\\])"')
blank = re.compile(r'^\s*$')
trans = {}
pointer = 0
state = 0
COM, MSGID, MSGSTR = [], [], []
while pointer < len(content):
line = content[pointer]
#print 'STATE:', state
#print 'LINE:', line, content[pointer].strip()
if state == 0:
COM, MSGID, MSGSTR = [], [], []
if com.match(line):
COM.append(line.strip())
state = 1
pointer = pointer + 1
elif msgid.match(line):
MSGID.append(msgid.match(line).group(1))
state = 2
pointer = pointer + 1
elif blank.match(line):
pointer = pointer + 1
else:
raise ParseError(0, pointer + 1)
elif state == 1:
if com.match(line):
COM.append(line.strip())
state = 1
pointer = pointer + 1
elif msgid.match(line):
MSGID.append(msgid.match(line).group(1))
state = 2
pointer = pointer + 1
elif blank.match(line):
pointer = pointer + 1
else:
raise ParseError(1, pointer + 1)
elif state == 2:
if com.match(line):
COM.append(line.strip())
state = 2
pointer = pointer + 1
elif re_str.match(line):
MSGID.append(re_str.match(line).group(1))
state = 2
pointer = pointer + 1
elif msgstr.match(line):
MSGSTR.append(msgstr.match(line).group(1))
state = 3
pointer = pointer + 1
elif blank.match(line):
pointer = pointer + 1
else:
raise ParseError(2, pointer + 1)
elif state == 3:
if com.match(line) or msgid.match(line):
# print "\nEn", language, "detected", MSGID
trans[tuple(MSGID)] = (COM, MSGSTR)
state = 0
elif re_str.match(line):
MSGSTR.append(re_str.match(line).group(1))
state = 3
pointer = pointer + 1
elif blank.match(line):
pointer = pointer + 1
else:
raise ParseError(3, pointer + 1)
# the last also goes in
if tuple(MSGID):
trans[tuple(MSGID)] = (COM, MSGSTR)
return COM, MSGID, MSGSTR, trans
_file_header = '''
msgid ""
msgstr ""
"Project-Id-Version: Zope 3\\n"
"PO-Revision-Date: %s\\n"
"Last-Translator: Zope 3 Gettext Export Filter\\n"
"Zope-Language: %s\\n"
"Zope-Domain: %s\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
'''
_msg_template = '''
msgid "%s"
msgstr "%s"
'''
|
[
"chris@thegermanfriday.com"
] |
chris@thegermanfriday.com
|
4c1fac6ffc39bfa3667bc5a2ef3b71ca0e4f0283
|
bee2af5228232ce94f418b61810cecd93af62615
|
/movies/tests.py
|
a6adfa1859a67fd17757470bea1d839c9c970cc3
|
[] |
no_license
|
thuitafaith/djangoapp
|
b64c2e1a05c67b1135d4d9dd7975c17522238a69
|
e06280b34a7b1ec012d0baab6f0fb153875a39b4
|
refs/heads/master
| 2022-12-11T19:06:08.540528
| 2019-08-29T12:36:45
| 2019-08-29T12:36:45
| 203,321,071
| 0
| 0
| null | 2022-11-22T04:13:07
| 2019-08-20T07:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,600
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from .models import Editor,Article,tags
import datetime as dt
# Create your tests here.
class EditorTestClass(TestCase):
# set up method
def setUp(self):
self.faith=Editor(first_name='faith',last_name='thuita',email='faith.thuita@moringaschool.com')
# testing instance
def test_instance(self):
self.assertTrue(isinstance(self.faith,Editor))
# testing save method
def test_save_method(self):
self.faith.save_editor()
editors = Editor.objects.all()
self.assertTrue(len(editors)>0)
class ArticleTestClass(TestCase):
def setUp(self):
# creating a new editor and saving it
self.faith= Editor(first_name='faith',last_name='thuita',email='faith.thuita@moringaschool.com')
self.faith.save_editor()
# creating a new tag saving it
self.new_tag = tags(name='testing')
self.new_tag.save()
self.new_article = Article(title='Test Article',post= 'this is a random test post',editor=self.faith)
self.new_article.save()
self.new_article.tags.add(self.new_tag)
def tearDown(self):
Editor.objects.all().delete()
tags.objects.all().delete()
Article.objects.all().delete()
def test_get_news_today(self):
today_news = Article.todays_news()
self.assertTrue(len(today_news) > 0)
def test_get_news_by_date(self):
test_date = '2017-03-17'
date = dt.datetime.strptime(test_date, '%Y-%m-%d').date()
|
[
"thuitamuthoni15@gmail.com"
] |
thuitamuthoni15@gmail.com
|
f287244a91e88664b5d41777c7749b04894158ea
|
f4b16d247195621a5413aab56919b4e623b604b8
|
/src/faimes/urban/dataimport/opinionmakers/settings.py
|
ed4e0587d66bb5c2e6bf895523ee08c2b6023e75
|
[] |
no_license
|
IMIO/faimes.urban.dataimport
|
cc1a7e3050538f409c29e3031a175e8d1a96c7db
|
67fcaa14a5951df7cbaf64b59794aab0a2b88f7f
|
refs/heads/master
| 2021-01-10T17:52:51.975421
| 2017-03-16T13:27:27
| 2017-03-16T13:27:27
| 52,949,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 904
|
py
|
# -*- coding: utf-8 -*-
from faimes.urban.dataimport.opinionmakers.importer import OpinionMakersImporter
from imio.urban.dataimport.access.settings import AccessImporterFromImportSettings
class OpinionMakersImporterFromImportSettings(AccessImporterFromImportSettings):
""" """
def __init__(self, settings_form, importer_class=OpinionMakersImporter):
"""
"""
super(OpinionMakersImporterFromImportSettings, self).__init__(settings_form, importer_class)
def get_importer_settings(self):
"""
Return the access file to read.
"""
settings = super(OpinionMakersImporterFromImportSettings, self).get_importer_settings()
access_settings = {
'db_name': 'Tab_Urba 97.mdb',
'table_name': 'CONSUL',
'key_column': 'Sigle',
}
settings.update(access_settings)
return settings
|
[
"delcourt.simon@gmail.com"
] |
delcourt.simon@gmail.com
|
06d163a2fe5ead35d5e572263a70fde2496f201a
|
745197407e81606718c4cdbedb6a81b5e8edf50b
|
/tests/texttest/TestSelf/TestData/GUI/CopyTestPermission/TargetApp/printpermissions.py
|
982669999d14a181bf22034492a6efd8f0066ec8
|
[] |
no_license
|
dineshkummarc/texttest-3.22
|
5b986c4f6cc11fd553dab173c7f2e90590e7fcf0
|
85c3d3627082cdc5860d9a8468687acb499a7293
|
refs/heads/master
| 2021-01-23T20:44:35.653866
| 2012-06-25T07:52:13
| 2012-06-25T07:52:13
| 4,779,248
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
#!/usr/bin/env python
import os
if os.name == "posix":
os.system("fake_executable.py 2> /dev/null")
else:
os.system("fake_executable.py 2> nul")
|
[
"dineshkummarc@gmail.com"
] |
dineshkummarc@gmail.com
|
41966c4c82d82d656d5fa42250f7a8267dfc0855
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3_neat/16_0_3_solomon_wzs_coin_jam2.py
|
dd210cf970f87c5ec2c5810a2df187cfd1dd819d
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,771
|
py
|
#!/usr/bin/python2
import math
prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 113,
193, 241, 257, 337, 353, 401, 433, 449, 577, 593, 641,
673, 769, 881, 929, 977, 1009, 1153, 1201, 1217, 1249,
1297, 1361, 1409, 1489, 1553, 1601, 1697, 1777, 1873,
1889, 2017, 2081, 2113, 2129, 2161, 2273, 2417, 2593,
2609, 2657, 2689, 2753, 2801, 2833, 2897, 3041, 3089,
3121, 3137, 3169, 3217, 3313, 3329, 3361, 3457, 3617,
3697, 3761, 3793, 3889, 4001, 4049, 4129, 4177, 4241,
4273, 4289, 4337, 4481, 4513, 4561, 4657, 4673, 4721,
4801, 4817, 4993, 5009, 5153, 5233, 5281, 5297, 5393,
5441, 5521, 5569, 5857, 5953, 6113, 6257, 6337, 6353,
6449, 6481, 6529, 6577, 6673, 6689, 6737, 6833, 6961,
6977, 7057, 7121, 7297, 7393, 7457, 7489, 7537, 7649,
7681, 7793, 7841, 7873, 7937, 8017, 8081, 8161, 8209,
8273, 8353, 8369, 8513, 8609, 8641, 8689, 8737, 8753,
8849, 8929, 9041, 9137, 9281, 9377, 9473, 9521, 9601,
9649, 9697, 9857]
def montgomery(n, p, m):
r = n % m
k = 1
while p > 1:
if p & 1 != 0:
k = (k * r) % m
r = (r * r) % m
p /= 2
return (r * k) % m
def is_prime(n):
if n < 2:
return False
for i in xrange(len(prime_list)):
if n % prime_list[i] == 0 or montgomery(prime_list[i], n - 1, n) != 1:
return False
return True
def f(n, j):
res = ""
for x in xrange(int("1%s1" % ("0" * (n - 2)), 2),
int("1%s1" % ("1" * (n - 2)), 2) + 1,
2):
s = bin(x)[2:]
ok = True
for i in xrange(2, 11, 1):
n = int(s, i)
if is_prime(n):
ok = False
break
if ok:
l = [0] * 9
for i in xrange(2, 11, 1):
n = int(s, i)
ok = False
for k in xrange(2, min(int(math.sqrt(n)), 1000000)):
if n % k == 0:
ok = True
l[i - 2] = str(k)
break
if not ok:
break
if ok:
res += "%s %s\n" % (s, " ".join(l))
j -= 1
if j == 0:
return res[0:len(res)-1]
import sys
fd = open(sys.argv[1], "rb")
t = int(fd.readline().strip())
for i in xrange(1, t + 1):
line = fd.readline().strip()
arr = line.split(" ")
n = int(arr[0])
j = int(arr[1])
res = f(n, j)
print "Case #%d:\n%s" % (i, res)
fd.close()
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
c429802a9089f13c1454fc1561fb824738bee9ed
|
35a2c7e6a01dc7f75116519e4521880416f2a9f2
|
/tag/migrations/0002_value.py
|
2824ada9ec86e752a45838963178796c90938761
|
[] |
no_license
|
engrogerio/edr-rest
|
ae977857d86aab3ef5b40e4d2be2e24abda97cb9
|
a1115a1cd80c0531a85545681b0d3a70b97c529e
|
refs/heads/master
| 2021-01-12T06:35:47.059448
| 2016-12-26T16:20:14
| 2016-12-26T16:20:14
| 77,392,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-07 15:42
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('form', '0002_auto_20160907_1542'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tag', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Value',
fields=[
('created_when', models.DateTimeField(default=datetime.datetime.now)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('numeric', models.DecimalField(decimal_places=10, max_digits=20)),
('text', models.CharField(max_length=1000)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('inspection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='form.Inspection')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tag.Tag')),
],
options={
'abstract': False,
},
),
]
|
[
"eng.rogerio@gmail.com"
] |
eng.rogerio@gmail.com
|
2db93df279e2e7651e7f462a9d558dc444be41b7
|
b42b8f2bfadd25c51cbb12054bc6df42943b7536
|
/venv/Scripts/easy_install-3.7-script.py
|
d0dad9ea927377d5b3c3ccd6ddf55aeec430b305
|
[] |
no_license
|
sharikgrg/week4.Gazorpazorp
|
4b785f281334a6060d6edc8a195a58c072fb5a75
|
0f168a0df81703a8950e375081cafd2e766595fb
|
refs/heads/master
| 2020-08-03T22:41:43.373137
| 2019-09-30T15:35:29
| 2019-09-30T15:35:29
| 211,907,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
#!"C:\Users\Sharik Gurung\OneDrive - Sparta Global Limited\PYTHON\gazorpazorp-space-station\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"SGurung@spartaglobal.com"
] |
SGurung@spartaglobal.com
|
25fe704f5be77484a077c570572772385b9cdd39
|
27c27208a167f089bb8ce4027dedb3fcc72e8e8a
|
/ProjectEuler/Solutions/Problems 50-100/Q075.py
|
b1510ace6b6e74c60b66d6a3e138b7926017acc7
|
[] |
no_license
|
stankiewiczm/contests
|
fd4347e7b84c8c7ec41ba9746723036d86e2373c
|
85ed40f91bd3eef16e02e8fd45fe1c9b2df2887e
|
refs/heads/master
| 2021-05-10T16:46:41.993515
| 2018-02-16T09:04:15
| 2018-02-16T09:04:15
| 118,587,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
from numpy import *
# m^2+n^2, 2mn, m^2-n^2
LIM = 1500000; Count = zeros(LIM+1, int);
def GCD(a,b):
while (b > 0):
c = a-(a/b)*b;
a = b;
b = c;
return a;
m = 1; M2 = 1;
while (2*M2 < LIM):
n = m%2+1;
while (2*M2+2*m*n < LIM) and (n < m):
if GCD(m,n) == 1:
p = 2*M2+2*m*n;
for k in range(1, LIM/p+1):
Count[p*k] += 1;
n += 2;
m += 1;
M2 = m*m;
print sum(Count==1)
|
[
"mstankiewicz@gmail.com"
] |
mstankiewicz@gmail.com
|
3f6d20b2b0368bc1fce9ed4428930b1693f2765e
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4134/codes/1723_2505.py
|
8e06f5ecd7a38c37363f35f111d10476601ae390
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
from math import*
ang = eval(input("angulo:"))
k = int(input("numero de termos:"))
soma = 0
i = 0
fim = k - 1
while(i <= fim):
soma = soma+(-1)**i*((ang**(2*i+1)/factorial(2*i+1)))
i = i + 1
print(round(soma, 10))
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
e31dea602a2885d6f6b29d64376f9e3e2a16e75e
|
57391fbdde43c3d2e8628613d9003c65ff8abf9d
|
/Exercicios/ex050.py
|
c2deb6d939dc7f9a9690a0cbb5d9e7af53d18167
|
[] |
no_license
|
JoaolSoares/CursoEmVideo_python
|
082a6aff52414cdcc7ee94d76c3af0ac2cb2aaf5
|
aa9d6553ca890a6d9369e60504290193d1c0fb54
|
refs/heads/main
| 2023-07-15T07:39:57.299061
| 2021-08-26T20:04:22
| 2021-08-26T20:04:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
soma = 0
for c in range(1, 7):
n1 = int(input('Diga um {}º numero: ' .format(c)))
if n1 % 2 == 0:
soma += n1
print('A soma de todos os numeros pares é de: \033[1;34m{}\033[m'. format(soma))
|
[
"joaolucassoaresk@outlook.com"
] |
joaolucassoaresk@outlook.com
|
347d0ea9561448fc30d4a289a796fa6453ad8a76
|
08120ee05b086d11ac46a21473f3b9f573ae169f
|
/gcloud/google-cloud-sdk/.install/.backup/lib/surface/projects/add_iam_policy_binding.py
|
c25bf659de28f724ec44d284cf9b7e902abe6009
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
harrystaley/TAMUSA_CSCI4349_Week9_Honeypot
|
52f7d5b38af8612b7b0c02b48d0a41d707e0b623
|
bd3eb7dfdcddfb267976e3abe4c6c8fe71e1772c
|
refs/heads/master
| 2022-11-25T09:27:23.079258
| 2018-11-19T06:04:07
| 2018-11-19T06:04:07
| 157,814,799
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,025
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to add IAM policy binding for a resource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.util import http_retry
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iam import iam_util
from googlecloudsdk.command_lib.projects import flags
from googlecloudsdk.command_lib.projects import util as command_lib_util
from googlecloudsdk.command_lib.resource_manager import completers
import six.moves.http_client
@base.ReleaseTracks(base.ReleaseTrack.GA)
class AddIamPolicyBinding(base.Command):
"""Add IAM policy binding for a project.
Adds a policy binding to the IAM policy of a project,
given a project ID and the binding.
"""
detailed_help = iam_util.GetDetailedHelpForAddIamPolicyBinding(
'project', 'example-project-id-1')
@staticmethod
def Args(parser):
flags.GetProjectFlag('add IAM policy binding to').AddToParser(parser)
iam_util.AddArgsForAddIamPolicyBinding(
parser,
role_completer=completers.ProjectsIamRolesCompleter)
@http_retry.RetryOnHttpStatus(six.moves.http_client.CONFLICT)
def Run(self, args):
project_ref = command_lib_util.ParseProject(args.id)
return projects_api.AddIamPolicyBinding(project_ref, args.member, args.role)
|
[
"staleyh@gmail.com"
] |
staleyh@gmail.com
|
105b66682da75be919d969965dcd0c11bb4617ce
|
80d50ea48e10674b1b7d3f583a1c4b7d0b01200f
|
/src/datadog_api_client/v1/model/monitor_search_response_counts.py
|
94a81e62a22817294ef78c86f9fecc7290984a77
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] |
permissive
|
DataDog/datadog-api-client-python
|
3e01fa630278ad0b5c7005f08b7f61d07aa87345
|
392de360e7de659ee25e4a6753706820ca7c6a92
|
refs/heads/master
| 2023-09-01T20:32:37.718187
| 2023-09-01T14:42:04
| 2023-09-01T14:42:04
| 193,793,657
| 82
| 36
|
Apache-2.0
| 2023-09-14T18:22:39
| 2019-06-25T22:52:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from typing import Union, TYPE_CHECKING
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
unset,
UnsetType,
)
if TYPE_CHECKING:
from datadog_api_client.v1.model.monitor_search_count import MonitorSearchCount
class MonitorSearchResponseCounts(ModelNormal):
@cached_property
def openapi_types(_):
from datadog_api_client.v1.model.monitor_search_count import MonitorSearchCount
return {
"muted": (MonitorSearchCount,),
"status": (MonitorSearchCount,),
"tag": (MonitorSearchCount,),
"type": (MonitorSearchCount,),
}
attribute_map = {
"muted": "muted",
"status": "status",
"tag": "tag",
"type": "type",
}
def __init__(
self_,
muted: Union[MonitorSearchCount, UnsetType] = unset,
status: Union[MonitorSearchCount, UnsetType] = unset,
tag: Union[MonitorSearchCount, UnsetType] = unset,
type: Union[MonitorSearchCount, UnsetType] = unset,
**kwargs,
):
"""
The counts of monitors per different criteria.
:param muted: Search facets.
:type muted: MonitorSearchCount, optional
:param status: Search facets.
:type status: MonitorSearchCount, optional
:param tag: Search facets.
:type tag: MonitorSearchCount, optional
:param type: Search facets.
:type type: MonitorSearchCount, optional
"""
if muted is not unset:
kwargs["muted"] = muted
if status is not unset:
kwargs["status"] = status
if tag is not unset:
kwargs["tag"] = tag
if type is not unset:
kwargs["type"] = type
super().__init__(kwargs)
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
ee7611e405952a6d724354ab56524138152af431
|
f8d3f814067415485bb439d7fe92dc2bbe22a048
|
/solem/pcv_book/graphcut.py
|
242ac3449953f8cca3ec94fabb66d20ceecfa821
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
gmonkman/python
|
2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3
|
9123aa6baf538b662143b9098d963d55165e8409
|
refs/heads/master
| 2023-04-09T15:53:29.746676
| 2022-11-26T20:35:21
| 2022-11-26T20:35:21
| 60,254,898
| 0
| 2
| null | 2023-03-24T22:58:39
| 2016-06-02T10:25:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,413
|
py
|
from pylab import *
from numpy import *
from pygraph.classes.digraph import digraph
from pygraph.algorithms.minmax import maximum_flow
import bayes
"""
Graph Cut image segmentation using max-flow/min-cut.
"""
def build_bayes_graph(im, labels, sigma=1e2, kappa=1):
""" Build a graph from 4-neighborhood of pixels.
Foreground and background is determined from
labels (1 for foreground, -1 for background, 0 otherwise)
and is modeled with naive Bayes classifiers."""
m, n = im.shape[:2]
# RGB vector version (one pixel per row)
vim = im.reshape((-1, 3))
# RGB for foreground and background
foreground = im[labels == 1].reshape((-1, 3))
background = im[labels == -1].reshape((-1, 3))
train_data = [foreground, background]
# train naive Bayes classifier
bc = bayes.BayesClassifier()
bc.train(train_data)
# get probabilities for all pixels
bc_lables, prob = bc.classify(vim)
prob_fg = prob[0]
prob_bg = prob[1]
# create graph with m*n+2 nodes
gr = digraph()
gr.add_nodes(range(m * n + 2))
source = m * n # second to last is source
sink = m * n + 1 # last node is sink
# normalize
for i in range(vim.shape[0]):
vim[i] = vim[i] / (linalg.norm(vim[i]) + 1e-9)
# go through all nodes and add edges
for i in range(m * n):
# add edge from source
gr.add_edge((source, i), wt=(prob_fg[i] / (prob_fg[i] + prob_bg[i])))
# add edge to sink
gr.add_edge((i, sink), wt=(prob_bg[i] / (prob_fg[i] + prob_bg[i])))
# add edges to neighbors
if i % n != 0: # left exists
edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i - 1])**2) / sigma)
gr.add_edge((i, i - 1), wt=edge_wt)
if (i + 1) % n != 0: # right exists
edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i + 1])**2) / sigma)
gr.add_edge((i, i + 1), wt=edge_wt)
if i // n != 0: # up exists
edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i - n])**2) / sigma)
gr.add_edge((i, i - n), wt=edge_wt)
if i // n != m - 1: # down exists
edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i + n])**2) / sigma)
gr.add_edge((i, i + n), wt=edge_wt)
return gr
def cut_graph(gr, imsize):
""" Solve max flow of graph gr and return binary
labels of the resulting segmentation."""
m, n = imsize
source = m * n # second to last is source
sink = m * n + 1 # last is sink
# cut the graph
flows, cuts = maximum_flow(gr, source, sink)
# convert graph to image with labels
res = zeros(m * n)
for pos, label in cuts.items()[:-2]: # don't add source/sink
res[pos] = label
return res.reshape((m, n))
def save_as_pdf(gr, filename, show_weights=False):
from pygraph.readwrite.dot import write
import gv
dot = write(gr, weighted=show_weights)
gvv = gv.readstring(dot)
gv.layout(gvv, 'fdp')
gv.render(gvv, 'pdf', filename)
def show_labeling(im, labels):
""" Show image with foreground and background areas.
labels = 1 for foreground, -1 for background, 0 otherwise."""
imshow(im)
contour(labels, [-0.5, 0.5])
contourf(labels, [-1, -0.5], colors='b', alpha=0.25)
contourf(labels, [0.5, 1], colors='r', alpha=0.25)
# axis('off')
xticks([])
yticks([])
|
[
"gmonkman@mistymountains.biz"
] |
gmonkman@mistymountains.biz
|
59aeb4698e5be1a9660b979dcf41c2e3880deca6
|
14bb0b5d7478d3a8740cbc15cc7870fcd1fa8207
|
/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/common_v1.py
|
c8dcb7ba231cf3f57f8b8a5dd3782e2a124fbac7
|
[
"Apache-2.0"
] |
permissive
|
terigrossheim/tensorflow
|
2be34891c99e0fcf88cf8418632f24676f1620a7
|
ed9d45f096097c77664815c361c75e73af4f32d4
|
refs/heads/master
| 2022-11-06T12:08:10.099807
| 2020-06-29T12:10:56
| 2020-06-29T12:35:24
| 275,867,898
| 1
| 0
|
Apache-2.0
| 2020-06-29T16:21:41
| 2020-06-29T16:21:39
| null |
UTF-8
|
Python
| false
| false
| 4,320
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Serves as a common "main" function for all the SavedModel tests.
There is a fair amount of setup needed to initialize tensorflow and get it
into a proper TF2 execution mode. This hides that boilerplate.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from tensorflow.python import pywrap_mlir # pylint: disable=g-direct-tensorflow-import
# Use /tmp to make debugging the tests easier (see README.md)
flags.DEFINE_string('save_model_path', '', 'Path to save the model to.')
FLAGS = flags.FLAGS
def set_tf_options():
# Default TF1.x uses reference variables that are not supported by SavedModel
# v1 Importer. To use SavedModel V1 Importer, resource variables should be
# enabled.
tf.enable_resource_variables()
tf.compat.v1.disable_eager_execution()
# This function needs to take a "create_module_fn", as opposed to just the
# module itself, because the creation of the module has to be delayed until
# after absl and tensorflow have run various initialization steps.
def do_test(signature_def_map,
init_op=None,
canonicalize=False,
show_debug_info=False):
"""Runs test.
1. Performs absl and tf "main"-like initialization that must run before almost
anything else.
2. Converts signature_def_map to SavedModel V1
3. Converts SavedModel V1 to MLIR
4. Prints the textual MLIR to stdout (it is expected that the caller will have
FileCheck checks in its file to check this output).
This is only for use by the MLIR SavedModel importer tests.
Args:
signature_def_map: A map from string key to signature_def. The key will be
used as function name in the resulting MLIR.
init_op: The initializer op for the saved model. If set, it will generate a
initializer graph in the resulting MLIR.
canonicalize: If true, canonicalizer will be run on the resulting MLIR.
show_debug_info: If true, shows debug locations in the resulting MLIR.
"""
# Make LOG(ERROR) in C++ code show up on the console.
# All `Status` passed around in the C++ API seem to eventually go into
# `LOG(ERROR)`, so this makes them print out by default.
logging.set_stderrthreshold('error')
def app_main(argv):
"""Function passed to absl.app.run."""
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if FLAGS.save_model_path:
save_model_path = FLAGS.save_model_path
else:
save_model_path = tempfile.mktemp(suffix='.saved_model')
sess = tf.Session()
sess.run(tf.initializers.global_variables())
builder = tf.saved_model.builder.SavedModelBuilder(save_model_path)
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map,
main_op=init_op,
strip_default_attrs=True)
builder.save()
logging.info('Saved model to: %s', save_model_path)
# TODO(b/153507667): Set the following boolean flag once the hoisting
# variables logic from SavedModel importer is removed.
lift_variables = False
mlir = pywrap_mlir.experimental_convert_saved_model_v1_to_mlir(
save_model_path, ','.join([tf.saved_model.tag_constants.SERVING]),
lift_variables, show_debug_info)
if canonicalize:
mlir = pywrap_mlir.experimental_run_pass_pipeline(mlir, 'canonicalize',
show_debug_info)
print(mlir)
app.run(app_main)
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
c30392e2bb7b8ca47fa86eecc06d3ba2ebbf67c5
|
b6472217400cfce4d12e50a06cd5cfc9e4deee1f
|
/sites/top/api/rest/WlbItemDeleteRequest.py
|
90181cc7828d9d8c9ed09c35a46a07e62a9e7a08
|
[] |
no_license
|
topwinner/topwinner
|
2d76cab853b481a4963826b6253f3fb0e578a51b
|
83c996b898cf5cfe6c862c9adb76a3d6a581f164
|
refs/heads/master
| 2021-01-22T22:50:09.653079
| 2012-08-26T19:11:16
| 2012-08-26T19:11:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
'''
Created by auto_sdk on 2012-08-26 16:43:44
'''
from top.api.base import RestApi
class WlbItemDeleteRequest(RestApi):
def __init__(self,domain,port):
RestApi.__init__(self,domain, port)
self.item_id = None
self.user_nick = None
def getapiname(self):
return 'taobao.wlb.item.delete'
|
[
"timo.jiang@qq.com"
] |
timo.jiang@qq.com
|
a33002ee62b9f1e34ed9eabcd27de694c1e05a29
|
00f1f01f218fddc30a4194e999f0b48c45c47012
|
/elements/resources/migrations/0001_initial.py
|
fb4b807194a3f2905b8e8ba7d9f27baedea4299e
|
[] |
no_license
|
mikpanko/grakon
|
495659317c5933a95650b3f9000aab73e7335a13
|
6c64432c366a6ad44fb7227f22498335bd193f37
|
refs/heads/master
| 2020-12-26T00:19:52.799388
| 2013-07-28T02:33:19
| 2013-07-28T02:33:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,184
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_table('elements_entityresource', 'resources_entityresource')
if not db.dry_run:
# For permissions to work properly after migrating
orm['contenttypes.contenttype'].objects.filter(app_label='elements', model='EntityResource').update(app_label='elements.resources')
def backwards(self, orm):
db.rename_table('resources_entityresource', 'elements_entityresource')
if not db.dry_run:
# For permissions to work properly after migrating
orm['contenttypes.contenttype'].objects.filter(app_label='elements.resources', model='EntityResource').update(app_label='elements')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'resources.entityresource': {
'Meta': {'unique_together': "(('content_type', 'entity_id', 'resource'),)", 'object_name': 'EntityResource'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'})
}
}
complete_apps = ['resources']
|
[
"sergkop@gmail.com"
] |
sergkop@gmail.com
|
1dcaa9207f2ccf6e23d755d436896b1aef624ac1
|
a170461845f5b240daf2090810b4be706191f837
|
/pyqt/DemoFullCode-PythonQt/chap12QtChart/Demo12_2ChartConfig/myDialogPen.py
|
4cdbf17519ebcd8973fd4577bfea498efc83ca6b
|
[] |
no_license
|
longhuarst/QTDemo
|
ec3873f85434c61cd2a8af7e568570d62c2e6da8
|
34f87f4b2337a140122b7c38937ab4fcf5f10575
|
refs/heads/master
| 2022-04-25T10:59:54.434587
| 2020-04-26T16:55:29
| 2020-04-26T16:55:29
| 259,048,398
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,026
|
py
|
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import QApplication, QDialog,QColorDialog
from PyQt5.QtCore import pyqtSlot,Qt
##from PyQt5.QtWidgets import
from PyQt5.QtGui import QPen, QPalette,QColor
from ui_QWDialogPen import Ui_QWDialogPen
class QmyDialogPen(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.ui=Ui_QWDialogPen()
self.ui.setupUi(self) # build the UI
self.__pen=QPen()
## populate the "line style" ComboBox items
self.ui.comboPenStyle.clear()
self.ui.comboPenStyle.addItem("NoPen",0)
self.ui.comboPenStyle.addItem("SolidLine",1)
self.ui.comboPenStyle.addItem("DashLine",2)
self.ui.comboPenStyle.addItem("DotLine",3)
self.ui.comboPenStyle.addItem("DashDotLine",4)
self.ui.comboPenStyle.addItem("DashDotDotLine",5)
self.ui.comboPenStyle.addItem("CustomDashLine",6)
self.ui.comboPenStyle.setCurrentIndex(1)
##================= custom interface functions ====================
def setPen(self,pen): ## set the pen
self.__pen=pen
self.ui.spinWidth.setValue(pen.width()) # line width
i=int(pen.style()) # convert the enum to an int
self.ui.comboPenStyle.setCurrentIndex(i)
color=pen.color() #QColor
## self.ui.btnColor.setAutoFillBackground(True)
qss="background-color: rgb(%d, %d, %d)"%(
color.red(),color.green(),color.blue())
self.ui.btnColor.setStyleSheet(qss) # set the button background color with a style sheet
def getPen(self): ## return the pen
index=self.ui.comboPenStyle.currentIndex()
self.__pen.setStyle(Qt.PenStyle(index)) # line style
self.__pen.setWidth(self.ui.spinWidth.value()) # line width
color=self.ui.btnColor.palette().color(QPalette.Button)
self.__pen.setColor(color) # color
return self.__pen
@staticmethod ## class-level (static) helper
def staticGetPen(iniPen):
# must not take self and must not share a name with an instance method, so it cannot be called getPen()
Dlg=QmyDialogPen() # create a dialog
Dlg.setPen(iniPen) # set the initial QPen
pen=iniPen
ok=False
ret=Dlg.exec() # show the dialog modally
if ret==QDialog.Accepted:
pen=Dlg.getPen() # read back the pen
ok=True
return pen ,ok # return the configured QPen object
## ========== slots auto-connected by connectSlotsByName() ============
@pyqtSlot() ## pick a color
def on_btnColor_clicked(self):
color=QColorDialog.getColor()
if color.isValid(): # set the QPushButton background color with a style sheet
qss="background-color: rgb(%d, %d, %d);"%(
color.red(),color.green(),color.blue())
self.ui.btnColor.setStyleSheet(qss)
## ============ standalone test program ================================
if __name__ == "__main__":
app = QApplication(sys.argv)
iniPen=QPen(Qt.blue)
pen=QmyDialogPen.staticGetPen(iniPen) # exercise the static helper
sys.exit(app.exec_())
|
[
"841105197@qq.com"
] |
841105197@qq.com
|
449a4e9073d7775f05349340826f0d6e53ce9997
|
19da1a56f137a08772c347cf974be54e9c23c053
|
/lib/adafruit_motor/servo.py
|
0c46abd369009f496e2dd3f194a68ec1901f43f5
|
[] |
no_license
|
mk53202/mk53202-timeclock-pyportal
|
d94f45a9d186190a4bc6130077baa6743a816ef3
|
230a858d429f8197c00cab3e67dcfd3b295ffbe0
|
refs/heads/master
| 2021-02-04T05:38:25.533292
| 2020-02-27T22:45:56
| 2020-02-27T22:45:56
| 243,626,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,061
|
py
|
# The MIT License (MIT)
#
# Copyright (c) 2017 Scott Shawcroft for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_motor.servo`
====================================================
Servos are motor based actuators that incorporate a feedback loop into the design. These feedback
loops enable pulse width modulated control to determine position or rotational speed.
* Author(s): Scott Shawcroft
"""
__version__ = "2.0.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Motor.git"
# We disable the too few public methods check because this is a private base class for the two types
# of servos.
class _BaseServo: # pylint: disable-msg=too-few-public-methods
"""Shared base class that handles pulse output based on a value between 0 and 1.0
:param ~pulseio.PWMOut pwm_out: PWM output object.
:param int min_pulse: The minimum pulse length of the servo in microseconds.
:param int max_pulse: The maximum pulse length of the servo in microseconds."""
def __init__(self, pwm_out, *, min_pulse=750, max_pulse=2250):
self._pwm_out = pwm_out
self.set_pulse_width_range(min_pulse, max_pulse)
def set_pulse_width_range(self, min_pulse=750, max_pulse=2250):
"""Change min and max pulse widths."""
self._min_duty = int((min_pulse * self._pwm_out.frequency) / 1000000 * 0xffff)
max_duty = (max_pulse * self._pwm_out.frequency) / 1000000 * 0xffff
self._duty_range = int(max_duty - self._min_duty)
@property
def fraction(self):
"""Pulse width expressed as fraction between 0.0 (`min_pulse`) and 1.0 (`max_pulse`).
For conventional servos, corresponds to the servo position as a fraction
of the actuation range. Is None when the servo is disabled (pulse width of 0ms).
"""
if self._pwm_out.duty_cycle == 0: # Special case for disabled servos
return None
return (self._pwm_out.duty_cycle - self._min_duty) / self._duty_range
@fraction.setter
def fraction(self, value):
if value is None:
self._pwm_out.duty_cycle = 0 # disable the motor
return
if not 0.0 <= value <= 1.0:
raise ValueError("Must be 0.0 to 1.0")
duty_cycle = self._min_duty + int(value * self._duty_range)
self._pwm_out.duty_cycle = duty_cycle
class Servo(_BaseServo):
"""Control the position of a servo.
:param ~pulseio.PWMOut pwm_out: PWM output object.
:param int actuation_range: The physical range of motion of the servo in degrees, \
for the given ``min_pulse`` and ``max_pulse`` values.
:param int min_pulse: The minimum pulse width of the servo in microseconds.
:param int max_pulse: The maximum pulse width of the servo in microseconds.
``actuation_range`` is an exposed property and can be changed at any time:
.. code-block:: python
servo = Servo(pwm)
servo.actuation_range = 135
The specified pulse width range of a servo has historically been 1000-2000us,
for a 90 degree range of motion. But nearly all modern servos have a 170-180
degree range, and the pulse widths can go well out of the range to achieve this
extended motion. The default values here of ``750`` and ``2250`` typically give
135 degrees of motion. You can set ``actuation_range`` to correspond to the
actual range of motion you observe with your given ``min_pulse`` and ``max_pulse``
values.
.. warning:: You can extend the pulse width above and below these limits to
get a wider range of movement. But if you go too low or too high,
the servo mechanism may hit the end stops, buzz, and draw extra current as it stalls.
Test carefully to find the safe minimum and maximum.
"""
def __init__(self, pwm_out, *, actuation_range=180, min_pulse=750, max_pulse=2250):
super().__init__(pwm_out, min_pulse=min_pulse, max_pulse=max_pulse)
self.actuation_range = actuation_range
"""The physical range of motion of the servo in degrees."""
self._pwm = pwm_out
@property
def angle(self):
"""The servo angle in degrees. Must be in the range ``0`` to ``actuation_range``.
Is None when servo is disabled."""
if self.fraction is None: # special case for disabled servos
return None
return self.actuation_range * self.fraction
@angle.setter
def angle(self, new_angle):
if new_angle is None: # disable the servo by sending 0 signal
self.fraction = None
return
if new_angle < 0 or new_angle > self.actuation_range:
raise ValueError("Angle out of range")
self.fraction = new_angle / self.actuation_range
class ContinuousServo(_BaseServo):
"""Control a continuous rotation servo.
:param int min_pulse: The minimum pulse width of the servo in microseconds.
:param int max_pulse: The maximum pulse width of the servo in microseconds."""
@property
def throttle(self):
"""How much power is being delivered to the motor. Values range from ``-1.0`` (full
throttle reverse) to ``1.0`` (full throttle forwards.) ``0`` will stop the motor from
spinning."""
return self.fraction * 2 - 1
@throttle.setter
def throttle(self, value):
if value is None:
raise ValueError("Continuous servos cannot spin freely")
if value > 1.0 or value < -1.0:
raise ValueError("Throttle must be between -1.0 and 1.0")
self.fraction = (value + 1) / 2
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.throttle = 0
def deinit(self):
"""Stop using the servo."""
self.throttle = 0
|
[
"mkoster@stack41.com"
] |
mkoster@stack41.com
|
f0202a4f34472c4c3be1f395aaae592e9ea9f454
|
7d9f92fba6af53bd385e0b4173134241c9998fff
|
/items/admin.py
|
418f8f0f830fcebdebd5feddc8bd7ec707691ed5
|
[] |
no_license
|
ljarufe/intifil
|
856f77c6ece7f444fd331a3eff3c35260201f78f
|
d478a8a1309d526a2508ca7b559e16de03aaa384
|
refs/heads/master
| 2021-01-02T09:09:13.613026
| 2013-10-21T17:00:03
| 2013-10-21T17:00:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from modeltranslation.admin import TranslationAdmin
from common.admin import BasePermissionAdmin
from items.models import Category, Photo, HomePhoto, Item, Video, SubItem
class CategoryAdmin(BasePermissionAdmin, TranslationAdmin):
"""
Category model admin
"""
list_display = ("name", "slug",)
class PhotoInLine(admin.TabularInline):
"""
Photo inline model admin
"""
model = Photo
exclude = ("name",)
class VideoInLine(admin.TabularInline):
"""
Video inline model admin
"""
model = Video
exclude = ("name",)
class HomePhotoAdmin(admin.ModelAdmin):
"""
Home photo model admin
"""
list_display = ("get_item", "get_shape_display",)
class ItemAdmin(TranslationAdmin):
"""
Item model admin
"""
list_display = ("name", "category", "order",)
list_display_links = ("name", "category")
list_editable = ('order', )
list_filter = ("category",)
exclude = ('order',)
def save_model(self, request, obj, form, change):
"""
Saves a new home page item, using the default order, at the end of the list
"""
if not change:
if form.cleaned_data["home_photo"]:
obj.order = Item.get_default_order()
obj.save()
class SubItemAdmin(TranslationAdmin):
"""
Subitem model admin
"""
list_display = ("name", "item", "order",)
list_display_links = ("name", "item")
list_editable = ('order', )
list_filter = ("item",)
inlines = [PhotoInLine, VideoInLine,]
class PhotoVideoAdmin(TranslationAdmin):
"""
Photo and video model admin
"""
list_display = ("name", "subitem", "order",)
list_display_links = ("name", "subitem")
list_editable = ('order', )
list_filter = ("subitem",)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Photo, PhotoVideoAdmin)
admin.site.register(HomePhoto, HomePhotoAdmin)
admin.site.register(Item, ItemAdmin)
admin.site.register(SubItem, SubItemAdmin)
admin.site.register(Video, PhotoVideoAdmin)
|
[
"luisjarufe@gmail.com"
] |
luisjarufe@gmail.com
|
e07ad01c23c45836b064759f00be7e07f68f04e8
|
f04a36fdaa415c6a47d3727e783b2dce11e3dd43
|
/blog/views.py
|
8ae814785c3273121fdfa345ef1043693a0d0a73
|
[
"BSD-3-Clause"
] |
permissive
|
hellprise/cook_blog
|
e9486452cc53a1300fce5ea9ea54dbe5c0408bf0
|
d55734af1625256f940e55d267beb38d911bfda4
|
refs/heads/main
| 2023-06-25T21:43:20.284389
| 2021-07-28T14:36:45
| 2021-07-28T14:36:45
| 390,378,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
from django.shortcuts import render
from django.views.generic import ListView, DetailView
from blog.models import Post
class PostListView(ListView):
model = Post
def get_queryset(self):
return Post.objects.filter(category__slug=self.kwargs.get('slug')).select_related('category')
class PostDetailView(DetailView):
model = Post
context_object_name = 'post'
slug_url_kwarg = 'post_slug'
def home(request):
return render(request, 'base.html')
|
[
"you@example.com"
] |
you@example.com
|
95a4b7f4ef92f184eefee95bceee085fc44064e8
|
ecd2c20608e1f4a1646c87767762bd72db618d65
|
/photo_blog/settings.py
|
a119b668239d31d500b4fa6a3be1f70c0a501c4a
|
[] |
no_license
|
RianGirard/photo_blog
|
129858ee32cbc2ff0521c8219b72b9d83c015726
|
e461fa62abe027965b7143cce544d25634d5bf9c
|
refs/heads/master
| 2023-06-20T14:36:38.040663
| 2021-07-21T01:02:13
| 2021-07-21T01:02:13
| 383,640,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,867
|
py
|
"""
Django settings for photo_blog project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from decouple import config
config.encoding = 'cp1251'
SECRET_KEY = config('SECRET_KEY')
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MEDIA_URL = '/media/' # for image upload
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # ditto
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'blog',
'profiles',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites', # required for django-allauth
'allauth', # ditto
'allauth.account', # ditto
'allauth.socialaccount', # ditto
# 'allauth.socialaccount.providers.github', # ditto
'sorl.thumbnail', # required for sorl.thumbnail
'crispy_forms',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'photo_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'photo_blog/templates')], # added this in: os.path.join(BASE_DIR, '[mysite]/templates')
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'photo_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = ( # added this
os.path.join(BASE_DIR, 'photo_blog/static'),
)
# following are parameters for django-allauth:
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SITE_ID = 1
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = '/'
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_LOGIN_ON_PASSWORD_RESET = True
ACCOUNT_LOGOUT_REDIRECT = '/'
ACCOUNT_PRESERVE_USERNAME_CASING = False
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = True
ACCOUNT_SIGNUP_REDIRECT_URL = '/'
ACCOUNT_USERNAME_MIN_LENGTH = 2
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend"
)
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # for PROD
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # for DEV
|
[
"riangirard@gmail.com"
] |
riangirard@gmail.com
|
5e0d645e8d8db30e316d5aab006e9160adad1df9
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_impromptus.py
|
fee549bf75880518cd29b8bb36287ecde035b251
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# class header
class _IMPROMPTUS():
    def __init__(self):
        self.name = "IMPROMPTUS"
        self.definitions = ['impromptu']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['impromptu']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
4e5edd8fae1f0b8969b8d01ebb9cdc696f1cb1e4
|
0abc546a1442cae56ddcdc43f85497b37fc89036
|
/scripts/graph_check_transitivity.py
|
b1793d5f4b9923cd0e824952d489b64036bc0a11
|
[] |
no_license
|
yangjl/cgat
|
01a535531f381ace0afb9ed8dc3a0fcff6290446
|
01758b19aa1b0883f0e648f495b570f1b6159be4
|
refs/heads/master
| 2021-01-18T03:55:14.250603
| 2014-02-24T10:32:45
| 2014-02-24T10:32:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,395
|
py
|
'''
graph_check_transitivity.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python graph_check_transitivity.py --help
Type::
python graph_check_transitivity.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import string
import re
import getopt
import time
import optparse
import math
import tempfile
""" program $Id: graph_check_transitivity.py 2782 2009-09-10 11:40:29Z andreas $
python graph_check_transitivity < graph.in
check whether all edges in a graph are transitive, i.e.,
for every two edges A->B and B->C check whether A->C exists.
Edges are taken to be undirected.
"""
import CGAT.Experiment as E
import CGAT.Histogram as Histogram
def main( argv = None ):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv == None: argv = sys.argv
parser = E.OptionParser( version = "%prog version: $Id: graph_check_transitivity.py 2782 2009-09-10 11:40:29Z andreas $")
parser.add_option("--filename-missing", dest="filename_missing", type="string",
help="missing entries.")
parser.add_option("--filename-found", dest="filename_found", type="string",
help="found entries.")
parser.add_option("--report-step1", dest="report_step1", type="int",
help="report interval for input.")
parser.add_option("--report-step2", dest="report_step2", type="int",
help="report interval for processing.")
parser.add_option("--use-subsets", dest="subsets", action="store_true",
help="do subset calculation. Third field contains a redundancy code.")
parser.set_defaults(
filename_missing = None,
filename_found = None,
report_step1 = 100000,
report_step2 = 10000,
subsets = False,
)
(options, args) = E.Start( parser )
# retrieve data
vals = {}
niterations = 0
ninput = 0
for line in sys.stdin:
if line[0] == "#": continue
niterations += 1
if options.loglevel >= 1 and (niterations % options.report_step1 == 0):
options.stdlog.write( "# input: %i\n" % (niterations))
options.stdlog.flush()
v1, v2, w = line[:-1].split("\t")[:3]
if v1 == v2: continue
if v1 not in vals: vals[v1] = []
if v2 not in vals: vals[v2] = []
if not options.subsets:
w = ninput
vals[v1].append( (v2, w) )
vals[v2].append( (v1, w) )
ninput += 1
## make everything unique
for key, v1 in vals.items():
vals[key] = tuple(set(v1))
keys = vals.keys()
keys.sort()
niterations = 0
nkeys = len(keys)
missing = []
ntotal = 0
nfound = 0
counted = {}
nremoved = 0
if options.filename_found:
outfile_found = open(options.filename_found, "w")
for v1 in keys:
niterations += 1
if options.loglevel >= 1 and (niterations % options.report_step2 == 0):
options.stdlog.write( "# loop: %i\n" % (niterations))
options.stdlog.flush()
for v2, c2 in vals[v1]:
## only to half-symmetric test
for v3, c3 in vals[v2]:
if (c2, c3) in counted:
nremoved += 1
# print "v1=", v1, "v2=", v2, "v3=", v3, "c2=", c2, "c3=", c3, "removed"
continue
## do not do self-comparisons
if v1 == v3: continue
if c2 == c3: continue
counted[(c2,c3)] = True
ntotal += 1
if v3 in map(lambda x: x[0], vals[v1]) or v1 in map(lambda x: x[0], vals[v3]):
nfound += 1
if options.filename_found:
outfile_found.write( "\t".join( (v1, v2, v3) ) + "\n" )
# print "v1=", v1, "v2=", v2, "v3=", v3, "c2=", c2, "c3=", c3, "found"
else:
missing.append( (v1, v2, v3) )
# print "v1=", v1, "v2=", v2, "v3=", v3, "c2=", c2, "c3=", c3, "missing"
nmissing = len(missing)
    options.stdout.write( "number of edges\t%i\n" % ninput)
options.stdout.write( "number of vertices\t%i\n" % nkeys)
options.stdout.write( "number of removed triplets\t%i\n" % nremoved)
options.stdout.write( "number of tested triplets\t%i\t%6.4f\n" % (ntotal, float(ntotal) / float(ntotal)))
options.stdout.write( "number of realized triplets\t%i\t%6.4f\n" % (nfound, float(nfound) / float(ntotal)))
options.stdout.write( "number of incomplete triplets\t%i\t%6.4f\n" % (nmissing, float(nmissing) / float(ntotal)))
if options.filename_missing:
outfile = open(options.filename_missing, "w")
for v1, v2, v3 in missing:
outfile.write( "\t".join( (v1, v2, v3) ) + "\n")
outfile.close()
if options.filename_found:
outfile_found.close()
E.Stop()
if __name__ == "__main__":
sys.exit( main( sys.argv) )
|
[
"andreas.heger@gmail.com"
] |
andreas.heger@gmail.com
|
b695dc1cd6ac27aeb81909e86ad63a50c0fac5c4
|
23611933f0faba84fc82a1bc0a85d97cf45aba99
|
/google-cloud-sdk/lib/surface/compute/instance_groups/describe.py
|
8a88e0e197d87deb862c3ee4c7fd71f847b772b4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KaranToor/MA450
|
1f112d1caccebdc04702a77d5a6cee867c15f75c
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
refs/heads/master
| 2021-06-21T06:17:42.585908
| 2020-12-24T00:36:28
| 2020-12-24T00:36:28
| 79,285,433
| 1
| 1
|
Apache-2.0
| 2020-12-24T00:38:09
| 2017-01-18T00:05:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,985
|
py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for describing instance groups."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import instance_groups_utils
class Describe(base_classes.MultiScopeDescriber):
"""Describe an instance group."""
SCOPES = (base_classes.ScopeType.regional_scope,
base_classes.ScopeType.zonal_scope)
@property
def global_service(self):
return None
@property
def regional_service(self):
return self.compute.regionInstanceGroups
@property
def zonal_service(self):
return self.compute.instanceGroups
@property
def global_resource_type(self):
return None
@property
def regional_resource_type(self):
return 'regionInstanceGroups'
@property
def zonal_resource_type(self):
return 'instanceGroups'
@staticmethod
def Args(parser):
base_classes.MultiScopeDescriber.AddScopeArgs(
parser, 'instanceGroups', Describe.SCOPES)
def ComputeDynamicProperties(self, args, items):
return instance_groups_utils.ComputeInstanceGroupManagerMembership(
compute=self.compute,
project=self.project,
http=self.http,
batch_url=self.batch_url,
items=items,
filter_mode=instance_groups_utils.InstanceGroupFilteringMode.ALL_GROUPS)
Describe.detailed_help = base_classes.GetMultiScopeDescriberHelp(
'instance group', Describe.SCOPES)
|
[
"toork@uw.edu"
] |
toork@uw.edu
|
e36d5216b192e842d632a87650507221796a33e3
|
bcee50b3cbaf7a8000dffb7326cf467ae432b626
|
/basic/15650/nm_2_dfs.py
|
cfe1b263b0584c44a10c3b12c47bba7fd97e0bce
|
[] |
no_license
|
entrekid/algorithms
|
53e5e563f6350b76047d8163ecd6e623dbe6e8d1
|
64377821718b3e44faf6a05be4d3ebf99b674489
|
refs/heads/master
| 2022-04-06T21:49:42.081981
| 2020-03-03T14:58:52
| 2020-03-03T14:58:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
n, m = map(int, input().split())
check_list = [False] * n
num_list = [elem + 1 for elem in range(n)]
result_list = []
def nm_dfs2(num):
if num == m:
print(*result_list)
return
for iter in range(n):
if check_list[iter] == True:
continue
check_list[iter] = True
result_list.append(num_list[iter])
nm_dfs2(num + 1)
result_list.pop()
for j in range(iter + 1, n):
check_list[j] = False
nm_dfs2(0)
|
[
"root@LAPTOP-S2FAKB33.localdomain"
] |
root@LAPTOP-S2FAKB33.localdomain
|
005e6a8d7f20ae9bcc7a387f6cf8b691bc2da6d2
|
aaa3ab0c89f558a33ddcad9bcc5a687049dbc599
|
/backend/src/websocket/socket.py
|
c7efe44db002bc33abccdeaebe9cf23e1008b529
|
[] |
no_license
|
vetordev/Hypersup
|
5d059282971bf45f54f8be49071984371f98aabe
|
961ac24209a3772fef5016ca851f82bc2fc40bd1
|
refs/heads/master
| 2021-02-16T18:40:06.197712
| 2020-03-18T22:20:13
| 2020-03-18T22:20:13
| 245,034,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
from flask import request
class Socket:
def __init__(self, socket, app):
self.socket = socket
self.app = app
def run(self):
@self.socket.on('connect')
def connect():
            print('New Connection; Id: {id}'.format(id=request.sid))
|
[
"you@example.com"
] |
you@example.com
|
1d9c3616c035da8730928b2c6d124ebe273b931d
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/torch/distributed/checkpoint/state_dict_saver.py
|
a99cd129aeb637da7d11cb88ad101de0a72d8c56
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 4,458
|
py
|
from typing import Optional
import torch
import torch.distributed as dist
from .planner import SavePlanner
from .default_planner import DefaultSavePlanner
from .storage import (
StorageWriter,
)
from .metadata import Metadata, STATE_DICT_TYPE
from .utils import _DistWrapper
__all__ = ["save_state_dict"]
def save_state_dict(
state_dict: STATE_DICT_TYPE,
storage_writer: StorageWriter,
process_group: Optional[dist.ProcessGroup] = None,
coordinator_rank: int = 0,
no_dist: bool = False,
planner: Optional[SavePlanner] = None,
) -> Metadata:
"""
Saves a distributed model in SPMD style.
This function is different from ``torch.save()`` as it handles
``ShardedTensor`` by having each rank only save their local shards.
.. warning::
There is no guarantees of Backwards Compatibility across PyTorch versions
for saved state_dicts.
.. warning::
If using the `process_group` argument, make sure that only its ranks
call `save_state_dict` and that all data in state_dict belong to it.
.. note::
When saving checkpoint for FSDP's `ShardingStrategy.HYBRID_SHARD`, only one of
the shard_group should be calling `save_state_dict` and the corresponding process
group needs to be passed in.
.. note::
This function can be used to save a state_dict without having a process group
initialized by passing ``no_dist=True``.
Args:
state_dict (Dict[str, Any]): The state_dict to save.
storage_writer (StorageWriter):
Instance of StorageWrite use to perform writes.
process_group (ProcessGroup):
ProcessGroup to be used for cross-rank synchronization.
coordinator_rank (int): Rank to use to coordinate the checkpoint.
rank0 is used by default.
no_dist (bool): If ``True``, distributed checkpoint will not save
in SPMD style. (Default: ``False``)
Returns:
Metadata: Metadata object for the saved checkpoint.
Example:
>>> # xdoctest: +SKIP
>>> my_model = MyModule()
>>> model_state_dict = my_model.state_dict()
>>> fs_storage_writer = torch.distributed.checkpoint.FileSystemWriter("/checkpoint/1")
>>> torch.distributed.checkpoint.save_state_dict(
>>> state_dict=model_state_dict,
>>> storage_writer=fs_storage_writer,
>>> )
.. note::
save_state_dict uses collectives to coordinate writes across ranks.
For NCCL-based process groups, internal tensor representations of
objects must be moved to the GPU device before communication takes place.
In this case, the device used is given by ``torch.cuda.current_device()``
and it is the user's responsibility to ensure that this is set so that
each rank has an individual GPU, via ``torch.cuda.set_device()``.
"""
torch._C._log_api_usage_once("torch.distributed.checkpoint.save_state_dict")
distW = _DistWrapper(process_group, not no_dist, coordinator_rank)
if planner is None:
planner = DefaultSavePlanner()
assert planner is not None
global_metatadata = None
def local_step():
assert planner is not None
planner.set_up_planner(state_dict, distW.is_coordinator)
storage_writer.set_up_storage_writer(distW.is_coordinator)
local_plan = planner.create_local_plan()
local_plan = storage_writer.prepare_local_plan(local_plan)
return local_plan
def global_step(all_local_plans):
nonlocal global_metatadata
assert planner is not None
all_local_plans, global_metatadata = planner.create_global_plan(
all_local_plans
)
all_local_plans = storage_writer.prepare_global_plan(all_local_plans)
return all_local_plans
central_plan = distW.reduce_scatter("plan", local_step, global_step)
def write_data():
assert planner is not None
final_local_plan = planner.finish_plan(central_plan)
all_writes = storage_writer.write_data(final_local_plan, planner)
all_writes.wait()
return all_writes.value()
def finish_checkpoint(all_results):
assert global_metatadata is not None
storage_writer.finish(metadata=global_metatadata, results=all_results)
return global_metatadata
return distW.all_reduce("write", write_data, finish_checkpoint)
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
82a0e0d28994984b8a494fad02e967299d94d678
|
eb817a5a5fd66d00906d2ac2574e2ef749780877
|
/defining_classes/demos_metaclasses.py
|
b2fd1f30b2205a42c5e9f106569b3de0e8110ce2
|
[
"MIT"
] |
permissive
|
Minkov/python-oop-2021-02
|
5afcc356f59196fdfcfd217b455b8621176f578b
|
bd387dde165f4338eed66c4bc0b4b516ee085340
|
refs/heads/main
| 2023-04-01T08:07:39.096457
| 2021-04-05T18:24:40
| 2021-04-05T18:24:40
| 341,306,261
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
class Singleton(type):
__instances = {}
def __new__(cls, *args, **kwargs):
return super().__new__(cls, *args, **kwargs)
def __call__(cls, *args, **kwargs):
if cls not in cls.__instances:
cls.__instances[cls] = super().__call__(*args, **kwargs)
return cls.__instances[cls]
class PersonFactory(metaclass=Singleton):
pass
p = PersonFactory()
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
|
[
"DonchoMinkov@gmail.com"
] |
DonchoMinkov@gmail.com
|
e1059ae6e9b86f602d1bc6205a6ed704ffdc4962
|
5845ee6d82d9f691e846360fa267b9cca6829d99
|
/supervised_learning/0x0F-word_embeddings/0-bag_of_words.py
|
637623c05195091bb4a31ba366e5d15fe022ab76
|
[] |
no_license
|
jlassi1/holbertonschool-machine_learning
|
6e8c11ebaf2fd57e101bd0b20b7d83358cc15374
|
d45e18bcbe1898a1585e4b7b61f3a7af9f00e787
|
refs/heads/main
| 2023-07-02T20:25:52.216926
| 2021-08-11T14:19:49
| 2021-08-11T14:19:49
| 317,224,593
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
#!/usr/bin/env python3
""" 0. Bag Of Words """
from sklearn.feature_extraction.text import CountVectorizer
def bag_of_words(sentences, vocab=None):
"""function that creates a bag of words embedding matrix"""
vectorizer = CountVectorizer(vocabulary=vocab)
X = vectorizer.fit_transform(sentences)
features = vectorizer.get_feature_names()
embeddings = X.toarray()
return embeddings, features
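# A minimal usage sketch (hypothetical sentences; CountVectorizer lowercases
# the text and sorts the vocabulary alphabetically):
#
#   embeddings, features = bag_of_words(["the cat sat", "the cat sat on the mat"])
#   # features   -> ['cat', 'mat', 'on', 'sat', 'the']
#   # embeddings -> [[1 0 0 1 1]
#   #                [1 1 1 1 2]]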
|
[
"khawlajlassi1990@gmail.com"
] |
khawlajlassi1990@gmail.com
|
ee864bf4f45435d16fd37093d8533828dfc9fe61
|
ad469d0ca144c485fc0cdcfb2ebfdd0bddf86271
|
/src/models/base.py
|
54694b4039a9f44b73fa58b3fa5fc83c93fa823d
|
[] |
no_license
|
ngxbac/Kaggle-Google-Landmark-2019
|
3e8a29e83e835b29262df439b9af12ca27cee768
|
274864e2778acde9007c096607c113c268882343
|
refs/heads/master
| 2020-05-31T04:37:32.003023
| 2019-06-04T00:41:51
| 2019-06-04T00:41:51
| 190,102,248
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,154
|
py
|
import torch
import torch.nn as nn
import torchvision.models as models
class Net(nn.Module):
def __init__(self, num_classes=100, norm=True, scale=True):
super(Net,self).__init__()
self.extractor = Extractor()
self.embedding = Embedding()
self.classifier = Classifier(num_classes)
self.s = nn.Parameter(torch.FloatTensor([10]))
self.norm = norm
self.scale = scale
def forward(self, x):
x = self.extractor(x)
x = self.embedding(x)
if self.norm:
x = self.l2_norm(x)
if self.scale:
x = self.s * x
x = self.classifier(x)
return x
def extract(self, x):
x = self.extractor(x)
x = self.embedding(x)
x = self.l2_norm(x)
return x
def l2_norm(self,input):
input_size = input.size()
buffer = torch.pow(input, 2)
normp = torch.sum(buffer, 1).add_(1e-10)
norm = torch.sqrt(normp)
_output = torch.div(input, norm.view(-1, 1).expand_as(input))
output = _output.view(input_size)
return output
def weight_norm(self):
w = self.classifier.fc.weight.data
norm = w.norm(p=2, dim=1, keepdim=True)
self.classifier.fc.weight.data = w.div(norm.expand_as(w))
class Extractor(nn.Module):
def __init__(self):
super(Extractor,self).__init__()
basenet = models.resnet50(pretrained=True)
self.extractor = nn.Sequential(*list(basenet.children())[:-1])
for param in self.extractor.parameters():
param.requires_grad = False
def forward(self, x):
x = self.extractor(x)
x = x.view(x.size(0), -1)
return x
class Embedding(nn.Module):
def __init__(self):
super(Embedding,self).__init__()
self.fc = nn.Linear(2048, 2048)
def forward(self, x):
x = self.fc(x)
return x
class Classifier(nn.Module):
def __init__(self, num_classes):
super(Classifier,self).__init__()
self.fc = nn.Linear(2048, num_classes, bias=False)
def forward(self, x):
x = self.fc(x)
return x
|
[
"ngxbac.dt@gmail.com"
] |
ngxbac.dt@gmail.com
|
e0496f50c98467811842743bdcac4c7f1dc14c9e
|
c424ffe3c31422e72810b4865f482d505d145e87
|
/fliermailses/models.py
|
7eaea73f99fb1b029fe3303c6f16d0ab41e0e949
|
[
"BSD-2-Clause"
] |
permissive
|
hdknr/fliermail-ses
|
d49724b7f1eb648a806e4301738db96a50e098ca
|
91366535b1a0890b4766c09d70aee1ec5387f7f0
|
refs/heads/master
| 2020-06-19T04:57:02.261919
| 2018-03-15T05:18:16
| 2018-03-15T05:18:16
| 94,177,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,760
|
py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from . import defs, methods, querysets
class Service(defs.Service, methods.Service):
class Meta:
verbose_name = _('SES Service')
verbose_name_plural = _('SES Service')
def __str__(self):
return self.name
class Source(defs.Source, methods.Source):
service = models.ForeignKey(
Service, verbose_name=_('Service'), help_text=_('Service Help'),
on_delete=models.SET_NULL,
null=True, blank=True, default=None, )
class Meta:
verbose_name = _('SES Source')
verbose_name_plural = _('SES Source')
def __str__(self):
return "ses:{0}".format(self.address)
class Topic(defs.Topic):
source = models.ForeignKey(
Source, null=True, blank=True, default=None,
on_delete=models.SET_NULL, )
class Meta:
verbose_name = _('SNS Topic')
verbose_name_plural = _('SNS Topic')
unique_together = (('source', 'topic', ), )
def __str__(self):
return u"{0} {1}".format(
self.source.__str__(),
self.get_topic_display())
class Notification(defs.Notification, methods.Notification):
topic = models.ForeignKey(
Topic, null=True, blank=True, default=None,
on_delete=models.SET_NULL, )
class Meta:
verbose_name = _('Notification')
verbose_name_plural = _('Notification')
objects = querysets.NotificationQuerySet.as_manager()
class Certificate(defs.Certificate, methods.Certificate):
service = models.ForeignKey(
Service, on_delete=models.CASCADE, )
class Meta:
verbose_name = _('SES Certificate')
verbose_name_plural = _('SES Certificate')
|
[
"gmail@hdknr.com"
] |
gmail@hdknr.com
|
df2ffa0accf83f4363cc11f2b219eb6f5a74b0c3
|
dd834845a2ab346dafd04f3beb4ba0916b64dc51
|
/test_case/task/test_200smart_sanity_clear_001.py
|
fc61417bcb137b08429c8f21631cfea146deaf4b
|
[] |
no_license
|
Lewescaiyong/auto_test_framework
|
ae51726b705fbf125c30fce447c7c75510597047
|
2d3490393737b3e5f086cb6623369b988ffce67f
|
refs/heads/master
| 2020-11-25T09:18:29.209261
| 2020-02-10T13:48:12
| 2020-02-10T13:48:12
| 228,590,729
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,443
|
py
|
#!/usr/bin/env python
from lib.exceptions.check_exception import CheckException
from lib.base.script.integration_test.case_mw import CaseMW
class Test200SmartSanityClear001(CaseMW):
"""Clear OB
No.: test_200smart_sanity_clear_001
Preconditions:
1. Open Micro/WINr;
2. Set up connection with PLC;
3. Download a project which has OB,DB,SDB;
Step actions:
1. Clear program block;
2. Compare;
Expected results:
1. Clear successful;
2. The OB is different;
Priority: H
Author: Cai, Yong
ChangeInfo: Cai, Yong 2019-09-20 create
"""
def prepare(self):
"""the preparation before executing the test steps
Args:
Example:
Return:
Author: Cai, Yong
IsInterface: False
ChangeInfo: Cai, Yong 2019-09-20 create
"""
super(Test200SmartSanityClear001, self).prepare()
self.logger.info('Preconditions:')
self.logger.info('1. Open Micro/WINr; ')
self.logger.info('2. Set up connection with PLC;')
self.logger.info('3. Download a project which has OB,DB,SDB;')
self.MicroWIN.test_prepare('ob_db_sdb_01.smart', False)
def process(self):
"""execute the test steps
Args:
Example:
Return:
Author: Cai, Yong
IsInterface: False
ChangeInfo: Cai, Yong 2019-09-20 create
"""
super(Test200SmartSanityClear001, self).process()
self.logger.info('Step actions:')
self.logger.info('1. Clear program block;')
result1 = self.PLC['1'].plc_clear('ob')
self.logger.info('2. Compare;')
result2 = self.MicroWIN.compare_with_plc()
self.logger.info('Expected results:')
self.logger.info('1. Clear successful;')
if result1['code'] != 0:
raise CheckException('1. Clear OB failed;')
self.logger.info('2. The OB is different;')
if not ((not result2['ob']) and result2['db'] and result2['sdb']):
self.logger.info('Compare result: %s' % result2)
raise CheckException('Compare failed;')
def cleanup(self):
"""clean up after performing the test steps
Args:
Example:
Return:
Author: Cai, Yong
IsInterface: False
ChangeInfo: Cai, Yong 2019-09-20 create
"""
super(Test200SmartSanityClear001, self).cleanup()
|
[
"1351153527@qq.com"
] |
1351153527@qq.com
|
a89d9222bee0ded8bd36c1c69d2dacb9bfb28e01
|
7a6a2076cffbbd47316818b37ddf22a932002065
|
/python/702 - Search in a Sorted Array of Unknown Size/main.py
|
f23ffb8bc239c9335e262a01b41c66efce7866a5
|
[] |
no_license
|
or0986113303/LeetCodeLearn
|
6bd0aa16c8c80581e1c85032aca0f7a055f5e234
|
96fdc45d15b4150cefe12361b236de6aae3bdc6a
|
refs/heads/develop
| 2023-06-14T01:30:41.103572
| 2021-07-01T08:59:08
| 2021-07-01T08:59:08
| 291,066,699
| 0
| 0
| null | 2020-08-31T02:44:26
| 2020-08-28T14:25:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,577
|
py
|
# """
# This is ArrayReader's API interface.
# You should not implement it, or speculate about its implementation
# """
#class ArrayReader(object):
# def get(self, index):
# """
# :type index: int
# :rtype int
# """
class Solution(object):
def fibosearch(self, source, target):
fibo1 = 1
fibo2 = 0
fibosum = fibo1 + fibo2
offset = -1
capacity = 0
resulttmp = float('-inf')
while resulttmp < target:
fibo2 = fibo1
fibo1 = fibosum
fibosum = fibo1 + fibo2
resulttmp = source.get(fibosum)
capacity = fibosum + 1
print(capacity)
while fibosum > 1:
operatorindex = min(fibo2 + offset, capacity - 1)
if source.get(operatorindex) == target:
return operatorindex
elif source.get(operatorindex) > target:
fibosum = fibo1
fibo1 = fibo2
fibo2 = fibosum - fibo1
else :
fibo2 = fibo1
fibo1 = fibosum
fibosum = fibo1 + fibo2
offset = operatorindex
return -1
def search(self, reader, target):
"""
:type reader: ArrayReader
:type target: int
:rtype: int
"""
if reader is None:
return -1
elif reader.get(0) == target:
return 0
result = self.fibosearch(reader, target)
print(result)
return result
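# A minimal local test sketch (hypothetical stub; on LeetCode the judge
# supplies ArrayReader, which returns 2**31 - 1 for out-of-range indexes):
#
#   class ArrayReader(object):
#       def __init__(self, nums):
#           self._nums = nums
#       def get(self, index):
#           return self._nums[index] if index < len(self._nums) else 2 ** 31 - 1
#
#   print(Solution().search(ArrayReader([-1, 0, 3, 5, 9, 12]), 9))  # -> 4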
|
[
"or0986113303@gmail.com"
] |
or0986113303@gmail.com
|
81bf3c105d1a1393058d90b3633bcebdd5ae4fbf
|
9b1446b26e81a79c303f9799fb6a91785c7adb03
|
/.history/Code/histogram_20200120113537.py
|
4f52929b9fac6bf129f57f7e695e94974d77475a
|
[] |
no_license
|
SamirIngley/CS1.2-Tweet-Gen
|
017ea15b1113881a156ff24682828bc654eb6c81
|
bcd95fa63e05849cbf8e36230d8e31032b99daaa
|
refs/heads/master
| 2020-12-14T20:19:57.733290
| 2020-08-04T23:19:23
| 2020-08-04T23:19:23
| 234,856,234
| 0
| 0
| null | 2020-06-05T21:13:04
| 2020-01-19T07:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 696
|
py
|
def list_histo(source):
    ''' Takes text, splits it into words, and counts how many times each word
    appears by comparing it to the rest of the words in the text. A used list
    ensures each word is only counted once.
'''
histo = []
used = []
text = source.split()
print(text)
for word in text:
counter = 0
if word in used:
continue
used.append(word)
for word2 in text:
if word == word2:
counter += 1
instance = [word, counter]
histo.append(instance)
print(histo)
return histo
if __name__ == '__main__':
source = 'one fish two fish red fish blue fish'
list_histo(source)
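# For the sample source above, list_histo prints and returns
# [['one', 1], ['fish', 4], ['two', 1], ['red', 1], ['blue', 1]].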
|
[
"samir.ingle7@gmail.com"
] |
samir.ingle7@gmail.com
|
e872d8089a62b5d92696f6668390f4ab68945df9
|
6547d657706c041f2a87b0680936dd3d473ad328
|
/httprunner/cli.py
|
f60004271687446d2bcfb3af3c86d5de03b91a41
|
[
"Apache-2.0"
] |
permissive
|
lixiaofeng1993/httprunner
|
62c01f6b5adb8e3eded564947ac196938e3c88fb
|
15c5d89605dc2d54fc624c3468be85eebcc8446e
|
refs/heads/master
| 2020-07-26T09:18:35.310008
| 2019-10-21T16:03:50
| 2019-10-21T16:03:50
| 208,601,514
| 1
| 0
|
Apache-2.0
| 2019-09-15T13:54:13
| 2019-09-15T13:54:13
| null |
UTF-8
|
Python
| false
| false
| 6,813
|
py
|
# encoding: utf-8
def main_hrun():
""" API test: parse command line options and run commands.
"""
import sys
import argparse
from httprunner.logger import color_print
from httprunner import __description__, __version__
from httprunner.api import HttpRunner
from httprunner.compat import is_py2
from httprunner.validator import validate_json_file
from httprunner.utils import (create_scaffold, get_python2_retire_msg,
prettify_json_file)
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument(
'-V', '--version', dest='version', action='store_true',
help="show version")
parser.add_argument(
'testcase_paths', nargs='*',
help="testcase file path")
parser.add_argument(
'--log-level', default='INFO',
help="Specify logging level, default is INFO.")
parser.add_argument(
'--log-file',
help="Write logs to specified file path.")
parser.add_argument(
'--dot-env-path',
help="Specify .env file path, which is useful for keeping sensitive data.")
parser.add_argument(
'--report-template',
help="specify report template path.")
parser.add_argument(
'--report-dir',
help="specify report save directory.")
parser.add_argument(
'--failfast', action='store_true', default=False,
help="Stop the test run on the first error or failure.")
parser.add_argument(
'--save-tests', action='store_true', default=False,
help="Save loaded tests and parsed tests to JSON file.")
parser.add_argument(
'--startproject',
help="Specify new project name.")
parser.add_argument(
'--validate', nargs='*',
help="Validate JSON testcase format.")
parser.add_argument(
'--prettify', nargs='*',
help="Prettify JSON testcase format.")
args = parser.parse_args()
if is_py2:
color_print(get_python2_retire_msg(), "YELLOW")
if args.version:
color_print("{}".format(__version__), "GREEN")
exit(0)
if args.validate:
validate_json_file(args.validate)
exit(0)
if args.prettify:
prettify_json_file(args.prettify)
exit(0)
project_name = args.startproject
if project_name:
create_scaffold(project_name)
exit(0)
runner = HttpRunner(
failfast=args.failfast,
save_tests=args.save_tests,
report_template=args.report_template,
report_dir=args.report_dir,
log_level=args.log_level,
log_file=args.log_file
)
try:
for path in args.testcase_paths:
runner.run(path, dot_env_path=args.dot_env_path)
except Exception:
color_print("!!!!!!!!!! exception stage: {} !!!!!!!!!!".format(runner.exception_stage), "YELLOW")
raise
if runner.summary and runner.summary["success"]:
sys.exit(0)
else:
sys.exit(1)
def main_locust():
""" Performance test with locust: parse command line options and run commands.
"""
try:
# monkey patch ssl at beginning to avoid RecursionError when running locust.
from gevent import monkey; monkey.patch_ssl()
import multiprocessing
import sys
from httprunner import logger
from httprunner import locusts
except ImportError:
msg = "Locust is not installed, install first and try again.\n"
msg += "install command: pip install locustio"
print(msg)
exit(1)
sys.argv[0] = 'locust'
if len(sys.argv) == 1:
sys.argv.extend(["-h"])
if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
locusts.start_locust_main()
sys.exit(0)
# set logging level
if "-L" in sys.argv:
loglevel_index = sys.argv.index('-L') + 1
elif "--loglevel" in sys.argv:
loglevel_index = sys.argv.index('--loglevel') + 1
else:
loglevel_index = None
if loglevel_index and loglevel_index < len(sys.argv):
loglevel = sys.argv[loglevel_index]
else:
# default
loglevel = "WARNING"
logger.setup_logger(loglevel)
# get testcase file path
try:
if "-f" in sys.argv:
testcase_index = sys.argv.index('-f') + 1
elif "--locustfile" in sys.argv:
testcase_index = sys.argv.index('--locustfile') + 1
else:
testcase_index = None
assert testcase_index and testcase_index < len(sys.argv)
except AssertionError:
print("Testcase file is not specified, exit.")
sys.exit(1)
testcase_file_path = sys.argv[testcase_index]
sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)
if "--processes" in sys.argv:
""" locusts -f locustfile.py --processes 4
"""
if "--no-web" in sys.argv:
logger.log_error("conflict parameter args: --processes & --no-web. \nexit.")
sys.exit(1)
processes_index = sys.argv.index('--processes')
processes_count_index = processes_index + 1
if processes_count_index >= len(sys.argv):
""" do not specify processes count explicitly
locusts -f locustfile.py --processes
"""
processes_count = multiprocessing.cpu_count()
logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
else:
try:
""" locusts -f locustfile.py --processes 4 """
processes_count = int(sys.argv[processes_count_index])
sys.argv.pop(processes_count_index)
except ValueError:
""" locusts -f locustfile.py --processes -P 8888 """
processes_count = multiprocessing.cpu_count()
logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
sys.argv.pop(processes_index)
locusts.run_locusts_with_processes(sys.argv, processes_count)
else:
locusts.start_locust_main()
if __name__ == "__main__":
""" debugging mode
"""
import sys
import os
if len(sys.argv) == 0:
exit(0)
sys.path.insert(0, os.getcwd())
cmd = sys.argv.pop(1)
if cmd in ["hrun", "httprunner", "ate"]:
main_hrun()
elif cmd in ["locust", "locusts"]:
main_locust()
else:
from httprunner.logger import color_print
color_print("Miss debugging type.", "RED")
example = "\n".join([
"e.g.",
"python -m httprunner.cli hrun /path/to/testcase_file",
"python -m httprunner.cli locusts -f /path/to/testcase_file"
])
color_print(example, "yellow")
|
[
"mail@debugtalk.com"
] |
mail@debugtalk.com
|
79f998c1ae08f5eac4dccac29ea00bf209c906d0
|
60044c76b631e622edb28f3a74971ce06211fac5
|
/Python-for-Everybody/Python-Data-Structures/list.py
|
fa31bc357f500aa7cefac067eb8f807c1c0089d0
|
[] |
no_license
|
NestorMonroy/Courses-coursera
|
8d45a858c79567d74f013ac27ac33d47e43abb96
|
98ac1aa5bb0cd9da5cea5be02995d5b65c779201
|
refs/heads/master
| 2023-08-14T13:36:07.348994
| 2021-09-22T06:13:57
| 2021-09-22T06:13:57
| 327,753,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
"""
List are mutable
String are "inmutable" - we cannont change the contents of a string-
we must make a new string to make any change
List are "mutable" we can change an element of a list using the index
operator
"""
fruit = ['Banana']
fruit[0]= 'b' # error
x = fruit.lower()
print(x)
lotto = [2, 15, 26, 41, 63 ]
print(lotto)
lotto[2]= 28
print(lotto)
# How long is a list
greet = 'Hello Boke'
print(len(greet))
x = [1, 4, 'joe', 99]
print(len(x))
# using the range function
"""
The range function returns a sequence of numbers (a range object in
Python 3) from zero to one less than the parameter value
We can construct an index loop using for and an integer iterator
"""
print(range(4))
friends = ['joel', 'david', 'jon']
print(len(friends))
print(range(len(friends)))
# A tale of two loops
friends = ['joel', 'david', 'jon']
for friend in friends:
print('Happy new year: ', friend)
for i in range(len(friends)):
friend = friends[i]
print('Happy new year: ', friend)
print(len(friends))
print(range(len(friends)))
|
[
"nestor.monroy.90@gmail.com"
] |
nestor.monroy.90@gmail.com
|
015a8e9ef9d42e0845eedd82384f1664674a5957
|
3be42b83a15d022f5863c96ec26e21bac0f7c27e
|
/tensorflow_probability/python/mcmc/legacy_random_walk_metropolis_test.py
|
cc0e6d73a93c859b63903599869a1b5536077d7b
|
[
"Apache-2.0"
] |
permissive
|
ogrisel/probability
|
846f5c13cddee5cf167b215e651b7479003f15d2
|
8f67456798615f9bf60ced2ce6db5d3dba3515fe
|
refs/heads/master
| 2022-11-09T10:53:23.000918
| 2020-07-01T23:16:03
| 2020-07-01T23:17:25
| 276,580,359
| 2
| 1
|
Apache-2.0
| 2020-07-02T07:37:58
| 2020-07-02T07:37:57
| null |
UTF-8
|
Python
| false
| false
| 6,468
|
py
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for RandomWalkMetropolisNormal and RandomWalkMetropolisUniform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class RWMTest(test_util.TestCase):
def testRWM1DUniform(self):
"""Sampling from the Standard Normal Distribution."""
dtype = np.float32
target = tfd.Normal(loc=dtype(0), scale=dtype(1))
samples, _ = tfp.mcmc.sample_chain(
num_results=2000,
current_state=dtype(1),
kernel=tfp.mcmc.RandomWalkMetropolis(
target.log_prob,
new_state_fn=tfp.mcmc.random_walk_uniform_fn(scale=dtype(2.)),
seed=test_util.test_seed()),
num_burnin_steps=500,
parallel_iterations=1) # For determinism.
sample_mean = tf.math.reduce_mean(samples, axis=0)
sample_std = tf.math.reduce_std(samples, axis=0)
[sample_mean_, sample_std_] = self.evaluate([sample_mean, sample_std])
self.assertAllClose(0., sample_mean_, atol=0.17, rtol=0.)
self.assertAllClose(1., sample_std_, atol=0.2, rtol=0.)
def testRWM1DNormal(self):
"""Sampling from the Standard Normal Distribution with adaptation."""
dtype = np.float32
target = tfd.Normal(loc=dtype(0), scale=dtype(1))
samples, _ = tfp.mcmc.sample_chain(
num_results=500,
current_state=dtype([1] * 8), # 8 parallel chains
kernel=tfp.mcmc.RandomWalkMetropolis(
target.log_prob,
seed=test_util.test_seed()),
num_burnin_steps=500,
parallel_iterations=1) # For determinism.
sample_mean = tf.math.reduce_mean(samples, axis=(0, 1))
sample_std = tf.math.reduce_std(samples, axis=(0, 1))
[sample_mean_, sample_std_] = self.evaluate([sample_mean, sample_std])
self.assertAllClose(0., sample_mean_, atol=0.2, rtol=0.)
self.assertAllClose(1., sample_std_, atol=0.2, rtol=0.)
def testRWM1DCauchy(self):
"""Sampling from the Standard Normal Distribution using Cauchy proposal."""
dtype = np.float32
num_burnin_steps = 750
num_chain_results = 400
target = tfd.Normal(loc=dtype(0), scale=dtype(1))
def cauchy_new_state_fn(scale, dtype):
cauchy = tfd.Cauchy(loc=dtype(0), scale=dtype(scale))
def _fn(state_parts, seed):
seed_stream = tfp.util.SeedStream(
seed, salt='RandomWalkCauchyIncrement')
next_state_parts = [
state + cauchy.sample(state.shape, seed=seed_stream())
for state in state_parts]
return next_state_parts
return _fn
samples, _ = tfp.mcmc.sample_chain(
num_results=num_chain_results,
num_burnin_steps=num_burnin_steps,
current_state=dtype([1] * 8), # 8 parallel chains
kernel=tfp.mcmc.RandomWalkMetropolis(
target.log_prob,
new_state_fn=cauchy_new_state_fn(scale=0.5, dtype=dtype),
seed=test_util.test_seed()),
parallel_iterations=1) # For determinism.
sample_mean = tf.math.reduce_mean(samples, axis=(0, 1))
sample_std = tf.math.reduce_std(samples, axis=(0, 1))
[sample_mean_, sample_std_] = self.evaluate([sample_mean, sample_std])
self.assertAllClose(0., sample_mean_, atol=0.2, rtol=0.)
self.assertAllClose(1., sample_std_, atol=0.2, rtol=0.)
def testRWM2DNormal(self):
"""Sampling from a 2-D Multivariate Normal distribution."""
dtype = np.float32
true_mean = dtype([0, 0])
true_cov = dtype([[1, 0.5], [0.5, 1]])
num_results = 500
num_chains = 100
# Target distribution is defined through the Cholesky decomposition
chol = tf.linalg.cholesky(true_cov)
target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)
# Assume that the state is passed as a list of 1-d tensors `x` and `y`.
# Then the target log-density is defined as follows:
def target_log_prob(x, y):
# Stack the input tensors together
z = tf.stack([x, y], axis=-1) - true_mean
return target.log_prob(tf.squeeze(z))
# Initial state of the chain
init_state = [np.ones([num_chains, 1], dtype=dtype),
np.ones([num_chains, 1], dtype=dtype)]
# Run Random Walk Metropolis with normal proposal for `num_results`
# iterations for `num_chains` independent chains:
states, _ = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=init_state,
kernel=tfp.mcmc.RandomWalkMetropolis(
target_log_prob_fn=target_log_prob,
seed=test_util.test_seed()),
num_burnin_steps=200,
num_steps_between_results=1,
parallel_iterations=1)
states = tf.stack(states, axis=-1)
sample_mean = tf.math.reduce_mean(states, axis=[0, 1])
x = states - sample_mean
sample_cov = tf.math.reduce_mean(
tf.linalg.matmul(x, x, transpose_a=True), axis=[0, 1])
[sample_mean_, sample_cov_] = self.evaluate([
sample_mean, sample_cov])
self.assertAllClose(np.squeeze(sample_mean_), true_mean, atol=0.1, rtol=0.1)
self.assertAllClose(np.squeeze(sample_cov_), true_cov, atol=0.1, rtol=0.1)
def testRWMIsCalibrated(self):
rwm = tfp.mcmc.RandomWalkMetropolis(
target_log_prob_fn=lambda x: -tf.square(x) / 2.,
)
self.assertTrue(rwm.is_calibrated)
def testUncalibratedRWIsNotCalibrated(self):
uncal_rw = tfp.mcmc.UncalibratedRandomWalk(
target_log_prob_fn=lambda x: -tf.square(x) / 2.,
)
self.assertFalse(uncal_rw.is_calibrated)
if __name__ == '__main__':
tf.test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
1ecb996f4097f56f0ce63ab0d6dedf6b7f3b0ff8
|
80a3d98eae1d755d6914b5cbde63fd10f5cc2046
|
/autox/autox_video/mmaction2/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x4x1_64e_ucf101_rgb.py
|
48df87cc320b51fd2cd980cd78eade24f3d1d968
|
[
"Apache-2.0"
] |
permissive
|
4paradigm/AutoX
|
efda57b51b586209e1d58e1dab7d0797083aadc5
|
7eab9f4744329a225ff01bb5ec360c4662e1e52e
|
refs/heads/master
| 2023-05-24T00:53:37.109036
| 2023-02-14T14:21:50
| 2023-02-14T14:21:50
| 388,068,949
| 752
| 162
|
Apache-2.0
| 2022-07-12T08:28:09
| 2021-07-21T09:45:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,034
|
py
|
_base_ = [
'../../_base_/models/slowonly_r50.py',
'../../_base_/schedules/sgd_150e_warmup.py',
'../../_base_/default_runtime.py'
]
# model settings
model = dict(cls_head=dict(num_classes=101))
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/ucf101/rawframes/'
data_root_val = 'data/ucf101/rawframes/'
split = 1 # official train/test splits. valid numbers: 1, 2, 3
ann_file_train = f'data/ucf101/ucf101_train_split_{split}_rawframes.txt'
ann_file_val = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt'
ann_file_test = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=8, frame_interval=4, num_clips=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=4,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=4,
num_clips=10,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=8,
workers_per_gpu=2,
test_dataloader=dict(videos_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
pipeline=test_pipeline))
evaluation = dict(
interval=1, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
optimizer = dict(lr=0.1) # this lr is used for 8 gpus
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
total_epochs = 64
# runtime settings
work_dir = './work_dirs/slowonly_r50_8x4x1_64e_ucf101_rgb'
|
[
"caixiaochen@4ParadigmdeMacBook-Pro.local"
] |
caixiaochen@4ParadigmdeMacBook-Pro.local
|
5b18fbd4b0a8183ff967c046a05f8f8ac468e3eb
|
2711e7408e590648ac6a51725c2177a56c566403
|
/smilebuddies/urls.py
|
ea9397e69f37780d921d593336f630dad2ff758f
|
[] |
no_license
|
SeedyROM/smilebuddies
|
457415c1c843b495d92bdb925b0597411f1222c2
|
6ba4827205ce48c1b19786c9e32b9993cf8b43aa
|
refs/heads/master
| 2020-03-21T15:29:13.592031
| 2018-06-26T10:38:38
| 2018-06-26T10:38:38
| 138,715,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
"""smilebuddies URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.views.generic import TemplateView
urlpatterns = [
path('admin/', admin.site.urls),
path('', TemplateView.as_view(template_name='landing.html'), name='landing')
]
|
[
"rallokkcaz@gmail.com"
] |
rallokkcaz@gmail.com
|
e5850cab963a2bed4094268fcad193eda0cd489c
|
717171ed7a14ad60dd42d62fe0dd217a0c0c50fd
|
/19年7月/7.18/url编码和解码.py
|
44e1a5f421f2f103c0c08b57f4de71423a436f54
|
[] |
no_license
|
friedlich/python
|
6e9513193227e4e9ee3e30429f173b55b9cdb85d
|
1654ef4f616fe7cb9fffe79d1e6e7d7721c861ac
|
refs/heads/master
| 2020-09-04T14:34:48.237404
| 2019-11-18T14:54:44
| 2019-11-18T14:54:44
| 219,756,451
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,798
|
py
|
# URL decoding with Python
# Module used: urllib
# Function used: urllib.unquote() (imported here from urllib.request in Python 3)
from urllib.request import quote, unquote
# import urllib  # this alone does not work
rawurl = "%E6%B2%B3%E6%BA%90"
url = unquote(rawurl)
print(url)
print(quote("河源"))
print(type(quote('河源')))
# Why do URLs need to be encoded and decoded?
# Usually, if something needs to be encoded, it is not suitable for transmission as-is. The reasons vary, e.g. the
# size is too large or it contains private data. For URLs, encoding is needed because some characters in a URL
# would cause ambiguity.
# For example, URL parameters are passed as key=value pairs separated by &, as in /s?q=abc&ie=utf-8. If your value
# string contains = or &, the server receiving the URL is bound to parse it incorrectly, so the ambiguous & and =
# characters must be escaped, i.e. encoded.
# Also, URLs are encoded as ASCII rather than Unicode, which means a URL cannot contain any non-ASCII characters
# such as Chinese; otherwise, if the client browser and the server support different character sets, Chinese text
# may cause problems.
# -*- coding: utf-8 -*-
# @File : urldecode_demo.py
# @Date : 2018-05-11
from urllib.request import quote, unquote
# Encoding
url1 = "https://www.baidu.com/s?wd=中国"
# utf-8 encoding, specifying safe characters
ret1 = quote(url1, safe=";/?:@&=+$,", encoding="utf-8")
print(ret1)
print(type(ret1))
# https://www.baidu.com/s?wd=%E4%B8%AD%E5%9B%BD
# gbk encoding
ret2 = quote(url1, encoding="gbk")
print(ret2)
print(type(ret2))
# https%3A//www.baidu.com/s%3Fwd%3D%D6%D0%B9%FA
# Decoding
url3 = "https://www.baidu.com/s?wd=%E4%B8%AD%E5%9B%BD"
print(unquote(url3))
url4 = 'https%3A//www.baidu.com/s%3Fwd%3D%D6%D0%B9%FA'
print(unquote(url4, encoding='gbk'))
|
[
"1164166295@qq.com"
] |
1164166295@qq.com
|
69e17f4c855e3719a67fb44ed072035427f7e853
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/glue_read_2/workflow-run_get.py
|
eb26a1136104d518e28d211b93a913de8e86b4f2
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import execute_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glue/get-workflow-run.html
if __name__ == '__main__':
"""
get-workflow-runs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glue/get-workflow-runs.html
resume-workflow-run : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glue/resume-workflow-run.html
start-workflow-run : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glue/start-workflow-run.html
stop-workflow-run : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glue/stop-workflow-run.html
"""
parameter_display_string = """
# name : Name of the workflow being run.
# run-id : The ID of the workflow run.
"""
execute_two_parameter("glue", "get-workflow-run", "name", "run-id", parameter_display_string)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
4d7fbb683f749be440f1e3f86814a797b247768e
|
47fc606bcdfe5b563409386c94f745f920408851
|
/src/python/twitter/common/python/marshaller.py
|
b5c29a06a99c6afbea083559b3636740c63a4085
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
ewhauser/commons
|
2ef443c4f0be2fbbf1ff3226ed35058a7cc8254a
|
0777b346cf1b32722b7b5f6ae9e6593fe185de22
|
refs/heads/master
| 2021-01-18T06:00:06.901691
| 2013-06-11T22:14:55
| 2013-06-11T22:14:55
| 1,741,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,032
|
py
|
from imp import get_magic
import marshal
import struct
import time
from twitter.common.lang import Compatibility
class CodeTimestamp(object):
TIMESTAMP_RANGE = (4, 8)
@classmethod
  def from_timestamp(cls, timestamp):
    return cls(timestamp)
  @classmethod
  def from_object(cls, pyc_object):
    stamp = time.localtime(
        struct.unpack('I', pyc_object[slice(*CodeTimestamp.TIMESTAMP_RANGE)])[0])
    return cls(stamp)
def __init__(self, stamp=time.time()):
self._stamp = stamp
def to_object(self):
return struct.pack('I', self._stamp)
class CodeMarshaller(object):
class InvalidCode(Exception): pass
MAGIC = struct.unpack('I', get_magic())[0]
MAGIC_RANGE = (0, 4)
TIMESTAMP_RANGE = (4, 8)
@staticmethod
def from_pyc(pyc):
if not isinstance(pyc, Compatibility.bytes) and not hasattr(pyc, 'read'):
raise CodeMarshaller.InvalidCode(
"CodeMarshaller.from_pyc expects a code or file-like object!")
if not isinstance(pyc, Compatibility.bytes):
pyc = pyc.read()
pyc_magic = struct.unpack('I', pyc[slice(*CodeMarshaller.MAGIC_RANGE)])[0]
if pyc_magic != CodeMarshaller.MAGIC:
raise CodeMarshaller.InvalidCode("Bad magic number! Got 0x%X" % pyc_magic)
stamp = time.localtime(struct.unpack('I', pyc[slice(*CodeMarshaller.TIMESTAMP_RANGE)])[0])
try:
code = marshal.loads(pyc[8:])
except ValueError as e:
raise CodeMarshaller.InvalidCode("Unmarshaling error! %s" % e)
return CodeMarshaller(code, stamp)
@staticmethod
def from_py(py, filename):
stamp = int(time.time())
code = compile(py, filename, 'exec')
return CodeMarshaller(code, stamp)
def __init__(self, code, stamp):
self._code = code
self._stamp = stamp
@property
def code(self):
return self._code
def to_pyc(self):
sio = Compatibility.BytesIO()
sio.write(struct.pack('I', CodeMarshaller.MAGIC))
sio.write(struct.pack('I', self._stamp))
sio.write(marshal.dumps(self._code))
return sio.getvalue()
|
[
"jsirois@twitter.com"
] |
jsirois@twitter.com
|
640b1ecbbff09f8d8ae3a1a9b0aa9c8146f0a093
|
4ba6207a7e4aa84da494e0f6d811eca606659b73
|
/groupster/migrations/0003_jobseeker_resume.py
|
5f0af9769b89646d52c1f168f716bf3a2099c0e6
|
[] |
no_license
|
jkol36/groupster
|
da5d9d4b882cd9df7a4b187b65cdc3fe8175e794
|
5967cb7b2689dec760727c7534ff0f73a6901ba4
|
refs/heads/master
| 2021-01-02T09:19:49.841001
| 2015-06-10T18:57:37
| 2015-06-10T18:57:37
| 35,061,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('groupster', '0002_remove_jobseeker_resume'),
]
operations = [
migrations.AddField(
model_name='jobseeker',
name='resume',
field=models.FileField(default=None, upload_to=b''),
),
]
|
[
"jonathankolman@gmail.com"
] |
jonathankolman@gmail.com
|
67bffd0980d1ea7f4201ae6348603c60f4fb7966
|
42fa1862effc3e494859904b76c43ce2bcd623a0
|
/idealised_box_simulations_paper2b.py
|
94394f21530d4fa8c134d0b1ed14dcc4aec1a8ec
|
[] |
no_license
|
PaulHalloran/desktop_python_scripts
|
3e83aedf3e232da610b5f7477e4d7e8fb0253f99
|
325e923527278a5c3e9ab8c978f29b2816dab087
|
refs/heads/master
| 2021-01-01T19:52:06.828997
| 2015-06-27T21:14:10
| 2015-06-27T21:14:10
| 38,155,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,527
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib as mpl
results = np.genfromtxt('/home/ph290/box_modelling/boxmodel_6_box_back_to_basics/results/spg_box_model_qump_results_3.csv',delimiter = ',')
results_stg = np.genfromtxt('/home/ph290/box_modelling/boxmodel_6_box_back_to_basics/results/stg_box_model_qump_results_3.csv',delimiter = ',')
forcing_dir = '/home/ph290/box_modelling/boxmodel_6_box_back_to_basics/forcing_data/co2/'
co2_tmp = np.genfromtxt(forcing_dir+'rcp85_1.txt',delimiter = ',')
co2 = np.zeros([co2_tmp.shape[0],4])
co2[:,0] = np.genfromtxt(forcing_dir+'rcp85_1.txt',delimiter = ',')[:,1]
co2[:,1] = np.genfromtxt(forcing_dir+'rcp85_2.txt',delimiter = ',')[:,1]
co2[:,2] = np.genfromtxt(forcing_dir+'rcp85_3.txt',delimiter = ',')[:,1]
rcp85_yr = np.genfromtxt(forcing_dir+'historical_and_rcp85_atm_co2.txt',delimiter = ',')[:,0]
rcp85 = np.genfromtxt(forcing_dir+'historical_and_rcp85_atm_co2.txt',delimiter = ',')[:,1]
mpl.rcdefaults()
font = {'family' : 'monospace',
'weight' : 'bold',
'family' : 'serif',
'size' : 14}
mpl.rc('font', **font)
plt.close('all')
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(10, 4))
leg_lab = ['y = 1.0285**x +c','y = 1.0265**x +c','y = 1.0305**x +c']
for i in range(3):
ax1.plot(co2[:,i],linewidth = 6,alpha= 0.4,label = leg_lab[i])
ax1.legend(loc = 2,prop={'size':10, 'family' : 'normal','weight' : 'bold'},ncol = 1).draw_frame(False)
#ax1.plot(rcp85_yr-1860,rcp85,'k',linewidth = 6,alpha= 0.4)
ax1.set_xlim([0,240])
ax1.set_ylim([200,1800])
ax1.set_ylabel('atm. CO$_2$ (ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
for i in range(3):
ax2.plot(results[:,0]-results[0,0],results[:,i+1],linewidth = 6,alpha= 0.4)
ax2b = ax2.twinx()
for i in range(3):
ax2b.plot(results[:,0]-results[0,0],results_stg[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '--')
leg_lab2 = ['Subpolar N. Atlantic (left axis)','Subtropical/equatorial (right axis)']
tmp = ax2.plot([0,0],'k',linewidth = 6,alpha= 0.4,label = leg_lab2[0])
tmp2 = ax2.plot([0,0],'k',linewidth = 6,alpha= 0.4,linestyle = '--',label = leg_lab2[1])
ax2.legend(loc = 2,prop={'size':10, 'family' : 'normal','weight' : 'bold'},ncol = 1).draw_frame(False)
tmp.pop(0).remove()
tmp2.pop(0).remove()
ax2.set_ylim([10,31])
ax2.set_ylabel('atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax2.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
ax2.set_xlim([0,240])
#plt.arrow(0,0,0,1, shape='full', lw=3, length_includes_head=True, head_width=.01)
a1 = matplotlib.patches.Arrow(0.5-0.01,0.5+0.01,0.05,0.0, width=0.8,edgecolor='none',facecolor='gray',fill=True,transform=fig.transFigure, figure=fig,alpha=0.25)
fig.lines.extend([a1])
fig.canvas.draw()
plt.tight_layout()
plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_1b.png')
plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_1b.pdf')
plt.show(block = False)
#plt.close('all')
'''
spg-stg difference plots
'''
#for i in range(4):
# ax1.plot(co2[:,i],linewidth = 6,alpha= 0.4)
#
#ax1.set_ylabel('atm. CO$_2$ (ppm)', multialignment='center',fontweight='bold',fontsize = 14)
#ax1.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
#plt.close('all')
colours = ['b','r']
fig, (ax1) = plt.subplots(1,1,figsize=(5, 4))
ax1.plot(results[:,0]-results[0,0],results[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '-',color=colours[0])
ax2 = ax1.twinx()
ax2.plot(results[:,0]-results[0,0],results_stg[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '--',color=colours[1])
ax1.set_xlim([150,160])
min1 = 22
max1 = 27
min1b = -1
max1b = 4
ax1.set_ylim([min1,max1])
ax2.set_ylim([min1b,max1b])
ax1.set_ylabel('atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_ylabel('Subtropical atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
plt.tight_layout()
#plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_2.png')
#plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_2.pdf')
plt.show(block = False)
#plt.close('all')
'''
2
'''
fig, (ax1) = plt.subplots(1,1,figsize=(5, 4))
ax1.plot(results[:,0]-results[0,0],results[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '-',color=colours[0])
ax1.plot(results[:,0]-results[0,0],results_stg[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '--',color=colours[1])
ax1.set_xlim([155,165])
#min1 = 100
#max1 = 160
ax1.set_ylim([min1,max1])
ax1.set_ylabel('atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_ylabel('Subtropical atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
plt.tight_layout()
#plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_2.png')
#plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_2.pdf')
plt.show(block = False)
#plt.close('all')
'''
3
'''
fig, (ax1) = plt.subplots(1,1,figsize=(5, 4))
ax1.plot(results[:,0]-results[0,0],results[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '-',color=colours[0])
ax1.plot(results[:,0]-results[0,0],results_stg[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '--',color=colours[1])
ax1.set_xlim([170,180])
#min1 = 100
#max1 = 160
ax1.set_ylim([min1,max1])
ax1.set_ylabel('atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_ylabel('Subtropical atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
plt.tight_layout()
#plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_2.png')
#plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_2.pdf')
plt.show(block = False)
#plt.close('all')
results = np.genfromtxt('/home/ph290/box_modelling/boxmodel_6_box_back_to_basics/results/rcp85_spg_box_model_qump_results_3.csv',delimiter = ',')
results_stg = np.genfromtxt('/home/ph290/box_modelling/boxmodel_6_box_back_to_basics/results/rcp85_stg_box_model_qump_results_3.csv',delimiter = ',')
mpl.rcdefaults()
font = {'family' : 'monospace',
'weight' : 'bold',
'family' : 'serif',
'size' : 14}
mpl.rc('font', **font)
plt.close('all')
fig, (ax1) = plt.subplots(1,1,figsize=(5, 4))
for i in range(1):
ax1.plot(results[:,0]-results[0,0],results[:,i+1],'k',linewidth = 6,alpha= 0.4)
ax1b = ax1.twinx()
for i in range(1):
ax1b.plot(results[:,0]-results[0,0],results_stg[:,i+1],'k',linewidth = 6,alpha= 0.4,linestyle = '--')
ax1.set_ylabel('spg atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1b.set_ylabel('stg (dashed) atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_xlim([0,240])
plt.tight_layout()
plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/rcp_85.png')
plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/rcp_85.pdf')
plt.show(block = False)
#plt.close('all')
|
[
"paul.halloran@gmail.com"
] |
paul.halloran@gmail.com
|
52c45fcb6941676bb95e51b20065f7003e69df4e
|
502e97f0ec4f287b8280a546e7f2555ff3a5a1fd
|
/cnn_3d/loss_ssim.py
|
1f9e166d4af572dad02709668df737d66c13e862
|
[] |
no_license
|
carlasailer/cnn_ct_pet
|
d350692be03432e025e33db6296ac33b36bedf08
|
4e256bb73f7ea0ab046c231762001b9f3535bb00
|
refs/heads/master
| 2020-12-18T23:11:24.048337
| 2020-01-22T10:40:52
| 2020-01-22T10:40:52
| 235,549,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,202
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 14:25:02 2019
@author: s1287
"""
import h5py
import os
import keras.backend as K
import numpy as np
from skimage.measure import compare_ssim  # compare_ssim is used by calc_ssim below
def calc_ssim_git(y_true, y_pred):
"""structural similarity measurement system."""
## K1, K2 are two constants, much smaller than 1
K1 = 0.04
K2 = 0.06
## mean, std, correlation
mu_x = K.mean(y_pred)
mu_y = K.mean(y_true)
sig_x = K.std(y_pred)
sig_y = K.std(y_true)
sig_xy = (sig_x * sig_y) ** 0.5
## L, number of pixels, C1, C2, two constants
L = 33
C1 = (K1 * L) ** 2
C2 = (K2 * L) ** 2
    ssim = (2 * mu_x * mu_y + C1) * (2 * sig_xy + C2) * 1.0 / ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2))
return ssim
def calc_ssim(y_true, y_pred):
"""Calculates the structured similarity of two images, ssim is in the range [-1,1]
Parameters:
y_true voxel used for calculation of SSIM
y_pred voxel used for calculation of SSIM
Returns:
ssim_value value of the structured similarity between the two images
"""
# size = y_true.shape
# print('The shape is:')
# print(size)
single_ssim = []
try:
for slice_nr in range(0, y_true.shape[0]):
# slice_ssim = compare_ssim(y_true[slice_nr,:,:], y_pred[slice_nr,:,:], win_size=3)
slice_ssim = compare_ssim(y_true[slice_nr,:,:], y_pred[slice_nr,:,:], win_size=3, gaussian_weights=True)
single_ssim.append(slice_ssim)
ssim_mean = np.mean(single_ssim)
except IndexError:
ssim_mean = 0
return ssim_mean
def calc_ssim_multichannel(y_true, y_pred):
    return compare_ssim(y_true, y_pred, multichannel=True, win_size=3)
def ssim_fct(y_true, y_pred):
"""wrapper function to fit into the Keras framework
Parameters:
y_true ground truth voxel
y_pred voxel predicted by network
Returns:
ssim value of the structural similarity, suited as loss function
"""
def ssim(y_true, y_pred):
return -calc_ssim(K.squeeze(y_true), K.squeeze(y_pred))
return ssim
if __name__ == '__main__':
contents = os.listdir('/home/s1287/no_backup/s1287/results_interp/patches_for_CNN/')
filename_test = '/home/s1287/no_backup/s1287/results_interp/patches_for_CNN/' + contents[0]
filename_training = '/home/s1287/no_backup/s1287/results_interp/patches_for_CNN/' + contents[1]
with h5py.File(filename_training, 'r') as file:
training_CT = np.array(file.get('CT'))
training_PET = np.array(file.get('PET'))
with h5py.File(filename_test, 'r') as file:
test_CT = np.array(file.get('CT'))
test_PET = np.array(file.get('PET'))
train_data = training_CT
train_labels = training_PET
test_data = test_CT
test_labels = test_PET
example_PET1 = train_labels[0]
example_PET2 = train_labels[1]
current_ssim = calc_ssim(example_PET1, example_PET2)
current_ssim1 = calc_ssim_multichannel(example_PET1, example_PET2)
print(current_ssim)
    print('SSIM Multichannel %f' % current_ssim1)
|
[
"40063163+carlasailer@users.noreply.github.com"
] |
40063163+carlasailer@users.noreply.github.com
|
05a8191a0221fcf44c3631cb1ae3b634e90a6c50
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/fractions_20200802103056.py
|
a8e741def594e4049345cfbf9c195c01f24b8d0d
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
def fractions(numerator,denominator):
if denominator == 0 :
return str(numerator)
number = numerator / denominator
if numerator % denominator == 0:
return str(numerator // denominator)
newStr = str(number)
print(newStr)
largeStr = newStr.split(".")
if len(largeStr[1]) > 1:
return largeStr[0] + "." + '(' + largeStr[1][0] + ')'
return newStr
def frac(numerator,denominator):
res = ""
# create a map to store already seen remainders
# remainder is used as key and its position in result is stored as value
# position for cases like 1/6
mp = {}
# find the first remainder
rem = numerator / denominator
print(rem)
# keep finding the remainder until the
print(frac(-4,333))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
d804293a9bb22f13def744ccad3cf0bcce62647f
|
0fa7b9328e04d2ff5a2b607d9ec6962b7ee97532
|
/vi_lib/lib/torchutils/test/test_models.py
|
afef409c72474e1be1ac61ba78474b7a8a8e86e3
|
[] |
no_license
|
aaronpmishkin/normalizing_flows
|
4b12bcbe85f400bb27d21e93d8a3c35d9e5df90c
|
249f0d99fee6d07783a2a3a595cfeb439af8c599
|
refs/heads/master
| 2020-04-09T01:09:40.906963
| 2018-12-14T07:47:08
| 2018-12-14T07:47:08
| 159,893,931
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,472
|
py
|
import unittest
import torch
import torchutils.models as models
from torchutils.params import bp2v
from torch.nn.utils import vector_to_parameters as v2p
from torch.nn.utils import parameters_to_vector as p2v
class MLPTestCase(unittest.TestCase):
def assertAllClose(self, a, b):
self.assertTrue(torch.allclose(a, b, 0.01))
def get_dummy_inputs(self, n, indim, hiddim, outdim, s):
torch.manual_seed(0)
mlp = models.MLP(indim, hiddim, outdim)
x = torch.rand(n, indim)
noise = torch.randn(s, models.num_params(mlp))
return mlp, x, noise
def test_num_params(self):
self.assertEqual(models.num_params(models.MLP(10,[],1)), (10+1))
self.assertEqual(models.num_params(models.MLP(10,[1],1)), (10+1) + (1+1))
self.assertEqual(models.num_params(models.MLP(10,[2],1)), (10+1)*2 + (2+1))
def test_interface_forward(self):
mlp, x, _, = self.get_dummy_inputs(7, 5, [], 1, 3)
y = mlp(x)
self.assertTrue(y.shape[0] == x.shape[0])
self.assertTrue(y.shape[1] == 1)
def test_interface_forward_with_noise(self):
n, s = 7, 3
mlp, x, noise = self.get_dummy_inputs(n, 5, [], 1, s)
print(list(mlp.parameters()))
y = mlp(x, noise)
self.assertTrue(list(y.shape) == [s, n, 1])
mlp, x, noise = self.get_dummy_inputs(n, 5, [11], 1, s)
y = mlp(x, noise)
self.assertTrue(list(y.shape) == [s, n, 1])
def test_backward_with_noise(self):
n, s = 7, 3
def manual_gradient(mlp, x, noise):
mu = p2v(mlp.parameters())
gs = []
for sid in range(s):
v2p((noise[sid,:] + mu).contiguous(), mlp.parameters())
g = torch.autograd.grad(torch.sum(mlp(x)), mlp.parameters())
print([gg.shape for gg in g])
gs.append(bp2v(g, 0))
v2p(mu, mlp.parameters())
return sum(gs)
mlp, x, noise = self.get_dummy_inputs(n, 5, [], 1, s)
grad1 = p2v(torch.autograd.grad(torch.sum(mlp(x, noise)), mlp.parameters()))
grad2 = manual_gradient(mlp, x, noise)
self.assertAllClose(grad1, grad2)
mlp, x, noise = self.get_dummy_inputs(n, 5, [11], 1, s)
grad1 = p2v(torch.autograd.grad(torch.sum(mlp(x, noise)), mlp.parameters()))
grad2 = manual_gradient(mlp, x, noise)
self.assertAllClose(grad1, grad2)
|
[
"aaronpmishkin@gmail.com"
] |
aaronpmishkin@gmail.com
|
00dccca5378c4cc542d8e54c54e252e22ed0e38f
|
5d4841bd3160418d3deb88b241edc22c7b7eab18
|
/server/serving/package_scanner.py
|
10cab85c3a049a56099c34414e74816f80bf0b21
|
[] |
no_license
|
cxbn12/ntu-nlp
|
2493523bb886facfd661dd4194082ccd653496ae
|
de98f636919267a3701383636ccb31ccf108f28b
|
refs/heads/master
| 2022-03-16T08:32:24.302783
| 2019-11-04T15:31:56
| 2019-11-04T15:31:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,314
|
py
|
import os.path as osp
import sys
from pathlib import Path
def scan_package(path, base_dir='.'):
"""
Scan for all the python packages under a certain path. Note that this
will automatically append the scan path to the PYTHONPATH. You should be
careful if there is some packages with the same name. In the case of a
name collision, latter scanned packages will not be imported.
Args:
path (str): The path which all the packages under it will be
imported. You should provide the package path rather than the
package name.
base_dir (str, optional): The base directory to be used as a import root.
Assume the project structure is like:
.
├── package1
│ └── foo.py
└── setup.py
Without setting base_dir, which will automatically take your
scan root as the import root.
>>> scan_package('package1')
Which is equivalent to
>>> import foo
If you specify the scan root,
>>> scan_package('package1', 'package1')
this function will use the given root:
>>> import package1.foo
However, you should never let a scan root to be empty if the package
to be scanned is a regular package (with __init__.py inside).
.
├── package2
│ ├── __init__.py
│ └── foo.py
└── setup.py
This will raise a ValueError:
>>> scan_package('package2', 'package2')
Which is equivalent to
>>> import .
Raise:
ValueError:
- path does not exist
- base_dir does not exist
- base_dir is not valid for importing
"""
abs_path = osp.abspath(path)
if not osp.exists(abs_path):
raise ValueError('Parameter `path`: {} not exist'.format(abs_path))
if not osp.exists(base_dir):
raise ValueError('Parameter `base_dir`: {} does not exist'.format(base_dir))
base_dir = osp.abspath(base_dir)
if not abs_path.startswith(base_dir):
raise ValueError('`path`: {} is not a subdirectory of `base_dir`: {}'
.format(abs_path, base_dir))
# mark the base directory as source root
sys.path.insert(0, base_dir)
# scan for all **/*.py file under certain dir
modules = [f for f in Path(abs_path).rglob('*.py') if f.is_file()]
# set **/__init__.py to the package name
modules = [f.parent if f.name == '__init__.py' else f for f in modules]
# import all modules
for module in modules:
module_rel_path = module.relative_to(base_dir)
# check for invalid regular package import
        if str(module_rel_path) == '.':
            raise ValueError('You may want to import package {} with the scan root as the package, '
                             'which will cause an import error. Please try some scan roots outside '
                             'the package'.format(path))
else:
module_name = '.'.join(module_rel_path.with_suffix('').parts)
# check if the package has been imported
if module_name not in sys.modules.keys():
__import__(module_name)
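# Hypothetical usage sketch (the 'plugins' directory name is made-up, not part of this
# module): import everything under ./plugins with the project root as the import root,
# then list what was registered.
#
#   scan_package('plugins', base_dir='.')
#   print([name for name in sys.modules if name.startswith('plugins.')])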
|
[
"YLI056@e.ntu.edu.sg"
] |
YLI056@e.ntu.edu.sg
|
cdaacfbe7fce884d91c74e79e4a520fdf8185bea
|
382ce68736c1dee91dcb5eb7846eff10519d2b70
|
/etcewrappers/utils/iperfserver.py
|
f780cf8d9b291281079960623c45cbb9d682bb1a
|
[] |
permissive
|
adjacentlink/python-etce
|
4345c7bd719f18022fdb96b0c30efc529948f87c
|
72d58535e230f3178b1cab9616a3412514dabaf3
|
refs/heads/master
| 2023-08-18T05:08:53.519074
| 2022-11-17T16:47:44
| 2022-11-17T16:47:44
| 103,570,572
| 7
| 4
|
BSD-3-Clause
| 2022-10-11T11:13:42
| 2017-09-14T19:01:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,528
|
py
|
#
# Copyright (c) 2015-2018,2020 - Adjacent Link LLC, Bridgewater, New Jersey
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Adjacent Link LLC nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import, division, print_function
import time
from etce.wrapper import Wrapper
class IPerfServer(Wrapper):
"""
Execute iperf as a server. The iperfserver file should contain, at
most, one line of iperf common and server options. The iperf server
    command will be built as 'iperf -s [file options] [arg values]'. Lines
    starting with "#" are ignored as comments. If multiple non-comment
lines are found, only the last one is used.
"""
def register(self, registrar):
registrar.register_infile_name('iperfserver.conf')
registrar.register_outfile_name('iperfserver.log')
registrar.register_argument(
'interval',
None,
'iperf measurement interval (iperf -i switch ' \
'argument)')
registrar.register_argument(
'bufferlen',
None,
'iperf buffer length (iperf -l switch argument)')
def run(self, ctx):
if not ctx.args.infile:
return
# run as daemon, log to output file and add argument specified via input file
argstr = '-D -o %s' % ctx.args.outfile
if ctx.args.interval is not None:
argstr += ' -i %d ' % ctx.args.interval
if ctx.args.bufferlen is not None:
argstr += ' -l %d ' % ctx.args.bufferlen
fileargstr = ''
serverarglines = [line.strip() for line
in open(ctx.args.infile).readlines()
if len(line.strip()) > 0
and line[0] != '#']
# take the last non-comment line as the iperf input
if len(serverarglines) > 0:
fileargstr = serverarglines[-1]
argstr = '-s %s %s' % (fileargstr, argstr)
ctx.run('iperf', argstr)
def stop(self, ctx):
ctx.stop()
# iperfserver takes some time to close down
time.sleep(5)
|
[
"eschreiber@adjacentlink.com"
] |
eschreiber@adjacentlink.com
|
b104d48e41d9130046b0c49a32c62beba8f2a35d
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/85/usersdata/179/58876/submittedfiles/funcoes1.py
|
e9ffe616ef1b13fe73cb6d7961c81ad1912c3af5
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
# -*- coding: utf-8 -*-
def crescente(a):
    cont = 0
    for i in range(0, len(a)-1, 1):
        if a[i] < a[i+1]:
            cont = cont + 1
    if cont == len(a)-1:
        return True
    else:
        return False
def decrescente(a):
    cont = 0
    for i in range(0, len(a)-1, 1):
        if a[i] > a[i+1]:
            cont = cont + 1
    if cont == len(a)-1:
        return True
    else:
        return False
def consecutivo(a):
    cont = 0
    for i in range(0, len(a)-1, 1):
        if a[i]+1 == a[i+1]:
            cont = cont + 1
    if cont == len(a)-1:
        return True
    else:
        return False
b=[]
c=[]
d=[]
n=int(input('digite o valor de n :'))
for i in range(0,n,1):
valor=int(input('digite o valor :'))
b.append(valor)
if crescente(b):
print('S')
else:
print('N')
for i in range(0,n,1):
valor=int(input('digite o valor :'))
c.append(valor)
if decrescente(c):
print('S')
else:
print('N')
for i in range(0,n,1):
valor=int(input('digite o valor :'))
d.append(valor)
if consecutivo(d):
print('S')
else:
print('N')
# write the remaining functions
# write the main program
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
73b8253035b13946cdbafdad3f3ff53fae1a417a
|
a14dd601cde67f67d0ba38dfd1362f7c0109cef1
|
/arrays/leetcode/grid/set-matrix-zeroes-73.py
|
4b6d885e0787eeebbf94701b9d37fb1cd5bc4ce0
|
[] |
no_license
|
Meaha7/dsa
|
d5ea1615f05dae32671af1f1c112f0c759056473
|
fa80219ff8a6f4429fcf104310f4169d007af712
|
refs/heads/main
| 2023-09-03T18:52:41.950294
| 2021-11-05T09:14:42
| 2021-11-05T09:14:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
grids = [
[[1, 1, 1], [1, 0, 1], [1, 1, 1]],
[[0, 1, 2, 0], [3, 4, 5, 2], [1, 3, 1, 5]],
[[1, 2, 3, 4], [5, 0, 7, 8], [0, 10, 11, 12], [13, 14, 15, 0]]
]
# T=mn,S=m+n
def main(grid):
m, n = len(grid), len(grid[0])
rows, cols = set(), set()
for i in range(m):
for j in range(n):
if not grid[i][j]:
rows.add(i)
cols.add(j)
for i in range(m):
for j in range(n):
if i in rows or j in cols:
grid[i][j] = 0
return grid
for grid in grids:
print(main(grid))
print()
# T=mn,S=1
def main(grid):
m, n = len(grid), len(grid[0])
fr, fc = False, False
for i in range(m):
for j in range(n):
if not grid[i][j]:
if not i:
fr = True
if not j:
fc = True
grid[i][0] = grid[0][j] = 0
for i in range(1, m):
for j in range(1, n):
if not grid[i][0] or not grid[0][j]:
grid[i][j] = 0
if fr:
for j in range(n):
grid[0][j] = 0
if fc:
for i in range(m):
grid[i][0] = 0
for grid in grids:
main(grid)
print(grid)
|
[
"nikhilgoyal104ah4@gmail.com"
] |
nikhilgoyal104ah4@gmail.com
|
82a29e952d943526f88af2dd50b7eda0da44f165
|
a38aa3779c16f31d02a2df031fd4ce072facaeb9
|
/project/utils.py
|
7ae54df72e5e3e66e59363eb3dbee5eab2359549
|
[
"MIT"
] |
permissive
|
nikifkon-old/csa-almaty-bot
|
a0a39673dfa39eb5f6ac6dd58eea08008d52c350
|
f18d087c86b3b90171dec080e780e330d62e711a
|
refs/heads/master
| 2022-11-30T07:16:45.839562
| 2020-08-19T09:48:39
| 2020-08-19T09:48:39
| 288,692,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
QUESTION_CHAR = "❓"
EXCLAMATION_CHAR = "❗️"
SEARCH_CHAR = "🔎"
BACK_CHAR = "🔙"
MENU_CHAR = "☰"
BACK_TO_MENU_TEXT = "{prefix} Вернуться к списку категорий".format(prefix=MENU_CHAR)
BACK_TO_SEARCH_RESULT = "{prefix} Вернуться к результатам поиска".format(prefix=BACK_CHAR)
OPEN_SEARCH = "{prefix} Найти вопрос".format(prefix=SEARCH_CHAR)
TRY_SEARCH_AGAIN = "{prefix} Попробовать найти ещё раз".format(prefix=SEARCH_CHAR)
|
[
"kostya.nik.3854@gmail.com"
] |
kostya.nik.3854@gmail.com
|
ae336a597ede11303d18e76036cbc9ac291953b5
|
6c90112e7d21086ef06432bb417bdb339fed4c33
|
/django-tally/api/models.py
|
11af0648223a22b4581387c627995055a13352e3
|
[
"MIT"
] |
permissive
|
blakelobato/BetterBusinessByReview
|
9767a04cf1b1a8a8e96cdea634a24887182834ff
|
1f8f0a03dc24a661b112b60fed1946142d918294
|
refs/heads/master
| 2022-04-04T00:08:37.474620
| 2020-02-06T21:01:00
| 2020-02-06T21:01:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,205
|
py
|
from django.db import models
from django.conf import settings
from django.core.validators import int_list_validator
from django.contrib.auth.models import User
# Create your models here.
class Url(models.Model):
id = models.IntegerField(primary_key=True, )
url = models.CharField(max_length=5000)
# created = models.DateTimeField(auto_now_add=True)#saved on first input into database
# updated = models.DateTimeField(auto_now=True)
date = models.DateTimeField(auto_now_add=True)#saved on first input into database
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,)
word_phrase = models.CharField(max_length=50)
high_rating_score = models.DecimalField(max_digits=3, decimal_places=2, null=True)
low_rating_score = models.DecimalField(max_digits=3, decimal_places=2, null=True)
def __str__(self):
return '{}'.format(self.url)
class WordListAPI(models.Model):
id = models.IntegerField(primary_key=True)
word_phrase = models.CharField(max_length=50)
high_rating_score = models.DecimalField(max_digits=3, decimal_places=2)
low_rating_score = models.DecimalField(max_digits=3, decimal_places=2)
|
[
"LilySu@users.noreply.github.com"
] |
LilySu@users.noreply.github.com
|
deae7399994f02fc02cd2a1de41c3876a0a42f3d
|
d5005de630cbfcac46b6f90be845a827a029ff0d
|
/urlshortner/api/serializer.py
|
c040926e02219c805df9c6c192f55d7729c0b142
|
[] |
no_license
|
mahinm20/url-shortner
|
d4b18917a002aa12f4fdd1f6f3e2bf026b34f0ad
|
ea084f96136d5810b8ad6d53bf0acc1a8291b782
|
refs/heads/master
| 2023-08-11T07:35:04.804424
| 2021-09-14T09:37:27
| 2021-09-14T09:37:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
from django.db.models import fields
from rest_framework.serializers import ModelSerializer
from .models import Link
class LinkSerializer(ModelSerializer):
class Meta:
model=Link
fields='__all__'
|
[
"mahinmalhotra20@gmail.com"
] |
mahinmalhotra20@gmail.com
|
40d8671c94da3a301dcd8dd73470c1af8be6c4dc
|
4f2cdd9a34fce873ff5995436edf403b38fb2ea5
|
/Data-Structures/List/Part2/P003.py
|
b6642ac9b5001105f692e511ac814eb924a9b9b2
|
[] |
no_license
|
sanjeevseera/Python-Practice
|
001068e9cd144c52f403a026e26e9942b56848b0
|
5ad502c0117582d5e3abd434a169d23c22ef8419
|
refs/heads/master
| 2021-12-11T17:24:21.136652
| 2021-08-17T10:25:01
| 2021-08-17T10:25:01
| 153,397,297
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
"""
Write a Python program to generate all permutations of a list in Python.
"""
import itertools
print(list(itertools.permutations([1,2,3])))
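# Expected output -- the 3! = 6 orderings, in the order itertools generates them:
# [(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]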
|
[
"seerasanjeev@gmail.com"
] |
seerasanjeev@gmail.com
|
7b375c81b77e9b35c1623c3699790ed98d0b9a61
|
5c90b31943aff36cab344574b16575025e649b7e
|
/examples/tour_examples/xkcd_tour.py
|
73632b0471a64d556c17914eda6f7e0bd123423f
|
[
"MIT"
] |
permissive
|
766/SeleniumBase
|
7e23adb3d40cf3d9912e2ff0f4dd56c2fafdb29b
|
b81e7b93e16a9abee6d2386f55c97843aa90a7d9
|
refs/heads/master
| 2020-08-22T08:54:47.269550
| 2019-12-06T13:44:17
| 2019-12-06T13:44:17
| 216,360,246
| 1
| 0
|
MIT
| 2019-12-06T13:44:18
| 2019-10-20T12:43:47
| null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open('https://xkcd.com/1117/')
self.assert_element('img[alt="My Sky"]')
self.create_shepherd_tour()
self.add_tour_step("Welcome to XKCD!")
self.add_tour_step("This is the XKCD logo.", "#masthead img")
self.add_tour_step("Here's the daily webcomic.", "#comic img")
self.add_tour_step("This is the title.", "#ctitle", alignment="top")
self.add_tour_step("Click here for the next comic.", 'a[rel="next"]')
self.add_tour_step("Click here for the previous one.", 'a[rel="prev"]')
self.add_tour_step("Learn about the author here.", 'a[rel="author"]')
self.add_tour_step("Click here for the license.", 'a[rel="license"]')
self.add_tour_step("Click for a random comic.", 'a[href*="/random/"]')
self.add_tour_step("Thanks for taking this tour!")
self.export_tour(filename="xkcd_tour.js") # Exports the tour
self.play_tour() # Plays the tour
|
[
"mdmintz@gmail.com"
] |
mdmintz@gmail.com
|
41e48a86030f730e374988d7f00909bc2d3b0cc9
|
53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61
|
/.history/EMR/age_sex_20190618092905.py
|
f95b9e064c825ab7d3d8a555a7f973fcb638f23b
|
[] |
no_license
|
cyc19950621/python
|
4add54894dc81187211aa8d45e5115903b69a182
|
d184b83e73334a37d413306d3694e14a19580cb0
|
refs/heads/master
| 2020-04-11T20:39:34.641303
| 2019-07-02T12:54:49
| 2019-07-02T12:54:49
| 162,078,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHR-all')  # collect the txt files in the directory
for emrtxt in emrtxts:
    f = open(emrtxt, 'r', errors="ignore")  # errors="ignore" added for the Chinese text
    emrtxt = os.path.basename(emrtxt)
    emrtxt_str = re.findall(r'(^.+?)\_', emrtxt)  # extract the ID
    emrtxt = "".join(emrtxt_str)  # convert to str
    out = []
    for line in f.readlines():
        if line == '男':
            out.append(line)
        elif line == '女':
            out.append(line)
        if line.find('岁') > -1:
            line = re.sub('岁', '', line)
            line = ''.join(line)
out.append(line)
break
output = ' '.join(out)
EMRdef.text_create(r'D:\DeepLearning ER\EHRbase','.txt' ,emrtxt,output)
|
[
"1044801968@qq.com"
] |
1044801968@qq.com
|
dc3a3df04d9eba2f8895e74b91128c8c0b6b8a41
|
6413fe58b04ac2a7efe1e56050ad42d0e688adc6
|
/tempenv/lib/python3.7/site-packages/plotly/validators/scattergeo/marker/colorbar/_title.py
|
e584a0b07ef22e6fd0a89b476a1df8aef97c2e3d
|
[
"MIT"
] |
permissive
|
tytechortz/Denver_temperature
|
7f91e0ac649f9584147d59193568f6ec7efe3a77
|
9d9ea31cd7ec003e8431dcbb10a3320be272996d
|
refs/heads/master
| 2022-12-09T06:22:14.963463
| 2019-10-09T16:30:52
| 2019-10-09T16:30:52
| 170,581,559
| 1
| 0
|
MIT
| 2022-06-21T23:04:21
| 2019-02-13T21:22:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,264
|
py
|
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(
self,
plotly_name='title',
parent_name='scattergeo.marker.colorbar',
**kwargs
):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Title'),
data_docs=kwargs.pop(
'data_docs', """
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
"""
),
**kwargs
)
|
[
"jmswank7@gmail.com"
] |
jmswank7@gmail.com
|
8849959f26a02a64d2d77a028c48084c8fc9310d
|
955060597d643c695dff53b6cff0ea649db68a94
|
/dequorum/urls.py
|
44cfd19538837d6340d8c57944e6fc065b461a4c
|
[
"BSD-2-Clause"
] |
permissive
|
pombredanne/django-dequorum
|
e99386fd01d640776d3ac6f2851c4ddc15316713
|
b790e9b8b0920581a48c67679648a6df811e505b
|
refs/heads/master
| 2021-01-18T10:10:33.571111
| 2013-12-13T23:04:16
| 2013-12-13T23:04:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from django.conf.urls import patterns, include, url
from nap import api
urlpatterns = patterns('',
(u'^$', 'django.shortcuts.render', {'template_name': 'dequorum/index.html'}),
(u'^api/', include(api.APIS['dequorum'].patterns(flat=True))),
)
|
[
"curtis@tinbrain.net"
] |
curtis@tinbrain.net
|
9b07e90479e6556a9f1310bbceee661ebf9051fc
|
0107160f73c6f46a0c693f0aa8b2b22bb04aaa07
|
/flex/redis.py
|
8a0c1594fac0646ca5ab8f762a94f41024245212
|
[
"MIT"
] |
permissive
|
centergy/flex
|
8a9054171a121671e09646a88259c947d0d87cc4
|
4fc11d3ad48e4b5016f53256015e3eed2157daae
|
refs/heads/master
| 2022-12-13T06:01:09.561457
| 2018-08-22T20:32:34
| 2018-08-22T20:32:34
| 145,748,684
| 0
| 0
|
MIT
| 2022-12-08T00:45:07
| 2018-08-22T18:40:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,958
|
py
|
from flask import current_app
from threading import Lock
from flex.utils.module_loading import import_string
__all__ = ('RedisManager', 'redis')
class _Connector(object):
__slots__ = ('app', 'lock', '_client', 'config')
def __init__(self, app, config):
self.app = app
self.config = config
self._client = None
self.lock = Lock()
@property
def client(self):
with self.lock:
if self._client is None:
cls = self.config.CLIENT_CLASS
if isinstance(cls, str):
cls = import_string(cls)
self._client = cls.from_url(
self.config.URL,
**self.config.CLIENT_OPTIONS
)
return self._client
class RedisManager(object):
__slots__ = ('_app', )
config_prefix = 'REDIS_'
default_config = dict(
url='redis://localhost:6379/0',
client_class='redis.StrictRedis',
client_options={}
)
def __init__(self, app=None):
self._app = None
if app is not None:
self.init_app(app)
self._app = app
@property
def _redis_client(self):
try:
return self._get_app().extensions['redis'].client
except KeyError:
raise RuntimeError('Redis not setup on app.')
def _get_app(self, app=None):
"""Helper method that implements the logic to look up an application."""
if app is not None:
return app
if current_app:
return current_app
if self._app is not None:
return self._app
raise RuntimeError(
            'Application not registered on redis instance and no application '
            'bound to current context'
)
def init_app(self, app, **kwargs):
config = app.config.namespace(self.config_prefix)
config.setdefaults(self.default_config)
app.extensions['redis'] = _Connector(app, config)
def __getattr__(self, name):
return getattr(self._redis_client, name)
def __getitem__(self, name):
return self._redis_client[name]
def __setitem__(self, name, value):
self._redis_client[name] = value
def __delitem__(self, name):
del self._redis_client[name]
redis = RedisManager()
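# Minimal usage sketch (assumes a flex/Flask-style app whose config object provides the
# namespace()/setdefaults() helpers used by init_app above, and a reachable redis server;
# the URL and key below are made-up):
#
#   app.config['REDIS_URL'] = 'redis://localhost:6379/1'
#   redis.init_app(app)
#   redis.set('greeting', 'hello')   # attribute access proxies to the underlying client
#   redis['greeting']                # __getitem__ proxies to the client as well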
|
[
"davidmkyalo@gmail.com"
] |
davidmkyalo@gmail.com
|
494729e6f2f30c78583ca65070a1387032401821
|
2b86301d5ad3fecaa5a300cabfe6b4dfc82b78ed
|
/venv/Lib/site-packages/cassiopeia/transformers/championmastery.py
|
c45ee0d705e5dc57e8ccf720ebd5f8d5dd952cb4
|
[
"MIT"
] |
permissive
|
sserrot/champion_relationships
|
72823bbe73e15973007e032470d7efdf72af3be0
|
91315d6b7f6e7e678d9f8083b4b3e63574e97d2b
|
refs/heads/master
| 2022-12-21T05:15:36.780768
| 2021-12-05T15:19:09
| 2021-12-05T15:19:09
| 71,414,425
| 1
| 2
|
MIT
| 2022-12-18T07:42:59
| 2016-10-20T01:35:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,956
|
py
|
from typing import Type, TypeVar
from copy import deepcopy
from datapipelines import DataTransformer, PipelineContext
from ..core.championmastery import ChampionMasteryData, ChampionMasteryListData, ChampionMastery, ChampionMasteries
from ..dto.championmastery import ChampionMasteryDto, ChampionMasteryListDto
T = TypeVar("T")
F = TypeVar("F")
class ChampionMasteryTransformer(DataTransformer):
@DataTransformer.dispatch
def transform(self, target_type: Type[T], value: F, context: PipelineContext = None) -> T:
pass
# Dto to Data
@transform.register(ChampionMasteryDto, ChampionMasteryData)
def champion_mastery_dto_to_data(self, value: ChampionMasteryDto, context: PipelineContext = None) -> ChampionMasteryData:
return ChampionMasteryData(**value)
@transform.register(ChampionMasteryListDto, ChampionMasteryListData)
def champion_mastery_list_dto_to_data(self, value: ChampionMasteryListDto, context: PipelineContext = None) -> ChampionMasteryListData:
data = deepcopy(value)
data["masteries"] = [self.champion_mastery_dto_to_data(c) for c in data["masteries"]]
for c in data["masteries"]:
c(region=data["region"])
data = data["masteries"]
return ChampionMasteryListData(data, region=value["region"], summoner_id=value["summonerId"])
# Data to Core
#@transform.register(ChampionMasteryData, ChampionMastery)
def champion_mastery_data_to_core(self, value: ChampionMasteryData, context: PipelineContext = None) -> ChampionMastery:
return ChampionMastery.from_data(value)
#@transform.register(ChampionMasteryListData, ChampionMasteries)
def champion_mastery_list_data_to_core(self, value: ChampionMasteryListData, context: PipelineContext = None) -> ChampionMasteries:
return ChampionMasteries.from_data(*[self.champion_mastery_data_to_core(cm) for cm in value], region=value.region, summoner=value.summoner_id)
|
[
"sserrot@users.noreply.github.com"
] |
sserrot@users.noreply.github.com
|
0a2a6e6a68e79bebbef374d63bfd4e57a41093db
|
eb87c8b1ce8591d207643d3924b7939228f1a4fe
|
/conformance_suite/test_assign_test_var.py
|
b3a5f3ec6ae764f29359d631f46cf82e492d26f7
|
[] |
no_license
|
brownplt/insta-model
|
06543b43dde89913c219d476ced0f51a439add7b
|
85e2c794ec4b1befa19ecb85f2c8d2509ec8cf42
|
refs/heads/main
| 2023-08-30T19:06:58.083150
| 2023-05-03T18:53:58
| 2023-05-10T22:29:18
| 387,500,638
| 5
| 0
| null | 2022-04-23T23:06:52
| 2021-07-19T14:53:09
|
Racket
|
UTF-8
|
Python
| false
| false
| 414
|
py
|
# test_assign_test_var.py
# This should pass.
from typing import Optional
def f(x: Optional[int]) -> int:
if x is None:
x = 1
return x
# def test_assign_test_var(self):
# codestr = """
# from typing import Optional
# def f(x: Optional[int]) -> int:
# if x is None:
# x = 1
# return x
# """
# self.compile(codestr, modname="foo")
|
[
"lukuangchen1024@gmail.com"
] |
lukuangchen1024@gmail.com
|
818aa3abf6f0f26c357550965b482be18aa0a2b7
|
4ac57cc07c50d1cc4dbf4894b77783fa03a8c7b1
|
/4-case-study-sunlight-in-austin/9_daily_hours_of_clear_sky.py
|
127206faced7ca4e185d3d0c5346c054b778c6ed
|
[] |
no_license
|
OCulzac/pandas-foundations
|
905fa778beee5e9d8210716abcc06eeeaf02b8b9
|
f13e7270dfcbb661da7a2fa3f26b4001df5eadc9
|
refs/heads/master
| 2020-05-19T10:44:58.816172
| 2019-05-05T04:09:45
| 2019-05-05T04:09:45
| 184,977,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
""" Daily hours of clear sky
In a previous exercise, you analyzed the 'sky_condition' column to explore the difference in temperature on sunny days compared to overcast days. Recall that a 'sky_condition' of 'CLR' represents a sunny day. In this exercise, you will explore sunny days in greater detail. Specifically, you will use a box plot to visualize the fraction of days that are sunny.
The 'sky_condition' column is recorded hourly. Your job is to resample this column appropriately such that you can extract the number of sunny hours in a day and the number of total hours. Then, you can divide the number of sunny hours by the number of total hours, and generate a box plot of the resulting fraction.
As before, df_clean is available for you in the workspace.
Instructions 1/3
Get the cases in df_clean where the sky is clear. That is, when 'sky_condition' equals 'CLR', assigning to is_sky_clear.
Resample is_sky_clear by day, assigning to resampled. """
# Using df_clean, when is sky_condition 'CLR'?
is_sky_clear = df_clean['sky_condition'].str.contains('CLR')
# Resample is_sky_clear by day
resampled = is_sky_clear.resample('D')
# See the result
print(resampled)
""" Instructions 2/3
35 XP
2
3
Calculate the number of measured sunny hours per day as the sum of resampled, assigning to sunny_hours.
Calculate the total number of measured hours per day as the count of resampled, assigning to total_hours.
Calculate the fraction of hours per day that were sunny as the ratio of sunny hours to total hours.
"""
# From previous step
is_sky_clear = df_clean['sky_condition'] == 'CLR'
resampled = is_sky_clear.resample('D')
# Calculate the number of sunny hours per day
sunny_hours = resampled.sum()
# Calculate the number of measured hours per day
total_hours = resampled.count()
# Calculate the fraction of hours per day that were sunny
sunny_fraction = sunny_hours / total_hours
""" Instructions 3/3
30 XP
3
Draw a box plot of sunny_fraction using .plot() with kind set to `'box'``. """
# Make a box plot of sunny_fraction
sunny_fraction.plot(kind='box')
plt.show()
|
[
"oronculzac@gmail.com"
] |
oronculzac@gmail.com
|
2f6956dd6f187273f31b75d5d6429b5d5d23c030
|
7a13a9def50e3d87d74f7d3a2b990cd9bc1acda1
|
/accounts/admin.py
|
1ae86e995a6ecd33ad7fd7b61e36b1ee99444204
|
[] |
no_license
|
anandrajB/speedy-scanner
|
a97bfe16feef483db9e2fe77a2b1639e1dea8707
|
fd5d4fd7b3ba600d975ae2aaf73ae81e1d0e3632
|
refs/heads/master
| 2023-08-31T09:07:46.802433
| 2021-09-20T12:21:22
| 2021-09-20T12:21:22
| 374,634,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,063
|
py
|
from django.contrib import admin
from django import forms
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from .models import MyUser, Profile, File, Batch
class UserCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label="Password", widget=forms.PasswordInput)
password2 = forms.CharField(
label="Password confirmation", widget=forms.PasswordInput
)
class Meta:
model = MyUser
fields = ("email", "phone")
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = MyUser
fields = ("email", "phone", "password", "is_active", "is_admin")
def clean_password(self):
return self.initial["password"]
class UserAdmin(BaseUserAdmin):
# The forms to add and change user instances
form = UserChangeForm
add_form = UserCreationForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = ("email", "phone", "is_admin")
list_filter = ("is_admin",)
fieldsets = (
(None, {"fields": ("email", "password")}),
("Personal info", {"fields": ("phone",)}),
("Permissions", {"fields": ("is_active", "is_admin",)}),
)
# add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
# overrides get_fieldsets to use this attribute when creating a user.
add_fieldsets = (
(
None,
{
"classes": ("wide",),
"fields": ("email", "phone", "password1", "password2"),
},
),
)
search_fields = ("email",)
ordering = ("email",)
filter_horizontal = ()
# Now register the new UserAdmin...
admin.site.register(MyUser, UserAdmin)
# ... and, since we're not using Django's built-in permissions,
# unregister the Group model from admin.
admin.site.unregister(Group)
admin.site.register(Profile)
admin.site.register(Batch)
admin.site.register(File)
|
[
"anand98.ar@gmail.com"
] |
anand98.ar@gmail.com
|
cc6895b8b702d18633c777f02493a8fe29b851f5
|
05263538c3ad0f577cdbbdb9bac87dcf450230ce
|
/alexa/ask-sdk/ask_sdk_model/dialog/elicit_slot_directive.py
|
837a97063497b6119b45f42914b105a8118715ce
|
[] |
no_license
|
blairharper/ISS-GoogleMap-project
|
cea027324fc675a9a309b5277de99fc0265dcb80
|
3df119036b454a0bb219af2d703195f4154a2471
|
refs/heads/master
| 2020-03-21T16:47:21.046174
| 2018-10-24T08:05:57
| 2018-10-24T08:05:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,761
|
py
|
# coding: utf-8
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.directive import Directive
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.intent import Intent
class ElicitSlotDirective(Directive):
"""
NOTE: This class is auto generated.
Do not edit the class manually.
:type updated_intent: (optional) ask_sdk_model.intent.Intent
:type slot_to_elicit: (optional) str
"""
deserialized_types = {
'object_type': 'str',
'updated_intent': 'ask_sdk_model.intent.Intent',
'slot_to_elicit': 'str'
}
attribute_map = {
'object_type': 'type',
'updated_intent': 'updatedIntent',
'slot_to_elicit': 'slotToElicit'
}
def __init__(self, updated_intent=None, slot_to_elicit=None): # noqa: E501
# type: (Optional[Intent], Optional[str]) -> None
"""
:type updated_intent: (optional) ask_sdk_model.intent.Intent
:type slot_to_elicit: (optional) str
"""
self.__discriminator_value = "Dialog.ElicitSlot"
self.object_type = self.__discriminator_value
super(ElicitSlotDirective, self).__init__(object_type=self.__discriminator_value) # noqa: E501
self.updated_intent = updated_intent
self.slot_to_elicit = slot_to_elicit
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, ElicitSlotDirective):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
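# Illustrative usage (the slot name is made-up): build the directive and inspect its
# serialized form; object_type is fixed to "Dialog.ElicitSlot" by the constructor.
#
#   directive = ElicitSlotDirective(slot_to_elicit="city")
#   print(directive.to_str())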
|
[
"blair.harper@gmail.com"
] |
blair.harper@gmail.com
|
3e29d4d7c333026e5344ef9516e21f5e220cfd24
|
f98de2db6b24d30d64f1145c7d8da4a40385a87f
|
/packages/grid_control_cms/lumi_tools.py
|
50eb266c862e2c20ae303976fae5474ea14c2247
|
[] |
no_license
|
greyxray/grid-control
|
f9f453491fe7bc506d4cfc240afaa364ba9db84b
|
ed10fdb6ff604006a5d52dcd43c2e55c9e962c0a
|
refs/heads/master
| 2020-04-15T13:15:21.103357
| 2019-01-08T18:23:07
| 2019-01-08T18:23:07
| 164,709,043
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,665
|
py
|
# | Copyright 2010-2016 Karlsruhe Institute of Technology
# |
# | Licensed under the Apache License, Version 2.0 (the "License");
# | you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
# |
# | http://www.apache.org/licenses/LICENSE-2.0
# |
# | Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# | See the License for the specific language governing permissions and
# | limitations under the License.
import os
from python_compat import imap, json, lmap, sort_inplace
def makeint(x):
if x.strip().upper() not in ['', 'MAX', 'MIN']:
return int(x)
def parseLumiFromJSON(data, select = ''):
runs = json.loads(data)
rr = lmap(makeint, select.split('-') + [''])[:2]
for run in imap(int, runs.keys()):
if (rr[0] and run < rr[0]) or (rr[1] and run > rr[1]):
continue
for lumi in runs[str(run)]:
yield ([run, lumi[0]], [run, lumi[1]])
def keyLumi(a):
return tuple(a[0])
def mergeLumi(rlrange):
""" Merge consecutive lumi sections
>>> mergeLumi([([1, 11], [1, 20]), ([1, 1], [1, 10]), ([1, 22], [1, 30])])
[([1, 1], [1, 20]), ([1, 22], [1, 30])]
>>> mergeLumi([([1, 1], [2, 2]), ([2, 3], [2, 10]), ([2, 11], [4, 30])])
[([1, 1], [4, 30])]
"""
sort_inplace(rlrange, keyLumi)
i = 0
while i < len(rlrange) - 1:
(end_run, end_lumi) = rlrange[i][1]
(start_next_run, start_next_lumi) = rlrange[i+1][0]
if (end_run == start_next_run) and (end_lumi == start_next_lumi - 1):
rlrange[i] = (rlrange[i][0], rlrange[i + 1][1])
del rlrange[i+1]
else:
i += 1
return rlrange
def parseLumiFromString(rlrange):
""" Parse user supplied lumi info into easier to handle format
>>> lmap(parseLumiFromString, ['1', '1-', '-1', '1-2'])
[([1, None], [1, None]), ([1, None], [None, None]), ([None, None], [1, None]), ([1, None], [2, None])]
>>> lmap(parseLumiFromString, ['1:5', '1:5-', '-1:5', '1:5-2:6'])
[([1, 5], [1, 5]), ([1, 5], [None, None]), ([None, None], [1, 5]), ([1, 5], [2, 6])]
>>> lmap(parseLumiFromString, ['1-:5', ':5-1', ':5-:6'])
[([1, None], [None, 5]), ([None, 5], [1, None]), ([None, 5], [None, 6])]
>>> lmap(parseLumiFromString, ['1:5-2', '1-2:5'])
[([1, 5], [2, None]), ([1, None], [2, 5])]
"""
def parseRunLumi(rl):
if ':' in rl:
return lmap(makeint, rl.split(':'))
else:
return [makeint(rl), None]
if '-' in rlrange:
return tuple(imap(parseRunLumi, rlrange.split('-')))
else:
tmp = parseRunLumi(rlrange)
return (tmp, tmp)
def parseLumiFilter(lumiexpr):
if lumiexpr == '':
return None
lumis = []
from grid_control.config import ConfigError
for token in imap(str.strip, lumiexpr.split(',')):
token = lmap(str.strip, token.split('|'))
if True in imap(str.isalpha, token[0].lower().replace('min', '').replace('max', '')):
if len(token) == 1:
token.append('')
try:
json_fn = os.path.normpath(os.path.expandvars(os.path.expanduser(token[0].strip())))
json_fp = open(json_fn)
lumis.extend(parseLumiFromJSON(json_fp.read(), token[1]))
json_fp.close()
except Exception:
raise ConfigError('Could not process lumi filter file: %r (filter: %r)' % tuple(token))
else:
try:
lumis.append(parseLumiFromString(token[0]))
except Exception:
raise ConfigError('Could not process lumi filter expression:\n\t%s' % token[0])
return mergeLumi(lumis)
def filterLumiFilter(runs, lumifilter):
""" Filter lumifilter for entries that contain the given runs
>>> formatLumi(filterLumiFilter([2,3,6], [([1, None], [2, None]), ([4, 1], [4, None]), ([5, 1], [None,3])]))
['1:MIN-2:MAX', '5:1-9999999:3']
>>> formatLumi(filterLumiFilter([2,3,6], [([1, 1], [2, 2]), ([3, 1], [5, 2]), ([5, 2], [7,3])]))
['1:1-2:2', '3:1-5:2', '5:2-7:3']
"""
for filterEntry in lumifilter:
(sel_start, sel_end) = (filterEntry[0][0], filterEntry[1][0])
for run in runs:
if (sel_start is None) or (run >= sel_start):
if (sel_end is None) or (run <= sel_end):
yield filterEntry
break
def selectRun(run, lumifilter):
""" Check if lumifilter selects the given run/lumi
>>> selectRun(1, [([1, None], [2, None])])
True
>>> selectRun(2, [([1, 3], [5, 12])])
True
>>> selectRun(6, [([1, 3], [5, 12])])
False
>>> selectRun(9, [([3, 23], [None, None])])
True
"""
for (sel_start, sel_end) in lumifilter:
(sel_start_run, sel_end_run) = (sel_start[0], sel_end[0])
if (sel_start_run is None) or (run >= sel_start_run):
if (sel_end_run is None) or (run <= sel_end_run):
return True
return False
def selectLumi(run_lumi, lumifilter):
""" Check if lumifilter selects the given run/lumi
>>> selectLumi((1,2), [([1, None], [2, None])])
True
>>> selectLumi((1,2), [([1, 3], [5, 12])])
False
>>> selectLumi((2,1), [([1, 3], [5, 12])])
True
>>> selectLumi((9,2), [([3, 23], [None, None])])
True
"""
(run, lumi) = run_lumi
for (sel_start, sel_end) in lumifilter:
(sel_start_run, sel_start_lumi) = sel_start
(sel_end_run, sel_end_lumi) = sel_end
if (sel_start_run is None) or (run >= sel_start_run):
if (sel_end_run is None) or (run <= sel_end_run):
# At this point, run_lumi is contained in the selected run
if (sel_start_run is not None) and (run > sel_start_run):
sel_start_lumi = None
if (sel_start_lumi is None) or (lumi >= sel_start_lumi):
if (sel_end_run is not None) and (run < sel_end_run):
sel_end_lumi = None
if (sel_end_lumi is None) or (lumi <= sel_end_lumi):
return True
return False
def formatLumi(lumifilter):
""" Check if lumifilter selects the given run/lumi
>>> formatLumi(imap(parseLumiFromString, ['1', '1-', '-1', '1-2']))
['1:MIN-1:MAX', '1:MIN-9999999:MAX', '1:MIN-1:MAX', '1:MIN-2:MAX']
>>> formatLumi(imap(parseLumiFromString, ['1:5', '1:5-', '-1:5', '1:5-2:6']))
['1:5-1:5', '1:5-9999999:MAX', '1:MIN-1:5', '1:5-2:6']
>>> formatLumi(imap(parseLumiFromString, ['1-:5', ':5-1', ':5-:6']))
['1:MIN-9999999:5', '1:5-1:MAX', '1:5-9999999:6']
>>> formatLumi(imap(parseLumiFromString, ['1:5-2', '1-2:5']))
['1:5-2:MAX', '1:MIN-2:5']
"""
def formatRange(rlrange):
(start, end) = rlrange
default = lambda x, d: (x, d)[x is None]
start = [default(start[0], '1'), default(start[1], 'MIN')]
end = [default(end[0], '9999999'), default(end[1], 'MAX')]
return str.join('-', imap(lambda x: '%s:%s' % tuple(x), (start, end)))
if lumifilter:
return lmap(formatRange, lumifilter)
return ''
def strLumi(lumifilter):
return str.join(',', formatLumi(lumifilter))
if __name__ == '__main__':
import doctest
doctest.testmod()
|
[
"stober@cern.ch"
] |
stober@cern.ch
|
afc06ae4b405fbce9055d076027588304160a0e4
|
83b242997a1560214285fd38ab4d39a0b1210ddc
|
/SOL4Py/network/ZThreadedTCPServer.py
|
add7627d5756461363417a09cff04384cc3dbf66
|
[] |
no_license
|
ivartz/vid2fft
|
0a25d853e178b43fd0a5f765934887963f5c37f9
|
1b6ec82de04f86819ab4c1056d4f9d9bde1ed9c8
|
refs/heads/master
| 2020-08-07T21:44:28.745553
| 2019-10-08T09:18:41
| 2019-10-08T09:18:41
| 213,594,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,963
|
py
|
#/******************************************************************************
#
# Copyright (c) 2018 Antillia.com TOSHIYUKI ARAI. ALL RIGHTS RESERVED.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#******************************************************************************/
# 2018/09/20
# ZThreadedTCPServer.py
# encoding utf-8
# Simple TCPServer example that accepts a single TCP client.
# See https://docs.python.org/3/library/socketserver.html
# See also: https://gist.github.com/arthurafarias/7258a2b83433dfda013f1954aaecd50a#file-server-py
import os
import sys
import time
import socketserver
import threading
import traceback
from SOL4Py.ZSingleton import *
##
# Simple TCPServer thread class, which handles a stream request from a TCP client.
#
class ZThreadedTCPServer(threading.Thread, ZSingleton):
#---------------------------------------------------------
# Inner class starts.
# Define your subclass derived from StreanRequestHandler
class _TCPRequestHandler(socketserver.StreamRequestHandler):
# Define your own handle method if needed.
def handle(self):
      print(self.__class__.__name__ + "::" + self.handle.__name__ + " start")
      print("Current thread name:{}".format(threading.current_thread().name))
try:
while True:
print("Curent thread name:{}".format(threading.current_thread().name))
bytes = self.rfile.readline().strip()
if len(bytes) == 0:
print("breaking handle loop")
break
ZSingleton.get_instance().request_handle_callback(bytes, self.wfile)
self.request.close()
except:
traceback.print_exc()
# Inner class ends.
##
#
# Constructor
def __init__(self, ipaddress, port, request_handler_class = None):
super(ZThreadedTCPServer, self).__init__()
print(self.__class__.__name__ + "::" + self.run.__name__ + " start")
ZSingleton.set_instance(self)
print("IPAddress:{} Port:{}".format(ipaddress, port))
self.server_address = (ipaddress, port)
if request_handler_class == None:
# Register the default request handler class: self._TCPRequestHandler.
self.sock_server = socketserver.TCPServer(self.server_address, self._TCPRequestHandler)
else:
self.sock_server = socketserver.TCPServer(self.server_address, request_handler_class)
self.sock_server.allow_reuse_address = True
# Please redefine your own method 'request_handle_callback' in a subclass derived from this class.
def request_handle_callback(self, bytes, writer):
text = bytes.decode("utf-8")
import datetime
now = datetime.datetime.now()
print("Recieved at {} data :{}".format(now, text))
reply = "OK"
breply = reply.encode("utf-8")
writer.write(breply)
# Thread main procedure.
def run(self):
print(self.__class__.__name__ + "::" + self.run.__name__ + " start")
if self.sock_server != None:
self.sock_server.serve_forever()
print(self.__class__.__name__ + "::" + self.run.__name__ + " end")
  # Shutdown and close the server socket.
def close(self):
if self.sock_server != None:
self.sock_server.shutdown()
print("sock_server shutdown")
self.sock_server.server_close()
print("sock_server close")
|
[
"djloek@gmail.com"
] |
djloek@gmail.com
|
84c806a6c6711ceb7dc060bcec0926b8246fdadb
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/rocketman/settings/production_20210104181634.py
|
89446f098f49d16a5372b9f81e7bc516ac235f9c
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415
| 2021-01-05T14:55:45
| 2021-01-05T14:55:45
| 303,961,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
import os
from .base import *
DEBUG = False
SECRET_KEY = '$^8&x#8a5!7@r!#6ov9bfl(j8k^6+$v-1x+*#!uqf(=^n+*$w3'
ALLOWED_HOSTS = ['localhost', 'rocketman.naukawagtail.com', '*']
cwd=os.getcwd()
# NOTE: the original snapshot left this setting unfinished ('CASHES' with a bare 'BA');
# renamed to CACHES with a placeholder local-memory backend as an assumption.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
try:
from .local import *
except ImportError:
pass
|
[
"rucinska.patrycja@gmail.com"
] |
rucinska.patrycja@gmail.com
|
2c197d376b5580c493f3dddf7bdbd0b7cfbe9d98
|
7b12eb45c1ea76ad9c186b858b5dfebf2c5b862a
|
/.history/DEBER_20210905000450.py
|
c9d0c0d9993b0f24c7ecbb3cf98c786e2d4f0c05
|
[
"MIT"
] |
permissive
|
Alopezm5/PROYECTO-PARTE-1
|
a1dce04009b24852c1c60e69bdf602ad3af0574b
|
bd7a8594edf08d41c6ca544cf6bac01ea4fcb684
|
refs/heads/main
| 2023-07-25T11:22:17.994770
| 2021-09-07T03:27:34
| 2021-09-07T03:27:34
| 403,670,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,825
|
py
|
import os
class Empresa():
def __init__(self,nom="",ruc=0,dire="",tele=0,ciud="",tipEmpr=""):
self.nombre=nom
self.ruc=ruc
self.direccion=dire
self.telefono=tele
self.ciudad=ciud
self.tipoEmpresa=tipEmpr
def datosEmpresa(self):#3
self.nombre=input("Ingresar nombre de la empresa: ")
self.ruc=int(input("Ingresar ruc de la empresa: "))
self.direccion=input("Ingresar la direccion de la empresa: ")
self.telefono=int(input("Ingresar el numero de telefono de la empresa: "))
self.ciudad=input("Ingresar ciudad donde esta la empresa: ")
self.tipoEmpresa=input("Ingresar tipo de empresa publica o privada: ")
def mostrarEmpresa(self):
print("")
print("Empresa")
print("La empresa de nombre {}\n De RUC #{} \n Está ubicada en {}\n Se puede comunicar al #{}\n Está empresa esta en la ciudad de {}\n Es una entidad {}".format(self.nombre,self.ruc,self.direccion, self.telefono,self.ciudad, self.tipoEmpresa))
class Empleado(Empresa):
def __init__(self,nom="",cedu=0,dire="",tele=0,email="",estado="",profe=""):
self.nombre=nom
self.cedula=cedu
self.direccion=dire
self.telefono=tele
self.correo=email
self.estadocivil=estado
self.profesion=profe
def empleado(self):
self.nombre=input("Ingresar nombre del empleado: ")
self.cedula=int(input("Ingresar numero de cedula del empleado: "))
self.direccion=input("Ingresar la direccion del empleado: ")
self.telefono=int(input("Ingresar numero de contacto del empleado: "))
self.correo=input("Ingresar correo personal del empleado: ")
def empleadoObrero(self):
self.estadocivil=input("Ingresar estado civil del empleado: ")
def empleadoOficina(self):
self.profesion=input("Ingresar profesion del empleado: ")
def mostrarempleado(self):
print("El empleado: {} con # de C.I. {} \n Con direccion {}, y numero de contacto{}\n Y correo {}".format(self.nombre,self.cedula,self.direccion,self.telefono,self.correo))
class Departamento(Empleado):
def __init__(self,dep=""):
self.departamento=dep
def departa(self):
self.departamento=input("Ingresar el departamento al que pertenece el empleado: ")
def mostrarDeparta(self):
print("El empleado pertenece al departamento de: {}".format(self.departamento))
class Pagos(Empleado):
def __init__(self, desper=0,valhora=0,hotraba=0,extra=0,suel=0,hrecar=0,hextra=0,pres=0,mcou=0,valho=0,sobtiem=0,comofi=0,antobre=0,iemple=0,cuopres=0,tot=0,liquid=0,cuota=0,anti=0,comi=0,fNomina="",fIngreso="",iess=0):
self.permisos=desper
self.valorhora=valhora
self.horastrabajadas=hotraba
self.valextra=extra
self.sueldo= suel
self.horasRecargo= hrecar
self.horasExtraordinarias=hextra
self.prestamo= pres
self.mesCuota= mcou
self.valor_hora= valho
self.sobretiempo=sobtiem
self.comEmpOficina = comofi
self.antiEmpObrero = antobre
self.iessEmpleado = iemple
self.cuotaPrestamo=cuopres
self.totdes = tot
self.liquidoRecibir = liquid
self.mesCuota=cuota
self.antiguedad=anti
self.comision=comi
self.fechaNomina=fNomina
self.fechaIngreso=fIngreso
self.iess=iess
def pagoNormal(self):
self.sueldo=float(input("Ingresar sueldo del trabajador: $ "))
self.prestamo=float(input("Ingresar monto del prestamo que ha generado el empleado: $ "))
self.mesCuota=int(input("Ingresar meses a diferir el prestamo: "))
self.comision=float(input("Ingresar valor de la comsion: "))
self.antiguedad=int(input("Ingresar antiguedad: "))
self.iess=float(input("Ingresar valor del iees recordar que debe ser porcentuado Ejemplo si quiere decir 20% debe ingresar 0.20"))
def pagoExtra(self):
self.horasRecargo=int(input("Ingresar horas de recargo: "))
self.horasExtraordinarias=int(input("Ingresar horas extraordinarias: "))
self.fechaNomina=float(input("Ingresar fecha de nomida (formato año-mes-dia): "))
self.fechaIngreso=float(input("Ingresar fecha de ingreso (formato año-mes-dia): "))
def calculoSueldo(self):
self.valor_hora=self.sueldo/240
self.sobretiempo= self.valor_hora * (self.horasRecargo*0.50+self.horasExtraordinarias*2)
self.comEmpOficina = self.comision*self.sueldo
self.antiEmpObrero = self.antiguedad*(self.fechaNomina - self.fechaIngreso)/365*self.sueldo
self.iessEmpleado = self.iess*(self.sueldo+self.sobretiempo)
self.cuotaPrestamo=self.prestamo/self.mesCuota
self.toting = self.sueldo+self.sobretiempo+ self.comEmpOficina + self.antiEmpObrero
self.totdes = self.iessEmpleado + self.prestamo
self.liquidoRecibir = self.toting - self.totdes
def mostrarSueldo(self):
print("SUELDO BASE")
print("El empleado tiene un sueldo de {}".format(self.sueldo))
print("")
print("SOBRETIEMPO")
print("El valor de sobretiempo es de {}, con {} horas extras trabajadas".format(self.sobretiempo,self.horasExtraordinarias))
print("")
print("PRESTAMO")
print("El valor de prestamo es de {}, a ser pagado en {} meses, con cuotas de {}".format(self.p))
emp=Empresa()
emp.datosEmpresa()
os.system ("cls")
emple=Empleado()
emple.empleado()
os.system ("cls")
emple.empleadoObrero()
emple.empleadoOficina()
os.system ("cls")
depa=Departamento()
depa.departa()
pag=Pagos()
pag.pagoNormal()
pag.pagoExtra()
pag.calculoSueldo()
os.system ("cls")
emp.mostrarEmpresa()
print("")
emple.mostrarempleado()
print("")
pag.mostrarSueldo()
|
[
"85761855+Alopezm5@users.noreply.github.com"
] |
85761855+Alopezm5@users.noreply.github.com
|
159e1bbb69f50777b2ba294e3298a272b72dcb2a
|
7f4c82f7eb8d2805e378586f14e214cdaacfdb4a
|
/books/model/CommentList.py
|
4c2ece791575dd51f9a9ea502c5e8bd24457084a
|
[
"MIT"
] |
permissive
|
deepubansal/books-python-wrappers
|
5a922267ec8382b3542638d894c96f4891b57bf5
|
51210c8d557a32564f976a56214d3c0807f46a90
|
refs/heads/master
| 2022-12-05T11:25:01.694021
| 2020-08-29T07:35:23
| 2020-08-29T07:35:23
| 288,738,813
| 0
| 0
|
MIT
| 2020-08-29T07:35:24
| 2020-08-19T13:26:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
#$Id$
from books.model.PageContext import PageContext
class CommentList:
"""This class is used to create object for comments."""
def __init__(self):
"""Initialize parameters for Comments list."""
self.comments = []
self.page_context = PageContext()
def set_comments(self, comment):
"""Set comments.
Args:
comment(instance): Comment object.
"""
self.comments.append(comment)
def get_comments(self):
"""Get comments.
Returns:
list: List of comments object.
"""
return self.comments
def set_page_context(self, page_context):
"""Set page context.
Args:
page_context(instance): Page context object.
"""
self.page_context = page_context
def get_page_context(self):
"""Get page context.
Returns:
instance: Page context object.
"""
return self.page_context
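if __name__ == '__main__':
    # Hedged usage sketch (not part of the original wrapper). Any object can stand in
    # for a Comment instance here, because set_comments() simply appends its argument.
    comment_list = CommentList()
    comment_list.set_comments('placeholder comment')
    print(comment_list.get_comments())      # ['placeholder comment']
    print(comment_list.get_page_context())  # the default PageContext instance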
|
[
"sahaya.ramesh@zohocorp.com"
] |
sahaya.ramesh@zohocorp.com
|
aee868eb2597469429538bbd075d10a018a753ac
|
6fe2d3c27c4cb498b7ad6d9411cc8fa69f4a38f8
|
/algorithms/algorithms-python/leetcode_medium/Question_111_Combination_Sum_III.py
|
96ac21f6e928162be84fa4ea48977d9e38d1fd35
|
[] |
no_license
|
Lanceolata/code
|
aae54af632a212c878ce45b11dab919bba55bcb3
|
f7d5a7de27c3cc8a7a4abf63eab9ff9b21d512fb
|
refs/heads/master
| 2022-09-01T04:26:56.190829
| 2021-07-29T05:14:40
| 2021-07-29T05:14:40
| 87,202,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
#!/usr/bin/python
# coding: utf-8
class Solution(object):
def combinationSum3(self, k, n):
"""
:type k: int
:type n: int
:rtype: List[List[int]]
"""
vec = []
res = []
self.helper(k, n, 1, vec, res)
return res
def helper(self, k, n, l, vec, res):
if n < 0:
return
if n == 0 and len(vec) == k:
res.append(vec[:])
return
for i in range(l, 10):
vec.append(i)
self.helper(k, n - i, i + 1, vec, res)
vec.pop()
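if __name__ == '__main__':
    # Quick sanity check (not part of the original solution file): the only way to pick
    # 3 distinct digits from 1-9 that sum to 7 is [1, 2, 4].
    print(Solution().combinationSum3(3, 7))  # [[1, 2, 4]]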
|
[
"lanceolatayuan@gmail.com"
] |
lanceolatayuan@gmail.com
|
d7e694d8b7e339f353fe621aef7be75b1bd0d979
|
9a1b033774e371bd6442048f43e862dfb71abed7
|
/Lists As Stacks And Queues/Exercises/Cups_and_Bottles.py
|
57a94258fc2f957d4054343b06ab2bb9d026c989
|
[] |
no_license
|
mialskywalker/PythonAdvanced
|
ea4fde32ba201f6999cd0d59d1a95f00fb5f674b
|
c74ad063154c94b247aaf73b7104df9c6033b1a5
|
refs/heads/master
| 2023-03-09T00:13:28.471328
| 2021-02-24T15:21:11
| 2021-02-24T15:21:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
from collections import deque
cups_capacity = deque(int(el) for el in input().split())
bottles_capacity = [int(el) for el in input().split()]
wasted_water = 0
while True:
if not cups_capacity or not bottles_capacity:
break
bottle = bottles_capacity.pop()
cup = cups_capacity.popleft()
total = bottle - cup
if total >= 0:
wasted_water += total
elif total < 0:
cups_capacity.appendleft(abs(total))
if not cups_capacity:
print(f"Bottles: {' '.join(map(str, bottles_capacity))}")
print(f"Wasted litters of water: {wasted_water}")
elif not bottles_capacity:
print(f"Cups: {' '.join(map(str, cups_capacity))}")
print(f"Wasted litters of water: {wasted_water}")
|
[
"kalqga123@gmail.com"
] |
kalqga123@gmail.com
|
da07c9bf4e4dfa6fedec67e45efc284753925f26
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02952/s489061530.py
|
c11b8cfc760ea2a0ea44bc3ca92b0888dbd71b04
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
s=str(input())
n=len(s)
s=int(s)
if n==1:
print(s)
elif n==2:
print(9)
elif n==3:
print(10+s-100)
elif n==4:
print(909)
elif n==5:
print(910+s-10000)
else:
print(90909)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
fa20d3ae1f8e6295713b6a8f217a871b4d843616
|
b6f4393777d4f6a3a8b599700ce3405be76c4bc4
|
/Apple-Music/Leticia/api/models.py
|
eb394de811af6473a82f9e7f5f7aa8d11e8e4c24
|
[] |
no_license
|
azatnt/Apple-Music-rest_framework-
|
b13897dd40337384469df269cdf46bd085487442
|
09b7e602078a6d82f63725b757bb657afd221776
|
refs/heads/main
| 2023-02-10T16:45:47.618860
| 2021-01-14T14:37:25
| 2021-01-14T14:37:25
| 326,934,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
from django.db import models
import string
import random
def generate_unique_code():
length = 6
while True:
code = ''.join(random.choices(string.ascii_uppercase, k=length))
if Room.objects.filter(code=code).count() == 0:
break
return code
class Room(models.Model):
code = models.CharField(
max_length=8, default=generate_unique_code, unique=True)
host = models.CharField(max_length=50, unique=True)
guest_can_pause = models.BooleanField(default=False, null=False)
votes_to_skip = models.IntegerField(null=False, default=2)
created_at = models.DateTimeField(auto_now_add=True)
current_song = models.CharField(max_length=50, null=True)
def __str__(self):
return self.code
|
[
"58590243+pr1nce07@users.noreply.github.com"
] |
58590243+pr1nce07@users.noreply.github.com
|
4226d913c82fc3fd3d68a44df6697fe697b6cc5c
|
ca5b5c217e0053645c2664d777699e9a5050715e
|
/tex/gen_links.py
|
2a733f8bffe856d7ac4c2dffecd46daa7733bfae
|
[
"MIT"
] |
permissive
|
rodluger/starrynight
|
1405ffdb5a0dd0fefc0ae34e7cdaf7eab4735356
|
d3f015e466621189cb271d4d18b538430b14a557
|
refs/heads/master
| 2021-10-26T03:32:15.220725
| 2021-10-22T15:16:48
| 2021-10-22T15:16:48
| 236,542,672
| 7
| 1
|
MIT
| 2020-06-03T19:51:10
| 2020-01-27T16:58:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 881
|
py
|
from __future__ import print_function
import subprocess
import os
# Generate the github links
hash = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("utf-8")[:-1]
slug = "rodluger/starrynight"
with open("gitlinks.tex", "w") as f:
print(
r"\newcommand{\codelink}[1]{\href{https://github.com/%s/blob/%s/tex/figures/#1.py}{\codeicon}\,\,}"
% (slug, hash),
file=f,
)
print(
r"\newcommand{\animlink}[1]{\href{https://github.com/%s/blob/%s/tex/figures/#1.gif}{\animicon}\,\,}"
% (slug, hash),
file=f,
)
print(
r"\newcommand{\prooflink}[1]{\href{https://github.com/%s/blob/%s/tex/proofs/#1.ipynb}{\raisebox{-0.1em}{\prooficon}}}"
% (slug, hash),
file=f,
)
print(
r"\newcommand{\cilink}[1]{\href{https://dev.azure.com/%s/_build}{#1}}" % (slug),
file=f,
)
|
[
"rodluger@gmail.com"
] |
rodluger@gmail.com
|
a40845fe784984a2a2ef36f79556424959d0fcd3
|
5689bffe9a9594e52f934542994db464ed095d71
|
/08_unittest/test05_assert_exercises.py
|
1d6a1bd6fa8d1033455a67c98cde5e33428fe349
|
[] |
no_license
|
WenhaoChen0907/Web-automation
|
5c0e2c61e247f32b0e5f2f2a33c9f8cc6e73dc20
|
5488f2c62016f02c934b709e7e9e6ea831d9891c
|
refs/heads/master
| 2023-03-07T13:31:10.265019
| 2021-02-15T06:33:50
| 2021-02-15T06:33:50
| 338,986,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,618
|
py
|
# iwebshop happy-path login test practice
import unittest
import sys
import time
from time import sleep
from selenium import webdriver
class IwebLogin(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.maximize_window()
self.driver.implicitly_wait(30)
self.driver.get("http://localhost/iwebshop/")
def tearDown(self):
sleep(2)
self.driver.quit()
def testLogin(self):
driver = self.driver
driver.find_element_by_link_text("登录").click()
driver.find_element_by_css_selector("input[alt*='邮箱']").send_keys("admin")
driver.find_element_by_css_selector("input[alt*='密码']").send_keys("123456")
driver.find_element_by_css_selector(".submit_login").click()
sleep(3)
        # get the logged-in user info text
        text = driver.find_element_by_css_selector(".loginfo").text
        # assert the expected user name appears
try:
self.assertIn("admin", text)
except AssertionError:
# driver.get_screenshot_as_file("../images/img2.jpg")
            # add a dynamic timestamp to the screenshot name (recommended approach)
            now = time.strftime("%Y_%m_%d %H_%M_%S")
            # append the assertion error message to the screenshot name
            rep = sys.exc_info()[1]
            driver.get_screenshot_as_file("../images/%s--%s.jpg" % (now, rep))
            # re-raise the assertion error
raise AssertionError
sleep(3)
driver.find_element_by_css_selector(".reg").click()
if __name__ == '__main__':
    # call main() to run every test* method in this unittest module
unittest.main()
|
[
"18738127274@163.com"
] |
18738127274@163.com
|
61b227fb19c0098e0d8449df91b59cc77ac3049d
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_142/662.py
|
e253a45877c6e9142d258233d25715ca05f57e07
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,452
|
py
|
if __name__ == "__main__":
with open("A-small-attempt1.in", 'r') as inputf:
outputf=open("A_out.out",'w')
line=inputf.readline()
line=line.rstrip('\n')
test_num=int(line)
for test in range(test_num):
line = inputf.readline()
line = line.rstrip('\n')
n = int(line)
analysis = [[[[]]],[[[]]]]
j = [0, 0]
for i in range(n):
line = inputf.readline()
line = line.rstrip('\n')
temp = line[0]
analysis[i][0][0]=temp
count = 0
char_c = len(line)
for char in line:
if char == temp:
count = count + 1
else:
analysis[i][j[i]].append(count)
temp = char
j[i] = j[i]+1
count = 1
analysis[i].append([temp])
char_c = char_c-1
if char_c == 0:
analysis[i][j[i]].append(count)
change = 0
pos = True
if j[0]!=j[1]:
result = "Case #%d: Fegla Won"%(test+1)
outputf.write(result)
pos = False
else:
for k in range(j[0]+1):
if analysis[0][k][0] != analysis[1][k][0]:
result = "Case #%d: Fegla Won"%(test+1)
outputf.write(result)
pos = False
break
else:
if analysis[0][k][1] > analysis[1][k][1]:
change = change + analysis[0][k][1] - analysis[1][k][1]
else:
change = change - analysis[0][k][1] + analysis[1][k][1]
if pos == True:
result = "Case #%d: %d" %(test+1, change)
outputf.write(result)
if test != test_num - 1:
outputf.write('\n')
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
9706c26c8869c0333343a0dae2cbbd2467b37e93
|
94a2c4417c1fdd8577a75b09a17912ebae129e6c
|
/test/test_prop_is.py
|
e211fe127da287bfb4f0504b7a588929a7f6c795
|
[
"MIT"
] |
permissive
|
slavaGanzin/ramda.py
|
ad88a3cf6e7eb1461d4a09aad35ae1c18ca32db8
|
634bfbe0dcb300315ded327756cb3e33241589b8
|
refs/heads/master
| 2023-01-23T04:43:48.485314
| 2023-01-06T10:11:53
| 2023-01-06T10:11:53
| 142,413,822
| 68
| 7
|
MIT
| 2021-12-22T13:59:56
| 2018-07-26T08:43:31
|
Python
|
UTF-8
|
Python
| false
| false
| 278
|
py
|
from ramda import *
from ramda.private.asserts import *
from numbers import Number
def test_prop_is():
assert_equal(prop_is(Number, "x", {"x": 1, "y": 2}), True)
assert_equal(prop_is(Number, "x", {"x": "foo"}), False)
assert_equal(prop_is(Number, "x", {}), False)
|
[
"slava.ganzin@gmail.com"
] |
slava.ganzin@gmail.com
|
326f720d3f00ce6fea68425c9d1ebfbc2906b8df
|
9de9bcd87e3f15f743de436d669feb979e55f005
|
/timesketch/lib/analyzers/ssh_sessionizer_test.py
|
a432041fce5b9ee04a020c5228287f633afbcdc1
|
[
"Apache-2.0"
] |
permissive
|
jorlamd/timesketch
|
97b1f08e9797837672a51bc817426ae61f5fb529
|
c7704bede82747d42a8579a264d2b385b93d6dee
|
refs/heads/master
| 2020-12-04T02:54:57.496194
| 2019-11-12T21:07:21
| 2019-11-12T21:07:21
| 230,008,261
| 0
| 0
|
Apache-2.0
| 2019-12-24T22:09:17
| 2019-12-24T22:09:16
| null |
UTF-8
|
Python
| false
| false
| 6,551
|
py
|
"""Tests for SSHSessionizerSketchPlugin"""
from __future__ import unicode_literals
import mock
from timesketch.lib.analyzers.ssh_sessionizer import SSHSessionizerSketchPlugin
from timesketch.lib.testlib import BaseTest
from timesketch.lib.testlib import MockDataStore
# TODO: _create_mock_event will be renamed in another pull request. Its name
# should also be changed here.
from timesketch.lib.analyzers.sequence_sessionizer_test \
import _create_mock_event
# Message attributes for events that represent one mock SSH session.
one_ssh_session_args = [{
'message':
'[sshd] [1]: Connection from 1.1.1.1 port 1 on 1.1.1.1 port 1'
}, {
'message': '[sshd] [1]: Accepted certificate ID'
}]
# Message attributes for events that represent two mock SSH sessions.
many_ssh_session_args = [{
'message':
'[sshd] [1]: Connection from 1.1.1.1 port 1 on 1.1.1.1 port 1'
}, {
'message': '[sshd] [1]: Accepted certificate ID'
}, {
'message':
'[sshd] [2]: Connection from 2.2.2.2 port 2 on 2.2.2.2 port 2'
}, {
'message': '[sshd] [2]: Accepted certificate ID'
}]
# Message attributes for a SSH event that is not a connection SSH event
no_ssh_session_args = [{
'message': '[sshd] [0]: Loaded keys'
}]
class TestSSHSessionizerPlugin(BaseTest):
"""Tests the functionality of the ssh sessionizing sketch analyzer."""
@mock.patch('timesketch.lib.analyzers.interface.ElasticsearchDataStore',
MockDataStore)
def test_sessionizer(self):
"""Test basic ssh sessionizer functionality."""
index = 'test_index'
sketch_id = 1
sessionizer = SSHSessionizerSketchPlugin(index, sketch_id)
self.assertIsInstance(sessionizer, SSHSessionizerSketchPlugin)
self.assertEqual(index, sessionizer.index_name)
self.assertEqual(sketch_id, sessionizer.sketch.id)
@mock.patch('timesketch.lib.analyzers.interface.ElasticsearchDataStore',
MockDataStore)
def test_session_starts_with_connection_event(self):
"""Test a session is created if it starts with SSH connection event."""
index = 'test_index'
sketch_id = 1
sessionizer = SSHSessionizerSketchPlugin(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore, 0, 1, one_ssh_session_args)
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of ssh_session sessions created: 1'
)
session_id = '1.1.1.1_1'
#pylint: disable=unexpected-keyword-arg
event = datastore.get_event('test_index', '0', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id)
@mock.patch('timesketch.lib.analyzers.interface.ElasticsearchDataStore',
MockDataStore)
def test_all_events_from_session_are_labeled(self):
"""Test one SSH session of events is finded and allocated correctly."""
index = 'test_index'
sketch_id = 1
sessionizer = SSHSessionizerSketchPlugin(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore, 0, 2, one_ssh_session_args, [1])
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of ssh_session sessions created: 1'
)
session_id = '1.1.1.1_1'
#pylint: disable=unexpected-keyword-arg
event = datastore.get_event('test_index', '0', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id)
event = datastore.get_event('test_index', '101', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id)
@mock.patch('timesketch.lib.analyzers.interface.ElasticsearchDataStore',
MockDataStore)
def test_session_doesnt_start_with_no_connection_event(self):
"""Test a session is not created if it doesn't start with SSH connection
event."""
index = 'test_index'
sketch_id = 1
sessionizer = SSHSessionizerSketchPlugin(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore, 0, 1, no_ssh_session_args)
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of ssh_session sessions created: 0'
)
#pylint: disable=unexpected-keyword-arg
event = datastore.get_event('test_index', '0', stored_events=True)
self.assertNotIn('session_id', event['_source'])
@mock.patch('timesketch.lib.analyzers.interface.ElasticsearchDataStore',
MockDataStore)
def test_multiple_sessions(self):
"""Test multiple sessions are found and allocated correctly."""
index = 'test_index'
sketch_id = 1
sessionizer = SSHSessionizerSketchPlugin(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore,
0,
4,
many_ssh_session_args,
time_diffs=[1, 1, 1])
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of ssh_session sessions created: 2'
)
session_id_1 = '1.1.1.1_1'
session_id_2 = '2.2.2.2_2'
#pylint: disable=unexpected-keyword-arg
event = datastore.get_event('test_index', '0', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id_1)
event = datastore.get_event('test_index', '101', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id_1)
event = datastore.get_event('test_index', '202', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id_2)
event = datastore.get_event('test_index', '303', stored_events=True)
self.assertEqual(event['_source']['session_id']['ssh_session'],
session_id_2)
|
[
"tomchop@gmail.com"
] |
tomchop@gmail.com
|
b3eecc48b5a6655fb0ae16960cff65aa207ed89d
|
a6ef13387c24c719a0dcfeb173521cd70beac282
|
/devops/day4/ding_Robot.py
|
8f01ac1bc53391322f4ad3edd35ab0fd70672935
|
[] |
no_license
|
youjiahe/python
|
f60472d61daf58b7f5bb6aa557949de4babf8c9c
|
74eb4c5ba211ae5ffed2040576e5eead75d16e7d
|
refs/heads/master
| 2020-03-31T02:35:55.787809
| 2019-12-02T16:32:54
| 2019-12-02T16:32:54
| 151,831,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
#!/usr/bin/env python3
import json
import requests
import sys
def send_msg(url, reminders, msg):
headers = {'Content-Type':'application/json;charset=utf-8'}
data={
"msgtype": "text", # 发送消息类型为文本
"at": {
"atMobiles": reminders,
"isAtAll": False, # 不@所有人
},
"text": {
"content": msg, # 消息正文
}
}
r = requests.post(url,data=json.dumps(data),headers=headers)
return r.text
if __name__ == '__main__':
msg = sys.argv[1]
reminders= ['13676240551']
url = 'https://oapi.dingtalk.com/robot/send?access_token=47f4ae71f59ee1624cf30a4f6a4641fac15478aeec406c7f952556906096d790'
print(send_msg(url,reminders,msg))
|
[
"youjiahe@163.com"
] |
youjiahe@163.com
|
15fcc498298fb27365a93e3595794528564152ce
|
9a2fd5e27d3f811cb18763ed388c2d56ae9907b6
|
/爬虫练习/gupiao.py
|
ee4595015acd484f424596fda32dc78170398d30
|
[] |
no_license
|
wzc-ob/PycharmProjects
|
5297ce60bade883495e5dbdb614131d31c47682e
|
09f5ad6004dbdc83d456cabd78b769fde13d5357
|
refs/heads/master
| 2020-05-05T07:12:38.789400
| 2019-04-06T10:06:08
| 2019-04-06T10:06:08
| 179,817,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,007
|
py
|
import re
import requests
from bs4 import BeautifulSoup
import traceback
def getHTMLText(url,code = 'UTF-8'):
try:
kv = {'user-agent': 'Mozilla/5.0'}
r = requests.get(url, headers=kv, timeout=30)
r.encoding = code
# print(r.text)
return r.text
except:
return ""
def getStockList(lst,stockURL):
html = getHTMLText(stockURL,'GB2312')
soup = BeautifulSoup(html,'html.parser')
a = soup.find_all('a')
for i in a:
try:
href = i.attrs['href']
lst.append(re.findall(r'[s][hz]\d{6}',href)[0])
except:
continue
def getStockInfo(lst,stockURL,fpath):
count = 0
for stock in lst:
url = stockURL +stock +".html"
print(url)
html = getHTMLText(url)
try:
if html =='':
continue
infoDict = {}
soup = BeautifulSoup(html,'html.parser')
stockInfo = soup.find('div',attrs={'class':'stock-bets'})
name = stockInfo.find_all(attrs = {'class':'bets-name'})[0]
print(name.text.split()[0])
infoDict.update({'股票名称':name.text.split()[0]})
keyList = stockInfo.find_all('dt')
valueList = stockInfo.find_all('dd')
for i in range(len(keyList)):
key = keyList[i].text
val = valueList[i].text
infoDict[key] = val
with open(fpath,'a',encoding='UTF-8') as f:
f.write(str(infoDict) +'\n')
count = count+1
print('\r当前进度:{:.2f}%'.format(count*100/len(lst)),end='')
except:
traceback.print_exc()
continue
def main():
stock_list_url = 'http://quote.eastmoney.com/stocklist.html'
stock_info_url = 'https://gupiao.baidu.com/stock/'
output_file = 'E://BaiduStockInfo(1).txt'
slist = []
getStockList(slist,stock_list_url)
getStockInfo(slist,stock_info_url,output_file)
main()
|
[
"43775612+wzc-ob@users.noreply.github.com"
] |
43775612+wzc-ob@users.noreply.github.com
|
edcb1a2c177f6634d25b679f32eaa3d10997b8ca
|
b6aed63c49d24b4c3e2d5be6795ecbcf0a793653
|
/examples/feature_engineering/get_scdv.py
|
13cd8123885cea7a8d6159052e017ea37f9643c2
|
[] |
no_license
|
sidhee-hande/nlp-recipes-ja
|
713f053a3cc907a314c6575a0ce65de2b36076c9
|
8ac5e898864137841de8b03c11da34815009af24
|
refs/heads/master
| 2023-04-25T03:41:33.536244
| 2021-04-10T23:07:45
| 2021-04-10T23:07:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
from konoha import WordTokenizer
import neologdn
import numpy as np
from utils_nlp.dataset.livedoor import load_pandas_df
from utils_nlp.features import scdv
from utils_nlp.models.pretrained_embeddings.word2vec import load_pretrained_vectors
if __name__ == '__main__':
df = load_pandas_df(nrows=10)
# Normalization
df['text'] = df['text'].apply(neologdn.normalize)
tokenizer = WordTokenizer('MeCab')
    docs = np.array([
        list(map(str, tokenizer.tokenize(text))) for text in df['text']
    ])
    print(docs.shape)
    # (10,)
    word_vec = load_pretrained_vectors('data')
    # avoid shadowing the imported `scdv` module with the resulting feature array
    scdv_features = scdv.create(docs, word_vec, n_components=10)
    print(scdv_features.shape)
# (10, 3000)
|
[
"upura0@gmail.com"
] |
upura0@gmail.com
|
71bf52c3f75e834fe7938987cc7b559aa46b54db
|
ab0e9b543852bc2d3c828b2351c30d1626f0b321
|
/CustomProceduralRiggingTool/CustomProceduralRigTool/rigLib/base/controlShape/unitSliderControl.py
|
f55103622c28f26d51caf910f83abbbaf7302f2a
|
[] |
no_license
|
tHeBeStXu/CustomProceduralRigTool
|
397011b9519a3e5382aec5aee6115f3e6a14a802
|
003fa61b460d8e76c026f47913ebdab5c0cbfef8
|
refs/heads/master
| 2021-07-13T09:02:07.697909
| 2020-07-09T07:28:27
| 2020-07-09T07:28:27
| 157,082,564
| 15
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
import maya.cmds as cmds
def createShape(prefix=''):
"""
create a unit slider for blend operation
:param prefix: str, prefix of the control
:return: str, ctrlBox of the unitSliderControl
"""
Ctrl = cmds.circle(radius=0.2, nr=(1, 0, 0), n=prefix + '_Ctrl')[0]
cmds.transformLimits(Ctrl, tx=(0, 0), ty=(0, 1), tz=(0, 0), etx=(1, 1), ety=(1, 1), etz=(1, 1))
CtrlBox = cmds.curve(d=1, p=[(0, 0, 0), (0, 1, 0)], k=[0, 1], n=prefix + '_CtrlBox')
parentCrvShape = cmds.listRelatives(CtrlBox, s=1)
cmds.setAttr(parentCrvShape[0] + '.template', 1)
cmds.parent(Ctrl, CtrlBox)
cmds.makeIdentity(CtrlBox, apply=1, t=1, r=1, s=1, n=0)
cmds.select(cl=1)
return CtrlBox
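if __name__ == '__main__':
    # Hedged usage sketch (assumes this runs inside a Maya session where maya.cmds is
    # available): build one unit slider and report the returned control box.
    demo_box = createShape(prefix='l_blink')
    print('Created unit slider control box: ' + demo_box)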
|
[
"328665042@qq.com"
] |
328665042@qq.com
|
249edc0e5fb7c5fae23b6d8c5752ffa60b404a5b
|
60aa3bcf5ace0282210685e74ee8ed31debe1769
|
/base/lib/uu.py
|
6ee9f9acad9bccce569ad6152e8da80b8e368319
|
[] |
no_license
|
TheBreadGuy/sims4-ai-engine
|
42afc79b8c02527353cc084117a4b8da900ebdb4
|
865212e841c716dc4364e0dba286f02af8d716e8
|
refs/heads/master
| 2023-03-16T00:57:45.672706
| 2016-05-01T17:26:01
| 2016-05-01T17:26:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,505
|
py
|
import binascii
import os
import sys
__all__ = ['Error', 'encode', 'decode']
class Error(Exception):
__qualname__ = 'Error'
def encode(in_file, out_file, name=None, mode=None):
opened_files = []
try:
if in_file == '-':
in_file = sys.stdin.buffer
elif isinstance(in_file, str):
if name is None:
name = os.path.basename(in_file)
if mode is None:
try:
mode = os.stat(in_file).st_mode
except AttributeError:
pass
in_file = open(in_file, 'rb')
opened_files.append(in_file)
if out_file == '-':
out_file = sys.stdout.buffer
elif isinstance(out_file, str):
out_file = open(out_file, 'wb')
opened_files.append(out_file)
if name is None:
name = '-'
if mode is None:
mode = 438
out_file.write(('begin %o %s\n' % (mode & 511, name)).encode('ascii'))
data = in_file.read(45)
while len(data) > 0:
out_file.write(binascii.b2a_uu(data))
data = in_file.read(45)
out_file.write(b' \nend\n')
finally:
for f in opened_files:
f.close()
def decode(in_file, out_file=None, mode=None, quiet=False):
opened_files = []
if in_file == '-':
in_file = sys.stdin.buffer
elif isinstance(in_file, str):
in_file = open(in_file, 'rb')
opened_files.append(in_file)
try:
while True:
hdr = in_file.readline()
if not hdr:
raise Error('No valid begin line found in input file')
if not hdr.startswith(b'begin'):
continue
hdrfields = hdr.split(b' ', 2)
if len(hdrfields) == 3 and hdrfields[0] == b'begin':
try:
int(hdrfields[1], 8)
break
except ValueError:
pass
if out_file is None:
out_file = hdrfields[2].rstrip(b' \t\r\n\x0c').decode('ascii')
if os.path.exists(out_file):
raise Error('Cannot overwrite existing file: %s' % out_file)
if mode is None:
mode = int(hdrfields[1], 8)
if out_file == '-':
out_file = sys.stdout.buffer
elif isinstance(out_file, str):
fp = open(out_file, 'wb')
try:
os.path.chmod(out_file, mode)
except AttributeError:
pass
out_file = fp
opened_files.append(out_file)
        s = in_file.readline()
        while s and s.strip(b' \t\r\n\x0c') != b'end':
            try:
                data = binascii.a2b_uu(s)
            except binascii.Error as v:
                nbytes = ((s[0] - 32 & 63)*4 + 5)//3
                data = binascii.a2b_uu(s[:nbytes])
                if not quiet:
                    sys.stderr.write('Warning: %s\n' % v)
            out_file.write(data)
            s = in_file.readline()
        if not s:
            raise Error('Truncated input file')
finally:
for f in opened_files:
f.close()
def test():
import optparse
parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]')
parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true')
parser.add_option('-t', '--text', dest='text', help='data is text, encoded format unix-compatible text?', default=False, action='store_true')
(options, args) = parser.parse_args()
if len(args) > 2:
parser.error('incorrect number of arguments')
sys.exit(1)
input = sys.stdin.buffer
output = sys.stdout.buffer
if len(args) > 0:
input = args[0]
if len(args) > 1:
output = args[1]
if options.decode:
if options.text:
if isinstance(output, str):
output = open(output, 'wb')
else:
print(sys.argv[0], ': cannot do -t to stdout')
sys.exit(1)
decode(input, output)
else:
if options.text:
if isinstance(input, str):
input = open(input, 'rb')
else:
print(sys.argv[0], ': cannot do -t from stdin')
sys.exit(1)
encode(input, output)
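def _roundtrip_demo():
    # Hedged in-memory round trip (not part of the original module): uuencode a byte
    # string into a buffer, then decode it back and verify the payload is unchanged.
    import io
    src = io.BytesIO(b'hello uuencode')
    encoded = io.BytesIO()
    encode(src, encoded, name='demo.txt', mode=0o644)
    encoded.seek(0)
    decoded = io.BytesIO()
    decode(encoded, decoded, quiet=True)
    assert decoded.getvalue() == b'hello uuencode'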
if __name__ == '__main__':
test()
|
[
"jp@bellgeorge.com"
] |
jp@bellgeorge.com
|
92df5f0ae14e23c0600fd57b407368f340103547
|
4b431704fa58900a7b848aada3d10949be76ba65
|
/student/views.py
|
8ed0983ae38fa181f3a834d1c67585b80d645e7b
|
[] |
no_license
|
priyankaonly1/Session_project
|
1b5e48a77753cfa87c93fff7463d758cf0f1dcd8
|
41529270c0390627824b6de1aed6fdf4bb75a95c
|
refs/heads/main
| 2023-06-03T04:21:38.411008
| 2021-06-17T10:32:13
| 2021-06-17T10:32:13
| 377,792,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 677
|
py
|
from django.shortcuts import render
# Create your views here.
def setsession(request):
request.session['name'] = 'sonam'
request.session['lname'] = 'Jha'
return render(request, 'student/setsession.html')
# def delsession(request):
# if 'name' in request.session:
# del request.session['name']
# return render(request, 'student/delsession.html')
def getsession(request):
name = request.session.get('name')
lname = request.session.get('lname')
return render(request, 'student/getsession.html', {'name':name, 'lname':lname})
def delsession(request):
request.session.flush()
return render(request, 'student/delsession.html')
|
[
"priyankabiswasonly1@gmail.com"
] |
priyankabiswasonly1@gmail.com
|
5f78dc2017f0e9588d5ed2188d02785b189ec637
|
0bb474290e13814c2498c086780da5096453da05
|
/abc133/E/main.py
|
20212d42d80ef6a27ba00b0743cbd41a23b91777
|
[] |
no_license
|
ddtkra/atcoder
|
49b6205bf1bf6a50106b4ae94d2206a324f278e0
|
eb57c144b5c2dbdd4abc432ecd8b1b3386244e30
|
refs/heads/master
| 2022-01-25T15:38:10.415959
| 2020-03-18T09:22:08
| 2020-03-18T09:22:08
| 208,825,724
| 1
| 0
| null | 2022-01-21T20:10:20
| 2019-09-16T14:51:01
|
Python
|
UTF-8
|
Python
| false
| false
| 790
|
py
|
#!/usr/bin/env python3
import sys
MOD = 1000000007 # type: int
def solve(N: int, K: int, a: "List[int]", b: "List[int]"):
return
# Generated by 1.1.4 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
K = int(next(tokens)) # type: int
a = [int()] * (N-1) # type: "List[int]"
b = [int()] * (N-1) # type: "List[int]"
for i in range(N-1):
a[i] = int(next(tokens))
b[i] = int(next(tokens))
solve(N, K, a, b)
if __name__ == '__main__':
main()
|
[
"deritefully@gmail.com"
] |
deritefully@gmail.com
|
ee5638f427e266afc5d5855606c34b7c76ac09b2
|
c68d36ed1d36ede96a5a22e1052c73b8515feaae
|
/HyperNews Portal/task/hypernews/news/views.py
|
3cefca44c6ba79a74ca61d32697ea15338fb602a
|
[] |
no_license
|
wangpengda1210/HyperNews-Portal
|
dd531889666794c11158dc92a9dcdb03293d409b
|
436e257dd315999187650dedf3dce2ff12267a77
|
refs/heads/main
| 2023-03-03T03:22:59.644304
| 2021-02-09T00:19:23
| 2021-02-09T00:19:23
| 336,978,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
from django.shortcuts import render
from django.views import View
from django.http import Http404, QueryDict
from django.shortcuts import redirect
import datetime
from collections import defaultdict
import json
from hypernews.settings import NEWS_JSON_PATH
with open(NEWS_JSON_PATH, 'r') as f:
news_list = json.load(f)
for news in news_list:
news['created'] = datetime.datetime.strptime(news['created'], '%Y-%m-%d %H:%M:%S')
# Create your views here.
class IndexView(View):
def get(self, request, *args, **kwargs):
return redirect("news/")
class NewsContentView(View):
def get(self, request, link, *args, **kwargs):
for news in news_list:
if int(link) == news['link']:
return render(request, 'news/news_content.html',
context={
'title': news['title'],
'created': news['created'],
'text': news['text']
})
raise Http404
class AllNewsView(View):
def get(self, request, *args, **kwargs):
query_dict = request.GET
keyword = query_dict['q'] if 'q' in query_dict else ''
times = defaultdict()
for news in news_list:
if keyword in news['title']:
times.setdefault(news['created'].date(), []).append(news)
time_dict = [{'created': key, 'value': value} for key, value in times.items()]
return render(request, 'news/news_all.html',
context={'time_dict': time_dict})
class CreateNewsView(View):
def get(self, request, *args, **kwargs):
return render(request, 'news/news_create.html')
def post(self, request, *args, **kwargs):
title = request.POST.get('title')
text = request.POST.get('text')
created = datetime.datetime.now()
news_list.append({'title': title,
'text': text,
'created': created,
'link': len(news_list) + 1})
return redirect('/news/')
|
[
"515484505@qq.com"
] |
515484505@qq.com
|
85fd333d2b6f43110d9c7b7171b122dfcdc0a466
|
e19527d95fb2105a09bc1435146a1148bfe01476
|
/utils/general.py
|
122f37a50fc4e4ba87c3765b807a74616dfeb9fd
|
[] |
no_license
|
shuaih7/ishop_ocr
|
7da1bc8f3f764853d7c0151e784b821cc3d4b58c
|
57e80d336f1362adefeb57a13fa4ca4d2cfd265f
|
refs/heads/main
| 2023-02-22T15:50:36.294246
| 2021-01-28T03:46:36
| 2021-01-28T03:46:36
| 329,258,528
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,469
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 01.24.2021
Author: haoshaui@handaotech.com
'''
import os
import cv2
import sys
import numpy as np
abs_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(abs_path)
def draw_results(image, results, isClosed=True, size=0.6, color=(0,255,0), thickness=3):
font = cv2.FONT_HERSHEY_SIMPLEX
for result in results:
line = np.array(result[0], dtype=np.int32)
pt = (int(line[0][0]), int(line[0][1]))
line = line.reshape((-1,1,2))
image = cv2.polylines(image, [line], isClosed=isClosed, color=color, thickness=thickness)
image = cv2.putText(image, result[1][0], pt, fontFace=font,
fontScale=size, color=color, thickness=max(1,thickness-1))
return image
def draw_polylines(image, polylines, texts=None, isClosed=True, size=0.6, color=(0,255,0), thickness=3):
font = cv2.FONT_HERSHEY_SIMPLEX
polylines = np.array(polylines, dtype=np.int32)#.reshape((-1,1,2))
for i, line in enumerate(polylines):
pt = (int(line[0][0]), int(line[0][1]))
line = line.reshape((-1,1,2))
image = cv2.polylines(image, [line], isClosed=isClosed, color=color, thickness=thickness)
if texts is not None:
image = cv2.putText(image, texts[i], pt, fontFace=font,
fontScale=size, color=color, thickness=max(1,thickness-1))
return image
def draw_texts(image, texts, positions, size=0.6, color=(0,255,0), thickness=3):
font = cv2.FONT_HERSHEY_SIMPLEX
for pos, text in zip(positions, texts):
pt = (int(pos[0]), int(pos[1]))
image = cv2.putText(image, text, pt, fontFace=font, fontScale=size, color=color, thickness=max(1,thickness-1))
return image
def draw_boxes(image, boxes=[], scale=(1.0,1.0), color=(255,0,0), thickness=2):
if len(boxes) == 0: return image
for box in boxes:
start_point = (int(box[0]*scale[1]), int(box[1]*scale[0]))
end_point = (int(box[2]*scale[1]), int(box[3]*scale[0]))
image = cv2.rectangle(image, start_point, end_point, color=color, thickness=thickness)
return image
def create_background(size, seed=0):
image = np.ones(size, dtype=np.uint8) * seed
save_dir = os.path.join(abs_path, "icon")
save_name = os.path.join(save_dir, "background.jpg")
cv2.imwrite(save_name, image)
def transparent_background(img_file, save_name, thresh=10):
image = cv2.imread(img_file, cv2.IMREAD_COLOR)
image_gray = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
trans_image = np.zeros((image.shape[0],image.shape[1],4), dtype=np.uint8)
alpha = np.ones(image_gray.shape, dtype=np.uint8) * 255
alpha[image_gray>(255-thresh)] = 0
trans_image[:,:,:3] = image
trans_image[:,:,-1] = alpha
cv2.imwrite(save_name, trans_image)
print("Done")
def resize_image(img_file, save_name, size=(100,100)):
image = cv2.imread(img_file, -1)
image = cv2.resize(image, size, interpolation=cv2.INTER_CUBIC)
cv2.imwrite(save_name, image)
print("Done")
if __name__ == "__main__":
#create_background((352,352))
img_file = r"C:\Users\shuai\Documents\GitHub\FabricUI\FabricUI\icon\folder.jpg"
save_name = r"C:\Users\shuai\Documents\GitHub\FabricUI\FabricUI\icon\folder_icon.png"
#resize_image(img_file, save_name)
transparent_background(img_file, save_name)
|
[
"shuaih7@gmail.com"
] |
shuaih7@gmail.com
|
df0df3a114e599c36a4d9a1fef81af871183c836
|
c82a04b8aa975b1596e48e13deaf5f11a2ae94ba
|
/test.py
|
99b9847323d2a912600184ba1f913a0369ba9259
|
[
"MIT"
] |
permissive
|
budsus/CodeSearchNet
|
466e6d06b8b0f08f418906151af6018cc7253ca1
|
d79d0fde2569e4ed7ab0454e3b019fba3d6c7b90
|
refs/heads/master
| 2023-03-17T07:48:40.451414
| 2019-12-12T13:08:47
| 2019-12-12T13:08:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
import torch
# Toy mean-reciprocal-rank (MRR) computation: 10 samples, 5 classes.
x = torch.randn(10, 5)
print(x)
labels = torch.LongTensor([1,2,3,3,0,0,0,0,0,0])
n_classes = x.shape[-1]
# One-hot mask selecting each sample's true-class score.
one_hot = torch.nn.functional.one_hot(labels, n_classes)
print(one_hot)
print(x * one_hot)
# Broadcast each sample's true-class score across all classes for comparison.
compare = (x * one_hot).sum(-1).unsqueeze(-1).repeat(1, n_classes)
print(compare)
# Rank of the true class = number of scores >= its own score (itself included).
compared_scores = x >= compare
print(compared_scores)
# Reciprocal rank per sample, then the mean over all samples.
rr = 1 / compared_scores.float().sum(-1)
print(rr)
mrr = rr.mean()
print(mrr)
|
[
"bentrevett@gmail.com"
] |
bentrevett@gmail.com
|
3f1e7c2be5e4aad81dc3c4cc8973865624a09628
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03645/s021155194.py
|
844491319f19594eb094e23da7af0f647cb6eb7c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
n, m = map(int, input().split())
root_map = dict()
root_map[1] = set()
root_map[n] = set()
for i in range(m):
a, b = map(int, input().split())
if a == 1 or a == n:
root_map[a].add(b)
if b == 1 or b == n:
root_map[b].add(a)
for i in root_map[1]:
if i in root_map[n]:
print("POSSIBLE")
break
else:
print("IMPOSSIBLE")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b8ea6089fbf982c699ef0f102f4a0842d32f6a53
|
24caa6710105a060fab2e17147e6d56609939011
|
/03-Python_Data_Science_Toolbox_(Part_1)/02-Default_arguments,_variable-length_arguments_and_scope/01-Pop_quiz_on_understanding_scope.py
|
359f983d8c5327a8ca9e09fa52071fdeceb8fece
|
[] |
no_license
|
inverseundefined/DataCamp
|
99607022ad3f899d7681ad1f70fcedab290e269a
|
7226b6b6f41888c3610a884db9a226e013d37e56
|
refs/heads/master
| 2022-01-10T00:53:21.714908
| 2019-07-24T13:27:49
| 2019-07-24T13:27:49
| 198,280,648
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,022
|
py
|
'''
Pop quiz on understanding scope
In this exercise, you will practice what you've learned about scope in functions. The variable num has been predefined as 5, alongside the following function definitions:
def func1():
num = 3
print(num)
def func2():
global num
double_num = num * 2
num = 6
print(double_num)
Try calling func1() and func2() in the shell, then answer the following questions:
What are the values printed out when you call func1() and func2()?
What is the value of num in the global scope after calling func1() and func2()?
Instructions
50 XP
Possible Answers
func1() prints out 3, func2() prints out 6, and the value of num in the global scope is 3.
func1() prints out 3, func2() prints out 3, and the value of num in the global scope is 3.
func1() prints out 3, func2() prints out 10, and the value of num in the global scope is 10.
-> func1() prints out 3, func2() prints out 10, and the value of num in the global scope is 6.
Take Hint (-15 XP)
'''
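# Hedged demonstration (not part of the original exercise notes): reproduce the quiz
# setup and verify the selected answer.
num = 5
def func1():
    num = 3
    print(num)
def func2():
    global num
    double_num = num * 2
    num = 6
    print(double_num)
func1()      # prints 3  (the local num shadows the global one)
func2()      # prints 10 (2 * the global num, which is still 5 at this point)
print(num)   # prints 6  (func2 rebound the global num via the global statement)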
|
[
"inversedrivenundefined@gmail.com"
] |
inversedrivenundefined@gmail.com
|
24524f83587d385ff97aec5e49d9379dfb3f883b
|
b8085ef607da70023214f105eb27bdbc713e596f
|
/Day2/Slots.py
|
db6ff0a4f8e7383e149a01736bdb559e14f236c2
|
[] |
no_license
|
artheadsweden/python_adv_april19
|
893c9ec76e8505a580439b7a2fd7aa2776503c77
|
04eecd25d4a291dddd608d94968b217fed7b88d8
|
refs/heads/master
| 2020-05-07T13:41:15.545033
| 2019-04-11T18:47:22
| 2019-04-11T18:47:22
| 180,559,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
from pympler import asizeof
class NoSlots:
def __init__(self, name, identifier):
self.name = name
self.identifier = identifier
class WithSlots:
__slots__ = ['name', 'identifier']
def __init__(self, name, identifier):
self.name = name
self.identifier = identifier
def main():
no_slots = [NoSlots(str(n), n) for n in range(100_000)]
size1 = round(asizeof.asizeof(no_slots)/1024/1024, 2)
print("No slots", size1, "mb")
with_slots = [WithSlots(str(n), n) for n in range(100_000)]
size2 = round(asizeof.asizeof(with_slots)/1024/1024, 2)
print("With slots", size2, "mb")
if __name__ == '__main__':
main()
|
[
"joakim@arthead.se"
] |
joakim@arthead.se
|
8e8665d33a8f3df1f93560af494176e055b876a4
|
81207a57ae84b2b786b373d9eaa89e04ca662473
|
/scripts/update_index.py
|
b4fa72f135adfbde5960f9e2c3f51b20f42df2a6
|
[
"MIT"
] |
permissive
|
ncarkaci/acoustid-server
|
9a9187db34c25a4eedbe297564f9d13f05b9c907
|
bb0098016d210be8d04ee64d9b42ed80bb947280
|
refs/heads/master
| 2020-07-22T18:25:46.258746
| 2019-09-05T11:05:01
| 2019-09-05T11:05:01
| 207,288,602
| 1
| 0
|
MIT
| 2019-09-09T10:58:51
| 2019-09-09T10:58:51
| null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
#!/usr/bin/env python
# Copyright (C) 2011-2012 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
from contextlib import closing
from acoustid.script import run_script
from acoustid.data.fingerprint import update_fingerprint_index
def main(script, opts, args):
with closing(script.engine.connect()) as db:
update_fingerprint_index(db, script.index)
run_script(main)
|
[
"lalinsky@gmail.com"
] |
lalinsky@gmail.com
|
47b13cbf68cba49d07c499ee6026f47fc228aece
|
353def93fa77384ee3a5e3de98cfed318c480634
|
/.history/week02/1/proxy/proxy/spiders/maoyan_20200705155519.py
|
5832d2f7ffe5ee7f1c5b3c601dddf5c249d1eb51
|
[] |
no_license
|
ydbB/Python001-class01
|
d680abc3ea1ccaeb610751e3488421417d381156
|
ad80037ccfc68d39125fa94d2747ab7394ac1be8
|
refs/heads/master
| 2022-11-25T11:27:45.077139
| 2020-07-19T12:35:12
| 2020-07-19T12:35:12
| 272,783,233
| 0
| 0
| null | 2020-06-16T18:28:15
| 2020-06-16T18:28:15
| null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
import scrapy
from proxy.items import ProxyItem
import lxml.etree
class MaoyanSpider(scrapy.Spider):
name = 'maoyan'
allowed_domains = ['maoyan.com']
start_urls = ['http://maoyan.com/']
    # NOTE: the original snapshot left this assignment empty; a generic User-Agent is
    # filled in here as a placeholder assumption so the class definition parses.
    header = {'User-Agent': 'Mozilla/5.0'}
# def parse(self, response):
# pass
def start_requests(self):
url = f'https://maoyan.com/board/4'
yield scrapy.Request(url=url,headers=self.header,callback=self.parse)
    def parse(self, response):
        selector = lxml.etree.HTML(response.text)
        # XPath positions are 1-based, so interpolate the loop index into dd[...]
        for i in range(1, 11):
            item = ProxyItem()
            link = selector.xpath(f'//*[@id="app"]/div/div/div[1]/dl/dd[{i}]/div/div/div[1]/p[1]/a')[0].get('href')
            name = selector.xpath(f'//*[@id="app"]/div/div/div[1]/dl/dd[{i}]/div/div/div[1]/p[1]/a')[0].get('title')
            time = selector.xpath(f'//*[@id="app"]/div/div/div[1]/dl/dd[{i}]/div/div/div[1]/p[3]')[0].text
            item['films_name'] = name
            item['release_time'] = time
            print(link)
            yield scrapy.Request(url=response.urljoin(link), headers=self.header, meta={'item': item}, callback=self.parse1)
def parse1(self, response):
item = response.meta['item']
selector = lxml.etree.HTML(response.text)
        type = selector.xpath('string(/html/body/div[3]/div/div[2]/div[1]/ul/li[1])').replace('\n', ' ')
print(type)
item['films_type'] = type
print(item)
yield item
|
[
"31039587+ydbB@users.noreply.github.com"
] |
31039587+ydbB@users.noreply.github.com
|
85658af6a7b79e5450b577beccbc06522bd0f00d
|
25c1bba5c9954ab757fed0ce3236cd6b3bd50c59
|
/BUILD.cr.py
|
47ccf5633ca37f891f4761834ecae7183d4632fb
|
[] |
no_license
|
craftr-build/craftr-chaiscript
|
c09e32e7ddd72c75d482cd3b627f2183cceaf379
|
09e6434016915c9745e3c841076ad193cdebb9dd
|
refs/heads/master
| 2021-09-04T17:35:54.491031
| 2018-01-20T14:31:21
| 2018-01-20T14:31:21
| 118,172,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,092
|
py
|
import craftr, {fmt, glob, path} from 'craftr'
import cxx from '@craftr/cxx'
source_dir = craftr.options.get('chaiscript.source_dir')
gitref = craftr.options.get('chaiscript.gitref', 'v6.0.0')
if not source_dir:
url = fmt('https://github.com/ChaiScript/ChaiScript/archive/{gitref}.zip')
source_dir = path.join(craftr.get_source_archive(url), 'ChaiScript-' + gitref.lstrip('v'))
defines = []
if craftr.options.get('chaiscript.no_threads', True):
defines.append('CHAISCRIPT_NO_THREADS')
if craftr.options.get('chaiscript.no_protect_dividebyzero', False):
defines.append('CHAISCRIPT_NO_PROTECT_DIVIDEBYZERO')
cxx.prebuilt(
name = 'chaiscript',
includes = [path.join(source_dir, 'include')],
defines = defines
)
cxx.library(
name = 'chaiscript-static',
public_deps = [':chaiscript'],
explicit = True,
srcs = glob('static_libs/*.cpp', parent=source_dir),
cpp_std = 'c++11',
options = dict(
msvc_compile_flags = ['/bigobj']
)
)
cxx.binary(
name = 'main',
deps = [':chaiscript-static'],
explicit = True,
srcs = [path.join(source_dir, 'src/main.cpp')]
)
|
[
"rosensteinniklas@gmail.com"
] |
rosensteinniklas@gmail.com
|